From 99cbdf2afccd829731cf29a9003fe87e4f80dd0a Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Wed, 23 Jun 2021 16:13:22 -0700 Subject: [PATCH 01/89] cli: add curl command Just a hackweek project at this point. --- command/commands.go | 5 + command/curl.go | 436 +++++++++++++++++++++++++ website/content/docs/commands/curl.mdx | 54 +++ website/data/docs-nav-data.json | 4 + 4 files changed, 499 insertions(+) create mode 100644 command/curl.go create mode 100644 website/content/docs/commands/curl.mdx diff --git a/command/commands.go b/command/commands.go index 287cdfaf1..ae770ee57 100644 --- a/command/commands.go +++ b/command/commands.go @@ -214,6 +214,11 @@ func Commands(metaPtr *Meta, agentUi cli.Ui) map[string]cli.CommandFactory { Meta: meta, }, nil }, + "curl": func() (cli.Command, error) { + return &CurlCommand{ + Meta: meta, + }, nil + }, // operator debug was released in 0.12 as debug. This top-level alias preserves compatibility "debug": func() (cli.Command, error) { return &OperatorDebugCommand{ diff --git a/command/curl.go b/command/curl.go new file mode 100644 index 000000000..8fe18a77e --- /dev/null +++ b/command/curl.go @@ -0,0 +1,436 @@ +package command + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/nomad/api" + "github.com/posener/complete" +) + +type CurlCommand struct { + Meta + + verboseFlag bool + method string + body io.Reader +} + +func (*CurlCommand) Help() string { + helpText := ` +Usage: nomad curl [options] + + curl is a utility command for accessing Nomad's HTTP API and is inspired by + the popular curl command line program. Nomad's curl command populates Nomad's + standard environment variables into their appropriate HTTP headers. If the + 'path' does not begin with "http" then $NOMAD_ADDR will be used. + + The 'path' can be in one of the following forms: + + /v1/allocations <- API Paths must start with a / + localhost:4646/v1/allocations <- Scheme will be inferred + https://localhost:4646/v1/allocations <- Scheme will be https:// + + Note that Nomad's curl does not always match the popular curl programs + behavior. Instead Nomad's curl is optimized for common Nomad HTTP API + operations. + +General Options: + + ` + generalOptionsUsage(usageOptsDefault) + ` + +Curl Specific Options: + + -dryrun + Output curl command to stdout and exit. + HTTP Basic Auth will never be output. If the $NOMAD_HTTP_AUTH environment + variable is set, it will be referenced in the appropriate curl flag in the + output. + ACL tokens set via the $NOMAD_TOKEN environment variable will only be + referenced by environment variable as with HTTP Basic Auth above. However + if the -token flag is explicitly used, the token will also be included in + the output. + + -H
+ Adds an additional HTTP header to the request. May be specified more than + once. These headers take precedent over automatically ones such as + X-Nomad-Token. + + -verbose + Output extra information to stderr similar to curl's --verbose flag. + + -X + HTTP method of request. Defaults to GET. +` + + return strings.TrimSpace(helpText) +} + +func (*CurlCommand) Synopsis() string { + return "Query Nomad's HTTP API like curl" +} + +func (c *CurlCommand) AutocompleteFlags() complete.Flags { + return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), + complete.Flags{ + "-dryrun": complete.PredictNothing, + }) +} + +func (c *CurlCommand) AutocompleteArgs() complete.Predictor { + //TODO(schmichael) wouldn't it be cool to build path autocompletion off + // of our http mux? + return complete.PredictNothing +} + +func (*CurlCommand) Name() string { return "curl" } + +func (c *CurlCommand) Run(args []string) int { + var dryrun bool + headerFlags := newHeaderFlags() + + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + flags.BoolVar(&dryrun, "dryrun", false, "") + flags.BoolVar(&c.verboseFlag, "verbose", false, "") + flags.StringVar(&c.method, "X", "", "") + flags.Var(headerFlags, "H", "") + + if err := flags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing flags: %v", err)) + return 1 + } + args = flags.Args() + + if len(args) < 1 { + c.Ui.Error("A path or URL is required") + c.Ui.Error(commandErrorText(c)) + return 1 + } + + if n := len(args); n > 1 { + c.Ui.Error(fmt.Sprintf("curl accepts exactly 1 argument, but %d arguments were found", n)) + c.Ui.Error(commandErrorText(c)) + return 1 + } + + // By default verbose func is a noop + verbose := func(string, ...interface{}) {} + if c.verboseFlag { + verbose = func(format string, a ...interface{}) { + // Use Warn instead of Info because Info goes to stdout + c.Ui.Warn(fmt.Sprintf(format, a...)) + } + } + + // Opportunistically read from stdin and POST unless method has been + // explicitly set. + stat, _ := os.Stdin.Stat() + if (stat.Mode() & os.ModeCharDevice) == 0 { + verbose("* Reading request body from stdin.") + c.body = os.Stdin + if c.method == "" { + c.method = "POST" + } + } else { + c.method = "GET" + } + + config := c.clientConfig() + + // NewClient mutates or validates Config.Address, so call it to match + // the behavior of other commands. + _, err := api.NewClient(config) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %v", err)) + return 1 + } + + path, err := pathToURL(config, args[0]) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error turning path into URL: %v", err)) + return 2 + } + + if dryrun { + out, err := c.apiToCurl(config, headerFlags.headers, path) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error creating curl command: %v", err)) + return 3 + } + c.Ui.Output(out) + return 0 + } + + // Re-implement a big chunk of api/api.go since we don't export it. 
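+	// cleanhttp.DefaultClient returns an *http.Client with its own
+	// non-shared transport, so the TLS settings applied below cannot
+	// leak into any other client in the process.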
+ client := cleanhttp.DefaultClient() + transport := client.Transport.(*http.Transport) + transport.TLSHandshakeTimeout = 10 * time.Second + transport.TLSClientConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + + if err := api.ConfigureTLS(client, config.TLSConfig); err != nil { + c.Ui.Error(fmt.Sprintf("Error configuring TLS: %v", err)) + return 4 + } + + setQueryParams(config, path) + + verbose("> %s %s", c.method, path) + + req, err := http.NewRequest(c.method, path.String(), c.body) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error making request: %v", err)) + return 5 + } + + // Set headers from command line + req.Header = headerFlags.headers + + // Add token header if it doesn't already exist and is set + if req.Header.Get("X-Nomad-Token") == "" && config.SecretID != "" { + req.Header.Set("X-Nomad-Token", config.SecretID) + } + + // Configure HTTP basic authentication if set + if path.User != nil { + username := path.User.Username() + password, _ := path.User.Password() + req.SetBasicAuth(username, password) + } else if config.HttpAuth != nil { + req.SetBasicAuth(config.HttpAuth.Username, config.HttpAuth.Password) + } + + for k, vals := range req.Header { + for _, v := range vals { + verbose("> %s: %s", k, v) + } + } + + verbose("* Sending request and receiving response...") + + // Do the request! + resp, err := client.Do(req) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error performing request: %v", err)) + return 6 + } + defer resp.Body.Close() + + verbose("< %s %s", resp.Proto, resp.Status) + for k, vals := range resp.Header { + for _, v := range vals { + verbose("< %s: %s", k, v) + } + } + + n, err := io.Copy(os.Stdout, resp.Body) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error reading response after %d bytes: %v", n, err)) + return 7 + } + + if len(resp.Trailer) > 0 { + verbose("* Trailer Headers") + for k, vals := range resp.Trailer { + for _, v := range vals { + verbose("< %s: %s", k, v) + } + } + } + + return 0 +} + +// setQueryParams converts API configuration to query parameters. Updates path +// parameter in place. +func setQueryParams(config *api.Config, path *url.URL) { + queryParams := path.Query() + + // Prefer region explicitly set in path, otherwise fallback to config + // if one is set. + if queryParams.Get("region") == "" && config.Region != "" { + queryParams["region"] = []string{config.Region} + } + + // Prefer namespace explicitly set in path, otherwise fallback to + // config if one is set. + if queryParams.Get("namespace") == "" && config.Namespace != "" { + queryParams["namespace"] = []string{config.Namespace} + } + + // Re-encode query parameters + path.RawQuery = queryParams.Encode() +} + +// apiToCurl converts a Nomad HTTP API config and path to its corresponding +// curl command or returns an error. +func (c *CurlCommand) apiToCurl(config *api.Config, headers http.Header, path *url.URL) (string, error) { + parts := []string{"curl"} + + if c.verboseFlag { + parts = append(parts, "--verbose") + } + + if c.method != "" { + parts = append(parts, "-X "+c.method) + } + + if c.body != nil { + parts = append(parts, "--data-binary @-") + } + + if config.TLSConfig != nil { + parts = tlsToCurl(parts, config.TLSConfig) + + // If a TLS server name is set we must alter the URL and use + // curl's --connect-to flag. 
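+		// For example, with TLSServerName "client.global.nomad" and a
+		// path host of "remote.client123.internal:4646", the generated
+		// command includes:
+		//   --connect-to "client.global.nomad:4646:remote.client123.internal:4646"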
+		if v := config.TLSConfig.TLSServerName; v != "" {
+			pathHost, port, err := net.SplitHostPort(path.Host)
+			if err != nil {
+				return "", fmt.Errorf("error determining port: %v", err)
+			}
+
+			// curl uses the url for SNI so override it with the
+			// configured server name
+			path.Host = net.JoinHostPort(v, port)
+
+			// curl uses --connect-to to allow specifying a
+			// different connection address for the hostname in the
+			// path. The format is:
+			// logical-host:logical-port:actual-host:actual-port
+			// Ports will always match since only the hostname is
+			// overridden for SNI.
+			parts = append(parts, fmt.Sprintf(`--connect-to "%s:%s:%s:%s"`,
+				v, port, pathHost, port))
+		}
+	}
+
+	// Add headers
+	for k, vals := range headers {
+		for _, v := range vals {
+			parts = append(parts, fmt.Sprintf(`-H '%s: %s'`, k, v))
+		}
+	}
+
+	// Only write NOMAD_TOKEN to stdout if it was specified via -token.
+	// Otherwise output a static string that references the ACL token
+	// environment variable.
+	if headers.Get("X-Nomad-Token") == "" {
+		if c.Meta.token != "" {
+			parts = append(parts, fmt.Sprintf(`-H "X-Nomad-Token: %s"`, c.Meta.token))
+		} else if v := os.Getenv("NOMAD_TOKEN"); v != "" {
+			parts = append(parts, `-H "X-Nomad-Token: ${NOMAD_TOKEN}"`)
+		}
+	}
+
+	// Never write http auth to stdout. Instead output a static string that
+	// references the HTTP auth environment variable.
+	if auth := os.Getenv("NOMAD_HTTP_AUTH"); auth != "" {
+		parts = append(parts, `-u "$NOMAD_HTTP_AUTH"`)
+	}
+
+	setQueryParams(config, path)
+
+	parts = append(parts, path.String())
+
+	return strings.Join(parts, " \\\n  "), nil
+}
+
+// tlsToCurl converts TLS configuration to their corresponding curl flags.
+func tlsToCurl(parts []string, tlsConfig *api.TLSConfig) []string {
+	if v := tlsConfig.CACert; v != "" {
+		parts = append(parts, fmt.Sprintf(`--cacert "%s"`, v))
+	}
+
+	if v := tlsConfig.CAPath; v != "" {
+		parts = append(parts, fmt.Sprintf(`--capath "%s"`, v))
+	}
+
+	if v := tlsConfig.ClientCert; v != "" {
+		parts = append(parts, fmt.Sprintf(`--cert "%s"`, v))
+	}
+
+	if v := tlsConfig.ClientKey; v != "" {
+		parts = append(parts, fmt.Sprintf(`--key "%s"`, v))
+	}
+
+	// TLSServerName has already been configured as it may change the path.
+
+	if tlsConfig.Insecure {
+		parts = append(parts, `--insecure`)
+	}
+
+	return parts
+}
+
+// pathToURL converts a curl path argument to a URL. Paths without a host are
+// prefixed with $NOMAD_ADDR or http://127.0.0.1:4646.
+func pathToURL(config *api.Config, path string) (*url.URL, error) {
+	// If the scheme is missing, add it
+	if !strings.HasPrefix(path, "http://") && !strings.HasPrefix(path, "https://") {
+		scheme := "http"
+		if config.TLSConfig != nil {
+			if config.TLSConfig.CACert != "" ||
+				config.TLSConfig.CAPath != "" ||
+				config.TLSConfig.ClientCert != "" ||
+				config.TLSConfig.TLSServerName != "" ||
+				config.TLSConfig.Insecure {
+
+				// TLS configured, but scheme not set. Assume
+				// https.
+				scheme = "https"
+			}
+		}
+
+		path = fmt.Sprintf("%s://%s", scheme, path)
+	}
+
+	u, err := url.Parse(path)
+	if err != nil {
+		return nil, err
+	}
+
+	// If URL.Host is empty, use defaults from client config
+	if u.Host == "" {
+		confURL, err := url.Parse(config.Address)
+		if err != nil {
+			return nil, fmt.Errorf("Unable to parse configured address: %v", err)
+		}
+		u.Host = confURL.Host
+	}
+
+	return u, nil
+}
+
+// headerFlags is a flag.Value implementation for collecting multiple -H flags.
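+// The flag.Value contract is String() string plus Set(string) error, and
+// Set is called once per occurrence of -H, which is what lets the flag be
+// repeated on the command line.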
+type headerFlags struct { + headers http.Header +} + +func newHeaderFlags() *headerFlags { + return &headerFlags{ + headers: make(http.Header), + } +} + +func (*headerFlags) String() string { return "" } + +func (h *headerFlags) Set(v string) error { + parts := strings.SplitN(v, ":", 2) + if len(parts) != 2 { + return fmt.Errorf("Headers must be in the form 'Key: Value' but found: %q", v) + } + + h.headers.Add(parts[0], parts[1]) + return nil +} diff --git a/website/content/docs/commands/curl.mdx b/website/content/docs/commands/curl.mdx new file mode 100644 index 000000000..fa5be8b01 --- /dev/null +++ b/website/content/docs/commands/curl.mdx @@ -0,0 +1,54 @@ +--- +layout: docs +page_title: 'Commands: curl' +description: | + curl is a utility command for accessing Nomad's HTTP API similar to the + popular open source program of the same name. +--- + +# Command: curl + +The curl command allows easy access to Nomad's HTTP API similar to the popular +[curl] program. Nomad's curl command reads [environment variables][envvars] to +dramatically ease HTTP API access compared to trying to manually write the same +command with the third party `curl` command. + +For example for the following environment: + +``` +NOMAD_TOKEN=d4434353-c797-19e4-a14d-4068241f86a4 +NOMAD_CACERT=$HOME/.nomad/ca.pem +NOMAD_CLIENT_CERT=$HOME/.nomad/cli.pem +NOMAD_CLIENT_KEY=$HOME/.nomad/client-key.pem +NOMAD_TLS_SERVER_NAME=client.global.nomad +NOMAD_ADDR=https://remote.client123.internal:4646 +``` + +Accessing Nomad's [`/v1/metrics`][metrics] HTTP endpoint with `nomad curl` +would require: + +``` +nomad curl /v1/metrics +``` + +Performing the same request using the external `curl` tool would require: + +``` +curl \ + --cacert "$HOME/.nomad/ca.pem" \ + --cert "$HOME/.nomad/client.pem" \ + --key "$HOME/.nomad/client-key.pem" \ + --connect-to "client.global.nomad:4646:remote.client123.internal:4646" \ + -H "X-Nomad-Token: ${NOMAD_TOKEN}" \ + https://client.global.nomad:4646/v1/metrics +``` + +The `-dryrun` flag for `nomad curl` will output a curl command instead of +performing the HTTP request immediately. Note that you do *not* need the 3rd +party `curl` command installed to use `nomad curl`. The `curl` output from +`-dryrun` is intended for use in scripts or running in locations without a +Nomad binary present. 
+ +[curl]: https://curl.se/ +[envvars]: /docs/commands#environment-variables +[metrics]: /api-docs/metrics#metrics-http-api diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 7a6b09ea8..313afb421 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -328,6 +328,10 @@ } ] }, + { + "title": "curl", + "path": "commands/curl" + }, { "title": "deployment", "routes": [ From 6bc962fe03021337398e05caf32d9c7d9ed2bcb5 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Fri, 18 Feb 2022 16:47:39 -0800 Subject: [PATCH 02/89] rename `nomad curl` to `nomad operator api` --- command/commands.go | 11 ++-- command/{curl.go => operator_api.go} | 36 ++++++------- website/content/docs/commands/curl.mdx | 54 ------------------- .../content/docs/commands/operator/api.mdx | 54 +++++++++++++++++++ 4 files changed, 78 insertions(+), 77 deletions(-) rename command/{curl.go => operator_api.go} (90%) delete mode 100644 website/content/docs/commands/curl.mdx create mode 100644 website/content/docs/commands/operator/api.mdx diff --git a/command/commands.go b/command/commands.go index ae770ee57..63bb6d83f 100644 --- a/command/commands.go +++ b/command/commands.go @@ -214,11 +214,6 @@ func Commands(metaPtr *Meta, agentUi cli.Ui) map[string]cli.CommandFactory { Meta: meta, }, nil }, - "curl": func() (cli.Command, error) { - return &CurlCommand{ - Meta: meta, - }, nil - }, // operator debug was released in 0.12 as debug. This top-level alias preserves compatibility "debug": func() (cli.Command, error) { return &OperatorDebugCommand{ @@ -501,6 +496,12 @@ func Commands(metaPtr *Meta, agentUi cli.Ui) map[string]cli.CommandFactory { }, nil }, + "operator api": func() (cli.Command, error) { + return &OperatorAPICommand{ + Meta: meta, + }, nil + }, + "operator autopilot": func() (cli.Command, error) { return &OperatorAutopilotCommand{ Meta: meta, diff --git a/command/curl.go b/command/operator_api.go similarity index 90% rename from command/curl.go rename to command/operator_api.go index 8fe18a77e..fe6d16d55 100644 --- a/command/curl.go +++ b/command/operator_api.go @@ -16,7 +16,7 @@ import ( "github.com/posener/complete" ) -type CurlCommand struct { +type OperatorAPICommand struct { Meta verboseFlag bool @@ -24,14 +24,14 @@ type CurlCommand struct { body io.Reader } -func (*CurlCommand) Help() string { +func (*OperatorAPICommand) Help() string { helpText := ` -Usage: nomad curl [options] +Usage: nomad operator api [options] - curl is a utility command for accessing Nomad's HTTP API and is inspired by - the popular curl command line program. Nomad's curl command populates Nomad's - standard environment variables into their appropriate HTTP headers. If the - 'path' does not begin with "http" then $NOMAD_ADDR will be used. + api is a utility command for accessing Nomad's HTTP API and is inspired by + the popular curl command line program. Nomad's operator api command populates + Nomad's standard environment variables into their appropriate HTTP headers. + If the 'path' does not begin with "http" then $NOMAD_ADDR will be used. The 'path' can be in one of the following forms: @@ -39,15 +39,15 @@ Usage: nomad curl [options] localhost:4646/v1/allocations <- Scheme will be inferred https://localhost:4646/v1/allocations <- Scheme will be https:// - Note that Nomad's curl does not always match the popular curl programs - behavior. Instead Nomad's curl is optimized for common Nomad HTTP API - operations. 
+ Note that this command does not always match the popular curl program's + behavior. Instead Nomad's operator api command is optimized for common Nomad + HTTP API operations. General Options: ` + generalOptionsUsage(usageOptsDefault) + ` -Curl Specific Options: +Operator API Specific Options: -dryrun Output curl command to stdout and exit. @@ -74,26 +74,26 @@ Curl Specific Options: return strings.TrimSpace(helpText) } -func (*CurlCommand) Synopsis() string { +func (*OperatorAPICommand) Synopsis() string { return "Query Nomad's HTTP API like curl" } -func (c *CurlCommand) AutocompleteFlags() complete.Flags { +func (c *OperatorAPICommand) AutocompleteFlags() complete.Flags { return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), complete.Flags{ "-dryrun": complete.PredictNothing, }) } -func (c *CurlCommand) AutocompleteArgs() complete.Predictor { +func (c *OperatorAPICommand) AutocompleteArgs() complete.Predictor { //TODO(schmichael) wouldn't it be cool to build path autocompletion off // of our http mux? return complete.PredictNothing } -func (*CurlCommand) Name() string { return "curl" } +func (*OperatorAPICommand) Name() string { return "operator api" } -func (c *CurlCommand) Run(args []string) int { +func (c *OperatorAPICommand) Run(args []string) int { var dryrun bool headerFlags := newHeaderFlags() @@ -117,7 +117,7 @@ func (c *CurlCommand) Run(args []string) int { } if n := len(args); n > 1 { - c.Ui.Error(fmt.Sprintf("curl accepts exactly 1 argument, but %d arguments were found", n)) + c.Ui.Error(fmt.Sprintf("operator api accepts exactly 1 argument, but %d arguments were found", n)) c.Ui.Error(commandErrorText(c)) return 1 } @@ -274,7 +274,7 @@ func setQueryParams(config *api.Config, path *url.URL) { // apiToCurl converts a Nomad HTTP API config and path to its corresponding // curl command or returns an error. -func (c *CurlCommand) apiToCurl(config *api.Config, headers http.Header, path *url.URL) (string, error) { +func (c *OperatorAPICommand) apiToCurl(config *api.Config, headers http.Header, path *url.URL) (string, error) { parts := []string{"curl"} if c.verboseFlag { diff --git a/website/content/docs/commands/curl.mdx b/website/content/docs/commands/curl.mdx deleted file mode 100644 index fa5be8b01..000000000 --- a/website/content/docs/commands/curl.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -layout: docs -page_title: 'Commands: curl' -description: | - curl is a utility command for accessing Nomad's HTTP API similar to the - popular open source program of the same name. ---- - -# Command: curl - -The curl command allows easy access to Nomad's HTTP API similar to the popular -[curl] program. Nomad's curl command reads [environment variables][envvars] to -dramatically ease HTTP API access compared to trying to manually write the same -command with the third party `curl` command. 
- -For example for the following environment: - -``` -NOMAD_TOKEN=d4434353-c797-19e4-a14d-4068241f86a4 -NOMAD_CACERT=$HOME/.nomad/ca.pem -NOMAD_CLIENT_CERT=$HOME/.nomad/cli.pem -NOMAD_CLIENT_KEY=$HOME/.nomad/client-key.pem -NOMAD_TLS_SERVER_NAME=client.global.nomad -NOMAD_ADDR=https://remote.client123.internal:4646 -``` - -Accessing Nomad's [`/v1/metrics`][metrics] HTTP endpoint with `nomad curl` -would require: - -``` -nomad curl /v1/metrics -``` - -Performing the same request using the external `curl` tool would require: - -``` -curl \ - --cacert "$HOME/.nomad/ca.pem" \ - --cert "$HOME/.nomad/client.pem" \ - --key "$HOME/.nomad/client-key.pem" \ - --connect-to "client.global.nomad:4646:remote.client123.internal:4646" \ - -H "X-Nomad-Token: ${NOMAD_TOKEN}" \ - https://client.global.nomad:4646/v1/metrics -``` - -The `-dryrun` flag for `nomad curl` will output a curl command instead of -performing the HTTP request immediately. Note that you do *not* need the 3rd -party `curl` command installed to use `nomad curl`. The `curl` output from -`-dryrun` is intended for use in scripts or running in locations without a -Nomad binary present. - -[curl]: https://curl.se/ -[envvars]: /docs/commands#environment-variables -[metrics]: /api-docs/metrics#metrics-http-api diff --git a/website/content/docs/commands/operator/api.mdx b/website/content/docs/commands/operator/api.mdx new file mode 100644 index 000000000..6155b3115 --- /dev/null +++ b/website/content/docs/commands/operator/api.mdx @@ -0,0 +1,54 @@ +--- +layout: docs +page_title: 'Commands: operator api' +description: | + operator api is a utility command for accessing Nomad's HTTP API similar to + the popular open source program curl. +--- + +# Command: operator api + +The `operator api` command allows easy access to Nomad's HTTP API similar to +the popular [curl] program. Nomad's `operator api` command reads [environment +variables][envvars] to dramatically ease HTTP API access compared to trying to +manually write the same command with the third party `curl` command. + +For example for the following environment: + +``` +NOMAD_TOKEN=d4434353-c797-19e4-a14d-4068241f86a4 +NOMAD_CACERT=$HOME/.nomad/ca.pem +NOMAD_CLIENT_CERT=$HOME/.nomad/cli.pem +NOMAD_CLIENT_KEY=$HOME/.nomad/client-key.pem +NOMAD_TLS_SERVER_NAME=client.global.nomad +NOMAD_ADDR=https://remote.client123.internal:4646 +``` + +Accessing Nomad's [`/v1/jobs`][jobs] HTTP endpoint with `nomad operator +api` would require: + +``` +nomad operator api /v1/jobs +``` + +Performing the same request using the external `curl` tool would require: + +``` +curl \ + --cacert "$HOME/.nomad/ca.pem" \ + --cert "$HOME/.nomad/client.pem" \ + --key "$HOME/.nomad/client-key.pem" \ + --connect-to "client.global.nomad:4646:remote.client123.internal:4646" \ + -H "X-Nomad-Token: ${NOMAD_TOKEN}" \ + https://client.global.nomad:4646/v1/jobs +``` + +The `-dryrun` flag for `nomad operator api` will output a curl command instead +of performing the HTTP request immediately. Note that you do *not* need the 3rd +party `curl` command installed to use `operator api`. The `curl` output from +`-dryrun` is intended for use in scripts or running in locations without a +Nomad binary present. 
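+
+For illustration, a dry run in the environment above might print something
+close to the following (a sketch only; the exact flags and their order can
+differ by Nomad version):
+
+```
+$ nomad operator api -dryrun /v1/jobs
+curl \
+  --cacert "$HOME/.nomad/ca.pem" \
+  --cert "$HOME/.nomad/cli.pem" \
+  --key "$HOME/.nomad/client-key.pem" \
+  --connect-to "client.global.nomad:4646:remote.client123.internal:4646" \
+  -H "X-Nomad-Token: ${NOMAD_TOKEN}" \
+  https://client.global.nomad:4646/v1/jobs
+```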
+ +[curl]: https://curl.se/ +[envvars]: /docs/commands#environment-variables +[jobs]: /api-docs/jobs From bd6d7e0057c3ce7d58a7899e08456d687f9130bc Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Thu, 24 Feb 2022 15:52:16 -0800 Subject: [PATCH 03/89] cli: add filter support --- command/operator_api.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/command/operator_api.go b/command/operator_api.go index fe6d16d55..dc9bb8200 100644 --- a/command/operator_api.go +++ b/command/operator_api.go @@ -59,6 +59,9 @@ Operator API Specific Options: if the -token flag is explicitly used, the token will also be included in the output. + -filter + Specifies an expression used to filter query results. + -H
Adds an additional HTTP header to the request. May be specified more than once. These headers take precedent over automatically ones such as @@ -95,11 +98,13 @@ func (*OperatorAPICommand) Name() string { return "operator api" } func (c *OperatorAPICommand) Run(args []string) int { var dryrun bool + var filter string headerFlags := newHeaderFlags() flags := c.Meta.FlagSet(c.Name(), FlagSetClient) flags.Usage = func() { c.Ui.Output(c.Help()) } flags.BoolVar(&dryrun, "dryrun", false, "") + flags.StringVar(&filter, "filter", "", "") flags.BoolVar(&c.verboseFlag, "verbose", false, "") flags.StringVar(&c.method, "X", "", "") flags.Var(headerFlags, "H", "") @@ -160,6 +165,13 @@ func (c *OperatorAPICommand) Run(args []string) int { return 2 } + // Set Filter query param + if filter != "" { + q := path.Query() + q.Set("filter", filter) + path.RawQuery = q.Encode() + } + if dryrun { out, err := c.apiToCurl(config, headerFlags.headers, path) if err != nil { From a137c1dfe0f4f5136d8f0e207b192db7692eb5a1 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Thu, 24 Feb 2022 17:06:07 -0800 Subject: [PATCH 04/89] cli: add tests and minor fixes for op api Trimmed spaces around header values. Fixed method getting forced to GET. --- command/operator_api.go | 8 +-- command/operator_api_test.go | 103 +++++++++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+), 5 deletions(-) create mode 100644 command/operator_api_test.go diff --git a/command/operator_api.go b/command/operator_api.go index dc9bb8200..8b28ec4c1 100644 --- a/command/operator_api.go +++ b/command/operator_api.go @@ -106,7 +106,7 @@ func (c *OperatorAPICommand) Run(args []string) int { flags.BoolVar(&dryrun, "dryrun", false, "") flags.StringVar(&filter, "filter", "", "") flags.BoolVar(&c.verboseFlag, "verbose", false, "") - flags.StringVar(&c.method, "X", "", "") + flags.StringVar(&c.method, "X", "GET", "") flags.Var(headerFlags, "H", "") if err := flags.Parse(args); err != nil { @@ -145,8 +145,6 @@ func (c *OperatorAPICommand) Run(args []string) int { if c.method == "" { c.method = "POST" } - } else { - c.method = "GET" } config := c.clientConfig() @@ -339,7 +337,7 @@ func (c *OperatorAPICommand) apiToCurl(config *api.Config, headers http.Header, // environment variable. if headers.Get("X-Nomad-Token") == "" { if c.Meta.token != "" { - parts = append(parts, fmt.Sprintf(`-H "X-Nomad-Token: %s"`, c.Meta.token)) + parts = append(parts, fmt.Sprintf(`-H 'X-Nomad-Token: %s'`, c.Meta.token)) } else if v := os.Getenv("NOMAD_TOKEN"); v != "" { parts = append(parts, `-H "X-Nomad-Token: ${NOMAD_TOKEN}"`) } @@ -443,6 +441,6 @@ func (h *headerFlags) Set(v string) error { return fmt.Errorf("Headers must be in the form 'Key: Value' but found: %q", v) } - h.headers.Add(parts[0], parts[1]) + h.headers.Add(parts[0], strings.TrimSpace(parts[1])) return nil } diff --git a/command/operator_api_test.go b/command/operator_api_test.go new file mode 100644 index 000000000..937db63aa --- /dev/null +++ b/command/operator_api_test.go @@ -0,0 +1,103 @@ +package command + +import ( + "bytes" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +// TestOperatorAPICommand_Paths asserts that the op api command normalizes +// various path formats to the proper full address. 
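+// Three path forms are exercised: an absolute path appended to the
+// configured address, a full URL used verbatim, and a host:port address
+// with the scheme left to be inferred.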
+func TestOperatorAPICommand_Paths(t *testing.T) { + hits := make(chan *url.URL, 1) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + hits <- r.URL + })) + defer ts.Close() + + // Always expect the same URL to be hit + expected := "/v1/jobs" + + buf := bytes.NewBuffer(nil) + ui := &cli.BasicUi{ + ErrorWriter: buf, + Writer: buf, + } + cmd := &OperatorAPICommand{Meta: Meta{Ui: ui}} + + // Assert that absolute paths are appended to the configured address + exitCode := cmd.Run([]string{"-address=" + ts.URL, "/v1/jobs"}) + require.Zero(t, exitCode, buf.String()) + + select { + case hit := <-hits: + require.Equal(t, expected, hit.String()) + case <-time.After(10 * time.Second): + t.Fatalf("timed out waiting for hit") + } + + buf.Reset() + + // Assert that full URLs are used as-is even if an invalid address is + // set. + exitCode = cmd.Run([]string{"-address=ftp://127.0.0.2:1", ts.URL + "/v1/jobs"}) + require.Zero(t, exitCode, buf.String()) + + select { + case hit := <-hits: + require.Equal(t, expected, hit.String()) + case <-time.After(10 * time.Second): + t.Fatalf("timed out waiting for hit") + } + + buf.Reset() + + // Assert that URLs lacking a scheme are used even if an invalid + // address is set. + exitCode = cmd.Run([]string{"-address=ftp://127.0.0.2:1", ts.Listener.Addr().String() + "/v1/jobs"}) + require.Zero(t, exitCode, buf.String()) + + select { + case hit := <-hits: + require.Equal(t, expected, hit.String()) + case <-time.After(10 * time.Second): + t.Fatalf("timed out waiting for hit") + } +} + +// TestOperatorAPICommand_Curl asserts that -dryrun outputs a valid curl +// command. +func TestOperatorAPICommand_Curl(t *testing.T) { + buf := bytes.NewBuffer(nil) + ui := &cli.BasicUi{ + ErrorWriter: buf, + Writer: buf, + } + cmd := &OperatorAPICommand{Meta: Meta{Ui: ui}} + + exitCode := cmd.Run([]string{ + "-dryrun", + "-address=http://127.0.0.1:1", + "-region=not even a valid region", + `-filter=this == "that" or this != "foo"`, + "-X", "POST", + "-token=acl-token", + "-H", "Some-Other-Header: ok", + "/url", + }) + require.Zero(t, exitCode, buf.String()) + + expected := `curl \ + -X POST \ + -H 'Some-Other-Header: ok' \ + -H 'X-Nomad-Token: acl-token' \ + http://127.0.0.1:1/url?filter=this+%3D%3D+%22that%22+or+this+%21%3D+%22foo%22®ion=not+even+a+valid+region +` + require.Equal(t, expected, buf.String()) +} From 29461444c61530efe66ea0f245d0f12d61af1a20 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Thu, 24 Feb 2022 17:13:42 -0800 Subject: [PATCH 05/89] docs: add changelog for #10808 --- .changelog/10808.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/10808.txt diff --git a/.changelog/10808.txt b/.changelog/10808.txt new file mode 100644 index 000000000..7bf406380 --- /dev/null +++ b/.changelog/10808.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Added `nomad operator api` command to ease querying Nomad's HTTP API. +``` From 3a6c824ea0296d05071f068457965fbf493914ab Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Fri, 25 Feb 2022 15:56:24 -0600 Subject: [PATCH 06/89] docs: clairfy advertise.rpc effect The advertise.rpc config option is not intuitive. At first glance you'd assume it works like advertise.http or advertise.serf, but it does not. The current behavior is working as intended, but the documentation is very hard to parse and doesn't draw a clear picture of what the setting actually does. 
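For illustration (hypothetical addresses; ports are the Nomad defaults), a
server configuration along these lines:

    advertise {
      http = "10.0.0.10:4646"
      rpc  = "203.0.113.10:4647" # dialed by clients, e.g. across a NAT
      serf = "10.0.0.10:4648"    # servers RPC amongst themselves via
                                 # this address when rpc is set
    }
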
Closes https://github.com/hashicorp/nomad/issues/11075
---
 website/content/docs/configuration/index.mdx | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/website/content/docs/configuration/index.mdx b/website/content/docs/configuration/index.mdx
index b4ebd2c83..fb57fa81d 100644
--- a/website/content/docs/configuration/index.mdx
+++ b/website/content/docs/configuration/index.mdx
@@ -116,9 +116,12 @@ testing.
     reachable by all the nodes from which end users are going to use the Nomad
     CLI tools.
 
-  - `rpc` - The address advertised to Nomad client nodes. This allows
-    advertising a different RPC address than is used by Nomad Servers such that
-    the clients can connect to the Nomad servers if they are behind a NAT.
+  - `rpc` - The address used to advertise to Nomad clients for connecting to Nomad
+    servers for RPC. This allows Nomad clients to connect to Nomad servers from
+    behind a NAT gateway. This address must be reachable by all Nomad client nodes.
+    When set, the Nomad servers will use the `advertise.serf` address for RPC
+    connections amongst themselves. Setting this value on a Nomad client has no
+    effect.
 
   - `serf` - The address advertised for the gossip layer. This address must be
     reachable from all server nodes. It is not required that clients can reach
From 10b60d598337b0c5d8e4b3c99ac35d058a4da032 Mon Sep 17 00:00:00 2001
From: Michael Schurter 
Date: Fri, 25 Feb 2022 16:21:14 -0800
Subject: [PATCH 07/89] docs: fix nav for op api

---
 website/data/docs-nav-data.json | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json
index 313afb421..f16701bd2 100644
--- a/website/data/docs-nav-data.json
+++ b/website/data/docs-nav-data.json
@@ -328,10 +328,6 @@
       }
     ]
   },
-  {
-    "title": "curl",
-    "path": "commands/curl"
-  },
   {
     "title": "deployment",
     "routes": [
@@ -541,6 +537,10 @@
       "title": "Overview",
       "path": "commands/operator"
     },
+    {
+      "title": "api",
+      "path": "commands/operator/api"
+    },
     {
       "title": "autopilot get-config",
       "path": "commands/operator/autopilot-get-config"
From 08afbf476fecd626834243cc587eff0d8618b568 Mon Sep 17 00:00:00 2001
From: Michael Schurter 
Date: Fri, 25 Feb 2022 16:23:31 -0800
Subject: [PATCH 08/89] cli: only return 1 on errors from op api

We don't want people to expect stable error codes for errors, and I
don't think these were useful for scripts anyway.
--- command/operator_api.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/command/operator_api.go b/command/operator_api.go index 8b28ec4c1..be932b67a 100644 --- a/command/operator_api.go +++ b/command/operator_api.go @@ -160,7 +160,7 @@ func (c *OperatorAPICommand) Run(args []string) int { path, err := pathToURL(config, args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Error turning path into URL: %v", err)) - return 2 + return 1 } // Set Filter query param @@ -174,7 +174,7 @@ func (c *OperatorAPICommand) Run(args []string) int { out, err := c.apiToCurl(config, headerFlags.headers, path) if err != nil { c.Ui.Error(fmt.Sprintf("Error creating curl command: %v", err)) - return 3 + return 1 } c.Ui.Output(out) return 0 @@ -190,7 +190,7 @@ func (c *OperatorAPICommand) Run(args []string) int { if err := api.ConfigureTLS(client, config.TLSConfig); err != nil { c.Ui.Error(fmt.Sprintf("Error configuring TLS: %v", err)) - return 4 + return 1 } setQueryParams(config, path) @@ -200,7 +200,7 @@ func (c *OperatorAPICommand) Run(args []string) int { req, err := http.NewRequest(c.method, path.String(), c.body) if err != nil { c.Ui.Error(fmt.Sprintf("Error making request: %v", err)) - return 5 + return 1 } // Set headers from command line @@ -232,7 +232,7 @@ func (c *OperatorAPICommand) Run(args []string) int { resp, err := client.Do(req) if err != nil { c.Ui.Error(fmt.Sprintf("Error performing request: %v", err)) - return 6 + return 1 } defer resp.Body.Close() @@ -246,7 +246,7 @@ func (c *OperatorAPICommand) Run(args []string) int { n, err := io.Copy(os.Stdout, resp.Body) if err != nil { c.Ui.Error(fmt.Sprintf("Error reading response after %d bytes: %v", n, err)) - return 7 + return 1 } if len(resp.Trailer) > 0 { From 3b49cde589dcc04072bc7836e1ead3a8429ecbd8 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Fri, 25 Feb 2022 16:31:56 -0800 Subject: [PATCH 09/89] cli: fix op api typos Co-authored-by: Seth Hoenig --- command/operator_api.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/command/operator_api.go b/command/operator_api.go index be932b67a..b21c34c08 100644 --- a/command/operator_api.go +++ b/command/operator_api.go @@ -29,7 +29,7 @@ func (*OperatorAPICommand) Help() string { Usage: nomad operator api [options] api is a utility command for accessing Nomad's HTTP API and is inspired by - the popular curl command line program. Nomad's operator api command populates + the popular curl command line tool. Nomad's operator api command populates Nomad's standard environment variables into their appropriate HTTP headers. If the 'path' does not begin with "http" then $NOMAD_ADDR will be used. @@ -50,7 +50,7 @@ General Options: Operator API Specific Options: -dryrun - Output curl command to stdout and exit. + Output equivalent curl command to stdout and exit. HTTP Basic Auth will never be output. If the $NOMAD_HTTP_AUTH environment variable is set, it will be referenced in the appropriate curl flag in the output. @@ -64,7 +64,7 @@ Operator API Specific Options: -H
Adds an additional HTTP header to the request. May be specified more than - once. These headers take precedent over automatically ones such as + once. These headers take precedence over automatically set ones such as X-Nomad-Token. -verbose @@ -78,7 +78,7 @@ Operator API Specific Options: } func (*OperatorAPICommand) Synopsis() string { - return "Query Nomad's HTTP API like curl" + return "Query Nomad's HTTP API" } func (c *OperatorAPICommand) AutocompleteFlags() complete.Flags { From 408a0edb172c73bdadd3caaefa90d529cab3a54c Mon Sep 17 00:00:00 2001 From: Jorge Marey Date: Sun, 27 Feb 2022 09:09:10 +0100 Subject: [PATCH 10/89] Add metadata to namespaces --- api/namespace.go | 1 + command/namespace_apply.go | 13 ++++++++++++ command/namespace_status.go | 13 ++++++++++++ nomad/structs/structs.go | 21 +++++++++++++++++++ .../content/docs/commands/namespace/apply.mdx | 5 +++++ 5 files changed, 53 insertions(+) diff --git a/api/namespace.go b/api/namespace.go index 409a62bdd..7e5352126 100644 --- a/api/namespace.go +++ b/api/namespace.go @@ -71,6 +71,7 @@ type Namespace struct { Description string Quota string Capabilities *NamespaceCapabilities `hcl:"capabilities,block"` + Meta map[string]string CreateIndex uint64 ModifyIndex uint64 } diff --git a/command/namespace_apply.go b/command/namespace_apply.go index 118645963..edab6bff2 100644 --- a/command/namespace_apply.go +++ b/command/namespace_apply.go @@ -216,6 +216,7 @@ func parseNamespaceSpecImpl(result *api.Namespace, list *ast.ObjectList) error { } delete(m, "capabilities") + delete(m, "meta") // Decode the rest if err := mapstructure.WeakDecode(m, result); err != nil { @@ -238,5 +239,17 @@ func parseNamespaceSpecImpl(result *api.Namespace, list *ast.ObjectList) error { } } + if metaO := list.Filter("meta"); len(metaO.Items) > 0 { + for _, o := range metaO.Elem().Items { + var m map[string]interface{} + if err := hcl.DecodeObject(&m, o.Val); err != nil { + return err + } + if err := mapstructure.WeakDecode(m, &result.Meta); err != nil { + return err + } + } + } + return nil } diff --git a/command/namespace_status.go b/command/namespace_status.go index 4f0f58b7f..654d1883d 100644 --- a/command/namespace_status.go +++ b/command/namespace_status.go @@ -2,6 +2,7 @@ package command import ( "fmt" + "sort" "strings" "github.com/hashicorp/nomad/api" @@ -81,6 +82,18 @@ func (c *NamespaceStatusCommand) Run(args []string) int { c.Ui.Output(formatNamespaceBasics(ns)) + if len(ns.Meta) > 0 { + c.Ui.Output(c.Colorize().Color("\n[bold]Metadata[reset]")) + var keys []string + for k := range ns.Meta { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + c.Ui.Output(fmt.Sprintf("%s:\x1f%s", k, ns.Meta[k])) + } + } + if ns.Quota != "" { quotas := client.Quotas() spec, _, err := quotas.Info(ns.Quota, nil) diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 9a30381cf..78082eb44 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -4963,6 +4963,9 @@ type Namespace struct { // Capabilities is the set of capabilities allowed for this namespace Capabilities *NamespaceCapabilities + // Meta is the set of metadata key/value pairs that attached to the namespace + Meta map[string]string + // Hash is the hash of the namespace which is used to efficiently replicate // cross-regions. 
Hash []byte
@@ -5016,6 +5019,18 @@ func (n *Namespace) SetHash() []byte {
 		}
 	}
 
+	// sort keys to ensure hash stability when meta is stored later
+	var keys []string
+	for k := range n.Meta {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		_, _ = hash.Write([]byte(k))
+		_, _ = hash.Write([]byte(n.Meta[k]))
+	}
+
 	// Finalize the hash
 	hashVal := hash.Sum(nil)
 
@@ -5035,6 +5050,12 @@ func (n *Namespace) Copy() *Namespace {
 		c.DisabledTaskDrivers = helper.CopySliceString(n.Capabilities.DisabledTaskDrivers)
 		nc.Capabilities = c
 	}
+	if n.Meta != nil {
+		nc.Meta = make(map[string]string, len(n.Meta))
+		for k, v := range n.Meta {
+			nc.Meta[k] = v
+		}
+	}
 	copy(nc.Hash, n.Hash)
 	return nc
 }
diff --git a/website/content/docs/commands/namespace/apply.mdx b/website/content/docs/commands/namespace/apply.mdx
index d9f93453c..fa54517c2 100644
--- a/website/content/docs/commands/namespace/apply.mdx
+++ b/website/content/docs/commands/namespace/apply.mdx
@@ -64,5 +64,10 @@ capabilities {
   enabled_task_drivers  = ["docker", "exec"]
   disabled_task_drivers = ["raw_exec"]
 }
+
+meta {
+  owner        = "John Doe"
+  contact_mail = "john@mycompany.com"
+}
 $ nomad namespace apply namespace.json
 ```
From 1cb00e89984a268d92b19d0435ac144f3dfeb933 Mon Sep 17 00:00:00 2001
From: Tim Gross 
Date: Mon, 28 Feb 2022 11:30:59 -0500
Subject: [PATCH 11/89] CI: increase test run timeout (#12143)

---
 GNUmakefile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/GNUmakefile b/GNUmakefile
index 4f7ee7d1f..6a0c88f7d 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -300,7 +300,7 @@ test-nomad: dev ## Run Nomad test suites
 	$(if $(ENABLE_RACE),GORACE="strip_path_prefix=$(GOPATH)/src") $(GO_TEST_CMD) \
 		$(if $(ENABLE_RACE),-race) $(if $(VERBOSE),-v) \
 		-cover \
-		-timeout=15m \
+		-timeout=20m \
 		-tags "$(GO_TAGS)" \
 		$(GOTEST_PKGS) $(if $(VERBOSE), >test.log ; echo $$? > exit-code)
 	@if [ $(VERBOSE) ] ; then \
@@ -313,7 +313,7 @@ test-nomad-module: dev ## Run Nomad test suites on a sub-module
 	@cd $(GOTEST_MOD) && $(if $(ENABLE_RACE),GORACE="strip_path_prefix=$(GOPATH)/src") $(GO_TEST_CMD) \
 		$(if $(ENABLE_RACE),-race) $(if $(VERBOSE),-v) \
 		-cover \
-		-timeout=15m \
+		-timeout=20m \
 		-tags "$(GO_TAGS)" \
 		./... $(if $(VERBOSE), >test.log ; echo $$?
> exit-code) @if [ $(VERBOSE) ] ; then \ From 636345a1677ebc4648a9bab64f299ffa206801c2 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Mon, 28 Feb 2022 12:44:08 -0500 Subject: [PATCH 12/89] fix(website): hide version select on `/plugins` & `/tools` (#12145) * fix(website/plugins): display version select * fix: hide version select on `/tools` + `/plugins` --- website/pages/api-docs/[[...page]].jsx | 1 - website/pages/docs/[[...page]].jsx | 1 - website/pages/intro/[[...page]].jsx | 1 - website/pages/plugins/[[...page]].tsx | 1 + website/pages/tools/[[...page]].tsx | 1 + 5 files changed, 2 insertions(+), 3 deletions(-) diff --git a/website/pages/api-docs/[[...page]].jsx b/website/pages/api-docs/[[...page]].jsx index af833261c..b07c16959 100644 --- a/website/pages/api-docs/[[...page]].jsx +++ b/website/pages/api-docs/[[...page]].jsx @@ -12,7 +12,6 @@ export default function DocsLayout(props) { product={{ name: productName, slug: productSlug }} baseRoute={basePath} staticProps={props} - showVersionSelect={process.env.ENABLE_VERSIONED_DOCS === 'true'} /> ) } diff --git a/website/pages/docs/[[...page]].jsx b/website/pages/docs/[[...page]].jsx index b61b2027c..1ced42701 100644 --- a/website/pages/docs/[[...page]].jsx +++ b/website/pages/docs/[[...page]].jsx @@ -15,7 +15,6 @@ export default function DocsLayout(props) { baseRoute={basePath} staticProps={props} additionalComponents={additionalComponents} - showVersionSelect={process.env.ENABLE_VERSIONED_DOCS === 'true'} /> ) } diff --git a/website/pages/intro/[[...page]].jsx b/website/pages/intro/[[...page]].jsx index 6b118e372..e2f1e4ec3 100644 --- a/website/pages/intro/[[...page]].jsx +++ b/website/pages/intro/[[...page]].jsx @@ -12,7 +12,6 @@ export default function DocsLayout(props) { product={{ name: productName, slug: productSlug }} baseRoute={basePath} staticProps={props} - showVersionSelect={process.env.ENABLE_VERSIONED_DOCS === 'true'} /> ) } diff --git a/website/pages/plugins/[[...page]].tsx b/website/pages/plugins/[[...page]].tsx index d1e79130b..3f2bc7618 100644 --- a/website/pages/plugins/[[...page]].tsx +++ b/website/pages/plugins/[[...page]].tsx @@ -12,6 +12,7 @@ export default function DocsLayout(props) { product={{ name: productName, slug: productSlug }} baseRoute={basePath} staticProps={props} + showVersionSelect={false} /> ) } diff --git a/website/pages/tools/[[...page]].tsx b/website/pages/tools/[[...page]].tsx index cbad0b885..66671b5e7 100644 --- a/website/pages/tools/[[...page]].tsx +++ b/website/pages/tools/[[...page]].tsx @@ -12,6 +12,7 @@ export default function DocsLayout(props) { product={{ name: productName, slug: productSlug }} baseRoute={basePath} staticProps={props} + showVersionSelect={false} /> ) } From 8c8b997f1e56448b9ef5a7b453264a2a94fea459 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 1 Mar 2022 07:57:29 -0500 Subject: [PATCH 13/89] csi: respect -verbose flag for allocs in volume status (#12153) --- .changelog/12153.txt | 3 +++ command/volume_deregister.go | 2 +- command/volume_detach.go | 2 +- command/volume_status.go | 5 +++++ command/volume_status_csi.go | 6 +++--- 5 files changed, 13 insertions(+), 5 deletions(-) create mode 100644 .changelog/12153.txt diff --git a/.changelog/12153.txt b/.changelog/12153.txt new file mode 100644 index 000000000..6ab989389 --- /dev/null +++ b/.changelog/12153.txt @@ -0,0 +1,3 @@ +```release-note:improvement +csi: Respect the verbose flag in the output of `volume status` +``` diff --git a/command/volume_deregister.go b/command/volume_deregister.go index 
5dc067d50..4842aae51 100644
--- a/command/volume_deregister.go
+++ b/command/volume_deregister.go
@@ -106,7 +106,7 @@ func (c *VolumeDeregisterCommand) Run(args []string) int {
 	if len(vols) > 1 {
 		if (volID != vols[0].ID) || (c.allNamespaces() && vols[0].ID == vols[1].ID) {
 			sort.Slice(vols, func(i, j int) bool { return vols[i].ID < vols[j].ID })
-			out, err := csiFormatSortedVolumes(vols, fullId)
+			out, err := csiFormatSortedVolumes(vols)
 			if err != nil {
 				c.Ui.Error(fmt.Sprintf("Error formatting: %s", err))
 				return 1
diff --git a/command/volume_detach.go b/command/volume_detach.go
index 2de8e1d97..aeea784c4 100644
--- a/command/volume_detach.go
+++ b/command/volume_detach.go
@@ -128,7 +128,7 @@ func (c *VolumeDetachCommand) Run(args []string) int {
 	if len(vols) > 1 {
 		if (volID != vols[0].ID) || (c.allNamespaces() && vols[0].ID == vols[1].ID) {
 			sort.Slice(vols, func(i, j int) bool { return vols[i].ID < vols[j].ID })
-			out, err := csiFormatSortedVolumes(vols, fullId)
+			out, err := csiFormatSortedVolumes(vols)
 			if err != nil {
 				c.Ui.Error(fmt.Sprintf("Error formatting: %s", err))
 				return 1
diff --git a/command/volume_status.go b/command/volume_status.go
index f89ac87ff..39157af39 100644
--- a/command/volume_status.go
+++ b/command/volume_status.go
@@ -109,6 +109,10 @@ func (c *VolumeStatusCommand) Run(args []string) int {
 		return 1
 	}
 
-	c.length = fullId
+	// Truncate alloc and node IDs unless full length is requested
+	c.length = shortId
+	if c.verbose {
+		c.length = fullId
+	}
 
 	// Get the HTTP client
diff --git a/command/volume_status_csi.go b/command/volume_status_csi.go
index bce7379cc..36724d34a 100644
--- a/command/volume_status_csi.go
+++ b/command/volume_status_csi.go
@@ -160,16 +160,16 @@ func (c *VolumeStatusCommand) csiFormatVolumes(vols []*api.CSIVolumeListStub) (s
 		return out, nil
 	}
 
-	return csiFormatSortedVolumes(vols, c.length)
+	return csiFormatSortedVolumes(vols)
 }
 
 // Format the volumes, assumes that we're already sorted by volume ID
-func csiFormatSortedVolumes(vols []*api.CSIVolumeListStub, length int) (string, error) {
+func csiFormatSortedVolumes(vols []*api.CSIVolumeListStub) (string, error) {
 	rows := make([]string, len(vols)+1)
 	rows[0] = "ID|Name|Plugin ID|Schedulable|Access Mode"
 	for i, v := range vols {
 		rows[i+1] = fmt.Sprintf("%s|%s|%s|%t|%s",
-			limit(v.ID, length),
+			v.ID,
 			v.Name,
 			v.PluginID,
 			v.Schedulable,
From 05034a6cd0c1bfe0e69391f994c38656d3896e44 Mon Sep 17 00:00:00 2001
From: Tim Gross 
Date: Tue, 1 Mar 2022 07:57:41 -0500
Subject: [PATCH 14/89] docs: clarify that plugin commands are for CSI only
 (#12151)

---
 website/content/docs/commands/plugin/index.mdx | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/website/content/docs/commands/plugin/index.mdx b/website/content/docs/commands/plugin/index.mdx
index 001d5ae75..e7d946279 100644
--- a/website/content/docs/commands/plugin/index.mdx
+++ b/website/content/docs/commands/plugin/index.mdx
@@ -7,7 +7,9 @@ description: |
 
 # Command: plugin
 
-The `plugin` command is used to interact with plugins.
+The `plugin` command is used to interact with external plugins that
+can be registered by Nomad jobs. Currently Nomad supports [Container
+Storage Interface (CSI)][csi] plugins.
 
## Usage @@ -18,4 +20,5 @@ subcommands are available: - [`plugin status`][status] - Display status information about a plugin +[csi]: https://github.com/container-storage-interface/spec [status]: /docs/commands/plugin/status 'Display status information about a plugin' From c06f31eef011e1f4bebd629e3145abecbd266ae6 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 1 Mar 2022 07:59:31 -0500 Subject: [PATCH 15/89] CSI: sort capabilities in `plugin status` (#12154) Also fix `LIST_SNAPSHOTS` capability name --- .changelog/12154.txt | 3 +++ command/plugin_status_csi.go | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 .changelog/12154.txt diff --git a/.changelog/12154.txt b/.changelog/12154.txt new file mode 100644 index 000000000..e3e7cbb7f --- /dev/null +++ b/.changelog/12154.txt @@ -0,0 +1,3 @@ +```release-note:improvement +csi: Sort allocations in `plugin status` output +``` diff --git a/command/plugin_status_csi.go b/command/plugin_status_csi.go index a72de5f20..3ef4d0ee8 100644 --- a/command/plugin_status_csi.go +++ b/command/plugin_status_csi.go @@ -148,7 +148,7 @@ func (c *PluginStatusCommand) formatControllerCaps(controllers map[string]*api.C caps = append(caps, "CREATE_DELETE_SNAPSHOT") fallthrough case info.SupportsListSnapshots: - caps = append(caps, "CREATE_LIST_SNAPSHOTS") + caps = append(caps, "LIST_SNAPSHOTS") fallthrough case info.SupportsClone: caps = append(caps, "CLONE_VOLUME") @@ -177,7 +177,7 @@ func (c *PluginStatusCommand) formatControllerCaps(controllers map[string]*api.C return "" } - return strings.Join(caps, "\n\t") + return " " + strings.Join(sort.StringSlice(caps), "\n ") } func (c *PluginStatusCommand) formatNodeCaps(nodes map[string]*api.CSIInfo) string { @@ -205,5 +205,5 @@ func (c *PluginStatusCommand) formatNodeCaps(nodes map[string]*api.CSIInfo) stri return "" } - return " " + strings.Join(caps, "\n ") + return " " + strings.Join(sort.StringSlice(caps), "\n ") } From 8ccb9a3271bcc3fad71e3ba0aada462d4414e91a Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 1 Mar 2022 08:34:03 -0500 Subject: [PATCH 16/89] csi: fix redaction of `volume status` mount flags (#12150) The `volume status` command and associated API redacts the entire mount options instead of just the `MountFlags` field that can contain sensitive data. Return a redacted value so that the return value makes sense to operators who have set this field. 
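For illustration, a volume status response might now carry something like
this for a volume with mount flags set (hypothetical values):

    "MountOptions": {
      "FSType": "ext4",
      "MountFlags": ["[REDACTED]"]
    }
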
--- .changelog/12150.txt | 3 +++ command/agent/csi_endpoint.go | 12 +++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) create mode 100644 .changelog/12150.txt diff --git a/.changelog/12150.txt b/.changelog/12150.txt new file mode 100644 index 000000000..579355b37 --- /dev/null +++ b/.changelog/12150.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli: Return a redacted value for mount flags in the `volume status` command, instead of `` +``` diff --git a/command/agent/csi_endpoint.go b/command/agent/csi_endpoint.go index 0a3bd0433..c57c18165 100644 --- a/command/agent/csi_endpoint.go +++ b/command/agent/csi_endpoint.go @@ -136,7 +136,6 @@ func (s *HTTPServer) csiVolumeGet(id string, resp http.ResponseWriter, req *http // remove sensitive fields, as our redaction mechanism doesn't // help serializing here vol.Secrets = nil - vol.MountOptions = nil return vol, nil } @@ -761,11 +760,14 @@ func structsCSIMountOptionsToApi(opts *structs.CSIMountOptions) *api.CSIMountOpt if opts == nil { return nil } - - return &api.CSIMountOptions{ - FSType: opts.FSType, - MountFlags: opts.MountFlags, + apiOpts := &api.CSIMountOptions{ + FSType: opts.FSType, } + if len(opts.MountFlags) > 0 { + apiOpts.MountFlags = []string{"[REDACTED]"} + } + + return apiOpts } func structsCSISecretsToApi(secrets structs.CSISecrets) api.CSISecrets { From 3fd968310d62dcb9e0112541d93e66a45cce858f Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 1 Mar 2022 08:47:01 -0500 Subject: [PATCH 17/89] CSI: use HTTP headers for passing CSI secrets (#12144) --- .changelog/12144.txt | 7 +++ api/api.go | 14 +++++ api/csi.go | 56 +++++++++++++++++-- command/agent/csi_endpoint.go | 46 ++++++++------- command/agent/csi_endpoint_test.go | 23 ++++++++ command/volume_snapshot_delete.go | 2 +- command/volume_snapshot_list.go | 29 +++++++--- .../docs/commands/volume/snapshot-delete.mdx | 5 ++ .../docs/commands/volume/snapshot-list.mdx | 10 ++-- 9 files changed, 154 insertions(+), 38 deletions(-) create mode 100644 .changelog/12144.txt diff --git a/.changelog/12144.txt b/.changelog/12144.txt new file mode 100644 index 000000000..7c1ccd49b --- /dev/null +++ b/.changelog/12144.txt @@ -0,0 +1,7 @@ +```release-note:improvement +api: CSI secrets for list and delete snapshots are now passed in HTTP headers +``` + +```release-note:improvement +cli: CSI secrets argument for `volume snapshot list` has been made consistent with `volume snapshot delete` +``` diff --git a/api/api.go b/api/api.go index a92df23fa..08521179b 100644 --- a/api/api.go +++ b/api/api.go @@ -62,6 +62,9 @@ type QueryOptions struct { // Set HTTP parameters on the query. Params map[string]string + // Set HTTP headers on the query. + Headers map[string]string + // AuthToken is the secret ID of an ACL token AuthToken string @@ -101,6 +104,9 @@ type WriteOptions struct { // AuthToken is the secret ID of an ACL token AuthToken string + // Set HTTP headers on the query. + Headers map[string]string + // ctx is an optional context pass through to the underlying HTTP // request layer. Use Context() and WithContext() to manage this. 
ctx context.Context @@ -606,6 +612,10 @@ func (r *request) setQueryOptions(q *QueryOptions) { r.params.Set(k, v) } r.ctx = q.Context() + + for k, v := range q.Headers { + r.header.Set(k, v) + } } // durToMsec converts a duration to a millisecond specified string @@ -632,6 +642,10 @@ func (r *request) setWriteOptions(q *WriteOptions) { r.params.Set("idempotency_token", q.IdempotencyToken) } r.ctx = q.Context() + + for k, v := range q.Headers { + r.header.Set(k, v) + } } // toHTTP converts the request to an HTTP request diff --git a/api/csi.go b/api/csi.go index 120c239fd..316841f25 100644 --- a/api/csi.go +++ b/api/csi.go @@ -4,6 +4,7 @@ import ( "fmt" "net/url" "sort" + "strings" "time" ) @@ -129,13 +130,37 @@ func (v *CSIVolumes) DeleteSnapshot(snap *CSISnapshot, w *WriteOptions) error { qp := url.Values{} qp.Set("snapshot_id", snap.ID) qp.Set("plugin_id", snap.PluginID) - for k, v := range snap.Secrets { - qp.Set("secret", fmt.Sprintf("%v=%v", k, v)) - } + w.SetHeadersFromCSISecrets(snap.Secrets) _, err := v.client.delete("/v1/volumes/snapshot?"+qp.Encode(), nil, w) return err } +// ListSnapshotsOpts lists external storage volume snapshots. +func (v *CSIVolumes) ListSnapshotsOpts(req *CSISnapshotListRequest) (*CSISnapshotListResponse, *QueryMeta, error) { + var resp *CSISnapshotListResponse + + qp := url.Values{} + if req.PluginID != "" { + qp.Set("plugin_id", req.PluginID) + } + if req.NextToken != "" { + qp.Set("next_token", req.NextToken) + } + if req.PerPage != 0 { + qp.Set("per_page", fmt.Sprint(req.PerPage)) + } + req.QueryOptions.SetHeadersFromCSISecrets(req.Secrets) + + qm, err := v.client.query("/v1/volumes/snapshot?"+qp.Encode(), &resp, &req.QueryOptions) + if err != nil { + return nil, nil, err + } + + sort.Sort(CSISnapshotSort(resp.Snapshots)) + return resp, qm, nil +} + +// DEPRECATED: will be removed in Nomad 1.4.0 // ListSnapshots lists external storage volume snapshots. func (v *CSIVolumes) ListSnapshots(pluginID string, secrets string, q *QueryOptions) (*CSISnapshotListResponse, *QueryMeta, error) { var resp *CSISnapshotListResponse @@ -150,9 +175,6 @@ func (v *CSIVolumes) ListSnapshots(pluginID string, secrets string, q *QueryOpti if q.PerPage != 0 { qp.Set("per_page", fmt.Sprint(q.PerPage)) } - if secrets != "" { - qp.Set("secrets", secrets) - } qm, err := v.client.query("/v1/volumes/snapshot?"+qp.Encode(), &resp, q) if err != nil { @@ -206,6 +228,28 @@ type CSIMountOptions struct { // API or in Nomad's logs. 
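+// For list and delete snapshot requests these secrets travel in the
+// X-Nomad-CSI-Secrets header as comma-separated key=value pairs; see
+// SetHeadersFromCSISecrets below.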
type CSISecrets map[string]string +func (q *QueryOptions) SetHeadersFromCSISecrets(secrets CSISecrets) { + pairs := []string{} + for k, v := range secrets { + pairs = append(pairs, fmt.Sprintf("%v=%v", k, v)) + } + if q.Headers == nil { + q.Headers = map[string]string{} + } + q.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",") +} + +func (w *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) { + pairs := []string{} + for k, v := range secrets { + pairs = append(pairs, fmt.Sprintf("%v=%v", k, v)) + } + if w.Headers == nil { + w.Headers = map[string]string{} + } + w.Headers["X-Nomad-CSI-Secrets"] = strings.Join(pairs, ",") +} + // CSIVolume is used for serialization, see also nomad/structs/csi.go type CSIVolume struct { ID string diff --git a/command/agent/csi_endpoint.go b/command/agent/csi_endpoint.go index c57c18165..b23101fba 100644 --- a/command/agent/csi_endpoint.go +++ b/command/agent/csi_endpoint.go @@ -304,13 +304,9 @@ func (s *HTTPServer) csiSnapshotDelete(resp http.ResponseWriter, req *http.Reque query := req.URL.Query() snap.PluginID = query.Get("plugin_id") snap.ID = query.Get("snapshot_id") - secrets := query["secret"] - for _, raw := range secrets { - secret := strings.Split(raw, "=") - if len(secret) == 2 { - snap.Secrets[secret[0]] = secret[1] - } - } + + secrets := parseCSISecrets(req) + snap.Secrets = secrets args.Snapshots = []*structs.CSISnapshot{snap} @@ -332,19 +328,9 @@ func (s *HTTPServer) csiSnapshotList(resp http.ResponseWriter, req *http.Request query := req.URL.Query() args.PluginID = query.Get("plugin_id") - querySecrets := query["secrets"] - // Parse comma separated secrets only when provided - if len(querySecrets) >= 1 { - secrets := strings.Split(querySecrets[0], ",") - args.Secrets = make(structs.CSISecrets) - for _, raw := range secrets { - secret := strings.Split(raw, "=") - if len(secret) == 2 { - args.Secrets[secret[0]] = secret[1] - } - } - } + secrets := parseCSISecrets(req) + args.Secrets = secrets var out structs.CSISnapshotListResponse if err := s.agent.RPC("CSIVolume.ListSnapshots", &args, &out); err != nil { @@ -419,6 +405,28 @@ func (s *HTTPServer) CSIPluginSpecificRequest(resp http.ResponseWriter, req *htt return structsCSIPluginToApi(out.Plugin), nil } +// parseCSISecrets extracts a map of k/v pairs from the CSI secrets +// header. 
Silently ignores invalid secrets +func parseCSISecrets(req *http.Request) structs.CSISecrets { + secretsHeader := req.Header.Get("X-Nomad-CSI-Secrets") + if secretsHeader == "" { + return nil + } + + secrets := map[string]string{} + secretkvs := strings.Split(secretsHeader, ",") + for _, secretkv := range secretkvs { + kv := strings.Split(secretkv, "=") + if len(kv) == 2 { + secrets[kv[0]] = kv[1] + } + } + if len(secrets) == 0 { + return nil + } + return structs.CSISecrets(secrets) +} + // structsCSIPluginToApi converts CSIPlugin, setting Expected the count of known plugin // instances func structsCSIPluginToApi(plug *structs.CSIPlugin) *api.CSIPlugin { diff --git a/command/agent/csi_endpoint_test.go b/command/agent/csi_endpoint_test.go index d8de9cb88..bbb997857 100644 --- a/command/agent/csi_endpoint_test.go +++ b/command/agent/csi_endpoint_test.go @@ -44,6 +44,29 @@ func TestHTTP_CSIEndpointPlugin(t *testing.T) { }) } +func TestHTTP_CSIParseSecrets(t *testing.T) { + t.Parallel() + testCases := []struct { + val string + expect structs.CSISecrets + }{ + {"", nil}, + {"one", nil}, + {"one,two", nil}, + {"one,two=value_two", + structs.CSISecrets(map[string]string{"two": "value_two"})}, + {"one=value_one,one=overwrite", + structs.CSISecrets(map[string]string{"one": "overwrite"})}, + {"one=value_one,two=value_two", + structs.CSISecrets(map[string]string{"one": "value_one", "two": "value_two"})}, + } + for _, tc := range testCases { + req, _ := http.NewRequest("GET", "/v1/plugin/csi/foo", nil) + req.Header.Add("X-Nomad-CSI-Secrets", tc.val) + require.Equal(t, tc.expect, parseCSISecrets(req), tc.val) + } +} + func TestHTTP_CSIEndpointUtils(t *testing.T) { secrets := structsCSISecretsToApi(structs.CSISecrets{ "foo": "bar", diff --git a/command/volume_snapshot_delete.go b/command/volume_snapshot_delete.go index 4c11a4757..a2ac23c7b 100644 --- a/command/volume_snapshot_delete.go +++ b/command/volume_snapshot_delete.go @@ -30,7 +30,7 @@ General Options: Snapshot Options: -secret - Secrets to pass to the plugin to create the snapshot. Accepts multiple + Secrets to pass to the plugin to delete the snapshot. Accepts multiple flags in the form -secret key=value ` diff --git a/command/volume_snapshot_list.go b/command/volume_snapshot_list.go index 2542b3d8c..02c7f7ffa 100644 --- a/command/volume_snapshot_list.go +++ b/command/volume_snapshot_list.go @@ -9,6 +9,7 @@ import ( humanize "github.com/dustin/go-humanize" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/api/contexts" + flaghelper "github.com/hashicorp/nomad/helper/flags" "github.com/pkg/errors" "github.com/posener/complete" ) @@ -36,7 +37,9 @@ List Options: -plugin: Display only snapshots managed by a particular plugin. By default this command will query all plugins for their snapshots. - -secrets: A set of key/value secrets to be used when listing snapshots. + -secret + Secrets to pass to the plugin to list snapshots. 
Accepts multiple + flags in the form -secret key=value ` return strings.TrimSpace(helpText) } @@ -70,13 +73,13 @@ func (c *VolumeSnapshotListCommand) Name() string { return "volume snapshot list func (c *VolumeSnapshotListCommand) Run(args []string) int { var pluginID string var verbose bool - var secrets string + var secretsArgs flaghelper.StringFlag flags := c.Meta.FlagSet(c.Name(), FlagSetClient) flags.Usage = func() { c.Ui.Output(c.Help()) } flags.StringVar(&pluginID, "plugin", "", "") flags.BoolVar(&verbose, "verbose", false, "") - flags.StringVar(&secrets, "secrets", "", "") + flags.Var(&secretsArgs, "secret", "secrets for snapshot, ex. -secret key=value") if err := flags.Parse(args); err != nil { c.Ui.Error(fmt.Sprintf("Error parsing arguments %s", err)) @@ -122,10 +125,22 @@ func (c *VolumeSnapshotListCommand) Run(args []string) int { pluginID = plugs[0].ID } - q := &api.QueryOptions{PerPage: 30} // TODO: tune page size + secrets := api.CSISecrets{} + for _, kv := range secretsArgs { + s := strings.Split(kv, "=") + if len(s) == 2 { + secrets[s[0]] = s[1] + } + } + + req := &api.CSISnapshotListRequest{ + PluginID: pluginID, + Secrets: secrets, + QueryOptions: api.QueryOptions{PerPage: 30}, + } for { - resp, _, err := client.CSIVolumes().ListSnapshots(pluginID, secrets, q) + resp, _, err := client.CSIVolumes().ListSnapshotsOpts(req) if err != nil && !errors.Is(err, io.EOF) { c.Ui.Error(fmt.Sprintf( "Error querying CSI external snapshots for plugin %q: %s", pluginID, err)) @@ -138,8 +153,8 @@ func (c *VolumeSnapshotListCommand) Run(args []string) int { } c.Ui.Output(csiFormatSnapshots(resp.Snapshots, verbose)) - q.NextToken = resp.NextToken - if q.NextToken == "" { + req.NextToken = resp.NextToken + if req.NextToken == "" { break } // we can't know the shape of arbitrarily-sized lists of snapshots, diff --git a/website/content/docs/commands/volume/snapshot-delete.mdx b/website/content/docs/commands/volume/snapshot-delete.mdx index 33cb6eab7..d0e18ea04 100644 --- a/website/content/docs/commands/volume/snapshot-delete.mdx +++ b/website/content/docs/commands/volume/snapshot-delete.mdx @@ -29,6 +29,11 @@ volume` and `plugin:read` capabilities. @include 'general_options.mdx' +## Snapshot Delete Options + +- `-secret`: Secrets to pass to the plugin to delete the + snapshot. Accepts multiple flags in the form `-secret key=value` + ## Examples Delete a volume snapshot: diff --git a/website/content/docs/commands/volume/snapshot-list.mdx b/website/content/docs/commands/volume/snapshot-list.mdx index 078271db7..5977321b6 100644 --- a/website/content/docs/commands/volume/snapshot-list.mdx +++ b/website/content/docs/commands/volume/snapshot-list.mdx @@ -27,7 +27,7 @@ Nomad. @include 'general_options.mdx' -## List Options +## Snapshot List Options - `-plugin`: Display only snapshots managed by a particular [CSI plugin][csi_plugin]. By default the `snapshot list` command will query all @@ -35,8 +35,8 @@ Nomad. there is an exact match based on the provided plugin, then that specific plugin will be queried. Otherwise, a list of matching plugins will be displayed. -- `-secrets`: A list of comma separated secret key/value pairs to be passed - to the CSI driver. +- `-secret`: Secrets to pass to the plugin to list snapshots. Accepts + multiple flags in the form `-secret key=value` When ACLs are enabled, this command requires a token with the `csi-list-volumes` capability for the plugin's namespace. 
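Taken together, the two halves of this patch agree on a small wire format: the client joins secrets into a single comma-separated `k=v` list in the `X-Nomad-CSI-Secrets` header, and the agent splits it back apart, silently dropping malformed pairs. The following is a standalone sketch of that round trip (editor illustration; the helper names here are made up, only the header format comes from the patch):

```go
package main

import (
	"fmt"
	"strings"
)

// encodeSecrets mirrors SetHeadersFromCSISecrets: flatten the map into a
// comma-separated k=v list for the X-Nomad-CSI-Secrets header.
func encodeSecrets(secrets map[string]string) string {
	pairs := []string{}
	for k, v := range secrets {
		pairs = append(pairs, fmt.Sprintf("%v=%v", k, v))
	}
	return strings.Join(pairs, ",")
}

// decodeSecrets mirrors parseCSISecrets: pairs that don't split cleanly
// on "=" are silently dropped, and no valid pairs yields nil.
func decodeSecrets(header string) map[string]string {
	secrets := map[string]string{}
	for _, kv := range strings.Split(header, ",") {
		parts := strings.Split(kv, "=")
		if len(parts) == 2 {
			secrets[parts[0]] = parts[1]
		}
	}
	if len(secrets) == 0 {
		return nil
	}
	return secrets
}

func main() {
	h := encodeSecrets(map[string]string{"password": "xyzzy"})
	fmt.Println(h)                // password=xyzzy
	fmt.Println(decodeSecrets(h)) // map[password:xyzzy]
}
```

One consequence of this encoding, visible in the test cases added below, is that secret keys or values containing `,` or `=` cannot round-trip; the parser simply discards them.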
@@ -54,7 +54,7 @@ snap-67890 vol-fedcba 50GiB 2021-01-04T15:45:00Z true List volume snapshots with two secret key/value pairs: ```shell-session -$ nomad volume snapshot list -secrets key1=value1,key2=val2 +$ nomad volume snapshot list -secret key1=value1 -secret key2=val2 Snapshot ID External ID Size Creation Time Ready? snap-12345 vol-abcdef 50GiB 2021-01-03T12:15:02Z true ``` @@ -62,4 +62,4 @@ snap-12345 vol-abcdef 50GiB 2021-01-03T12:15:02Z true [csi]: https://github.com/container-storage-interface/spec [csi_plugin]: /docs/job-specification/csi_plugin [registered]: /docs/commands/volume/register -[csi_plugins_internals]: /docs/internals/plugins/csi#csi-plugins \ No newline at end of file +[csi_plugins_internals]: /docs/internals/plugins/csi#csi-plugins From 03a8d72dbac3891512b28ce888c182724973ab2b Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 1 Mar 2022 10:15:46 -0500 Subject: [PATCH 18/89] CSI: implement support for topology (#12129) --- .changelog/12129.txt | 3 + api/csi.go | 20 +++- api/nodes.go | 7 +- client/csi_endpoint.go | 6 ++ client/structs/csi.go | 46 +++++---- command/agent/csi_endpoint.go | 15 ++- command/plugin_status_csi.go | 27 ++++++ command/volume_init.go | 42 +++++++- command/volume_register_csi.go | 43 +++++++++ command/volume_register_test.go | 41 ++++++++ command/volume_status_csi.go | 55 ++++++----- e2e/csi/csi.go | 10 +- e2e/csi/input/ebs-volume0.hcl | 13 ++- e2e/csi/input/ebs-volume1.hcl | 13 ++- e2e/csi/input/plugin-aws-ebs-controller.nomad | 2 +- e2e/csi/input/plugin-aws-ebs-nodes.nomad | 2 +- e2e/terraform/variables.tf | 2 +- nomad/csi_endpoint.go | 28 ++++-- nomad/csi_endpoint_test.go | 15 ++- nomad/structs/csi.go | 45 +++++---- nomad/structs/csi_test.go | 15 +++ nomad/structs/node.go | 8 ++ plugins/csi/client.go | 5 + plugins/csi/client_test.go | 30 +++++- plugins/csi/plugin.go | 9 +- scheduler/feasible.go | 17 ++++ scheduler/feasible_test.go | 96 ++++++++++++++++--- .../content/docs/commands/volume/create.mdx | 53 ++++++++++ .../content/docs/commands/volume/register.mdx | 48 ++++++++++ 29 files changed, 598 insertions(+), 118 deletions(-) create mode 100644 .changelog/12129.txt diff --git a/.changelog/12129.txt b/.changelog/12129.txt new file mode 100644 index 000000000..246899e83 --- /dev/null +++ b/.changelog/12129.txt @@ -0,0 +1,3 @@ +```release-note:improvement +csi: Added support for storage topology +``` diff --git a/api/csi.go b/api/csi.go index 316841f25..1950e4ba7 100644 --- a/api/csi.go +++ b/api/csi.go @@ -252,11 +252,21 @@ func (w *WriteOptions) SetHeadersFromCSISecrets(secrets CSISecrets) { // CSIVolume is used for serialization, see also nomad/structs/csi.go type CSIVolume struct { - ID string - Name string - ExternalID string `mapstructure:"external_id" hcl:"external_id"` - Namespace string - Topologies []*CSITopology + ID string + Name string + ExternalID string `mapstructure:"external_id" hcl:"external_id"` + Namespace string + + // RequestedTopologies are the topologies submitted as options to + // the storage provider at the time the volume was created. After + // volumes are created, this field is ignored. + RequestedTopologies *CSITopologyRequest `hcl:"topology_request"` + + // Topologies are the topologies returned by the storage provider, + // based on the RequestedTopologies and what the storage provider + // could support. This value cannot be set by the user. 
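	// (Editor illustration, not part of the patch: for a volume created in
	// a single AWS zone, a plugin would typically report something like
	// Topologies = []*CSITopology{{Segments: map[string]string{
	// "topology.ebs.csi.aws.com/zone": "us-east-1b"}}}, regardless of how
	// many candidate zones RequestedTopologies listed.)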
+ Topologies []*CSITopology + AccessMode CSIVolumeAccessMode `hcl:"access_mode"` AttachmentMode CSIVolumeAttachmentMode `hcl:"attachment_mode"` MountOptions *CSIMountOptions `hcl:"mount_options"` diff --git a/api/nodes.go b/api/nodes.go index 3fd24d81a..0a9ed2833 100644 --- a/api/nodes.go +++ b/api/nodes.go @@ -614,8 +614,13 @@ type NodeReservedNetworkResources struct { ReservedHostPorts string } +type CSITopologyRequest struct { + Required []*CSITopology `hcl:"required"` + Preferred []*CSITopology `hcl:"preferred"` +} + type CSITopology struct { - Segments map[string]string + Segments map[string]string `hcl:"segments"` } // CSINodeInfo is the fingerprinted data from a CSI Plugin that is specific to diff --git a/client/csi_endpoint.go b/client/csi_endpoint.go index 029c91ede..2438edce0 100644 --- a/client/csi_endpoint.go +++ b/client/csi_endpoint.go @@ -216,6 +216,12 @@ func (c *CSI) ControllerCreateVolume(req *structs.ClientCSIControllerCreateVolum resp.CapacityBytes = cresp.Volume.CapacityBytes resp.VolumeContext = cresp.Volume.VolumeContext + resp.Topologies = make([]*nstructs.CSITopology, len(cresp.Volume.AccessibleTopology)) + for _, topo := range cresp.Volume.AccessibleTopology { + resp.Topologies = append(resp.Topologies, + &nstructs.CSITopology{Segments: topo.Segments}) + } + return nil } diff --git a/client/structs/csi.go b/client/structs/csi.go index fb6ca6c7a..642f02b01 100644 --- a/client/structs/csi.go +++ b/client/structs/csi.go @@ -211,17 +211,16 @@ type ClientCSIControllerDetachVolumeResponse struct{} // Nomad client to tell a CSI controller plugin on that client to perform // CreateVolume type ClientCSIControllerCreateVolumeRequest struct { - Name string - VolumeCapabilities []*structs.CSIVolumeCapability - MountOptions *structs.CSIMountOptions - Parameters map[string]string - Secrets structs.CSISecrets - CapacityMin int64 - CapacityMax int64 - SnapshotID string - CloneID string - // TODO: topology is not yet supported - // TopologyRequirement + Name string + VolumeCapabilities []*structs.CSIVolumeCapability + MountOptions *structs.CSIMountOptions + Parameters map[string]string + Secrets structs.CSISecrets + CapacityMin int64 + CapacityMax int64 + SnapshotID string + CloneID string + RequestedTopologies *structs.CSITopologyRequest CSIControllerQuery } @@ -237,8 +236,10 @@ func (req *ClientCSIControllerCreateVolumeRequest) ToCSIRequest() (*csi.Controll CloneID: req.CloneID, SnapshotID: req.SnapshotID, }, - // TODO: topology is not yet supported - AccessibilityRequirements: &csi.TopologyRequirement{}, + AccessibilityRequirements: &csi.TopologyRequirement{ + Requisite: []*csi.Topology{}, + Preferred: []*csi.Topology{}, + }, } // The CSI spec requires that at least one of the fields in CapacityRange @@ -258,6 +259,21 @@ func (req *ClientCSIControllerCreateVolumeRequest) ToCSIRequest() (*csi.Controll } creq.VolumeCapabilities = append(creq.VolumeCapabilities, ccap) } + + if req.RequestedTopologies != nil { + for _, topo := range req.RequestedTopologies.Required { + creq.AccessibilityRequirements.Requisite = append( + creq.AccessibilityRequirements.Requisite, &csi.Topology{ + Segments: topo.Segments, + }) + } + for _, topo := range req.RequestedTopologies.Preferred { + creq.AccessibilityRequirements.Preferred = append( + creq.AccessibilityRequirements.Preferred, &csi.Topology{ + Segments: topo.Segments, + }) + } + } return creq, nil } @@ -265,9 +281,7 @@ type ClientCSIControllerCreateVolumeResponse struct { ExternalVolumeID string CapacityBytes int64 VolumeContext 
map[string]string - - // TODO: topology is not yet supported - // AccessibleTopology []*Topology + Topologies []*structs.CSITopology } // ClientCSIControllerDeleteVolumeRequest the RPC made from the server to a diff --git a/command/agent/csi_endpoint.go b/command/agent/csi_endpoint.go index b23101fba..4e0a8ea08 100644 --- a/command/agent/csi_endpoint.go +++ b/command/agent/csi_endpoint.go @@ -508,6 +508,13 @@ func structsCSIVolumeToApi(vol *structs.CSIVolume) *api.CSIVolume { ModifyIndex: vol.ModifyIndex, } + if vol.RequestedTopologies != nil { + out.RequestedTopologies = &api.CSITopologyRequest{ + Preferred: structsCSITopolgiesToApi(vol.RequestedTopologies.Preferred), + Required: structsCSITopolgiesToApi(vol.RequestedTopologies.Required), + } + } + // WriteAllocs and ReadAllocs will only ever contain the Allocation ID, // with a null value for the Allocation; these IDs are mapped to // allocation stubs in the Allocations field. This indirection is so the @@ -725,9 +732,11 @@ func structsTaskEventToApi(te *structs.TaskEvent) *api.TaskEvent { func structsCSITopolgiesToApi(tops []*structs.CSITopology) []*api.CSITopology { out := make([]*api.CSITopology, 0, len(tops)) for _, t := range tops { - out = append(out, &api.CSITopology{ - Segments: t.Segments, - }) + if t != nil { + out = append(out, &api.CSITopology{ + Segments: t.Segments, + }) + } } return out diff --git a/command/plugin_status_csi.go b/command/plugin_status_csi.go index 3ef4d0ee8..b3a6ac36b 100644 --- a/command/plugin_status_csi.go +++ b/command/plugin_status_csi.go @@ -118,6 +118,12 @@ func (c *PluginStatusCommand) csiFormatPlugin(plug *api.CSIPlugin) (string, erro full = append(full, c.Colorize().Color("\n[bold]Node Capabilities[reset]")) full = append(full, nodeCaps) } + topos := c.formatTopology(plug.Nodes) + if topos != "" { + full = append(full, c.Colorize().Color("\n[bold]Accessible Topologies[reset]")) + full = append(full, topos) + } + } // Format the allocs @@ -183,6 +189,9 @@ func (c *PluginStatusCommand) formatControllerCaps(controllers map[string]*api.C func (c *PluginStatusCommand) formatNodeCaps(nodes map[string]*api.CSIInfo) string { caps := []string{} for _, node := range nodes { + if node.RequiresTopologies { + caps = append(caps, "VOLUME_ACCESSIBILITY_CONSTRAINTS") + } switch info := node.NodeInfo; { case info.RequiresNodeStageVolume: caps = append(caps, "STAGE_UNSTAGE_VOLUME") @@ -207,3 +216,21 @@ func (c *PluginStatusCommand) formatNodeCaps(nodes map[string]*api.CSIInfo) stri return " " + strings.Join(sort.StringSlice(caps), "\n ") } + +func (c *PluginStatusCommand) formatTopology(nodes map[string]*api.CSIInfo) string { + rows := []string{"Node ID|Accessible Topology"} + for nodeID, node := range nodes { + if node.NodeInfo.AccessibleTopology != nil { + segments := node.NodeInfo.AccessibleTopology.Segments + segmentPairs := make([]string, 0, len(segments)) + for k, v := range segments { + segmentPairs = append(segmentPairs, fmt.Sprintf("%s=%s", k, v)) + } + rows = append(rows, fmt.Sprintf("%s|%s", nodeID[:8], strings.Join(segmentPairs, ","))) + } + } + if len(rows) == 1 { + return "" + } + return formatList(rows) +} diff --git a/command/volume_init.go b/command/volume_init.go index c1714f7bf..ce0ffba3c 100644 --- a/command/volume_init.go +++ b/command/volume_init.go @@ -133,7 +133,7 @@ capacity_max = "20G" # capabilities to validate. Registering an existing volume will record but # ignore these fields. 
capability { - access_mode = "single-node-writer" + access_mode = "single-node-writer" attachment_mode = "file-system" } @@ -150,6 +150,18 @@ mount_options { mount_flags = ["ro"] } +# Optional: specify one or more locations where the volume must be accessible +# from. Refer to the plugin documentation for what segment values are supported. +topology_request { + preferred { + topology { segments { rack = "R1" } } + } + required { + topology { segments { rack = "R1" } } + topology { segments { rack = "R2", zone = "us-east-1a" } } + } +} + # Optional: provide any secrets specified by the plugin. secrets { example_secret = "xyzzy" @@ -201,6 +213,34 @@ var defaultJsonVolumeSpec = strings.TrimSpace(` ] } ], + "topology_request": { + "preferred": [ + { + "topology": { + "segments": { + "rack": "R1" + } + } + } + ], + "required": [ + { + "topology": { + "segments": { + "rack": "R1" + } + } + }, + { + "topology": { + "segments": { + "rack": "R2", + "zone": "us-east-1a" + } + } + } + ] + }, "parameters": [ { "skuname": "Premium_LRS" diff --git a/command/volume_register_csi.go b/command/volume_register_csi.go index aa68d6351..b3cf9e2fe 100644 --- a/command/volume_register_csi.go +++ b/command/volume_register_csi.go @@ -48,6 +48,7 @@ func csiDecodeVolume(input *ast.File) (*api.CSIVolume, error) { delete(m, "mount_options") delete(m, "capacity_max") delete(m, "capacity_min") + delete(m, "topology_request") delete(m, "type") // Decode the rest @@ -116,6 +117,48 @@ func csiDecodeVolume(input *ast.File) (*api.CSIVolume, error) { } } + requestedTopos := list.Filter("topology_request") + if len(requestedTopos.Items) > 0 { + + vol.RequestedTopologies = &api.CSITopologyRequest{} + + for _, o := range requestedTopos.Elem().Items { + if err := helper.CheckHCLKeys(o.Val, []string{"preferred", "required"}); err != nil { + return nil, err + } + ot, ok := o.Val.(*ast.ObjectType) + if !ok { + break + } + + // topology_request -> required|preferred -> []topology -> []segments (kv) + decoded := map[string][]map[string][]map[string][]map[string]string{} + if err := hcl.DecodeObject(&decoded, ot.List); err != nil { + return nil, err + } + + getTopologies := func(topKey string) []*api.CSITopology { + for _, topo := range decoded[topKey] { + var topos []*api.CSITopology + for _, segments := range topo["topology"] { + for _, segment := range segments["segments"] { + if len(segment) > 0 { + topos = append(topos, &api.CSITopology{Segments: segment}) + } + } + } + if len(topos) > 0 { + return topos + } + } + return nil + } + + vol.RequestedTopologies.Required = getTopologies("required") + vol.RequestedTopologies.Preferred = getTopologies("preferred") + } + } + return vol, nil } diff --git a/command/volume_register_test.go b/command/volume_register_test.go index b69816bfa..b65b923cd 100644 --- a/command/volume_register_test.go +++ b/command/volume_register_test.go @@ -84,6 +84,17 @@ capability { access_mode = "single-node-reader-only" attachment_mode = "block-device" } + +topology_request { + preferred { + topology { segments {rack = "R1"} } + } + + required { + topology { segments {rack = "R1"} } + topology { segments {rack = "R2", zone = "us-east-1a"} } + } +} `, expected: &api.CSIVolume{ ID: "testvolume", @@ -108,6 +119,16 @@ capability { }, Parameters: map[string]string{"skuname": "Premium_LRS"}, Secrets: map[string]string{"password": "xyzzy"}, + RequestedTopologies: &api.CSITopologyRequest{ + Required: []*api.CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + {Segments: map[string]string{"rack": "R2", "zone": 
"us-east-1a"}}, + }, + Preferred: []*api.CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + }, + }, + Topologies: nil, // this is left empty }, err: "", }, { @@ -124,6 +145,19 @@ capability { access_mode = "single-node-writer" attachment_mode = "file-system" } + +topology_request { + # make sure we safely handle empty blocks even + # if they're invalid + preferred { + topology {} + topology { segments {} } + } + + required { + topology { segments { rack = "R2", zone = "us-east-1a"} } + } +} `, expected: &api.CSIVolume{ ID: "testvolume", @@ -136,6 +170,13 @@ capability { AttachmentMode: api.CSIVolumeAttachmentModeFilesystem, }, }, + RequestedTopologies: &api.CSITopologyRequest{ + Required: []*api.CSITopology{ + {Segments: map[string]string{"rack": "R2", "zone": "us-east-1a"}}, + }, + Preferred: nil, + }, + Topologies: nil, }, err: "", }, diff --git a/command/volume_status_csi.go b/command/volume_status_csi.go index 36724d34a..84a4abb09 100644 --- a/command/volume_status_csi.go +++ b/command/volume_status_csi.go @@ -212,43 +212,42 @@ func (c *VolumeStatusCommand) formatBasic(vol *api.CSIVolume) (string, error) { return formatKV(output), nil } + full := []string{formatKV(output)} + + if len(vol.Topologies) > 0 { + topoBanner := c.Colorize().Color("\n[bold]Topologies[reset]") + topo := c.formatTopology(vol) + full = append(full, topoBanner) + full = append(full, topo) + } + // Format the allocs banner := c.Colorize().Color("\n[bold]Allocations[reset]") allocs := formatAllocListStubs(vol.Allocations, c.verbose, c.length) - full := []string{formatKV(output), banner, allocs} + full = append(full, banner) + full = append(full, allocs) + return strings.Join(full, "\n"), nil } -func (c *VolumeStatusCommand) formatTopologies(vol *api.CSIVolume) string { - var out []string - - // Find the union of all the keys - head := map[string]string{} - for _, t := range vol.Topologies { - for key := range t.Segments { - if _, ok := head[key]; !ok { - head[key] = "" - } +func (c *VolumeStatusCommand) formatTopology(vol *api.CSIVolume) string { + rows := []string{"Topology|Segments"} + for i, t := range vol.Topologies { + segmentPairs := make([]string, 0, len(t.Segments)) + for k, v := range t.Segments { + segmentPairs = append(segmentPairs, fmt.Sprintf("%s=%s", k, v)) } + // note: this looks awkward because we don't have any other + // place where we list collections of arbitrary k/v's like + // this without just dumping JSON formatted outputs. 
It's likely + // the spec will expand to add extra fields, in which case we'll + // add them here and drop the first column + rows = append(rows, fmt.Sprintf("%02d|%v", i, strings.Join(segmentPairs, ", "))) } - - // Append the header - var line []string - for key := range head { - line = append(line, key) + if len(rows) == 1 { + return "" } - out = append(out, strings.Join(line, " ")) - - // Append each topology - for _, t := range vol.Topologies { - line = []string{} - for key := range head { - line = append(line, t.Segments[key]) - } - out = append(out, strings.Join(line, " ")) - } - - return strings.Join(out, "\n") + return formatList(rows) } func csiVolMountOption(volume, request *api.CSIMountOptions) string { diff --git a/e2e/csi/csi.go b/e2e/csi/csi.go index d500b6d2c..e570f7383 100644 --- a/e2e/csi/csi.go +++ b/e2e/csi/csi.go @@ -226,9 +226,13 @@ func volumeRegister(volID, volFilePath, createOrRegister string) error { } // hack off the first line to replace with our unique ID - var re = regexp.MustCompile(`(?m)^id ".*"`) - volspec := re.ReplaceAllString(string(content), - fmt.Sprintf("id = \"%s\"", volID)) + var idRegex = regexp.MustCompile(`(?m)^id ".*"`) + volspec := idRegex.ReplaceAllString(string(content), + fmt.Sprintf("id = %q", volID)) + + var nameRegex = regexp.MustCompile(`(?m)^name ".*"`) + volspec = nameRegex.ReplaceAllString(volspec, + fmt.Sprintf("name = %q", volID)) go func() { defer stdin.Close() diff --git a/e2e/csi/input/ebs-volume0.hcl b/e2e/csi/input/ebs-volume0.hcl index bf961efed..b3e8fd93d 100644 --- a/e2e/csi/input/ebs-volume0.hcl +++ b/e2e/csi/input/ebs-volume0.hcl @@ -1,5 +1,5 @@ id = "ebs-vol[0]" -name = "this-is-a-test-0" # CSIVolumeName tag +name = "idempotency-token" # CSIVolumeName tag, must be idempotent type = "csi" plugin_id = "aws-ebs0" @@ -19,3 +19,14 @@ capability { parameters { type = "gp2" } + +topology_request { + required { + topology { + segments { + # this zone should match the one set in e2e/terraform/variables.tf + "topology.ebs.csi.aws.com/zone" = "us-east-1b" + } + } + } +} diff --git a/e2e/csi/input/ebs-volume1.hcl b/e2e/csi/input/ebs-volume1.hcl index df38b9034..57f715a78 100644 --- a/e2e/csi/input/ebs-volume1.hcl +++ b/e2e/csi/input/ebs-volume1.hcl @@ -1,5 +1,5 @@ id = "ebs-vol[1]" -name = "this-is-a-test-1" # CSIVolumeName tag +name = "idempotency-token" # CSIVolumeName tag type = "csi" plugin_id = "aws-ebs0" @@ -19,3 +19,14 @@ capability { parameters { type = "gp2" } + +topology_request { + required { + topology { + segments { + # this zone should match the one set in e2e/terraform/variables.tf + "topology.ebs.csi.aws.com/zone" = "us-east-1b" + } + } + } +} diff --git a/e2e/csi/input/plugin-aws-ebs-controller.nomad b/e2e/csi/input/plugin-aws-ebs-controller.nomad index b4bd6d626..dd0b675c7 100644 --- a/e2e/csi/input/plugin-aws-ebs-controller.nomad +++ b/e2e/csi/input/plugin-aws-ebs-controller.nomad @@ -22,7 +22,7 @@ job "plugin-aws-ebs-controller" { driver = "docker" config { - image = "amazon/aws-ebs-csi-driver:v0.9.0" + image = "public.ecr.aws/ebs-csi-driver/aws-ebs-csi-driver:v1.5.1" args = [ "controller", diff --git a/e2e/csi/input/plugin-aws-ebs-nodes.nomad b/e2e/csi/input/plugin-aws-ebs-nodes.nomad index 3411990a3..206b1df81 100644 --- a/e2e/csi/input/plugin-aws-ebs-nodes.nomad +++ b/e2e/csi/input/plugin-aws-ebs-nodes.nomad @@ -19,7 +19,7 @@ job "plugin-aws-ebs-nodes" { driver = "docker" config { - image = "amazon/aws-ebs-csi-driver:v0.9.0" + image = "public.ecr.aws/ebs-csi-driver/aws-ebs-csi-driver:v1.5.1" args = [ "node", 
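For reference, the `ToCSIRequest` change earlier in this patch is the point where Nomad's `required`/`preferred` topologies become the CSI `TopologyRequirement`. Below is a condensed, standalone sketch of that mapping (editor illustration using local stand-in types, not the real `plugins/csi` structs):

```go
package main

import "fmt"

// Stand-ins for the Nomad structs and CSI wrapper types (illustrative only).
type CSITopology struct{ Segments map[string]string }

type CSITopologyRequest struct {
	Required  []*CSITopology
	Preferred []*CSITopology
}

type Topology struct{ Segments map[string]string }

type TopologyRequirement struct {
	Requisite []*Topology
	Preferred []*Topology
}

// toCSIRequirement mirrors ToCSIRequest: Nomad's "required" topologies
// become CSI "requisite" entries; "preferred" entries pass through as-is.
func toCSIRequirement(req *CSITopologyRequest) *TopologyRequirement {
	out := &TopologyRequirement{Requisite: []*Topology{}, Preferred: []*Topology{}}
	if req == nil {
		return out
	}
	for _, t := range req.Required {
		out.Requisite = append(out.Requisite, &Topology{Segments: t.Segments})
	}
	for _, t := range req.Preferred {
		out.Preferred = append(out.Preferred, &Topology{Segments: t.Segments})
	}
	return out
}

func main() {
	req := &CSITopologyRequest{
		Required: []*CSITopology{
			{Segments: map[string]string{"topology.ebs.csi.aws.com/zone": "us-east-1b"}},
		},
	}
	fmt.Println(toCSIRequirement(req).Requisite[0].Segments)
}
```

Per the CSI spec language quoted in `feasible.go` later in this patch, `requisite` means the volume must be accessible from at least one of the listed topologies, which is why the e2e volumes above pin their required zone to the one set in `variables.tf`.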
diff --git a/e2e/terraform/variables.tf b/e2e/terraform/variables.tf index c48c6ef67..49f8f8790 100644 --- a/e2e/terraform/variables.tf +++ b/e2e/terraform/variables.tf @@ -10,7 +10,7 @@ variable "region" { variable "availability_zone" { description = "The AWS availability zone to deploy to." - default = "us-east-1a" + default = "us-east-1b" } variable "instance_type" { diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index ac63b8fa5..092fc16ff 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -301,6 +301,14 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru if err := v.controllerValidateVolume(args, vol, plugin); err != nil { return err } + + // The topologies for the volume have already been set when it was + // created, so we accept the user's description of that topology + if vol.Topologies == nil || len(vol.Topologies) == 0 { + if vol.RequestedTopologies != nil { + vol.Topologies = vol.RequestedTopologies.Required + } + } } resp, index, err := v.srv.raftApply(structs.CSIVolumeRegisterRequestType, args) @@ -898,15 +906,16 @@ func (v *CSIVolume) createVolume(vol *structs.CSIVolume, plugin *structs.CSIPlug method := "ClientCSI.ControllerCreateVolume" cReq := &cstructs.ClientCSIControllerCreateVolumeRequest{ - Name: vol.Name, - VolumeCapabilities: vol.RequestedCapabilities, - MountOptions: vol.MountOptions, - Parameters: vol.Parameters, - Secrets: vol.Secrets, - CapacityMin: vol.RequestedCapacityMin, - CapacityMax: vol.RequestedCapacityMax, - SnapshotID: vol.SnapshotID, - CloneID: vol.CloneID, + Name: vol.Name, + VolumeCapabilities: vol.RequestedCapabilities, + MountOptions: vol.MountOptions, + Parameters: vol.Parameters, + Secrets: vol.Secrets, + CapacityMin: vol.RequestedCapacityMin, + CapacityMax: vol.RequestedCapacityMax, + SnapshotID: vol.SnapshotID, + CloneID: vol.CloneID, + RequestedTopologies: vol.RequestedTopologies, } cReq.PluginID = plugin.ID cResp := &cstructs.ClientCSIControllerCreateVolumeResponse{} @@ -918,6 +927,7 @@ func (v *CSIVolume) createVolume(vol *structs.CSIVolume, plugin *structs.CSIPlug vol.ExternalID = cResp.ExternalVolumeID vol.Capacity = cResp.CapacityBytes vol.Context = cResp.VolumeContext + vol.Topologies = cResp.Topologies return nil } diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 0d06a8591..e4235ee21 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -275,9 +275,10 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { ID: id0, Namespace: structs.DefaultNamespace, PluginID: "minnie", - Topologies: []*structs.CSITopology{{ - Segments: map[string]string{"foo": "bar"}, - }}, + RequestedTopologies: &structs.CSITopologyRequest{ + Required: []*structs.CSITopology{ + {Segments: map[string]string{"foo": "bar"}}}, + }, Secrets: structs.CSISecrets{"mysecret": "secretvalue"}, RequestedCapabilities: []*structs.CSIVolumeCapability{{ AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, @@ -754,6 +755,9 @@ func TestCSIVolumeEndpoint_Create(t *testing.T) { ExternalVolumeID: "vol-12345", CapacityBytes: 42, VolumeContext: map[string]string{"plugincontext": "bar"}, + Topologies: []*structs.CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + }, } client, cleanup := client.TestClientWithRPCs(t, @@ -829,6 +833,10 @@ func TestCSIVolumeEndpoint_Create(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }, }, + Topologies: []*structs.CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + {Segments: map[string]string{"zone": 
"Z2"}}, + }, }} // Create the create request @@ -872,6 +880,7 @@ func TestCSIVolumeEndpoint_Create(t *testing.T) { require.Equal(t, int64(42), vol.Capacity) require.Equal(t, "bar", vol.Context["plugincontext"]) require.Equal(t, "", vol.Context["mycontext"]) + require.Equal(t, map[string]string{"rack": "R1"}, vol.Topologies[0].Segments) } func TestCSIVolumeEndpoint_Delete(t *testing.T) { diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 9540d8445..3d7c5178f 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -246,9 +246,19 @@ type CSIVolume struct { // Name is a display name for the volume, not required to be unique Name string // ExternalID identifies the volume for the CSI interface, may be URL unsafe - ExternalID string - Namespace string - Topologies []*CSITopology + ExternalID string + Namespace string + + // RequestedTopologies are the topologies submitted as options to + // the storage provider at the time the volume was created. After + // volumes are created, this field is ignored. + RequestedTopologies *CSITopologyRequest + + // Topologies are the topologies returned by the storage provider, + // based on the RequestedTopologies and what the storage provider + // could support. This value cannot be set by the user. + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode // *current* access mode AttachmentMode CSIVolumeAttachmentMode // *current* attachment mode MountOptions *CSIMountOptions @@ -679,20 +689,18 @@ func (v *CSIVolume) Validate() error { if len(v.RequestedCapabilities) == 0 { errs = append(errs, "must include at least one capability block") } - - // TODO: Volume Topologies are optional - We should check to see if the plugin - // the volume is being registered with requires them. - // var ok bool - // for _, t := range v.Topologies { - // if t != nil && len(t.Segments) > 0 { - // ok = true - // break - // } - // } - // if !ok { - // errs = append(errs, "missing topology") - // } - + if v.RequestedTopologies != nil { + for _, t := range v.RequestedTopologies.Required { + if t != nil && len(t.Segments) == 0 { + errs = append(errs, "required topology is missing segments field") + } + } + for _, t := range v.RequestedTopologies.Preferred { + if t != nil && len(t.Segments) == 0 { + errs = append(errs, "preferred topology is missing segments field") + } + } + } if len(errs) > 0 { return fmt.Errorf("validation: %s", strings.Join(errs, ", ")) } @@ -836,9 +844,6 @@ type CSIVolumeExternalStub struct { CloneID string SnapshotID string - // TODO: topology support - // AccessibleTopology []*Topology - PublishedExternalNodeIDs []string IsAbnormal bool Status string diff --git a/nomad/structs/csi_test.go b/nomad/structs/csi_test.go index 32d150816..855a65871 100644 --- a/nomad/structs/csi_test.go +++ b/nomad/structs/csi_test.go @@ -554,6 +554,21 @@ func TestVolume_Copy(t *testing.T) { } +func TestCSIVolume_Validate(t *testing.T) { + vol := &CSIVolume{ + ID: "test", + PluginID: "test", + SnapshotID: "test-snapshot", + CloneID: "test-clone", + RequestedTopologies: &CSITopologyRequest{ + Required: []*CSITopology{{}, {}}, + }, + } + err := vol.Validate() + require.EqualError(t, err, "validation: missing namespace, only one of snapshot_id and clone_id is allowed, must include at least one capability block, required topology is missing segments field, required topology is missing segments field") + +} + func TestCSIPluginJobs(t *testing.T) { plug := NewCSIPlugin("foo", 1000) controller := &Job{ diff --git a/nomad/structs/node.go b/nomad/structs/node.go index 
71be7796f..6a8f9cd47 100644 --- a/nomad/structs/node.go +++ b/nomad/structs/node.go @@ -62,6 +62,14 @@ func (t *CSITopology) Equal(o *CSITopology) bool { return helper.CompareMapStringString(t.Segments, o.Segments) } +// CSITopologyRequest are the topologies submitted as options to the +// storage provider at the time the volume was created. The storage +// provider will return a single topology. +type CSITopologyRequest struct { + Required []*CSITopology + Preferred []*CSITopology +} + // CSINodeInfo is the fingerprinted data from a CSI Plugin that is specific to // the Node API. type CSINodeInfo struct { diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 6d34d8f55..8c5185bbe 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -740,6 +740,11 @@ func (c *client) NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) result.MaxVolumes = math.MaxInt64 } + topo := resp.GetAccessibleTopology() + if topo != nil { + result.AccessibleTopology = &Topology{Segments: topo.Segments} + } + return result, nil } diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index 1c951b2ba..6554662c7 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -746,7 +746,7 @@ func TestClient_RPC_ControllerCreateVolume(t *testing.T) { }, { - Name: "handles success with capacity range and source", + Name: "handles success with capacity range, source, and topology", CapacityRange: &CapacityRange{ RequiredBytes: 500, LimitBytes: 1000, @@ -764,6 +764,9 @@ func TestClient_RPC_ControllerCreateVolume(t *testing.T) { }, }, }, + AccessibleTopology: []*csipbv1.Topology{ + {Segments: map[string]string{"rack": "R1"}}, + }, }, }, }, @@ -782,10 +785,19 @@ func TestClient_RPC_ControllerCreateVolume(t *testing.T) { AccessMode: VolumeAccessModeMultiNodeMultiWriter, }, }, - Parameters: map[string]string{}, - Secrets: structs.CSISecrets{}, - ContentSource: tc.ContentSource, - AccessibilityRequirements: &TopologyRequirement{}, + Parameters: map[string]string{}, + Secrets: structs.CSISecrets{}, + ContentSource: tc.ContentSource, + AccessibilityRequirements: &TopologyRequirement{ + Requisite: []*Topology{ + { + Segments: map[string]string{"rack": "R1"}, + }, + { + Segments: map[string]string{"rack": "R2"}, + }, + }, + }, } cc.NextCreateVolumeResponse = tc.Response @@ -808,6 +820,14 @@ func TestClient_RPC_ControllerCreateVolume(t *testing.T) { require.Equal(t, tc.ContentSource.CloneID, resp.Volume.ContentSource.CloneID) require.Equal(t, tc.ContentSource.SnapshotID, resp.Volume.ContentSource.SnapshotID) } + if tc.Response != nil && tc.Response.Volume != nil { + require.Len(t, resp.Volume.AccessibleTopology, 1) + require.Equal(t, + req.AccessibilityRequirements.Requisite[0].Segments, + resp.Volume.AccessibleTopology[0].Segments, + ) + } + }) } } diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 54c664b8f..c03390688 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -581,10 +581,11 @@ type ControllerCreateVolumeResponse struct { func NewCreateVolumeResponse(resp *csipbv1.CreateVolumeResponse) *ControllerCreateVolumeResponse { vol := resp.GetVolume() return &ControllerCreateVolumeResponse{Volume: &Volume{ - CapacityBytes: vol.GetCapacityBytes(), - ExternalVolumeID: vol.GetVolumeId(), - VolumeContext: vol.GetVolumeContext(), - ContentSource: newVolumeContentSource(vol.GetContentSource()), + CapacityBytes: vol.GetCapacityBytes(), + ExternalVolumeID: vol.GetVolumeId(), + VolumeContext: vol.GetVolumeContext(), + ContentSource: 
newVolumeContentSource(vol.GetContentSource()), + AccessibleTopology: newTopologies(vol.GetAccessibleTopology()), }} } diff --git a/scheduler/feasible.go b/scheduler/feasible.go index fecd97286..3c478eb69 100644 --- a/scheduler/feasible.go +++ b/scheduler/feasible.go @@ -27,6 +27,7 @@ const ( FilterConstraintCSIVolumeGCdAllocationTemplate = "CSI volume %s has exhausted its available writer claims and is claimed by a garbage collected allocation %s; waiting for claim to be released" FilterConstraintDrivers = "missing drivers" FilterConstraintDevices = "missing devices" + FilterConstraintsCSIPluginTopology = "did not meet topology requirement" ) var ( @@ -313,6 +314,22 @@ func (c *CSIVolumeChecker) isFeasible(n *structs.Node) (bool, string) { return false, fmt.Sprintf(FilterConstraintCSIPluginMaxVolumesTemplate, vol.PluginID, n.ID) } + // CSI spec: "If requisite is specified, the provisioned + // volume MUST be accessible from at least one of the + // requisite topologies." + if len(vol.Topologies) > 0 { + var ok bool + for _, requiredTopo := range vol.Topologies { + if requiredTopo.Equal(plugin.NodeInfo.AccessibleTopology) { + ok = true + break + } + } + if !ok { + return false, FilterConstraintsCSIPluginTopology + } + } + if req.ReadOnly { if !vol.ReadSchedulable() { return false, fmt.Sprintf(FilterConstraintCSIVolumeNoReadTemplate, vol.ID) diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index da590c438..26e2e7907 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -240,6 +241,8 @@ func TestCSIVolumeChecker(t *testing.T) { mock.Node(), mock.Node(), mock.Node(), + mock.Node(), + mock.Node(), } // Register running plugins on some nodes @@ -254,28 +257,69 @@ func TestCSIVolumeChecker(t *testing.T) { "foo": { PluginID: "foo", Healthy: true, - NodeInfo: &structs.CSINodeInfo{MaxVolumes: 1}, + NodeInfo: &structs.CSINodeInfo{ + MaxVolumes: 1, + AccessibleTopology: &structs.CSITopology{ + Segments: map[string]string{"rack": "R1"}, + }, + }, }, } nodes[1].CSINodePlugins = map[string]*structs.CSIInfo{ "foo": { PluginID: "foo", Healthy: false, - NodeInfo: &structs.CSINodeInfo{MaxVolumes: 1}, + NodeInfo: &structs.CSINodeInfo{ + MaxVolumes: 1, + AccessibleTopology: &structs.CSITopology{ + Segments: map[string]string{"rack": "R1"}, + }, + }, }, } nodes[2].CSINodePlugins = map[string]*structs.CSIInfo{ "bar": { PluginID: "bar", Healthy: true, - NodeInfo: &structs.CSINodeInfo{MaxVolumes: 1}, + NodeInfo: &structs.CSINodeInfo{ + MaxVolumes: 1, + AccessibleTopology: &structs.CSITopology{ + Segments: map[string]string{"rack": "R1"}, + }, + }, }, } nodes[4].CSINodePlugins = map[string]*structs.CSIInfo{ "foo": { PluginID: "foo", Healthy: true, - NodeInfo: &structs.CSINodeInfo{MaxVolumes: 1}, + NodeInfo: &structs.CSINodeInfo{ + MaxVolumes: 1, + AccessibleTopology: &structs.CSITopology{ + Segments: map[string]string{"rack": "R1"}, + }, + }, + }, + } + nodes[5].CSINodePlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{ + MaxVolumes: 1, + AccessibleTopology: &structs.CSITopology{ + Segments: map[string]string{"rack": "R4"}, + }, + }, + }, + } + nodes[6].CSINodePlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + NodeInfo: 
&structs.CSINodeInfo{ + MaxVolumes: 1, + }, }, } @@ -294,6 +338,10 @@ func TestCSIVolumeChecker(t *testing.T) { vol.Namespace = structs.DefaultNamespace vol.AccessMode = structs.CSIVolumeAccessModeMultiNodeMultiWriter vol.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem + vol.Topologies = []*structs.CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + {Segments: map[string]string{"rack": "R2"}}, + } err := state.CSIVolumeRegister(index, []*structs.CSIVolume{vol}) require.NoError(t, err) index++ @@ -361,52 +409,70 @@ func TestCSIVolumeChecker(t *testing.T) { checker.SetNamespace(structs.DefaultNamespace) cases := []struct { + Name string Node *structs.Node RequestedVolumes map[string]*structs.VolumeRequest Result bool }{ - { // Get it + { + Name: "ok", Node: nodes[0], RequestedVolumes: volumes, Result: true, }, - { // Unhealthy + { + Name: "unhealthy node", Node: nodes[1], RequestedVolumes: volumes, Result: false, }, - { // Wrong id + { + Name: "wrong id", Node: nodes[2], RequestedVolumes: volumes, Result: false, }, - { // No Volumes requested or available + { + Name: "no volumes requested or available", Node: nodes[3], RequestedVolumes: noVolumes, Result: true, }, - { // No Volumes requested, some available + { + Name: "no volumes requested, some available", Node: nodes[0], RequestedVolumes: noVolumes, Result: true, }, - { // Volumes requested, none available + { + Name: "volumes requested, none available", Node: nodes[3], RequestedVolumes: volumes, Result: false, }, - { // Volumes requested, MaxVolumes exceeded + { + Name: "volumes requested, max volumes exceeded", Node: nodes[4], RequestedVolumes: volumes, Result: false, }, + { + Name: "no matching topology", + Node: nodes[5], + RequestedVolumes: volumes, + Result: false, + }, + { + Name: "nil topology", + Node: nodes[6], + RequestedVolumes: volumes, + Result: false, + }, } - for i, c := range cases { + for _, c := range cases { checker.SetVolumes(alloc.Name, c.RequestedVolumes) - if act := checker.Feasible(c.Node); act != c.Result { - t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result) - } + assert.Equal(t, c.Result, checker.Feasible(c.Node), c.Name) } // add a missing volume diff --git a/website/content/docs/commands/volume/create.mdx b/website/content/docs/commands/volume/create.mdx index dcf276d99..0ffc78da0 100644 --- a/website/content/docs/commands/volume/create.mdx +++ b/website/content/docs/commands/volume/create.mdx @@ -59,6 +59,16 @@ mount_options { mount_flags = ["noatime"] } +topology_request { + required { + topology { segments { "rack" = "R2" } } + topology { segments { "rack" = "R1", "zone" = "us-east-1a"} } + } + preferred { + topology { segments { "rack" = "R1", "zone" = "us-east-1a"} } + } +} + secrets { example_secret = "xyzzy" } @@ -134,6 +144,15 @@ parameters { - `mount_flags` `([]string: )` - The flags passed to `mount` (ex. `["ro", "noatime"]`) +- `topology_request` ([TopologyRequest](#topology_request-parameters): nil) - + Specify locations (region, zone, rack, etc.) where the provisioned + volume must be accessible from. Consult the documentation for your + storage provider and CSI plugin as to whether it supports defining + topology and what values it expects for topology + segments. Specifying topology segments that aren't supported by the + storage provider may return an error or may be silently removed by + the plugin. + - `secrets` (map:nil) - An optional key-value map of strings used as credentials for publishing and unpublishing volumes. 
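The scheduler side of this (the `feasible.go` hunk above) enforces the quoted CSI spec rule at placement time: a node is feasible for a volume only if the node plugin's reported topology matches at least one of the volume's topologies. A standalone sketch of that check follows (editor illustration; `Equal` here is a simplified stand-in for `structs.CSITopology.Equal`):

```go
package main

import "fmt"

type CSITopology struct{ Segments map[string]string }

// Equal is a simplified stand-in for structs.CSITopology.Equal.
func (t *CSITopology) Equal(o *CSITopology) bool {
	if t == nil || o == nil {
		return t == o
	}
	if len(t.Segments) != len(o.Segments) {
		return false
	}
	for k, v := range t.Segments {
		if o.Segments[k] != v {
			return false
		}
	}
	return true
}

// feasible mirrors the new CSIVolumeChecker rule: with no volume
// topologies there is nothing to enforce; otherwise the node plugin's
// topology must match at least one of them. A nil node topology
// therefore matches nothing, as in the "nil topology" test case above.
func feasible(volTopos []*CSITopology, nodeTopo *CSITopology) bool {
	if len(volTopos) == 0 {
		return true
	}
	for _, t := range volTopos {
		if t.Equal(nodeTopo) {
			return true
		}
	}
	return false
}

func main() {
	vol := []*CSITopology{
		{Segments: map[string]string{"rack": "R1"}},
		{Segments: map[string]string{"rack": "R2"}},
	}
	fmt.Println(feasible(vol, &CSITopology{Segments: map[string]string{"rack": "R1"}})) // true
	fmt.Println(feasible(vol, &CSITopology{Segments: map[string]string{"rack": "R4"}})) // false
	fmt.Println(feasible(vol, nil))                                                     // false
}
```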
@@ -144,6 +163,40 @@ parameters { to each storage provider, so please see the specific plugin documentation for more information. +### `topology_request` Parameters + +For the `topology_request` field, you may specify a list of either +`required` or `preferred` topologies (or both). The `required` +topologies indicate that the volume must be created in a location +accessible from at least one of the listed topologies. The `preferred` +topologies indicate that you would prefer the storage provider to +create the volume in one of the provided topologies. + +Each topology listed has a single field: + +- `segments` `(map[string]string)` - A map of location types to their + values. The specific fields required are defined by the CSI + plugin. For example, a plugin might require defining both a rack and + a zone: `segments {rack = "R2", zone = "us-east-1a"}`. + +For example: + +```hcl +topology_request { + required { + topology { segments { "rack" = "R1", "zone" = "us-east-1a" } } + topology { segments { "rack" = "R2", "zone" = "us-east-1a" } } + } + preferred { + topology { segments { "rack" = "R1", "zone" = "us-east-1a"} } + } +} +``` + +This configuration indicates you require the volume to be created +within racks `R1` or `R2`, but that you prefer the volume to be +created within `R1`. + ### Unused Fields Note that several fields used in the [`volume register`] command are set diff --git a/website/content/docs/commands/volume/register.mdx b/website/content/docs/commands/volume/register.mdx index 1e2023250..343f8b942 100644 --- a/website/content/docs/commands/volume/register.mdx +++ b/website/content/docs/commands/volume/register.mdx @@ -61,6 +61,13 @@ mount_options { mount_flags = ["noatime"] } +topology_request { + required { + topology { segments { "rack" = "R2" } } + topology { segments { "rack" = "R1", "zone" = "us-east-1a"} } + } +} + secrets { example_secret = "xyzzy" } @@ -120,6 +127,15 @@ context { - `fs_type`: file system type (ex. `"ext4"`) - `mount_flags`: the flags passed to `mount` (ex. `"ro,noatime"`) +- `topology_request` ([TopologyRequest](#topology_request-parameters): nil) - + Specify locations (region, zone, rack, etc.) where the provisioned + volume is accessible from. Consult the documentation for your + storage provider and CSI plugin as to whether it supports defining + topology and what values it expects for topology + segments. Specifying topology segments that aren't supported by the + storage provider may return an error or may be silently removed by + the plugin. + - `secrets` (map:nil) - An optional key-value map of strings used as credentials for publishing and unpublishing volumes. @@ -136,6 +152,38 @@ context { each storage provider, so please see the specific plugin documentation for more information. +### `topology_request` Parameters + +For the `topology_request` field, you may specify a list of `required` +topologies. The `required` topologies indicate that the volume was +created in a location accessible from all the listed topologies. + +Note this behavior is different from the `nomad volume create` +command, because the volume has already been created and you are +defining the topology for Nomad. The `register` command does not +support setting `preferred` topologies. + +Each topology listed has a single field: + +- `segments` `(map[string]string)` - A map of location types to their + values. The specific fields required are defined by the CSI + plugin. 
For example, a plugin might require defining both a rack and + a zone: `segments {rack = "R2", zone = "us-east-1a"}`. + +For example: + +```hcl +topology_request { + required { + topology { segments { "rack" = "R1", "zone" = "us-east-1a" } } + topology { segments { "rack" = "R2", "zone" = "us-east-1a" } } + } +} +``` + +This configuration indicates that the volume is accessible from both +racks `R1` or `R2`. + ### Unused Fields Note that several fields used in the [`volume create`] command are set From 907c795874829f375c54a1652da416c843254848 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 1 Mar 2022 10:22:52 -0500 Subject: [PATCH 19/89] CSI: set plugin socket path on restore (#12149) The Prestart hook for task runner hooks doesn't get called when we restore a task, because the task is already running. The Postrun hook for CSI plugin supervisors needs the socket path to have been populated so that the client has a valid path. --- .../taskrunner/plugin_supervisor_hook.go | 43 +++++++++++-------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go index 94909d3ce..122386fd7 100644 --- a/client/allocrunner/taskrunner/plugin_supervisor_hook.go +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -129,12 +129,13 @@ func (*csiPluginSupervisorHook) Name() string { } // Prestart is called before the task is started including after every -// restart. This requires that the mount paths for a plugin be idempotent, -// despite us not knowing the name of the plugin ahead of time. -// Because of this, we use the allocid_taskname as the unique identifier for a -// plugin on the filesystem. +// restart (but not after restore). This requires that the mount paths +// for a plugin be idempotent, despite us not knowing the name of the +// plugin ahead of time. Because of this, we use the allocid_taskname +// as the unique identifier for a plugin on the filesystem. func (h *csiPluginSupervisorHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { + // Create the mount directory that the container will access if it doesn't // already exist. Default to only nomad user access. if err := os.MkdirAll(h.mountPoint, 0700); err != nil && !os.IsExist(err) { @@ -167,19 +168,7 @@ func (h *csiPluginSupervisorHook) Prestart(ctx context.Context, Readonly: false, } - // TODO(tgross): https://github.com/hashicorp/nomad/issues/11786 - // If we're already registered, we should be able to update the - // definition in the update hook - - // For backwards compatibility, ensure that we don't overwrite the - // socketPath on client restart with existing plugin allocations. 
- pluginInfo, _ := h.runner.dynamicRegistry.PluginForAlloc( - string(h.task.CSIPluginConfig.Type), h.task.CSIPluginConfig.ID, h.alloc.ID) - if pluginInfo != nil { - h.socketPath = pluginInfo.ConnectionInfo.SocketPath - } else { - h.socketPath = filepath.Join(h.socketMountPoint, structs.CSISocketName) - } + h.setSocketHook() switch h.caps.FSIsolation { case drivers.FSIsolationNone: @@ -206,11 +195,29 @@ func (h *csiPluginSupervisorHook) Prestart(ctx context.Context, return nil } +func (h *csiPluginSupervisorHook) setSocketHook() { + + // TODO(tgross): https://github.com/hashicorp/nomad/issues/11786 + // If we're already registered, we should be able to update the + // definition in the update hook + + // For backwards compatibility, ensure that we don't overwrite the + // socketPath on client restart with existing plugin allocations. + pluginInfo, _ := h.runner.dynamicRegistry.PluginForAlloc( + string(h.task.CSIPluginConfig.Type), h.task.CSIPluginConfig.ID, h.alloc.ID) + if pluginInfo != nil && pluginInfo.ConnectionInfo.SocketPath != "" { + h.socketPath = pluginInfo.ConnectionInfo.SocketPath + return + } + h.socketPath = filepath.Join(h.socketMountPoint, structs.CSISocketName) +} + // Poststart is called after the task has started. Poststart is not // called if the allocation is terminal. // // The context is cancelled if the task is killed. func (h *csiPluginSupervisorHook) Poststart(_ context.Context, _ *interfaces.TaskPoststartRequest, _ *interfaces.TaskPoststartResponse) error { + // If we're already running the supervisor routine, then we don't need to try // and restart it here as it only terminates on `Stop` hooks. h.runningLock.Lock() @@ -220,6 +227,8 @@ func (h *csiPluginSupervisorHook) Poststart(_ context.Context, _ *interfaces.Tas } h.runningLock.Unlock() + h.setSocketHook() + go h.ensureSupervisorLoop(h.shutdownCtx) return nil } From 9cf99ce5ec453582c899742e430f57ac4b11c8e5 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 1 Mar 2022 13:30:30 -0500 Subject: [PATCH 20/89] csi: subcommand for volume snapshot (#12152) --- command/commands.go | 5 +++++ command/volume_snapshot.go | 45 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 command/volume_snapshot.go diff --git a/command/commands.go b/command/commands.go index 287cdfaf1..5282f2831 100644 --- a/command/commands.go +++ b/command/commands.go @@ -854,6 +854,11 @@ func Commands(metaPtr *Meta, agentUi cli.Ui) map[string]cli.CommandFactory { Meta: meta, }, nil }, + "volume snapshot": func() (cli.Command, error) { + return &VolumeSnapshotCommand{ + Meta: meta, + }, nil + }, "volume snapshot create": func() (cli.Command, error) { return &VolumeSnapshotCreateCommand{ Meta: meta, diff --git a/command/volume_snapshot.go b/command/volume_snapshot.go new file mode 100644 index 000000000..ccaad6b74 --- /dev/null +++ b/command/volume_snapshot.go @@ -0,0 +1,45 @@ +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +type VolumeSnapshotCommand struct { + Meta +} + +func (f *VolumeSnapshotCommand) Name() string { return "snapshot" } + +func (f *VolumeSnapshotCommand) Run(args []string) int { + return cli.RunResultHelp +} + +func (f *VolumeSnapshotCommand) Synopsis() string { + return "Interact with volume snapshots" +} + +func (f *VolumeSnapshotCommand) Help() string { + helpText := ` +Usage: nomad volume snapshot [options] [args] + + This command groups subcommands for interacting with CSI volume snapshots. 
+ + Create a snapshot of an external storage volume: + + $ nomad volume snapshot create + + Display a list of CSI volume snapshots along with their + source volume ID as known to the external storage provider. + + $ nomad volume snapshot list -plugin + + Delete a snapshot of an external storage volume: + + $ nomad volume snapshot delete + + Please see the individual subcommand help for detailed usage information. +` + return strings.TrimSpace(helpText) +} From b2462278699cdf1a92a6518d8e0a8d1798471bcf Mon Sep 17 00:00:00 2001 From: Luiz Aoqui Date: Tue, 1 Mar 2022 15:36:49 -0500 Subject: [PATCH 21/89] api: paginated results with different ordering (#12128) The paginator logic was built when go-memdb iterators would return items ordered lexicographically by their ID prefixes, but #12054 added the option for some tables to return results ordered by their `CreateIndex` instead, which invalidated the previous paginator assumption. The iterator used for pagination must still return results in some order so that the paginator can properly handle requests where the next_token value is not present in the results anymore (e.g., the eval was GC'ed). In these situations, the paginator will start the returned page in the first element right after where the requested token should've been. This commit moves the logic to generate pagination tokens from the elements being paginated to the iterator itself so that callers can have more control over the token format to make sure they are properly ordered and stable. It also allows configuring the paginator as being ordered in ascending or descending order, which is relevant when looking for a token that may not be present anymore. --- api/evaluations_test.go | 119 +++++++++------------------ nomad/deployment_endpoint.go | 32 +++++++- nomad/deployment_endpoint_test.go | 59 +++++++++++--- nomad/eval_endpoint.go | 31 +++++++- nomad/eval_endpoint_test.go | 128 ++++++++++++++++++++---------- nomad/state/filter_test.go | 27 ++++++- nomad/state/paginator.go | 30 ++++--- nomad/state/paginator_test.go | 34 ++++---- nomad/state/schema.go | 36 +++++++-- nomad/structs/structs.go | 8 -- 10 files changed, 319 insertions(+), 185 deletions(-) diff --git a/api/evaluations_test.go b/api/evaluations_test.go index 2b5b56134..226db8460 100644 --- a/api/evaluations_test.go +++ b/api/evaluations_test.go @@ -1,12 +1,11 @@ package api import ( - "reflect" "sort" - "strings" "testing" "github.com/hashicorp/nomad/api/internal/testutil" + "github.com/stretchr/testify/require" ) func TestEvaluations_List(t *testing.T) { @@ -17,41 +16,27 @@ func TestEvaluations_List(t *testing.T) { // Listing when nothing exists returns empty result, qm, err := e.List(nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if qm.LastIndex != 0 { - t.Fatalf("bad index: %d", qm.LastIndex) - } - if n := len(result); n != 0 { - t.Fatalf("expected 0 evaluations, got: %d", n) - } + require.NoError(t, err) + require.Equal(t, uint64(0), qm.LastIndex, "bad index") + require.Equal(t, 0, len(result), "expected 0 evaluations") // Register a job. This will create an evaluation. 
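	// NOTE (editor illustration, not part of the patch): for tables that
	// can be ordered by CreateIndex, the new iterators build tokens that
	// embed the index, along the lines of
	//
	//	token := fmt.Sprintf("%v-%v", eval.CreateIndex, eval.ID)
	//
	// so that tokens sort in the same order as the results themselves,
	// which is what makes the paging assertions below stable.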
 	jobs := c.Jobs()
 	job := testJob()
 	resp, wm, err := jobs.Register(job, nil)
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
+	require.NoError(t, err)
 	assertWriteMeta(t, wm)
 
 	// Check the evaluations again
 	result, qm, err = e.List(nil)
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
+	require.NoError(t, err)
 	assertQueryMeta(t, qm)
 
 	// if the eval fails fast there can be more than 1
 	// but they are in order of most recent first, so look at the last one
-	if len(result) == 0 {
-		t.Fatalf("expected eval (%s), got none", resp.EvalID)
-	}
+	require.Greater(t, len(result), 0, "expected eval (%s), got none", resp.EvalID)
 	idx := len(result) - 1
-	if result[idx].ID != resp.EvalID {
-		t.Fatalf("expected eval (%s), got: %#v", resp.EvalID, result[idx])
-	}
+	require.Equal(t, resp.EvalID, result[idx].ID, "expected eval (%s), got: %#v", resp.EvalID, result[idx])
 
 	// wait until the 2nd eval shows up before we try paging
 	results := []*Evaluation{}
@@ -65,26 +50,26 @@ func TestEvaluations_List(t *testing.T) {
 		t.Fatalf("err: %s", err)
 	})
 
-	// Check the evaluations again with paging; note that while this
-	// package sorts by timestamp, the actual HTTP API sorts by ID
-	// so we need to use that for the NextToken
-	ids := []string{results[0].ID, results[1].ID}
-	sort.Strings(ids)
-	result, qm, err = e.List(&QueryOptions{PerPage: int32(1), NextToken: ids[1]})
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
-	if len(result) != 1 {
-		t.Fatalf("expected no evals after last one but got %v", result[0])
-	}
+	// query first page
+	result, qm, err = e.List(&QueryOptions{
+		PerPage: int32(1),
+	})
+	require.NoError(t, err)
+	require.Equal(t, 1, len(result), "expected exactly 1 eval on first page but got %d: %#v", len(result), result)
+
+	// query second page
+	result, qm, err = e.List(&QueryOptions{
+		PerPage:   int32(1),
+		NextToken: qm.NextToken,
+	})
+	require.NoError(t, err)
+	require.Equal(t, 1, len(result), "expected exactly 1 eval on second page but got %d: %#v", len(result), result)
 
 	// Query evaluations using a filter.
 	results, _, err = e.List(&QueryOptions{
 		Filter: `TriggeredBy == "job-register"`,
 	})
-	if len(result) != 1 {
-		t.Fatalf("expected 1 eval, got %d", len(result))
-	}
+	require.Equal(t, 1, len(result), "expected 1 eval, got %d", len(result))
 }
 
 func TestEvaluations_PrefixList(t *testing.T) {
@@ -95,36 +80,25 @@ func TestEvaluations_PrefixList(t *testing.T) {
 
 	// Listing when nothing exists returns empty
 	result, qm, err := e.PrefixList("abcdef")
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
-	if qm.LastIndex != 0 {
-		t.Fatalf("bad index: %d", qm.LastIndex)
-	}
-	if n := len(result); n != 0 {
-		t.Fatalf("expected 0 evaluations, got: %d", n)
-	}
+	require.NoError(t, err)
+	require.Equal(t, uint64(0), qm.LastIndex, "bad index")
+	require.Equal(t, 0, len(result), "expected 0 evaluations")
 
 	// Register a job. This will create an evaluation.
 	jobs := c.Jobs()
 	job := testJob()
 	resp, wm, err := jobs.Register(job, nil)
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
+	require.NoError(t, err)
 	assertWriteMeta(t, wm)
 
 	// Check the evaluations again
 	result, qm, err = e.PrefixList(resp.EvalID[:4])
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
+	require.NoError(t, err)
 	assertQueryMeta(t, qm)
 
 	// Check if we have the right list
-	if len(result) != 1 || result[0].ID != resp.EvalID {
-		t.Fatalf("bad: %#v", result)
-	}
+	require.Equal(t, 1, len(result))
+	require.Equal(t, resp.EvalID, result[0].ID)
 }
 
 func TestEvaluations_Info(t *testing.T) {
@@ -135,30 +109,23 @@ func TestEvaluations_Info(t *testing.T) {
 
 	// Querying a nonexistent evaluation returns error
 	_, _, err := e.Info("8E231CF4-CA48-43FF-B694-5801E69E22FA", nil)
-	if err == nil || !strings.Contains(err.Error(), "not found") {
-		t.Fatalf("expected not found error, got: %s", err)
-	}
+	require.Error(t, err)
 
 	// Register a job. Creates a new evaluation.
 	jobs := c.Jobs()
 	job := testJob()
 	resp, wm, err := jobs.Register(job, nil)
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
+	require.NoError(t, err)
 	assertWriteMeta(t, wm)
 
 	// Try looking up by the new eval ID
 	result, qm, err := e.Info(resp.EvalID, nil)
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
+	require.NoError(t, err)
 	assertQueryMeta(t, qm)
 
 	// Check that we got the right result
-	if result == nil || result.ID != resp.EvalID {
-		t.Fatalf("expected eval %q, got: %#v", resp.EvalID, result)
-	}
+	require.NotNil(t, result)
+	require.Equal(t, resp.EvalID, result.ID)
 }
 
 func TestEvaluations_Allocations(t *testing.T) {
@@ -169,15 +136,9 @@ func TestEvaluations_Allocations(t *testing.T) {
 
 	// Returns empty if no allocations
 	allocs, qm, err := e.Allocations("8E231CF4-CA48-43FF-B694-5801E69E22FA", nil)
-	if err != nil {
-		t.Fatalf("err: %s", err)
-	}
-	if qm.LastIndex != 0 {
-		t.Fatalf("bad index: %d", qm.LastIndex)
-	}
-	if n := len(allocs); n != 0 {
-		t.Fatalf("expected 0 allocs, got: %d", n)
-	}
+	require.NoError(t, err)
+	require.Equal(t, uint64(0), qm.LastIndex, "bad index")
+	require.Equal(t, 0, len(allocs), "expected 0 allocations")
 }
 
 func TestEvaluations_Sort(t *testing.T) {
@@ -194,7 +155,5 @@ func TestEvaluations_Sort(t *testing.T) {
 		{CreateIndex: 2},
 		{CreateIndex: 1},
 	}
-	if !reflect.DeepEqual(evals, expect) {
-		t.Fatalf("\n\n%#v\n\n%#v", evals, expect)
-	}
+	require.Equal(t, expect, evals)
 }
diff --git a/nomad/deployment_endpoint.go b/nomad/deployment_endpoint.go
index bfd3be7f4..70f685d4a 100644
--- a/nomad/deployment_endpoint.go
+++ b/nomad/deployment_endpoint.go
@@ -13,6 +13,30 @@ import (
 	"github.com/hashicorp/nomad/nomad/structs"
 )
 
+// DeploymentPaginationIterator is a wrapper over a go-memdb iterator that
+// implements the paginator Iterator interface.
+type DeploymentPaginationIterator struct {
+	iter          memdb.ResultIterator
+	byCreateIndex bool
+}
+
+func (it DeploymentPaginationIterator) Next() (string, interface{}) {
+	raw := it.iter.Next()
+	if raw == nil {
+		return "", nil
+	}
+
+	d := raw.(*structs.Deployment)
+	token := d.ID
+
+	// prefix the pagination token by CreateIndex to keep it properly sorted.
+ if it.byCreateIndex { + token = fmt.Sprintf("%v-%v", d.CreateIndex, d.ID) + } + + return token, d +} + // Deployment endpoint is used for manipulating deployments type Deployment struct { srv *Server @@ -409,20 +433,26 @@ func (d *Deployment) List(args *structs.DeploymentListRequest, reply *structs.De // Capture all the deployments var err error var iter memdb.ResultIterator + var deploymentIter DeploymentPaginationIterator if prefix := args.QueryOptions.Prefix; prefix != "" { iter, err = store.DeploymentsByIDPrefix(ws, namespace, prefix) + deploymentIter.byCreateIndex = false } else if namespace != structs.AllNamespacesSentinel { iter, err = store.DeploymentsByNamespaceOrdered(ws, namespace, args.Ascending) + deploymentIter.byCreateIndex = true } else { iter, err = store.Deployments(ws, args.Ascending) + deploymentIter.byCreateIndex = true } if err != nil { return err } + deploymentIter.iter = iter + var deploys []*structs.Deployment - paginator, err := state.NewPaginator(iter, args.QueryOptions, + paginator, err := state.NewPaginator(deploymentIter, args.QueryOptions, func(raw interface{}) error { deploy := raw.(*structs.Deployment) deploys = append(deploys, deploy) diff --git a/nomad/deployment_endpoint_test.go b/nomad/deployment_endpoint_test.go index 7170b507b..e91bc28a8 100644 --- a/nomad/deployment_endpoint_test.go +++ b/nomad/deployment_endpoint_test.go @@ -1271,11 +1271,18 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { {id: "aaaaaabb-3350-4b4b-d185-0e1992ed43e9"}, // 4 {id: "aaaaaacc-3350-4b4b-d185-0e1992ed43e9"}, // 5 {id: "aaaaaadd-3350-4b4b-d185-0e1992ed43e9"}, // 6 + {id: "00000111-3350-4b4b-d185-0e1992ed43e9"}, // 7 + {}, // 8, index missing + {id: "bbbb1111-3350-4b4b-d185-0e1992ed43e9"}, // 9 } state := s1.fsm.State() for i, m := range mocks { + if m.id == "" { + continue + } + index := 1000 + uint64(i) deployment := mock.Deployment() deployment.Status = structs.DeploymentStatusCancelled @@ -1305,7 +1312,7 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { { name: "test01 size-2 page-1 default NS", pageSize: 2, - expectedNextToken: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", @@ -1315,7 +1322,7 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { name: "test02 size-2 page-1 default NS with prefix", prefix: "aaaa", pageSize: 2, - expectedNextToken: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", // prefix results are not sorted by create index expectedIDs: []string{ "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", @@ -1324,8 +1331,8 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { { name: "test03 size-2 page-2 default NS", pageSize: 2, - nextToken: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", - expectedNextToken: "aaaaaacc-3350-4b4b-d185-0e1992ed43e9", + nextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1005-aaaaaacc-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", "aaaaaabb-3350-4b4b-d185-0e1992ed43e9", @@ -1343,14 +1350,25 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { }, }, { - name: "test05 no valid results with filters and prefix", + name: "test05 size-2 page-2 all namespaces", + namespace: "*", + pageSize: 2, + nextToken: "1002-aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: 
"1004-aaaaaabb-3350-4b4b-d185-0e1992ed43e9", + expectedIDs: []string{ + "aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test06 no valid results with filters and prefix", prefix: "cccc", pageSize: 2, nextToken: "", expectedIDs: []string{}, }, { - name: "test06 go-bexpr filter", + name: "test07 go-bexpr filter", namespace: "*", filter: `ID matches "^a+[123]"`, expectedIDs: []string{ @@ -1360,18 +1378,18 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { }, }, { - name: "test07 go-bexpr filter with pagination", + name: "test08 go-bexpr filter with pagination", namespace: "*", filter: `ID matches "^a+[123]"`, pageSize: 2, - expectedNextToken: "aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1002-aaaaaa33-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", }, }, { - name: "test08 go-bexpr filter in namespace", + name: "test09 go-bexpr filter in namespace", namespace: "non-default", filter: `Status == "cancelled"`, expectedIDs: []string{ @@ -1379,21 +1397,38 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { }, }, { - name: "test09 go-bexpr wrong namespace", + name: "test10 go-bexpr wrong namespace", namespace: "default", filter: `Namespace == "non-default"`, expectedIDs: []string{}, }, { - name: "test10 go-bexpr invalid expression", + name: "test11 go-bexpr invalid expression", filter: `NotValid`, expectedError: "failed to read filter expression", }, { - name: "test11 go-bexpr invalid field", + name: "test12 go-bexpr invalid field", filter: `InvalidField == "value"`, expectedError: "error finding value in datum", }, + { + name: "test13 non-lexicographic order", + pageSize: 1, + nextToken: "1007-00000111-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1009-bbbb1111-3350-4b4b-d185-0e1992ed43e9", + expectedIDs: []string{ + "00000111-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test14 missing index", + pageSize: 1, + nextToken: "1008-e9522802-0cd8-4b1d-9c9e-ab3d97938371", + expectedIDs: []string{ + "bbbb1111-3350-4b4b-d185-0e1992ed43e9", + }, + }, } for _, tc := range cases { diff --git a/nomad/eval_endpoint.go b/nomad/eval_endpoint.go index b938ec928..0b6b26f59 100644 --- a/nomad/eval_endpoint.go +++ b/nomad/eval_endpoint.go @@ -21,6 +21,30 @@ const ( DefaultDequeueTimeout = time.Second ) +// EvalPaginationIterator is a wrapper over a go-memdb iterator that implements +// the paginator Iterator interface. +type EvalPaginationIterator struct { + iter memdb.ResultIterator + byCreateIndex bool +} + +func (it EvalPaginationIterator) Next() (string, interface{}) { + raw := it.iter.Next() + if raw == nil { + return "", nil + } + + eval := raw.(*structs.Evaluation) + token := eval.ID + + // prefix the pagination token by CreateIndex to keep it properly sorted. 
+ if it.byCreateIndex { + token = fmt.Sprintf("%v-%v", eval.CreateIndex, eval.ID) + } + + return token, eval +} + // Eval endpoint is used for eval interactions type Eval struct { srv *Server @@ -414,13 +438,17 @@ func (e *Eval) List(args *structs.EvalListRequest, reply *structs.EvalListRespon // Scan all the evaluations var err error var iter memdb.ResultIterator + var evalIter EvalPaginationIterator if prefix := args.QueryOptions.Prefix; prefix != "" { iter, err = store.EvalsByIDPrefix(ws, namespace, prefix) + evalIter.byCreateIndex = false } else if namespace != structs.AllNamespacesSentinel { iter, err = store.EvalsByNamespaceOrdered(ws, namespace, args.Ascending) + evalIter.byCreateIndex = true } else { iter, err = store.Evals(ws, args.Ascending) + evalIter.byCreateIndex = true } if err != nil { return err @@ -432,9 +460,10 @@ func (e *Eval) List(args *structs.EvalListRequest, reply *structs.EvalListRespon } return false }) + evalIter.iter = iter var evals []*structs.Evaluation - paginator, err := state.NewPaginator(iter, args.QueryOptions, + paginator, err := state.NewPaginator(evalIter, args.QueryOptions, func(raw interface{}) error { eval := raw.(*structs.Evaluation) evals = append(evals, eval) diff --git a/nomad/eval_endpoint_test.go b/nomad/eval_endpoint_test.go index 3aa327508..92463394e 100644 --- a/nomad/eval_endpoint_test.go +++ b/nomad/eval_endpoint_test.go @@ -1013,40 +1013,51 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { // in the order that the state store will return them from the // iterator (sorted by create index), for ease of writing tests mocks := []struct { - id string + ids []string namespace string jobID string status string }{ - {id: "aaaa1111-3350-4b4b-d185-0e1992ed43e9", jobID: "example"}, // 0 - {id: "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", jobID: "example"}, // 1 - {id: "aaaaaa33-3350-4b4b-d185-0e1992ed43e9", namespace: "non-default"}, // 2 - {id: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", jobID: "example", status: "blocked"}, // 3 - {id: "aaaaaabb-3350-4b4b-d185-0e1992ed43e9"}, // 4 - {id: "aaaaaacc-3350-4b4b-d185-0e1992ed43e9"}, // 5 - {id: "aaaaaadd-3350-4b4b-d185-0e1992ed43e9", jobID: "example"}, // 6 - {id: "aaaaaaee-3350-4b4b-d185-0e1992ed43e9", jobID: "example"}, // 7 - {id: "aaaaaaff-3350-4b4b-d185-0e1992ed43e9"}, // 8 + {ids: []string{"aaaa1111-3350-4b4b-d185-0e1992ed43e9"}, jobID: "example"}, // 0 + {ids: []string{"aaaaaa22-3350-4b4b-d185-0e1992ed43e9"}, jobID: "example"}, // 1 + {ids: []string{"aaaaaa33-3350-4b4b-d185-0e1992ed43e9"}, namespace: "non-default"}, // 2 + {ids: []string{"aaaaaaaa-3350-4b4b-d185-0e1992ed43e9"}, jobID: "example", status: "blocked"}, // 3 + {ids: []string{"aaaaaabb-3350-4b4b-d185-0e1992ed43e9"}}, // 4 + {ids: []string{"aaaaaacc-3350-4b4b-d185-0e1992ed43e9"}}, // 5 + {ids: []string{"aaaaaadd-3350-4b4b-d185-0e1992ed43e9"}, jobID: "example"}, // 6 + {ids: []string{"aaaaaaee-3350-4b4b-d185-0e1992ed43e9"}, jobID: "example"}, // 7 + {ids: []string{"aaaaaaff-3350-4b4b-d185-0e1992ed43e9"}}, // 8 + {ids: []string{"00000111-3350-4b4b-d185-0e1992ed43e9"}}, // 9 + {ids: []string{ // 10 + "00000222-3350-4b4b-d185-0e1992ed43e9", + "00000333-3350-4b4b-d185-0e1992ed43e9", + }}, + {}, // 11, index missing + {ids: []string{"bbbb1111-3350-4b4b-d185-0e1992ed43e9"}}, // 12 } state := s1.fsm.State() var evals []*structs.Evaluation for i, m := range mocks { - eval := mock.Eval() - eval.ID = m.id - if m.namespace != "" { // defaults to "default" - eval.Namespace = m.namespace + evalsInTx := []*structs.Evaluation{} + for _, id := 
range m.ids { + eval := mock.Eval() + eval.ID = id + if m.namespace != "" { // defaults to "default" + eval.Namespace = m.namespace + } + if m.jobID != "" { // defaults to some random UUID + eval.JobID = m.jobID + } + if m.status != "" { // defaults to "pending" + eval.Status = m.status + } + evals = append(evals, eval) + evalsInTx = append(evalsInTx, eval) } - if m.jobID != "" { // defaults to some random UUID - eval.JobID = m.jobID - } - if m.status != "" { // defaults to "pending" - eval.Status = m.status - } - evals = append(evals, eval) index := 1000 + uint64(i) - require.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, index, []*structs.Evaluation{eval})) + require.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, index, evalsInTx)) } aclToken := mock.CreatePolicyAndToken(t, state, 1100, "test-valid-read", @@ -1073,13 +1084,13 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", }, - expectedNextToken: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", // next one in default namespace + expectedNextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", // next one in default namespace }, { name: "test02 size-2 page-1 default NS with prefix", prefix: "aaaa", pageSize: 2, - expectedNextToken: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", // prefix results are not sorted by create index expectedIDs: []string{ "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", @@ -1088,8 +1099,8 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { { name: "test03 size-2 page-2 default NS", pageSize: 2, - nextToken: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", - expectedNextToken: "aaaaaacc-3350-4b4b-d185-0e1992ed43e9", + nextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1005-aaaaaacc-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", "aaaaaabb-3350-4b4b-d185-0e1992ed43e9", @@ -1112,7 +1123,7 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { filterJobID: "example", filterStatus: "pending", // aaaaaaaa, bb, and cc are filtered by status - expectedNextToken: "aaaaaadd-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1006-aaaaaadd-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", @@ -1148,7 +1159,7 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { pageSize: 3, // reads off the end filterJobID: "example", filterStatus: "pending", - nextToken: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + nextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", expectedNextToken: "", expectedIDs: []string{ "aaaaaadd-3350-4b4b-d185-0e1992ed43e9", @@ -1169,14 +1180,25 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { }, }, { - name: "test10 no valid results with filters", + name: "test10 size-2 page-2 all namespaces", + namespace: "*", + pageSize: 2, + nextToken: "1002-aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1004-aaaaaabb-3350-4b4b-d185-0e1992ed43e9", + expectedIDs: []string{ + "aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test11 no valid results with filters", pageSize: 2, filterJobID: "whatever", nextToken: "", expectedIDs: []string{}, }, { - name: "test11 no valid results with filters and prefix", + name: "test12 no valid results with filters and prefix", prefix: "aaaa", 
pageSize: 2, filterJobID: "whatever", @@ -1184,36 +1206,36 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { expectedIDs: []string{}, }, { - name: "test12 no valid results with filters page-2", + name: "test13 no valid results with filters page-2", filterJobID: "whatever", nextToken: "aaaaaa11-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{}, }, { - name: "test13 no valid results with filters page-2 with prefix", + name: "test14 no valid results with filters page-2 with prefix", prefix: "aaaa", filterJobID: "whatever", nextToken: "aaaaaa11-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{}, }, { - name: "test14 go-bexpr filter", + name: "test15 go-bexpr filter", filter: `Status == "blocked"`, nextToken: "", expectedIDs: []string{"aaaaaaaa-3350-4b4b-d185-0e1992ed43e9"}, }, { - name: "test15 go-bexpr filter with pagination", + name: "test16 go-bexpr filter with pagination", filter: `JobID == "example"`, pageSize: 2, - expectedNextToken: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", }, }, { - name: "test16 go-bexpr filter namespace", + name: "test17 go-bexpr filter namespace", namespace: "non-default", filter: `ID contains "aaa"`, expectedIDs: []string{ @@ -1221,27 +1243,53 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { }, }, { - name: "test17 go-bexpr wrong namespace", + name: "test18 go-bexpr wrong namespace", namespace: "default", filter: `Namespace == "non-default"`, expectedIDs: []string{}, }, { - name: "test18 incompatible filtering", + name: "test19 incompatible filtering", filter: `JobID == "example"`, filterStatus: "complete", expectedError: structs.ErrIncompatibleFiltering.Error(), }, { - name: "test19 go-bexpr invalid expression", + name: "test20 go-bexpr invalid expression", filter: `NotValid`, expectedError: "failed to read filter expression", }, { - name: "test20 go-bexpr invalid field", + name: "test21 go-bexpr invalid field", filter: `InvalidField == "value"`, expectedError: "error finding value in datum", }, + { + name: "test22 non-lexicographic order", + pageSize: 1, + nextToken: "1009-00000111-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1010-00000222-3350-4b4b-d185-0e1992ed43e9", + expectedIDs: []string{ + "00000111-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test23 same index", + pageSize: 1, + nextToken: "1010-00000222-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1010-00000333-3350-4b4b-d185-0e1992ed43e9", + expectedIDs: []string{ + "00000222-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test24 missing index", + pageSize: 1, + nextToken: "1011-e9522802-0cd8-4b1d-9c9e-ab3d97938371", + expectedIDs: []string{ + "bbbb1111-3350-4b4b-d185-0e1992ed43e9", + }, + }, } for _, tc := range cases { diff --git a/nomad/state/filter_test.go b/nomad/state/filter_test.go index f0ba14a73..2fa1b02ad 100644 --- a/nomad/state/filter_test.go +++ b/nomad/state/filter_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/hashicorp/go-bexpr" + memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" ) @@ -75,8 +76,9 @@ func BenchmarkEvalListFilter(b *testing.B) { for i := 0; i < b.N; i++ { iter, _ := state.EvalsByNamespace(nil, structs.DefaultNamespace) + evalIter := evalPaginationIterator{iter} var evals []*structs.Evaluation - paginator, err := NewPaginator(iter, opts, func(raw interface{}) error { + 
paginator, err := NewPaginator(evalIter, opts, func(raw interface{}) error { eval := raw.(*structs.Evaluation) evals = append(evals, eval) return nil @@ -98,8 +100,9 @@ func BenchmarkEvalListFilter(b *testing.B) { for i := 0; i < b.N; i++ { iter, _ := state.Evals(nil, false) + evalIter := evalPaginationIterator{iter} var evals []*structs.Evaluation - paginator, err := NewPaginator(iter, opts, func(raw interface{}) error { + paginator, err := NewPaginator(evalIter, opts, func(raw interface{}) error { eval := raw.(*structs.Evaluation) evals = append(evals, eval) return nil @@ -134,8 +137,9 @@ func BenchmarkEvalListFilter(b *testing.B) { for i := 0; i < b.N; i++ { iter, _ := state.EvalsByNamespace(nil, structs.DefaultNamespace) + evalIter := evalPaginationIterator{iter} var evals []*structs.Evaluation - paginator, err := NewPaginator(iter, opts, func(raw interface{}) error { + paginator, err := NewPaginator(evalIter, opts, func(raw interface{}) error { eval := raw.(*structs.Evaluation) evals = append(evals, eval) return nil @@ -171,8 +175,9 @@ func BenchmarkEvalListFilter(b *testing.B) { for i := 0; i < b.N; i++ { iter, _ := state.Evals(nil, false) + evalIter := evalPaginationIterator{iter} var evals []*structs.Evaluation - paginator, err := NewPaginator(iter, opts, func(raw interface{}) error { + paginator, err := NewPaginator(evalIter, opts, func(raw interface{}) error { eval := raw.(*structs.Evaluation) evals = append(evals, eval) return nil @@ -230,3 +235,17 @@ func generateEval(i int, ns string) *structs.Evaluation { ModifyTime: now, } } + +type evalPaginationIterator struct { + iter memdb.ResultIterator +} + +func (it evalPaginationIterator) Next() (string, interface{}) { + raw := it.iter.Next() + if raw == nil { + return "", nil + } + + eval := raw.(*structs.Evaluation) + return eval.ID, eval +} diff --git a/nomad/state/paginator.go b/nomad/state/paginator.go index 02f7f6fa8..607ff8cde 100644 --- a/nomad/state/paginator.go +++ b/nomad/state/paginator.go @@ -9,9 +9,12 @@ import ( // Iterator is the interface that must be implemented to use the Paginator. type Iterator interface { - // Next returns the next element to be considered for pagination. + // Next returns the next element to be considered for pagination along with + // a token string used to uniquely identify elements in the iteration. // The page will end if nil is returned. - Next() interface{} + // Tokens should have a stable order and the order must match the paginator + // ascending property. + Next() (string, interface{}) } // Paginator is an iterator over a memdb.ResultIterator that returns @@ -22,6 +25,7 @@ type Paginator struct { itemCount int32 seekingToken string nextToken string + ascending bool nextTokenFound bool pageErr error @@ -50,6 +54,7 @@ func NewPaginator(iter Iterator, opts structs.QueryOptions, appendFunc func(inte iter: iter, perPage: opts.PerPage, seekingToken: opts.NextToken, + ascending: opts.Ascending, nextTokenFound: opts.NextToken == "", filterEvaluator: evaluator, appendFunc: appendFunc, @@ -79,16 +84,23 @@ DONE: } func (p *Paginator) next() (interface{}, paginatorState) { - raw := p.iter.Next() + token, raw := p.iter.Next() if raw == nil { p.nextToken = "" return nil, paginatorComplete } // have we found the token we're seeking (if any)? 
- id := raw.(IDGetter).GetID() - p.nextToken = id - if !p.nextTokenFound && id < p.seekingToken { + p.nextToken = token + + var passedToken bool + if p.ascending { + passedToken = token < p.seekingToken + } else { + passedToken = token > p.seekingToken + } + + if !p.nextTokenFound && passedToken { return nil, paginatorSkip } @@ -115,12 +127,6 @@ func (p *Paginator) next() (interface{}, paginatorState) { return raw, paginatorInclude } -// IDGetter must be implemented for the results of any iterator we -// want to paginate -type IDGetter interface { - GetID() string -} - type paginatorState int const ( diff --git a/nomad/state/paginator_test.go b/nomad/state/paginator_test.go index b0871ddd3..0d6f07fda 100644 --- a/nomad/state/paginator_test.go +++ b/nomad/state/paginator_test.go @@ -6,7 +6,6 @@ import ( "github.com/stretchr/testify/require" - memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/nomad/structs" ) @@ -63,7 +62,9 @@ func TestPaginator(t *testing.T) { paginator, err := NewPaginator(iter, structs.QueryOptions{ - PerPage: tc.perPage, NextToken: tc.nextToken, + PerPage: tc.perPage, + NextToken: tc.nextToken, + Ascending: true, }, func(raw interface{}) error { if tc.expectedError != "" { @@ -71,7 +72,7 @@ func TestPaginator(t *testing.T) { } result := raw.(*mockObject) - results = append(results, result.GetID()) + results = append(results, result.id) return nil }, ) @@ -96,32 +97,27 @@ func TestPaginator(t *testing.T) { // implements memdb.ResultIterator interface type testResultIterator struct { results chan interface{} - idx int } -func (i testResultIterator) Next() interface{} { +func (i testResultIterator) Next() (string, interface{}) { select { - case result := <-i.results: - return result - default: - return nil - } -} + case raw := <-i.results: + if raw == nil { + return "", nil + } -// not used, but required to implement memdb.ResultIterator -func (i testResultIterator) WatchCh() <-chan struct{} { - return make(<-chan struct{}) + m := raw.(*mockObject) + return m.id, m + default: + return "", nil + } } type mockObject struct { id string } -func (m *mockObject) GetID() string { - return m.id -} - -func newTestIterator(ids []string) memdb.ResultIterator { +func newTestIterator(ids []string) testResultIterator { iter := testResultIterator{results: make(chan interface{}, 20)} for _, id := range ids { iter.results <- &mockObject{id: id} diff --git a/nomad/state/schema.go b/nomad/state/schema.go index eb6805f04..5c62ae2fd 100644 --- a/nomad/state/schema.go +++ b/nomad/state/schema.go @@ -320,9 +320,16 @@ func deploymentSchema() *memdb.TableSchema { "create": { Name: "create", AllowMissing: false, - Unique: false, - Indexer: &memdb.UintFieldIndex{ - Field: "CreateIndex", + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.UintFieldIndex{ + Field: "CreateIndex", + }, + &memdb.StringFieldIndex{ + Field: "ID", + }, + }, }, }, @@ -346,7 +353,7 @@ func deploymentSchema() *memdb.TableSchema { "namespace_create": { Name: "namespace_create", AllowMissing: false, - Unique: false, + Unique: true, Indexer: &memdb.CompoundIndex{ AllowMissing: false, Indexes: []memdb.Indexer{ @@ -356,6 +363,9 @@ func deploymentSchema() *memdb.TableSchema { &memdb.UintFieldIndex{ Field: "CreateIndex", }, + &memdb.StringFieldIndex{ + Field: "ID", + }, }, }, }, @@ -438,9 +448,16 @@ func evalTableSchema() *memdb.TableSchema { "create": { Name: "create", AllowMissing: false, - Unique: false, - Indexer: &memdb.UintFieldIndex{ - Field: "CreateIndex", + Unique: true, + 
Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.UintFieldIndex{ + Field: "CreateIndex", + }, + &memdb.StringFieldIndex{ + Field: "ID", + }, + }, }, }, @@ -486,7 +503,7 @@ func evalTableSchema() *memdb.TableSchema { "namespace_create": { Name: "namespace_create", AllowMissing: false, - Unique: false, + Unique: true, Indexer: &memdb.CompoundIndex{ AllowMissing: false, Indexes: []memdb.Indexer{ @@ -496,6 +513,9 @@ func evalTableSchema() *memdb.TableSchema { &memdb.UintFieldIndex{ Field: "CreateIndex", }, + &memdb.StringFieldIndex{ + Field: "ID", + }, }, }, }, diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 9a30381cf..739340105 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -10548,14 +10548,6 @@ type Evaluation struct { ModifyTime int64 } -// GetID implements the IDGetter interface, required for pagination -func (e *Evaluation) GetID() string { - if e == nil { - return "" - } - return e.ID -} - // TerminalStatus returns if the current status is terminal and // will no longer transition. func (e *Evaluation) TerminalStatus() bool { From 2f14ceef05ddfbbfc4a67b25daafbcd408ab8389 Mon Sep 17 00:00:00 2001 From: Ashlee M Boyer <43934258+ashleemboyer@users.noreply.github.com> Date: Tue, 1 Mar 2022 17:24:12 -0500 Subject: [PATCH 22/89] docs: Fixing path for autoscaling/agent/source nav item (#12166) --- website/content/{docs => tools}/autoscaling/agent/source.mdx | 0 website/data/docs-nav-data.json | 2 +- website/data/tools-nav-data.json | 4 ++++ 3 files changed, 5 insertions(+), 1 deletion(-) rename website/content/{docs => tools}/autoscaling/agent/source.mdx (100%) diff --git a/website/content/docs/autoscaling/agent/source.mdx b/website/content/tools/autoscaling/agent/source.mdx similarity index 100% rename from website/content/docs/autoscaling/agent/source.mdx rename to website/content/tools/autoscaling/agent/source.mdx diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 7a6b09ea8..f6b3200d4 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -1575,7 +1575,7 @@ }, { "title": "source", - "path": "autoscaling/agent/source" + "href": "/tools/autoscaling/agent/source" }, { "title": "strategy", diff --git a/website/data/tools-nav-data.json b/website/data/tools-nav-data.json index 34ad0ecce..cc589f40e 100644 --- a/website/data/tools-nav-data.json +++ b/website/data/tools-nav-data.json @@ -37,6 +37,10 @@ "title": "policy_eval", "path": "autoscaling/agent/policy_eval" }, + { + "title": "source", + "path": "autoscaling/agent/source" + }, { "title": "strategy", "path": "autoscaling/agent/strategy" From ed95316bdf3e818543b79340e8466f03298774ea Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Tue, 1 Mar 2022 16:43:53 -0800 Subject: [PATCH 23/89] docs: add op api options --- .../content/docs/commands/operator/api.mdx | 28 +++++++++++++++---- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/website/content/docs/commands/operator/api.mdx b/website/content/docs/commands/operator/api.mdx index 6155b3115..0449894b1 100644 --- a/website/content/docs/commands/operator/api.mdx +++ b/website/content/docs/commands/operator/api.mdx @@ -43,11 +43,29 @@ curl \ https://client.global.nomad:4646/v1/jobs ``` -The `-dryrun` flag for `nomad operator api` will output a curl command instead -of performing the HTTP request immediately. Note that you do *not* need the 3rd -party `curl` command installed to use `operator api`. 
The `curl` output from -`-dryrun` is intended for use in scripts or running in locations without a -Nomad binary present. +## General Options + +@include 'general_options.mdx' + +## Operator API Options + +- `-dryrun`: output a curl command instead of performing the HTTP request + immediately. Note that you do *not* need the 3rd party `curl` command + installed to use `operator api`. The `curl` output from `-dryrun` is intended + for use in scripts or running in locations without a Nomad binary present. + +- `-filter`: Specifies an expression used to filter query results. + +- `-H`: Adds an additional HTTP header to the request. May be specified more + than once. These headers take precedence over automatically set ones such as + X-Nomad-Token. + +- `-verbose`: Output extra information to stderr similar to curl's --verbose + flag. + +- `-X`: HTTP method of request. If there is data piped to stdin, then the + method defaults to POST. Otherwise the method defaults to GET. + [curl]: https://curl.se/ [envvars]: /docs/commands#environment-variables From c23891b976c6c6ac1e9dfb10a7b73b295a751696 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Tue, 1 Mar 2022 16:44:15 -0800 Subject: [PATCH 24/89] cli: fix op api method handling --- command/operator_api.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/command/operator_api.go b/command/operator_api.go index b21c34c08..81f818362 100644 --- a/command/operator_api.go +++ b/command/operator_api.go @@ -71,7 +71,8 @@ Operator API Specific Options: Output extra information to stderr similar to curl's --verbose flag. -X - HTTP method of request. Defaults to GET. + HTTP method of request. If there is data piped to stdin, then the method + defaults to POST. Otherwise the method defaults to GET. ` return strings.TrimSpace(helpText) @@ -106,7 +107,7 @@ func (c *OperatorAPICommand) Run(args []string) int { flags.BoolVar(&dryrun, "dryrun", false, "") flags.StringVar(&filter, "filter", "", "") flags.BoolVar(&c.verboseFlag, "verbose", false, "") - flags.StringVar(&c.method, "X", "GET", "") + flags.StringVar(&c.method, "X", "", "") flags.Var(headerFlags, "H", "") if err := flags.Parse(args); err != nil { @@ -145,6 +146,8 @@ func (c *OperatorAPICommand) Run(args []string) int { if c.method == "" { c.method = "POST" } + } else if c.method == "" { + c.method = "GET" } config := c.clientConfig() From a1000ee5b8e680643caae103cb1dd0ac40029f93 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Tue, 1 Mar 2022 17:12:58 -0800 Subject: [PATCH 25/89] docs: add op api examples --- .../content/docs/commands/operator/api.mdx | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/website/content/docs/commands/operator/api.mdx b/website/content/docs/commands/operator/api.mdx index 0449894b1..b05c6dc54 100644 --- a/website/content/docs/commands/operator/api.mdx +++ b/website/content/docs/commands/operator/api.mdx @@ -66,6 +66,29 @@ curl \ - `-X`: HTTP method of request. If there is data piped to stdin, then the method defaults to POST. Otherwise the method defaults to GET. +## Examples + +```shell-session +$ nomad operator api -verbose /v1/agent/members?pretty +> GET http://127.0.0.1:4646/v1/agent/members?pretty= +* Sending request and receiving response... +< HTTP/1.1 200 OK +< Date: Wed, 02 Mar 2022 01:10:59 GMT +< Content-Type: application/json +< Vary: Accept-Encoding +{ + "Members": [ +... 
+
+
+$ nomad operator api -region eu-west -filter '.Status == "completed"' -dryrun /v1/evaluations
+curl \
+    -X GET \
+    http://127.0.0.1:4646/v1/evaluations?filter=.Status+%3D%3D+%22completed%22&region=eu-west
+```
+
+
 [curl]: https://curl.se/
 [envvars]: /docs/commands#environment-variables

From 3020b4e8513f0dc3ae8d64856fac0e221052e79f Mon Sep 17 00:00:00 2001
From: Michael Schurter
Date: Tue, 1 Mar 2022 17:15:26 -0800
Subject: [PATCH 26/89] docs: add op api examples

---
 website/content/docs/commands/operator/api.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/website/content/docs/commands/operator/api.mdx b/website/content/docs/commands/operator/api.mdx
index b05c6dc54..913831a86 100644
--- a/website/content/docs/commands/operator/api.mdx
+++ b/website/content/docs/commands/operator/api.mdx
@@ -81,7 +81,7 @@ $ nomad operator api -verbose /v1/agent/members?pretty
 ...
 
 
-$ nomad operator api -region eu-west -filter '.Status == "completed"' -dryrun /v1/evaluations
+$ nomad operator api -region eu-west -filter 'Status == "completed"' -dryrun /v1/evaluations
 curl \
     -X GET \
     http://127.0.0.1:4646/v1/evaluations?filter=.Status+%3D%3D+%22completed%22&region=eu-west

From c3a4abc1ac8556d12012fd2fc04776624c7dc7a6 Mon Sep 17 00:00:00 2001
From: Luiz Aoqui
Date: Wed, 2 Mar 2022 20:30:27 -0500
Subject: [PATCH 27/89] ci: disable Go test semgrep rules (#12175)

---
 .semgrep/go_tests.yml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/.semgrep/go_tests.yml b/.semgrep/go_tests.yml
index 20f2797c4..e1f085bb6 100644
--- a/.semgrep/go_tests.yml
+++ b/.semgrep/go_tests.yml
@@ -13,6 +13,7 @@ rules:
     # TODO(luiz): figure out how to do a 'delete line' fix.
     fix: " "
     paths:
+      exclude: ["*"]
       include:
         - "*_test.go"
 
@@ -43,6 +44,7 @@ rules:
     severity: "WARNING"
     fix: "assert.$FUNC($T, $...ARGS)"
     paths:
+      exclude: ["*"]
       include:
         - "*_test.go"
   - id: "tests-no-assert-without-t-nested"
@@ -64,6 +66,7 @@ rules:
     severity: "WARNING"
     fix: "assert.$FUNC($T, $...ARGS)"
     paths:
+      exclude: ["*"]
       include:
         - "*_test.go"
   - id: "tests-no-require-without-t"
@@ -91,6 +94,7 @@ rules:
     severity: "WARNING"
     fix: "require.$FUNC($T, $...ARGS)"
     paths:
+      exclude: ["*"]
       include:
         - "*_test.go"
   - id: "tests-no-require-without-t-nested"
@@ -112,5 +116,6 @@ rules:
     severity: "WARNING"
     fix: "require.$FUNC($T, $...ARGS)"
     paths:
+      exclude: ["*"]
       include:
         - "*_test.go"

From cd928d2cdc8b04e92859f6c1d80ab4436fd8034a Mon Sep 17 00:00:00 2001
From: Tim Gross
Date: Thu, 3 Mar 2022 15:15:28 -0500
Subject: [PATCH 28/89] csi: add missing fields to HTTP API response (#12178)

The HTTP endpoint for CSI manually serializes the internal struct to the
API struct for purposes of redaction (see also #10470). Add fields that
were missing from this serialization so they don't show up as always
empty in the API response.
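
For background, the failure mode is easy to demonstrate in isolation:
with hand-rolled struct-to-struct copying, any field that is not
explicitly assigned silently stays at its zero value in the response. A
minimal, self-contained sketch with stand-in types (not the real
structs.CSIVolume or api.CSIVolume definitions):

```go
package main

import "fmt"

// internalVolume stands in for the server-side struct.
type internalVolume struct {
	ID         string
	SnapshotID string // a field added to the struct later
}

// apiVolume stands in for the redacted API struct.
type apiVolume struct {
	ID         string
	SnapshotID string
}

// toAPI copies fields one by one so that secrets can be redacted. If a
// newly added field is not copied here, the API always reports its zero
// value ("") even though the state store holds a real value.
func toAPI(v *internalVolume) *apiVolume {
	return &apiVolume{
		ID:         v.ID,
		SnapshotID: v.SnapshotID, // the kind of line this patch adds
	}
}

func main() {
	v := &internalVolume{ID: "vol-1", SnapshotID: "snap-1"}
	fmt.Printf("%+v\n", toAPI(v)) // &{ID:vol-1 SnapshotID:snap-1}
}
```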
--- .changelog/12178.txt | 3 +++ command/agent/csi_endpoint.go | 28 ++++++++++++++++++++++++---- 2 files changed, 27 insertions(+), 4 deletions(-) create mode 100644 .changelog/12178.txt diff --git a/.changelog/12178.txt b/.changelog/12178.txt new file mode 100644 index 000000000..0261e5237 --- /dev/null +++ b/.changelog/12178.txt @@ -0,0 +1,3 @@ +```release-note:bug +csi: Fixed a bug where fields were missing from the Read Volume API response +``` diff --git a/command/agent/csi_endpoint.go b/command/agent/csi_endpoint.go index 4e0a8ea08..a8e6d599f 100644 --- a/command/agent/csi_endpoint.go +++ b/command/agent/csi_endpoint.go @@ -475,10 +475,11 @@ func structsCSIVolumeToApi(vol *structs.CSIVolume) *api.CSIVolume { allocCount := len(vol.ReadAllocs) + len(vol.WriteAllocs) out := &api.CSIVolume{ - ID: vol.ID, - Name: vol.Name, - ExternalID: vol.ExternalID, - Namespace: vol.Namespace, + ID: vol.ID, + Name: vol.Name, + ExternalID: vol.ExternalID, + Namespace: vol.Namespace, + Topologies: structsCSITopolgiesToApi(vol.Topologies), AccessMode: structsCSIAccessModeToApi(vol.AccessMode), AttachmentMode: structsCSIAttachmentModeToApi(vol.AttachmentMode), @@ -486,6 +487,13 @@ func structsCSIVolumeToApi(vol *structs.CSIVolume) *api.CSIVolume { Secrets: structsCSISecretsToApi(vol.Secrets), Parameters: vol.Parameters, Context: vol.Context, + Capacity: vol.Capacity, + + RequestedCapacityMin: vol.RequestedCapacityMin, + RequestedCapacityMax: vol.RequestedCapacityMax, + RequestedCapabilities: structsCSICapabilityToApi(vol.RequestedCapabilities), + CloneID: vol.CloneID, + SnapshotID: vol.SnapshotID, // Allocations is the collapsed list of both read and write allocs Allocations: make([]*api.AllocationListStub, 0, allocCount), @@ -772,6 +780,18 @@ func structsCSIAttachmentModeToApi(mode structs.CSIVolumeAttachmentMode) api.CSI return api.CSIVolumeAttachmentModeUnknown } +// structsCSICapabilityToApi converts capabilities, part of structsCSIVolumeToApi +func structsCSICapabilityToApi(caps []*structs.CSIVolumeCapability) []*api.CSIVolumeCapability { + out := make([]*api.CSIVolumeCapability, len(caps)) + for i, cap := range caps { + out[i] = &api.CSIVolumeCapability{ + AccessMode: api.CSIVolumeAccessMode(cap.AccessMode), + AttachmentMode: api.CSIVolumeAttachmentMode(cap.AttachmentMode), + } + } + return out +} + // structsCSIMountOptionsToApi converts mount options, part of structsCSIVolumeToApi func structsCSIMountOptionsToApi(opts *structs.CSIMountOptions) *api.CSIMountOptions { if opts == nil { From c27d9b1b67da3b3e7916a0079f1e4774cd4149b1 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Thu, 3 Mar 2022 15:17:29 -0500 Subject: [PATCH 29/89] e2e: use `operator api` for Networking suite validation (#12180) With mTLS enabled, using `curl` in a bash script for validation involves having to configure arguments to `curl` based on whether or not the test infrastructure is using mTLS, whether ACLs are enabled, etc. Use the new `operator api` command instead to pick up the client configuration from the test environment automatically. 
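
This works because the api package assembles TLS and ACL settings from
the standard environment variables on its own. A rough sketch of what
the command gets for free, assuming NOMAD_ADDR, NOMAD_CACERT,
NOMAD_CLIENT_CERT, NOMAD_CLIENT_KEY, and NOMAD_TOKEN are exported by the
test environment:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// DefaultConfig reads the NOMAD_* environment variables, including
	// the mTLS certificate paths, so callers never hand-assemble the
	// --cacert/--cert/--key arguments that a curl-based script needs.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Any authenticated request now works with or without mTLS enabled.
	self, err := client.Agent().Self()
	if err != nil {
		panic(err)
	}
	fmt.Println("connected to", self.Member.Name)
}
```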
---
 e2e/networking/inputs/validate.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/e2e/networking/inputs/validate.sh b/e2e/networking/inputs/validate.sh
index e8022c1fb..6844bf002 100755
--- a/e2e/networking/inputs/validate.sh
+++ b/e2e/networking/inputs/validate.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-test $(curl --silent ${NOMAD_ADDR}/v1/allocation/${NOMAD_ALLOC_ID} | jq '.NetworkStatus.Address | length') -ne 0
\ No newline at end of file
+nomad operator api "/v1/allocation/${NOMAD_ALLOC_ID}" | jq -e '.NetworkStatus.Address | length > 0'

From 0292fd402d7430685c5a75a99aa657499ade95b8 Mon Sep 17 00:00:00 2001
From: Tim Gross
Date: Thu, 3 Mar 2022 17:00:00 -0500
Subject: [PATCH 30/89] e2e: use UUID for CSI idempotency token (#12183)

The AWS EBS plugin appears to use the name field of the volume as an
idempotency token that persists across the entire AWS account, not just
the plugin lifespan.

Also fix the regex for the volume ID, which was originally taken from
the job ID regex but isn't actually the same. This hasn't failed tests
for us because we've always passed in the same volume ID.
---
 e2e/csi/csi.go | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/e2e/csi/csi.go b/e2e/csi/csi.go
index e570f7383..dc2b36473 100644
--- a/e2e/csi/csi.go
+++ b/e2e/csi/csi.go
@@ -16,6 +16,7 @@ import (
 	"github.com/hashicorp/nomad/api"
 	e2e "github.com/hashicorp/nomad/e2e/e2eutil"
 	"github.com/hashicorp/nomad/e2e/framework"
+	"github.com/hashicorp/nomad/helper/uuid"
 	"github.com/hashicorp/nomad/testutil"
 )
 
@@ -226,13 +227,15 @@ func volumeRegister(volID, volFilePath, createOrRegister string) error {
 	}
 
 	// hack off the first line to replace with our unique ID
-	var idRegex = regexp.MustCompile(`(?m)^id ".*"`)
+	var idRegex = regexp.MustCompile(`(?m)^id[\s]+= ".*"`)
 	volspec := idRegex.ReplaceAllString(string(content),
 		fmt.Sprintf("id = %q", volID))
 
-	var nameRegex = regexp.MustCompile(`(?m)^name ".*"`)
+	// the EBS plugin uses the name as an idempotency token across the
+	// whole AWS account, so it has to be globally unique
+	var nameRegex = regexp.MustCompile(`(?m)^name[\s]+= ".*"`)
 	volspec = nameRegex.ReplaceAllString(volspec,
-		fmt.Sprintf("name = %q", volID))
+		fmt.Sprintf("name = %q", uuid.Generate()))
 
 	go func() {
 		defer stdin.Close()

From ad99a4501266cd6d3291d0d1798f0d20d3c66da9 Mon Sep 17 00:00:00 2001
From: Luiz Aoqui
Date: Thu, 3 Mar 2022 17:27:04 -0500
Subject: [PATCH 31/89] Fix CSI volume list with prefix and `*` namespace
 (#12184)

When using a prefix value and the * wildcard for namespace, the endpoint
would not take the prefix value into consideration, due both to the
order in which the checks were executed and to the logic used to
retrieve volumes from the state store.

This commit changes the order to check for a prefix first and wraps the
result iterator of the state store query in a filter to apply the
prefix.
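
The wrap-and-filter approach looks roughly like the following
self-contained sketch (a stand-in volume type and schema, not the actual
state store tables):

```go
package main

import (
	"fmt"
	"strings"

	memdb "github.com/hashicorp/go-memdb"
)

type vol struct{ ID string }

func main() {
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"volumes": {
				Name: "volumes",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"},
					},
				},
			},
		},
	}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	txn := db.Txn(true)
	for _, id := range []string{"aaaa-1", "bbbb-1", "aaaa-2"} {
		if err := txn.Insert("volumes", &vol{ID: id}); err != nil {
			panic(err)
		}
	}
	txn.Commit()

	// Walk the whole table (the wildcard-namespace case), then wrap the
	// iterator so anything not matching the prefix is skipped.
	iter, err := db.Txn(false).Get("volumes", "id")
	if err != nil {
		panic(err)
	}
	prefix := "aaaa"
	filtered := memdb.NewFilterIterator(iter, func(raw interface{}) bool {
		// returning true drops the element from the results
		return !strings.HasPrefix(raw.(*vol).ID, prefix)
	})

	for raw := filtered.Next(); raw != nil; raw = filtered.Next() {
		fmt.Println(raw.(*vol).ID) // aaaa-1, aaaa-2
	}
}
```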
--- .changelog/12184.txt | 3 +++ nomad/csi_endpoint.go | 8 +++++--- nomad/csi_endpoint_test.go | 16 ++++++++++++++++ nomad/state/state_store.go | 31 ++++++++++++++++++++++++++++++- 4 files changed, 54 insertions(+), 4 deletions(-) create mode 100644 .changelog/12184.txt diff --git a/.changelog/12184.txt b/.changelog/12184.txt new file mode 100644 index 000000000..e885a9ad5 --- /dev/null +++ b/.changelog/12184.txt @@ -0,0 +1,3 @@ +```release-note:bug +api: Apply prefix filter when querying CSI volumes in all namespaces +``` diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 092fc16ff..66083452f 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -129,10 +129,12 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV iter, err = snap.CSIVolumesByNodeID(ws, prefix, args.NodeID) } else if args.PluginID != "" { iter, err = snap.CSIVolumesByPluginID(ws, ns, prefix, args.PluginID) - } else if ns == structs.AllNamespacesSentinel { - iter, err = snap.CSIVolumes(ws) - } else { + } else if prefix != "" { + iter, err = snap.CSIVolumesByIDPrefix(ws, ns, prefix) + } else if ns != structs.AllNamespacesSentinel { iter, err = snap.CSIVolumesByNamespace(ws, ns, prefix) + } else { + iter, err = snap.CSIVolumes(ws) } if err != nil { diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index e4235ee21..4b8275064 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -736,6 +736,22 @@ func TestCSIVolumeEndpoint_ListAllNamespaces(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(1001), resp.Index) require.Len(t, resp.Volumes, len(vols)) + + // Lookup volumes in all namespaces with prefix + get = &structs.CSIVolumeListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + Prefix: id0[:4], + Namespace: "*", + }, + } + var resp2 structs.CSIVolumeListResponse + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", get, &resp2) + require.NoError(t, err) + require.Equal(t, uint64(1001), resp.Index) + require.Len(t, resp2.Volumes, 1) + require.Equal(t, vols[0].ID, resp2.Volumes[0].ID) + require.Equal(t, structs.DefaultNamespace, resp2.Volumes[0].Namespace) } func TestCSIVolumeEndpoint_Create(t *testing.T) { diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index b8215b4ba..4cc1902aa 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -2265,8 +2265,13 @@ func (s *StateStore) CSIVolumesByPluginID(ws memdb.WatchSet, namespace, prefix, } // CSIVolumesByIDPrefix supports search. Caller should snapshot if it wants to -// also denormalize the plugins. +// also denormalize the plugins. If using a prefix with the wildcard namespace, +// the results will not use the index prefix. 
 func (s *StateStore) CSIVolumesByIDPrefix(ws memdb.WatchSet, namespace, volumeID string) (memdb.ResultIterator, error) {
+	if namespace == structs.AllNamespacesSentinel {
+		return s.csiVolumeByIDPrefixAllNamespaces(ws, volumeID)
+	}
+
 	txn := s.db.ReadTxn()
 
 	iter, err := txn.Get("csi_volumes", "id_prefix", namespace, volumeID)
@@ -2279,6 +2284,30 @@ func (s *StateStore) CSIVolumesByIDPrefix(ws memdb.WatchSet, namespace, volumeID
 	return iter, nil
 }
 
+func (s *StateStore) csiVolumeByIDPrefixAllNamespaces(ws memdb.WatchSet, prefix string) (memdb.ResultIterator, error) {
+	txn := s.db.ReadTxn()
+
+	// Walk the entire csi_volumes table
+	iter, err := txn.Get("csi_volumes", "id")
+
+	if err != nil {
+		return nil, err
+	}
+
+	ws.Add(iter.WatchCh())
+
+	// Filter the iterator by ID prefix
+	f := func(raw interface{}) bool {
+		v, ok := raw.(*structs.CSIVolume)
+		if !ok {
+			return false
+		}
+		return !strings.HasPrefix(v.ID, prefix)
+	}
+	wrap := memdb.NewFilterIterator(iter, f)
+	return wrap, nil
+}
+
 // CSIVolumesByNodeID looks up CSIVolumes in use on a node. Caller should
 // snapshot if it wants to also denormalize the plugins.
 func (s *StateStore) CSIVolumesByNodeID(ws memdb.WatchSet, prefix, nodeID string) (memdb.ResultIterator, error) {

From 180bc01d817ea768ba9865b4fcde3256d78f7c8e Mon Sep 17 00:00:00 2001
From: James Rasell
Date: Fri, 4 Mar 2022 12:06:25 +0100
Subject: [PATCH 32/89] docs: add note regarding HCLv2 func and interpolation.

---
 website/content/docs/job-specification/hcl2/functions/index.mdx | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/website/content/docs/job-specification/hcl2/functions/index.mdx b/website/content/docs/job-specification/hcl2/functions/index.mdx
index 32660de8c..df6f9a529 100644
--- a/website/content/docs/job-specification/hcl2/functions/index.mdx
+++ b/website/content/docs/job-specification/hcl2/functions/index.mdx
@@ -17,6 +17,9 @@ arguments in parentheses:
 max(5, 12, 9)
 ```
 
+HCL functions are executed on job submission and cannot be combined with
+client-side interpolation.
+
 The HCL language does not support user-defined functions, and so only the
 functions built in to the language are available for use. The navigation for
 this section includes a list of all of the available built-in functions.

From 0509b553b3cfc91a8b3f6f95d41b9c4efdb6606d Mon Sep 17 00:00:00 2001
From: Tim Gross
Date: Fri, 4 Mar 2022 08:49:04 -0500
Subject: [PATCH 33/89] csi: ensure `WriteOptions` aren't nil when handling
 secrets (#12182)

When we set the headers for CSI secrets in the `WriteOptions`, it turns
out that we're not always passing a non-nil object. In that case,
instantiate it on demand in the API.
---
 api/csi.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/api/csi.go b/api/csi.go
index 1950e4ba7..25cddc50b 100644
--- a/api/csi.go
+++ b/api/csi.go
@@ -130,6 +130,9 @@ func (v *CSIVolumes) DeleteSnapshot(snap *CSISnapshot, w *WriteOptions) error {
 	qp := url.Values{}
 	qp.Set("snapshot_id", snap.ID)
 	qp.Set("plugin_id", snap.PluginID)
+	if w == nil {
+		w = &WriteOptions{}
+	}
 	w.SetHeadersFromCSISecrets(snap.Secrets)
 	_, err := v.client.delete("/v1/volumes/snapshot?"+qp.Encode(), nil, w)
 	return err

From 31b7de78fd1852ab9c1676b294db03d62387add9 Mon Sep 17 00:00:00 2001
From: Tim Gross
Date: Fri, 4 Mar 2022 08:55:06 -0500
Subject: [PATCH 34/89] e2e: configure prometheus for mTLS for `Metrics` suite
 (#12181)

The `Metrics` suite uses prometheus to scrape Nomad metrics so that
we're testing the full user experience of extracting metrics from Nomad.

With the addition of mTLS, we need to make sure prometheus also has mTLS configuration because the metrics endpoint is protected. Update the Nomad client configuration and prometheus job to bind-mount the client's certs into the task so that the job can use these certs to scrape the server. This is a temporary solution that gets the job passing; we should give the job its own certificates (issued by Vault?) when we've done some of the infrastructure rework we'd like. --- e2e/metrics/input/prometheus.nomad | 18 +++++++++++++++ .../dev-cluster/nomad/client-linux/client.hcl | 22 ++++++++++++++----- .../nomad/client-linux/indexed/client-0.hcl | 21 +++++++++++++----- .../nomad/client-linux/indexed/client-1.hcl | 21 +++++++++++++----- .../nomad/client-linux/indexed/client-2.hcl | 21 +++++++++++++----- .../nomad/client-linux/indexed/client-3.hcl | 21 +++++++++++++----- 6 files changed, 98 insertions(+), 26 deletions(-) diff --git a/e2e/metrics/input/prometheus.nomad b/e2e/metrics/input/prometheus.nomad index 294db46b1..f191490bf 100644 --- a/e2e/metrics/input/prometheus.nomad +++ b/e2e/metrics/input/prometheus.nomad @@ -51,11 +51,18 @@ scrape_configs: regex: '(.*)http(.*)' action: keep + scheme: https + tls_config: + ca_file: '/etc/nomad.d/tls/ca.crt' + cert_file: '/etc/nomad.d/tls/agent.crt' + key_file: '/etc/nomad.d/tls/agent.key' + scrape_interval: 5s metrics_path: /v1/metrics params: format: ['prometheus'] EOH + } driver = "docker" @@ -67,6 +74,17 @@ EOH "local/prometheus.yml:/etc/prometheus/prometheus.yml", ] + # TODO: https://github.com/hashicorp/nomad/issues/11484 + # This is very much not how we should do this, because it + # exposes the client's mTLS cert to the task and lets the + # prometheus masquerade as the client. + mount { + type = "bind" + target = "/etc/nomad.d/tls" + source = "/etc/nomad.d/tls" + readonly = true + } + ports = ["prometheus_ui"] } diff --git a/e2e/terraform/config/dev-cluster/nomad/client-linux/client.hcl b/e2e/terraform/config/dev-cluster/nomad/client-linux/client.hcl index 56a5c2615..cb3c09ae5 100644 --- a/e2e/terraform/config/dev-cluster/nomad/client-linux/client.hcl +++ b/e2e/terraform/config/dev-cluster/nomad/client-linux/client.hcl @@ -6,12 +6,6 @@ client { options { # Allow jobs to run as root "user.denylist" = "" - - # Allow rawexec jobs - "driver.raw_exec.enable" = "1" - - # Allow privileged docker jobs - "docker.privileged.enabled" = "true" } host_volume "shared_data" { @@ -35,6 +29,22 @@ plugin "nomad-driver-ecs" { } } +plugin "raw_exec" { + config { + enabled = true + } +} + +plugin "docker" { + config { + allow_privileged = true + + volumes { + enabled = true + } + } +} + vault { enabled = true address = "http://active.vault.service.consul:8200" diff --git a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-0.hcl b/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-0.hcl index f4ebb14e0..cbc2b036e 100644 --- a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-0.hcl +++ b/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-0.hcl @@ -2,11 +2,6 @@ client { enabled = true - options { - "driver.raw_exec.enable" = "1" - "docker.privileged.enabled" = "true" - } - meta { "rack" = "r1" } @@ -33,6 +28,22 @@ plugin "nomad-driver-ecs" { } } +plugin "raw_exec" { + config { + enabled = true + } +} + +plugin "docker" { + config { + allow_privileged = true + + volumes { + enabled = true + } + } +} + vault { enabled = true address = "http://active.vault.service.consul:8200" diff --git 
a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-1.hcl b/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-1.hcl index 2b425d01c..c36957389 100644 --- a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-1.hcl +++ b/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-1.hcl @@ -1,11 +1,6 @@ client { enabled = true - options { - "driver.raw_exec.enable" = "1" - "docker.privileged.enabled" = "true" - } - meta { "rack" = "r2" } @@ -28,6 +23,22 @@ plugin "nomad-driver-ecs" { } } +plugin "raw_exec" { + config { + enabled = true + } +} + +plugin "docker" { + config { + allow_privileged = true + + volumes { + enabled = true + } + } +} + vault { enabled = true address = "http://active.vault.service.consul:8200" diff --git a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-2.hcl b/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-2.hcl index 38abac096..8fc1d63fd 100644 --- a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-2.hcl +++ b/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-2.hcl @@ -3,11 +3,6 @@ datacenter = "dc2" client { enabled = true - options { - "driver.raw_exec.enable" = "1" - "docker.privileged.enabled" = "true" - } - meta { "rack" = "r1" } @@ -22,6 +17,22 @@ plugin "nomad-driver-podman" { } } +plugin "raw_exec" { + config { + enabled = true + } +} + +plugin "docker" { + config { + allow_privileged = true + + volumes { + enabled = true + } + } +} + vault { enabled = true address = "http://active.vault.service.consul:8200" diff --git a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-3.hcl b/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-3.hcl index 627265b39..8135c486d 100644 --- a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-3.hcl +++ b/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-3.hcl @@ -3,16 +3,27 @@ datacenter = "dc2" client { enabled = true - options { - "driver.raw_exec.enable" = "1" - "docker.privileged.enabled" = "true" - } - meta { "rack" = "r2" } } +plugin "raw_exec" { + config { + enabled = true + } +} + +plugin "docker" { + config { + allow_privileged = true + + volumes { + enabled = true + } + } +} + vault { enabled = true address = "http://active.vault.service.consul:8200" From a69bb6bd3bb84576cc862deb239db142e6240c56 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Fri, 4 Mar 2022 08:55:22 -0500 Subject: [PATCH 35/89] e2e: `StopJob` should tolerate progress deadline expired (#12179) The `TestRescheduleProgressDeadlineFail` E2E test failed during test cleanup because the error message "progress deadline expired" that it emits when we stop the job does not match the one expected from monitoring the `job stop` command. Update the `StopJob` helper to tolerate this use case as well. --- e2e/e2eutil/job.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/e2e/e2eutil/job.go b/e2e/e2eutil/job.go index cb67ca6d5..a11419ddc 100644 --- a/e2e/e2eutil/job.go +++ b/e2e/e2eutil/job.go @@ -211,7 +211,8 @@ func StopJob(jobID string, args ...string) error { // expect that the monitor fails and exits with status code one because // technically the deployment has failed. Overwrite the error to be // nil. 
- if strings.Contains(err.Error(), "Description = Cancelled because job is stopped") { + if strings.Contains(err.Error(), "Description = Cancelled because job is stopped") || + strings.Contains(err.Error(), "Description = Failed due to progress deadline") { err = nil } } From 67a6ba5e02fe7e36a4116ebcc1646667ff3b9bd2 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Fri, 4 Mar 2022 08:55:36 -0500 Subject: [PATCH 36/89] e2e: use context for executing external commands (#12185) If any E2E test hangs, it'll eventually time out and panic, causing all the remaining tests to fail. External commands should use a short context whenever possible so we can fail the test quickly and move on to the next test. --- e2e/csi/csi.go | 12 +++++++++++- e2e/e2eutil/cli.go | 6 +++++- e2e/e2eutil/job.go | 27 ++++++++++++++++++++------- e2e/vaultsecrets/vaultsecrets.go | 5 ++++- 4 files changed, 40 insertions(+), 10 deletions(-) diff --git a/e2e/csi/csi.go b/e2e/csi/csi.go index dc2b36473..3fdc15c49 100644 --- a/e2e/csi/csi.go +++ b/e2e/csi/csi.go @@ -215,7 +215,17 @@ func waitForPluginStatusCompare(pluginID string, compare func(got string) (bool, // cleanup. func volumeRegister(volID, volFilePath, createOrRegister string) error { - cmd := exec.Command("nomad", "volume", createOrRegister, "-") + // a CSI RPC to create a volume can take a long time because we + // have to wait on the AWS API to provision a disk, but a register + // should not because it only has to check the API for compatibility + timeout := time.Second * 30 + if createOrRegister == "create" { + timeout = time.Minute * 2 + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + cmd := exec.CommandContext(ctx, "nomad", "volume", createOrRegister, "-") stdin, err := cmd.StdinPipe() if err != nil { return fmt.Errorf("could not open stdin?: %w", err) diff --git a/e2e/e2eutil/cli.go b/e2e/e2eutil/cli.go index 9d5293fd9..46694663b 100644 --- a/e2e/e2eutil/cli.go +++ b/e2e/e2eutil/cli.go @@ -1,16 +1,20 @@ package e2eutil import ( + "context" "fmt" "os/exec" "regexp" "strings" + "time" ) // Command sends a command line argument to Nomad and returns the unbuffered // stdout as a string (or, if there's an error, the stderr) func Command(cmd string, args ...string) (string, error) { - bytes, err := exec.Command(cmd, args...).CombinedOutput() + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + bytes, err := exec.CommandContext(ctx, cmd, args...).CombinedOutput() out := string(bytes) if err != nil { return out, fmt.Errorf("command %v %v failed: %v\nOutput: %v", cmd, args, err, out) diff --git a/e2e/e2eutil/job.go b/e2e/e2eutil/job.go index a11419ddc..7b7f14a57 100644 --- a/e2e/e2eutil/job.go +++ b/e2e/e2eutil/job.go @@ -1,18 +1,22 @@ package e2eutil import ( + "context" "fmt" "io" "io/ioutil" "os/exec" "regexp" "strings" + "time" ) // Register registers a jobspec from a file but with a unique ID. // The caller is responsible for recording that ID for later cleanup. func Register(jobID, jobFilePath string) error { - return register(jobID, jobFilePath, exec.Command("nomad", "job", "run", "-detach", "-")) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + return register(jobID, jobFilePath, exec.CommandContext(ctx, "nomad", "job", "run", "-detach", "-")) } // RegisterWithArgs registers a jobspec from a file but with a unique ID.
The @@ -25,8 +29,10 @@ func RegisterWithArgs(jobID, jobFilePath string, args ...string) error { baseArgs = append(baseArgs, args[i]) } baseArgs = append(baseArgs, "-") + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() - return register(jobID, jobFilePath, exec.Command("nomad", baseArgs...)) + return register(jobID, jobFilePath, exec.CommandContext(ctx, "nomad", baseArgs...)) } func register(jobID, jobFilePath string, cmd *exec.Cmd) error { @@ -60,7 +66,9 @@ func register(jobID, jobFilePath string, cmd *exec.Cmd) error { // PeriodicForce forces a periodic job to dispatch func PeriodicForce(jobID string) error { // nomad job periodic force - cmd := exec.Command("nomad", "job", "periodic", "force", jobID) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + cmd := exec.CommandContext(ctx, "nomad", "job", "periodic", "force", jobID) out, err := cmd.CombinedOutput() if err != nil { @@ -82,7 +90,9 @@ func Dispatch(jobID string, meta map[string]string, payload string) error { args = append(args, "-") } - cmd := exec.Command("nomad", args...) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + cmd := exec.CommandContext(ctx, "nomad", args...) cmd.Stdin = strings.NewReader(payload) out, err := cmd.CombinedOutput() @@ -96,7 +106,9 @@ func Dispatch(jobID string, meta map[string]string, payload string) error { // JobInspectTemplate runs nomad job inspect and formats the output // using the specified go template func JobInspectTemplate(jobID, template string) (string, error) { - cmd := exec.Command("nomad", "job", "inspect", "-t", template, jobID) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + cmd := exec.CommandContext(ctx, "nomad", "job", "inspect", "-t", template, jobID) out, err := cmd.CombinedOutput() if err != nil { return "", fmt.Errorf("could not inspect job: %w\n%v", err, string(out)) @@ -109,8 +121,9 @@ func JobInspectTemplate(jobID, template string) (string, error) { // RegisterFromJobspec registers a jobspec from a string, also with a unique // ID. The caller is responsible for recording that ID for later cleanup. 
func RegisterFromJobspec(jobID, jobspec string) error { - - cmd := exec.Command("nomad", "job", "run", "-detach", "-") + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + cmd := exec.CommandContext(ctx, "nomad", "job", "run", "-detach", "-") stdin, err := cmd.StdinPipe() if err != nil { return fmt.Errorf("could not open stdin?: %w", err) diff --git a/e2e/vaultsecrets/vaultsecrets.go b/e2e/vaultsecrets/vaultsecrets.go index 7f53a3792..8a13611d2 100644 --- a/e2e/vaultsecrets/vaultsecrets.go +++ b/e2e/vaultsecrets/vaultsecrets.go @@ -1,6 +1,7 @@ package vaultsecrets import ( + "context" "fmt" "io" "io/ioutil" @@ -226,7 +227,9 @@ func writePolicy(policyID, policyPath, testID string) (string, error) { policyDoc := string(raw) policyDoc = strings.ReplaceAll(policyDoc, "TESTID", testID) - cmd := exec.Command("vault", "policy", "write", policyID, "-") + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + cmd := exec.CommandContext(ctx, "vault", "policy", "write", policyID, "-") stdin, err := cmd.StdinPipe() if err != nil { return "", err From 432293f72f6caf68c5b62b51a76a08363ce72c4b Mon Sep 17 00:00:00 2001 From: Mike Nomitch Date: Fri, 4 Mar 2022 09:53:52 -0600 Subject: [PATCH 37/89] Add openapi and caravan to tools page --- website/content/tools/index.mdx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/content/tools/index.mdx b/website/content/tools/index.mdx index 68e2b079e..0f77dba53 100644 --- a/website/content/tools/index.mdx +++ b/website/content/tools/index.mdx @@ -18,11 +18,13 @@ The following external tools are currently available for Nomad and maintained by - [Damon](https://github.com/hashicorp/damon) - An terminal dashboard for Nomad. - [Levant](https://github.com/hashicorp/levant) - A templating and deployment tool for HashiCorp Nomad jobs that provides realtime feedback and detailed failure messages upon deployment issues. - [Nomad Pack](https://github.com/hashicorp/nomad-pack) - An official package manager and templating tool for Nomad, currently a Tech Preview. +- [OpenAPI](https://github.com/hashicorp/nomad-openapi) - An OpenAPI/Swagger spec for Nomad, allowing for programatic generation of SDKs and documentation. ## Community Tools The following external tools are currently available for Nomad and maintained by members of the Nomad Community: +- [Caravan](https://caravanproject.io/) - Caravan is a tool to deploy and configure Nomad, Consul and Vault to AWS, Azure, or GCP, all with a single script. 
- [Chaotic](https://github.com/ngine-io/chaotic) - A Chaos Engineering tool to stop allocations, reboot or stop/start virtual machines in your cloud environment - [Deadman Check](https://github.com/sepulworld/deadman-check) - A monitoring companion for Nomad periodic jobs that alerts if periodic isn't running at the expected interval - [Hashi Up](https://github.com/jsiebens/hashi-up) - A utility to install Nomad on remote Linux hosts From dbc646d7c0741c446de90038941343d56bd54ff2 Mon Sep 17 00:00:00 2001 From: Mike Nomitch Date: Fri, 4 Mar 2022 12:54:08 -0800 Subject: [PATCH 38/89] Updated OpenAPI info on tools page Co-authored-by: Derek Strickland <1111455+DerekStrickland@users.noreply.github.com> --- website/content/tools/index.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/content/tools/index.mdx b/website/content/tools/index.mdx index 0f77dba53..cc04e64e6 100644 --- a/website/content/tools/index.mdx +++ b/website/content/tools/index.mdx @@ -18,7 +18,7 @@ The following external tools are currently available for Nomad and maintained by - [Damon](https://github.com/hashicorp/damon) - An terminal dashboard for Nomad. - [Levant](https://github.com/hashicorp/levant) - A templating and deployment tool for HashiCorp Nomad jobs that provides realtime feedback and detailed failure messages upon deployment issues. - [Nomad Pack](https://github.com/hashicorp/nomad-pack) - An official package manager and templating tool for Nomad, currently a Tech Preview. -- [OpenAPI](https://github.com/hashicorp/nomad-openapi) - An OpenAPI/Swagger spec for Nomad, allowing for programatic generation of SDKs and documentation. +- [OpenAPI](https://github.com/hashicorp/nomad-openapi) - An OpenAPI/Swagger spec for Nomad, allowing for programmatic generation of SDKs and documentation. Includes a reference implementation in Go built on top of a generated client. ## Community Tools From 9ed4d962fd0de4154b1890eccb77c3a541da2f19 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Fri, 4 Mar 2022 16:44:09 -0500 Subject: [PATCH 39/89] csi: fix prefix queries for plugin list RPC (#12194) The `CSIPlugin.List` RPC was intended to accept a prefix to filter the list of plugins being listed. This was accidentally being done in the state store instead, which contributed to incorrect filtering behavior for plugins in the `volume plugin status` command. Move the prefix matching into the RPC so that it calls the prefix-matching method in the state store if we're looking for a prefix. Update the `plugin status` command to accept a prefix for the plugin ID argument so that it matches the expected behavior of other commands. --- .changelog/12194.txt | 3 +++ command/plugin_status_csi.go | 23 +++++++++++++++++++++ nomad/csi_endpoint.go | 18 ++++++++++++++---- nomad/csi_endpoint_test.go | 3 ++- nomad/state/state_store.go | 2 +- 5 files changed, 43 insertions(+), 6 deletions(-) create mode 100644 .changelog/12194.txt diff --git a/.changelog/12194.txt b/.changelog/12194.txt new file mode 100644 index 000000000..330a5d8cc --- /dev/null +++ b/.changelog/12194.txt @@ -0,0 +1,3 @@ +```release-note:bug +csi: Fixed a bug where `plugin status` commands could choose the incorrect plugin if a plugin with a name that matched the same prefix existed.
+``` diff --git a/command/plugin_status_csi.go b/command/plugin_status_csi.go index b3a6ac36b..cdf8e03c5 100644 --- a/command/plugin_status_csi.go +++ b/command/plugin_status_csi.go @@ -37,6 +37,29 @@ func (c *PluginStatusCommand) csiStatus(client *api.Client, id string) int { return 0 } + // filter by plugin if a plugin ID was passed + plugs, _, err := client.CSIPlugins().List(&api.QueryOptions{Prefix: id}) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying CSI plugins: %s", err)) + return 1 + } + if len(plugs) == 0 { + c.Ui.Error(fmt.Sprintf("No plugins(s) with prefix or ID %q found", id)) + return 1 + } + if len(plugs) > 1 { + if id != plugs[0].ID { + out, err := c.csiFormatPlugins(plugs) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error formatting: %s", err)) + return 1 + } + c.Ui.Error(fmt.Sprintf("Prefix matched multiple plugins\n\n%s", out)) + return 1 + } + } + id = plugs[0].ID + // Lookup matched a single plugin plug, _, err := client.CSIPlugins().Info(id, nil) if err != nil { diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 66083452f..ed0f95131 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -1294,10 +1294,20 @@ func (v *CSIPlugin) List(args *structs.CSIPluginListRequest, reply *structs.CSIP queryOpts: &args.QueryOptions, queryMeta: &reply.QueryMeta, run: func(ws memdb.WatchSet, state *state.StateStore) error { - // Query all plugins - iter, err := state.CSIPlugins(ws) - if err != nil { - return err + + var iter memdb.ResultIterator + var err error + if args.Prefix != "" { + iter, err = state.CSIPluginsByIDPrefix(ws, args.Prefix) + if err != nil { + return err + } + } else { + // Query all plugins + iter, err = state.CSIPlugins(ws) + if err != nil { + return err + } } // Collect results diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 4b8275064..4c846d549 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -1100,6 +1100,7 @@ func TestCSIVolumeEndpoint_ListExternal(t *testing.T) { // List external volumes; note that none of these exist in the state store req := &structs.CSIVolumeExternalListRequest{ + PluginID: "minnie", QueryOptions: structs.QueryOptions{ Region: "global", Namespace: structs.DefaultNamespace, @@ -1371,8 +1372,8 @@ func TestCSIVolumeEndpoint_ListSnapshots(t *testing.T) { require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, index, node)) // List snapshots - req := &structs.CSISnapshotListRequest{ + PluginID: "minnie", Secrets: structs.CSISecrets{ "secret-key-1": "secret-val-1", }, diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 4cc1902aa..8ebbc6bc2 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -2696,7 +2696,7 @@ func (s *StateStore) CSIPluginByID(ws memdb.WatchSet, id string) (*structs.CSIPl // CSIPluginByIDTxn returns a named CSIPlugin func (s *StateStore) CSIPluginByIDTxn(txn Txn, ws memdb.WatchSet, id string) (*structs.CSIPlugin, error) { - watchCh, obj, err := txn.FirstWatch("csi_plugins", "id_prefix", id) + watchCh, obj, err := txn.FirstWatch("csi_plugins", "id", id) if err != nil { return nil, fmt.Errorf("csi_plugin lookup failed: %s %v", id, err) } From 2da6fc76642f84b08bfe2047b6e5a073a79ba854 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Fri, 4 Mar 2022 12:58:17 -0800 Subject: [PATCH 40/89] cli: namespace apply should autocomplete hcl files --- command/namespace_apply.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/command/namespace_apply.go 
b/command/namespace_apply.go index edab6bff2..a6acd0da7 100644 --- a/command/namespace_apply.go +++ b/command/namespace_apply.go @@ -61,7 +61,11 @@ func (c *NamespaceApplyCommand) AutocompleteFlags() complete.Flags { } func (c *NamespaceApplyCommand) AutocompleteArgs() complete.Predictor { - return NamespacePredictor(c.Meta.Client, nil) + return complete.PredictOr( + NamespacePredictor(c.Meta.Client, nil), + complete.PredictFiles("*.hcl"), + complete.PredictFiles("*.json"), + ) } func (c *NamespaceApplyCommand) Synopsis() string { From 40476143b1432c65c11f51b025851c7ee0633859 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Fri, 4 Mar 2022 12:58:58 -0800 Subject: [PATCH 41/89] cli: namespace tests should be run on oss --- command/agent/namespace_endpoint_test.go | 3 --- command/namespace_apply_test.go | 3 --- command/namespace_delete_test.go | 3 --- command/namespace_inspect_test.go | 3 --- command/namespace_list_test.go | 8 +------- command/namespace_status_oss_test.go | 10 ++++++++++ command/namespace_status_test.go | 7 ++++--- 7 files changed, 15 insertions(+), 22 deletions(-) create mode 100644 command/namespace_status_oss_test.go diff --git a/command/agent/namespace_endpoint_test.go b/command/agent/namespace_endpoint_test.go index 7cbf2301b..3c5b1bf2f 100644 --- a/command/agent/namespace_endpoint_test.go +++ b/command/agent/namespace_endpoint_test.go @@ -1,6 +1,3 @@ -//go:build ent -// +build ent - package agent import ( diff --git a/command/namespace_apply_test.go b/command/namespace_apply_test.go index a5611e2ad..95164b2ba 100644 --- a/command/namespace_apply_test.go +++ b/command/namespace_apply_test.go @@ -1,6 +1,3 @@ -//go:build ent -// +build ent - package command import ( diff --git a/command/namespace_delete_test.go b/command/namespace_delete_test.go index 155f2cff7..95fe7c8ba 100644 --- a/command/namespace_delete_test.go +++ b/command/namespace_delete_test.go @@ -1,6 +1,3 @@ -//go:build ent -// +build ent - package command import ( diff --git a/command/namespace_inspect_test.go b/command/namespace_inspect_test.go index 296b0fa5d..35bfef085 100644 --- a/command/namespace_inspect_test.go +++ b/command/namespace_inspect_test.go @@ -1,6 +1,3 @@ -//go:build ent -// +build ent - package command import ( diff --git a/command/namespace_list_test.go b/command/namespace_list_test.go index 822199c62..b8e662bdd 100644 --- a/command/namespace_list_test.go +++ b/command/namespace_list_test.go @@ -1,6 +1,3 @@ -//go:build ent -// +build ent - package command import ( @@ -10,10 +7,7 @@ import ( "github.com/mitchellh/cli" ) -func TestNamespaceListCommand_Implements(t *testing.T) { - t.Parallel() - var _ cli.Command = &NamespaceListCommand{} -} +var _ cli.Command = (*NamespaceListCommand)(nil) func TestNamespaceListCommand_Fails(t *testing.T) { t.Parallel() diff --git a/command/namespace_status_oss_test.go b/command/namespace_status_oss_test.go new file mode 100644 index 000000000..7bc4d2e56 --- /dev/null +++ b/command/namespace_status_oss_test.go @@ -0,0 +1,10 @@ +//go:build !ent +// +build !ent + +package command + +import "github.com/hashicorp/nomad/api" + +func testQuotaSpec() *api.QuotaSpec { + panic("not implemented - enterprise only") +} diff --git a/command/namespace_status_test.go b/command/namespace_status_test.go index 74073b8f2..fa70ed218 100644 --- a/command/namespace_status_test.go +++ b/command/namespace_status_test.go @@ -1,6 +1,3 @@ -//go:build ent -// +build ent - package command import ( @@ -77,6 +74,10 @@ func TestNamespaceStatusCommand_Good_Quota(t *testing.T) { srv, 
client, url := testServer(t, true, nil) defer srv.Shutdown() + if !srv.Enterprise { + t.Skip("Skipping enterprise-only quota test") + } + ui := cli.NewMockUi() cmd := &NamespaceStatusCommand{Meta: Meta{Ui: ui}} From 7706162d681b03f3cb6a68d2d97454761ad71764 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Fri, 4 Mar 2022 12:59:17 -0800 Subject: [PATCH 42/89] cli: namespace meta should be formatted consistently --- command/namespace_status.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/command/namespace_status.go b/command/namespace_status.go index 654d1883d..d9d59d76d 100644 --- a/command/namespace_status.go +++ b/command/namespace_status.go @@ -84,14 +84,12 @@ func (c *NamespaceStatusCommand) Run(args []string) int { if len(ns.Meta) > 0 { c.Ui.Output(c.Colorize().Color("\n[bold]Metadata[reset]")) - var keys []string + var meta []string for k := range ns.Meta { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - c.Ui.Output(fmt.Sprintf("%s:\x1f%s", k, ns.Meta[k])) + meta = append(meta, fmt.Sprintf("%s|%s", k, ns.Meta[k])) } + sort.Strings(meta) + c.Ui.Output(formatKV(meta)) } if ns.Quota != "" { From c5922f27d11215e535920744d5be46517a5c7691 Mon Sep 17 00:00:00 2001 From: Michael Schurter Date: Fri, 4 Mar 2022 14:18:57 -0800 Subject: [PATCH 43/89] docs: add meta to namespace docs --- website/content/api-docs/namespaces.mdx | 50 +++++++++++++------ .../content/docs/commands/namespace/apply.mdx | 10 ++-- .../docs/commands/namespace/status.mdx | 9 ++-- 3 files changed, 46 insertions(+), 23 deletions(-) diff --git a/website/content/api-docs/namespaces.mdx b/website/content/api-docs/namespaces.mdx index 882a279a4..33c6363a8 100644 --- a/website/content/api-docs/namespaces.mdx +++ b/website/content/api-docs/namespaces.mdx @@ -48,17 +48,24 @@ $ curl \ ```json [ { - "CreateIndex": 31, - "Description": "Production API Servers", - "ModifyIndex": 31, - "Name": "api-prod", + "Capabilities": null, + "CreateIndex": 1, + "Description": "Default shared namespace", + "Meta": null, + "ModifyIndex": 1, + "Name": "default", "Quota": "" }, { - "CreateIndex": 5, - "Description": "Default shared namespace", - "ModifyIndex": 5, - "Name": "default", + "Capabilities": null, + "CreateIndex": 17, + "Description": "Development Staging Namespace", + "Meta": { + "type": "dev", + "contact": "helpdesk@example.com" + }, + "ModifyIndex": 17, + "Name": "staging", "Quota": "" } ] @@ -88,19 +95,23 @@ The table below shows this endpoint's support for ```shell-session $ curl \ - https://localhost:4646/v1/namespace/api-prod + https://localhost:4646/v1/namespace/staging ``` ### Sample Response ```json { - "CreateIndex": 31, - "Description": "Production API Servers", - "Quota": "", - "Hash": "N8WvePwqkp6J354eLJMKyhvsFdPELAos0VuBfMoVKoU=", - "ModifyIndex": 31, - "Name": "api-prod" + "Capabilities": null, + "CreateIndex": 17, + "Description": "Development Staging Namespace", + "Meta": { + "type": "dev", + "contact": "helpdesk@example.com" + }, + "ModifyIndex": 17, + "Name": "staging", + "Quota": "" } ``` @@ -128,6 +139,10 @@ The table below shows this endpoint's support for - `Description` `(string: "")` - Specifies an optional human-readable description of the namespace. +- `Meta` `(object: null)` - Optional object with string keys and values of + metadata to attach to the namespace. Namespace metadata is not used by Nomad + and is intended for use by operators and third party tools. + - `Quota` `(string: "")` - Specifies an quota to attach to the namespace. 
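As a quick illustration of these fields (not part of this patch), and assuming the Go `api` package exposes the new `Meta` field on its `Namespace` struct, a namespace with metadata could be registered programmatically like so:

```go
package main

import (
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// DefaultConfig reads NOMAD_ADDR, NOMAD_TOKEN, etc. from the environment
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	ns := &api.Namespace{
		Name:        "staging",
		Description: "Development Staging Namespace",
		// Meta is free-form; Nomad does not interpret these values
		Meta: map[string]string{
			"type":    "dev",
			"contact": "helpdesk@example.com",
		},
	}

	// Register creates the namespace, or updates it if it already exists
	if _, err := client.Namespaces().Register(ns, nil); err != nil {
		log.Fatal(err)
	}
}
```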
### Sample Payload @@ -136,10 +151,15 @@ The table below shows this endpoint's support for { "Name": "api-prod", "Description": "Production API Servers", + "Meta": { + "contact": "platform-eng@example.com" + }, "Quota": "prod-quota" } ``` +Note that the `Quota` key is Enterprise-only. + ### Sample Request ```shell-session diff --git a/website/content/docs/commands/namespace/apply.mdx b/website/content/docs/commands/namespace/apply.mdx index fa54517c2..3f42e8e8b 100644 --- a/website/content/docs/commands/namespace/apply.mdx +++ b/website/content/docs/commands/namespace/apply.mdx @@ -18,7 +18,7 @@ when introduced in Nomad 0.7. nomad namespace apply [options] ``` -Apply is used to create or update a namespace. The specification file +Apply is used to create or update a namespace. The HCL specification file will be read from stdin by specifying "-", otherwise a path to the file is expected. @@ -37,7 +37,7 @@ If ACLs are enabled, this command requires a management ACL token. - `-description` : An optional human readable description for the namespace. -- `json` : Parse the input as a JSON namespace specification. +- `-json` : Parse the input as a JSON namespace specification. ## Examples @@ -56,7 +56,7 @@ $ nomad namespace apply -quota= api-prod Create a namespace from a file: ```shell-session -$ cat namespace.json +$ cat namespace.hcl name = "dev" description = "Namespace for developers" @@ -66,8 +66,8 @@ capabilities { } meta { - owner = "John Doe" + owner = "John Doe" contact_mail = "john@mycompany.com } -$ nomad namespace apply namespace.json +$ nomad namespace apply namespace.hcl ``` diff --git a/website/content/docs/commands/namespace/status.mdx b/website/content/docs/commands/namespace/status.mdx index 570b065e6..5a51e3ac6 100644 --- a/website/content/docs/commands/namespace/status.mdx +++ b/website/content/docs/commands/namespace/status.mdx @@ -33,11 +33,14 @@ View the status of a namespace: ```shell-session $ nomad namespace status default -Name = default -Description = Default shared namespace -Quota = shared-default-quota +Name = api-prod +Description = Prod API servers +Quota = prod EnabledDrivers = docker,exec DisabledDrivers = raw_exec + +Metadata +contact = platform-eng@example.com Quota Limits Region CPU Usage Memory Usage From 451586afd9e826f2b817290aca6518aad1bb5d0c Mon Sep 17 00:00:00 2001 From: Jorge Marey Date: Mon, 7 Mar 2022 10:56:56 +0100 Subject: [PATCH 44/89] Add changelog file. Add meta to ns mock for testing --- .changelog/12138.txt | 3 +++ nomad/mock/mock.go | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 .changelog/12138.txt diff --git a/.changelog/12138.txt b/.changelog/12138.txt new file mode 100644 index 000000000..297eedf94 --- /dev/null +++ b/.changelog/12138.txt @@ -0,0 +1,3 @@ +```release-note:improvement +namespaces: Allow adding custom metadata to namespaces. 
+``` diff --git a/nomad/mock/mock.go b/nomad/mock/mock.go index 0b817d5fa..ef70968ca 100644 --- a/nomad/mock/mock.go +++ b/nomad/mock/mock.go @@ -2246,8 +2246,10 @@ func AllocNetworkStatus() *structs.AllocNetworkStatus { } func Namespace() *structs.Namespace { + uuid := uuid.Generate() ns := &structs.Namespace{ - Name: fmt.Sprintf("team-%s", uuid.Generate()), + Name: fmt.Sprintf("team-%s", uuid), + Meta: map[string]string{"team": uuid}, Description: "test namespace", CreateIndex: 100, ModifyIndex: 200, From bec44cc6f9825705551eb3dfe504de0028e13e20 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 7 Mar 2022 09:06:50 -0500 Subject: [PATCH 45/89] csi: get plugin ID for creating snapshot from volume, not args (#12195) The `CreateSnapshot` RPC expects a plugin ID to be set by the API, but in the common case of the `nomad volume snapshot create` command, we don't ask the user for the plugin ID because it's available from the volume we're snapshotting. Change the order of the RPC so that we get the volume first and then use the volume's plugin ID for the plugin if the API didn't set the value. --- .changelog/12195.txt | 3 +++ nomad/csi_endpoint.go | 39 +++++++++++++++++++++----------------- nomad/csi_endpoint_test.go | 1 - 3 files changed, 25 insertions(+), 18 deletions(-) create mode 100644 .changelog/12195.txt diff --git a/.changelog/12195.txt b/.changelog/12195.txt new file mode 100644 index 000000000..97ec9bc75 --- /dev/null +++ b/.changelog/12195.txt @@ -0,0 +1,3 @@ +```release-note:bug +csi: Fixed a bug where creating snapshots required a plugin ID instead of falling back to the volume's plugin ID +``` diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index ed0f95131..b95afc3a4 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -1095,22 +1095,6 @@ func (v *CSIVolume) CreateSnapshot(args *structs.CSISnapshotCreateRequest, reply return fmt.Errorf("snapshot cannot be nil") } - plugin, err := state.CSIPluginByID(nil, snap.PluginID) - if err != nil { - multierror.Append(&mErr, - fmt.Errorf("error querying plugin %q: %v", snap.PluginID, err)) - continue - } - if plugin == nil { - multierror.Append(&mErr, fmt.Errorf("no such plugin %q", snap.PluginID)) - continue - } - if !plugin.HasControllerCapability(structs.CSIControllerSupportsCreateDeleteSnapshot) { - multierror.Append(&mErr, - fmt.Errorf("plugin %q does not support snapshot", snap.PluginID)) - continue - } - vol, err := state.CSIVolumeByID(nil, args.RequestNamespace(), snap.SourceVolumeID) if err != nil { multierror.Append(&mErr, fmt.Errorf("error querying volume %q: %v", snap.SourceVolumeID, err)) @@ -1121,13 +1105,34 @@ func (v *CSIVolume) CreateSnapshot(args *structs.CSISnapshotCreateRequest, reply continue } + pluginID := snap.PluginID + if pluginID == "" { + pluginID = vol.PluginID + } + + plugin, err := state.CSIPluginByID(nil, pluginID) + if err != nil { + multierror.Append(&mErr, + fmt.Errorf("error querying plugin %q: %v", pluginID, err)) + continue + } + if plugin == nil { + multierror.Append(&mErr, fmt.Errorf("no such plugin %q", pluginID)) + continue + } + if !plugin.HasControllerCapability(structs.CSIControllerSupportsCreateDeleteSnapshot) { + multierror.Append(&mErr, + fmt.Errorf("plugin %q does not support snapshot", pluginID)) + continue + } + cReq := &cstructs.ClientCSIControllerCreateSnapshotRequest{ ExternalSourceVolumeID: vol.ExternalID, Name: snap.Name, Secrets: vol.Secrets, Parameters: snap.Parameters, } - cReq.PluginID = plugin.ID + cReq.PluginID = pluginID cResp := 
&cstructs.ClientCSIControllerCreateSnapshotResponse{} err = v.srv.RPC(method, cReq, cResp) if err != nil { diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 4c846d549..305604a5c 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -1200,7 +1200,6 @@ func TestCSIVolumeEndpoint_CreateSnapshot(t *testing.T) { SourceVolumeID: "test-volume0", Secrets: structs.CSISecrets{"mysecret": "secretvalue"}, Parameters: map[string]string{"myparam": "paramvalue"}, - PluginID: "minnie", }}, WriteRequest: structs.WriteRequest{ Region: "global", From 711a9d9a8fce5932953f419fcdf26bf87910a04d Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 7 Mar 2022 09:58:29 -0500 Subject: [PATCH 46/89] csi: volume snapshot list plugin option is required (#12197) The RPC for listing volume snapshots requires a plugin ID. Update the `volume snapshot list` command to find the specific plugin from the provided prefix. --- .changelog/12197.txt | 3 ++ command/volume_snapshot_list.go | 38 +++++++++---------- e2e/csi/ebs.go | 2 +- .../docs/commands/volume/snapshot-list.mdx | 11 +++--- 4 files changed, 27 insertions(+), 27 deletions(-) create mode 100644 .changelog/12197.txt diff --git a/.changelog/12197.txt b/.changelog/12197.txt new file mode 100644 index 000000000..bf07836e0 --- /dev/null +++ b/.changelog/12197.txt @@ -0,0 +1,3 @@ +```release-note:bug +csi: Fixed a bug where `volume snapshot list` did not correctly filter by plugin IDs. The `-plugin` parameter is required. +``` diff --git a/command/volume_snapshot_list.go b/command/volume_snapshot_list.go index 02c7f7ffa..848552118 100644 --- a/command/volume_snapshot_list.go +++ b/command/volume_snapshot_list.go @@ -22,8 +22,9 @@ func (c *VolumeSnapshotListCommand) Help() string { helpText := ` Usage: nomad volume snapshot list [-plugin plugin_id] - Display a list of CSI volume snapshots along with their - source volume ID as known to the external storage provider. + Display a list of CSI volume snapshots for a plugin along + with their source volume ID as known to the external + storage provider. When ACLs are enabled, this command requires a token with the 'csi-list-volumes' capability for the plugin's namespace. @@ -34,8 +35,8 @@ General Options: List Options: - -plugin: Display only snapshots managed by a particular plugin. By default - this command will query all plugins for their snapshots. + -plugin: Display only snapshots managed by a particular plugin. This + parameter is required. -secret Secrets to pass to the plugin to list snapshots. 
Accepts multiple @@ -45,7 +46,7 @@ List Options: } func (c *VolumeSnapshotListCommand) Synopsis() string { - return "Display a list of volume snapshots" + return "Display a list of volume snapshots for plugin" } func (c *VolumeSnapshotListCommand) AutocompleteFlags() complete.Flags { @@ -100,15 +101,17 @@ func (c *VolumeSnapshotListCommand) Run(args []string) int { return 1 } - // filter by plugin if a plugin ID was passed - if pluginID != "" { - plugs, _, err := client.CSIPlugins().List(&api.QueryOptions{Prefix: pluginID}) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error querying CSI plugins: %s", err)) - return 1 - } - - if len(plugs) > 1 { + plugs, _, err := client.CSIPlugins().List(&api.QueryOptions{Prefix: pluginID}) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying CSI plugins: %s", err)) + return 1 + } + if len(plugs) == 0 { + c.Ui.Error(fmt.Sprintf("No plugins(s) with prefix or ID %q found", pluginID)) + return 1 + } + if len(plugs) > 1 { + if pluginID != plugs[0].ID { out, err := c.csiFormatPlugins(plugs) if err != nil { c.Ui.Error(fmt.Sprintf("Error formatting: %s", err)) @@ -117,13 +120,8 @@ func (c *VolumeSnapshotListCommand) Run(args []string) int { c.Ui.Error(fmt.Sprintf("Prefix matched multiple plugins\n\n%s", out)) return 1 } - if len(plugs) == 0 { - c.Ui.Error(fmt.Sprintf("No plugins(s) with prefix or ID %q found", pluginID)) - return 1 - } - - pluginID = plugs[0].ID } + pluginID = plugs[0].ID secrets := api.CSISecrets{} for _, kv := range secretsArgs { diff --git a/e2e/csi/ebs.go b/e2e/csi/ebs.go index 6935fd8b5..880e064f9 100644 --- a/e2e/csi/ebs.go +++ b/e2e/csi/ebs.go @@ -176,7 +176,7 @@ func (tc *CSIControllerPluginEBSTest) TestSnapshot(f *framework.F) { f.NoError(err, fmt.Sprintf("could not parse output:\n%v", out)) f.Len(snaps, 1, fmt.Sprintf("could not parse output:\n%v", out)) - out, err = e2e.Command("nomad", "volume", "snapshot", "list") + out, err = e2e.Command("nomad", "volume", "snapshot", "list", "-plugin", ebsPluginID) requireNoErrorElseDump(f, err, "could not list volume snapshots", tc.pluginJobIDs) f.Contains(out, snaps[0]["ID"], fmt.Sprintf("volume snapshot list did not include expected snapshot:\n%v", out)) diff --git a/website/content/docs/commands/volume/snapshot-list.mdx b/website/content/docs/commands/volume/snapshot-list.mdx index 5977321b6..0135cf967 100644 --- a/website/content/docs/commands/volume/snapshot-list.mdx +++ b/website/content/docs/commands/volume/snapshot-list.mdx @@ -30,11 +30,10 @@ Nomad. ## Snapshot List Options - `-plugin`: Display only snapshots managed by a particular [CSI - plugin][csi_plugin]. By default the `snapshot list` command will query all - plugins for their snapshots. This flag accepts a plugin ID or prefix. If - there is an exact match based on the provided plugin, then that specific - plugin will be queried. Otherwise, a list of matching plugins will be - displayed. + plugin][csi_plugin]. This flag is required and accepts a plugin ID + or prefix. If there is an exact match based on the provided plugin, + then that specific plugin will be queried. Otherwise, a list of + matching plugins will be displayed. - `-secret`: Secrets to pass to the plugin to list snapshots. 
Accepts multiple flags in the form `-secret key=value` @@ -54,7 +53,7 @@ snap-67890 vol-fedcba 50GiB 2021-01-04T15:45:00Z true List volume snapshots with two secret key/value pairs: ```shell-session -$ nomad volume snapshot list -secret key1=value1 -secret key2=val2 +$ nomad volume snapshot list -plugin aws-ebs0 -secret key1=value1 -secret key2=val2 Snapshot ID External ID Size Creation Time Ready? snap-12345 vol-abcdef 50GiB 2021-01-03T12:15:02Z true ``` From 7d0f87b9106a8557896c6cc09576c846235495a5 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 7 Mar 2022 11:06:59 -0500 Subject: [PATCH 47/89] CSI: allow updates to volumes on re-registration (#12167) CSI `CreateVolume` RPC is idempotent given that the topology, capabilities, and parameters are unchanged. CSI volumes have many user-defined fields that are immutable once set, and many fields that are not user-settable. Update the `Register` RPC so that updating a volume via the API merges onto any existing volume without touching Nomad-controlled fields, while validating it with the same strict requirements expected for idempotent `CreateVolume` RPCs. Also, clarify that this state store method is used for everything, not just for the `Register` RPC. --- .changelog/12167.txt | 3 + client/csi_endpoint.go | 4 + command/alloc_status_test.go | 2 +- command/volume_register_csi.go | 1 + command/volume_status_test.go | 2 +- nomad/csi_endpoint.go | 60 +++++-- nomad/csi_endpoint_test.go | 21 ++- nomad/fsm.go | 2 +- nomad/search_endpoint_test.go | 4 +- nomad/state/state_store.go | 22 +-- nomad/state/state_store_test.go | 18 ++- nomad/state/testing.go | 2 +- nomad/structs/csi.go | 114 +++++++++++++ nomad/structs/csi_test.go | 169 ++++++++++++++++++++ nomad/structs/node.go | 36 +++++ nomad/volumewatcher/volumes_watcher_test.go | 8 +- scheduler/feasible.go | 9 +- scheduler/feasible_test.go | 6 +- scheduler/generic_sched_test.go | 4 +- scheduler/stack_test.go | 2 +- 20 files changed, 429 insertions(+), 60 deletions(-) create mode 100644 .changelog/12167.txt diff --git a/.changelog/12167.txt b/.changelog/12167.txt new file mode 100644 index 000000000..adfc73082 --- /dev/null +++ b/.changelog/12167.txt @@ -0,0 +1,3 @@ +```release-note:improvement +csi: Allow volumes to be re-registered to be updated while not in use +``` diff --git a/client/csi_endpoint.go b/client/csi_endpoint.go index 2438edce0..abbe0b8e3 100644 --- a/client/csi_endpoint.go +++ b/client/csi_endpoint.go @@ -216,6 +216,10 @@ func (c *CSI) ControllerCreateVolume(req *structs.ClientCSIControllerCreateVolum resp.CapacityBytes = cresp.Volume.CapacityBytes resp.VolumeContext = cresp.Volume.VolumeContext + // Note: we safely throw away cresp.Volume.ContentSource here + // because it's just round-tripping the value set by the user in + // the server RPC call + resp.Topologies = make([]*nstructs.CSITopology, len(cresp.Volume.AccessibleTopology)) for _, topo := range cresp.Volume.AccessibleTopology { resp.Topologies = append(resp.Topologies, diff --git a/command/alloc_status_test.go b/command/alloc_status_test.go index 69626b50e..fca8a2a19 100644 --- a/command/alloc_status_test.go +++ b/command/alloc_status_test.go @@ -479,7 +479,7 @@ func TestAllocStatusCommand_CSIVolumes(t *testing.T) { Segments: map[string]string{"foo": "bar"}, }}, }} - err = state.CSIVolumeRegister(1002, vols) + err = state.UpsertCSIVolume(1002, vols) require.NoError(t, err) // Upsert the job and alloc diff --git a/command/volume_register_csi.go b/command/volume_register_csi.go index b3cf9e2fe..c86735bc5 100644 --- 
a/command/volume_register_csi.go +++ b/command/volume_register_csi.go @@ -24,6 +24,7 @@ func (c *VolumeRegisterCommand) csiRegister(client *api.Client, ast *ast.File) i return 1 } + c.Ui.Output(fmt.Sprintf("Volume %q registered", vol.ID)) return 0 } diff --git a/command/volume_status_test.go b/command/volume_status_test.go index afd35213c..313d57502 100644 --- a/command/volume_status_test.go +++ b/command/volume_status_test.go @@ -46,7 +46,7 @@ func TestCSIVolumeStatusCommand_AutocompleteArgs(t *testing.T) { PluginID: "glade", } - require.NoError(t, state.CSIVolumeRegister(1000, []*structs.CSIVolume{vol})) + require.NoError(t, state.UpsertCSIVolume(1000, []*structs.CSIVolume{vol})) prefix := vol.ID[:len(vol.ID)-5] args := complete.Args{Last: prefix} diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index b95afc3a4..139ed4b72 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -240,6 +240,7 @@ func (v *CSIVolume) pluginValidateVolume(req *structs.CSIVolumeRegisterRequest, vol.Provider = plugin.Provider vol.ProviderVersion = plugin.Version + return plugin, nil } @@ -265,7 +266,15 @@ func (v *CSIVolume) controllerValidateVolume(req *structs.CSIVolumeRegisterReque return v.srv.RPC(method, cReq, cResp) } -// Register registers a new volume +// Register registers a new volume or updates an existing volume. Note +// that most user-defined CSIVolume fields are immutable once the +// volume has been created. +// +// If the user needs to change fields because they've misconfigured +// the registration of the external volume, we expect that claims +// won't work either, and the user can deregister the volume and try +// again with the right settings. This lets us be as strict with +// validation here as the CreateVolume CSI RPC is expected to be. func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *structs.CSIVolumeRegisterResponse) error { if done, err := v.srv.forward("CSIVolume.Register", args, args, reply); done { return err @@ -291,11 +300,50 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru // We also validate that the plugin exists for each plugin, and validate the // capabilities when the plugin has a controller. for _, vol := range args.Volumes { - vol.Namespace = args.RequestNamespace() + + snap, err := v.srv.State().Snapshot() + if err != nil { + return err + } + // TODO: allow volume spec file to set namespace + // https://github.com/hashicorp/nomad/issues/11196 + if vol.Namespace == "" { + vol.Namespace = args.RequestNamespace() + } if err = vol.Validate(); err != nil { return err } + ws := memdb.NewWatchSet() + existingVol, err := snap.CSIVolumeByID(ws, vol.Namespace, vol.ID) + if err != nil { + return err + } + + // CSIVolume has many user-defined fields which are immutable + // once set, and many fields that are controlled by Nomad and + // are not user-settable. We merge onto a copy of the existing + // volume to allow a user to submit a volume spec for `volume + // create` and reuse it for updates in `volume register` + // without having to manually remove the fields unused by + // register (and similar use cases with API consumers such as + // Terraform). 
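// The update path below is copy-merge-replace: we mutate a copy of
// the stored volume via Merge, then overwrite the request's volume
// in place so that the plugin and controller validation that follows
// runs against the merged result.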
+ if existingVol != nil { + existingVol = existingVol.Copy() + err = existingVol.Merge(vol) + if err != nil { + return err + } + *vol = *existingVol + } else if vol.Topologies == nil || len(vol.Topologies) == 0 { + // The topologies for the volume have already been set + // when it was created, so for newly register volumes + // we accept the user's description of that topology + if vol.RequestedTopologies != nil { + vol.Topologies = vol.RequestedTopologies.Required + } + } + plugin, err := v.pluginValidateVolume(args, vol) if err != nil { return err @@ -303,14 +351,6 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru if err := v.controllerValidateVolume(args, vol, plugin); err != nil { return err } - - // The topologies for the volume have already been set when it was - // created, so we accept the user's description of that topology - if vol.Topologies == nil || len(vol.Topologies) == 0 { - if vol.RequestedTopologies != nil { - vol.Topologies = vol.RequestedTopologies.Required - } - } } resp, index, err := v.srv.raftApply(structs.CSIVolumeRegisterRequestType, args) diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 305604a5c..528020bc9 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -45,7 +45,7 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err := state.CSIVolumeRegister(999, vols) + err := state.UpsertCSIVolume(999, vols) require.NoError(t, err) // Create the register request @@ -95,7 +95,7 @@ func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err := state.CSIVolumeRegister(999, vols) + err := state.UpsertCSIVolume(999, vols) require.NoError(t, err) // Create the register request @@ -145,7 +145,6 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { // Create the volume vols := []*structs.CSIVolume{{ ID: id0, - Namespace: "notTheNamespace", PluginID: "minnie", AccessMode: structs.CSIVolumeAccessModeSingleNodeReader, // legacy field ignored AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, // legacy field ignored @@ -286,7 +285,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { }}, }} index++ - err = state.CSIVolumeRegister(index, vols) + err = state.UpsertCSIVolume(index, vols) require.NoError(t, err) // Verify that the volume exists, and is healthy @@ -425,7 +424,7 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err = state.CSIVolumeRegister(1003, vols) + err = state.UpsertCSIVolume(1003, vols) require.NoError(t, err) alloc := mock.BatchAlloc() @@ -535,7 +534,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { } index++ - err = state.CSIVolumeRegister(index, []*structs.CSIVolume{vol}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol}) require.NoError(t, err) // setup: create an alloc that will claim our volume @@ -642,7 +641,7 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }}, }} - err = state.CSIVolumeRegister(1002, vols) + err = state.UpsertCSIVolume(1002, vols) require.NoError(t, err) // Query everything in the namespace @@ -721,7 +720,7 @@ func TestCSIVolumeEndpoint_ListAllNamespaces(t *testing.T) { }}, }, } - err = state.CSIVolumeRegister(1001, vols) + err = state.UpsertCSIVolume(1001, vols) require.NoError(t, err) // Lookup volumes in all namespaces @@ -972,7 +971,7 @@ func 
TestCSIVolumeEndpoint_Delete(t *testing.T) { Secrets: structs.CSISecrets{"mysecret": "secretvalue"}, }} index++ - err = state.CSIVolumeRegister(index, vols) + err = state.UpsertCSIVolume(index, vols) require.NoError(t, err) // Delete volumes @@ -1191,7 +1190,7 @@ func TestCSIVolumeEndpoint_CreateSnapshot(t *testing.T) { ExternalID: "vol-12345", }} index++ - require.NoError(t, state.CSIVolumeRegister(index, vols)) + require.NoError(t, state.UpsertCSIVolume(index, vols)) // Create the snapshot request req1 := &structs.CSISnapshotCreateRequest{ @@ -1665,7 +1664,7 @@ func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { ControllerRequired: false, }, } - err = state.CSIVolumeRegister(1002, vols) + err = state.UpsertCSIVolume(1002, vols) require.NoError(t, err) // has controller diff --git a/nomad/fsm.go b/nomad/fsm.go index 6fcd0a044..c288fc09b 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -1260,7 +1260,7 @@ func (n *nomadFSM) applyCSIVolumeRegister(buf []byte, index uint64) interface{} } defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_register"}, time.Now()) - if err := n.state.CSIVolumeRegister(index, req.Volumes); err != nil { + if err := n.state.UpsertCSIVolume(index, req.Volumes); err != nil { n.logger.Error("CSIVolumeRegister failed", "error", err) return err } diff --git a/nomad/search_endpoint_test.go b/nomad/search_endpoint_test.go index f4a4fa8ef..cbd634b3e 100644 --- a/nomad/search_endpoint_test.go +++ b/nomad/search_endpoint_test.go @@ -728,7 +728,7 @@ func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { testutil.WaitForLeader(t, s.RPC) id := uuid.Generate() - err := s.fsm.State().CSIVolumeRegister(1000, []*structs.CSIVolume{{ + err := s.fsm.State().UpsertCSIVolume(1000, []*structs.CSIVolume{{ ID: id, Namespace: structs.DefaultNamespace, PluginID: "glade", @@ -1348,7 +1348,7 @@ func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { testutil.WaitForLeader(t, s.RPC) id := uuid.Generate() - err := s.fsm.State().CSIVolumeRegister(1000, []*structs.CSIVolume{{ + err := s.fsm.State().UpsertCSIVolume(1000, []*structs.CSIVolume{{ ID: id, Namespace: structs.DefaultNamespace, PluginID: "glade", diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 8ebbc6bc2..1dd9e4814 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -2143,8 +2143,8 @@ func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, namespace, id string) return iter, nil } -// CSIVolumeRegister adds a volume to the server store, failing if it already exists -func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolume) error { +// UpsertCSIVolume inserts a volume in the state store. 
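// It rejects changes to a volume's identity fields (external ID,
// plugin ID, provider) and rejects any update to a volume that is
// currently in use.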
+func (s *StateStore) UpsertCSIVolume(index uint64, volumes []*structs.CSIVolume) error { txn := s.db.WriteTxn(index) defer txn.Abort() @@ -2155,7 +2155,6 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum return fmt.Errorf("volume %s is in nonexistent namespace %s", v.ID, v.Namespace) } - // Check for volume existence obj, err := txn.First("csi_volumes", "id", v.Namespace, v.ID) if err != nil { return fmt.Errorf("volume existence check error: %v", err) @@ -2164,17 +2163,20 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum // Allow some properties of a volume to be updated in place, but // prevent accidentally overwriting important properties, or // overwriting a volume in use - old, ok := obj.(*structs.CSIVolume) - if ok && - old.InUse() || - old.ExternalID != v.ExternalID || + old := obj.(*structs.CSIVolume) + if old.ExternalID != v.ExternalID || old.PluginID != v.PluginID || old.Provider != v.Provider { - return fmt.Errorf("volume exists: %s", v.ID) + return fmt.Errorf("volume identity cannot be updated: %s", v.ID) + } + s.CSIVolumeDenormalize(nil, old.Copy()) + if old.InUse() { + return fmt.Errorf("volume cannot be updated while in use") } - } - if v.CreateIndex == 0 { + v.CreateIndex = old.CreateIndex + v.ModifyIndex = index + } else { v.CreateIndex = index v.ModifyIndex = index } diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 9f4dd3930..32987465a 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -2684,6 +2684,10 @@ func TestStateStore_CSIVolume(t *testing.T) { v0.Schedulable = true v0.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v0.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem + v0.RequestedCapabilities = []*structs.CSIVolumeCapability{{ + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + }} index++ v1 := structs.NewCSIVolume("foo", index) @@ -2693,20 +2697,24 @@ func TestStateStore_CSIVolume(t *testing.T) { v1.Schedulable = true v1.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v1.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem + v1.RequestedCapabilities = []*structs.CSIVolumeCapability{{ + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + }} index++ - err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v0, v1}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1}) require.NoError(t, err) // volume registration is idempotent, unless identies are changed index++ - err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v0, v1}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0, v1}) require.NoError(t, err) index++ v2 := v0.Copy() v2.PluginID = "new-id" - err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v2}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v2}) require.Error(t, err, fmt.Sprintf("volume exists: %s", v0.ID)) ws := memdb.NewWatchSet() @@ -2786,7 +2794,7 @@ func TestStateStore_CSIVolume(t *testing.T) { // registration is an error when the volume is in use index++ - err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v0}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{v0}) require.Error(t, err, "volume re-registered while in use") // as is deregistration index++ @@ -3126,7 +3134,7 @@ func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { Namespace: 
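// For example, an update that changes PluginID fails with "volume
// plugin ID cannot be updated", while an update that only changes
// Secrets or MountOptions succeeds; the caller is responsible for
// rejecting updates to volumes that are in use.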
structs.DefaultNamespace, PluginID: plugID, } - err = store.CSIVolumeRegister(nextIndex(store), []*structs.CSIVolume{vol}) + err = store.UpsertCSIVolume(nextIndex(store), []*structs.CSIVolume{vol}) require.NoError(t, err) err = store.DeleteJob(nextIndex(store), structs.DefaultNamespace, controllerJobID) diff --git a/nomad/state/testing.go b/nomad/state/testing.go index c7a2f3e8e..b288f7d3f 100644 --- a/nomad/state/testing.go +++ b/nomad/state/testing.go @@ -305,7 +305,7 @@ func TestBadCSIState(t testing.TB, store *StateStore) error { } vol = vol.Copy() // canonicalize - err = store.CSIVolumeRegister(index, []*structs.CSIVolume{vol}) + err = store.UpsertCSIVolume(index, []*structs.CSIVolume{vol}) if err != nil { return err } diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 3d7c5178f..71b28753e 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -1,6 +1,7 @@ package structs import ( + "errors" "fmt" "strings" "time" @@ -180,6 +181,22 @@ func (o *CSIMountOptions) Merge(p *CSIMountOptions) { } } +func (o *CSIMountOptions) Equal(p *CSIMountOptions) bool { + if o == nil && p == nil { + return true + } + if o == nil || p == nil { + return false + } + + if o.FSType != p.FSType { + return false + } + + return helper.CompareSliceSetString( + o.MountFlags, p.MountFlags) +} + // CSIMountOptions implements the Stringer and GoStringer interfaces to prevent // accidental leakage of sensitive mount flags via logs. var _ fmt.Stringer = &CSIMountOptions{} @@ -707,6 +724,103 @@ func (v *CSIVolume) Validate() error { return nil } +// Merge updates the mutable fields of a volume with those from +// another volume. CSIVolume has many user-defined fields which are +// immutable once set, and many fields that are not +// user-settable. Merge will return an error if we try to mutate the +// user-defined immutable fields after they're set, but silently +// ignore fields that are controlled by Nomad. 
+func (v *CSIVolume) Merge(other *CSIVolume) error { + if other == nil { + return nil + } + + var errs *multierror.Error + + if v.Name != other.Name && other.Name != "" { + errs = multierror.Append(errs, errors.New("volume name cannot be updated")) + } + if v.ExternalID != other.ExternalID && other.ExternalID != "" { + errs = multierror.Append(errs, errors.New( + "volume external ID cannot be updated")) + } + if v.PluginID != other.PluginID { + errs = multierror.Append(errs, errors.New( + "volume plugin ID cannot be updated")) + } + if v.CloneID != other.CloneID && other.CloneID != "" { + errs = multierror.Append(errs, errors.New( + "volume clone ID cannot be updated")) + } + if v.SnapshotID != other.SnapshotID && other.SnapshotID != "" { + errs = multierror.Append(errs, errors.New( + "volume snapshot ID cannot be updated")) + } + + // must be compatible with capacity range + // TODO: when ExpandVolume is implemented we'll need to update + // this logic https://github.com/hashicorp/nomad/issues/10324 + if v.Capacity != 0 { + if other.RequestedCapacityMax < v.Capacity || + other.RequestedCapacityMin > v.Capacity { + errs = multierror.Append(errs, errors.New( + "volume requested capacity update was not compatible with existing capacity")) + } else { + v.RequestedCapacityMin = other.RequestedCapacityMin + v.RequestedCapacityMax = other.RequestedCapacityMax + } + } + + // must be compatible with volume_capabilities + if v.AccessMode != CSIVolumeAccessModeUnknown || + v.AttachmentMode != CSIVolumeAttachmentModeUnknown { + var ok bool + for _, cap := range other.RequestedCapabilities { + if cap.AccessMode == v.AccessMode && + cap.AttachmentMode == v.AttachmentMode { + ok = true + break + } + } + if ok { + v.RequestedCapabilities = other.RequestedCapabilities + } else { + errs = multierror.Append(errs, errors.New( + "volume requested capabilities update was not compatible with existing capability in use")) + } + } else { + v.RequestedCapabilities = other.RequestedCapabilities + } + + // topologies are immutable, so topology request changes must be + // compatible with the existing topology, if any + if len(v.Topologies) > 0 { + if !v.RequestedTopologies.Equal(other.RequestedTopologies) { + errs = multierror.Append(errs, errors.New( + "volume topology request update was not compatible with existing topology")) + } + } + + // MountOptions can be updated so long as the volume isn't in use, + // but the caller will reject updating an in-use volume + v.MountOptions = other.MountOptions + + // Secrets can be updated freely + v.Secrets = other.Secrets + + // must be compatible with parameters set by from CreateVolumeResponse + + if len(other.Parameters) != 0 && !helper.CompareMapStringString(v.Parameters, other.Parameters) { + errs = multierror.Append(errs, errors.New( + "volume parameters cannot be updated")) + } + + // Context is mutable and will be used during controller + // validation + v.Context = other.Context + return errs.ErrorOrNil() +} + // Request and response wrappers type CSIVolumeRegisterRequest struct { Volumes []*CSIVolume diff --git a/nomad/structs/csi_test.go b/nomad/structs/csi_test.go index 855a65871..8f84d6226 100644 --- a/nomad/structs/csi_test.go +++ b/nomad/structs/csi_test.go @@ -569,6 +569,175 @@ func TestCSIVolume_Validate(t *testing.T) { } +func TestCSIVolume_Merge(t *testing.T) { + + testCases := []struct { + name string + v *CSIVolume + update *CSIVolume + expected string + expectFn func(t *testing.T, v *CSIVolume) + }{ + { + name: "invalid capacity update", + v: 
&CSIVolume{Capacity: 100}, + update: &CSIVolume{ + RequestedCapacityMax: 300, RequestedCapacityMin: 200}, + expected: "volume requested capacity update was not compatible with existing capacity", + expectFn: func(t *testing.T, v *CSIVolume) { + require.NotEqual(t, 300, v.RequestedCapacityMax) + require.NotEqual(t, 200, v.RequestedCapacityMin) + }, + }, + { + name: "invalid capability update", + v: &CSIVolume{ + AccessMode: CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: CSIVolumeAttachmentModeFilesystem, + }, + update: &CSIVolume{ + RequestedCapabilities: []*CSIVolumeCapability{ + { + AccessMode: CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: CSIVolumeAttachmentModeFilesystem, + }, + }, + }, + expected: "volume requested capabilities update was not compatible with existing capability in use", + }, + { + name: "invalid topology update - removed", + v: &CSIVolume{ + RequestedTopologies: &CSITopologyRequest{ + Required: []*CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + }, + }, + Topologies: []*CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + }, + }, + update: &CSIVolume{}, + expected: "volume topology request update was not compatible with existing topology", + expectFn: func(t *testing.T, v *CSIVolume) { + require.Len(t, v.Topologies, 1) + }, + }, + { + name: "invalid topology requirement added", + v: &CSIVolume{ + Topologies: []*CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + }, + }, + update: &CSIVolume{ + RequestedTopologies: &CSITopologyRequest{ + Required: []*CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + {Segments: map[string]string{"rack": "R3"}}, + }, + }, + }, + expected: "volume topology request update was not compatible with existing topology", + expectFn: func(t *testing.T, v *CSIVolume) { + require.Len(t, v.Topologies, 1) + require.Equal(t, "R1", v.Topologies[0].Segments["rack"]) + }, + }, + { + name: "invalid topology preference removed", + v: &CSIVolume{ + Topologies: []*CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + }, + RequestedTopologies: &CSITopologyRequest{ + Preferred: []*CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + {Segments: map[string]string{"rack": "R3"}}, + }, + }, + }, + update: &CSIVolume{ + Topologies: []*CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + }, + RequestedTopologies: &CSITopologyRequest{ + Preferred: []*CSITopology{ + {Segments: map[string]string{"rack": "R3"}}, + }, + }, + }, + expected: "volume topology request update was not compatible with existing topology", + }, + { + name: "valid update", + v: &CSIVolume{ + Topologies: []*CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + {Segments: map[string]string{"rack": "R2"}}, + }, + AccessMode: CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: CSIVolumeAttachmentModeFilesystem, + MountOptions: &CSIMountOptions{ + FSType: "ext4", + MountFlags: []string{"noatime"}, + }, + RequestedTopologies: &CSITopologyRequest{ + Required: []*CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + }, + Preferred: []*CSITopology{ + {Segments: map[string]string{"rack": "R2"}}, + }, + }, + }, + update: &CSIVolume{ + Topologies: []*CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + {Segments: map[string]string{"rack": "R2"}}, + }, + MountOptions: &CSIMountOptions{ + FSType: "ext4", + MountFlags: []string{"noatime"}, + }, + RequestedTopologies: &CSITopologyRequest{ + Required: []*CSITopology{ + {Segments: map[string]string{"rack": "R1"}}, + }, + Preferred: []*CSITopology{ + 
{Segments: map[string]string{"rack": "R2"}}, + }, + }, + RequestedCapabilities: []*CSIVolumeCapability{ + { + AccessMode: CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: CSIVolumeAttachmentModeFilesystem, + }, + { + AccessMode: CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: CSIVolumeAttachmentModeFilesystem, + }, + }, + }, + }, + } + for _, tc := range testCases { + tc = tc + t.Run(tc.name, func(t *testing.T) { + err := tc.v.Merge(tc.update) + if tc.expected == "" { + require.NoError(t, err) + } else { + if tc.expectFn != nil { + tc.expectFn(t, tc.v) + } + require.Error(t, err, tc.expected) + require.Contains(t, err.Error(), tc.expected) + } + }) + } +} + func TestCSIPluginJobs(t *testing.T) { plug := NewCSIPlugin("foo", 1000) controller := &Job{ diff --git a/nomad/structs/node.go b/nomad/structs/node.go index 6a8f9cd47..8bbdc7a41 100644 --- a/nomad/structs/node.go +++ b/nomad/structs/node.go @@ -62,6 +62,19 @@ func (t *CSITopology) Equal(o *CSITopology) bool { return helper.CompareMapStringString(t.Segments, o.Segments) } +func (t *CSITopology) MatchFound(o []*CSITopology) bool { + if t == nil || o == nil || len(o) == 0 { + return false + } + + for _, other := range o { + if t.Equal(other) { + return true + } + } + return false +} + // CSITopologyRequest are the topologies submitted as options to the // storage provider at the time the volume was created. The storage // provider will return a single topology. @@ -70,6 +83,29 @@ type CSITopologyRequest struct { Preferred []*CSITopology } +func (tr *CSITopologyRequest) Equal(o *CSITopologyRequest) bool { + if tr == nil && o == nil { + return true + } + if tr == nil && o != nil || tr != nil && o == nil { + return false + } + if len(tr.Required) != len(o.Required) || len(tr.Preferred) != len(o.Preferred) { + return false + } + for i, topo := range tr.Required { + if !topo.Equal(o.Required[i]) { + return false + } + } + for i, topo := range tr.Preferred { + if !topo.Equal(o.Preferred[i]) { + return false + } + } + return true +} + // CSINodeInfo is the fingerprinted data from a CSI Plugin that is specific to // the Node API. 
type CSINodeInfo struct { diff --git a/nomad/volumewatcher/volumes_watcher_test.go b/nomad/volumewatcher/volumes_watcher_test.go index 1652faa40..47f1c970a 100644 --- a/nomad/volumewatcher/volumes_watcher_test.go +++ b/nomad/volumewatcher/volumes_watcher_test.go @@ -32,7 +32,7 @@ func TestVolumeWatch_EnableDisable(t *testing.T) { vol := testVolume(plugin, alloc, node.ID) index++ - err := srv.State().CSIVolumeRegister(index, []*structs.CSIVolume{vol}) + err := srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) require.NoError(err) claim := &structs.CSIVolumeClaim{ @@ -78,7 +78,7 @@ func TestVolumeWatch_LeadershipTransition(t *testing.T) { watcher.SetEnabled(true, srv.State(), "") index++ - err = srv.State().CSIVolumeRegister(index, []*structs.CSIVolume{vol}) + err = srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) require.NoError(err) // we should get or start up a watcher when we get an update for @@ -167,7 +167,7 @@ func TestVolumeWatch_StartStop(t *testing.T) { // register a volume vol := testVolume(plugin, alloc1, node.ID) index++ - err = srv.State().CSIVolumeRegister(index, []*structs.CSIVolume{vol}) + err = srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) require.NoError(err) // assert we get a watcher; there are no claims so it should immediately stop @@ -254,7 +254,7 @@ func TestVolumeWatch_RegisterDeregister(t *testing.T) { // register a volume without claims vol := mock.CSIVolume(plugin) index++ - err := srv.State().CSIVolumeRegister(index, []*structs.CSIVolume{vol}) + err := srv.State().UpsertCSIVolume(index, []*structs.CSIVolume{vol}) require.NoError(err) // watcher should be started but immediately stopped diff --git a/scheduler/feasible.go b/scheduler/feasible.go index 3c478eb69..2fcf1a325 100644 --- a/scheduler/feasible.go +++ b/scheduler/feasible.go @@ -318,14 +318,7 @@ func (c *CSIVolumeChecker) isFeasible(n *structs.Node) (bool, string) { // volume MUST be accessible from at least one of the // requisite topologies." 
if len(vol.Topologies) > 0 { - var ok bool - for _, requiredTopo := range vol.Topologies { - if requiredTopo.Equal(plugin.NodeInfo.AccessibleTopology) { - ok = true - break - } - } - if !ok { + if !plugin.NodeInfo.AccessibleTopology.MatchFound(vol.Topologies) { return false, FilterConstraintsCSIPluginTopology } } diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index 26e2e7907..1689fd415 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -342,7 +342,7 @@ func TestCSIVolumeChecker(t *testing.T) { {Segments: map[string]string{"rack": "R1"}}, {Segments: map[string]string{"rack": "R2"}}, } - err := state.CSIVolumeRegister(index, []*structs.CSIVolume{vol}) + err := state.UpsertCSIVolume(index, []*structs.CSIVolume{vol}) require.NoError(t, err) index++ @@ -353,14 +353,14 @@ func TestCSIVolumeChecker(t *testing.T) { vol2.Namespace = structs.DefaultNamespace vol2.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter vol2.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem - err = state.CSIVolumeRegister(index, []*structs.CSIVolume{vol2}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol2}) require.NoError(t, err) index++ vid3 := "volume-id[0]" vol3 := vol.Copy() vol3.ID = vid3 - err = state.CSIVolumeRegister(index, []*structs.CSIVolume{vol3}) + err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol3}) require.NoError(t, err) index++ diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index ecb2784c6..713c6a94e 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -6133,7 +6133,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { // once its been fixed shared.AccessMode = structs.CSIVolumeAccessModeMultiNodeReader - require.NoError(h.State.CSIVolumeRegister( + require.NoError(h.State.UpsertCSIVolume( h.NextIndex(), []*structs.CSIVolume{shared, vol0, vol1, vol2})) // Create a job that uses both @@ -6226,7 +6226,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { vol4.ID = "volume-unique[3]" vol5 := vol0.Copy() vol5.ID = "volume-unique[4]" - require.NoError(h.State.CSIVolumeRegister( + require.NoError(h.State.UpsertCSIVolume( h.NextIndex(), []*structs.CSIVolume{vol4, vol5})) // Process again with failure fixed. It should create a new plan diff --git a/scheduler/stack_test.go b/scheduler/stack_test.go index 66ff92840..2f36e0014 100644 --- a/scheduler/stack_test.go +++ b/scheduler/stack_test.go @@ -245,7 +245,7 @@ func TestServiceStack_Select_CSI(t *testing.T) { v.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem v.PluginID = "bar" - err := state.CSIVolumeRegister(999, []*structs.CSIVolume{v}) + err := state.UpsertCSIVolume(999, []*structs.CSIVolume{v}) require.NoError(t, err) // Create a node with healthy fingerprints for both controller and node plugins From bc40222e3e0e3bb36c62ce9c47bb8d7f5679cd2c Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 7 Mar 2022 12:19:28 -0500 Subject: [PATCH 48/89] csi: add pagination args to `volume snapshot list` (#12193) The snapshot list API supports pagination as part of the CSI specification, but we didn't have it plumbed through to the command line. 
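For reference, the same paginated listing can be driven straight from the Go `api` package. Below is a minimal sketch, assuming a reachable agent; the plugin ID and page size are illustrative, and the EOF handling mirrors the CLI loop this change reworks:

```go
package main

import (
	"errors"
	"fmt"
	"io"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// DefaultConfig honors NOMAD_ADDR and friends; assumes a reachable agent.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	req := &api.CSISnapshotListRequest{
		PluginID:     "aws-ebs0", // illustrative plugin ID
		QueryOptions: api.QueryOptions{PerPage: 10},
	}

	for {
		resp, _, err := client.CSIVolumes().ListSnapshotsOpts(req)
		// Several plugins return io.EOF at the end of the listing rather
		// than an empty page, so treat EOF as a normal stop condition.
		if err != nil && !errors.Is(err, io.EOF) {
			panic(err)
		}
		if resp == nil || len(resp.Snapshots) == 0 {
			return
		}
		for _, snap := range resp.Snapshots {
			fmt.Println(snap.ID)
		}
		if resp.NextToken == "" {
			return
		}
		req.NextToken = resp.NextToken // QueryOptions is embedded in the request
	}
}
```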
--- .changelog/12193.txt | 3 + command/volume_snapshot_list.go | 58 +++++++++++-------- e2e/csi/ebs.go | 6 +- .../docs/commands/volume/snapshot-list.mdx | 2 + 4 files changed, 45 insertions(+), 24 deletions(-) create mode 100644 .changelog/12193.txt diff --git a/.changelog/12193.txt b/.changelog/12193.txt new file mode 100644 index 000000000..b939c161d --- /dev/null +++ b/.changelog/12193.txt @@ -0,0 +1,3 @@ +```release-note:improvement +csi: Add pagination parameters to `volume snapshot list` command +``` diff --git a/command/volume_snapshot_list.go b/command/volume_snapshot_list.go index 848552118..814081aaa 100644 --- a/command/volume_snapshot_list.go +++ b/command/volume_snapshot_list.go @@ -3,6 +3,7 @@ package command import ( "fmt" "io" + "os" "sort" "strings" @@ -41,6 +42,12 @@ List Options: -secret Secrets to pass to the plugin to list snapshots. Accepts multiple flags in the form -secret key=value + + -per-page + How many results to show per page. Defaults to 30. + + -page-token + Where to start pagination. ` return strings.TrimSpace(helpText) } @@ -75,12 +82,16 @@ func (c *VolumeSnapshotListCommand) Run(args []string) int { var pluginID string var verbose bool var secretsArgs flaghelper.StringFlag + var perPage int + var pageToken string flags := c.Meta.FlagSet(c.Name(), FlagSetClient) flags.Usage = func() { c.Ui.Output(c.Help()) } flags.StringVar(&pluginID, "plugin", "", "") flags.BoolVar(&verbose, "verbose", false, "") flags.Var(&secretsArgs, "secret", "secrets for snapshot, ex. -secret key=value") + flags.IntVar(&perPage, "per-page", 30, "") + flags.StringVar(&pageToken, "page-token", "", "") if err := flags.Parse(args); err != nil { c.Ui.Error(fmt.Sprintf("Error parsing arguments %s", err)) @@ -132,32 +143,33 @@ func (c *VolumeSnapshotListCommand) Run(args []string) int { } req := &api.CSISnapshotListRequest{ - PluginID: pluginID, - Secrets: secrets, - QueryOptions: api.QueryOptions{PerPage: 30}, + PluginID: pluginID, + Secrets: secrets, + QueryOptions: api.QueryOptions{ + PerPage: int32(perPage), + NextToken: pageToken, + Params: map[string]string{}, + }, } - for { - resp, _, err := client.CSIVolumes().ListSnapshotsOpts(req) - if err != nil && !errors.Is(err, io.EOF) { - c.Ui.Error(fmt.Sprintf( - "Error querying CSI external snapshots for plugin %q: %s", pluginID, err)) - return 1 - } - if resp == nil || len(resp.Snapshots) == 0 { - // several plugins return EOF once you hit the end of the page, - // rather than an empty list - break - } + resp, _, err := client.CSIVolumes().ListSnapshotsOpts(req) + if err != nil && !errors.Is(err, io.EOF) { + c.Ui.Error(fmt.Sprintf( + "Error querying CSI external snapshots for plugin %q: %s", pluginID, err)) + return 1 + } + if resp == nil || len(resp.Snapshots) == 0 { + // several plugins return EOF once you hit the end of the page, + // rather than an empty list + return 0 + } - c.Ui.Output(csiFormatSnapshots(resp.Snapshots, verbose)) - req.NextToken = resp.NextToken - if req.NextToken == "" { - break - } - // we can't know the shape of arbitrarily-sized lists of snapshots, - // so break after each page - c.Ui.Output("...") + c.Ui.Output(csiFormatSnapshots(resp.Snapshots, verbose)) + + if resp.NextToken != "" { + c.Ui.Output(fmt.Sprintf(` +Results have been paginated. 
To get the next page run: +%s -page-token %s`, argsWithoutPageToken(os.Args), resp.NextToken)) } return 0 diff --git a/e2e/csi/ebs.go b/e2e/csi/ebs.go index 880e064f9..67184ef22 100644 --- a/e2e/csi/ebs.go +++ b/e2e/csi/ebs.go @@ -176,7 +176,11 @@ func (tc *CSIControllerPluginEBSTest) TestSnapshot(f *framework.F) { f.NoError(err, fmt.Sprintf("could not parse output:\n%v", out)) f.Len(snaps, 1, fmt.Sprintf("could not parse output:\n%v", out)) - out, err = e2e.Command("nomad", "volume", "snapshot", "list", "-plugin", ebsPluginID) + // the snapshot we're looking for should be the first one because + // we just created it, but give us some breathing room to allow + // for concurrent test runs + out, err = e2e.Command("nomad", "volume", "snapshot", "list", + "-plugin", ebsPluginID, "-per-page", "10") requireNoErrorElseDump(f, err, "could not list volume snapshots", tc.pluginJobIDs) f.Contains(out, snaps[0]["ID"], fmt.Sprintf("volume snapshot list did not include expected snapshot:\n%v", out)) diff --git a/website/content/docs/commands/volume/snapshot-list.mdx b/website/content/docs/commands/volume/snapshot-list.mdx index 0135cf967..747de707f 100644 --- a/website/content/docs/commands/volume/snapshot-list.mdx +++ b/website/content/docs/commands/volume/snapshot-list.mdx @@ -36,6 +36,8 @@ Nomad. matching plugins will be displayed. - `-secret`: Secrets to pass to the plugin to list snapshots. Accepts multiple flags in the form `-secret key=value` +- `-per-page`: How many results to show per page. +- `-page-token`: Where to start pagination. When ACLs are enabled, this command requires a token with the `csi-list-volumes` capability for the plugin's namespace. From d83ea30ff9afd917933959f2b25528ccb8c099f7 Mon Sep 17 00:00:00 2001 From: Ignacio Torres Masdeu Date: Mon, 7 Mar 2022 19:55:57 +0100 Subject: [PATCH 49/89] docs: fix examples for set_contains_all and set_contains_any (#12093) --- website/content/docs/job-specification/affinity.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/content/docs/job-specification/affinity.mdx b/website/content/docs/job-specification/affinity.mdx index 2eac4bf8b..2ea425ef8 100644 --- a/website/content/docs/job-specification/affinity.mdx +++ b/website/content/docs/job-specification/affinity.mdx @@ -132,7 +132,7 @@ affinity { ```hcl affinity { attribute = "..." - operator = "set_contains" + operator = "set_contains_all" value = "a,b,c" weight = 50 } @@ -147,7 +147,7 @@ affinity { ```hcl affinity { attribute = "..." - operator = "set_contains" + operator = "set_contains_any" value = "a,b,c" weight = 50 } From 6707062b0d04cdec686c68ae3f1195751c46a322 Mon Sep 17 00:00:00 2001 From: Merlin Scholz <21988035+ruhrscholz@users.noreply.github.com> Date: Tue, 8 Mar 2022 15:49:29 +0100 Subject: [PATCH 50/89] docs: elaborate on networking issues with firewalld (#12214) --- website/content/docs/drivers/docker.mdx | 7 +++++++ website/content/docs/job-specification/network.mdx | 13 +++++++++++++ 2 files changed, 20 insertions(+) diff --git a/website/content/docs/drivers/docker.mdx b/website/content/docs/drivers/docker.mdx index f84e97ed6..926cc2dc8 100644 --- a/website/content/docs/drivers/docker.mdx +++ b/website/content/docs/drivers/docker.mdx @@ -259,6 +259,12 @@ config { the task group. This will also prevent [Connect]-enabled tasks from reaching the Envoy sidecar proxy. 
+  If you are in the process of migrating from the default Docker network to
+  group-wide bridge networking, you may encounter issues preventing your
+  containers from reaching networks outside of the bridge interface on systems
+  with firewalld enabled. This behavior is often caused by the CNI plugin not
+  registering the group network as trusted and can be resolved as described in
+  the [network stanza] documentation.
+
 - `pid_mode` - (Optional) `host` or not set (default). Set to `host` to share
   the PID namespace with the host. Note that this also requires the Nomad agent
   to be configured to allow privileged containers.
@@ -1173,4 +1179,5 @@ Windows is relatively new and rapidly evolving you may want to consult the
 [allow_caps]: /docs/drivers/docker#allow_caps
 [Connect]: /docs/job-specification/connect
 [`bridge`]: docs/job-specification/network#bridge
+[network stanza]: /docs/job-specification/network#bridge-mode
 [`pids_limit`]: /docs/drivers/docker#pids_limit
diff --git a/website/content/docs/job-specification/network.mdx b/website/content/docs/job-specification/network.mdx
index c107e8166..5c6613e55 100644
--- a/website/content/docs/job-specification/network.mdx
+++ b/website/content/docs/job-specification/network.mdx
@@ -218,6 +218,19 @@ network {
 }
 ```
 
+Using bridge mode can result in failing outbound network requests on hosts that
+have [firewalld](https://firewalld.org) enabled. This includes most RHEL-based
+Linux distributions like CentOS, Rocky Linux, or Oracle Linux. One way to allow
+network requests from Nomad jobs through firewalld is to mark the `nomad`
+bridge interface as trusted.
+
+```shell-session
+$ sudo firewall-cmd --zone=trusted --add-interface=nomad
+$ sudo firewall-cmd --zone=trusted --add-interface=nomad --permanent
+```
+
+It is necessary to restart the affected jobs afterwards for them to be able to
+access the network. Further details can be found in Docker's documentation
+under [Docker and iptables](https://docs.docker.com/network/iptables/#integration-with-firewalld).
+
 ### DNS
 
 The following example configures the allocation to use Google's DNS resolvers 8.8.8.8 and 8.8.4.4.
From a9d64b8e3ead16abc6441061abfe857219c8a8a2 Mon Sep 17 00:00:00 2001
From: Tim Gross
Date: Tue, 8 Mar 2022 11:59:20 -0500
Subject: [PATCH 51/89] docs: add note about docker DNS config when using
 bridge mode (#12229)

The Docker DNS configuration options are not compatible with a
group-level network in `bridge` mode. Warn users about this in the
Docker task configuration docs.
---
 website/content/docs/drivers/docker.mdx | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/website/content/docs/drivers/docker.mdx b/website/content/docs/drivers/docker.mdx
index 926cc2dc8..ce3fbb127 100644
--- a/website/content/docs/drivers/docker.mdx
+++ b/website/content/docs/drivers/docker.mdx
@@ -100,13 +100,21 @@ config {
 }
 ```
 
-- `dns_search_domains` - (Optional) A list of DNS search domains for the container
-  to use.
+- `dns_search_domains` - (Optional) A list of DNS search domains for
+  the container to use. If you are using bridge networking mode with a
+  `network` block in the task group, you must set all DNS options in
+  the `network.dns` block instead.
 
-- `dns_options` - (Optional) A list of DNS options for the container to use.
+- `dns_options` - (Optional) A list of DNS options for the container
+  to use. If you are using bridge networking mode with a `network`
+  block in the task group, you must set all DNS options in the
+  `network.dns` block instead.
-- `dns_servers` - (Optional) A list of DNS servers for the container to use
-  (e.g. ["8.8.8.8", "8.8.4.4"]). Requires Docker v1.10 or greater.
+- `dns_servers` - (Optional) A list of DNS servers for the container
+  to use (e.g. ["8.8.8.8", "8.8.4.4"]). Requires Docker v1.10 or
+  greater. If you are using bridge networking mode with a `network`
+  block in the task group, you must set all DNS options in the
+  `network.dns` block instead.
 
 - `entrypoint` - (Optional) A string list overriding the image's entrypoint.
 
@@ -257,7 +265,8 @@ config {
   the group `network.mode = "bridge"` you should not set the Docker config
   `network_mode`, or the container will be unable to reach other containers in
   the task group. This will also prevent [Connect]-enabled tasks from reaching
-  the Envoy sidecar proxy.
+  the Envoy sidecar proxy. You must also set any DNS options in the `network.dns`
+  block and not in the task configuration.
 
   If you are in the process of migrating from the default Docker network to
   group-wide bridge networking, you may encounter issues preventing your
From fe6cbbf078ab59d48117243017842781b9d81823 Mon Sep 17 00:00:00 2001
From: Michael Klein
Date: Tue, 8 Mar 2022 18:28:36 +0100
Subject: [PATCH 52/89] Upgrade Ember and friends 3.28 (#12215)

* chore: upgrade forward compatible packages
* chore: v3.20.2...v3.24.0
* chore: silence string prototype extension deprecation
* refact: don't test clicking disabled button job-list

Recent test-helper upgrades will guard against clicking disabled buttons as this is not something that real users can do. We need to change our tests accordingly.

* fix: await async test helper `expectError`

We have to await this async test function, otherwise the test's rendering context will be torn down before we run assertions against it.

* fix: don't try to click disabled two-step-button

Recent test-helper updates prohibit clicking disabled buttons. We need to adapt the tests accordingly.

* fix: recommendation-accordion

Use up-to-date semantics for handling list-accordion closing in recommendation-accordion.

* fix: toggling recommendation-accordion toggle

* fix: simple-unless linting error application.hbs

There's no reason to use unless here - we can use if instead.

* fix: no-quoteless-attributes recommendation-accordion
* fix: no-quoteless-attributes recommendation-chart
* fix: allow `unless` - global-header.hbs

This is a valid use of unless in our opinion.

* fix: allow unless in job-diff

This is not a great use for unless but we don't want to change this behavior atm.

* fix: no-attrs-in-components list-pager

There is no need to use this.attrs in classic components. When we convert to Glimmer we will use `@` instead.

* fix: simple-unless job/definition

We can convert to a simple if here.

* fix: allow inline-styles stats-box component

To make the linter happy.

* fix: disable no-action and no-invalid-interactive

Will be addressed in follow-up PRs.

* chore: update ember-classic-decorator to latest
* chore: upgrade ember-can to latest
* chore: upgrade ember-composable-helpers to latest
* chore: upgrade ember-concurrency
* fix: recomputation deprecation `Trigger`

Schedule `do` on the actions queue to work around a recomputation deprecation when triggering Trigger on `did-insert`.
* chore: upgrade ember-cli-string-helpers
* chore: upgrade ember-copy
* chore: upgrade ember-data-model-fragments
* chore: upgrade ember-deprecation-workflow
* chore: upgrade ember-inline-svg
* chore: upgrade ember-modifier
* chore: upgrade ember-truth-helpers
* chore: upgrade ember-moment & ember-cli-moment-shim
* chore: upgrade ember-power-select
* chore: upgrade ember-responsive
* chore: upgrade ember-sinon
* chore: upgrade ember-cli-mirage

For now we will stay on 2.2 - upgrades > 2.3 break the build.

* chore: upgrade 3.24.0 to 3.28.5
* fix: add missing classic decorators on adapters
* fix: add missing classic decorators to serializers
* fix: don't reopen Ember.Object anymore
* fix: remove unused useNativeEvents

ember-cli-page-object doesn't provide this method anymore.

* fix: add missing attributeBindings for test-selectors

ember-test-selectors doesn't provide automatic bindings for data-test-* attributes anymore.

* fix: classic decorator for application serializer test
* fix: remove `removeContext` from tests

It is unneeded, and ember-cli-page-object doesn't provide this method anymore.

* fix: remove deprecated `run.*`-invocations
* fix: `collapseWhitespace` in optimize test
* fix: make sure to load async relationship before access
* fix: dependent keys for relationship computeds

We need to add `*.isFulfilled` as dependent keys for computeds that access async relationships.

* fix: `computed.read`-invocations use `read` instead
* chore: prettify templates
* fix: use map instead of mapBy ember-cli-page-object

mapBy doesn't work with the updated ember-cli-page-object anymore.

* fix: remove remaining deprecated `run.*`-calls
* chore: add more deprecations to deprecation-workflow
* fix: `implicit-injection`-deprecation

All routes that add watchers need to inject the store service, as the store service is used internally by the watchers.

* fix: more implicit injection deprecations
* chore: silence implicit-injection deprecation

We can tackle the deprecation when we find the time.
* fix: new linting errors after upgrade * fix: remove merge conflicts prettierignore * chore: upgrade to run node 12.22 when building binaries --- scripts/vagrant-linux-unpriv-ui.sh | 4 +- ui/.eslintignore | 2 + ui/.eslintrc.js | 24 +- ui/.template-lintrc.js | 34 +- ui/README.md | 33 +- ui/app/adapters/agent.js | 2 + ui/app/adapters/allocation.js | 2 + ui/app/adapters/deployment.js | 2 + ui/app/adapters/evaluation.js | 2 + ui/app/adapters/job-scale.js | 2 + ui/app/adapters/job-summary.js | 2 + ui/app/adapters/job-version.js | 2 + ui/app/adapters/job.js | 2 + ui/app/adapters/namespace.js | 2 + ui/app/adapters/node.js | 2 + ui/app/adapters/plugin.js | 2 + ui/app/adapters/policy.js | 2 + ui/app/adapters/recommendation-summary.js | 2 + ui/app/adapters/token.js | 2 + ui/app/adapters/volume.js | 2 + ui/app/adapters/watchable.js | 2 + ui/app/components/allocation-row.js | 15 +- ui/app/components/allocation-status-bar.js | 4 + ui/app/components/breadcrumbs/default.hbs | 6 +- ui/app/components/children-status-bar.js | 2 + .../das/recommendation-accordion.hbs | 9 +- ui/app/components/das/recommendation-card.hbs | 74 +- ui/app/components/das/recommendation-card.js | 9 +- .../components/das/recommendation-chart.hbs | 37 +- ui/app/components/das/recommendation-row.hbs | 15 +- ui/app/components/distribution-bar.js | 4 +- ui/app/components/flex-masonry.js | 4 +- ui/app/components/fs/breadcrumbs.js | 7 +- ui/app/components/fs/file.js | 3 +- ui/app/components/gauge-chart.js | 4 +- ui/app/components/global-header.js | 2 + ui/app/components/global-search/control.js | 7 +- ui/app/components/image-file.js | 7 +- ui/app/components/job-client-status-bar.js | 2 + ui/app/components/job-editor.js | 10 +- ui/app/components/job-row.js | 7 +- ui/app/components/line-chart.js | 8 +- .../list-accordion/accordion-head.js | 7 +- ui/app/components/list-table.js | 2 +- ui/app/components/multi-select-dropdown.js | 4 +- ui/app/components/plugin-allocation-row.js | 5 + ui/app/components/popover-menu.js | 4 +- ui/app/components/streaming-file.js | 17 +- ui/app/components/task-row.js | 7 +- ui/app/components/toggle.js | 2 + ui/app/components/topo-viz.js | 4 +- ui/app/components/trigger.js | 5 +- .../allocations/allocation/index.js | 1 + ui/app/controllers/application.js | 7 +- ui/app/controllers/jobs/job/clients.js | 3 + ui/app/mixins/window-resizable.js | 4 +- ui/app/models/task-group-scale.js | 4 +- ui/app/models/task-group.js | 2 +- ui/app/routes/clients/index.js | 3 + ui/app/routes/csi/plugins/index.js | 3 + .../routes/csi/plugins/plugin/allocations.js | 3 + ui/app/routes/csi/plugins/plugin/index.js | 3 + ui/app/routes/jobs/job/allocations.js | 3 + ui/app/routes/jobs/job/clients.js | 1 + ui/app/routes/jobs/job/deployments.js | 3 + ui/app/routes/jobs/job/evaluations.js | 3 + ui/app/routes/jobs/job/index.js | 1 + ui/app/routes/jobs/job/task-group.js | 3 + ui/app/routes/jobs/job/versions.js | 3 + ui/app/routes/optimize.js | 1 + ui/app/serializers/agent.js | 2 + ui/app/serializers/application.js | 2 + ui/app/serializers/drain-strategy.js | 2 + ui/app/serializers/fragment.js | 2 + ui/app/serializers/job-plan.js | 2 + ui/app/serializers/job-scale.js | 2 + ui/app/serializers/job-summary.js | 2 + ui/app/serializers/job-version.js | 2 + ui/app/serializers/job.js | 2 + ui/app/serializers/namespace.js | 2 + ui/app/serializers/network.js | 2 + ui/app/serializers/node-event.js | 2 + ui/app/serializers/plugin.js | 2 + ui/app/serializers/policy.js | 2 + ui/app/serializers/port.js | 2 + ui/app/serializers/reschedule-event.js | 3 + 
 ui/app/serializers/resources.js | 2 +
 ui/app/serializers/scale-event.js | 2 +
 ui/app/serializers/service.js | 2 +
 ui/app/serializers/structured-attributes.js | 2 +
 ui/app/serializers/task-event.js | 2 +
 .../task-group-deployment-summary.js | 2 +
 ui/app/serializers/task-group-scale.js | 2 +
 ui/app/serializers/task-group.js | 2 +
 ui/app/serializers/task-state.js | 2 +
 ui/app/serializers/task.js | 2 +
 ui/app/serializers/token.js | 2 +
 ui/app/serializers/volume.js | 2 +
 ui/app/templates/application.hbs | 43 +-
 ui/app/templates/clients/index.hbs | 63 +-
 .../templates/components/distribution-bar.hbs | 32 +-
 ui/app/templates/components/drain-popover.hbs | 76 +-
 ui/app/templates/components/global-header.hbs | 2 +
 .../job-deployment/deployment-allocations.hbs | 10 +-
 ui/app/templates/components/job-diff.hbs | 93 +-
 ui/app/templates/components/job-editor.hbs | 81 +-
 .../components/job-page/parts/stats-box.hbs | 1 +
 .../templates/components/list-accordion.hbs | 3 +-
 .../list-accordion/accordion-head.hbs | 4 +-
 .../components/list-pagination/list-pager.hbs | 7 +-
 .../components/multi-select-dropdown.hbs | 21 +-
 ui/app/templates/components/popover-menu.hbs | 18 +-
 ui/app/templates/components/stepper-input.hbs | 14 +-
 ui/app/templates/components/toggle.hbs | 6 +-
 ui/app/templates/jobs/job/definition.hbs | 32 +-
 ui/app/templates/optimize.hbs | 56 +-
 ui/app/utils/classes/stream-logger.js | 8 +-
 ui/config/deprecation-workflow.js | 9 +
 ui/config/ember-cli-update.json | 2 +-
 ui/config/environment.js | 2 +-
 ui/ember-cli-build.js | 2 +-
 ui/package.json | 94 +-
 ui/tests/acceptance/clients-list-test.js | 24 +-
 ui/tests/acceptance/jobs-list-test.js | 4 +-
 ui/tests/acceptance/optimize-test.js | 3 +-
 ui/tests/helpers/collapse-whitespace.js | 7 +
 ui/tests/index.html | 23 +-
 .../components/job-page/periodic-test.js | 3 +-
 .../components/job-page/service-test.js | 4 +-
 .../components/multi-select-dropdown-test.js | 50 +-
 .../components/two-step-button-test.js | 8 +-
 ui/tests/pages/components/popover-menu.js | 4 +-
 ui/tests/pages/components/stepper-input.js | 2 +-
 ui/tests/test-helper.js | 7 +-
 ui/tests/unit/adapters/job-test.js | 10 +-
 ui/tests/unit/adapters/volume-test.js | 4 +-
 ui/tests/unit/serializers/application-test.js | 2 +
 ui/yarn.lock | 5588 ++++++++--------
 138 files changed, 3495 insertions(+), 3457 deletions(-)
 create mode 100644 ui/tests/helpers/collapse-whitespace.js

diff --git a/scripts/vagrant-linux-unpriv-ui.sh b/scripts/vagrant-linux-unpriv-ui.sh
index 837fa582d..f6ece798c 100755
--- a/scripts/vagrant-linux-unpriv-ui.sh
+++ b/scripts/vagrant-linux-unpriv-ui.sh
@@ -10,8 +10,8 @@ export NVM_DIR="${HOME}/.nvm"
 # Install Node, Ember CLI, and Phantom for UI development
 # Use exact full version (e.g. 
not 12) for reproducibility purposes -nvm install 12.19.0 -nvm alias default 12.19.0 +nvm install 12.22.10 +nvm alias default 12.22.10 npm install -g ember-cli # Install Yarn for front-end dependency management diff --git a/ui/.eslintignore b/ui/.eslintignore index 4089d9aaf..920dc142c 100644 --- a/ui/.eslintignore +++ b/ui/.eslintignore @@ -15,6 +15,8 @@ mirage/ # misc /coverage/ !.* +.*/ +.eslintcache # ember-try /.node_modules.ember-try/ diff --git a/ui/.eslintrc.js b/ui/.eslintrc.js index 19fd1e1dc..b59f6e21e 100644 --- a/ui/.eslintrc.js +++ b/ui/.eslintrc.js @@ -37,15 +37,16 @@ module.exports = { // node files { files: [ - '.eslintrc.js', - '.prettierrc.js', - '.template-lintrc.js', - 'ember-cli-build.js', - 'testem.js', - 'blueprints/*/index.js', - 'config/**/*.js', - 'lib/*/index.js', - 'server/**/*.js', + './.eslintrc.js', + './.prettierrc.js', + './.template-lintrc.js', + './ember-cli-build.js', + './testem.js', + './blueprints/*/index.js', + './config/**/*.js', + './lib/*/index.js', + './server/**/*.js', + './tests/.eslintrc.js', ], parserOptions: { sourceType: 'script', @@ -73,5 +74,10 @@ module.exports = { }, plugins: ['node'], }, + { + // Test files: + files: ['tests/**/*-test.{js,ts}'], + extends: ['plugin:qunit/recommended'], + }, ], }; diff --git a/ui/.template-lintrc.js b/ui/.template-lintrc.js index fea180ebf..4da3008f3 100644 --- a/ui/.template-lintrc.js +++ b/ui/.template-lintrc.js @@ -3,37 +3,7 @@ module.exports = { extends: 'recommended', rules: { - // should definitely move to template only - // glimmer components for this one - 'no-partial': false, - - // these need to be looked into, but - // may be a bigger change - 'no-invalid-interactive': false, - 'simple-unless': false, - - 'self-closing-void-elements': false, - 'no-unnecessary-concat': false, - 'no-quoteless-attributes': false, - 'no-nested-interactive': false, - - // Only used in list-pager, which can be replaced with - // an angle-bracket component - 'no-attrs-in-components': false, - - // Used in practice with charts. Ideally this would be true - // except for a whitelist of chart files. - 'no-inline-styles': false, - - // not sure we'll ever want these on, - // would be nice but if prettier isn't doing - // it for us, then not sure it's worth it - 'attribute-indentation': false, - 'block-indentation': false, - quotes: false, - - // remove when moving from extending `recommended` to `octane` - 'no-curly-component-invocation': true, - 'no-implicit-this': true, + 'no-action': 'off', + 'no-invalid-interactive': 'off', }, }; diff --git a/ui/README.md b/ui/README.md index e638af4f6..7f5b6fb93 100644 --- a/ui/README.md +++ b/ui/README.md @@ -6,9 +6,9 @@ The official Nomad UI. This is an [ember.js](https://emberjs.com/) project, and you will need the following tools installed on your computer. -* [Node.js v10](https://nodejs.org/) -* [Yarn](https://yarnpkg.com) -* [Ember CLI](https://ember-cli.com/) +- [Node.js v10](https://nodejs.org/) +- [Yarn](https://yarnpkg.com) +- [Ember CLI](https://ember-cli.com/) ## Installation @@ -21,10 +21,10 @@ $ yarn ## Running / Development -UI in development mode defaults to using fake generated data, but you can configure it to proxy a live running nomad process by setting `USE_MIRAGE` environment variable to `false`. First, make sure nomad is running. The UI, in development mode, runs independently from Nomad, so this could be an official release or a dev branch. Likewise, Nomad can be running in server mode or dev mode. 
As long as the API is accessible, the UI will work as expected.
+UI in development mode defaults to using fake generated data, but you can configure it to proxy a live running nomad process by setting the `USE_MIRAGE` environment variable to `false`. First, make sure nomad is running. The UI, in development mode, runs independently from Nomad, so this could be an official release or a dev branch. Likewise, Nomad can be running in server mode or dev mode. As long as the API is accessible, the UI will work as expected.
 
-* `USE_MIRAGE=false ember serve`
-* Visit your app at [http://localhost:4200](http://localhost:4200).
+- `USE_MIRAGE=false ember serve`
+- Visit your app at [http://localhost:4200](http://localhost:4200).
 
 You may need to reference the direct path to `ember`, typically in `./node_modules/.bin/ember`.
 
@@ -38,8 +38,8 @@ All necessary tools for UI development are installed as part of the Vagrantfile.
 That said, development with Vagrant is still possible, but the `ember serve`
 command requires two modifications:
 
-* `--watch polling`: This allows the vm to notice file changes made in the host environment.
-* `--port 4201`: The default port 4200 is not forwarded, since local development is recommended.
+- `--watch polling`: This allows the vm to notice file changes made in the host environment.
+- `--port 4201`: The default port 4200 is not forwarded, since local development is recommended.
 
 This makes the full command for running the UI in development mode in Vagrant:
 
@@ -51,8 +51,8 @@ $ ember serve --watch polling --port 4201
 
 Nomad UI tests can be run independently of Nomad golang tests.
 
-* `ember test` (single run, headless browser)
-* `ember test --server` (watches for changes, runs in a full browser)
+- `ember test` (single run, headless browser)
+- `ember test --server` (watches for changes, runs in a full browser)
 
 You can use `--filter <test name>` to run a targeted set of tests, e.g. `ember test --filter 'allocation detail'` (a combined usage sketch follows the Conventions section below).
 
@@ -60,18 +60,15 @@ In the test environment, the fake data is generated with a random seed. If you w
 ### Linting
 
-Linting should happen automatically in your editor and when committing changes, but it can also be invoked manually.
-
-* `npm run lint:hbs`
-* `npm run lint:js`
-* `npm run lint:js -- --fix`
+- `yarn lint`
+- `yarn lint:fix`
 
 ### Building
 
 Typically `make release` or `make dev-ui` will be the desired build workflow, but in the event that build artifacts need to be inspected, `ember build` will output compiled files in `ui/dist`.
 
-* `ember build` (development)
-* `ember build --environment production` (production)
+- `ember build` (development)
+- `ember build --environment production` (production)
 
 ### Releasing
 
@@ -79,7 +76,7 @@ Nomad UI releases are in lockstep with Nomad releases and are integrated into th
 ### Conventions
 
-* UI branches should be prefix with `f-ui-` for feature work and `b-ui-` for bug fixes. This instructs CI to skip running nomad backend tests.
+- UI branches should be prefixed with `f-ui-` for feature work and `b-ui-` for bug fixes. This instructs CI to skip running nomad backend tests.
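Putting the commands above together, a typical local loop against live data looks like this, run in two separate terminals (the filter string is just the example from the testing section, assuming an agent on the default address):

```shell-session
$ USE_MIRAGE=false ember serve
$ ember test --server --filter 'allocation detail'
```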
### Storybook UI Library diff --git a/ui/app/adapters/agent.js b/ui/app/adapters/agent.js index 8b2d24fa8..bd0331dcb 100644 --- a/ui/app/adapters/agent.js +++ b/ui/app/adapters/agent.js @@ -1,5 +1,7 @@ import ApplicationAdapter from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class AgentAdapter extends ApplicationAdapter { pathForType = () => 'agent/members'; diff --git a/ui/app/adapters/allocation.js b/ui/app/adapters/allocation.js index 64c1ef7da..20cb6d4f5 100644 --- a/ui/app/adapters/allocation.js +++ b/ui/app/adapters/allocation.js @@ -1,6 +1,8 @@ import Watchable from './watchable'; import addToPath from 'nomad-ui/utils/add-to-path'; +import classic from 'ember-classic-decorator'; +@classic export default class AllocationAdapter extends Watchable { stop = adapterAction('/stop'); diff --git a/ui/app/adapters/deployment.js b/ui/app/adapters/deployment.js index 8904038d0..604ffec50 100644 --- a/ui/app/adapters/deployment.js +++ b/ui/app/adapters/deployment.js @@ -1,5 +1,7 @@ import Watchable from './watchable'; +import classic from 'ember-classic-decorator'; +@classic export default class DeploymentAdapter extends Watchable { fail(deployment) { const id = deployment.get('id'); diff --git a/ui/app/adapters/evaluation.js b/ui/app/adapters/evaluation.js index 13f0caabb..e9d128721 100644 --- a/ui/app/adapters/evaluation.js +++ b/ui/app/adapters/evaluation.js @@ -1,5 +1,7 @@ import ApplicationAdapter from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class EvaluationAdapter extends ApplicationAdapter { handleResponse(_status, headers) { const result = super.handleResponse(...arguments); diff --git a/ui/app/adapters/job-scale.js b/ui/app/adapters/job-scale.js index f068e69a8..ca134fd67 100644 --- a/ui/app/adapters/job-scale.js +++ b/ui/app/adapters/job-scale.js @@ -1,5 +1,7 @@ import WatchableNamespaceIDs from './watchable-namespace-ids'; +import classic from 'ember-classic-decorator'; +@classic export default class JobScaleAdapter extends WatchableNamespaceIDs { urlForFindRecord(id, type, hash) { return super.urlForFindRecord(id, 'job', hash, 'scale'); diff --git a/ui/app/adapters/job-summary.js b/ui/app/adapters/job-summary.js index 62ce05a5b..805fff764 100644 --- a/ui/app/adapters/job-summary.js +++ b/ui/app/adapters/job-summary.js @@ -1,5 +1,7 @@ import WatchableNamespaceIDs from './watchable-namespace-ids'; +import classic from 'ember-classic-decorator'; +@classic export default class JobSummaryAdapter extends WatchableNamespaceIDs { urlForFindRecord(id, type, hash) { return super.urlForFindRecord(id, 'job', hash, 'summary'); diff --git a/ui/app/adapters/job-version.js b/ui/app/adapters/job-version.js index fb9eba821..b494a52cd 100644 --- a/ui/app/adapters/job-version.js +++ b/ui/app/adapters/job-version.js @@ -1,6 +1,8 @@ import ApplicationAdapter from './application'; import addToPath from 'nomad-ui/utils/add-to-path'; +import classic from 'ember-classic-decorator'; +@classic export default class JobVersionAdapter extends ApplicationAdapter { revertTo(jobVersion) { const jobAdapter = this.store.adapterFor('job'); diff --git a/ui/app/adapters/job.js b/ui/app/adapters/job.js index 94995fd8e..8545b4aa3 100644 --- a/ui/app/adapters/job.js +++ b/ui/app/adapters/job.js @@ -1,7 +1,9 @@ import WatchableNamespaceIDs from './watchable-namespace-ids'; import addToPath from 'nomad-ui/utils/add-to-path'; import { base64EncodeString } from 'nomad-ui/utils/encode'; +import classic from 'ember-classic-decorator'; 
+@classic export default class JobAdapter extends WatchableNamespaceIDs { relationshipFallbackLinks = { summary: '/summary', diff --git a/ui/app/adapters/namespace.js b/ui/app/adapters/namespace.js index 8e91ea90c..b690839d6 100644 --- a/ui/app/adapters/namespace.js +++ b/ui/app/adapters/namespace.js @@ -1,6 +1,8 @@ import Watchable from './watchable'; import codesForError from '../utils/codes-for-error'; +import classic from 'ember-classic-decorator'; +@classic export default class NamespaceAdapter extends Watchable { findRecord(store, modelClass, id) { return super.findRecord(...arguments).catch((error) => { diff --git a/ui/app/adapters/node.js b/ui/app/adapters/node.js index fdb085a2c..1941ffa59 100644 --- a/ui/app/adapters/node.js +++ b/ui/app/adapters/node.js @@ -1,6 +1,8 @@ import Watchable from './watchable'; import addToPath from 'nomad-ui/utils/add-to-path'; +import classic from 'ember-classic-decorator'; +@classic export default class NodeAdapter extends Watchable { setEligible(node) { return this.setEligibility(node, true); diff --git a/ui/app/adapters/plugin.js b/ui/app/adapters/plugin.js index 80096f31b..f78a4b2a6 100644 --- a/ui/app/adapters/plugin.js +++ b/ui/app/adapters/plugin.js @@ -1,5 +1,7 @@ import Watchable from './watchable'; +import classic from 'ember-classic-decorator'; +@classic export default class PluginAdapter extends Watchable { queryParamsToAttrs = { type: 'type', diff --git a/ui/app/adapters/policy.js b/ui/app/adapters/policy.js index 79e68bcc4..85be4fbad 100644 --- a/ui/app/adapters/policy.js +++ b/ui/app/adapters/policy.js @@ -1,5 +1,7 @@ import { default as ApplicationAdapter, namespace } from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class PolicyAdapter extends ApplicationAdapter { namespace = namespace + '/acl'; } diff --git a/ui/app/adapters/recommendation-summary.js b/ui/app/adapters/recommendation-summary.js index 71b807749..540209d58 100644 --- a/ui/app/adapters/recommendation-summary.js +++ b/ui/app/adapters/recommendation-summary.js @@ -1,5 +1,7 @@ import ApplicationAdapter from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class RecommendationSummaryAdapter extends ApplicationAdapter { pathForType = () => 'recommendations'; diff --git a/ui/app/adapters/token.js b/ui/app/adapters/token.js index 253673534..81018aeb9 100644 --- a/ui/app/adapters/token.js +++ b/ui/app/adapters/token.js @@ -1,7 +1,9 @@ import { inject as service } from '@ember/service'; import { default as ApplicationAdapter, namespace } from './application'; import OTTExchangeError from '../utils/ott-exchange-error'; +import classic from 'ember-classic-decorator'; +@classic export default class TokenAdapter extends ApplicationAdapter { @service store; diff --git a/ui/app/adapters/volume.js b/ui/app/adapters/volume.js index 594f929a1..8b1a9212a 100644 --- a/ui/app/adapters/volume.js +++ b/ui/app/adapters/volume.js @@ -1,5 +1,7 @@ import WatchableNamespaceIDs from './watchable-namespace-ids'; +import classic from 'ember-classic-decorator'; +@classic export default class VolumeAdapter extends WatchableNamespaceIDs { queryParamsToAttrs = { type: 'type', diff --git a/ui/app/adapters/watchable.js b/ui/app/adapters/watchable.js index b8eff658e..05dd103e2 100644 --- a/ui/app/adapters/watchable.js +++ b/ui/app/adapters/watchable.js @@ -5,7 +5,9 @@ import { AbortError } from '@ember-data/adapter/error'; import queryString from 'query-string'; import ApplicationAdapter from './application'; import 
removeRecord from '../utils/remove-record'; +import classic from 'ember-classic-decorator'; +@classic export default class Watchable extends ApplicationAdapter { @service watchList; @service store; diff --git a/ui/app/components/allocation-row.js b/ui/app/components/allocation-row.js index 4eaddb3fd..7d87481c7 100644 --- a/ui/app/components/allocation-row.js +++ b/ui/app/components/allocation-row.js @@ -4,16 +4,25 @@ import Component from '@ember/component'; import { computed } from '@ember/object'; import { computed as overridable } from 'ember-overridable-computed'; import { alias } from '@ember/object/computed'; -import { run } from '@ember/runloop'; +import { scheduleOnce } from '@ember/runloop'; import { task, timeout } from 'ember-concurrency'; import { lazyClick } from '../helpers/lazy-click'; import AllocationStatsTracker from 'nomad-ui/utils/classes/allocation-stats-tracker'; import classic from 'ember-classic-decorator'; -import { classNames, tagName } from '@ember-decorators/component'; +import { + classNames, + tagName, + attributeBindings, +} from '@ember-decorators/component'; @classic @tagName('tr') @classNames('allocation-row', 'is-interactive') +@attributeBindings( + 'data-test-allocation', + 'data-test-write-allocation', + 'data-test-read-allocation' +) export default class AllocationRow extends Component { @service store; @service token; @@ -56,7 +65,7 @@ export default class AllocationRow extends Component { const allocation = this.allocation; if (allocation) { - run.scheduleOnce('afterRender', this, qualifyAllocation); + scheduleOnce('afterRender', this, qualifyAllocation); } else { this.fetchStats.cancelAll(); } diff --git a/ui/app/components/allocation-status-bar.js b/ui/app/components/allocation-status-bar.js index dc65be516..28e7a01ea 100644 --- a/ui/app/components/allocation-status-bar.js +++ b/ui/app/components/allocation-status-bar.js @@ -1,6 +1,10 @@ import { computed } from '@ember/object'; import DistributionBar from './distribution-bar'; +import { attributeBindings } from '@ember-decorators/component'; +import classic from 'ember-classic-decorator'; +@classic +@attributeBindings('data-test-allocation-status-bar') export default class AllocationStatusBar extends DistributionBar { layoutName = 'components/distribution-bar'; diff --git a/ui/app/components/breadcrumbs/default.hbs b/ui/app/components/breadcrumbs/default.hbs index baf06cc78..3117a3b54 100644 --- a/ui/app/components/breadcrumbs/default.hbs +++ b/ui/app/components/breadcrumbs/default.hbs @@ -1,5 +1,9 @@ +{{! template-lint-disable no-unknown-arguments-for-builtin-components }}
  • - + {{#if @crumb.title}}
    diff --git a/ui/app/components/children-status-bar.js b/ui/app/components/children-status-bar.js index b64cbb351..d0c84895b 100644 --- a/ui/app/components/children-status-bar.js +++ b/ui/app/components/children-status-bar.js @@ -1,8 +1,10 @@ import { computed } from '@ember/object'; import DistributionBar from './distribution-bar'; import classic from 'ember-classic-decorator'; +import { attributeBindings } from '@ember-decorators/component'; @classic +@attributeBindings('data-test-children-status-bar') export default class ChildrenStatusBar extends DistributionBar { layoutName = 'components/distribution-bar'; diff --git a/ui/app/components/das/recommendation-accordion.hbs b/ui/app/components/das/recommendation-accordion.hbs index 08f304853..6043d7cae 100644 --- a/ui/app/components/das/recommendation-accordion.hbs +++ b/ui/app/components/das/recommendation-accordion.hbs @@ -1,18 +1,19 @@ {{#if this.show}} + as |a| + > {{#if a.isOpen}}
    {{else}} diff --git a/ui/app/components/das/recommendation-card.hbs b/ui/app/components/das/recommendation-card.hbs index 51ecdd461..caab7aa83 100644 --- a/ui/app/components/das/recommendation-card.hbs +++ b/ui/app/components/das/recommendation-card.hbs @@ -1,12 +1,17 @@ +{{! template-lint-disable no-duplicate-landmark-elements}} {{#if this.interstitialComponent}}
    - {{component (concat 'das/' this.interstitialComponent) proceed=this.proceedPromiseResolve error=this.error}} + {{component + (concat "das/" this.interstitialComponent) + proceed=this.proceedPromiseResolve + error=this.error + }}
    {{else if @summary.taskGroup}}
    @@ -14,11 +19,18 @@

    - {{@summary.taskGroup.job.name}} - {{@summary.taskGroup.name}} + {{@summary.taskGroup.job.name}} + {{@summary.taskGroup.name}}

    - Namespace: {{@summary.jobNamespace}} + Namespace: + {{@summary.jobNamespace}}

    @@ -45,10 +57,16 @@
    CPU
    @@ -56,10 +74,16 @@
    Mem
    @@ -87,13 +111,27 @@
    - - + +
    - + {{@summary.taskGroup.job.name}} / {{@summary.taskGroup.name}} @@ -104,7 +142,8 @@ data-test-accordion-toggle class="button is-light is-compact pull-right accordion-toggle" {{on "click" @onCollapse}} - type="button"> + type="button" + > Collapse {{/if}} @@ -131,7 +170,10 @@ @currentValue={{recommendation.currentValue}} @recommendedValue={{recommendation.value}} @stats={{recommendation.stats}} - @disabled={{includes recommendation @summary.excludedRecommendations}} + @disabled={{includes + recommendation + @summary.excludedRecommendations + }} />
  • {{/each}} diff --git a/ui/app/components/das/recommendation-card.js b/ui/app/components/das/recommendation-card.js index 7ab471631..5004c0920 100644 --- a/ui/app/components/das/recommendation-card.js +++ b/ui/app/components/das/recommendation-card.js @@ -194,11 +194,12 @@ export default class DasRecommendationCardComponent extends Component { } @action - dismiss() { + async dismiss() { this.storeCardHeight(); - this.args.summary.excludedRecommendations.pushObjects( - this.args.summary.recommendations - ); + const recommendations = await this.args.summary.recommendations; + + this.args.summary.excludedRecommendations.pushObjects(recommendations); + this.args.summary .save() .then( diff --git a/ui/app/components/das/recommendation-chart.hbs b/ui/app/components/das/recommendation-chart.hbs index 154baea0d..918135694 100644 --- a/ui/app/components/das/recommendation-chart.hbs +++ b/ui/app/components/das/recommendation-chart.hbs @@ -31,7 +31,13 @@ {{#if this.center}} - + {{/if}} {{#each this.statsShapes as |shapes|}} @@ -55,7 +61,7 @@ height={{shapes.rect.height}} {{on "mouseenter" (fn this.setActiveLegendRow shapes.text.label)}} {{on "mouseleave" this.unsetActiveLegendRow}} - /> + > + > {{/each}} {{#unless @disabled}} @@ -77,24 +83,24 @@ y={{this.deltaRect.y}} width={{this.deltaRect.width}} height={{this.deltaRect.height}} - /> + > + > + > + > + -
    +
      {{#each this.sortedStats as |stat|}}
    1. diff --git a/ui/app/components/das/recommendation-row.hbs b/ui/app/components/das/recommendation-row.hbs index 1590055fb..3deaf8ec5 100644 --- a/ui/app/components/das/recommendation-row.hbs +++ b/ui/app/components/das/recommendation-row.hbs @@ -1,5 +1,5 @@ {{#if @summary.taskGroup.allocations.length}} - {{!-- Prevent storing aggregate diffs until allocation count is known --}} + {{! Prevent storing aggregate diffs until allocation count is known }}
      - {{@summary.taskGroup.job.name}} + {{@summary.taskGroup.job.name}} / - {{@summary.taskGroup.name}} + {{@summary.taskGroup.name}}
      -
      - Namespace: {{@summary.jobNamespace}} +
      + Namespace: + {{@summary.jobNamespace}}
      @@ -25,13 +26,13 @@ {{#if this.cpu.delta}} {{this.cpu.signedDiff}} - {{this.cpu.percentDiff}} + {{this.cpu.percentDiff}} {{/if}} {{#if this.memory.delta}} {{this.memory.signedDiff}} - {{this.memory.percentDiff}} + {{this.memory.percentDiff}} {{/if}} diff --git a/ui/app/components/distribution-bar.js b/ui/app/components/distribution-bar.js index a33c48f3e..055ea23d8 100644 --- a/ui/app/components/distribution-bar.js +++ b/ui/app/components/distribution-bar.js @@ -2,7 +2,7 @@ import Component from '@ember/component'; import { computed, set } from '@ember/object'; import { observes } from '@ember-decorators/object'; -import { run } from '@ember/runloop'; +import { run, once } from '@ember/runloop'; import { assign } from '@ember/polyfills'; import { guidFor } from '@ember/object/internals'; import { copy } from 'ember-copy'; @@ -190,6 +190,6 @@ export default class DistributionBar extends Component.extend(WindowResizable) { /* eslint-enable */ windowResizeHandler() { - run.once(this, this.renderChart); + once(this, this.renderChart); } } diff --git a/ui/app/components/flex-masonry.js b/ui/app/components/flex-masonry.js index 29b32501a..4973594a2 100644 --- a/ui/app/components/flex-masonry.js +++ b/ui/app/components/flex-masonry.js @@ -1,6 +1,6 @@ import Component from '@glimmer/component'; import { tracked } from '@glimmer/tracking'; -import { run } from '@ember/runloop'; +import { next } from '@ember/runloop'; import { action } from '@ember/object'; import { minIndex, max } from 'd3-array'; @@ -14,7 +14,7 @@ export default class FlexMasonry extends Component { @action reflow() { - run.next(() => { + next(() => { // There's nothing to do if there is no element if (!this.element) return; diff --git a/ui/app/components/fs/breadcrumbs.js b/ui/app/components/fs/breadcrumbs.js index 819e822b6..23c46066c 100644 --- a/ui/app/components/fs/breadcrumbs.js +++ b/ui/app/components/fs/breadcrumbs.js @@ -1,12 +1,17 @@ import Component from '@ember/component'; import { computed } from '@ember/object'; import { isEmpty } from '@ember/utils'; -import { classNames, tagName } from '@ember-decorators/component'; +import { + classNames, + tagName, + attributeBindings, +} from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; @classic @tagName('nav') @classNames('breadcrumb') +@attributeBindings('data-test-fs-breadcrumbs') export default class Breadcrumbs extends Component { 'data-test-fs-breadcrumbs' = true; diff --git a/ui/app/components/fs/file.js b/ui/app/components/fs/file.js index 1af183478..b06ca3e3b 100644 --- a/ui/app/components/fs/file.js +++ b/ui/app/components/fs/file.js @@ -6,11 +6,12 @@ import { equal, gt } from '@ember/object/computed'; import RSVP from 'rsvp'; import Log from 'nomad-ui/utils/classes/log'; import timeout from 'nomad-ui/utils/timeout'; -import { classNames } from '@ember-decorators/component'; +import { classNames, attributeBindings } from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; @classic @classNames('boxed-section', 'task-log') +@attributeBindings('data-test-file-viewer') export default class File extends Component { @service token; @service system; diff --git a/ui/app/components/gauge-chart.js b/ui/app/components/gauge-chart.js index bbd98bfde..9202080ed 100644 --- a/ui/app/components/gauge-chart.js +++ b/ui/app/components/gauge-chart.js @@ -2,7 +2,7 @@ import Component from '@ember/component'; import { computed } from '@ember/object'; import { assert } from '@ember/debug'; import { guidFor } from 
'@ember/object/internals'; -import { run } from '@ember/runloop'; +import { once } from '@ember/runloop'; import d3Shape from 'd3-shape'; import WindowResizable from 'nomad-ui/mixins/window-resizable'; import { classNames } from '@ember-decorators/component'; @@ -88,6 +88,6 @@ export default class GaugeChart extends Component.extend(WindowResizable) { } windowResizeHandler() { - run.once(this, this.updateDimensions); + once(this, this.updateDimensions); } } diff --git a/ui/app/components/global-header.js b/ui/app/components/global-header.js index 2e89fcb55..a3f3b26bf 100644 --- a/ui/app/components/global-header.js +++ b/ui/app/components/global-header.js @@ -1,8 +1,10 @@ import Component from '@ember/component'; import classic from 'ember-classic-decorator'; import { inject as service } from '@ember/service'; +import { attributeBindings } from '@ember-decorators/component'; @classic +@attributeBindings('data-test-global-header') export default class GlobalHeader extends Component { @service config; @service system; diff --git a/ui/app/components/global-search/control.js b/ui/app/components/global-search/control.js index e68e5c7be..b3d8a9230 100644 --- a/ui/app/components/global-search/control.js +++ b/ui/app/components/global-search/control.js @@ -1,14 +1,15 @@ import Component from '@ember/component'; -import { classNames } from '@ember-decorators/component'; +import { classNames, attributeBindings } from '@ember-decorators/component'; import { task } from 'ember-concurrency'; import { action, set } from '@ember/object'; import { inject as service } from '@ember/service'; -import { debounce, run } from '@ember/runloop'; +import { debounce, next } from '@ember/runloop'; const SLASH_KEY = '/'; const MAXIMUM_RESULTS = 10; @classNames('global-search-container') +@attributeBindings('data-test-search-parent') export default class GlobalSearchControl extends Component { @service router; @service token; @@ -223,7 +224,7 @@ export default class GlobalSearchControl extends Component { @action onCloseEvent(select, event) { if (event.key === 'Escape') { - run.next(() => { + next(() => { this.element.querySelector('.ember-power-select-trigger').blur(); }); } diff --git a/ui/app/components/image-file.js b/ui/app/components/image-file.js index 16ee6733c..bcf7d434d 100644 --- a/ui/app/components/image-file.js +++ b/ui/app/components/image-file.js @@ -1,11 +1,16 @@ import Component from '@ember/component'; import { computed } from '@ember/object'; -import { classNames, tagName } from '@ember-decorators/component'; +import { + classNames, + tagName, + attributeBindings, +} from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; @classic @tagName('figure') @classNames('image-file') +@attributeBindings('data-test-image-file') export default class ImageFile extends Component { 'data-test-image-file' = true; diff --git a/ui/app/components/job-client-status-bar.js b/ui/app/components/job-client-status-bar.js index 9f3690b01..4ebcd8d53 100644 --- a/ui/app/components/job-client-status-bar.js +++ b/ui/app/components/job-client-status-bar.js @@ -1,8 +1,10 @@ import { computed } from '@ember/object'; import DistributionBar from './distribution-bar'; +import { attributeBindings } from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; @classic +@attributeBindings('data-test-job-client-status-bar') export default class JobClientStatusBar extends DistributionBar { layoutName = 'components/distribution-bar'; diff --git a/ui/app/components/job-editor.js 
b/ui/app/components/job-editor.js index 0e9426851..6b9d9da0d 100644 --- a/ui/app/components/job-editor.js +++ b/ui/app/components/job-editor.js @@ -1,13 +1,15 @@ import Component from '@ember/component'; import { assert } from '@ember/debug'; import { inject as service } from '@ember/service'; -import { computed } from '@ember/object'; +import { computed, action } from '@ember/object'; import { task } from 'ember-concurrency'; import messageFromAdapterError from 'nomad-ui/utils/message-from-adapter-error'; import localStorageProperty from 'nomad-ui/utils/properties/local-storage'; +import { attributeBindings } from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; @classic +@attributeBindings('data-test-job-editor') export default class JobEditor extends Component { @service store; @service config; @@ -33,6 +35,12 @@ export default class JobEditor extends Component { this.set('_context', value); } + @action updateCode(value) { + if (!this.job.isDestroying && !this.job.isDestroyed) { + this.job.set('_newDefinition', value); + } + } + _context = null; parseError = null; planError = null; diff --git a/ui/app/components/job-row.js b/ui/app/components/job-row.js index 107d47fc9..8738a43ab 100644 --- a/ui/app/components/job-row.js +++ b/ui/app/components/job-row.js @@ -2,12 +2,17 @@ import Component from '@ember/component'; import { action } from '@ember/object'; import { inject as service } from '@ember/service'; import { lazyClick } from '../helpers/lazy-click'; -import { classNames, tagName } from '@ember-decorators/component'; +import { + classNames, + tagName, + attributeBindings, +} from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; @classic @tagName('tr') @classNames('job-row', 'is-interactive') +@attributeBindings('data-test-job-row') export default class JobRow extends Component { @service router; @service store; diff --git a/ui/app/components/line-chart.js b/ui/app/components/line-chart.js index 8f7bb19f1..c9a119b75 100644 --- a/ui/app/components/line-chart.js +++ b/ui/app/components/line-chart.js @@ -1,7 +1,7 @@ import Component from '@glimmer/component'; import { tracked } from '@glimmer/tracking'; import { action } from '@ember/object'; -import { run } from '@ember/runloop'; +import { schedule, next } from '@ember/runloop'; import d3 from 'd3-selection'; import d3Scale from 'd3-scale'; import d3Axis from 'd3-axis'; @@ -235,7 +235,7 @@ export default class LineChart extends Component { const mouseX = d3.pointer(ev, this)[0]; chart.latestMouseX = mouseX; updateActiveDatum(mouseX); - run.schedule('afterRender', chart, () => (chart.isActive = true)); + schedule('afterRender', chart, () => (chart.isActive = true)); }); canvas.on('mousemove', function (ev) { @@ -245,7 +245,7 @@ export default class LineChart extends Component { }); canvas.on('mouseleave', () => { - run.schedule('afterRender', this, () => (this.isActive = false)); + schedule('afterRender', this, () => (this.isActive = false)); this.activeDatum = null; this.activeData = []; }); @@ -338,7 +338,7 @@ export default class LineChart extends Component { // svg elements this.mountD3Elements(); - run.next(() => { + next(() => { // Since each axis depends on the dimension of the other // axis, the axes themselves are recomputed and need to // be re-rendered. 
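The run-loop hunks above all apply one mechanical migration: Ember 3.27 deprecated calling run-loop functions through the imported `run` namespace, so `run.once`, `run.next`, `run.schedule`, and `run.scheduleOnce` become direct named imports from '@ember/runloop'. A minimal sketch of the before/after shape (the component and method names are illustrative, not taken from a specific Nomad file):

    import Component from '@ember/component';
    // Before: import { run } from '@ember/runloop';
    //         run.once(this, this.renderChart);
    // After:  import the function directly.
    import { once } from '@ember/runloop';

    export default class ExampleChart extends Component {
      windowResizeHandler() {
        // once() still coalesces repeated calls within a single run loop
        // flush; only the import style changes.
        once(this, this.renderChart);
      }

      renderChart() {
        /* draw the chart */
      }
    }

The behavior is identical; the namespaced form simply stops triggering the deprecation warning under the upgraded ember-source.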
diff --git a/ui/app/components/list-accordion/accordion-head.js b/ui/app/components/list-accordion/accordion-head.js index 320092634..c3fd9d0a9 100644 --- a/ui/app/components/list-accordion/accordion-head.js +++ b/ui/app/components/list-accordion/accordion-head.js @@ -1,10 +1,15 @@ import Component from '@ember/component'; -import { classNames, classNameBindings } from '@ember-decorators/component'; +import { + classNames, + classNameBindings, + attributeBindings, +} from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; @classic @classNames('accordion-head') @classNameBindings('isOpen::is-light', 'isExpandable::is-inactive') +@attributeBindings('data-test-accordion-head') export default class AccordionHead extends Component { 'data-test-accordion-head' = true; diff --git a/ui/app/components/list-table.js b/ui/app/components/list-table.js index a5851040f..e02b7fbd6 100644 --- a/ui/app/components/list-table.js +++ b/ui/app/components/list-table.js @@ -11,7 +11,7 @@ export default class ListTable extends Component { @overridable(() => []) source; // Plan for a future with metadata (e.g., isSelected) - @computed('source.[]') + @computed('source.{[],isFulfilled}') get decoratedSource() { return (this.source || []).map((row) => ({ model: row, diff --git a/ui/app/components/multi-select-dropdown.js b/ui/app/components/multi-select-dropdown.js index 692ea7c9f..408069c4f 100644 --- a/ui/app/components/multi-select-dropdown.js +++ b/ui/app/components/multi-select-dropdown.js @@ -1,7 +1,7 @@ import Component from '@ember/component'; import { action } from '@ember/object'; import { computed as overridable } from 'ember-overridable-computed'; -import { run } from '@ember/runloop'; +import { scheduleOnce } from '@ember/runloop'; import { classNames } from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; @@ -33,7 +33,7 @@ export default class MultiSelectDropdown extends Component { super.didReceiveAttrs(); const dropdown = this.dropdown; if (this.isOpen && dropdown) { - run.scheduleOnce('afterRender', this, this.repositionDropdown); + scheduleOnce('afterRender', this, this.repositionDropdown); } } diff --git a/ui/app/components/plugin-allocation-row.js b/ui/app/components/plugin-allocation-row.js index d27bf09fc..352e21869 100644 --- a/ui/app/components/plugin-allocation-row.js +++ b/ui/app/components/plugin-allocation-row.js @@ -1,7 +1,12 @@ import AllocationRow from 'nomad-ui/components/allocation-row'; import classic from 'ember-classic-decorator'; +import { attributeBindings } from '@ember-decorators/component'; @classic +@attributeBindings( + 'data-test-controller-allocation', + 'data-test-node-allocation' +) export default class PluginAllocationRow extends AllocationRow { pluginAllocation = null; allocation = null; diff --git a/ui/app/components/popover-menu.js b/ui/app/components/popover-menu.js index a6498a9aa..52dd2ee2a 100644 --- a/ui/app/components/popover-menu.js +++ b/ui/app/components/popover-menu.js @@ -1,6 +1,6 @@ import Component from '@ember/component'; import { action } from '@ember/object'; -import { run } from '@ember/runloop'; +import { scheduleOnce } from '@ember/runloop'; import { classNames } from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; @@ -35,7 +35,7 @@ export default class PopoverMenu extends Component { super.didReceiveAttrs(); const dropdown = this.dropdown; if (this.isOpen && dropdown) { - run.scheduleOnce('afterRender', this, this.repositionDropdown); + 
scheduleOnce('afterRender', this, this.repositionDropdown); } } diff --git a/ui/app/components/streaming-file.js b/ui/app/components/streaming-file.js index b19dea125..8239aedf8 100644 --- a/ui/app/components/streaming-file.js +++ b/ui/app/components/streaming-file.js @@ -1,8 +1,12 @@ import Component from '@ember/component'; -import { run } from '@ember/runloop'; +import { scheduleOnce, once } from '@ember/runloop'; import { task } from 'ember-concurrency'; import WindowResizable from 'nomad-ui/mixins/window-resizable'; -import { classNames, tagName } from '@ember-decorators/component'; +import { + classNames, + tagName, + attributeBindings, +} from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; const A_KEY = 65; @@ -10,6 +14,7 @@ const A_KEY = 65; @classic @tagName('pre') @classNames('cli-window') +@attributeBindings('data-test-log-cli') export default class StreamingFile extends Component.extend(WindowResizable) { 'data-test-log-cli' = true; @@ -27,7 +32,7 @@ export default class StreamingFile extends Component.extend(WindowResizable) { return; } - run.scheduleOnce('actions', this, this.performTask); + scheduleOnce('actions', this, this.performTask); } performTask() { @@ -100,7 +105,7 @@ export default class StreamingFile extends Component.extend(WindowResizable) { } windowResizeHandler() { - run.once(this, this.fillAvailableHeight); + once(this, this.fillAvailableHeight); } fillAvailableHeight() { @@ -115,7 +120,7 @@ export default class StreamingFile extends Component.extend(WindowResizable) { @task(function* () { yield this.get('logger.gotoHead').perform(); - run.scheduleOnce('afterRender', this, this.scrollToTop); + scheduleOnce('afterRender', this, this.scrollToTop); }) head; @@ -144,7 +149,7 @@ export default class StreamingFile extends Component.extend(WindowResizable) { stream; scheduleScrollSynchronization() { - run.scheduleOnce('afterRender', this, this.synchronizeScrollPosition); + scheduleOnce('afterRender', this, this.synchronizeScrollPosition); } willDestroy() { diff --git a/ui/app/components/task-row.js b/ui/app/components/task-row.js index 63183a57a..35343205d 100644 --- a/ui/app/components/task-row.js +++ b/ui/app/components/task-row.js @@ -5,12 +5,17 @@ import { computed } from '@ember/object'; import { alias } from '@ember/object/computed'; import { task, timeout } from 'ember-concurrency'; import { lazyClick } from '../helpers/lazy-click'; -import { classNames, tagName } from '@ember-decorators/component'; +import { + classNames, + tagName, + attributeBindings, +} from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; @classic @tagName('tr') @classNames('task-row', 'is-interactive') +@attributeBindings('data-test-task-row') export default class TaskRow extends Component { @service store; @service token; diff --git a/ui/app/components/toggle.js b/ui/app/components/toggle.js index efa1ed87d..6f49956f9 100644 --- a/ui/app/components/toggle.js +++ b/ui/app/components/toggle.js @@ -3,6 +3,7 @@ import { classNames, classNameBindings, tagName, + attributeBindings, } from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; @@ -10,6 +11,7 @@ import classic from 'ember-classic-decorator'; @tagName('label') @classNames('toggle') @classNameBindings('isDisabled:is-disabled', 'isActive:is-active') +@attributeBindings('data-test-label') export default class Toggle extends Component { 'data-test-label' = true; diff --git a/ui/app/components/topo-viz.js b/ui/app/components/topo-viz.js index 
48faa6b7f..75b025bc0 100644 --- a/ui/app/components/topo-viz.js +++ b/ui/app/components/topo-viz.js @@ -2,7 +2,7 @@ import Component from '@glimmer/component'; import { tracked } from '@glimmer/tracking'; import { action, set } from '@ember/object'; import { inject as service } from '@ember/service'; -import { run } from '@ember/runloop'; +import { next } from '@ember/runloop'; import { scaleLinear } from 'd3-scale'; import { extent, deviation, mean } from 'd3-array'; import { line, curveBasis } from 'd3-shape'; @@ -268,7 +268,7 @@ export default class TopoViz extends Component { @action computedActiveEdges() { // Wait a render cycle - run.next(() => { + next(() => { const path = line().curve(curveBasis); // 1. Get the active element const allocation = this.activeAllocation.allocation; diff --git a/ui/app/components/trigger.js b/ui/app/components/trigger.js index d141e6c33..e64a2423e 100644 --- a/ui/app/components/trigger.js +++ b/ui/app/components/trigger.js @@ -2,6 +2,7 @@ import { action } from '@ember/object'; import Component from '@glimmer/component'; import { tracked } from '@glimmer/tracking'; import { task } from 'ember-concurrency'; +import { schedule } from '@ember/runloop'; const noOp = () => undefined; @@ -63,6 +64,8 @@ export default class Trigger extends Component { @action onTrigger() { - this.triggerTask.perform(); + schedule('actions', () => { + this.triggerTask.perform(); + }); } } diff --git a/ui/app/controllers/allocations/allocation/index.js b/ui/app/controllers/allocations/allocation/index.js index 3fdc17216..701efaf74 100644 --- a/ui/app/controllers/allocations/allocation/index.js +++ b/ui/app/controllers/allocations/allocation/index.js @@ -15,6 +15,7 @@ import classic from 'ember-classic-decorator'; @classic export default class IndexController extends Controller.extend(Sortable) { @service token; + @service store; queryParams = [ { diff --git a/ui/app/controllers/application.js b/ui/app/controllers/application.js index 5c4c1ed24..8e862ead6 100644 --- a/ui/app/controllers/application.js +++ b/ui/app/controllers/application.js @@ -1,7 +1,7 @@ /* eslint-disable ember/no-observers */ import { inject as service } from '@ember/service'; import Controller from '@ember/controller'; -import { run } from '@ember/runloop'; +import { next } from '@ember/runloop'; import { observes } from '@ember-decorators/object'; import { computed } from '@ember/object'; import Ember from 'ember'; @@ -14,6 +14,7 @@ import classic from 'ember-classic-decorator'; export default class ApplicationController extends Controller { @service config; @service system; + @service token; queryParams = [ { @@ -70,11 +71,11 @@ export default class ApplicationController extends Controller { @observes('error') throwError() { if (this.get('config.isDev')) { - run.next(() => { + next(() => { throw this.error; }); } else if (!Ember.testing) { - run.next(() => { + next(() => { // eslint-disable-next-line console.warn('UNRECOVERABLE ERROR:', this.error); }); diff --git a/ui/app/controllers/jobs/job/clients.js b/ui/app/controllers/jobs/job/clients.js index a7735784c..81080b6f6 100644 --- a/ui/app/controllers/jobs/job/clients.js +++ b/ui/app/controllers/jobs/job/clients.js @@ -13,6 +13,7 @@ import { deserializedQueryParam as selection, } from 'nomad-ui/utils/qp-serialize'; import classic from 'ember-classic-decorator'; +import { inject as service } from '@ember/service'; @classic export default class ClientsController extends Controller.extend( @@ -20,6 +21,8 @@ export default class ClientsController extends 
Controller.extend( Searchable, WithNamespaceResetting ) { + @service store; + queryParams = [ { currentPage: 'page', diff --git a/ui/app/mixins/window-resizable.js b/ui/app/mixins/window-resizable.js index 164cb4c66..078cef0e8 100644 --- a/ui/app/mixins/window-resizable.js +++ b/ui/app/mixins/window-resizable.js @@ -1,5 +1,5 @@ import Mixin from '@ember/object/mixin'; -import { run } from '@ember/runloop'; +import { scheduleOnce } from '@ember/runloop'; import { assert } from '@ember/debug'; import { on } from '@ember/object/evented'; @@ -13,7 +13,7 @@ export default Mixin.create({ }, setupWindowResize: on('didInsertElement', function () { - run.scheduleOnce('afterRender', this, this.addResizeListener); + scheduleOnce('afterRender', this, this.addResizeListener); }), addResizeListener() { diff --git a/ui/app/models/task-group-scale.js b/ui/app/models/task-group-scale.js index 9eb5238d4..e2210ed9b 100644 --- a/ui/app/models/task-group-scale.js +++ b/ui/app/models/task-group-scale.js @@ -1,4 +1,4 @@ -import { computed } from '@ember/object'; +import { reads } from '@ember/object/computed'; import Fragment from 'ember-data-model-fragments/fragment'; import { attr } from '@ember-data/model'; import { @@ -19,6 +19,6 @@ export default class TaskGroupScale extends Fragment { @fragmentArray('scale-event') events; - @computed.reads('events.length') + @reads('events.length') isVisible; } diff --git a/ui/app/models/task-group.js b/ui/app/models/task-group.js index afeafe69c..9ca99479c 100644 --- a/ui/app/models/task-group.js +++ b/ui/app/models/task-group.js @@ -41,7 +41,7 @@ export default class TaskGroup extends Fragment { return this.tasks.mapBy('driver').uniq(); } - @computed('job.allocations.@each.taskGroup', 'name') + @computed('job.allocations.{@each.taskGroup,isFulfilled}', 'name') get allocations() { return maybe(this.get('job.allocations')).filterBy( 'taskGroupName', diff --git a/ui/app/routes/clients/index.js b/ui/app/routes/clients/index.js index 862c16b75..49b420679 100644 --- a/ui/app/routes/clients/index.js +++ b/ui/app/routes/clients/index.js @@ -2,8 +2,11 @@ import Route from '@ember/routing/route'; import { collect } from '@ember/object/computed'; import { watchAll } from 'nomad-ui/utils/properties/watch'; import WithWatchers from 'nomad-ui/mixins/with-watchers'; +import { inject as service } from '@ember/service'; export default class IndexRoute extends Route.extend(WithWatchers) { + @service store; + startWatchers(controller) { controller.set('watcher', this.watch.perform()); } diff --git a/ui/app/routes/csi/plugins/index.js b/ui/app/routes/csi/plugins/index.js index 1155e7b80..4ceb027c1 100644 --- a/ui/app/routes/csi/plugins/index.js +++ b/ui/app/routes/csi/plugins/index.js @@ -2,8 +2,11 @@ import Route from '@ember/routing/route'; import { collect } from '@ember/object/computed'; import { watchQuery } from 'nomad-ui/utils/properties/watch'; import WithWatchers from 'nomad-ui/mixins/with-watchers'; +import { inject as service } from '@ember/service'; export default class IndexRoute extends Route.extend(WithWatchers) { + @service store; + startWatchers(controller) { controller.set('modelWatch', this.watch.perform({ type: 'csi' })); } diff --git a/ui/app/routes/csi/plugins/plugin/allocations.js b/ui/app/routes/csi/plugins/plugin/allocations.js index f25dd1ec2..c928833ce 100644 --- a/ui/app/routes/csi/plugins/plugin/allocations.js +++ b/ui/app/routes/csi/plugins/plugin/allocations.js @@ -2,8 +2,11 @@ import Route from '@ember/routing/route'; import { collect } from 
'@ember/object/computed'; import { watchRecord } from 'nomad-ui/utils/properties/watch'; import WithWatchers from 'nomad-ui/mixins/with-watchers'; +import { inject as service } from '@ember/service'; export default class AllocationsRoute extends Route.extend(WithWatchers) { + @service store; + startWatchers(controller, model) { if (!model) return; diff --git a/ui/app/routes/csi/plugins/plugin/index.js b/ui/app/routes/csi/plugins/plugin/index.js index f80460f42..d59e9bffe 100644 --- a/ui/app/routes/csi/plugins/plugin/index.js +++ b/ui/app/routes/csi/plugins/plugin/index.js @@ -2,8 +2,11 @@ import Route from '@ember/routing/route'; import { collect } from '@ember/object/computed'; import { watchRecord } from 'nomad-ui/utils/properties/watch'; import WithWatchers from 'nomad-ui/mixins/with-watchers'; +import { inject as service } from '@ember/service'; export default class IndexRoute extends Route.extend(WithWatchers) { + @service store; + startWatchers(controller, model) { if (!model) return; diff --git a/ui/app/routes/jobs/job/allocations.js b/ui/app/routes/jobs/job/allocations.js index d5829719c..09217e53a 100644 --- a/ui/app/routes/jobs/job/allocations.js +++ b/ui/app/routes/jobs/job/allocations.js @@ -2,8 +2,11 @@ import Route from '@ember/routing/route'; import { collect } from '@ember/object/computed'; import { watchRelationship } from 'nomad-ui/utils/properties/watch'; import WithWatchers from 'nomad-ui/mixins/with-watchers'; +import { inject as service } from '@ember/service'; export default class AllocationsRoute extends Route.extend(WithWatchers) { + @service store; + model() { const job = this.modelFor('jobs.job'); return job && job.get('allocations').then(() => job); diff --git a/ui/app/routes/jobs/job/clients.js b/ui/app/routes/jobs/job/clients.js index 8a4841606..93b71f6e6 100644 --- a/ui/app/routes/jobs/job/clients.js +++ b/ui/app/routes/jobs/job/clients.js @@ -10,6 +10,7 @@ import { collect } from '@ember/object/computed'; export default class ClientsRoute extends Route.extend(WithWatchers) { @service can; + @service store; beforeModel() { if (this.can.cannot('read client')) { diff --git a/ui/app/routes/jobs/job/deployments.js b/ui/app/routes/jobs/job/deployments.js index 94aa0c1da..95773fbd8 100644 --- a/ui/app/routes/jobs/job/deployments.js +++ b/ui/app/routes/jobs/job/deployments.js @@ -3,8 +3,11 @@ import RSVP from 'rsvp'; import { collect } from '@ember/object/computed'; import { watchRelationship } from 'nomad-ui/utils/properties/watch'; import WithWatchers from 'nomad-ui/mixins/with-watchers'; +import { inject as service } from '@ember/service'; export default class DeploymentsRoute extends Route.extend(WithWatchers) { + @service store; + model() { const job = this.modelFor('jobs.job'); return ( diff --git a/ui/app/routes/jobs/job/evaluations.js b/ui/app/routes/jobs/job/evaluations.js index cfb7fe1c6..d992a3671 100644 --- a/ui/app/routes/jobs/job/evaluations.js +++ b/ui/app/routes/jobs/job/evaluations.js @@ -2,8 +2,11 @@ import Route from '@ember/routing/route'; import { collect } from '@ember/object/computed'; import { watchRelationship } from 'nomad-ui/utils/properties/watch'; import WithWatchers from 'nomad-ui/mixins/with-watchers'; +import { inject as service } from '@ember/service'; export default class EvaluationsRoute extends Route.extend(WithWatchers) { + @service store; + model() { const job = this.modelFor('jobs.job'); return job && job.get('evaluations').then(() => job); diff --git a/ui/app/routes/jobs/job/index.js b/ui/app/routes/jobs/job/index.js index 
be195becc..2c527ef9f 100644 --- a/ui/app/routes/jobs/job/index.js +++ b/ui/app/routes/jobs/job/index.js @@ -11,6 +11,7 @@ import WithWatchers from 'nomad-ui/mixins/with-watchers'; export default class IndexRoute extends Route.extend(WithWatchers) { @service can; + @service store; async model() { return this.modelFor('jobs.job'); diff --git a/ui/app/routes/jobs/job/task-group.js b/ui/app/routes/jobs/job/task-group.js index 88b654301..7943cb3ae 100644 --- a/ui/app/routes/jobs/job/task-group.js +++ b/ui/app/routes/jobs/job/task-group.js @@ -8,8 +8,11 @@ import { } from 'nomad-ui/utils/properties/watch'; import WithWatchers from 'nomad-ui/mixins/with-watchers'; import notifyError from 'nomad-ui/utils/notify-error'; +import { inject as service } from '@ember/service'; export default class TaskGroupRoute extends Route.extend(WithWatchers) { + @service store; + model({ name }) { const job = this.modelFor('jobs.job'); diff --git a/ui/app/routes/jobs/job/versions.js b/ui/app/routes/jobs/job/versions.js index dc924197f..93a132024 100644 --- a/ui/app/routes/jobs/job/versions.js +++ b/ui/app/routes/jobs/job/versions.js @@ -5,8 +5,11 @@ import { watchRelationship, } from 'nomad-ui/utils/properties/watch'; import WithWatchers from 'nomad-ui/mixins/with-watchers'; +import { inject as service } from '@ember/service'; export default class VersionsRoute extends Route.extend(WithWatchers) { + @service store; + model() { const job = this.modelFor('jobs.job'); return job && job.get('versions').then(() => job); diff --git a/ui/app/routes/optimize.js b/ui/app/routes/optimize.js index 35f348a0c..43d8c9062 100644 --- a/ui/app/routes/optimize.js +++ b/ui/app/routes/optimize.js @@ -8,6 +8,7 @@ import RSVP from 'rsvp'; @classic export default class OptimizeRoute extends Route { @service can; + @service store; beforeModel() { if (this.can.cannot('accept recommendation')) { diff --git a/ui/app/serializers/agent.js b/ui/app/serializers/agent.js index ec6b393c9..92a79d087 100644 --- a/ui/app/serializers/agent.js +++ b/ui/app/serializers/agent.js @@ -1,6 +1,8 @@ import ApplicationSerializer from './application'; import AdapterError from '@ember-data/adapter/error'; +import classic from 'ember-classic-decorator'; +@classic export default class AgentSerializer extends ApplicationSerializer { attrs = { datacenter: 'dc', diff --git a/ui/app/serializers/application.js b/ui/app/serializers/application.js index c37983af6..d4414d7c9 100644 --- a/ui/app/serializers/application.js +++ b/ui/app/serializers/application.js @@ -6,7 +6,9 @@ import JSONSerializer from '@ember-data/serializer/json'; import { pluralize, singularize } from 'ember-inflector'; import removeRecord from '../utils/remove-record'; import { assign } from '@ember/polyfills'; +import classic from 'ember-classic-decorator'; +@classic export default class Application extends JSONSerializer { primaryKey = 'ID'; diff --git a/ui/app/serializers/drain-strategy.js b/ui/app/serializers/drain-strategy.js index dfaf0c5c8..8772dbce1 100644 --- a/ui/app/serializers/drain-strategy.js +++ b/ui/app/serializers/drain-strategy.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class DrainStrategy extends ApplicationSerializer { normalize(typeHash, hash) { // TODO API: finishedAt is always marshaled as a date even when unset. 
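The route and controller hunks above share a second recurring change: an explicit `@service store` declaration. The Ember 3.26+ `implicit-injections` deprecation (which this patch also silences in deprecation-workflow.js further down) covers the store property that used to be injected into every route and controller automatically, so anything that touches `this.store` now declares the dependency itself. A minimal sketch (route and model names are illustrative):

    import Route from '@ember/routing/route';
    import { inject as service } from '@ember/service';

    export default class ExampleRoute extends Route {
      // Declared explicitly instead of relying on the framework-provided
      // implicit injection that is being deprecated.
      @service store;

      model() {
        return this.store.findAll('job');
      }
    }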
diff --git a/ui/app/serializers/fragment.js b/ui/app/serializers/fragment.js index 435e04210..d2bf2b755 100644 --- a/ui/app/serializers/fragment.js +++ b/ui/app/serializers/fragment.js @@ -1,3 +1,5 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class Fragment extends ApplicationSerializer {} diff --git a/ui/app/serializers/job-plan.js b/ui/app/serializers/job-plan.js index ea8971451..d61c4dc6c 100644 --- a/ui/app/serializers/job-plan.js +++ b/ui/app/serializers/job-plan.js @@ -1,6 +1,8 @@ import ApplicationSerializer from './application'; import { get } from '@ember/object'; +import classic from 'ember-classic-decorator'; +@classic export default class JobPlan extends ApplicationSerializer { mapToArray = ['FailedTGAllocs']; diff --git a/ui/app/serializers/job-scale.js b/ui/app/serializers/job-scale.js index 45bc98d7f..0522ede5f 100644 --- a/ui/app/serializers/job-scale.js +++ b/ui/app/serializers/job-scale.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class JobScale extends ApplicationSerializer { mapToArray = [{ beforeName: 'TaskGroups', afterName: 'TaskGroupScales' }]; diff --git a/ui/app/serializers/job-summary.js b/ui/app/serializers/job-summary.js index 23af75c7c..4c7996937 100644 --- a/ui/app/serializers/job-summary.js +++ b/ui/app/serializers/job-summary.js @@ -1,6 +1,8 @@ import { get } from '@ember/object'; import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class JobSummary extends ApplicationSerializer { normalize(modelClass, hash) { hash.PlainJobId = hash.JobID; diff --git a/ui/app/serializers/job-version.js b/ui/app/serializers/job-version.js index bc2962a53..a96f92625 100644 --- a/ui/app/serializers/job-version.js +++ b/ui/app/serializers/job-version.js @@ -1,6 +1,8 @@ import { assign } from '@ember/polyfills'; import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class JobVersionSerializer extends ApplicationSerializer { attrs = { number: 'Version', diff --git a/ui/app/serializers/job.js b/ui/app/serializers/job.js index 65a2001bb..a2b01fb2a 100644 --- a/ui/app/serializers/job.js +++ b/ui/app/serializers/job.js @@ -1,7 +1,9 @@ import { assign } from '@ember/polyfills'; import ApplicationSerializer from './application'; import queryString from 'query-string'; +import classic from 'ember-classic-decorator'; +@classic export default class JobSerializer extends ApplicationSerializer { attrs = { parameterized: 'ParameterizedJob', diff --git a/ui/app/serializers/namespace.js b/ui/app/serializers/namespace.js index 5b35364e8..562ff0160 100644 --- a/ui/app/serializers/namespace.js +++ b/ui/app/serializers/namespace.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class Namespace extends ApplicationSerializer { primaryKey = 'Name'; } diff --git a/ui/app/serializers/network.js b/ui/app/serializers/network.js index 605f4bc33..e994d6454 100644 --- a/ui/app/serializers/network.js +++ b/ui/app/serializers/network.js @@ -1,6 +1,8 @@ import ApplicationSerializer from './application'; import isIp from 'is-ip'; +import classic from 'ember-classic-decorator'; +@classic export default class NetworkSerializer extends ApplicationSerializer { attrs = { cidr: 'CIDR', diff --git 
a/ui/app/serializers/node-event.js b/ui/app/serializers/node-event.js index 169b65faf..500a6846f 100644 --- a/ui/app/serializers/node-event.js +++ b/ui/app/serializers/node-event.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class NodeEventSerializer extends ApplicationSerializer { attrs = { time: 'Timestamp', diff --git a/ui/app/serializers/plugin.js b/ui/app/serializers/plugin.js index 5f368a9fd..bcdc40731 100644 --- a/ui/app/serializers/plugin.js +++ b/ui/app/serializers/plugin.js @@ -1,4 +1,5 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; // Convert a map[string]interface{} into an array of objects // where the key becomes a property at propKey. @@ -14,6 +15,7 @@ const unmap = (hash, propKey) => return record; }); +@classic export default class Plugin extends ApplicationSerializer { normalize(typeHash, hash) { hash.PlainId = hash.ID; diff --git a/ui/app/serializers/policy.js b/ui/app/serializers/policy.js index a7fd7b1fc..913dff7f1 100644 --- a/ui/app/serializers/policy.js +++ b/ui/app/serializers/policy.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class Policy extends ApplicationSerializer { normalize(typeHash, hash) { hash.ID = hash.Name; diff --git a/ui/app/serializers/port.js b/ui/app/serializers/port.js index fd3681fa9..d41733d6b 100644 --- a/ui/app/serializers/port.js +++ b/ui/app/serializers/port.js @@ -1,6 +1,8 @@ import ApplicationSerializer from './application'; import isIp from 'is-ip'; +import classic from 'ember-classic-decorator'; +@classic export default class PortSerializer extends ApplicationSerializer { attrs = { hostIp: 'HostIP', diff --git a/ui/app/serializers/reschedule-event.js b/ui/app/serializers/reschedule-event.js index 0f843a09e..ff7a71b5c 100644 --- a/ui/app/serializers/reschedule-event.js +++ b/ui/app/serializers/reschedule-event.js @@ -1,5 +1,8 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; + +@classic export default class RescheduleEvent extends ApplicationSerializer { separateNanos = ['Time']; diff --git a/ui/app/serializers/resources.js b/ui/app/serializers/resources.js index c82e00996..659eebe37 100644 --- a/ui/app/serializers/resources.js +++ b/ui/app/serializers/resources.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class ResourcesSerializer extends ApplicationSerializer { arrayNullOverrides = ['Ports', 'Networks']; diff --git a/ui/app/serializers/scale-event.js b/ui/app/serializers/scale-event.js index bbcfb3f97..1808de1b3 100644 --- a/ui/app/serializers/scale-event.js +++ b/ui/app/serializers/scale-event.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class ScaleEventSerializer extends ApplicationSerializer { separateNanos = ['Time']; objectNullOverrides = ['Meta']; diff --git a/ui/app/serializers/service.js b/ui/app/serializers/service.js index 7b7fb898a..89a5588a4 100644 --- a/ui/app/serializers/service.js +++ b/ui/app/serializers/service.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class ServiceSerializer extends ApplicationSerializer { attrs = { connect: 
'Connect', diff --git a/ui/app/serializers/structured-attributes.js b/ui/app/serializers/structured-attributes.js index 87fd6dc8b..9b7c5428f 100644 --- a/ui/app/serializers/structured-attributes.js +++ b/ui/app/serializers/structured-attributes.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class StructuredAttributes extends ApplicationSerializer { normalize(typeHash, hash) { return super.normalize(typeHash, { Raw: hash }); diff --git a/ui/app/serializers/task-event.js b/ui/app/serializers/task-event.js index 108ff4a45..10d3b8855 100644 --- a/ui/app/serializers/task-event.js +++ b/ui/app/serializers/task-event.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class TaskEventSerializer extends ApplicationSerializer { attrs = { message: 'DisplayMessage', diff --git a/ui/app/serializers/task-group-deployment-summary.js b/ui/app/serializers/task-group-deployment-summary.js index 0441af6be..4af5cb094 100644 --- a/ui/app/serializers/task-group-deployment-summary.js +++ b/ui/app/serializers/task-group-deployment-summary.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class TaskGroupDeploymentSummary extends ApplicationSerializer { normalize(typeHash, hash) { hash.PlacedCanaryAllocations = hash.PlacedCanaries || []; diff --git a/ui/app/serializers/task-group-scale.js b/ui/app/serializers/task-group-scale.js index 86d25d894..cbb6f8e1b 100644 --- a/ui/app/serializers/task-group-scale.js +++ b/ui/app/serializers/task-group-scale.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class TaskGroupScaleSerializer extends ApplicationSerializer { arrayNullOverrides = ['Events']; } diff --git a/ui/app/serializers/task-group.js b/ui/app/serializers/task-group.js index 28e86e003..b8b66d9de 100644 --- a/ui/app/serializers/task-group.js +++ b/ui/app/serializers/task-group.js @@ -1,6 +1,8 @@ import { copy } from 'ember-copy'; import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class TaskGroup extends ApplicationSerializer { arrayNullOverrides = ['Services']; mapToArray = ['Volumes']; diff --git a/ui/app/serializers/task-state.js b/ui/app/serializers/task-state.js index 11a29e25b..0efabca0a 100644 --- a/ui/app/serializers/task-state.js +++ b/ui/app/serializers/task-state.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class TaskState extends ApplicationSerializer { normalize(typeHash, hash) { // TODO API: finishedAt is always marshaled as a date even when unset. 
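Every serializer hunk above makes the same one-line change: adding `@classic` from ember-classic-decorator. These native classes extend Ember Data's classic (pre-Octane) serializer hierarchy and keep classic-style state such as class-field `attrs` mappings, which the upgraded ember-classic-decorator appears to require tagging explicitly. A sketch of the resulting shape (the serializer name and attrs mapping are illustrative, not one of Nomad's serializers):

    import ApplicationSerializer from './application';
    import classic from 'ember-classic-decorator';

    @classic
    export default class ExampleSerializer extends ApplicationSerializer {
      // Classic-style declarations like this attrs mapping are the kind of
      // feature that keeps these classes in classic territory.
      attrs = {
        exampleField: 'ExampleField',
      };
    }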
diff --git a/ui/app/serializers/task.js b/ui/app/serializers/task.js index 2ed5dbe9b..96105b8f7 100644 --- a/ui/app/serializers/task.js +++ b/ui/app/serializers/task.js @@ -1,5 +1,7 @@ import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class Task extends ApplicationSerializer { normalize(typeHash, hash) { // Lift the reserved resource numbers out of the Resources object diff --git a/ui/app/serializers/token.js b/ui/app/serializers/token.js index 0e3beb302..1ec3416c6 100644 --- a/ui/app/serializers/token.js +++ b/ui/app/serializers/token.js @@ -1,6 +1,8 @@ import { copy } from 'ember-copy'; import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class TokenSerializer extends ApplicationSerializer { primaryKey = 'AccessorID'; diff --git a/ui/app/serializers/volume.js b/ui/app/serializers/volume.js index faf2b398d..995c31b11 100644 --- a/ui/app/serializers/volume.js +++ b/ui/app/serializers/volume.js @@ -1,6 +1,8 @@ import { set, get } from '@ember/object'; import ApplicationSerializer from './application'; +import classic from 'ember-classic-decorator'; +@classic export default class VolumeSerializer extends ApplicationSerializer { attrs = { externalId: 'ExternalID', diff --git a/ui/app/templates/application.hbs b/ui/app/templates/application.hbs index e479b00f3..a51eea0a2 100644 --- a/ui/app/templates/application.hbs +++ b/ui/app/templates/application.hbs @@ -1,14 +1,18 @@ -{{page-title (if this.system.shouldShowRegions (concat this.system.activeRegion " - ")) "Nomad" separator=" - "}} +{{page-title + (if this.system.shouldShowRegions (concat this.system.activeRegion " - ")) + "Nomad" + separator=" - " +}} -{{#unless this.error}} - {{outlet}} -{{else}} +{{#if this.error}}
{{#if this.isNoLeader}}
No Cluster Leader
- The cluster has no leader. Read about Outage Recovery. + The cluster has no leader. + + Read about Outage Recovery.
{{else if this.isOTTExchange}}
Token Exchange Error
@@ -17,16 +21,24 @@
{{else if this.is500}}
Server Error
-
A server error prevented data from being sent to the client.
+
A server error prevented + data from being sent to the client.
{{else if this.is404}}
Not Found
-
What you're looking for couldn't be found. It either doesn't exist or you are not authorized to see it.
+
What you're looking for + couldn't be found. It either doesn't exist or you are not authorized + to see it.
{{else if this.is403}}
Not Authorized
{{#if this.token.secret}} -
Your ACL token does not provide the required permissions. Contact your administrator if this is an error.
+
Your + ACL token + does not provide the required permissions. Contact your + administrator if this is an error.
{{else}} -
Provide an ACL token with requisite permissions to view this.
+
Provide an + ACL token + with requisite permissions to view this.
{{/if}} {{else}}
Error
@@ -37,8 +49,15 @@ {{/if}}
      -{{/unless}} +{{else}} + {{outlet}} +{{/if}} diff --git a/ui/app/templates/clients/index.hbs b/ui/app/templates/clients/index.hbs index 91d889569..97b0618cf 100644 --- a/ui/app/templates/clients/index.hbs +++ b/ui/app/templates/clients/index.hbs @@ -9,7 +9,8 @@ + @placeholder="Search clients..." + /> {{/if}}
      @@ -19,31 +20,36 @@ @label="Class" @options={{this.optionsClass}} @selection={{this.selectionClass}} - @onSelect={{action this.setFacetQueryParam "qpClass"}} /> + @onSelect={{action this.setFacetQueryParam "qpClass"}} + /> + @onSelect={{action this.setFacetQueryParam "qpState"}} + /> + @onSelect={{action this.setFacetQueryParam "qpDatacenter"}} + /> + @onSelect={{action this.setFacetQueryParam "qpVersion"}} + /> + @onSelect={{action this.setFacetQueryParam "qpVolume"}} + />
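Each facet dropdown in this hunk binds `@onSelect` to the controller's `setFacetQueryParam` action with the query-param name curried in. A sketch of what that action plausibly looks like, assuming the `serialize` helper from nomad-ui/utils/qp-serialize that the clients controller imports (the exact body in Nomad's controllers may differ):

    import Controller from '@ember/controller';
    import { action } from '@ember/object';
    import { serialize } from 'nomad-ui/utils/qp-serialize';

    export default class ExampleController extends Controller {
      @action
      setFacetQueryParam(queryParam, selection) {
        // Serialize the multi-select array into the bound query param so
        // the selection survives in the URL.
        this.set(queryParam, serialize(selection));
      }
    }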
    @@ -51,16 +57,23 @@ + @page={{this.currentPage}} + as |p| + > + @class="with-foot" + as |t| + > ID - Name + Name State Address Datacenter @@ -69,16 +82,24 @@ # Allocs - +
    @@ -87,18 +108,28 @@ {{else}}
{{#if (eq this.nodes.length 0)}} -
No Clients
+
No Clients
The cluster currently has no client nodes.
{{else if (eq this.filteredNodes.length 0)}} -
No Matches
+
No Matches
No clients match your current filter selection.
{{else if this.searchTerm}} -
No Matches
-
No clients match the term {{this.searchTerm}}
+
No Matches
+
No clients match the term + {{this.searchTerm}}
{{/if}}
    {{/if}} diff --git a/ui/app/templates/components/distribution-bar.hbs b/ui/app/templates/components/distribution-bar.hbs index 9f42b9c7c..177d74ca9 100644 --- a/ui/app/templates/components/distribution-bar.hbs +++ b/ui/app/templates/components/distribution-bar.hbs @@ -1,23 +1,35 @@ - + -{{#if hasBlock}} - {{yield (hash - data=this._data - activeDatum=this.activeDatum - )}} +{{#if (has-block)}} + {{yield (hash data=this._data activeDatum=this.activeDatum)}} {{else}} -
    +
      {{#each this._data as |datum index|}} -
- - +
+ + {{datum.label}} {{datum.value}} diff --git a/ui/app/templates/components/drain-popover.hbs index a2c6af2ea..2d9fa7edc 100644 --- a/ui/app/templates/components/drain-popover.hbs +++ b/ui/app/templates/components/drain-popover.hbs @@ -1,3 +1,4 @@ +{{! template-lint-disable require-input-label }} -
      + (if this.isDisabled "tooltip is-right-aligned") + }} + as |m| +> +

Drain Options
{{#if this.deadlineEnabled}} -
+
      + @onChange={{action (mut this.selectedDurationQuickOption)}} + as |opt| + > {{opt.label}}
      @@ -40,12 +58,16 @@ + oninput={{action + (queue + (action (mut this.parseError) "") + (action (mut this.customDuration) value="target.value") + ) + }} + /> {{#if this.parseError}} {{this.parseError}} {{/if}} @@ -57,10 +79,14 @@ + @onToggle={{action (mut this.forceDrain) value="target.checked"}} + > Force Drain - - + + {{x-icon "info-circle-outline" class="is-faded"}} @@ -70,10 +96,14 @@ + @onToggle={{action (mut this.drainSystemJobs) value="target.checked"}} + > Drain System Jobs - - + + {{x-icon "info-circle-outline" class="is-faded"}} @@ -83,10 +113,16 @@ data-test-drain-submit type="button" class="popover-action is-primary" - onclick={{perform this.drain m.actions.close}}> + onclick={{perform this.drain m.actions.close}} + > Drain - +
      diff --git a/ui/app/templates/components/global-header.hbs b/ui/app/templates/components/global-header.hbs index 09709fc0c..eb23131f3 100644 --- a/ui/app/templates/components/global-header.hbs +++ b/ui/app/templates/components/global-header.hbs @@ -1,3 +1,4 @@ +{{! template-lint-disable no-duplicate-landmark-elements }}
    - + ID @@ -20,7 +18,11 @@ Memory - +
    diff --git a/ui/app/templates/components/job-diff.hbs b/ui/app/templates/components/job-diff.hbs index 377767017..dff37f63a 100644 --- a/ui/app/templates/components/job-diff.hbs +++ b/ui/app/templates/components/job-diff.hbs @@ -1,14 +1,14 @@ -{{!-- Job heading --}} +{{! Job heading }} +{{! template-lint-disable simple-unless }}
    - + class="{{if + this.diff.Type + (concat 'diff-section-label is-' (lowercase this.diff.Type)) + }}" +> + {{#if (eq (lowercase this.diff.Type) "added")}} + {{else if (eq (lowercase this.diff.Type) "deleted")}} @@ -20,22 +20,26 @@ Job: "{{this.diff.ID}}"
    -{{!-- Show job field and object diffs if the job is edited --}} +{{! Show job field and object diffs if the job is edited }} {{#if (or this.verbose (eq (lowercase this.diff.Type) "edited"))}} {{/if}} -{{!-- Each task group --}} +{{! Each task group }} {{#each this.diff.TaskGroups as |group|}} -
    - +
    - +
    {{/if}} @@ -64,10 +83,16 @@

Job Plan
-
This is the impact running this job will have on your cluster.
+
This is the impact running this job + will have on your cluster.

    - +
    @@ -75,10 +100,18 @@
    Job Plan
    - +
    -
    +
    Scheduler dry-run
    {{#if this.planOutput.failedTGAllocs}} @@ -90,15 +123,22 @@ {{/if}}
    - {{#if (and this.planOutput.preemptions.isFulfilled this.planOutput.preemptions.length)}} + {{#if + (and + this.planOutput.preemptions.isFulfilled this.planOutput.preemptions.length + ) + }}
    - Preemptions (if you choose to run this job, these allocations will be stopped) + Preemptions (if you choose to run this job, these allocations will be + stopped)
    + @class="allocations is-isolated" + as |t| + > ID @@ -120,7 +160,18 @@
    {{/if}}
    - - + +
    {{/if}} diff --git a/ui/app/templates/components/job-page/parts/stats-box.hbs b/ui/app/templates/components/job-page/parts/stats-box.hbs index 1767a3dce..7b4c4b0e2 100644 --- a/ui/app/templates/components/job-page/parts/stats-box.hbs +++ b/ui/app/templates/components/job-page/parts/stats-box.hbs @@ -1,3 +1,4 @@ +{{! template-lint-disable no-inline-styles }}
    Job Details diff --git a/ui/app/templates/components/list-accordion.hbs b/ui/app/templates/components/list-accordion.hbs index 75cd2d3d8..a00f62ece 100644 --- a/ui/app/templates/components/list-accordion.hbs +++ b/ui/app/templates/components/list-accordion.hbs @@ -1,5 +1,5 @@ {{#each this.decoratedSource as |item|}} - {{yield (hash +{{yield (hash head=(component "list-accordion/accordion-head" isOpen=item.isOpen onOpen=(action (queue @@ -14,5 +14,6 @@ body=(component "list-accordion/accordion-body" isOpen=item.isOpen) item=item.item isOpen=item.isOpen + close=(fn (mut item.isOpen) false) )}} {{/each}} diff --git a/ui/app/templates/components/list-accordion/accordion-head.hbs b/ui/app/templates/components/list-accordion/accordion-head.hbs index 739a5c63a..5f77992a1 100644 --- a/ui/app/templates/components/list-accordion/accordion-head.hbs +++ b/ui/app/templates/components/list-accordion/accordion-head.hbs @@ -5,8 +5,8 @@ data-test-accordion-toggle data-test-accordion-summary-chart={{this.buttonType}} class="button is-light is-compact pull-right accordion-toggle - {{unless this.isExpandable "is-invisible"}}" - onclick={{action (if this.isOpen this.onClose this.onOpen) this.item}} + {{unless this.isExpandable 'is-invisible'}}" + {{on "click" (fn (if this.isOpen this.onClose this.onOpen) this.item)}} type="button" > {{this.buttonLabel}} diff --git a/ui/app/templates/components/list-pagination/list-pager.hbs b/ui/app/templates/components/list-pagination/list-pager.hbs index 16c2edd52..4d7366fe3 100644 --- a/ui/app/templates/components/list-pagination/list-pager.hbs +++ b/ui/app/templates/components/list-pagination/list-pager.hbs @@ -1,5 +1,10 @@ {{#if this.visible}} - + {{yield}} {{/if}} diff --git a/ui/app/templates/components/multi-select-dropdown.hbs b/ui/app/templates/components/multi-select-dropdown.hbs index 982d2bf57..2a3869188 100644 --- a/ui/app/templates/components/multi-select-dropdown.hbs +++ b/ui/app/templates/components/multi-select-dropdown.hbs @@ -1,9 +1,16 @@ - +
    @@ -56,8 +66,7 @@ {{#if this.filteredSummaries}} {{outlet}} - + Job Recommended At @@ -75,17 +84,25 @@ /> {{else}} {{/if}} - {{else}}
-
No Matches
+
+ No Matches +
No recommendations match your current filter selection.
@@ -93,9 +110,16 @@ {{/if}} {{else}}
-
No Recommendations
+
+ No Recommendations +
- All recommendations have been accepted or dismissed. Nomad will continuously monitor applications so expect more recommendations in the future. + All recommendations have been accepted or dismissed. Nomad will + continuously monitor applications so expect more recommendations in + the future.

    {{/if}} diff --git a/ui/app/utils/classes/stream-logger.js b/ui/app/utils/classes/stream-logger.js index e935f199f..3fcce3d0a 100644 --- a/ui/app/utils/classes/stream-logger.js +++ b/ui/app/utils/classes/stream-logger.js @@ -10,6 +10,10 @@ import classic from 'ember-classic-decorator'; export default class StreamLogger extends EmberObject.extend(AbstractLogger) { reader = null; + get isSupported() { + return !!window.ReadableStream; + } + @computed() get additionalParams() { return { @@ -87,7 +91,3 @@ export default class StreamLogger extends EmberObject.extend(AbstractLogger) { }) poll; } - -StreamLogger.reopenClass({ - isSupported: !!window.ReadableStream, -}); diff --git a/ui/config/deprecation-workflow.js b/ui/config/deprecation-workflow.js index abb58e926..6467a8ce0 100644 --- a/ui/config/deprecation-workflow.js +++ b/ui/config/deprecation-workflow.js @@ -11,5 +11,14 @@ self.deprecationWorkflow.config = { }, { handler: 'throw', matchId: 'ember-cli-page-object.is-property' }, { handler: 'throw', matchId: 'ember-views.partial' }, + { handler: 'silence', matchId: 'ember-string.prototype-extensions' }, + { + handler: 'silence', + matchId: 'ember-glimmer.link-to.positional-arguments', + }, + { + handler: 'silence', + matchId: 'implicit-injections', + }, ], }; diff --git a/ui/config/ember-cli-update.json b/ui/config/ember-cli-update.json index 3bcf33be7..18405cde8 100644 --- a/ui/config/ember-cli-update.json +++ b/ui/config/ember-cli-update.json @@ -3,7 +3,7 @@ "packages": [ { "name": "ember-cli", - "version": "3.20.2", + "version": "3.28.5", "blueprints": [ { "name": "app", diff --git a/ui/config/environment.js b/ui/config/environment.js index 5cfb8fc4b..5e0e595b1 100644 --- a/ui/config/environment.js +++ b/ui/config/environment.js @@ -7,7 +7,7 @@ if (process.env.USE_MIRAGE) { } module.exports = function (environment) { - var ENV = { + let ENV = { modulePrefix: 'nomad-ui', environment: environment, rootURL: '/ui/', diff --git a/ui/ember-cli-build.js b/ui/ember-cli-build.js index 7d864ae70..86987115f 100644 --- a/ui/ember-cli-build.js +++ b/ui/ember-cli-build.js @@ -6,7 +6,7 @@ const isProd = environment === 'production'; const isTest = environment === 'test'; module.exports = function (defaults) { - var app = new EmberApp(defaults, { + let app = new EmberApp(defaults, { svg: { paths: [ 'node_modules/@hashicorp/structure-icons/dist', diff --git a/ui/package.json b/ui/package.json index e7f5845be..8fa7a5eca 100644 --- a/ui/package.json +++ b/ui/package.json @@ -10,7 +10,7 @@ "scripts": { "build": "ember build --environment=production", "precommit": "lint-staged", - "lint": "npm-run-all --aggregate-output --continue-on-error --parallel 'lint:!(fix)'", + "lint": "npm-run-all --aggregate-output --continue-on-error --parallel \"lint:!(fix)\"", "lint:fix": "npm-run-all --aggregate-output --continue-on-error --parallel lint:*:fix", "lint:hbs": "ember-template-lint .", "lint:hbs:fix": "ember-template-lint . 
--fix", @@ -19,7 +19,7 @@ "start": "ember server", "build-storybook": "STORYBOOK=true ember build && build-storybook -s dist", "storybook": "STORYBOOK=true start-storybook -p 6006 -s dist", - "test": "npm-run-all lint:* test:*", + "test": "npm-run-all lint test:*", "test:ember": "ember test", "local:qunitdom": "ember test --server --query=dockcontainer", "local:exam": "ember exam --server --load-balance --parallel=4" @@ -40,12 +40,12 @@ "devDependencies": { "@babel/plugin-proposal-object-rest-spread": "^7.4.3", "@ember/optional-features": "2.0.0", - "@ember/test-helpers": "^2.0.0", - "@glimmer/component": "^1.0.1", - "@glimmer/tracking": "^1.0.0", + "@ember/test-helpers": "^2.6.0", + "@glimmer/component": "^1.0.4", + "@glimmer/tracking": "^1.0.4", "@hashicorp/structure-icons": "^1.3.0", "@storybook/ember-cli-storybook": "https://github.com/DingoEatingFuzz/ember-cli-storybook#c207500", - "anser": "^2.1.0", + "anser": "^2.1.1", "babel-eslint": "^10.1.0", "base64-js": "^1.3.1", "broccoli-asset-rev": "^3.0.0", @@ -61,56 +61,55 @@ "d3-transition": "^3.0.1", "duration-js": "^4.0.0", "ember-a11y-testing": "^4.0.0", - "ember-auto-import": "^1.6.0", - "ember-can": "^2.0.0", - "ember-classic-decorator": "^1.0.8", - "ember-cli": "~3.20.2", - "ember-cli-babel": "^7.21.0", + "ember-auto-import": "^2.4.0", + "ember-can": "^4.1.0", + "ember-classic-decorator": "^3.0.0", + "ember-cli": "~3.28.5", + "ember-cli-babel": "^7.26.10", "ember-cli-clipboard": "^0.13.0", "ember-cli-dependency-checker": "^3.2.0", - "ember-cli-deprecation-workflow": "^1.0.1", + "ember-cli-deprecation-workflow": "^2.1.0", "ember-cli-funnel": "^0.6.1", - "ember-cli-htmlbars": "^5.2.0", - "ember-cli-inject-live-reload": "^2.0.2", - "ember-cli-mirage": "^1.1.2", - "ember-cli-moment-shim": "^3.5.0", - "ember-cli-page-object": "^1.17.2", + "ember-cli-htmlbars": "^5.7.2", + "ember-cli-inject-live-reload": "^2.1.0", + "ember-cli-mirage": "2.2.0", + "ember-cli-moment-shim": "^3.8.0", + "ember-cli-page-object": "^2.0.0-beta.3", "ember-cli-sass": "^10.0.0", "ember-cli-sri": "^2.1.1", - "ember-cli-string-helpers": "^1.5.0", - "ember-cli-terser": "^4.0.1", - "ember-composable-helpers": "^4.4.1", - "ember-concurrency": "^1.0.0", - "ember-copy": "^1.0.0", - "ember-data": "~3.24", - "ember-data-model-fragments": "5.0.0-beta.2", + "ember-cli-string-helpers": "^6.1.0", + "ember-cli-terser": "^4.0.2", + "ember-composable-helpers": "^5.0.0", + "ember-concurrency": "^2.2.1", + "ember-copy": "^2.0.1", + "ember-data": "~3.28.6", + "ember-data-model-fragments": "5.0.0-beta.3", "ember-decorators": "^6.1.1", "ember-exam": "6.1.0", "ember-export-application-global": "^2.0.1", - "ember-fetch": "^8.0.2", - "ember-inflector": "^3.0.0", - "ember-inline-svg": "^0.3.0", - "ember-load-initializers": "^2.1.1", - "ember-maybe-import-regenerator": "^0.1.6", - "ember-modifier": "^2.1.1", - "ember-moment": "^7.8.1", + "ember-fetch": "^8.1.1", + "ember-inflector": "^4.0.2", + "ember-inline-svg": "^1.0.1", + "ember-load-initializers": "^2.1.2", + "ember-maybe-import-regenerator": "^1.0.0", + "ember-modifier": "^3.1.0", + "ember-moment": "^9.0.1", "ember-named-blocks-polyfill": "^0.2.4", "ember-overridable-computed": "^1.0.0", - "ember-page-title": "^6.0.3", - "ember-power-select": "^4.1.3", - "ember-qunit": "^4.6.0", - "ember-qunit-nice-errors": "^1.2.0", + "ember-page-title": "^6.2.2", + "ember-power-select": "^4.1.7", + "ember-qunit": "^5.1.5", "ember-render-helpers": "^0.2.0", - "ember-resolver": "^8.0.0", - "ember-responsive": "^3.0.4", - "ember-sinon": "^4.0.0", - 
"ember-source": "~3.20.2", - "ember-template-lint": "^2.9.1", - "ember-test-selectors": "^5.0.0", - "ember-truth-helpers": "^2.0.0", + "ember-resolver": "^8.0.3", + "ember-responsive": "^4.0.2", + "ember-sinon": "^5.0.0", + "ember-source": "~3.28.8", + "ember-template-lint": "^3.15.0", + "ember-test-selectors": "^6.0.0", + "ember-truth-helpers": "^3.0.0", "eslint": "^7.32.0", "eslint-config-prettier": "^8.3.0", - "eslint-plugin-ember": "^10.5.8", + "eslint-plugin-ember": "^10.5.9", "eslint-plugin-ember-a11y-testing": "a11y-tool-sandbox/eslint-plugin-ember-a11y-testing#ca31c9698c7cb105f1c9761d98fcaca7d6874459", "eslint-plugin-node": "^11.1.0", "eslint-plugin-prettier": "^3.4.1", @@ -131,16 +130,18 @@ "pretender": "^3.0.1", "prettier": "^2.5.1", "query-string": "^7.0.1", + "qunit": "^2.17.2", "qunit-dom": "^2.0.0", "sass": "^1.17.3", "testem": "^3.0.3", "testem-multi-reporter": "^1.2.0", "text-encoder-lite": "^2.0.0", + "webpack": "^5.69.1", "xterm": "^4.6.0", "xterm-addon-fit": "0.5.0" }, "optionalDependencies": { - "@babel/plugin-transform-member-expression-literals": "^7.2.0", + "@babel/plugin-transform-member-expression-literals": "^7.16.7", "@storybook/addon-knobs": "^6.3.1", "@storybook/addon-storysource": "^6.3.10", "@storybook/addon-viewport": "^6.3.10", @@ -151,7 +152,7 @@ "ember-cli-string-utils": "^1.1.0" }, "engines": { - "node": "10.* || >= 12" + "node": "12.* || 14.* || >= 16" }, "ember": { "edition": "octane" @@ -169,6 +170,7 @@ "title-case": "^3.0.3" }, "resolutions": { - "ivy-codemirror/codemirror": "^5.56.0" + "ivy-codemirror/codemirror": "^5.56.0", + "ember-auto-import": "^2.4.0" } } diff --git a/ui/tests/acceptance/clients-list-test.js b/ui/tests/acceptance/clients-list-test.js index 37b3bf97b..73d0ce16e 100644 --- a/ui/tests/acceptance/clients-list-test.js +++ b/ui/tests/acceptance/clients-list-test.js @@ -174,14 +174,10 @@ module('Acceptance | clients list', function (hooks) { await ClientsList.sortBy('compositeStatus'); - assert.deepEqual(ClientsList.nodes.mapBy('compositeStatus.text'), [ - 'ready', - 'initializing', - 'ineligible', - 'draining', - 'down', - 'down', - ]); + assert.deepEqual( + ClientsList.nodes.map((n) => n.compositeStatus.text), + ['ready', 'initializing', 'ineligible', 'draining', 'down', 'down'] + ); // Simulate a client state change arriving through polling let readyClient = this.owner @@ -192,14 +188,10 @@ module('Acceptance | clients list', function (hooks) { await settled(); - assert.deepEqual(ClientsList.nodes.mapBy('compositeStatus.text'), [ - 'initializing', - 'ineligible', - 'ineligible', - 'draining', - 'down', - 'down', - ]); + assert.deepEqual( + ClientsList.nodes.map((n) => n.compositeStatus.text), + ['initializing', 'ineligible', 'ineligible', 'draining', 'down', 'down'] + ); }); test('each client should link to the client detail page', async function (assert) { diff --git a/ui/tests/acceptance/jobs-list-test.js b/ui/tests/acceptance/jobs-list-test.js index 2a46b0649..8d8a38764 100644 --- a/ui/tests/acceptance/jobs-list-test.js +++ b/ui/tests/acceptance/jobs-list-test.js @@ -87,12 +87,10 @@ module('Acceptance | jobs list', function (hooks) { test('the job run button is disabled when the token lacks permission', async function (assert) { window.localStorage.nomadTokenSecret = clientToken.secretId; + await JobsList.visit(); assert.ok(JobsList.runJobButton.isDisabled); - - await JobsList.runJobButton.click(); - assert.equal(currentURL(), '/jobs'); }); test('the anonymous policy is fetched to check whether to show the job run button', async 
function (assert) { diff --git a/ui/tests/acceptance/optimize-test.js b/ui/tests/acceptance/optimize-test.js index ec1e291fd..0b7fe635f 100644 --- a/ui/tests/acceptance/optimize-test.js +++ b/ui/tests/acceptance/optimize-test.js @@ -12,6 +12,7 @@ import { formatBytes, formatHertz, replaceMinus } from 'nomad-ui/utils/units'; import Optimize from 'nomad-ui/tests/pages/optimize'; import Layout from 'nomad-ui/tests/pages/layout'; import JobsList from 'nomad-ui/tests/pages/jobs/list'; +import collapseWhitespace from '../helpers/collapse-whitespace'; let managementToken, clientToken; @@ -462,7 +463,7 @@ module('Acceptance | optimize search and facets', function (hooks) { assert.equal(Optimize.card.slug.jobName, 'zzzzzz'); assert.equal( - Optimize.search.placeholder, + collapseWhitespace(Optimize.search.placeholder), `Search ${Optimize.recommendationSummaries.length} recommendations...` ); diff --git a/ui/tests/helpers/collapse-whitespace.js b/ui/tests/helpers/collapse-whitespace.js new file mode 100644 index 000000000..161c0d54d --- /dev/null +++ b/ui/tests/helpers/collapse-whitespace.js @@ -0,0 +1,7 @@ +export default function (string) { + return string + .replace(/[\t\r\n]/g, ' ') + .replace(/ +/g, ' ') + .replace(/^ /, '') + .replace(/ $/, ''); +} diff --git a/ui/tests/index.html b/ui/tests/index.html index a75e239bc..1224e622c 100644 --- a/ui/tests/index.html +++ b/ui/tests/index.html @@ -10,9 +10,9 @@ {{content-for "head"}} {{content-for "test-head"}} - - - + + + {{content-for "head-footer"}} {{content-for "test-head-footer"}} @@ -26,11 +26,18 @@ window.__ReadableStream = window.ReadableStream; window.ReadableStream = undefined; - - - - - +
{{content-for "body-footer"}} {{content-for "test-body-footer"}} diff --git a/ui/tests/integration/components/job-page/periodic-test.js b/ui/tests/integration/components/job-page/periodic-test.js index fd2b3fd1c..aa55a8881 100644 --- a/ui/tests/integration/components/job-page/periodic-test.js +++ b/ui/tests/integration/components/job-page/periodic-test.js @@ -222,7 +222,8 @@ module('Integration | Component | job-page/periodic', function (hooks) { await render(commonTemplate); await startJob(); - expectError(assert, 'Could Not Start Job'); + + await expectError(assert, 'Could Not Start Job'); }); test('Each job row includes the submitted time', async function (assert) { diff --git a/ui/tests/integration/components/job-page/service-test.js b/ui/tests/integration/components/job-page/service-test.js index 79160c790..745c90f55 100644 --- a/ui/tests/integration/components/job-page/service-test.js +++ b/ui/tests/integration/components/job-page/service-test.js @@ -27,7 +27,6 @@ module('Integration | Component | job-page/service', function (hooks) { }); hooks.afterEach(function () { - Job.removeContext(); this.server.shutdown(); window.localStorage.clear(); }); @@ -125,7 +124,8 @@ module('Integration | Component | job-page/service', function (hooks) { await render(commonTemplate); await startJob(); - expectError(assert, 'Could Not Start Job'); + + await expectError(assert, 'Could Not Start Job'); }); test('Recent allocations shows allocations in the job context', async function (assert) { diff --git a/ui/tests/integration/components/multi-select-dropdown-test.js b/ui/tests/integration/components/multi-select-dropdown-test.js index 1d2f931be..0e865ce54 100644 --- a/ui/tests/integration/components/multi-select-dropdown-test.js +++ b/ui/tests/integration/components/multi-select-dropdown-test.js @@ -171,11 +171,7 @@ module('Integration | Component | multi-select dropdown', function (hooks) { find('[data-test-dropdown-options]'), 'Options are not shown on focus' ); - await triggerKeyEvent( - '[data-test-dropdown-trigger]', - 'keydown', - ARROW_DOWN - ); + await triggerKeyEvent('[data-test-dropdown-trigger]', 'keyup', ARROW_DOWN); assert.ok(find('[data-test-dropdown-options]'), 'Options are now shown'); assert.equal( document.activeElement, @@ -190,16 +186,8 @@ await render(commonTemplate); await focus('[data-test-dropdown-trigger]'); - await triggerKeyEvent( - '[data-test-dropdown-trigger]', - 'keydown', - ARROW_DOWN - ); - await triggerKeyEvent( - '[data-test-dropdown-trigger]', - 'keydown', - ARROW_DOWN - ); + await triggerKeyEvent('[data-test-dropdown-trigger]', 'keyup', ARROW_DOWN); + await triggerKeyEvent('[data-test-dropdown-trigger]', 'keyup', ARROW_DOWN); assert.equal( document.activeElement, find('[data-test-dropdown-option]'), @@ -213,12 +201,8 @@ await render(commonTemplate); await focus('[data-test-dropdown-trigger]'); - await triggerKeyEvent( - '[data-test-dropdown-trigger]', - 'keydown', - ARROW_DOWN - ); - await triggerKeyEvent('[data-test-dropdown-trigger]', 'keydown', TAB); + await triggerKeyEvent('[data-test-dropdown-trigger]', 'keyup', ARROW_DOWN); + await triggerKeyEvent('[data-test-dropdown-trigger]', 'keyup', TAB); assert.equal( document.activeElement, find('[data-test-dropdown-option]'), @@ -234,7 +218,7 @@ module('Integration | Component | multi-select dropdown', function (hooks) { await 
click('[data-test-dropdown-trigger]'); await focus('[data-test-dropdown-option]'); - await triggerKeyEvent('[data-test-dropdown-option]', 'keydown', ARROW_UP); + await triggerKeyEvent('[data-test-dropdown-option]', 'keyup', ARROW_UP); assert.equal( document.activeElement, find('[data-test-dropdown-option]'), @@ -250,7 +234,7 @@ module('Integration | Component | multi-select dropdown', function (hooks) { await click('[data-test-dropdown-trigger]'); await focus('[data-test-dropdown-option]'); - await triggerKeyEvent('[data-test-dropdown-option]', 'keydown', ARROW_DOWN); + await triggerKeyEvent('[data-test-dropdown-option]', 'keyup', ARROW_DOWN); assert.equal( document.activeElement, findAll('[data-test-dropdown-option]')[1], @@ -272,7 +256,7 @@ module('Integration | Component | multi-select dropdown', function (hooks) { const lastIndex = optionEls.length - 1; for (const [index, option] of optionEls.entries()) { - await triggerKeyEvent(option, 'keydown', ARROW_DOWN); + await triggerKeyEvent(option, 'keyup', ARROW_DOWN); if (index < lastIndex) { /* eslint-disable-next-line qunit/no-conditional-assertions */ @@ -284,7 +268,7 @@ module('Integration | Component | multi-select dropdown', function (hooks) { } } - await triggerKeyEvent(optionEls[lastIndex], 'keydown', ARROW_DOWN); + await triggerKeyEvent(optionEls[lastIndex], 'keyup', ARROW_DOWN); assert.equal( document.activeElement, optionEls[lastIndex], @@ -300,7 +284,7 @@ module('Integration | Component | multi-select dropdown', function (hooks) { await click('[data-test-dropdown-trigger]'); await focus('[data-test-dropdown-option]'); - await triggerKeyEvent('[data-test-dropdown-option]', 'keydown', SPACE); + await triggerKeyEvent('[data-test-dropdown-option]', 'keyup', SPACE); assert.ok(props.onSelect.called, 'onSelect was called'); const newSelection = props.onSelect.getCall(0).args[0]; @@ -357,17 +341,9 @@ module('Integration | Component | multi-select dropdown', function (hooks) { await render(commonTemplate); await focus('[data-test-dropdown-trigger]'); - await triggerKeyEvent( - '[data-test-dropdown-trigger]', - 'keydown', - ARROW_DOWN - ); - await triggerKeyEvent( - '[data-test-dropdown-trigger]', - 'keydown', - ARROW_DOWN - ); - await triggerKeyEvent('[data-test-dropdown-option]', 'keydown', ESC); + await triggerKeyEvent('[data-test-dropdown-trigger]', 'keyup', ARROW_DOWN); + await triggerKeyEvent('[data-test-dropdown-trigger]', 'keyup', ARROW_DOWN); + await triggerKeyEvent('[data-test-dropdown-option]', 'keyup', ESC); assert.notOk( find('[data-test-dropdown-options]'), diff --git a/ui/tests/integration/components/two-step-button-test.js b/ui/tests/integration/components/two-step-button-test.js index 3ef1be602..d615a066a 100644 --- a/ui/tests/integration/components/two-step-button-test.js +++ b/ui/tests/integration/components/two-step-button-test.js @@ -185,7 +185,7 @@ module('Integration | Component | two step button', function (hooks) { }); test('when disabled is true, the idle button is disabled', async function (assert) { - assert.expect(3); + assert.expect(2); const props = commonProperties(); props.disabled = true; @@ -194,12 +194,6 @@ module('Integration | Component | two step button', function (hooks) { assert.ok(TwoStepButton.isDisabled, 'The idle button is disabled'); - await TwoStepButton.idle(); - assert.ok( - find('[data-test-idle-button]'), - 'Still in the idle state after clicking' - ); - await componentA11yAudit(this.element, assert); }); }); diff --git a/ui/tests/pages/components/popover-menu.js 
b/ui/tests/pages/components/popover-menu.js index 8b38d2fb7..f7805c338 100644 --- a/ui/tests/pages/components/popover-menu.js +++ b/ui/tests/pages/components/popover-menu.js @@ -19,10 +19,10 @@ export default (scope) => ({ toggle: clickable('[data-test-popover-trigger]'), focus: focusable('[data-test-popover-trigger]'), - downArrow: triggerable('keydown', '[data-test-popover-trigger]', { + downArrow: triggerable('keyup', '[data-test-popover-trigger]', { eventProperties: { keyCode: ARROW_DOWN }, }), - focusNext: triggerable('keydown', '[data-test-popover-trigger]', { + focusNext: triggerable('keyup', '[data-test-popover-trigger]', { eventProperties: { keyCode: TAB }, }), esc: triggerable('keydown', '[data-test-popover-trigger]', { diff --git a/ui/tests/pages/components/stepper-input.js b/ui/tests/pages/components/stepper-input.js index 17b4af348..e3b74f0c3 100644 --- a/ui/tests/pages/components/stepper-input.js +++ b/ui/tests/pages/components/stepper-input.js @@ -22,7 +22,7 @@ export default (scope) => ({ focus: focusable(), blur: blurrable(), value: value(), - esc: triggerable('keydown', '', { eventProperties: { keyCode: 27 } }), + esc: triggerable('keyup', '', { eventProperties: { keyCode: 27 } }), isDisabled: attribute('disabled'), }, diff --git a/ui/tests/test-helper.js b/ui/tests/test-helper.js index a8efa1f76..96263f3f8 100644 --- a/ui/tests/test-helper.js +++ b/ui/tests/test-helper.js @@ -1,12 +1,13 @@ import 'core-js'; import Application from 'nomad-ui/app'; import config from 'nomad-ui/config/environment'; +import * as QUnit from 'qunit'; import { setApplication } from '@ember/test-helpers'; import start from 'ember-exam/test-support/start'; -import { useNativeEvents } from 'ember-cli-page-object/extend'; - -useNativeEvents(); +import { setup } from 'qunit-dom'; setApplication(Application.create(config.APP)); +setup(QUnit.assert); + start(); diff --git a/ui/tests/unit/adapters/job-test.js b/ui/tests/unit/adapters/job-test.js index 2b696ba19..e34b3c8d7 100644 --- a/ui/tests/unit/adapters/job-test.js +++ b/ui/tests/unit/adapters/job-test.js @@ -1,4 +1,4 @@ -import { run } from '@ember/runloop'; +import { next } from '@ember/runloop'; import { assign } from '@ember/polyfills'; import { settled } from '@ember/test-helpers'; import { setupTest } from 'ember-qunit'; @@ -290,7 +290,7 @@ module('Unit | Adapter | Job', function (hooks) { assert.equal(xhr.status, 0, 'Request is still pending'); // Schedule the cancelation before waiting - run.next(() => { + next(() => { controller.abort(); }); @@ -316,7 +316,7 @@ module('Unit | Adapter | Job', function (hooks) { assert.equal(xhr.status, 0, 'Request is still pending'); // Schedule the cancelation before waiting - run.next(() => { + next(() => { controller.abort(); }); @@ -342,7 +342,7 @@ module('Unit | Adapter | Job', function (hooks) { assert.equal(xhr.status, 0, 'Request is still pending'); // Schedule the cancelation before waiting - run.next(() => { + next(() => { controller.abort(); }); @@ -385,7 +385,7 @@ module('Unit | Adapter | Job', function (hooks) { ); // Schedule the cancelation and resolution before waiting - run.next(() => { + next(() => { controller1.abort(); pretender.resolve(xhr2); }); diff --git a/ui/tests/unit/adapters/volume-test.js b/ui/tests/unit/adapters/volume-test.js index 8eeace873..81be4e355 100644 --- a/ui/tests/unit/adapters/volume-test.js +++ b/ui/tests/unit/adapters/volume-test.js @@ -1,4 +1,4 @@ -import { run } from '@ember/runloop'; +import { next } from '@ember/runloop'; import { settled } from 
'@ember/test-helpers'; import { setupTest } from 'ember-qunit'; import { module, test } from 'qunit'; @@ -139,7 +139,7 @@ module('Unit | Adapter | Volume', function (hooks) { assert.equal(xhr.status, 0, 'Request is still pending'); // Schedule the cancelation before waiting - run.next(() => { + next(() => { controller.abort(); }); diff --git a/ui/tests/unit/serializers/application-test.js b/ui/tests/unit/serializers/application-test.js index b1f01389c..d9abad8cf 100644 --- a/ui/tests/unit/serializers/application-test.js +++ b/ui/tests/unit/serializers/application-test.js @@ -4,7 +4,9 @@ import ApplicationSerializer from 'nomad-ui/serializers/application'; import Model from '@ember-data/model'; import { attr } from '@ember-data/model'; +import classic from 'ember-classic-decorator'; +@classic class TestSerializer extends ApplicationSerializer { arrayNullOverrides = ['Things']; diff --git a/ui/yarn.lock b/ui/yarn.lock index 739b5ba96..da53db368 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -2,6 +2,13 @@ # yarn lockfile v1 +"@ampproject/remapping@^2.1.0": + version "2.1.2" + resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.1.2.tgz#4edca94973ded9630d20101cd8559cedb8d8bd34" + integrity sha512-hoyByceqwKirw7w3Z7gnIIZC3Wx3J484Y3L/cMpXFbr7d9ZQj2mODrirNzcJa+SM3UlpWXYvKV4RlRpFXlWgXg== + dependencies: + "@jridgewell/trace-mapping" "^0.3.0" + "@babel/code-frame@7.10.4": version "7.10.4" resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.4.tgz#168da1a36e90da68ae8d49c0f1b48c7c6249213a" @@ -9,13 +16,20 @@ dependencies: "@babel/highlight" "^7.10.4" -"@babel/code-frame@7.12.11", "@babel/code-frame@^7.0.0", "@babel/code-frame@^7.10.4", "@babel/code-frame@^7.12.11": +"@babel/code-frame@7.12.11", "@babel/code-frame@^7.0.0": version "7.12.11" resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.12.11.tgz#f4ad435aa263db935b8f10f2c552d23fb716a63f" integrity sha512-Zt1yodBx1UcyiePMSkWnU4hPqhwq7hGi2nFL1LeA3EUl+q2LQx16MISgJ0+z7dnmgvP9QtIleuETGOiOH1RcIw== dependencies: "@babel/highlight" "^7.10.4" +"@babel/code-frame@^7.10.4", "@babel/code-frame@^7.12.11", "@babel/code-frame@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.16.7.tgz#44416b6bd7624b998f5b1af5d470856c40138789" + integrity sha512-iAXqUn8IIeBTNd72xsFlgaXHkMBMt6y4HJp1tIaK465CWLT/fG1aqB7ykr95gHHmlBdGbFeWWfyB4NJJ0nmeIg== + dependencies: + "@babel/highlight" "^7.16.7" + "@babel/code-frame@^7.12.13": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.12.13.tgz#dcfc826beef65e75c50e21d3837d7d95798dd658" @@ -30,11 +44,6 @@ dependencies: "@babel/highlight" "^7.14.5" -"@babel/compat-data@^7.12.5", "@babel/compat-data@^7.12.7": - version "7.12.7" - resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.12.7.tgz#9329b4782a7d6bbd7eef57e11addf91ee3ef1e41" - integrity sha512-YaxPMGs/XIWtYqrdEOZOCPsVWfEoriXopnsz3/i7apYPXQ3698UFhS6dVT1KN5qOsWmVgw/FOrmQgpRaZayGsw== - "@babel/compat-data@^7.13.0", "@babel/compat-data@^7.13.8": version "7.13.12" resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.13.12.tgz#a8a5ccac19c200f9dd49624cac6e19d7be1236a1" @@ -50,6 +59,11 @@ resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.16.0.tgz#ea269d7f78deb3a7826c39a4048eecda541ebdaa" integrity sha512-DGjt2QZse5SGd9nfOSqO4WLJ8NN/oHkijbXbPrxuoJO3oIPJL3TciZs9FX+cOHNiY9E9l0opL8g7BmLe3T+9ew== +"@babel/compat-data@^7.16.4", "@babel/compat-data@^7.16.8", 
"@babel/compat-data@^7.17.0": + version "7.17.0" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.17.0.tgz#86850b8597ea6962089770952075dcaabb8dba34" + integrity sha512-392byTlpGWXMv4FbyWw3sAZ/FrW/DrwqLGXpy0mbyNe9Taqv1mg9yON5/o0cnr8XYCkFTZbC1eV+c+LAROgrng== + "@babel/core@7.12.9": version "7.12.9" resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.12.9.tgz#fd450c4ec10cdbb980e2928b7aa7a28484593fc8" @@ -72,7 +86,7 @@ semver "^5.4.1" source-map "^0.5.0" -"@babel/core@^7.0.0", "@babel/core@^7.1.6", "@babel/core@^7.10.2", "@babel/core@^7.12.0", "@babel/core@^7.12.3", "@babel/core@^7.3.4", "@babel/core@^7.7.0": +"@babel/core@^7.0.0", "@babel/core@^7.12.3": version "7.12.10" resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.12.10.tgz#b79a2e1b9f70ed3d84bbfb6d8c4ef825f606bccd" integrity sha512-eTAlQKq65zHfkHZV0sIVODCPGVgoo1HdBlbSLi9CqOzuZanMv2ihzY+4paiKr1mH+XmYESMAmJ/dpZ68eN6d8w== @@ -93,6 +107,27 @@ semver "^5.4.1" source-map "^0.5.0" +"@babel/core@^7.12.0", "@babel/core@^7.13.10", "@babel/core@^7.13.8", "@babel/core@^7.16.7", "@babel/core@^7.3.4": + version "7.17.5" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.17.5.tgz#6cd2e836058c28f06a4ca8ee7ed955bbf37c8225" + integrity sha512-/BBMw4EvjmyquN5O+t5eh0+YqB3XXJkYD2cjKpYtWOfFy4lQ4UozNSmxAcWT8r2XtZs0ewG+zrfsqeR15i1ajA== + dependencies: + "@ampproject/remapping" "^2.1.0" + "@babel/code-frame" "^7.16.7" + "@babel/generator" "^7.17.3" + "@babel/helper-compilation-targets" "^7.16.7" + "@babel/helper-module-transforms" "^7.16.7" + "@babel/helpers" "^7.17.2" + "@babel/parser" "^7.17.3" + "@babel/template" "^7.16.7" + "@babel/traverse" "^7.17.3" + "@babel/types" "^7.17.0" + convert-source-map "^1.7.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.1.2" + semver "^6.3.0" + "@babel/core@^7.12.10": version "7.15.5" resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.15.5.tgz#f8ed9ace730722544609f90c9bb49162dc3bf5b9" @@ -114,12 +149,12 @@ semver "^6.3.0" source-map "^0.5.0" -"@babel/generator@^7.12.10", "@babel/generator@^7.12.11": - version "7.12.11" - resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.12.11.tgz#98a7df7b8c358c9a37ab07a24056853016aba3af" - integrity sha512-Ggg6WPOJtSi8yYQvLVjG8F/TlpWDlKx0OpS4Kt+xMQPs5OaGYWy+v1A+1TvxI6sAMGZpKWWoAQ1DaeQbImlItA== +"@babel/generator@^7.12.10", "@babel/generator@^7.12.11", "@babel/generator@^7.17.3": + version "7.17.3" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.17.3.tgz#a2c30b0c4f89858cb87050c3ffdfd36bdf443200" + integrity sha512-+R6Dctil/MgUsZsZAkYgK+ADNSZzJRRy0TvY65T71z/CR854xHQ1EweBYXdfT+HNeN7w0cSJJEzgxZMv40pxsg== dependencies: - "@babel/types" "^7.12.11" + "@babel/types" "^7.17.0" jsesc "^2.5.1" source-map "^0.5.0" @@ -141,13 +176,6 @@ jsesc "^2.5.1" source-map "^0.5.0" -"@babel/helper-annotate-as-pure@^7.10.4": - version "7.12.10" - resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.12.10.tgz#54ab9b000e60a93644ce17b3f37d313aaf1d115d" - integrity sha512-XplmVbC1n+KY6jL8/fgLVXXUauDIB+lD5+GsQEh6F6GBF1dq1qy4DP4yXWzDKcoqXB3X58t61e85Fitoww4JVQ== - dependencies: - "@babel/types" "^7.12.10" - "@babel/helper-annotate-as-pure@^7.14.5", "@babel/helper-annotate-as-pure@^7.15.4": version "7.15.4" resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.15.4.tgz#3d0e43b00c5e49fdb6c57e421601a7a658d5f835" @@ -155,13 +183,12 @@ dependencies: "@babel/types" "^7.15.4" 
-"@babel/helper-builder-binary-assignment-operator-visitor@^7.10.4": - version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.10.4.tgz#bb0b75f31bf98cbf9ff143c1ae578b87274ae1a3" - integrity sha512-L0zGlFrGWZK4PbT8AszSfLTM5sDU1+Az/En9VrdT8/LmEiJt4zXt+Jve9DCAnQcbqDhCI+29y/L93mrDzddCcg== +"@babel/helper-annotate-as-pure@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.16.7.tgz#bb2339a7534a9c128e3102024c60760a3a7f3862" + integrity sha512-s6t2w/IPQVTAET1HitoowRGXooX8mCgtuP5195wD/QJPV6wYjpujCGF7JuMODVX2ZAJOf1GT6DT9MHEZvLOFSw== dependencies: - "@babel/helper-explode-assignable-expression" "^7.10.4" - "@babel/types" "^7.10.4" + "@babel/types" "^7.16.7" "@babel/helper-builder-binary-assignment-operator-visitor@^7.14.5": version "7.15.4" @@ -171,15 +198,23 @@ "@babel/helper-explode-assignable-expression" "^7.15.4" "@babel/types" "^7.15.4" -"@babel/helper-compilation-targets@^7.12.0", "@babel/helper-compilation-targets@^7.12.5": - version "7.12.5" - resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.12.5.tgz#cb470c76198db6a24e9dbc8987275631e5d29831" - integrity sha512-+qH6NrscMolUlzOYngSBMIOQpKUGPPsc61Bu5W10mg84LxZ7cmvnBHzARKbDoFxVvqqAbj6Tg6N7bSrWSPXMyw== +"@babel/helper-builder-binary-assignment-operator-visitor@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.16.7.tgz#38d138561ea207f0f69eb1626a418e4f7e6a580b" + integrity sha512-C6FdbRaxYjwVu/geKW4ZeQ0Q31AftgRcdSnZ5/jsH6BzCJbtvXvhpfkbkThYSuutZA7nCXpPR6AD9zd1dprMkA== dependencies: - "@babel/compat-data" "^7.12.5" - "@babel/helper-validator-option" "^7.12.1" - browserslist "^4.14.5" - semver "^5.5.0" + "@babel/helper-explode-assignable-expression" "^7.16.7" + "@babel/types" "^7.16.7" + +"@babel/helper-compilation-targets@^7.12.0", "@babel/helper-compilation-targets@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.16.7.tgz#06e66c5f299601e6c7da350049315e83209d551b" + integrity sha512-mGojBwIWcwGD6rfqgRXVlVYmPAv7eOpIemUG3dGnDdCY4Pae70ROij3XmfrH6Fa1h1aiDylpglbZyktfzyo/hA== + dependencies: + "@babel/compat-data" "^7.16.4" + "@babel/helper-validator-option" "^7.16.7" + browserslist "^4.17.5" + semver "^6.3.0" "@babel/helper-compilation-targets@^7.13.0": version "7.13.10" @@ -211,16 +246,18 @@ browserslist "^4.16.6" semver "^6.3.0" -"@babel/helper-create-class-features-plugin@^7.12.1", "@babel/helper-create-class-features-plugin@^7.5.5", "@babel/helper-create-class-features-plugin@^7.8.3": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.12.1.tgz#3c45998f431edd4a9214c5f1d3ad1448a6137f6e" - integrity sha512-hkL++rWeta/OVOBTRJc9a5Azh5mt5WgZUGAKMD8JM141YsE08K//bp1unBBieO6rUKkIPyUE0USQ30jAy3Sk1w== +"@babel/helper-create-class-features-plugin@^7.12.1", "@babel/helper-create-class-features-plugin@^7.16.10", "@babel/helper-create-class-features-plugin@^7.16.7", "@babel/helper-create-class-features-plugin@^7.17.1", "@babel/helper-create-class-features-plugin@^7.17.6", "@babel/helper-create-class-features-plugin@^7.5.5": + version "7.17.6" + resolved 
"https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.17.6.tgz#3778c1ed09a7f3e65e6d6e0f6fbfcc53809d92c9" + integrity sha512-SogLLSxXm2OkBbSsHZMM4tUi8fUzjs63AT/d0YQIzr6GSd8Hxsbk2KYDX0k0DweAzGMj/YWeiCsorIdtdcW8Eg== dependencies: - "@babel/helper-function-name" "^7.10.4" - "@babel/helper-member-expression-to-functions" "^7.12.1" - "@babel/helper-optimise-call-expression" "^7.10.4" - "@babel/helper-replace-supers" "^7.12.1" - "@babel/helper-split-export-declaration" "^7.10.4" + "@babel/helper-annotate-as-pure" "^7.16.7" + "@babel/helper-environment-visitor" "^7.16.7" + "@babel/helper-function-name" "^7.16.7" + "@babel/helper-member-expression-to-functions" "^7.16.7" + "@babel/helper-optimise-call-expression" "^7.16.7" + "@babel/helper-replace-supers" "^7.16.7" + "@babel/helper-split-export-declaration" "^7.16.7" "@babel/helper-create-class-features-plugin@^7.13.0": version "7.13.11" @@ -245,13 +282,16 @@ "@babel/helper-replace-supers" "^7.15.4" "@babel/helper-split-export-declaration" "^7.15.4" -"@babel/helper-create-regexp-features-plugin@^7.12.1": - version "7.12.7" - resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.12.7.tgz#2084172e95443fa0a09214ba1bb328f9aea1278f" - integrity sha512-idnutvQPdpbduutvi3JVfEgcVIHooQnhvhx0Nk9isOINOIGYkZea1Pk2JlJRiUnMefrlvr0vkByATBY/mB4vjQ== +"@babel/helper-create-class-features-plugin@^7.8.3": + version "7.12.1" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.12.1.tgz#3c45998f431edd4a9214c5f1d3ad1448a6137f6e" + integrity sha512-hkL++rWeta/OVOBTRJc9a5Azh5mt5WgZUGAKMD8JM141YsE08K//bp1unBBieO6rUKkIPyUE0USQ30jAy3Sk1w== dependencies: - "@babel/helper-annotate-as-pure" "^7.10.4" - regexpu-core "^4.7.1" + "@babel/helper-function-name" "^7.10.4" + "@babel/helper-member-expression-to-functions" "^7.12.1" + "@babel/helper-optimise-call-expression" "^7.10.4" + "@babel/helper-replace-supers" "^7.12.1" + "@babel/helper-split-export-declaration" "^7.10.4" "@babel/helper-create-regexp-features-plugin@^7.14.5": version "7.14.5" @@ -261,14 +301,13 @@ "@babel/helper-annotate-as-pure" "^7.14.5" regexpu-core "^4.7.1" -"@babel/helper-define-map@^7.10.4": - version "7.10.5" - resolved "https://registry.yarnpkg.com/@babel/helper-define-map/-/helper-define-map-7.10.5.tgz#b53c10db78a640800152692b13393147acb9bb30" - integrity sha512-fMw4kgFB720aQFXSVaXr79pjjcW5puTCM16+rECJ/plGS+zByelE8l9nCpV1GibxTnFVmUuYG9U8wYfQHdzOEQ== +"@babel/helper-create-regexp-features-plugin@^7.16.7": + version "7.17.0" + resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.17.0.tgz#1dcc7d40ba0c6b6b25618997c5dbfd310f186fe1" + integrity sha512-awO2So99wG6KnlE+TPs6rn83gCz5WlEePJDTnLEqbchMVrBeAujURVphRdigsk094VhvZehFoNOihSlcBjwsXA== dependencies: - "@babel/helper-function-name" "^7.10.4" - "@babel/types" "^7.10.5" - lodash "^4.17.19" + "@babel/helper-annotate-as-pure" "^7.16.7" + regexpu-core "^5.0.1" "@babel/helper-define-polyfill-provider@^0.1.5": version "0.1.5" @@ -298,12 +337,26 @@ resolve "^1.14.2" semver "^6.1.2" -"@babel/helper-explode-assignable-expression@^7.10.4": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.12.1.tgz#8006a466695c4ad86a2a5f2fb15b5f2c31ad5633" - integrity 
sha512-dmUwH8XmlrUpVqgtZ737tK88v07l840z9j3OEhCLwKTkjlvKpfqXVIZ0wpK3aeOxspwGrf/5AP5qLx4rO3w5rA== +"@babel/helper-define-polyfill-provider@^0.3.1": + version "0.3.1" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.3.1.tgz#52411b445bdb2e676869e5a74960d2d3826d2665" + integrity sha512-J9hGMpJQmtWmj46B3kBHmL38UhJGhYX7eqkcq+2gsstyYt341HmPeWspihX43yVRA0mS+8GGk2Gckc7bY/HCmA== dependencies: - "@babel/types" "^7.12.1" + "@babel/helper-compilation-targets" "^7.13.0" + "@babel/helper-module-imports" "^7.12.13" + "@babel/helper-plugin-utils" "^7.13.0" + "@babel/traverse" "^7.13.0" + debug "^4.1.1" + lodash.debounce "^4.0.8" + resolve "^1.14.2" + semver "^6.1.2" + +"@babel/helper-environment-visitor@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.16.7.tgz#ff484094a839bde9d89cd63cba017d7aae80ecd7" + integrity sha512-SLLb0AAn6PkUeAfKJCCOl9e1R53pQlGAfc4y4XuMRZfqeMYLE0dM1LMhqbGAlGQY0lfw5/ohoYWAe9V1yibRag== + dependencies: + "@babel/types" "^7.16.7" "@babel/helper-explode-assignable-expression@^7.15.4": version "7.15.4" @@ -312,14 +365,21 @@ dependencies: "@babel/types" "^7.15.4" -"@babel/helper-function-name@^7.10.4", "@babel/helper-function-name@^7.12.11": - version "7.12.11" - resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.12.11.tgz#1fd7738aee5dcf53c3ecff24f1da9c511ec47b42" - integrity sha512-AtQKjtYNolKNi6nNNVLQ27CP6D9oFR6bq/HPYSizlzbp7uC1M59XJe8L+0uXjbIaZaUJF99ruHqVGiKXU/7ybA== +"@babel/helper-explode-assignable-expression@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-explode-assignable-expression/-/helper-explode-assignable-expression-7.16.7.tgz#12a6d8522fdd834f194e868af6354e8650242b7a" + integrity sha512-KyUenhWMC8VrxzkGP0Jizjo4/Zx+1nNZhgocs+gLzyZyB8SHidhoq9KK/8Ato4anhwsivfkBLftky7gvzbZMtQ== dependencies: - "@babel/helper-get-function-arity" "^7.12.10" - "@babel/template" "^7.12.7" - "@babel/types" "^7.12.11" + "@babel/types" "^7.16.7" + +"@babel/helper-function-name@^7.10.4", "@babel/helper-function-name@^7.12.11", "@babel/helper-function-name@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.16.7.tgz#f1ec51551fb1c8956bc8dd95f38523b6cf375f8f" + integrity sha512-QfDfEnIUyyBSR3HtrtGECuZ6DAyCkYFp7GHl75vFtTnn6pjKeK0T1DB5lLkFvBea8MdaiUABx3osbgLyInoejA== + dependencies: + "@babel/helper-get-function-arity" "^7.16.7" + "@babel/template" "^7.16.7" + "@babel/types" "^7.16.7" "@babel/helper-function-name@^7.12.13": version "7.12.13" @@ -339,13 +399,6 @@ "@babel/template" "^7.15.4" "@babel/types" "^7.15.4" -"@babel/helper-get-function-arity@^7.12.10": - version "7.12.10" - resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.10.tgz#b158817a3165b5faa2047825dfa61970ddcc16cf" - integrity sha512-mm0n5BPjR06wh9mPQaDdXWDoll/j5UpCAPl1x8fS71GHm7HA6Ua2V4ylG1Ju8lvcTOietbPNNPaSilKj+pj+Ag== - dependencies: - "@babel/types" "^7.12.10" - "@babel/helper-get-function-arity@^7.12.13": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.12.13.tgz#bc63451d403a3b3082b97e1d8b3fe5bd4091e583" @@ -360,12 +413,12 @@ dependencies: "@babel/types" "^7.15.4" -"@babel/helper-hoist-variables@^7.10.4": - version "7.10.4" - resolved 
"https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.10.4.tgz#d49b001d1d5a68ca5e6604dda01a6297f7c9381e" - integrity sha512-wljroF5PgCk2juF69kanHVs6vrLwIPNp6DLD+Lrl3hoQ3PpPPikaDRNFA+0t81NOoMt2DL6WW/mdU8k4k6ZzuA== +"@babel/helper-get-function-arity@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-get-function-arity/-/helper-get-function-arity-7.16.7.tgz#ea08ac753117a669f1508ba06ebcc49156387419" + integrity sha512-flc+RLSOBXzNzVhcLu6ujeHUrD6tANAOU5ojrRx/as+tbzf8+stUCj7+IfRRoAbEZqj/ahXEMsjhOhgeZsrnTw== dependencies: - "@babel/types" "^7.10.4" + "@babel/types" "^7.16.7" "@babel/helper-hoist-variables@^7.15.4": version "7.15.4" @@ -374,12 +427,19 @@ dependencies: "@babel/types" "^7.15.4" -"@babel/helper-member-expression-to-functions@^7.12.1", "@babel/helper-member-expression-to-functions@^7.12.7": - version "7.12.7" - resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.12.7.tgz#aa77bd0396ec8114e5e30787efa78599d874a855" - integrity sha512-DCsuPyeWxeHgh1Dus7APn7iza42i/qXqiFPWyBDdOFtvS581JQePsc1F/nD+fHrcswhLlRc2UpYS1NwERxZhHw== +"@babel/helper-hoist-variables@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.16.7.tgz#86bcb19a77a509c7b77d0e22323ef588fa58c246" + integrity sha512-m04d/0Op34H5v7pbZw6pSKP7weA6lsMvfiIAMeIvkY/R4xQtBSMFEigu9QTZ2qB/9l22vsxtM8a+Q8CzD255fg== dependencies: - "@babel/types" "^7.12.7" + "@babel/types" "^7.16.7" + +"@babel/helper-member-expression-to-functions@^7.12.1", "@babel/helper-member-expression-to-functions@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.16.7.tgz#42b9ca4b2b200123c3b7e726b0ae5153924905b0" + integrity sha512-VtJ/65tYiU/6AbMTDwyoXGPKHgTsfRarivm+YbB5uAzKUyuPjgZSgAFeG87FCigc7KNHu2Pegh1XIT3lXjvz3Q== + dependencies: + "@babel/types" "^7.16.7" "@babel/helper-member-expression-to-functions@^7.13.0", "@babel/helper-member-expression-to-functions@^7.13.12": version "7.13.12" @@ -395,7 +455,7 @@ dependencies: "@babel/types" "^7.15.4" -"@babel/helper-module-imports@^7.0.0", "@babel/helper-module-imports@^7.12.1", "@babel/helper-module-imports@^7.12.5", "@babel/helper-module-imports@^7.8.3": +"@babel/helper-module-imports@^7.0.0", "@babel/helper-module-imports@^7.8.3": version "7.12.5" resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.12.5.tgz#1bfc0229f794988f76ed0a4d4e90860850b54dfb" integrity sha512-SR713Ogqg6++uexFRORf/+nPXMmWIn80TALu0uaFb+iQIUoR7bOC7zBWyzBs5b3tBBJXuyD0cRu1F15GyzjOWA== @@ -409,6 +469,13 @@ dependencies: "@babel/types" "^7.13.12" +"@babel/helper-module-imports@^7.12.5", "@babel/helper-module-imports@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.16.7.tgz#25612a8091a999704461c8a222d0efec5d091437" + integrity sha512-LVtS6TqjJHFc+nYeITRo6VLXve70xmq7wPhWTqDJusJEgGmkAACWwMiTNrvfoQo6hEhFwAIixNkvB0jPXDL8Wg== + dependencies: + "@babel/types" "^7.16.7" + "@babel/helper-module-imports@^7.14.5", "@babel/helper-module-imports@^7.15.4": version "7.15.4" resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.15.4.tgz#e18007d230632dea19b47853b984476e7b4e103f" @@ -416,20 +483,19 @@ dependencies: "@babel/types" "^7.15.4" -"@babel/helper-module-transforms@^7.12.1": - version "7.12.1" - 
resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.12.1.tgz#7954fec71f5b32c48e4b303b437c34453fd7247c" - integrity sha512-QQzehgFAZ2bbISiCpmVGfiGux8YVFXQ0abBic2Envhej22DVXV9nCFaS5hIQbkyo1AdGb+gNME2TSh3hYJVV/w== +"@babel/helper-module-transforms@^7.12.1", "@babel/helper-module-transforms@^7.16.7": + version "7.17.6" + resolved "https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.17.6.tgz#3c3b03cc6617e33d68ef5a27a67419ac5199ccd0" + integrity sha512-2ULmRdqoOMpdvkbT8jONrZML/XALfzxlb052bldftkicAUy8AxSCkD5trDPQcwHNmolcl7wP6ehNqMlyUw6AaA== dependencies: - "@babel/helper-module-imports" "^7.12.1" - "@babel/helper-replace-supers" "^7.12.1" - "@babel/helper-simple-access" "^7.12.1" - "@babel/helper-split-export-declaration" "^7.11.0" - "@babel/helper-validator-identifier" "^7.10.4" - "@babel/template" "^7.10.4" - "@babel/traverse" "^7.12.1" - "@babel/types" "^7.12.1" - lodash "^4.17.19" + "@babel/helper-environment-visitor" "^7.16.7" + "@babel/helper-module-imports" "^7.16.7" + "@babel/helper-simple-access" "^7.16.7" + "@babel/helper-split-export-declaration" "^7.16.7" + "@babel/helper-validator-identifier" "^7.16.7" + "@babel/template" "^7.16.7" + "@babel/traverse" "^7.17.3" + "@babel/types" "^7.17.0" "@babel/helper-module-transforms@^7.13.0": version "7.13.12" @@ -459,12 +525,12 @@ "@babel/traverse" "^7.15.4" "@babel/types" "^7.15.6" -"@babel/helper-optimise-call-expression@^7.10.4", "@babel/helper-optimise-call-expression@^7.12.10": - version "7.12.10" - resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.12.10.tgz#94ca4e306ee11a7dd6e9f42823e2ac6b49881e2d" - integrity sha512-4tpbU0SrSTjjt65UMWSrUOPZTsgvPgGG4S8QSTNHacKzpS51IVWGDj0yCwyeZND/i+LSN2g/O63jEXEWm49sYQ== +"@babel/helper-optimise-call-expression@^7.10.4", "@babel/helper-optimise-call-expression@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.16.7.tgz#a34e3560605abbd31a18546bd2aad3e6d9a174f2" + integrity sha512-EtgBhg7rd/JcnpZFXpBy0ze1YRfdm7BnBX4uKMBd3ixa3RGAE002JZB66FJyNH7g0F38U05pXmA5P8cBh7z+1w== dependencies: - "@babel/types" "^7.12.10" + "@babel/types" "^7.16.7" "@babel/helper-optimise-call-expression@^7.12.13": version "7.12.13" @@ -480,30 +546,21 @@ dependencies: "@babel/types" "^7.15.4" -"@babel/helper-plugin-utils@7.10.4", "@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.8.3": +"@babel/helper-plugin-utils@7.10.4": version "7.10.4" resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz#2f75a831269d4f677de49986dff59927533cf375" integrity sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg== -"@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.8.0": - version "7.14.5" - resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.14.5.tgz#5ac822ce97eec46741ab70a517971e443a70c5a9" - integrity sha512-/37qQCE3K0vvZKwoK4XU/irIJQdIfCJuhU5eKnNxpFDsOkgFaUAwbv+RYw6eYgsC0E4hS7r5KqGULUogqui0fQ== +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.16.7", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3": + version "7.16.7" + resolved 
"https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.16.7.tgz#aa3a8ab4c3cceff8e65eb9e73d87dc4ff320b2f5" + integrity sha512-Qg3Nk7ZxpgMrsox6HreY1ZNKdBq7K72tDSliA6dCl5f007jR4ne8iD5UzuNnCJH2xBf2BEEVGr+/OL6Gdp7RxA== "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.13.0": version "7.13.0" resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.13.0.tgz#806526ce125aed03373bc416a828321e3a6a33af" integrity sha512-ZPafIPSwzUlAoWT8DKs1W2VyF2gOWthGd5NGFMsBcMMol+ZhK+EQY/e6V96poa6PA/Bh+C9plWN0hXO1uB8AfQ== -"@babel/helper-remap-async-to-generator@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.12.1.tgz#8c4dbbf916314f6047dc05e6a2217074238347fd" - integrity sha512-9d0KQCRM8clMPcDwo8SevNs+/9a8yWVVmaE80FGJcEP8N1qToREmWEGnBn8BUlJhYRFz6fqxeRL1sl5Ogsed7A== - dependencies: - "@babel/helper-annotate-as-pure" "^7.10.4" - "@babel/helper-wrap-function" "^7.10.4" - "@babel/types" "^7.12.1" - "@babel/helper-remap-async-to-generator@^7.14.5", "@babel/helper-remap-async-to-generator@^7.15.4": version "7.15.4" resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.15.4.tgz#2637c0731e4c90fbf58ac58b50b2b5a192fc970f" @@ -513,15 +570,25 @@ "@babel/helper-wrap-function" "^7.15.4" "@babel/types" "^7.15.4" -"@babel/helper-replace-supers@^7.12.1": - version "7.12.11" - resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.12.11.tgz#ea511658fc66c7908f923106dd88e08d1997d60d" - integrity sha512-q+w1cqmhL7R0FNzth/PLLp2N+scXEK/L2AHbXUyydxp828F4FEa5WcVoqui9vFRiHDQErj9Zof8azP32uGVTRA== +"@babel/helper-remap-async-to-generator@^7.16.8": + version "7.16.8" + resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.16.8.tgz#29ffaade68a367e2ed09c90901986918d25e57e3" + integrity sha512-fm0gH7Flb8H51LqJHy3HJ3wnE1+qtYR2A99K06ahwrawLdOFsCEWjZOrYricXJHoPSudNKxrMBUPEIPxiIIvBw== dependencies: - "@babel/helper-member-expression-to-functions" "^7.12.7" - "@babel/helper-optimise-call-expression" "^7.12.10" - "@babel/traverse" "^7.12.10" - "@babel/types" "^7.12.11" + "@babel/helper-annotate-as-pure" "^7.16.7" + "@babel/helper-wrap-function" "^7.16.8" + "@babel/types" "^7.16.8" + +"@babel/helper-replace-supers@^7.12.1", "@babel/helper-replace-supers@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.16.7.tgz#e9f5f5f32ac90429c1a4bdec0f231ef0c2838ab1" + integrity sha512-y9vsWilTNaVnVh6xiJfABzsNpgDPKev9HnAgz6Gb1p6UUwf9NepdlsV7VXGCftJM+jqD5f7JIEubcpLjZj5dBw== + dependencies: + "@babel/helper-environment-visitor" "^7.16.7" + "@babel/helper-member-expression-to-functions" "^7.16.7" + "@babel/helper-optimise-call-expression" "^7.16.7" + "@babel/traverse" "^7.16.7" + "@babel/types" "^7.16.7" "@babel/helper-replace-supers@^7.13.0", "@babel/helper-replace-supers@^7.13.12": version "7.13.12" @@ -543,13 +610,6 @@ "@babel/traverse" "^7.15.4" "@babel/types" "^7.15.4" -"@babel/helper-simple-access@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.12.1.tgz#32427e5aa61547d38eb1e6eaf5fd1426fdad9136" - integrity sha512-OxBp7pMrjVewSSC8fXDFrHrBcJATOOFssZwv16F3/6Xtc138GHybBfPbm9kfiqQHKhYQrlamWILwlDCeyMFEaA== - dependencies: - "@babel/types" "^7.12.1" - "@babel/helper-simple-access@^7.13.12": 
version "7.13.12" resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.13.12.tgz#dd6c538afb61819d205a012c31792a39c7a5eaf6" @@ -564,12 +624,19 @@ dependencies: "@babel/types" "^7.15.4" -"@babel/helper-skip-transparent-expression-wrappers@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.12.1.tgz#462dc63a7e435ade8468385c63d2b84cce4b3cbf" - integrity sha512-Mf5AUuhG1/OCChOJ/HcADmvcHM42WJockombn8ATJG3OnyiSxBK/Mm5x78BQWvmtXZKHgbjdGL2kin/HOLlZGA== +"@babel/helper-simple-access@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.16.7.tgz#d656654b9ea08dbb9659b69d61063ccd343ff0f7" + integrity sha512-ZIzHVyoeLMvXMN/vok/a4LWRy8G2v205mNP0XOuf9XRLyX5/u9CnVulUtDgUTama3lT+bf/UqucuZjqiGuTS1g== dependencies: - "@babel/types" "^7.12.1" + "@babel/types" "^7.16.7" + +"@babel/helper-skip-transparent-expression-wrappers@^7.12.1", "@babel/helper-skip-transparent-expression-wrappers@^7.16.0": + version "7.16.0" + resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.16.0.tgz#0ee3388070147c3ae051e487eca3ebb0e2e8bb09" + integrity sha512-+il1gTy0oHwUsBQZyJvukbB4vPMdcYBrFHa0Uc4AizLxbq6BOYC51Rv4tWocX9BLBDLZ4kc6qUFpQ6HRgL+3zw== + dependencies: + "@babel/types" "^7.16.0" "@babel/helper-skip-transparent-expression-wrappers@^7.14.5", "@babel/helper-skip-transparent-expression-wrappers@^7.15.4": version "7.15.4" @@ -578,12 +645,12 @@ dependencies: "@babel/types" "^7.15.4" -"@babel/helper-split-export-declaration@^7.10.4", "@babel/helper-split-export-declaration@^7.11.0", "@babel/helper-split-export-declaration@^7.12.11": - version "7.12.11" - resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.12.11.tgz#1b4cc424458643c47d37022223da33d76ea4603a" - integrity sha512-LsIVN8j48gHgwzfocYUSkO/hjYAOJqlpJEc7tGXcIm4cubjVUf8LGW6eWRyxEu7gA25q02p0rQUWoCI33HNS5g== +"@babel/helper-split-export-declaration@^7.10.4", "@babel/helper-split-export-declaration@^7.12.11", "@babel/helper-split-export-declaration@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.16.7.tgz#0b648c0c42da9d3920d85ad585f2778620b8726b" + integrity sha512-xbWoy/PFoxSWazIToT9Sif+jJTlrMcndIsaOKvTA6u7QEo7ilkRZpjew18/W3c7nm8fXdUDXh02VXTbZ0pGDNw== dependencies: - "@babel/types" "^7.12.11" + "@babel/types" "^7.16.7" "@babel/helper-split-export-declaration@^7.12.13": version "7.12.13" @@ -599,21 +666,16 @@ dependencies: "@babel/types" "^7.15.4" -"@babel/helper-validator-identifier@^7.10.4", "@babel/helper-validator-identifier@^7.12.11": - version "7.12.11" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.12.11.tgz#c9a1f021917dcb5ccf0d4e453e399022981fc9ed" - integrity sha512-np/lG3uARFybkoHokJUmf1QfEvRVCPbmQeUQpKow5cQ3xWrV9i3rUHodKDJPQfTVX61qKi+UdYk8kik84n7XOw== +"@babel/helper-validator-identifier@^7.12.11", "@babel/helper-validator-identifier@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.16.7.tgz#e8c602438c4a8195751243da9031d1607d247cad" + integrity sha512-hsEnFemeiW4D08A5gUAZxLBTXpZ39P+a+DGDsHw1yxqyQ/jzFEnxf5uTEGp+3bzAbNOxU1paTgYS4ECU/IgfDw== 
"@babel/helper-validator-identifier@^7.14.5", "@babel/helper-validator-identifier@^7.14.9", "@babel/helper-validator-identifier@^7.15.7": version "7.15.7" resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.15.7.tgz#220df993bfe904a4a6b02ab4f3385a5ebf6e2389" integrity sha512-K4JvCtQqad9OY2+yTU8w+E82ywk/fe+ELNlt1G8z3bVGlZfn/hOcQQsUhGhW/N+tb3fxK800wLtKOE/aM0m72w== -"@babel/helper-validator-option@^7.12.1", "@babel/helper-validator-option@^7.12.11": - version "7.12.11" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.12.11.tgz#d66cb8b7a3e7fe4c6962b32020a131ecf0847f4f" - integrity sha512-TBFCyj939mFSdeX7U7DDj32WtzYY7fDcalgq8v3fBZMNOJQNn7nOYzMaUCiPxPYfCup69mtIpqlKgMZLvQ8Xhw== - "@babel/helper-validator-option@^7.12.17": version "7.12.17" resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.12.17.tgz#d1fbf012e1a79b7eebbfdc6d270baaf8d9eb9831" @@ -624,15 +686,10 @@ resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.14.5.tgz#6e72a1fff18d5dfcb878e1e62f1a021c4b72d5a3" integrity sha512-OX8D5eeX4XwcroVW45NMvoYaIuFI+GQpA2a8Gi+X/U/cDUIRsV37qQfF905F0htTRCREQIB4KqPeaveRJUl3Ow== -"@babel/helper-wrap-function@^7.10.4": - version "7.12.3" - resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.12.3.tgz#3332339fc4d1fbbf1c27d7958c27d34708e990d9" - integrity sha512-Cvb8IuJDln3rs6tzjW3Y8UeelAOdnpB8xtQ4sme2MSZ9wOxrbThporC0y/EtE16VAtoyEfLM404Xr1e0OOp+ow== - dependencies: - "@babel/helper-function-name" "^7.10.4" - "@babel/template" "^7.10.4" - "@babel/traverse" "^7.10.4" - "@babel/types" "^7.10.4" +"@babel/helper-validator-option@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.16.7.tgz#b203ce62ce5fe153899b617c08957de860de4d23" + integrity sha512-TRtenOuRUVo9oIQGPC5G9DgK4743cdxvtOw0weQNpZXaS16SCBi5MNjZF8vba3ETURjZpTbVn7Vvcf2eAwFozQ== "@babel/helper-wrap-function@^7.15.4": version "7.15.4" @@ -644,14 +701,24 @@ "@babel/traverse" "^7.15.4" "@babel/types" "^7.15.4" -"@babel/helpers@^7.12.5": - version "7.12.5" - resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.12.5.tgz#1a1ba4a768d9b58310eda516c449913fe647116e" - integrity sha512-lgKGMQlKqA8meJqKsW6rUnc4MdUk35Ln0ATDqdM1a/UpARODdI4j5Y5lVfUScnSNkJcdCRAaWkspykNoFg9sJA== +"@babel/helper-wrap-function@^7.16.8": + version "7.16.8" + resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.16.8.tgz#58afda087c4cd235de92f7ceedebca2c41274200" + integrity sha512-8RpyRVIAW1RcDDGTA+GpPAwV22wXCfKOoM9bet6TLkGIFTkRQSkH1nMQ5Yet4MpoXe1ZwHPVtNasc2w0uZMqnw== dependencies: - "@babel/template" "^7.10.4" - "@babel/traverse" "^7.12.5" - "@babel/types" "^7.12.5" + "@babel/helper-function-name" "^7.16.7" + "@babel/template" "^7.16.7" + "@babel/traverse" "^7.16.8" + "@babel/types" "^7.16.8" + +"@babel/helpers@^7.12.5", "@babel/helpers@^7.17.2": + version "7.17.2" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.17.2.tgz#23f0a0746c8e287773ccd27c14be428891f63417" + integrity sha512-0Qu7RLR1dILozr/6M0xgj+DFPmi6Bnulgm9M8BVa9ZCWxDqlSnqt3cf8IDPB5m45sVXUZ0kuQAgUrdSFFH79fQ== + dependencies: + "@babel/template" "^7.16.7" + "@babel/traverse" "^7.17.0" + "@babel/types" "^7.17.0" "@babel/helpers@^7.15.4": version "7.15.4" @@ -662,12 +729,12 @@ "@babel/traverse" "^7.15.4" "@babel/types" "^7.15.4" -"@babel/highlight@^7.10.4": - 
version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.10.4.tgz#7d1bdfd65753538fabe6c38596cdb76d9ac60143" - integrity sha512-i6rgnR/YgPEQzZZnbTHHuZdlE8qyoBNalD6F+q4vAFlcMEcqmkoG+mPqJYJCo63qPf74+Y1UZsl3l6f7/RIkmA== +"@babel/highlight@^7.10.4", "@babel/highlight@^7.16.7": + version "7.16.10" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.16.10.tgz#744f2eb81579d6eea753c227b0f570ad785aba88" + integrity sha512-5FnTQLSLswEj6IkgVw5KusNUUFY9ZGqe/TRFnP/BKYHYgfh7tc+C7mwiy95/yNP7Dh9x580Vv8r7u7ZfTBFxdw== dependencies: - "@babel/helper-validator-identifier" "^7.10.4" + "@babel/helper-validator-identifier" "^7.16.7" chalk "^2.0.0" js-tokens "^4.0.0" @@ -689,21 +756,33 @@ chalk "^2.0.0" js-tokens "^4.0.0" -"@babel/parser@^7.12.10", "@babel/parser@^7.12.11", "@babel/parser@^7.12.3", "@babel/parser@^7.12.7", "@babel/parser@^7.4.5", "@babel/parser@^7.7.0": - version "7.12.11" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.12.11.tgz#9ce3595bcd74bc5c466905e86c535b8b25011e79" - integrity sha512-N3UxG+uuF4CMYoNj8AhnbAcJF0PiuJ9KHuy1lQmkYsxTer/MAH9UBNHsBoAX/4s6NvlDD047No8mYVGGzLL4hg== +"@babel/parser@^7.12.10", "@babel/parser@^7.12.11", "@babel/parser@^7.12.7", "@babel/parser@^7.16.7", "@babel/parser@^7.17.3": + version "7.17.3" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.17.3.tgz#b07702b982990bf6fdc1da5049a23fece4c5c3d0" + integrity sha512-7yJPvPV+ESz2IUTPbOL+YkIGyCqOyNIzdguKQuJGnH7bg1WTIifuM21YqokFt/THWh1AkCRn9IgoykTRCBVpzA== "@babel/parser@^7.12.13", "@babel/parser@^7.13.0": version "7.13.12" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.13.12.tgz#ba320059420774394d3b0c0233ba40e4250b81d1" integrity sha512-4T7Pb244rxH24yR116LAuJ+adxXXnHhZaLJjegJVKSdoNCe4x1eDBaud5YIcQFcqzsaD5BHvJw5BQ0AZapdCRw== +"@babel/parser@^7.12.3", "@babel/parser@^7.4.5", "@babel/parser@^7.7.0": + version "7.12.11" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.12.11.tgz#9ce3595bcd74bc5c466905e86c535b8b25011e79" + integrity sha512-N3UxG+uuF4CMYoNj8AhnbAcJF0PiuJ9KHuy1lQmkYsxTer/MAH9UBNHsBoAX/4s6NvlDD047No8mYVGGzLL4hg== + "@babel/parser@^7.15.4", "@babel/parser@^7.15.5": version "7.15.7" resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.15.7.tgz#0c3ed4a2eb07b165dfa85b3cc45c727334c4edae" integrity sha512-rycZXvQ+xS9QyIcJ9HXeDWf1uxqlbVFAUq0Rq0dbc50Zb/+wUe/ehyfzGfm9KZZF0kBejYgxltBXocP+gKdL2g== +"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.16.7.tgz#4eda6d6c2a0aa79c70fa7b6da67763dfe2141050" + integrity sha512-anv/DObl7waiGEnC24O9zqL0pSuI9hljihqiDuFHC8d7/bjr/4RLGPWuc8rYOff/QPzbEPSkzG8wGG9aDuhHRg== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.15.4": version "7.15.4" resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.15.4.tgz#dbdeabb1e80f622d9f0b583efb2999605e0a567e" @@ -713,14 +792,14 @@ "@babel/helper-skip-transparent-expression-wrappers" "^7.15.4" "@babel/plugin-proposal-optional-chaining" "^7.14.5" -"@babel/plugin-proposal-async-generator-functions@^7.12.1": - version "7.12.12" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.12.12.tgz#04b8f24fd4532008ab4e79f788468fd5a8476566" - integrity sha512-nrz9y0a4xmUrRq51bYkWJIO5SBZyG2ys2qinHsN0zHDHVsUaModrkpyWWWXfGqYQmOL3x9sQIcTNN/pBGpo09A== +"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.16.7.tgz#cc001234dfc139ac45f6bcf801866198c8c72ff9" + integrity sha512-di8vUHRdf+4aJ7ltXhaDbPoszdkh59AQtJM5soLsuHpQJdFQZOA4uGj0V2u/CZ8bJ/u8ULDL5yq6FO/bCXnKHw== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/helper-remap-async-to-generator" "^7.12.1" - "@babel/plugin-syntax-async-generators" "^7.8.0" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-skip-transparent-expression-wrappers" "^7.16.0" + "@babel/plugin-proposal-optional-chaining" "^7.16.7" "@babel/plugin-proposal-async-generator-functions@^7.15.4": version "7.15.4" @@ -731,7 +810,16 @@ "@babel/helper-remap-async-to-generator" "^7.15.4" "@babel/plugin-syntax-async-generators" "^7.8.4" -"@babel/plugin-proposal-class-properties@^7.1.0", "@babel/plugin-proposal-class-properties@^7.10.4", "@babel/plugin-proposal-class-properties@^7.12.1", "@babel/plugin-proposal-class-properties@^7.7.0": +"@babel/plugin-proposal-async-generator-functions@^7.16.8": + version "7.16.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.16.8.tgz#3bdd1ebbe620804ea9416706cd67d60787504bc8" + integrity sha512-71YHIvMuiuqWJQkebWJtdhQTfd4Q4mF76q2IX37uZPkG9+olBxsX+rH1vkhFto4UeJZ9dPY2s+mDvhDm1u2BGQ== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-remap-async-to-generator" "^7.16.8" + "@babel/plugin-syntax-async-generators" "^7.8.4" + +"@babel/plugin-proposal-class-properties@^7.1.0": version "7.12.1" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.12.1.tgz#a082ff541f2a29a4821065b8add9346c0c16e5de" integrity sha512-cKp3dlQsFsEs5CWKnN7BnSHOd0EOW8EKpEjkoz1pO2E5KzIDNV9Ros1b0CnmbVgAGXJubOYVBOGCT1OmJwOI7w== @@ -739,13 +827,13 @@ "@babel/helper-create-class-features-plugin" "^7.12.1" "@babel/helper-plugin-utils" "^7.10.4" -"@babel/plugin-proposal-class-properties@^7.13.0": - version "7.13.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.13.0.tgz#146376000b94efd001e57a40a88a525afaab9f37" - integrity sha512-KnTDjFNC1g+45ka0myZNvSBFLhNCLN+GeGYLDEA8Oq7MZ6yMgfLoIRh86GRT0FjtJhZw8JyUskP9uvj5pHM9Zg== +"@babel/plugin-proposal-class-properties@^7.12.1", "@babel/plugin-proposal-class-properties@^7.16.5", "@babel/plugin-proposal-class-properties@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.16.7.tgz#925cad7b3b1a2fcea7e59ecc8eb5954f961f91b0" + integrity sha512-IobU0Xme31ewjYOShSIqd/ZGM/r/cuOz2z0MDbNrhF5FW+ZVgi0f2lyeoj9KFPDOAqsYxmLWZte1WOwlvY9aww== dependencies: - "@babel/helper-create-class-features-plugin" "^7.13.0" - "@babel/helper-plugin-utils" "^7.13.0" + "@babel/helper-create-class-features-plugin" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-proposal-class-properties@^7.14.5": version "7.14.5" @@ -764,14 +852,14 @@ "@babel/helper-plugin-utils" "^7.14.5" 
"@babel/plugin-syntax-class-static-block" "^7.14.5" -"@babel/plugin-proposal-decorators@^7.10.5", "@babel/plugin-proposal-decorators@^7.7.0": - version "7.12.12" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.12.12.tgz#067a6d3d6ca86d54cf56bb183239199c20daeafe" - integrity sha512-fhkE9lJYpw2mjHelBpM2zCbaA11aov2GJs7q4cFaXNrWx0H3bW58H9Esy2rdtYOghFBEYUDRIpvlgi+ZD+AvvQ== +"@babel/plugin-proposal-class-static-block@^7.16.7": + version "7.17.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-static-block/-/plugin-proposal-class-static-block-7.17.6.tgz#164e8fd25f0d80fa48c5a4d1438a6629325ad83c" + integrity sha512-X/tididvL2zbs7jZCeeRJ8167U/+Ac135AM6jCAx6gYXDUviZV5Ku9UDvWS2NCuWlFjIRXklYhwo6HhAC7ETnA== dependencies: - "@babel/helper-create-class-features-plugin" "^7.12.1" - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-decorators" "^7.12.1" + "@babel/helper-create-class-features-plugin" "^7.17.6" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-class-static-block" "^7.14.5" "@babel/plugin-proposal-decorators@^7.12.12": version "7.15.4" @@ -791,13 +879,16 @@ "@babel/helper-plugin-utils" "^7.13.0" "@babel/plugin-syntax-decorators" "^7.12.13" -"@babel/plugin-proposal-dynamic-import@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.12.1.tgz#43eb5c2a3487ecd98c5c8ea8b5fdb69a2749b2dc" - integrity sha512-a4rhUSZFuq5W8/OO8H7BL5zspjnc1FLd9hlOxIK/f7qG4a0qsqk8uvF/ywgBA8/OmjsapjpvaEOYItfGG1qIvQ== +"@babel/plugin-proposal-decorators@^7.16.7": + version "7.17.2" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-decorators/-/plugin-proposal-decorators-7.17.2.tgz#c36372ddfe0360cac1ee331a238310bddca11493" + integrity sha512-WH8Z95CwTq/W8rFbMqb9p3hicpt4RX4f0K659ax2VHxgOyT6qQmUaEVEjIh4WR9Eh9NymkVn5vwsrE68fAQNUw== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-dynamic-import" "^7.8.0" + "@babel/helper-create-class-features-plugin" "^7.17.1" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-replace-supers" "^7.16.7" + "@babel/plugin-syntax-decorators" "^7.17.0" + charcodes "^0.2.0" "@babel/plugin-proposal-dynamic-import@^7.14.5": version "7.14.5" @@ -807,6 +898,14 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-dynamic-import" "^7.8.3" +"@babel/plugin-proposal-dynamic-import@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.16.7.tgz#c19c897eaa46b27634a00fee9fb7d829158704b2" + integrity sha512-I8SW9Ho3/8DRSdmDdH3gORdyUuYnk1m4cMxUAdu5oy4n3OfN8flDEH+d60iG7dUfi0KkYwSvoalHzzdRzpWHTg== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-dynamic-import" "^7.8.3" + "@babel/plugin-proposal-export-default-from@^7.12.1": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-export-default-from/-/plugin-proposal-export-default-from-7.14.5.tgz#8931a6560632c650f92a8e5948f6e73019d6d321" @@ -815,14 +914,6 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-export-default-from" "^7.14.5" -"@babel/plugin-proposal-export-namespace-from@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.12.1.tgz#8b9b8f376b2d88f5dd774e4d24a5cc2e3679b6d4" - integrity 
sha512-6CThGf0irEkzujYS5LQcjBx8j/4aQGiVv7J9+2f7pGfxqyKh3WnmVJYW3hdrQjyksErMGBPQrCnHfOtna+WLbw== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-export-namespace-from" "^7.8.3" - "@babel/plugin-proposal-export-namespace-from@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.14.5.tgz#dbad244310ce6ccd083072167d8cea83a52faf76" @@ -831,13 +922,13 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-export-namespace-from" "^7.8.3" -"@babel/plugin-proposal-json-strings@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.12.1.tgz#d45423b517714eedd5621a9dfdc03fa9f4eb241c" - integrity sha512-GoLDUi6U9ZLzlSda2Df++VSqDJg3CG+dR0+iWsv6XRw1rEq+zwt4DirM9yrxW6XWaTpmai1cWJLMfM8qQJf+yw== +"@babel/plugin-proposal-export-namespace-from@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-export-namespace-from/-/plugin-proposal-export-namespace-from-7.16.7.tgz#09de09df18445a5786a305681423ae63507a6163" + integrity sha512-ZxdtqDXLRGBL64ocZcs7ovt71L3jhC1RGSyR996svrCi3PYqHNkb3SwPJCs8RIzD86s+WPpt2S73+EHCGO+NUA== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-json-strings" "^7.8.0" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-export-namespace-from" "^7.8.3" "@babel/plugin-proposal-json-strings@^7.14.5": version "7.14.5" @@ -847,13 +938,13 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-json-strings" "^7.8.3" -"@babel/plugin-proposal-logical-assignment-operators@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.12.1.tgz#f2c490d36e1b3c9659241034a5d2cd50263a2751" - integrity sha512-k8ZmVv0JU+4gcUGeCDZOGd0lCIamU/sMtIiX3UWnUc5yzgq6YUGyEolNYD+MLYKfSzgECPcqetVcJP9Afe/aCA== +"@babel/plugin-proposal-json-strings@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.16.7.tgz#9732cb1d17d9a2626a08c5be25186c195b6fa6e8" + integrity sha512-lNZ3EEggsGY78JavgbHsK9u5P3pQaW7k4axlgFLYkMd7UBsiNahCITShLjNQschPyjtO6dADrL24757IdhBrsQ== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-json-strings" "^7.8.3" "@babel/plugin-proposal-logical-assignment-operators@^7.14.5": version "7.14.5" @@ -863,13 +954,21 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" -"@babel/plugin-proposal-nullish-coalescing-operator@^7.12.1", "@babel/plugin-proposal-nullish-coalescing-operator@^7.4.4": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.12.1.tgz#3ed4fff31c015e7f3f1467f190dbe545cd7b046c" - integrity sha512-nZY0ESiaQDI1y96+jk6VxMOaL4LPo/QDHBqL+SF3/vl6dHkTwHlOI8L4ZwuRBHgakRBw5zsVylel7QPbbGuYgg== +"@babel/plugin-proposal-logical-assignment-operators@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.16.7.tgz#be23c0ba74deec1922e639832904be0bea73cdea" + integrity 
sha512-K3XzyZJGQCr00+EtYtrDjmwX7o7PLK6U9bi1nCwkQioRFVUv6dJoxbQjtWVtP+bCPy82bONBKG8NPyQ4+i6yjg== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.0" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" + +"@babel/plugin-proposal-nullish-coalescing-operator@^7.12.1", "@babel/plugin-proposal-nullish-coalescing-operator@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.16.7.tgz#141fc20b6857e59459d430c850a0011e36561d99" + integrity sha512-aUOrYU3EVtjf62jQrCj63pYZ7k6vns2h/DQvHPWGmsJRYzWXZ6/AsfgpiRy6XiuIDADhJzP2Q9MwSMKauBQ+UQ== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" "@babel/plugin-proposal-nullish-coalescing-operator@^7.14.5": version "7.14.5" @@ -879,13 +978,13 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" -"@babel/plugin-proposal-numeric-separator@^7.12.7": - version "7.12.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.12.7.tgz#8bf253de8139099fea193b297d23a9d406ef056b" - integrity sha512-8c+uy0qmnRTeukiGsjLGy6uVs/TFjJchGXUeBqlG4VWYOdJWkhhVPdQ3uHwbmalfJwv2JsV0qffXP4asRfL2SQ== +"@babel/plugin-proposal-nullish-coalescing-operator@^7.4.4": + version "7.12.1" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.12.1.tgz#3ed4fff31c015e7f3f1467f190dbe545cd7b046c" + integrity sha512-nZY0ESiaQDI1y96+jk6VxMOaL4LPo/QDHBqL+SF3/vl6dHkTwHlOI8L4ZwuRBHgakRBw5zsVylel7QPbbGuYgg== dependencies: "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-numeric-separator" "^7.10.4" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.0" "@babel/plugin-proposal-numeric-separator@^7.14.5": version "7.14.5" @@ -895,7 +994,15 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-numeric-separator" "^7.10.4" -"@babel/plugin-proposal-object-rest-spread@7.12.1", "@babel/plugin-proposal-object-rest-spread@^7.12.1": +"@babel/plugin-proposal-numeric-separator@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.16.7.tgz#d6b69f4af63fb38b6ca2558442a7fb191236eba9" + integrity sha512-vQgPMknOIgiuVqbokToyXbkY/OmmjAzr/0lhSIbG/KmnzXPGwW/AdhdKpi+O4X/VkWiWjnkKOBiqJrTaC98VKw== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-numeric-separator" "^7.10.4" + +"@babel/plugin-proposal-object-rest-spread@7.12.1": version "7.12.1" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz#def9bd03cea0f9b72283dac0ec22d289c7691069" integrity sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA== @@ -904,6 +1011,17 @@ "@babel/plugin-syntax-object-rest-spread" "^7.8.0" "@babel/plugin-transform-parameters" "^7.12.1" +"@babel/plugin-proposal-object-rest-spread@^7.12.1", "@babel/plugin-proposal-object-rest-spread@^7.16.7": + version "7.17.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.17.3.tgz#d9eb649a54628a51701aef7e0ea3d17e2b9dd390" + integrity 
sha512-yuL5iQA/TbZn+RGAfxQXfi7CNLmKi1f8zInn4IgobuCWcAb7i+zj4TYzQ9l8cEzVyJ89PDGuqxK1xZpUDISesw== + dependencies: + "@babel/compat-data" "^7.17.0" + "@babel/helper-compilation-targets" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-object-rest-spread" "^7.8.3" + "@babel/plugin-transform-parameters" "^7.16.7" + "@babel/plugin-proposal-object-rest-spread@^7.15.6": version "7.15.6" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.15.6.tgz#ef68050c8703d07b25af402cb96cf7f34a68ed11" @@ -926,14 +1044,6 @@ "@babel/plugin-syntax-object-rest-spread" "^7.8.3" "@babel/plugin-transform-parameters" "^7.16.0" -"@babel/plugin-proposal-optional-catch-binding@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.12.1.tgz#ccc2421af64d3aae50b558a71cede929a5ab2942" - integrity sha512-hFvIjgprh9mMw5v42sJWLI1lzU5L2sznP805zeT6rySVRA0Y18StRhDqhSxlap0oVgItRsB6WSROp4YnJTJz0g== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-optional-catch-binding" "^7.8.0" - "@babel/plugin-proposal-optional-catch-binding@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.14.5.tgz#939dd6eddeff3a67fdf7b3f044b5347262598c3c" @@ -942,14 +1052,22 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" -"@babel/plugin-proposal-optional-chaining@^7.12.7", "@babel/plugin-proposal-optional-chaining@^7.6.0": - version "7.12.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.12.7.tgz#e02f0ea1b5dc59d401ec16fb824679f683d3303c" - integrity sha512-4ovylXZ0PWmwoOvhU2vhnzVNnm88/Sm9nx7V8BPgMvAzn5zDou3/Awy0EjglyubVHasJj+XCEkr/r1X3P5elCA== +"@babel/plugin-proposal-optional-catch-binding@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.16.7.tgz#c623a430674ffc4ab732fd0a0ae7722b67cb74cf" + integrity sha512-eMOH/L4OvWSZAE1VkHbr1vckLG1WUcHGJSLqqQwl2GaUqG6QjddvrOaTUMNYiv77H5IKPMZ9U9P7EaHwvAShfA== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/helper-skip-transparent-expression-wrappers" "^7.12.1" - "@babel/plugin-syntax-optional-chaining" "^7.8.0" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" + +"@babel/plugin-proposal-optional-chaining@^7.12.7", "@babel/plugin-proposal-optional-chaining@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.16.7.tgz#7cd629564724816c0e8a969535551f943c64c39a" + integrity sha512-eC3xy+ZrUcBtP7x+sq62Q/HYd674pPTb/77XZMb5wbDPGWIdUbSr4Agr052+zaUPSb+gGRnjxXfKFvx5iMJ+DA== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-skip-transparent-expression-wrappers" "^7.16.0" + "@babel/plugin-syntax-optional-chaining" "^7.8.3" "@babel/plugin-proposal-optional-chaining@^7.14.5": version "7.14.5" @@ -960,13 +1078,22 @@ "@babel/helper-skip-transparent-expression-wrappers" "^7.14.5" "@babel/plugin-syntax-optional-chaining" "^7.8.3" -"@babel/plugin-proposal-private-methods@^7.12.1": - version "7.12.1" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.12.1.tgz#86814f6e7a21374c980c10d38b4493e703f4a389" - integrity sha512-mwZ1phvH7/NHK6Kf8LP7MYDogGV+DKB1mryFOEwx5EBNQrosvIczzZFTUmWaeujd5xT6G1ELYWUz3CutMhjE1w== +"@babel/plugin-proposal-optional-chaining@^7.6.0": + version "7.12.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.12.7.tgz#e02f0ea1b5dc59d401ec16fb824679f683d3303c" + integrity sha512-4ovylXZ0PWmwoOvhU2vhnzVNnm88/Sm9nx7V8BPgMvAzn5zDou3/Awy0EjglyubVHasJj+XCEkr/r1X3P5elCA== dependencies: - "@babel/helper-create-class-features-plugin" "^7.12.1" "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-skip-transparent-expression-wrappers" "^7.12.1" + "@babel/plugin-syntax-optional-chaining" "^7.8.0" + +"@babel/plugin-proposal-private-methods@^7.12.1", "@babel/plugin-proposal-private-methods@^7.16.11", "@babel/plugin-proposal-private-methods@^7.16.5": + version "7.16.11" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-methods/-/plugin-proposal-private-methods-7.16.11.tgz#e8df108288555ff259f4527dbe84813aac3a1c50" + integrity sha512-F/2uAkPlXDr8+BHpZvo19w3hLFKge+k75XUprE6jaqKxjGkSYcK+4c+bup5PdW/7W/Rpjwql7FTVEDW+fRAQsw== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.16.10" + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-proposal-private-methods@^7.14.5": version "7.14.5" @@ -986,13 +1113,15 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-syntax-private-property-in-object" "^7.14.5" -"@babel/plugin-proposal-unicode-property-regex@^7.12.1", "@babel/plugin-proposal-unicode-property-regex@^7.4.4": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.12.1.tgz#2a183958d417765b9eae334f47758e5d6a82e072" - integrity sha512-MYq+l+PvHuw/rKUz1at/vb6nCnQ2gmJBNaM62z0OgH7B2W1D9pvkpYtlti9bGtizNIU1K3zm4bZF9F91efVY0w== +"@babel/plugin-proposal-private-property-in-object@^7.16.5", "@babel/plugin-proposal-private-property-in-object@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.16.7.tgz#b0b8cef543c2c3d57e59e2c611994861d46a3fce" + integrity sha512-rMQkjcOFbm+ufe3bTZLyOfsOUOxyvLXZJCTARhJr+8UMSoZmqTe1K1BgkFcrW37rAchWg57yI69ORxiWvUINuQ== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.12.1" - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-annotate-as-pure" "^7.16.7" + "@babel/helper-create-class-features-plugin" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-syntax-private-property-in-object" "^7.14.5" "@babel/plugin-proposal-unicode-property-regex@^7.14.5": version "7.14.5" @@ -1002,20 +1131,21 @@ "@babel/helper-create-regexp-features-plugin" "^7.14.5" "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-syntax-async-generators@^7.8.0", "@babel/plugin-syntax-async-generators@^7.8.4": +"@babel/plugin-proposal-unicode-property-regex@^7.16.7", "@babel/plugin-proposal-unicode-property-regex@^7.4.4": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.16.7.tgz#635d18eb10c6214210ffc5ff4932552de08188a2" + integrity sha512-QRK0YI/40VLhNVGIjRNAAQkEHws0cswSdFFjpFyt943YmJIU1da9uW63Iu6NFV6CxTZW5eTDCrwZUstBWgp/Rg== + dependencies: + "@babel/helper-create-regexp-features-plugin" 
"^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/plugin-syntax-async-generators@^7.8.4": version "7.8.4" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d" integrity sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw== dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-class-properties@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.1.tgz#bcb297c5366e79bebadef509549cd93b04f19978" - integrity sha512-U40A76x5gTwmESz+qiqssqmeEsKvcSyvtgktrm0uzcARAmM9I1jR221f6Oq+GmHrcD+LvZDag1UTOTe2fL3TeA== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-class-properties@^7.12.13": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz#b5c987274c4a3a82b89714796931a6b53544ae10" @@ -1030,13 +1160,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-syntax-decorators@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.12.1.tgz#81a8b535b284476c41be6de06853a8802b98c5dd" - integrity sha512-ir9YW5daRrTYiy9UJ2TzdNIJEZu8KclVzDcfSt4iEmOtwQ4llPtWInNKJyKnVXp1vE4bbVd5S31M/im3mYMO1w== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-decorators@^7.12.13": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.12.13.tgz#fac829bf3c7ef4a1bc916257b403e58c6bdaf648" @@ -1051,7 +1174,14 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-syntax-dynamic-import@^7.8.0", "@babel/plugin-syntax-dynamic-import@^7.8.3": +"@babel/plugin-syntax-decorators@^7.17.0": + version "7.17.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-decorators/-/plugin-syntax-decorators-7.17.0.tgz#a2be3b2c9fe7d78bd4994e790896bc411e2f166d" + integrity sha512-qWe85yCXsvDEluNP0OyeQjH63DlhAR3W7K9BxxU1MvbDb48tgBG+Ao6IJJ6smPDrrVzSQZrbF6donpkFBMcs3A== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/plugin-syntax-dynamic-import@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz#62bf98b2da3cd21d626154fc96ee5b3cb68eacb3" integrity sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ== @@ -1072,7 +1202,7 @@ dependencies: "@babel/helper-plugin-utils" "^7.8.3" -"@babel/plugin-syntax-json-strings@^7.8.0", "@babel/plugin-syntax-json-strings@^7.8.3": +"@babel/plugin-syntax-json-strings@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz#01ca21b668cd8218c9e640cb6dd88c5412b2c96a" integrity sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA== @@ -1121,7 +1251,7 @@ dependencies: "@babel/helper-plugin-utils" "^7.8.0" -"@babel/plugin-syntax-optional-catch-binding@^7.8.0", "@babel/plugin-syntax-optional-catch-binding@^7.8.3": +"@babel/plugin-syntax-optional-catch-binding@^7.8.3": version "7.8.3" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz#6111a265bcfb020eb9efd0fdfd7d26402b9ed6c1" integrity 
sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q== @@ -1142,13 +1272,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-syntax-top-level-await@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.12.1.tgz#dd6c0b357ac1bb142d98537450a319625d13d2a0" - integrity sha512-i7ooMZFS+a/Om0crxZodrTzNEPJHZrlMVGMTEpFAj6rYY/bKCddB0Dk/YxfPuYXOopuhKk/e1jV6h+WUU9XN3A== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-top-level-await@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz#c1cfdadc35a646240001f06138247b741c34d94c" @@ -1156,13 +1279,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-syntax-typescript@^7.12.1", "@babel/plugin-syntax-typescript@^7.2.0", "@babel/plugin-syntax-typescript@^7.8.3": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.12.1.tgz#460ba9d77077653803c3dd2e673f76d66b4029e5" - integrity sha512-UZNEcCY+4Dp9yYRCAHrHDU+9ZXLYaY9MgBXSRLkB9WjYFRR6quJBumfVrEkUxrePPBwFcpWfNKXqVRQQtm7mMA== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-typescript@^7.12.13": version "7.12.13" resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.12.13.tgz#9dff111ca64154cef0f4dc52cf843d9f12ce4474" @@ -1177,13 +1293,27 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-arrow-functions@^7.12.1": +"@babel/plugin-syntax-typescript@^7.2.0": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.16.7.tgz#39c9b55ee153151990fb038651d58d3fd03f98f8" + integrity sha512-YhUIJHHGkqPgEcMYkPCKTyGUdoGKWtopIycQyjJH8OjvRgOYsXsaKehLVPScKJWAULPxMa4N1vCe6szREFlZ7A== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/plugin-syntax-typescript@^7.8.3": version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.12.1.tgz#8083ffc86ac8e777fbe24b5967c4b2521f3cb2b3" - integrity sha512-5QB50qyN44fzzz4/qxDPQMBCTHgxg3n0xRBLJUmBlLoU/sFvxVWGZF/ZUfMVDQuJUKXaBhbupxIzIfZ6Fwk/0A== + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.12.1.tgz#460ba9d77077653803c3dd2e673f76d66b4029e5" + integrity sha512-UZNEcCY+4Dp9yYRCAHrHDU+9ZXLYaY9MgBXSRLkB9WjYFRR6quJBumfVrEkUxrePPBwFcpWfNKXqVRQQtm7mMA== dependencies: "@babel/helper-plugin-utils" "^7.10.4" +"@babel/plugin-transform-arrow-functions@^7.12.1", "@babel/plugin-transform-arrow-functions@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.16.7.tgz#44125e653d94b98db76369de9c396dc14bef4154" + integrity sha512-9ffkFFMbvzTvv+7dTp/66xvZAWASuPD5Tl9LK3Z9vhOmANo6j94rik+5YMBt4CwHVMWLWpMsriIc2zsa3WW3xQ== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-transform-arrow-functions@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.14.5.tgz#f7187d9588a768dd080bf4c9ffe117ea62f7862a" @@ -1191,15 +1321,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-async-to-generator@^7.12.1": - version 
"7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.12.1.tgz#3849a49cc2a22e9743cbd6b52926d30337229af1" - integrity sha512-SDtqoEcarK1DFlRJ1hHRY5HvJUj5kX4qmtpMAm2QnhOlyuMC4TMdCRgW6WXpv93rZeYNeLP22y8Aq2dbcDRM1A== - dependencies: - "@babel/helper-module-imports" "^7.12.1" - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/helper-remap-async-to-generator" "^7.12.1" - "@babel/plugin-transform-async-to-generator@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.14.5.tgz#72c789084d8f2094acb945633943ef8443d39e67" @@ -1209,12 +1330,14 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/helper-remap-async-to-generator" "^7.14.5" -"@babel/plugin-transform-block-scoped-functions@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.12.1.tgz#f2a1a365bde2b7112e0a6ded9067fdd7c07905d9" - integrity sha512-5OpxfuYnSgPalRpo8EWGPzIYf0lHBWORCkj5M0oLBwHdlux9Ri36QqGW3/LR13RSVOAoUUMzoPI/jpE4ABcHoA== +"@babel/plugin-transform-async-to-generator@^7.16.8": + version "7.16.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.16.8.tgz#b83dff4b970cf41f1b819f8b49cc0cfbaa53a808" + integrity sha512-MtmUmTJQHCnyJVrScNzNlofQJ3dLFuobYn3mwOTKHnSCMtbNsqvF71GQmJfFjdrXSsAA7iysFmYWw4bXZ20hOg== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-module-imports" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-remap-async-to-generator" "^7.16.8" "@babel/plugin-transform-block-scoped-functions@^7.14.5": version "7.14.5" @@ -1223,12 +1346,12 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-block-scoping@^7.12.11", "@babel/plugin-transform-block-scoping@^7.8.3": - version "7.12.12" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.12.12.tgz#d93a567a152c22aea3b1929bb118d1d0a175cdca" - integrity sha512-VOEPQ/ExOVqbukuP7BYJtI5ZxxsmegTwzZ04j1aF0dkSypGo9XpDHuOrABsJu+ie+penpSJheDJ11x1BEZNiyQ== +"@babel/plugin-transform-block-scoped-functions@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.16.7.tgz#4d0d57d9632ef6062cdf354bb717102ee042a620" + integrity sha512-JUuzlzmF40Z9cXyytcbZEZKckgrQzChbQJw/5PuEHYeqzCsvebDx0K0jWnIIVcmmDOAVctCgnYs0pMcrYj2zJg== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-block-scoping@^7.12.12", "@babel/plugin-transform-block-scoping@^7.15.3": version "7.15.3" @@ -1237,18 +1360,32 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-classes@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.12.1.tgz#65e650fcaddd3d88ddce67c0f834a3d436a32db6" - integrity sha512-/74xkA7bVdzQTBeSUhLLJgYIcxw/dpEpCdRDiHgPJ3Mv6uC11UhjpOhl72CgqbBCmt1qtssCyB2xnJm1+PFjog== +"@babel/plugin-transform-block-scoping@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.16.7.tgz#f50664ab99ddeaee5bc681b8f3a6ea9d72ab4f87" + integrity 
sha512-ObZev2nxVAYA4bhyusELdo9hb3H+A56bxH3FZMbEImZFiEDYVHXQSJ1hQKFlDnlt8G9bBrCZ5ZpURZUrV4G5qQ== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/plugin-transform-block-scoping@^7.8.3": + version "7.12.12" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.12.12.tgz#d93a567a152c22aea3b1929bb118d1d0a175cdca" + integrity sha512-VOEPQ/ExOVqbukuP7BYJtI5ZxxsmegTwzZ04j1aF0dkSypGo9XpDHuOrABsJu+ie+penpSJheDJ11x1BEZNiyQ== dependencies: - "@babel/helper-annotate-as-pure" "^7.10.4" - "@babel/helper-define-map" "^7.10.4" - "@babel/helper-function-name" "^7.10.4" - "@babel/helper-optimise-call-expression" "^7.10.4" "@babel/helper-plugin-utils" "^7.10.4" - "@babel/helper-replace-supers" "^7.12.1" - "@babel/helper-split-export-declaration" "^7.10.4" + +"@babel/plugin-transform-classes@^7.12.1", "@babel/plugin-transform-classes@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.16.7.tgz#8f4b9562850cd973de3b498f1218796eb181ce00" + integrity sha512-WY7og38SFAGYRe64BrjKf8OrE6ulEHtr5jEYaZMwox9KebgqPi67Zqz8K53EKk1fFEJgm96r32rkKZ3qA2nCWQ== + dependencies: + "@babel/helper-annotate-as-pure" "^7.16.7" + "@babel/helper-environment-visitor" "^7.16.7" + "@babel/helper-function-name" "^7.16.7" + "@babel/helper-optimise-call-expression" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-replace-supers" "^7.16.7" + "@babel/helper-split-export-declaration" "^7.16.7" globals "^11.1.0" "@babel/plugin-transform-classes@^7.15.4": @@ -1264,13 +1401,6 @@ "@babel/helper-split-export-declaration" "^7.15.4" globals "^11.1.0" -"@babel/plugin-transform-computed-properties@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.12.1.tgz#d68cf6c9b7f838a8a4144badbe97541ea0904852" - integrity sha512-vVUOYpPWB7BkgUWPo4C44mUQHpTZXakEqFjbv8rQMg7TC6S6ZhGZ3otQcRH6u7+adSlE5i0sp63eMC/XGffrzg== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-transform-computed-properties@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.14.5.tgz#1b9d78987420d11223d41195461cc43b974b204f" @@ -1278,12 +1408,19 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-destructuring@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.12.1.tgz#b9a570fe0d0a8d460116413cb4f97e8e08b2f847" - integrity sha512-fRMYFKuzi/rSiYb2uRLiUENJOKq4Gnl+6qOv5f8z0TZXg3llUwUhsNNwrwaT/6dUhJTzNpBr+CUvEWBtfNY1cw== +"@babel/plugin-transform-computed-properties@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.16.7.tgz#66dee12e46f61d2aae7a73710f591eb3df616470" + integrity sha512-gN72G9bcmenVILj//sv1zLNaPyYcOzUho2lIJBMh/iakJ9ygCo/hEF9cpGb61SCMEDxbbyBoVQxrt+bWKu5KGw== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/plugin-transform-destructuring@^7.12.1", "@babel/plugin-transform-destructuring@^7.16.7": + version "7.17.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.17.3.tgz#c445f75819641788a27a0a3a759d9df911df6abc" + integrity 
sha512-dDFzegDYKlPqa72xIlbmSkly5MluLoaC1JswABGktyt6NTXSBcUuse/kWE/wvKFWJHPETpi158qJZFS3JmykJg== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-destructuring@^7.14.7": version "7.14.7" @@ -1292,14 +1429,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-dotall-regex@^7.12.1", "@babel/plugin-transform-dotall-regex@^7.4.4": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.12.1.tgz#a1d16c14862817b6409c0a678d6f9373ca9cd975" - integrity sha512-B2pXeRKoLszfEW7J4Hg9LoFaWEbr/kzo3teWHmtFCszjRNa/b40f9mfeqZsIDLLt/FjwQ6pz/Gdlwy85xNckBA== - dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.12.1" - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-transform-dotall-regex@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.14.5.tgz#2f6bf76e46bdf8043b4e7e16cf24532629ba0c7a" @@ -1308,12 +1437,13 @@ "@babel/helper-create-regexp-features-plugin" "^7.14.5" "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-duplicate-keys@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.12.1.tgz#745661baba295ac06e686822797a69fbaa2ca228" - integrity sha512-iRght0T0HztAb/CazveUpUQrZY+aGKKaWXMJ4uf9YJtqxSUe09j3wteztCUDRHs+SRAL7yMuFqUsLoAKKzgXjw== +"@babel/plugin-transform-dotall-regex@^7.16.7", "@babel/plugin-transform-dotall-regex@^7.4.4": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.16.7.tgz#6b2d67686fab15fb6a7fd4bd895d5982cfc81241" + integrity sha512-Lyttaao2SjZF6Pf4vk1dVKv8YypMpomAbygW+mU5cYP3S5cWTfCJjG8xV6CFdzGFlfWK81IjL9viiTvpb6G7gQ== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-create-regexp-features-plugin" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-duplicate-keys@^7.14.5": version "7.14.5" @@ -1322,13 +1452,12 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-exponentiation-operator@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.12.1.tgz#b0f2ed356ba1be1428ecaf128ff8a24f02830ae0" - integrity sha512-7tqwy2bv48q+c1EHbXK0Zx3KXd2RVQp6OC7PbwFNt/dPTAV3Lu5sWtWuAj8owr5wqtWnqHfl2/mJlUmqkChKug== +"@babel/plugin-transform-duplicate-keys@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.16.7.tgz#2207e9ca8f82a0d36a5a67b6536e7ef8b08823c9" + integrity sha512-03DvpbRfvWIXyK0/6QiR1KMTWeT6OcQ7tbhjrXyFS02kjuX/mu5Bvnh5SDSWHxyawit2g5aWhKwI86EE7GUnTw== dependencies: - "@babel/helper-builder-binary-assignment-operator-visitor" "^7.10.4" - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-exponentiation-operator@^7.14.5": version "7.14.5" @@ -1338,12 +1467,20 @@ "@babel/helper-builder-binary-assignment-operator-visitor" "^7.14.5" "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-for-of@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.12.1.tgz#07640f28867ed16f9511c99c888291f560921cfa" - integrity 
sha512-Zaeq10naAsuHo7heQvyV0ptj4dlZJwZgNAtBYBnu5nNKJoW62m0zKcIEyVECrUKErkUkg6ajMy4ZfnVZciSBhg== +"@babel/plugin-transform-exponentiation-operator@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.16.7.tgz#efa9862ef97e9e9e5f653f6ddc7b665e8536fe9b" + integrity sha512-8UYLSlyLgRixQvlYH3J2ekXFHDFLQutdy7FfFAMm3CPZ6q9wHCwnUyiXpQCe3gVVnQlHc5nsuiEVziteRNTXEA== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-builder-binary-assignment-operator-visitor" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/plugin-transform-for-of@^7.12.1", "@babel/plugin-transform-for-of@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.16.7.tgz#649d639d4617dff502a9a158c479b3b556728d8c" + integrity sha512-/QZm9W92Ptpw7sjI9Nx1mbcsWz33+l8kuMIQnDwgQBG5s3fAfQvkRjQ7NqXhtNcKOnPkdICmUHyCaWW06HCsqg== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-for-of@^7.15.4": version "7.15.4" @@ -1352,14 +1489,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-function-name@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.12.1.tgz#2ec76258c70fe08c6d7da154003a480620eba667" - integrity sha512-JF3UgJUILoFrFMEnOJLJkRHSk6LUSXLmEFsA23aR2O5CSLUxbeUX1IZ1YQ7Sn0aXb601Ncwjx73a+FVqgcljVw== - dependencies: - "@babel/helper-function-name" "^7.10.4" - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-transform-function-name@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.14.5.tgz#e81c65ecb900746d7f31802f6bed1f52d915d6f2" @@ -1368,12 +1497,14 @@ "@babel/helper-function-name" "^7.14.5" "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-literals@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.12.1.tgz#d73b803a26b37017ddf9d3bb8f4dc58bfb806f57" - integrity sha512-+PxVGA+2Ag6uGgL0A5f+9rklOnnMccwEBzwYFL3EUaKuiyVnUipyXncFcfjSkbimLrODoqki1U9XxZzTvfN7IQ== +"@babel/plugin-transform-function-name@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.16.7.tgz#5ab34375c64d61d083d7d2f05c38d90b97ec65cf" + integrity sha512-SU/C68YVwTRxqWj5kgsbKINakGag0KTgq9f2iZEXdStoAbOzLHEBRYzImmA6yFo8YZhJVflvXmIHUO7GWHmxxA== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-compilation-targets" "^7.16.7" + "@babel/helper-function-name" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-literals@^7.14.5": version "7.14.5" @@ -1382,12 +1513,12 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-member-expression-literals@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.12.1.tgz#496038602daf1514a64d43d8e17cbb2755e0c3ad" - integrity sha512-1sxePl6z9ad0gFMB9KqmYofk34flq62aqMt9NqliS/7hPEpURUCMbyHXrMPlo282iY7nAvUB1aQd5mg79UD9Jg== +"@babel/plugin-transform-literals@^7.16.7": + version "7.16.7" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.16.7.tgz#254c9618c5ff749e87cb0c0cef1a0a050c0bdab1" + integrity sha512-6tH8RTpTWI0s2sV6uq3e/C9wPo4PTqqZps4uF0kzQ9/xPLFQtipynvmT1g/dOfEJ+0EQsHhkQ/zyRId8J2b8zQ== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-member-expression-literals@^7.14.5": version "7.14.5" @@ -1396,20 +1527,20 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-member-expression-literals@^7.2.0": - version "7.16.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.16.0.tgz#5251b4cce01eaf8314403d21aedb269d79f5e64b" - integrity sha512-WRpw5HL4Jhnxw8QARzRvwojp9MIE7Tdk3ez6vRyUk1MwgjJN0aNpRoXainLR5SgxmoXx/vsXGZ6OthP6t/RbUg== +"@babel/plugin-transform-member-expression-literals@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.16.7.tgz#6e5dcf906ef8a098e630149d14c867dd28f92384" + integrity sha512-mBruRMbktKQwbxaJof32LT9KLy2f3gH+27a5XSuXo6h7R3vqltl0PgZ80C8ZMKw98Bf8bqt6BEVi3svOh2PzMw== dependencies: - "@babel/helper-plugin-utils" "^7.14.5" + "@babel/helper-plugin-utils" "^7.16.7" -"@babel/plugin-transform-modules-amd@^7.10.4", "@babel/plugin-transform-modules-amd@^7.10.5", "@babel/plugin-transform-modules-amd@^7.12.1", "@babel/plugin-transform-modules-amd@^7.5.0": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.12.1.tgz#3154300b026185666eebb0c0ed7f8415fefcf6f9" - integrity sha512-tDW8hMkzad5oDtzsB70HIQQRBiTKrhfgwC/KkJeGsaNFTdWhKNt/BiE8c5yj19XiGyrxpbkOfH87qkNg1YGlOQ== +"@babel/plugin-transform-modules-amd@^7.12.1", "@babel/plugin-transform-modules-amd@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.16.7.tgz#b28d323016a7daaae8609781d1f8c9da42b13186" + integrity sha512-KaaEtgBL7FKYwjJ/teH63oAmE3lP34N3kshz8mm4VMAw7U3PxjVwwUmxEFksbgsNUaO3wId9R2AVQYSEGRa2+g== dependencies: - "@babel/helper-module-transforms" "^7.12.1" - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-module-transforms" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" babel-plugin-dynamic-import-node "^2.3.3" "@babel/plugin-transform-modules-amd@^7.13.0": @@ -1430,16 +1561,6 @@ "@babel/helper-plugin-utils" "^7.14.5" babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-commonjs@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.12.1.tgz#fa403124542636c786cf9b460a0ffbb48a86e648" - integrity sha512-dY789wq6l0uLY8py9c1B48V8mVL5gZh/+PQ5ZPrylPYsnAvnEMjqsUXkuoDVPeVK+0VyGar+D08107LzDQ6pag== - dependencies: - "@babel/helper-module-transforms" "^7.12.1" - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/helper-simple-access" "^7.12.1" - babel-plugin-dynamic-import-node "^2.3.3" - "@babel/plugin-transform-modules-commonjs@^7.15.4": version "7.15.4" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.15.4.tgz#8201101240eabb5a76c08ef61b2954f767b6b4c1" @@ -1450,15 +1571,14 @@ "@babel/helper-simple-access" "^7.15.4" babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-systemjs@^7.12.1": - version 
"7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.12.1.tgz#663fea620d593c93f214a464cd399bf6dc683086" - integrity sha512-Hn7cVvOavVh8yvW6fLwveFqSnd7rbQN3zJvoPNyNaQSvgfKmDBO9U1YL9+PCXGRlZD9tNdWTy5ACKqMuzyn32Q== +"@babel/plugin-transform-modules-commonjs@^7.16.8": + version "7.16.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.16.8.tgz#cdee19aae887b16b9d331009aa9a219af7c86afe" + integrity sha512-oflKPvsLT2+uKQopesJt3ApiaIS2HW+hzHFcwRNtyDGieAeC/dIHZX8buJQ2J2X1rxGPy4eRcUijm3qcSPjYcA== dependencies: - "@babel/helper-hoist-variables" "^7.10.4" - "@babel/helper-module-transforms" "^7.12.1" - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/helper-validator-identifier" "^7.10.4" + "@babel/helper-module-transforms" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-simple-access" "^7.16.7" babel-plugin-dynamic-import-node "^2.3.3" "@babel/plugin-transform-modules-systemjs@^7.15.4": @@ -1472,13 +1592,16 @@ "@babel/helper-validator-identifier" "^7.14.9" babel-plugin-dynamic-import-node "^2.3.3" -"@babel/plugin-transform-modules-umd@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.12.1.tgz#eb5a218d6b1c68f3d6217b8fa2cc82fec6547902" - integrity sha512-aEIubCS0KHKM0zUos5fIoQm+AZUMt1ZvMpqz0/H5qAQ7vWylr9+PLYurT+Ic7ID/bKLd4q8hDovaG3Zch2uz5Q== +"@babel/plugin-transform-modules-systemjs@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.16.7.tgz#887cefaef88e684d29558c2b13ee0563e287c2d7" + integrity sha512-DuK5E3k+QQmnOqBR9UkusByy5WZWGRxfzV529s9nPra1GE7olmxfqO2FHobEOYSPIjPBTr4p66YDcjQnt8cBmw== dependencies: - "@babel/helper-module-transforms" "^7.12.1" - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-hoist-variables" "^7.16.7" + "@babel/helper-module-transforms" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-validator-identifier" "^7.16.7" + babel-plugin-dynamic-import-node "^2.3.3" "@babel/plugin-transform-modules-umd@^7.14.5": version "7.14.5" @@ -1488,12 +1611,13 @@ "@babel/helper-module-transforms" "^7.14.5" "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-named-capturing-groups-regex@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.12.1.tgz#b407f5c96be0d9f5f88467497fa82b30ac3e8753" - integrity sha512-tB43uQ62RHcoDp9v2Nsf+dSM8sbNodbEicbQNA53zHz8pWUhsgHSJCGpt7daXxRydjb0KnfmB+ChXOv3oADp1Q== +"@babel/plugin-transform-modules-umd@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.16.7.tgz#23dad479fa585283dbd22215bff12719171e7618" + integrity sha512-EMh7uolsC8O4xhudF2F6wedbSHm1HHZ0C6aJ7K67zcDNidMzVcxWdGr+htW9n21klm+bOn+Rx4CBsAntZd3rEQ== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.12.1" + "@babel/helper-module-transforms" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-named-capturing-groups-regex@^7.14.9": version "7.14.9" @@ -1502,12 +1626,12 @@ dependencies: "@babel/helper-create-regexp-features-plugin" "^7.14.5" -"@babel/plugin-transform-new-target@^7.12.1": - version "7.12.1" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.12.1.tgz#80073f02ee1bb2d365c3416490e085c95759dec0" - integrity sha512-+eW/VLcUL5L9IvJH7rT1sT0CzkdUTvPrXC2PXTn/7z7tXLBuKvezYbGdxD5WMRoyvyaujOq2fWoKl869heKjhw== +"@babel/plugin-transform-named-capturing-groups-regex@^7.16.8": + version "7.16.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.16.8.tgz#7f860e0e40d844a02c9dcf9d84965e7dfd666252" + integrity sha512-j3Jw+n5PvpmhRR+mrgIh04puSANCk/T/UA3m3P1MjJkhlK906+ApHhDIqBQDdOgL/r1UYpz4GNclTXxyZrYGSw== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-create-regexp-features-plugin" "^7.16.7" "@babel/plugin-transform-new-target@^7.14.5": version "7.14.5" @@ -1516,6 +1640,13 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" +"@babel/plugin-transform-new-target@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.16.7.tgz#9967d89a5c243818e0800fdad89db22c5f514244" + integrity sha512-xiLDzWNMfKoGOpc6t3U+etCE2yRnn3SM09BXqWPIZOBpL2gvVrBWUKnsJx0K/ADi5F5YC5f8APFfWrz25TdlGg== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-transform-object-assign@^7.8.3": version "7.12.1" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-assign/-/plugin-transform-object-assign-7.12.1.tgz#9102b06625f60a5443cc292d32b565373665e1e4" @@ -1523,14 +1654,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.10.4" -"@babel/plugin-transform-object-super@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.12.1.tgz#4ea08696b8d2e65841d0c7706482b048bed1066e" - integrity sha512-AvypiGJH9hsquNUn+RXVcBdeE3KHPZexWRdimhuV59cSoOt5kFBmqlByorAeUlGG2CJWd0U+4ZtNKga/TB0cAw== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/helper-replace-supers" "^7.12.1" - "@babel/plugin-transform-object-super@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.14.5.tgz#d0b5faeac9e98597a161a9cf78c527ed934cdc45" @@ -1539,12 +1662,20 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/helper-replace-supers" "^7.14.5" -"@babel/plugin-transform-parameters@^7.12.1", "@babel/plugin-transform-parameters@^7.16.0": - version "7.16.0" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.16.0.tgz#1b50765fc421c229819dc4c7cdb8911660b3c2d7" - integrity sha512-XgnQEm1CevKROPx+udOi/8f8TiGhrUWiHiaUCIp47tE0tpFDjzXNTZc9E5CmCwxNjXTWEVqvRfWZYOTFvMa/ZQ== +"@babel/plugin-transform-object-super@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.16.7.tgz#ac359cf8d32cf4354d27a46867999490b6c32a94" + integrity sha512-14J1feiQVWaGvRxj2WjyMuXS2jsBkgB3MdSN5HuC2G5nRspa5RK9COcs82Pwy5BuGcjb+fYaUj94mYcOj7rCvw== dependencies: - "@babel/helper-plugin-utils" "^7.14.5" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-replace-supers" "^7.16.7" + +"@babel/plugin-transform-parameters@^7.12.1", "@babel/plugin-transform-parameters@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.16.7.tgz#a1721f55b99b736511cb7e0152f61f17688f331f" + integrity 
sha512-AT3MufQ7zZEhU2hwOA11axBnExW0Lszu4RL/tAlUJBuNoRak+wehQW8h6KcXOcgjY42fHtDxswuMhMjFEuv/aw== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-parameters@^7.15.4": version "7.15.4" @@ -1553,12 +1684,12 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-property-literals@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.12.1.tgz#41bc81200d730abb4456ab8b3fbd5537b59adecd" - integrity sha512-6MTCR/mZ1MQS+AwZLplX4cEySjCpnIF26ToWo942nqn8hXSm7McaHQNeGx/pt7suI1TWOWMfa/NgBhiqSnX0cQ== +"@babel/plugin-transform-parameters@^7.16.0": + version "7.16.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.16.0.tgz#1b50765fc421c229819dc4c7cdb8911660b3c2d7" + integrity sha512-XgnQEm1CevKROPx+udOi/8f8TiGhrUWiHiaUCIp47tE0tpFDjzXNTZc9E5CmCwxNjXTWEVqvRfWZYOTFvMa/ZQ== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-plugin-utils" "^7.14.5" "@babel/plugin-transform-property-literals@^7.14.5": version "7.14.5" @@ -1567,6 +1698,13 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" +"@babel/plugin-transform-property-literals@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.16.7.tgz#2dadac85155436f22c696c4827730e0fe1057a55" + integrity sha512-z4FGr9NMGdoIl1RqavCqGG+ZuYjfZ/hkCIeuH6Do7tXmSm0ls11nYVSJqFEUOSJbDab5wC6lRE/w6YjVcr6Hqw== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/plugin-transform-react-display-name@^7.14.5": version "7.15.1" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.15.1.tgz#6aaac6099f1fcf6589d35ae6be1b6e10c8c602b9" @@ -1600,13 +1738,6 @@ "@babel/helper-annotate-as-pure" "^7.14.5" "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-regenerator@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.12.1.tgz#5f0a28d842f6462281f06a964e88ba8d7ab49753" - integrity sha512-gYrHqs5itw6i4PflFX3OdBPMQdPbF4bj2REIUxlMRUFk0/ZOAIpDFuViuxPjUL7YC8UPnf+XG7/utJvqXdPKng== - dependencies: - regenerator-transform "^0.14.2" - "@babel/plugin-transform-regenerator@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.14.5.tgz#9676fd5707ed28f522727c5b3c0aa8544440b04f" @@ -1614,12 +1745,12 @@ dependencies: regenerator-transform "^0.14.2" -"@babel/plugin-transform-reserved-words@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.12.1.tgz#6fdfc8cc7edcc42b36a7c12188c6787c873adcd8" - integrity sha512-pOnUfhyPKvZpVyBHhSBoX8vfA09b7r00Pmm1sH+29ae2hMTKVmSp4Ztsr8KBKjLjx17H0eJqaRC3bR2iThM54A== +"@babel/plugin-transform-regenerator@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.16.7.tgz#9e7576dc476cb89ccc5096fff7af659243b4adeb" + integrity sha512-mF7jOgGYCkSJagJ6XCujSQg+6xC1M77/03K2oBmVJWoFGNUtnVJO4WHKJk3dnPC8HCcj4xBQP1Egm8DWh3Pb3Q== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + regenerator-transform "^0.14.2" "@babel/plugin-transform-reserved-words@^7.14.5": version "7.14.5" @@ -1628,7 +1759,14 @@ dependencies: 
"@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-runtime@^7.12.0", "@babel/plugin-transform-runtime@^7.12.1", "@babel/plugin-transform-runtime@^7.6.0": +"@babel/plugin-transform-reserved-words@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.16.7.tgz#1d798e078f7c5958eec952059c460b220a63f586" + integrity sha512-KQzzDnZ9hWQBjwi5lpY5v9shmm6IVG0U9pB18zvMu2i4H90xpT4gmqwPYsn8rObiadYe2M0gmgsiOIF5A/2rtg== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/plugin-transform-runtime@^7.12.1": version "7.12.10" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.12.10.tgz#af0fded4e846c4b37078e8e5d06deac6cd848562" integrity sha512-xOrUfzPxw7+WDm9igMgQCbO3cJKymX7dFdsgRr1eu9n3KjjyU4pptIXbXPseQDquw+W+RuJEJMHKHNsPNNm3CA== @@ -1649,12 +1787,12 @@ babel-plugin-polyfill-regenerator "^0.1.2" semver "^6.3.0" -"@babel/plugin-transform-shorthand-properties@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.12.1.tgz#0bf9cac5550fce0cfdf043420f661d645fdc75e3" - integrity sha512-GFZS3c/MhX1OusqB1MZ1ct2xRzX5ppQh2JU1h2Pnfk88HtFTM+TWQqJNfwkmxtPQtb/s1tk87oENfXJlx7rSDw== +"@babel/plugin-transform-shorthand-properties@^7.12.1", "@babel/plugin-transform-shorthand-properties@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.16.7.tgz#e8549ae4afcf8382f711794c0c7b6b934c5fbd2a" + integrity sha512-hah2+FEnoRoATdIb05IOXf+4GzXYTq75TVhIn1PewihbpyrNWUt2JbudKQOETWw6QpLe+AIUpJ5MVLYTQbeeUg== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-shorthand-properties@^7.14.5": version "7.14.5" @@ -1663,13 +1801,13 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-spread@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.12.1.tgz#527f9f311be4ec7fdc2b79bb89f7bf884b3e1e1e" - integrity sha512-vuLp8CP0BE18zVYjsEBZ5xoCecMK6LBMMxYzJnh01rxQRvhNhH1csMMmBfNo5tGpGO+NhdSNW2mzIvBu3K1fng== +"@babel/plugin-transform-spread@^7.12.1", "@babel/plugin-transform-spread@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.16.7.tgz#a303e2122f9f12e0105daeedd0f30fb197d8ff44" + integrity sha512-+pjJpgAngb53L0iaA5gU/1MLXJIfXcYepLgXB3esVRf4fqmj8f2cxM3/FKaHsZms08hFQJkFccEWuIpm429TXg== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/helper-skip-transparent-expression-wrappers" "^7.12.1" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-skip-transparent-expression-wrappers" "^7.16.0" "@babel/plugin-transform-spread@^7.14.6": version "7.14.6" @@ -1679,13 +1817,6 @@ "@babel/helper-plugin-utils" "^7.14.5" "@babel/helper-skip-transparent-expression-wrappers" "^7.14.5" -"@babel/plugin-transform-sticky-regex@^7.12.7": - version "7.12.7" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.12.7.tgz#560224613ab23987453948ed21d0b0b193fa7fad" - integrity sha512-VEiqZL5N/QvDbdjfYQBhruN0HYjSPjC4XkeqW4ny/jNtH9gcbgaqBIXYEZCNnESMAGs0/K/R7oFGMhOyu/eIxg== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-transform-sticky-regex@^7.14.5": version 
"7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.14.5.tgz#5b617542675e8b7761294381f3c28c633f40aeb9" @@ -1693,12 +1824,19 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-template-literals@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.12.1.tgz#b43ece6ed9a79c0c71119f576d299ef09d942843" - integrity sha512-b4Zx3KHi+taXB1dVRBhVJtEPi9h1THCeKmae2qP0YdUHIFhVjtpqqNfxeVAa1xeHVhAy4SbHxEwx5cltAu5apw== +"@babel/plugin-transform-sticky-regex@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.16.7.tgz#c84741d4f4a38072b9a1e2e3fd56d359552e8660" + integrity sha512-NJa0Bd/87QV5NZZzTuZG5BPJjLYadeSZ9fO6oOUoL4iQx+9EEuw/eEM92SrsT19Yc2jgB1u1hsjqDtH02c3Drw== dependencies: - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/plugin-transform-template-literals@^7.12.1", "@babel/plugin-transform-template-literals@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.16.7.tgz#f3d1c45d28967c8e80f53666fc9c3e50618217ab" + integrity sha512-VwbkDDUeenlIjmfNeDX/V0aWrQH2QiVyJtwymVQSzItFDTpxfyJh3EVaQiS0rIN/CqbLGr0VcGmuwyTdZtdIsA== + dependencies: + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-template-literals@^7.14.5": version "7.14.5" @@ -1707,13 +1845,6 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-typeof-symbol@^7.12.10": - version "7.12.10" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.12.10.tgz#de01c4c8f96580bd00f183072b0d0ecdcf0dec4b" - integrity sha512-JQ6H8Rnsogh//ijxspCjc21YPd3VLVoYtAwv3zQmqAt8YGYUtdo5usNhdl4b9/Vir2kPFZl6n1h0PfUz4hJhaA== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-transform-typeof-symbol@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.14.5.tgz#39af2739e989a2bd291bf6b53f16981423d457d4" @@ -1721,14 +1852,12 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-typescript@^7.12.0": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.12.1.tgz#d92cc0af504d510e26a754a7dbc2e5c8cd9c7ab4" - integrity sha512-VrsBByqAIntM+EYMqSm59SiMEf7qkmI9dqMt6RbD/wlwueWmYcI0FFK5Fj47pP6DRZm+3teXjosKlwcZJ5lIMw== +"@babel/plugin-transform-typeof-symbol@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.16.7.tgz#9cdbe622582c21368bd482b660ba87d5545d4f7e" + integrity sha512-p2rOixCKRJzpg9JB4gjnG4gjWkWa89ZoYUnl9snJ1cWIcTH/hvxZqfO+WjG6T8DRBpctEol5jw1O5rA8gkCokQ== dependencies: - "@babel/helper-create-class-features-plugin" "^7.12.1" - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-syntax-typescript" "^7.12.1" + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-typescript@^7.13.0": version "7.13.0" @@ -1774,13 +1903,6 @@ "@babel/helper-plugin-utils" "^7.8.3" "@babel/plugin-syntax-typescript" "^7.8.3" -"@babel/plugin-transform-unicode-escapes@^7.12.1": - version "7.12.1" - resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.12.1.tgz#5232b9f81ccb07070b7c3c36c67a1b78f1845709" - integrity sha512-I8gNHJLIc7GdApm7wkVnStWssPNbSRMPtgHdmH3sRM1zopz09UWPS4x5V4n1yz/MIWTVnJ9sp6IkuXdWM4w+2Q== - dependencies: - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/plugin-transform-unicode-escapes@^7.14.5": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.14.5.tgz#9d4bd2a681e3c5d7acf4f57fa9e51175d91d0c6b" @@ -1788,13 +1910,12 @@ dependencies: "@babel/helper-plugin-utils" "^7.14.5" -"@babel/plugin-transform-unicode-regex@^7.12.1": - version "7.12.1" - resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.12.1.tgz#cc9661f61390db5c65e3febaccefd5c6ac3faecb" - integrity sha512-SqH4ClNngh/zGwHZOOQMTD+e8FGWexILV+ePMyiDJttAWRh5dhDL8rcl5lSgU3Huiq6Zn6pWTMvdPAb21Dwdyg== +"@babel/plugin-transform-unicode-escapes@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.16.7.tgz#da8717de7b3287a2c6d659750c964f302b31ece3" + integrity sha512-TAV5IGahIz3yZ9/Hfv35TV2xEm+kaBDaZQCn2S/hG9/CZ0DktxJv9eKfPc7yYCvOYR4JGx1h8C+jcSOvgaaI/Q== dependencies: - "@babel/helper-create-regexp-features-plugin" "^7.12.1" - "@babel/helper-plugin-utils" "^7.10.4" + "@babel/helper-plugin-utils" "^7.16.7" "@babel/plugin-transform-unicode-regex@^7.14.5": version "7.14.5" @@ -1804,7 +1925,15 @@ "@babel/helper-create-regexp-features-plugin" "^7.14.5" "@babel/helper-plugin-utils" "^7.14.5" -"@babel/polyfill@^7.11.5", "@babel/polyfill@^7.7.0": +"@babel/plugin-transform-unicode-regex@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.16.7.tgz#0f7aa4a501198976e25e82702574c34cfebe9ef2" + integrity sha512-oC5tYYKw56HO75KZVLQ+R/Nl3Hro9kf8iG0hXoaHP7tjAyCpvqBiSNe6vGrZni1Z6MggmUOC6A7VP7AVmw225Q== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" + +"@babel/polyfill@^7.11.5": version "7.12.1" resolved "https://registry.yarnpkg.com/@babel/polyfill/-/polyfill-7.12.1.tgz#1f2d6371d1261bbd961f3c5d5909150e12d0bd96" integrity sha512-X0pi0V6gxLi6lFZpGmeNa4zxtwEmCs42isWLNjZZDE0Y8yVfgu0T2OAHlzBbdYlqbW/YXVvoBHpATEM+goCj8g== @@ -1812,78 +1941,6 @@ core-js "^2.6.5" regenerator-runtime "^0.13.4" -"@babel/preset-env@^7.10.2", "@babel/preset-env@^7.12.0", "@babel/preset-env@^7.7.0": - version "7.12.11" - resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.12.11.tgz#55d5f7981487365c93dbbc84507b1c7215e857f9" - integrity sha512-j8Tb+KKIXKYlDBQyIOy4BLxzv1NUOwlHfZ74rvW+Z0Gp4/cI2IMDPBWAgWceGcE7aep9oL/0K9mlzlMGxA8yNw== - dependencies: - "@babel/compat-data" "^7.12.7" - "@babel/helper-compilation-targets" "^7.12.5" - "@babel/helper-module-imports" "^7.12.5" - "@babel/helper-plugin-utils" "^7.10.4" - "@babel/helper-validator-option" "^7.12.11" - "@babel/plugin-proposal-async-generator-functions" "^7.12.1" - "@babel/plugin-proposal-class-properties" "^7.12.1" - "@babel/plugin-proposal-dynamic-import" "^7.12.1" - "@babel/plugin-proposal-export-namespace-from" "^7.12.1" - "@babel/plugin-proposal-json-strings" "^7.12.1" - "@babel/plugin-proposal-logical-assignment-operators" "^7.12.1" - "@babel/plugin-proposal-nullish-coalescing-operator" "^7.12.1" - "@babel/plugin-proposal-numeric-separator" "^7.12.7" - 
"@babel/plugin-proposal-object-rest-spread" "^7.12.1" - "@babel/plugin-proposal-optional-catch-binding" "^7.12.1" - "@babel/plugin-proposal-optional-chaining" "^7.12.7" - "@babel/plugin-proposal-private-methods" "^7.12.1" - "@babel/plugin-proposal-unicode-property-regex" "^7.12.1" - "@babel/plugin-syntax-async-generators" "^7.8.0" - "@babel/plugin-syntax-class-properties" "^7.12.1" - "@babel/plugin-syntax-dynamic-import" "^7.8.0" - "@babel/plugin-syntax-export-namespace-from" "^7.8.3" - "@babel/plugin-syntax-json-strings" "^7.8.0" - "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" - "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.0" - "@babel/plugin-syntax-numeric-separator" "^7.10.4" - "@babel/plugin-syntax-object-rest-spread" "^7.8.0" - "@babel/plugin-syntax-optional-catch-binding" "^7.8.0" - "@babel/plugin-syntax-optional-chaining" "^7.8.0" - "@babel/plugin-syntax-top-level-await" "^7.12.1" - "@babel/plugin-transform-arrow-functions" "^7.12.1" - "@babel/plugin-transform-async-to-generator" "^7.12.1" - "@babel/plugin-transform-block-scoped-functions" "^7.12.1" - "@babel/plugin-transform-block-scoping" "^7.12.11" - "@babel/plugin-transform-classes" "^7.12.1" - "@babel/plugin-transform-computed-properties" "^7.12.1" - "@babel/plugin-transform-destructuring" "^7.12.1" - "@babel/plugin-transform-dotall-regex" "^7.12.1" - "@babel/plugin-transform-duplicate-keys" "^7.12.1" - "@babel/plugin-transform-exponentiation-operator" "^7.12.1" - "@babel/plugin-transform-for-of" "^7.12.1" - "@babel/plugin-transform-function-name" "^7.12.1" - "@babel/plugin-transform-literals" "^7.12.1" - "@babel/plugin-transform-member-expression-literals" "^7.12.1" - "@babel/plugin-transform-modules-amd" "^7.12.1" - "@babel/plugin-transform-modules-commonjs" "^7.12.1" - "@babel/plugin-transform-modules-systemjs" "^7.12.1" - "@babel/plugin-transform-modules-umd" "^7.12.1" - "@babel/plugin-transform-named-capturing-groups-regex" "^7.12.1" - "@babel/plugin-transform-new-target" "^7.12.1" - "@babel/plugin-transform-object-super" "^7.12.1" - "@babel/plugin-transform-parameters" "^7.12.1" - "@babel/plugin-transform-property-literals" "^7.12.1" - "@babel/plugin-transform-regenerator" "^7.12.1" - "@babel/plugin-transform-reserved-words" "^7.12.1" - "@babel/plugin-transform-shorthand-properties" "^7.12.1" - "@babel/plugin-transform-spread" "^7.12.1" - "@babel/plugin-transform-sticky-regex" "^7.12.7" - "@babel/plugin-transform-template-literals" "^7.12.1" - "@babel/plugin-transform-typeof-symbol" "^7.12.10" - "@babel/plugin-transform-unicode-escapes" "^7.12.1" - "@babel/plugin-transform-unicode-regex" "^7.12.1" - "@babel/preset-modules" "^0.1.3" - "@babel/types" "^7.12.11" - core-js-compat "^3.8.0" - semver "^5.5.0" - "@babel/preset-env@^7.12.11": version "7.15.6" resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.15.6.tgz#0f3898db9d63d320f21b17380d8462779de57659" @@ -1963,7 +2020,87 @@ core-js-compat "^3.16.0" semver "^6.3.0" -"@babel/preset-modules@^0.1.3", "@babel/preset-modules@^0.1.4": +"@babel/preset-env@^7.16.5", "@babel/preset-env@^7.16.7": + version "7.16.11" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.16.11.tgz#5dd88fd885fae36f88fd7c8342475c9f0abe2982" + integrity sha512-qcmWG8R7ZW6WBRPZK//y+E3Cli151B20W1Rv7ln27vuPaXU/8TKms6jFdiJtF7UDTxcrb7mZd88tAeK9LjdT8g== + dependencies: + "@babel/compat-data" "^7.16.8" + "@babel/helper-compilation-targets" "^7.16.7" + "@babel/helper-plugin-utils" "^7.16.7" + "@babel/helper-validator-option" "^7.16.7" + 
"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.16.7" + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.16.7" + "@babel/plugin-proposal-async-generator-functions" "^7.16.8" + "@babel/plugin-proposal-class-properties" "^7.16.7" + "@babel/plugin-proposal-class-static-block" "^7.16.7" + "@babel/plugin-proposal-dynamic-import" "^7.16.7" + "@babel/plugin-proposal-export-namespace-from" "^7.16.7" + "@babel/plugin-proposal-json-strings" "^7.16.7" + "@babel/plugin-proposal-logical-assignment-operators" "^7.16.7" + "@babel/plugin-proposal-nullish-coalescing-operator" "^7.16.7" + "@babel/plugin-proposal-numeric-separator" "^7.16.7" + "@babel/plugin-proposal-object-rest-spread" "^7.16.7" + "@babel/plugin-proposal-optional-catch-binding" "^7.16.7" + "@babel/plugin-proposal-optional-chaining" "^7.16.7" + "@babel/plugin-proposal-private-methods" "^7.16.11" + "@babel/plugin-proposal-private-property-in-object" "^7.16.7" + "@babel/plugin-proposal-unicode-property-regex" "^7.16.7" + "@babel/plugin-syntax-async-generators" "^7.8.4" + "@babel/plugin-syntax-class-properties" "^7.12.13" + "@babel/plugin-syntax-class-static-block" "^7.14.5" + "@babel/plugin-syntax-dynamic-import" "^7.8.3" + "@babel/plugin-syntax-export-namespace-from" "^7.8.3" + "@babel/plugin-syntax-json-strings" "^7.8.3" + "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" + "@babel/plugin-syntax-numeric-separator" "^7.10.4" + "@babel/plugin-syntax-object-rest-spread" "^7.8.3" + "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" + "@babel/plugin-syntax-optional-chaining" "^7.8.3" + "@babel/plugin-syntax-private-property-in-object" "^7.14.5" + "@babel/plugin-syntax-top-level-await" "^7.14.5" + "@babel/plugin-transform-arrow-functions" "^7.16.7" + "@babel/plugin-transform-async-to-generator" "^7.16.8" + "@babel/plugin-transform-block-scoped-functions" "^7.16.7" + "@babel/plugin-transform-block-scoping" "^7.16.7" + "@babel/plugin-transform-classes" "^7.16.7" + "@babel/plugin-transform-computed-properties" "^7.16.7" + "@babel/plugin-transform-destructuring" "^7.16.7" + "@babel/plugin-transform-dotall-regex" "^7.16.7" + "@babel/plugin-transform-duplicate-keys" "^7.16.7" + "@babel/plugin-transform-exponentiation-operator" "^7.16.7" + "@babel/plugin-transform-for-of" "^7.16.7" + "@babel/plugin-transform-function-name" "^7.16.7" + "@babel/plugin-transform-literals" "^7.16.7" + "@babel/plugin-transform-member-expression-literals" "^7.16.7" + "@babel/plugin-transform-modules-amd" "^7.16.7" + "@babel/plugin-transform-modules-commonjs" "^7.16.8" + "@babel/plugin-transform-modules-systemjs" "^7.16.7" + "@babel/plugin-transform-modules-umd" "^7.16.7" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.16.8" + "@babel/plugin-transform-new-target" "^7.16.7" + "@babel/plugin-transform-object-super" "^7.16.7" + "@babel/plugin-transform-parameters" "^7.16.7" + "@babel/plugin-transform-property-literals" "^7.16.7" + "@babel/plugin-transform-regenerator" "^7.16.7" + "@babel/plugin-transform-reserved-words" "^7.16.7" + "@babel/plugin-transform-shorthand-properties" "^7.16.7" + "@babel/plugin-transform-spread" "^7.16.7" + "@babel/plugin-transform-sticky-regex" "^7.16.7" + "@babel/plugin-transform-template-literals" "^7.16.7" + "@babel/plugin-transform-typeof-symbol" "^7.16.7" + "@babel/plugin-transform-unicode-escapes" "^7.16.7" + "@babel/plugin-transform-unicode-regex" "^7.16.7" + "@babel/preset-modules" "^0.1.5" + 
"@babel/types" "^7.16.8" + babel-plugin-polyfill-corejs2 "^0.3.0" + babel-plugin-polyfill-corejs3 "^0.5.0" + babel-plugin-polyfill-regenerator "^0.3.0" + core-js-compat "^3.20.2" + semver "^6.3.0" + +"@babel/preset-modules@^0.1.4": version "0.1.4" resolved "https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.4.tgz#362f2b68c662842970fdb5e254ffc8fc1c2e415e" integrity sha512-J36NhwnfdzpmH41M1DrnkkgAqhZaqr/NBdPfQ677mLzlaXo+oDiv1deyCDtgAhz8p328otdob0Du7+xgHGZbKg== @@ -1974,6 +2111,17 @@ "@babel/types" "^7.4.4" esutils "^2.0.2" +"@babel/preset-modules@^0.1.5": + version "0.1.5" + resolved "https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.5.tgz#ef939d6e7f268827e1841638dc6ff95515e115d9" + integrity sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA== + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/plugin-proposal-unicode-property-regex" "^7.4.4" + "@babel/plugin-transform-dotall-regex" "^7.4.4" + "@babel/types" "^7.4.4" + esutils "^2.0.2" + "@babel/preset-react@^7.12.10": version "7.14.5" resolved "https://registry.yarnpkg.com/@babel/preset-react/-/preset-react-7.14.5.tgz#0fbb769513f899c2c56f3a882fa79673c2d4ab3c" @@ -2020,21 +2168,19 @@ dependencies: regenerator-runtime "^0.13.4" -"@babel/runtime@^7.12.0", "@babel/runtime@^7.12.5", "@babel/runtime@^7.3.1", "@babel/runtime@^7.4.4", "@babel/runtime@^7.5.0", "@babel/runtime@^7.5.5", "@babel/runtime@^7.7.0", "@babel/runtime@^7.7.2", "@babel/runtime@^7.8.4", "@babel/runtime@^7.8.7": +"@babel/runtime@^7.12.5", "@babel/runtime@^7.3.1", "@babel/runtime@^7.4.4", "@babel/runtime@^7.5.0", "@babel/runtime@^7.5.5", "@babel/runtime@^7.7.2", "@babel/runtime@^7.8.7": version "7.12.5" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.12.5.tgz#410e7e487441e1b360c29be715d870d9b985882e" integrity sha512-plcc+hbExy3McchJCEQG3knOsuh3HH+Prx1P6cLIkET/0dLuQDEnrT+s27Axgc9bqfsmNUNHfscgMUdBpC9xfg== dependencies: regenerator-runtime "^0.13.4" -"@babel/template@^7.10.4", "@babel/template@^7.12.7": - version "7.12.7" - resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.12.7.tgz#c817233696018e39fbb6c491d2fb684e05ed43bc" - integrity sha512-GkDzmHS6GV7ZeXfJZ0tLRBhZcMcY0/Lnb+eEbXDBfCAcZCjrZKe6p3J4we/D24O9Y8enxWAg1cWwof59yLh2ow== +"@babel/runtime@^7.8.4": + version "7.17.2" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.17.2.tgz#66f68591605e59da47523c631416b18508779941" + integrity sha512-hzeyJyMA1YGdJTuWU0e/j4wKXrU4OMFvY2MSlaI9B7VQb0r5cxTE3EAIS2Q7Tn2RIcDkRvTA/v2JsAEhxe99uw== dependencies: - "@babel/code-frame" "^7.10.4" - "@babel/parser" "^7.12.7" - "@babel/types" "^7.12.7" + regenerator-runtime "^0.13.4" "@babel/template@^7.12.13": version "7.12.13" @@ -2045,6 +2191,15 @@ "@babel/parser" "^7.12.13" "@babel/types" "^7.12.13" +"@babel/template@^7.12.7", "@babel/template@^7.16.7": + version "7.16.7" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.16.7.tgz#8d126c8701fde4d66b264b3eba3d96f07666d155" + integrity sha512-I8j/x8kHUrbYRTUxXrrMbfCa7jxkE7tZre39x3kjr9hvI82cK1FfqLygotcWN5kdPGWcLdWMHpSBavse5tWw3w== + dependencies: + "@babel/code-frame" "^7.16.7" + "@babel/parser" "^7.16.7" + "@babel/types" "^7.16.7" + "@babel/template@^7.15.4": version "7.15.4" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.15.4.tgz#51898d35dcf3faa670c4ee6afcfd517ee139f194" @@ -2054,20 +2209,21 @@ "@babel/parser" "^7.15.4" "@babel/types" "^7.15.4" -"@babel/traverse@^7.1.6", "@babel/traverse@^7.10.4", 
"@babel/traverse@^7.12.1", "@babel/traverse@^7.12.10", "@babel/traverse@^7.12.5", "@babel/traverse@^7.4.5", "@babel/traverse@^7.7.0": - version "7.12.12" - resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.12.12.tgz#d0cd87892704edd8da002d674bc811ce64743376" - integrity sha512-s88i0X0lPy45RrLM8b9mz8RPH5FqO9G9p7ti59cToE44xFm1Q+Pjh5Gq4SXBbtb88X7Uy7pexeqRIQDDMNkL0w== +"@babel/traverse@^7.12.1", "@babel/traverse@^7.12.10", "@babel/traverse@^7.16.7", "@babel/traverse@^7.16.8", "@babel/traverse@^7.17.0", "@babel/traverse@^7.17.3": + version "7.17.3" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.17.3.tgz#0ae0f15b27d9a92ba1f2263358ea7c4e7db47b57" + integrity sha512-5irClVky7TxRWIRtxlh2WPUUOLhcPN06AGgaQSB8AEwuyEBgJVuJ5imdHm5zxk8w0QS5T+tDfnDxAlhWjpb7cw== dependencies: - "@babel/code-frame" "^7.12.11" - "@babel/generator" "^7.12.11" - "@babel/helper-function-name" "^7.12.11" - "@babel/helper-split-export-declaration" "^7.12.11" - "@babel/parser" "^7.12.11" - "@babel/types" "^7.12.12" + "@babel/code-frame" "^7.16.7" + "@babel/generator" "^7.17.3" + "@babel/helper-environment-visitor" "^7.16.7" + "@babel/helper-function-name" "^7.16.7" + "@babel/helper-hoist-variables" "^7.16.7" + "@babel/helper-split-export-declaration" "^7.16.7" + "@babel/parser" "^7.17.3" + "@babel/types" "^7.17.0" debug "^4.1.0" globals "^11.1.0" - lodash "^4.17.19" "@babel/traverse@^7.12.11", "@babel/traverse@^7.12.9", "@babel/traverse@^7.15.4": version "7.15.4" @@ -2099,13 +2255,27 @@ globals "^11.1.0" lodash "^4.17.19" -"@babel/types@^7.1.6", "@babel/types@^7.10.4", "@babel/types@^7.10.5", "@babel/types@^7.12.1", "@babel/types@^7.12.10", "@babel/types@^7.12.11", "@babel/types@^7.12.12", "@babel/types@^7.12.5", "@babel/types@^7.12.7", "@babel/types@^7.4.0", "@babel/types@^7.4.4", "@babel/types@^7.7.0", "@babel/types@^7.7.2": +"@babel/traverse@^7.4.5", "@babel/traverse@^7.7.0": version "7.12.12" - resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.12.12.tgz#4608a6ec313abbd87afa55004d373ad04a96c299" - integrity sha512-lnIX7piTxOH22xE7fDXDbSHg9MM1/6ORnafpJmov5rs0kX5g4BZxeXNJLXsMRiO0U5Rb8/FvMS6xlTnTHvxonQ== + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.12.12.tgz#d0cd87892704edd8da002d674bc811ce64743376" + integrity sha512-s88i0X0lPy45RrLM8b9mz8RPH5FqO9G9p7ti59cToE44xFm1Q+Pjh5Gq4SXBbtb88X7Uy7pexeqRIQDDMNkL0w== dependencies: - "@babel/helper-validator-identifier" "^7.12.11" + "@babel/code-frame" "^7.12.11" + "@babel/generator" "^7.12.11" + "@babel/helper-function-name" "^7.12.11" + "@babel/helper-split-export-declaration" "^7.12.11" + "@babel/parser" "^7.12.11" + "@babel/types" "^7.12.12" + debug "^4.1.0" + globals "^11.1.0" lodash "^4.17.19" + +"@babel/types@^7.12.1", "@babel/types@^7.12.10", "@babel/types@^7.12.11", "@babel/types@^7.12.12", "@babel/types@^7.12.5", "@babel/types@^7.12.7", "@babel/types@^7.16.0", "@babel/types@^7.16.7", "@babel/types@^7.16.8", "@babel/types@^7.17.0", "@babel/types@^7.4.4": + version "7.17.0" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.17.0.tgz#a826e368bccb6b3d84acd76acad5c0d87342390b" + integrity sha512-TmKSNO4D5rzhL5bjWFcVHHLETzfQ/AmbKpKPOSjlP0WoHZ6L911fgoOKY4Alp/emzG4cHJdyN49zpgkbXFEHHw== + dependencies: + "@babel/helper-validator-identifier" "^7.16.7" to-fast-properties "^2.0.0" "@babel/types@^7.12.13", "@babel/types@^7.13.0", "@babel/types@^7.13.12": @@ -2125,6 +2295,15 @@ "@babel/helper-validator-identifier" "^7.14.9" to-fast-properties "^2.0.0" +"@babel/types@^7.7.0", "@babel/types@^7.7.2": + version 
"7.12.12" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.12.12.tgz#4608a6ec313abbd87afa55004d373ad04a96c299" + integrity sha512-lnIX7piTxOH22xE7fDXDbSHg9MM1/6ORnafpJmov5rs0kX5g4BZxeXNJLXsMRiO0U5Rb8/FvMS6xlTnTHvxonQ== + dependencies: + "@babel/helper-validator-identifier" "^7.12.11" + lodash "^4.17.19" + to-fast-properties "^2.0.0" + "@cnakazawa/watch@^1.0.3": version "1.0.4" resolved "https://registry.yarnpkg.com/@cnakazawa/watch/-/watch-1.0.4.tgz#f864ae85004d0fcab6f50be9141c4da368d1656a" @@ -2138,78 +2317,79 @@ resolved "https://registry.yarnpkg.com/@discoveryjs/json-ext/-/json-ext-0.5.5.tgz#9283c9ce5b289a3c4f61c12757469e59377f81f3" integrity sha512-6nFkfkmSeV/rqSaS4oWHgmpnYw194f6hmWF5is6b0J1naJZoiD0NTc9AiUwPHvWsowkjuHErCZT1wa0jg+BLIA== -"@ember-data/adapter@3.24.2": - version "3.24.2" - resolved "https://registry.yarnpkg.com/@ember-data/adapter/-/adapter-3.24.2.tgz#dd0b0f4f3c6e83dcba8c1a65d799a4c737f5eeec" - integrity sha512-3NmgrGNOUYKseJjUHcre3IOhLlpPMg7o9o8ZNRyi7r2M1n9flsXuKzJPMiteAic3U7bhODk44gorYjQ6goCzHw== +"@ember-data/adapter@3.28.8": + version "3.28.8" + resolved "https://registry.yarnpkg.com/@ember-data/adapter/-/adapter-3.28.8.tgz#dea435bba99cb9a6483ae1097429dbee0392e8df" + integrity sha512-wQiCvUEYq9c51y7X25njP2V9FcgqEawT6hSNuZjk9p7NemZWmqdFChcmXNOs2+dejuM58mF1q2Q6pllhhBSJJQ== dependencies: - "@ember-data/private-build-infra" "3.24.2" - "@ember-data/store" "3.24.2" + "@ember-data/private-build-infra" "3.28.8" + "@ember-data/store" "3.28.8" "@ember/edition-utils" "^1.2.0" - "@ember/string" "^1.0.0" - ember-cli-babel "^7.18.0" + "@ember/string" "^3.0.0" + ember-cli-babel "^7.26.6" ember-cli-test-info "^1.0.0" - ember-cli-typescript "^3.1.3" + ember-cli-typescript "^4.1.0" -"@ember-data/canary-features@3.24.2": - version "3.24.2" - resolved "https://registry.yarnpkg.com/@ember-data/canary-features/-/canary-features-3.24.2.tgz#bd91beda313fc4d7ef3d8fc5dc709d53350c246c" - integrity sha512-duCgl99T6QQ4HuXNMI1l1vA8g7cvi7Ol/loVFOtkJn+MOlcQOzXNATuNqC/LPjTiHpPdQTL18+fq2wIZEDnq0w== +"@ember-data/canary-features@3.28.8": + version "3.28.8" + resolved "https://registry.yarnpkg.com/@ember-data/canary-features/-/canary-features-3.28.8.tgz#8e82d8cfea740726cd68cf83f662ee9b63267c39" + integrity sha512-Ek4esmO2PLYGW5Bc9H0Fgl0XYzwWKT7JRVFYYwDyqS4UWQXERAKOMtJSrIndhkeahvhwBl/WUD8//s8wqm+gNQ== dependencies: - ember-cli-babel "^7.18.0" - ember-cli-typescript "^3.1.3" + ember-cli-babel "^7.26.6" + ember-cli-typescript "^4.1.0" -"@ember-data/debug@3.24.2": - version "3.24.2" - resolved "https://registry.yarnpkg.com/@ember-data/debug/-/debug-3.24.2.tgz#6b96fb9cd0914071efb9ac3c6befc1a3a55f5d38" - integrity sha512-RPTGoSFPGjhB7ZVbv3eGFL6NeZKCtWv9BrZwrZH7ZvHWN1Vc7vYG3NAsLAafpjbkfSo4KG2OKHZGftpXCIl2Og== +"@ember-data/debug@3.28.8": + version "3.28.8" + resolved "https://registry.yarnpkg.com/@ember-data/debug/-/debug-3.28.8.tgz#679c60c0ed54c98858a74e3efc1de028ac638a49" + integrity sha512-IYjIIZ4si9ozNpfAlNf6kINRTTNAR8vJbFy+53/R5+niKWl9ghF+SO9tWwgq1yOyj0RUXuuW91/uc6HXwMdx0A== dependencies: - "@ember-data/private-build-infra" "3.24.2" + "@ember-data/private-build-infra" "3.28.8" "@ember/edition-utils" "^1.2.0" - "@ember/string" "^1.0.0" - ember-cli-babel "^7.18.0" + "@ember/string" "^3.0.0" + ember-cli-babel "^7.26.6" ember-cli-test-info "^1.0.0" - ember-cli-typescript "^3.1.3" + ember-cli-typescript "^4.1.0" -"@ember-data/model@3.24.2": - version "3.24.2" - resolved "https://registry.yarnpkg.com/@ember-data/model/-/model-3.24.2.tgz#8d718b8151eb2489328c5957d9ffc367b5b9a512" - integrity 
sha512-vKBYlWZYk0uh+7TiEYADQakUpJLbZ+ahU9ez2WEMtsdl4cDHpEBwyFH76Zmh3dp2Pz/aq5UwOtEHz/ggpUo7fQ== +"@ember-data/model@3.28.8": + version "3.28.8" + resolved "https://registry.yarnpkg.com/@ember-data/model/-/model-3.28.8.tgz#7ed19f128e80518b023cdcbf7cb678df6d2e72ea" + integrity sha512-/BhAMvloW2bz7A0CjczF4vWYdG9at/IkQX4G2tLEicR7rp4G0GwNaehXzV1S7Tk9jeM0GA3Pj/OaP2mXcxX7lw== dependencies: - "@ember-data/canary-features" "3.24.2" - "@ember-data/private-build-infra" "3.24.2" - "@ember-data/store" "3.24.2" + "@ember-data/canary-features" "3.28.8" + "@ember-data/private-build-infra" "3.28.8" + "@ember-data/store" "3.28.8" "@ember/edition-utils" "^1.2.0" - "@ember/string" "^1.0.0" - ember-cli-babel "^7.18.0" + "@ember/string" "^3.0.0" + ember-cached-decorator-polyfill "^0.1.4" + ember-cli-babel "^7.26.6" ember-cli-string-utils "^1.1.0" ember-cli-test-info "^1.0.0" - ember-cli-typescript "^3.1.3" + ember-cli-typescript "^4.1.0" ember-compatibility-helpers "^1.2.0" - inflection "1.12.0" + inflection "~1.13.1" -"@ember-data/private-build-infra@3.24.2": - version "3.24.2" - resolved "https://registry.yarnpkg.com/@ember-data/private-build-infra/-/private-build-infra-3.24.2.tgz#2b77fb4490a6c657d9f82ce8c54fe22157e03963" - integrity sha512-uYv9BOGaNxsSacE0jFRFhrs/Xg6f8Rma2Ap/mVjwouBvu+DV2cl5E2zIMalygu/ngIiGhiNUeUp2RpjSpR054w== +"@ember-data/private-build-infra@3.28.8": + version "3.28.8" + resolved "https://registry.yarnpkg.com/@ember-data/private-build-infra/-/private-build-infra-3.28.8.tgz#9e38e30282417bf596ee62f7574a0b01b6319223" + integrity sha512-iVhAgBzdYEAczJ7+xb1GC5P31/5ZJvsl9bNMilohw+YLcRy5fZHl/y0ibLwutKKR0nCYDE92a5b6RotbpVWbJQ== dependencies: "@babel/plugin-transform-block-scoping" "^7.8.3" - "@ember-data/canary-features" "3.24.2" + "@ember-data/canary-features" "3.28.8" "@ember/edition-utils" "^1.2.0" babel-plugin-debug-macros "^0.3.3" babel-plugin-filter-imports "^4.0.0" babel6-plugin-strip-class-callcheck "^6.0.0" broccoli-debug "^0.6.5" broccoli-file-creator "^2.1.1" - broccoli-funnel "^2.0.2" + broccoli-funnel "^3.0.3" broccoli-merge-trees "^4.2.0" - broccoli-rollup "^4.1.1" + broccoli-rollup "^5.0.0" calculate-cache-key-for-tree "^2.0.0" chalk "^4.0.0" - ember-cli-babel "^7.18.0" + ember-cli-babel "^7.26.6" ember-cli-path-utils "^1.0.0" ember-cli-string-utils "^1.1.0" - ember-cli-typescript "^3.1.3" + ember-cli-typescript "^4.1.0" ember-cli-version-checker "^5.1.1" esm "^3.2.25" git-repo-info "^2.1.1" @@ -2220,48 +2400,47 @@ semver "^7.1.3" silent-error "^1.1.1" -"@ember-data/record-data@3.24.2": - version "3.24.2" - resolved "https://registry.yarnpkg.com/@ember-data/record-data/-/record-data-3.24.2.tgz#cf07dd13c74d02046af2dc4a9102f9f6cb897184" - integrity sha512-vdsWiPp29lwgMeyf4O1sXZ8xJf/zPCIEfksYeGaJ9VhiTKOucqiRxIFeI2cdyqxkM0frtCyNwYEntpy871Os2Q== +"@ember-data/record-data@3.28.8": + version "3.28.8" + resolved "https://registry.yarnpkg.com/@ember-data/record-data/-/record-data-3.28.8.tgz#2e27765bc4a22390b3989deb73de0ee3029f7e35" + integrity sha512-3HVCWsG1+nS2kqDYCVKuNhunmS4ovQViOeOtqwdwaYrYk892JR+vbDtrrAApgY6jDtLc1dyvgaNK950FypYSEQ== dependencies: - "@ember-data/canary-features" "3.24.2" - "@ember-data/private-build-infra" "3.24.2" - "@ember-data/store" "3.24.2" + "@ember-data/canary-features" "3.28.8" + "@ember-data/private-build-infra" "3.28.8" + "@ember-data/store" "3.28.8" "@ember/edition-utils" "^1.2.0" - "@ember/ordered-set" "^4.0.0" - ember-cli-babel "^7.18.0" + ember-cli-babel "^7.26.6" ember-cli-test-info "^1.0.0" - ember-cli-typescript "^3.1.3" + ember-cli-typescript "^4.1.0" 
"@ember-data/rfc395-data@^0.0.4": version "0.0.4" resolved "https://registry.yarnpkg.com/@ember-data/rfc395-data/-/rfc395-data-0.0.4.tgz#ecb86efdf5d7733a76ff14ea651a1b0ed1f8a843" integrity sha512-tGRdvgC9/QMQSuSuJV45xoyhI0Pzjm7A9o/MVVA3HakXIImJbbzx/k/6dO9CUEQXIyS2y0fW6C1XaYOG7rY0FQ== -"@ember-data/serializer@3.24.2": - version "3.24.2" - resolved "https://registry.yarnpkg.com/@ember-data/serializer/-/serializer-3.24.2.tgz#d95816f4af4b9d3031bdd198cd10e6bde3fa8b17" - integrity sha512-so/NkQgtecXqPdFMjUHkXQ73n9TFVMigZeCFuippkP3lQu2HquJ9u/e+WRcgLzziU7q+eBTnt2Lar9uLkXMNyw== +"@ember-data/serializer@3.28.8": + version "3.28.8" + resolved "https://registry.yarnpkg.com/@ember-data/serializer/-/serializer-3.28.8.tgz#1c616538f7d2e454f7188676af4d8882bc7e6d9e" + integrity sha512-iOUIHk/gg0C+IeqaBrW5y+5bPaGQawxpw92PuWmIqupixkVwJtJb5fD1KBjDzKYTO2QbAKzsHtseXShZa9XHTA== dependencies: - "@ember-data/private-build-infra" "3.24.2" - "@ember-data/store" "3.24.2" - ember-cli-babel "^7.18.0" + "@ember-data/private-build-infra" "3.28.8" + "@ember-data/store" "3.28.8" + ember-cli-babel "^7.26.6" ember-cli-test-info "^1.0.0" - ember-cli-typescript "^3.1.3" + ember-cli-typescript "^4.1.0" -"@ember-data/store@3.24.2": - version "3.24.2" - resolved "https://registry.yarnpkg.com/@ember-data/store/-/store-3.24.2.tgz#2583e03f8c51c5b049a29bbed304ae78085e8b9b" - integrity sha512-FJVZIrCwFDebh/s3Gy4YC+PK7BRaDIudor53coia236hpAW9eO/itO/ZbOGt9eFumWzX6eUFxJixD0o9FvGybA== +"@ember-data/store@3.28.8": + version "3.28.8" + resolved "https://registry.yarnpkg.com/@ember-data/store/-/store-3.28.8.tgz#9bdae8cceed22372248ed76f4bf2707f0029e714" + integrity sha512-frzTXDaa1NwrjCSIy8e9iQjns+g1feoK9eHQYhjOLyVtxlGzfS+grP62qMp5mdBuiY1XmLChSCVh8DhjlZ6Gbw== dependencies: - "@ember-data/canary-features" "3.24.2" - "@ember-data/private-build-infra" "3.24.2" - "@ember/string" "^1.0.0" - ember-cli-babel "^7.18.0" + "@ember-data/canary-features" "3.28.8" + "@ember-data/private-build-infra" "3.28.8" + "@ember/string" "^3.0.0" + "@glimmer/tracking" "^1.0.4" + ember-cli-babel "^7.26.6" ember-cli-path-utils "^1.0.0" - ember-cli-typescript "^3.1.3" - heimdalljs "^0.3.0" + ember-cli-typescript "^4.1.0" "@ember-decorators/component@^6.1.1": version "6.1.1" @@ -2286,6 +2465,16 @@ dependencies: ember-cli-babel "^7.1.3" +"@ember-template-lint/todo-utils@^10.0.0": + version "10.0.0" + resolved "https://registry.yarnpkg.com/@ember-template-lint/todo-utils/-/todo-utils-10.0.0.tgz#085aafcf31ca04ba4d3a9460f088aed752b90ea8" + integrity sha512-US8VKnetBOl8KfKz+rXGsosz6rIETNwSz2F2frM8hIoJfF/d6ME1Iz1K7tPYZEE6SoKqZFlBs5XZPSmzRnabjA== + dependencies: + "@types/eslint" "^7.2.13" + fs-extra "^9.1.0" + slash "^3.0.0" + tslib "^2.2.0" + "@ember/edition-utils@^1.2.0": version "1.2.0" resolved "https://registry.yarnpkg.com/@ember/edition-utils/-/edition-utils-1.2.0.tgz#a039f542dc14c8e8299c81cd5abba95e2459cfa6" @@ -2303,54 +2492,21 @@ mkdirp "^1.0.4" silent-error "^1.1.1" -"@ember/ordered-set@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@ember/ordered-set/-/ordered-set-4.0.0.tgz#c5ec021ab8d4734c6db92708a81edd499d45fd31" - integrity sha512-cUCcme4R5H37HyK8w0qzdG5+lpb3XVr2RQHLyWEP4JsKI66Ob4tizoJOs8rb/XdHCv+F5WeA321hfPMi3DrZbg== +"@ember/render-modifiers@^2.0.0": + version "2.0.4" + resolved "https://registry.yarnpkg.com/@ember/render-modifiers/-/render-modifiers-2.0.4.tgz#0ac7af647cb736076dbfcd54ca71e090cd329d71" + integrity sha512-Zh/fo5VUmVzYHkHVvzWVjJ1RjFUxA2jH0zCp2+DQa80Bf3DUXauiEByxU22UkN4LFT55DBFttC0xCQSJG3WTsg== dependencies: - ember-cli-babel 
"^7.22.1" - ember-compatibility-helpers "^1.1.1" + "@embroider/macros" "^1.0.0" + ember-cli-babel "^7.26.11" + ember-modifier-manager-polyfill "^1.2.0" -"@ember/render-modifiers@^1.0.2": - version "1.0.2" - resolved "https://registry.yarnpkg.com/@ember/render-modifiers/-/render-modifiers-1.0.2.tgz#2e87c48db49d922ce4850d707215caaac60d8444" - integrity sha512-6tEnHl5+62NTSAG2mwhGMFPhUrJQjoVqV+slsn+rlTknm2Zik+iwxBQEbwaiQOU1FUYxkS8RWcieovRNMR8inQ== +"@ember/string@^3.0.0": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@ember/string/-/string-3.0.0.tgz#e3a3cc7874c9f64eadfdac644d8b1238721ce289" + integrity sha512-T+7QYDp8ItlQseNveK2lL6OsOO5wg7aNQ/M2RpO8cGwM80oZOnr/Y35HmMfu4ejFEc+F1LPegvu7LGfeJOicWA== dependencies: - ember-cli-babel "^7.10.0" - ember-modifier-manager-polyfill "^1.1.0" - -"@ember/string@^1.0.0": - version "1.0.0" - resolved "https://registry.yarnpkg.com/@ember/string/-/string-1.0.0.tgz#3a2254caedacb95e09071204d36cad49e0f8b855" - integrity sha512-KZ+CcIXFdyIBMztxDMgza4SdLJgIeUgTjDAoHk6M50C2u1X/BK7KWUIN7MIK2LNTOMvbib9lWwEzKboxdI4lBw== - dependencies: - ember-cli-babel "^7.4.0" - -"@ember/test-helpers@^1.7.1": - version "1.7.2" - resolved "https://registry.yarnpkg.com/@ember/test-helpers/-/test-helpers-1.7.2.tgz#5b128dc5f6524c3850abf52668e6bd4fda401194" - integrity sha512-FEJBpbFNIaWAsCSnataiNwYFvmcpoymL/B7fXLruuJ/46BnJjzLaRPtpUIZ91w4GNTK6knxbHWXW76aVb3Aezg== - dependencies: - broccoli-debug "^0.6.5" - broccoli-funnel "^2.0.2" - ember-assign-polyfill "^2.6.0" - ember-cli-babel "^7.7.3" - ember-cli-htmlbars-inline-precompile "^2.1.0" - ember-test-waiters "^1.1.1" - -"@ember/test-helpers@^2.0.0": - version "2.1.4" - resolved "https://registry.yarnpkg.com/@ember/test-helpers/-/test-helpers-2.1.4.tgz#18cc78f3f9230a66955eb4307541129b53ebd487" - integrity sha512-CcDAWfzYE4r1YApufPPINm0IZL223yv/JiG61rjZr0KPyGBCOWjA9b//bY+jdAwZIEsmoMDfMbSS3p1u3c6i5w== - dependencies: - "@ember/test-waiters" "^2.3.2" - broccoli-debug "^0.6.5" - broccoli-funnel "^3.0.3" - ember-auto-import "^1.10.0" - ember-cli-babel "^7.22.1" - ember-cli-htmlbars "^5.2.0" - ember-destroyable-polyfill "^2.0.2" - es6-promise "^4.2.8" + ember-cli-babel "^7.26.6" "@ember/test-helpers@^2.1.4": version "2.4.2" @@ -2364,16 +2520,17 @@ ember-cli-htmlbars "^5.7.1" ember-destroyable-polyfill "^2.0.3" -"@ember/test-waiters@^2.3.2": - version "2.3.2" - resolved "https://registry.yarnpkg.com/@ember/test-waiters/-/test-waiters-2.3.2.tgz#5d416d25209b25b16116df0a01a856437362b7eb" - integrity sha512-4zDQd14br6VzvBf0PD/dm6Vg9nG33WUW14UGI26k+iOGG9iY++pHL5+PnrvCxxrZ9867EPTULy8K2oneenAtSw== +"@ember/test-helpers@^2.6.0": + version "2.6.0" + resolved "https://registry.yarnpkg.com/@ember/test-helpers/-/test-helpers-2.6.0.tgz#d687515c6ab49ba72717fc62046970ef4a72ea9c" + integrity sha512-N5sr3layWk60wB3maCy+/5hFHQRcTh8aqxcZTSs3Od9QkuHdWBtRgMGLP/35mXpJlgWuu3xqLpt6u3dGHc8gCg== dependencies: - calculate-cache-key-for-tree "^2.0.0" - ember-cli-babel "^7.21.0" - ember-cli-typescript "^3.1.4" - ember-cli-version-checker "^5.1.1" - semver "^7.3.2" + "@ember/test-waiters" "^3.0.0" + broccoli-debug "^0.6.5" + broccoli-funnel "^3.0.8" + ember-cli-babel "^7.26.6" + ember-cli-htmlbars "^5.7.1" + ember-destroyable-polyfill "^2.0.3" "@ember/test-waiters@^3.0.0": version "3.0.0" @@ -2385,45 +2542,6 @@ ember-cli-version-checker "^5.1.2" semver "^7.3.5" -"@embroider/core@0.33.0", "@embroider/core@^0.33.0": - version "0.33.0" - resolved "https://registry.yarnpkg.com/@embroider/core/-/core-0.33.0.tgz#0fb1752d6e34ea45368e65c42e13220a57ffae76" - integrity 
sha512-Kd3W4vBJCSwskVislwldhuoe1RtdA04lRr2r2ccnPI4msCXxLn292WBaS7/x0LdEu2EMO5ffRDeQva2/xoS4Yg== - dependencies: - "@babel/core" "^7.12.3" - "@babel/parser" "^7.12.3" - "@babel/plugin-syntax-dynamic-import" "^7.8.3" - "@babel/plugin-transform-runtime" "^7.12.1" - "@babel/runtime" "^7.12.5" - "@babel/traverse" "^7.12.1" - "@babel/types" "^7.12.1" - "@embroider/macros" "0.33.0" - assert-never "^1.1.0" - babel-plugin-syntax-dynamic-import "^6.18.0" - broccoli-node-api "^1.7.0" - broccoli-persistent-filter "^3.1.2" - broccoli-plugin "^4.0.1" - broccoli-source "^3.0.0" - debug "^3.1.0" - escape-string-regexp "^4.0.0" - fast-sourcemap-concat "^1.4.0" - filesize "^4.1.2" - fs-extra "^7.0.1" - fs-tree-diff "^2.0.0" - handlebars "^4.4.2" - js-string-escape "^1.0.1" - jsdom "^16.4.0" - json-stable-stringify "^1.0.1" - lodash "^4.17.10" - pkg-up "^2.0.0" - resolve "^1.8.1" - resolve-package-path "^1.2.2" - semver "^7.3.2" - strip-bom "^3.0.0" - typescript-memoize "^1.0.0-alpha.3" - walk-sync "^1.1.3" - wrap-legacy-hbs-plugin-if-needed "^1.0.1" - "@embroider/core@0.36.0": version "0.36.0" resolved "https://registry.yarnpkg.com/@embroider/core/-/core-0.36.0.tgz#fbbd60d29c3fcbe02b4e3e63e6043a43de2b9ce3" @@ -2463,21 +2581,6 @@ walk-sync "^1.1.3" wrap-legacy-hbs-plugin-if-needed "^1.0.1" -"@embroider/macros@0.33.0": - version "0.33.0" - resolved "https://registry.yarnpkg.com/@embroider/macros/-/macros-0.33.0.tgz#d5826ea7565bb69b57ba81ed528315fe77acbf9d" - integrity sha512-nl/1zRn+Wd3MO8Bb+YPqHmFl/2vwQLTsEB6Zt+K9bWXsM/kA+dPCeeCReLN6PbkMP16xxqtNSIrQ8Y49hnWjpg== - dependencies: - "@babel/core" "^7.12.3" - "@babel/traverse" "^7.12.1" - "@babel/types" "^7.12.1" - "@embroider/core" "0.33.0" - assert-never "^1.1.0" - ember-cli-babel "^7.23.0" - lodash "^4.17.10" - resolve "^1.8.1" - semver "^7.3.2" - "@embroider/macros@0.36.0", "@embroider/macros@^0.36.0": version "0.36.0" resolved "https://registry.yarnpkg.com/@embroider/macros/-/macros-0.36.0.tgz#5330f1e6f12112f0f68e34b3e4855dc7dd3c69a5" @@ -2493,6 +2596,109 @@ resolve "^1.8.1" semver "^7.3.2" +"@embroider/macros@0.41.0": + version "0.41.0" + resolved "https://registry.yarnpkg.com/@embroider/macros/-/macros-0.41.0.tgz#3e78b6f388d7229906abf4c75edfff8bb0208aca" + integrity sha512-QISzwEEfLsskZeL0jyZDs1RoQSotwBWj+4upTogNHuxQP5j/9H3IMG/3QB1gh8GEpbudATb/cS4NDYK3UBxufw== + dependencies: + "@embroider/shared-internals" "0.41.0" + assert-never "^1.1.0" + ember-cli-babel "^7.23.0" + lodash "^4.17.10" + resolve "^1.8.1" + semver "^7.3.2" + +"@embroider/macros@0.47.2", "@embroider/macros@^0.47.2": + version "0.47.2" + resolved "https://registry.yarnpkg.com/@embroider/macros/-/macros-0.47.2.tgz#23cbe92cac3c24747f054e1eea2a22538bf7ebd0" + integrity sha512-ViNWluJCeM5OPlM3rs8kdOz3RV5rpfXX5D2rDnc/q86xRS0xf4NFEjYRV7W6fBcD0b3v5jSHDTwrjq9Kee4rHg== + dependencies: + "@embroider/shared-internals" "0.47.2" + assert-never "^1.2.1" + ember-cli-babel "^7.26.6" + find-up "^5.0.0" + lodash "^4.17.21" + resolve "^1.20.0" + semver "^7.3.2" + +"@embroider/macros@^0.40.0": + version "0.40.0" + resolved "https://registry.yarnpkg.com/@embroider/macros/-/macros-0.40.0.tgz#f58763b4cfb9b4089679b478a28627595341bc5a" + integrity sha512-ygChvFoebSi/N8b+A+XFncd454gLYBYHancrtY0AE/h6Y1HouoqQvji/IfaLisGoeuwUWuI9rCBv97COweu/rA== + dependencies: + "@embroider/shared-internals" "0.40.0" + assert-never "^1.1.0" + ember-cli-babel "^7.23.0" + lodash "^4.17.10" + resolve "^1.8.1" + semver "^7.3.2" + +"@embroider/macros@^1.0.0": + version "1.2.0" + resolved 
"https://registry.yarnpkg.com/@embroider/macros/-/macros-1.2.0.tgz#9a2d99225fba6dcb69e795cddad9f4948c2a2b6b" + integrity sha512-WD2V3OKXZB73OymI/zC2+MbqIYaAskhjtSOVVY6yG6kWILyVsJ6+fcbNHEnZyGqs4sm0TvHVJfevmA2OXV8Pww== + dependencies: + "@embroider/shared-internals" "1.2.0" + assert-never "^1.2.1" + babel-import-util "^1.1.0" + ember-cli-babel "^7.26.6" + find-up "^5.0.0" + lodash "^4.17.21" + resolve "^1.20.0" + semver "^7.3.2" + +"@embroider/shared-internals@0.40.0": + version "0.40.0" + resolved "https://registry.yarnpkg.com/@embroider/shared-internals/-/shared-internals-0.40.0.tgz#2f768c60f4f35ba5f9228f046f70324851e8bfe2" + integrity sha512-Ovr/i0Qgn6W6jdGXMvYJKlRoRpyBY9uhYozDSFKlBjeEmRJ0Plp7OST41+O5Td6Pqp+Rv2jVSnGzhA/MpC++NQ== + dependencies: + ember-rfc176-data "^0.3.17" + fs-extra "^7.0.1" + lodash "^4.17.10" + pkg-up "^3.1.0" + resolve-package-path "^1.2.2" + semver "^7.3.2" + typescript-memoize "^1.0.0-alpha.3" + +"@embroider/shared-internals@0.41.0": + version "0.41.0" + resolved "https://registry.yarnpkg.com/@embroider/shared-internals/-/shared-internals-0.41.0.tgz#2553f026d4f48ea1fd11235501feb63bf49fa306" + integrity sha512-fiqUVB6cfh2UBEFE4yhT5EzagkZ1Q26+OhBV0nJszFEJZx4DqVIb3pxSSZ8P+HhpxuJsQ2XpMA/j02ZPFZfbdQ== + dependencies: + ember-rfc176-data "^0.3.17" + fs-extra "^7.0.1" + lodash "^4.17.10" + pkg-up "^3.1.0" + resolve-package-path "^1.2.2" + semver "^7.3.2" + typescript-memoize "^1.0.0-alpha.3" + +"@embroider/shared-internals@0.47.2": + version "0.47.2" + resolved "https://registry.yarnpkg.com/@embroider/shared-internals/-/shared-internals-0.47.2.tgz#24e9fa0dd9c529d5c996ee1325729ea08d1fa19f" + integrity sha512-SxdZYjAE0fiM5zGDz+12euWIsQZ1tsfR1k+NKmiWMyLhA5T3pNgbR2/Djvx/cVIxOtEavGGSllYbzRKBtV4xMg== + dependencies: + babel-import-util "^0.2.0" + ember-rfc176-data "^0.3.17" + fs-extra "^9.1.0" + lodash "^4.17.21" + resolve-package-path "^4.0.1" + semver "^7.3.5" + typescript-memoize "^1.0.1" + +"@embroider/shared-internals@1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@embroider/shared-internals/-/shared-internals-1.2.0.tgz#edb3ca7e8528be713dab01818e8caad2dc937fba" + integrity sha512-11RfGuXxT+m2xPcpny/ENHjw53CuKPcrx7222LFQ53+I09hLxsvPCsH8be5E99LePDA46YGX41vxOxxfowD4OQ== + dependencies: + babel-import-util "^1.1.0" + ember-rfc176-data "^0.3.17" + fs-extra "^9.1.0" + lodash "^4.17.21" + resolve-package-path "^4.0.1" + semver "^7.3.5" + typescript-memoize "^1.0.1" + "@embroider/shared-internals@^1.0.0": version "1.0.0" resolved "https://registry.yarnpkg.com/@embroider/shared-internals/-/shared-internals-1.0.0.tgz#b081708ac79e4582f17ba0f3e3796e6612a8976c" @@ -2506,13 +2712,23 @@ semver "^7.3.5" typescript-memoize "^1.0.1" -"@embroider/util@^0.36.0": - version "0.36.0" - resolved "https://registry.yarnpkg.com/@embroider/util/-/util-0.36.0.tgz#b2ffb2b06ac491f157a771392191ce91ef2216a6" - integrity sha512-gMIGL9UQ6Pl5WvpcIqIiE+QkK5GW49VLp+IEl+v4w9ZBkH7Z9boiwWariW4y/FtNU1iU8ELFbol1+IJ8I8VN4Q== +"@embroider/util@^0.39.1 || ^0.40.0 || ^0.41.0": + version "0.41.0" + resolved "https://registry.yarnpkg.com/@embroider/util/-/util-0.41.0.tgz#5324cb4742aa4ed8d613c4f88a466f73e4e6acc1" + integrity sha512-ytA3J/YfQh7FEUEBwz3ezTqQNm/S5et5rZw3INBIy4Ak4x0NXV/VXLjyL8mv3txL8fGknZTBdXEhDsHUKIq8SQ== dependencies: - "@embroider/macros" "0.36.0" - ember-cli-babel "^7.22.1" + "@embroider/macros" "0.41.0" + broccoli-funnel "^3.0.5" + ember-cli-babel "^7.23.1" + +"@embroider/util@^0.47.2": + version "0.47.2" + resolved 
"https://registry.yarnpkg.com/@embroider/util/-/util-0.47.2.tgz#d06497b4b84c07ed9c7b628293bb019c533f4556" + integrity sha512-g9OqnFJPktGu9NS0Ug3Pxz1JE3jeDceeVE4IrlxDrVmBXMA/GrBvpwjolWgl6jh97cMJyExdz62jIvPHV4256Q== + dependencies: + "@embroider/macros" "0.47.2" + broccoli-funnel "^3.0.5" + ember-cli-babel "^7.23.1" "@emotion/cache@^10.0.27", "@emotion/cache@^10.0.9": version "10.0.29" @@ -2636,27 +2852,7 @@ resolved "https://registry.yarnpkg.com/@gar/promisify/-/promisify-1.1.2.tgz#30aa825f11d438671d585bd44e7fd564535fc210" integrity sha512-82cpyJyKRoQoRi+14ibCeGPu0CwypgtBAdBhq1WfvagpCZNKqwXbKwXllYSMG91DhmG4jt9gN8eP6lGOtozuaw== -"@glimmer/component@^1.0.1": - version "1.0.3" - resolved "https://registry.yarnpkg.com/@glimmer/component/-/component-1.0.3.tgz#38c26fc4855fd7ad0e0816d18d80d32c578e5140" - integrity sha512-GD3gcN+Pr2flmxkt2lm5K86jwX+KRD9QQpNH+wiEQGjBXOzd46+XD5npH1sRByqLYml9rW4klflcrEfNb7dnQw== - dependencies: - "@glimmer/di" "^0.1.9" - "@glimmer/env" "^0.1.7" - "@glimmer/util" "^0.44.0" - broccoli-file-creator "^2.1.1" - broccoli-merge-trees "^3.0.2" - ember-cli-babel "^7.7.3" - ember-cli-get-component-path-option "^1.0.0" - ember-cli-is-package-missing "^1.0.0" - ember-cli-normalize-entity-name "^1.0.0" - ember-cli-path-utils "^1.0.0" - ember-cli-string-utils "^1.1.0" - ember-cli-typescript "3.0.0" - ember-cli-version-checker "^3.1.3" - ember-compatibility-helpers "^1.1.2" - -"@glimmer/component@^1.0.2", "@glimmer/component@^1.0.4": +"@glimmer/component@^1.0.4": version "1.0.4" resolved "https://registry.yarnpkg.com/@glimmer/component/-/component-1.0.4.tgz#1c85a5181615a6647f6acfaaed68e28ad7e9626e" integrity sha512-sS4N8wtcKfYdUJ6O3m8nbTut6NjErdz94Ap8VB1ekcg4WSD+7sI7Nmv6kt2rdPoe363nUdjUbRBzHNWhLzraBw== @@ -2778,14 +2974,6 @@ "@handlebars/parser" "^1.1.0" simple-html-tokenizer "^0.5.10" -"@glimmer/tracking@^1.0.0": - version "1.0.3" - resolved "https://registry.yarnpkg.com/@glimmer/tracking/-/tracking-1.0.3.tgz#8b9b42aff6c206edeaaea178a95acc1eff62e61e" - integrity sha512-21WR13vPdzt1IQ6JmPPAu4szjV9yKdmLHV3nD0MLDj6/EoYv1c2PqpFBBlp++6xW8OnyDa++cQ8OFoQDP+MRpA== - dependencies: - "@glimmer/env" "^0.1.7" - "@glimmer/validator" "^0.44.0" - "@glimmer/tracking@^1.0.2", "@glimmer/tracking@^1.0.4": version "1.0.4" resolved "https://registry.yarnpkg.com/@glimmer/tracking/-/tracking-1.0.4.tgz#f1bc1412fe5e2236d0f8d502994a8f88af1bbb21" @@ -2826,6 +3014,13 @@ resolved "https://registry.yarnpkg.com/@glimmer/validator/-/validator-0.44.0.tgz#03d127097dc9cb23052cdb7fcae59d0a9dca53e1" integrity sha512-i01plR0EgFVz69GDrEuFgq1NheIjZcyTy3c7q+w7d096ddPVeVcRzU3LKaqCfovvLJ+6lJx40j45ecycASUUyw== +"@glimmer/vm-babel-plugins@0.80.3": + version "0.80.3" + resolved "https://registry.yarnpkg.com/@glimmer/vm-babel-plugins/-/vm-babel-plugins-0.80.3.tgz#434b62172318cac43830d3ac29818cf2c5f111c1" + integrity sha512-9ej6xlm5MzHBJ5am2l0dbbn8Z0wJoYoMpM8FcrGMlUP6SPMLWxvxpMsApgQo8u6dvZRCjR3/bw3fdf7GOy0AFw== + dependencies: + babel-plugin-debug-macros "^0.3.4" + "@glimmer/vm@^0.42.2": version "0.42.2" resolved "https://registry.yarnpkg.com/@glimmer/vm/-/vm-0.42.2.tgz#492a4f05eac587c3a37371b3c62593f20bef553d" @@ -2866,6 +3061,24 @@ resolved "https://registry.yarnpkg.com/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz#b520529ec21d8e5945a1851dfd1c32e94e39ff45" integrity sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA== +"@jridgewell/resolve-uri@^3.0.3": + version "3.0.5" + resolved 
"https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.0.5.tgz#68eb521368db76d040a6315cdb24bf2483037b9c" + integrity sha512-VPeQ7+wH0itvQxnG+lIzWgkysKIr3L9sslimFW55rHMdGu/qCQ5z5h9zq4gI8uBtqkpHhsF4Z/OwExufUCThew== + +"@jridgewell/sourcemap-codec@^1.4.10": + version "1.4.11" + resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.11.tgz#771a1d8d744eeb71b6adb35808e1a6c7b9b8c8ec" + integrity sha512-Fg32GrJo61m+VqYSdRSjRXMjQ06j8YIYfcTqndLYVAaHmroZHLJZCydsWBOTDqXS2v+mjxohBWEMfg97GXmYQg== + +"@jridgewell/trace-mapping@^0.3.0": + version "0.3.4" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.4.tgz#f6a0832dffd5b8a6aaa633b7d9f8e8e94c83a0c3" + integrity sha512-vFv9ttIedivx0ux3QSjhgtCVjPZd5l46ZOMDSCwnH1yUO2e964gO8LZGyv2QkqcgR6TnBU1v+1IFqmeoG+0UJQ== + dependencies: + "@jridgewell/resolve-uri" "^3.0.3" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@mdx-js/mdx@^1.6.22": version "1.6.22" resolved "https://registry.yarnpkg.com/@mdx-js/mdx/-/mdx-1.6.22.tgz#8a723157bf90e78f17dc0f27995398e6c731f1ba" @@ -2966,39 +3179,45 @@ prop-types "^15.6.1" react-lifecycles-compat "^3.0.4" +"@ro0gr/ceibo@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@ro0gr/ceibo/-/ceibo-2.2.0.tgz#01c4f3c8e6bfab98136d5863f55ac24e64c74539" + integrity sha512-4gSXPwwr99zUWxnTllN5L4QlfgFDloYKOsenoPvx46LE75x3wvLgGUhxUxhIMxJbqOZ0w9pzrugjQR7St0/PQg== + "@simple-dom/interface@^1.4.0": version "1.4.0" resolved "https://registry.yarnpkg.com/@simple-dom/interface/-/interface-1.4.0.tgz#e8feea579232017f89b0138e2726facda6fbb71f" integrity sha512-l5qumKFWU0S+4ZzMaLXFU8tQZsicHEMEyAxI5kDFGhJsRqDwe0a7/iPA/GdxlGyDKseQQAgIz5kzU7eXTrlSpA== -"@sindresorhus/is@^0.7.0": - version "0.7.0" - resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.7.0.tgz#9a06f4f137ee84d7df0460c1fdb1135ffa6c50fd" - integrity sha512-ONhaKPIufzzrlNbqtWFFd+jlnemX6lJAgq9ZeiZtS7I1PIf/la7CW4m83rTXRnVnsMbW2k56pGYu7AUFJD9Pow== +"@sinonjs/commons@^1.6.0", "@sinonjs/commons@^1.8.1": + version "1.8.3" + resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-1.8.3.tgz#3802ddd21a50a949b6721ddd72da36e67e7f1b2d" + integrity sha512-xkNcLAn/wZaX14RPlwizcKicDk9G3F8m2nU3L7Ukm5zBgTwiT0wsoFAHx9Jq56fJA1z/7uKGtCRu16sOUCLIHQ== + dependencies: + type-detect "4.0.8" -"@sinonjs/commons@^1", "@sinonjs/commons@^1.3.0", "@sinonjs/commons@^1.4.0", "@sinonjs/commons@^1.7.0": +"@sinonjs/commons@^1.7.0": version "1.8.2" resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-1.8.2.tgz#858f5c4b48d80778fde4b9d541f27edc0d56488b" integrity sha512-sruwd86RJHdsVf/AtBoijDmUqJp3B6hF/DGC23C+JaegnDHaZyewCjoVGTdg3J0uz3Zs7NnIT05OBOmML72lQw== dependencies: type-detect "4.0.8" -"@sinonjs/formatio@^3.2.1": - version "3.2.2" - resolved "https://registry.yarnpkg.com/@sinonjs/formatio/-/formatio-3.2.2.tgz#771c60dfa75ea7f2d68e3b94c7e888a78781372c" - integrity sha512-B8SEsgd8gArBLMD6zpRw3juQ2FVSsmdd7qlevyDqzS9WTCtvF55/gAL+h6gue8ZvPYcdiPdvueM/qm//9XzyTQ== +"@sinonjs/fake-timers@^6.0.0", "@sinonjs/fake-timers@^6.0.1": + version "6.0.1" + resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-6.0.1.tgz#293674fccb3262ac782c7aadfdeca86b10c75c40" + integrity sha512-MZPUxrmFubI36XS1DI3qmI0YdN1gks62JtFZvxR67ljjSNCeK6U08Zx4msEWOXuofgqUt6zPHSi1H9fbjR/NRA== dependencies: - "@sinonjs/commons" "^1" - "@sinonjs/samsam" "^3.1.0" + "@sinonjs/commons" "^1.7.0" -"@sinonjs/samsam@^3.1.0", "@sinonjs/samsam@^3.3.3": - version "3.3.3" - resolved 
"https://registry.yarnpkg.com/@sinonjs/samsam/-/samsam-3.3.3.tgz#46682efd9967b259b81136b9f120fd54585feb4a" - integrity sha512-bKCMKZvWIjYD0BLGnNrxVuw4dkWCYsLqFOUWw8VgKF/+5Y+mE7LfHWPIYoDXowH+3a9LsWDMo0uAP8YDosPvHQ== +"@sinonjs/samsam@^5.3.1": + version "5.3.1" + resolved "https://registry.yarnpkg.com/@sinonjs/samsam/-/samsam-5.3.1.tgz#375a45fe6ed4e92fca2fb920e007c48232a6507f" + integrity sha512-1Hc0b1TtyfBu8ixF/tpfSHTVWKwCBLY4QJbkgnE7HcwyvT2xArDxb4K7dMgqRm3szI+LJbzmW/s4xxEhv6hwDg== dependencies: - "@sinonjs/commons" "^1.3.0" - array-from "^2.1.1" - lodash "^4.17.15" + "@sinonjs/commons" "^1.6.0" + lodash.get "^4.4.2" + type-detect "^4.0.8" "@sinonjs/text-encoding@^0.7.1": version "0.7.1" @@ -3619,10 +3838,12 @@ resolved "https://registry.yarnpkg.com/@types/braces/-/braces-3.0.1.tgz#5a284d193cfc61abb2e5a50d36ebbc50d942a32b" integrity sha512-+euflG6ygo4bn0JHtn4pYqcXwRtLvElQ7/nnjDu7iYG56H0+OhCd7d6Ug0IE3WcFpZozBKW2+80FUbv5QGk5AQ== -"@types/broccoli-plugin@^1.3.0": - version "1.3.0" - resolved "https://registry.yarnpkg.com/@types/broccoli-plugin/-/broccoli-plugin-1.3.0.tgz#38f8462fecaebc4e09a32e4d4ed1b9808f75bbca" - integrity sha512-SLk4/hFc2kGvgwNFrpn2O1juxFOllcHAywvlo7VwxfExLzoz1GGJ0oIZCwj5fwSpvHw4AWpZjJ1fUvb62PDayQ== +"@types/broccoli-plugin@^3.0.0": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@types/broccoli-plugin/-/broccoli-plugin-3.0.0.tgz#290fda2270c47a568edfd0cefab8bb840d8bb7b2" + integrity sha512-f+TcsARR2PovfFRKFdCX0kfH/QoM3ZVD2h1rl2mNvrKO0fq2uBNCBsTU3JanfU4COCt5cXpTfARyUsERlC8vIw== + dependencies: + broccoli-plugin "*" "@types/chai-as-promised@^7.1.2": version "7.1.3" @@ -3670,11 +3891,40 @@ resolved "https://registry.yarnpkg.com/@types/cors/-/cors-2.8.12.tgz#6b2c510a7ad7039e98e7b8d3d6598f4359e5c080" integrity sha512-vt+kDhq/M2ayberEtJcIN/hxXy1Pk+59g2FV/ZQceeaTyCtCucjL2Q7FXlFjtWn4n15KCr1NE2lNNFhp0lEThw== +"@types/eslint-scope@^3.7.3": + version "3.7.3" + resolved "https://registry.yarnpkg.com/@types/eslint-scope/-/eslint-scope-3.7.3.tgz#125b88504b61e3c8bc6f870882003253005c3224" + integrity sha512-PB3ldyrcnAicT35TWPs5IcwKD8S333HMaa2VVv4+wdvebJkjWuW/xESoB8IwRcog8HYVYamb1g/R31Qv5Bx03g== + dependencies: + "@types/eslint" "*" + "@types/estree" "*" + +"@types/eslint@*": + version "8.4.1" + resolved "https://registry.yarnpkg.com/@types/eslint/-/eslint-8.4.1.tgz#c48251553e8759db9e656de3efc846954ac32304" + integrity sha512-GE44+DNEyxxh2Kc6ro/VkIj+9ma0pO0bwv9+uHSyBrikYOHr8zYcdPvnBOp1aw8s+CjRvuSx7CyWqRrNFQ59mA== + dependencies: + "@types/estree" "*" + "@types/json-schema" "*" + +"@types/eslint@^7.2.13": + version "7.29.0" + resolved "https://registry.yarnpkg.com/@types/eslint/-/eslint-7.29.0.tgz#e56ddc8e542815272720bb0b4ccc2aff9c3e1c78" + integrity sha512-VNcvioYDH8/FxaeTKkM4/TiTwt6pBV9E3OfGmvaw8tPl0rrHCJ4Ll15HRT+pMiFAf/MLQvAzC+6RzUMEL9Ceng== + dependencies: + "@types/estree" "*" + "@types/json-schema" "*" + "@types/estree@*": version "0.0.46" resolved "https://registry.yarnpkg.com/@types/estree/-/estree-0.0.46.tgz#0fb6bfbbeabd7a30880504993369c4bf1deab1fe" integrity sha512-laIjwTQaD+5DukBZaygQ79K1Z0jb1bPEMRrkXSLjtCcZm+abyp5YbrqpSLzD42FwWW6gK/aS4NYpJ804nG2brg== +"@types/estree@^0.0.51": + version "0.0.51" + resolved "https://registry.yarnpkg.com/@types/estree/-/estree-0.0.51.tgz#cfd70924a25a3fd32b218e5e420e6897e1ac4f40" + integrity sha512-CuPgU6f3eT/XgKKPqKd/gLZV1Xmvf1a2R5POBOGQa6uv82xpls89HU5zKeVoyR8XzHd1RGNOlQlvUe3CFkjWNQ== + "@types/express-serve-static-core@^4.17.18": version "4.17.18" resolved 
"https://registry.yarnpkg.com/@types/express-serve-static-core/-/express-serve-static-core-4.17.18.tgz#8371e260f40e0e1ca0c116a9afcd9426fa094c40" @@ -3713,7 +3963,15 @@ resolved "https://registry.yarnpkg.com/@types/glob-base/-/glob-base-0.3.0.tgz#a581d688347e10e50dd7c17d6f2880a10354319d" integrity sha1-pYHWiDR+EOUN18F9byiAoQNUMZ0= -"@types/glob@*", "@types/glob@^7.1.1": +"@types/glob@*": + version "7.2.0" + resolved "https://registry.yarnpkg.com/@types/glob/-/glob-7.2.0.tgz#bc1b5bf3aa92f25bd5dd39f35c57361bdce5b2eb" + integrity sha512-ZUxbzKl0IfJILTS6t7ip5fQQM/J3TJYubDm3nMbgubNNYS62eXeUpoLUC8/7fJNiFYHTrGPQn7hspDUzIHX3UA== + dependencies: + "@types/minimatch" "*" + "@types/node" "*" + +"@types/glob@^7.1.1": version "7.1.3" resolved "https://registry.yarnpkg.com/@types/glob/-/glob-7.1.3.tgz#e6ba80f36b7daad2c685acd9266382e68985c183" integrity sha512-SEYeGAIQIQX8NN6LDKprLjbrd5dARM5EXsd8GI/A5l0apYI1fGMWgPHSe4ZKL4eozlAyI+doUE9XbYS4xCkQ1w== @@ -3738,7 +3996,7 @@ resolved "https://registry.yarnpkg.com/@types/is-function/-/is-function-1.0.0.tgz#1b0b819b1636c7baf0d6785d030d12edf70c3e83" integrity sha512-iTs9HReBu7evG77Q4EC8hZnqRt57irBDkK9nvmHroiOIVwYMQc4IvYvdRgwKfYepunIY7Oh/dBuuld+Gj9uo6w== -"@types/json-schema@^7.0.4", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.8": +"@types/json-schema@*", "@types/json-schema@^7.0.4", "@types/json-schema@^7.0.5", "@types/json-schema@^7.0.8", "@types/json-schema@^7.0.9": version "7.0.9" resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.9.tgz#97edc9037ea0c38585320b28964dde3b39e4660d" integrity sha512-qcUXuemtEu+E5wZSJHNxUXeCZhAfXKQ41D+duX+VYPde7xyEVZci+/oXKJL13tnRs9lR2pr4fod59GT6/X1/yQ== @@ -3769,10 +4027,10 @@ resolved "https://registry.yarnpkg.com/@types/mime/-/mime-1.3.2.tgz#93e25bf9ee75fe0fd80b594bc4feb0e862111b5a" integrity sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw== -"@types/minimatch@*", "@types/minimatch@^3.0.3": - version "3.0.3" - resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.3.tgz#3dca0e3f33b200fc7d1139c0cd96c1268cadfd9d" - integrity sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA== +"@types/minimatch@*", "@types/minimatch@^3.0.3", "@types/minimatch@^3.0.4": + version "3.0.5" + resolved "https://registry.yarnpkg.com/@types/minimatch/-/minimatch-3.0.5.tgz#1001cc5e6a3704b83c236027e77f2f58ea010f40" + integrity sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ== "@types/node-fetch@^2.5.7": version "2.5.12" @@ -3783,9 +4041,9 @@ form-data "^3.0.0" "@types/node@*": - version "14.14.22" - resolved "https://registry.yarnpkg.com/@types/node/-/node-14.14.22.tgz#0d29f382472c4ccf3bd96ff0ce47daf5b7b84b18" - integrity sha512-g+f/qj/cNcqKkc3tFqlXOYjrmZA+jNBiDzbP3kH+B+otKFqAdPgVTGP1IeKRdMml/aE69as5S4FqtxAbl+LaMw== + version "17.0.21" + resolved "https://registry.yarnpkg.com/@types/node/-/node-17.0.21.tgz#864b987c0c68d07b4345845c3e63b75edd143644" + integrity sha512-DBZCJbhII3r90XbQxI8Y9IjjiiOGlZ0Hr32omXIZvwwZ7p4DMMXGrKXVyPfuoBOri9XNtL0UK69jYIBIsRX3QQ== "@types/node@>=10.0.0": version "16.11.6" @@ -3879,7 +4137,15 @@ "@types/prop-types" "*" csstype "^3.0.2" -"@types/rimraf@^2.0.2", "@types/rimraf@^2.0.3": +"@types/rimraf@^2.0.2": + version "2.0.5" + resolved "https://registry.yarnpkg.com/@types/rimraf/-/rimraf-2.0.5.tgz#368fb04d59630b727fc05a74d2ca557f64a8ef98" + integrity sha512-YyP+VfeaqAyFmXoTh3HChxOQMyjByRMsHU7kc5KOJkSlXudhMhQIALbYV7rHh/l8d2lX3VUQzprrcAgWdRuU8g== + 
dependencies: + "@types/glob" "*" + "@types/node" "*" + +"@types/rimraf@^2.0.3": version "2.0.4" resolved "https://registry.yarnpkg.com/@types/rimraf/-/rimraf-2.0.4.tgz#403887b0b53c6100a6c35d2ab24f6ccc042fec46" integrity sha512-8gBudvllD2A/c0CcEX/BivIDorHFt5UI5m46TsNj8DjWCCTTZT74kEe4g+QsY7P/B9WdO98d82zZgXO/RQzu2Q== @@ -3965,6 +4231,14 @@ "@types/webpack-sources" "*" source-map "^0.6.0" +"@webassemblyjs/ast@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.11.1.tgz#2bfd767eae1a6996f432ff7e8d7fc75679c0b6a7" + integrity sha512-ukBh14qFLjxTQNTXocdyksN5QdM28S1CxHt2rdskFyL+xFV7VremuBLVbmCePj+URalXBENx/9Lm7lnhihtCSw== + dependencies: + "@webassemblyjs/helper-numbers" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/ast@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/ast/-/ast-1.9.0.tgz#bd850604b4042459a5a41cd7d338cbed695ed964" @@ -3974,16 +4248,31 @@ "@webassemblyjs/helper-wasm-bytecode" "1.9.0" "@webassemblyjs/wast-parser" "1.9.0" +"@webassemblyjs/floating-point-hex-parser@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.1.tgz#f6c61a705f0fd7a6aecaa4e8198f23d9dc179e4f" + integrity sha512-iGRfyc5Bq+NnNuX8b5hwBrRjzf0ocrJPI6GWFodBFzmFnyvrQ83SHKhmilCU/8Jv67i4GJZBMhEzltxzcNagtQ== + "@webassemblyjs/floating-point-hex-parser@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.9.0.tgz#3c3d3b271bddfc84deb00f71344438311d52ffb4" integrity sha512-TG5qcFsS8QB4g4MhrxK5TqfdNe7Ey/7YL/xN+36rRjl/BlGE/NcBvJcqsRgCP6Z92mRE+7N50pRIi8SmKUbcQA== +"@webassemblyjs/helper-api-error@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.1.tgz#1a63192d8788e5c012800ba6a7a46c705288fd16" + integrity sha512-RlhS8CBCXfRUR/cwo2ho9bkheSXG0+NwooXcc3PAILALf2QLdFyj7KGsKRbVc95hZnhnERon4kW/D3SZpp6Tcg== + "@webassemblyjs/helper-api-error@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-api-error/-/helper-api-error-1.9.0.tgz#203f676e333b96c9da2eeab3ccef33c45928b6a2" integrity sha512-NcMLjoFMXpsASZFxJ5h2HZRcEhDkvnNFOAKneP5RbKRzaWJN36NC4jqQHKwStIhGXu5mUWlUUk7ygdtrO8lbmw== +"@webassemblyjs/helper-buffer@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.11.1.tgz#832a900eb444884cde9a7cad467f81500f5e5ab5" + integrity sha512-gwikF65aDNeeXa8JxXa2BAk+REjSyhrNC9ZwdT0f8jc4dQQeDQ7G4m0f2QCLPJiMTTO6wfDmRmj/pW0PsUvIcA== + "@webassemblyjs/helper-buffer@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-buffer/-/helper-buffer-1.9.0.tgz#a1442d269c5feb23fcbc9ef759dac3547f29de00" @@ -4008,11 +4297,35 @@ dependencies: "@webassemblyjs/ast" "1.9.0" +"@webassemblyjs/helper-numbers@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.1.tgz#64d81da219fbbba1e3bd1bfc74f6e8c4e10a62ae" + integrity sha512-vDkbxiB8zfnPdNK9Rajcey5C0w+QJugEglN0of+kmO8l7lDb77AnlKYQF7aarZuCrv+l0UvqL+68gSDr3k9LPQ== + dependencies: + "@webassemblyjs/floating-point-hex-parser" "1.11.1" + "@webassemblyjs/helper-api-error" "1.11.1" + "@xtuc/long" "4.2.2" + +"@webassemblyjs/helper-wasm-bytecode@1.11.1": + version "1.11.1" + resolved 
"https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.1.tgz#f328241e41e7b199d0b20c18e88429c4433295e1" + integrity sha512-PvpoOGiJwXeTrSf/qfudJhwlvDQxFgelbMqtq52WWiXC6Xgg1IREdngmPN3bs4RoO83PnL/nFrxucXj1+BX62Q== + "@webassemblyjs/helper-wasm-bytecode@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz#4fed8beac9b8c14f8c58b70d124d549dd1fe5790" integrity sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw== +"@webassemblyjs/helper-wasm-section@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.11.1.tgz#21ee065a7b635f319e738f0dd73bfbda281c097a" + integrity sha512-10P9No29rYX1j7F3EVPX3JvGPQPae+AomuSTPiF9eBQeChHI6iqjMIwR9JmOJXwpnn/oVGDk7I5IlskuMwU/pg== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-buffer" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/wasm-gen" "1.11.1" + "@webassemblyjs/helper-wasm-section@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.9.0.tgz#5a4138d5a6292ba18b04c5ae49717e4167965346" @@ -4023,6 +4336,13 @@ "@webassemblyjs/helper-wasm-bytecode" "1.9.0" "@webassemblyjs/wasm-gen" "1.9.0" +"@webassemblyjs/ieee754@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/ieee754/-/ieee754-1.11.1.tgz#963929e9bbd05709e7e12243a099180812992614" + integrity sha512-hJ87QIPtAMKbFq6CGTkZYJivEwZDbQUgYd3qKSadTNOhVY7p+gfP6Sr0lLRVTaG1JjFj+r3YchoqRYxNH3M0GQ== + dependencies: + "@xtuc/ieee754" "^1.2.0" + "@webassemblyjs/ieee754@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/ieee754/-/ieee754-1.9.0.tgz#15c7a0fbaae83fb26143bbacf6d6df1702ad39e4" @@ -4030,6 +4350,13 @@ dependencies: "@xtuc/ieee754" "^1.2.0" +"@webassemblyjs/leb128@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/leb128/-/leb128-1.11.1.tgz#ce814b45574e93d76bae1fb2644ab9cdd9527aa5" + integrity sha512-BJ2P0hNZ0u+Th1YZXJpzW6miwqQUGcIHT1G/sf72gLVD9DZ5AdYTqPNbHZh6K1M5VmKvFXwGSWZADz+qBWxeRw== + dependencies: + "@xtuc/long" "4.2.2" + "@webassemblyjs/leb128@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/leb128/-/leb128-1.9.0.tgz#f19ca0b76a6dc55623a09cffa769e838fa1e1c95" @@ -4037,11 +4364,30 @@ dependencies: "@xtuc/long" "4.2.2" +"@webassemblyjs/utf8@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.11.1.tgz#d1f8b764369e7c6e6bae350e854dec9a59f0a3ff" + integrity sha512-9kqcxAEdMhiwQkHpkNiorZzqpGrodQQ2IGrHHxCy+Ozng0ofyMA0lTqiLkVs1uzTRejX+/O0EOT7KxqVPuXosQ== + "@webassemblyjs/utf8@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/utf8/-/utf8-1.9.0.tgz#04d33b636f78e6a6813227e82402f7637b6229ab" integrity sha512-GZbQlWtopBTP0u7cHrEx+73yZKrQoBMpwkGEIqlacljhXCkVM1kMQge/Mf+csMJAjEdSwhOyLAS0AoR3AG5P8w== +"@webassemblyjs/wasm-edit@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.11.1.tgz#ad206ebf4bf95a058ce9880a8c092c5dec8193d6" + integrity sha512-g+RsupUC1aTHfR8CDgnsVRVZFJqdkFHpsHMfJuWQzWU3tvnLC07UqHICfP+4XyL2tnr1amvl1Sdp06TnYCmVkA== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-buffer" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/helper-wasm-section" "1.11.1" + 
"@webassemblyjs/wasm-gen" "1.11.1" + "@webassemblyjs/wasm-opt" "1.11.1" + "@webassemblyjs/wasm-parser" "1.11.1" + "@webassemblyjs/wast-printer" "1.11.1" + "@webassemblyjs/wasm-edit@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-edit/-/wasm-edit-1.9.0.tgz#3fe6d79d3f0f922183aa86002c42dd256cfee9cf" @@ -4056,6 +4402,17 @@ "@webassemblyjs/wasm-parser" "1.9.0" "@webassemblyjs/wast-printer" "1.9.0" +"@webassemblyjs/wasm-gen@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.11.1.tgz#86c5ea304849759b7d88c47a32f4f039ae3c8f76" + integrity sha512-F7QqKXwwNlMmsulj6+O7r4mmtAlCWfO/0HdgOxSklZfQcDu0TpLiD1mRt/zF25Bk59FIjEuGAIyn5ei4yMfLhA== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/ieee754" "1.11.1" + "@webassemblyjs/leb128" "1.11.1" + "@webassemblyjs/utf8" "1.11.1" + "@webassemblyjs/wasm-gen@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-gen/-/wasm-gen-1.9.0.tgz#50bc70ec68ded8e2763b01a1418bf43491a7a49c" @@ -4067,6 +4424,16 @@ "@webassemblyjs/leb128" "1.9.0" "@webassemblyjs/utf8" "1.9.0" +"@webassemblyjs/wasm-opt@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.11.1.tgz#657b4c2202f4cf3b345f8a4c6461c8c2418985f2" + integrity sha512-VqnkNqnZlU5EB64pp1l7hdm3hmQw7Vgqa0KF/KCNO9sIpI6Fk6brDEiX+iCOYrvMuBWDws0NkTOxYEb85XQHHw== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-buffer" "1.11.1" + "@webassemblyjs/wasm-gen" "1.11.1" + "@webassemblyjs/wasm-parser" "1.11.1" + "@webassemblyjs/wasm-opt@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-opt/-/wasm-opt-1.9.0.tgz#2211181e5b31326443cc8112eb9f0b9028721a61" @@ -4077,6 +4444,18 @@ "@webassemblyjs/wasm-gen" "1.9.0" "@webassemblyjs/wasm-parser" "1.9.0" +"@webassemblyjs/wasm-parser@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.11.1.tgz#86ca734534f417e9bd3c67c7a1c75d8be41fb199" + integrity sha512-rrBujw+dJu32gYB7/Lup6UhdkPx9S9SnobZzRVL7VcBH9Bt9bCBLEuX/YXOOtBsOZ4NQrRykKhffRWHvigQvOA== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/helper-api-error" "1.11.1" + "@webassemblyjs/helper-wasm-bytecode" "1.11.1" + "@webassemblyjs/ieee754" "1.11.1" + "@webassemblyjs/leb128" "1.11.1" + "@webassemblyjs/utf8" "1.11.1" + "@webassemblyjs/wasm-parser@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/wasm-parser/-/wasm-parser-1.9.0.tgz#9d48e44826df4a6598294aa6c87469d642fff65e" @@ -4101,6 +4480,14 @@ "@webassemblyjs/helper-fsm" "1.9.0" "@xtuc/long" "4.2.2" +"@webassemblyjs/wast-printer@1.11.1": + version "1.11.1" + resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.11.1.tgz#d0c73beda8eec5426f10ae8ef55cee5e7084c2f0" + integrity sha512-IQboUWM4eKzWW+N/jij2sRatKMh99QEelo3Eb2q0qXkvPRISAj8Qxtmw5itwqK+TTkBuUIE45AxYPToqPtL5gg== + dependencies: + "@webassemblyjs/ast" "1.11.1" + "@xtuc/long" "4.2.2" + "@webassemblyjs/wast-printer@1.9.0": version "1.9.0" resolved "https://registry.yarnpkg.com/@webassemblyjs/wast-printer/-/wast-printer-1.9.0.tgz#4935d54c85fef637b00ce9f52377451d00d47899" @@ -4135,10 +4522,10 @@ abbrev@1: resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" integrity sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q== 
-abortcontroller-polyfill@^1.4.0: - version "1.7.1" - resolved "https://registry.yarnpkg.com/abortcontroller-polyfill/-/abortcontroller-polyfill-1.7.1.tgz#27084bac87d78a7224c8ee78135d05df430c2d2f" - integrity sha512-yml9NiDEH4M4p0G4AcPkg8AAa4mF3nfYF28VQxaokpO67j9H7gWgmsVWJ/f1Rn+PzsnDYvzJzWIQzCqDKRvWlA== +abortcontroller-polyfill@^1.7.3: + version "1.7.3" + resolved "https://registry.yarnpkg.com/abortcontroller-polyfill/-/abortcontroller-polyfill-1.7.3.tgz#1b5b487bd6436b5b764fd52a612509702c3144b5" + integrity sha512-zetDJxd89y3X99Kvo4qFx8GKlt6GsvN3UcRZHwU6iFA/0KiOmhkTVhe8oRoTBiTVPZu09x3vCra47+w8Yz1+2Q== accepts@~1.3.4, accepts@~1.3.5, accepts@~1.3.7: version "1.3.7" @@ -4163,6 +4550,11 @@ acorn-globals@^6.0.0: acorn "^7.1.1" acorn-walk "^7.1.1" +acorn-import-assertions@^1.7.6: + version "1.8.0" + resolved "https://registry.yarnpkg.com/acorn-import-assertions/-/acorn-import-assertions-1.8.0.tgz#ba2b5939ce62c238db6d93d81c9b111b29b855e9" + integrity sha512-m7VZ3jwz4eK6A4Vtt8Ew1/mNbP24u0FhdyfA7fSvnJR6LMdfOYnmuIrrJAgrYfYJ10F/otaHTtrtrtmHdMNzEw== + acorn-jsx@^5.3.1: version "5.3.1" resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.3.1.tgz#fc8661e11b7ac1539c47dbfea2e72b3af34d267b" @@ -4183,21 +4575,21 @@ acorn@^6.4.1: resolved "https://registry.yarnpkg.com/acorn/-/acorn-6.4.2.tgz#35866fd710528e92de10cf06016498e47e39e1e6" integrity sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ== -acorn@^7.1.0, acorn@^7.1.1, acorn@^7.4.0: +acorn@^7.1.1, acorn@^7.4.0: version "7.4.1" resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa" integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A== +acorn@^8.4.1, acorn@^8.5.0: + version "8.7.0" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.7.0.tgz#90951fde0f8f09df93549481e5fc141445b791cf" + integrity sha512-V/LGr1APy+PXIwKebEWrkZPwoeoF+w1jiOBUmuxuiUIaOHtob8Qc9BTrYo7VuI5fR8tqsy+buA2WFooR5olqvQ== + address@1.1.2, address@^1.0.1: version "1.1.2" resolved "https://registry.yarnpkg.com/address/-/address-1.1.2.tgz#bf1116c9c758c51b7a933d296b72c221ed9428b6" integrity sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA== -after@0.8.2: - version "0.8.2" - resolved "https://registry.yarnpkg.com/after/-/after-0.8.2.tgz#fedb394f9f0e02aa9768e702bda23b505fae7e1f" - integrity sha1-/ts5T58OAqqXaOcCvaI7UF+ufh8= - aggregate-error@^3.0.0: version "3.1.0" resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a" @@ -4234,11 +4626,25 @@ ajv-errors@^1.0.0: resolved "https://registry.yarnpkg.com/ajv-errors/-/ajv-errors-1.0.1.tgz#f35986aceb91afadec4102fbd85014950cefa64d" integrity sha512-DCRfO/4nQ+89p/RK43i8Ezd41EqdGIU4ld7nGF8OQ14oc/we5rEntLCUa7+jrn3nn83BosfwZA0wb4pon2o8iQ== +ajv-formats@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/ajv-formats/-/ajv-formats-2.1.1.tgz#6e669400659eb74973bbf2e33327180a0996b520" + integrity sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA== + dependencies: + ajv "^8.0.0" + ajv-keywords@^3.1.0, ajv-keywords@^3.4.1, ajv-keywords@^3.5.2: version "3.5.2" resolved "https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-3.5.2.tgz#31f29da5ab6e00d1c2d329acf7b5929614d5014d" integrity sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ== +ajv-keywords@^5.0.0: + version "5.1.0" + resolved 
"https://registry.yarnpkg.com/ajv-keywords/-/ajv-keywords-5.1.0.tgz#69d4d385a4733cdbeab44964a1170a88f87f0e16" + integrity sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw== + dependencies: + fast-deep-equal "^3.1.3" + ajv@^6.1.0, ajv@^6.10.0, ajv@^6.10.2, ajv@^6.12.2, ajv@^6.12.3, ajv@^6.12.4, ajv@^6.12.5: version "6.12.6" resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" @@ -4249,6 +4655,16 @@ ajv@^6.1.0, ajv@^6.10.0, ajv@^6.10.2, ajv@^6.12.2, ajv@^6.12.3, ajv@^6.12.4, ajv json-schema-traverse "^0.4.1" uri-js "^4.2.2" +ajv@^8.0.0, ajv@^8.8.0: + version "8.10.0" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.10.0.tgz#e573f719bd3af069017e3b66538ab968d040e54d" + integrity sha512-bzqAEZOjkrUMl2afH8dknrq5KEk2SrwdBROR+vH1EKVQTqaUbJVPdc/gEdggTMM0Se+s+Ja4ju4TlNcStKl2Hw== + dependencies: + fast-deep-equal "^3.1.1" + json-schema-traverse "^1.0.0" + require-from-string "^2.0.2" + uri-js "^4.2.2" + ajv@^8.0.1: version "8.8.2" resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.8.2.tgz#01b4fef2007a28bf75f0b7fc009f62679de4abbb" @@ -4259,14 +4675,7 @@ ajv@^8.0.1: require-from-string "^2.0.2" uri-js "^4.2.2" -amd-name-resolver@1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-1.2.0.tgz#fc41b3848824b557313897d71f8d5a0184fbe679" - integrity sha512-hlSTWGS1t6/xq5YCed7YALg7tKZL3rkl7UwEZ/eCIkn8JxmM6fU6Qs/1hwtjQqfuYxlffuUcgYEm0f5xP4YKaA== - dependencies: - ensure-posix-path "^1.0.1" - -amd-name-resolver@^1.2.0, amd-name-resolver@^1.2.1, amd-name-resolver@^1.3.1: +amd-name-resolver@^1.2.0, amd-name-resolver@^1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-1.3.1.tgz#ffe71c683c6e7191fc4ae1bb3aaed15abea135d9" integrity sha512-26qTEWqZQ+cxSYygZ4Cf8tsjDBLceJahhtewxtKZA3SRa4PluuqYCuheemDQD+7Mf5B7sr+zhTDWAHDh02a1Dw== @@ -4279,10 +4688,10 @@ amdefine@>=0.0.4: resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5" integrity sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU= -anser@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/anser/-/anser-2.1.0.tgz#a7309c9f29886f19af56cb30c79fc60ea483944e" - integrity sha512-zqC6MjuKg2ASofHsYE4orC7uGZQVbfJT1NiDDAzPtwc8XkWsAOSPAfqGFB/SG/PLybgeZ+LjVXvwfAWAEPXzuQ== +anser@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/anser/-/anser-2.1.1.tgz#8afae28d345424c82de89cc0e4d1348eb0c5af7c" + integrity sha512-nqLm4HxOTpeLOxcmB3QWmV5TcDFhW9y/fyQ+hivtDFcK4OQ+pQ5fzPnXHM1Mfcm0VkLtvVi1TCPr++Qy0Q/3EQ== ansi-align@^3.0.0: version "3.0.0" @@ -4364,14 +4773,14 @@ ansi-styles@^4.0.0, ansi-styles@^4.1.0: dependencies: color-convert "^2.0.1" -ansi-to-html@^0.6.11, ansi-to-html@^0.6.6: +ansi-to-html@^0.6.11: version "0.6.14" resolved "https://registry.yarnpkg.com/ansi-to-html/-/ansi-to-html-0.6.14.tgz#65fe6d08bba5dd9db33f44a20aec331e0010dad8" integrity sha512-7ZslfB1+EnFSDO5Ju+ue5Y6It19DRnZXWv8jrGHgIlPna5Mh4jz7BV5jCbQneXNFurQcKoolaaAjHtgSBfOIuA== dependencies: entities "^1.1.2" -ansi-to-html@^0.6.15: +ansi-to-html@^0.6.15, ansi-to-html@^0.6.6: version "0.6.15" resolved "https://registry.yarnpkg.com/ansi-to-html/-/ansi-to-html-0.6.15.tgz#ac6ad4798a00f6aa045535d7f6a9cb9294eebea7" integrity sha512-28ijx2aHJGdzbs+O5SNQF65r6rrKYnkuwTYm8lZlChuoJ9P1vVzIpWO20sQTqTPDXYp6NFwk326vApTtLVFXpQ== @@ -4424,6 +4833,11 @@ argparse@^1.0.7, argparse@~1.0.2: dependencies: sprintf-js "~1.0.2" +argparse@^2.0.1: + version "2.0.1" + resolved 
"https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== + arr-diff@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-4.0.0.tgz#d6461074febfec71e7e15235761a329a5dc7c520" @@ -4449,11 +4863,6 @@ array-flatten@1.1.1: resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2" integrity sha1-ml9pkFGx5wczKPKgCJaLZOopVdI= -array-from@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/array-from/-/array-from-2.1.1.tgz#cfe9d8c26628b9dc5aecc62a9f5d8f1f352c1195" - integrity sha1-z+nYwmYoudxa7MYqn12PHzUsEZU= - array-includes@^3.0.3: version "3.1.2" resolved "https://registry.yarnpkg.com/array-includes/-/array-includes-3.1.2.tgz#a8db03e0b88c8c6aeddc49cb132f9bcab4ebf9c8" @@ -4529,11 +4938,6 @@ array.prototype.map@^1.0.3: es-array-method-boxes-properly "^1.0.0" is-string "^1.0.5" -arraybuffer.slice@~0.0.7: - version "0.0.7" - resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.7.tgz#3bbc4275dd584cc1b10809b89d4e8b63a69e7675" - integrity sha512-wGUIVQXuehL5TCqQun8OW81jGzAWycqzFF8lFp+GOM5BXLYj3bKNsYC4daB7n6XjCqxQA/qgTJ+8ANR3acjrog== - arrify@^2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/arrify/-/arrify-2.0.1.tgz#c9655e9331e0abcd588d2a7cad7e9956f66701fa" @@ -4561,7 +4965,7 @@ asn1@~0.2.3: dependencies: safer-buffer "~2.1.0" -assert-never@^1.1.0: +assert-never@^1.1.0, assert-never@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/assert-never/-/assert-never-1.2.1.tgz#11f0e363bf146205fb08193b5c7b90f4d1cf44fe" integrity sha512-TaTivMB6pYI1kXwrFlEhLeGfOqoDNdTxjCdwRfFFkEA30Eu+k48W34nlok2EYWJfFFzqaEmichdNM7th6M5HNw== @@ -4589,11 +4993,6 @@ assign-symbols@^1.0.0: resolved "https://registry.yarnpkg.com/assign-symbols/-/assign-symbols-1.0.0.tgz#59667f41fadd4f20ccbc2bb96b8d4f7f78ec0367" integrity sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c= -ast-types@0.10.2: - version "0.10.2" - resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.10.2.tgz#aef76a04fde54634976fc94defaad1a67e2eadb0" - integrity sha512-ufWX953VU1eIuWqxS0nRDMYlGyFH+yxln5CsmIHlpzEt3fdYqUnRtsFt0XAsQot8OaVCwFqxT1RiwvtzYjeYeg== - ast-types@0.13.3: version "0.13.3" resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.13.3.tgz#50da3f28d17bdbc7969a3a2d83a0e4a72ae755a7" @@ -4708,40 +5107,6 @@ axe-core@^4.0.1: resolved "https://registry.yarnpkg.com/axe-core/-/axe-core-4.1.1.tgz#70a7855888e287f7add66002211a423937063eaf" integrity sha512-5Kgy8Cz6LPC9DJcNb3yjAXTu3XihQgEdnIg50c//zOC/MyLP0Clg+Y8Sh9ZjjnvBrDZU4DgXS9C3T9r4/scGZQ== -babel-code-frame@^6.26.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b" - integrity sha1-Y/1D99weO7fONZR9uP42mj9Yx0s= - dependencies: - chalk "^1.1.3" - esutils "^2.0.2" - js-tokens "^3.0.2" - -babel-core@^6.10.4, babel-core@^6.26.0, babel-core@^6.26.3: - version "6.26.3" - resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-6.26.3.tgz#b2e2f09e342d0f0c88e2f02e067794125e75c207" - integrity sha512-6jyFLuDmeidKmUEb3NM+/yawG0M2bDZ9Z1qbZP59cyHLz8kYGKYwpJP0UwUKKUiTRNvxfLesJnTedqczP7cTDA== - dependencies: - babel-code-frame "^6.26.0" - babel-generator "^6.26.0" - babel-helpers "^6.24.1" - babel-messages "^6.23.0" - babel-register "^6.26.0" - babel-runtime "^6.26.0" - babel-template "^6.26.0" - babel-traverse 
"^6.26.0" - babel-types "^6.26.0" - babylon "^6.18.0" - convert-source-map "^1.5.1" - debug "^2.6.9" - json5 "^0.5.1" - lodash "^4.17.4" - minimatch "^3.0.4" - path-is-absolute "^1.0.1" - private "^0.1.8" - slash "^1.0.0" - source-map "^0.5.7" - babel-eslint@^10.1.0: version "10.1.0" resolved "https://registry.yarnpkg.com/babel-eslint/-/babel-eslint-10.1.0.tgz#6968e568a910b78fb3779cdd8b6ac2f479943232" @@ -4754,132 +5119,10 @@ babel-eslint@^10.1.0: eslint-visitor-keys "^1.0.0" resolve "^1.12.0" -babel-generator@^6.26.0: - version "6.26.1" - resolved "https://registry.yarnpkg.com/babel-generator/-/babel-generator-6.26.1.tgz#1844408d3b8f0d35a404ea7ac180f087a601bd90" - integrity sha512-HyfwY6ApZj7BYTcJURpM5tznulaBvyio7/0d4zFOeMPUmfxkCjHocCuoLa2SAGzBI8AREcH3eP3758F672DppA== - dependencies: - babel-messages "^6.23.0" - babel-runtime "^6.26.0" - babel-types "^6.26.0" - detect-indent "^4.0.0" - jsesc "^1.3.0" - lodash "^4.17.4" - source-map "^0.5.7" - trim-right "^1.0.1" - -babel-helper-builder-binary-assignment-operator-visitor@^6.24.1: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-helper-builder-binary-assignment-operator-visitor/-/babel-helper-builder-binary-assignment-operator-visitor-6.24.1.tgz#cce4517ada356f4220bcae8a02c2b346f9a56664" - integrity sha1-zORReto1b0IgvK6KAsKzRvmlZmQ= - dependencies: - babel-helper-explode-assignable-expression "^6.24.1" - babel-runtime "^6.22.0" - babel-types "^6.24.1" - -babel-helper-call-delegate@^6.24.1: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-helper-call-delegate/-/babel-helper-call-delegate-6.24.1.tgz#ece6aacddc76e41c3461f88bfc575bd0daa2df8d" - integrity sha1-7Oaqzdx25Bw0YfiL/Fdb0Nqi340= - dependencies: - babel-helper-hoist-variables "^6.24.1" - babel-runtime "^6.22.0" - babel-traverse "^6.24.1" - babel-types "^6.24.1" - -babel-helper-define-map@^6.24.1: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-helper-define-map/-/babel-helper-define-map-6.26.0.tgz#a5f56dab41a25f97ecb498c7ebaca9819f95be5f" - integrity sha1-pfVtq0GiX5fstJjH66ypgZ+Vvl8= - dependencies: - babel-helper-function-name "^6.24.1" - babel-runtime "^6.26.0" - babel-types "^6.26.0" - lodash "^4.17.4" - -babel-helper-explode-assignable-expression@^6.24.1: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-helper-explode-assignable-expression/-/babel-helper-explode-assignable-expression-6.24.1.tgz#f25b82cf7dc10433c55f70592d5746400ac22caa" - integrity sha1-8luCz33BBDPFX3BZLVdGQArCLKo= - dependencies: - babel-runtime "^6.22.0" - babel-traverse "^6.24.1" - babel-types "^6.24.1" - -babel-helper-function-name@^6.24.1: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-helper-function-name/-/babel-helper-function-name-6.24.1.tgz#d3475b8c03ed98242a25b48351ab18399d3580a9" - integrity sha1-00dbjAPtmCQqJbSDUasYOZ01gKk= - dependencies: - babel-helper-get-function-arity "^6.24.1" - babel-runtime "^6.22.0" - babel-template "^6.24.1" - babel-traverse "^6.24.1" - babel-types "^6.24.1" - -babel-helper-get-function-arity@^6.24.1: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-helper-get-function-arity/-/babel-helper-get-function-arity-6.24.1.tgz#8f7782aa93407c41d3aa50908f89b031b1b6853d" - integrity sha1-j3eCqpNAfEHTqlCQj4mwMbG2hT0= - dependencies: - babel-runtime "^6.22.0" - babel-types "^6.24.1" - -babel-helper-hoist-variables@^6.24.1: - version "6.24.1" - resolved 
"https://registry.yarnpkg.com/babel-helper-hoist-variables/-/babel-helper-hoist-variables-6.24.1.tgz#1ecb27689c9d25513eadbc9914a73f5408be7a76" - integrity sha1-HssnaJydJVE+rbyZFKc/VAi+enY= - dependencies: - babel-runtime "^6.22.0" - babel-types "^6.24.1" - -babel-helper-optimise-call-expression@^6.24.1: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-helper-optimise-call-expression/-/babel-helper-optimise-call-expression-6.24.1.tgz#f7a13427ba9f73f8f4fa993c54a97882d1244257" - integrity sha1-96E0J7qfc/j0+pk8VKl4gtEkQlc= - dependencies: - babel-runtime "^6.22.0" - babel-types "^6.24.1" - -babel-helper-regex@^6.24.1: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-helper-regex/-/babel-helper-regex-6.26.0.tgz#325c59f902f82f24b74faceed0363954f6495e72" - integrity sha1-MlxZ+QL4LyS3T6zu0DY5VPZJXnI= - dependencies: - babel-runtime "^6.26.0" - babel-types "^6.26.0" - lodash "^4.17.4" - -babel-helper-remap-async-to-generator@^6.24.1: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-helper-remap-async-to-generator/-/babel-helper-remap-async-to-generator-6.24.1.tgz#5ec581827ad723fecdd381f1c928390676e4551b" - integrity sha1-XsWBgnrXI/7N04HxySg5BnbkVRs= - dependencies: - babel-helper-function-name "^6.24.1" - babel-runtime "^6.22.0" - babel-template "^6.24.1" - babel-traverse "^6.24.1" - babel-types "^6.24.1" - -babel-helper-replace-supers@^6.24.1: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-helper-replace-supers/-/babel-helper-replace-supers-6.24.1.tgz#bf6dbfe43938d17369a213ca8a8bf74b6a90ab1a" - integrity sha1-v22/5Dk40XNpohPKiov3S2qQqxo= - dependencies: - babel-helper-optimise-call-expression "^6.24.1" - babel-messages "^6.23.0" - babel-runtime "^6.22.0" - babel-template "^6.24.1" - babel-traverse "^6.24.1" - babel-types "^6.24.1" - -babel-helpers@^6.24.1: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-helpers/-/babel-helpers-6.24.1.tgz#3471de9caec388e5c850e597e58a26ddf37602b2" - integrity sha1-NHHenK7DiOXIUOWX5Yom3fN2ArI= - dependencies: - babel-runtime "^6.22.0" - babel-template "^6.24.1" +babel-import-util@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/babel-import-util/-/babel-import-util-0.2.0.tgz#b468bb679919601a3570f9e317536c54f2862e23" + integrity sha512-CtWYYHU/MgK88rxMrLfkD356dApswtR/kWZ/c6JifG1m10e7tBBrs/366dFzWMAoqYmG5/JSh+94tUSpIwh+ag== babel-import-util@^1.1.0: version "1.1.0" @@ -4906,13 +5149,6 @@ babel-loader@^8.2.2: make-dir "^3.1.0" schema-utils "^2.6.5" -babel-messages@^6.23.0: - version "6.23.0" - resolved "https://registry.yarnpkg.com/babel-messages/-/babel-messages-6.23.0.tgz#f3cdf4703858035b2a2951c6ec5edf6c62f2630e" - integrity sha1-8830cDhYA1sqKVHG7F7fbGLyYw4= - dependencies: - babel-runtime "^6.22.0" - babel-plugin-apply-mdx-type-prop@1.6.22: version "1.6.22" resolved "https://registry.yarnpkg.com/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.22.tgz#d216e8fd0de91de3f1478ef3231e05446bc8705b" @@ -4921,21 +5157,14 @@ babel-plugin-apply-mdx-type-prop@1.6.22: "@babel/helper-plugin-utils" "7.10.4" "@mdx-js/util" "1.6.22" -babel-plugin-check-es2015-constants@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-check-es2015-constants/-/babel-plugin-check-es2015-constants-6.22.0.tgz#35157b101426fd2ffd3da3f75c7d1e91835bbf8a" - integrity sha1-NRV7EBQm/S/9PaP3XH0ekYNbv4o= - dependencies: - babel-runtime "^6.22.0" - -babel-plugin-debug-macros@^0.2.0, babel-plugin-debug-macros@^0.2.0-beta.6: 
+babel-plugin-debug-macros@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/babel-plugin-debug-macros/-/babel-plugin-debug-macros-0.2.0.tgz#0120ac20ce06ccc57bf493b667cf24b85c28da7a" integrity sha512-Wpmw4TbhR3Eq2t3W51eBAQSdKlr+uAyF0GI4GtPfMCD12Y4cIdpKC9l0RjNTH/P9isFypSqqewMPm7//fnZlNA== dependencies: semver "^5.3.0" -babel-plugin-debug-macros@^0.3.0, babel-plugin-debug-macros@^0.3.3, babel-plugin-debug-macros@^0.3.4: +babel-plugin-debug-macros@^0.3.3, babel-plugin-debug-macros@^0.3.4: version "0.3.4" resolved "https://registry.yarnpkg.com/babel-plugin-debug-macros/-/babel-plugin-debug-macros-0.3.4.tgz#22961d0cb851a80654cece807a8b4b73d85c6075" integrity sha512-wfel/vb3pXfwIDZUrkoDrn5FHmlWI96PCJ3UCDv2a86poJ3EQrnArNW5KfHSVJ9IOgxHbo748cQt7sDU+0KCEw== @@ -4956,20 +5185,6 @@ babel-plugin-ember-data-packages-polyfill@^0.1.2: dependencies: "@ember-data/rfc395-data" "^0.0.4" -babel-plugin-ember-modules-api-polyfill@^2.12.0, babel-plugin-ember-modules-api-polyfill@^2.6.0: - version "2.13.4" - resolved "https://registry.yarnpkg.com/babel-plugin-ember-modules-api-polyfill/-/babel-plugin-ember-modules-api-polyfill-2.13.4.tgz#cf62bc9bfd808c48d810d5194f4329e9453bd603" - integrity sha512-uxQPkEQAzCYdwhZk16O9m1R4xtCRNy4oEUTBrccOPfzlIahRZJic/JeP/ZEL0BC6Mfq6r55eOg6gMF/zdFoCvA== - dependencies: - ember-rfc176-data "^0.3.13" - -babel-plugin-ember-modules-api-polyfill@^3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/babel-plugin-ember-modules-api-polyfill/-/babel-plugin-ember-modules-api-polyfill-3.2.1.tgz#715252ffde309da36fb32cd6a9bad5c6b61edd33" - integrity sha512-7k4gM0VLAMjoWVxLBDqavH/Dn4mBfzqTuQmtGmZgsdQ4SYVEJ7dewUVeqWBVn5v3QspW4VSoeXh4rHPPlp/rPw== - dependencies: - ember-rfc176-data "^0.3.16" - babel-plugin-ember-modules-api-polyfill@^3.4.0, babel-plugin-ember-modules-api-polyfill@^3.5.0: version "3.5.0" resolved "https://registry.yarnpkg.com/babel-plugin-ember-modules-api-polyfill/-/babel-plugin-ember-modules-api-polyfill-3.5.0.tgz#27b6087fac75661f779f32e60f94b14d0e9f6965" @@ -4977,6 +5192,16 @@ babel-plugin-ember-modules-api-polyfill@^3.4.0, babel-plugin-ember-modules-api-p dependencies: ember-rfc176-data "^0.3.17" +babel-plugin-ember-template-compilation@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/babel-plugin-ember-template-compilation/-/babel-plugin-ember-template-compilation-1.0.1.tgz#64baf434ff1b751c6292936f8b9eb75a2f8149dc" + integrity sha512-V/kY6CDyUNrl5Kx6UPKUPhzSKNfdrxNii+S5zK4dgJvVyoxFv7Ykg06Ct/yskY0LkA4wUPdYN7JOBtYJwHk2sg== + dependencies: + babel-import-util "^0.2.0" + line-column "^1.0.2" + magic-string "^0.25.7" + string.prototype.matchall "^4.0.5" + babel-plugin-emotion@^10.0.27: version "10.0.33" resolved "https://registry.yarnpkg.com/babel-plugin-emotion/-/babel-plugin-emotion-10.0.33.tgz#ce1155dcd1783bbb9286051efee53f4e2be63e03" @@ -5000,14 +5225,6 @@ babel-plugin-extract-import-names@1.6.22: dependencies: "@babel/helper-plugin-utils" "7.10.4" -babel-plugin-filter-imports@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/babel-plugin-filter-imports/-/babel-plugin-filter-imports-3.0.0.tgz#a849683837ad29960da17492fb32789ab6b09a11" - integrity sha512-p/chjzVTgCxUqyLM0q/pfWVZS7IJTwGQMwNg0LOvuQpKiTftQgZDtkGB8XvETnUw19rRcL7bJCTopSwibTN2tA== - dependencies: - "@babel/types" "^7.4.0" - lodash "^4.17.11" - babel-plugin-filter-imports@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/babel-plugin-filter-imports/-/babel-plugin-filter-imports-4.0.0.tgz#068f8da15236a96a9602c36dc6f4a6eeca70a4f4" @@ -5016,11 +5233,6 
@@ babel-plugin-filter-imports@^4.0.0: "@babel/types" "^7.7.2" lodash "^4.17.15" -babel-plugin-htmlbars-inline-precompile@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-1.0.0.tgz#a9d2f6eaad8a3f3d361602de593a8cbef8179c22" - integrity sha512-4jvKEHR1bAX03hBDZ94IXsYCj3bwk9vYsn6ux6JZNL2U5pvzCWjqyrGahfsGNrhERyxw8IqcirOi9Q6WCo3dkQ== - babel-plugin-htmlbars-inline-precompile@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-3.2.0.tgz#c4882ea875d0f5683f0d91c1f72e29a4f14b5606" @@ -5038,6 +5250,17 @@ babel-plugin-htmlbars-inline-precompile@^5.0.0: dependencies: babel-plugin-ember-modules-api-polyfill "^3.4.0" +babel-plugin-htmlbars-inline-precompile@^5.2.1, babel-plugin-htmlbars-inline-precompile@^5.3.0: + version "5.3.1" + resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-5.3.1.tgz#5ba272e2e4b6221522401f5f1d98a73b1de38787" + integrity sha512-QWjjFgSKtSRIcsBhJmEwS2laIdrA6na8HAlc/pEAhjHgQsah/gMiBFRZvbQTy//hWxR4BMwV7/Mya7q5H8uHeA== + dependencies: + babel-plugin-ember-modules-api-polyfill "^3.5.0" + line-column "^1.0.2" + magic-string "^0.25.7" + parse-static-imports "^1.1.0" + string.prototype.matchall "^4.0.5" + babel-plugin-macros@^2.0.0, babel-plugin-macros@^2.8.0: version "2.8.0" resolved "https://registry.yarnpkg.com/babel-plugin-macros/-/babel-plugin-macros-2.8.0.tgz#0f958a7cc6556b1e65344465d99111a1e5e10138" @@ -5056,7 +5279,7 @@ babel-plugin-macros@^3.0.1: cosmiconfig "^7.0.0" resolve "^1.19.0" -babel-plugin-module-resolver@^3.1.1, babel-plugin-module-resolver@^3.2.0: +babel-plugin-module-resolver@^3.2.0: version "3.2.0" resolved "https://registry.yarnpkg.com/babel-plugin-module-resolver/-/babel-plugin-module-resolver-3.2.0.tgz#ddfa5e301e3b9aa12d852a9979f18b37881ff5a7" integrity sha512-tjR0GvSndzPew/Iayf4uICWZqjBwnlMWjSx6brryfQ81F9rxBVqwDJtFCV8oOs0+vJeefK9TmdZtkIFdFe1UnA== @@ -5067,7 +5290,7 @@ babel-plugin-module-resolver@^3.1.1, babel-plugin-module-resolver@^3.2.0: reselect "^3.0.1" resolve "^1.4.0" -babel-plugin-module-resolver@^4.0.0: +babel-plugin-module-resolver@^4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/babel-plugin-module-resolver/-/babel-plugin-module-resolver-4.1.0.tgz#22a4f32f7441727ec1fbf4967b863e1e3e9f33e2" integrity sha512-MlX10UDheRr3lb3P0WcaIdtCSRlxdQsB1sBqL7W0raF070bGl1HQQq5K3T2vf2XAYie+ww+5AKC/WrkjRO2knA== @@ -5096,6 +5319,15 @@ babel-plugin-polyfill-corejs2@^0.2.2: "@babel/helper-define-polyfill-provider" "^0.2.2" semver "^6.1.1" +babel-plugin-polyfill-corejs2@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.3.1.tgz#440f1b70ccfaabc6b676d196239b138f8a2cfba5" + integrity sha512-v7/T6EQcNfVLfcN2X8Lulb7DjprieyLWJK/zOWH5DUYcAgex9sP3h25Q+DLsX9TloXe3y1O8l2q2Jv9q8UVB9w== + dependencies: + "@babel/compat-data" "^7.13.11" + "@babel/helper-define-polyfill-provider" "^0.3.1" + semver "^6.1.1" + babel-plugin-polyfill-corejs3@^0.1.0, babel-plugin-polyfill-corejs3@^0.1.3: version "0.1.7" resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.1.7.tgz#80449d9d6f2274912e05d9e182b54816904befd0" @@ -5112,6 +5344,14 @@ babel-plugin-polyfill-corejs3@^0.2.2: "@babel/helper-define-polyfill-provider" "^0.2.2" core-js-compat "^3.16.2" +babel-plugin-polyfill-corejs3@^0.5.0: + version "0.5.2" 
+ resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.5.2.tgz#aabe4b2fa04a6e038b688c5e55d44e78cd3a5f72" + integrity sha512-G3uJih0XWiID451fpeFaYGVuxHEjzKTHtc9uGFEjR6hHrvNzeS/PX+LLLcetJcytsB5m4j+K3o/EpXJNb/5IEQ== + dependencies: + "@babel/helper-define-polyfill-provider" "^0.3.1" + core-js-compat "^3.21.0" + babel-plugin-polyfill-regenerator@^0.1.2: version "0.1.6" resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.1.6.tgz#0fe06a026fe0faa628ccc8ba3302da0a6ce02f3f" @@ -5126,366 +5366,28 @@ babel-plugin-polyfill-regenerator@^0.2.2: dependencies: "@babel/helper-define-polyfill-provider" "^0.2.2" -babel-plugin-syntax-async-functions@^6.8.0: - version "6.13.0" - resolved "https://registry.yarnpkg.com/babel-plugin-syntax-async-functions/-/babel-plugin-syntax-async-functions-6.13.0.tgz#cad9cad1191b5ad634bf30ae0872391e0647be95" - integrity sha1-ytnK0RkbWtY0vzCuCHI5HgZHvpU= +babel-plugin-polyfill-regenerator@^0.3.0: + version "0.3.1" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.3.1.tgz#2c0678ea47c75c8cc2fbb1852278d8fb68233990" + integrity sha512-Y2B06tvgHYt1x0yz17jGkGeeMr5FeKUu+ASJ+N6nB5lQ8Dapfg42i0OVrf8PNGJ3zKL4A23snMi1IRwrqqND7A== + dependencies: + "@babel/helper-define-polyfill-provider" "^0.3.1" babel-plugin-syntax-dynamic-import@^6.18.0: version "6.18.0" resolved "https://registry.yarnpkg.com/babel-plugin-syntax-dynamic-import/-/babel-plugin-syntax-dynamic-import-6.18.0.tgz#8d6a26229c83745a9982a441051572caa179b1da" integrity sha1-jWomIpyDdFqZgqRBBRVyyqF5sdo= -babel-plugin-syntax-exponentiation-operator@^6.8.0: - version "6.13.0" - resolved "https://registry.yarnpkg.com/babel-plugin-syntax-exponentiation-operator/-/babel-plugin-syntax-exponentiation-operator-6.13.0.tgz#9ee7e8337290da95288201a6a57f4170317830de" - integrity sha1-nufoM3KQ2pUoggGmpX9BcDF4MN4= - babel-plugin-syntax-jsx@^6.18.0: version "6.18.0" resolved "https://registry.yarnpkg.com/babel-plugin-syntax-jsx/-/babel-plugin-syntax-jsx-6.18.0.tgz#0af32a9a6e13ca7a3fd5069e62d7b0f58d0d8946" integrity sha1-CvMqmm4Tyno/1QaeYtew9Y0NiUY= -babel-plugin-syntax-trailing-function-commas@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-syntax-trailing-function-commas/-/babel-plugin-syntax-trailing-function-commas-6.22.0.tgz#ba0360937f8d06e40180a43fe0d5616fff532cf3" - integrity sha1-ugNgk3+NBuQBgKQ/4NVhb/9TLPM= - -babel-plugin-transform-async-to-generator@^6.22.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-async-to-generator/-/babel-plugin-transform-async-to-generator-6.24.1.tgz#6536e378aff6cb1d5517ac0e40eb3e9fc8d08761" - integrity sha1-ZTbjeK/2yx1VF6wOQOs+n8jQh2E= - dependencies: - babel-helper-remap-async-to-generator "^6.24.1" - babel-plugin-syntax-async-functions "^6.8.0" - babel-runtime "^6.22.0" - -babel-plugin-transform-es2015-arrow-functions@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-arrow-functions/-/babel-plugin-transform-es2015-arrow-functions-6.22.0.tgz#452692cb711d5f79dc7f85e440ce41b9f244d221" - integrity sha1-RSaSy3EdX3ncf4XkQM5BufJE0iE= - dependencies: - babel-runtime "^6.22.0" - -babel-plugin-transform-es2015-block-scoped-functions@^6.22.0: - version "6.22.0" - resolved 
"https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoped-functions/-/babel-plugin-transform-es2015-block-scoped-functions-6.22.0.tgz#bbc51b49f964d70cb8d8e0b94e820246ce3a6141" - integrity sha1-u8UbSflk1wy42OC5ToICRs46YUE= - dependencies: - babel-runtime "^6.22.0" - -babel-plugin-transform-es2015-block-scoping@^6.23.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-block-scoping/-/babel-plugin-transform-es2015-block-scoping-6.26.0.tgz#d70f5299c1308d05c12f463813b0a09e73b1895f" - integrity sha1-1w9SmcEwjQXBL0Y4E7CgnnOxiV8= - dependencies: - babel-runtime "^6.26.0" - babel-template "^6.26.0" - babel-traverse "^6.26.0" - babel-types "^6.26.0" - lodash "^4.17.4" - -babel-plugin-transform-es2015-classes@^6.23.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-classes/-/babel-plugin-transform-es2015-classes-6.24.1.tgz#5a4c58a50c9c9461e564b4b2a3bfabc97a2584db" - integrity sha1-WkxYpQyclGHlZLSyo7+ryXolhNs= - dependencies: - babel-helper-define-map "^6.24.1" - babel-helper-function-name "^6.24.1" - babel-helper-optimise-call-expression "^6.24.1" - babel-helper-replace-supers "^6.24.1" - babel-messages "^6.23.0" - babel-runtime "^6.22.0" - babel-template "^6.24.1" - babel-traverse "^6.24.1" - babel-types "^6.24.1" - -babel-plugin-transform-es2015-computed-properties@^6.22.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-computed-properties/-/babel-plugin-transform-es2015-computed-properties-6.24.1.tgz#6fe2a8d16895d5634f4cd999b6d3480a308159b3" - integrity sha1-b+Ko0WiV1WNPTNmZttNICjCBWbM= - dependencies: - babel-runtime "^6.22.0" - babel-template "^6.24.1" - -babel-plugin-transform-es2015-destructuring@^6.23.0: - version "6.23.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-destructuring/-/babel-plugin-transform-es2015-destructuring-6.23.0.tgz#997bb1f1ab967f682d2b0876fe358d60e765c56d" - integrity sha1-mXux8auWf2gtKwh2/jWNYOdlxW0= - dependencies: - babel-runtime "^6.22.0" - -babel-plugin-transform-es2015-duplicate-keys@^6.22.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-duplicate-keys/-/babel-plugin-transform-es2015-duplicate-keys-6.24.1.tgz#73eb3d310ca969e3ef9ec91c53741a6f1576423e" - integrity sha1-c+s9MQypaePvnskcU3QabxV2Qj4= - dependencies: - babel-runtime "^6.22.0" - babel-types "^6.24.1" - -babel-plugin-transform-es2015-for-of@^6.23.0: - version "6.23.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-for-of/-/babel-plugin-transform-es2015-for-of-6.23.0.tgz#f47c95b2b613df1d3ecc2fdb7573623c75248691" - integrity sha1-9HyVsrYT3x0+zC/bdXNiPHUkhpE= - dependencies: - babel-runtime "^6.22.0" - -babel-plugin-transform-es2015-function-name@^6.22.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-function-name/-/babel-plugin-transform-es2015-function-name-6.24.1.tgz#834c89853bc36b1af0f3a4c5dbaa94fd8eacaa8b" - integrity sha1-g0yJhTvDaxrw86TF26qU/Y6sqos= - dependencies: - babel-helper-function-name "^6.24.1" - babel-runtime "^6.22.0" - babel-types "^6.24.1" - -babel-plugin-transform-es2015-literals@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-literals/-/babel-plugin-transform-es2015-literals-6.22.0.tgz#4f54a02d6cd66cf915280019a31d31925377ca2e" - integrity sha1-T1SgLWzWbPkVKAAZox0xklN3yi4= - dependencies: - babel-runtime "^6.22.0" - 
-babel-plugin-transform-es2015-modules-amd@^6.22.0, babel-plugin-transform-es2015-modules-amd@^6.24.0, babel-plugin-transform-es2015-modules-amd@^6.24.1: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-amd/-/babel-plugin-transform-es2015-modules-amd-6.24.1.tgz#3b3e54017239842d6d19c3011c4bd2f00a00d154" - integrity sha1-Oz5UAXI5hC1tGcMBHEvS8AoA0VQ= - dependencies: - babel-plugin-transform-es2015-modules-commonjs "^6.24.1" - babel-runtime "^6.22.0" - babel-template "^6.24.1" - -babel-plugin-transform-es2015-modules-commonjs@^6.23.0, babel-plugin-transform-es2015-modules-commonjs@^6.24.1: - version "6.26.2" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-commonjs/-/babel-plugin-transform-es2015-modules-commonjs-6.26.2.tgz#58a793863a9e7ca870bdc5a881117ffac27db6f3" - integrity sha512-CV9ROOHEdrjcwhIaJNBGMBCodN+1cfkwtM1SbUHmvyy35KGT7fohbpOxkE2uLz1o6odKK2Ck/tz47z+VqQfi9Q== - dependencies: - babel-plugin-transform-strict-mode "^6.24.1" - babel-runtime "^6.26.0" - babel-template "^6.26.0" - babel-types "^6.26.0" - -babel-plugin-transform-es2015-modules-systemjs@^6.23.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-systemjs/-/babel-plugin-transform-es2015-modules-systemjs-6.24.1.tgz#ff89a142b9119a906195f5f106ecf305d9407d23" - integrity sha1-/4mhQrkRmpBhlfXxBuzzBdlAfSM= - dependencies: - babel-helper-hoist-variables "^6.24.1" - babel-runtime "^6.22.0" - babel-template "^6.24.1" - -babel-plugin-transform-es2015-modules-umd@^6.23.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-modules-umd/-/babel-plugin-transform-es2015-modules-umd-6.24.1.tgz#ac997e6285cd18ed6176adb607d602344ad38468" - integrity sha1-rJl+YoXNGO1hdq22B9YCNErThGg= - dependencies: - babel-plugin-transform-es2015-modules-amd "^6.24.1" - babel-runtime "^6.22.0" - babel-template "^6.24.1" - -babel-plugin-transform-es2015-object-super@^6.22.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-object-super/-/babel-plugin-transform-es2015-object-super-6.24.1.tgz#24cef69ae21cb83a7f8603dad021f572eb278f8d" - integrity sha1-JM72muIcuDp/hgPa0CH1cusnj40= - dependencies: - babel-helper-replace-supers "^6.24.1" - babel-runtime "^6.22.0" - -babel-plugin-transform-es2015-parameters@^6.23.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-parameters/-/babel-plugin-transform-es2015-parameters-6.24.1.tgz#57ac351ab49caf14a97cd13b09f66fdf0a625f2b" - integrity sha1-V6w1GrScrxSpfNE7CfZv3wpiXys= - dependencies: - babel-helper-call-delegate "^6.24.1" - babel-helper-get-function-arity "^6.24.1" - babel-runtime "^6.22.0" - babel-template "^6.24.1" - babel-traverse "^6.24.1" - babel-types "^6.24.1" - -babel-plugin-transform-es2015-shorthand-properties@^6.22.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-shorthand-properties/-/babel-plugin-transform-es2015-shorthand-properties-6.24.1.tgz#24f875d6721c87661bbd99a4622e51f14de38aa0" - integrity sha1-JPh11nIch2YbvZmkYi5R8U3jiqA= - dependencies: - babel-runtime "^6.22.0" - babel-types "^6.24.1" - -babel-plugin-transform-es2015-spread@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-spread/-/babel-plugin-transform-es2015-spread-6.22.0.tgz#d6d68a99f89aedc4536c81a542e8dd9f1746f8d1" - integrity sha1-1taKmfia7cRTbIGlQujdnxdG+NE= - dependencies: - babel-runtime "^6.22.0" - 
-babel-plugin-transform-es2015-sticky-regex@^6.22.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-sticky-regex/-/babel-plugin-transform-es2015-sticky-regex-6.24.1.tgz#00c1cdb1aca71112cdf0cf6126c2ed6b457ccdbc" - integrity sha1-AMHNsaynERLN8M9hJsLta0V8zbw= - dependencies: - babel-helper-regex "^6.24.1" - babel-runtime "^6.22.0" - babel-types "^6.24.1" - -babel-plugin-transform-es2015-template-literals@^6.22.0: - version "6.22.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-template-literals/-/babel-plugin-transform-es2015-template-literals-6.22.0.tgz#a84b3450f7e9f8f1f6839d6d687da84bb1236d8d" - integrity sha1-qEs0UPfp+PH2g51taH2oS7EjbY0= - dependencies: - babel-runtime "^6.22.0" - -babel-plugin-transform-es2015-typeof-symbol@^6.23.0: - version "6.23.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-typeof-symbol/-/babel-plugin-transform-es2015-typeof-symbol-6.23.0.tgz#dec09f1cddff94b52ac73d505c84df59dcceb372" - integrity sha1-3sCfHN3/lLUqxz1QXITfWdzOs3I= - dependencies: - babel-runtime "^6.22.0" - -babel-plugin-transform-es2015-unicode-regex@^6.22.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-es2015-unicode-regex/-/babel-plugin-transform-es2015-unicode-regex-6.24.1.tgz#d38b12f42ea7323f729387f18a7c5ae1faeb35e9" - integrity sha1-04sS9C6nMj9yk4fxinxa4frrNek= - dependencies: - babel-helper-regex "^6.24.1" - babel-runtime "^6.22.0" - regexpu-core "^2.0.0" - -babel-plugin-transform-exponentiation-operator@^6.22.0: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-exponentiation-operator/-/babel-plugin-transform-exponentiation-operator-6.24.1.tgz#2ab0c9c7f3098fa48907772bb813fe41e8de3a0e" - integrity sha1-KrDJx/MJj6SJB3cruBP+QejeOg4= - dependencies: - babel-helper-builder-binary-assignment-operator-visitor "^6.24.1" - babel-plugin-syntax-exponentiation-operator "^6.8.0" - babel-runtime "^6.22.0" - -babel-plugin-transform-regenerator@^6.22.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-regenerator/-/babel-plugin-transform-regenerator-6.26.0.tgz#e0703696fbde27f0a3efcacf8b4dca2f7b3a8f2f" - integrity sha1-4HA2lvveJ/Cj78rPi03KL3s6jy8= - dependencies: - regenerator-transform "^0.10.0" - -babel-plugin-transform-strict-mode@^6.24.1: - version "6.24.1" - resolved "https://registry.yarnpkg.com/babel-plugin-transform-strict-mode/-/babel-plugin-transform-strict-mode-6.24.1.tgz#d5faf7aa578a65bbe591cf5edae04a0c67020758" - integrity sha1-1fr3qleKZbvlkc9e2uBKDGcCB1g= - dependencies: - babel-runtime "^6.22.0" - babel-types "^6.24.1" - -babel-polyfill@^6.26.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-polyfill/-/babel-polyfill-6.26.0.tgz#379937abc67d7895970adc621f284cd966cf2153" - integrity sha1-N5k3q8Z9eJWXCtxiHyhM2WbPIVM= - dependencies: - babel-runtime "^6.26.0" - core-js "^2.5.0" - regenerator-runtime "^0.10.5" - -babel-preset-env@^1.7.0: - version "1.7.0" - resolved "https://registry.yarnpkg.com/babel-preset-env/-/babel-preset-env-1.7.0.tgz#dea79fa4ebeb883cd35dab07e260c1c9c04df77a" - integrity sha512-9OR2afuKDneX2/q2EurSftUYM0xGu4O2D9adAhVfADDhrYDaxXV0rBbevVYoY9n6nyX1PmQW/0jtpJvUNr9CHg== - dependencies: - babel-plugin-check-es2015-constants "^6.22.0" - babel-plugin-syntax-trailing-function-commas "^6.22.0" - babel-plugin-transform-async-to-generator "^6.22.0" - babel-plugin-transform-es2015-arrow-functions "^6.22.0" - babel-plugin-transform-es2015-block-scoped-functions "^6.22.0" - 
babel-plugin-transform-es2015-block-scoping "^6.23.0" - babel-plugin-transform-es2015-classes "^6.23.0" - babel-plugin-transform-es2015-computed-properties "^6.22.0" - babel-plugin-transform-es2015-destructuring "^6.23.0" - babel-plugin-transform-es2015-duplicate-keys "^6.22.0" - babel-plugin-transform-es2015-for-of "^6.23.0" - babel-plugin-transform-es2015-function-name "^6.22.0" - babel-plugin-transform-es2015-literals "^6.22.0" - babel-plugin-transform-es2015-modules-amd "^6.22.0" - babel-plugin-transform-es2015-modules-commonjs "^6.23.0" - babel-plugin-transform-es2015-modules-systemjs "^6.23.0" - babel-plugin-transform-es2015-modules-umd "^6.23.0" - babel-plugin-transform-es2015-object-super "^6.22.0" - babel-plugin-transform-es2015-parameters "^6.23.0" - babel-plugin-transform-es2015-shorthand-properties "^6.22.0" - babel-plugin-transform-es2015-spread "^6.22.0" - babel-plugin-transform-es2015-sticky-regex "^6.22.0" - babel-plugin-transform-es2015-template-literals "^6.22.0" - babel-plugin-transform-es2015-typeof-symbol "^6.23.0" - babel-plugin-transform-es2015-unicode-regex "^6.22.0" - babel-plugin-transform-exponentiation-operator "^6.22.0" - babel-plugin-transform-regenerator "^6.22.0" - browserslist "^3.2.6" - invariant "^2.2.2" - semver "^5.3.0" - -babel-register@^6.26.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-register/-/babel-register-6.26.0.tgz#6ed021173e2fcb486d7acb45c6009a856f647071" - integrity sha1-btAhFz4vy0htestFxgCahW9kcHE= - dependencies: - babel-core "^6.26.0" - babel-runtime "^6.26.0" - core-js "^2.5.0" - home-or-tmp "^2.0.0" - lodash "^4.17.4" - mkdirp "^0.5.1" - source-map-support "^0.4.15" - -babel-runtime@^6.18.0, babel-runtime@^6.22.0, babel-runtime@^6.26.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-runtime/-/babel-runtime-6.26.0.tgz#965c7058668e82b55d7bfe04ff2337bc8b5647fe" - integrity sha1-llxwWGaOgrVde/4E/yM3vItWR/4= - dependencies: - core-js "^2.4.0" - regenerator-runtime "^0.11.0" - -babel-template@^6.24.1, babel-template@^6.26.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-template/-/babel-template-6.26.0.tgz#de03e2d16396b069f46dd9fff8521fb1a0e35e02" - integrity sha1-3gPi0WOWsGn0bdn/+FIfsaDjXgI= - dependencies: - babel-runtime "^6.26.0" - babel-traverse "^6.26.0" - babel-types "^6.26.0" - babylon "^6.18.0" - lodash "^4.17.4" - -babel-traverse@^6.24.1, babel-traverse@^6.26.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-traverse/-/babel-traverse-6.26.0.tgz#46a9cbd7edcc62c8e5c064e2d2d8d0f4035766ee" - integrity sha1-RqnL1+3MYsjlwGTi0tjQ9ANXZu4= - dependencies: - babel-code-frame "^6.26.0" - babel-messages "^6.23.0" - babel-runtime "^6.26.0" - babel-types "^6.26.0" - babylon "^6.18.0" - debug "^2.6.8" - globals "^9.18.0" - invariant "^2.2.2" - lodash "^4.17.4" - -babel-types@^6.19.0, babel-types@^6.24.1, babel-types@^6.26.0: - version "6.26.0" - resolved "https://registry.yarnpkg.com/babel-types/-/babel-types-6.26.0.tgz#a3b073f94ab49eb6fa55cd65227a334380632497" - integrity sha1-o7Bz+Uq0nrb6Vc1lInozQ4BjJJc= - dependencies: - babel-runtime "^6.26.0" - esutils "^2.0.2" - lodash "^4.17.4" - to-fast-properties "^1.0.3" - babel6-plugin-strip-class-callcheck@^6.0.0: version "6.0.0" resolved "https://registry.yarnpkg.com/babel6-plugin-strip-class-callcheck/-/babel6-plugin-strip-class-callcheck-6.0.0.tgz#de841c1abebbd39f78de0affb2c9a52ee228fddf" integrity sha1-3oQcGr6705943gr/ssmlLuIo/d8= -babylon@^6.18.0: - version "6.18.0" - resolved 
"https://registry.yarnpkg.com/babylon/-/babylon-6.18.0.tgz#af2f3b88fa6f5c1e4c634d1a0f8eac4f55b395e3" - integrity sha512-q/UEjfGJ2Cm3oKV71DJz9d25TPnq5rhBVL2Q4fA5wcC3jcrdn7+SssEybFIxwAvvP+YCsCYNKughoF33GxgycQ== - backbone@^1.1.2: version "1.4.0" resolved "https://registry.yarnpkg.com/backbone/-/backbone-1.4.0.tgz#54db4de9df7c3811c3f032f34749a4cd27f3bd12" @@ -5493,11 +5395,6 @@ backbone@^1.1.2: dependencies: underscore ">=1.8.3" -backo2@1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947" - integrity sha1-MasayLEpNjRj41s+u2n038+6eUc= - bail@^1.0.0: version "1.0.5" resolved "https://registry.yarnpkg.com/bail/-/bail-1.0.5.tgz#b6fa133404a392cbc1f8c4bf63f5953351e7a776" @@ -5508,11 +5405,6 @@ balanced-match@^1.0.0: resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== -base64-arraybuffer@0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.4.tgz#9818c79e059b1355f97e0428a017c838e90ba812" - integrity sha1-mBjHngWbE1X5fgQooBfIOOkLqBI= - base64-arraybuffer@~1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-1.0.1.tgz#87bd13525626db4a9838e00a508c2b73efcf348c" @@ -5594,10 +5486,10 @@ bindings@^1.5.0: dependencies: file-uri-to-path "1.0.0" -bl@^4.0.3: - version "4.0.3" - resolved "https://registry.yarnpkg.com/bl/-/bl-4.0.3.tgz#12d6287adc29080e22a705e5764b2a9522cdc489" - integrity sha512-fs4G6/Hu4/EE+F75J8DuN/0IpQqNjAdC7aEQv7Qt8MHGUH7Ckv2MwTEEeN9QehD0pfIDkMI1bkHYkKy7xHyKIg== +bl@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/bl/-/bl-4.1.0.tgz#451535264182bec2fbbc83a62ab98cf11d9f7b3a" + integrity sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w== dependencies: buffer "^5.5.0" inherits "^2.0.4" @@ -5608,11 +5500,6 @@ blank-object@^1.0.1: resolved "https://registry.yarnpkg.com/blank-object/-/blank-object-1.0.2.tgz#f990793fbe9a8c8dd013fb3219420bec81d5f4b9" integrity sha1-+ZB5P76ajI3QE/syGUIL7IHV9Lk= -blob@0.0.5: - version "0.0.5" - resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.5.tgz#d680eeef25f8cd91ad533f5b01eed48e64caf683" - integrity sha512-gaqbzQPqOoamawKg0LGVd7SzLgXS+JH61oWprSLH+P+abTczqJbhTR8CmJ2u9/bUYNmHTGJx/UEmn6doAvvuig== - bluebird@^3.1.1, bluebird@^3.3.5, bluebird@^3.4.6, bluebird@^3.5.5: version "3.7.2" resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" @@ -5755,23 +5642,7 @@ broccoli-asset-rewrite@^2.0.0: dependencies: broccoli-filter "^1.2.3" -broccoli-babel-transpiler@^6.5.0: - version "6.5.1" - resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-6.5.1.tgz#a4afc8d3b59b441518eb9a07bd44149476e30738" - integrity sha512-w6GcnkxvHcNCte5FcLGEG1hUdQvlfvSN/6PtGWU/otg69Ugk8rUk51h41R0Ugoc+TNxyeFG1opRt2RlA87XzNw== - dependencies: - babel-core "^6.26.0" - broccoli-funnel "^2.0.1" - broccoli-merge-trees "^2.0.0" - broccoli-persistent-filter "^1.4.3" - clone "^2.0.0" - hash-for-dep "^1.2.3" - heimdalljs-logger "^0.1.7" - json-stable-stringify "^1.0.0" - rsvp "^4.8.2" - workerpool "^2.3.0" - -broccoli-babel-transpiler@^7.3.0, broccoli-babel-transpiler@^7.6.0, broccoli-babel-transpiler@^7.8.0: +broccoli-babel-transpiler@^7.8.0: version "7.8.0" resolved 
"https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-7.8.0.tgz#7e0f01fce5739f49bbadeee7f1e625ca51cad66e" integrity sha512-dv30Td5uL7dO3NzQUqQKQs+Iq7JGKnCNtvc6GBO76uVPqGnRlsQZcYqdBVr33JrctR+ZrpTUf7TjsFKeDRFA8Q== @@ -5853,6 +5724,23 @@ broccoli-concat@^4.2.4: lodash.omit "^4.1.0" lodash.uniq "^4.2.0" +broccoli-concat@^4.2.5: + version "4.2.5" + resolved "https://registry.yarnpkg.com/broccoli-concat/-/broccoli-concat-4.2.5.tgz#d578f00094048b5fc87195e82fbdbde20d838d29" + integrity sha512-dFB5ATPwOyV8S2I7a07HxCoutoq23oY//LhM6Mou86cWUTB174rND5aQLR7Fu8FjFFLxoTbkk7y0VPITJ1IQrw== + dependencies: + broccoli-debug "^0.6.5" + broccoli-kitchen-sink-helpers "^0.3.1" + broccoli-plugin "^4.0.2" + ensure-posix-path "^1.0.2" + fast-sourcemap-concat "^2.1.0" + find-index "^1.1.0" + fs-extra "^8.1.0" + fs-tree-diff "^2.0.1" + lodash.merge "^4.6.2" + lodash.omit "^4.1.0" + lodash.uniq "^4.2.0" + broccoli-config-loader@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/broccoli-config-loader/-/broccoli-config-loader-1.0.1.tgz#d10aaf8ebc0cb45c1da5baa82720e1d88d28c80a" @@ -5965,7 +5853,7 @@ broccoli-funnel@^1.0.1, broccoli-funnel@^1.1.0: symlink-or-copy "^1.0.0" walk-sync "^0.3.1" -broccoli-funnel@^2.0.0, broccoli-funnel@^2.0.1, broccoli-funnel@^2.0.2: +broccoli-funnel@^2.0.0, broccoli-funnel@^2.0.1, broccoli-funnel@^2.0.2, broccoli-funnel@~2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-2.0.2.tgz#0edf629569bc10bd02cc525f74b9a38e71366a75" integrity sha512-/vDTqtv7ipjEZQOVqO4vGDVAOZyuYzQ/EgGoyewfOgh1M7IQAToBKZI0oAQPgMBeFPPlIbfMuAngk+ohPBuaHQ== @@ -6000,7 +5888,7 @@ broccoli-funnel@^3.0.3: path-posix "^1.0.0" walk-sync "^2.0.2" -broccoli-funnel@^3.0.8: +broccoli-funnel@^3.0.5, broccoli-funnel@^3.0.8: version "3.0.8" resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-3.0.8.tgz#f5b62e2763c3918026a15a3c833edc889971279b" integrity sha512-ng4eIhPYiXqMw6SyGoxPHR3YAwEd2lr9FgBI1CyTbspl4txZovOsmzFkMkGAlu88xyvYXJqHiM2crfLa65T1BQ== @@ -6029,7 +5917,7 @@ broccoli-kitchen-sink-helpers@^0.3.1: glob "^5.0.10" mkdirp "^0.5.1" -broccoli-merge-trees@^1.0.0, broccoli-merge-trees@^1.1.1: +broccoli-merge-trees@^1.1.1: version "1.2.4" resolved "https://registry.yarnpkg.com/broccoli-merge-trees/-/broccoli-merge-trees-1.2.4.tgz#a001519bb5067f06589d91afa2942445a2d0fdb5" integrity sha1-oAFRm7UGfwZYnZGvopQkRaLQ/bU= @@ -6051,7 +5939,7 @@ broccoli-merge-trees@^2.0.0: broccoli-plugin "^1.3.0" merge-trees "^1.0.1" -broccoli-merge-trees@^3.0.0, broccoli-merge-trees@^3.0.1, broccoli-merge-trees@^3.0.2: +broccoli-merge-trees@^3.0.0, broccoli-merge-trees@^3.0.1, broccoli-merge-trees@^3.0.2, broccoli-merge-trees@~3.0.2: version "3.0.2" resolved "https://registry.yarnpkg.com/broccoli-merge-trees/-/broccoli-merge-trees-3.0.2.tgz#f33b451994225522b5c9bcf27d59decfd8ba537d" integrity sha512-ZyPAwrOdlCddduFbsMyyFzJUrvW6b04pMvDiAQZrCwghlvgowJDY+EfoXn+eR1RRA5nmGHJ+B68T63VnpRiT1A== @@ -6164,6 +6052,19 @@ broccoli-persistent-filter@^3.1.0, broccoli-persistent-filter@^3.1.2: symlink-or-copy "^1.0.1" sync-disk-cache "^2.0.0" +broccoli-plugin@*, broccoli-plugin@^4.0.5, broccoli-plugin@^4.0.7: + version "4.0.7" + resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-4.0.7.tgz#dd176a85efe915ed557d913744b181abe05047db" + integrity sha512-a4zUsWtA1uns1K7p9rExYVYG99rdKeGRymW0qOCNkvDPHQxVi3yVyJHhQbM3EZwdt2E0mnhr5e0c/bPpJ7p3Wg== + dependencies: + broccoli-node-api "^1.7.0" + broccoli-output-wrapper "^3.2.5" + fs-merger "^3.2.1" + 
promise-map-series "^0.3.0" + quick-temp "^0.1.8" + rimraf "^3.0.2" + symlink-or-copy "^1.3.1" + broccoli-plugin@1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.1.0.tgz#73e2cfa05f8ea1e3fc1420c40c3d9e7dc724bf02" @@ -6184,7 +6085,7 @@ broccoli-plugin@^1.0.0, broccoli-plugin@^1.1.0, broccoli-plugin@^1.2.0, broccoli rimraf "^2.3.4" symlink-or-copy "^1.1.8" -broccoli-plugin@^2.0.0, broccoli-plugin@^2.1.0: +broccoli-plugin@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-2.1.0.tgz#2fab6c578219cfcc64f773e9616073313fc8b334" integrity sha512-ElE4caljW4slapyEhSD9jU9Uayc8SoSABWdmY9SqbV8DHNxU6xg1jJsPcMm+cXOvggR3+G+OXAYQeFjWVnznaw== @@ -6220,19 +6121,6 @@ broccoli-plugin@^4.0.0, broccoli-plugin@^4.0.1, broccoli-plugin@^4.0.2, broccoli rimraf "^3.0.0" symlink-or-copy "^1.3.0" -broccoli-plugin@^4.0.7: - version "4.0.7" - resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-4.0.7.tgz#dd176a85efe915ed557d913744b181abe05047db" - integrity sha512-a4zUsWtA1uns1K7p9rExYVYG99rdKeGRymW0qOCNkvDPHQxVi3yVyJHhQbM3EZwdt2E0mnhr5e0c/bPpJ7p3Wg== - dependencies: - broccoli-node-api "^1.7.0" - broccoli-output-wrapper "^3.2.5" - fs-merger "^3.2.1" - promise-map-series "^0.3.0" - quick-temp "^0.1.8" - rimraf "^3.0.2" - symlink-or-copy "^1.3.1" - broccoli-rollup@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/broccoli-rollup/-/broccoli-rollup-2.1.1.tgz#0b77dc4b7560a53e998ea85f3b56772612d4988d" @@ -6250,20 +6138,20 @@ broccoli-rollup@^2.1.1: symlink-or-copy "^1.1.8" walk-sync "^0.3.1" -broccoli-rollup@^4.1.1: - version "4.1.1" - resolved "https://registry.yarnpkg.com/broccoli-rollup/-/broccoli-rollup-4.1.1.tgz#7531a24d88ddab9f1bace1c6ee6e6ca74a38d36f" - integrity sha512-hkp0dB5chiemi32t6hLe5bJvxuTOm1TU+SryFlZIs95KT9+94uj0C8w6k6CsZ2HuIdIZg6D252t4gwOlcTXrpA== +broccoli-rollup@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/broccoli-rollup/-/broccoli-rollup-5.0.0.tgz#a77b53bcef1b70e988913fee82265c0a4ca530da" + integrity sha512-QdMuXHwsdz/LOS8zu4HP91Sfi4ofimrOXoYP/lrPdRh7lJYD87Lfq4WzzUhGHsxMfzANIEvl/7qVHKD3cFJ4tA== dependencies: - "@types/broccoli-plugin" "^1.3.0" - broccoli-plugin "^2.0.0" + "@types/broccoli-plugin" "^3.0.0" + broccoli-plugin "^4.0.7" fs-tree-diff "^2.0.1" heimdalljs "^0.2.6" node-modules-path "^1.0.1" - rollup "^1.12.0" + rollup "^2.50.0" rollup-pluginutils "^2.8.1" symlink-or-copy "^1.2.0" - walk-sync "^1.1.3" + walk-sync "^2.2.0" broccoli-sass-source-maps@^4.0.0: version "4.0.0" @@ -6378,10 +6266,10 @@ broccoli-terser-sourcemap@^4.1.0: walk-sync "^2.2.0" workerpool "^6.0.0" -broccoli@^3.4.2: - version "3.5.0" - resolved "https://registry.yarnpkg.com/broccoli/-/broccoli-3.5.0.tgz#bdf96dc32980d1ad9f7ef5e68aaaa19e8d47b602" - integrity sha512-qGYirIs6W5NGzDPMTRtoOnveP3UQJKWFjAWdRRZJjCoAJYN872sXuO48cnNfnc/c1nd9fCR8et4knlTPgI+9yQ== +broccoli@^3.5.1: + version "3.5.2" + resolved "https://registry.yarnpkg.com/broccoli/-/broccoli-3.5.2.tgz#60921167d57b43fb5bad527420d62fe532595ef4" + integrity sha512-sWi3b3fTUSVPDsz5KsQ5eCQNVAtLgkIE/HYFkEZXR/07clqmd4E/gFiuwSaqa9b+QTXc1Uemfb7TVWbEIURWDg== dependencies: "@types/chai" "^4.2.9" "@types/chai-as-promised" "^7.1.2" @@ -6489,15 +6377,7 @@ browserslist@4.14.2: escalade "^3.0.2" node-releases "^1.1.61" -browserslist@^3.1.1, browserslist@^3.2.6: - version "3.2.8" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-3.2.8.tgz#b0005361d6471f0f5952797a76fc985f1f978fc6" - integrity 
sha512-WHVocJYavUwVgVViC0ORikPHQquXwVh939TaelZ4WDqpWgTX/FsGhl/+P4qBUAGcRvtOgDgC+xftNWWp2RUTAQ== - dependencies: - caniuse-lite "^1.0.30000844" - electron-to-chromium "^1.3.47" - -browserslist@^4.0.0, browserslist@^4.12.0, browserslist@^4.14.5, browserslist@^4.16.1, browserslist@^4.16.3: +browserslist@^4.0.0, browserslist@^4.12.0, browserslist@^4.16.3: version "4.16.3" resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.16.3.tgz#340aa46940d7db878748567c5dea24a48ddf3717" integrity sha512-vIyhWmIkULaq04Gt93txdh+j02yX/JzlyhLYbV3YQCn/zvES3JnY7TifHHvvr1w5hTDluNKMkV05cs4vy8Q7sw== @@ -6508,6 +6388,17 @@ browserslist@^4.0.0, browserslist@^4.12.0, browserslist@^4.14.5, browserslist@^4 escalade "^3.1.1" node-releases "^1.1.70" +browserslist@^4.14.5, browserslist@^4.17.5, browserslist@^4.19.1: + version "4.19.3" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.19.3.tgz#29b7caad327ecf2859485f696f9604214bedd383" + integrity sha512-XK3X4xtKJ+Txj8G5c30B4gsm71s69lqXlkYui4s6EkKxuv49qjYlY6oVd+IFJ73d4YymtM3+djvvt/R/iJwwDg== + dependencies: + caniuse-lite "^1.0.30001312" + electron-to-chromium "^1.4.71" + escalade "^3.1.1" + node-releases "^2.0.2" + picocolors "^1.0.0" + browserslist@^4.16.6, browserslist@^4.17.3: version "4.17.3" resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.17.3.tgz#2844cd6eebe14d12384b0122d217550160d2d624" @@ -6643,19 +6534,6 @@ cache-base@^1.0.1: union-value "^1.0.0" unset-value "^1.0.0" -cacheable-request@^2.1.1: - version "2.1.4" - resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-2.1.4.tgz#0d808801b6342ad33c91df9d0b44dc09b91e5c3d" - integrity sha1-DYCIAbY0KtM8kd+dC0TcCbkeXD0= - dependencies: - clone-response "1.0.2" - get-stream "3.0.0" - http-cache-semantics "3.8.1" - keyv "3.0.0" - lowercase-keys "1.0.0" - normalize-url "2.0.1" - responselike "1.0.2" - calculate-cache-key-for-tree@^1.1.0: version "1.2.3" resolved "https://registry.yarnpkg.com/calculate-cache-key-for-tree/-/calculate-cache-key-for-tree-1.2.3.tgz#5a5e4fcfa2d374a63e47fe967593f179e8282825" @@ -6683,7 +6561,7 @@ call-me-maybe@^1.0.1: resolved "https://registry.yarnpkg.com/call-me-maybe/-/call-me-maybe-1.0.1.tgz#26d208ea89e37b5cbde60250a15f031c16a4d66b" integrity sha1-JtII6onje1y95gJQoV8DHBak1ms= -callsites@^3.0.0: +callsites@^3.0.0, callsites@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== @@ -6723,7 +6601,7 @@ caniuse-api@^3.0.0: lodash.memoize "^4.1.2" lodash.uniq "^4.5.0" -caniuse-lite@^1.0.0, caniuse-lite@^1.0.30000844, caniuse-lite@^1.0.30001109, caniuse-lite@^1.0.30001181: +caniuse-lite@^1.0.0, caniuse-lite@^1.0.30001109: version "1.0.30001183" resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001183.tgz#7a57ba9d6584119bb5f2bc76d3cc47ba9356b3e2" integrity sha512-7JkwTEE1hlRKETbCFd8HDZeLiQIUcl8rC6JgNjvHCNaxOeNmQ9V4LvQXRUsKIV2CC73qKxljwVhToaA3kLRqTw== @@ -6733,6 +6611,11 @@ caniuse-lite@^1.0.30001125, caniuse-lite@^1.0.30001264: resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001265.tgz#0613c9e6c922e422792e6fcefdf9a3afeee4f8c3" integrity sha512-YzBnspggWV5hep1m9Z6sZVLOt7vrju8xWooFAgN6BA5qvy98qPAPb7vNUzypFaoh2pb3vlfzbDO8tB57UPGbtw== +caniuse-lite@^1.0.30001181, caniuse-lite@^1.0.30001312: + version "1.0.30001312" + resolved 
"https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001312.tgz#e11eba4b87e24d22697dae05455d5aea28550d5f" + integrity sha512-Wiz1Psk2MEK0pX3rUzWaunLTZzqS2JYZFzNKqAiJGiuxIjRPLgV6+VDPOg6lQOUxmDwhTlh198JsTTi8Hzw6aQ== + capture-exit@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/capture-exit/-/capture-exit-2.0.0.tgz#fb953bfaebeb781f62898239dabb426d08a509a4" @@ -6763,11 +6646,6 @@ ccount@^1.0.0: resolved "https://registry.yarnpkg.com/ccount/-/ccount-1.1.0.tgz#246687debb6014735131be8abab2d93898f8d043" integrity sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg== -ceibo@~2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/ceibo/-/ceibo-2.0.0.tgz#9a61eb054a91c09934588d4e45d9dd2c3bf04eee" - integrity sha1-mmHrBUqRwJk0WI1ORdndLDvwTu4= - chalk@2.4.2, chalk@^2.0.0, chalk@^2.0.1, chalk@^2.1.0, chalk@^2.3.0, chalk@^2.4.1, chalk@^2.4.2: version "2.4.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" @@ -6804,7 +6682,7 @@ chalk@^4.0.0: ansi-styles "^4.1.0" supports-color "^7.1.0" -chalk@^4.1.0: +chalk@^4.1.0, chalk@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== @@ -6827,6 +6705,11 @@ character-reference-invalid@^1.0.0: resolved "https://registry.yarnpkg.com/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz#083329cda0eae272ab3dbbf37e9a382c13af1560" integrity sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg== +charcodes@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/charcodes/-/charcodes-0.2.0.tgz#5208d327e6cc05f99eb80ffc814707572d1f14e4" + integrity sha512-Y4kiDb+AM4Ecy58YkuZrrSRJBDQdQ2L+NyS1vHHFtNtUjgutcZfx3yp1dAONI/oPaPmyGfCLx5CxL+zauIMyKQ== + chardet@^0.7.0: version "0.7.0" resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" @@ -6934,6 +6817,11 @@ ci-info@^2.0.0: resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46" integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ== +ci-info@^3.3.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.3.0.tgz#b4ed1fb6818dea4803a55c623041f9165d2066b2" + integrity sha512-riT/3vI5YpVH6/qomlDnJow6TBee2PBKSEpx3O32EGPYbWGIRsIlGRms3Sm74wYE1JMo8RnO04Hb12+v1J5ICw== + cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: version "1.0.4" resolved "https://registry.yarnpkg.com/cipher-base/-/cipher-base-1.0.4.tgz#8760e4ecc272f4c363532f926d874aae2c1397de" @@ -6981,7 +6869,7 @@ clean-css@^4.2.3: dependencies: source-map "~0.6.0" -clean-stack@^2.0.0: +clean-stack@^2.0.0, clean-stack@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/clean-stack/-/clean-stack-2.2.0.tgz#ee8472dbb129e727b31e8a10a427dee9dfe4008b" integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A== @@ -7087,19 +6975,12 @@ clone-deep@^4.0.1: kind-of "^6.0.2" shallow-clone "^3.0.0" -clone-response@1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/clone-response/-/clone-response-1.0.2.tgz#d1dc973920314df67fbeb94223b4ee350239e96b" - integrity sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws= - dependencies: - mimic-response "^1.0.0" - clone@^1.0.2: version 
"1.0.4" resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.4.tgz#da309cc263df15994c688ca902179ca3c7cd7c7e" integrity sha1-2jCcwmPfFZlMaIypAheco8fNfH4= -clone@^2.0.0, clone@^2.1.2: +clone@^2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.2.tgz#1b7f4b9f591f1e8f83670401600345a02887435f" integrity sha1-G39Ln1kfHo+DZwQBYANFoCiHQ18= @@ -7165,12 +7046,7 @@ color-name@~1.1.4: resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== -colorette@^1.2.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/colorette/-/colorette-1.2.1.tgz#4d0b921325c14faf92633086a536db6e89564b1b" - integrity sha512-puCDz0CzydiSYOrnXpz/PKd69zRrribezjtE9yd4zvytoRc8+RY/KJPvtPFKZS3E3wP6neGyMe0vOTlHO5L3Pw== - -colorette@^1.4.0: +colorette@^1.2.1, colorette@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/colorette/-/colorette-1.4.0.tgz#5190fbb87276259a86ad700bff2c6d6faa3fca40" integrity sha512-Y2oEozpomLn7Q3HFP7dpww7AtMJplbM9lGZP6RDfHqmbeRjiwRg4n6VM6j4KLmRke85uWEI7JqF17f3pqdRA0g== @@ -7211,10 +7087,10 @@ commander@2.8.x: dependencies: graceful-readlink ">= 1.0.0" -commander@6.2.0: - version "6.2.0" - resolved "https://registry.yarnpkg.com/commander/-/commander-6.2.0.tgz#b990bfb8ac030aedc6d11bc04d1488ffef56db75" - integrity sha512-zP4jEKbe8SHzKJYQmq8Y9gYjtO/POJLgIdKgV7B9qNmABVFVc+ctqSX6iXh4mCpJfRBOabiZ2YKPg8ciDw6C+Q== +commander@7.2.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-7.2.0.tgz#a36cb57d0b501ce108e4d20559a150a391d97ab7" + integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw== commander@^2.20.0, commander@^2.6.0: version "2.20.3" @@ -7226,7 +7102,7 @@ commander@^4.1.1: resolved "https://registry.yarnpkg.com/commander/-/commander-4.1.1.tgz#9fd602bd936294e9e9ef46a3f4d6964044b18068" integrity sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA== -commander@^6.2.0, commander@^6.2.1: +commander@^6.2.1: version "6.2.1" resolved "https://registry.yarnpkg.com/commander/-/commander-6.2.1.tgz#0792eb682dfbc325999bb2b84fddddba110ac73c" integrity sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA== @@ -7236,7 +7112,7 @@ commander@^8.2.0: resolved "https://registry.yarnpkg.com/commander/-/commander-8.3.0.tgz#4837ea1b2da67b9c616a67afbb0fafee567bca66" integrity sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww== -common-tags@^1.4.0, common-tags@^1.8.0: +common-tags@^1.8.0: version "1.8.0" resolved "https://registry.yarnpkg.com/common-tags/-/common-tags-1.8.0.tgz#8e3153e542d4a39e9b10554434afaaf98956a937" integrity sha512-6P6g0uetGpW/sdyUy/iQQCbFF0kWVMSIVSyYz7Zgjcgh8mgw8PQzDNZeyZ5DQ2gM7LBoZPHmnjz8rUthkBG5tw== @@ -7251,26 +7127,11 @@ compare-versions@^3.6.0: resolved "https://registry.yarnpkg.com/compare-versions/-/compare-versions-3.6.0.tgz#1a5689913685e5a87637b8d3ffca75514ec41d62" integrity sha512-W6Af2Iw1z4CB7q4uU4hv646dW9GQuBM+YpC0UvUCWSD8w90SJjp+ujJuXaEMtAXBtSqGfMPuFOVn4/+FlaqfBA== -component-bind@1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1" - integrity sha1-AMYIq33Nk4l8AAllGx06jh5zu9E= - -component-emitter@1.2.1: - version "1.2.1" - resolved 
"https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.2.1.tgz#137918d6d78283f7df7a6b7c5a63e140e69425e6" - integrity sha1-E3kY1teCg/ffemt8WmPhQOaUJeY= - component-emitter@^1.2.1, component-emitter@~1.3.0: version "1.3.0" resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.3.0.tgz#16e4070fba8ae29b679f2215853ee181ab2eabc0" integrity sha512-Rd3se6QB+sO1TwqZjscQrurpEPIfO0/yYnSin6Q/rD3mOutHvUrCAhJub3r90uNb+SESBuE0QYoB90YdfatsRg== -component-inherit@0.0.3: - version "0.0.3" - resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143" - integrity sha1-ZF/ErfWLcrZJ1crmUTVhnbJv8UM= - compressible@~2.0.16: version "2.0.18" resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.18.tgz#af53cca6b070d4c3c0750fbd77286a6d7cc46fba" @@ -7343,7 +7204,7 @@ console-control-strings@^1.0.0, console-control-strings@~1.1.0: resolved "https://registry.yarnpkg.com/console-control-strings/-/console-control-strings-1.1.0.tgz#3d7cf4464db6446ea644bf4b39507f9851008e8e" integrity sha1-PXz0Rk22RG6mRL9LOVB/mFEAjo4= -console-ui@^3.0.4, console-ui@^3.1.1: +console-ui@^3.0.4, console-ui@^3.1.2: version "3.1.2" resolved "https://registry.yarnpkg.com/console-ui/-/console-ui-3.1.2.tgz#51aef616ff02013c85ccee6a6d77ef7a94202e7a" integrity sha512-+5j3R4wZJcEYZeXk30whc4ZU/+fWW9JMTNntVuMYpjZJ9n26Cxr0tUBXco1NRjVZRpRVvZ4DDKKKIHNYeUG9Dw== @@ -7383,13 +7244,20 @@ continuable-cache@^0.3.1: resolved "https://registry.yarnpkg.com/continuable-cache/-/continuable-cache-0.3.1.tgz#bd727a7faed77e71ff3985ac93351a912733ad0f" integrity sha1-vXJ6f67XfnH/OYWskzUakSczrQ8= -convert-source-map@^1.5.0, convert-source-map@^1.5.1, convert-source-map@^1.7.0: +convert-source-map@^1.5.0, convert-source-map@^1.5.1: version "1.7.0" resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.7.0.tgz#17a2cb882d7f77d3490585e2ce6c524424a3a442" integrity sha512-4FJkXzKXEDB1snCFZlLP4gpC3JILicCpGbzG9f9G7tGqGCzETQ2hWPrcinA9oU4wtf2biUaEH5065UnMeR33oA== dependencies: safe-buffer "~5.1.1" +convert-source-map@^1.7.0: + version "1.8.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.8.0.tgz#f3373c32d21b4d780dd8004514684fb791ca4369" + integrity sha512-+OQdjP49zViI/6i7nIJpA8rAl4sV/JdPfU9nZs3VqOwGIgizICvuN2ru6fMd+4llL0tar18UYJXfZ/TWtmhUjA== + dependencies: + safe-buffer "~5.1.1" + cookie-signature@1.0.6: version "1.0.6" resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c" @@ -7442,12 +7310,12 @@ core-js-compat@^3.16.0, core-js-compat@^3.16.2: browserslist "^4.17.3" semver "7.0.0" -core-js-compat@^3.8.0: - version "3.8.3" - resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.8.3.tgz#9123fb6b9cad30f0651332dc77deba48ef9b0b3f" - integrity sha512-1sCb0wBXnBIL16pfFG1Gkvei6UzvKyTNYpiC41yrdjEv0UoJoq9E/abTMzyYJ6JpTkAj15dLjbqifIzEBDVvog== +core-js-compat@^3.20.2, core-js-compat@^3.21.0: + version "3.21.1" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.21.1.tgz#cac369f67c8d134ff8f9bd1623e3bc2c42068c82" + integrity sha512-gbgX5AUvMb8gwxC7FLVWYT7Kkgu/y7+h/h1X43yJkNqhlK2fuYyQimqvKGNZFAY6CKii/GFKJ2cp/1/42TN36g== dependencies: - browserslist "^4.16.1" + browserslist "^4.19.1" semver "7.0.0" core-js-compat@^3.8.1: @@ -7468,7 +7336,7 @@ core-js@3.19.1: resolved "https://registry.yarnpkg.com/core-js/-/core-js-3.19.1.tgz#f6f173cae23e73a7d88fa23b6e9da329276c6641" integrity 
sha512-Tnc7E9iKd/b/ff7GFbhwPVzJzPztGrChB8X8GLqoYGdEOG8IpLnK1xPyo3ZoO3HsK6TodJS58VGPOxA+hLHQMg== -core-js@^2.4.0, core-js@^2.5.0, core-js@^2.6.5: +core-js@^2.6.5: version "2.6.12" resolved "https://registry.yarnpkg.com/core-js/-/core-js-2.6.12.tgz#d9333dfa7b065e347cc5682219d6f690859cc2ec" integrity sha512-Kb2wC0fvsWfQrgk8HU5lW6U/Lcs8+9aaYcy4ZFc6DDlo4nZ7n70dEgE5rtR0oG6ufKDUnrwfWL1mXR5ljDatrQ== @@ -7673,6 +7541,22 @@ css-loader@^3.6.0: schema-utils "^2.7.0" semver "^6.3.0" +css-loader@^5.2.0: + version "5.2.7" + resolved "https://registry.yarnpkg.com/css-loader/-/css-loader-5.2.7.tgz#9b9f111edf6fb2be5dc62525644cbc9c232064ae" + integrity sha512-Q7mOvpBNBG7YrVGMxRxcBJZFL75o+cH2abNASdibkj/fffYD8qWbInZrD0S9ccI6vZclF3DsHE7njGlLtaHbhg== + dependencies: + icss-utils "^5.1.0" + loader-utils "^2.0.0" + postcss "^8.2.15" + postcss-modules-extract-imports "^3.0.0" + postcss-modules-local-by-default "^4.0.0" + postcss-modules-scope "^3.0.0" + postcss-modules-values "^4.0.0" + postcss-value-parser "^4.1.0" + schema-utils "^3.0.0" + semver "^7.3.5" + css-select-base-adapter@^0.1.1: version "0.1.1" resolved "https://registry.yarnpkg.com/css-select-base-adapter/-/css-select-base-adapter-0.1.1.tgz#3b2ff4972cc362ab88561507a95408a1432135d7" @@ -7715,13 +7599,13 @@ css-tree@1.0.0-alpha.29: mdn-data "~1.1.0" source-map "^0.5.3" -css-tree@^1.0.0-alpha.39: - version "1.1.3" - resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.1.3.tgz#eb4870fb6fd7707327ec95c2ff2ab09b5e8db91d" - integrity sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q== +css-tree@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-2.0.4.tgz#be44314f17e9ac85fe894a5888941782e1123c29" + integrity sha512-b4IS9ZUMtGBiNjzYbcj9JhYbyei99R3ai2CSxlu8GQDnoPA/P+NU85hAm0eKDc/Zp660rpK6tFJQ2OSdacMHVg== dependencies: - mdn-data "2.0.14" - source-map "^0.6.1" + mdn-data "2.0.23" + source-map-js "^1.0.1" css-url-regex@^1.1.0: version "1.1.0" @@ -7910,6 +7794,11 @@ date-and-time@^0.14.1: resolved "https://registry.yarnpkg.com/date-and-time/-/date-and-time-0.14.2.tgz#a4266c3dead460f6c231fe9674e585908dac354e" integrity sha512-EFTCh9zRSEpGPmJaexg7HTuzZHh6cnJj1ui7IGCFNXzd2QdpsNh05Db5TF3xzJm30YN+A8/6xHSuRcQqoc3kFA== +date-fns@^2.28.0: + version "2.28.0" + resolved "https://registry.yarnpkg.com/date-fns/-/date-fns-2.28.0.tgz#9570d656f5fc13143e50c975a3b6bbeb46cd08b2" + integrity sha512-8d35hViGYx/QH0icHYCeLmsLmMUheMmTyV9Fcm6gvNwdw31yXXH+O85sOBJ+OLnLQMKZowvpKb6FgMIQjcpvQw== + date-time@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/date-time/-/date-time-2.1.0.tgz#0286d1b4c769633b3ca13e1e62558d2dbdc2eba2" @@ -7917,7 +7806,7 @@ date-time@^2.1.0: dependencies: time-zone "^1.0.0" -debug@2.6.9, debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.3.3, debug@^2.6.0, debug@^2.6.8, debug@^2.6.9: +debug@2.6.9, debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.3.3, debug@^2.6.0, debug@^2.6.8: version "2.6.9" resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== @@ -7931,20 +7820,20 @@ debug@^3.0.0, debug@^3.0.1, debug@^3.1.0, debug@^3.1.1: dependencies: ms "^2.1.1" -debug@^4.0.0, debug@^4.0.1, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.1.tgz#f0d229c505e0c6d8c49ac553d1b13dc183f6b2ee" - integrity 
sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ== - dependencies: - ms "2.1.2" - -debug@^4.2.0: +debug@^4.0.0, debug@^4.1.0, debug@^4.2.0: version "4.3.3" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.3.tgz#04266e0b70a98d4462e6e288e38259213332b664" integrity sha512-/zxw5+vh1Tfv+4Qn7a5nsbcJKPaSvCDhojn6FEl9vupwK2VCSDtEiEtqr8DFtzYFOdz63LBkxec7DYuc2jon6Q== dependencies: ms "2.1.2" +debug@^4.0.1, debug@^4.1.1, debug@^4.3.1: + version "4.3.1" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.1.tgz#f0d229c505e0c6d8c49ac553d1b13dc183f6b2ee" + integrity sha512-doEwdvm4PCeK4K3RQN2ZC2BYUBaxwLARCqZmMjtF8a51J2Rb0xpVloFRnCODwqjpwnAoao4pelN8l3RJdv3gRQ== + dependencies: + ms "2.1.2" + debug@^4.3.2, debug@~4.3.1, debug@~4.3.2: version "4.3.2" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.2.tgz#f0a49c18ac8779e31d4a0c6029dfb76873c7428b" @@ -7952,20 +7841,6 @@ debug@^4.3.2, debug@~4.3.1, debug@~4.3.2: dependencies: ms "2.1.2" -debug@~3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/debug/-/debug-3.1.0.tgz#5bb5a0672628b64149566ba16819e61518c67261" - integrity sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g== - dependencies: - ms "2.0.0" - -debug@~4.1.0: - version "4.1.1" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791" - integrity sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw== - dependencies: - ms "^2.1.1" - decimal.js@^10.2.0: version "10.2.1" resolved "https://registry.yarnpkg.com/decimal.js/-/decimal.js-10.2.1.tgz#238ae7b0f0c793d3e3cea410108b35a2c01426a3" @@ -7976,13 +7851,6 @@ decode-uri-component@^0.2.0: resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.0.tgz#eb3913333458775cb84cd1a1fae062106bb87545" integrity sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU= -decompress-response@^3.3.0: - version "3.3.0" - resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-3.3.0.tgz#80a4dd323748384bfa248083622aedec982adff3" - integrity sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M= - dependencies: - mimic-response "^1.0.0" - deep-is@^0.1.3, deep-is@~0.1.3: version "0.1.3" resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.3.tgz#b369d6fb5dbc13eecf524f91b070feedc357cf34" @@ -8089,13 +7957,6 @@ detect-file@^1.0.0: resolved "https://registry.yarnpkg.com/detect-file/-/detect-file-1.0.0.tgz#f0d66d03672a825cb1b73bdb3fe62310c8e552b7" integrity sha1-8NZtA2cqglyxtzvbP+YjEMjlUrc= -detect-indent@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-4.0.0.tgz#f76d064352cdf43a1cb6ce619c4ee3a9475de208" - integrity sha1-920GQ1LN9Docts5hnE7jqUdd4gg= - dependencies: - repeating "^2.0.0" - detect-indent@^6.0.0: version "6.0.0" resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-6.0.0.tgz#0abd0f549f69fc6659a254fe96786186b6f528fd" @@ -8122,16 +7983,16 @@ detect-port@^1.3.0: address "^1.0.1" debug "^2.6.0" -diff@^3.5.0: - version "3.5.0" - resolved "https://registry.yarnpkg.com/diff/-/diff-3.5.0.tgz#800c0dd1e0a8bfbc95835c202ad220fe317e5a12" - integrity sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA== - diff@^4.0.2: version "4.0.2" resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== 
+diff@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/diff/-/diff-5.0.0.tgz#7ed6ad76d859d030787ec35855f5b1daf31d852b" + integrity sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w== + diffie-hellman@^5.0.0: version "5.0.3" resolved "https://registry.yarnpkg.com/diffie-hellman/-/diffie-hellman-5.0.3.tgz#40e8ee98f55a2149607146921c63e1ae5f3d2875" @@ -8307,11 +8168,6 @@ downshift@^6.0.15: react-is "^17.0.2" tslib "^2.3.0" -duplexer3@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" - integrity sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI= - duplexer@^0.1.1: version "0.1.2" resolved "https://registry.yarnpkg.com/duplexer/-/duplexer-0.1.2.tgz#3abe43aef3835f8ae077d136ddce0f276b0400e6" @@ -8358,16 +8214,16 @@ ee-first@1.1.1: resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" integrity sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0= -electron-to-chromium@^1.3.47, electron-to-chromium@^1.3.649: - version "1.3.650" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.650.tgz#24e821fff2ed61fd71fee092f2a0631b3c0c22a6" - integrity sha512-j6pRuNylFBbroG6NB8Lw/Im9oDY74s2zWHBP5TmdYg73cBuL6cz//SMgolVa0gIJk/DSL+kO7baJ1DSXW1FUZg== - electron-to-chromium@^1.3.564, electron-to-chromium@^1.3.857: version "1.3.860" resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.3.860.tgz#d612e54ed75fa524c12af8da3ad8121ebfe2802b" integrity sha512-gWwGZ+Wv4Mou2SJRH6JQzhTPjL5f95SX7n6VkLTQ/Q/INsZLZNQ1vH2GlZjozKyvT0kkFuCmWTwIoCj+/hUDPw== +electron-to-chromium@^1.3.649, electron-to-chromium@^1.4.71: + version "1.4.72" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.72.tgz#19b871f1da8be8199b2330d694fc84fcdb72ecd9" + integrity sha512-9LkRQwjW6/wnSfevR21a3k8sOJ+XWSH7kkzs9/EUenKmuDkndP3W9y1yCZpOxufwGbX3JV8glZZSDb4o95zwXQ== + element-resize-detector@^1.2.2: version "1.2.3" resolved "https://registry.yarnpkg.com/element-resize-detector/-/element-resize-detector-1.2.3.tgz#5078d9b99398fe4c589f8c8df94ff99e5d413ff3" @@ -8412,116 +8268,98 @@ ember-assign-helper@^0.3.0: ember-cli-babel "^7.19.0" ember-cli-htmlbars "^4.3.1" -ember-assign-polyfill@^2.6.0: - version "2.7.2" - resolved "https://registry.yarnpkg.com/ember-assign-polyfill/-/ember-assign-polyfill-2.7.2.tgz#58f6f60235126cb23df248c846008fa9a3245fc1" - integrity sha512-hDSaKIZyFS0WRQsWzxUgO6pJPFfmcpfdM7CbGoMgYGriYbvkKn+k8zTXSKpTFVGehhSmsLE9YPqisQ9QpPisfA== +ember-auto-import@^1.10.1, ember-auto-import@^1.11.3, ember-auto-import@^1.2.19, ember-auto-import@^2.4.0: + version "2.4.0" + resolved "https://registry.yarnpkg.com/ember-auto-import/-/ember-auto-import-2.4.0.tgz#91c4797f08315728086e35af954cb60bd23c14bc" + integrity sha512-BwF6iTaoSmT2vJ9NEHEGRBCh2+qp+Nlaz/Q7roqNSxl5oL5iMRwenPnHhOoBPTYZvPhcV/KgXR5e+pBQ107plQ== dependencies: - ember-cli-babel "^7.20.5" - ember-cli-version-checker "^2.0.0" - -ember-auto-import@^1.10.0, ember-auto-import@^1.2.19, ember-auto-import@^1.6.0: - version "1.10.1" - resolved "https://registry.yarnpkg.com/ember-auto-import/-/ember-auto-import-1.10.1.tgz#6c93a875e494aa0a58b759867d3f20adfd514ae3" - integrity sha512-7bOWzPELlVwdWDOkB+phDIjg8BNW+/2RiLLQ+Xa/eIvCLT4ABYhHV5wqW5gs5BnXTDVLfE4ddKZdllnGuPGGDQ== - dependencies: - "@babel/core" "^7.1.6" - "@babel/preset-env" "^7.10.2" - "@babel/traverse" "^7.1.6" - "@babel/types" "^7.1.6" - "@embroider/core" "^0.33.0" - 
babel-core "^6.26.3" - babel-loader "^8.0.6" - babel-plugin-syntax-dynamic-import "^6.18.0" - babylon "^6.18.0" - broccoli-debug "^0.6.4" - broccoli-node-api "^1.7.0" - broccoli-plugin "^4.0.0" - debug "^3.1.0" - ember-cli-babel "^7.0.0" - enhanced-resolve "^4.0.0" - fs-extra "^6.0.1" - fs-tree-diff "^2.0.0" - handlebars "^4.3.1" - js-string-escape "^1.0.1" - lodash "^4.17.19" - mkdirp "^0.5.1" - resolve-package-path "^3.1.0" - rimraf "^2.6.2" - semver "^7.3.4" - symlink-or-copy "^1.2.0" - typescript-memoize "^1.0.0-alpha.3" - walk-sync "^0.3.3" - webpack "^4.43.0" - -ember-auto-import@^1.10.1: - version "1.12.1" - resolved "https://registry.yarnpkg.com/ember-auto-import/-/ember-auto-import-1.12.1.tgz#09967bd35cd56ac45f413c48deabf7cfb3a785f6" - integrity sha512-Jm0vWKNAy/wYMrdSQIrG8sRsvarIRHZ2sS/CGhMdMqVKJR48AhGU7NgPJ5SIlO/+seL2VSO+dtv7aEOEIaT6BA== - dependencies: - "@babel/core" "^7.1.6" - "@babel/preset-env" "^7.10.2" - "@babel/traverse" "^7.1.6" - "@babel/types" "^7.1.6" + "@babel/core" "^7.16.7" + "@babel/plugin-proposal-class-properties" "^7.16.7" + "@babel/plugin-proposal-decorators" "^7.16.7" + "@babel/preset-env" "^7.16.7" + "@embroider/macros" "^1.0.0" "@embroider/shared-internals" "^1.0.0" - babel-core "^6.26.3" babel-loader "^8.0.6" + babel-plugin-ember-modules-api-polyfill "^3.5.0" + babel-plugin-htmlbars-inline-precompile "^5.2.1" babel-plugin-syntax-dynamic-import "^6.18.0" - babylon "^6.18.0" broccoli-debug "^0.6.4" - broccoli-node-api "^1.7.0" + broccoli-funnel "^3.0.8" + broccoli-merge-trees "^4.2.0" broccoli-plugin "^4.0.0" broccoli-source "^3.0.0" - debug "^3.1.0" - ember-cli-babel "^7.0.0" - enhanced-resolve "^4.0.0" + css-loader "^5.2.0" + debug "^4.3.1" fs-extra "^6.0.1" fs-tree-diff "^2.0.0" handlebars "^4.3.1" js-string-escape "^1.0.1" lodash "^4.17.19" - mkdirp "^0.5.1" + mini-css-extract-plugin "^2.5.2" + parse5 "^6.0.1" + resolve "^1.20.0" resolve-package-path "^3.1.0" - rimraf "^2.6.2" semver "^7.3.4" - symlink-or-copy "^1.2.0" + style-loader "^2.0.0" typescript-memoize "^1.0.0-alpha.3" - walk-sync "^0.3.3" - webpack "^4.43.0" + walk-sync "^3.0.0" -ember-basic-dropdown@^3.0.16: - version "3.0.16" - resolved "https://registry.yarnpkg.com/ember-basic-dropdown/-/ember-basic-dropdown-3.0.16.tgz#287fcde57b5a37405d89cc65e0a4ad9a2e8e1b0b" - integrity sha512-ctVQL63nWoZ6+Lvb6aCo70SUA8ieMz5fQa0BuQKeV2LQx8njXDiZZ96gaK0PBn60glNghbIr1ZKU+wmnIT++5w== +ember-basic-dropdown@^3.0.21: + version "3.1.0" + resolved "https://registry.yarnpkg.com/ember-basic-dropdown/-/ember-basic-dropdown-3.1.0.tgz#47c292de890d1958057736c00b8eb2b8017d530b" + integrity sha512-UISvgJHfiJ8FeXqH8ZN+NmoImN8p6Sb+85qlEv853hLuEfEYnFUqLNhea8nNl9CpFqcD3yU4dKbhYtc6nB39aQ== dependencies: - "@ember/render-modifiers" "^1.0.2" - "@embroider/macros" "^0.36.0" - "@embroider/util" "^0.36.0" + "@ember/render-modifiers" "^2.0.0" + "@embroider/macros" "^0.47.2" + "@embroider/util" "^0.47.2" "@glimmer/component" "^1.0.4" "@glimmer/tracking" "^1.0.4" - ember-cli-babel "^7.23.1" - ember-cli-htmlbars "^5.3.2" - ember-cli-typescript "^4.1.0" - ember-element-helper "^0.3.2" - ember-maybe-in-element "^2.0.1" - ember-style-modifier "^0.6.0" + ember-cli-babel "^7.26.6" + ember-cli-htmlbars "^6.0.0" + ember-cli-typescript "^4.2.1" + ember-element-helper "^0.5.5" + ember-maybe-in-element "^2.0.3" + ember-style-modifier "^0.7.0" ember-truth-helpers "^2.1.0 || ^3.0.0" -ember-can@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/ember-can/-/ember-can-2.1.0.tgz#b62a56d602b41c5846e87893715476b48030490a" - integrity 
sha512-lxNOYtgH76dJq2qdbBUCvAwkBdmwFNc+Nyer/ALDHujJVgRjIaJCr6TcFJ1FDWyMKZ3srlYGfeRNvICgbe3E2Q== +ember-cache-primitive-polyfill@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/ember-cache-primitive-polyfill/-/ember-cache-primitive-polyfill-1.0.1.tgz#a27075443bd87e5af286c1cd8a7df24e3b9f6715" + integrity sha512-hSPcvIKarA8wad2/b6jDd/eU+OtKmi6uP+iYQbzi5TQpjsqV6b4QdRqrLk7ClSRRKBAtdTuutx+m+X+WlEd2lw== dependencies: - ember-cli-babel "7.13.2" - ember-inflector "3.0.1" + ember-cli-babel "^7.22.1" + ember-cli-version-checker "^5.1.1" + ember-compatibility-helpers "^1.2.1" + silent-error "^1.1.1" -ember-classic-decorator@^1.0.8: - version "1.0.8" - resolved "https://registry.yarnpkg.com/ember-classic-decorator/-/ember-classic-decorator-1.0.8.tgz#e290e5b0b1a31a569587a85a9c5c7a2f1242cabb" - integrity sha512-IsCDJ7rLsrFjYtgi9UXUmjzUQJaaJzmy/gKwGGtZ6kZwT5yhzSbScRi0P6Cb0guJPtlMMCE0sAQpJRbXmBb/gA== +ember-cached-decorator-polyfill@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/ember-cached-decorator-polyfill/-/ember-cached-decorator-polyfill-0.1.4.tgz#f1e2c65cc78d0d9c4ac0e047e643af477eb85ace" + integrity sha512-JOK7kBCWsTVCzmCefK4nr9BACDJk0owt9oIUaVt6Q0UtQ4XeAHmoK5kQ/YtDcxQF1ZevHQFdGhsTR3JLaHNJgA== dependencies: - babel-plugin-filter-imports "^3.0.0" - ember-cli-babel "^7.11.1" + "@glimmer/tracking" "^1.0.4" + ember-cache-primitive-polyfill "^1.0.1" + ember-cli-babel "^7.21.0" + ember-cli-babel-plugin-helpers "^1.1.1" + +ember-can@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/ember-can/-/ember-can-4.1.0.tgz#af3bb7838eab381e56977772dbb75bf5e16e77a9" + integrity sha512-8s0bR+rmwQ/A4kj6E7AaIAlsPs/oenQVOVgfiUTdbZO2ek57PFWMoSg6mWxDKbozoW2fSEgdhcBv/eaSflh9lQ== + dependencies: + ember-cli-babel "^7.26.6" + ember-cli-htmlbars "^6.0.0" + ember-inflector "^4.0.2" + +ember-classic-decorator@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ember-classic-decorator/-/ember-classic-decorator-3.0.0.tgz#9e31c50c40c060d23d15653cdf260f78b6cdef2a" + integrity sha512-nCCO3d06c27l1vj4P54xLYk8AwrzSlFFGX8MZmfFl/xZ9LDY0V8OphAmO9PsaYEbBkjmtUy+h9vG2Lg9qVij+g== + dependencies: + "@embroider/macros" "^1.0.0" + babel-plugin-filter-imports "^4.0.0" + ember-cli-babel "^7.26.11" + ember-cli-htmlbars "^6.0.1" ember-cli-addon-docs-yuidoc@^1.0.0: version "1.0.0" @@ -8541,131 +8379,22 @@ ember-cli-babel-plugin-helpers@^1.0.0, ember-cli-babel-plugin-helpers@^1.1.0, em resolved "https://registry.yarnpkg.com/ember-cli-babel-plugin-helpers/-/ember-cli-babel-plugin-helpers-1.1.1.tgz#5016b80cdef37036c4282eef2d863e1d73576879" integrity sha512-sKvOiPNHr5F/60NLd7SFzMpYPte/nnGkq/tMIfXejfKHIhaiIkYFqX8Z9UFTKWLLn+V7NOaby6niNPZUdvKCRw== -ember-cli-babel@7.13.2: - version "7.13.2" - resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-7.13.2.tgz#6b6f4d508cc3bb300c5711d3d02c59ba80f0f686" - integrity sha512-VH2tMXaRFkbQEyVJnxUtAyta5bAKjtcLwJ4lStW/iRk/NIlNFNJh1uOd7uL9H9Vm0f4/xR7Mc0Q7ND9ezKOo+A== - dependencies: - "@babel/core" "^7.7.0" - "@babel/plugin-proposal-class-properties" "^7.7.0" - "@babel/plugin-proposal-decorators" "^7.7.0" - "@babel/plugin-transform-modules-amd" "^7.5.0" - "@babel/plugin-transform-runtime" "^7.6.0" - "@babel/polyfill" "^7.7.0" - "@babel/preset-env" "^7.7.0" - "@babel/runtime" "^7.7.0" - amd-name-resolver "^1.2.1" - babel-plugin-debug-macros "^0.3.0" - babel-plugin-ember-modules-api-polyfill "^2.12.0" - babel-plugin-module-resolver "^3.1.1" - broccoli-babel-transpiler "^7.3.0" - broccoli-debug "^0.6.4" - broccoli-funnel "^2.0.1" - 
broccoli-source "^1.1.0" - clone "^2.1.2" - ember-cli-babel-plugin-helpers "^1.1.0" - ember-cli-version-checker "^2.1.2" - ensure-posix-path "^1.0.2" - semver "^5.5.0" - -ember-cli-babel@^6.0.0-beta.4, ember-cli-babel@^6.16.0, ember-cli-babel@^6.18.0, ember-cli-babel@^6.3.0, ember-cli-babel@^6.6.0, ember-cli-babel@^6.7.2, ember-cli-babel@^6.8.1: - version "6.18.0" - resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-6.18.0.tgz#3f6435fd275172edeff2b634ee7b29ce74318957" - integrity sha512-7ceC8joNYxY2wES16iIBlbPSxwKDBhYwC8drU3ZEvuPDMwVv1KzxCNu1fvxyFEBWhwaRNTUxSCsEVoTd9nosGA== - dependencies: - amd-name-resolver "1.2.0" - babel-plugin-debug-macros "^0.2.0-beta.6" - babel-plugin-ember-modules-api-polyfill "^2.6.0" - babel-plugin-transform-es2015-modules-amd "^6.24.0" - babel-polyfill "^6.26.0" - babel-preset-env "^1.7.0" - broccoli-babel-transpiler "^6.5.0" - broccoli-debug "^0.6.4" - broccoli-funnel "^2.0.0" - broccoli-source "^1.1.0" - clone "^2.0.0" - ember-cli-version-checker "^2.1.2" - semver "^5.5.0" - -ember-cli-babel@^7.0.0, ember-cli-babel@^7.1.2, ember-cli-babel@^7.1.3, ember-cli-babel@^7.10.0, ember-cli-babel@^7.11.0, ember-cli-babel@^7.11.1, ember-cli-babel@^7.12.0, ember-cli-babel@^7.13.0, ember-cli-babel@^7.19.0, ember-cli-babel@^7.20.5, ember-cli-babel@^7.21.0, ember-cli-babel@^7.22.1, ember-cli-babel@^7.23.0, ember-cli-babel@^7.5.0, ember-cli-babel@^7.7.3: - version "7.23.1" - resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-7.23.1.tgz#d1517228ede08a5d4b045c78a7429728e956b30b" - integrity sha512-qYggmt3hRs6QJ6cRkww3ahMpyP8IEV2KFrIRO/Z6hu9MkE/8Y28Xd5NjQl6fPV3oLoG0vwuHzhNe3Jr7Wec8zw== +ember-cli-babel@^7.0.0, ember-cli-babel@^7.1.2, ember-cli-babel@^7.1.3, ember-cli-babel@^7.10.0, ember-cli-babel@^7.11.1, ember-cli-babel@^7.13.0, ember-cli-babel@^7.13.2, ember-cli-babel@^7.17.2, ember-cli-babel@^7.19.0, ember-cli-babel@^7.21.0, ember-cli-babel@^7.22.1, ember-cli-babel@^7.23.0, ember-cli-babel@^7.23.1, ember-cli-babel@^7.24.0, ember-cli-babel@^7.26.0, ember-cli-babel@^7.26.10, ember-cli-babel@^7.26.11, ember-cli-babel@^7.26.3, ember-cli-babel@^7.26.4, ember-cli-babel@^7.26.5, ember-cli-babel@^7.26.6, ember-cli-babel@^7.5.0, ember-cli-babel@^7.7.3: + version "7.26.11" + resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-7.26.11.tgz#50da0fe4dcd99aada499843940fec75076249a9f" + integrity sha512-JJYeYjiz/JTn34q7F5DSOjkkZqy8qwFOOxXfE6pe9yEJqWGu4qErKxlz8I22JoVEQ/aBUO+OcKTpmctvykM9YA== dependencies: "@babel/core" "^7.12.0" "@babel/helper-compilation-targets" "^7.12.0" - "@babel/plugin-proposal-class-properties" "^7.10.4" - "@babel/plugin-proposal-decorators" "^7.10.5" - "@babel/plugin-transform-modules-amd" "^7.10.5" - "@babel/plugin-transform-runtime" "^7.12.0" - "@babel/plugin-transform-typescript" "^7.12.0" - "@babel/polyfill" "^7.11.5" - "@babel/preset-env" "^7.12.0" - "@babel/runtime" "^7.12.0" - amd-name-resolver "^1.2.1" - babel-plugin-debug-macros "^0.3.3" - babel-plugin-ember-data-packages-polyfill "^0.1.2" - babel-plugin-ember-modules-api-polyfill "^3.2.1" - babel-plugin-module-resolver "^3.1.1" - broccoli-babel-transpiler "^7.8.0" - broccoli-debug "^0.6.4" - broccoli-funnel "^2.0.1" - broccoli-source "^1.1.0" - clone "^2.1.2" - ember-cli-babel-plugin-helpers "^1.1.1" - ember-cli-version-checker "^4.1.0" - ensure-posix-path "^1.0.2" - fixturify-project "^1.10.0" - rimraf "^3.0.1" - semver "^5.5.0" - -ember-cli-babel@^7.17.2, ember-cli-babel@^7.23.1: - version "7.26.2" - resolved 
"https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-7.26.2.tgz#497985e741ffcc08f89f98c9464509e91cdb2809" - integrity sha512-bSSlFbUBfLwaabGpLgoLkOKMIdDRWu3cPBNrN2UQXfzlPei3nZblDatSzPbCZ7O5faJHRi13ra3Q4odnCoBtTg== - dependencies: - "@babel/core" "^7.12.0" - "@babel/helper-compilation-targets" "^7.12.0" - "@babel/plugin-proposal-class-properties" "^7.13.0" + "@babel/plugin-proposal-class-properties" "^7.16.5" "@babel/plugin-proposal-decorators" "^7.13.5" + "@babel/plugin-proposal-private-methods" "^7.16.5" + "@babel/plugin-proposal-private-property-in-object" "^7.16.5" "@babel/plugin-transform-modules-amd" "^7.13.0" "@babel/plugin-transform-runtime" "^7.13.9" "@babel/plugin-transform-typescript" "^7.13.0" "@babel/polyfill" "^7.11.5" - "@babel/preset-env" "^7.12.0" - "@babel/runtime" "7.12.18" - amd-name-resolver "^1.3.1" - babel-plugin-debug-macros "^0.3.4" - babel-plugin-ember-data-packages-polyfill "^0.1.2" - babel-plugin-ember-modules-api-polyfill "^3.5.0" - babel-plugin-module-resolver "^3.2.0" - broccoli-babel-transpiler "^7.8.0" - broccoli-debug "^0.6.4" - broccoli-funnel "^2.0.2" - broccoli-source "^2.1.2" - clone "^2.1.2" - ember-cli-babel-plugin-helpers "^1.1.1" - ember-cli-version-checker "^4.1.0" - ensure-posix-path "^1.0.2" - fixturify-project "^1.10.0" - resolve-package-path "^3.1.0" - rimraf "^3.0.1" - semver "^5.5.0" - -ember-cli-babel@^7.18.0, ember-cli-babel@^7.26.6, ember-cli-babel@^7.4.0: - version "7.26.6" - resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-7.26.6.tgz#322fbbd3baad9dd99e3276ff05bc6faef5e54b39" - integrity sha512-040svtfj2RC35j/WMwdWJFusZaXmNoytLAMyBDGLMSlRvznudTxZjGlPV6UupmtTBApy58cEF8Fq4a+COWoEmQ== - dependencies: - "@babel/core" "^7.12.0" - "@babel/helper-compilation-targets" "^7.12.0" - "@babel/plugin-proposal-class-properties" "^7.13.0" - "@babel/plugin-proposal-decorators" "^7.13.5" - "@babel/plugin-transform-modules-amd" "^7.13.0" - "@babel/plugin-transform-runtime" "^7.13.9" - "@babel/plugin-transform-typescript" "^7.13.0" - "@babel/polyfill" "^7.11.5" - "@babel/preset-env" "^7.12.0" + "@babel/preset-env" "^7.16.5" "@babel/runtime" "7.12.18" amd-name-resolver "^1.3.1" babel-plugin-debug-macros "^0.3.4" @@ -8676,6 +8405,7 @@ ember-cli-babel@^7.18.0, ember-cli-babel@^7.26.6, ember-cli-babel@^7.4.0: broccoli-debug "^0.6.4" broccoli-funnel "^2.0.2" broccoli-source "^2.1.2" + calculate-cache-key-for-tree "^2.0.0" clone "^2.1.2" ember-cli-babel-plugin-helpers "^1.1.1" ember-cli-version-checker "^4.1.0" @@ -8707,15 +8437,15 @@ ember-cli-dependency-checker@^3.2.0: resolve "^1.5.0" semver "^5.3.0" -ember-cli-deprecation-workflow@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/ember-cli-deprecation-workflow/-/ember-cli-deprecation-workflow-1.0.1.tgz#3305a6879af7f074216a54963d92491c411ce7e0" - integrity sha512-tns8l4FLz8zmhmNRH7ywihs4XNTTuQysl+POYTpiyjb4zPNKv0cUJBCT/MklYFWBCo/5DcVzabhLODJZcScUfg== +ember-cli-deprecation-workflow@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/ember-cli-deprecation-workflow/-/ember-cli-deprecation-workflow-2.1.0.tgz#f0d38ece7ac0ab7b3f83790a3a092e3472f58cff" + integrity sha512-Ay9P9iKMJdY4Gq5XPowh3HqqeAzLfwBRj1oB1ZKkDW1fryZQWBN4pZuRnjnB+3VWZjBnZif5e7Pacc7YNW9hWg== dependencies: - broccoli-funnel "^2.0.1" - broccoli-merge-trees "^3.0.1" - broccoli-plugin "^1.3.1" - ember-debug-handlers-polyfill "^1.1.1" + broccoli-funnel "^3.0.3" + broccoli-merge-trees "^4.2.0" + broccoli-plugin "^4.0.5" + ember-cli-htmlbars "^5.3.2" ember-cli-funnel@^0.6.1: version "0.6.1" 
@@ -8729,17 +8459,6 @@ ember-cli-get-component-path-option@^1.0.0: resolved "https://registry.yarnpkg.com/ember-cli-get-component-path-option/-/ember-cli-get-component-path-option-1.0.0.tgz#0d7b595559e2f9050abed804f1d8eff1b08bc771" integrity sha1-DXtZVVni+QUKvtgE8djv8bCLx3E= -ember-cli-htmlbars-inline-precompile@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/ember-cli-htmlbars-inline-precompile/-/ember-cli-htmlbars-inline-precompile-2.1.0.tgz#61b91ff1879d44ae504cadb46fb1f2604995ae08" - integrity sha512-BylIHduwQkncPhnj0ZyorBuljXbTzLgRo6kuHf1W+IHFxThFl2xG+r87BVwsqx4Mn9MTgW9SE0XWjwBJcSWd6Q== - dependencies: - babel-plugin-htmlbars-inline-precompile "^1.0.0" - ember-cli-version-checker "^2.1.2" - hash-for-dep "^1.2.3" - heimdalljs-logger "^0.1.9" - silent-error "^1.1.0" - ember-cli-htmlbars@^3.0.1: version "3.1.0" resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-3.1.0.tgz#87806c2a0bca2ab52d4fb8af8e2215c1ca718a99" @@ -8750,7 +8469,7 @@ ember-cli-htmlbars@^3.0.1: json-stable-stringify "^1.0.1" strip-bom "^3.0.0" -ember-cli-htmlbars@^4.3.1: +ember-cli-htmlbars@^4.0.0, ember-cli-htmlbars@^4.3.1: version "4.5.0" resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-4.5.0.tgz#d299e4f7eba6f30dc723ee086906cc550beb252e" integrity sha512-bYJpK1pqFu9AadDAGTw05g2LMNzY8xTCIqQm7dMJmKEoUpLRFbPf4SfHXrktzDh7Q5iggl6Skzf1M0bPlIxARw== @@ -8770,10 +8489,10 @@ ember-cli-htmlbars@^4.3.1: strip-bom "^4.0.0" walk-sync "^2.0.2" -ember-cli-htmlbars@^5.1.0, ember-cli-htmlbars@^5.3.1, ember-cli-htmlbars@^5.3.2, ember-cli-htmlbars@^5.6.3, ember-cli-htmlbars@^5.7.1: - version "5.7.1" - resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-5.7.1.tgz#eb5b88c7d9083bc27665fb5447a9b7503b32ce4f" - integrity sha512-9laCgL4tSy48orNoQgQKEHp93MaqAs9ZOl7or5q+8iyGGJHW6sVXIYrVv5/5O9HfV6Ts8/pW1rSoaeKyLUE+oA== +ember-cli-htmlbars@^5.1.0, ember-cli-htmlbars@^5.7.2: + version "5.7.2" + resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-5.7.2.tgz#e0cd2fb3c20d85fe4c3e228e6f0590ee1c645ba8" + integrity sha512-Uj6R+3TtBV5RZoJY14oZn/sNPnc+UgmC8nb5rI4P3fR/gYoyTFIZSXiIM7zl++IpMoIrocxOrgt+mhonKphgGg== dependencies: "@ember/edition-utils" "^1.2.0" babel-plugin-htmlbars-inline-precompile "^5.0.0" @@ -8813,15 +8532,58 @@ ember-cli-htmlbars@^5.2.0: strip-bom "^4.0.0" walk-sync "^2.2.0" +ember-cli-htmlbars@^5.3.1, ember-cli-htmlbars@^5.3.2, ember-cli-htmlbars@^5.6.3, ember-cli-htmlbars@^5.7.1: + version "5.7.1" + resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-5.7.1.tgz#eb5b88c7d9083bc27665fb5447a9b7503b32ce4f" + integrity sha512-9laCgL4tSy48orNoQgQKEHp93MaqAs9ZOl7or5q+8iyGGJHW6sVXIYrVv5/5O9HfV6Ts8/pW1rSoaeKyLUE+oA== + dependencies: + "@ember/edition-utils" "^1.2.0" + babel-plugin-htmlbars-inline-precompile "^5.0.0" + broccoli-debug "^0.6.5" + broccoli-persistent-filter "^3.1.2" + broccoli-plugin "^4.0.3" + common-tags "^1.8.0" + ember-cli-babel-plugin-helpers "^1.1.1" + ember-cli-version-checker "^5.1.2" + fs-tree-diff "^2.0.1" + hash-for-dep "^1.5.1" + heimdalljs-logger "^0.1.10" + json-stable-stringify "^1.0.1" + semver "^7.3.4" + silent-error "^1.1.1" + strip-bom "^4.0.0" + walk-sync "^2.2.0" + +ember-cli-htmlbars@^6.0.0, ember-cli-htmlbars@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-6.0.1.tgz#5487831d477e61682bc867fd138808269e5d2152" + integrity 
sha512-IDsl9uty+MXtMfp/BUTEc/Q36EmlHYj8ZdPekcoRa8hmdsigHnK4iokfaB7dJFktlf6luruei+imv7JrJrBQPQ== + dependencies: + "@ember/edition-utils" "^1.2.0" + babel-plugin-ember-template-compilation "^1.0.0" + babel-plugin-htmlbars-inline-precompile "^5.3.0" + broccoli-debug "^0.6.5" + broccoli-persistent-filter "^3.1.2" + broccoli-plugin "^4.0.3" + ember-cli-version-checker "^5.1.2" + fs-tree-diff "^2.0.1" + hash-for-dep "^1.5.1" + heimdalljs-logger "^0.1.10" + json-stable-stringify "^1.0.1" + semver "^7.3.4" + silent-error "^1.1.1" + strip-bom "^4.0.0" + walk-sync "^2.2.0" + ember-cli-import-polyfill@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/ember-cli-import-polyfill/-/ember-cli-import-polyfill-0.2.0.tgz#c1a08a8affb45c97b675926272fe78cf4ca166f2" integrity sha1-waCKiv+0XJe2dZJicv54z0yhZvI= -ember-cli-inject-live-reload@^2.0.2: - version "2.0.2" - resolved "https://registry.yarnpkg.com/ember-cli-inject-live-reload/-/ember-cli-inject-live-reload-2.0.2.tgz#95edb543b386239d35959e5ea9579f5382976ac7" - integrity sha512-HDD6o/kBHT/kUtazklU0OW23q2jigIN42QmcpFdXUSvJ2/2SYA6yIqSUxWfJgISmtn5gTNZ2KPq1p3dLkhJxSQ== +ember-cli-inject-live-reload@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/ember-cli-inject-live-reload/-/ember-cli-inject-live-reload-2.1.0.tgz#ef63c733c133024d5726405a3c247fa12e88a385" + integrity sha512-YV5wYRD5PJHmxaxaJt18u6LE6Y+wo455BnmcpN+hGNlChy2piM9/GMvYgTAz/8Vin8RJ5KekqP/w/NEaRndc/A== dependencies: clean-base-url "^1.0.0" ember-cli-version-checker "^3.1.3" @@ -8836,22 +8598,23 @@ ember-cli-lodash-subset@^2.0.1: resolved "https://registry.yarnpkg.com/ember-cli-lodash-subset/-/ember-cli-lodash-subset-2.0.1.tgz#20cb68a790fe0fde2488ddfd8efbb7df6fe766f2" integrity sha1-IMtop5D+D94kiN39jvu332/nZvI= -ember-cli-mirage@^1.1.2: - version "1.1.8" - resolved "https://registry.yarnpkg.com/ember-cli-mirage/-/ember-cli-mirage-1.1.8.tgz#b3874a0f2eb83d8f341416d3a691ff82735c629c" - integrity sha512-i0/CNwdyza+o+0RW92i/dEhAqTcyoAJ9jGKKiOLK74RwDG0Bgt5w/CdqKfIFBf/lIGPXHPveneWsyckeSiVr4A== +ember-cli-mirage@2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/ember-cli-mirage/-/ember-cli-mirage-2.2.0.tgz#38f4ec02536dd50ecdb265da2abbf7986d66b091" + integrity sha512-w+DrFEGuuLyHzJwOVkG0yOLvgwYezaMBNvvZJQzQkv1W3CsdhllkY1ZauYgL0dhrmYJwRFtp8DnaPQwBTDCSfA== dependencies: + "@embroider/macros" "^0.40.0" broccoli-file-creator "^2.1.1" - broccoli-funnel "^2.0.2" - broccoli-merge-trees "^3.0.2" + broccoli-funnel "^3.0.3" + broccoli-merge-trees "^4.2.0" ember-auto-import "^1.2.19" ember-cli-babel "^7.5.0" - ember-get-config "^0.2.2" - ember-inflector "^2.0.0 || ^3.0.0" + ember-get-config "^0.2.4 || ^0.3.0" + ember-inflector "^2.0.0 || ^3.0.0 || ^4.0.0" lodash-es "^4.17.11" miragejs "^0.1.31" -ember-cli-moment-shim@^3.5.0: +ember-cli-moment-shim@^3.8.0: version "3.8.0" resolved "https://registry.yarnpkg.com/ember-cli-moment-shim/-/ember-cli-moment-shim-3.8.0.tgz#dc61bbac9dce4963394e60dd42726d4ba38e2bc1" integrity sha512-dN5ImjrjZevEqB7xhwFXaPWwxdKGSFiR1kqy9gDVB+A5EGnhCL1uveKugcyJE/MICVhXUAHBUu6G2LFWEPF2YA== @@ -8887,17 +8650,18 @@ ember-cli-normalize-entity-name@^1.0.0: dependencies: silent-error "^1.0.0" -ember-cli-page-object@^1.17.2: - version "1.17.5" - resolved "https://registry.yarnpkg.com/ember-cli-page-object/-/ember-cli-page-object-1.17.5.tgz#a6b555470cc36e40b622047c6bcf6319deb6ae07" - integrity sha512-1fW4TF4JmoNDtH3V5wuSbcvlFQtl15unGJdOUjbP3HMl/pPDqGeTo4o0Bnoj8LQwARua5A34T5FNWWbAN9YHfQ== +ember-cli-page-object@^2.0.0-beta.3: + version "2.0.0-beta.3" + resolved 
"https://registry.yarnpkg.com/ember-cli-page-object/-/ember-cli-page-object-2.0.0-beta.3.tgz#49ecc2239f061d801ad208d6a933db1d3906c088" + integrity sha512-swUbl4HnHEK3H6s+ivhEN+RBNgznIxHt3AHT4qQqSIQFrUdEck/s1/Z5lDl1rPOxuWvz5nnKt+W4GOu27muTMA== dependencies: + "@babel/preset-env" "^7.12.11" + "@ro0gr/ceibo" "^2.2.0" broccoli-file-creator "^2.1.1" broccoli-merge-trees "^2.0.0" - ceibo "~2.0.0" - ember-cli-babel "^6.16.0" - ember-cli-node-assets "^0.2.2" - ember-native-dom-helpers "^0.7.0" + ember-auto-import "^2.4.0" + ember-cli-babel "^7.24.0" + ember-cli-typescript "^4.1.0" jquery "^3.4.1" rsvp "^4.7.0" @@ -8933,23 +8697,25 @@ ember-cli-sri@^2.1.1: dependencies: broccoli-sri-hash "^2.1.0" -ember-cli-string-helpers@^1.5.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/ember-cli-string-helpers/-/ember-cli-string-helpers-1.10.0.tgz#6ee6c18d15759acb0905aa0153fe9e031a382fa4" - integrity sha512-z2eNT7BsTNSxp3qNrv7KAxjPwdLC1kIYCck9CERg0RM5vBGy2vK6ozZE3U6nWrtth1xO4PrYkgISwhSgN8NMeg== +ember-cli-string-helpers@^6.1.0: + version "6.1.0" + resolved "https://registry.yarnpkg.com/ember-cli-string-helpers/-/ember-cli-string-helpers-6.1.0.tgz#aeb96112bb91c540b869ed8b9c680f7fd5859cb6" + integrity sha512-Lw8B6MJx2n8CNF2TSIKs+hWLw0FqSYjr2/NRPyquyYA05qsl137WJSYW3ZqTsLgoinHat0DGF2qaCXocLhLmyA== dependencies: - broccoli-funnel "^1.0.1" - ember-cli-babel "^6.6.0" + "@babel/core" "^7.13.10" + broccoli-funnel "^3.0.3" + ember-cli-babel "^7.7.3" + resolve "^1.20.0" ember-cli-string-utils@^1.0.0, ember-cli-string-utils@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/ember-cli-string-utils/-/ember-cli-string-utils-1.1.0.tgz#39b677fc2805f55173735376fcef278eaa4452a1" integrity sha1-ObZ3/CgF9VFzc1N2/O8njqpEUqE= -ember-cli-terser@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/ember-cli-terser/-/ember-cli-terser-4.0.1.tgz#0da0b8f2b09989e8c992b207553ddec1bbb65915" - integrity sha512-vvp0uVl8reYeW9EZjSXRPR3Bq7y4u9CYlUdI7j/WzMPDj3/gUHU4Z7CHYOCrftrClQvFfqO2eXmHwDA6F7SLug== +ember-cli-terser@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/ember-cli-terser/-/ember-cli-terser-4.0.2.tgz#c436a9e4159f76a615b051cba0584844652b7dcd" + integrity sha512-Ej77K+YhCZImotoi/CU2cfsoZaswoPlGaM5TB3LvjvPDlVPRhxUHO2RsaUVC5lsGeRLRiHCOxVtoJ6GyqexzFA== dependencies: broccoli-terser-sourcemap "^4.1.0" @@ -8960,12 +8726,12 @@ ember-cli-test-info@^1.0.0: dependencies: ember-cli-string-utils "^1.0.0" -ember-cli-test-loader@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/ember-cli-test-loader/-/ember-cli-test-loader-2.2.0.tgz#3fb8d5d1357e4460d3f0a092f5375e71b6f7c243" - integrity sha512-mlSXX9SciIRwGkFTX6XGyJYp4ry6oCFZRxh5jJ7VH8UXLTNx2ZACtDTwaWtNhYrWXgKyiDUvmD8enD56aePWRA== +ember-cli-test-loader@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ember-cli-test-loader/-/ember-cli-test-loader-3.0.0.tgz#1c036fc48de36155355fcda3266af63f977826f1" + integrity sha512-wfFRBrfO9gaKScYcdQxTfklx9yp1lWK6zv1rZRpkas9z2SHyJojF7NOQRWQgSB3ypm7vfpiF8VsFFVVr7VBzAQ== dependencies: - ember-cli-babel "^6.8.1" + ember-cli-babel "^7.13.2" ember-cli-typescript@3.0.0: version "3.0.0" @@ -9002,7 +8768,7 @@ ember-cli-typescript@^2.0.2: stagehand "^1.0.0" walk-sync "^1.0.0" -ember-cli-typescript@^3.0.0, ember-cli-typescript@^3.1.3, ember-cli-typescript@^3.1.4: +ember-cli-typescript@^3.0.0, ember-cli-typescript@^3.1.4: version "3.1.4" resolved "https://registry.yarnpkg.com/ember-cli-typescript/-/ember-cli-typescript-3.1.4.tgz#21d6ccd670d1f2e34c9cce68c6e32c442f46806b" integrity 
sha512-HJ73kL45OGRmIkPhBNFt31I1SGUvdZND+LCH21+qpq3pPlFpJG8GORyXpP+2ze8PbnITNLzwe5AwUrpyuRswdQ== @@ -9022,7 +8788,7 @@ ember-cli-typescript@^3.0.0, ember-cli-typescript@^3.1.3, ember-cli-typescript@^ stagehand "^1.0.0" walk-sync "^2.0.0" -ember-cli-typescript@^4.0.0: +ember-cli-typescript@^4.0.0, ember-cli-typescript@^4.1.0, ember-cli-typescript@^4.2.0, ember-cli-typescript@^4.2.1: version "4.2.1" resolved "https://registry.yarnpkg.com/ember-cli-typescript/-/ember-cli-typescript-4.2.1.tgz#54d08fc90318cc986f3ea562f93ce58a6cc4c24d" integrity sha512-0iKTZ+/wH6UB/VTWKvGuXlmwiE8HSIGcxHamwNhEC5x1mN3z8RfvsFZdQWYUzIWFN2Tek0gmepGRPTwWdBYl/A== @@ -9038,23 +8804,7 @@ ember-cli-typescript@^4.0.0: stagehand "^1.0.0" walk-sync "^2.2.0" -ember-cli-typescript@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/ember-cli-typescript/-/ember-cli-typescript-4.1.0.tgz#2ff17be2e6d26b58c88b1764cb73887e7176618b" - integrity sha512-zSuKG8IQuYE3vS+c7V0mHJqwrN/4Wo9Wr50+0NUjnZH3P99ChynczQHu/P7WSifkO6pF6jaxwzf09XzWvG8sVw== - dependencies: - ansi-to-html "^0.6.6" - broccoli-stew "^3.0.0" - debug "^4.0.0" - execa "^4.0.0" - fs-extra "^9.0.1" - resolve "^1.5.0" - rsvp "^4.8.1" - semver "^7.3.2" - stagehand "^1.0.0" - walk-sync "^2.2.0" - -ember-cli-version-checker@^2.0.0, ember-cli-version-checker@^2.1.0, ember-cli-version-checker@^2.1.2: +ember-cli-version-checker@^2.1.0, ember-cli-version-checker@^2.1.2: version "2.2.0" resolved "https://registry.yarnpkg.com/ember-cli-version-checker/-/ember-cli-version-checker-2.2.0.tgz#47771b731fe0962705e27c8199a9e3825709f3b3" integrity sha512-G+KtYIVlSOWGcNaTFHk76xR4GdzDLzAS4uxZUKdASuFX0KJE43C6DaqL+y3VTpUFLI2FIkAS6HZ4I1YBi+S3hg== @@ -9088,26 +8838,26 @@ ember-cli-version-checker@^5.1.1, ember-cli-version-checker@^5.1.2: semver "^7.3.4" silent-error "^1.1.1" -ember-cli@~3.20.2: - version "3.20.2" - resolved "https://registry.yarnpkg.com/ember-cli/-/ember-cli-3.20.2.tgz#4a5411afc32a91eb81e4fa1e6f4bc950e838c132" - integrity sha512-8ggRX+NXD9VkQt/9/GSMLcnswWNYTgE7Aw1uelexHdxGA1TqcjOjQ07ljVmgRwYVieknhDzhK3M0u3Xoa3x0HA== +ember-cli@~3.28.5: + version "3.28.5" + resolved "https://registry.yarnpkg.com/ember-cli/-/ember-cli-3.28.5.tgz#9c91266e412f6d537b47a88e4b63a2b56db35483" + integrity sha512-Y/UdbUOTeKHGMCP3XtE5g14JUTYyeQTdjPvHuv11FFx5HQBtHqqWLY6U1ivMDukDkQ4i2v6TyaUcKVo4e8PtyQ== dependencies: - "@babel/core" "^7.10.2" - "@babel/plugin-transform-modules-amd" "^7.10.4" + "@babel/core" "^7.13.8" + "@babel/plugin-transform-modules-amd" "^7.12.1" amd-name-resolver "^1.3.1" - babel-plugin-module-resolver "^4.0.0" + babel-plugin-module-resolver "^4.1.0" bower-config "^1.4.3" bower-endpoint-parser "0.2.2" - broccoli "^3.4.2" + broccoli "^3.5.1" broccoli-amd-funnel "^2.0.1" - broccoli-babel-transpiler "^7.6.0" + broccoli-babel-transpiler "^7.8.0" broccoli-builder "^0.18.14" - broccoli-concat "^4.2.4" + broccoli-concat "^4.2.5" broccoli-config-loader "^1.0.1" broccoli-config-replace "^1.1.2" broccoli-debug "^0.6.5" - broccoli-funnel "^2.0.2" + broccoli-funnel "^3.0.5" broccoli-funnel-reducer "^1.0.0" broccoli-merge-trees "^3.0.2" broccoli-middleware "^2.1.1" @@ -9116,74 +8866,87 @@ ember-cli@~3.20.2: broccoli-stew "^3.0.0" calculate-cache-key-for-tree "^2.0.0" capture-exit "^2.0.0" - chalk "^4.0.0" + chalk "^4.1.0" ci-info "^2.0.0" clean-base-url "^1.0.0" compression "^1.7.4" configstore "^5.0.1" - console-ui "^3.1.1" + console-ui "^3.1.2" core-object "^3.1.5" dag-map "^2.0.2" - diff "^4.0.2" + diff "^5.0.0" ember-cli-is-package-missing "^1.0.0" ember-cli-lodash-subset "^2.0.1" 
ember-cli-normalize-entity-name "^1.0.0" ember-cli-preprocess-registry "^3.3.0" ember-cli-string-utils "^1.1.0" - ember-source-channel-url "^2.0.1" + ember-source-channel-url "^3.0.0" ensure-posix-path "^1.1.1" - execa "^1.0.0" + execa "^5.0.0" exit "^0.1.2" express "^4.17.1" filesize "^6.1.0" - find-up "^4.1.0" - find-yarn-workspace-root "^1.2.1" - fixturify-project "^2.1.0" - fs-extra "^9.0.0" + find-up "^5.0.0" + find-yarn-workspace-root "^2.0.0" + fixturify-project "^2.1.1" + fs-extra "^9.1.0" fs-tree-diff "^2.0.1" get-caller-file "^2.0.5" git-repo-info "^2.1.1" glob "^7.1.6" heimdalljs "^0.2.6" - heimdalljs-fs-monitor "^0.2.3" + heimdalljs-fs-monitor "^1.1.0" heimdalljs-graph "^1.0.0" heimdalljs-logger "^0.1.10" http-proxy "^1.18.1" inflection "^1.12.0" is-git-url "^1.0.0" + is-language-code "^2.0.0" isbinaryfile "^4.0.6" - js-yaml "^3.13.1" + js-yaml "^3.14.0" json-stable-stringify "^1.0.1" leek "0.0.24" lodash.template "^4.5.0" - markdown-it "^11.0.0" + markdown-it "^12.0.4" markdown-it-terminal "0.2.1" minimatch "^3.0.4" morgan "^1.10.0" nopt "^3.0.6" - npm-package-arg "^8.0.1" + npm-package-arg "^8.1.1" p-defer "^3.0.0" - portfinder "^1.0.26" + portfinder "^1.0.28" promise-map-series "^0.3.0" - promise.hash.helper "^1.0.6" + promise.hash.helper "^1.0.7" quick-temp "^0.1.8" - resolve "^1.17.0" - resolve-package-path "^2.0.0" + resolve "^1.20.0" + resolve-package-path "^3.1.0" sane "^4.1.0" - semver "^7.3.2" + semver "^7.3.4" silent-error "^1.1.1" - sort-package-json "^1.44.0" + sort-package-json "^1.49.0" symlink-or-copy "^1.3.1" - temp "0.9.1" - testem "^3.1.0" - tiny-lr "^1.1.1" + temp "0.9.4" + testem "^3.2.0" + tiny-lr "^2.0.0" tree-sync "^2.1.0" - uuid "^8.1.0" + uuid "^8.3.2" walk-sync "^2.2.0" watch-detector "^1.0.0" + workerpool "^6.1.4" yam "^1.0.0" -ember-compatibility-helpers@^1.1.1, ember-compatibility-helpers@^1.1.2, ember-compatibility-helpers@^1.2.0, ember-compatibility-helpers@^1.2.1: +ember-compatibility-helpers@^1.1.2, ember-compatibility-helpers@^1.2.5: + version "1.2.6" + resolved "https://registry.yarnpkg.com/ember-compatibility-helpers/-/ember-compatibility-helpers-1.2.6.tgz#603579ab2fb14be567ef944da3fc2d355f779cd8" + integrity sha512-2UBUa5SAuPg8/kRVaiOfTwlXdeVweal1zdNPibwItrhR0IvPrXpaqwJDlEZnWKEoB+h33V0JIfiWleSG6hGkkA== + dependencies: + babel-plugin-debug-macros "^0.2.0" + ember-cli-version-checker "^5.1.1" + find-up "^5.0.0" + fs-extra "^9.1.0" + semver "^5.4.1" + +ember-compatibility-helpers@^1.2.0, ember-compatibility-helpers@^1.2.1: version "1.2.2" resolved "https://registry.yarnpkg.com/ember-compatibility-helpers/-/ember-compatibility-helpers-1.2.2.tgz#839e0c24190b7a2ec8c39b80e030811b1a95b6d3" integrity sha512-EKyCGOGBvKkBsk6wKfg3GhjTvTTkcEwzl/cv4VYvZM18cihmjGNpliR4BymWsKRWrv4VJLyq15Vhk3NHkSNBag== @@ -9192,14 +8955,14 @@ ember-compatibility-helpers@^1.1.1, ember-compatibility-helpers@^1.1.2, ember-co ember-cli-version-checker "^5.1.1" semver "^5.4.1" -ember-composable-helpers@^4.4.1: - version "4.4.1" - resolved "https://registry.yarnpkg.com/ember-composable-helpers/-/ember-composable-helpers-4.4.1.tgz#968f0ef72731cc300b377c552f36f20881911472" - integrity sha512-MVx4KGFL6JzsYfCf9OqLCCnr7DN5tG2jFW9EvosvfgCL7gRdNxLqewR4PWPYA882wetmJ9zvcIEUJhFzZ4deaw== +ember-composable-helpers@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/ember-composable-helpers/-/ember-composable-helpers-5.0.0.tgz#055bab3a3e234ab2917499b1465e968c253ca885" + integrity sha512-gyUrjiSju4QwNrsCLbBpP0FL6VDFZaELNW7Kbcp60xXhjvNjncYgzm4zzYXhT+i1lLA6WEgRZ3lOGgyBORYD0w== 
dependencies: "@babel/core" "^7.0.0" broccoli-funnel "2.0.1" - ember-cli-babel "^7.11.1" + ember-cli-babel "^7.26.3" resolve "^1.10.0" ember-concurrency-decorators@^2.0.0: @@ -9223,33 +8986,28 @@ ember-concurrency-decorators@^2.0.0: ember-compatibility-helpers "^1.2.0" ember-destroyable-polyfill "^2.0.2" -ember-concurrency@^1.0.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/ember-concurrency/-/ember-concurrency-1.3.0.tgz#66f90fb792687470bcee1172adc0ebf33f5e8b9c" - integrity sha512-DwGlfWFpYyAkTwsedlEtK4t1DznJSculAW6Vq5S1C0shVPc5b6tTpHB2FFYisannSYkm+wpm1f1Pd40qiNPtOQ== +ember-concurrency@^2.2.1: + version "2.2.1" + resolved "https://registry.yarnpkg.com/ember-concurrency/-/ember-concurrency-2.2.1.tgz#4ed2e167036d00f7142312bc93c16f13ed9a259c" + integrity sha512-a4283Yq+jimxqoD5YaxQu7cXePHKqkNQfsT4fs0nYTz5PYbUd6wzUtelp6k8R1JTNPwDdxyVvUgu7yYoC8Sk5A== dependencies: - ember-cli-babel "^7.7.3" + "@glimmer/tracking" "^1.0.4" + ember-cli-babel "^7.26.6" + ember-cli-htmlbars "^5.7.1" ember-compatibility-helpers "^1.2.0" - ember-maybe-import-regenerator "^0.1.6" + ember-destroyable-polyfill "^2.0.2" -ember-copy@2.0.1: +ember-copy@2.0.1, ember-copy@^2.0.1: version "2.0.1" resolved "https://registry.yarnpkg.com/ember-copy/-/ember-copy-2.0.1.tgz#13192b12a250324bb4a8b4547a680b113f4e3041" integrity sha512-N/XFvZszrzyyX4IcNoeK4mJvIItNuONumhPLqi64T8NDjJkxBj4Pq61rvMkJx/9eZ8alzE4I8vYKOLxT0FvRuQ== dependencies: ember-cli-babel "^7.22.1" -ember-copy@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/ember-copy/-/ember-copy-1.0.0.tgz#426554ba6cf65920f31d24d0a3ca2cb1be16e4aa" - integrity sha512-aiZNAvOmdemHdvZNn0b5b/0d9g3JFpcOsrDgfhYEbfd7SzE0b69YiaVK2y3wjqfjuuiA54vOllGN4pjSzECNSw== - dependencies: - ember-cli-babel "^6.6.0" - -ember-data-model-fragments@5.0.0-beta.2: - version "5.0.0-beta.2" - resolved "https://registry.yarnpkg.com/ember-data-model-fragments/-/ember-data-model-fragments-5.0.0-beta.2.tgz#cc5f6e37363216a227394b31beb8eecc4c267a3e" - integrity sha512-LMObs51btOpHFX0DABReU68hpzkZp/yCKCh5w6CSGEm7MM5Bcj8p7Wb4GQIPZANKidVSvdCOT4qnyCREyUatBg== +ember-data-model-fragments@5.0.0-beta.3: + version "5.0.0-beta.3" + resolved "https://registry.yarnpkg.com/ember-data-model-fragments/-/ember-data-model-fragments-5.0.0-beta.3.tgz#88cc796ceac20ed1ff3591e12a1cebb6829e1083" + integrity sha512-fu9c+9WSJZg8SRqiPKRwZ+0AFj3jAM+BpDHt24rCEmdPouECoijOyRRYE8arM+k+qBNlYScJ0lPkuD9tDD4gLA== dependencies: broccoli-file-creator "^2.1.1" broccoli-merge-trees "^3.0.0" @@ -9260,31 +9018,25 @@ ember-data-model-fragments@5.0.0-beta.2: git-repo-info "^2.1.1" npm-git-info "^1.0.3" -ember-data@~3.24: - version "3.24.2" - resolved "https://registry.yarnpkg.com/ember-data/-/ember-data-3.24.2.tgz#34d72b0bc83fce1791bf09a8391425717b9c3f55" - integrity sha512-dfpLagJn09eEcoVqU4NfMs3J+750jJU7rLZA7uFY2/+0M0a4iGhjbm1dVVZQTkrfNiYHXvOOItr1bOT9sMC8Hg== +ember-data@~3.28.6: + version "3.28.8" + resolved "https://registry.yarnpkg.com/ember-data/-/ember-data-3.28.8.tgz#dc93b3fa69f3d5fab9757d93c89a547be3fd3225" + integrity sha512-9vMQ82GrAN0XUGX+k6B+xxmLS9fl/lobUNLtmMOKkeXM5P3Y2VdQycuUtK7yJhdq6FhEMO5sxoayUz8AImjLWw== dependencies: - "@ember-data/adapter" "3.24.2" - "@ember-data/debug" "3.24.2" - "@ember-data/model" "3.24.2" - "@ember-data/private-build-infra" "3.24.2" - "@ember-data/record-data" "3.24.2" - "@ember-data/serializer" "3.24.2" - "@ember-data/store" "3.24.2" + "@ember-data/adapter" "3.28.8" + "@ember-data/debug" "3.28.8" + "@ember-data/model" "3.28.8" + "@ember-data/private-build-infra" "3.28.8" + 
"@ember-data/record-data" "3.28.8" + "@ember-data/serializer" "3.28.8" + "@ember-data/store" "3.28.8" "@ember/edition-utils" "^1.2.0" - "@ember/ordered-set" "^4.0.0" - "@ember/string" "^1.0.0" + "@ember/string" "^3.0.0" "@glimmer/env" "^0.1.7" broccoli-merge-trees "^4.2.0" - ember-cli-babel "^7.18.0" - ember-cli-typescript "^3.1.3" - ember-inflector "^3.0.1" - -ember-debug-handlers-polyfill@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/ember-debug-handlers-polyfill/-/ember-debug-handlers-polyfill-1.1.1.tgz#e9ae0a720271a834221179202367421b580002ef" - integrity sha512-lO7FBAqJjzbL+IjnWhVfQITypPOJmXdZngZR/Vdn513W4g/Q6Sjicao/mDzeDCb48Y70C4Facwk0LjdIpSZkRg== + ember-cli-babel "^7.26.6" + ember-cli-typescript "^4.1.0" + ember-inflector "^4.0.1" ember-decorators@^6.1.1: version "6.1.1" @@ -9304,14 +9056,14 @@ ember-destroyable-polyfill@^2.0.1, ember-destroyable-polyfill@^2.0.2, ember-dest ember-cli-version-checker "^5.1.1" ember-compatibility-helpers "^1.2.1" -ember-element-helper@^0.3.2: - version "0.3.2" - resolved "https://registry.yarnpkg.com/ember-element-helper/-/ember-element-helper-0.3.2.tgz#a0e384c266c6fb0e39803708d6f5e83ce6dba659" - integrity sha512-t4lrekoRb/jVQeg/N1V0kzehV6cw0YAH1hG1H2+Ykl35YxpYdX7/8hKtaGzVPxceemUVFO7fUorEQ6Y//wpWdA== +ember-element-helper@^0.5.5: + version "0.5.5" + resolved "https://registry.yarnpkg.com/ember-element-helper/-/ember-element-helper-0.5.5.tgz#4a9ecb4dce57ee7f5ceb868a53c7b498c729f056" + integrity sha512-Tu3hsI+/mjHBUvw62Qi+YDZtKkn59V66CjwbgfNTZZ7aHf4gFm1ow4zJ4WLnpnie8p9FvOmIUxwl5HvgPJIcFA== dependencies: + "@embroider/util" "^0.39.1 || ^0.40.0 || ^0.41.0" ember-cli-babel "^7.17.2" ember-cli-htmlbars "^5.1.0" - ember-compatibility-helpers "^1.2.1" ember-exam@6.1.0: version "6.1.0" @@ -9338,20 +9090,13 @@ ember-export-application-global@^2.0.1: resolved "https://registry.yarnpkg.com/ember-export-application-global/-/ember-export-application-global-2.0.1.tgz#b120a70e322ab208defc9e2daebe8d0dfc2dcd46" integrity sha512-B7wiurPgsxsSGzJuPFkpBWnaeuCu2PGpG2BjyrfA1VcL7//o+5RSnZqiCEY326y7qmxb2GoCgo0ft03KBU0rRw== -ember-factory-for-polyfill@^1.3.1: - version "1.3.1" - resolved "https://registry.yarnpkg.com/ember-factory-for-polyfill/-/ember-factory-for-polyfill-1.3.1.tgz#b446ed64916d293c847a4955240eb2c993b86eae" - integrity sha512-y3iG2iCzH96lZMTWQw6LWNLAfOmDC4pXKbZP6FxG8lt7GGaNFkZjwsf+Z5GAe7kxfD7UG4lVkF7x37K82rySGA== +ember-fetch@^8.1.1: + version "8.1.1" + resolved "https://registry.yarnpkg.com/ember-fetch/-/ember-fetch-8.1.1.tgz#d68d4a58529121a572ec09c39c6a3ad174c83a2e" + integrity sha512-Xi1wNmPtVmfIoFH675AA0ELIdYUcoZ2p+6j9c8eDFjiGJiFesyp01bDtl5ryBI/1VPOByJLsDkT+4C11HixsJw== dependencies: - ember-cli-version-checker "^2.1.0" - -ember-fetch@^8.0.2: - version "8.0.4" - resolved "https://registry.yarnpkg.com/ember-fetch/-/ember-fetch-8.0.4.tgz#b9a3239e9d188ada18a4448ccb4fea2a091e3952" - integrity sha512-vx/v6+OTZMDbm6BT5TI6Q/NSuaWPbQxb2KRDD4yR/iJoRl2DtfgXiCA491pYT5IwAAtp6NbMa1CitIQDoaII9Q== - dependencies: - abortcontroller-polyfill "^1.4.0" - broccoli-concat "^4.2.4" + abortcontroller-polyfill "^1.7.3" + broccoli-concat "^4.2.5" broccoli-debug "^0.6.5" broccoli-merge-trees "^4.2.0" broccoli-rollup "^2.1.1" @@ -9359,12 +9104,13 @@ ember-fetch@^8.0.2: broccoli-templater "^2.0.1" calculate-cache-key-for-tree "^2.0.0" caniuse-api "^3.0.0" - ember-cli-babel "^7.23.0" - ember-cli-typescript "^3.1.3" + ember-cli-babel "^7.23.1" + ember-cli-typescript "^4.1.0" + ember-cli-version-checker "^5.1.2" node-fetch "^2.6.1" - whatwg-fetch "^3.4.0" + 
whatwg-fetch "^3.6.2" -ember-get-config@: +ember-get-config@, "ember-get-config@^0.2.4 || ^0.3.0": version "0.3.0" resolved "https://registry.yarnpkg.com/ember-get-config/-/ember-get-config-0.3.0.tgz#a73a1a87b48d9dde4c66a0e52ed5260b8a48cfbd" integrity sha512-0e2pKzwW5lBZ4oJnvu9qHOht4sP1MWz/m3hyz8kpSoMdrlZVf62LDKZ6qfKgy8drcv5YhCMYE6QV7MhnqlrzEQ== @@ -9372,22 +9118,6 @@ ember-get-config@: broccoli-file-creator "^1.1.1" ember-cli-babel "^7.0.0" -ember-get-config@^0.2.2: - version "0.2.4" - resolved "https://registry.yarnpkg.com/ember-get-config/-/ember-get-config-0.2.4.tgz#118492a2a03d73e46004ed777928942021fe1ecd" - integrity sha1-EYSSoqA9c+RgBO13eSiUICH+Hs0= - dependencies: - broccoli-file-creator "^1.1.1" - ember-cli-babel "^6.3.0" - -ember-getowner-polyfill@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/ember-getowner-polyfill/-/ember-getowner-polyfill-2.2.0.tgz#38e7dccbcac69d5ec694000329ec0b2be651d2b2" - integrity sha512-rwGMJgbGzxIAiWYjdpAh04Abvt0s3HuS/VjHzUFhVyVg2pzAuz45B9AzOxYXzkp88vFC7FPaiA4kE8NxNk4A4Q== - dependencies: - ember-cli-version-checker "^2.1.0" - ember-factory-for-polyfill "^1.3.1" - ember-in-element-polyfill@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/ember-in-element-polyfill/-/ember-in-element-polyfill-1.0.1.tgz#143504445bb4301656a2eaad42644d684f5164dd" @@ -9398,30 +9128,31 @@ ember-in-element-polyfill@^1.0.1: ember-cli-htmlbars "^5.3.1" ember-cli-version-checker "^5.1.2" -ember-inflector@3.0.1, "ember-inflector@^2.0.0 || ^3.0.0", ember-inflector@^3.0.0, ember-inflector@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/ember-inflector/-/ember-inflector-3.0.1.tgz#04be6df4d7e4000f6d6bd70787cdc995f77be4ab" - integrity sha512-fngrwMsnhkBt51KZgwNwQYxgURwV4lxtoHdjxf7RueGZ5zM7frJLevhHw7pbQNGqXZ3N+MRkhfNOLkdDK9kFdA== +"ember-inflector@^2.0.0 || ^3.0.0 || ^4.0.0", ember-inflector@^4.0.1, ember-inflector@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/ember-inflector/-/ember-inflector-4.0.2.tgz#4494f1a5f61c1aca7702d59d54024cc92211d8ec" + integrity sha512-+oRstEa52mm0jAFzhr51/xtEWpCEykB3SEBr7vUg8YnXUZJ5hKNBppP938q8Zzr9XfJEbzrtDSGjhKwJCJv6FQ== dependencies: - ember-cli-babel "^6.6.0" + ember-cli-babel "^7.26.5" -ember-inline-svg@^0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/ember-inline-svg/-/ember-inline-svg-0.3.0.tgz#14e305446e832c116210d5f0cb2aae8343879980" - integrity sha512-CH7+bOUCDfDo27mJO2NQqcDpnOz258iXzdiWmG6hftIBZ97Q64Qo0/Hm38ShWY40r2cw3jwior19miH6/e7amg== +ember-inline-svg@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/ember-inline-svg/-/ember-inline-svg-1.0.1.tgz#fa1e8ef56823274680d363b21fb1127a95ae98a3" + integrity sha512-dRf6zRpFunXms8ba095vT4pppTItwYZGy9fjpd9zq5Eqx+97bukv+NffM+shtah1WHrMLj/j8XnxEalENAOiag== dependencies: broccoli-caching-writer "^3.0.3" broccoli-flatiron "~0.1.3" - broccoli-funnel "^2.0.1" - broccoli-merge-trees "^3.0.0" - ember-cli-babel "^6.16.0" - merge "^1.2.1" + broccoli-funnel "~2.0.2" + broccoli-merge-trees "~3.0.2" + ember-cli-babel "^7.11.1" + ember-cli-htmlbars "^4.0.0" + merge "^2.1.1" mkdirp "^0.5.1" promise-map-series "^0.2.1" svgo "~1.2.2" - walk-sync "^0.3.1" + walk-sync "~2.0.2" -ember-load-initializers@^2.1.1: +ember-load-initializers@^2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/ember-load-initializers/-/ember-load-initializers-2.1.2.tgz#8a47a656c1f64f9b10cecdb4e22a9d52ad9c7efa" integrity sha512-CYR+U/wRxLbrfYN3dh+0Tb6mFaxJKfdyz+wNql6cqTrA0BBi9k6J3AaKXj273TqvEpyyXegQFFkZEiuZdYtgJw== @@ -9429,37 
+9160,27 @@ ember-load-initializers@^2.1.1: ember-cli-babel "^7.13.0" ember-cli-typescript "^2.0.2" -ember-macro-helpers@^2.1.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/ember-macro-helpers/-/ember-macro-helpers-2.2.0.tgz#682bb887fda0679fd82cf981190f6dbba20b6e03" - integrity sha512-/ah3k6GFyrAxA2EE8XzNcrk1BlLw34TXgb+1tNdbWQj8pPoP0Notc00xiEXUDgwJ6kTT3KaEBujFyuIbwe3rsw== +ember-maybe-import-regenerator@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/ember-maybe-import-regenerator/-/ember-maybe-import-regenerator-1.0.0.tgz#c05453dfd3b65dbec2b569612b01ae70b672dd7e" + integrity sha512-wtjgjEV0Hk4fgiAwFjOfPrGWfmFrbRW3zgNZO4oA3H5FlbMssMvWuR8blQ3QSWYHODVK9r+ThsRAs8lG4kbxqA== dependencies: - ember-cli-babel "^6.18.0" - ember-cli-string-utils "^1.1.0" - ember-cli-test-info "^1.0.0" - ember-weakmap "^3.0.0" + broccoli-funnel "^2.0.1" + broccoli-merge-trees "^3.0.0" + ember-cli-babel "^7.26.6" + regenerator-runtime "^0.13.2" -ember-maybe-import-regenerator@^0.1.6: - version "0.1.6" - resolved "https://registry.yarnpkg.com/ember-maybe-import-regenerator/-/ember-maybe-import-regenerator-0.1.6.tgz#35d41828afa6d6a59bc0da3ce47f34c573d776ca" - integrity sha1-NdQYKK+m1qWbwNo85H80xXPXdso= - dependencies: - broccoli-funnel "^1.0.1" - broccoli-merge-trees "^1.0.0" - ember-cli-babel "^6.0.0-beta.4" - regenerator-runtime "^0.9.5" - -ember-maybe-in-element@^2.0.1: - version "2.0.2" - resolved "https://registry.yarnpkg.com/ember-maybe-in-element/-/ember-maybe-in-element-2.0.2.tgz#93e503fb0655b65cc822e4040e51e13814b5f648" - integrity sha512-NyZNEGsdUHKUbpeZV0U6fbs0KaRKaa6O6E3RP3TMoqUA/NI3Fhha28kZ38aPe21KMgN4I1NHicgSHf4ijuHFsA== +ember-maybe-in-element@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/ember-maybe-in-element/-/ember-maybe-in-element-2.0.3.tgz#640ea56b492bdacd1c41c128c2163d933c18c3ec" + integrity sha512-XKuBYPYELwsEmDnJXI7aNSZtt/SKGgRZNMFhASODLz7j0OHSNrcJtjo5Wam/alxIjUIYVjEnMnOzqBLMfJnQkQ== dependencies: ember-cli-babel "^7.21.0" ember-cli-htmlbars "^5.2.0" ember-cli-version-checker "^5.1.1" ember-in-element-polyfill "^1.0.1" -ember-modifier-manager-polyfill@^1.1.0, ember-modifier-manager-polyfill@^1.2.0: +ember-modifier-manager-polyfill@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/ember-modifier-manager-polyfill/-/ember-modifier-manager-polyfill-1.2.0.tgz#cf4444e11a42ac84f5c8badd85e635df57565dda" integrity sha512-bnaKF1LLKMkBNeDoetvIJ4vhwRPKIIumWr6dbVuW6W6p4QV8ZiO+GdF8J7mxDNlog9CeL9Z/7wam4YS86G8BYA== @@ -9468,26 +9189,27 @@ ember-modifier-manager-polyfill@^1.1.0, ember-modifier-manager-polyfill@^1.2.0: ember-cli-version-checker "^2.1.2" ember-compatibility-helpers "^1.2.0" -ember-modifier@^2.1.0, ember-modifier@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/ember-modifier/-/ember-modifier-2.1.1.tgz#aa3a12e2d6cf1622f774f3f1eab4880982a43fa9" - integrity sha512-g9mcpFWgw5lgNU40YNf0USNWqoGTJ+EqjDQKjm7556gaRNDeGnLylFKqx9O3opwLHEt6ZODnRDy9U0S5YEMREg== +ember-modifier@^3.0.0, ember-modifier@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/ember-modifier/-/ember-modifier-3.1.0.tgz#ba5b0941302accd787ed3dcfc8d20400b77ffc41" + integrity sha512-G5Lj9jVFsD2sVJcRNQfaGKG1p81wT4LGfClBhCuB4TgwP1NGJKdqI+Q8BW2MptONxQt/71UjjUH0YK7Gm9eahg== dependencies: - ember-cli-babel "^7.22.1" + ember-cli-babel "^7.26.6" ember-cli-normalize-entity-name "^1.0.0" ember-cli-string-utils "^1.1.0" - ember-cli-typescript "^3.1.3" - ember-destroyable-polyfill "^2.0.2" - ember-modifier-manager-polyfill "^1.2.0" + ember-cli-typescript 
"^4.2.1" + ember-compatibility-helpers "^1.2.5" -ember-moment@^7.8.1: - version "7.8.1" - resolved "https://registry.yarnpkg.com/ember-moment/-/ember-moment-7.8.1.tgz#6f77cf941d1a92e231b2f4b810e113b2fae50c5f" - integrity sha512-qEMWvJYOGGgyusUE9b+2wU7uWUG8aHkzdE6NBtGjaX5NU8SeByQkH8RkA+QKZonWQH3JncdbG6UBqRO1Ng3DYA== +ember-moment@^9.0.1: + version "9.0.1" + resolved "https://registry.yarnpkg.com/ember-moment/-/ember-moment-9.0.1.tgz#fcf06cb8ef07c8d0108820c1639778590d613b38" + integrity sha512-mwcj2g/37pIQua3uTpJWu/zpcnyvbgg9YmkKR6nhtY7FXBaGSIU3aeG95VAoPcfET9DazkPOyqxVB+Dcd5BP5g== dependencies: - ember-cli-babel "^6.7.2" - ember-getowner-polyfill "^2.2.0" - ember-macro-helpers "^2.1.0" + ember-auto-import "^1.10.1" + ember-cli-babel "^7.23.0" + ember-cli-htmlbars "^5.3.1" + moment "^2.29.1" + moment-timezone "^0.5.33" ember-named-blocks-polyfill@^0.2.4: version "0.2.4" @@ -9497,14 +9219,6 @@ ember-named-blocks-polyfill@^0.2.4: ember-cli-babel "^7.19.0" ember-cli-version-checker "^5.1.1" -ember-native-dom-helpers@^0.7.0: - version "0.7.0" - resolved "https://registry.yarnpkg.com/ember-native-dom-helpers/-/ember-native-dom-helpers-0.7.0.tgz#98a87c11a391cec5c12382a4857e59ea2fb4b00a" - integrity sha512-ySJRGRhwYIWUAZKilB8xEcIatP9wKfEBX6JFG8bG4Ck7GvA0eau265hTGZz/+ntZuwcY4HrzSNkwimlHx4cM/A== - dependencies: - broccoli-funnel "^1.1.0" - ember-cli-babel "^6.6.0" - ember-overridable-computed@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/ember-overridable-computed/-/ember-overridable-computed-1.0.0.tgz#4fb4a5acc9ec9ed7421586a5a8b4014f5bdb04f7" @@ -9512,52 +9226,44 @@ ember-overridable-computed@^1.0.0: dependencies: ember-cli-babel "^7.7.3" -ember-page-title@^6.0.3: - version "6.2.0" - resolved "https://registry.yarnpkg.com/ember-page-title/-/ember-page-title-6.2.0.tgz#d79f7422299ebb8f8ac819399596e8f54f01a0e9" - integrity sha512-mgzI59rVH6Q00uG1OnsbrSfic+MxFQbtsjX3BuDWGgU7hWahDhAAgaQrKIFx99aEqWoiL+OiX4tQLPnxyZEceA== +ember-page-title@^6.2.2: + version "6.2.2" + resolved "https://registry.yarnpkg.com/ember-page-title/-/ember-page-title-6.2.2.tgz#980838c44e96cba1d00f42435d707936af627324" + integrity sha512-YTXA+cylZrh9zO0zwjlaAGReT2MVOxAMnVO1OOygFrs1JBs4D6CKV3tImoilg3AvIXFBeJfFNNUbJOdRd9IGGg== dependencies: - ember-cli-babel "^7.22.1" + ember-cli-babel "^7.23.1" -ember-power-select@^4.1.3: - version "4.1.3" - resolved "https://registry.yarnpkg.com/ember-power-select/-/ember-power-select-4.1.3.tgz#660d3bec86ce71db43a4a7368a2827b07e65c8b6" - integrity sha512-/8HdYb8PptMbrB4lxiPkfRQt6ts83lGRB+ZA6Xr4OaCzWk+5evYm/+a0Q5Gy3XnyhRiQgbzXO07G+fAijXGTOQ== +ember-power-select@^4.1.7: + version "4.1.7" + resolved "https://registry.yarnpkg.com/ember-power-select/-/ember-power-select-4.1.7.tgz#eb547dd37448357d8f3fa789db18ddbba43fb8ca" + integrity sha512-Q4cjUudWb7JA6q7qe0jhcpLsipuFUHMwkYC05HxST5qm3MRMEzs6KfZ3Xd/TcrjBLSoWniw3Q61Quwcb41w5Jw== dependencies: - "@glimmer/component" "^1.0.2" - "@glimmer/tracking" "^1.0.2" + "@glimmer/component" "^1.0.4" + "@glimmer/tracking" "^1.0.4" ember-assign-helper "^0.3.0" - ember-basic-dropdown "^3.0.16" - ember-cli-babel "^7.23.0" - ember-cli-htmlbars "^5.3.1" - ember-cli-typescript "^4.1.0" + ember-basic-dropdown "^3.0.21" + ember-cli-babel "^7.26.0" + ember-cli-htmlbars "^6.0.0" + ember-cli-typescript "^4.2.0" ember-concurrency ">=1.0.0 <3" ember-concurrency-decorators "^2.0.0" ember-text-measurer "^0.6.0" ember-truth-helpers "^2.1.0 || ^3.0.0" -ember-qunit-nice-errors@^1.2.0: - version "1.2.0" - resolved 
"https://registry.yarnpkg.com/ember-qunit-nice-errors/-/ember-qunit-nice-errors-1.2.0.tgz#8db4468fbe761f42bec9adcddfd21efa31237267" - integrity sha512-DNvqrS6vSMf7VImW7RjlTbiUnYvyxNNeVMNBVyys7g8hgg/IqbWeXQjKUzeSp/dUbzVi9Fr/8cuLHH7S38DSOA== +ember-qunit@^5.1.5: + version "5.1.5" + resolved "https://registry.yarnpkg.com/ember-qunit/-/ember-qunit-5.1.5.tgz#24a7850f052be24189ff597dfc31b923e684c444" + integrity sha512-2cFA4oMygh43RtVcMaBrr086Tpdhgbn3fVZ2awLkzF/rnSN0D0PSRpd7hAD7OdBPerC/ZYRwzVyGXLoW/Zes4A== dependencies: - babel-core "^6.10.4" - broccoli-persistent-filter "^1.4.3" - ember-cli-babel "^6.6.0" - recast "^0.13.0" - -ember-qunit@^4.6.0: - version "4.6.0" - resolved "https://registry.yarnpkg.com/ember-qunit/-/ember-qunit-4.6.0.tgz#ad79fd3ff00073a8779400cc5a4b44829517590f" - integrity sha512-i5VOGn0RP8XH+5qkYDOZshbqAvO6lHgF65D0gz8vRx4DszCIvJMJO+bbftBTfYMxp6rqG85etAA6pfNxE0DqsQ== - dependencies: - "@ember/test-helpers" "^1.7.1" - broccoli-funnel "^2.0.2" + broccoli-funnel "^3.0.8" broccoli-merge-trees "^3.0.2" - common-tags "^1.4.0" - ember-cli-babel "^7.12.0" - ember-cli-test-loader "^2.2.0" - qunit "^2.9.3" + common-tags "^1.8.0" + ember-auto-import "^1.11.3" + ember-cli-babel "^7.26.6" + ember-cli-test-loader "^3.0.0" + resolve-package-path "^3.1.0" + silent-error "^1.1.1" + validate-peer-dependencies "^1.2.0" ember-render-helpers@^0.2.0: version "0.2.0" @@ -9567,26 +9273,26 @@ ember-render-helpers@^0.2.0: ember-cli-babel "^7.23.0" ember-cli-typescript "^4.0.0" -ember-resolver@^8.0.0: - version "8.0.2" - resolved "https://registry.yarnpkg.com/ember-resolver/-/ember-resolver-8.0.2.tgz#8a45a744aaf5391eb52b4cb393b3b06d2db1975c" - integrity sha512-BmCoPT8nf5uv0g2QkhMsrG1Gmu6MtXd6igfhCVzCTMTwZ97SxhJKqoMY62EP/av5HVSWBC/Sa1uGU5cQvX1Aog== +ember-resolver@^8.0.3: + version "8.0.3" + resolved "https://registry.yarnpkg.com/ember-resolver/-/ember-resolver-8.0.3.tgz#40f243aa58281bf195c695fe84a6b291e204690a" + integrity sha512-fA53fxfG821BRqNiB9mQDuzZpzSRcSAYZTYBlRQOHsJwoYdjyE7idz4YcytbSsa409G5J2kP6B+PiKOBh0odlw== dependencies: - babel-plugin-debug-macros "^0.3.3" - broccoli-funnel "^3.0.3" + babel-plugin-debug-macros "^0.3.4" + broccoli-funnel "^3.0.8" broccoli-merge-trees "^4.2.0" - ember-cli-babel "^7.22.1" - ember-cli-version-checker "^5.1.1" - resolve "^1.17.0" + ember-cli-babel "^7.26.6" + ember-cli-version-checker "^5.1.2" + resolve "^1.20.0" -ember-responsive@^3.0.4: - version "3.0.6" - resolved "https://registry.yarnpkg.com/ember-responsive/-/ember-responsive-3.0.6.tgz#4413c1475d08229791d0133f8bfd44f744a81ca7" - integrity sha512-TSFOB5FnlsMoAQrIe8EhM+cV0kJSLefTyXdb/rAi5zAqVoPC5qreQeHUQG7JKHR8K73azNZM64mXADX1IrIVUw== +ember-responsive@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/ember-responsive/-/ember-responsive-4.0.2.tgz#271fb0e619f492493a3332d96332d3684d70cb58" + integrity sha512-cNpR7ZA/JqF4f9+wCct3LXVjNLCv+biIVrAoo3fuCkIiGp3/I6D9GBhKZngvSFQiKp/tp2N52zvS7v5h0ahF4A== dependencies: ember-cli-babel "^7.19.0" -ember-rfc176-data@^0.3.13, ember-rfc176-data@^0.3.15, ember-rfc176-data@^0.3.16: +ember-rfc176-data@^0.3.15: version "0.3.16" resolved "https://registry.yarnpkg.com/ember-rfc176-data/-/ember-rfc176-data-0.3.16.tgz#2ace0ac9cf9016d493a74a1d931643a308679803" integrity sha512-IYAzffS90r2ybAcx8c2qprYfkxa70G+/UPkxMN1hw55DU5S2aLOX6v3umKDZItoRhrvZMCnzwsdfKSrKdC9Wbg== @@ -9605,40 +9311,42 @@ ember-router-generator@^2.0.0: "@babel/traverse" "^7.4.5" recast "^0.18.1" -ember-sinon@^4.0.0: - version "4.1.1" - resolved 
"https://registry.yarnpkg.com/ember-sinon/-/ember-sinon-4.1.1.tgz#ff6db9e3b9548e269386d9efe1c381e165f03ba5" - integrity sha512-CmLjy7LGcTw2uP0WdFSPuXYbI7rwB4U/5EOtVU5h2jXtItrnspLIXBL50kigDzwv+lgE8XhfDVPbJ1QMrIXWXg== +ember-sinon@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/ember-sinon/-/ember-sinon-5.0.0.tgz#990bcafa65403d2b2e3e7f14bb547862197e97d5" + integrity sha512-dTP2vhao1xWm3OlfpOALooso/OLM71SFg7PIBmZ6JdwKCC+CzcPb4BYRAXuoAFYzmhH8z28p8HdemjZBb0B3Bw== dependencies: broccoli-funnel "^2.0.0" broccoli-merge-trees "^3.0.0" - ember-cli-babel "^7.7.3" - sinon "^7.4.2" + ember-cli-babel "^7.17.2" + sinon "^9.0.0" -ember-source-channel-url@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/ember-source-channel-url/-/ember-source-channel-url-2.0.1.tgz#18b88f8a00b7746e7a456b3551abb3aea18729cc" - integrity sha512-YlLUHW9gNvxEaohIj5exykoTZb4xj9ZRTcR4J3svv9S8rjAHJUnHmqC5Fd9onCs+NGxHo7KwR/fDwsfadbDu5Q== +ember-source-channel-url@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/ember-source-channel-url/-/ember-source-channel-url-3.0.0.tgz#bcd5be72c63fa0b8c390b3121783b462063e2a1b" + integrity sha512-vF/8BraOc66ZxIDo3VuNP7iiDrnXEINclJgSJmqwAAEpg84Zb1DHPI22XTXSDA+E8fW5btPUxu65c3ZXi8AQFA== dependencies: - got "^8.0.1" + node-fetch "^2.6.0" -ember-source@~3.20.2: - version "3.20.6" - resolved "https://registry.yarnpkg.com/ember-source/-/ember-source-3.20.6.tgz#c2aa4cee386436214b4b3ed84a71becfe815fa12" - integrity sha512-THimwJmimckNdumSg0M5URQTRlXAArEhe2ricCIuYesMwyX9HwZSUMS+IbXALzktGZU8qfEqyWNXxpmQ8ThJLA== +ember-source@~3.28.8: + version "3.28.8" + resolved "https://registry.yarnpkg.com/ember-source/-/ember-source-3.28.8.tgz#c58fd4a1538d6c4b9aebe76c764cabf5396c64d9" + integrity sha512-hA15oYzbRdi9983HIemeVzzX2iLcMmSPp6akUiMQhFZYWPrKksbPyLrO6YpZ4hNM8yBjQSDXEkZ1V3yxBRKjUA== dependencies: "@babel/helper-module-imports" "^7.8.3" "@babel/plugin-transform-block-scoping" "^7.8.3" "@babel/plugin-transform-object-assign" "^7.8.3" "@ember/edition-utils" "^1.2.0" - babel-plugin-debug-macros "^0.3.3" + "@glimmer/vm-babel-plugins" "0.80.3" + babel-plugin-debug-macros "^0.3.4" babel-plugin-filter-imports "^4.0.0" broccoli-concat "^4.2.4" broccoli-debug "^0.6.4" + broccoli-file-creator "^2.1.1" broccoli-funnel "^2.0.2" broccoli-merge-trees "^4.2.0" chalk "^4.0.0" - ember-cli-babel "^7.19.0" + ember-cli-babel "^7.23.0" ember-cli-get-component-path-option "^1.0.0" ember-cli-is-package-missing "^1.0.0" ember-cli-normalize-entity-name "^1.0.0" @@ -9647,68 +9355,65 @@ ember-source@~3.20.2: ember-cli-version-checker "^5.1.1" ember-router-generator "^2.0.0" inflection "^1.12.0" - jquery "^3.5.0" + jquery "^3.5.1" resolve "^1.17.0" - semver "^6.1.1" + semver "^7.3.4" silent-error "^1.1.1" -ember-style-modifier@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/ember-style-modifier/-/ember-style-modifier-0.6.0.tgz#cc5e58db7f6d6662028a7b4e3cf63cf25ba59a8f" - integrity sha512-KqW4vyR80l/GMJsuFV+WLqTmGjXKLpoQ/HAmno+oMDrMt13p/5ImrvarQ6lFgXttFnLCxl6YpMY4YX27p1G54g== +ember-style-modifier@^0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/ember-style-modifier/-/ember-style-modifier-0.7.0.tgz#85b3dfd7e4bc2bd546df595f2dab4fb141cf7d87" + integrity sha512-iDzffiwJcb9j6gu3g8CxzZOTvRZ0BmLMEFl+uyqjiaj72VVND9+HbLyQRw1/ewPAtinhSktxxTTdwU/JO+stLw== dependencies: - ember-cli-babel "^7.21.0" - ember-modifier "^2.1.0" + ember-cli-babel "^7.26.6" + ember-modifier "^3.0.0" -ember-template-lint@^2.9.1: - version "2.18.1" - resolved 
"https://registry.yarnpkg.com/ember-template-lint/-/ember-template-lint-2.18.1.tgz#4b87c4093bac08d3a57cfd0d250652cbf72561a8" - integrity sha512-F81Y6oQRBHZyR41AV+YeTfdvwoAo0Eh8TbhNaKOSAmWK2X0GLdxhzem9a9VrnZGCji6cfzJrIPyHlYtUEAk0dw== +ember-template-lint@^3.15.0: + version "3.16.0" + resolved "https://registry.yarnpkg.com/ember-template-lint/-/ember-template-lint-3.16.0.tgz#7af2ec8d4386f4726be08c14c39ba121c56f0896" + integrity sha512-hbP4JefkOLx9tMkrZ3UIvdBNoEnrT7rg6c70tIxpB9F+KpPneDbmpGMBsQVhhK4BirTXIFwAIfnwKcwkIk3bPQ== dependencies: - chalk "^4.0.0" - ember-template-recast "^5.0.1" + "@ember-template-lint/todo-utils" "^10.0.0" + chalk "^4.1.2" + ci-info "^3.3.0" + date-fns "^2.28.0" + ember-template-recast "^5.0.3" find-up "^5.0.0" + fuse.js "^6.5.3" get-stdin "^8.0.0" - globby "^11.0.2" - is-glob "^4.0.1" - micromatch "^4.0.2" - resolve "^1.19.0" - v8-compile-cache "^2.2.0" + globby "^11.0.4" + is-glob "^4.0.3" + micromatch "^4.0.4" + requireindex "^1.2.0" + resolve "^1.20.0" + v8-compile-cache "^2.3.0" yargs "^16.2.0" -ember-template-recast@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/ember-template-recast/-/ember-template-recast-5.0.1.tgz#c5d9e7208bf629ee368a3792328f245f3c651003" - integrity sha512-MtjyYtr5jnE72i/jVkI3m2QOdozglLwXS3HN74Ge9cm7dz5GoDszVvUksyj/9xqpIP31LMXjs2bZetRGvinU1Q== +ember-template-recast@^5.0.3: + version "5.0.3" + resolved "https://registry.yarnpkg.com/ember-template-recast/-/ember-template-recast-5.0.3.tgz#79df27a70bdce7be17f14db13886afde1e9d02d6" + integrity sha512-qsJYQhf29Dk6QMfviXhUPE+byMOs6iRQxUDHgkj8yqjeppvjHaFG96hZi/NAXJTm/M7o3PpfF5YlmeaKtI9UeQ== dependencies: "@glimmer/reference" "^0.65.0" "@glimmer/syntax" "^0.65.0" "@glimmer/validator" "^0.65.0" async-promise-queue "^1.0.5" colors "^1.4.0" - commander "^6.2.0" - globby "^11.0.1" - ora "^5.1.0" + commander "^6.2.1" + globby "^11.0.3" + ora "^5.4.0" slash "^3.0.0" tmp "^0.2.1" - workerpool "^6.0.3" + workerpool "^6.1.4" -ember-test-selectors@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/ember-test-selectors/-/ember-test-selectors-5.0.0.tgz#36c30f64498039cb88797cdda682275a460ee624" - integrity sha512-hqAPqyJLEGBYcQ9phOKvHhSCyvcSbUL8Yj2si8OASsQWxwRqbxrtk5YlkN2aZiZdp9PAd2wErS8uClG0U7tNpA== +ember-test-selectors@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/ember-test-selectors/-/ember-test-selectors-6.0.0.tgz#ba9bb19550d9dec6e4037d86d2b13c2cfd329341" + integrity sha512-PgYcI9PeNvtKaF0QncxfbS68olMYM1idwuI8v/WxsjOGqUx5bmsu6V17vy/d9hX4mwmjgsBhEghrVasGSuaIgw== dependencies: calculate-cache-key-for-tree "^2.0.0" - ember-cli-babel "^7.22.1" - ember-cli-version-checker "^5.1.1" - -ember-test-waiters@^1.1.1: - version "1.2.0" - resolved "https://registry.yarnpkg.com/ember-test-waiters/-/ember-test-waiters-1.2.0.tgz#c12ead4313934c24cff41857020cacdbf8e6effe" - integrity sha512-aEw7YuutLuJT4NUuPTNiGFwgTYl23ThqmBxSkfFimQAn+keWjAftykk3dlFELuhsJhYW/S8YoVjN0bSAQRLNtw== - dependencies: - ember-cli-babel "^7.11.0" - semver "^6.3.0" + ember-cli-babel "^7.26.4" + ember-cli-version-checker "^5.1.2" ember-text-measurer@^0.6.0: version "0.6.0" @@ -9718,29 +9423,13 @@ ember-text-measurer@^0.6.0: ember-cli-babel "^7.19.0" ember-cli-htmlbars "^4.3.1" -ember-truth-helpers@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/ember-truth-helpers/-/ember-truth-helpers-2.1.0.tgz#d4dab4eee7945aa2388126485977baeb33ca0798" - integrity sha512-BQlU8aTNl1XHKTYZ243r66yqtR9JU7XKWQcmMA+vkqfkE/c9WWQ9hQZM8YABihCmbyxzzZsngvldokmeX5GhAw== - dependencies: - ember-cli-babel 
"^6.6.0" - -"ember-truth-helpers@^2.1.0 || ^3.0.0": +"ember-truth-helpers@^2.1.0 || ^3.0.0", ember-truth-helpers@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/ember-truth-helpers/-/ember-truth-helpers-3.0.0.tgz#86766bdca4ac9b86bce3d262dff2aabc4a0ea384" integrity sha512-hPKG9QqruAELh0li5xaiLZtr88ioWYxWCXisAWHWE0qCP4a2hgtuMzKUPpiTCkltvKjuqpzTZCU4VhQ+IlRmew== dependencies: ember-cli-babel "^7.22.1" -ember-weakmap@^3.0.0: - version "3.3.2" - resolved "https://registry.yarnpkg.com/ember-weakmap/-/ember-weakmap-3.3.2.tgz#82c0d3168df786f789600cd8386c2eabff96958d" - integrity sha512-AFECvDuKaxMvi/kMl18sRDUwoB+WKxDdLXfEAOJ8JbjcYhGE/6cWXELSNIshIIwmuLRyDC9xwfhd7PpMnRohYA== - dependencies: - browserslist "^3.1.1" - debug "^3.1.0" - ember-cli-babel "^6.6.0" - emoji-regex@^7.0.1: version "7.0.3" resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-7.0.3.tgz#933a04052860c85e83c122479c4748a8e4c72156" @@ -9777,34 +9466,6 @@ end-of-stream@^1.0.0, end-of-stream@^1.1.0: dependencies: once "^1.4.0" -engine.io-client@~3.5.0: - version "3.5.2" - resolved "https://registry.yarnpkg.com/engine.io-client/-/engine.io-client-3.5.2.tgz#0ef473621294004e9ceebe73cef0af9e36f2f5fa" - integrity sha512-QEqIp+gJ/kMHeUun7f5Vv3bteRHppHH/FMBQX/esFj/fuYfjyUKWGMo3VCvIP/V8bE9KcjHmRZrhIz2Z9oNsDA== - dependencies: - component-emitter "~1.3.0" - component-inherit "0.0.3" - debug "~3.1.0" - engine.io-parser "~2.2.0" - has-cors "1.1.0" - indexof "0.0.1" - parseqs "0.0.6" - parseuri "0.0.6" - ws "~7.4.2" - xmlhttprequest-ssl "~1.6.2" - yeast "0.1.2" - -engine.io-parser@~2.2.0: - version "2.2.1" - resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-2.2.1.tgz#57ce5611d9370ee94f99641b589f94c97e4f5da7" - integrity sha512-x+dN/fBH8Ro8TFwJ+rkB2AmuVw9Yu2mockR/p3W8f8YtExwFgDvBDi0GWyb4ZLkpahtDGZgtr3zLovanJghPqg== - dependencies: - after "0.8.2" - arraybuffer.slice "~0.0.7" - base64-arraybuffer "0.1.4" - blob "0.0.5" - has-binary2 "~1.0.2" - engine.io-parser@~5.0.0: version "5.0.1" resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-5.0.1.tgz#6695fc0f1e6d76ad4a48300ff80db5f6b3654939" @@ -9812,18 +9473,6 @@ engine.io-parser@~5.0.0: dependencies: base64-arraybuffer "~1.0.1" -engine.io@~3.5.0: - version "3.5.0" - resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-3.5.0.tgz#9d6b985c8a39b1fe87cd91eb014de0552259821b" - integrity sha512-21HlvPUKaitDGE4GXNtQ7PLP0Sz4aWLddMPw2VTyFz1FVZqu/kZsJUO8WNpKuE/OCL7nkfRaOui2ZCJloGznGA== - dependencies: - accepts "~1.3.4" - base64id "2.0.0" - cookie "~0.4.1" - debug "~4.1.0" - engine.io-parser "~2.2.0" - ws "~7.4.2" - engine.io@~6.0.0: version "6.0.0" resolved "https://registry.yarnpkg.com/engine.io/-/engine.io-6.0.0.tgz#2b993fcd73e6b3a6abb52b40b803651cd5747cf0" @@ -9840,7 +9489,7 @@ engine.io@~6.0.0: engine.io-parser "~5.0.0" ws "~8.2.3" -enhanced-resolve@^4.0.0, enhanced-resolve@^4.5.0: +enhanced-resolve@^4.5.0: version "4.5.0" resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-4.5.0.tgz#2f3cfd84dbe3b487f18f2db2ef1e064a571ca5ec" integrity sha512-Nv9m36S/vxpsI+Hc4/ZGRs0n9mXqSWGGq49zxb/cJfPAQMbUtttJAlNPS4AQzaBdw/pKskw5bMbekT/Y7W/Wlg== @@ -9849,6 +9498,14 @@ enhanced-resolve@^4.0.0, enhanced-resolve@^4.5.0: memory-fs "^0.5.0" tapable "^1.0.0" +enhanced-resolve@^5.8.3: + version "5.9.2" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.9.2.tgz#0224dcd6a43389ebfb2d55efee517e5466772dd9" + integrity 
sha512-GIm3fQfwLJ8YZx2smuHpBKkXC1yOk+OBEmKckVyL0i/ea8mqDEykK3ld5dgH1QYPNyT/lIllxV2LULnxCHaHkA== + dependencies: + graceful-fs "^4.2.4" + tapable "^2.2.0" + enquirer@^2.3.5, enquirer@^2.3.6: version "2.3.6" resolved "https://registry.yarnpkg.com/enquirer/-/enquirer-2.3.6.tgz#2a7fe5dd634a1e4125a975ec994ff5456dc3734d" @@ -9871,11 +9528,6 @@ entities@^2.0.0: resolved "https://registry.yarnpkg.com/entities/-/entities-2.2.0.tgz#098dc90ebb83d8dffa089d55256b351d34c4da55" integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A== -entities@~2.0.0: - version "2.0.3" - resolved "https://registry.yarnpkg.com/entities/-/entities-2.0.3.tgz#5c487e5742ab93c15abb5da22759b8590ec03b7f" - integrity sha512-MyoZ0jgnLvB2X3Lg5HqpFmn1kybDiIfEQmKzTb5apr51Rb+T3KdmMiqa70T+bhGnyv7bQ6WMj2QMHpGMmlrUYQ== - entities@~2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/entities/-/entities-2.1.0.tgz#992d3129cf7df6870b96c57858c249a120f8b8b5" @@ -9924,7 +9576,7 @@ es-abstract@^1.17.0-next.0, es-abstract@^1.17.2: string.prototype.trimend "^1.0.1" string.prototype.trimstart "^1.0.1" -es-abstract@^1.18.0-next.1: +es-abstract@^1.18.0-next.1, es-abstract@^1.19.1: version "1.19.1" resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.19.1.tgz#d4885796876916959de78edaa0df456627115ec3" integrity sha512-2vJ6tjA/UfqLm2MPs7jxVybLoB8i1t1Jd9R3kISld20sIxPcTbLuggQOUxeWeAvIUkduv/CfMjuh4WmiXr2v9w== @@ -9989,6 +9641,11 @@ es-get-iterator@^1.0.2: is-string "^1.0.5" isarray "^2.0.5" +es-module-lexer@^0.9.0: + version "0.9.3" + resolved "https://registry.yarnpkg.com/es-module-lexer/-/es-module-lexer-0.9.3.tgz#6f13db00cc38417137daf74366f535c8eb438f19" + integrity sha512-1HQ2M2sPtxwnvOvT1ZClHyQDiggdNjURWpY2we6aMKCQiUVxTmVs2UYPLIrD84sS+kMdUwfBSylbJPwNnBrnHQ== + es-to-primitive@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a" @@ -10003,11 +9660,6 @@ es5-shim@^4.5.13: resolved "https://registry.yarnpkg.com/es5-shim/-/es5-shim-4.5.15.tgz#6a26869b261854a3b045273f5583c52d390217fe" integrity sha512-FYpuxEjMeDvU4rulKqFdukQyZSTpzhg4ScQHrAosrlVpR6GFyaw14f74yn2+4BugniIS0Frpg7TvwZocU4ZMTw== -es6-promise@^4.2.8: - version "4.2.8" - resolved "https://registry.yarnpkg.com/es6-promise/-/es6-promise-4.2.8.tgz#4eb21594c972bc40553d276e510539143db53e0a" - integrity sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w== - es6-shim@^0.35.5: version "0.35.6" resolved "https://registry.yarnpkg.com/es6-shim/-/es6-shim-0.35.6.tgz#d10578301a83af2de58b9eadb7c2c9945f7388a0" @@ -10061,13 +9713,13 @@ eslint-plugin-ember-a11y-testing@a11y-tool-sandbox/eslint-plugin-ember-a11y-test dependencies: requireindex "~1.1.0" -eslint-plugin-ember@^10.5.8: - version "10.5.8" - resolved "https://registry.yarnpkg.com/eslint-plugin-ember/-/eslint-plugin-ember-10.5.8.tgz#87e004a5ebed88f94008364554daf57df2c9c718" - integrity sha512-d21mJ+F+htgi6HhrjwbOfllJojF4ZWGruW13HkBoGS2SaHqKUyvIH/8j3EjSxlsGFiNfhTEUWkNaUSLJxgbtWg== +eslint-plugin-ember@^10.5.9: + version "10.5.9" + resolved "https://registry.yarnpkg.com/eslint-plugin-ember/-/eslint-plugin-ember-10.5.9.tgz#4071ac135c7207c7d4942e9fa75b710214885469" + integrity sha512-kJsdAaKNcfRvZBZ+YZ67pxxUgl+aPLFAnoFJNwTo+BsaptiOCsHUEc4xUKXiInH2BJOC6mg0FQcZKn1a6gwKrA== dependencies: "@ember-data/rfc395-data" "^0.0.4" - css-tree "^1.0.0-alpha.39" + css-tree "^2.0.4" ember-rfc176-data "^0.3.15" eslint-utils "^3.0.0" estraverse "^5.2.0" @@ -10110,6 
+9762,14 @@ eslint-plugin-qunit@^6.2.0: eslint-utils "^3.0.0" requireindex "^1.2.0" +eslint-scope@5.1.1, eslint-scope@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c" + integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw== + dependencies: + esrecurse "^4.3.0" + estraverse "^4.1.1" + eslint-scope@^4.0.3: version "4.0.3" resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-4.0.3.tgz#ca03833310f6889a3264781aa82e63eb9cfe7848" @@ -10118,14 +9778,6 @@ eslint-scope@^4.0.3: esrecurse "^4.1.0" estraverse "^4.1.1" -eslint-scope@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c" - integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw== - dependencies: - esrecurse "^4.3.0" - estraverse "^4.1.1" - eslint-utils@^2.0.0, eslint-utils@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/eslint-utils/-/eslint-utils-2.1.0.tgz#d2de5e03424e707dc10c74068ddedae708741b27" @@ -10274,6 +9926,11 @@ events@^3.0.0: resolved "https://registry.yarnpkg.com/events/-/events-3.2.0.tgz#93b87c18f8efcd4202a461aec4dfc0556b639379" integrity sha512-/46HWwbfCX2xTawVfkKLGxMifJYQBWMwY1mjywRtb4c9x8l5NP3KoJtnIOiL1hfdRkIuYhETxQlo62IF8tcnlg== +events@^3.2.0: + version "3.3.0" + resolved "https://registry.yarnpkg.com/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400" + integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q== + evp_bytestokey@^1.0.0, evp_bytestokey@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/evp_bytestokey/-/evp_bytestokey-1.0.3.tgz#7fcbdb198dc71959432efe13842684e0525acb02" @@ -10346,7 +10003,7 @@ execa@^4.0.0, execa@^4.0.3: signal-exit "^3.0.2" strip-final-newline "^2.0.0" -execa@^5.1.1: +execa@^5.0.0, execa@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/execa/-/execa-5.1.1.tgz#f80ad9cbf4298f7bd1d4c9555c21e93741c411dd" integrity sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg== @@ -10470,6 +10127,11 @@ extglob@^2.0.4: snapdragon "^0.8.1" to-regex "^3.0.1" +extract-stack@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/extract-stack/-/extract-stack-2.0.0.tgz#11367bc865bfcd9bc0db3123e5edb57786f11f9b" + integrity sha512-AEo4zm+TenK7zQorGK1f9mJ8L14hnTDi2ZQPR+Mub1NX8zimka1mXpV5LpH8x9HoUmFSHZCfLHqWvp0Y4FxxzQ== + extsprintf@1.3.0: version "1.3.0" resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05" @@ -10480,7 +10142,7 @@ extsprintf@^1.2.0: resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.4.0.tgz#e2689f8f356fad62cca65a3a91c5df5f9551692f" integrity sha1-4mifjzVvrWLMplo6kcXfX5VRaS8= -fake-xml-http-request@^2.1.1, fake-xml-http-request@^2.1.2: +fake-xml-http-request@^2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/fake-xml-http-request/-/fake-xml-http-request-2.1.2.tgz#f1786720cae50bbb46273035a0173414f3e85e74" integrity sha512-HaFMBi7r+oEC9iJNpc3bvcW7Z7iLmM26hPDmlb0mFwyANSsOQAtJxbdWsXITKOzZUyMYK0zYCv3h5yDj9TsiXg== @@ -10524,6 +10186,17 @@ fast-glob@^3.0.3, fast-glob@^3.1.1: micromatch "^4.0.2" picomatch "^2.2.1" +fast-glob@^3.2.9: + version "3.2.11" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.11.tgz#a1172ad95ceb8a16e20caa5c5e56480e5129c1d9" + 
integrity sha512-xrO3+1bxSo3ZVHAnqzyuewYT6aMFHRAd4Kcs92MAonjwQZLsK9d0SF1IyQ3k5PoirxTW0Oe/RqFgMQ6TcNE5Ew== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.2" + merge2 "^1.3.0" + micromatch "^4.0.4" + fast-json-stable-stringify@^2.0.0: version "2.1.0" resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" @@ -10591,10 +10264,10 @@ fault@^1.0.0: dependencies: format "^0.2.0" -faye-websocket@~0.10.0: - version "0.10.0" - resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4" - integrity sha1-TkkvjQTftviQA1B/btvy1QHnxvQ= +faye-websocket@^0.11.3: + version "0.11.4" + resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.11.4.tgz#7f0d9275cfdd86a1c963dc8b65fcc451edcbb1da" + integrity sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g== dependencies: websocket-driver ">=0.5.1" @@ -10771,7 +10444,7 @@ find-versions@^4.0.0: dependencies: semver-regex "^3.1.2" -find-yarn-workspace-root@^1.1.0, find-yarn-workspace-root@^1.2.1: +find-yarn-workspace-root@^1.1.0: version "1.2.1" resolved "https://registry.yarnpkg.com/find-yarn-workspace-root/-/find-yarn-workspace-root-1.2.1.tgz#40eb8e6e7c2502ddfaa2577c176f221422f860db" integrity sha512-dVtfb0WuQG+8Ag2uWkbG79hOUzEsRrhBzgfn86g2sJPkzmcpGdghbNTfUKGTxymFrY/tLIodDzLoW9nOJ4FY8Q== @@ -10779,6 +10452,13 @@ find-yarn-workspace-root@^1.1.0, find-yarn-workspace-root@^1.2.1: fs-extra "^4.0.3" micromatch "^3.1.4" +find-yarn-workspace-root@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/find-yarn-workspace-root/-/find-yarn-workspace-root-2.0.0.tgz#f47fb8d239c900eb78179aa81b66673eac88f7bd" + integrity sha512-1IMnbjt4KzsQfnhnzNd8wUEgXZ44IzZaZmnLYx7D5FZlaHt2gW20Cri8Q+E/t5tIj4+epTBub+2Zxu/vNILzqQ== + dependencies: + micromatch "^4.0.2" + findup-sync@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-4.0.0.tgz#956c9cdde804052b881b428512905c4a5f2cdef0" @@ -10808,7 +10488,7 @@ fixturify-project@^1.10.0: fixturify "^1.2.0" tmp "^0.0.33" -fixturify-project@^2.1.0: +fixturify-project@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/fixturify-project/-/fixturify-project-2.1.1.tgz#a511dd26700c6b64ac271ef4393e7124f153c81f" integrity sha512-sP0gGMTr4iQ8Kdq5Ez0CVJOZOGWqzP5dv/veOTdFNywioKjkNWCHBi1q65DMpcNGUGeoOUWehyji274Q2wRgxA== @@ -10967,7 +10647,7 @@ fresh@0.5.2: resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" integrity sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac= -from2@^2.1.0, from2@^2.1.1: +from2@^2.1.0: version "2.3.0" resolved "https://registry.yarnpkg.com/from2/-/from2-2.3.0.tgz#8bfb5502bde4a4d36cfdeea007fcca21d7e382af" integrity sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8= @@ -11176,6 +10856,11 @@ fuse.js@^3.4.4, fuse.js@^3.6.1: resolved "https://registry.yarnpkg.com/fuse.js/-/fuse.js-3.6.1.tgz#7de85fdd6e1b3377c23ce010892656385fd9b10c" integrity sha512-hT9yh/tiinkmirKrlv4KWOjztdoZo1mx9Qh4KvWqC7isoXwdUY3PNWUxceF4/qO9R6riA2C29jdTOeQOIROjgw== +fuse.js@^6.5.3: + version "6.5.3" + resolved "https://registry.yarnpkg.com/fuse.js/-/fuse.js-6.5.3.tgz#7446c0acbc4ab0ab36fa602e97499bdb69452b93" + integrity sha512-sA5etGE7yD/pOqivZRBvUBd/NaL2sjAu6QuSaFoe1H2BrJSkH/T/UXAJ8CdXdw7DvY3Hs8CXKYkDWX7RiP5KOg== + gauge@~2.7.3: version "2.7.4" resolved 
"https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7" @@ -11233,11 +10918,6 @@ get-stdin@^8.0.0: resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-8.0.0.tgz#cbad6a73feb75f6eeb22ba9e01f89aa28aa97a53" integrity sha512-sY22aA6xchAzprjyqmSEQv4UbAAzRN0L2dQB0NlN5acTTK9Don6nhoc3eAbUnpZiCANAMfd/+40kVdKfFygohg== -get-stream@3.0.0, get-stream@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14" - integrity sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ= - get-stream@^4.0.0: version "4.1.0" resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" @@ -11336,6 +11016,11 @@ glob-to-regexp@^0.3.0: resolved "https://registry.yarnpkg.com/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz#8c5a1494d2066c570cc3bfe4496175acc4d502ab" integrity sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs= +glob-to-regexp@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz#c75297087c851b9a578bd217dd59a92f59fe546e" + integrity sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw== + glob@^5.0.10: version "5.0.15" resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1" @@ -11347,7 +11032,7 @@ glob@^5.0.10: once "^1.3.0" path-is-absolute "^1.0.0" -glob@^7.0.4, glob@^7.1.3, glob@^7.1.6, glob@^7.2.0: +glob@^7.0.4, glob@^7.1.2, glob@^7.1.3, glob@^7.1.6, glob@^7.2.0: version "7.2.0" resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.0.tgz#d15535af7732e02e948f4c41628bd910293f6023" integrity sha512-lmLf6gtyrPq8tTjSmrO94wBeQbFR3HbLHbuyD69wuyQkImp2hWqMGB47OX65FBkPffO641IP9jWa1z4ivqG26Q== @@ -11359,7 +11044,7 @@ glob@^7.0.4, glob@^7.1.3, glob@^7.1.6, glob@^7.2.0: once "^1.3.0" path-is-absolute "^1.0.0" -glob@^7.1.2, glob@^7.1.4: +glob@^7.1.4: version "7.1.6" resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.6.tgz#141f33b81a7c2492e125594307480c46679278a6" integrity sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA== @@ -11427,11 +11112,6 @@ globals@^13.6.0, globals@^13.9.0: dependencies: type-fest "^0.20.2" -globals@^9.18.0: - version "9.18.0" - resolved "https://registry.yarnpkg.com/globals/-/globals-9.18.0.tgz#aa3896b3e69b487f17e31ed2143d69a8e30c2d8a" - integrity sha512-S0nG3CLEQiY/ILxqtztTWH/3iRRdyBLw6KMDxnKMchrtbj2OFmehVh0WUCfW3DUrIgx/qFrJPICrq4Z4sTR9UQ== - globalthis@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.1.tgz#40116f5d9c071f9e8fb0037654df1ab3a83b7ef9" @@ -11470,7 +11150,7 @@ globby@11.0.1: merge2 "^1.3.0" slash "^3.0.0" -globby@^11.0.1, globby@^11.0.2: +globby@^11.0.2: version "11.0.2" resolved "https://registry.yarnpkg.com/globby/-/globby-11.0.2.tgz#1af538b766a3b540ebfb58a32b2e2d5897321d83" integrity sha512-2ZThXDvvV8fYFRVIxnrMQBipZQDr7MxKAmQK1vujaj9/7eF0efG7BPUKJ7jP7G5SLF37xKDXvO4S/KKLj/Z0og== @@ -11482,6 +11162,18 @@ globby@^11.0.1, globby@^11.0.2: merge2 "^1.3.0" slash "^3.0.0" +globby@^11.0.3, globby@^11.0.4: + version "11.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b" + integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g== + dependencies: + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.2.9" + ignore "^5.2.0" + merge2 "^1.4.1" + slash "^3.0.0" + globby@^9.2.0: version "9.2.0" resolved 
"https://registry.yarnpkg.com/globby/-/globby-9.2.0.tgz#fd029a706c703d29bdd170f4b6db3a3f7a7cb63d" @@ -11508,34 +11200,16 @@ good-listener@^1.2.2: dependencies: delegate "^3.1.2" -got@^8.0.1: - version "8.3.2" - resolved "https://registry.yarnpkg.com/got/-/got-8.3.2.tgz#1d23f64390e97f776cac52e5b936e5f514d2e937" - integrity sha512-qjUJ5U/hawxosMryILofZCkm3C84PLJS/0grRIpjAwu+Lkxxj5cxeCU25BG0/3mDSpXKTyZr8oh8wIgLaH0QCw== - dependencies: - "@sindresorhus/is" "^0.7.0" - cacheable-request "^2.1.1" - decompress-response "^3.3.0" - duplexer3 "^0.1.4" - get-stream "^3.0.0" - into-stream "^3.1.0" - is-retry-allowed "^1.1.0" - isurl "^1.0.0-alpha5" - lowercase-keys "^1.0.0" - mimic-response "^1.0.0" - p-cancelable "^0.4.0" - p-timeout "^2.0.1" - pify "^3.0.0" - safe-buffer "^5.1.1" - timed-out "^4.0.1" - url-parse-lax "^3.0.0" - url-to-options "^1.0.1" - -graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.3, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@^4.2.0: +graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.3, graceful-fs@^4.1.9: version "4.2.4" resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.4.tgz#2256bde14d3632958c465ebc96dc467ca07a29fb" integrity sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw== +graceful-fs@^4.1.2, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.9: + version "4.2.9" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.9.tgz#041b05df45755e587a24942279b9d113146e1c96" + integrity sha512-NtNxqUcXgpW2iMrfqSfR73Glt39K+BLwWsPs94yR63v45T0Wbej7eRmL5cWfwEgqXnmjQp3zaJTshdRW/qC2ZQ== + "graceful-readlink@>= 1.0.0": version "1.0.1" resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725" @@ -11603,18 +11277,6 @@ has-bigints@^1.0.1: resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.1.tgz#64fe6acb020673e3b78db035a5af69aa9d07b113" integrity sha512-LSBS2LjbNBTf6287JEbEzvJgftkF5qFkmCo9hDRpAzKhUOlJ+hx8dd4USs00SgsUNwc4617J9ki5YtEClM2ffA== -has-binary2@~1.0.2: - version "1.0.3" - resolved "https://registry.yarnpkg.com/has-binary2/-/has-binary2-1.0.3.tgz#7776ac627f3ea77250cfc332dab7ddf5e4f5d11d" - integrity sha512-G1LWKhDSvhGeAQ8mPVQlqNcOB2sJdwATtZKl2pDKKHfpf/rYj24lkinxf69blJbnsvtqqNU+L3SL50vzZhXOnw== - dependencies: - isarray "2.0.1" - -has-cors@1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39" - integrity sha1-XkdHk/fqmEPRu5nCPu9J/xJv/zk= - has-flag@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" @@ -11632,23 +11294,11 @@ has-glob@^1.0.0: dependencies: is-glob "^3.0.0" -has-symbol-support-x@^1.4.1: - version "1.4.2" - resolved "https://registry.yarnpkg.com/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz#1409f98bc00247da45da67cee0a36f282ff26455" - integrity sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw== - has-symbols@^1.0.1, has-symbols@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.2.tgz#165d3070c00309752a1236a479331e3ac56f1423" integrity sha512-chXa79rL/UC2KlX17jo3vRGz0azaWEx5tGqZg5pO3NUyEJVB17dMruQlzCCOfUvElghKcm5194+BCRvi2Rv/Gw== -has-to-string-tag-x@^1.2.0: - version "1.4.1" - resolved 
"https://registry.yarnpkg.com/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz#a045ab383d7b4b2012a00148ab0aa5f290044d4d" - integrity sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw== - dependencies: - has-symbol-support-x "^1.4.1" - has-tostringtag@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.0.tgz#7e133818a7d394734f941e73c3d3f9291e658b25" @@ -11708,7 +11358,7 @@ hash-base@^3.0.0: readable-stream "^3.6.0" safe-buffer "^5.2.0" -hash-for-dep@^1.0.2, hash-for-dep@^1.2.3, hash-for-dep@^1.4.7, hash-for-dep@^1.5.0, hash-for-dep@^1.5.1: +hash-for-dep@^1.0.2, hash-for-dep@^1.4.7, hash-for-dep@^1.5.0, hash-for-dep@^1.5.1: version "1.5.1" resolved "https://registry.yarnpkg.com/hash-for-dep/-/hash-for-dep-1.5.1.tgz#497754b39bee2f1c4ade4521bfd2af0a7c1196e3" integrity sha512-/dQ/A2cl7FBPI2pO0CANkvuuVi/IFS5oTyJ0PsOb6jW6WbVW1js5qJXMJTNbWHXBIPdFTWFbabjB+mE0d+gelw== @@ -11811,11 +11461,14 @@ he@^1.2.0: resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== -heimdalljs-fs-monitor@^0.2.3: - version "0.2.3" - resolved "https://registry.yarnpkg.com/heimdalljs-fs-monitor/-/heimdalljs-fs-monitor-0.2.3.tgz#1aedd4b1c61d86c51f6141fb75c5a3350dc41b15" - integrity sha512-fYAvqSP0CxeOjLrt61B4wux/jqZzdZnS2xfb2oc14NP6BTZ8gtgtR2op6gKFakOR8lm8GN9Xhz1K4A1ZvJ4RQw== +heimdalljs-fs-monitor@^1.1.0: + version "1.1.1" + resolved "https://registry.yarnpkg.com/heimdalljs-fs-monitor/-/heimdalljs-fs-monitor-1.1.1.tgz#bb4021007e88484202402cdf594e3962d70dc4f4" + integrity sha512-BHB8oOXLRlrIaON0MqJSEjGVPDyqt2Y6gu+w2PaEZjrCxeVtZG7etEZp7M4ZQ80HNvnr66KIQ2lot2qdeG8HgQ== dependencies: + callsites "^3.1.0" + clean-stack "^2.2.0" + extract-stack "^2.0.0" heimdalljs "^0.2.3" heimdalljs-logger "^0.1.7" @@ -11839,13 +11492,6 @@ heimdalljs@^0.2.0, heimdalljs@^0.2.1, heimdalljs@^0.2.3, heimdalljs@^0.2.5, heim dependencies: rsvp "~3.2.1" -heimdalljs@^0.3.0: - version "0.3.3" - resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.3.3.tgz#e92d2c6f77fd46d5bf50b610d28ad31755054d0b" - integrity sha1-6S0sb3f9RtW/ULYQ0orTF1UFTQs= - dependencies: - rsvp "~3.2.1" - highlight.js@^10.1.1, highlight.js@~10.7.0: version "10.7.3" resolved "https://registry.yarnpkg.com/highlight.js/-/highlight.js-10.7.3.tgz#697272e3991356e40c3cac566a74eef681756531" @@ -11872,14 +11518,6 @@ hoist-non-react-statics@^3.3.0: dependencies: react-is "^16.7.0" -home-or-tmp@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-2.0.0.tgz#e36c3f2d2cae7d746a857e38d18d5f32a7882db8" - integrity sha1-42w/LSyufXRqhX440Y1fMqeILbg= - dependencies: - os-homedir "^1.0.0" - os-tmpdir "^1.0.1" - homedir-polyfill@^1.0.1: version "1.0.3" resolved "https://registry.yarnpkg.com/homedir-polyfill/-/homedir-polyfill-1.0.3.tgz#743298cef4e5af3e194161fbadcc2151d3a058e8" @@ -11892,10 +11530,10 @@ hosted-git-info@^2.1.4: resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.9.tgz#dffc0bf9a21c02209090f2aa69429e1414daf3f9" integrity sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw== -hosted-git-info@^3.0.6: - version "3.0.8" - resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-3.0.8.tgz#6e35d4cc87af2c5f816e4cb9ce350ba87a3f370d" - integrity 
sha512-aXpmwoOhRBrw6X3j0h5RloK4x1OzsxMPyxqIHyNfSe2pypkVTZFpEiRoSipPEPlMrh0HW/XsjkJ5WgnCirpNUw== +hosted-git-info@^4.0.1: + version "4.1.0" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-4.1.0.tgz#827b82867e9ff1c8d0c4d9d53880397d2c86d224" + integrity sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA== dependencies: lru-cache "^6.0.0" @@ -11966,11 +11604,6 @@ htmlparser2@^6.0.0: domutils "^2.4.4" entities "^2.0.0" -http-cache-semantics@3.8.1: - version "3.8.1" - resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz#39b0e16add9b605bf0a9ef3d9daaf4843b4cacd2" - integrity sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w== - http-errors@1.7.2: version "1.7.2" resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-1.7.2.tgz#4f5029cf13239f31036e5b2e55292bcfbcc85c8f" @@ -12085,6 +11718,11 @@ icss-utils@^4.0.0, icss-utils@^4.1.1: dependencies: postcss "^7.0.14" +icss-utils@^5.0.0, icss-utils@^5.1.0: + version "5.1.0" + resolved "https://registry.yarnpkg.com/icss-utils/-/icss-utils-5.1.0.tgz#c6be6858abd013d768e98366ae47e25d5887b1ae" + integrity sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA== + ieee754@^1.1.13, ieee754@^1.1.4: version "1.2.1" resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" @@ -12105,6 +11743,11 @@ ignore@^5.1.1, ignore@^5.1.4: resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" integrity sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== +ignore@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.2.0.tgz#6d3bac8fa7fe0d45d9f9be7bac2fc279577e345a" + integrity sha512-CmxgYGiEPCLhfLnpPp1MoRmifwEIOgjcHXxOBjv7mY96c+eWScsOP9c112ZyLdWHi0FxHjI+4uVhKYp/gcdRmQ== + immer@8.0.1: version "8.0.1" resolved "https://registry.yarnpkg.com/immer/-/immer-8.0.1.tgz#9c73db683e2b3975c424fb0572af5889877ae656" @@ -12138,11 +11781,6 @@ indexes-of@^1.0.1: resolved "https://registry.yarnpkg.com/indexes-of/-/indexes-of-1.0.1.tgz#f30f716c8e2bd346c7b67d3df3915566a7c05607" integrity sha1-8w9xbI4r00bHtn0985FVZqfAVgc= -indexof@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/indexof/-/indexof-0.0.1.tgz#82dc336d232b9062179d05ab3293a66059fd435d" - integrity sha1-gtwzbSMrkGIXnQWrMpOmYFn9Q10= - infer-owner@^1.0.3, infer-owner@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/infer-owner/-/infer-owner-1.0.4.tgz#c4cefcaa8e51051c2a40ba2ce8a3d27295af9467" @@ -12153,11 +11791,16 @@ inflected@^2.0.4: resolved "https://registry.yarnpkg.com/inflected/-/inflected-2.1.0.tgz#2816ac17a570bbbc8303ca05bca8bf9b3f959687" integrity sha512-hAEKNxvHf2Iq3H60oMBHkB4wl5jn3TPF3+fXek/sRwAB5gP9xWs4r7aweSF95f99HFoz69pnZTcu8f0SIHV18w== -inflection@1.12.0, inflection@^1.12.0: +inflection@^1.12.0: version "1.12.0" resolved "https://registry.yarnpkg.com/inflection/-/inflection-1.12.0.tgz#a200935656d6f5f6bc4dc7502e1aecb703228416" integrity sha1-ogCTVlbW9fa8TcdQLhrstwMihBY= +inflection@~1.13.1: + version "1.13.2" + resolved "https://registry.yarnpkg.com/inflection/-/inflection-1.13.2.tgz#15e8c797c6c3dadf31aa658f8df8a4ea024798b0" + integrity sha512-cmZlljCRTBFouT8UzMzrGcVEvkv6D/wBdcdKG7J1QH5cXjtU75Dm+P27v9EKu/Y43UYyCJd1WC4zLebRrC8NBw== + inflight@^1.0.4: version "1.0.6" resolved 
"https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" @@ -12259,15 +11902,7 @@ interpret@^2.2.0: resolved "https://registry.yarnpkg.com/interpret/-/interpret-2.2.0.tgz#1a78a0b5965c40a5416d007ad6f50ad27c417df9" integrity sha512-Ju0Bz/cEia55xDwUWEa8+olFpCiQoypjnQySseKtmjNrnps3P+xfpUmGr90T7yjlVJmOtybRvPXhKMbHr+fWnw== -into-stream@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/into-stream/-/into-stream-3.1.0.tgz#96fb0a936c12babd6ff1752a17d05616abd094c6" - integrity sha1-lvsKk2wSur1v8XUqF9BWFqvQlMY= - dependencies: - from2 "^2.1.1" - p-is-promise "^1.1.0" - -invariant@^2.2.2, invariant@^2.2.3, invariant@^2.2.4: +invariant@^2.2.3, invariant@^2.2.4: version "2.2.4" resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" integrity sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA== @@ -12377,17 +12012,10 @@ is-callable@^1.1.4, is-callable@^1.2.2, is-callable@^1.2.4: resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.4.tgz#47301d58dd0259407865547853df6d61fe471945" integrity sha512-nsuwtxZfMX67Oryl9LCQ+upnC0Z0BgpwntpS89m1H/TLF0zNfzfLMV/9Wa/6MZsj0acpEjAO0KF1xT6ZdLl95w== -is-core-module@^2.1.0: - version "2.8.0" - resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.8.0.tgz#0321336c3d0925e497fd97f5d95cb114a5ccd548" - integrity sha512-vd15qHsaqrRL7dtH6QNuy0ndJmRDrS9HAM1CAiSifNUFv4x1a0CCVsj18hJ1mShxIG6T2i1sO78MkP56r0nYRw== - dependencies: - has "^1.0.3" - -is-core-module@^2.2.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.2.0.tgz#97037ef3d52224d85163f5597b2b63d9afed981a" - integrity sha512-XRAfAdyyY5F5cOXn7hYQDqh2Xmii+DEfIcQGxK/uNwMHhIkPWO0g8msXcbzLe+MpGoR951MlqM/2iIlU4vKDdQ== +is-core-module@^2.1.0, is-core-module@^2.2.0, is-core-module@^2.8.1: + version "2.8.1" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.8.1.tgz#f59fdfca701d5879d0a6b100a40aa1560ce27211" + integrity sha512-SdNCUs284hr40hFTFP6l0IfZ/RSrMXF3qgoRHd3/79unUTvrFO/JoXwkGm+5J/Oe3E/b5GsnG330uUNgRpu1PA== dependencies: has "^1.0.3" @@ -12462,11 +12090,6 @@ is-extglob@^2.1.0, is-extglob@^2.1.1: resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI= -is-finite@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/is-finite/-/is-finite-1.1.0.tgz#904135c77fb42c0641d6aa1bcdbc4daa8da082f3" - integrity sha512-cdyMtqX/BOqqNBBiKlIVkytNHm49MtMlYyn1zxzvJKWmFMlGzm+ry5BBfYyeY9YmNKbRSo/o7OX9w9ale0wg3w== - is-fullwidth-code-point@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" @@ -12515,7 +12138,7 @@ is-glob@^4.0.0: dependencies: is-extglob "^2.1.1" -is-glob@^4.0.1, is-glob@~4.0.1: +is-glob@^4.0.1, is-glob@^4.0.3, is-glob@~4.0.1: version "4.0.3" resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084" integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== @@ -12539,6 +12162,11 @@ is-ip@^3.1.0: dependencies: ip-regex "^4.0.0" +is-language-code@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-language-code/-/is-language-code-2.0.0.tgz#6f4d59c551d73b98c45cf9f1d3ce65cee060e65b" + integrity 
sha512-6xKmRRcP2YdmMBZMVS3uiJRPQgcMYolkD6hFw2Y4KjqyIyaJlCGxUt56tuu0iIV8q9r8kMEo0Gjd/GFwKrgjbw== + is-map@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.2.tgz#00922db8c9bf73e81b7a335827bc2a43f2b91127" @@ -12578,21 +12206,11 @@ is-obj@^2.0.0: resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-2.0.0.tgz#473fb05d973705e3fd9620545018ca8e22ef4982" integrity sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w== -is-object@^1.0.1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/is-object/-/is-object-1.0.2.tgz#a56552e1c665c9e950b4a025461da87e72f86fcf" - integrity sha512-2rRIahhZr2UWb45fIOuvZGpFtz0TyOZLf32KxBbSoUCeZR495zCKlWUKKUByk3geS2eAs7ZAABt0Y/Rx0GiQGA== - is-plain-obj@2.1.0, is-plain-obj@^2.0.0: version "2.1.0" resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287" integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA== -is-plain-obj@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" - integrity sha1-caUMhCnfync8kqOQpKA7OfzVHT4= - is-plain-object@^2.0.3, is-plain-object@^2.0.4: version "2.0.4" resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" @@ -12625,11 +12243,6 @@ is-regexp@^1.0.0: resolved "https://registry.yarnpkg.com/is-regexp/-/is-regexp-1.0.0.tgz#fd2d883545c46bac5a633e7b9a09e87fa2cb5069" integrity sha1-/S2INUXEa6xaYz57mgnof6LLUGk= -is-retry-allowed@^1.1.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/is-retry-allowed/-/is-retry-allowed-1.2.0.tgz#d778488bd0a4666a3be8a1482b9f2baafedea8b4" - integrity sha512-RUbUeKwvm3XG2VYamhJL1xFktgjvPzL0Hq8C+6yrWIswDy3BIXGqCxhxkc30N9jqK311gVU137K8Ei55/zVJRg== - is-root@2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/is-root/-/is-root-2.1.0.tgz#809e18129cf1129644302a4f8544035d51984a9c" @@ -12651,9 +12264,9 @@ is-stream@^1.1.0: integrity sha1-EtSj3U5o4Lec6428hBc66A2RykQ= is-stream@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.0.tgz#bde9c32680d6fae04129d6ac9d921ce7815f78e3" - integrity sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw== + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077" + integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg== is-string@^1.0.5: version "1.0.5" @@ -12693,6 +12306,11 @@ is-typedarray@^1.0.0, is-typedarray@~1.0.0: resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo= +is-unicode-supported@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz#3f26c76a809593b52bfa2ecb5710ed2779b522a7" + integrity sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw== + is-weakref@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.0.1.tgz#842dba4ec17fa9ac9850df2d6efbc1737274f2a2" @@ -12737,11 +12355,6 @@ isarray@1.0.0, isarray@^1.0.0, isarray@~1.0.0: resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" integrity 
sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE= -isarray@2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.1.tgz#a37d94ed9cda2d59865c9f76fe596ee1f338741e" - integrity sha1-o32U7ZzaLVmGXJ92/llu4fM4dB4= - isarray@^2.0.5: version "2.0.5" resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.5.tgz#8af1e4c1221244cc62459faf38940d4e644a5723" @@ -12797,14 +12410,6 @@ istextorbinary@^2.5.1: editions "^2.2.0" textextensions "^2.5.0" -isurl@^1.0.0-alpha5: - version "1.0.0" - resolved "https://registry.yarnpkg.com/isurl/-/isurl-1.0.0.tgz#b27f4f49f3cdaa3ea44a0a5b7f3462e6edc39d67" - integrity sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w== - dependencies: - has-to-string-tag-x "^1.2.0" - is-object "^1.0.1" - iterate-iterator@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/iterate-iterator/-/iterate-iterator-1.0.1.tgz#1693a768c1ddd79c969051459453f082fe82e9f6" @@ -12835,15 +12440,24 @@ jest-worker@^26.5.0: merge-stream "^2.0.0" supports-color "^7.0.0" -jquery@^3.4.1, jquery@^3.5.0: +jest-worker@^27.4.5: + version "27.5.1" + resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-27.5.1.tgz#8d146f0900e8973b106b6f73cc1e9a8cb86f8db0" + integrity sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg== + dependencies: + "@types/node" "*" + merge-stream "^2.0.0" + supports-color "^8.0.0" + +jquery@^3.4.1: version "3.5.1" resolved "https://registry.yarnpkg.com/jquery/-/jquery-3.5.1.tgz#d7b4d08e1bfdb86ad2f1a3d039ea17304717abb5" integrity sha512-XwIBPqcMn57FxfT+Go5pzySnm4KWkT1Tv7gjrpT1srtf8Weynl6R273VJ5GjkRb51IzMp5nbaPjJXMWeju2MKg== -js-reporters@1.2.3: - version "1.2.3" - resolved "https://registry.yarnpkg.com/js-reporters/-/js-reporters-1.2.3.tgz#8febcab370539df62e09b95da133da04b11f6168" - integrity sha512-2YzWkHbbRu6LueEs5ZP3P1LqbECvAeUJYrjw3H4y1ofW06hqCS0AbzBtLwbr+Hke51bt9CUepJ/Fj1hlCRIF6A== +jquery@^3.5.1: + version "3.6.0" + resolved "https://registry.yarnpkg.com/jquery/-/jquery-3.6.0.tgz#c72a09f15c1bdce142f49dbf1170bdf8adac2470" + integrity sha512-JVzAR/AjBvVt2BmYhxRCSYysDsPcssdmTFnzyLEts9qNwmjmu4JTAMYubEfwVOSwpQ1I1sKKFcxhZCI2buerfw== js-string-escape@^1.0.1: version "1.0.1" @@ -12855,11 +12469,6 @@ js-string-escape@^1.0.1: resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== -js-tokens@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-3.0.2.tgz#9866df395102130e38f7f996bceb65443209c25b" - integrity sha1-mGbfOVECEw449/mWvOtlRDIJwls= - js-yaml@^3.13.1, js-yaml@^3.14.0, js-yaml@^3.2.5, js-yaml@^3.2.7: version "3.14.1" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537" @@ -12905,11 +12514,6 @@ jsdom@^16.4.0: ws "^7.2.3" xml-name-validator "^3.0.0" -jsesc@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-1.3.0.tgz#46c3fec8c1892b12b0833db9bc7622176dbab34b" - integrity sha1-RsP+yMGJKxKwgz25vHYiF226s0s= - jsesc@^2.5.1: version "2.5.2" resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" @@ -12932,11 +12536,6 @@ json-api-serializer@^2.6.0: dependencies: setimmediate "^1.0.5" -json-buffer@3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" - 
integrity sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg= - json-parse-better-errors@^1.0.1, json-parse-better-errors@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" @@ -13046,13 +12645,6 @@ just-extend@^4.0.2: resolved "https://registry.yarnpkg.com/just-extend/-/just-extend-4.1.1.tgz#158f1fdb01f128c411dc8b286a7b4837b3545282" integrity sha512-aWgeGFW67BP3e5181Ep1Fv2v8z//iBJfrvyTnq8wG86vEESwmonn1zPBJ0VfmT9CJq2FIT0VsETtrNFm2a+SHA== -keyv@3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.0.0.tgz#44923ba39e68b12a7cec7df6c3268c031f2ef373" - integrity sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA== - dependencies: - json-buffer "3.0.0" - kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0: version "3.2.2" resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64" @@ -13130,6 +12722,14 @@ levn@~0.3.0: prelude-ls "~1.1.2" type-check "~0.3.2" +line-column@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/line-column/-/line-column-1.0.2.tgz#d25af2936b6f4849172b312e4792d1d987bc34a2" + integrity sha1-0lryk2tvSEkXKzEuR5LR2Ye8NKI= + dependencies: + isarray "^1.0.0" + isobject "^2.0.0" + lines-and-columns@^1.1.6: version "1.1.6" resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.1.6.tgz#1c00c743b433cd0a4e80758f7b64a57440d9ff00" @@ -13190,10 +12790,10 @@ listr2@^3.12.2: through "^2.3.8" wrap-ansi "^7.0.0" -livereload-js@^2.3.0: - version "2.4.0" - resolved "https://registry.yarnpkg.com/livereload-js/-/livereload-js-2.4.0.tgz#447c31cf1ea9ab52fc20db615c5ddf678f78009c" - integrity sha512-XPQH8Z2GDP/Hwz2PCDrh2mth4yFejwA1OZ/81Ti3LgKyhDcEjsSsqFWZojHG0va/duGd+WyosY7eXLDoOyqcPw== +livereload-js@^3.3.1: + version "3.3.3" + resolved "https://registry.yarnpkg.com/livereload-js/-/livereload-js-3.3.3.tgz#3e4f5699f741fdf8be6dc9c46c523e4fc1abbd0d" + integrity sha512-a7Jipme3XIBIryJluWP5LQrEAvhobDPyScBe+q+MYwxBiMT2Ck7msy4tAdF8TAa33FMdJqX4guP81Yhiu6BkmQ== load-json-file@^4.0.0: version "4.0.0" @@ -13210,6 +12810,11 @@ loader-runner@^2.4.0: resolved "https://registry.yarnpkg.com/loader-runner/-/loader-runner-2.4.0.tgz#ed47066bfe534d7e84c4c7b9998c2a75607d9357" integrity sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw== +loader-runner@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/loader-runner/-/loader-runner-4.2.0.tgz#d7022380d66d14c5fb1d496b89864ebcfd478384" + integrity sha512-92+huvxMvYlMzMt0iIOukcwYBFpkYJdpl2xsZ7LrlayO7E8SOv+JJUEK17B/dJIHAOLMfh2dZZ/Y18WgmGtYNw== + loader-utils@2.0.0, loader-utils@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/loader-utils/-/loader-utils-2.0.0.tgz#e4cace5b816d425a166b5f097e10cd12b36064b0" @@ -13566,7 +13171,7 @@ lodash.values@^4.3.0: resolved "https://registry.yarnpkg.com/lodash.values/-/lodash.values-4.3.0.tgz#a3a6c2b0ebecc5c2cba1c17e6e620fe81b53d347" integrity sha1-o6bCsOvsxcLLocF+bmIP6BtT00c= -lodash@^4.17.10, lodash@^4.17.11, lodash@^4.17.12, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21, lodash@^4.17.4, lodash@^4.5.1: +lodash@^4.17.10, lodash@^4.17.12, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17.19, lodash@^4.17.20, lodash@^4.17.21, lodash@^4.5.1: version "4.17.21" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity 
sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== @@ -13578,12 +13183,13 @@ log-symbols@^2.2.0: dependencies: chalk "^2.0.1" -log-symbols@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.0.0.tgz#69b3cc46d20f448eccdb75ea1fa733d9e821c920" - integrity sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA== +log-symbols@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.1.0.tgz#3fbdbb95b4683ac9fc785111e792e558d4abd503" + integrity sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg== dependencies: - chalk "^4.0.0" + chalk "^4.1.0" + is-unicode-supported "^0.1.0" log-update@^4.0.0: version "4.0.0" @@ -13595,18 +13201,6 @@ log-update@^4.0.0: slice-ansi "^4.0.0" wrap-ansi "^6.2.0" -lolex@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/lolex/-/lolex-4.2.0.tgz#ddbd7f6213ca1ea5826901ab1222b65d714b3cd7" - integrity sha512-gKO5uExCXvSm6zbF562EvM+rd1kQDnB9AZBbiQVzf1ZmdDpxUSvpnAaVOP83N/31mRK8Ml8/VE8DMvsAZQ+7wg== - -lolex@^5.0.1: - version "5.1.2" - resolved "https://registry.yarnpkg.com/lolex/-/lolex-5.1.2.tgz#953694d098ce7c07bc5ed6d0e42bc6c0c6d5a367" - integrity sha512-h4hmjAvHTmd+25JSwrtTIuwbKdwg5NzZVRMLn9saij4SZaepCrTCxPr35H/3bjwfMJtN+t3CX8672UIkglz28A== - dependencies: - "@sinonjs/commons" "^1.7.0" - loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: version "1.4.0" resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" @@ -13621,16 +13215,6 @@ lower-case@^2.0.2: dependencies: tslib "^2.0.3" -lowercase-keys@1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.0.tgz#4e3366b39e7f5457e35f1324bdf6f88d0bfc7306" - integrity sha1-TjNms55/VFfjXxMkvfb4jQv8cwY= - -lowercase-keys@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" - integrity sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA== - lowlight@^1.14.0: version "1.20.0" resolved "https://registry.yarnpkg.com/lowlight/-/lowlight-1.20.0.tgz#ddb197d33462ad0d93bf19d17b6c301aa3941888" @@ -13665,6 +13249,13 @@ magic-string@^0.24.0: dependencies: sourcemap-codec "^1.4.1" +magic-string@^0.25.7: + version "0.25.7" + resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.25.7.tgz#3f497d6fd34c669c6798dcb821f2ef31f5445051" + integrity sha512-4CrMT5DOHTDk4HYDlzmwu4FVCcIYI8gauveasrdCu2IKIFOJ3f0v/8MDGJCDL9oD2ppz/Av1b0Nj345H9M+XIA== + dependencies: + sourcemap-codec "^1.4.4" + make-dir@^2.0.0, make-dir@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-2.1.0.tgz#5f0310e18b8be898cc07009295a30ae41e91e6f5" @@ -13720,13 +13311,13 @@ markdown-it-terminal@0.2.1: lodash.merge "^4.6.2" markdown-it "^8.3.1" -markdown-it@^11.0.0: - version "11.0.1" - resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-11.0.1.tgz#b54f15ec2a2193efa66dda1eb4173baea08993d6" - integrity sha512-aU1TzmBKcWNNYvH9pjq6u92BML+Hz3h5S/QpfTFwiQF852pLT+9qHsrhM9JYipkOXZxGn+sGH8oyJE9FD9WezQ== +markdown-it@^12.0.4: + version "12.3.2" + resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-12.3.2.tgz#bf92ac92283fe983fe4de8ff8abfb5ad72cd0c90" + integrity sha512-TchMembfxfNVpHkbtriWltGWc+m3xszaRD0CZup7GFFhzIgQqxIfn3eGj1yZpfuflzPvfkt611B2Q/Bsk1YnGg== 
dependencies: - argparse "^1.0.7" - entities "~2.0.0" + argparse "^2.0.1" + entities "~2.1.0" linkify-it "^3.0.1" mdurl "^1.0.1" uc.micro "^1.0.5" @@ -13818,10 +13409,10 @@ mdast-util-to-hast@10.0.1: unist-util-position "^3.0.0" unist-util-visit "^2.0.0" -mdn-data@2.0.14: - version "2.0.14" - resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.14.tgz#7113fc4281917d63ce29b43446f701e68c25ba50" - integrity sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow== +mdn-data@2.0.23: + version "2.0.23" + resolved "https://registry.yarnpkg.com/mdn-data/-/mdn-data-2.0.23.tgz#dfb6c41e50a0edb808cf340973ab29321b70808e" + integrity sha512-IonVb7pfla2U4zW8rc7XGrtgq11BvYeCxWN8HS+KFBnLDE7XDK9AAMVhRuG6fj9BBsjc69Fqsp6WEActEdNTDQ== mdn-data@~1.1.0: version "1.1.4" @@ -13920,15 +13511,15 @@ merge-trees@^2.0.0: fs-updater "^1.0.4" heimdalljs "^0.2.5" -merge2@^1.2.3, merge2@^1.3.0: +merge2@^1.2.3, merge2@^1.3.0, merge2@^1.4.1: version "1.4.1" resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== -merge@^1.2.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/merge/-/merge-1.2.1.tgz#38bebf80c3220a8a487b6fcfb3941bb11720c145" - integrity sha512-VjFo4P5Whtj4vsLzsYBu5ayHhoHJ0UqNm7ibvShmbmoz7tGi0vXaoJbGdB+GmDMLUdg8DpQXEIeVDAe8MaABvQ== +merge@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/merge/-/merge-2.1.1.tgz#59ef4bf7e0b3e879186436e8481c06a6c162ca98" + integrity sha512-jz+Cfrg9GWOZbQAnDQ4hlVnQky+341Yk5ru8bZSe6sIDTCIg8n9i/u7hSQGSVOF3C7lH6mGtqjkiT9G4wFLL0w== methods@~1.1.2: version "1.1.2" @@ -14037,11 +13628,6 @@ mimic-fn@^2.1.0: resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== -mimic-response@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.1.tgz#4923538878eef42063cb8a3e3b0798781487ab1b" - integrity sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ== - min-document@^2.19.0: version "2.19.0" resolved "https://registry.yarnpkg.com/min-document/-/min-document-2.19.0.tgz#7bd282e3f5842ed295bb748cdd9f1ffa2c824685" @@ -14049,6 +13635,13 @@ min-document@^2.19.0: dependencies: dom-walk "^0.1.0" +mini-css-extract-plugin@^2.5.2: + version "2.5.3" + resolved "https://registry.yarnpkg.com/mini-css-extract-plugin/-/mini-css-extract-plugin-2.5.3.tgz#c5c79f9b22ce9b4f164e9492267358dbe35376d9" + integrity sha512-YseMB8cs8U/KCaAGQoqYmfUuhhGW0a9p9XvWXrxVOkE3/IiISTLw4ALNt7JR5B2eYauFM+PQGSbXMDmVbR7Tfw== + dependencies: + schema-utils "^4.0.0" + minimalistic-assert@^1.0.0, minimalistic-assert@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7" @@ -14059,7 +13652,14 @@ minimalistic-crypto-utils@^1.0.1: resolved "https://registry.yarnpkg.com/minimalistic-crypto-utils/-/minimalistic-crypto-utils-1.0.1.tgz#f6c00c1c0b082246e5c4d99dfb8c7c083b2b582a" integrity sha1-9sAMHAsIIkblxNmd+4x8CDsrWCo= -"minimatch@2 || 3", minimatch@3.0.4, minimatch@^3.0.0, minimatch@^3.0.2, minimatch@^3.0.4: +"minimatch@2 || 3", minimatch@^3.0.0, minimatch@^3.0.2, minimatch@^3.0.4: + version "3.1.2" + resolved 
"https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" + integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== + dependencies: + brace-expansion "^1.1.7" + +minimatch@3.0.4: version "3.0.4" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== @@ -14121,9 +13721,9 @@ minizlib@^2.1.1: yallist "^4.0.0" miragejs@^0.1.31: - version "0.1.41" - resolved "https://registry.yarnpkg.com/miragejs/-/miragejs-0.1.41.tgz#1b06a2d2d9de65624f5bb1cee7ebb4a208f554d0" - integrity sha512-ur8x7sBskgey64vdzKGVCVC3hgKXWl2Cg5lZbxd6OmKrhr9LCCP/Bv7qh4wsQxIMHZnENxybFATXnrQ+rzSOWQ== + version "0.1.43" + resolved "https://registry.yarnpkg.com/miragejs/-/miragejs-0.1.43.tgz#47a8546b9e3489f806073e681f380ccfe13d757d" + integrity sha512-BhkyxssOZ2i4JqRjWpRnUQu9AFAKHyft8dJbqsg/N64+gCn2vw6vRteMpTKXllLjCPOA9Os8PhGRVXlXs4Ojhw== dependencies: "@miragejs/pretender-node-polyfill" "^0.1.0" inflected "^2.0.4" @@ -14150,7 +13750,7 @@ miragejs@^0.1.31: lodash.uniq "^4.5.0" lodash.uniqby "^4.7.0" lodash.values "^4.3.0" - pretender "^3.4.3" + pretender "^3.4.7" mississippi@^3.0.0: version "3.0.0" @@ -14205,7 +13805,14 @@ moment-timezone@^0.5.13: dependencies: moment ">= 2.9.0" -"moment@>= 2.9.0", moment@^2.19.3: +moment-timezone@^0.5.33: + version "0.5.34" + resolved "https://registry.yarnpkg.com/moment-timezone/-/moment-timezone-0.5.34.tgz#a75938f7476b88f155d3504a9343f7519d9a405c" + integrity sha512-3zAEHh2hKUs3EXLESx/wsgw6IQdusOT8Bxm3D9UrHPQR7zlMmzwybC8zHEM1tQ4LJwP7fcxrWr8tuBg05fFCbg== + dependencies: + moment ">= 2.9.0" + +"moment@>= 2.9.0", moment@^2.19.3, moment@^2.29.1: version "2.29.1" resolved "https://registry.yarnpkg.com/moment/-/moment-2.29.1.tgz#b2be769fa31940be9eeea6469c075e35006fa3d3" integrity sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ== @@ -14278,6 +13885,11 @@ nan@^2.12.1: resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.2.tgz#f5376400695168f4cc694ac9393d0c9585eeea19" integrity sha512-M2ufzIiINKCuDfBSAUr1vWQ+vuVcA9kqx8JJUsbQi6yf1uGRyb7HfpdfUr5qLXf3B/t8dPvcjhKMmlfnP47EzQ== +nanoid@^3.3.1: + version "3.3.1" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.1.tgz#6347a18cac88af88f58af0b3594b723d5e99bb35" + integrity sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw== + nanomatch@^1.2.9: version "1.2.13" resolved "https://registry.yarnpkg.com/nanomatch/-/nanomatch-1.2.13.tgz#b87a8aa4fc0de8fe6be88895b38983ff265bd119" @@ -14305,7 +13917,7 @@ negotiator@0.6.2: resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb" integrity sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw== -neo-async@^2.5.0, neo-async@^2.6.0, neo-async@^2.6.1: +neo-async@^2.5.0, neo-async@^2.6.0, neo-async@^2.6.1, neo-async@^2.6.2: version "2.6.2" resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.2.tgz#b4aafb93e3aeb2d8174ca53cf163ab7d7308305f" integrity sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw== @@ -14320,15 +13932,15 @@ nice-try@^1.0.4: resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" integrity 
sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== -nise@^1.5.2: - version "1.5.3" - resolved "https://registry.yarnpkg.com/nise/-/nise-1.5.3.tgz#9d2cfe37d44f57317766c6e9408a359c5d3ac1f7" - integrity sha512-Ymbac/94xeIrMf59REBPOv0thr+CJVFMhrlAkW/gjCIE58BGQdCj0x7KRCb3yz+Ga2Rz3E9XXSvUyyxqqhjQAQ== +nise@^4.0.4: + version "4.1.0" + resolved "https://registry.yarnpkg.com/nise/-/nise-4.1.0.tgz#8fb75a26e90b99202fa1e63f448f58efbcdedaf6" + integrity sha512-eQMEmGN/8arp0xsvGoQ+B1qvSkR73B1nWSCh7nOt5neMCtwcQVYQGdzQMhcNscktTsWB54xnlSQFzOAPJD8nXA== dependencies: - "@sinonjs/formatio" "^3.2.1" + "@sinonjs/commons" "^1.7.0" + "@sinonjs/fake-timers" "^6.0.0" "@sinonjs/text-encoding" "^0.7.1" just-extend "^4.0.2" - lolex "^5.0.1" path-to-regexp "^1.7.0" no-case@^3.0.4: @@ -14339,6 +13951,13 @@ no-case@^3.0.4: lower-case "^2.0.2" tslib "^2.0.3" +node-fetch@^2.6.0: + version "2.6.7" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.7.tgz#24de9fba827e3b4ae44dc8b20256a379160052ad" + integrity sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ== + dependencies: + whatwg-url "^5.0.0" + node-fetch@^2.6.1: version "2.6.1" resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.1.tgz#045bd323631f76ed2e2b55573394416b639a0052" @@ -14388,17 +14007,6 @@ node-modules-regexp@^1.0.0: resolved "https://registry.yarnpkg.com/node-modules-regexp/-/node-modules-regexp-1.0.0.tgz#8d9dbe28964a4ac5712e9131642107c71e90ec40" integrity sha1-jZ2+KJZKSsVxLpExZCEHxx6Q7EA= -node-notifier@^5.0.1: - version "5.4.5" - resolved "https://registry.yarnpkg.com/node-notifier/-/node-notifier-5.4.5.tgz#0cbc1a2b0f658493b4025775a13ad938e96091ef" - integrity sha512-tVbHs7DyTLtzOiN78izLA85zRqB9NvEXkAf014Vx3jtSvn/xBl6bR8ZYifj+dFcFrKI21huSQgJZ6ZtL3B4HfQ== - dependencies: - growly "^1.3.0" - is-wsl "^1.1.0" - semver "^5.5.0" - shellwords "^0.1.1" - which "^1.3.0" - node-notifier@^9.0.1: version "9.0.1" resolved "https://registry.yarnpkg.com/node-notifier/-/node-notifier-9.0.1.tgz#cea837f4c5e733936c7b9005e6545cea825d1af4" @@ -14411,25 +14019,25 @@ node-notifier@^9.0.1: uuid "^8.3.0" which "^2.0.2" -node-releases@^1.1.61, node-releases@^1.1.77: +node-releases@^1.1.61, node-releases@^1.1.70, node-releases@^1.1.77: version "1.1.77" resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.77.tgz#50b0cfede855dd374e7585bf228ff34e57c1c32e" integrity sha512-rB1DUFUNAN4Gn9keO2K1efO35IDK7yKHCdCaIMvFO7yUYmmZYeDjnGKle26G4rwj+LKRQpjyUUvMkPglwGCYNQ== -node-releases@^1.1.70: - version "1.1.70" - resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-1.1.70.tgz#66e0ed0273aa65666d7fe78febe7634875426a08" - integrity sha512-Slf2s69+2/uAD79pVVQo8uSiC34+g8GWY8UH2Qtqv34ZfhYrxpYpfzs9Js9d6O0mbDmALuxaTlplnBTnSELcrw== +node-releases@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.2.tgz#7139fe71e2f4f11b47d4d2986aaf8c48699e0c01" + integrity sha512-XxYDdcQ6eKqp/YjI+tb2C5WM2LgjnZrfYg4vgQt49EK268b6gYCHsBLrK2qvJo4FmCtqmKezb0WZFK4fkrZNsg== node-uuid@~1.4.0: version "1.4.8" resolved "https://registry.yarnpkg.com/node-uuid/-/node-uuid-1.4.8.tgz#b040eb0923968afabf8d32fb1f17f1167fdab907" integrity sha1-sEDrCSOWivq/jTL7HxfxFn/auQc= -node-watch@0.7.1: - version "0.7.1" - resolved "https://registry.yarnpkg.com/node-watch/-/node-watch-0.7.1.tgz#0caaa6a6833b0d533487f953c52a6c787769ba7c" - integrity sha512-UWblPYuZYrkCQCW5PxAwYSxaELNBLUckrTBBk8xr1/bUgyOkYYTsUcV4e3ytcazFEOyiRyiUrsG37pu6I0I05g== 
+node-watch@0.7.3: + version "0.7.3" + resolved "https://registry.yarnpkg.com/node-watch/-/node-watch-0.7.3.tgz#6d4db88e39c8d09d3ea61d6568d80e5975abc7ab" + integrity sha512-3l4E8uMPY1HdMMryPRUAl+oIHtXtyiTlIiESNSVSNxcPfzAFzeTbXFQkZfAwBbo0B1qMSG8nUABx+Gd+YrbKrQ== nopt@^3.0.6: version "3.0.6" @@ -14465,27 +14073,18 @@ normalize-range@^0.1.2: resolved "https://registry.yarnpkg.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" integrity sha1-LRDAa9/TEuqXd2laTShDlFa3WUI= -normalize-url@2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-2.0.1.tgz#835a9da1551fa26f70e92329069a23aa6574d7e6" - integrity sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw== - dependencies: - prepend-http "^2.0.0" - query-string "^5.0.1" - sort-keys "^2.0.0" - npm-git-info@^1.0.3: version "1.0.3" resolved "https://registry.yarnpkg.com/npm-git-info/-/npm-git-info-1.0.3.tgz#a933c42ec321e80d3646e0d6e844afe94630e1d5" integrity sha1-qTPELsMh6A02RuDW6ESv6UYw4dU= -npm-package-arg@^8.0.1: - version "8.1.0" - resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-8.1.0.tgz#b5f6319418c3246a1c38e1a8fbaa06231bc5308f" - integrity sha512-/ep6QDxBkm9HvOhOg0heitSd7JHA1U7y1qhhlRlteYYAi9Pdb/ZV7FW5aHpkrpM8+P+4p/jjR8zCyKPBMBjSig== +npm-package-arg@^8.1.1: + version "8.1.5" + resolved "https://registry.yarnpkg.com/npm-package-arg/-/npm-package-arg-8.1.5.tgz#3369b2d5fe8fdc674baa7f1786514ddc15466e44" + integrity sha512-LhgZrg0n0VgvzVdSm1oiZworPbTxYHUJCgtsJW8mGvlDpxTM1vSJc3m5QZeUkhAHIzbz3VCHd/R4osi1L1Tg/Q== dependencies: - hosted-git-info "^3.0.6" - semver "^7.0.0" + hosted-git-info "^4.0.1" + semver "^7.3.4" validate-npm-package-name "^3.0.0" npm-run-all@^4.1.5: @@ -14757,17 +14356,18 @@ ora@^3.4.0: strip-ansi "^5.2.0" wcwidth "^1.0.1" -ora@^5.1.0: - version "5.3.0" - resolved "https://registry.yarnpkg.com/ora/-/ora-5.3.0.tgz#fb832899d3a1372fe71c8b2c534bbfe74961bb6f" - integrity sha512-zAKMgGXUim0Jyd6CXK9lraBnD3H5yPGBPPOkC23a2BG6hsm4Zu6OQSjQuEtV0BHDf4aKHcUFvJiGRrFuW3MG8g== +ora@^5.4.0: + version "5.4.1" + resolved "https://registry.yarnpkg.com/ora/-/ora-5.4.1.tgz#1b2678426af4ac4a509008e5e4ac9e9959db9e18" + integrity sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ== dependencies: - bl "^4.0.3" + bl "^4.1.0" chalk "^4.1.0" cli-cursor "^3.1.0" cli-spinners "^2.5.0" is-interactive "^1.0.0" - log-symbols "^4.0.0" + is-unicode-supported "^0.1.0" + log-symbols "^4.1.0" strip-ansi "^6.0.0" wcwidth "^1.0.1" @@ -14781,7 +14381,7 @@ os-homedir@^1.0.0: resolved "https://registry.yarnpkg.com/os-homedir/-/os-homedir-1.0.2.tgz#ffbc4988336e0e833de0c168c7ef152121aa7fb3" integrity sha1-/7xJiDNuDoM94MFox+8VISGqf7M= -os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.1, os-tmpdir@~1.0.2: +os-tmpdir@^1.0.0, os-tmpdir@~1.0.1, os-tmpdir@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= @@ -14806,11 +14406,6 @@ p-all@^2.1.0: dependencies: p-map "^2.0.0" -p-cancelable@^0.4.0: - version "0.4.1" - resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-0.4.1.tgz#35f363d67d52081c8d9585e37bcceb7e0bbcb2a0" - integrity sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ== - p-defer@^3.0.0: version "3.0.0" resolved 
"https://registry.yarnpkg.com/p-defer/-/p-defer-3.0.0.tgz#d1dceb4ee9b2b604b1d94ffec83760175d4e6f83" @@ -14840,11 +14435,6 @@ p-finally@^2.0.0: resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-2.0.1.tgz#bd6fcaa9c559a096b680806f4d657b3f0f240561" integrity sha512-vpm09aKwq6H9phqRQzecoDpD8TmVyGw70qmWlyq5onxY7tqyTTFVvxMykxQSQKILBSFlbXpypIw2T1Ml7+DDtw== -p-is-promise@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/p-is-promise/-/p-is-promise-1.1.0.tgz#9c9456989e9f6588017b0434d56097675c3da05e" - integrity sha1-nJRWmJ6fZYgBewQ01WCXZ1w9oF4= - p-limit@^1.1.0: version "1.3.0" resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-1.3.0.tgz#b86bd5f0c25690911c7590fcbfc2010d54b3ccb8" @@ -14913,13 +14503,6 @@ p-map@^4.0.0: dependencies: aggregate-error "^3.0.0" -p-timeout@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-2.0.1.tgz#d8dd1979595d2dc0139e1fe46b8b646cb3cdf038" - integrity sha512-88em58dDVB/KzPEx1X0N3LwFfYZPyDc4B6eF38M1rk9VTZMbxXXgjugz8mmwpS9Ox4BDZ+t6t3QP5+/gazweIA== - dependencies: - p-finally "^1.0.0" - p-timeout@^3.1.0: version "3.2.0" resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-3.2.0.tgz#c7e17abc971d2a7962ef83626b35d635acf23dfe" @@ -15017,6 +14600,11 @@ parse-passwd@^1.0.0: resolved "https://registry.yarnpkg.com/parse-passwd/-/parse-passwd-1.0.0.tgz#6d5b934a456993b23d37f40a382d6f1666a8e5c6" integrity sha1-bVuTSkVpk7I9N/QKOC1vFmao5cY= +parse-static-imports@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/parse-static-imports/-/parse-static-imports-1.1.0.tgz#ae2f18f18da1a993080ae406a5219455c0bbad5d" + integrity sha512-HlxrZcISCblEV0lzXmAHheH/8qEkKgmqkdxyHTPbSqsTUV8GzqmN1L+SSti+VbNPfbBO3bYLPHDiUs2avbAdbA== + parse5-htmlparser2-tree-adapter@^6.0.0: version "6.0.1" resolved "https://registry.yarnpkg.com/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-6.0.1.tgz#2cdf9ad823321140370d4dbf5d3e92c7c8ddc6e6" @@ -15034,16 +14622,6 @@ parse5@^6.0.0, parse5@^6.0.1: resolved "https://registry.yarnpkg.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== -parseqs@0.0.6: - version "0.0.6" - resolved "https://registry.yarnpkg.com/parseqs/-/parseqs-0.0.6.tgz#8e4bb5a19d1cdc844a08ac974d34e273afa670d5" - integrity sha512-jeAGzMDbfSHHA091hr0r31eYfTig+29g3GKKE/PPbEQ65X0lmMwlEoqmhzu0iztID5uJpZsFlUPDP8ThPL7M8w== - -parseuri@0.0.6: - version "0.0.6" - resolved "https://registry.yarnpkg.com/parseuri/-/parseuri-0.0.6.tgz#e1496e829e3ac2ff47f39a4dd044b32823c4a25a" - integrity sha512-AUjen8sAkGgao7UyCX6Ahv0gIK2fABKmYjvP4xmy5JaKvcbTRueIqIPHLAfq30xJddqSE033IOMUSOMCcK3Sow== - parseurl@~1.3.2, parseurl@~1.3.3: version "1.3.3" resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" @@ -15082,7 +14660,7 @@ path-exists@^4.0.0: resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== -path-is-absolute@1.0.1, path-is-absolute@^1.0.0, path-is-absolute@^1.0.1: +path-is-absolute@1.0.1, path-is-absolute@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= @@ -15097,7 +14675,7 @@ path-key@^3.0.0, path-key@^3.1.0: resolved 
"https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== -path-parse@^1.0.6: +path-parse@^1.0.6, path-parse@^1.0.7: version "1.0.7" resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== @@ -15164,6 +14742,11 @@ picocolors@^0.2.1: resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-0.2.1.tgz#570670f793646851d1ba135996962abad587859f" integrity sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA== +picocolors@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" + integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== + picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3: version "2.3.0" resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.0.tgz#f1f061de8f6a4bf022892e2d128234fb98302972" @@ -15264,7 +14847,7 @@ polished@^4.0.5: dependencies: "@babel/runtime" "^7.14.0" -portfinder@^1.0.26: +portfinder@^1.0.28: version "1.0.28" resolved "https://registry.yarnpkg.com/portfinder/-/portfinder-1.0.28.tgz#67c4622852bd5374dd1dd900f779f53462fac778" integrity sha512-Se+2isanIcEqf2XMHjyUKskczxbPH7dQnlMjXX6+dybayyHvAf/TCgyMRlzf/B6QDhAEFOGes0pzRo3by4AbMA== @@ -15303,6 +14886,11 @@ postcss-modules-extract-imports@^2.0.0: dependencies: postcss "^7.0.5" +postcss-modules-extract-imports@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz#cda1f047c0ae80c97dbe28c3e76a43b88025741d" + integrity sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw== + postcss-modules-local-by-default@^3.0.2: version "3.0.3" resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-3.0.3.tgz#bb14e0cc78279d504dbdcbfd7e0ca28993ffbbb0" @@ -15313,6 +14901,15 @@ postcss-modules-local-by-default@^3.0.2: postcss-selector-parser "^6.0.2" postcss-value-parser "^4.1.0" +postcss-modules-local-by-default@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.0.tgz#ebbb54fae1598eecfdf691a02b3ff3b390a5a51c" + integrity sha512-sT7ihtmGSF9yhm6ggikHdV0hlziDTX7oFoXtuVWeDd3hHObNkcHRo9V3yg7vCAY7cONyxJC/XXCmmiHHcvX7bQ== + dependencies: + icss-utils "^5.0.0" + postcss-selector-parser "^6.0.2" + postcss-value-parser "^4.1.0" + postcss-modules-scope@^2.2.0: version "2.2.0" resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-2.2.0.tgz#385cae013cc7743f5a7d7602d1073a89eaae62ee" @@ -15321,6 +14918,13 @@ postcss-modules-scope@^2.2.0: postcss "^7.0.6" postcss-selector-parser "^6.0.0" +postcss-modules-scope@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz#9ef3151456d3bbfa120ca44898dfca6f2fa01f06" + integrity sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg== + dependencies: + postcss-selector-parser "^6.0.4" + postcss-modules-values@^3.0.0: version "3.0.0" resolved 
"https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-3.0.0.tgz#5b5000d6ebae29b4255301b4a3a54574423e7f10" @@ -15329,6 +14933,13 @@ postcss-modules-values@^3.0.0: icss-utils "^4.0.0" postcss "^7.0.6" +postcss-modules-values@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz#d7c5e7e68c3bb3c9b27cbf48ca0bb3ffb4602c9c" + integrity sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ== + dependencies: + icss-utils "^5.0.0" + postcss-selector-parser@^6.0.0, postcss-selector-parser@^6.0.2: version "6.0.4" resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.4.tgz#56075a1380a04604c38b063ea7767a129af5c2b3" @@ -15339,6 +14950,14 @@ postcss-selector-parser@^6.0.0, postcss-selector-parser@^6.0.2: uniq "^1.0.1" util-deprecate "^1.0.2" +postcss-selector-parser@^6.0.4: + version "6.0.9" + resolved "https://registry.yarnpkg.com/postcss-selector-parser/-/postcss-selector-parser-6.0.9.tgz#ee71c3b9ff63d9cd130838876c13a2ec1a992b2f" + integrity sha512-UO3SgnZOVTwu4kyLR22UQ1xZh086RyNZppb7lLAKBFK8a32ttG5i87Y/P3+2bRSjZNyJ1B7hfFNo273tKe9YxQ== + dependencies: + cssesc "^3.0.0" + util-deprecate "^1.0.2" + postcss-value-parser@^4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.1.0.tgz#443f6a20ced6481a2bda4fa8532a6e55d789a2cb" @@ -15361,6 +14980,15 @@ postcss@^7.0.36: picocolors "^0.2.1" source-map "^0.6.1" +postcss@^8.2.15: + version "8.4.7" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.7.tgz#f99862069ec4541de386bf57f5660a6c7a0875a8" + integrity sha512-L9Ye3r6hkkCeOETQX6iOaWZgjp3LL6Lpqm6EtgbKrgqGGteRMNb9vzBfRL96YOSu8o7x3MfIH9Mo5cPJFGrW6A== + dependencies: + nanoid "^3.3.1" + picocolors "^1.0.0" + source-map-js "^1.0.2" + prelude-ls@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396" @@ -15371,12 +14999,7 @@ prelude-ls@~1.1.2: resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" integrity sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ= -prepend-http@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" - integrity sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc= - -pretender@^3.0.1: +pretender@^3.0.1, pretender@^3.4.7: version "3.4.7" resolved "https://registry.yarnpkg.com/pretender/-/pretender-3.4.7.tgz#34a2ae2d1fc9db440a990d50e6c0f5481d8755fc" integrity sha512-jkPAvt1BfRi0RKamweJdEcnjkeu7Es8yix3bJ+KgBC5VpG/Ln4JE3hYN6vJym4qprm8Xo5adhWpm3HCoft1dOw== @@ -15384,14 +15007,6 @@ pretender@^3.0.1: fake-xml-http-request "^2.1.2" route-recognizer "^0.3.3" -pretender@^3.4.3: - version "3.4.3" - resolved "https://registry.yarnpkg.com/pretender/-/pretender-3.4.3.tgz#a3b4160516007075d29127262f3a0063d19896e9" - integrity sha512-AlbkBly9R8KR+R0sTCJ/ToOeEoUMtt52QVCetui5zoSmeLOU3S8oobFsyPLm1O2txR6t58qDNysqPnA1vVi8Hg== - dependencies: - fake-xml-http-request "^2.1.1" - route-recognizer "^0.3.3" - prettier-linter-helpers@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz#d23d41fe1375646de2d0104d3454a3008802cf7b" @@ -15429,11 +15044,6 @@ pretty-ms@^3.1.0: dependencies: parse-ms "^1.0.0" -printf@^0.5.1: - version "0.5.3" - resolved 
"https://registry.yarnpkg.com/printf/-/printf-0.5.3.tgz#8b7eec278d886833312238b2bf42b2b6f250880a" - integrity sha512-t3lYN6vPU5PZXDiEZZqoyXvN8wCsBfi8gPoxTKo2e5hhV673t/KUh+mfO8P8lCOCDC/BWcOGIxKyebxc5FuqLA== - printf@^0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/printf/-/printf-0.6.1.tgz#b9afa3d3b55b7f2e8b1715272479fc756ed88650" @@ -15449,7 +15059,7 @@ prismjs@~1.24.0: resolved "https://registry.yarnpkg.com/prismjs/-/prismjs-1.24.1.tgz#c4d7895c4d6500289482fa8936d9cdd192684036" integrity sha512-mNPsedLuk90RVJioIky8ANZEwYm5w9LcvCXrxHlwf4fNVSn8jEipMybMkWUyyF0JhnC+C4VcOVSBuHRKs1L5Ow== -private@^0.1.6, private@^0.1.8, private@~0.1.5: +private@^0.1.8: version "0.1.8" resolved "https://registry.yarnpkg.com/private/-/private-0.1.8.tgz#2381edb3689f7a53d653190060fcf822d2f368ff" integrity sha512-VvivMrbvd2nKkiG38qjULzlc+4Vx4wm/whI9pQD35YrARNnhxeiRktSOhSukRLFNlzg6Br/cJPet5J/u19r/mg== @@ -15505,10 +15115,10 @@ promise.allsettled@^1.0.0: get-intrinsic "^1.0.2" iterate-value "^1.0.2" -promise.hash.helper@^1.0.6: - version "1.0.7" - resolved "https://registry.yarnpkg.com/promise.hash.helper/-/promise.hash.helper-1.0.7.tgz#2f39d8495df40dcdfbc1d5be9e9e56efeae7f180" - integrity sha512-0qhWYyCV9TYDMSooYw1fShIb7R6hsWYja7JLqbeb1MvHqDTvP/uy/R1RsyVqDi6GCiHOI4G5p2Hpr3IA+/l/+Q== +promise.hash.helper@^1.0.7: + version "1.0.8" + resolved "https://registry.yarnpkg.com/promise.hash.helper/-/promise.hash.helper-1.0.8.tgz#8c5fa0570f6f96821f52364fd72292b2c5a114f7" + integrity sha512-KYcnXctWUWyVD3W3Ye0ZDuA1N8Szrh85cVCxpG6xYrOk/0CttRtYCmU30nWsUch0NuExQQ63QXvzRE6FLimZmg== promise.prototype.finally@^3.1.0: version "3.1.2" @@ -15653,15 +15263,6 @@ qs@~6.5.2: resolved "https://registry.yarnpkg.com/qs/-/qs-6.5.2.tgz#cb3ae806e8740444584ef154ce8ee98d403f3e36" integrity sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA== -query-string@^5.0.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/query-string/-/query-string-5.1.1.tgz#a78c012b71c17e05f2e3fa2319dd330682efb3cb" - integrity sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw== - dependencies: - decode-uri-component "^0.2.0" - object-assign "^4.1.0" - strict-uri-encode "^1.0.0" - query-string@^7.0.1: version "7.0.1" resolved "https://registry.yarnpkg.com/query-string/-/query-string-7.0.1.tgz#45bd149cf586aaa582dffc7ec7a8ad97dd02f75d" @@ -15701,15 +15302,14 @@ qunit-dom@^2.0.0: ember-cli-babel "^7.23.0" ember-cli-version-checker "^5.1.1" -qunit@^2.9.3: - version "2.14.0" - resolved "https://registry.yarnpkg.com/qunit/-/qunit-2.14.0.tgz#6f913903e71ebe24ef4a4dada2b796fb52285051" - integrity sha512-CYfenbgdpmhl2Ql2rDrrj0felY4h8k6lYhtWwGBCLL4qQC33YOj0psV8MWo85L1i0SIOmEDRXkFopWnGCLmf7g== +qunit@^2.17.2: + version "2.18.0" + resolved "https://registry.yarnpkg.com/qunit/-/qunit-2.18.0.tgz#5a3efc8ee609e9d8f207a1ad317a9b02bb14bc6b" + integrity sha512-Xw/zUm5t1JY8SNErki/qtw4fCuaaOZL+bPloZU+0kto+fO8j1JV9MQWqXO4kATfhEyJohlsKZpfg1HF7GOkpXw== dependencies: - commander "6.2.0" - js-reporters "1.2.3" - node-watch "0.7.1" - tiny-glob "0.2.8" + commander "7.2.0" + node-watch "0.7.3" + tiny-glob "0.2.9" ramda@^0.21.0: version "0.21.0" @@ -16021,16 +15621,6 @@ readdirp@~3.6.0: dependencies: picomatch "^2.2.1" -recast@^0.13.0: - version "0.13.2" - resolved "https://registry.yarnpkg.com/recast/-/recast-0.13.2.tgz#919e7e856d5154f13284142ed1797753c6756137" - integrity sha512-Xqo0mKljGUWGUhnkdbODk7oJGFrMcpgKQ9cCyZ4y+G9VfoTKdum8nHbf/SxIdKx5aBSZ29VpVy20bTyt7jyC8w== - dependencies: - ast-types 
"0.10.2" - esprima "~4.0.0" - private "~0.1.5" - source-map "~0.6.1" - recast@^0.18.1: version "0.18.10" resolved "https://registry.yarnpkg.com/recast/-/recast-0.18.10.tgz#605ebbe621511eb89b6356a7e224bff66ed91478" @@ -16064,52 +15654,30 @@ refractor@^3.1.0: parse-entities "^2.0.0" prismjs "~1.24.0" -regenerate-unicode-properties@^8.2.0: - version "8.2.0" - resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-8.2.0.tgz#e5de7111d655e7ba60c057dbe9ff37c87e65cdec" - integrity sha512-F9DjY1vKLo/tPePDycuH3dn9H1OTPIkVD9Kz4LODu+F2C75mgjAJ7x/gwy6ZcSNRAAkhNlJSOHRe8k3p+K9WhA== +regenerate-unicode-properties@^10.0.1: + version "10.0.1" + resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.0.1.tgz#7f442732aa7934a3740c779bb9b3340dccc1fb56" + integrity sha512-vn5DU6yg6h8hP/2OkQo3K7uVILvY4iu0oI4t3HFa81UPkhGJwkRwM10JEc3upjdhHjs/k8GJY1sRBhk5sr69Bw== dependencies: - regenerate "^1.4.0" + regenerate "^1.4.2" -regenerate@^1.2.1, regenerate@^1.4.0: +regenerate-unicode-properties@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-9.0.0.tgz#54d09c7115e1f53dc2314a974b32c1c344efe326" + integrity sha512-3E12UeNSPfjrgwjkR81m5J7Aw/T55Tu7nUyZVQYCKEOs+2dkxEY+DpPtZzO4YruuiPb7NkYLVcyJC4+zCbk5pA== + dependencies: + regenerate "^1.4.2" + +regenerate@^1.4.2: version "1.4.2" resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A== -regenerator-runtime@^0.10.5: - version "0.10.5" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.10.5.tgz#336c3efc1220adcedda2c9fab67b5a7955a33658" - integrity sha1-M2w+/BIgrc7dosn6tntaeVWjNlg= - -regenerator-runtime@^0.11.0: - version "0.11.1" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz#be05ad7f9bf7d22e056f9726cee5017fbf19e2e9" - integrity sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg== - -regenerator-runtime@^0.13.4: - version "0.13.7" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.7.tgz#cac2dacc8a1ea675feaabaeb8ae833898ae46f55" - integrity sha512-a54FxoJDIr27pgf7IgeQGxmqUNYrcV338lf/6gH456HZ/PhX+5BcwHXG9ajESmwe6WRO0tAzRUrRmNONWgkrew== - -regenerator-runtime@^0.13.7: +regenerator-runtime@^0.13.2, regenerator-runtime@^0.13.4, regenerator-runtime@^0.13.7: version "0.13.9" resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.9.tgz#8925742a98ffd90814988d7566ad30ca3b263b52" integrity sha512-p3VT+cOEgxFsRRA9X4lkI1E+k2/CtnKtU4gcxyaCUreilL/vqI6CdZ3wxVUx3UOUg+gnUOQQcRI7BmSI656MYA== -regenerator-runtime@^0.9.5: - version "0.9.6" - resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.9.6.tgz#d33eb95d0d2001a4be39659707c51b0cb71ce029" - integrity sha1-0z65XQ0gAaS+OWWXB8UbDLcc4Ck= - -regenerator-transform@^0.10.0: - version "0.10.1" - resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.10.1.tgz#1e4996837231da8b7f3cf4114d71b5691a0680dd" - integrity sha512-PJepbvDbuK1xgIgnau7Y90cwaAmO/LCLMI2mPvaXq2heGMR3aWW5/BQvYrhJ8jgmQjXewXvBjzfqKcVOmhjZ6Q== - dependencies: - babel-runtime "^6.18.0" - babel-types "^6.19.0" - private "^0.1.6" - regenerator-transform@^0.14.2: version "0.14.5" resolved 
"https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.14.5.tgz#c98da154683671c9c4dcb16ece736517e1b7feb4" @@ -16133,53 +15701,64 @@ regexp.prototype.flags@^1.3.0: call-bind "^1.0.2" define-properties "^1.1.3" +regexp.prototype.flags@^1.3.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.4.1.tgz#b3f4c0059af9e47eca9f3f660e51d81307e72307" + integrity sha512-pMR7hBVUUGI7PMA37m2ofIdQCsomVnas+Jn5UPGAHQ+/LlwKm/aTLJHdasmHRzlfeZwHiAOaRSo2rbBDm3nNUQ== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.3" + regexpp@^3.0.0, regexpp@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/regexpp/-/regexpp-3.1.0.tgz#206d0ad0a5648cffbdb8ae46438f3dc51c9f78e2" integrity sha512-ZOIzd8yVsQQA7j8GCSlPGXwg5PfmA1mrq0JP4nGhh54LaKN3xdai/vHUDu74pKwV8OxseMS65u2NImosQcSD0Q== -regexpu-core@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-2.0.0.tgz#49d038837b8dcf8bfa5b9a42139938e6ea2ae240" - integrity sha1-SdA4g3uNz4v6W5pCE5k45uoq4kA= - dependencies: - regenerate "^1.2.1" - regjsgen "^0.2.0" - regjsparser "^0.1.4" - regexpu-core@^4.7.1: - version "4.7.1" - resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-4.7.1.tgz#2dea5a9a07233298fbf0db91fa9abc4c6e0f8ad6" - integrity sha512-ywH2VUraA44DZQuRKzARmw6S66mr48pQVva4LBeRhcOltJ6hExvWly5ZjFLYo67xbIxb6W1q4bAGtgfEl20zfQ== + version "4.8.0" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-4.8.0.tgz#e5605ba361b67b1718478501327502f4479a98f0" + integrity sha512-1F6bYsoYiz6is+oz70NWur2Vlh9KWtswuRuzJOfeYUrfPX2o8n74AnUVaOGDbUqVGO9fNHu48/pjJO4sNVwsOg== dependencies: - regenerate "^1.4.0" - regenerate-unicode-properties "^8.2.0" - regjsgen "^0.5.1" - regjsparser "^0.6.4" - unicode-match-property-ecmascript "^1.0.4" - unicode-match-property-value-ecmascript "^1.2.0" + regenerate "^1.4.2" + regenerate-unicode-properties "^9.0.0" + regjsgen "^0.5.2" + regjsparser "^0.7.0" + unicode-match-property-ecmascript "^2.0.0" + unicode-match-property-value-ecmascript "^2.0.0" -regjsgen@^0.2.0: - version "0.2.0" - resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.2.0.tgz#6c016adeac554f75823fe37ac05b92d5a4edb1f7" - integrity sha1-bAFq3qxVT3WCP+N6wFuS1aTtsfc= +regexpu-core@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-5.0.1.tgz#c531122a7840de743dcf9c83e923b5560323ced3" + integrity sha512-CriEZlrKK9VJw/xQGJpQM5rY88BtuL8DM+AEwvcThHilbxiTAy8vq4iJnd2tqq8wLmjbGZzP7ZcKFjbGkmEFrw== + dependencies: + regenerate "^1.4.2" + regenerate-unicode-properties "^10.0.1" + regjsgen "^0.6.0" + regjsparser "^0.8.2" + unicode-match-property-ecmascript "^2.0.0" + unicode-match-property-value-ecmascript "^2.0.0" -regjsgen@^0.5.1: +regjsgen@^0.5.2: version "0.5.2" resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.5.2.tgz#92ff295fb1deecbf6ecdab2543d207e91aa33733" integrity sha512-OFFT3MfrH90xIW8OOSyUrk6QHD5E9JOTeGodiJeBS3J6IwlgzJMNE/1bZklWz5oTg+9dCMyEetclvCVXOPoN3A== -regjsparser@^0.1.4: - version "0.1.5" - resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.1.5.tgz#7ee8f84dc6fa792d3fd0ae228d24bd949ead205c" - integrity sha1-fuj4Tcb6eS0/0K4ijSS9lJ6tIFw= +regjsgen@^0.6.0: + version "0.6.0" + resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.6.0.tgz#83414c5354afd7d6627b16af5f10f41c4e71808d" + integrity sha512-ozE883Uigtqj3bx7OhL1KNbCzGyW2NQZPl6Hs09WTvCuZD5sTI4JY58bkbQWa/Y9hxIsvJ3M8Nbf7j54IqeZbA== + +regjsparser@^0.7.0: + version "0.7.0" + resolved 
"https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.7.0.tgz#a6b667b54c885e18b52554cb4960ef71187e9968" + integrity sha512-A4pcaORqmNMDVwUjWoTzuhwMGpP+NykpfqAsEgI1FSH/EzC7lrN5TMd+kN8YCovX+jMpu8eaqXgXPCa0g8FQNQ== dependencies: jsesc "~0.5.0" -regjsparser@^0.6.4: - version "0.6.7" - resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.6.7.tgz#c00164e1e6713c2e3ee641f1701c4b7aa0a7f86c" - integrity sha512-ib77G0uxsA2ovgiYbCVGx4Pv3PSttAx2vIwidqQzbL2U5S4Q+j00HdSAneSBuyVcMvEnTXMjiGgB+DlXozVhpQ== +regjsparser@^0.8.2: + version "0.8.4" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.8.4.tgz#8a14285ffcc5de78c5b95d62bbf413b6bc132d5f" + integrity sha512-J3LABycON/VNEu3abOviqGHuB/LOtOQj8SKmfP9anY5GfAVw/SPjwzSjxGjbZXIxbGfqTHtJw58C2Li/WkStmA== dependencies: jsesc "~0.5.0" @@ -16262,13 +15841,6 @@ repeat-string@^1.5.4, repeat-string@^1.6.1: resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637" integrity sha1-jcrkcOHIirwtYA//Sndihtp15jc= -repeating@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/repeating/-/repeating-2.0.1.tgz#5214c53a926d3552707527fbab415dbc08d06dda" - integrity sha1-UhTFOpJtNVJwdSf7q0FdvAjQbdo= - dependencies: - is-finite "^1.0.0" - request-promise-core@1.1.4: version "1.1.4" resolved "https://registry.yarnpkg.com/request-promise-core/-/request-promise-core-1.1.4.tgz#3eedd4223208d419867b78ce815167d10593a22f" @@ -16433,7 +16005,7 @@ resolve-url@^0.2.1: resolved "https://registry.yarnpkg.com/resolve-url/-/resolve-url-0.2.1.tgz#2c637fe77c893afd2a663fe21aa9080068e2052a" integrity sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo= -resolve@^1.1.7, resolve@^1.10.1, resolve@^1.11.1, resolve@^1.12.0, resolve@^1.13.1, resolve@^1.17.0, resolve@^1.19.0, resolve@^1.3.3, resolve@^1.4.0, resolve@^1.5.0, resolve@^1.8.1: +resolve@^1.1.7, resolve@^1.10.1, resolve@^1.11.1, resolve@^1.12.0, resolve@^1.19.0, resolve@^1.3.3, resolve@^1.8.1: version "1.19.0" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.19.0.tgz#1af5bf630409734a067cae29318aac7fa29a267c" integrity sha512-rArEXAgsBG4UgRGcynxWIWKFvh/XZCcS8UJdHhwy91zwAvCZIbcs+vAbflgBnNjYMs/i/i+/Ux6IZhML1yPvxg== @@ -16441,7 +16013,16 @@ resolve@^1.1.7, resolve@^1.10.1, resolve@^1.11.1, resolve@^1.12.0, resolve@^1.13 is-core-module "^2.1.0" path-parse "^1.0.6" -resolve@^1.10.0, resolve@^1.14.2, resolve@^1.3.2: +resolve@^1.10.0, resolve@^1.13.1, resolve@^1.17.0, resolve@^1.20.0, resolve@^1.4.0, resolve@^1.5.0: + version "1.22.0" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.0.tgz#5e0b8c67c15df57a89bdbabe603a002f21731198" + integrity sha512-Hhtrw0nLeSrFQ7phPp4OOcVjLPIeMnRlr5mcnVuMe7M/7eBn98A3hmFRLoFo3DLZkivSYwhRUJTyPyWAk56WLw== + dependencies: + is-core-module "^2.8.1" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + +resolve@^1.14.2, resolve@^1.3.2: version "1.20.0" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.20.0.tgz#629a013fb3f70755d6f0b7935cc1c2c5378b1975" integrity sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A== @@ -16449,13 +16030,6 @@ resolve@^1.10.0, resolve@^1.14.2, resolve@^1.3.2: is-core-module "^2.2.0" path-parse "^1.0.6" -responselike@1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" - integrity sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec= - dependencies: - lowercase-keys "^1.0.0" - restore-cursor@^2.0.0: version "2.0.0" resolved 
"https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf" @@ -16535,14 +16109,12 @@ rollup@^0.57.1: signal-exit "^3.0.2" sourcemap-codec "^1.4.1" -rollup@^1.12.0: - version "1.32.1" - resolved "https://registry.yarnpkg.com/rollup/-/rollup-1.32.1.tgz#4480e52d9d9e2ae4b46ba0d9ddeaf3163940f9c4" - integrity sha512-/2HA0Ec70TvQnXdzynFffkjA6XN+1e2pEv/uKS5Ulca40g2L7KuOE3riasHoNVHOsFD5KKZgDsMk1CP3Tw9s+A== - dependencies: - "@types/estree" "*" - "@types/node" "*" - acorn "^7.1.0" +rollup@^2.50.0: + version "2.69.0" + resolved "https://registry.yarnpkg.com/rollup/-/rollup-2.69.0.tgz#82aa86682a45e9760146b736c1643bf435506156" + integrity sha512-kjER91tHyek8gAkuz7+558vSnTQ+pITEok1P0aNOS45ZXyngaqPsXJmSel4QPQnJo7EJMjXUU1/GErWkWiKORg== + optionalDependencies: + fsevents "~2.3.2" route-recognizer@^0.3.3: version "0.3.4" @@ -16554,7 +16126,7 @@ rsvp@^3.0.14, rsvp@^3.0.17, rsvp@^3.0.18, rsvp@^3.0.21, rsvp@^3.0.6, rsvp@^3.1.0 resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-3.6.2.tgz#2e96491599a96cde1b515d5674a8f7a91452926a" integrity sha512-OfWGQTb9vnwRjwtA2QwpG2ICclHC3pgXZO5xt8H2EfgDquO0qVdSb5T88L4qJVAEugbS56pAuV4XZM58UX8ulw== -rsvp@^4.7.0, rsvp@^4.8.1, rsvp@^4.8.2, rsvp@^4.8.3, rsvp@^4.8.4, rsvp@^4.8.5: +rsvp@^4.7.0, rsvp@^4.8.1, rsvp@^4.8.3, rsvp@^4.8.4, rsvp@^4.8.5: version "4.8.5" resolved "https://registry.yarnpkg.com/rsvp/-/rsvp-4.8.5.tgz#c8f155311d167f68f21e168df71ec5b083113734" integrity sha512-nfMOlASu9OnRJo1mbEk2cz0D56a1MBNrJ7orjRZQG10XDyuvwksKbuXNp6qa+kbn839HwjwhBzhFmdsaEAfauA== @@ -16703,7 +16275,7 @@ schema-utils@^2.6.5, schema-utils@^2.7.0: ajv "^6.12.4" ajv-keywords "^3.5.2" -schema-utils@^3.0.0: +schema-utils@^3.0.0, schema-utils@^3.1.0, schema-utils@^3.1.1: version "3.1.1" resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-3.1.1.tgz#bc74c4b6b6995c1d88f76a8b77bea7219e0c8281" integrity sha512-Y5PQxS4ITlC+EahLuXaY86TXfR7Dc5lw294alXOq86JAHCihAIZfqv8nNCWvaEJvaC51uN9hbLGeV0cFBdH+Fw== @@ -16712,6 +16284,16 @@ schema-utils@^3.0.0: ajv "^6.12.5" ajv-keywords "^3.5.2" +schema-utils@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-4.0.0.tgz#60331e9e3ae78ec5d16353c467c34b3a0a1d3df7" + integrity sha512-1edyXKgh6XnJsJSQ8mKWXnN/BVaIbFMLpouRUrXgVq7WYne5kw3MW7UPhO44uRXQSIpTSXoJbmrR2X0w9kUTyg== + dependencies: + "@types/json-schema" "^7.0.9" + ajv "^8.8.0" + ajv-formats "^2.1.1" + ajv-keywords "^5.0.0" + select@^1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/select/-/select-1.1.2.tgz#0e7350acdec80b1108528786ec1d4418d11b396d" @@ -16742,20 +16324,20 @@ semver@^6.0.0, semver@^6.1.0, semver@^6.1.1, semver@^6.1.2, semver@^6.3.0: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== -semver@^7.0.0, semver@^7.2.1, semver@^7.3.2, semver@^7.3.4: - version "7.3.4" - resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.4.tgz#27aaa7d2e4ca76452f98d3add093a72c943edc97" - integrity sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw== - dependencies: - lru-cache "^6.0.0" - -semver@^7.1.3, semver@^7.3.5: +semver@^7.1.3, semver@^7.3.4, semver@^7.3.5: version "7.3.5" resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.5.tgz#0b621c879348d8998e4b0e4be94b3f12e6018ef7" integrity sha512-PoeGJYh8HK4BTO/a9Tf6ZG3veo/A7ZVsYrSA6J8ny9nb3B1VrpkuN+z9OE5wfE5p6H4LchYZsegiQgbJD94ZFQ== dependencies: 
lru-cache "^6.0.0" +semver@^7.2.1, semver@^7.3.2: + version "7.3.4" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.4.tgz#27aaa7d2e4ca76452f98d3add093a72c943edc97" + integrity sha512-tCfb2WLjqFAtXn4KEdxIhalnRtoKFN7nAwj0B3ZXCbQloV2tq5eDbcTmT68JJD3nRJq24/XgxtQKFIpQdtvmVw== + dependencies: + lru-cache "^6.0.0" + send@0.17.1: version "0.17.1" resolved "https://registry.yarnpkg.com/send/-/send-0.17.1.tgz#c1d8b059f7900f7466dd4938bdc44e11ddb376c8" @@ -16789,6 +16371,13 @@ serialize-javascript@^5.0.1: dependencies: randombytes "^2.1.0" +serialize-javascript@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.0.tgz#efae5d88f45d7924141da8b5c3a7a7e663fefeb8" + integrity sha512-Qr3TosvguFt8ePWqsvRfrKyQXIiW+nGbYpy8XK24NQHE83caxWt+mIymTT19DGFbNWNLfEwsrkSmN64lVWB9ag== + dependencies: + randombytes "^2.1.0" + serve-favicon@^2.5.0: version "2.5.0" resolved "https://registry.yarnpkg.com/serve-favicon/-/serve-favicon-2.5.0.tgz#935d240cdfe0f5805307fdfe967d88942a2cbcf0" @@ -16903,12 +16492,17 @@ side-channel@^1.0.3, side-channel@^1.0.4: get-intrinsic "^1.0.2" object-inspect "^1.9.0" -signal-exit@^3.0.0, signal-exit@^3.0.2, signal-exit@^3.0.3: +signal-exit@^3.0.0, signal-exit@^3.0.3: version "3.0.5" resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.5.tgz#9e3e8cc0c75a99472b44321033a7702e7738252f" integrity sha512-KWcOiKeQj6ZyXx7zq4YxSMgHRlod4czeBQZrPb8OKcohcqAXShm7E20kEMle9WBt26hFcAf0qLOcp5zmY7kOqQ== -silent-error@^1.0.0, silent-error@^1.0.1, silent-error@^1.1.0, silent-error@^1.1.1: +signal-exit@^3.0.2: + version "3.0.7" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" + integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== + +silent-error@^1.0.0, silent-error@^1.0.1, silent-error@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/silent-error/-/silent-error-1.1.1.tgz#f72af5b0d73682a2ba1778b7e32cd8aa7c2d8662" integrity sha512-n4iEKyNcg4v6/jpb3c0/iyH2G1nzUNl7Gpqtn/mHIJK9S/q/7MCfoO4rwVOoO59qPFIc0hVHvMbiOJ0NdtxKKw== @@ -16920,29 +16514,23 @@ simple-html-tokenizer@^0.5.10, simple-html-tokenizer@^0.5.8: resolved "https://registry.yarnpkg.com/simple-html-tokenizer/-/simple-html-tokenizer-0.5.10.tgz#0843e4f00c9677f1c81e3dfeefcee0a4aca8e5d0" integrity sha512-1DHMUmvUOGuUZ9/+cX/+hOhWhRD5dEw6lodn8WuV+T+cQ31hhBcCu1dcDsNotowi4mMaNhrLyKoS+DtB81HdDA== -sinon@^7.4.2: - version "7.5.0" - resolved "https://registry.yarnpkg.com/sinon/-/sinon-7.5.0.tgz#e9488ea466070ea908fd44a3d6478fd4923c67ec" - integrity sha512-AoD0oJWerp0/rY9czP/D6hDTTUYGpObhZjMpd7Cl/A6+j0xBE+ayL/ldfggkBXUs0IkvIiM1ljM8+WkOc5k78Q== +sinon@^9.0.0: + version "9.2.4" + resolved "https://registry.yarnpkg.com/sinon/-/sinon-9.2.4.tgz#e55af4d3b174a4443a8762fa8421c2976683752b" + integrity sha512-zljcULZQsJxVra28qIAL6ow1Z9tpattkCTEJR4RBP3TGc00FcttsP5pK284Nas5WjMZU5Yzy3kAIp3B3KRf5Yg== dependencies: - "@sinonjs/commons" "^1.4.0" - "@sinonjs/formatio" "^3.2.1" - "@sinonjs/samsam" "^3.3.3" - diff "^3.5.0" - lolex "^4.2.0" - nise "^1.5.2" - supports-color "^5.5.0" + "@sinonjs/commons" "^1.8.1" + "@sinonjs/fake-timers" "^6.0.1" + "@sinonjs/samsam" "^5.3.1" + diff "^4.0.2" + nise "^4.0.4" + supports-color "^7.1.0" sisteransi@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" integrity 
sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg== -slash@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/slash/-/slash-1.0.0.tgz#c41f2f6c39fc16d1cd17ad4b5d896114ae470d55" - integrity sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU= - slash@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/slash/-/slash-2.0.0.tgz#de552851a1759df3a8f206535442f5ec4ddeab44" @@ -17016,51 +16604,11 @@ sntp@0.2.x: dependencies: hoek "0.9.x" -socket.io-adapter@~1.1.0: - version "1.1.2" - resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-1.1.2.tgz#ab3f0d6f66b8fc7fca3959ab5991f82221789be9" - integrity sha512-WzZRUj1kUjrTIrUKpZLEzFZ1OLj5FwLlAFQs9kuZJzJi5DKdU7FsWc36SNmA8iDOtwBQyT8FkrriRM8vXLYz8g== - socket.io-adapter@~2.3.2: version "2.3.2" resolved "https://registry.yarnpkg.com/socket.io-adapter/-/socket.io-adapter-2.3.2.tgz#039cd7c71a52abad984a6d57da2c0b7ecdd3c289" integrity sha512-PBZpxUPYjmoogY0aoaTmo1643JelsaS1CiAwNjRVdrI0X9Seuc19Y2Wife8k88avW6haG8cznvwbubAZwH4Mtg== -socket.io-client@2.4.0: - version "2.4.0" - resolved "https://registry.yarnpkg.com/socket.io-client/-/socket.io-client-2.4.0.tgz#aafb5d594a3c55a34355562fc8aea22ed9119a35" - integrity sha512-M6xhnKQHuuZd4Ba9vltCLT9oa+YvTsP8j9NcEiLElfIg8KeYPyhWOes6x4t+LTAC8enQbE/995AdTem2uNyKKQ== - dependencies: - backo2 "1.0.2" - component-bind "1.0.0" - component-emitter "~1.3.0" - debug "~3.1.0" - engine.io-client "~3.5.0" - has-binary2 "~1.0.2" - indexof "0.0.1" - parseqs "0.0.6" - parseuri "0.0.6" - socket.io-parser "~3.3.0" - to-array "0.1.4" - -socket.io-parser@~3.3.0: - version "3.3.2" - resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-3.3.2.tgz#ef872009d0adcf704f2fbe830191a14752ad50b6" - integrity sha512-FJvDBuOALxdCI9qwRrO/Rfp9yfndRtc1jSgVgV8FDraihmSP/MLGD5PEuJrNfjALvcQ+vMDM/33AWOYP/JSjDg== - dependencies: - component-emitter "~1.3.0" - debug "~3.1.0" - isarray "2.0.1" - -socket.io-parser@~3.4.0: - version "3.4.1" - resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-3.4.1.tgz#b06af838302975837eab2dc980037da24054d64a" - integrity sha512-11hMgzL+WCLWf1uFtHSNvliI++tcRUWdoeYuwIl+Axvwy9z2gQM+7nJyN3STj1tLj5JyIUH8/gpDGxzAlDdi0A== - dependencies: - component-emitter "1.2.1" - debug "~4.1.0" - isarray "2.0.1" - socket.io-parser@~4.0.4: version "4.0.4" resolved "https://registry.yarnpkg.com/socket.io-parser/-/socket.io-parser-4.0.4.tgz#9ea21b0d61508d18196ef04a2c6b9ab630f4c2b0" @@ -17070,18 +16618,6 @@ socket.io-parser@~4.0.4: component-emitter "~1.3.0" debug "~4.3.1" -socket.io@^2.1.0: - version "2.4.1" - resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-2.4.1.tgz#95ad861c9a52369d7f1a68acf0d4a1b16da451d2" - integrity sha512-Si18v0mMXGAqLqCVpTxBa8MGqriHGQh8ccEOhmsmNS3thNCGBwO8WGrwMibANsWtQQ5NStdZwHqZR3naJVFc3w== - dependencies: - debug "~4.1.0" - engine.io "~3.5.0" - has-binary2 "~1.0.2" - socket.io-adapter "~1.1.0" - socket.io-client "2.4.0" - socket.io-parser "~3.4.0" - socket.io@^4.1.2: version "4.3.1" resolved "https://registry.yarnpkg.com/socket.io/-/socket.io-4.3.1.tgz#c0aa14f3f916a8ab713e83a5bd20c16600245763" @@ -17094,22 +16630,15 @@ socket.io@^4.1.2: socket.io-adapter "~2.3.2" socket.io-parser "~4.0.4" -sort-keys@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/sort-keys/-/sort-keys-2.0.0.tgz#658535584861ec97d730d6cf41822e1f56684128" - integrity sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg= - dependencies: - is-plain-obj "^1.0.0" - sort-object-keys@^1.1.3: version "1.1.3" resolved 
"https://registry.yarnpkg.com/sort-object-keys/-/sort-object-keys-1.1.3.tgz#bff833fe85cab147b34742e45863453c1e190b45" integrity sha512-855pvK+VkU7PaKYPc+Jjnmt4EzejQHyhhF33q31qG8x7maDzkeFhAAThdCYay11CISO+qAMwjOBP+fPZe0IPyg== -sort-package-json@^1.44.0: - version "1.48.1" - resolved "https://registry.yarnpkg.com/sort-package-json/-/sort-package-json-1.48.1.tgz#58629823da53a3ccccc049cb7e7300bc23072b33" - integrity sha512-YvDm1iBzhphfXtctTS0XIBlIW/2N1DZNHx3YMcZnptpZhchqH4zazUOuEWmjfNXndwamITMt9hFPliqwx1SHvQ== +sort-package-json@^1.49.0: + version "1.54.0" + resolved "https://registry.yarnpkg.com/sort-package-json/-/sort-package-json-1.54.0.tgz#2096ccce1ef0221a6bf8ec3046ec6b9ae8d2c726" + integrity sha512-MA0nRiSfZ4/CNM/9rz70Hwq4PpvtBc3v532tzQSmoaLSdeBB3cCd488xmNruLL0fb/ZdbKlcaDDudwnrObbjBw== dependencies: detect-indent "^6.0.0" detect-newline "3.1.0" @@ -17123,6 +16652,11 @@ source-list-map@^2.0.0: resolved "https://registry.yarnpkg.com/source-list-map/-/source-list-map-2.0.1.tgz#3993bd873bfc48479cca9ea3a547835c7c154b34" integrity sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw== +source-map-js@^1.0.1, source-map-js@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.0.2.tgz#adbc361d9c62df380125e7f161f71c826f1e490c" + integrity sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw== + source-map-resolve@^0.5.0: version "0.5.3" resolved "https://registry.yarnpkg.com/source-map-resolve/-/source-map-resolve-0.5.3.tgz#190866bece7553e1f8f267a2ee82c606b5509a1a" @@ -17134,13 +16668,6 @@ source-map-resolve@^0.5.0: source-map-url "^0.4.0" urix "^0.1.0" -source-map-support@^0.4.15: - version "0.4.18" - resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.4.18.tgz#0286a6de8be42641338594e97ccea75f0a2c585f" - integrity sha512-try0/JqxPLF9nOjvSta7tVondkP5dwgyLDjVoyMDlmjugT2lRZ1OfsrYTkCd2hkDnJTKRbO/Rl3orm8vlsUzbA== - dependencies: - source-map "^0.5.6" - source-map-support@^0.5.16, source-map-support@~0.5.20: version "0.5.20" resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.20.tgz#12166089f8f5e5e8c56926b377633392dd2cb6c9" @@ -17196,7 +16723,7 @@ source-map@~0.1.x: dependencies: amdefine ">=0.0.4" -sourcemap-codec@^1.4.1: +sourcemap-codec@^1.4.1, sourcemap-codec@^1.4.4: version "1.4.8" resolved "https://registry.yarnpkg.com/sourcemap-codec/-/sourcemap-codec-1.4.8.tgz#ea804bd94857402e6992d05a38ef1ae35a9ab4c4" integrity sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA== @@ -17259,7 +16786,7 @@ split-string@^3.0.1, split-string@^3.0.2: dependencies: extend-shallow "^3.0.0" -sprintf-js@^1.0.3: +sprintf-js@^1.0.3, sprintf-js@^1.1.1: version "1.1.2" resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.1.2.tgz#da1765262bf8c0f571749f2ad6c26300207ae673" integrity sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug== @@ -17375,11 +16902,6 @@ stream-shift@^1.0.0: resolved "https://registry.yarnpkg.com/stream-shift/-/stream-shift-1.0.1.tgz#d7088281559ab2778424279b0877da3c392d5a3d" integrity sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ== -strict-uri-encode@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz#279b225df1d582b1f54e65addd4352e18faa0713" - integrity sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM= - strict-uri-encode@^2.0.0: 
version "2.0.0" resolved "https://registry.yarnpkg.com/strict-uri-encode/-/strict-uri-encode-2.0.0.tgz#b9c7330c7042862f6b142dc274bbcc5866ce3546" @@ -17452,6 +16974,20 @@ string-width@^4.0.0, string-width@^4.2.0: regexp.prototype.flags "^1.3.0" side-channel "^1.0.3" +string.prototype.matchall@^4.0.5: + version "4.0.6" + resolved "https://registry.yarnpkg.com/string.prototype.matchall/-/string.prototype.matchall-4.0.6.tgz#5abb5dabc94c7b0ea2380f65ba610b3a544b15fa" + integrity sha512-6WgDX8HmQqvEd7J+G6VtAahhsQIssiZ8zl7zKh1VDMFyL3hRTJP4FTNA3RbIp2TOQ9AYNDcc7e3fH0Qbup+DBg== + dependencies: + call-bind "^1.0.2" + define-properties "^1.1.3" + es-abstract "^1.19.1" + get-intrinsic "^1.1.1" + has-symbols "^1.0.2" + internal-slot "^1.0.3" + regexp.prototype.flags "^1.3.1" + side-channel "^1.0.4" + string.prototype.padend@^3.0.0: version "3.1.1" resolved "https://registry.yarnpkg.com/string.prototype.padend/-/string.prototype.padend-3.1.1.tgz#824c84265dbac46cade2b957b38b6a5d8d1683c5" @@ -17603,6 +17139,14 @@ style-loader@^1.3.0: loader-utils "^2.0.0" schema-utils "^2.7.0" +style-loader@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-2.0.0.tgz#9669602fd4690740eaaec137799a03addbbc393c" + integrity sha512-Z0gYUJmzZ6ZdRUqpg1r8GsaFKypE+3xAzuFeMuoHgjc9KZv3wMyCRjQIWEbhoFSq7+7yoHXySDJyyWQaPajeiQ== + dependencies: + loader-utils "^2.0.0" + schema-utils "^3.0.0" + style-to-object@0.3.0, style-to-object@^0.3.0: version "0.3.0" resolved "https://registry.yarnpkg.com/style-to-object/-/style-to-object-0.3.0.tgz#b1b790d205991cc783801967214979ee19a76e46" @@ -17622,7 +17166,7 @@ sum-up@^1.0.1: dependencies: chalk "^1.0.0" -supports-color@8.1.1: +supports-color@8.1.1, supports-color@^8.0.0: version "8.1.1" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== @@ -17634,7 +17178,7 @@ supports-color@^2.0.0: resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" integrity sha1-U10EXOa2Nj+kARcIRimZXp3zJMc= -supports-color@^5.3.0, supports-color@^5.5.0: +supports-color@^5.3.0: version "5.5.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== @@ -17655,6 +17199,11 @@ supports-color@^7.0.0, supports-color@^7.1.0: dependencies: has-flag "^4.0.0" +supports-preserve-symlinks-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" + integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== + svgo@~1.2.2: version "1.2.2" resolved "https://registry.yarnpkg.com/svgo/-/svgo-1.2.2.tgz#0253d34eccf2aed4ad4f283e11ee75198f9d7316" @@ -17742,6 +17291,11 @@ tapable@^1.0.0, tapable@^1.1.3: resolved "https://registry.yarnpkg.com/tapable/-/tapable-1.1.3.tgz#a1fccc06b58db61fd7a45da2da44f5f3a3e67ba2" integrity sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA== +tapable@^2.1.1, tapable@^2.2.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/tapable/-/tapable-2.2.1.tgz#1967a73ef4060a82f12ab96af86d52fdb76eeca0" + integrity 
sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ== + tar@^6.0.2: version "6.1.11" resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.11.tgz#6760a38f003afa1b2ffd0ffe9e9abbd0eab3d621" @@ -17768,11 +17322,12 @@ telejson@^5.3.2: lodash "^4.17.21" memoizerific "^1.11.3" -temp@0.9.1: - version "0.9.1" - resolved "https://registry.yarnpkg.com/temp/-/temp-0.9.1.tgz#2d666114fafa26966cd4065996d7ceedd4dd4697" - integrity sha512-WMuOgiua1xb5R56lE0eH6ivpVmg/lq2OHm4+LtT/xtEtPQ+sz6N3bBM6WZ5FvO1lO4IKIOb43qnhoc4qxP5OeA== +temp@0.9.4: + version "0.9.4" + resolved "https://registry.yarnpkg.com/temp/-/temp-0.9.4.tgz#cd20a8580cb63635d0e4e9d4bd989d44286e7620" + integrity sha512-yYrrsWnrXMcdsnu/7YMYAofM1ktpL5By7vZhf15CrXijWWrEYZks5AXBudalfSWJLlnen/QUJUB5aoB0kqZUGA== dependencies: + mkdirp "^0.5.1" rimraf "~2.6.2" term-size@^2.1.0: @@ -17810,6 +17365,17 @@ terser-webpack-plugin@^4.2.3: terser "^5.3.4" webpack-sources "^1.4.3" +terser-webpack-plugin@^5.1.3: + version "5.3.1" + resolved "https://registry.yarnpkg.com/terser-webpack-plugin/-/terser-webpack-plugin-5.3.1.tgz#0320dcc270ad5372c1e8993fabbd927929773e54" + integrity sha512-GvlZdT6wPQKbDNW/GDQzZFg/j4vKU96yl2q6mcUkzKOgW4gwf1Z8cZToUCrz31XHlPWH8MVb1r2tFtdDtTGJ7g== + dependencies: + jest-worker "^27.4.5" + schema-utils "^3.1.1" + serialize-javascript "^6.0.0" + source-map "^0.6.1" + terser "^5.7.2" + terser@^4.1.2, terser@^4.6.3: version "4.8.0" resolved "https://registry.yarnpkg.com/terser/-/terser-4.8.0.tgz#63056343d7c70bb29f3af665865a46fe03a0df17" @@ -17837,12 +17403,22 @@ terser@^5.3.4: source-map "~0.7.2" source-map-support "~0.5.20" +terser@^5.7.2: + version "5.12.0" + resolved "https://registry.yarnpkg.com/terser/-/terser-5.12.0.tgz#728c6bff05f7d1dcb687d8eace0644802a9dae8a" + integrity sha512-R3AUhNBGWiFc77HXag+1fXpAxTAFRQTJemlJKjAgD9r8xXTpjNKqIXwHM/o7Rh+O0kUJtS3WQVdBeMKFk5sw9A== + dependencies: + acorn "^8.5.0" + commander "^2.20.0" + source-map "~0.7.2" + source-map-support "~0.5.20" + testem-multi-reporter@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/testem-multi-reporter/-/testem-multi-reporter-1.2.0.tgz#e93abdee54f821eb464232aba6b6483f2802664e" integrity sha512-ttIds/wpU0njpRBQsDl+tcPOy8jvafad6MCEIy21+BpNEcpCBZWrYuNva8TtxaZcoLuFTW0B8FsWl6XuJfH3rQ== -testem@^3.0.3: +testem@^3.0.3, testem@^3.2.0: version "3.6.0" resolved "https://registry.yarnpkg.com/testem/-/testem-3.6.0.tgz#bf5c86944bafd035c18f41f520195cce5eef33a8" integrity sha512-sXwx2IlOadOhrKf0hsV1Yt/yuYhdfrtJ4dpp7T6pFN62GjMyKifjAv2SFm+4zYHee1JwxheO7JUL0+3iN0rlHw== @@ -17877,41 +17453,6 @@ testem@^3.0.3: tap-parser "^7.0.0" tmp "0.0.33" -testem@^3.1.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/testem/-/testem-3.2.0.tgz#9924481f6a3b23e350fa77bb251c64d801c4c9a7" - integrity sha512-FkFzNRCIzCxjbNSTxIQSC2tWn1Q2MTR/GTxusSw6uZA4byEQ7wc86TKutNnoCyZ5XIaD9wo4q+dmlK0GUEqFVA== - dependencies: - backbone "^1.1.2" - bluebird "^3.4.6" - charm "^1.0.0" - commander "^2.6.0" - compression "^1.7.4" - consolidate "^0.15.1" - execa "^1.0.0" - express "^4.10.7" - fireworm "^0.7.0" - glob "^7.0.4" - http-proxy "^1.13.1" - js-yaml "^3.2.5" - lodash.assignin "^4.1.0" - lodash.castarray "^4.4.0" - lodash.clonedeep "^4.4.1" - lodash.find "^4.5.1" - lodash.uniqby "^4.7.0" - mkdirp "^0.5.1" - mustache "^3.0.0" - node-notifier "^5.0.1" - npmlog "^4.0.0" - printf "^0.5.1" - rimraf "^2.4.4" - socket.io "^2.1.0" - spawn-args "^0.2.0" - styled_string "0.0.1" - tap-parser "^7.0.0" - tmp "0.0.33" - xmldom "^0.1.19" - text-encoder-lite@^2.0.0: version 
"2.0.0" resolved "https://registry.yarnpkg.com/text-encoder-lite/-/text-encoder-lite-2.0.0.tgz#3c865dd6f3720b279c9e370f8f36c831d2cee175" @@ -17958,11 +17499,6 @@ time-zone@^1.0.0: resolved "https://registry.yarnpkg.com/time-zone/-/time-zone-1.0.0.tgz#99c5bf55958966af6d06d83bdf3800dc82faec5d" integrity sha1-mcW/VZWJZq9tBtg73zgA3IL67F0= -timed-out@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/timed-out/-/timed-out-4.0.1.tgz#f32eacac5a175bea25d7fab565ab3ed8741ef56f" - integrity sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8= - timers-browserify@^2.0.4: version "2.0.12" resolved "https://registry.yarnpkg.com/timers-browserify/-/timers-browserify-2.0.12.tgz#44a45c11fbf407f34f97bccd1577c652361b00ee" @@ -17975,23 +17511,23 @@ tiny-emitter@^2.0.0: resolved "https://registry.yarnpkg.com/tiny-emitter/-/tiny-emitter-2.1.0.tgz#1d1a56edfc51c43e863cbb5382a72330e3555423" integrity sha512-NB6Dk1A9xgQPMoGqC5CVXn123gWyte215ONT5Pp5a0yt4nlEoO1ZWeCwpncaekPHXO60i47ihFnZPiRPjRMq4Q== -tiny-glob@0.2.8: - version "0.2.8" - resolved "https://registry.yarnpkg.com/tiny-glob/-/tiny-glob-0.2.8.tgz#b2792c396cc62db891ffa161fe8b33e76123e531" - integrity sha512-vkQP7qOslq63XRX9kMswlby99kyO5OvKptw7AMwBVMjXEI7Tb61eoI5DydyEMOseyGS5anDN1VPoVxEvH01q8w== +tiny-glob@0.2.9: + version "0.2.9" + resolved "https://registry.yarnpkg.com/tiny-glob/-/tiny-glob-0.2.9.tgz#2212d441ac17928033b110f8b3640683129d31e2" + integrity sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg== dependencies: globalyzer "0.1.0" globrex "^0.1.2" -tiny-lr@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/tiny-lr/-/tiny-lr-1.1.1.tgz#9fa547412f238fedb068ee295af8b682c98b2aab" - integrity sha512-44yhA3tsaRoMOjQQ+5v5mVdqef+kH6Qze9jTpqtVufgYjYt08zyZAwNwwVBj3i1rJMnR52IxOW0LK0vBzgAkuA== +tiny-lr@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/tiny-lr/-/tiny-lr-2.0.0.tgz#863659d7ce1ed201a117d8197d7f8b9a27bdc085" + integrity sha512-f6nh0VMRvhGx4KCeK1lQ/jaL0Zdb5WdR+Jk8q9OSUQnaSDxAEGH1fgqLZ+cMl5EW3F2MGnCsalBO1IsnnogW1Q== dependencies: body "^5.1.0" debug "^3.1.0" - faye-websocket "~0.10.0" - livereload-js "^2.3.0" + faye-websocket "^0.11.3" + livereload-js "^3.3.1" object-assign "^4.1.0" qs "^6.4.0" @@ -18035,21 +17571,11 @@ tmpl@1.0.x: resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.4.tgz#23640dd7b42d00433911140820e5cf440e521dd1" integrity sha1-I2QN17QtAEM5ERQIIOXPRA5SHdE= -to-array@0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/to-array/-/to-array-0.1.4.tgz#17e6c11f73dd4f3d74cda7a4ff3238e9ad9bf890" - integrity sha1-F+bBH3PdTz10zaek/zI46a2b+JA= - to-arraybuffer@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/to-arraybuffer/-/to-arraybuffer-1.0.1.tgz#7d229b1fcc637e466ca081180836a7aabff83f43" integrity sha1-fSKbH8xjfkZsoIEYCDanqr/4P0M= -to-fast-properties@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-1.0.3.tgz#b83571fa4d8c25b82e231b06e3a3055de4ca1a47" - integrity sha1-uDVx+k2MJbguIxsG46MFXeTKGkc= - to-fast-properties@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" @@ -18130,6 +17656,11 @@ tr46@^2.0.2: dependencies: punycode "^2.1.1" +tr46@~0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" + integrity sha1-gYT9NH2snNwYWZLzpmIuFLnZq2o= + tree-sync@^1.2.2: version "1.4.0" resolved 
"https://registry.yarnpkg.com/tree-sync/-/tree-sync-1.4.0.tgz#314598d13abaf752547d9335b8f95d9a137100d6" @@ -18152,11 +17683,6 @@ tree-sync@^2.0.0, tree-sync@^2.1.0: quick-temp "^0.1.5" walk-sync "^0.3.3" -trim-right@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/trim-right/-/trim-right-1.0.1.tgz#cb2e1203067e0c8de1f614094b9fe45704ea6003" - integrity sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM= - trim-trailing-lines@^1.0.0: version "1.1.4" resolved "https://registry.yarnpkg.com/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz#bd4abbec7cc880462f10b2c8b5ce1d8d1ec7c2c0" @@ -18197,7 +17723,7 @@ tslib@^2.0.3, tslib@~2.1.0: resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.1.0.tgz#da60860f1c2ecaa5703ab7d39bc05b6bf988b97a" integrity sha512-hcVC3wYEziELGGmEEXue7D75zbwIIVUMWAVbHItGPx0ziyXxrOMQx4rQEVEV45Ut/1IotuEvwqPopzIOkDMf0A== -tslib@^2.3.0: +tslib@^2.2.0, tslib@^2.3.0: version "2.3.1" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.3.1.tgz#e8a335add5ceae51aa261d32a490158ef042ef01" integrity sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw== @@ -18238,7 +17764,7 @@ type-check@~0.3.2: dependencies: prelude-ls "~1.1.2" -type-detect@4.0.8: +type-detect@4.0.8, type-detect@^4.0.8: version "4.0.8" resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== @@ -18318,7 +17844,7 @@ unbox-primitive@^1.0.1: has-symbols "^1.0.2" which-boxed-primitive "^1.0.2" -underscore.string@^3.2.2, underscore.string@~3.3.4: +underscore.string@^3.2.2: version "3.3.5" resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-3.3.5.tgz#fc2ad255b8bd309e239cbc5816fd23a9b7ea4023" integrity sha512-g+dpmgn+XBneLmXXo+sGlW5xQEt4ErkS3mgeN2GFbremYeMBSJKr9Wf2KJplQVaiPY/f7FN6atosWYNm9ovrYg== @@ -18326,6 +17852,14 @@ underscore.string@^3.2.2, underscore.string@~3.3.4: sprintf-js "^1.0.3" util-deprecate "^1.0.2" +underscore.string@~3.3.4: + version "3.3.6" + resolved "https://registry.yarnpkg.com/underscore.string/-/underscore.string-3.3.6.tgz#ad8cf23d7423cb3b53b898476117588f4e2f9159" + integrity sha512-VoC83HWXmCrF6rgkyxS9GHv8W9Q5nhMKho+OadDJGzL2oDYbYEppBaCMH6pFlwLeqj2QS+hhkw2kpXkSdD1JxQ== + dependencies: + sprintf-js "^1.1.1" + util-deprecate "^1.0.2" + underscore@>=1.8.3: version "1.13.1" resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.13.1.tgz#0c1c6bd2df54b6b69f2314066d65b6cde6fcf9d1" @@ -18344,28 +17878,28 @@ unherit@^1.0.4: inherits "^2.0.0" xtend "^4.0.0" -unicode-canonical-property-names-ecmascript@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-1.0.4.tgz#2619800c4c825800efdd8343af7dd9933cbe2818" - integrity sha512-jDrNnXWHd4oHiTZnx/ZG7gtUTVp+gCcTTKr8L0HjlwphROEW3+Him+IpvC+xcJEFegapiMZyZe02CyuOnRmbnQ== +unicode-canonical-property-names-ecmascript@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz#301acdc525631670d39f6146e0e77ff6bbdebddc" + integrity sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ== -unicode-match-property-ecmascript@^1.0.4: - version "1.0.4" - resolved 
"https://registry.yarnpkg.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-1.0.4.tgz#8ed2a32569961bce9227d09cd3ffbb8fed5f020c" - integrity sha512-L4Qoh15vTfntsn4P1zqnHulG0LdXgjSO035fEpdtp6YxXhMT51Q6vgM5lYdG/5X3MjS+k/Y9Xw4SFCY9IkR0rg== +unicode-match-property-ecmascript@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz#54fd16e0ecb167cf04cf1f756bdcc92eba7976c3" + integrity sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q== dependencies: - unicode-canonical-property-names-ecmascript "^1.0.4" - unicode-property-aliases-ecmascript "^1.0.4" + unicode-canonical-property-names-ecmascript "^2.0.0" + unicode-property-aliases-ecmascript "^2.0.0" -unicode-match-property-value-ecmascript@^1.2.0: - version "1.2.0" - resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.2.0.tgz#0d91f600eeeb3096aa962b1d6fc88876e64ea531" - integrity sha512-wjuQHGQVofmSJv1uVISKLE5zO2rNGzM/KCYZch/QQvez7C1hUhBIuZ701fYXExuufJFMPhv2SyL8CyoIfMLbIQ== +unicode-match-property-value-ecmascript@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.0.0.tgz#1a01aa57247c14c568b89775a54938788189a714" + integrity sha512-7Yhkc0Ye+t4PNYzOGKedDhXbYIBe1XEQYQxOPyhcXNMJ0WCABqqj6ckydd6pWRZTHV4GuCPKdBAUiMc60tsKVw== -unicode-property-aliases-ecmascript@^1.0.4: - version "1.1.0" - resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.1.0.tgz#dd57a99f6207bedff4628abefb94c50db941c8f4" - integrity sha512-PqSoPh/pWetQ2phoj5RLiaqIk4kCNwoV3CI+LfGmWLKI3rE3kl1h59XpX2BjgDrmbxD9ARtQobPGU1SguCYuQg== +unicode-property-aliases-ecmascript@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.0.0.tgz#0a36cb9a585c4f6abd51ad1deddb285c165297c8" + integrity sha512-5Zfuy9q/DFr4tfO7ZPeVXb1aPoeQSdeFMLpYuFebehDAhbuevLs5yxSZmIFN1tP5F9Wl4IpJrYojg85/zgyZHQ== unified@9.2.0: version "9.2.0" @@ -18534,18 +18068,6 @@ url-loader@^4.1.1: mime-types "^2.1.27" schema-utils "^3.0.0" -url-parse-lax@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" - integrity sha1-FrXK/Afb42dsGxmZF3gj1lA6yww= - dependencies: - prepend-http "^2.0.0" - -url-to-options@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/url-to-options/-/url-to-options-1.0.1.tgz#1505a03a289a48cbd7a434efbaeec5055f5633a9" - integrity sha1-FQWgOiiaSMvXpDTvuu7FBV9WM6k= - url@^0.11.0: version "0.11.0" resolved "https://registry.yarnpkg.com/url/-/url-0.11.0.tgz#3838e97cfc60521eb73c525a8e55bfdd9e2e28f1" @@ -18579,9 +18101,9 @@ use@^3.1.0: integrity sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ== username-sync@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/username-sync/-/username-sync-1.0.2.tgz#0a3697909fb7b5768d29e2921f573acfdd427592" - integrity sha512-ayNkOJdoNSGNDBE46Nkc+l6IXmeugbzahZLSMkwvgRWv5y5ZqNY2IrzcgmkR4z32sj1W3tM3TuTUMqkqBzO+RA== + version "1.0.3" + resolved "https://registry.yarnpkg.com/username-sync/-/username-sync-1.0.3.tgz#ae41c5c8a4c8c2ecc1443a7d0742742bd7e36732" + integrity 
sha512-m/7/FSqjJNAzF2La448c/aEom0gJy7HY7Y509h6l0ePvEkFictAGptwWaj1msWJ38JbfEDOUoE8kqFee9EHKdA== util-deprecate@^1.0.1, util-deprecate@^1.0.2, util-deprecate@~1.0.1: version "1.0.2" @@ -18635,16 +18157,21 @@ uuid@^3.3.2: resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.4.0.tgz#b23e4358afa8a202fe7a100af1f5f883f02007ee" integrity sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A== -uuid@^8.1.0, uuid@^8.3.0: +uuid@^8.3.0, uuid@^8.3.2: version "8.3.2" resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== -v8-compile-cache@^2.0.3, v8-compile-cache@^2.2.0: +v8-compile-cache@^2.0.3: version "2.2.0" resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.2.0.tgz#9471efa3ef9128d2f7c6a7ca39c4dd6b5055b132" integrity sha512-gTpR5XQNKFwOd4clxfnhaqvfqMpqEwr4tOtCyz4MtYZX2JYhfr1JvBFKdS+7K/9rfpZR3VLX+YWBbKoxCgS43Q== +v8-compile-cache@^2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/v8-compile-cache/-/v8-compile-cache-2.3.0.tgz#2de19618c66dc247dcfb6f99338035d8245a2cee" + integrity sha512-l8lCEmLcLYZh4nbunNZvQCJc5pv7+RCwa8q/LdUx8u7lsWvPDKmpodJAJNwkAhJC//dFY48KuIEmjtd4RViDrA== + validate-npm-package-license@^3.0.1: version "3.0.4" resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" @@ -18668,6 +18195,14 @@ validate-peer-dependencies@^1.1.0: resolve-package-path "^3.1.0" semver "^7.3.2" +validate-peer-dependencies@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/validate-peer-dependencies/-/validate-peer-dependencies-1.2.0.tgz#22aab93c514f4fda457d36c80685e8b1160d2036" + integrity sha512-nd2HUpKc6RWblPZQ2GDuI65sxJ2n/UqZwSBVtj64xlWjMx0m7ZB2m9b2JS3v1f+n9VWH/dd1CMhkHfP6pIdckA== + dependencies: + resolve-package-path "^3.1.0" + semver "^7.3.2" + vary@^1, vary@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" @@ -18759,6 +18294,25 @@ walk-sync@^2.0.0, walk-sync@^2.0.2, walk-sync@^2.2.0: matcher-collection "^2.0.0" minimatch "^3.0.4" +walk-sync@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-3.0.0.tgz#67f882925021e20569a1edd560b8da31da8d171c" + integrity sha512-41TvKmDGVpm2iuH7o+DAOt06yyu/cSHpX3uzAwetzASvlNtVddgIjXIb2DfB/Wa20B1Jo86+1Dv1CraSU7hWdw== + dependencies: + "@types/minimatch" "^3.0.4" + ensure-posix-path "^1.1.0" + matcher-collection "^2.0.1" + minimatch "^3.0.4" + +walk-sync@~2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/walk-sync/-/walk-sync-2.0.2.tgz#5ea8a28377c8be68c92d50f4007ea381725da14b" + integrity sha512-dCZkrxfHjPn7tIvdYrX3uMD/R0beVrHpA8lROQ5wWrl8psJgR6xwCkwqTFes0dNujbS2o/ITpvSYgIFsLsf13A== + dependencies: + "@types/minimatch" "^3.0.3" + ensure-posix-path "^1.1.0" + matcher-collection "^2.0.0" + walker@~1.0.5: version "1.0.7" resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.7.tgz#2f7f9b8fd10d677262b18a884e28d19618e028fb" @@ -18801,6 +18355,14 @@ watchpack@^1.7.4: chokidar "^3.4.1" watchpack-chokidar2 "^2.0.1" +watchpack@^2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.3.1.tgz#4200d9447b401156eeca7767ee610f8809bc9d25" + integrity sha512-x0t0JuydIo8qCNctdDrn1OzH/qDzk2+rdCOC3YzumZ42fiMqmQ7T3xQurykYMhYfHaPHTp4ZxAx2NfUo1K6QaA== + dependencies: + glob-to-regexp 
"^0.4.1" + graceful-fs "^4.1.2" + wcwidth@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8" @@ -18813,6 +18375,11 @@ web-namespaces@^1.0.0: resolved "https://registry.yarnpkg.com/web-namespaces/-/web-namespaces-1.1.4.tgz#bc98a3de60dadd7faefc403d1076d529f5e030ec" integrity sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw== +webidl-conversions@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" + integrity sha1-JFNCdeKnvGvnvIZhHMFq4KVlSHE= + webidl-conversions@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-5.0.0.tgz#ae59c8a00b121543a2acc65c0434f57b0fc11aff" @@ -18865,6 +18432,11 @@ webpack-sources@^1.4.0, webpack-sources@^1.4.1, webpack-sources@^1.4.3: source-list-map "^2.0.0" source-map "~0.6.1" +webpack-sources@^3.2.3: + version "3.2.3" + resolved "https://registry.yarnpkg.com/webpack-sources/-/webpack-sources-3.2.3.tgz#2d4daab8451fd4b240cc27055ff6a0c2ccea0cde" + integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w== + webpack-virtual-modules@^0.2.2: version "0.2.2" resolved "https://registry.yarnpkg.com/webpack-virtual-modules/-/webpack-virtual-modules-0.2.2.tgz#20863dc3cb6bb2104729fff951fbe14b18bd0299" @@ -18872,7 +18444,7 @@ webpack-virtual-modules@^0.2.2: dependencies: debug "^3.0.0" -webpack@4, webpack@^4.43.0: +webpack@4: version "4.46.0" resolved "https://registry.yarnpkg.com/webpack/-/webpack-4.46.0.tgz#bf9b4404ea20a073605e0a011d188d77cb6ad542" integrity sha512-6jJuJjg8znb/xRItk7bkT0+Q7AHCYjjFnvKIWQPkNIOyRqoCGvkOs0ipeQzrqz4l5FtN5ZI/ukEHroeX/o1/5Q== @@ -18901,6 +18473,36 @@ webpack@4, webpack@^4.43.0: watchpack "^1.7.4" webpack-sources "^1.4.1" +webpack@^5.69.1: + version "5.69.1" + resolved "https://registry.yarnpkg.com/webpack/-/webpack-5.69.1.tgz#8cfd92c192c6a52c99ab00529b5a0d33aa848dc5" + integrity sha512-+VyvOSJXZMT2V5vLzOnDuMz5GxEqLk7hKWQ56YxPW/PQRUuKimPqmEIJOx8jHYeyo65pKbapbW464mvsKbaj4A== + dependencies: + "@types/eslint-scope" "^3.7.3" + "@types/estree" "^0.0.51" + "@webassemblyjs/ast" "1.11.1" + "@webassemblyjs/wasm-edit" "1.11.1" + "@webassemblyjs/wasm-parser" "1.11.1" + acorn "^8.4.1" + acorn-import-assertions "^1.7.6" + browserslist "^4.14.5" + chrome-trace-event "^1.0.2" + enhanced-resolve "^5.8.3" + es-module-lexer "^0.9.0" + eslint-scope "5.1.1" + events "^3.2.0" + glob-to-regexp "^0.4.1" + graceful-fs "^4.2.9" + json-parse-better-errors "^1.0.2" + loader-runner "^4.2.0" + mime-types "^2.1.27" + neo-async "^2.6.2" + schema-utils "^3.1.0" + tapable "^2.1.1" + terser-webpack-plugin "^5.1.3" + watchpack "^2.3.1" + webpack-sources "^3.2.3" + websocket-driver@>=0.5.1: version "0.7.4" resolved "https://registry.yarnpkg.com/websocket-driver/-/websocket-driver-0.7.4.tgz#89ad5295bbf64b480abcba31e4953aca706f5760" @@ -18922,16 +18524,24 @@ whatwg-encoding@^1.0.5: dependencies: iconv-lite "0.4.24" -whatwg-fetch@^3.4.0: - version "3.5.0" - resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-3.5.0.tgz#605a2cd0a7146e5db141e29d1c62ab84c0c4c868" - integrity sha512-jXkLtsR42xhXg7akoDKvKWE40eJeI+2KZqcp2h3NsOrRnDvtWX36KcKl30dy+hxECivdk2BVUHVNrPtoMBUx6A== +whatwg-fetch@^3.6.2: + version "3.6.2" + resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-3.6.2.tgz#dced24f37f2624ed0281725d51d0e2e3fe677f8c" + integrity 
sha512-bJlen0FcuU/0EMLrdbJ7zOnW6ITZLrZMIarMUVmdKtsGvZna8vxKYaexICWPfZ8qwf9fzNq+UEIZrnSaApt6RA== whatwg-mimetype@^2.3.0: version "2.3.0" resolved "https://registry.yarnpkg.com/whatwg-mimetype/-/whatwg-mimetype-2.3.0.tgz#3d4b1e0312d2079879f826aff18dbeeca5960fbf" integrity sha512-M4yMwr6mAnQz76TbJm914+gPpB/nCwvZbJU28cUD6dR004SAxDLOOSUaB1JDRqLtaOV/vi0IC5lEAGFgrjGv/g== +whatwg-url@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" + integrity sha1-lmRU6HZUYuN2RNNib2dCzotwll0= + dependencies: + tr46 "~0.0.3" + webidl-conversions "^3.0.0" + whatwg-url@^8.0.0: version "8.4.0" resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-8.4.0.tgz#50fb9615b05469591d2b2bd6dfaed2942ed72837" @@ -18957,7 +18567,7 @@ which-pm-runs@^1.0.0: resolved "https://registry.yarnpkg.com/which-pm-runs/-/which-pm-runs-1.0.0.tgz#670b3afbc552e0b55df6b7780ca74615f23ad1cb" integrity sha1-Zws6+8VS4LVd9rd4DKdGFfI60cs= -which@^1.2.14, which@^1.2.9, which@^1.3.0, which@^1.3.1: +which@^1.2.14, which@^1.2.9, which@^1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== @@ -19014,13 +18624,6 @@ worker-rpc@^0.1.0: dependencies: microevent.ts "~0.1.1" -workerpool@^2.3.0: - version "2.3.3" - resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-2.3.3.tgz#49a70089bd55e890d68cc836a19419451d7c81d7" - integrity sha512-L1ovlYHp6UObYqElXXpbd214GgbEKDED0d3sj7pRdFXjNkb2+un/AUcCkceHizO0IVI6SOGGncrcjozruCkRgA== - dependencies: - object-assign "4.1.1" - workerpool@^3.1.1: version "3.1.2" resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-3.1.2.tgz#b34e79243647decb174b7481ab5b351dc565c426" @@ -19035,10 +18638,10 @@ workerpool@^6.0.0: resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.1.2.tgz#52bb8c05d89e9b699b68d39f9687419cb2f6ca5c" integrity sha512-I/gDW4LwV3bslk4Yiqd4XoNYlnvV03LON7KuIjmQ90yDnKND1sR2LK/JA1g1tmd71oe6KPSvN0JpBzXIH6xAgA== -workerpool@^6.0.3: - version "6.1.0" - resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.1.0.tgz#a8e038b4c94569596852de7a8ea4228eefdeb37b" - integrity sha512-toV7q9rWNYha963Pl/qyeZ6wG+3nnsyvolaNUS8+R5Wtw6qJPTxIlOP1ZSvcGhEJw+l3HMMmtiNo9Gl61G4GVg== +workerpool@^6.1.4: + version "6.2.0" + resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.0.tgz#827d93c9ba23ee2019c3ffaff5c27fccea289e8b" + integrity sha512-Rsk5qQHJ9eowMH28Jwhe8HEbmdYDX4lwoMWshiCXugjtHqMD9ZbiqSDLxcsfdqsETPzVUtX5s1Z5kStiIM6l4A== wrap-ansi@^6.2.0: version "6.2.0" @@ -19083,7 +18686,7 @@ write-file-atomic@^3.0.0: signal-exit "^3.0.2" typedarray-to-buffer "^3.1.5" -ws@^7.2.3, ws@~7.4.2: +ws@^7.2.3: version "7.4.6" resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.6.tgz#5654ca8ecdeee47c33a9a4bf6d28e2be2980377c" integrity sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A== @@ -19108,16 +18711,6 @@ xmlchars@^2.2.0: resolved "https://registry.yarnpkg.com/xmlchars/-/xmlchars-2.2.0.tgz#060fe1bcb7f9c76fe2a17db86a9bc3ab894210cb" integrity sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw== -xmldom@^0.1.19: - version "0.1.31" - resolved "https://registry.yarnpkg.com/xmldom/-/xmldom-0.1.31.tgz#b76c9a1bd9f0a9737e5a72dc37231cf38375e2ff" - integrity sha512-yS2uJflVQs6n+CyjHoaBmVSqIDevTAWrzMmjG1Gc7h1qQ7uVozNhEPJAwZXWyGQ/Gafo3fCwrcaokezLPupVyQ== 
- -xmlhttprequest-ssl@~1.6.2: - version "1.6.3" - resolved "https://registry.yarnpkg.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.6.3.tgz#03b713873b01659dfa2c1c5d056065b27ddc2de6" - integrity sha512-3XfeQE/wNkvrIktn2Kf0869fC0BN6UpydVasGIeSm2B1Llihf7/0UfZM+eCkOw3P7bP4+qPgqhm7ZoxuJtFU0Q== - xtend@^4.0.0, xtend@^4.0.1, xtend@~4.0.1: version "4.0.2" resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" @@ -19184,11 +18777,6 @@ yargs@^16.2.0: y18n "^5.0.5" yargs-parser "^20.2.2" -yeast@0.1.2: - version "0.1.2" - resolved "https://registry.yarnpkg.com/yeast/-/yeast-0.1.2.tgz#008e06d8094320c372dbc2f8ed76a0ca6c8ac419" - integrity sha1-AI4G2AlDIMNy28L47XagymyKxBk= - yocto-queue@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" From 154264fcd9276b53ea837664bceefd8b6f217291 Mon Sep 17 00:00:00 2001 From: Luiz Aoqui Date: Tue, 8 Mar 2022 20:54:17 -0500 Subject: [PATCH 53/89] Add pagination, filtering and sort to more API endpoints (#12186) --- .changelog/12186.txt | 7 + api/api.go | 10 +- api/api_test.go | 4 +- command/agent/http.go | 8 +- helper/raftutil/fsm.go | 38 +- nomad/acl_endpoint.go | 48 ++- nomad/acl_endpoint_test.go | 280 ++++++++++++++ nomad/alloc_endpoint.go | 144 ++++--- nomad/alloc_endpoint_test.go | 350 +++++++++++++++++- nomad/core_sched.go | 2 +- nomad/csi_endpoint.go | 75 ++-- nomad/csi_endpoint_test.go | 194 ++++++++++ nomad/deployment_endpoint.go | 50 +-- nomad/deployment_endpoint_test.go | 26 +- .../deploymentwatcher/deployments_watcher.go | 6 +- nomad/drainer_int_test.go | 14 +- nomad/eval_endpoint.go | 51 +-- nomad/eval_endpoint_test.go | 66 +--- nomad/fsm.go | 16 +- nomad/job_endpoint.go | 162 ++++---- nomad/job_endpoint_test.go | 178 +++++++++ nomad/search_endpoint.go | 22 +- nomad/state/paginator/filter.go | 41 ++ nomad/state/{ => paginator}/filter_test.go | 136 +++++-- nomad/state/{ => paginator}/paginator.go | 64 ++-- nomad/state/{ => paginator}/paginator_test.go | 48 ++- nomad/state/paginator/tokenizer.go | 82 ++++ nomad/state/paginator/tokenizer_test.go | 67 ++++ nomad/state/schema.go | 62 +++- nomad/state/state_store.go | 145 ++++++-- nomad/state/state_store_test.go | 12 +- nomad/structs/csi.go | 26 ++ nomad/structs/structs.go | 101 ++++- 33 files changed, 2027 insertions(+), 508 deletions(-) create mode 100644 .changelog/12186.txt create mode 100644 nomad/state/paginator/filter.go rename nomad/state/{ => paginator}/filter_test.go (61%) rename nomad/state/{ => paginator}/paginator.go (64%) rename nomad/state/{ => paginator}/paginator_test.go (74%) create mode 100644 nomad/state/paginator/tokenizer.go create mode 100644 nomad/state/paginator/tokenizer_test.go diff --git a/.changelog/12186.txt b/.changelog/12186.txt new file mode 100644 index 000000000..2e39b210a --- /dev/null +++ b/.changelog/12186.txt @@ -0,0 +1,7 @@ +```release-note:improvement +api: Add support for filtering, sorting, and pagination to the ACL tokens and allocations list endpoint +``` + +```release-note:improvement +api: Add support for filtering and pagination to the jobs and volumes list endpoint +``` diff --git a/api/api.go b/api/api.go index 08521179b..2dc00a3f6 100644 --- a/api/api.go +++ b/api/api.go @@ -82,10 +82,10 @@ type QueryOptions struct { // previous response. NextToken string - // Ascending is used to have results sorted in ascending chronological order. + // Reverse is used to reverse the default order of list results. 
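+ // Setting Reverse adds "reverse=true" to the request query string; see setQueryOptions below and the agent-side parseReverse added in this change.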
// - // Currently only supported by evaluations.List and deployments.list endpoints. - Ascending bool + // Currently only supported by specific endpoints. + Reverse bool // ctx is an optional context pass through to the underlying HTTP // request layer. Use Context() and WithContext() to manage this. @@ -605,8 +605,8 @@ func (r *request) setQueryOptions(q *QueryOptions) { if q.NextToken != "" { r.params.Set("next_token", q.NextToken) } - if q.Ascending { - r.params.Set("ascending", "true") + if q.Reverse { + r.params.Set("reverse", "true") } for k, v := range q.Params { r.params.Set(k, v) diff --git a/api/api_test.go b/api/api_test.go index 0f503c624..6ab82526d 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -181,7 +181,7 @@ func TestSetQueryOptions(t *testing.T) { WaitIndex: 1000, WaitTime: 100 * time.Second, AuthToken: "foobar", - Ascending: true, + Reverse: true, } r.setQueryOptions(q) @@ -199,7 +199,7 @@ func TestSetQueryOptions(t *testing.T) { try("stale", "") // should not be present try("index", "1000") try("wait", "100000ms") - try("ascending", "true") + try("reverse", "true") } func TestQueryOptionsContext(t *testing.T) { diff --git a/command/agent/http.go b/command/agent/http.go index 8568a0b0e..7d5fed9c4 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -788,7 +788,7 @@ func (s *HTTPServer) parse(resp http.ResponseWriter, req *http.Request, r *strin parseNamespace(req, &b.Namespace) parsePagination(req, b) parseFilter(req, b) - parseAscending(req, b) + parseReverse(req, b) return parseWait(resp, req, b) } @@ -814,10 +814,10 @@ func parseFilter(req *http.Request, b *structs.QueryOptions) { } } -// parseAscending parses the ascending query parameter for QueryOptions -func parseAscending(req *http.Request, b *structs.QueryOptions) { +// parseReverse parses the reverse query parameter for QueryOptions +func parseReverse(req *http.Request, b *structs.QueryOptions) { query := req.URL.Query() - b.Ascending = query.Get("ascending") == "true" + b.Reverse = query.Get("reverse") == "true" } // parseWriteRequest is a convenience method for endpoints that need to parse a diff --git a/helper/raftutil/fsm.go b/helper/raftutil/fsm.go index 26539215c..cad222f2e 100644 --- a/helper/raftutil/fsm.go +++ b/helper/raftutil/fsm.go @@ -185,28 +185,28 @@ func (f *FSMHelper) StateAsMap() map[string][]interface{} { } // StateAsMap returns a json-able representation of the state -func StateAsMap(state *state.StateStore) map[string][]interface{} { +func StateAsMap(store *state.StateStore) map[string][]interface{} { result := map[string][]interface{}{ - "ACLPolicies": toArray(state.ACLPolicies(nil)), - "ACLTokens": toArray(state.ACLTokens(nil)), - "Allocs": toArray(state.Allocs(nil)), - "CSIPlugins": toArray(state.CSIPlugins(nil)), - "CSIVolumes": toArray(state.CSIVolumes(nil)), - "Deployments": toArray(state.Deployments(nil, false)), - "Evals": toArray(state.Evals(nil, false)), - "Indexes": toArray(state.Indexes()), - "JobSummaries": toArray(state.JobSummaries(nil)), - "JobVersions": toArray(state.JobVersions(nil)), - "Jobs": toArray(state.Jobs(nil)), - "Nodes": toArray(state.Nodes(nil)), - "PeriodicLaunches": toArray(state.PeriodicLaunches(nil)), - "SITokenAccessors": toArray(state.SITokenAccessors(nil)), - "ScalingEvents": toArray(state.ScalingEvents(nil)), - "ScalingPolicies": toArray(state.ScalingPolicies(nil)), - "VaultAccessors": toArray(state.VaultAccessors(nil)), + "ACLPolicies": toArray(store.ACLPolicies(nil)), + "ACLTokens": toArray(store.ACLTokens(nil, state.SortDefault)), + 
"Allocs": toArray(store.Allocs(nil, state.SortDefault)), + "CSIPlugins": toArray(store.CSIPlugins(nil)), + "CSIVolumes": toArray(store.CSIVolumes(nil)), + "Deployments": toArray(store.Deployments(nil, state.SortDefault)), + "Evals": toArray(store.Evals(nil, state.SortDefault)), + "Indexes": toArray(store.Indexes()), + "JobSummaries": toArray(store.JobSummaries(nil)), + "JobVersions": toArray(store.JobVersions(nil)), + "Jobs": toArray(store.Jobs(nil)), + "Nodes": toArray(store.Nodes(nil)), + "PeriodicLaunches": toArray(store.PeriodicLaunches(nil)), + "SITokenAccessors": toArray(store.SITokenAccessors(nil)), + "ScalingEvents": toArray(store.ScalingEvents(nil)), + "ScalingPolicies": toArray(store.ScalingPolicies(nil)), + "VaultAccessors": toArray(store.VaultAccessors(nil)), } - insertEnterpriseState(result, state) + insertEnterpriseState(result, store) return result diff --git a/nomad/acl_endpoint.go b/nomad/acl_endpoint.go index b9a55cfa0..8f3031952 100644 --- a/nomad/acl_endpoint.go +++ b/nomad/acl_endpoint.go @@ -3,6 +3,7 @@ package nomad import ( "fmt" "io/ioutil" + "net/http" "os" "path/filepath" "strings" @@ -14,6 +15,7 @@ import ( policy "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" + "github.com/hashicorp/nomad/nomad/state/paginator" "github.com/hashicorp/nomad/nomad/structs" ) @@ -652,6 +654,7 @@ func (a *ACL) ListTokens(args *structs.ACLTokenListRequest, reply *structs.ACLTo } // Setup the blocking query + sort := state.SortOption(args.Reverse) opts := blockingOptions{ queryOpts: &args.QueryOptions, queryMeta: &reply.QueryMeta, @@ -659,34 +662,59 @@ func (a *ACL) ListTokens(args *structs.ACLTokenListRequest, reply *structs.ACLTo // Iterate over all the tokens var err error var iter memdb.ResultIterator + var opts paginator.StructsTokenizerOptions + if prefix := args.QueryOptions.Prefix; prefix != "" { iter, err = state.ACLTokenByAccessorIDPrefix(ws, prefix) + opts = paginator.StructsTokenizerOptions{ + WithID: true, + } } else if args.GlobalOnly { iter, err = state.ACLTokensByGlobal(ws, true) + opts = paginator.StructsTokenizerOptions{ + WithID: true, + } } else { - iter, err = state.ACLTokens(ws) + iter, err = state.ACLTokens(ws, sort) + opts = paginator.StructsTokenizerOptions{ + WithCreateIndex: true, + WithID: true, + } } if err != nil { return err } - // Convert all the tokens to a list stub - reply.Tokens = nil - for { - raw := iter.Next() - if raw == nil { - break - } - token := raw.(*structs.ACLToken) - reply.Tokens = append(reply.Tokens, token.Stub()) + tokenizer := paginator.NewStructsTokenizer(iter, opts) + + var tokens []*structs.ACLTokenListStub + paginator, err := paginator.NewPaginator(iter, tokenizer, nil, args.QueryOptions, + func(raw interface{}) error { + token := raw.(*structs.ACLToken) + tokens = append(tokens, token.Stub()) + return nil + }) + if err != nil { + return structs.NewErrRPCCodedf( + http.StatusBadRequest, "failed to create result paginator: %v", err) } + nextToken, err := paginator.Page() + if err != nil { + return structs.NewErrRPCCodedf( + http.StatusBadRequest, "failed to read result page: %v", err) + } + + reply.QueryMeta.NextToken = nextToken + reply.Tokens = tokens + // Use the last index that affected the token table index, err := state.Index("acl_token") if err != nil { return err } reply.Index = index + return nil }} return a.srv.blockingRPC(&opts) diff --git a/nomad/acl_endpoint_test.go b/nomad/acl_endpoint_test.go index 7258aba88..6e934b004 100644 --- 
a/nomad/acl_endpoint_test.go +++ b/nomad/acl_endpoint_test.go @@ -919,6 +919,286 @@ func TestACLEndpoint_ListTokens(t *testing.T) { assert.Equal(t, 2, len(resp3.Tokens)) } +func TestACLEndpoint_ListTokens_PaginationFiltering(t *testing.T) { + t.Parallel() + s1, cleanupS1 := TestServer(t, func(c *Config) { + c.ACLEnabled = true + }) + defer cleanupS1() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + // create a set of ACL tokens. these are in the order that the state store + // will return them from the iterator (sorted by key) for ease of writing + // tests + mocks := []struct { + ids []string + typ string + }{ + {ids: []string{"aaaa1111-3350-4b4b-d185-0e1992ed43e9"}, typ: "management"}, // 0 + {ids: []string{"aaaaaa22-3350-4b4b-d185-0e1992ed43e9"}}, // 1 + {ids: []string{"aaaaaa33-3350-4b4b-d185-0e1992ed43e9"}}, // 2 + {ids: []string{"aaaaaaaa-3350-4b4b-d185-0e1992ed43e9"}}, // 3 + {ids: []string{"aaaaaabb-3350-4b4b-d185-0e1992ed43e9"}}, // 4 + {ids: []string{"aaaaaacc-3350-4b4b-d185-0e1992ed43e9"}}, // 5 + {ids: []string{"aaaaaadd-3350-4b4b-d185-0e1992ed43e9"}}, // 6 + {ids: []string{"00000111-3350-4b4b-d185-0e1992ed43e9"}}, // 7 + {ids: []string{ // 8 + "00000222-3350-4b4b-d185-0e1992ed43e9", + "00000333-3350-4b4b-d185-0e1992ed43e9", + }}, + {}, // 9, index missing + {ids: []string{"bbbb1111-3350-4b4b-d185-0e1992ed43e9"}}, // 10 + } + + state := s1.fsm.State() + + var bootstrapToken string + for i, m := range mocks { + tokensInTx := []*structs.ACLToken{} + for _, id := range m.ids { + token := mock.ACLToken() + token.AccessorID = id + token.Type = m.typ + tokensInTx = append(tokensInTx, token) + } + index := 1000 + uint64(i) + + // bootstrap cluster with the first token + if i == 0 { + token := tokensInTx[0] + bootstrapToken = token.SecretID + err := s1.State().BootstrapACLTokens(structs.MsgTypeTestSetup, index, 0, token) + require.NoError(t, err) + + err = state.UpsertACLTokens(structs.MsgTypeTestSetup, index, tokensInTx[1:]) + require.NoError(t, err) + } else { + err := state.UpsertACLTokens(structs.MsgTypeTestSetup, index, tokensInTx) + require.NoError(t, err) + } + } + + cases := []struct { + name string + prefix string + filter string + nextToken string + pageSize int32 + expectedNextToken string + expectedIDs []string + expectedError string + }{ + { + name: "test01 size-2 page-1", + pageSize: 2, + expectedNextToken: "1002.aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + expectedIDs: []string{ + "aaaa1111-3350-4b4b-d185-0e1992ed43e9", + "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test02 size-2 page-1 with prefix", + prefix: "aaaa", + pageSize: 2, + expectedNextToken: "aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + expectedIDs: []string{ + "aaaa1111-3350-4b4b-d185-0e1992ed43e9", + "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test03 size-2 page-2 default NS", + pageSize: 2, + nextToken: "1002.aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1004.aaaaaabb-3350-4b4b-d185-0e1992ed43e9", + expectedIDs: []string{ + "aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test04 go-bexpr filter", + filter: `AccessorID matches "^a+[123]"`, + expectedIDs: []string{ + "aaaa1111-3350-4b4b-d185-0e1992ed43e9", + "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", + "aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test05 go-bexpr filter with pagination", + filter: `AccessorID matches "^a+[123]"`, + pageSize: 2, + expectedNextToken: "1002.aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + expectedIDs: 
[]string{ + "aaaa1111-3350-4b4b-d185-0e1992ed43e9", + "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test06 go-bexpr invalid expression", + filter: `NotValid`, + expectedError: "failed to read filter expression", + }, + { + name: "test07 go-bexpr invalid field", + filter: `InvalidField == "value"`, + expectedError: "error finding value in datum", + }, + { + name: "test08 non-lexicographic order", + pageSize: 1, + nextToken: "1007.00000111-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1008.00000222-3350-4b4b-d185-0e1992ed43e9", + expectedIDs: []string{ + "00000111-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test09 same index", + pageSize: 1, + nextToken: "1008.00000222-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1008.00000333-3350-4b4b-d185-0e1992ed43e9", + expectedIDs: []string{ + "00000222-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test10 missing index", + pageSize: 1, + nextToken: "1009.e9522802-0cd8-4b1d-9c9e-ab3d97938371", + expectedIDs: []string{ + "bbbb1111-3350-4b4b-d185-0e1992ed43e9", + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + req := &structs.ACLTokenListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + Prefix: tc.prefix, + Filter: tc.filter, + PerPage: tc.pageSize, + NextToken: tc.nextToken, + }, + } + req.AuthToken = bootstrapToken + var resp structs.ACLTokenListResponse + err := msgpackrpc.CallWithCodec(codec, "ACL.ListTokens", req, &resp) + if tc.expectedError == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectedError) + return + } + + gotIDs := []string{} + for _, token := range resp.Tokens { + gotIDs = append(gotIDs, token.AccessorID) + } + require.Equal(t, tc.expectedIDs, gotIDs, "unexpected page of tokens") + require.Equal(t, tc.expectedNextToken, resp.QueryMeta.NextToken, "unexpected NextToken") + }) + } +} + +func TestACLEndpoint_ListTokens_Order(t *testing.T) { + t.Parallel() + + s1, cleanupS1 := TestServer(t, func(c *Config) { + c.ACLEnabled = true + }) + defer cleanupS1() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + // Create register requests + uuid1 := uuid.Generate() + token1 := mock.ACLManagementToken() + token1.AccessorID = uuid1 + + uuid2 := uuid.Generate() + token2 := mock.ACLToken() + token2.AccessorID = uuid2 + + uuid3 := uuid.Generate() + token3 := mock.ACLToken() + token3.AccessorID = uuid3 + + // bootstrap cluster with the first token + bootstrapToken := token1.SecretID + err := s1.State().BootstrapACLTokens(structs.MsgTypeTestSetup, 1000, 0, token1) + require.NoError(t, err) + + err = s1.fsm.State().UpsertACLTokens(structs.MsgTypeTestSetup, 1001, []*structs.ACLToken{token2}) + require.NoError(t, err) + + err = s1.fsm.State().UpsertACLTokens(structs.MsgTypeTestSetup, 1002, []*structs.ACLToken{token3}) + require.NoError(t, err) + + // update token2 again so we can later assert create index order did not change + err = s1.fsm.State().UpsertACLTokens(structs.MsgTypeTestSetup, 1003, []*structs.ACLToken{token2}) + require.NoError(t, err) + + t.Run("default", func(t *testing.T) { + // Lookup the tokens in the default order (oldest first) + get := &structs.ACLTokenListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + }, + } + get.AuthToken = bootstrapToken + + var resp structs.ACLTokenListResponse + err = msgpackrpc.CallWithCodec(codec, "ACL.ListTokens", get, &resp) + require.NoError(t, err) + require.Equal(t, uint64(1003), resp.Index) + require.Len(t, 
resp.Tokens, 3) + + // Assert returned order is by CreateIndex (ascending) + require.Equal(t, uint64(1000), resp.Tokens[0].CreateIndex) + require.Equal(t, uuid1, resp.Tokens[0].AccessorID) + + require.Equal(t, uint64(1001), resp.Tokens[1].CreateIndex) + require.Equal(t, uuid2, resp.Tokens[1].AccessorID) + + require.Equal(t, uint64(1002), resp.Tokens[2].CreateIndex) + require.Equal(t, uuid3, resp.Tokens[2].AccessorID) + }) + + t.Run("reverse", func(t *testing.T) { + // Lookup the tokens in reverse order (newest first) + get := &structs.ACLTokenListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + Reverse: true, + }, + } + get.AuthToken = bootstrapToken + + var resp structs.ACLTokenListResponse + err = msgpackrpc.CallWithCodec(codec, "ACL.ListTokens", get, &resp) + require.NoError(t, err) + require.Equal(t, uint64(1003), resp.Index) + require.Len(t, resp.Tokens, 3) + + // Assert returned order is by CreateIndex (descending) + require.Equal(t, uint64(1002), resp.Tokens[0].CreateIndex) + require.Equal(t, uuid3, resp.Tokens[0].AccessorID) + + require.Equal(t, uint64(1001), resp.Tokens[1].CreateIndex) + require.Equal(t, uuid2, resp.Tokens[1].AccessorID) + + require.Equal(t, uint64(1000), resp.Tokens[2].CreateIndex) + require.Equal(t, uuid1, resp.Tokens[2].AccessorID) + }) +} + func TestACLEndpoint_ListTokens_Blocking(t *testing.T) { t.Parallel() diff --git a/nomad/alloc_endpoint.go b/nomad/alloc_endpoint.go index 92abee62f..65b0ea4b7 100644 --- a/nomad/alloc_endpoint.go +++ b/nomad/alloc_endpoint.go @@ -2,6 +2,7 @@ package nomad import ( "fmt" + "net/http" "time" metrics "github.com/armon/go-metrics" @@ -13,6 +14,7 @@ import ( "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" + "github.com/hashicorp/nomad/nomad/state/paginator" "github.com/hashicorp/nomad/nomad/structs" ) @@ -32,111 +34,103 @@ func (a *Alloc) List(args *structs.AllocListRequest, reply *structs.AllocListRes } defer metrics.MeasureSince([]string{"nomad", "alloc", "list"}, time.Now()) - if args.RequestNamespace() == structs.AllNamespacesSentinel { - return a.listAllNamespaces(args, reply) - } + namespace := args.RequestNamespace() + var allow func(string) bool // Check namespace read-job permissions aclObj, err := a.srv.ResolveToken(args.AuthToken) - if err != nil { + + switch { + case err != nil: return err - } else if aclObj != nil && !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityReadJob) { + case aclObj == nil: + allow = func(string) bool { + return true + } + case namespace == structs.AllNamespacesSentinel: + allow = func(ns string) bool { + return aclObj.AllowNsOp(ns, acl.NamespaceCapabilityReadJob) + } + case !aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityReadJob): return structs.ErrPermissionDenied + default: + allow = func(string) bool { + return true + } } // Setup the blocking query + sort := state.SortOption(args.Reverse) opts := blockingOptions{ queryOpts: &args.QueryOptions, queryMeta: &reply.QueryMeta, run: func(ws memdb.WatchSet, state *state.StateStore) error { - // Capture all the allocations + // Scan all the allocations var err error var iter memdb.ResultIterator + var opts paginator.StructsTokenizerOptions - prefix := args.QueryOptions.Prefix - if prefix != "" { - iter, err = state.AllocsByIDPrefix(ws, args.RequestNamespace(), prefix) - } else { - iter, err = state.AllocsByNamespace(ws, args.RequestNamespace()) - } - if err != nil { - return err - } - - var allocs []*structs.AllocListStub - for { - raw 
:= iter.Next() - if raw == nil { - break - } - alloc := raw.(*structs.Allocation) - allocs = append(allocs, alloc.Stub(args.Fields)) - } - reply.Allocations = allocs - - // Use the last index that affected the jobs table - index, err := state.Index("allocs") - if err != nil { - return err - } - reply.Index = index - - // Set the query response - a.srv.setQueryMeta(&reply.QueryMeta) - return nil - }} - return a.srv.blockingRPC(&opts) -} - -// listAllNamespaces lists all allocations across all namespaces -func (a *Alloc) listAllNamespaces(args *structs.AllocListRequest, reply *structs.AllocListResponse) error { - // Check for read-job permissions - aclObj, err := a.srv.ResolveToken(args.AuthToken) - if err != nil { - return err - } - prefix := args.QueryOptions.Prefix - allow := func(ns string) bool { - return aclObj.AllowNsOp(ns, acl.NamespaceCapabilityReadJob) - } - - // Setup the blocking query - opts := blockingOptions{ - queryOpts: &args.QueryOptions, - queryMeta: &reply.QueryMeta, - run: func(ws memdb.WatchSet, state *state.StateStore) error { // get list of accessible namespaces - allowedNSes, err := allowedNSes(aclObj, state, allow) + allowableNamespaces, err := allowedNSes(aclObj, state, allow) if err == structs.ErrPermissionDenied { - // return empty allocations if token isn't authorized for any + // return empty allocation if token is not authorized for any // namespace, matching other endpoints - reply.Allocations = []*structs.AllocListStub{} + reply.Allocations = make([]*structs.AllocListStub, 0) } else if err != nil { return err } else { - var iter memdb.ResultIterator - var err error - if prefix != "" { - iter, err = state.AllocsByIDPrefixAllNSs(ws, prefix) + if prefix := args.QueryOptions.Prefix; prefix != "" { + iter, err = state.AllocsByIDPrefix(ws, namespace, prefix) + opts = paginator.StructsTokenizerOptions{ + WithID: true, + } + } else if namespace != structs.AllNamespacesSentinel { + iter, err = state.AllocsByNamespaceOrdered(ws, namespace, sort) + opts = paginator.StructsTokenizerOptions{ + WithCreateIndex: true, + WithID: true, + } } else { - iter, err = state.Allocs(ws) + iter, err = state.Allocs(ws, sort) + opts = paginator.StructsTokenizerOptions{ + WithCreateIndex: true, + WithID: true, + } } if err != nil { return err } - var allocs []*structs.AllocListStub - for raw := iter.Next(); raw != nil; raw = iter.Next() { - alloc := raw.(*structs.Allocation) - if allowedNSes != nil && !allowedNSes[alloc.Namespace] { - continue - } - allocs = append(allocs, alloc.Stub(args.Fields)) + tokenizer := paginator.NewStructsTokenizer(iter, opts) + filters := []paginator.Filter{ + paginator.NamespaceFilter{ + AllowableNamespaces: allowableNamespaces, + }, } - reply.Allocations = allocs + + var stubs []*structs.AllocListStub + paginator, err := paginator.NewPaginator(iter, tokenizer, filters, args.QueryOptions, + func(raw interface{}) error { + allocation := raw.(*structs.Allocation) + stubs = append(stubs, allocation.Stub(args.Fields)) + return nil + }) + if err != nil { + return structs.NewErrRPCCodedf( + http.StatusBadRequest, "failed to create result paginator: %v", err) + } + + nextToken, err := paginator.Page() + if err != nil { + return structs.NewErrRPCCodedf( + http.StatusBadRequest, "failed to read result page: %v", err) + } + + reply.QueryMeta.NextToken = nextToken + reply.Allocations = stubs } - // Use the last index that affected the jobs table + // Use the last index that affected the allocs table index, err := state.Index("allocs") if err != nil { return err diff 
--git a/nomad/alloc_endpoint_test.go b/nomad/alloc_endpoint_test.go index d34abfbbf..9efdc06bb 100644 --- a/nomad/alloc_endpoint_test.go +++ b/nomad/alloc_endpoint_test.go @@ -74,6 +74,330 @@ func TestAllocEndpoint_List(t *testing.T) { require.Equal(t, uint64(1000), resp2.Index) require.Len(t, resp2.Allocations, 1) require.Equal(t, alloc.ID, resp2.Allocations[0].ID) + + // Lookup allocations with a filter + get = &structs.AllocListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: structs.DefaultNamespace, + Filter: "TaskGroup == web", + }, + } + + var resp3 structs.AllocListResponse + require.NoError(t, msgpackrpc.CallWithCodec(codec, "Alloc.List", get, &resp3)) + require.Equal(t, uint64(1000), resp3.Index) + require.Len(t, resp3.Allocations, 1) + require.Equal(t, alloc.ID, resp3.Allocations[0].ID) +} + +func TestAllocEndpoint_List_PaginationFiltering(t *testing.T) { + t.Parallel() + s1, _, cleanupS1 := TestACLServer(t, nil) + defer cleanupS1() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + // create a set of allocs and field values to filter on. these are in the order + // that the state store will return them from the iterator (sorted by create + // index), for ease of writing tests. + mocks := []struct { + ids []string + namespace string + group string + }{ + {ids: []string{"aaaa1111-3350-4b4b-d185-0e1992ed43e9"}}, // 0 + {ids: []string{"aaaaaa22-3350-4b4b-d185-0e1992ed43e9"}}, // 1 + {ids: []string{"aaaaaa33-3350-4b4b-d185-0e1992ed43e9"}, namespace: "non-default"}, // 2 + {ids: []string{"aaaaaaaa-3350-4b4b-d185-0e1992ed43e9"}, group: "bar"}, // 3 + {ids: []string{"aaaaaabb-3350-4b4b-d185-0e1992ed43e9"}, group: "goo"}, // 4 + {ids: []string{"aaaaaacc-3350-4b4b-d185-0e1992ed43e9"}}, // 5 + {ids: []string{"aaaaaadd-3350-4b4b-d185-0e1992ed43e9"}, group: "bar"}, // 6 + {ids: []string{"aaaaaaee-3350-4b4b-d185-0e1992ed43e9"}, group: "goo"}, // 7 + {ids: []string{"aaaaaaff-3350-4b4b-d185-0e1992ed43e9"}, group: "bar"}, // 8 + {ids: []string{"00000111-3350-4b4b-d185-0e1992ed43e9"}}, // 9 + {ids: []string{ // 10 + "00000222-3350-4b4b-d185-0e1992ed43e9", + "00000333-3350-4b4b-d185-0e1992ed43e9", + }}, + {}, // 11, index missing + {ids: []string{"bbbb1111-3350-4b4b-d185-0e1992ed43e9"}}, // 12 + } + + state := s1.fsm.State() + + var allocs []*structs.Allocation + for i, m := range mocks { + allocsInTx := []*structs.Allocation{} + for _, id := range m.ids { + alloc := mock.Alloc() + alloc.ID = id + if m.namespace != "" { + alloc.Namespace = m.namespace + } + if m.group != "" { + alloc.TaskGroup = m.group + } + allocs = append(allocs, alloc) + allocsInTx = append(allocsInTx, alloc) + } + // other fields + index := 1000 + uint64(i) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, allocsInTx)) + } + + require.NoError(t, state.UpsertNamespaces(1099, []*structs.Namespace{ + {Name: "non-default"}, + })) + + aclToken := mock.CreatePolicyAndToken(t, + state, 1100, "test-valid-read", + mock.NamespacePolicy("*", "read", nil), + ).SecretID + + cases := []struct { + name string + namespace string + prefix string + nextToken string + pageSize int32 + filter string + expIDs []string + expNextToken string + expErr string + }{ + { + name: "test01 size-2 page-1 ns-default", + pageSize: 2, + expIDs: []string{ // first two items + "aaaa1111-3350-4b4b-d185-0e1992ed43e9", + "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", + }, + expNextToken: "1003.aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", // next one in default ns + }, + { + name: "test02 size-2 page-1 
ns-default with-prefix", + prefix: "aaaa", + pageSize: 2, + expIDs: []string{ + "aaaa1111-3350-4b4b-d185-0e1992ed43e9", + "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", + }, + expNextToken: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + }, + { + name: "test03 size-2 page-2 ns-default", + pageSize: 2, + nextToken: "1003.aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + expNextToken: "1005.aaaaaacc-3350-4b4b-d185-0e1992ed43e9", + expIDs: []string{ + "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + "aaaaaabb-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test04 size-2 page-2 ns-default with prefix", + prefix: "aaaa", + pageSize: 2, + nextToken: "aaaaaabb-3350-4b4b-d185-0e1992ed43e9", + expNextToken: "aaaaaadd-3350-4b4b-d185-0e1992ed43e9", + expIDs: []string{ + "aaaaaabb-3350-4b4b-d185-0e1992ed43e9", + "aaaaaacc-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test05 go-bexpr filter", + filter: `TaskGroup == "goo"`, + nextToken: "", + expIDs: []string{ + "aaaaaabb-3350-4b4b-d185-0e1992ed43e9", + "aaaaaaee-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test06 go-bexpr filter with pagination", + filter: `TaskGroup == "bar"`, + pageSize: 2, + expNextToken: "1008.aaaaaaff-3350-4b4b-d185-0e1992ed43e9", + expIDs: []string{ + "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + "aaaaaadd-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test07 go-bexpr filter namespace", + namespace: "non-default", + filter: `ID contains "aaa"`, + expIDs: []string{ + "aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test08 go-bexpr wrong namespace", + namespace: "default", + filter: `Namespace == "non-default"`, + expIDs: []string(nil), + }, + { + name: "test09 go-bexpr invalid expression", + filter: `NotValid`, + expErr: "failed to read filter expression", + }, + { + name: "test10 go-bexpr invalid field", + filter: `InvalidField == "value"`, + expErr: "error finding value in datum", + }, + { + name: "test11 non-lexicographic order", + pageSize: 1, + nextToken: "1009.00000111-3350-4b4b-d185-0e1992ed43e9", + expNextToken: "1010.00000222-3350-4b4b-d185-0e1992ed43e9", + expIDs: []string{ + "00000111-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test12 same index", + pageSize: 1, + nextToken: "1010.00000222-3350-4b4b-d185-0e1992ed43e9", + expNextToken: "1010.00000333-3350-4b4b-d185-0e1992ed43e9", + expIDs: []string{ + "00000222-3350-4b4b-d185-0e1992ed43e9", + }, + }, + { + name: "test13 missing index", + pageSize: 1, + nextToken: "1011.e9522802-0cd8-4b1d-9c9e-ab3d97938371", + expIDs: []string{ + "bbbb1111-3350-4b4b-d185-0e1992ed43e9", + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + var req = &structs.AllocListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: tc.namespace, + Prefix: tc.prefix, + PerPage: tc.pageSize, + NextToken: tc.nextToken, + Filter: tc.filter, + }, + Fields: &structs.AllocStubFields{ + Resources: false, + TaskStates: false, + }, + } + req.AuthToken = aclToken + var resp structs.AllocListResponse + err := msgpackrpc.CallWithCodec(codec, "Alloc.List", req, &resp) + if tc.expErr == "" { + require.NoError(t, err) + } else { + require.Contains(t, err, tc.expErr) + } + + var gotIDs []string + for _, alloc := range resp.Allocations { + gotIDs = append(gotIDs, alloc.ID) + } + require.Equal(t, tc.expIDs, gotIDs) + require.Equal(t, tc.expNextToken, resp.QueryMeta.NextToken) + }) + } +} + +func TestAllocEndpoint_List_order(t *testing.T) { + t.Parallel() + + s1, cleanupS1 := TestServer(t, nil) + defer cleanupS1() + codec := rpcClient(t, s1) + 
testutil.WaitForLeader(t, s1.RPC) + + // Create register requests + uuid1 := uuid.Generate() + alloc1 := mock.Alloc() + alloc1.ID = uuid1 + + uuid2 := uuid.Generate() + alloc2 := mock.Alloc() + alloc2.ID = uuid2 + + uuid3 := uuid.Generate() + alloc3 := mock.Alloc() + alloc3.ID = uuid3 + + err := s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1}) + require.NoError(t, err) + + err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc2}) + require.NoError(t, err) + + err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc3}) + require.NoError(t, err) + + // update alloc2 again so we can later assert create index order did not change + err = s1.fsm.State().UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc2}) + require.NoError(t, err) + + t.Run("default", func(t *testing.T) { + // Lookup the allocations in the default order (oldest first) + get := &structs.AllocListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: "*", + }, + } + + var resp structs.AllocListResponse + err = msgpackrpc.CallWithCodec(codec, "Alloc.List", get, &resp) + require.NoError(t, err) + require.Equal(t, uint64(1003), resp.Index) + require.Len(t, resp.Allocations, 3) + + // Assert returned order is by CreateIndex (ascending) + require.Equal(t, uint64(1000), resp.Allocations[0].CreateIndex) + require.Equal(t, uuid1, resp.Allocations[0].ID) + + require.Equal(t, uint64(1001), resp.Allocations[1].CreateIndex) + require.Equal(t, uuid2, resp.Allocations[1].ID) + + require.Equal(t, uint64(1002), resp.Allocations[2].CreateIndex) + require.Equal(t, uuid3, resp.Allocations[2].ID) + }) + + t.Run("reverse", func(t *testing.T) { + // Lookup the allocations in reverse order (newest first) + get := &structs.AllocListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: "*", + Reverse: true, + }, + } + + var resp structs.AllocListResponse + err = msgpackrpc.CallWithCodec(codec, "Alloc.List", get, &resp) + require.NoError(t, err) + require.Equal(t, uint64(1003), resp.Index) + require.Len(t, resp.Allocations, 3) + + // Assert returned order is by CreateIndex (descending) + require.Equal(t, uint64(1002), resp.Allocations[0].CreateIndex) + require.Equal(t, uuid3, resp.Allocations[0].ID) + + require.Equal(t, uint64(1001), resp.Allocations[1].CreateIndex) + require.Equal(t, uuid2, resp.Allocations[1].ID) + + require.Equal(t, uint64(1000), resp.Allocations[2].CreateIndex) + require.Equal(t, uuid1, resp.Allocations[2].ID) + }) } func TestAllocEndpoint_List_Fields(t *testing.T) { @@ -327,18 +651,23 @@ func TestAllocEndpoint_List_AllNamespaces_OSS(t *testing.T) { require.NoError(t, state.UpsertNamespaces(900, []*structs.Namespace{ns1, ns2})) // Create the allocations + uuid1 := uuid.Generate() alloc1 := mock.Alloc() - alloc1.ID = "a" + alloc1.ID[1:] + alloc1.ID = uuid1 alloc1.Namespace = ns1.Name + + uuid2 := uuid.Generate() alloc2 := mock.Alloc() - alloc2.ID = "b" + alloc2.ID[1:] + alloc2.ID = uuid2 alloc2.Namespace = ns2.Name + summary1 := mock.JobSummary(alloc1.JobID) summary2 := mock.JobSummary(alloc2.JobID) - require.NoError(t, state.UpsertJobSummary(999, summary1)) - require.NoError(t, state.UpsertJobSummary(999, summary2)) - require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + require.NoError(t, state.UpsertJobSummary(1000, summary1)) + require.NoError(t, state.UpsertJobSummary(1001, summary2)) 
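+ // Upsert each alloc at its own raft index (1002 and 1003) so the allocs carry distinct CreateIndex values; the ordered iteration added in this change keys on CreateIndex, and the resp.Index assertions below expect 1003.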
+ require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc1})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc2})) t.Run("looking up all allocations", func(t *testing.T) { get := &structs.AllocListRequest{ @@ -349,7 +678,7 @@ func TestAllocEndpoint_List_AllNamespaces_OSS(t *testing.T) { } var resp structs.AllocListResponse require.NoError(t, msgpackrpc.CallWithCodec(codec, "Alloc.List", get, &resp)) - require.Equal(t, uint64(1000), resp.Index) + require.Equal(t, uint64(1003), resp.Index) require.Len(t, resp.Allocations, 2) require.ElementsMatch(t, []string{resp.Allocations[0].ID, resp.Allocations[1].ID}, @@ -367,26 +696,23 @@ func TestAllocEndpoint_List_AllNamespaces_OSS(t *testing.T) { } var resp structs.AllocListResponse require.NoError(t, msgpackrpc.CallWithCodec(codec, "Alloc.List", get, &resp)) - require.Equal(t, uint64(1000), resp.Index) + require.Equal(t, uint64(1003), resp.Index) require.Len(t, resp.Allocations, 1) require.Equal(t, alloc1.ID, resp.Allocations[0].ID) require.Equal(t, alloc1.Namespace, resp.Allocations[0].Namespace) }) t.Run("looking up allocations with mismatch prefix", func(t *testing.T) { - // allocations were constructed above to have prefix starting with "a" or "b" - badPrefix := "cc" - get := &structs.AllocListRequest{ QueryOptions: structs.QueryOptions{ Region: "global", Namespace: "*", - Prefix: badPrefix, + Prefix: "000000", // unlikely to match }, } var resp structs.AllocListResponse require.NoError(t, msgpackrpc.CallWithCodec(codec, "Alloc.List", get, &resp)) - require.Equal(t, uint64(1000), resp.Index) + require.Equal(t, uint64(1003), resp.Index) require.Empty(t, resp.Allocations) }) } diff --git a/nomad/core_sched.go b/nomad/core_sched.go index e4dbaf82a..1819847c7 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -545,7 +545,7 @@ func (c *CoreScheduler) nodeReap(eval *structs.Evaluation, nodeIDs []string) err func (c *CoreScheduler) deploymentGC(eval *structs.Evaluation) error { // Iterate over the deployments ws := memdb.NewWatchSet() - iter, err := c.snap.Deployments(ws, false) + iter, err := c.snap.Deployments(ws, state.SortDefault) if err != nil { return err } diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 139ed4b72..e46351bbf 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -3,6 +3,7 @@ package nomad import ( "errors" "fmt" + "net/http" "time" metrics "github.com/armon/go-metrics" @@ -12,6 +13,7 @@ import ( "github.com/hashicorp/nomad/acl" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad/state" + "github.com/hashicorp/nomad/nomad/state/paginator" "github.com/hashicorp/nomad/nomad/structs" ) @@ -136,40 +138,65 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV } else { iter, err = snap.CSIVolumes(ws) } - if err != nil { return err } + tokenizer := paginator.NewStructsTokenizer( + iter, + paginator.StructsTokenizerOptions{ + WithNamespace: true, + WithID: true, + }, + ) + volFilter := paginator.GenericFilter{ + Allow: func(raw interface{}) (bool, error) { + vol := raw.(*structs.CSIVolume) + + // Remove (possibly again) by PluginID to handle passing both + // NodeID and PluginID + if args.PluginID != "" && args.PluginID != vol.PluginID { + return false, nil + } + + // Remove by Namespace, since CSIVolumesByNodeID hasn't used + // the Namespace yet + if ns != structs.AllNamespacesSentinel && vol.Namespace != ns { + return false, nil + } + + 
return true, nil + }, + } + filters := []paginator.Filter{volFilter} + // Collect results, filter by ACL access vs := []*structs.CSIVolListStub{} - for { - raw := iter.Next() - if raw == nil { - break - } - vol := raw.(*structs.CSIVolume) + paginator, err := paginator.NewPaginator(iter, tokenizer, filters, args.QueryOptions, + func(raw interface{}) error { + vol := raw.(*structs.CSIVolume) - // Remove (possibly again) by PluginID to handle passing both - // NodeID and PluginID - if args.PluginID != "" && args.PluginID != vol.PluginID { - continue - } + vol, err := snap.CSIVolumeDenormalizePlugins(ws, vol.Copy()) + if err != nil { + return err + } - // Remove by Namespace, since CSIVolumesByNodeID hasn't used - // the Namespace yet - if ns != structs.AllNamespacesSentinel && vol.Namespace != ns { - continue - } - - vol, err := snap.CSIVolumeDenormalizePlugins(ws, vol.Copy()) - if err != nil { - return err - } - - vs = append(vs, vol.Stub()) + vs = append(vs, vol.Stub()) + return nil + }) + if err != nil { + return structs.NewErrRPCCodedf( + http.StatusBadRequest, "failed to create result paginator: %v", err) } + + nextToken, err := paginator.Page() + if err != nil { + return structs.NewErrRPCCodedf( + http.StatusBadRequest, "failed to read result page: %v", err) + } + + reply.QueryMeta.NextToken = nextToken reply.Volumes = vs return v.srv.replySetIndex(csiVolumeTable, &reply.QueryMeta) }} diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 528020bc9..a7129e8f4 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -753,6 +753,200 @@ func TestCSIVolumeEndpoint_ListAllNamespaces(t *testing.T) { require.Equal(t, structs.DefaultNamespace, resp2.Volumes[0].Namespace) } +func TestCSIVolumeEndpoint_List_PaginationFiltering(t *testing.T) { + t.Parallel() + s1, cleanupS1 := TestServer(t, nil) + defer cleanupS1() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + nonDefaultNS := "non-default" + + // create a set of volumes. these are in the order that the state store + // will return them from the iterator (sorted by create index), for ease of + // writing tests + mocks := []struct { + id string + namespace string + }{ + {id: "vol-01"}, // 0 + {id: "vol-02"}, // 1 + {id: "vol-03", namespace: nonDefaultNS}, // 2 + {id: "vol-04"}, // 3 + {id: "vol-05"}, // 4 + {id: "vol-06"}, // 5 + {id: "vol-07"}, // 6 + {id: "vol-08"}, // 7 + {}, // 9, missing volume + {id: "vol-10"}, // 10 + } + + state := s1.fsm.State() + plugin := mock.CSIPlugin() + + // Create namespaces. 
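+	// vol-09 is deliberately absent from the set above so a later case can
+	// resume from a pagination token whose entry no longer exists (see the
+	// "missing volume" case).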
+ err := state.UpsertNamespaces(999, []*structs.Namespace{{Name: nonDefaultNS}}) + require.NoError(t, err) + + for i, m := range mocks { + if m.id == "" { + continue + } + + volume := mock.CSIVolume(plugin) + volume.ID = m.id + if m.namespace != "" { // defaults to "default" + volume.Namespace = m.namespace + } + index := 1000 + uint64(i) + require.NoError(t, state.CSIVolumeRegister(index, []*structs.CSIVolume{volume})) + } + + cases := []struct { + name string + namespace string + prefix string + filter string + nextToken string + pageSize int32 + expectedNextToken string + expectedIDs []string + expectedError string + }{ + { + name: "test01 size-2 page-1 default NS", + pageSize: 2, + expectedNextToken: "default.vol-04", + expectedIDs: []string{ + "vol-01", + "vol-02", + }, + }, + { + name: "test02 size-2 page-1 default NS with prefix", + prefix: "vol", + pageSize: 2, + expectedNextToken: "default.vol-04", + expectedIDs: []string{ + "vol-01", + "vol-02", + }, + }, + { + name: "test03 size-2 page-2 default NS", + pageSize: 2, + nextToken: "default.vol-04", + expectedNextToken: "default.vol-06", + expectedIDs: []string{ + "vol-04", + "vol-05", + }, + }, + { + name: "test04 size-2 page-2 default NS with prefix", + prefix: "vol", + pageSize: 2, + nextToken: "default.vol-04", + expectedNextToken: "default.vol-06", + expectedIDs: []string{ + "vol-04", + "vol-05", + }, + }, + { + name: "test05 no valid results with filters and prefix", + prefix: "cccc", + pageSize: 2, + nextToken: "", + expectedIDs: []string{}, + }, + { + name: "test06 go-bexpr filter", + namespace: "*", + filter: `ID matches "^vol-0[123]"`, + expectedIDs: []string{ + "vol-01", + "vol-02", + "vol-03", + }, + }, + { + name: "test07 go-bexpr filter with pagination", + namespace: "*", + filter: `ID matches "^vol-0[123]"`, + pageSize: 2, + expectedNextToken: "non-default.vol-03", + expectedIDs: []string{ + "vol-01", + "vol-02", + }, + }, + { + name: "test08 go-bexpr filter in namespace", + namespace: "non-default", + filter: `Provider == "com.hashicorp:mock"`, + expectedIDs: []string{ + "vol-03", + }, + }, + { + name: "test09 go-bexpr wrong namespace", + namespace: "default", + filter: `Namespace == "non-default"`, + expectedIDs: []string{}, + }, + { + name: "test10 go-bexpr invalid expression", + filter: `NotValid`, + expectedError: "failed to read filter expression", + }, + { + name: "test11 go-bexpr invalid field", + filter: `InvalidField == "value"`, + expectedError: "error finding value in datum", + }, + { + name: "test14 missing volume", + pageSize: 1, + nextToken: "default.vol-09", + expectedIDs: []string{ + "vol-10", + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + req := &structs.CSIVolumeListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: tc.namespace, + Prefix: tc.prefix, + Filter: tc.filter, + PerPage: tc.pageSize, + NextToken: tc.nextToken, + }, + } + var resp structs.CSIVolumeListResponse + err := msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp) + if tc.expectedError == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectedError) + return + } + + gotIDs := []string{} + for _, deployment := range resp.Volumes { + gotIDs = append(gotIDs, deployment.ID) + } + require.Equal(t, tc.expectedIDs, gotIDs, "unexpected page of volumes") + require.Equal(t, tc.expectedNextToken, resp.QueryMeta.NextToken, "unexpected NextToken") + }) + } +} + func TestCSIVolumeEndpoint_Create(t *testing.T) { 
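A note on the Filter cases above (test06 through test11): the expressions are plain go-bexpr, evaluated against each element before pagination. A standalone sketch of the same matching, with a hypothetical stand-in struct for the fields the filter sees:

package main

import (
	"fmt"

	"github.com/hashicorp/go-bexpr"
)

// vol is a stand-in for the volume stub fields visible to the filter.
type vol struct {
	ID       string
	Provider string
}

func main() {
	eval, err := bexpr.CreateEvaluator(`ID matches "^vol-0[123]"`)
	if err != nil {
		panic(err) // "failed to read filter expression" surfaces from here
	}

	ok, err := eval.Evaluate(vol{ID: "vol-02", Provider: "com.hashicorp:mock"})
	fmt.Println(ok, err) // true <nil>
}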
t.Parallel() var err error diff --git a/nomad/deployment_endpoint.go b/nomad/deployment_endpoint.go index 70f685d4a..d0217f424 100644 --- a/nomad/deployment_endpoint.go +++ b/nomad/deployment_endpoint.go @@ -10,33 +10,10 @@ import ( memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/state" + "github.com/hashicorp/nomad/nomad/state/paginator" "github.com/hashicorp/nomad/nomad/structs" ) -// DeploymentPaginationIterator is a wrapper over a go-memdb iterator that -// implements the paginator Iterator interface. -type DeploymentPaginationIterator struct { - iter memdb.ResultIterator - byCreateIndex bool -} - -func (it DeploymentPaginationIterator) Next() (string, interface{}) { - raw := it.iter.Next() - if raw == nil { - return "", nil - } - - d := raw.(*structs.Deployment) - token := d.ID - - // prefix the pagination token by CreateIndex to keep it properly sorted. - if it.byCreateIndex { - token = fmt.Sprintf("%v-%v", d.CreateIndex, d.ID) - } - - return token, d -} - // Deployment endpoint is used for manipulating deployments type Deployment struct { srv *Server @@ -426,6 +403,7 @@ func (d *Deployment) List(args *structs.DeploymentListRequest, reply *structs.De } // Setup the blocking query + sort := state.SortOption(args.Reverse) opts := blockingOptions{ queryOpts: &args.QueryOptions, queryMeta: &reply.QueryMeta, @@ -433,26 +411,34 @@ func (d *Deployment) List(args *structs.DeploymentListRequest, reply *structs.De // Capture all the deployments var err error var iter memdb.ResultIterator - var deploymentIter DeploymentPaginationIterator + var opts paginator.StructsTokenizerOptions if prefix := args.QueryOptions.Prefix; prefix != "" { iter, err = store.DeploymentsByIDPrefix(ws, namespace, prefix) - deploymentIter.byCreateIndex = false + opts = paginator.StructsTokenizerOptions{ + WithID: true, + } } else if namespace != structs.AllNamespacesSentinel { - iter, err = store.DeploymentsByNamespaceOrdered(ws, namespace, args.Ascending) - deploymentIter.byCreateIndex = true + iter, err = store.DeploymentsByNamespaceOrdered(ws, namespace, sort) + opts = paginator.StructsTokenizerOptions{ + WithCreateIndex: true, + WithID: true, + } } else { - iter, err = store.Deployments(ws, args.Ascending) - deploymentIter.byCreateIndex = true + iter, err = store.Deployments(ws, sort) + opts = paginator.StructsTokenizerOptions{ + WithCreateIndex: true, + WithID: true, + } } if err != nil { return err } - deploymentIter.iter = iter + tokenizer := paginator.NewStructsTokenizer(iter, opts) var deploys []*structs.Deployment - paginator, err := state.NewPaginator(deploymentIter, args.QueryOptions, + paginator, err := paginator.NewPaginator(iter, tokenizer, nil, args.QueryOptions, func(raw interface{}) error { deploy := raw.(*structs.Deployment) deploys = append(deploys, deploy) diff --git a/nomad/deployment_endpoint_test.go b/nomad/deployment_endpoint_test.go index e91bc28a8..08982aeda 100644 --- a/nomad/deployment_endpoint_test.go +++ b/nomad/deployment_endpoint_test.go @@ -1066,13 +1066,12 @@ func TestDeploymentEndpoint_List_order(t *testing.T) { err = s1.fsm.State().UpsertDeployment(1003, dep2) require.NoError(t, err) - t.Run("ascending", func(t *testing.T) { + t.Run("default", func(t *testing.T) { // Lookup the deployments in chronological order (oldest first) get := &structs.DeploymentListRequest{ QueryOptions: structs.QueryOptions{ Region: "global", Namespace: "*", - Ascending: true, }, } @@ -1093,13 +1092,13 @@ func TestDeploymentEndpoint_List_order(t 
*testing.T) { require.Equal(t, uuid3, resp.Deployments[2].ID) }) - t.Run("descending", func(t *testing.T) { + t.Run("reverse", func(t *testing.T) { // Lookup the deployments in reverse chronological order (newest first) get := &structs.DeploymentListRequest{ QueryOptions: structs.QueryOptions{ Region: "global", Namespace: "*", - Ascending: false, + Reverse: true, }, } @@ -1312,7 +1311,7 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { { name: "test01 size-2 page-1 default NS", pageSize: 2, - expectedNextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1003.aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", @@ -1331,8 +1330,8 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { { name: "test03 size-2 page-2 default NS", pageSize: 2, - nextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", - expectedNextToken: "1005-aaaaaacc-3350-4b4b-d185-0e1992ed43e9", + nextToken: "1003.aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1005.aaaaaacc-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", "aaaaaabb-3350-4b4b-d185-0e1992ed43e9", @@ -1353,8 +1352,8 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { name: "test05 size-2 page-2 all namespaces", namespace: "*", pageSize: 2, - nextToken: "1002-aaaaaa33-3350-4b4b-d185-0e1992ed43e9", - expectedNextToken: "1004-aaaaaabb-3350-4b4b-d185-0e1992ed43e9", + nextToken: "1002.aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1004.aaaaaabb-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaaaa33-3350-4b4b-d185-0e1992ed43e9", "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", @@ -1382,7 +1381,7 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { namespace: "*", filter: `ID matches "^a+[123]"`, pageSize: 2, - expectedNextToken: "1002-aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1002.aaaaaa33-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", @@ -1415,8 +1414,8 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { { name: "test13 non-lexicographic order", pageSize: 1, - nextToken: "1007-00000111-3350-4b4b-d185-0e1992ed43e9", - expectedNextToken: "1009-bbbb1111-3350-4b4b-d185-0e1992ed43e9", + nextToken: "1007.00000111-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1009.bbbb1111-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "00000111-3350-4b4b-d185-0e1992ed43e9", }, @@ -1424,7 +1423,7 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { { name: "test14 missing index", pageSize: 1, - nextToken: "1008-e9522802-0cd8-4b1d-9c9e-ab3d97938371", + nextToken: "1008.e9522802-0cd8-4b1d-9c9e-ab3d97938371", expectedIDs: []string{ "bbbb1111-3350-4b4b-d185-0e1992ed43e9", }, @@ -1441,7 +1440,6 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { Filter: tc.filter, PerPage: tc.pageSize, NextToken: tc.nextToken, - Ascending: true, // counting up is easier to think about }, } req.AuthToken = aclToken diff --git a/nomad/deploymentwatcher/deployments_watcher.go b/nomad/deploymentwatcher/deployments_watcher.go index 8743f2b12..56617430f 100644 --- a/nomad/deploymentwatcher/deployments_watcher.go +++ b/nomad/deploymentwatcher/deployments_watcher.go @@ -202,9 +202,9 @@ func (w *Watcher) getDeploys(ctx context.Context, minIndex uint64) ([]*structs.D } // getDeploysImpl retrieves all deployments from the passed state store. 
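A side note on the dot-separated tokens asserted in the cases above: they come straight from the paginator's StructsTokenizer, which joins CreateIndex and ID with a dot. A small sketch, assuming (as the endpoint code does) that structs.Deployment exposes the GetCreateIndex/GetID accessors the tokenizer asserts on:

tok := paginator.NewStructsTokenizer(nil, paginator.StructsTokenizerOptions{
	WithCreateIndex: true,
	WithID:          true,
})
d := &structs.Deployment{
	ID:          "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9",
	CreateIndex: 1003,
}
fmt.Println(tok.GetToken(d)) // "1003.aaaaaaaa-3350-4b4b-d185-0e1992ed43e9"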
-func (w *Watcher) getDeploysImpl(ws memdb.WatchSet, state *state.StateStore) (interface{}, uint64, error) { +func (w *Watcher) getDeploysImpl(ws memdb.WatchSet, store *state.StateStore) (interface{}, uint64, error) { - iter, err := state.Deployments(ws, false) + iter, err := store.Deployments(ws, state.SortDefault) if err != nil { return nil, 0, err } @@ -220,7 +220,7 @@ func (w *Watcher) getDeploysImpl(ws memdb.WatchSet, state *state.StateStore) (in } // Use the last index that affected the deployment table - index, err := state.Index("deployment") + index, err := store.Index("deployment") if err != nil { return nil, 0, err } diff --git a/nomad/drainer_int_test.go b/nomad/drainer_int_test.go index 739e2e14d..cb8aca2ea 100644 --- a/nomad/drainer_int_test.go +++ b/nomad/drainer_int_test.go @@ -931,9 +931,9 @@ func TestDrainer_MultipleNSes_ServiceOnly(t *testing.T) { } // Wait for the two allocations to be placed - state := s1.State() + store := s1.State() testutil.WaitForResult(func() (bool, error) { - iter, err := state.Allocs(nil) + iter, err := store.Allocs(nil, state.SortDefault) if err != nil { return false, err } @@ -974,11 +974,11 @@ func TestDrainer_MultipleNSes_ServiceOnly(t *testing.T) { errCh := make(chan error, 2) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - go allocPromoter(errCh, ctx, state, codec, n1.ID, s1.logger) - go allocPromoter(errCh, ctx, state, codec, n2.ID, s1.logger) + go allocPromoter(errCh, ctx, store, codec, n1.ID, s1.logger) + go allocPromoter(errCh, ctx, store, codec, n2.ID, s1.logger) testutil.WaitForResult(func() (bool, error) { - allocs, err := state.AllocsByNode(nil, n2.ID) + allocs, err := store.AllocsByNode(nil, n2.ID) if err != nil { return false, err } @@ -992,7 +992,7 @@ func TestDrainer_MultipleNSes_ServiceOnly(t *testing.T) { if err := checkAllocPromoter(errCh); err != nil { return false, err } - node, err := state.NodeByID(nil, n1.ID) + node, err := store.NodeByID(nil, n1.ID) if err != nil { return false, err } @@ -1002,7 +1002,7 @@ func TestDrainer_MultipleNSes_ServiceOnly(t *testing.T) { }) // Check we got the right events - node, err := state.NodeByID(nil, n1.ID) + node, err := store.NodeByID(nil, n1.ID) require.NoError(err) // sometimes test gets a duplicate node drain complete event require.GreaterOrEqualf(len(node.Events), 3, "unexpected number of events: %v", node.Events) diff --git a/nomad/eval_endpoint.go b/nomad/eval_endpoint.go index 0b6b26f59..daec216d2 100644 --- a/nomad/eval_endpoint.go +++ b/nomad/eval_endpoint.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/state" + "github.com/hashicorp/nomad/nomad/state/paginator" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/scheduler" ) @@ -21,30 +22,6 @@ const ( DefaultDequeueTimeout = time.Second ) -// EvalPaginationIterator is a wrapper over a go-memdb iterator that implements -// the paginator Iterator interface. -type EvalPaginationIterator struct { - iter memdb.ResultIterator - byCreateIndex bool -} - -func (it EvalPaginationIterator) Next() (string, interface{}) { - raw := it.iter.Next() - if raw == nil { - return "", nil - } - - eval := raw.(*structs.Evaluation) - token := eval.ID - - // prefix the pagination token by CreateIndex to keep it properly sorted. 
- if it.byCreateIndex { - token = fmt.Sprintf("%v-%v", eval.CreateIndex, eval.ID) - } - - return token, eval -} - // Eval endpoint is used for eval interactions type Eval struct { srv *Server @@ -431,6 +408,7 @@ func (e *Eval) List(args *structs.EvalListRequest, reply *structs.EvalListRespon } // Setup the blocking query + sort := state.SortOption(args.Reverse) opts := blockingOptions{ queryOpts: &args.QueryOptions, queryMeta: &reply.QueryMeta, @@ -438,17 +416,25 @@ func (e *Eval) List(args *structs.EvalListRequest, reply *structs.EvalListRespon // Scan all the evaluations var err error var iter memdb.ResultIterator - var evalIter EvalPaginationIterator + var opts paginator.StructsTokenizerOptions if prefix := args.QueryOptions.Prefix; prefix != "" { iter, err = store.EvalsByIDPrefix(ws, namespace, prefix) - evalIter.byCreateIndex = false + opts = paginator.StructsTokenizerOptions{ + WithID: true, + } } else if namespace != structs.AllNamespacesSentinel { - iter, err = store.EvalsByNamespaceOrdered(ws, namespace, args.Ascending) - evalIter.byCreateIndex = true + iter, err = store.EvalsByNamespaceOrdered(ws, namespace, sort) + opts = paginator.StructsTokenizerOptions{ + WithCreateIndex: true, + WithID: true, + } } else { - iter, err = store.Evals(ws, args.Ascending) - evalIter.byCreateIndex = true + iter, err = store.Evals(ws, sort) + opts = paginator.StructsTokenizerOptions{ + WithCreateIndex: true, + WithID: true, + } } if err != nil { return err @@ -460,10 +446,11 @@ func (e *Eval) List(args *structs.EvalListRequest, reply *structs.EvalListRespon } return false }) - evalIter.iter = iter + + tokenizer := paginator.NewStructsTokenizer(iter, opts) var evals []*structs.Evaluation - paginator, err := state.NewPaginator(evalIter, args.QueryOptions, + paginator, err := paginator.NewPaginator(iter, tokenizer, nil, args.QueryOptions, func(raw interface{}) error { eval := raw.(*structs.Evaluation) evals = append(evals, eval) diff --git a/nomad/eval_endpoint_test.go b/nomad/eval_endpoint_test.go index 92463394e..6a2bf4575 100644 --- a/nomad/eval_endpoint_test.go +++ b/nomad/eval_endpoint_test.go @@ -751,40 +751,12 @@ func TestEvalEndpoint_List_order(t *testing.T) { err = s1.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 1003, []*structs.Evaluation{eval2}) require.NoError(t, err) - t.Run("descending", func(t *testing.T) { - // Lookup the evaluations in reverse chronological order + t.Run("default", func(t *testing.T) { + // Lookup the evaluations in the default order (oldest first) get := &structs.EvalListRequest{ QueryOptions: structs.QueryOptions{ Region: "global", Namespace: "*", - Ascending: false, - }, - } - - var resp structs.EvalListResponse - err = msgpackrpc.CallWithCodec(codec, "Eval.List", get, &resp) - require.NoError(t, err) - require.Equal(t, uint64(1003), resp.Index) - require.Len(t, resp.Evaluations, 3) - - // Assert returned order is by CreateIndex (descending) - require.Equal(t, uint64(1002), resp.Evaluations[0].CreateIndex) - require.Equal(t, uuid3, resp.Evaluations[0].ID) - - require.Equal(t, uint64(1001), resp.Evaluations[1].CreateIndex) - require.Equal(t, uuid2, resp.Evaluations[1].ID) - - require.Equal(t, uint64(1000), resp.Evaluations[2].CreateIndex) - require.Equal(t, uuid1, resp.Evaluations[2].ID) - }) - - t.Run("ascending", func(t *testing.T) { - // Lookup the evaluations in reverse chronological order (newest first) - get := &structs.EvalListRequest{ - QueryOptions: structs.QueryOptions{ - Region: "global", - Namespace: "*", - Ascending: true, }, } @@ -805,13 +777,13 
@@ func TestEvalEndpoint_List_order(t *testing.T) { require.Equal(t, uuid3, resp.Evaluations[2].ID) }) - t.Run("descending", func(t *testing.T) { - // Lookup the evaluations in chronological order (oldest first) + t.Run("reverse", func(t *testing.T) { + // Lookup the evaluations in reverse order (newest first) get := &structs.EvalListRequest{ QueryOptions: structs.QueryOptions{ Region: "global", Namespace: "*", - Ascending: false, + Reverse: true, }, } @@ -831,7 +803,6 @@ func TestEvalEndpoint_List_order(t *testing.T) { require.Equal(t, uint64(1000), resp.Evaluations[2].CreateIndex) require.Equal(t, uuid1, resp.Evaluations[2].ID) }) - } func TestEvalEndpoint_ListAllNamespaces(t *testing.T) { @@ -1084,7 +1055,7 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", }, - expectedNextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", // next one in default namespace + expectedNextToken: "1003.aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", // next one in default namespace }, { name: "test02 size-2 page-1 default NS with prefix", @@ -1099,8 +1070,8 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { { name: "test03 size-2 page-2 default NS", pageSize: 2, - nextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", - expectedNextToken: "1005-aaaaaacc-3350-4b4b-d185-0e1992ed43e9", + nextToken: "1003.aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1005.aaaaaacc-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", "aaaaaabb-3350-4b4b-d185-0e1992ed43e9", @@ -1123,7 +1094,7 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { filterJobID: "example", filterStatus: "pending", // aaaaaaaa, bb, and cc are filtered by status - expectedNextToken: "1006-aaaaaadd-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1006.aaaaaadd-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", @@ -1159,7 +1130,7 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { pageSize: 3, // reads off the end filterJobID: "example", filterStatus: "pending", - nextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + nextToken: "1003.aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", expectedNextToken: "", expectedIDs: []string{ "aaaaaadd-3350-4b4b-d185-0e1992ed43e9", @@ -1183,8 +1154,8 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { name: "test10 size-2 page-2 all namespaces", namespace: "*", pageSize: 2, - nextToken: "1002-aaaaaa33-3350-4b4b-d185-0e1992ed43e9", - expectedNextToken: "1004-aaaaaabb-3350-4b4b-d185-0e1992ed43e9", + nextToken: "1002.aaaaaa33-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1004.aaaaaabb-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaaaa33-3350-4b4b-d185-0e1992ed43e9", "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", @@ -1228,7 +1199,7 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { name: "test16 go-bexpr filter with pagination", filter: `JobID == "example"`, pageSize: 2, - expectedNextToken: "1003-aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1003.aaaaaaaa-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "aaaa1111-3350-4b4b-d185-0e1992ed43e9", "aaaaaa22-3350-4b4b-d185-0e1992ed43e9", @@ -1267,8 +1238,8 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { { name: "test22 non-lexicographic order", pageSize: 1, - nextToken: "1009-00000111-3350-4b4b-d185-0e1992ed43e9", - expectedNextToken: 
"1010-00000222-3350-4b4b-d185-0e1992ed43e9", + nextToken: "1009.00000111-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1010.00000222-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "00000111-3350-4b4b-d185-0e1992ed43e9", }, @@ -1276,8 +1247,8 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { { name: "test23 same index", pageSize: 1, - nextToken: "1010-00000222-3350-4b4b-d185-0e1992ed43e9", - expectedNextToken: "1010-00000333-3350-4b4b-d185-0e1992ed43e9", + nextToken: "1010.00000222-3350-4b4b-d185-0e1992ed43e9", + expectedNextToken: "1010.00000333-3350-4b4b-d185-0e1992ed43e9", expectedIDs: []string{ "00000222-3350-4b4b-d185-0e1992ed43e9", }, @@ -1285,7 +1256,7 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { { name: "test24 missing index", pageSize: 1, - nextToken: "1011-e9522802-0cd8-4b1d-9c9e-ab3d97938371", + nextToken: "1011.e9522802-0cd8-4b1d-9c9e-ab3d97938371", expectedIDs: []string{ "bbbb1111-3350-4b4b-d185-0e1992ed43e9", }, @@ -1304,7 +1275,6 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { PerPage: tc.pageSize, NextToken: tc.nextToken, Filter: tc.filter, - Ascending: true, // counting up is easier to think about }, } req.AuthToken = aclToken diff --git a/nomad/fsm.go b/nomad/fsm.go index c288fc09b..a25e46885 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -1704,16 +1704,16 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error { // failLeakedDeployments is used to fail deployments that do not have a job. // This state is a broken invariant that should not occur since 0.8.X. -func (n *nomadFSM) failLeakedDeployments(state *state.StateStore) error { +func (n *nomadFSM) failLeakedDeployments(store *state.StateStore) error { // Scan for deployments that are referencing a job that no longer exists. // This could happen if multiple deployments were created for a given job // and thus the older deployment leaks and then the job is removed. 
- iter, err := state.Deployments(nil, false) + iter, err := store.Deployments(nil, state.SortDefault) if err != nil { return fmt.Errorf("failed to query deployments: %v", err) } - dindex, err := state.Index("deployment") + dindex, err := store.Index("deployment") if err != nil { return fmt.Errorf("couldn't fetch index of deployments table: %v", err) } @@ -1733,7 +1733,7 @@ func (n *nomadFSM) failLeakedDeployments(state *state.StateStore) error { } // Find the job - job, err := state.JobByID(nil, d.Namespace, d.JobID) + job, err := store.JobByID(nil, d.Namespace, d.JobID) if err != nil { return fmt.Errorf("failed to lookup job %s from deployment %q: %v", d.JobID, d.ID, err) } @@ -1747,7 +1747,7 @@ func (n *nomadFSM) failLeakedDeployments(state *state.StateStore) error { failed := d.Copy() failed.Status = structs.DeploymentStatusCancelled failed.StatusDescription = structs.DeploymentStatusDescriptionStoppedJob - if err := state.UpsertDeployment(dindex, failed); err != nil { + if err := store.UpsertDeployment(dindex, failed); err != nil { return fmt.Errorf("failed to mark leaked deployment %q as failed: %v", failed.ID, err) } } @@ -2099,7 +2099,7 @@ func (s *nomadSnapshot) persistAllocs(sink raft.SnapshotSink, encoder *codec.Encoder) error { // Get all the allocations ws := memdb.NewWatchSet() - allocs, err := s.snap.Allocs(ws) + allocs, err := s.snap.Allocs(ws, state.SortDefault) if err != nil { return err } @@ -2250,7 +2250,7 @@ func (s *nomadSnapshot) persistDeployments(sink raft.SnapshotSink, encoder *codec.Encoder) error { // Get all the jobs ws := memdb.NewWatchSet() - deployments, err := s.snap.Deployments(ws, false) + deployments, err := s.snap.Deployments(ws, state.SortDefault) if err != nil { return err } @@ -2306,7 +2306,7 @@ func (s *nomadSnapshot) persistACLTokens(sink raft.SnapshotSink, encoder *codec.Encoder) error { // Get all the policies ws := memdb.NewWatchSet() - tokens, err := s.snap.ACLTokens(ws) + tokens, err := s.snap.ACLTokens(ws, state.SortDefault) if err != nil { return err } diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index 418cbe1af..d6ab29058 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -3,6 +3,7 @@ package nomad import ( "context" "fmt" + "net/http" "sort" "strings" "time" @@ -20,6 +21,7 @@ import ( "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" + "github.com/hashicorp/nomad/nomad/state/paginator" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/scheduler" ) @@ -1339,15 +1341,29 @@ func (j *Job) List(args *structs.JobListRequest, reply *structs.JobListResponse) } defer metrics.MeasureSince([]string{"nomad", "job", "list"}, time.Now()) - if args.RequestNamespace() == structs.AllNamespacesSentinel { - return j.listAllNamespaces(args, reply) - } + namespace := args.RequestNamespace() + var allow func(string) bool // Check for list-job permissions - if aclObj, err := j.srv.ResolveToken(args.AuthToken); err != nil { + aclObj, err := j.srv.ResolveToken(args.AuthToken) + + switch { + case err != nil: return err - } else if aclObj != nil && !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityListJobs) { + case aclObj == nil: + allow = func(string) bool { + return true + } + case namespace == structs.AllNamespacesSentinel: + allow = func(ns string) bool { + return aclObj.AllowNsOp(ns, acl.NamespaceCapabilityListJobs) + } + case !aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityListJobs): return structs.ErrPermissionDenied + 
default: + allow = func(string) bool { + return true + } } // Setup the blocking query @@ -1358,106 +1374,65 @@ func (j *Job) List(args *structs.JobListRequest, reply *structs.JobListResponse) // Capture all the jobs var err error var iter memdb.ResultIterator - if prefix := args.QueryOptions.Prefix; prefix != "" { - iter, err = state.JobsByIDPrefix(ws, args.RequestNamespace(), prefix) - } else { - iter, err = state.JobsByNamespace(ws, args.RequestNamespace()) - } - if err != nil { - return err - } - var jobs []*structs.JobListStub - for { - raw := iter.Next() - if raw == nil { - break - } - job := raw.(*structs.Job) - summary, err := state.JobSummaryByID(ws, args.RequestNamespace(), job.ID) - if err != nil { - return fmt.Errorf("unable to look up summary for job: %v", job.ID) - } - jobs = append(jobs, job.Stub(summary)) - } - reply.Jobs = jobs - - // Use the last index that affected the jobs table or summary - jindex, err := state.Index("jobs") - if err != nil { - return err - } - sindex, err := state.Index("job_summary") - if err != nil { - return err - } - reply.Index = helper.Uint64Max(jindex, sindex) - - // Set the query response - j.srv.setQueryMeta(&reply.QueryMeta) - return nil - }} - return j.srv.blockingRPC(&opts) -} - -// listAllNamespaces lists all jobs across all namespaces -func (j *Job) listAllNamespaces(args *structs.JobListRequest, reply *structs.JobListResponse) error { - // Check for list-job permissions - aclObj, err := j.srv.ResolveToken(args.AuthToken) - if err != nil { - return err - } - prefix := args.QueryOptions.Prefix - allow := func(ns string) bool { - return aclObj.AllowNsOp(ns, acl.NamespaceCapabilityListJobs) - } - - // Setup the blocking query - opts := blockingOptions{ - queryOpts: &args.QueryOptions, - queryMeta: &reply.QueryMeta, - run: func(ws memdb.WatchSet, state *state.StateStore) error { // check if user has permission to all namespaces - allowedNSes, err := allowedNSes(aclObj, state, allow) + allowableNamespaces, err := allowedNSes(aclObj, state, allow) if err == structs.ErrPermissionDenied { // return empty jobs if token isn't authorized for any // namespace, matching other endpoints - reply.Jobs = []*structs.JobListStub{} - return nil + reply.Jobs = make([]*structs.JobListStub, 0) } else if err != nil { return err - } - - // Capture all the jobs - iter, err := state.Jobs(ws) - - if err != nil { - return err - } - - var jobs []*structs.JobListStub - for { - raw := iter.Next() - if raw == nil { - break + } else { + if prefix := args.QueryOptions.Prefix; prefix != "" { + iter, err = state.JobsByIDPrefix(ws, namespace, prefix) + } else if namespace != structs.AllNamespacesSentinel { + iter, err = state.JobsByNamespace(ws, namespace) + } else { + iter, err = state.Jobs(ws) } - job := raw.(*structs.Job) - if allowedNSes != nil && !allowedNSes[job.Namespace] { - // not permitted to this name namespace - continue - } - if prefix != "" && !strings.HasPrefix(job.ID, prefix) { - continue - } - summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID) if err != nil { - return fmt.Errorf("unable to look up summary for job: %v", job.ID) + return err } - stub := job.Stub(summary) - jobs = append(jobs, stub) + tokenizer := paginator.NewStructsTokenizer( + iter, + paginator.StructsTokenizerOptions{ + WithNamespace: true, + WithID: true, + }, + ) + filters := []paginator.Filter{ + paginator.NamespaceFilter{ + AllowableNamespaces: allowableNamespaces, + }, + } + + var jobs []*structs.JobListStub + paginator, err := paginator.NewPaginator(iter, tokenizer, 
filters, args.QueryOptions, + func(raw interface{}) error { + job := raw.(*structs.Job) + summary, err := state.JobSummaryByID(ws, namespace, job.ID) + if err != nil { + return fmt.Errorf("unable to look up summary for job: %v", job.ID) + } + jobs = append(jobs, job.Stub(summary)) + return nil + }) + if err != nil { + return structs.NewErrRPCCodedf( + http.StatusBadRequest, "failed to create result paginator: %v", err) + } + + nextToken, err := paginator.Page() + if err != nil { + return structs.NewErrRPCCodedf( + http.StatusBadRequest, "failed to read result page: %v", err) + } + + reply.QueryMeta.NextToken = nextToken + reply.Jobs = jobs } - reply.Jobs = jobs // Use the last index that affected the jobs table or summary jindex, err := state.Index("jobs") @@ -1475,7 +1450,6 @@ func (j *Job) listAllNamespaces(args *structs.JobListRequest, reply *structs.Job return nil }} return j.srv.blockingRPC(&opts) - } // Allocations is used to list the allocations for a job diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 3850290fa..8cb936399 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -5151,6 +5151,184 @@ func TestJobEndpoint_ListJobs_Blocking(t *testing.T) { } } +func TestJobEndpoint_ListJobs_PaginationFiltering(t *testing.T) { + t.Parallel() + s1, _, cleanupS1 := TestACLServer(t, nil) + defer cleanupS1() + codec := rpcClient(t, s1) + testutil.WaitForLeader(t, s1.RPC) + + // create a set of jobs. these are in the order that the state store will + // return them from the iterator (sorted by key) for ease of writing tests + mocks := []struct { + name string + namespace string + status string + }{ + {name: "job-01"}, // 0 + {name: "job-02"}, // 1 + {name: "job-03", namespace: "non-default"}, // 2 + {name: "job-04"}, // 3 + {name: "job-05", status: structs.JobStatusRunning}, // 4 + {name: "job-06", status: structs.JobStatusRunning}, // 5 + {}, // 6, missing job + {name: "job-08"}, // 7 + {name: "job-03", namespace: "other"}, // 8, same name but in another namespace + } + + state := s1.fsm.State() + require.NoError(t, state.UpsertNamespaces(999, []*structs.Namespace{{Name: "non-default"}, {Name: "other"}})) + + for i, m := range mocks { + if m.name == "" { + continue + } + + index := 1000 + uint64(i) + job := mock.Job() + job.ID = m.name + job.Name = m.name + job.Status = m.status + if m.namespace != "" { // defaults to "default" + job.Namespace = m.namespace + } + job.CreateIndex = index + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, index, job)) + } + + aclToken := mock.CreatePolicyAndToken(t, state, 1100, "test-valid-read", + mock.NamespacePolicy("*", "read", nil)). 
+ SecretID + + cases := []struct { + name string + namespace string + prefix string + filter string + nextToken string + pageSize int32 + expectedNextToken string + expectedIDs []string + expectedError string + }{ + { + name: "test01 size-2 page-1 default NS", + pageSize: 2, + expectedNextToken: "default.job-04", + expectedIDs: []string{"job-01", "job-02"}, + }, + { + name: "test02 size-2 page-1 default NS with prefix", + prefix: "job", + pageSize: 2, + expectedNextToken: "default.job-04", + expectedIDs: []string{"job-01", "job-02"}, + }, + { + name: "test03 size-2 page-2 default NS", + pageSize: 2, + nextToken: "default.job-04", + expectedNextToken: "default.job-06", + expectedIDs: []string{"job-04", "job-05"}, + }, + { + name: "test04 size-2 page-2 default NS with prefix", + prefix: "job", + pageSize: 2, + nextToken: "default.job-04", + expectedNextToken: "default.job-06", + expectedIDs: []string{"job-04", "job-05"}, + }, + { + name: "test05 no valid results with filters and prefix", + prefix: "not-job", + pageSize: 2, + nextToken: "", + expectedIDs: []string{}, + }, + { + name: "test06 go-bexpr filter", + namespace: "*", + filter: `Name matches "job-0[123]"`, + expectedIDs: []string{"job-01", "job-02", "job-03", "job-03"}, + }, + { + name: "test07 go-bexpr filter with pagination", + namespace: "*", + filter: `Name matches "job-0[123]"`, + pageSize: 2, + expectedNextToken: "non-default.job-03", + expectedIDs: []string{"job-01", "job-02"}, + }, + { + name: "test08 go-bexpr filter in namespace", + namespace: "non-default", + filter: `Status == "pending"`, + expectedIDs: []string{"job-03"}, + }, + { + name: "test09 go-bexpr invalid expression", + filter: `NotValid`, + expectedError: "failed to read filter expression", + }, + { + name: "test10 go-bexpr invalid field", + filter: `InvalidField == "value"`, + expectedError: "error finding value in datum", + }, + { + name: "test11 missing index", + pageSize: 1, + nextToken: "default.job-07", + expectedIDs: []string{ + "job-08", + }, + }, + { + name: "test12 same name but different NS", + namespace: "*", + pageSize: 1, + filter: `Name == "job-03"`, + expectedNextToken: "other.job-03", + expectedIDs: []string{ + "job-03", + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + req := &structs.JobListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: tc.namespace, + Prefix: tc.prefix, + Filter: tc.filter, + PerPage: tc.pageSize, + NextToken: tc.nextToken, + }, + } + req.AuthToken = aclToken + var resp structs.JobListResponse + err := msgpackrpc.CallWithCodec(codec, "Job.List", req, &resp) + if tc.expectedError == "" { + require.NoError(t, err) + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tc.expectedError) + return + } + + gotIDs := []string{} + for _, job := range resp.Jobs { + gotIDs = append(gotIDs, job.ID) + } + require.Equal(t, tc.expectedIDs, gotIDs, "unexpected page of jobs") + require.Equal(t, tc.expectedNextToken, resp.QueryMeta.NextToken, "unexpected NextToken") + }) + } +} + func TestJobEndpoint_Allocations(t *testing.T) { t.Parallel() diff --git a/nomad/search_endpoint.go b/nomad/search_endpoint.go index 8c4bd25ea..73cfb47e2 100644 --- a/nomad/search_endpoint.go +++ b/nomad/search_endpoint.go @@ -394,42 +394,42 @@ func wildcard(namespace string) bool { return namespace == structs.AllNamespacesSentinel } -func getFuzzyResourceIterator(context structs.Context, aclObj *acl.ACL, namespace string, ws memdb.WatchSet, state *state.StateStore) 
(memdb.ResultIterator, error) { +func getFuzzyResourceIterator(context structs.Context, aclObj *acl.ACL, namespace string, ws memdb.WatchSet, store *state.StateStore) (memdb.ResultIterator, error) { switch context { case structs.Jobs: if wildcard(namespace) { - iter, err := state.Jobs(ws) + iter, err := store.Jobs(ws) return nsCapIterFilter(iter, err, aclObj) } - return state.JobsByNamespace(ws, namespace) + return store.JobsByNamespace(ws, namespace) case structs.Allocs: if wildcard(namespace) { - iter, err := state.Allocs(ws) + iter, err := store.Allocs(ws, state.SortDefault) return nsCapIterFilter(iter, err, aclObj) } - return state.AllocsByNamespace(ws, namespace) + return store.AllocsByNamespace(ws, namespace) case structs.Nodes: if wildcard(namespace) { - iter, err := state.Nodes(ws) + iter, err := store.Nodes(ws) return nsCapIterFilter(iter, err, aclObj) } - return state.Nodes(ws) + return store.Nodes(ws) case structs.Plugins: if wildcard(namespace) { - iter, err := state.CSIPlugins(ws) + iter, err := store.CSIPlugins(ws) return nsCapIterFilter(iter, err, aclObj) } - return state.CSIPlugins(ws) + return store.CSIPlugins(ws) case structs.Namespaces: - iter, err := state.Namespaces(ws) + iter, err := store.Namespaces(ws) return nsCapIterFilter(iter, err, aclObj) default: - return getEnterpriseFuzzyResourceIter(context, aclObj, namespace, ws, state) + return getEnterpriseFuzzyResourceIter(context, aclObj, namespace, ws, store) } } diff --git a/nomad/state/paginator/filter.go b/nomad/state/paginator/filter.go new file mode 100644 index 000000000..e4ed1f250 --- /dev/null +++ b/nomad/state/paginator/filter.go @@ -0,0 +1,41 @@ +package paginator + +// Filter is the interface that must be implemented to skip values when using +// the Paginator. +type Filter interface { + // Evaluate returns true if the element should be added to the page. + Evaluate(interface{}) (bool, error) +} + +// GenericFilter wraps a function that can be used to provide simple or in +// scope filtering. +type GenericFilter struct { + Allow func(interface{}) (bool, error) +} + +func (f GenericFilter) Evaluate(raw interface{}) (bool, error) { + return f.Allow(raw) +} + +// NamespaceFilter skips elements with a namespace value that is not in the +// allowable set. 
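+// A nil AllowableNamespaces map is treated as unrestricted, admitting every
+// element.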
+type NamespaceFilter struct { + AllowableNamespaces map[string]bool +} + +func (f NamespaceFilter) Evaluate(raw interface{}) (bool, error) { + if raw == nil { + return false, nil + } + + item, _ := raw.(NamespaceGetter) + namespace := item.GetNamespace() + + if f.AllowableNamespaces == nil { + return true, nil + } + if f.AllowableNamespaces[namespace] { + return true, nil + } + return false, nil +} diff --git a/nomad/state/filter_test.go b/nomad/state/paginator/filter_test.go similarity index 61% rename from nomad/state/filter_test.go rename to nomad/state/paginator/filter_test.go index 2fa1b02ad..d94f49a57 100644 --- a/nomad/state/filter_test.go +++ b/nomad/state/paginator/filter_test.go @@ -1,15 +1,109 @@ -package state +package paginator import ( "testing" "time" "github.com/hashicorp/go-bexpr" - memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/helper/uuid" + "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/require" ) +func TestGenericFilter(t *testing.T) { + t.Parallel() + ids := []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"} + + filters := []Filter{GenericFilter{ + Allow: func(raw interface{}) (bool, error) { + result := raw.(*mockObject) + return result.id > "5", nil + }, + }} + iter := newTestIterator(ids) + tokenizer := testTokenizer{} + opts := structs.QueryOptions{ + PerPage: 3, + } + results := []string{} + paginator, err := NewPaginator(iter, tokenizer, filters, opts, + func(raw interface{}) error { + result := raw.(*mockObject) + results = append(results, result.id) + return nil + }, + ) + require.NoError(t, err) + + nextToken, err := paginator.Page() + require.NoError(t, err) + + expected := []string{"6", "7", "8"} + require.Equal(t, "9", nextToken) + require.Equal(t, expected, results) +} + +func TestNamespaceFilter(t *testing.T) { + t.Parallel() + + mocks := []*mockObject{ + {namespace: "default"}, + {namespace: "dev"}, + {namespace: "qa"}, + {namespace: "region-1"}, + } + + cases := []struct { + name string + allowable map[string]bool + expected []string + }{ + { + name: "nil map", + expected: []string{"default", "dev", "qa", "region-1"}, + }, + { + name: "allow default", + allowable: map[string]bool{"default": true}, + expected: []string{"default"}, + }, + { + name: "allow multiple", + allowable: map[string]bool{"default": true, "dev": false, "qa": true}, + expected: []string{"default", "qa"}, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + filters := []Filter{NamespaceFilter{ + AllowableNamespaces: tc.allowable, + }} + iter := newTestIteratorWithMocks(mocks) + tokenizer := testTokenizer{} + opts := structs.QueryOptions{ + PerPage: int32(len(mocks)), + } + + results := []string{} + paginator, err := NewPaginator(iter, tokenizer, filters, opts, + func(raw interface{}) error { + result := raw.(*mockObject) + results = append(results, result.namespace) + return nil + }, + ) + require.NoError(t, err) + + nextToken, err := paginator.Page() + require.NoError(t, err) + require.Equal(t, "", nextToken) + require.Equal(t, tc.expected, results) + }) + } +} + func BenchmarkEvalListFilter(b *testing.B) { const evalCount = 100_000 @@ -76,9 +170,10 @@ func BenchmarkEvalListFilter(b *testing.B) { for i := 0; i < b.N; i++ { iter, _ := state.EvalsByNamespace(nil, structs.DefaultNamespace) - evalIter := evalPaginationIterator{iter} + tokenizer := NewStructsTokenizer(iter, StructsTokenizerOptions{WithID: true}) + var evals []*structs.Evaluation - paginator, err := 
NewPaginator(evalIter, opts, func(raw interface{}) error { + paginator, err := NewPaginator(iter, tokenizer, nil, opts, func(raw interface{}) error { eval := raw.(*structs.Evaluation) evals = append(evals, eval) return nil @@ -100,9 +195,10 @@ func BenchmarkEvalListFilter(b *testing.B) { for i := 0; i < b.N; i++ { iter, _ := state.Evals(nil, false) - evalIter := evalPaginationIterator{iter} + tokenizer := NewStructsTokenizer(iter, StructsTokenizerOptions{WithID: true}) + var evals []*structs.Evaluation - paginator, err := NewPaginator(evalIter, opts, func(raw interface{}) error { + paginator, err := NewPaginator(iter, tokenizer, nil, opts, func(raw interface{}) error { eval := raw.(*structs.Evaluation) evals = append(evals, eval) return nil @@ -137,9 +233,10 @@ func BenchmarkEvalListFilter(b *testing.B) { for i := 0; i < b.N; i++ { iter, _ := state.EvalsByNamespace(nil, structs.DefaultNamespace) - evalIter := evalPaginationIterator{iter} + tokenizer := NewStructsTokenizer(iter, StructsTokenizerOptions{WithID: true}) + var evals []*structs.Evaluation - paginator, err := NewPaginator(evalIter, opts, func(raw interface{}) error { + paginator, err := NewPaginator(iter, tokenizer, nil, opts, func(raw interface{}) error { eval := raw.(*structs.Evaluation) evals = append(evals, eval) return nil @@ -175,9 +272,10 @@ func BenchmarkEvalListFilter(b *testing.B) { for i := 0; i < b.N; i++ { iter, _ := state.Evals(nil, false) - evalIter := evalPaginationIterator{iter} + tokenizer := NewStructsTokenizer(iter, StructsTokenizerOptions{WithID: true}) + var evals []*structs.Evaluation - paginator, err := NewPaginator(evalIter, opts, func(raw interface{}) error { + paginator, err := NewPaginator(iter, tokenizer, nil, opts, func(raw interface{}) error { eval := raw.(*structs.Evaluation) evals = append(evals, eval) return nil @@ -193,12 +291,12 @@ func BenchmarkEvalListFilter(b *testing.B) { // ----------------- // BENCHMARK HELPER FUNCTIONS -func setupPopulatedState(b *testing.B, evalCount int) *StateStore { +func setupPopulatedState(b *testing.B, evalCount int) *state.StateStore { evals := generateEvals(evalCount) index := uint64(0) var err error - state := TestStateStore(b) + state := state.TestStateStore(b) for _, eval := range evals { index++ err = state.UpsertEvals( @@ -235,17 +333,3 @@ func generateEval(i int, ns string) *structs.Evaluation { ModifyTime: now, } } - -type evalPaginationIterator struct { - iter memdb.ResultIterator -} - -func (it evalPaginationIterator) Next() (string, interface{}) { - raw := it.iter.Next() - if raw == nil { - return "", nil - } - - eval := raw.(*structs.Evaluation) - return eval.ID, eval -} diff --git a/nomad/state/paginator.go b/nomad/state/paginator/paginator.go similarity index 64% rename from nomad/state/paginator.go rename to nomad/state/paginator/paginator.go index 607ff8cde..f4aa3c2fe 100644 --- a/nomad/state/paginator.go +++ b/nomad/state/paginator/paginator.go @@ -1,4 +1,4 @@ -package state +package paginator import ( "fmt" @@ -7,39 +7,37 @@ import ( "github.com/hashicorp/nomad/nomad/structs" ) -// Iterator is the interface that must be implemented to use the Paginator. +// Iterator is the interface that must be implemented to supply data to the +// Paginator. type Iterator interface { - // Next returns the next element to be considered for pagination along with - // a token string used to uniquely identify elements in the iteration. + // Next returns the next element to be considered for pagination. // The page will end if nil is returned. 
- // Tokens should have a stable order and the order must match the paginator - // ascending property. - Next() (string, interface{}) + Next() interface{} } -// Paginator is an iterator over a memdb.ResultIterator that returns -// only the expected number of pages. +// Paginator wraps an iterator and returns only the expected number of pages. type Paginator struct { iter Iterator + tokenizer Tokenizer + filters []Filter perPage int32 itemCount int32 seekingToken string nextToken string - ascending bool + reverse bool nextTokenFound bool pageErr error - // filterEvaluator is used to filter results using go-bexpr. It's nil if - // no filter expression is defined. - filterEvaluator *bexpr.Evaluator - // appendFunc is the function the caller should use to append raw // entries to the results set. The object is guaranteed to be // non-nil. appendFunc func(interface{}) error } -func NewPaginator(iter Iterator, opts structs.QueryOptions, appendFunc func(interface{}) error) (*Paginator, error) { +// NewPaginator returns a new Paginator. +func NewPaginator(iter Iterator, tokenizer Tokenizer, filters []Filter, + opts structs.QueryOptions, appendFunc func(interface{}) error) (*Paginator, error) { + var evaluator *bexpr.Evaluator var err error @@ -48,21 +46,23 @@ func NewPaginator(iter Iterator, opts structs.QueryOptions, appendFunc func(inte if err != nil { return nil, fmt.Errorf("failed to read filter expression: %v", err) } + filters = append(filters, evaluator) } return &Paginator{ - iter: iter, - perPage: opts.PerPage, - seekingToken: opts.NextToken, - ascending: opts.Ascending, - nextTokenFound: opts.NextToken == "", - filterEvaluator: evaluator, - appendFunc: appendFunc, + iter: iter, + tokenizer: tokenizer, + filters: filters, + perPage: opts.PerPage, + seekingToken: opts.NextToken, + reverse: opts.Reverse, + nextTokenFound: opts.NextToken == "", + appendFunc: appendFunc, }, nil } // Page populates a page by running the append function -// over all results. Returns the next token +// over all results. Returns the next token. func (p *Paginator) Page() (string, error) { DONE: for { @@ -84,34 +84,36 @@ DONE: } func (p *Paginator) next() (interface{}, paginatorState) { - token, raw := p.iter.Next() + raw := p.iter.Next() if raw == nil { p.nextToken = "" return nil, paginatorComplete } + token := p.tokenizer.GetToken(raw) // have we found the token we're seeking (if any)? 
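	// With the default ascending token order, anything that sorts before the
	// seeking token was already returned on an earlier page and is skipped;
	// reverse order flips the comparison.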
p.nextToken = token var passedToken bool - if p.ascending { - passedToken = token < p.seekingToken - } else { + + if p.reverse { passedToken = token > p.seekingToken + } else { + passedToken = token < p.seekingToken } if !p.nextTokenFound && passedToken { return nil, paginatorSkip } - // apply filter if defined - if p.filterEvaluator != nil { - match, err := p.filterEvaluator.Evaluate(raw) + // apply filters if defined + for _, f := range p.filters { + allow, err := f.Evaluate(raw) if err != nil { p.pageErr = err return nil, paginatorComplete } - if !match { + if !allow { return nil, paginatorSkip } } diff --git a/nomad/state/paginator_test.go b/nomad/state/paginator/paginator_test.go similarity index 74% rename from nomad/state/paginator_test.go rename to nomad/state/paginator/paginator_test.go index 0d6f07fda..e3678da53 100644 --- a/nomad/state/paginator_test.go +++ b/nomad/state/paginator/paginator_test.go @@ -1,4 +1,4 @@ -package state +package paginator import ( "errors" @@ -58,14 +58,14 @@ func TestPaginator(t *testing.T) { t.Run(tc.name, func(t *testing.T) { iter := newTestIterator(ids) - results := []string{} + tokenizer := testTokenizer{} + opts := structs.QueryOptions{ + PerPage: tc.perPage, + NextToken: tc.nextToken, + } - paginator, err := NewPaginator(iter, - structs.QueryOptions{ - PerPage: tc.perPage, - NextToken: tc.nextToken, - Ascending: true, - }, + results := []string{} + paginator, err := NewPaginator(iter, tokenizer, nil, opts, func(raw interface{}) error { if tc.expectedError != "" { return errors.New(tc.expectedError) @@ -94,27 +94,32 @@ func TestPaginator(t *testing.T) { // helpers for pagination tests -// implements memdb.ResultIterator interface +// implements Iterator interface type testResultIterator struct { results chan interface{} } -func (i testResultIterator) Next() (string, interface{}) { +func (i testResultIterator) Next() interface{} { select { case raw := <-i.results: if raw == nil { - return "", nil + return nil } m := raw.(*mockObject) - return m.id, m + return m default: - return "", nil + return nil } } type mockObject struct { - id string + id string + namespace string +} + +func (m *mockObject) GetNamespace() string { + return m.namespace } func newTestIterator(ids []string) testResultIterator { @@ -124,3 +129,18 @@ func newTestIterator(ids []string) testResultIterator { } return iter } + +func newTestIteratorWithMocks(mocks []*mockObject) testResultIterator { + iter := testResultIterator{results: make(chan interface{}, 20)} + for _, m := range mocks { + iter.results <- m + } + return iter +} + +// implements Tokenizer interface +type testTokenizer struct{} + +func (t testTokenizer) GetToken(raw interface{}) string { + return raw.(*mockObject).id +} diff --git a/nomad/state/paginator/tokenizer.go b/nomad/state/paginator/tokenizer.go new file mode 100644 index 000000000..527f547a2 --- /dev/null +++ b/nomad/state/paginator/tokenizer.go @@ -0,0 +1,82 @@ +package paginator + +import ( + "fmt" + "strings" +) + +// Tokenizer is the interface that must be implemented to provide pagination +// tokens to the Paginator. +type Tokenizer interface { + // GetToken returns the pagination token for the given element. + GetToken(interface{}) string +} + +// IDGetter is the interface that must be implemented by structs that need to +// have their ID as part of the pagination token. 
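+// Structs such as Deployment, Evaluation, and Job provide these through
+// small accessor methods.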
+type IDGetter interface {
+	GetID() string
+}
+
+// NamespaceGetter is the interface that must be implemented by structs that
+// need to have their Namespace as part of the pagination token.
+type NamespaceGetter interface {
+	GetNamespace() string
+}
+
+// CreateIndexGetter is the interface that must be implemented by structs that
+// need to have their CreateIndex as part of the pagination token.
+type CreateIndexGetter interface {
+	GetCreateIndex() uint64
+}
+
+// StructsTokenizerOptions is the configuration provided to a StructsTokenizer.
+type StructsTokenizerOptions struct {
+	WithCreateIndex bool
+	WithNamespace   bool
+	WithID          bool
+}
+
+// StructsTokenizer is a pagination token generator that can create different
+// formats of pagination tokens based on common fields found in the structs
+// package.
+type StructsTokenizer struct {
+	opts StructsTokenizerOptions
+}
+
+// NewStructsTokenizer returns a new StructsTokenizer.
+func NewStructsTokenizer(it Iterator, opts StructsTokenizerOptions) StructsTokenizer {
+	return StructsTokenizer{
+		opts: opts,
+	}
+}
+
+func (it StructsTokenizer) GetToken(raw interface{}) string {
+	if raw == nil {
+		return ""
+	}
+
+	parts := []string{}
+
+	if it.opts.WithCreateIndex {
+		token := raw.(CreateIndexGetter).GetCreateIndex()
+		parts = append(parts, fmt.Sprintf("%v", token))
+	}
+
+	if it.opts.WithNamespace {
+		token := raw.(NamespaceGetter).GetNamespace()
+		parts = append(parts, token)
+	}
+
+	if it.opts.WithID {
+		token := raw.(IDGetter).GetID()
+		parts = append(parts, token)
+	}
+
+	// Use a character that is not part of validNamespaceName as separator to
+	// avoid accidentally generating collisions.
+	// For example, namespace `a` and job `b-c` would collide with namespace
+	// `a-b` and job `c` into the same token `a-b-c`, since `-` is an allowed
+	// character in namespace names.
+	return strings.Join(parts, ".")
+}
diff --git a/nomad/state/paginator/tokenizer_test.go b/nomad/state/paginator/tokenizer_test.go
new file mode 100644
index 000000000..c74fe8a67
--- /dev/null
+++ b/nomad/state/paginator/tokenizer_test.go
@@ -0,0 +1,67 @@
+package paginator
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/hashicorp/nomad/nomad/mock"
+	"github.com/stretchr/testify/require"
+)
+
+func TestStructsTokenizer(t *testing.T) {
+	j := mock.Job()
+
+	cases := []struct {
+		name     string
+		opts     StructsTokenizerOptions
+		expected string
+	}{
+		{
+			name: "ID",
+			opts: StructsTokenizerOptions{
+				WithID: true,
+			},
+			expected: fmt.Sprintf("%v", j.ID),
+		},
+		{
+			name: "Namespace.ID",
+			opts: StructsTokenizerOptions{
+				WithNamespace: true,
+				WithID:        true,
+			},
+			expected: fmt.Sprintf("%v.%v", j.Namespace, j.ID),
+		},
+		{
+			name: "CreateIndex.Namespace.ID",
+			opts: StructsTokenizerOptions{
+				WithCreateIndex: true,
+				WithNamespace:   true,
+				WithID:          true,
+			},
+			expected: fmt.Sprintf("%v.%v.%v", j.CreateIndex, j.Namespace, j.ID),
+		},
+		{
+			name: "CreateIndex.ID",
+			opts: StructsTokenizerOptions{
+				WithCreateIndex: true,
+				WithID:          true,
+			},
+			expected: fmt.Sprintf("%v.%v", j.CreateIndex, j.ID),
+		},
+		{
+			name: "CreateIndex.Namespace",
+			opts: StructsTokenizerOptions{
+				WithCreateIndex: true,
+				WithNamespace:   true,
+			},
+			expected: fmt.Sprintf("%v.%v", j.CreateIndex, j.Namespace),
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			tokenizer := StructsTokenizer{opts: tc.opts}
+			require.Equal(t, tc.expected, tokenizer.GetToken(j))
+		})
+	}
+}
diff --git a/nomad/state/schema.go b/nomad/state/schema.go
index 5c62ae2fd..0aafe87d2 100644
--- a/nomad/state/schema.go
+++ b/nomad/state/schema.go
@@ -530,7 +530,7 @@ func allocTableSchema() *memdb.TableSchema {
 	return &memdb.TableSchema{
 		Name: "allocs",
 		Indexes: map[string]*memdb.IndexSchema{
-			// Primary index is a UUID
+			// id index is used for direct lookup of allocation by ID.
 			"id": {
 				Name:         "id",
 				AllowMissing: false,
@@ -540,6 +540,26 @@
 				},
 			},
 
+			// create index is used for listing allocations, ordering them by
+			// creation chronology. (Use a reverse iterator for newest first).
+			"create": {
+				Name:         "create",
+				AllowMissing: false,
+				Unique:       true,
+				Indexer: &memdb.CompoundIndex{
+					Indexes: []memdb.Indexer{
+						&memdb.UintFieldIndex{
+							Field: "CreateIndex",
+						},
+						&memdb.StringFieldIndex{
+							Field: "ID",
+						},
+					},
+				},
+			},
+
+			// namespace is used to lookup allocations by namespace.
+			// todo(shoenig): i think we can deprecate this and others like it
 			"namespace": {
 				Name:         "namespace",
 				AllowMissing: false,
@@ -549,6 +569,31 @@
 				},
 			},
 
+			// namespace_create index is used to lookup allocations by namespace
+			// in their original chronological order based on CreateIndex.
+			//
+			// Use a prefix iterator (namespace_prefix) on a Namespace to iterate
+			// those allocations in order of CreateIndex.
+ "namespace_create": { + Name: "namespace_create", + AllowMissing: false, + Unique: true, + Indexer: &memdb.CompoundIndex{ + AllowMissing: false, + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Namespace", + }, + &memdb.UintFieldIndex{ + Field: "CreateIndex", + }, + &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + }, + }, + // Node index is used to lookup allocations by node "node": { Name: "node", @@ -728,6 +773,21 @@ func aclTokenTableSchema() *memdb.TableSchema { Field: "AccessorID", }, }, + "create": { + Name: "create", + AllowMissing: false, + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.UintFieldIndex{ + Field: "CreateIndex", + }, + &memdb.StringFieldIndex{ + Field: "AccessorID", + }, + }, + }, + }, "secret": { Name: "secret", AllowMissing: false, diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 1dd9e4814..60bf36fc1 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -22,6 +22,19 @@ import ( // This can be a read or write transaction. type Txn = *txn +// SortOption represents how results can be sorted. +type SortOption bool + +const ( + // SortDefault indicates that the result should be returned using the + // default go-memdb ResultIterator order. + SortDefault SortOption = false + + // SortReverse indicates that the result should be returned using the + // reversed go-memdb ResultIterator order. + SortReverse SortOption = true +) + const ( // NodeRegisterEventReregistered is the message used when the node becomes // reregistered. @@ -544,16 +557,17 @@ func (s *StateStore) upsertDeploymentImpl(index uint64, deployment *structs.Depl return nil } -func (s *StateStore) Deployments(ws memdb.WatchSet, ascending bool) (memdb.ResultIterator, error) { +func (s *StateStore) Deployments(ws memdb.WatchSet, sort SortOption) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() var it memdb.ResultIterator var err error - if ascending { - it, err = txn.Get("deployment", "create") - } else { + switch sort { + case SortReverse: it, err = txn.GetReverse("deployment", "create") + default: + it, err = txn.Get("deployment", "create") } if err != nil { @@ -578,7 +592,7 @@ func (s *StateStore) DeploymentsByNamespace(ws memdb.WatchSet, namespace string) return iter, nil } -func (s *StateStore) DeploymentsByNamespaceOrdered(ws memdb.WatchSet, namespace string, ascending bool) (memdb.ResultIterator, error) { +func (s *StateStore) DeploymentsByNamespaceOrdered(ws memdb.WatchSet, namespace string, sort SortOption) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() var ( @@ -587,10 +601,11 @@ func (s *StateStore) DeploymentsByNamespaceOrdered(ws memdb.WatchSet, namespace exact = terminate(namespace) ) - if ascending { - it, err = txn.Get("deployment", "namespace_create_prefix", exact) - } else { + switch sort { + case SortReverse: it, err = txn.GetReverse("deployment", "namespace_create_prefix", exact) + default: + it, err = txn.Get("deployment", "namespace_create_prefix", exact) } if err != nil { @@ -1919,8 +1934,13 @@ func (s *StateStore) JobByIDTxn(ws memdb.WatchSet, namespace, id string, txn Txn return nil, nil } -// JobsByIDPrefix is used to lookup a job by prefix +// JobsByIDPrefix is used to lookup a job by prefix. If querying all namespaces +// the prefix will not be filtered by an index. 
func (s *StateStore) JobsByIDPrefix(ws memdb.WatchSet, namespace, id string) (memdb.ResultIterator, error) {
+	if namespace == structs.AllNamespacesSentinel {
+		return s.jobsByIDPrefixAllNamespaces(ws, id)
+	}
+
 	txn := s.db.ReadTxn()
 
 	iter, err := txn.Get("jobs", "id_prefix", namespace, id)
@@ -1933,6 +1953,30 @@ func (s *StateStore) JobsByIDPrefix(ws memdb.WatchSet, namespace, id string) (me
 	return iter, nil
 }
 
+func (s *StateStore) jobsByIDPrefixAllNamespaces(ws memdb.WatchSet, prefix string) (memdb.ResultIterator, error) {
+	txn := s.db.ReadTxn()
+
+	// Walk the entire jobs table
+	iter, err := txn.Get("jobs", "id")
+
+	if err != nil {
+		return nil, err
+	}
+
+	ws.Add(iter.WatchCh())
+
+	// Filter the iterator by ID prefix
+	f := func(raw interface{}) bool {
+		job, ok := raw.(*structs.Job)
+		if !ok {
+			return true
+		}
+		return !strings.HasPrefix(job.ID, prefix)
+	}
+	wrap := memdb.NewFilterIterator(iter, f)
+	return wrap, nil
+}
+
 // JobVersionsByID returns all the tracked versions of a job.
 func (s *StateStore) JobVersionsByID(ws memdb.WatchSet, namespace, id string) ([]*structs.Job, error) {
 	txn := s.db.ReadTxn()
@@ -3188,17 +3232,18 @@ func (s *StateStore) EvalsByJob(ws memdb.WatchSet, namespace, jobID string) ([]*
 }
 
 // Evals returns an iterator over all the evaluations in ascending or descending
-// order of CreationIndex as determined by the ascending parameter.
-func (s *StateStore) Evals(ws memdb.WatchSet, ascending bool) (memdb.ResultIterator, error) {
+// order of CreateIndex as determined by the sort parameter.
+func (s *StateStore) Evals(ws memdb.WatchSet, sort SortOption) (memdb.ResultIterator, error) {
 	txn := s.db.ReadTxn()
 
 	var it memdb.ResultIterator
 	var err error
 
-	if ascending {
-		it, err = txn.Get("evals", "create")
-	} else {
+	switch sort {
+	case SortReverse:
 		it, err = txn.GetReverse("evals", "create")
+	default:
+		it, err = txn.Get("evals", "create")
 	}
 
 	if err != nil {
@@ -3227,7 +3272,7 @@
 	return it, nil
 }
 
-func (s *StateStore) EvalsByNamespaceOrdered(ws memdb.WatchSet, namespace string, ascending bool) (memdb.ResultIterator, error) {
+func (s *StateStore) EvalsByNamespaceOrdered(ws memdb.WatchSet, namespace string, sort SortOption) (memdb.ResultIterator, error) {
 	txn := s.db.ReadTxn()
 
 	var (
@@ -3236,10 +3281,11 @@
 		exact = terminate(namespace)
 	)
 
-	if ascending {
-		it, err = txn.Get("evals", "namespace_create_prefix", exact)
-	} else {
+	switch sort {
+	case SortReverse:
 		it, err = txn.GetReverse("evals", "namespace_create_prefix", exact)
+	default:
+		it, err = txn.Get("evals", "namespace_create_prefix", exact)
 	}
 
 	if err != nil {
@@ -3609,6 +3655,10 @@ func allocNamespaceFilter(namespace string) func(interface{}) bool {
 			return true
 		}
 
+		if namespace == structs.AllNamespacesSentinel {
+			return false
+		}
+
 		return alloc.Namespace != namespace
 	}
 }
@@ -3766,19 +3816,52 @@ func (s *StateStore) AllocsByDeployment(ws memdb.WatchSet, deploymentID string)
 	return out, nil
 }
 
-// Allocs returns an iterator over all the evaluations
-func (s *StateStore) Allocs(ws memdb.WatchSet) (memdb.ResultIterator, error) {
+// Allocs returns an iterator over all the allocations.
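+//
+// Results come back in "create" index order (CreateIndex, then ID as a
+// tiebreaker), so SortDefault iterates oldest-first and SortReverse
+// iterates newest-first.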
+func (s *StateStore) Allocs(ws memdb.WatchSet, sort SortOption) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() - // Walk the entire table - iter, err := txn.Get("allocs", "id") + var it memdb.ResultIterator + var err error + + switch sort { + case SortReverse: + it, err = txn.GetReverse("allocs", "create") + default: + it, err = txn.Get("allocs", "create") + } + if err != nil { return nil, err } - ws.Add(iter.WatchCh()) + ws.Add(it.WatchCh()) - return iter, nil + return it, nil +} + +func (s *StateStore) AllocsByNamespaceOrdered(ws memdb.WatchSet, namespace string, sort SortOption) (memdb.ResultIterator, error) { + txn := s.db.ReadTxn() + + var ( + it memdb.ResultIterator + err error + exact = terminate(namespace) + ) + + switch sort { + case SortReverse: + it, err = txn.GetReverse("allocs", "namespace_create_prefix", exact) + default: + it, err = txn.Get("allocs", "namespace_create_prefix", exact) + } + + if err != nil { + return nil, err + } + + ws.Add(it.WatchCh()) + + return it, nil } // AllocsByNamespace returns an iterator over all the allocations in the @@ -5464,14 +5547,22 @@ func (s *StateStore) ACLTokenByAccessorIDPrefix(ws memdb.WatchSet, prefix string } // ACLTokens returns an iterator over all the tokens -func (s *StateStore) ACLTokens(ws memdb.WatchSet) (memdb.ResultIterator, error) { +func (s *StateStore) ACLTokens(ws memdb.WatchSet, sort SortOption) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() - // Walk the entire table - iter, err := txn.Get("acl_token", "id") + var iter memdb.ResultIterator + var err error + + switch sort { + case SortReverse: + iter, err = txn.GetReverse("acl_token", "create") + default: + iter, err = txn.Get("acl_token", "create") + } if err != nil { return nil, err } + ws.Add(iter.WatchCh()) return iter, nil } diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 32987465a..e9b58b7b8 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -670,7 +670,7 @@ func TestStateStore_Deployments(t *testing.T) { } ws := memdb.NewWatchSet() - it, err := state.Deployments(ws, true) + it, err := state.Deployments(ws, SortDefault) require.NoError(t, err) var out []*structs.Deployment @@ -5432,7 +5432,7 @@ func TestStateStore_Allocs(t *testing.T) { } ws := memdb.NewWatchSet() - iter, err := state.Allocs(ws) + iter, err := state.Allocs(ws, SortDefault) if err != nil { t.Fatalf("err: %v", err) } @@ -5480,7 +5480,7 @@ func TestStateStore_Allocs_PrevAlloc(t *testing.T) { require.Nil(err) ws := memdb.NewWatchSet() - iter, err := state.Allocs(ws) + iter, err := state.Allocs(ws, SortDefault) require.Nil(err) var out []*structs.Allocation @@ -7508,7 +7508,7 @@ func TestStateStore_BootstrapACLTokens(t *testing.T) { t.Fatalf("expected error") } - iter, err := state.ACLTokens(nil) + iter, err := state.ACLTokens(nil, SortDefault) if err != nil { t.Fatalf("err: %v", err) } @@ -7602,7 +7602,7 @@ func TestStateStore_UpsertACLTokens(t *testing.T) { assert.Equal(t, nil, err) assert.Equal(t, tk2, out) - iter, err := state.ACLTokens(ws) + iter, err := state.ACLTokens(ws, SortDefault) if err != nil { t.Fatalf("err: %v", err) } @@ -7669,7 +7669,7 @@ func TestStateStore_DeleteACLTokens(t *testing.T) { t.Fatalf("bad: %#v", out) } - iter, err := state.ACLTokens(ws) + iter, err := state.ACLTokens(ws, SortDefault) if err != nil { t.Fatalf("err: %v", err) } diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 71b28753e..0a97cd9e5 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -318,6 +318,32 
@@ type CSIVolume struct { ModifyIndex uint64 } +// GetID implements the IDGetter interface, required for pagination. +func (v *CSIVolume) GetID() string { + if v == nil { + return "" + } + return v.ID +} + +// GetNamespace implements the NamespaceGetter interface, required for +// pagination. +func (v *CSIVolume) GetNamespace() string { + if v == nil { + return "" + } + return v.Namespace +} + +// GetCreateIndex implements the CreateIndexGetter interface, required for +// pagination. +func (v *CSIVolume) GetCreateIndex() uint64 { + if v == nil { + return 0 + } + return v.CreateIndex +} + // CSIVolListStub is partial representation of a CSI Volume for inclusion in lists type CSIVolListStub struct { ID string diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index a67606b7f..8f2974a3b 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -273,8 +273,8 @@ type QueryOptions struct { // previous response. NextToken string - // Ascending is used to have results sorted in ascending chronological order. - Ascending bool + // Reverse is used to reverse the default order of list results. + Reverse bool InternalRpcInfo } @@ -4188,6 +4188,33 @@ func (j *Job) NamespacedID() NamespacedID { } } +// GetID implements the IDGetter interface, required for pagination. +func (j *Job) GetID() string { + if j == nil { + return "" + } + return j.ID +} + +// GetNamespace implements the NamespaceGetter interface, required for +// pagination and filtering namespaces in endpoints that support glob namespace +// requests using tokens with limited access. +func (j *Job) GetNamespace() string { + if j == nil { + return "" + } + return j.Namespace +} + +// GetCreateIndex implements the CreateIndexGetter interface, required for +// pagination. +func (j *Job) GetCreateIndex() uint64 { + if j == nil { + return 0 + } + return j.CreateIndex +} + // Canonicalize is used to canonicalize fields in the Job. This should be // called when registering a Job. func (j *Job) Canonicalize() { @@ -9078,6 +9105,15 @@ func (d *Deployment) GetID() string { return d.ID } +// GetCreateIndex implements the CreateIndexGetter interface, required for +// pagination. +func (d *Deployment) GetCreateIndex() uint64 { + if d == nil { + return 0 + } + return d.CreateIndex +} + // HasPlacedCanaries returns whether the deployment has placed canaries func (d *Deployment) HasPlacedCanaries() bool { if d == nil || len(d.TaskGroups) == 0 { @@ -9467,6 +9503,33 @@ type Allocation struct { ModifyTime int64 } +// GetID implements the IDGetter interface, required for pagination. +func (a *Allocation) GetID() string { + if a == nil { + return "" + } + return a.ID +} + +// GetNamespace implements the NamespaceGetter interface, required for +// pagination and filtering namespaces in endpoints that support glob namespace +// requests using tokens with limited access. +func (a *Allocation) GetNamespace() string { + if a == nil { + return "" + } + return a.Namespace +} + +// GetCreateIndex implements the CreateIndexGetter interface, required for +// pagination. +func (a *Allocation) GetCreateIndex() uint64 { + if a == nil { + return 0 + } + return a.CreateIndex +} + // ConsulNamespace returns the Consul namespace of the task group associated // with this allocation. func (a *Allocation) ConsulNamespace() string { @@ -10569,6 +10632,23 @@ type Evaluation struct { ModifyTime int64 } +// GetID implements the IDGetter interface, required for pagination. 
+func (e *Evaluation) GetID() string {
+	if e == nil {
+		return ""
+	}
+	return e.ID
+}
+
+// GetCreateIndex implements the CreateIndexGetter interface, required for
+// pagination.
+func (e *Evaluation) GetCreateIndex() uint64 {
+	if e == nil {
+		return 0
+	}
+	return e.CreateIndex
+}
+
 // TerminalStatus returns if the current status is terminal and
 // will no longer transition.
 func (e *Evaluation) TerminalStatus() bool {
@@ -11339,6 +11419,23 @@ type ACLToken struct {
 	ModifyIndex uint64
 }
 
+// GetID implements the IDGetter interface, required for pagination.
+func (a *ACLToken) GetID() string {
+	if a == nil {
+		return ""
+	}
+	return a.AccessorID
+}
+
+// GetCreateIndex implements the CreateIndexGetter interface, required for
+// pagination.
+func (a *ACLToken) GetCreateIndex() uint64 {
+	if a == nil {
+		return 0
+	}
+	return a.CreateIndex
+}
+
 func (a *ACLToken) Copy() *ACLToken {
 	c := new(ACLToken)
 	*c = *a

From 68b203f77f586df9276f403f6391f8253a937417 Mon Sep 17 00:00:00 2001
From: Luiz Aoqui
Date: Wed, 9 Mar 2022 09:40:40 -0500
Subject: [PATCH 54/89] fix TestCSIVolumeEndpoint_List_PaginationFiltering test (#12245)

---
 nomad/csi_endpoint_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go
index a7129e8f4..c8591e38e 100644
--- a/nomad/csi_endpoint_test.go
+++ b/nomad/csi_endpoint_test.go
@@ -799,7 +799,7 @@ func TestCSIVolumeEndpoint_List_PaginationFiltering(t *testing.T) {
 			volume.Namespace = m.namespace
 		}
 		index := 1000 + uint64(i)
-		require.NoError(t, state.CSIVolumeRegister(index, []*structs.CSIVolume{volume}))
+		require.NoError(t, state.UpsertCSIVolume(index, []*structs.CSIVolume{volume}))
 	}
 
 	cases := []struct {

From 1593e2adf658e048d1328852dd2158fd0e09daa0 Mon Sep 17 00:00:00 2001
From: Tim Gross
Date: Wed, 9 Mar 2022 10:47:19 -0500
Subject: [PATCH 55/89] job summary query in `Job.List` RPC should use job's
 namespace (#12249)

The `Job.List` RPC attaches a `JobSummary` to each job stub. We're using the
request namespace and not the job namespace for that query, which results in
a nil `JobSummary` whenever we pass the wildcard namespace. This is incorrect
and causes panics in downstream consumers like the CLI, which assume the
`JobSummary` is non-nil as an unstated invariant.
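To see why the old query returns nil, consider a minimal sketch of a summary
table keyed by (namespace, job ID). The types, data, and helper below are
hypothetical, invented purely for illustration, and are not Nomad code:

```go
package main

import "fmt"

// summary is a hypothetical stand-in for structs.JobSummary.
type summary struct{ JobID, Namespace string }

// summaries models a job summary table keyed by (namespace, job ID).
var summaries = map[[2]string]*summary{
	{"prod", "web"}: {JobID: "web", Namespace: "prod"},
}

func jobSummaryByID(namespace, jobID string) *summary {
	return summaries[[2]string{namespace, jobID}]
}

func main() {
	// With the wildcard namespace, the iterator yields jobs from every
	// namespace, but the request namespace itself is "*".
	requestNS := "*"

	// Buggy lookup: uses the request namespace and finds nothing.
	fmt.Println(jobSummaryByID(requestNS, "web")) // <nil>, panics downstream

	// Fixed lookup: uses the job's own namespace.
	fmt.Println(jobSummaryByID("prod", "web")) // &{web prod}
}
```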
---
 nomad/job_endpoint.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go
index d6ab29058..b0ab7a95f 100644
--- a/nomad/job_endpoint.go
+++ b/nomad/job_endpoint.go
@@ -1412,8 +1412,8 @@ func (j *Job) List(args *structs.JobListRequest, reply *structs.JobListResponse)
 		paginator, err := paginator.NewPaginator(iter, tokenizer, filters, args.QueryOptions,
 			func(raw interface{}) error {
 				job := raw.(*structs.Job)
-				summary, err := state.JobSummaryByID(ws, namespace, job.ID)
-				if err != nil {
+				summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID)
+				if err != nil || summary == nil {
 					return fmt.Errorf("unable to look up summary for job: %v", job.ID)
 				}
 				jobs = append(jobs, job.Stub(summary))

From 48addb7f050cf11931849d2b01cebad6bef707bb Mon Sep 17 00:00:00 2001
From: Seth Hoenig
Date: Wed, 9 Mar 2022 12:46:57 -0600
Subject: [PATCH 56/89] docs: describe the cgo dependency

---
 contributing/cgo.md | 8 ++++++++
 1 file changed, 8 insertions(+)
 create mode 100644 contributing/cgo.md

diff --git a/contributing/cgo.md b/contributing/cgo.md
new file mode 100644
index 000000000..66449ad77
--- /dev/null
+++ b/contributing/cgo.md
@@ -0,0 +1,8 @@
+# CGO
+
+Nomad requires the use of CGO on Linux.
+
+Issue [#5643](https://github.com/hashicorp/nomad/issues/5643) tracks the desire for Nomad to not require CGO.
+
+One of the core features of Nomad (the exec driver) depends on [nsenter](https://pkg.go.dev/github.com/opencontainers/runc/libcontainer/nsenter).
+Until `nsenter` no longer requires CGO, the standalone Nomad executable on Linux will not be able to ship without depending on CGO.

From 3888a59c47a7bb9c95f2b3b430caef73ccc343d8 Mon Sep 17 00:00:00 2001
From: Seth Hoenig
Date: Wed, 9 Mar 2022 15:27:46 -0600
Subject: [PATCH 57/89] testing: use a smaller chroot when running exec driver
 tests

The default chroot copies all of /bin, /usr, etc., which can amount to
gigabytes of stuff not actually needed for running our tests. Use a smaller
chroot in test cases so that CI infra with poor disk IO has a chance.

---
 plugins/drivers/testutils/testing.go | 37 +++++++++++++++++++++++++++-
 1 file changed, 36 insertions(+), 1 deletion(-)

diff --git a/plugins/drivers/testutils/testing.go b/plugins/drivers/testutils/testing.go
index d8fd2f75d..5056344e0 100644
--- a/plugins/drivers/testutils/testing.go
+++ b/plugins/drivers/testutils/testing.go
@@ -75,6 +75,41 @@ func (h *DriverHarness) Kill() {
 	h.server.Stop()
 }
 
+// tinyChroot is useful for testing, where we do not use anything other than
+// trivial /bin commands like sleep and sh.
+//
+// Note that you cannot chroot a symlink.
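+//
+// Keys are paths on the host to copy from; values are the corresponding
+// destination paths inside the task's chroot.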
+var tinyChroot = map[string]string{
+	// destination: /bin
+	"/usr/bin/sleep": "/bin/sleep",
+	"/usr/bin/dash":  "/bin/sh",
+	"/usr/bin/bash":  "/bin/bash",
+	"/usr/bin/cat":   "/bin/cat",
+
+	// destination: /usr/bin
+	"/usr/bin/stty":   "/usr/bin/stty",
+	"/usr/bin/head":   "/usr/bin/head",
+	"/usr/bin/mktemp": "/usr/bin/mktemp",
+	"/usr/bin/echo":   "/usr/bin/echo",
+	"/usr/bin/touch":  "/usr/bin/touch",
+	"/usr/bin/stat":   "/usr/bin/stat",
+
+	// destination: /etc/
+	"/etc/ld.so.cache":  "/etc/ld.so.cache",
+	"/etc/ld.so.conf":   "/etc/ld.so.conf",
+	"/etc/ld.so.conf.d": "/etc/ld.so.conf.d",
+	"/etc/passwd":       "/etc/passwd",
+	"/etc/resolv.conf":  "/etc/resolv.conf",
+
+	// others
+	"/lib":                 "/lib",
+	"/lib32":               "/lib32",
+	"/lib64":               "/lib64",
+	"/usr/lib/jvm":         "/usr/lib/jvm",
+	"/run/resolvconf":      "/run/resolvconf",
+	"/run/systemd/resolve": "/run/systemd/resolve",
+}
+
 // MkAllocDir creates a temporary directory and allocdir structure.
 // If enableLogs is set to true a logmon instance will be started to write logs
 // to the LogDir of the task
@@ -95,7 +130,7 @@ func (h *DriverHarness) MkAllocDir(t *drivers.TaskConfig, enableLogs bool) func(
 	require.NoError(h.t, err)
 
 	fsi := caps.FSIsolation
-	require.NoError(h.t, taskDir.Build(fsi == drivers.FSIsolationChroot, config.DefaultChrootEnv))
+	require.NoError(h.t, taskDir.Build(fsi == drivers.FSIsolationChroot, tinyChroot))
 
 	task := &structs.Task{
 		Name: t.Name,

From 4a21dbcfaa6e7a6fee3a377b9b4a484c0afa11c1 Mon Sep 17 00:00:00 2001
From: Luiz Aoqui
Date: Thu, 10 Mar 2022 16:35:07 -0500
Subject: [PATCH 58/89] docs: add namespace param to job parse API (#12258)

---
 website/content/api-docs/jobs.mdx | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/website/content/api-docs/jobs.mdx b/website/content/api-docs/jobs.mdx
index e298c12de..bb265cfbb 100644
--- a/website/content/api-docs/jobs.mdx
+++ b/website/content/api-docs/jobs.mdx
@@ -209,8 +209,13 @@ The table below shows this endpoint's support for
 
 ### Parameters
 
+- `namespace` `(string: "default")` - Specifies the target namespace. If ACL is
+  enabled, this value must match a namespace that the token is allowed to
+  access. This is specified as a query string parameter.
+
 - `JobHCL` `(string: <required>)` - Specifies the HCL definition of the job
   encoded in a JSON string.
+
 - `Canonicalize` `(bool: false)` - Flag to enable setting any unset fields to
   their default values.
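As a usage sketch of the parameter documented above, the parse endpoint can be
called with the namespace passed as a query string. This assumes a local agent
at the default address, and the HCL payload is purely illustrative:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Request body for POST /v1/jobs/parse.
	body, err := json.Marshal(map[string]interface{}{
		"JobHCL":       `job "example" { datacenters = ["dc1"] }`,
		"Canonicalize": true,
	})
	if err != nil {
		panic(err)
	}

	// The namespace is passed as a query string parameter.
	u := url.URL{
		Scheme:   "http",
		Host:     "localhost:4646",
		Path:     "/v1/jobs/parse",
		RawQuery: url.Values{"namespace": {"qa"}}.Encode(),
	}

	resp, err := http.Post(u.String(), "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```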
From ddbbda65617626da6af57b152df4e70669c9f898 Mon Sep 17 00:00:00 2001 From: Luiz Aoqui Date: Fri, 11 Mar 2022 19:44:52 -0500 Subject: [PATCH 59/89] api: apply consistent behaviour of the reverse query parameter (#12244) --- nomad/acl_endpoint.go | 4 +- nomad/alloc_endpoint.go | 2 +- nomad/deployment_endpoint.go | 2 +- nomad/eval_endpoint.go | 2 +- nomad/leader.go | 4 +- nomad/node_endpoint_test.go | 12 +- nomad/search_endpoint.go | 22 +- nomad/state/state_store.go | 62 +++- nomad/state/state_store_test.go | 373 ++++++++++++++--------- website/content/api-docs/acl-tokens.mdx | 8 + website/content/api-docs/allocations.mdx | 5 + website/content/api-docs/deployments.mdx | 7 +- website/content/api-docs/evaluations.mdx | 7 +- 13 files changed, 321 insertions(+), 189 deletions(-) diff --git a/nomad/acl_endpoint.go b/nomad/acl_endpoint.go index 8f3031952..c5df70e29 100644 --- a/nomad/acl_endpoint.go +++ b/nomad/acl_endpoint.go @@ -665,12 +665,12 @@ func (a *ACL) ListTokens(args *structs.ACLTokenListRequest, reply *structs.ACLTo var opts paginator.StructsTokenizerOptions if prefix := args.QueryOptions.Prefix; prefix != "" { - iter, err = state.ACLTokenByAccessorIDPrefix(ws, prefix) + iter, err = state.ACLTokenByAccessorIDPrefix(ws, prefix, sort) opts = paginator.StructsTokenizerOptions{ WithID: true, } } else if args.GlobalOnly { - iter, err = state.ACLTokensByGlobal(ws, true) + iter, err = state.ACLTokensByGlobal(ws, true, sort) opts = paginator.StructsTokenizerOptions{ WithID: true, } diff --git a/nomad/alloc_endpoint.go b/nomad/alloc_endpoint.go index 65b0ea4b7..f06e1cb24 100644 --- a/nomad/alloc_endpoint.go +++ b/nomad/alloc_endpoint.go @@ -80,7 +80,7 @@ func (a *Alloc) List(args *structs.AllocListRequest, reply *structs.AllocListRes return err } else { if prefix := args.QueryOptions.Prefix; prefix != "" { - iter, err = state.AllocsByIDPrefix(ws, namespace, prefix) + iter, err = state.AllocsByIDPrefix(ws, namespace, prefix, sort) opts = paginator.StructsTokenizerOptions{ WithID: true, } diff --git a/nomad/deployment_endpoint.go b/nomad/deployment_endpoint.go index d0217f424..84632f5a5 100644 --- a/nomad/deployment_endpoint.go +++ b/nomad/deployment_endpoint.go @@ -414,7 +414,7 @@ func (d *Deployment) List(args *structs.DeploymentListRequest, reply *structs.De var opts paginator.StructsTokenizerOptions if prefix := args.QueryOptions.Prefix; prefix != "" { - iter, err = store.DeploymentsByIDPrefix(ws, namespace, prefix) + iter, err = store.DeploymentsByIDPrefix(ws, namespace, prefix, sort) opts = paginator.StructsTokenizerOptions{ WithID: true, } diff --git a/nomad/eval_endpoint.go b/nomad/eval_endpoint.go index daec216d2..b3e4d371e 100644 --- a/nomad/eval_endpoint.go +++ b/nomad/eval_endpoint.go @@ -419,7 +419,7 @@ func (e *Eval) List(args *structs.EvalListRequest, reply *structs.EvalListRespon var opts paginator.StructsTokenizerOptions if prefix := args.QueryOptions.Prefix; prefix != "" { - iter, err = store.EvalsByIDPrefix(ws, namespace, prefix) + iter, err = store.EvalsByIDPrefix(ws, namespace, prefix, sort) opts = paginator.StructsTokenizerOptions{ WithID: true, } diff --git a/nomad/leader.go b/nomad/leader.go index 7facb66c7..5a50995a4 100644 --- a/nomad/leader.go +++ b/nomad/leader.go @@ -1524,13 +1524,13 @@ ERR_WAIT: // diffACLTokens is used to perform a two-way diff between the local // tokens and the remote tokens to determine which tokens need to // be deleted or updated. 
-func diffACLTokens(state *state.StateStore, minIndex uint64, remoteList []*structs.ACLTokenListStub) (delete []string, update []string) { +func diffACLTokens(store *state.StateStore, minIndex uint64, remoteList []*structs.ACLTokenListStub) (delete []string, update []string) { // Construct a set of the local and remote policies local := make(map[string][]byte) remote := make(map[string]struct{}) // Add all the local global tokens - iter, err := state.ACLTokensByGlobal(nil, true) + iter, err := store.ACLTokensByGlobal(nil, true, state.SortDefault) if err != nil { panic("failed to iterate local tokens") } diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index 1070892b0..c91dafc1a 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -2063,11 +2063,11 @@ func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) { alloc := mock.Alloc() alloc.NodeID = node.ID alloc.ModifyTime = now - state := s1.fsm.State() - state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) + store := s1.fsm.State() + store.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)) start := time.Now() time.AfterFunc(100*time.Millisecond, func() { - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) + err := store.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}) if err != nil { t.Fatalf("err: %v", err) } @@ -2101,7 +2101,7 @@ func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) { t.Fatalf("bad: %#v", resp2.Allocs) } - iter, err := state.AllocsByIDPrefix(nil, structs.DefaultNamespace, alloc.ID) + iter, err := store.AllocsByIDPrefix(nil, structs.DefaultNamespace, alloc.ID, state.SortDefault) if err != nil { t.Fatalf("err: %v", err) } @@ -2133,8 +2133,8 @@ func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) { allocUpdate.NodeID = alloc.NodeID allocUpdate.ID = alloc.ID allocUpdate.ClientStatus = structs.AllocClientStatusRunning - state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID)) - err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate}) + store.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID)) + err := store.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate}) if err != nil { t.Fatalf("err: %v", err) } diff --git a/nomad/search_endpoint.go b/nomad/search_endpoint.go index 73cfb47e2..90bb3e420 100644 --- a/nomad/search_endpoint.go +++ b/nomad/search_endpoint.go @@ -355,26 +355,26 @@ func sortSet(matches []fuzzyMatch) { // getResourceIter takes a context and returns a memdb iterator specific to // that context -func getResourceIter(context structs.Context, aclObj *acl.ACL, namespace, prefix string, ws memdb.WatchSet, state *state.StateStore) (memdb.ResultIterator, error) { +func getResourceIter(context structs.Context, aclObj *acl.ACL, namespace, prefix string, ws memdb.WatchSet, store *state.StateStore) (memdb.ResultIterator, error) { switch context { case structs.Jobs: - return state.JobsByIDPrefix(ws, namespace, prefix) + return store.JobsByIDPrefix(ws, namespace, prefix) case structs.Evals: - return state.EvalsByIDPrefix(ws, namespace, prefix) + return store.EvalsByIDPrefix(ws, namespace, prefix, state.SortDefault) case structs.Allocs: - return state.AllocsByIDPrefix(ws, namespace, prefix) + return store.AllocsByIDPrefix(ws, namespace, prefix, state.SortDefault) case structs.Nodes: - return state.NodesByIDPrefix(ws, prefix) + return store.NodesByIDPrefix(ws, prefix) case structs.Deployments: - return 
state.DeploymentsByIDPrefix(ws, namespace, prefix) + return store.DeploymentsByIDPrefix(ws, namespace, prefix, state.SortDefault) case structs.Plugins: - return state.CSIPluginsByIDPrefix(ws, prefix) + return store.CSIPluginsByIDPrefix(ws, prefix) case structs.ScalingPolicies: - return state.ScalingPoliciesByIDPrefix(ws, namespace, prefix) + return store.ScalingPoliciesByIDPrefix(ws, namespace, prefix) case structs.Volumes: - return state.CSIVolumesByIDPrefix(ws, namespace, prefix) + return store.CSIVolumesByIDPrefix(ws, namespace, prefix) case structs.Namespaces: - iter, err := state.NamespacesByNamePrefix(ws, prefix) + iter, err := store.NamespacesByNamePrefix(ws, prefix) if err != nil { return nil, err } @@ -383,7 +383,7 @@ func getResourceIter(context structs.Context, aclObj *acl.ACL, namespace, prefix } return memdb.NewFilterIterator(iter, nsCapFilter(aclObj)), nil default: - return getEnterpriseResourceIter(context, aclObj, namespace, prefix, ws, state) + return getEnterpriseResourceIter(context, aclObj, namespace, prefix, ws, store) } } diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 60bf36fc1..42f2ebf1c 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -617,11 +617,19 @@ func (s *StateStore) DeploymentsByNamespaceOrdered(ws memdb.WatchSet, namespace return it, nil } -func (s *StateStore) DeploymentsByIDPrefix(ws memdb.WatchSet, namespace, deploymentID string) (memdb.ResultIterator, error) { +func (s *StateStore) DeploymentsByIDPrefix(ws memdb.WatchSet, namespace, deploymentID string, sort SortOption) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() + var iter memdb.ResultIterator + var err error + // Walk the entire deployments table - iter, err := txn.Get("deployment", "id_prefix", deploymentID) + switch sort { + case SortReverse: + iter, err = txn.GetReverse("deployment", "id_prefix", deploymentID) + default: + iter, err = txn.Get("deployment", "id_prefix", deploymentID) + } if err != nil { return nil, err } @@ -3171,11 +3179,19 @@ func (s *StateStore) EvalByID(ws memdb.WatchSet, id string) (*structs.Evaluation // EvalsByIDPrefix is used to lookup evaluations by prefix in a particular // namespace -func (s *StateStore) EvalsByIDPrefix(ws memdb.WatchSet, namespace, id string) (memdb.ResultIterator, error) { +func (s *StateStore) EvalsByIDPrefix(ws memdb.WatchSet, namespace, id string, sort SortOption) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() + var iter memdb.ResultIterator + var err error + // Get an iterator over all evals by the id prefix - iter, err := txn.Get("evals", "id_prefix", id) + switch sort { + case SortReverse: + iter, err = txn.GetReverse("evals", "id_prefix", id) + default: + iter, err = txn.Get("evals", "id_prefix", id) + } if err != nil { return nil, fmt.Errorf("eval lookup failed: %v", err) } @@ -3631,10 +3647,18 @@ func (s *StateStore) allocByIDImpl(txn Txn, ws memdb.WatchSet, id string) (*stru } // AllocsByIDPrefix is used to lookup allocs by prefix -func (s *StateStore) AllocsByIDPrefix(ws memdb.WatchSet, namespace, id string) (memdb.ResultIterator, error) { +func (s *StateStore) AllocsByIDPrefix(ws memdb.WatchSet, namespace, id string, sort SortOption) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() - iter, err := txn.Get("allocs", "id_prefix", id) + var iter memdb.ResultIterator + var err error + + switch sort { + case SortReverse: + iter, err = txn.GetReverse("allocs", "id_prefix", id) + default: + iter, err = txn.Get("allocs", "id_prefix", id) + } if err != nil { return nil, 
fmt.Errorf("alloc lookup failed: %v", err) } @@ -5535,13 +5559,22 @@ func (s *StateStore) ACLTokenBySecretID(ws memdb.WatchSet, secretID string) (*st } // ACLTokenByAccessorIDPrefix is used to lookup tokens by prefix -func (s *StateStore) ACLTokenByAccessorIDPrefix(ws memdb.WatchSet, prefix string) (memdb.ResultIterator, error) { +func (s *StateStore) ACLTokenByAccessorIDPrefix(ws memdb.WatchSet, prefix string, sort SortOption) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() - iter, err := txn.Get("acl_token", "id_prefix", prefix) + var iter memdb.ResultIterator + var err error + + switch sort { + case SortReverse: + iter, err = txn.GetReverse("acl_token", "id_prefix", prefix) + default: + iter, err = txn.Get("acl_token", "id_prefix", prefix) + } if err != nil { return nil, fmt.Errorf("acl token lookup failed: %v", err) } + ws.Add(iter.WatchCh()) return iter, nil } @@ -5568,14 +5601,23 @@ func (s *StateStore) ACLTokens(ws memdb.WatchSet, sort SortOption) (memdb.Result } // ACLTokensByGlobal returns an iterator over all the tokens filtered by global value -func (s *StateStore) ACLTokensByGlobal(ws memdb.WatchSet, globalVal bool) (memdb.ResultIterator, error) { +func (s *StateStore) ACLTokensByGlobal(ws memdb.WatchSet, globalVal bool, sort SortOption) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() + var iter memdb.ResultIterator + var err error + // Walk the entire table - iter, err := txn.Get("acl_token", "global", globalVal) + switch sort { + case SortReverse: + iter, err = txn.GetReverse("acl_token", "global", globalVal) + default: + iter, err = txn.Get("acl_token", "global", globalVal) + } if err != nil { return nil, err } + ws.Add(iter.WatchCh()) return iter, nil } diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index e9b58b7b8..441344a28 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -694,16 +694,7 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { deploy.ID = "11111111-662e-d0ab-d1c9-3e434af7bdb4" err := state.UpsertDeployment(1000, deploy) - if err != nil { - t.Fatalf("err: %v", err) - } - - // Create a watchset so we can test that getters don't cause it to fire - ws := memdb.NewWatchSet() - iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, deploy.ID) - if err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, err) gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment { var deploys []*structs.Deployment @@ -718,60 +709,67 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { return deploys } - deploys := gatherDeploys(iter) - if len(deploys) != 1 { - t.Fatalf("err: %v", err) - } + t.Run("first deployment", func(t *testing.T) { + // Create a watchset so we can test that getters don't cause it to fire + ws := memdb.NewWatchSet() + iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, deploy.ID, SortDefault) + require.NoError(t, err) - if watchFired(ws) { - t.Fatalf("bad") - } + deploys := gatherDeploys(iter) + require.Len(t, deploys, 1) + require.False(t, watchFired(ws)) + }) - iter, err = state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11") - if err != nil { - t.Fatalf("err: %v", err) - } + t.Run("using prefix", func(t *testing.T) { + ws := memdb.NewWatchSet() + iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11", SortDefault) + require.NoError(t, err) - deploys = gatherDeploys(iter) - if len(deploys) != 1 { - t.Fatalf("err: %v", err) - } + deploys := gatherDeploys(iter) + require.Len(t, deploys, 1) + require.False(t, 
watchFired(ws)) + }) deploy = mock.Deployment() deploy.ID = "11222222-662e-d0ab-d1c9-3e434af7bdb4" err = state.UpsertDeployment(1001, deploy) - if err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, err) - if !watchFired(ws) { - t.Fatalf("bad") - } + t.Run("more than one", func(t *testing.T) { + ws := memdb.NewWatchSet() + iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11", SortDefault) + require.NoError(t, err) - ws = memdb.NewWatchSet() - iter, err = state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11") - if err != nil { - t.Fatalf("err: %v", err) - } + deploys := gatherDeploys(iter) + require.Len(t, deploys, 2) + }) - deploys = gatherDeploys(iter) - if len(deploys) != 2 { - t.Fatalf("err: %v", err) - } + t.Run("filter to one", func(t *testing.T) { + ws := memdb.NewWatchSet() + iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "1111", SortDefault) + require.NoError(t, err) - iter, err = state.DeploymentsByIDPrefix(ws, deploy.Namespace, "1111") - if err != nil { - t.Fatalf("err: %v", err) - } + deploys := gatherDeploys(iter) + require.Len(t, deploys, 1) + require.False(t, watchFired(ws)) + }) - deploys = gatherDeploys(iter) - if len(deploys) != 1 { - t.Fatalf("err: %v", err) - } + t.Run("reverse order", func(t *testing.T) { + ws := memdb.NewWatchSet() + iter, err := state.DeploymentsByIDPrefix(ws, deploy.Namespace, "11", SortReverse) + require.NoError(t, err) - if watchFired(ws) { - t.Fatalf("bad") - } + got := []string{} + for _, d := range gatherDeploys(iter) { + got = append(got, d.ID) + } + expected := []string{ + "11222222-662e-d0ab-d1c9-3e434af7bdb4", + "11111111-662e-d0ab-d1c9-3e434af7bdb4", + } + require.Equal(t, expected, got) + require.False(t, watchFired(ws)) + }) } func TestStateStore_UpsertNode_Node(t *testing.T) { @@ -3874,12 +3872,6 @@ func TestStateStore_EvalsByIDPrefix(t *testing.T) { t.Fatalf("err: %v", err) } - ws := memdb.NewWatchSet() - iter, err := state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "aaaa") - if err != nil { - t.Fatalf("err: %v", err) - } - gatherEvals := func(iter memdb.ResultIterator) []*structs.Evaluation { var evals []*structs.Evaluation for { @@ -3892,32 +3884,57 @@ func TestStateStore_EvalsByIDPrefix(t *testing.T) { return evals } - out := gatherEvals(iter) - if len(out) != 5 { - t.Fatalf("bad: expected five evaluations, got: %#v", out) - } + t.Run("list by prefix", func(t *testing.T) { + ws := memdb.NewWatchSet() + iter, err := state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "aaaa", SortDefault) + require.NoError(t, err) - sort.Sort(EvalIDSort(evals)) - - for index, eval := range out { - if ids[index] != eval.ID { - t.Fatalf("bad: got unexpected id: %s", eval.ID) + got := []string{} + for _, e := range gatherEvals(iter) { + got = append(got, e.ID) } - } - iter, err = state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "b-a7bfb") - if err != nil { - t.Fatalf("err: %v", err) - } + expected := []string{ + "aaaaaaaa-7bfb-395d-eb95-0685af2176b2", + "aaaaaaab-7bfb-395d-eb95-0685af2176b2", + "aaaaaabb-7bfb-395d-eb95-0685af2176b2", + "aaaaabbb-7bfb-395d-eb95-0685af2176b2", + "aaaabbbb-7bfb-395d-eb95-0685af2176b2", + } + require.Len(t, got, 5, "expected five evaluations") + require.Equal(t, expected, got) // Must be in this order. 
+ }) - out = gatherEvals(iter) - if len(out) != 0 { - t.Fatalf("bad: unexpected zero evaluations, got: %#v", out) - } + t.Run("invalid prefix", func(t *testing.T) { + ws := memdb.NewWatchSet() + iter, err := state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "b-a7bfb", SortDefault) + require.NoError(t, err) - if watchFired(ws) { - t.Fatalf("bad") - } + out := gatherEvals(iter) + require.Len(t, out, 0, "expected zero evaluations") + require.False(t, watchFired(ws)) + }) + + t.Run("reverse order", func(t *testing.T) { + ws := memdb.NewWatchSet() + iter, err := state.EvalsByIDPrefix(ws, structs.DefaultNamespace, "aaaa", SortReverse) + require.NoError(t, err) + + got := []string{} + for _, e := range gatherEvals(iter) { + got = append(got, e.ID) + } + + expected := []string{ + "aaaabbbb-7bfb-395d-eb95-0685af2176b2", + "aaaaabbb-7bfb-395d-eb95-0685af2176b2", + "aaaaaabb-7bfb-395d-eb95-0685af2176b2", + "aaaaaaab-7bfb-395d-eb95-0685af2176b2", + "aaaaaaaa-7bfb-395d-eb95-0685af2176b2", + } + require.Len(t, got, 5, "expected five evaluations") + require.Equal(t, expected, got) // Must be in this order. + }) } func TestStateStore_UpdateAllocsFromClient(t *testing.T) { @@ -5362,15 +5379,7 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { } err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, allocs) - if err != nil { - t.Fatalf("err: %v", err) - } - - ws := memdb.NewWatchSet() - iter, err := state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "aaaa") - if err != nil { - t.Fatalf("err: %v", err) - } + require.NoError(t, err) gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation { var allocs []*structs.Allocation @@ -5384,32 +5393,61 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { return allocs } - out := gatherAllocs(iter) - if len(out) != 5 { - t.Fatalf("bad: expected five allocations, got: %#v", out) - } + t.Run("allocs by prefix", func(t *testing.T) { + ws := memdb.NewWatchSet() + iter, err := state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "aaaa", SortDefault) + require.NoError(t, err) - sort.Sort(AllocIDSort(allocs)) + out := gatherAllocs(iter) + require.Len(t, out, 5, "expected five allocations") - for index, alloc := range out { - if ids[index] != alloc.ID { - t.Fatalf("bad: got unexpected id: %s", alloc.ID) + got := []string{} + for _, a := range out { + got = append(got, a.ID) } - } + expected := []string{ + "aaaaaaaa-7bfb-395d-eb95-0685af2176b2", + "aaaaaaab-7bfb-395d-eb95-0685af2176b2", + "aaaaaabb-7bfb-395d-eb95-0685af2176b2", + "aaaaabbb-7bfb-395d-eb95-0685af2176b2", + "aaaabbbb-7bfb-395d-eb95-0685af2176b2", + } + require.Equal(t, expected, got) + require.False(t, watchFired(ws)) + }) - iter, err = state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "b-a7bfb") - if err != nil { - t.Fatalf("err: %v", err) - } + t.Run("invalid prefix", func(t *testing.T) { + ws := memdb.NewWatchSet() + iter, err := state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "b-a7bfb", SortDefault) + require.NoError(t, err) - out = gatherAllocs(iter) - if len(out) != 0 { - t.Fatalf("bad: unexpected zero allocations, got: %#v", out) - } + out := gatherAllocs(iter) + require.Len(t, out, 0) + require.False(t, watchFired(ws)) + }) - if watchFired(ws) { - t.Fatalf("bad") - } + t.Run("reverse", func(t *testing.T) { + ws := memdb.NewWatchSet() + iter, err := state.AllocsByIDPrefix(ws, structs.DefaultNamespace, "aaaa", SortReverse) + require.NoError(t, err) + + out := gatherAllocs(iter) + require.Len(t, out, 5, "expected five allocations") + + got := []string{} + for _, a := range 
out { + got = append(got, a.ID) + } + expected := []string{ + "aaaabbbb-7bfb-395d-eb95-0685af2176b2", + "aaaaabbb-7bfb-395d-eb95-0685af2176b2", + "aaaaaabb-7bfb-395d-eb95-0685af2176b2", + "aaaaaaab-7bfb-395d-eb95-0685af2176b2", + "aaaaaaaa-7bfb-395d-eb95-0685af2176b2", + } + require.Equal(t, expected, got) + require.False(t, watchFired(ws)) + }) } func TestStateStore_Allocs(t *testing.T) { @@ -7717,36 +7755,54 @@ func TestStateStore_ACLTokenByAccessorIDPrefix(t *testing.T) { for _, prefix := range prefixes { tk := mock.ACLToken() tk.AccessorID = prefix + tk.AccessorID[4:] - if err := state.UpsertACLTokens(structs.MsgTypeTestSetup, baseIndex, []*structs.ACLToken{tk}); err != nil { - t.Fatalf("err: %v", err) - } + err := state.UpsertACLTokens(structs.MsgTypeTestSetup, baseIndex, []*structs.ACLToken{tk}) + require.NoError(t, err) baseIndex++ } - // Scan by prefix - iter, err := state.ACLTokenByAccessorIDPrefix(nil, "aa") - if err != nil { - t.Fatalf("err: %v", err) - } - - // Ensure we see both tokens - count := 0 - out := []string{} - for { - raw := iter.Next() - if raw == nil { - break + gatherTokens := func(iter memdb.ResultIterator) []*structs.ACLToken { + var tokens []*structs.ACLToken + for { + raw := iter.Next() + if raw == nil { + break + } + tokens = append(tokens, raw.(*structs.ACLToken)) } - count++ - out = append(out, raw.(*structs.ACLToken).AccessorID[:4]) + return tokens } - if count != 2 { - t.Fatalf("bad: %d %v", count, out) - } - sort.Strings(out) - expect := []string{"aaaa", "aabb"} - assert.Equal(t, expect, out) + t.Run("scan by prefix", func(t *testing.T) { + iter, err := state.ACLTokenByAccessorIDPrefix(nil, "aa", SortDefault) + require.NoError(t, err) + + // Ensure we see both tokens + out := gatherTokens(iter) + require.Len(t, out, 2) + + got := []string{} + for _, t := range out { + got = append(got, t.AccessorID[:4]) + } + expect := []string{"aaaa", "aabb"} + require.Equal(t, expect, got) + }) + + t.Run("reverse order", func(t *testing.T) { + iter, err := state.ACLTokenByAccessorIDPrefix(nil, "aa", SortReverse) + require.NoError(t, err) + + // Ensure we see both tokens + out := gatherTokens(iter) + require.Len(t, out, 2) + + got := []string{} + for _, t := range out { + got = append(got, t.AccessorID[:4]) + } + expect := []string{"aabb", "aaaa"} + require.Equal(t, expect, got) + }) } func TestStateStore_ACLTokensByGlobal(t *testing.T) { @@ -7754,32 +7810,51 @@ func TestStateStore_ACLTokensByGlobal(t *testing.T) { state := testStateStore(t) tk1 := mock.ACLToken() + tk1.AccessorID = "aaaa" + tk1.AccessorID[4:] + tk2 := mock.ACLToken() + tk2.AccessorID = "aabb" + tk2.AccessorID[4:] + tk3 := mock.ACLToken() - tk4 := mock.ACLToken() + tk3.AccessorID = "bbbb" + tk3.AccessorID[4:] tk3.Global = true - if err := state.UpsertACLTokens(structs.MsgTypeTestSetup, 1000, []*structs.ACLToken{tk1, tk2, tk3, tk4}); err != nil { - t.Fatalf("err: %v", err) - } + tk4 := mock.ACLToken() + tk4.AccessorID = "ffff" + tk4.AccessorID[4:] - iter, err := state.ACLTokensByGlobal(nil, true) - if err != nil { - t.Fatalf("err: %v", err) - } + err := state.UpsertACLTokens(structs.MsgTypeTestSetup, 1000, []*structs.ACLToken{tk1, tk2, tk3, tk4}) + require.NoError(t, err) - // Ensure we see the one global policies - count := 0 - for { - raw := iter.Next() - if raw == nil { - break + gatherTokens := func(iter memdb.ResultIterator) []*structs.ACLToken { + var tokens []*structs.ACLToken + for { + raw := iter.Next() + if raw == nil { + break + } + tokens = append(tokens, raw.(*structs.ACLToken)) } - count++ 
-		count++
-	}
-	if count != 1 {
-		t.Fatalf("bad: %d", count)
+		return tokens
 	}
+
+	t.Run("only global tokens", func(t *testing.T) {
+		iter, err := state.ACLTokensByGlobal(nil, true, SortDefault)
+		require.NoError(t, err)
+
+		got := gatherTokens(iter)
+		require.Len(t, got, 1)
+		require.Equal(t, tk3.AccessorID, got[0].AccessorID)
+	})
+
+	t.Run("reverse order", func(t *testing.T) {
+		iter, err := state.ACLTokensByGlobal(nil, false, SortReverse)
+		require.NoError(t, err)
+
+		expected := []*structs.ACLToken{tk4, tk2, tk1}
+		got := gatherTokens(iter)
+		require.Len(t, got, 3)
+		require.Equal(t, expected, got)
+	})
 }
 
 func TestStateStore_OneTimeTokens(t *testing.T) {
diff --git a/website/content/api-docs/acl-tokens.mdx b/website/content/api-docs/acl-tokens.mdx
index acdf03f7a..02663e319 100644
--- a/website/content/api-docs/acl-tokens.mdx
+++ b/website/content/api-docs/acl-tokens.mdx
@@ -70,11 +70,19 @@ The table below shows this endpoint's support for
 
 ### Parameters
 
+- `global` `(bool: false)` - If true, only return ACL tokens that are
+  replicated globally to all regions.
+
 - `prefix` `(string: "")` - Specifies a string to filter ACL tokens based on an
   accessor ID prefix. Because the value is decoded to bytes, the prefix must
   have an even number of hexadecimal characters (0-9a-f). This is specified as
   a query string parameter.
 
+- `reverse` `(bool: false)` - Specifies the list of returned ACL tokens should
+  be sorted in the reverse order. By default ACL tokens are returned sorted in
+  chronological order (older ACL tokens first), or in lexicographical order by
+  their ID if the `prefix` or `global` query parameters are used.
+
 ### Sample Request
diff --git a/website/content/api-docs/allocations.mdx b/website/content/api-docs/allocations.mdx
index e39eb7f4c..21303d274 100644
--- a/website/content/api-docs/allocations.mdx
+++ b/website/content/api-docs/allocations.mdx
@@ -43,6 +43,11 @@ The table below shows this endpoint's support for
   a large number of allocations may set `task_states=false` to significantly
   reduce the size of the response.
 
+- `reverse` `(bool: false)` - Specifies the list of returned allocations should
+  be sorted in the reverse order. By default allocations are returned sorted in
+  chronological order (older allocations first), or in lexicographical order by
+  their ID if the `prefix` query parameter is used.
+
 ### Sample Request
diff --git a/website/content/api-docs/deployments.mdx b/website/content/api-docs/deployments.mdx
index d9cc238d4..c8f359b0f 100644
--- a/website/content/api-docs/deployments.mdx
+++ b/website/content/api-docs/deployments.mdx
@@ -50,9 +50,10 @@ The table below shows this endpoint's support for
   results. Consider using pagination or a query parameter to reduce resource
   used to serve the request.
 
-- `ascending` `(bool: false)` - Specifies the list of returned deployments should
-  be sorted in chronological order (oldest evaluations first). By default deployments
-  are returned sorted in reverse chronological order (newest deployments first).
+- `reverse` `(bool: false)` - Specifies the list of returned deployments should
+  be sorted in the reverse order. By default deployments are returned sorted in
+  chronological order (older deployments first), or in lexicographical order
+  by their ID if the `prefix` query parameter is used.
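The `reverse` parameter described in these docs maps directly onto the
`SortOption` added to the state store earlier in this series. A minimal,
self-contained sketch of that pattern follows; the sorted rows here are faked,
whereas the real code switches between `txn.Get` and `txn.GetReverse` on the
`create` index:

```go
package main

import "fmt"

// SortOption mirrors the state package's type: false is the default index
// order, true is reversed.
type SortOption bool

const (
	SortDefault SortOption = false
	SortReverse SortOption = true
)

// list fakes a state store accessor: rows are stored in "create" index
// order (ascending CreateIndex), and SortReverse flips the iteration.
func list(rows []string, sort SortOption) []string {
	if sort != SortReverse {
		return rows
	}
	out := make([]string, len(rows))
	for i, r := range rows {
		out[len(rows)-1-i] = r
	}
	return out
}

func main() {
	rows := []string{"10.dep-a", "11.dep-b", "12.dep-c"}

	// reverse=false (default): oldest CreateIndex first.
	fmt.Println(list(rows, SortOption(false)))

	// reverse=true: newest CreateIndex first.
	fmt.Println(list(rows, SortOption(true)))
}
```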
### Sample Request diff --git a/website/content/api-docs/evaluations.mdx b/website/content/api-docs/evaluations.mdx index 37a49228a..9a33d469b 100644 --- a/website/content/api-docs/evaluations.mdx +++ b/website/content/api-docs/evaluations.mdx @@ -57,9 +57,10 @@ The table below shows this endpoint's support for Specifying `*` will return all evaluations across all authorized namespaces. This parameter is used before any `filter` expression is applied. -- `ascending` `(bool: false)` - Specifies the list of returned evaluations should - be sorted in chronological order (oldest evaluations first). By default evaluations - are returned sorted in reverse chronological order (newest evaluations first). +- `reverse` `(bool: false)` - Specifies the list of returned evaluations should + be sorted in the reverse order. By default evaluations are returned sorted in + chronological order (older evaluations first), or in lexicographical order by + their ID if the `prefix` query parameter is used. ### Sample Request From 8c8a8d2e2320bc7b57ca4fbdf82908e10fe53c94 Mon Sep 17 00:00:00 2001 From: Lars Lehtonen Date: Mon, 14 Mar 2022 07:00:59 -0700 Subject: [PATCH 60/89] scheduler: fix unused dstate variable (#12268) --- scheduler/reconcile.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scheduler/reconcile.go b/scheduler/reconcile.go index 16ddc6743..3eb9f870b 100644 --- a/scheduler/reconcile.go +++ b/scheduler/reconcile.go @@ -466,7 +466,7 @@ func (a *allocReconciler) computeGroup(groupName string, all allocSet) bool { a.createDeployment(tg.Name, tg.Update, existingDeployment, dstate, all, destructive) deploymentComplete := a.isDeploymentComplete(groupName, destructive, inplace, - migrate, rescheduleNow, dstate, place, rescheduleLater, requiresCanaries) + migrate, rescheduleNow, place, rescheduleLater, requiresCanaries) return deploymentComplete } @@ -833,7 +833,7 @@ func (a *allocReconciler) createDeployment(groupName string, strategy *structs.U } func (a *allocReconciler) isDeploymentComplete(groupName string, destructive, inplace, migrate, rescheduleNow allocSet, - dstate *structs.DeploymentState, place []allocPlaceResult, rescheduleLater []*delayedRescheduleInfo, requiresCanaries bool) bool { + place []allocPlaceResult, rescheduleLater []*delayedRescheduleInfo, requiresCanaries bool) bool { complete := len(destructive)+len(inplace)+len(place)+len(migrate)+len(rescheduleNow)+len(rescheduleLater) == 0 && !requiresCanaries @@ -844,6 +844,7 @@ func (a *allocReconciler) isDeploymentComplete(groupName string, destructive, in // Final check to see if the deployment is complete is to ensure everything is healthy var ok bool + var dstate *structs.DeploymentState if dstate, ok = a.deployment.TaskGroups[groupName]; ok { if dstate.HealthyAllocs < helper.IntMax(dstate.DesiredTotal, dstate.DesiredCanaries) || // Make sure we have enough healthy allocs (dstate.DesiredCanaries > 0 && !dstate.Promoted) { // Make sure we are promoted if we have canaries From ebbbedd984ac950b222e4f75b209b7ea1ad9b4a0 Mon Sep 17 00:00:00 2001 From: Luiz Aoqui Date: Mon, 14 Mar 2022 10:58:42 -0400 Subject: [PATCH 61/89] docs: initial docs for the new API features (#12094) --- website/content/api-docs/acl-tokens.mdx | 14 + website/content/api-docs/allocations.mdx | 14 + website/content/api-docs/deployments.mdx | 6 +- website/content/api-docs/evaluations.mdx | 6 +- website/content/api-docs/index.mdx | 478 ++++++++++++++++++++++- website/content/api-docs/jobs.mdx | 14 + website/content/api-docs/volumes.mdx | 14 + 7 
files changed, 538 insertions(+), 8 deletions(-) diff --git a/website/content/api-docs/acl-tokens.mdx b/website/content/api-docs/acl-tokens.mdx index 02663e319..4c08ade21 100644 --- a/website/content/api-docs/acl-tokens.mdx +++ b/website/content/api-docs/acl-tokens.mdx @@ -78,6 +78,20 @@ The table below shows this endpoint's support for have an even number of hexadecimal characters (0-9a-f). This is specified as a query string parameter. +- `next_token` `(string: "")` - This endpoint supports paging. The `next_token` + parameter accepts a string which identifies the next expected ACL token. This + value can be obtained from the `X-Nomad-NextToken` header from the previous + response. + +- `per_page` `(int: 0)` - Specifies a maximum number of ACL tokens to return + for this request. If omitted, the response is not paginated. The value of the + `X-Nomad-NextToken` header of the last response can be used as the + `next_token` of the next request to fetch additional pages. + +- `filter` `(string: "")` - Specifies the [expression](/api-docs#filtering) + used to filter the results. Consider using pagination or a query parameter to + reduce resource used to serve the request. + - `reverse` `(bool: false)` - Specifies the list of returned ACL tokens should be sorted in the reverse order. By default ACL tokens are returned sorted in chronological order (older ACL tokens first), or in lexicographical order by diff --git a/website/content/api-docs/allocations.mdx b/website/content/api-docs/allocations.mdx index 21303d274..02f07fe95 100644 --- a/website/content/api-docs/allocations.mdx +++ b/website/content/api-docs/allocations.mdx @@ -31,6 +31,20 @@ The table below shows this endpoint's support for even number of hexadecimal characters (0-9a-f). This is specified as a query string parameter. +- `next_token` `(string: "")` - This endpoint supports paging. The `next_token` + parameter accepts a string which identifies the next expected allocation. + This value can be obtained from the `X-Nomad-NextToken` header from the + previous response. + +- `per_page` `(int: 0)` - Specifies a maximum number of allocations to return + for this request. If omitted, the response is not paginated. The value of the + `X-Nomad-NextToken` header of the last response can be used as the + `next_token` of the next request to fetch additional pages. + +- `filter` `(string: "")` - Specifies the [expression](/api-docs#filtering) + used to filter the results. Consider using pagination or a query parameter to + reduce resource used to serve the request. + - `namespace` `(string: "default")` - Specifies the namespace to search. Specifying `*` would return all allocations across all the authorized namespaces. diff --git a/website/content/api-docs/deployments.mdx b/website/content/api-docs/deployments.mdx index c8f359b0f..405b1164c 100644 --- a/website/content/api-docs/deployments.mdx +++ b/website/content/api-docs/deployments.mdx @@ -46,9 +46,9 @@ The table below shows this endpoint's support for used as the `last_token` of the next request to fetch additional pages. -- `filter` `(string: "")` - Specifies the expression used to filter the query - results. Consider using pagination or a query parameter to reduce resource - used to serve the request. +- `filter` `(string: "")` - Specifies the [expression](/api-docs#filtering) + used to filter the results. Consider using pagination or a query parameter to + reduce resource used to serve the request. 
- `reverse` `(bool: false)` - Specifies the list of returned deployments should
  be sorted in the reverse order. By default deployments are returned sorted in
diff --git a/website/content/api-docs/evaluations.mdx b/website/content/api-docs/evaluations.mdx
index 9a33d469b..6cb2f34d4 100644
--- a/website/content/api-docs/evaluations.mdx
+++ b/website/content/api-docs/evaluations.mdx
@@ -42,9 +42,9 @@ The table below shows this endpoint's support for
   used as the `last_token` of the next request to fetch additional pages.
 
-- `filter` `(string: "")` - Specifies the expression used to filter the query
-  results. Consider using pagination or a query parameter to reduce resource
-  used to serve the request.
+- `filter` `(string: "")` - Specifies the [expression](/api-docs#filtering)
+  used to filter the results. Consider using pagination or a query parameter to
+  reduce resource used to serve the request.
 
 - `job` `(string: "")` - Filter the list of evaluations to a specific job ID.
 
diff --git a/website/content/api-docs/index.mdx b/website/content/api-docs/index.mdx
index 5f5cf579f..997f3afaf 100644
--- a/website/content/api-docs/index.mdx
+++ b/website/content/api-docs/index.mdx
@@ -92,10 +92,484 @@ query parameter. Prior to Nomad 1.0 namespaces were Enterprise-only.
 Here is an example using curl:
 
 ```shell-session
-$ curl \
-  https://localhost:4646/v1/jobs?namespace=qa
+$ curl https://localhost:4646/v1/jobs?namespace=qa
 ```
 
+## Filtering
+
+Filter expressions refine data queries for some API listing endpoints, as
+notated in the individual API endpoints documentation.
+
+To create a filter expression, you will write one or more expressions. Each
+expression has matching operators composed of selectors and values.
+
+Filtering is executed on the Nomad server, before data is returned, reducing
+the network load. To pass a filter expression to Nomad, use the `filter` query
+parameter with the URL encoded expression when sending requests to HTTP API
+endpoints that support it.
+
+```shell-session
+$ curl --get https://localhost:4646/v1/<endpoint> --data-urlencode 'filter=<filter expression>'
+```
+
+Some endpoints may have other query parameters that are used for filtering, but
+they can't be used with the `filter` query parameter. Doing so will result in a
+`400` status error response. These query parameters are usually backed by a
+database index, so they may be preferable to an equivalent simple `filter`
+expression due to better resource usage and performance.
+
+### Creating Expressions
+
+A single expression is a matching operator with a selector and value and they
+are written in plain text format. Boolean logic and parenthesization are
+supported. In general, whitespace is ignored, except within literal strings.
+
+#### Matching Operators
+
+All matching operators use a selector or value to choose what data should be
+matched. Each endpoint that supports filtering accepts a potentially
+different list of selectors and is detailed in the API documentation for
+those endpoints.
+
+```hcl
+// Equality & Inequality checks
+<Selector> == "<Value>"
+<Selector> != "<Value>"
+
+// Emptiness checks
+<Selector> is empty
+<Selector> is not empty
+
+// Contains checks or Substring Matching
+"<Value>" in <Selector>
+"<Value>" not in <Selector>
+<Selector> contains "<Value>"
+<Selector> not contains "<Value>"
+
+// Regular Expression Matching
+<Selector> matches "<Regular Expression>"
+<Selector> not matches "<Regular Expression>"
+```
+
+#### Selectors
+
+Selectors are used by matching operators to create an expression. They are
+defined by a `.` separated list of names. Each name must start with an ASCII
+letter and can contain ASCII letters, numbers, and underscores.
When part of
+the selector references a map value it may be expressed using the form
+`["<map key name>"]` instead of `.<map key name>`. This allows the possibility
+of using map keys that are not valid selectors in and of themselves.
+
+```hcl
+// selects the `cache` key within the `TaskGroups` mapping for the
+// /v1/deployments endpoint
+TaskGroups.cache
+
+// Also selects the `cache` key for the same endpoint
+TaskGroups["cache"]
+```
+
+#### Values
+
+Values are used by matching operators to create an expression. Values can be
+any valid selector, a number, or a string. It is best practice to quote values.
+Numbers can be base 10 integers or floating point numbers.
+
+When quoting strings, they may either be enclosed in double quotes or
+backticks. When enclosed in backticks they are treated as raw strings and
+escape sequences such as `\n` will not be expanded.
+
+### Connecting Expressions
+
+There are several methods for connecting expressions, including:
+
+- logical `or`
+- logical `and`
+- logical `not`
+- grouping with parentheses
+- matching expressions
+
+```hcl
+// Logical Or - evaluates to true if either sub-expression does
+<Expression 1> or <Expression 2>
+
+// Logical And - evaluates to true if both sub-expressions do
+<Expression 1> and <Expression 2>
+
+// Logical Not - evaluates to true if the sub-expression does not
+not <Expression 1>
+
+// Grouping - Overrides normal precedence rules
+( <Expression 1> )
+
+// Inspects data to check for a match
+<Matching expression>
+```
+
+Standard operator precedence can be expected for the various forms. For
+example, the following two expressions would be equivalent.
+
+```hcl
+<Expression 1> and not <Expression 2> or <Expression 3>
+
+( <Expression 1> and (not <Expression 2> )) or <Expression 3>
+```
+
+### Filter Utilization
+
+Generally, only the main object is filtered. When filtering for an item within
+an array that is not at the top level, the entire array that contains the item
+will be returned. This is usually the outermost object of a response, but in
+some cases the filtering is performed on an object embedded within the results.
+
+#### Performance
+
+Filters are executed on the servers and therefore will consume some amount
+of CPU time on the server. For non-stale queries this means that the filter
+is executed on the leader.
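+
+For example, a query that tolerates stale data can be served by a follower
+rather than the leader. This is a minimal sketch, assuming the list endpoint
+honors the `stale` query parameter and that `Status` is a valid selector for
+the jobs list endpoint:
+
+```shell-session
+$ curl --get https://localhost:4646/v1/jobs \
+    --data-urlencode 'stale=true' \
+    --data-urlencode 'filter=Status == "pending"'
+```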
+ +#### Filtering Examples + +##### Jobs API + +Command (Unfiltered) + +```shell-session +$ curl --request GET https://localhost:4646/v1/jobs +``` + +Response (Unfiltered) + +```json +[ + { + "CreateIndex": 52, + "Datacenters": [ + "dc1", + "dc2" + ], + "ID": "countdash", + "JobModifyIndex": 56, + "JobSummary": { + "Children": { + "Dead": 0, + "Pending": 0, + "Running": 0 + }, + "CreateIndex": 52, + "JobID": "countdash", + "ModifyIndex": 55, + "Namespace": "default", + "Summary": { + "api": { + "Complete": 0, + "Failed": 0, + "Lost": 0, + "Queued": 1, + "Running": 0, + "Starting": 0 + }, + "dashboard": { + "Complete": 0, + "Failed": 0, + "Lost": 0, + "Queued": 1, + "Running": 0, + "Starting": 0 + } + } + }, + "ModifyIndex": 56, + "Multiregion": null, + "Name": "countdash", + "Namespace": "default", + "ParameterizedJob": false, + "ParentID": "", + "Periodic": false, + "Priority": 50, + "Status": "pending", + "StatusDescription": "", + "Stop": false, + "SubmitTime": 1645230445788556000, + "Type": "service" + }, + { + "CreateIndex": 42, + "Datacenters": [ + "dc1" + ], + "ID": "example", + "JobModifyIndex": 42, + "JobSummary": { + "Children": { + "Dead": 0, + "Pending": 0, + "Running": 0 + }, + "CreateIndex": 42, + "JobID": "example", + "ModifyIndex": 46, + "Namespace": "default", + "Summary": { + "cache": { + "Complete": 0, + "Failed": 0, + "Lost": 0, + "Queued": 0, + "Running": 1, + "Starting": 0 + } + } + }, + "ModifyIndex": 49, + "Multiregion": null, + "Name": "example", + "Namespace": "default", + "ParameterizedJob": false, + "ParentID": "", + "Periodic": false, + "Priority": 50, + "Status": "running", + "StatusDescription": "", + "Stop": false, + "SubmitTime": 1645230403921889000, + "Type": "service" + } +] +``` + +Command (Filtered) + +```shell +curl --get https://localhost:4646/v1/jobs \ + --data-urlencode 'filter=Datacenters contains "dc2"' +``` + +Response (Filtered) + +```json +[ + { + "CreateIndex": 52, + "Datacenters": [ + "dc1", + "dc2" + ], + "ID": "countdash", + "JobModifyIndex": 56, + "JobSummary": { + "Children": { + "Dead": 0, + "Pending": 0, + "Running": 0 + }, + "CreateIndex": 52, + "JobID": "countdash", + "ModifyIndex": 55, + "Namespace": "default", + "Summary": { + "api": { + "Complete": 0, + "Failed": 0, + "Lost": 0, + "Queued": 1, + "Running": 0, + "Starting": 0 + }, + "dashboard": { + "Complete": 0, + "Failed": 0, + "Lost": 0, + "Queued": 1, + "Running": 0, + "Starting": 0 + } + } + }, + "ModifyIndex": 56, + "Multiregion": null, + "Name": "countdash", + "Namespace": "default", + "ParameterizedJob": false, + "ParentID": "", + "Periodic": false, + "Priority": 50, + "Status": "pending", + "StatusDescription": "", + "Stop": false, + "SubmitTime": 1645230445788556000, + "Type": "service" + } +] +``` + +##### Deployments API + +Command (Unfiltered) + +```shell-session +$ curl --request GET https://localhost:4646/v1/deployments +``` + +Response (Unfiltered) + +```json +[ + { + "CreateIndex": 54, + "EvalPriority": 50, + "ID": "58fd0616-ce64-d14b-6917-03d0ab5af67e", + "IsMultiregion": false, + "JobCreateIndex": 52, + "JobID": "countdash", + "JobModifyIndex": 52, + "JobSpecModifyIndex": 52, + "JobVersion": 0, + "ModifyIndex": 59, + "Namespace": "default", + "Status": "cancelled", + "StatusDescription": "Cancelled due to newer version of job", + "TaskGroups": { + "dashboard": { + "AutoPromote": false, + "AutoRevert": false, + "DesiredCanaries": 0, + "DesiredTotal": 1, + "HealthyAllocs": 0, + "PlacedAllocs": 0, + "PlacedCanaries": null, + "ProgressDeadline": 600000000000, + 
"Promoted": false, + "RequireProgressBy": null, + "UnhealthyAllocs": 0 + }, + "api": { + "AutoPromote": false, + "AutoRevert": false, + "DesiredCanaries": 0, + "DesiredTotal": 1, + "HealthyAllocs": 0, + "PlacedAllocs": 0, + "PlacedCanaries": null, + "ProgressDeadline": 600000000000, + "Promoted": false, + "RequireProgressBy": null, + "UnhealthyAllocs": 0 + } + } + }, + { + "CreateIndex": 43, + "EvalPriority": 50, + "ID": "1f18b48c-b33b-8e96-5640-71e3f3000242", + "IsMultiregion": false, + "JobCreateIndex": 42, + "JobID": "example", + "JobModifyIndex": 42, + "JobSpecModifyIndex": 42, + "JobVersion": 0, + "ModifyIndex": 49, + "Namespace": "default", + "Status": "successful", + "StatusDescription": "Deployment completed successfully", + "TaskGroups": { + "cache": { + "AutoPromote": false, + "AutoRevert": false, + "DesiredCanaries": 0, + "DesiredTotal": 1, + "HealthyAllocs": 1, + "PlacedAllocs": 1, + "PlacedCanaries": null, + "ProgressDeadline": 600000000000, + "Promoted": false, + "RequireProgressBy": "2022-02-18T19:36:54.421823-05:00", + "UnhealthyAllocs": 0 + } + } + } +] +``` + +Command (Filtered) + +```shell +curl --get https://localhost:4646/v1/deployments \ + --data-urlencode 'filter=Status != "successful"' +``` + +Response (Filtered) + +```json +[ + { + "CreateIndex": 54, + "EvalPriority": 50, + "ID": "58fd0616-ce64-d14b-6917-03d0ab5af67e", + "IsMultiregion": false, + "JobCreateIndex": 52, + "JobID": "countdash", + "JobModifyIndex": 52, + "JobSpecModifyIndex": 52, + "JobVersion": 0, + "ModifyIndex": 59, + "Namespace": "default", + "Status": "cancelled", + "StatusDescription": "Cancelled due to newer version of job", + "TaskGroups": { + "dashboard": { + "AutoPromote": false, + "AutoRevert": false, + "DesiredCanaries": 0, + "DesiredTotal": 1, + "HealthyAllocs": 0, + "PlacedAllocs": 0, + "PlacedCanaries": null, + "ProgressDeadline": 600000000000, + "Promoted": false, + "RequireProgressBy": null, + "UnhealthyAllocs": 0 + }, + "api": { + "AutoPromote": false, + "AutoRevert": false, + "DesiredCanaries": 0, + "DesiredTotal": 1, + "HealthyAllocs": 0, + "PlacedAllocs": 0, + "PlacedCanaries": null, + "ProgressDeadline": 600000000000, + "Promoted": false, + "RequireProgressBy": null, + "UnhealthyAllocs": 0 + } + } + } +] +``` + +## Pagination + +Some list endpoints support partial results to limit the amount of data +retrieved. The returned list is split into pages and the page size can be set +using the `per_page` query parameter with a positive integer value. + +If more data is available past the page requested, the response will contain an +HTTP header named `X-Nomad-Nexttoken` with the value of the next item to be +retrieved. This value can then be set as a query parameter called `next_token` +in a follow-up request to retrieve the next page. + +When the last page is reached, the `X-Nomad-Nexttoken` HTTP header will not +be present in the response, indicating that there is nothing more to return. + +## Ordering + +List results are usually returned in ascending order by their internal key, +such as their `ID`. Some endpoints may return data sorted by their +`CreateIndex` value, which roughly corelates to their creation order. The +result order may be reversed using the `reverse=true` query parameter when +supported by the endpoint. + ## Blocking Queries Many endpoints in Nomad support a feature known as "blocking queries". 
A
diff --git a/website/content/api-docs/jobs.mdx b/website/content/api-docs/jobs.mdx
index bb265cfbb..7cf1d3604 100644
--- a/website/content/api-docs/jobs.mdx
+++ b/website/content/api-docs/jobs.mdx
@@ -29,6 +29,20 @@ The table below shows this endpoint's support for

 - `prefix` `(string: "")` - Specifies a string to filter jobs on based on an
   index prefix. This is specified as a query string parameter.

+- `next_token` `(string: "")` - This endpoint supports paging. The `next_token`
+  parameter accepts a string which identifies the next expected job. This value
+  can be obtained from the `X-Nomad-NextToken` header from the previous
+  response.
+
+- `per_page` `(int: 0)` - Specifies a maximum number of jobs to return for this
+  request. If omitted, the response is not paginated. The value of the
+  `X-Nomad-NextToken` header of the last response can be used as the
+  `next_token` of the next request to fetch additional pages.
+
+- `filter` `(string: "")` - Specifies the [expression](/api-docs#filtering)
+  used to filter the results. Consider using pagination or a query parameter to
+  reduce resources used to serve the request.
+
 - `namespace` `(string: "default")` - Specifies the target namespace.
   Specifying `*` would return all jobs across all the authorized namespaces.

diff --git a/website/content/api-docs/volumes.mdx b/website/content/api-docs/volumes.mdx
index b0175ccf9..cd810dc3f 100644
--- a/website/content/api-docs/volumes.mdx
+++ b/website/content/api-docs/volumes.mdx
@@ -41,6 +41,20 @@ The table below shows this endpoint's support for
   the prefix must have an even number of hexadecimal characters (0-9a-f). This
   is specified as a query string parameter.

+- `next_token` `(string: "")` - This endpoint supports paging. The `next_token`
+  parameter accepts a string which identifies the next expected volume. This
+  value can be obtained from the `X-Nomad-NextToken` header from the previous
+  response.
+
+- `per_page` `(int: 0)` - Specifies a maximum number of volumes to return for
+  this request. If omitted, the response is not paginated. The value of the
+  `X-Nomad-NextToken` header of the last response can be used as the
+  `next_token` of the next request to fetch additional pages.
+
+- `filter` `(string: "")` - Specifies the [expression](/api-docs#filtering)
+  used to filter the results. Consider using pagination or a query parameter to
+  reduce resources used to serve the request.
+
 ### Sample Request

 ```shell-session

From d371f456dc7e09fbc6da4cf9fb6bf462fddeed7c Mon Sep 17 00:00:00 2001
From: Tim Gross
Date: Mon, 14 Mar 2022 15:49:08 -0400
Subject: [PATCH 62/89] docs: clarify `restart` inheritance and add examples
 (#12275)

Clarify the behavior of `restart` inheritance with respect to Connect
sidecar tasks. Remove incorrect language about the scheduler being
involved in restart decisions. Try to make the `delay` mode
documentation more clear, and provide examples of delay vs fail.
---
 .../docs/job-specification/restart.mdx | 58 ++++++++++++++++---
 1 file changed, 50 insertions(+), 8 deletions(-)

diff --git a/website/content/docs/job-specification/restart.mdx b/website/content/docs/job-specification/restart.mdx
index 3b9a11b93..25128302c 100644
--- a/website/content/docs/job-specification/restart.mdx
+++ b/website/content/docs/job-specification/restart.mdx
@@ -28,8 +28,9 @@ job "docs" {
 ```

 If specified at the group level, the configuration is inherited by all
-tasks in the group. If present on the task, the policy is merged with
-the restart policy from the encapsulating task group.
+tasks in the group, including any [sidecar tasks][sidecar_task]. If
+also present on the task, the policy is merged with the restart policy
+from the encapsulating task group.

 For example, assuming that the task group restart policy is:
@@ -61,6 +62,10 @@ restart {
 }
 ```

+Because sidecar tasks don't accept a `restart` block, it's recommended
+that you set the `restart` for jobs with sidecar tasks at the task
+level, so that the Connect sidecar can inherit the default `restart`.
+
 ## `restart` Parameters

 - `attempts` `(int: <varies>)` - Specifies the number of restarts allowed in the
@@ -119,10 +124,47 @@ restart {
 }
 ```

-- `"delay"` - Instructs the scheduler to delay the next restart until the next
-  `interval` is reached.
+- `"delay"` - Instructs the client to wait until another `interval`
+  before restarting the task.

-- `"fail"` - Instructs the scheduler to not attempt to restart the task on
-  failure. This is the default behavior. This mode is useful for
-  non-idempotent jobs which are unlikely to succeed after a few failures.
-  Failed jobs will be restarted according to the
-  [`reschedule`](/docs/job-specification/reschedule) stanza.
+- `"fail"` - Instructs the client not to attempt to restart the task
+  once the number of `attempts` has been used. This is the default
+  behavior. This mode is useful for non-idempotent jobs which are
+  unlikely to succeed after a few failures. The allocation will be
+  marked as failed and the scheduler will attempt to reschedule the
+  allocation according to the
+  [`reschedule`] stanza.
+
+### `restart` Examples
+
+With the following `restart` block, a failing task will restart 3
+times with 15 seconds between attempts, and then wait 10 minutes
+before making another 3 attempts. The task restart will never fail
+the entire allocation.
+
+```hcl
+restart {
+  attempts = 3
+  delay    = "15s"
+  interval = "10m"
+  mode     = "delay"
+}
+```
+
+With the following `restart` block, a task that fails after 1
+minute, after 2 minutes, and after 3 minutes will be restarted each
+time. If it fails again before 10 minutes, the entire allocation will
+be marked as failed and the scheduler will follow the group's
+[`reschedule`] specification, possibly resulting in a new evaluation.
+
+```hcl
+restart {
+  attempts = 3
+  delay    = "15s"
+  interval = "10m"
+  mode     = "fail"
+}
+```
+
+[sidecar_task]: /docs/job-specification/sidecar_task
+[`reschedule`]: /docs/job-specification/reschedule

From dfd0beead709775fcf545341aeb5e257718ed11e Mon Sep 17 00:00:00 2001
From: Luiz Aoqui
Date: Tue, 15 Mar 2022 10:41:07 -0400
Subject: [PATCH 63/89] fix alloc list test (#12297)

The alloc list test with pagination was creating allocs before the
target namespace existed. This works in OSS but fails in ENT because
quotas are checked before the alloc can be created, so the namespace
must exist beforehand.
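A minimal sketch of the ordering this fix enforces, using the identifiers from
the diff below (the helpers are Nomad's internal test utilities, so treat the
signatures as illustrative rather than authoritative):

```go
// Upsert the namespace first: Nomad ENT checks quotas, and therefore
// resolves the namespace, at allocation-creation time.
require.NoError(t, state.UpsertNamespaces(1099, []*structs.Namespace{
	{Name: "non-default"},
}))

// Only after the namespace exists in state can allocations that
// target it be upserted.
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, allocsInTx))
```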
--- nomad/alloc_endpoint_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nomad/alloc_endpoint_test.go b/nomad/alloc_endpoint_test.go index 9efdc06bb..04e6c5e86 100644 --- a/nomad/alloc_endpoint_test.go +++ b/nomad/alloc_endpoint_test.go @@ -126,6 +126,10 @@ func TestAllocEndpoint_List_PaginationFiltering(t *testing.T) { state := s1.fsm.State() + require.NoError(t, state.UpsertNamespaces(1099, []*structs.Namespace{ + {Name: "non-default"}, + })) + var allocs []*structs.Allocation for i, m := range mocks { allocsInTx := []*structs.Allocation{} @@ -146,10 +150,6 @@ func TestAllocEndpoint_List_PaginationFiltering(t *testing.T) { require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, index, allocsInTx)) } - require.NoError(t, state.UpsertNamespaces(1099, []*structs.Namespace{ - {Name: "non-default"}, - })) - aclToken := mock.CreatePolicyAndToken(t, state, 1100, "test-valid-read", mock.NamespacePolicy("*", "read", nil), From b24295799078a760d1f829cd5b779705a29ef7b8 Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Tue, 15 Mar 2022 07:42:43 -0500 Subject: [PATCH 64/89] ci: swap ci parallelization for unconstrained gomaxprocs --- .circleci/config.yml | 3 +- acl/acl_test.go | 19 ++ acl/policy_test.go | 5 + ci/slow.go | 25 ++ client/acl_test.go | 11 +- client/agent_endpoint_test.go | 20 +- client/alloc_endpoint_test.go | 39 ++- client/alloc_watcher_e2e_test.go | 3 +- client/allocdir/alloc_dir_test.go | 20 ++ client/allocdir/fs_linux_test.go | 5 + client/allocdir/task_dir_test.go | 9 +- client/allochealth/tracker_test.go | 15 +- client/allocrunner/alloc_runner_test.go | 39 ++- client/allocrunner/alloc_runner_unix_test.go | 7 +- .../allocrunner/consul_grpc_sock_hook_test.go | 7 +- .../allocrunner/consul_http_sock_hook_test.go | 5 +- client/allocrunner/csi_hook_test.go | 2 + client/allocrunner/groupservice_hook_test.go | 11 +- client/allocrunner/health_hook_test.go | 17 +- client/allocrunner/network_hook_test.go | 3 + .../allocrunner/network_manager_linux_test.go | 3 + client/allocrunner/networking_cni_test.go | 5 + .../allocrunner/task_hook_coordinator_test.go | 22 +- .../taskrunner/artifact_hook_test.go | 5 +- .../taskrunner/connect_native_hook_test.go | 27 +- .../taskrunner/device_hook_test.go | 5 +- .../taskrunner/dispatch_hook_test.go | 7 +- .../taskrunner/envoy_bootstrap_hook_test.go | 25 +- .../taskrunner/envoy_version_hook_test.go | 21 +- client/allocrunner/taskrunner/errors_test.go | 5 +- .../taskrunner/logmon_hook_test.go | 5 +- .../taskrunner/logmon_hook_unix_test.go | 5 +- .../taskrunner/restarts/restarts_test.go | 24 +- .../taskrunner/script_check_hook_test.go | 13 +- .../taskrunner/service_hook_test.go | 2 + .../allocrunner/taskrunner/sids_hook_test.go | 25 +- .../allocrunner/taskrunner/stats_hook_test.go | 9 +- .../taskrunner/task_runner_test.go | 83 ++--- client/allocrunner/taskrunner/tasklet_test.go | 9 +- .../taskrunner/template/template_test.go | 57 ++-- .../taskrunner/validate_hook_test.go | 5 +- .../taskrunner/volume_hook_test.go | 5 + client/allocwatcher/alloc_watcher_test.go | 12 +- .../allocwatcher/alloc_watcher_unix_test.go | 4 +- .../allocwatcher/group_alloc_watcher_test.go | 6 +- client/client_stats_endpoint_test.go | 6 +- client/client_test.go | 70 ++-- client/config/config_test.go | 29 ++ client/consul/identities_test.go | 5 + client/csi_endpoint_test.go | 21 +- client/devicemanager/manager_test.go | 11 +- client/driver_manager_test.go | 7 +- client/dynamicplugins/registry_test.go | 22 +- client/fingerprint/arch_test.go | 3 + 
client/fingerprint/bridge_linux_test.go | 5 + client/fingerprint/cgroup_test.go | 3 + client/fingerprint/cni_test.go | 3 + client/fingerprint/consul_test.go | 23 +- client/fingerprint/cpu_test.go | 5 + client/fingerprint/env_aws_test.go | 19 ++ client/fingerprint/env_azure_test.go | 7 + client/fingerprint/env_digitalocean_test.go | 5 + client/fingerprint/env_gce_test.go | 7 + client/fingerprint/host_test.go | 3 + client/fingerprint/memory_test.go | 5 + client/fingerprint/network_test.go | 17 + client/fingerprint/nomad_test.go | 3 + client/fingerprint/signal_test.go | 3 + client/fingerprint/storage_test.go | 3 + client/fingerprint/vault_test.go | 3 + client/fingerprint_manager_test.go | 17 +- client/fs_endpoint_test.go | 47 +-- client/gc_test.go | 33 +- client/heartbeatstop_test.go | 3 +- client/logmon/logmon_test.go | 9 +- .../pluginmanager/csimanager/volume_test.go | 13 +- .../drivermanager/manager_test.go | 11 +- client/pluginmanager/group_test.go | 9 +- client/rpc_test.go | 5 +- client/servers/manager_internal_test.go | 7 + client/servers/manager_test.go | 13 + client/state/db_test.go | 15 +- client/state/upgrade_int_test.go | 3 +- client/state/upgrade_test.go | 11 +- client/stats/cpu_test.go | 5 + client/structs/broadcaster_test.go | 11 +- client/taskenv/env_test.go | 43 ++- client/taskenv/network_test.go | 3 + client/taskenv/services_test.go | 11 +- client/taskenv/util_test.go | 17 +- client/util_test.go | 78 ----- client/vaultclient/vaultclient_test.go | 20 +- command/acl_bootstrap_test.go | 7 +- command/acl_policy_apply_test.go | 3 +- command/acl_policy_delete_test.go | 3 +- command/acl_policy_info_test.go | 3 +- command/acl_policy_list_test.go | 3 +- command/acl_token_create_test.go | 3 +- command/acl_token_delete_test.go | 3 +- command/acl_token_info_test.go | 3 +- command/acl_token_list_test.go | 3 +- command/acl_token_self_test.go | 3 +- command/acl_token_update_test.go | 4 +- command/agent/acl_endpoint_test.go | 23 +- command/agent/agent_endpoint_test.go | 55 +-- command/agent/agent_test.go | 57 ++-- command/agent/alloc_endpoint_test.go | 35 +- command/agent/command_test.go | 12 +- command/agent/config_parse_test.go | 15 +- command/agent/config_test.go | 45 ++- command/agent/consul/check_watcher_test.go | 17 +- command/agent/consul/connect_proxies_test.go | 3 + command/agent/consul/connect_test.go | 23 +- command/agent/consul/group_test.go | 3 + command/agent/consul/int_test.go | 3 + .../agent/consul/namespaces_client_test.go | 7 +- command/agent/consul/self_test.go | 5 +- command/agent/consul/service_client_test.go | 21 +- command/agent/consul/unit_test.go | 61 +++- command/agent/consul/version_checker_test.go | 5 +- command/agent/csi_endpoint_test.go | 13 +- command/agent/deployment_endpoint_test.go | 17 +- command/agent/eval_endpoint_test.go | 12 +- command/agent/event_endpoint_test.go | 8 +- command/agent/fs_endpoint_test.go | 39 +-- command/agent/helpers_test.go | 5 +- command/agent/host/host_test.go | 3 + command/agent/http_stdlog_test.go | 5 + command/agent/http_test.go | 67 ++-- command/agent/job_endpoint_test.go | 108 +++--- command/agent/keyring_test.go | 5 +- command/agent/log_file_test.go | 13 +- command/agent/log_levels_test.go | 4 +- command/agent/metrics_endpoint_test.go | 11 +- command/agent/monitor/monitor_test.go | 5 +- command/agent/namespace_endpoint_test.go | 11 +- command/agent/node_endpoint_test.go | 17 +- command/agent/operator_endpoint_test.go | 24 +- command/agent/pprof/pprof_test.go | 9 + command/agent/region_endpoint_test.go | 4 +- 
command/agent/retry_join_test.go | 13 +- command/agent/scaling_endpoint_test.go | 10 +- command/agent/search_endpoint_test.go | 41 +-- command/agent/stats_endpoint_test.go | 5 +- command/agent/status_endpoint_test.go | 6 +- command/agent/syslog_test.go | 3 +- command/agent/system_endpoint_test.go | 6 +- command/agent_info_test.go | 11 +- command/agent_monitor_test.go | 5 +- command/alloc_exec_test.go | 7 +- command/alloc_fs_test.go | 7 +- command/alloc_logs_test.go | 7 +- command/alloc_restart_test.go | 7 + command/alloc_signal_test.go | 9 +- command/alloc_status_test.go | 19 +- command/alloc_stop_test.go | 5 +- command/check_test.go | 4 +- command/config_validate_test.go | 9 +- command/data_format_test.go | 6 +- command/deployment_fail_test.go | 7 +- command/deployment_list_test.go | 5 +- command/deployment_pause_test.go | 7 +- command/deployment_promote_test.go | 7 +- command/deployment_resume_test.go | 7 +- command/deployment_status_test.go | 7 +- command/deployment_unblock_test.go | 7 +- command/eval_list_test.go | 2 + command/eval_status_test.go | 7 +- command/event_test.go | 3 +- command/helper_devices_test.go | 12 + command/helpers_test.go | 25 +- command/integration_test.go | 6 +- command/job_allocs_test.go | 14 +- command/job_deployments_test.go | 15 +- command/job_dispatch_test.go | 10 +- command/job_eval_test.go | 12 +- command/job_history_test.go | 10 +- command/job_init_test.go | 9 +- command/job_inspect_test.go | 7 +- command/job_periodic_force_test.go | 13 +- command/job_plan_test.go | 11 +- command/job_promote_test.go | 7 +- command/job_revert_test.go | 7 +- command/job_run_test.go | 11 +- command/job_scale_test.go | 5 +- command/job_scaling_events_test.go | 3 +- command/job_status_test.go | 13 +- command/job_stop_test.go | 7 +- command/job_validate_test.go | 11 +- command/license_get_test.go | 5 +- command/meta_test.go | 5 +- command/metrics_test.go | 3 +- command/monitor_test.go | 11 +- command/namespace_apply_test.go | 7 +- command/namespace_delete_test.go | 9 +- command/namespace_inspect_test.go | 11 +- command/namespace_list_test.go | 5 +- command/namespace_status_test.go | 13 +- command/node_config_test.go | 7 +- command/node_drain_test.go | 13 +- command/node_eligibility_test.go | 7 +- command/node_status_test.go | 13 +- command/operator_api_test.go | 5 + command/operator_autopilot_get_test.go | 5 +- command/operator_autopilot_set_test.go | 5 +- command/operator_autopilot_test.go | 3 +- command/operator_debug_test.go | 39 ++- command/operator_keygen_test.go | 4 +- command/operator_raft_list_test.go | 5 +- command/operator_raft_remove_test.go | 9 +- command/operator_raft_test.go | 3 +- command/operator_snapshot_inspect_test.go | 9 +- command/operator_snapshot_restore_test.go | 5 +- command/operator_snapshot_save_test.go | 5 +- command/operator_test.go | 3 +- command/plugin_status_test.go | 7 +- command/quota_apply_test.go | 5 +- command/quota_delete_test.go | 9 +- command/quota_init_test.go | 7 +- command/quota_inspect_test.go | 9 +- command/quota_list_test.go | 7 +- command/quota_status_test.go | 9 +- command/recommendation_apply_test.go | 10 +- command/recommendation_dismiss_test.go | 5 +- command/recommendation_info_test.go | 5 +- command/recommendation_list_test.go | 8 +- command/scaling_policy_info_test.go | 3 +- command/scaling_policy_list_test.go | 8 +- command/scaling_policy_test.go | 3 + command/sentinel_apply_test.go | 3 +- command/sentinel_delete_test.go | 3 +- command/sentinel_list_test.go | 3 +- command/sentinel_read_test.go | 3 +- 
command/server_force_leave_test.go | 3 +- command/server_join_test.go | 3 +- command/server_members_test.go | 9 +- command/status_test.go | 19 +- command/system_gc_test.go | 5 +- command/system_reconcile_summaries_test.go | 5 +- command/system_reconcile_test.go | 3 +- command/system_test.go | 3 +- command/ui_test.go | 3 +- command/version_test.go | 3 +- command/volume_register_test.go | 5 +- command/volume_status_test.go | 7 +- contributing/testing.md | 24 ++ drivers/docker/config_test.go | 19 ++ drivers/docker/coordinator_test.go | 10 +- drivers/docker/docklog/docker_logger_test.go | 11 +- drivers/docker/driver_linux_test.go | 12 +- drivers/docker/driver_test.go | 212 ++++-------- drivers/docker/driver_unix_test.go | 44 ++- drivers/docker/fingerprint_test.go | 6 +- drivers/docker/network_test.go | 4 +- drivers/docker/ports_test.go | 6 +- drivers/docker/progress_test.go | 2 + drivers/docker/reconciler_test.go | 7 + drivers/docker/stats_test.go | 7 +- drivers/docker/utils_test.go | 3 + drivers/docker/utils_unix_test.go | 5 + drivers/exec/driver_test.go | 31 +- drivers/exec/driver_unix_test.go | 17 +- drivers/java/driver_test.go | 31 +- drivers/java/utils_test.go | 6 + drivers/mock/utils_test.go | 3 + drivers/qemu/driver_test.go | 32 +- drivers/rawexec/driver_test.go | 19 +- drivers/rawexec/driver_unix_test.go | 24 +- drivers/shared/capabilities/defaults_test.go | 9 + drivers/shared/capabilities/set_test.go | 19 +- drivers/shared/eventer/eventer_test.go | 5 +- .../shared/executor/executor_linux_test.go | 24 +- drivers/shared/executor/executor_test.go | 62 ++-- drivers/shared/executor/pid_collector_test.go | 5 +- helper/boltdd/boltdd_test.go | 11 +- helper/envoy/envoy_test.go | 3 +- helper/flags/autopilot_flags_test.go | 5 +- helper/flags/flag_test.go | 7 +- helper/freeport/freeport_test.go | 2 +- helper/pluginutils/hclspecutils/dec_test.go | 21 +- helper/pluginutils/hclutils/testing.go | 7 +- helper/pluginutils/loader/loader_test.go | 45 +-- .../pluginutils/singleton/singleton_test.go | 11 +- helper/raftutil/msgpack_test.go | 5 + helper/raftutil/state_test.go | 3 +- helper/tlsutil/config_test.go | 70 ++++ helper/tlsutil/generate_test.go | 11 +- internal/testing/apitests/jobs_test.go | 5 +- internal/testing/apitests/nodes_test.go | 5 +- .../apitests/operator_autopilot_test.go | 10 +- internal/testing/apitests/operator_test.go | 5 +- .../testing/apitests/streamingsync_test.go | 5 + internal/testing/apitests/structsync_test.go | 9 + internal/testing/apitests/tasks_test.go | 3 + jobspec/parse_test.go | 9 + jobspec/utils_test.go | 3 +- jobspec2/parse_test.go | 38 +++ lib/circbufwriter/writer_test.go | 7 + lib/cpuset/cpuset_test.go | 21 ++ lib/delayheap/delay_heap_test.go | 5 + lib/kheap/score_heap_test.go | 3 + nomad/acl_endpoint_test.go | 55 +-- nomad/acl_test.go | 7 +- nomad/alloc_endpoint_test.go | 36 +- nomad/autopilot_test.go | 14 +- nomad/blocked_evals_stats_test.go | 3 +- nomad/blocked_evals_test.go | 47 +-- nomad/client_agent_endpoint_test.go | 21 +- nomad/client_alloc_endpoint_test.go | 35 +- nomad/client_csi_endpoint_test.go | 39 +-- nomad/client_fs_endpoint_test.go | 41 +-- nomad/client_rpc_test.go | 19 +- nomad/client_stats_endpoint_test.go | 11 +- nomad/consul_oss_test.go | 4 +- nomad/consul_policy_oss_test.go | 3 +- nomad/consul_policy_test.go | 11 +- nomad/consul_test.go | 13 +- nomad/core_sched_test.go | 57 ++-- nomad/csi_endpoint_test.go | 39 ++- nomad/deployment_endpoint_test.go | 47 +-- .../deployments_watcher_test.go | 49 +-- nomad/drainer/drain_heap_test.go | 13 +- 
nomad/drainer/drainer_util_test.go | 5 +- nomad/drainer/draining_node_test.go | 5 +- nomad/drainer/watch_jobs_test.go | 14 +- nomad/drainer/watch_nodes_test.go | 11 +- nomad/drainer_int_test.go | 17 +- nomad/eval_broker_test.go | 45 +-- nomad/eval_endpoint_test.go | 51 +-- nomad/event_endpoint_test.go | 11 +- nomad/fsm_test.go | 152 ++++----- nomad/heartbeat_test.go | 19 +- nomad/job_endpoint_hook_connect_test.go | 38 ++- nomad/job_endpoint_hook_expose_check_test.go | 23 +- nomad/job_endpoint_oss_test.go | 3 +- nomad/job_endpoint_test.go | 220 ++++++------ nomad/job_endpoint_validators_test.go | 7 +- nomad/leader_test.go | 59 +++- nomad/namespace_endpoint_test.go | 33 +- nomad/node_endpoint_test.go | 112 ++++--- nomad/operator_endpoint_test.go | 29 +- nomad/periodic_endpoint_test.go | 7 +- nomad/periodic_test.go | 47 +-- nomad/plan_apply_pool_test.go | 5 +- nomad/plan_apply_test.go | 33 +- nomad/plan_endpoint_test.go | 5 +- nomad/plan_normalization_test.go | 3 + nomad/plan_queue_test.go | 11 +- nomad/regions_endpoint_test.go | 3 +- nomad/rpc_test.go | 35 +- nomad/scaling_endpoint_test.go | 17 +- nomad/search_endpoint_test.go | 79 ++--- nomad/serf_test.go | 15 +- nomad/server_test.go | 31 +- nomad/state/autopilot_test.go | 5 + nomad/state/deployment_events_test.go | 3 +- nomad/state/events_test.go | 33 +- nomad/state/paginator/filter_test.go | 5 +- nomad/state/paginator/paginator_test.go | 6 +- nomad/state/paginator/tokenizer_test.go | 3 + nomad/state/schema_test.go | 7 + nomad/state/state_store_restore_test.go | 37 ++- nomad/state/state_store_test.go | 312 +++++++++--------- nomad/stats_fetcher_test.go | 3 +- nomad/status_endpoint_test.go | 13 +- nomad/stream/event_broker_test.go | 11 + nomad/stream/event_buffer_test.go | 14 +- nomad/stream/ndjson_test.go | 7 +- nomad/stream/subscription_test.go | 22 +- nomad/structs/batch_future_test.go | 4 +- nomad/structs/bitmap_test.go | 4 + nomad/structs/config/audit_test.go | 3 + nomad/structs/config/autopilot_test.go | 4 + nomad/structs/config/consul_test.go | 19 +- nomad/structs/config/limits_test.go | 7 +- nomad/structs/config/plugins_test.go | 5 +- nomad/structs/config/tls_test.go | 13 + nomad/structs/config/ui_test.go | 4 +- nomad/structs/config/vault_test.go | 5 + nomad/structs/connect_test.go | 3 + nomad/structs/consul_oss_test.go | 3 +- nomad/structs/consul_test.go | 7 + nomad/structs/csi_test.go | 31 ++ nomad/structs/devices_test.go | 15 + nomad/structs/diff_test.go | 9 + nomad/structs/errors_test.go | 3 + nomad/structs/funcs_test.go | 33 ++ nomad/structs/network_test.go | 25 ++ nomad/structs/node_class_test.go | 13 + nomad/structs/node_test.go | 3 + nomad/structs/services_test.go | 73 ++-- nomad/structs/structs_periodic_test.go | 5 + nomad/structs/structs_test.go | 209 +++++++++++- nomad/system_endpoint_test.go | 9 +- nomad/timetable_test.go | 7 +- nomad/util_test.go | 11 +- nomad/vault_test.go | 71 ++-- nomad/volumewatcher/volume_watcher_test.go | 4 +- nomad/volumewatcher/volumes_watcher_test.go | 9 +- nomad/worker_test.go | 37 ++- plugins/base/plugin_test.go | 7 +- plugins/csi/client_test.go | 30 ++ plugins/device/plugin_test.go | 21 +- plugins/drivers/testutils/testing_test.go | 15 +- scheduler/annotate_test.go | 11 + scheduler/context_test.go | 19 +- scheduler/device_test.go | 11 + scheduler/feasible_test.go | 71 +++- scheduler/generic_sched_test.go | 137 +++++++- scheduler/preemption_test.go | 10 +- scheduler/reconcile_test.go | 152 ++++++++- scheduler/reconcile_util_test.go | 5 + scheduler/scheduler_sysbatch_test.go | 45 +++ 
scheduler/scheduler_system_test.go | 55 +++ scheduler/select_test.go | 7 + scheduler/spread_test.go | 19 +- scheduler/stack_test.go | 33 ++ scheduler/util_test.go | 59 +++- testutil/slow.go | 15 - 419 files changed, 4816 insertions(+), 2697 deletions(-) create mode 100644 ci/slow.go delete mode 100644 client/util_test.go create mode 100644 contributing/testing.md delete mode 100644 testutil/slow.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 047772df1..c97299c45 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,6 @@ references: # common references common_envs: &common_envs - GOMAXPROCS: 1 NOMAD_SLOW_TEST: 1 GOTESTSUM_JUNITFILE: /tmp/test-reports/results.xml GOTESTSUM_JSONFILE: /tmp/test-reports/testjsonfile.json @@ -520,7 +519,7 @@ executors: working_directory: ~/go/src/github.com/hashicorp/nomad machine: image: *go_machine_image - resource_class: medium + resource_class: large environment: &machine_env <<: *common_envs GOLANG_VERSION: 1.17.5 diff --git a/acl/acl_test.go b/acl/acl_test.go index b819bc8af..2ac22f702 100644 --- a/acl/acl_test.go +++ b/acl/acl_test.go @@ -3,10 +3,13 @@ package acl import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" ) func TestCapabilitySet(t *testing.T) { + ci.Parallel(t) + var cs capabilitySet = make(map[string]struct{}) // Check no capabilities by default @@ -28,6 +31,8 @@ func TestCapabilitySet(t *testing.T) { } func TestMaxPrivilege(t *testing.T) { + ci.Parallel(t) + type tcase struct { Privilege string PrecedenceOver []string @@ -60,6 +65,8 @@ func TestMaxPrivilege(t *testing.T) { } func TestACLManagement(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) // Create management ACL @@ -88,6 +95,8 @@ func TestACLManagement(t *testing.T) { } func TestACLMerge(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) // Merge read + write policy @@ -222,6 +231,8 @@ quota { ` func TestAllowNamespace(t *testing.T) { + ci.Parallel(t) + tests := []struct { Policy string Allow bool @@ -264,6 +275,8 @@ func TestAllowNamespace(t *testing.T) { } func TestWildcardNamespaceMatching(t *testing.T) { + ci.Parallel(t) + tests := []struct { Policy string Allow bool @@ -315,6 +328,8 @@ func TestWildcardNamespaceMatching(t *testing.T) { } func TestWildcardHostVolumeMatching(t *testing.T) { + ci.Parallel(t) + tests := []struct { Policy string Allow bool @@ -365,6 +380,8 @@ func TestWildcardHostVolumeMatching(t *testing.T) { } } func TestACL_matchingCapabilitySet_returnsAllMatches(t *testing.T) { + ci.Parallel(t) + tests := []struct { Policy string NS string @@ -411,6 +428,8 @@ func TestACL_matchingCapabilitySet_returnsAllMatches(t *testing.T) { } func TestACL_matchingCapabilitySet_difference(t *testing.T) { + ci.Parallel(t) + tests := []struct { Policy string NS string diff --git a/acl/policy_test.go b/acl/policy_test.go index 9060147d0..e3a8afad6 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -5,10 +5,13 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" ) func TestParse(t *testing.T) { + ci.Parallel(t) + type tcase struct { Raw string ErrStr string @@ -333,6 +336,8 @@ func TestParse(t *testing.T) { } func TestParse_BadInput(t *testing.T) { + ci.Parallel(t) + inputs := []string{ `namespace "\500" {}`, } diff --git a/ci/slow.go b/ci/slow.go new file mode 100644 index 000000000..5becb90b7 --- /dev/null +++ b/ci/slow.go @@ -0,0 +1,25 @@ +package ci + +import ( + "os" + "strconv" + "testing" +) + +// SkipSlow skips a slow test 
unless NOMAD_SLOW_TEST is set to a true value. +func SkipSlow(t *testing.T, reason string) { + value := os.Getenv("NOMAD_SLOW_TEST") + run, err := strconv.ParseBool(value) + if !run || err != nil { + t.Skipf("Skipping slow test: %s", reason) + } +} + +// Parallel runs t in parallel, unless CI is set to a true value. +func Parallel(t *testing.T) { + value := os.Getenv("CI") + isCI, err := strconv.ParseBool(value) + if !isCI || err != nil { + t.Parallel() + } +} diff --git a/client/acl_test.go b/client/acl_test.go index f076f0aa9..b1cc0a315 100644 --- a/client/acl_test.go +++ b/client/acl_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -13,6 +14,8 @@ import ( ) func TestClient_ACL_resolveTokenValue(t *testing.T) { + ci.Parallel(t) + s1, _, _, cleanupS1 := testACLServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -62,6 +65,8 @@ func TestClient_ACL_resolveTokenValue(t *testing.T) { } func TestClient_ACL_resolvePolicies(t *testing.T) { + ci.Parallel(t) + s1, _, root, cleanupS1 := testACLServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -102,6 +107,8 @@ func TestClient_ACL_resolvePolicies(t *testing.T) { } func TestClient_ACL_ResolveToken_Disabled(t *testing.T) { + ci.Parallel(t) + s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -118,6 +125,8 @@ func TestClient_ACL_ResolveToken_Disabled(t *testing.T) { } func TestClient_ACL_ResolveToken(t *testing.T) { + ci.Parallel(t) + s1, _, _, cleanupS1 := testACLServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -167,7 +176,7 @@ func TestClient_ACL_ResolveToken(t *testing.T) { } func TestClient_ACL_ResolveSecretToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, _, cleanupS1 := testACLServer(t, nil) defer cleanupS1() diff --git a/client/agent_endpoint_test.go b/client/agent_endpoint_test.go index 0c10b4ebb..501f80dd7 100644 --- a/client/agent_endpoint_test.go +++ b/client/agent_endpoint_test.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" sframer "github.com/hashicorp/nomad/client/lib/streamframer" cstructs "github.com/hashicorp/nomad/client/structs" @@ -24,7 +25,8 @@ import ( ) func TestMonitor_Monitor(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) // start server and client @@ -105,7 +107,8 @@ OUTER: } func TestMonitor_Monitor_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) // start server @@ -217,7 +220,8 @@ func TestMonitor_Monitor_ACL(t *testing.T) { // Test that by default with no acl, endpoint is disabled func TestAgentProfile_DefaultDisabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) // start server and client @@ -243,7 +247,8 @@ func TestAgentProfile_DefaultDisabled(t *testing.T) { } func TestAgentProfile(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) // start server and client @@ -290,7 +295,8 @@ func TestAgentProfile(t *testing.T) { } func TestAgentProfile_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) // start server @@ -355,7 +361,7 @@ func TestAgentProfile_ACL(t *testing.T) { } func TestAgentHost(t *testing.T) { - t.Parallel() + ci.Parallel(t) // start server and 
client s1, cleanup := nomad.TestServer(t, nil) @@ -380,7 +386,7 @@ func TestAgentHost(t *testing.T) { } func TestAgentHost_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, root, cleanupS := nomad.TestACLServer(t, nil) defer cleanupS() diff --git a/client/alloc_endpoint_test.go b/client/alloc_endpoint_test.go index 97ceff391..2e51a3702 100644 --- a/client/alloc_endpoint_test.go +++ b/client/alloc_endpoint_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/pluginutils/catalog" @@ -27,7 +28,8 @@ import ( ) func TestAllocations_Restart(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) client, cleanup := TestClient(t, nil) defer cleanup() @@ -66,7 +68,7 @@ func TestAllocations_Restart(t *testing.T) { } func TestAllocations_Restart_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) @@ -142,8 +144,9 @@ func TestAllocations_Restart_ACL(t *testing.T) { } func TestAllocations_GarbageCollectAll(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + client, cleanup := TestClient(t, nil) defer cleanup() @@ -153,7 +156,7 @@ func TestAllocations_GarbageCollectAll(t *testing.T) { } func TestAllocations_GarbageCollectAll_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) @@ -206,8 +209,9 @@ func TestAllocations_GarbageCollectAll_ACL(t *testing.T) { } func TestAllocations_GarbageCollect(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + client, cleanup := TestClient(t, func(c *config.Config) { c.GCDiskUsageThreshold = 100.0 }) @@ -249,7 +253,7 @@ func TestAllocations_GarbageCollect(t *testing.T) { } func TestAllocations_GarbageCollect_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) @@ -322,7 +326,7 @@ func TestAllocations_GarbageCollect_ACL(t *testing.T) { } func TestAllocations_Signal(t *testing.T) { - t.Parallel() + ci.Parallel(t) client, cleanup := TestClient(t, nil) defer cleanup() @@ -348,7 +352,7 @@ func TestAllocations_Signal(t *testing.T) { } func TestAllocations_Signal_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) @@ -420,8 +424,9 @@ func TestAllocations_Signal_ACL(t *testing.T) { } func TestAllocations_Stats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + client, cleanup := TestClient(t, nil) defer cleanup() @@ -453,7 +458,7 @@ func TestAllocations_Stats(t *testing.T) { } func TestAllocations_Stats_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) @@ -525,7 +530,7 @@ func TestAllocations_Stats_ACL(t *testing.T) { } func TestAlloc_ExecStreaming(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -629,7 +634,7 @@ OUTER: } func TestAlloc_ExecStreaming_NoAllocation(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -684,7 +689,7 @@ func TestAlloc_ExecStreaming_NoAllocation(t *testing.T) { } func TestAlloc_ExecStreaming_DisableRemoteExec(t *testing.T) { - t.Parallel() 
+ ci.Parallel(t) require := require.New(t) // Start a server and client @@ -740,7 +745,7 @@ func TestAlloc_ExecStreaming_DisableRemoteExec(t *testing.T) { } func TestAlloc_ExecStreaming_ACL_Basic(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server and client s, root, cleanupS := nomad.TestACLServer(t, nil) @@ -843,7 +848,7 @@ func TestAlloc_ExecStreaming_ACL_Basic(t *testing.T) { // TestAlloc_ExecStreaming_ACL_WithIsolation_Image asserts that token only needs // alloc-exec acl policy when image isolation is used func TestAlloc_ExecStreaming_ACL_WithIsolation_Image(t *testing.T) { - t.Parallel() + ci.Parallel(t) isolation := drivers.FSIsolationImage // Start a server and client @@ -987,7 +992,7 @@ func TestAlloc_ExecStreaming_ACL_WithIsolation_Image(t *testing.T) { // TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot asserts that token only needs // alloc-exec acl policy when chroot isolation is used func TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" || unix.Geteuid() != 0 { t.Skip("chroot isolation requires linux root") @@ -1136,7 +1141,7 @@ func TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot(t *testing.T) { // TestAlloc_ExecStreaming_ACL_WithIsolation_None asserts that token needs // alloc-node-exec acl policy as well when no isolation is used func TestAlloc_ExecStreaming_ACL_WithIsolation_None(t *testing.T) { - t.Parallel() + ci.Parallel(t) isolation := drivers.FSIsolationNone // Start a server and client diff --git a/client/alloc_watcher_e2e_test.go b/client/alloc_watcher_e2e_test.go index 9cdc6fab0..c36afd7a8 100644 --- a/client/alloc_watcher_e2e_test.go +++ b/client/alloc_watcher_e2e_test.go @@ -7,6 +7,7 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad" "github.com/hashicorp/nomad/nomad/mock" @@ -26,7 +27,7 @@ func TestPrevAlloc_StreamAllocDir_TLS(t *testing.T) { clientCertFn = "../helper/tlsutil/testdata/global-client.pem" clientKeyFn = "../helper/tlsutil/testdata/global-client-key.pem" ) - t.Parallel() + ci.Parallel(t) require := require.New(t) server, cleanupS := nomad.TestServer(t, func(c *nomad.Config) { diff --git a/client/allocdir/alloc_dir_test.go b/client/allocdir/alloc_dir_test.go index 4a876c57b..f0764f26e 100644 --- a/client/allocdir/alloc_dir_test.go +++ b/client/allocdir/alloc_dir_test.go @@ -15,6 +15,7 @@ import ( "syscall" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -48,6 +49,8 @@ var ( // Test that AllocDir.Build builds just the alloc directory. 
func TestAllocDir_BuildAlloc(t *testing.T) { + ci.Parallel(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -97,7 +100,9 @@ func MountCompatible(t *testing.T) { } func TestAllocDir_MountSharedAlloc(t *testing.T) { + ci.Parallel(t) MountCompatible(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -143,6 +148,8 @@ func TestAllocDir_MountSharedAlloc(t *testing.T) { } func TestAllocDir_Snapshot(t *testing.T) { + ci.Parallel(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -223,6 +230,8 @@ func TestAllocDir_Snapshot(t *testing.T) { } func TestAllocDir_Move(t *testing.T) { + ci.Parallel(t) + tmp1, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -291,6 +300,8 @@ func TestAllocDir_Move(t *testing.T) { } func TestAllocDir_EscapeChecking(t *testing.T) { + ci.Parallel(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -332,6 +343,7 @@ func TestAllocDir_EscapeChecking(t *testing.T) { // Test that `nomad fs` can't read secrets func TestAllocDir_ReadAt_SecretDir(t *testing.T) { + ci.Parallel(t) tmp := t.TempDir() d := NewAllocDir(testlog.HCLogger(t), tmp, "test") @@ -359,6 +371,8 @@ func TestAllocDir_ReadAt_SecretDir(t *testing.T) { } func TestAllocDir_SplitPath(t *testing.T) { + ci.Parallel(t) + dir, err := ioutil.TempDir("", "tmpdirtest") if err != nil { log.Fatal(err) @@ -382,6 +396,7 @@ func TestAllocDir_SplitPath(t *testing.T) { } func TestAllocDir_CreateDir(t *testing.T) { + ci.Parallel(t) if syscall.Geteuid() != 0 { t.Skip("Must be root to run test") } @@ -423,6 +438,8 @@ func TestAllocDir_CreateDir(t *testing.T) { } func TestPathFuncs(t *testing.T) { + ci.Parallel(t) + dir, err := ioutil.TempDir("", "nomadtest-pathfuncs") if err != nil { t.Fatalf("error creating temp dir: %v", err) @@ -458,7 +475,9 @@ func TestPathFuncs(t *testing.T) { } func TestAllocDir_DetectContentType(t *testing.T) { + ci.Parallel(t) require := require.New(t) + inputPath := "input/" var testFiles []string err := filepath.Walk(inputPath, func(path string, info os.FileInfo, err error) error { @@ -494,6 +513,7 @@ func TestAllocDir_DetectContentType(t *testing.T) { // Warning: If this test fails it may fill your disk before failing, so be // careful and/or confident. func TestAllocDir_SkipAllocDir(t *testing.T) { + ci.Parallel(t) MountCompatible(t) // Create root, alloc, and other dirs diff --git a/client/allocdir/fs_linux_test.go b/client/allocdir/fs_linux_test.go index c79dcb948..e8087086e 100644 --- a/client/allocdir/fs_linux_test.go +++ b/client/allocdir/fs_linux_test.go @@ -10,6 +10,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "golang.org/x/sys/unix" ) @@ -49,9 +50,11 @@ func isMount(path string) error { // TestLinuxRootSecretDir asserts secret dir creation and removal are // idempotent. func TestLinuxRootSecretDir(t *testing.T) { + ci.Parallel(t) if unix.Geteuid() != 0 { t.Skip("Must be run as root") } + tmpdir, err := ioutil.TempDir("", "nomadtest-rootsecretdir") if err != nil { t.Fatalf("unable to create tempdir for test: %v", err) @@ -109,9 +112,11 @@ func TestLinuxRootSecretDir(t *testing.T) { // TestLinuxUnprivilegedSecretDir asserts secret dir creation and removal are // idempotent. 
func TestLinuxUnprivilegedSecretDir(t *testing.T) { + ci.Parallel(t) if unix.Geteuid() == 0 { t.Skip("Must not be run as root") } + tmpdir, err := ioutil.TempDir("", "nomadtest-secretdir") if err != nil { t.Fatalf("unable to create tempdir for test: %s", err) diff --git a/client/allocdir/task_dir_test.go b/client/allocdir/task_dir_test.go index 61aa3b302..5ae12404b 100644 --- a/client/allocdir/task_dir_test.go +++ b/client/allocdir/task_dir_test.go @@ -6,11 +6,14 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" ) // Test that building a chroot will skip nonexistent directories. func TestTaskDir_EmbedNonexistent(t *testing.T) { + ci.Parallel(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -33,6 +36,8 @@ func TestTaskDir_EmbedNonexistent(t *testing.T) { // Test that building a chroot copies files from the host into the task dir. func TestTaskDir_EmbedDirs(t *testing.T) { + ci.Parallel(t) + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -87,6 +92,7 @@ func TestTaskDir_EmbedDirs(t *testing.T) { // Test that task dirs for image based isolation don't require root. func TestTaskDir_NonRoot_Image(t *testing.T) { + ci.Parallel(t) if os.Geteuid() == 0 { t.Skip("test should be run as non-root user") } @@ -110,9 +116,11 @@ func TestTaskDir_NonRoot_Image(t *testing.T) { // Test that task dirs with no isolation don't require root. func TestTaskDir_NonRoot(t *testing.T) { + ci.Parallel(t) if os.Geteuid() == 0 { t.Skip("test should be run as non-root user") } + tmp, err := ioutil.TempDir("", "AllocDir") if err != nil { t.Fatalf("Couldn't create temp dir: %v", err) @@ -134,5 +142,4 @@ func TestTaskDir_NonRoot(t *testing.T) { if _, err = os.Stat(td.SharedTaskDir); !os.IsNotExist(err) { t.Fatalf("Expected a NotExist error for shared alloc dir in task dir: %q", td.SharedTaskDir) } - } diff --git a/client/allochealth/tracker_test.go b/client/allochealth/tracker_test.go index f4aec166d..6e9e6dd8c 100644 --- a/client/allochealth/tracker_test.go +++ b/client/allochealth/tracker_test.go @@ -8,6 +8,7 @@ import ( "time" consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/consul" cstructs "github.com/hashicorp/nomad/client/structs" agentconsul "github.com/hashicorp/nomad/command/agent/consul" @@ -19,7 +20,7 @@ import ( ) func TestTracker_Checks_Healthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up @@ -90,7 +91,7 @@ func TestTracker_Checks_Healthy(t *testing.T) { } func TestTracker_Checks_PendingPostStop_Healthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.LifecycleAllocWithPoststopDeploy() alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up @@ -130,7 +131,7 @@ func TestTracker_Checks_PendingPostStop_Healthy(t *testing.T) { } func TestTracker_Succeeded_PostStart_Healthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.LifecycleAllocWithPoststartDeploy() alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = time.Millisecond * 1 @@ -171,7 +172,7 @@ func TestTracker_Succeeded_PostStart_Healthy(t *testing.T) { } func TestTracker_Checks_Unhealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up @@ -261,7 +262,7 @@ func 
TestTracker_Checks_Unhealthy(t *testing.T) { } func TestTracker_Healthy_IfBothTasksAndConsulChecksAreHealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() logger := testlog.HCLogger(t) @@ -312,7 +313,7 @@ func TestTracker_Healthy_IfBothTasksAndConsulChecksAreHealthy(t *testing.T) { // TestTracker_Checks_Healthy_Before_TaskHealth asserts that we mark an alloc // healthy, if the checks pass before task health pass func TestTracker_Checks_Healthy_Before_TaskHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].Migrate.MinHealthyTime = 1 // let's speed things up @@ -419,7 +420,7 @@ func TestTracker_Checks_Healthy_Before_TaskHealth(t *testing.T) { } func TestTracker_Checks_OnUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { desc string diff --git a/client/allocrunner/alloc_runner_test.go b/client/allocrunner/alloc_runner_test.go index 749f72078..0b61a7c43 100644 --- a/client/allocrunner/alloc_runner_test.go +++ b/client/allocrunner/alloc_runner_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allochealth" "github.com/hashicorp/nomad/client/allocwatcher" cconsul "github.com/hashicorp/nomad/client/consul" @@ -30,7 +31,7 @@ func destroy(ar *allocRunner) { // TestAllocRunner_AllocState_Initialized asserts that getting TaskStates via // AllocState() are initialized even before the AllocRunner has run. func TestAllocRunner_AllocState_Initialized(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver" @@ -49,7 +50,7 @@ func TestAllocRunner_AllocState_Initialized(t *testing.T) { // TestAllocRunner_TaskLeader_KillTG asserts that when a leader task dies the // entire task group is killed. func TestAllocRunner_TaskLeader_KillTG(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -239,7 +240,7 @@ func TestAllocRunner_Lifecycle_Poststart(t *testing.T) { // TestAllocRunner_TaskMain_KillTG asserts that when main tasks die the // entire task group is killed. func TestAllocRunner_TaskMain_KillTG(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -398,6 +399,8 @@ func TestAllocRunner_TaskMain_KillTG(t *testing.T) { // postop lifecycle hook starts all 3 tasks, only // the ephemeral one finishes, and the other 2 exit when the alloc is stopped. func TestAllocRunner_Lifecycle_Poststop(t *testing.T) { + ci.Parallel(t) + alloc := mock.LifecycleAlloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -478,7 +481,7 @@ func TestAllocRunner_Lifecycle_Poststop(t *testing.T) { } func TestAllocRunner_TaskGroup_ShutdownDelay(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -608,7 +611,7 @@ func TestAllocRunner_TaskGroup_ShutdownDelay(t *testing.T) { // TestAllocRunner_TaskLeader_StopTG asserts that when stopping an alloc with a // leader the leader is stopped before other tasks. 
func TestAllocRunner_TaskLeader_StopTG(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -707,7 +710,7 @@ func TestAllocRunner_TaskLeader_StopTG(t *testing.T) { // not stopped as it does not exist. // See https://github.com/hashicorp/nomad/issues/3420#issuecomment-341666932 func TestAllocRunner_TaskLeader_StopRestoredTG(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name] @@ -785,7 +788,7 @@ func TestAllocRunner_TaskLeader_StopRestoredTG(t *testing.T) { } func TestAllocRunner_Restore_LifecycleHooks(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.LifecycleAlloc() @@ -823,7 +826,7 @@ func TestAllocRunner_Restore_LifecycleHooks(t *testing.T) { } func TestAllocRunner_Update_Semantics(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) updatedAlloc := func(a *structs.Allocation) *structs.Allocation { @@ -876,7 +879,7 @@ func TestAllocRunner_Update_Semantics(t *testing.T) { // TestAllocRunner_DeploymentHealth_Healthy_Migration asserts that health is // reported for services that got migrated; not just part of deployments. func TestAllocRunner_DeploymentHealth_Healthy_Migration(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() @@ -924,7 +927,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_Migration(t *testing.T) { // TestAllocRunner_DeploymentHealth_Healthy_NoChecks asserts that the health // watcher will mark the allocation as healthy based on task states alone. func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() @@ -987,7 +990,7 @@ func TestAllocRunner_DeploymentHealth_Healthy_NoChecks(t *testing.T) { // TestAllocRunner_DeploymentHealth_Unhealthy_Checks asserts that the health // watcher will mark the allocation as unhealthy with failing checks. func TestAllocRunner_DeploymentHealth_Unhealthy_Checks(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -1082,7 +1085,7 @@ func TestAllocRunner_DeploymentHealth_Unhealthy_Checks(t *testing.T) { // TestAllocRunner_Destroy asserts that Destroy kills and cleans up a running // alloc. func TestAllocRunner_Destroy(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Ensure task takes some time alloc := mock.BatchAlloc() @@ -1144,7 +1147,7 @@ func TestAllocRunner_Destroy(t *testing.T) { } func TestAllocRunner_SimpleRun(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() @@ -1179,7 +1182,7 @@ func TestAllocRunner_SimpleRun(t *testing.T) { // TestAllocRunner_MoveAllocDir asserts that a rescheduled // allocation copies ephemeral disk content from previous alloc run func TestAllocRunner_MoveAllocDir(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Step 1: start and run a task alloc := mock.BatchAlloc() @@ -1236,7 +1239,7 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) { // retrying fetching an artifact, other tasks in the group should be able // to proceed. 
 func TestAllocRunner_HandlesArtifactFailure(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.BatchAlloc()
 	rp := &structs.RestartPolicy{
@@ -1296,6 +1299,8 @@ func TestAllocRunner_HandlesArtifactFailure(t *testing.T) {
 
 // Test that alloc runner kills tasks in task group when another task fails
 func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
+	ci.Parallel(t)
+
 	alloc := mock.Alloc()
 	tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name]
 	alloc.Job.TaskGroups[0].RestartPolicy.Attempts = 0
@@ -1425,7 +1430,7 @@ func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
 
 // Test that alloc becoming terminal should destroy the alloc runner
 func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	alloc := mock.BatchAlloc()
 	tr := alloc.AllocatedResources.Tasks[alloc.Job.TaskGroups[0].Tasks[0].Name]
 	alloc.Job.TaskGroups[0].RestartPolicy.Attempts = 0
@@ -1513,7 +1518,7 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) {
 
 // TestAllocRunner_PersistState_Destroyed asserts that destroyed allocs don't persist anymore
 func TestAllocRunner_PersistState_Destroyed(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.BatchAlloc()
 	taskName := alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks[0].Name
diff --git a/client/allocrunner/alloc_runner_unix_test.go b/client/allocrunner/alloc_runner_unix_test.go
index 41b5dabc8..c8bda921a 100644
--- a/client/allocrunner/alloc_runner_unix_test.go
+++ b/client/allocrunner/alloc_runner_unix_test.go
@@ -11,6 +11,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/consul"
 	"github.com/hashicorp/nomad/client/state"
 	"github.com/hashicorp/nomad/nomad/mock"
@@ -25,7 +26,7 @@ import (
 // DesiredStatus=Stop, persisting the update, but crashing before terminating
 // the task.
 func TestAllocRunner_Restore_RunningTerminal(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// 1. Run task
 	// 2. Shutdown alloc runner
@@ -143,7 +144,7 @@ func TestAllocRunner_Restore_RunningTerminal(t *testing.T) {
 // TestAllocRunner_Restore_CompletedBatch asserts that restoring a completed
 // batch alloc doesn't run it again
 func TestAllocRunner_Restore_CompletedBatch(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// 1. Run task and wait for it to complete
 	// 2. Start new alloc runner
@@ -228,7 +229,7 @@ func TestAllocRunner_Restore_CompletedBatch(t *testing.T) {
 // prestart hooks failed, then the alloc and subsequent tasks transition
 // to failed state
 func TestAllocRunner_PreStartFailuresLeadToFailed(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 	alloc.Job.Type = structs.JobTypeBatch
diff --git a/client/allocrunner/consul_grpc_sock_hook_test.go b/client/allocrunner/consul_grpc_sock_hook_test.go
index 2730ac627..d7e961db5 100644
--- a/client/allocrunner/consul_grpc_sock_hook_test.go
+++ b/client/allocrunner/consul_grpc_sock_hook_test.go
@@ -11,6 +11,7 @@ import (
 	"sync"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/helper/testlog"
@@ -24,7 +25,7 @@ import (
 // Consul unix socket hook's Prerun method is called and stopped with the
 // Postrun method is called.
 func TestConsulGRPCSocketHook_PrerunPostrun_Ok(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// As of Consul 1.6.0 the test server does not support the gRPC
 	// endpoint so we have to fake it.
@@ -101,7 +102,7 @@ func TestConsulGRPCSocketHook_PrerunPostrun_Ok(t *testing.T) {
 // TestConsulGRPCSocketHook_Prerun_Error asserts that invalid Consul addresses cause
 // Prerun to return an error if the alloc requires a grpc proxy.
 func TestConsulGRPCSocketHook_Prerun_Error(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 
@@ -153,7 +154,7 @@ func TestConsulGRPCSocketHook_Prerun_Error(t *testing.T) {
 // TestConsulGRPCSocketHook_proxy_Unix asserts that the destination can be a unix
 // socket path.
 func TestConsulGRPCSocketHook_proxy_Unix(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	dir, err := ioutil.TempDir("", "nomadtest_proxy_Unix")
 	require.NoError(t, err)
diff --git a/client/allocrunner/consul_http_sock_hook_test.go b/client/allocrunner/consul_http_sock_hook_test.go
index 3d5a97ec5..9a03a9579 100644
--- a/client/allocrunner/consul_http_sock_hook_test.go
+++ b/client/allocrunner/consul_http_sock_hook_test.go
@@ -6,6 +6,7 @@ import (
 	"path/filepath"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/mock"
@@ -14,7 +15,7 @@ import (
 )
 
 func TestConsulSocketHook_PrerunPostrun_Ok(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	fakeConsul, err := net.Listen("tcp", "127.0.0.1:0")
 	require.NoError(t, err)
@@ -89,7 +90,7 @@ func TestConsulSocketHook_PrerunPostrun_Ok(t *testing.T) {
 }
 
 func TestConsulHTTPSocketHook_Prerun_Error(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 
diff --git a/client/allocrunner/csi_hook_test.go b/client/allocrunner/csi_hook_test.go
index 6ed9270d5..ea5c35d3a 100644
--- a/client/allocrunner/csi_hook_test.go
+++ b/client/allocrunner/csi_hook_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/require"
 
@@ -29,6 +30,7 @@ var _ interfaces.RunnerPostrunHook = (*csiHook)(nil)
 // var _ interfaces.RunnerUpdateHook = (*csiHook)(nil)
 
 func TestCSIHook(t *testing.T) {
+	ci.Parallel(t)
 	alloc := mock.Alloc()
 	logger := testlog.HCLogger(t)
 
diff --git a/client/allocrunner/groupservice_hook_test.go b/client/allocrunner/groupservice_hook_test.go
index 61d9a38b4..dbf3c5483 100644
--- a/client/allocrunner/groupservice_hook_test.go
+++ b/client/allocrunner/groupservice_hook_test.go
@@ -7,6 +7,7 @@ import (
 
 	consulapi "github.com/hashicorp/consul/api"
 	ctestutil "github.com/hashicorp/consul/sdk/testutil"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/consul"
 	"github.com/hashicorp/nomad/client/taskenv"
@@ -27,7 +28,7 @@ var _ interfaces.RunnerTaskRestartHook = (*groupServiceHook)(nil)
 // TestGroupServiceHook_NoGroupServices asserts calling group service hooks
 // without group services does not error.
 func TestGroupServiceHook_NoGroupServices(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 	alloc.Job.TaskGroups[0].Services = []*structs.Service{{
@@ -65,7 +66,7 @@ func TestGroupServiceHook_NoGroupServices(t *testing.T) {
 // TestGroupServiceHook_ShutdownDelayUpdate asserts calling group service hooks
 // update updates the hooks delay value.
 func TestGroupServiceHook_ShutdownDelayUpdate(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 	alloc.Job.TaskGroups[0].ShutdownDelay = helper.TimeToPtr(10 * time.Second)
@@ -102,7 +103,7 @@ func TestGroupServiceHook_ShutdownDelayUpdate(t *testing.T) {
 // TestGroupServiceHook_GroupServices asserts group service hooks with group
 // services does not error.
 func TestGroupServiceHook_GroupServices(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.ConnectAlloc()
 	logger := testlog.HCLogger(t)
@@ -136,7 +137,7 @@ func TestGroupServiceHook_GroupServices(t *testing.T) {
 // TestGroupServiceHook_Error asserts group service hooks with group
 // services but no group network is handled gracefully.
 func TestGroupServiceHook_NoNetwork(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 	alloc.Job.TaskGroups[0].Networks = []*structs.NetworkResource{}
@@ -180,7 +181,7 @@ func TestGroupServiceHook_NoNetwork(t *testing.T) {
 }
 
 func TestGroupServiceHook_getWorkloadServices(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 	alloc.Job.TaskGroups[0].Networks = []*structs.NetworkResource{}
diff --git a/client/allocrunner/health_hook_test.go b/client/allocrunner/health_hook_test.go
index 62c148925..56de9926b 100644
--- a/client/allocrunner/health_hook_test.go
+++ b/client/allocrunner/health_hook_test.go
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	consulapi "github.com/hashicorp/consul/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/consul"
 	cstructs "github.com/hashicorp/nomad/client/structs"
@@ -84,7 +85,7 @@ func (m *mockHealthSetter) HasHealth() bool {
 // TestHealthHook_PrerunPostrun asserts a health hook does not error if it is
 // run and postrunned.
 func TestHealthHook_PrerunPostrun(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	logger := testlog.HCLogger(t)
@@ -121,7 +122,7 @@ func TestHealthHook_PrerunPostrun(t *testing.T) {
 
 // TestHealthHook_PrerunUpdatePostrun asserts Updates may be applied concurrently.
 func TestHealthHook_PrerunUpdatePostrun(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	alloc := mock.Alloc()
@@ -160,7 +161,7 @@ func TestHealthHook_PrerunUpdatePostrun(t *testing.T) {
 // TestHealthHook_UpdatePrerunPostrun asserts that a hook may have Update
 // called before Prerun.
 func TestHealthHook_UpdatePrerunPostrun(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	alloc := mock.Alloc()
@@ -203,7 +204,7 @@ func TestHealthHook_UpdatePrerunPostrun(t *testing.T) {
 
 // TestHealthHook_Postrun asserts that a hook may have only Postrun called.
 func TestHealthHook_Postrun(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	logger := testlog.HCLogger(t)
@@ -222,7 +223,7 @@ func TestHealthHook_Postrun(t *testing.T) {
 // TestHealthHook_SetHealth_healthy asserts SetHealth is called when health status is
 // set. Uses task state and health checks.
 func TestHealthHook_SetHealth_healthy(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	alloc := mock.Alloc()
@@ -302,7 +303,7 @@ func TestHealthHook_SetHealth_healthy(t *testing.T) {
 
 // TestHealthHook_SetHealth_unhealthy asserts SetHealth notices unhealthy allocs
 func TestHealthHook_SetHealth_unhealthy(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	alloc := mock.Alloc()
@@ -386,7 +387,7 @@ func TestHealthHook_SetHealth_unhealthy(t *testing.T) {
 
 // TestHealthHook_SystemNoop asserts that system jobs return the noop tracker.
 func TestHealthHook_SystemNoop(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	h := newAllocHealthWatcherHook(testlog.HCLogger(t), mock.SystemAlloc(), nil, nil, nil)
 
@@ -407,7 +408,7 @@ func TestHealthHook_SystemNoop(t *testing.T) {
 
 // TestHealthHook_BatchNoop asserts that batch jobs return the noop tracker.
 func TestHealthHook_BatchNoop(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	h := newAllocHealthWatcherHook(testlog.HCLogger(t), mock.BatchAlloc(), nil, nil, nil)
 
diff --git a/client/allocrunner/network_hook_test.go b/client/allocrunner/network_hook_test.go
index c5dee542c..5041c2864 100644
--- a/client/allocrunner/network_hook_test.go
+++ b/client/allocrunner/network_hook_test.go
@@ -3,6 +3,7 @@ package allocrunner
 import (
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/taskenv"
 	"github.com/hashicorp/nomad/helper/testlog"
@@ -42,6 +43,8 @@ func (m *mockNetworkStatusSetter) SetNetworkStatus(status *structs.AllocNetworkS
 // Test that the prerun and postrun hooks call the setter with the expected spec when
 // the network mode is not host
 func TestNetworkHook_Prerun_Postrun(t *testing.T) {
+	ci.Parallel(t)
+
 	alloc := mock.Alloc()
 	alloc.Job.TaskGroups[0].Networks = []*structs.NetworkResource{
 		{
diff --git a/client/allocrunner/network_manager_linux_test.go b/client/allocrunner/network_manager_linux_test.go
index ac2f97c8f..7d598d74c 100644
--- a/client/allocrunner/network_manager_linux_test.go
+++ b/client/allocrunner/network_manager_linux_test.go
@@ -3,6 +3,7 @@ package allocrunner
 import (
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/pluginmanager"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/plugins/drivers"
@@ -63,6 +64,8 @@ func (m *mockDriverManager) Dispense(driver string) (drivers.DriverPlugin, error
 }
 
 func TestNewNetworkManager(t *testing.T) {
+	ci.Parallel(t)
+
 	for _, tc := range []struct {
 		name  string
 		alloc *structs.Allocation
diff --git a/client/allocrunner/networking_cni_test.go b/client/allocrunner/networking_cni_test.go
index c4d761d4d..bc759272f 100644
--- a/client/allocrunner/networking_cni_test.go
+++ b/client/allocrunner/networking_cni_test.go
@@ -8,6 +8,7 @@ import (
 	"testing"
 
 	cni "github.com/containerd/go-cni"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -16,6 +17,8 @@ import (
 // TestCNI_cniToAllocNet_Fallback asserts if a CNI plugin result lacks an IP on
 // its sandbox interface, the first IP found is used.
 func TestCNI_cniToAllocNet_Fallback(t *testing.T) {
+	ci.Parallel(t)
+
 	// Calico's CNI plugin v3.12.3 has been observed to return the
 	// following:
 	cniResult := &cni.CNIResult{
@@ -47,6 +50,8 @@ func TestCNI_cniToAllocNet_Fallback(t *testing.T) {
 // result lacks any IP addresses. This has not been observed, but Nomad still
 // must guard against invalid results from external plugins.
 func TestCNI_cniToAllocNet_Invalid(t *testing.T) {
+	ci.Parallel(t)
+
 	cniResult := &cni.CNIResult{
 		Interfaces: map[string]*cni.Config{
 			"eth0": {},
diff --git a/client/allocrunner/task_hook_coordinator_test.go b/client/allocrunner/task_hook_coordinator_test.go
index e4343d915..7399acdab 100644
--- a/client/allocrunner/task_hook_coordinator_test.go
+++ b/client/allocrunner/task_hook_coordinator_test.go
@@ -5,16 +5,17 @@ import (
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/require"
-
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocrunner/taskrunner"
-	"github.com/hashicorp/nomad/nomad/structs"
-
 	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/mock"
+	"github.com/hashicorp/nomad/nomad/structs"
+	"github.com/stretchr/testify/require"
 )
 
 func TestTaskHookCoordinator_OnlyMainApp(t *testing.T) {
+	ci.Parallel(t)
+
 	alloc := mock.Alloc()
 	tasks := alloc.Job.TaskGroups[0].Tasks
 	task := tasks[0]
@@ -28,6 +29,8 @@ func TestTaskHookCoordinator_OnlyMainApp(t *testing.T) {
 }
 
 func TestTaskHookCoordinator_PrestartRunsBeforeMain(t *testing.T) {
+	ci.Parallel(t)
+
 	logger := testlog.HCLogger(t)
 
 	alloc := mock.LifecycleAlloc()
@@ -48,6 +51,8 @@ func TestTaskHookCoordinator_PrestartRunsBeforeMain(t *testing.T) {
 }
 
 func TestTaskHookCoordinator_MainRunsAfterPrestart(t *testing.T) {
+	ci.Parallel(t)
+
 	logger := testlog.HCLogger(t)
 
 	alloc := mock.LifecycleAlloc()
@@ -92,6 +97,8 @@ func TestTaskHookCoordinator_MainRunsAfterPrestart(t *testing.T) {
 }
 
 func TestTaskHookCoordinator_MainRunsAfterManyInitTasks(t *testing.T) {
+	ci.Parallel(t)
+
 	logger := testlog.HCLogger(t)
 
 	alloc := mock.LifecycleAlloc()
@@ -137,6 +144,8 @@ func TestTaskHookCoordinator_MainRunsAfterManyInitTasks(t *testing.T) {
 }
 
 func TestTaskHookCoordinator_FailedInitTask(t *testing.T) {
+	ci.Parallel(t)
+
 	logger := testlog.HCLogger(t)
 
 	alloc := mock.LifecycleAlloc()
@@ -182,6 +191,8 @@ func TestTaskHookCoordinator_FailedInitTask(t *testing.T) {
 }
 
 func TestTaskHookCoordinator_SidecarNeverStarts(t *testing.T) {
+	ci.Parallel(t)
+
 	logger := testlog.HCLogger(t)
 
 	alloc := mock.LifecycleAlloc()
@@ -225,6 +236,8 @@ func TestTaskHookCoordinator_SidecarNeverStarts(t *testing.T) {
 }
 
 func TestTaskHookCoordinator_PoststartStartsAfterMain(t *testing.T) {
+	ci.Parallel(t)
+
 	logger := testlog.HCLogger(t)
 
 	alloc := mock.LifecycleAlloc()
@@ -280,6 +293,7 @@ func isChannelClosed(ch <-chan struct{}) bool {
 }
 
 func TestHasSidecarTasks(t *testing.T) {
+	ci.Parallel(t)
 
 	falseV, trueV := false, true
 
diff --git a/client/allocrunner/taskrunner/artifact_hook_test.go b/client/allocrunner/taskrunner/artifact_hook_test.go
index 121370867..0a3f21e50 100644
--- a/client/allocrunner/taskrunner/artifact_hook_test.go
+++ b/client/allocrunner/taskrunner/artifact_hook_test.go
@@ -10,6 +10,7 @@ import (
 	"sort"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/taskenv"
@@ -33,7 +34,7 @@ func (m *mockEmitter) EmitEvent(ev *structs.TaskEvent) {
 // TestTaskRunner_ArtifactHook_Recoverable asserts that failures to download
 // artifacts are a recoverable error.
 func TestTaskRunner_ArtifactHook_Recoverable(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	me := &mockEmitter{}
 	artifactHook := newArtifactHook(me, testlog.HCLogger(t))
@@ -66,7 +67,7 @@ func TestTaskRunner_ArtifactHook_Recoverable(t *testing.T) {
 // already downloaded artifacts when subsequent artifacts fail and cause a
 // restart.
 func TestTaskRunner_ArtifactHook_PartialDone(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	me := &mockEmitter{}
 	artifactHook := newArtifactHook(me, testlog.HCLogger(t))
diff --git a/client/allocrunner/taskrunner/connect_native_hook_test.go b/client/allocrunner/taskrunner/connect_native_hook_test.go
index a9c43d210..5684e77ed 100644
--- a/client/allocrunner/taskrunner/connect_native_hook_test.go
+++ b/client/allocrunner/taskrunner/connect_native_hook_test.go
@@ -9,6 +9,7 @@ import (
 
 	consulapi "github.com/hashicorp/consul/api"
 	consultest "github.com/hashicorp/consul/sdk/testutil"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/taskenv"
@@ -35,7 +36,7 @@ func getTestConsul(t *testing.T) *consultest.TestServer {
 }
 
 func TestConnectNativeHook_Name(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	name := new(connectNativeHook).Name()
 	require.Equal(t, "connect_native", name)
 }
@@ -61,7 +62,7 @@ func cleanupCertDirs(t *testing.T, original, secrets string) {
 }
 
 func TestConnectNativeHook_copyCertificate(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	f, d := setupCertDirs(t)
 	defer cleanupCertDirs(t, f, d)
@@ -81,7 +82,7 @@ func TestConnectNativeHook_copyCertificate(t *testing.T) {
 }
 
 func TestConnectNativeHook_copyCertificates(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	f, d := setupCertDirs(t)
 	defer cleanupCertDirs(t, f, d)
@@ -109,7 +110,7 @@ func TestConnectNativeHook_copyCertificates(t *testing.T) {
 }
 
 func TestConnectNativeHook_tlsEnv(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// the hook config comes from client config
 	emptyHook := new(connectNativeHook)
@@ -163,7 +164,7 @@ func TestConnectNativeHook_tlsEnv(t *testing.T) {
 }
 
 func TestConnectNativeHook_bridgeEnv_bridge(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	t.Run("without tls", func(t *testing.T) {
 		hook := new(connectNativeHook)
@@ -208,7 +209,7 @@ func TestConnectNativeHook_bridgeEnv_bridge(t *testing.T) {
 }
 
 func TestConnectNativeHook_bridgeEnv_host(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	hook := new(connectNativeHook)
 	hook.alloc = mock.ConnectNativeAlloc("host")
@@ -227,7 +228,7 @@ func TestConnectNativeHook_bridgeEnv_host(t *testing.T) {
 }
 
 func TestConnectNativeHook_hostEnv_host(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	hook := new(connectNativeHook)
 	hook.alloc = mock.ConnectNativeAlloc("host")
@@ -249,7 +250,7 @@ func TestConnectNativeHook_hostEnv_host(t *testing.T) {
 }
 
 func TestConnectNativeHook_hostEnv_bridge(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	hook := new(connectNativeHook)
 	hook.alloc = mock.ConnectNativeAlloc("bridge")
@@ -269,7 +270,7 @@ func TestConnectNativeHook_hostEnv_bridge(t *testing.T) {
 }
 
 func TestTaskRunner_ConnectNativeHook_Noop(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 	alloc := mock.Alloc()
@@ -307,7 +308,7 @@ func TestTaskRunner_ConnectNativeHook_Noop(t *testing.T) {
 }
 
 func TestTaskRunner_ConnectNativeHook_Ok(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	testutil.RequireConsul(t)
 
 	testConsul := getTestConsul(t)
@@ -372,7 +373,7 @@ func TestTaskRunner_ConnectNativeHook_Ok(t *testing.T) {
 }
 
 func TestTaskRunner_ConnectNativeHook_with_SI_token(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	testutil.RequireConsul(t)
 
 	testConsul := getTestConsul(t)
@@ -445,7 +446,7 @@ func TestTaskRunner_ConnectNativeHook_with_SI_token(t *testing.T) {
 }
 
 func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	testutil.RequireConsul(t)
 
 	try := func(t *testing.T, shareSSL *bool) {
@@ -566,7 +567,7 @@ func checkFilesInDir(t *testing.T, dir string, includes, excludes []string) {
 }
 
 func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	testutil.RequireConsul(t)
 
 	fakeCert, fakeCertDir := setupCertDirs(t)
diff --git a/client/allocrunner/taskrunner/device_hook_test.go b/client/allocrunner/taskrunner/device_hook_test.go
index 9d9d6d7b3..9723f0de5 100644
--- a/client/allocrunner/taskrunner/device_hook_test.go
+++ b/client/allocrunner/taskrunner/device_hook_test.go
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/devicemanager"
 	"github.com/hashicorp/nomad/helper/testlog"
@@ -15,7 +16,7 @@ import (
 )
 
 func TestDeviceHook_CorrectDevice(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	dm := devicemanager.NoopMockManager()
@@ -97,7 +98,7 @@ func TestDeviceHook_CorrectDevice(t *testing.T) {
 }
 
 func TestDeviceHook_IncorrectDevice(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	dm := devicemanager.NoopMockManager()
diff --git a/client/allocrunner/taskrunner/dispatch_hook_test.go b/client/allocrunner/taskrunner/dispatch_hook_test.go
index 9f56fe0fd..6d7577612 100644
--- a/client/allocrunner/taskrunner/dispatch_hook_test.go
+++ b/client/allocrunner/taskrunner/dispatch_hook_test.go
@@ -7,6 +7,7 @@ import (
 	"testing"
 
 	"github.com/golang/snappy"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/helper/testlog"
@@ -21,7 +22,7 @@ var _ interfaces.TaskPrestartHook = (*dispatchHook)(nil)
 // TestTaskRunner_DispatchHook_NoPayload asserts that the hook is a noop and is
 // marked as done if there is no dispatch payload.
 func TestTaskRunner_DispatchHook_NoPayload(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	ctx := context.Background()
@@ -57,7 +58,7 @@ func TestTaskRunner_DispatchHook_NoPayload(t *testing.T) {
 // TestTaskRunner_DispatchHook_Ok asserts that dispatch payloads are written to
 // a file in the task dir.
 func TestTaskRunner_DispatchHook_Ok(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	ctx := context.Background()
@@ -101,7 +102,7 @@ func TestTaskRunner_DispatchHook_Ok(t *testing.T) {
 // TestTaskRunner_DispatchHook_Error asserts that on an error dispatch payloads
 // are not written and Done=false.
 func TestTaskRunner_DispatchHook_Error(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	ctx := context.Background()
diff --git a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go
index a1a9857cb..b1337c288 100644
--- a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go
+++ b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go
@@ -17,6 +17,7 @@ import (
 	"time"
 
 	consulapi "github.com/hashicorp/consul/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocdir"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/taskenv"
@@ -53,7 +54,7 @@ func writeTmp(t *testing.T, s string, fm os.FileMode) string {
 }
 
 func TestEnvoyBootstrapHook_maybeLoadSIToken(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// This test fails when running as root because the test case for checking
 	// the error condition when the file is unreadable fails (root can read the
@@ -94,7 +95,7 @@
 }
 
 func TestEnvoyBootstrapHook_decodeTriState(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	require.Equal(t, "", decodeTriState(nil))
 	require.Equal(t, "true", decodeTriState(helper.BoolToPtr(true)))
@@ -118,7 +119,7 @@ var (
 )
 
 func TestEnvoyBootstrapHook_envoyBootstrapArgs(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	t.Run("excluding SI token", func(t *testing.T) {
 		ebArgs := envoyBootstrapArgs{
@@ -227,7 +228,7 @@
 }
 
 func TestEnvoyBootstrapHook_envoyBootstrapEnv(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	environment := []string{"foo=bar", "baz=1"}
 
@@ -291,7 +292,7 @@ type envoyConfig struct {
 // TestEnvoyBootstrapHook_with_SI_token asserts the bootstrap file written for
 // Envoy contains a Consul SI token.
 func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	testutil.RequireConsul(t)
 
 	testConsul := getTestConsul(t)
@@ -392,7 +393,7 @@
 // creates Envoy's bootstrap.json configuration based on Connect proxy sidecars
 // registered for the task.
 func TestTaskRunner_EnvoyBootstrapHook_sidecar_ok(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	testutil.RequireConsul(t)
 
 	testConsul := getTestConsul(t)
@@ -487,7 +488,7 @@
 }
 
 func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 	testConsul := getTestConsul(t)
@@ -570,7 +571,7 @@
 // TestTaskRunner_EnvoyBootstrapHook_Noop asserts that the Envoy bootstrap hook
 // is a noop for non-Connect proxy sidecar / gateway tasks.
 func TestTaskRunner_EnvoyBootstrapHook_Noop(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 	alloc := mock.Alloc()
@@ -607,7 +608,7 @@
 // bootstrap hook returns a Recoverable error if the bootstrap command runs but
 // fails.
 func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	testutil.RequireConsul(t)
 
 	testConsul := getTestConsul(t)
@@ -685,7 +686,7 @@ func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) {
 }
 
 func TestTaskRunner_EnvoyBootstrapHook_retryTimeout(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 	testConsul := getTestConsul(t)
@@ -812,6 +813,8 @@ func TestTaskRunner_EnvoyBootstrapHook_extractNameAndKind(t *testing.T) {
 }
 
 func TestTaskRunner_EnvoyBootstrapHook_grpcAddress(t *testing.T) {
+	ci.Parallel(t)
+
 	bridgeH := newEnvoyBootstrapHook(newEnvoyBootstrapHookConfig(
 		mock.ConnectIngressGatewayAlloc("bridge"),
 		new(config.ConsulConfig),
@@ -841,6 +844,8 @@
 }
 
 func TestTaskRunner_EnvoyBootstrapHook_isConnectKind(t *testing.T) {
+	ci.Parallel(t)
+
 	require.True(t, isConnectKind(structs.ConnectProxyPrefix))
 	require.True(t, isConnectKind(structs.ConnectIngressPrefix))
 	require.True(t, isConnectKind(structs.ConnectTerminatingPrefix))
diff --git a/client/allocrunner/taskrunner/envoy_version_hook_test.go b/client/allocrunner/taskrunner/envoy_version_hook_test.go
index 225c8d6e6..43247284a 100644
--- a/client/allocrunner/taskrunner/envoy_version_hook_test.go
+++ b/client/allocrunner/taskrunner/envoy_version_hook_test.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocdir"
 	ifs "github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/taskenv"
@@ -24,7 +25,7 @@ var (
 )
 
 func TestEnvoyVersionHook_semver(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	t.Run("with v", func(t *testing.T) {
 		result, err := semver("v1.2.3")
@@ -45,7 +46,7 @@
 }
 
 func TestEnvoyVersionHook_taskImage(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	t.Run("absent", func(t *testing.T) {
 		result := (*envoyVersionHook)(nil).taskImage(map[string]interface{}{
@@ -70,7 +71,7 @@
 }
 
 func TestEnvoyVersionHook_tweakImage(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	image := envoy.ImageFormat
 
@@ -106,7 +107,7 @@
 }
 
 func TestEnvoyVersionHook_interpolateImage(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	hook := (*envoyVersionHook)(nil)
 
@@ -156,7 +157,7 @@
 }
 
 func TestEnvoyVersionHook_skip(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	h := new(envoyVersionHook)
 
@@ -221,7 +222,7 @@
 }
 
 func TestTaskRunner_EnvoyVersionHook_Prestart_standard(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 
@@ -264,7 +265,7 @@
 }
 
 func TestTaskRunner_EnvoyVersionHook_Prestart_custom(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 
@@ -308,7 +309,7 @@
 }
 
 func TestTaskRunner_EnvoyVersionHook_Prestart_skip(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 
@@ -355,7 +356,7 @@
 }
 
 func TestTaskRunner_EnvoyVersionHook_Prestart_fallback(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 
@@ -396,7 +397,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_fallback(t *testing.T) {
 }
 
 func TestTaskRunner_EnvoyVersionHook_Prestart_error(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 
diff --git a/client/allocrunner/taskrunner/errors_test.go b/client/allocrunner/taskrunner/errors_test.go
index 9b32e9cdf..15ad61990 100644
--- a/client/allocrunner/taskrunner/errors_test.go
+++ b/client/allocrunner/taskrunner/errors_test.go
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/stretchr/testify/require"
 )
@@ -14,7 +15,7 @@ var _ structs.Recoverable = (*hookError)(nil)
 // TestHookError_Recoverable asserts that a NewHookError is recoverable if
 // passed a recoverable error.
 func TestHookError_Recoverable(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Create root error
 	root := errors.New("test error")
@@ -36,7 +37,7 @@
 // TestHookError_Unrecoverable asserts that a NewHookError is not recoverable
 // unless it is passed a recoverable error.
 func TestHookError_Unrecoverable(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Create error
 	err := errors.New("test error")
diff --git a/client/allocrunner/taskrunner/logmon_hook_test.go b/client/allocrunner/taskrunner/logmon_hook_test.go
index b3a087995..8d17b7b66 100644
--- a/client/allocrunner/taskrunner/logmon_hook_test.go
+++ b/client/allocrunner/taskrunner/logmon_hook_test.go
@@ -9,6 +9,7 @@ import (
 	"testing"
 
 	plugin "github.com/hashicorp/go-plugin"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/helper/testlog"
@@ -24,7 +25,7 @@ var _ interfaces.TaskStopHook = (*logmonHook)(nil)
 // TestTaskRunner_LogmonHook_LoadReattach unit tests loading logmon reattach
 // config from persisted hook state.
 func TestTaskRunner_LogmonHook_LoadReattach(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// No hook data should return nothing
 	cfg, err := reattachConfigFromHookData(nil)
@@ -60,7 +61,7 @@
 // first time Prestart is called, reattached to on subsequent restarts, and
 // killed on Stop.
 func TestTaskRunner_LogmonHook_StartStop(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.BatchAlloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
diff --git a/client/allocrunner/taskrunner/logmon_hook_unix_test.go b/client/allocrunner/taskrunner/logmon_hook_unix_test.go
index df85c054e..03ab80ea1 100644
--- a/client/allocrunner/taskrunner/logmon_hook_unix_test.go
+++ b/client/allocrunner/taskrunner/logmon_hook_unix_test.go
@@ -13,6 +13,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/nomad/mock"
@@ -25,7 +26,7 @@ import (
 // Nomad client is restarting and asserts failing to reattach to logmon causes
 // nomad to spawn a new logmon.
 func TestTaskRunner_LogmonHook_StartCrashStop(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.BatchAlloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
@@ -94,7 +95,7 @@
 // TestTaskRunner_LogmonHook_ShutdownMidStart simulates logmon crashing while the
 // Nomad client is calling Start() and asserts that we recover and spawn a new logmon.
 func TestTaskRunner_LogmonHook_ShutdownMidStart(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.BatchAlloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
diff --git a/client/allocrunner/taskrunner/restarts/restarts_test.go b/client/allocrunner/taskrunner/restarts/restarts_test.go
index 48afe4915..f679e69f9 100644
--- a/client/allocrunner/taskrunner/restarts/restarts_test.go
+++ b/client/allocrunner/taskrunner/restarts/restarts_test.go
@@ -5,10 +5,10 @@ import (
 	"testing"
 	"time"
 
-	"github.com/stretchr/testify/require"
-
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/plugins/drivers"
+	"github.com/stretchr/testify/require"
 )
 
 func testPolicy(success bool, mode string) *structs.RestartPolicy {
@@ -34,7 +34,7 @@ func testExitResult(exit int) *drivers.ExitResult {
 }
 
 func TestClient_RestartTracker_ModeDelay(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	p := testPolicy(true, structs.RestartPolicyModeDelay)
 	rt := NewRestartTracker(p, structs.JobTypeService, nil)
 	for i := 0; i < p.Attempts; i++ {
@@ -60,7 +60,7 @@
 }
 
 func TestClient_RestartTracker_ModeFail(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	p := testPolicy(true, structs.RestartPolicyModeFail)
 	rt := NewRestartTracker(p, structs.JobTypeSystem, nil)
 	for i := 0; i < p.Attempts; i++ {
@@ -80,7 +80,7 @@
 }
 
 func TestClient_RestartTracker_NoRestartOnSuccess(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	p := testPolicy(false, structs.RestartPolicyModeDelay)
 	rt := NewRestartTracker(p, structs.JobTypeBatch, nil)
 	if state, _ := rt.SetExitResult(testExitResult(0)).GetState(); state != structs.TaskTerminated {
@@ -89,7 +89,7 @@
 }
 
 func TestClient_RestartTracker_ZeroAttempts(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	p := testPolicy(true, structs.RestartPolicyModeFail)
 	p.Attempts = 0
 
@@ -122,7 +122,7 @@
 }
 
 func TestClient_RestartTracker_TaskKilled(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	p := testPolicy(true, structs.RestartPolicyModeFail)
 	p.Attempts = 0
 	rt := NewRestartTracker(p, structs.JobTypeService, nil)
@@ -132,7 +132,7 @@
 }
 
 func TestClient_RestartTracker_RestartTriggered(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	p := testPolicy(true, structs.RestartPolicyModeFail)
 	p.Attempts = 0
 	rt := NewRestartTracker(p, structs.JobTypeService, nil)
@@ -142,7 +142,7 @@
 }
 
 func TestClient_RestartTracker_RestartTriggered_Failure(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	p := testPolicy(true, structs.RestartPolicyModeFail)
 	p.Attempts = 1
 	rt := NewRestartTracker(p, structs.JobTypeService, nil)
@@ -155,7 +155,7 @@
 }
 
 func TestClient_RestartTracker_StartError_Recoverable_Fail(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	p := testPolicy(true, structs.RestartPolicyModeFail)
 	rt := NewRestartTracker(p, structs.JobTypeSystem, nil)
 	recErr := structs.NewRecoverableError(fmt.Errorf("foo"), true)
@@ -176,7 +176,7 @@
 }
 
 func TestClient_RestartTracker_StartError_Recoverable_Delay(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	p := testPolicy(true, structs.RestartPolicyModeDelay)
 	rt := NewRestartTracker(p, structs.JobTypeSystem, nil)
 	recErr := structs.NewRecoverableError(fmt.Errorf("foo"), true)
@@ -201,7 +201,7 @@ func TestClient_RestartTracker_StartError_Recoverable_Delay(t *testing.T) {
 }
 
 func TestClient_RestartTracker_Lifecycle(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	testCase := []struct {
 		name string
diff --git a/client/allocrunner/taskrunner/script_check_hook_test.go b/client/allocrunner/taskrunner/script_check_hook_test.go
index 0d50c4fc0..eecdc6722 100644
--- a/client/allocrunner/taskrunner/script_check_hook_test.go
+++ b/client/allocrunner/taskrunner/script_check_hook_test.go
@@ -9,6 +9,7 @@ import (
 
 	"github.com/hashicorp/consul/api"
 	hclog "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces"
 	"github.com/hashicorp/nomad/client/consul"
 	"github.com/hashicorp/nomad/client/taskenv"
@@ -63,6 +64,8 @@ type heartbeat struct {
 // TestScript_Exec_Cancel asserts cancelling a script check shortcircuits
 // any running scripts.
 func TestScript_Exec_Cancel(t *testing.T) {
+	ci.Parallel(t)
+
 	exec, cancel := newBlockingScriptExec()
 	defer cancel()
 
@@ -89,7 +92,7 @@
 // TestScript_Exec_TimeoutBasic asserts a script will be killed when the
 // timeout is reached.
 func TestScript_Exec_TimeoutBasic(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	exec, cancel := newBlockingScriptExec()
 	defer cancel()
@@ -130,7 +133,7 @@
 // the timeout is reached and always set a critical status regardless of what
 // Exec returns.
 func TestScript_Exec_TimeoutCritical(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	logger := testlog.HCLogger(t)
 	hb := newFakeHeartbeater()
 	script := newScriptMock(hb, sleeperExec{}, logger, time.Hour, time.Nanosecond)
@@ -151,6 +154,8 @@
 // TestScript_Exec_Shutdown asserts a script will be executed once more
 // when told to shutdown.
 func TestScript_Exec_Shutdown(t *testing.T) {
+	ci.Parallel(t)
+
 	shutdown := make(chan struct{})
 	exec := newSimpleExec(0, nil)
 	logger := testlog.HCLogger(t)
@@ -180,6 +185,7 @@
 // TestScript_Exec_Codes asserts script exit codes are translated to their
 // corresponding Consul health check status.
 func TestScript_Exec_Codes(t *testing.T) {
+	ci.Parallel(t)
 
 	exec := newScriptedExec([]execResult{
 		{[]byte("output"), 1, nil},
@@ -224,6 +230,7 @@ func TestScript_Exec_Codes(t *testing.T) {
 // TestScript_TaskEnvInterpolation asserts that script check hooks are
 // interpolated in the same way that services are
 func TestScript_TaskEnvInterpolation(t *testing.T) {
+	ci.Parallel(t)
 	logger := testlog.HCLogger(t)
 	consulClient := consul.NewMockConsulServiceClient(t, logger)
 
@@ -288,6 +295,8 @@ func TestScript_TaskEnvInterpolation(t *testing.T) {
 }
 
 func TestScript_associated(t *testing.T) {
+	ci.Parallel(t)
+
 	t.Run("neither set", func(t *testing.T) {
 		require.False(t, new(scriptCheckHook).associated("task1", "", ""))
 	})
diff --git a/client/allocrunner/taskrunner/service_hook_test.go b/client/allocrunner/taskrunner/service_hook_test.go
index bdae6bfd0..efcf14f3e 100644
--- a/client/allocrunner/taskrunner/service_hook_test.go
+++ b/client/allocrunner/taskrunner/service_hook_test.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	"github.com/hashicorp/nomad/client/consul"
 	"github.com/hashicorp/nomad/helper/testlog"
@@ -53,6 +54,7 @@ func TestUpdate_beforePoststart(t *testing.T) {
 }
 
 func Test_serviceHook_multipleDeRegisterCall(t *testing.T) {
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 	logger := testlog.HCLogger(t)
diff --git a/client/allocrunner/taskrunner/sids_hook_test.go b/client/allocrunner/taskrunner/sids_hook_test.go
index c0adcb0e9..f475c6a2f 100644
--- a/client/allocrunner/taskrunner/sids_hook_test.go
+++ b/client/allocrunner/taskrunner/sids_hook_test.go
@@ -14,6 +14,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	consulapi "github.com/hashicorp/nomad/client/consul"
 	"github.com/hashicorp/nomad/helper"
@@ -46,7 +47,7 @@ func sidecar(task string) (string, structs.TaskKind) {
 }
 
 func TestSIDSHook_recoverToken(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	r := require.New(t)
 
 	secrets := tmpDir(t)
@@ -71,7 +72,7 @@ func TestSIDSHook_recoverToken(t *testing.T) {
 }
 
 func TestSIDSHook_recoverToken_empty(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	r := require.New(t)
 
 	secrets := tmpDir(t)
@@ -92,6 +93,7 @@ func TestSIDSHook_recoverToken_empty(t *testing.T) {
 }
 
 func TestSIDSHook_recoverToken_unReadable(t *testing.T) {
+	ci.Parallel(t)
 	// This test fails when running as root because the test case for checking
 	// the error condition when the file is unreadable fails (root can read the
 	// file even though the permissions are set to 0200).
@@ -99,7 +101,6 @@ func TestSIDSHook_recoverToken_unReadable(t *testing.T) {
 		t.Skip("test only works as non-root")
 	}
 
-	t.Parallel()
 	r := require.New(t)
 
 	secrets := tmpDir(t)
@@ -122,7 +123,7 @@ func TestSIDSHook_recoverToken_unReadable(t *testing.T) {
 }
 
 func TestSIDSHook_writeToken(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	r := require.New(t)
 
 	secrets := tmpDir(t)
@@ -139,6 +140,7 @@ func TestSIDSHook_writeToken(t *testing.T) {
 }
 
 func TestSIDSHook_writeToken_unWritable(t *testing.T) {
+	ci.Parallel(t)
 	// This test fails when running as root because the test case for checking
 	// the error condition when the file is unreadable fails (root can read the
 	// file even though the permissions are set to 0200).
@@ -146,7 +148,6 @@ func TestSIDSHook_writeToken_unWritable(t *testing.T) {
 		t.Skip("test only works as non-root")
 	}
 
-	t.Parallel()
 	r := require.New(t)
 
 	secrets := tmpDir(t)
@@ -162,7 +163,7 @@ func TestSIDSHook_writeToken_unWritable(t *testing.T) {
 }
 
 func Test_SIDSHook_writeToken_nonExistent(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	r := require.New(t)
 
 	base := tmpDir(t)
@@ -176,7 +177,7 @@ func Test_SIDSHook_writeToken_nonExistent(t *testing.T) {
 }
 
 func TestSIDSHook_deriveSIToken(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	r := require.New(t)
 
 	taskName, taskKind := sidecar("task1")
@@ -197,7 +198,7 @@ func TestSIDSHook_deriveSIToken(t *testing.T) {
 }
 
 func TestSIDSHook_deriveSIToken_timeout(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	r := require.New(t)
 
 	siClient := consulapi.NewMockServiceIdentitiesClient()
@@ -227,7 +228,7 @@
 }
 
 func TestSIDSHook_computeBackoff(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	try := func(i int, exp time.Duration) {
 		result := computeBackoff(i)
@@ -243,7 +244,7 @@
 }
 
 func TestSIDSHook_backoff(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	r := require.New(t)
 
 	ctx := context.Background()
@@ -252,7 +253,7 @@ func TestSIDSHook_backoff(t *testing.T) {
 }
 
 func TestSIDSHook_backoffKilled(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	r := require.New(t)
 
 	ctx, cancel := context.WithTimeout(context.Background(), 1)
@@ -263,6 +264,7 @@ func TestSIDSHook_backoffKilled(t *testing.T) {
 }
 
 func TestTaskRunner_DeriveSIToken_UnWritableTokenFile(t *testing.T) {
+	ci.Parallel(t)
 	// Normally this test would live in test_runner_test.go, but since it requires
 	// root and the check for root doesn't like Windows, we put this file in here
 	// for now.
@@ -274,7 +276,6 @@ func TestTaskRunner_DeriveSIToken_UnWritableTokenFile(t *testing.T) {
 		t.Skip("test only works as non-root")
 	}
 
-	t.Parallel()
 	r := require.New(t)
 
 	alloc := mock.BatchConnectAlloc()
diff --git a/client/allocrunner/taskrunner/stats_hook_test.go b/client/allocrunner/taskrunner/stats_hook_test.go
index 3294c9a05..2ab9f6569 100644
--- a/client/allocrunner/taskrunner/stats_hook_test.go
+++ b/client/allocrunner/taskrunner/stats_hook_test.go
@@ -6,6 +6,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
 	cstructs "github.com/hashicorp/nomad/client/structs"
 	"github.com/hashicorp/nomad/helper/testlog"
@@ -82,7 +83,7 @@ func (m *mockDriverStats) Called() int {
 // TestTaskRunner_StatsHook_PoststartExited asserts the stats hook starts and
 // stops.
 func TestTaskRunner_StatsHook_PoststartExited(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	logger := testlog.HCLogger(t)
@@ -114,7 +115,7 @@
 // TestTaskRunner_StatsHook_Periodic asserts the stats hook collects stats on
 // an interval.
 func TestTaskRunner_StatsHook_Periodic(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	logger := testlog.HCLogger(t)
@@ -179,7 +180,7 @@ WAITING:
 // TestTaskRunner_StatsHook_NotImplemented asserts the stats hook stops if the
 // driver returns NotImplemented.
 func TestTaskRunner_StatsHook_NotImplemented(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	logger := testlog.HCLogger(t)
@@ -208,7 +209,7 @@ func TestTaskRunner_StatsHook_NotImplemented(t *testing.T) {
 // TestTaskRunner_StatsHook_Backoff asserts that stats hook does some backoff
 // even if the driver doesn't support intervals well
 func TestTaskRunner_StatsHook_Backoff(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	logger := testlog.HCLogger(t)
 	su := newMockStatsUpdater()
diff --git a/client/allocrunner/taskrunner/task_runner_test.go b/client/allocrunner/taskrunner/task_runner_test.go
index 77741c802..a3bb1ccad 100644
--- a/client/allocrunner/taskrunner/task_runner_test.go
+++ b/client/allocrunner/taskrunner/task_runner_test.go
@@ -14,6 +14,7 @@ import (
 	"time"
 
 	"github.com/golang/snappy"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/kr/pretty"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -141,7 +142,7 @@ func runTestTaskRunner(t *testing.T, alloc *structs.Allocation, taskName string)
 }
 
 func TestTaskRunner_BuildTaskConfig_CPU_Memory(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	cases := []struct {
 		name string
@@ -209,7 +210,7 @@
 // TestTaskRunner_Stop_ExitCode asserts that the exit code is captured on a task, even if it's stopped
 func TestTaskRunner_Stop_ExitCode(t *testing.T) {
 	ctestutil.ExecCompatible(t)
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.BatchAlloc()
 	alloc.Job.TaskGroups[0].Count = 1
@@ -258,7 +259,7 @@
 // TestTaskRunner_Restore_Running asserts restoring a running task does not
 // rerun the task.
 func TestTaskRunner_Restore_Running(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	alloc := mock.BatchAlloc()
@@ -314,7 +315,7 @@
 // returned once it is running and waiting in pending along with a cleanup
 // func.
 func setupRestoreFailureTest(t *testing.T, alloc *structs.Allocation) (*TaskRunner, *Config, func()) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	task := alloc.Job.TaskGroups[0].Tasks[0]
 	task.Driver = "raw_exec"
@@ -388,6 +389,8 @@ func setupRestoreFailureTest(t *testing.T, alloc *structs.Allocation) (*TaskRunn
 // TestTaskRunner_Restore_Restart asserts restoring a dead task blocks until
 // MarkAlive is called. #1795
 func TestTaskRunner_Restore_Restart(t *testing.T) {
+	ci.Parallel(t)
+
 	newTR, conf, cleanup := setupRestoreFailureTest(t, mock.Alloc())
 	defer cleanup()
 
@@ -405,6 +408,8 @@
 // TestTaskRunner_Restore_Kill asserts restoring a dead task blocks until
 // the task is killed. #1795
 func TestTaskRunner_Restore_Kill(t *testing.T) {
+	ci.Parallel(t)
+
 	newTR, _, cleanup := setupRestoreFailureTest(t, mock.Alloc())
 	defer cleanup()
 
@@ -430,6 +435,8 @@
 // TestTaskRunner_Restore_Update asserts restoring a dead task blocks until
 // Update is called. #1795
 func TestTaskRunner_Restore_Update(t *testing.T) {
+	ci.Parallel(t)
+
 	newTR, conf, cleanup := setupRestoreFailureTest(t, mock.Alloc())
 	defer cleanup()
 
@@ -454,7 +461,7 @@
 // TestTaskRunner_Restore_System asserts restoring a dead system task does not
 // block.
 func TestTaskRunner_Restore_System(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 	alloc.Job.Type = structs.JobTypeSystem
@@ -527,7 +534,7 @@ func TestTaskRunner_Restore_System(t *testing.T) {
 // TestTaskRunner_TaskEnv_Interpolated asserts driver configurations are
 // interpolated.
 func TestTaskRunner_TaskEnv_Interpolated(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	alloc := mock.BatchAlloc()
@@ -571,7 +578,7 @@
 // not host paths.
 func TestTaskRunner_TaskEnv_Chroot(t *testing.T) {
 	ctestutil.ExecCompatible(t)
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	alloc := mock.BatchAlloc()
@@ -629,7 +636,7 @@
 // not host paths. Host env vars should also be excluded.
 func TestTaskRunner_TaskEnv_Image(t *testing.T) {
 	ctestutil.DockerCompatible(t)
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	alloc := mock.BatchAlloc()
@@ -672,7 +679,7 @@
 
 // TestTaskRunner_TaskEnv_None asserts raw_exec uses host paths and env vars.
 func TestTaskRunner_TaskEnv_None(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	alloc := mock.BatchAlloc()
@@ -715,7 +722,7 @@
 
 // Test that devices get sent to the driver
 func TestTaskRunner_DevicePropogation(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	// Create a mock alloc that has a gpu
@@ -812,7 +819,7 @@ func (h *mockEnvHook) Prestart(ctx context.Context, req *interfaces.TaskPrestart
 // hook environments set restores the environment without re-running done
 // hooks.
 func TestTaskRunner_Restore_HookEnv(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	alloc := mock.BatchAlloc()
@@ -849,7 +856,7 @@
 // This test asserts that we can recover from an "external" plugin exiting by
 // retrieving a new instance of the driver and recovering the task.
 func TestTaskRunner_RecoverFromDriverExiting(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 
 	// Create an allocation using the mock driver that exits simulating the
@@ -922,7 +929,7 @@
 // TestTaskRunner_ShutdownDelay asserts services are removed from Consul
 // ${shutdown_delay} seconds before killing the process.
 func TestTaskRunner_ShutdownDelay(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
@@ -1006,7 +1013,7 @@ WAIT:
 // Consul and tasks are killed without waiting for ${shutdown_delay}
 // when the alloc has the NoShutdownDelay transition flag set.
 func TestTaskRunner_NoShutdownDelay(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// don't set this too high so that we don't block the test runner
 	// on shutting down the agent if the test fails
@@ -1081,7 +1088,7 @@
 // TestTaskRunner_Dispatch_Payload asserts that a dispatch job runs and the
 // payload was written to disk.
 func TestTaskRunner_Dispatch_Payload(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.BatchAlloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
@@ -1127,7 +1134,7 @@
 // TestTaskRunner_SignalFailure asserts that signal errors are properly
 // propagated from the driver to TaskRunner.
 func TestTaskRunner_SignalFailure(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
@@ -1149,7 +1156,7 @@
 // TestTaskRunner_RestartTask asserts that restarting a task works and emits a
 // Restarting event.
 func TestTaskRunner_RestartTask(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
@@ -1201,7 +1208,7 @@
 // TestTaskRunner_CheckWatcher_Restart asserts that when enabled an unhealthy
 // Consul check will cause a task to restart following restart policy rules.
 func TestTaskRunner_CheckWatcher_Restart(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 
@@ -1319,7 +1326,7 @@ func useMockEnvoyBootstrapHook(tr *TaskRunner) {
 // TestTaskRunner_BlockForSIDSToken asserts tasks do not start until a Consul
 // Service Identity token is derived.
 func TestTaskRunner_BlockForSIDSToken(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	r := require.New(t)
 
 	alloc := mock.BatchConnectAlloc()
@@ -1387,7 +1394,7 @@ func TestTaskRunner_BlockForSIDSToken(t *testing.T) {
 }
 
 func TestTaskRunner_DeriveSIToken_Retry(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	r := require.New(t)
 
 	alloc := mock.BatchConnectAlloc()
@@ -1446,7 +1453,7 @@
 // TestTaskRunner_DeriveSIToken_Unrecoverable asserts that an unrecoverable error
 // from deriving a service identity token will fail a task.
 func TestTaskRunner_DeriveSIToken_Unrecoverable(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	r := require.New(t)
 
 	alloc := mock.BatchConnectAlloc()
@@ -1503,7 +1510,7 @@
 // TestTaskRunner_BlockForVaultToken asserts tasks do not start until a vault token
 // is derived.
 func TestTaskRunner_BlockForVaultToken(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.BatchAlloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
@@ -1581,7 +1588,7 @@
 // returned when deriving a vault token a task will continue to block while
 // it's retried.
 func TestTaskRunner_DeriveToken_Retry(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	alloc := mock.BatchAlloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
 	task.Vault = &structs.Vault{Policies: []string{"default"}}
@@ -1645,7 +1652,7 @@
 // TestTaskRunner_DeriveToken_Unrecoverable asserts that an unrecoverable error
 // from deriving a vault token will fail a task.
 func TestTaskRunner_DeriveToken_Unrecoverable(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Use a batch job with no restarts
 	alloc := mock.BatchAlloc()
@@ -1690,7 +1697,7 @@
 // TestTaskRunner_Download_ChrootExec asserts that downloaded artifacts may be
 // executed in a chroot.
 func TestTaskRunner_Download_ChrootExec(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ctestutil.ExecCompatible(t)
 
 	ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir("."))))
@@ -1731,7 +1738,7 @@
 // TestTaskRunner_Download_Exec asserts that downloaded artifacts may be
 // executed in a driver without filesystem isolation.
 func TestTaskRunner_Download_RawExec(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir("."))))
 	defer ts.Close()
@@ -1771,7 +1778,7 @@
 // TestTaskRunner_Download_List asserts that multiple artificats are downloaded
 // before a task is run.
 func TestTaskRunner_Download_List(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	ts := httptest.NewServer(http.FileServer(http.Dir(filepath.Dir("."))))
 	defer ts.Close()
@@ -1820,7 +1827,7 @@
 // TestTaskRunner_Download_Retries asserts that failed artifact downloads are
 // retried according to the task's restart policy.
 func TestTaskRunner_Download_Retries(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Create an allocation that has a task with bad artifacts.
 	alloc := mock.BatchAlloc()
@@ -1866,7 +1873,7 @@
 // TestTaskRunner_DriverNetwork asserts that a driver's network is properly
 // used in services and checks.
 func TestTaskRunner_DriverNetwork(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.Alloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
@@ -2002,7 +2009,7 @@
 // TestTaskRunner_RestartSignalTask_NotRunning asserts resilience to failures
 // when a restart or signal is triggered and the task is not running.
 func TestTaskRunner_RestartSignalTask_NotRunning(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.BatchAlloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
@@ -2069,7 +2076,7 @@
 // TestTaskRunner_Run_RecoverableStartError asserts tasks are restarted if they
 // return a recoverable error from StartTask.
 func TestTaskRunner_Run_RecoverableStartError(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	alloc := mock.BatchAlloc()
 	task := alloc.Job.TaskGroups[0].Tasks[0]
@@ -2111,7 +2118,7 @@
 // TestTaskRunner_Template_Artifact asserts that tasks can use artifacts as templates.
func TestTaskRunner_Template_Artifact(t *testing.T) { - t.Parallel() + ci.Parallel(t) ts := httptest.NewServer(http.FileServer(http.Dir("."))) defer ts.Close() @@ -2171,7 +2178,7 @@ func TestTaskRunner_Template_Artifact(t *testing.T) { // that fails to render in PreStart can gracefully be shut down by // either killCtx or shutdownCtx func TestTaskRunner_Template_BlockingPreStart(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -2233,7 +2240,7 @@ func TestTaskRunner_Template_BlockingPreStart(t *testing.T) { // TestTaskRunner_Template_NewVaultToken asserts that a new vault token is // created when rendering a template and that it is revoked on alloc completion func TestTaskRunner_Template_NewVaultToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -2312,7 +2319,7 @@ func TestTaskRunner_Template_NewVaultToken(t *testing.T) { // TestTaskRunner_VaultManager_Restart asserts that the alloc is restarted when the alloc // derived vault token expires and the task is configured with the restart change mode func TestTaskRunner_VaultManager_Restart(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -2385,7 +2392,7 @@ func TestTaskRunner_VaultManager_Restart(t *testing.T) { // TestTaskRunner_VaultManager_Signal asserts that the alloc is signalled when the alloc // derived vault token expires and the task is configured with the signal change mode func TestTaskRunner_VaultManager_Signal(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] @@ -2449,7 +2456,7 @@ func TestTaskRunner_VaultManager_Signal(t *testing.T) { // TestTaskRunner_UnregisterConsul_Retries asserts a task is unregistered from // Consul when waiting to be retried. func TestTaskRunner_UnregisterConsul_Retries(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() // Make the restart policy try one ctx.update @@ -2509,7 +2516,7 @@ func testWaitForTaskToStart(t *testing.T, tr *TaskRunner) { // TestTaskRunner_BaseLabels tests that the base labels for the task metrics // are set appropriately.
func TestTaskRunner_BaseLabels(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.BatchAlloc() diff --git a/client/allocrunner/taskrunner/tasklet_test.go b/client/allocrunner/taskrunner/tasklet_test.go index 4dc8f36f2..ea0cf2d7d 100644 --- a/client/allocrunner/taskrunner/tasklet_test.go +++ b/client/allocrunner/taskrunner/tasklet_test.go @@ -10,6 +10,7 @@ import ( "time" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/testtask" @@ -23,6 +24,8 @@ func TestMain(m *testing.M) { } func TestTasklet_Exec_HappyPath(t *testing.T) { + ci.Parallel(t) + results := []execResult{ {[]byte("output"), 0, nil}, {[]byte("output"), 1, nil}, @@ -53,6 +56,8 @@ func TestTasklet_Exec_HappyPath(t *testing.T) { // TestTasklet_Exec_Cancel asserts cancelling a tasklet short-circuits // any running executions of the tasklet func TestTasklet_Exec_Cancel(t *testing.T) { + ci.Parallel(t) + exec, cancel := newBlockingScriptExec() defer cancel() tm := newTaskletMock(exec, testlog.HCLogger(t), time.Hour, time.Hour) @@ -85,7 +90,7 @@ func TestTasklet_Exec_Cancel(t *testing.T) { // TestTasklet_Exec_Timeout asserts a tasklet script will be killed // when the timeout is reached. func TestTasklet_Exec_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) exec, cancel := newBlockingScriptExec() defer cancel() @@ -125,6 +130,8 @@ func TestTasklet_Exec_Timeout(t *testing.T) { // TestTasklet_Exec_Shutdown asserts a script will be executed once more // when told to shut down. func TestTasklet_Exec_Shutdown(t *testing.T) { + ci.Parallel(t) + exec := newSimpleExec(0, nil) shutdown := make(chan struct{}) tm := newTaskletMock(exec, testlog.HCLogger(t), time.Hour, 3*time.Second) diff --git a/client/allocrunner/taskrunner/template/template_test.go b/client/allocrunner/taskrunner/template/template_test.go index dcd9a8eb0..3b943d864 100644 --- a/client/allocrunner/taskrunner/template/template_test.go +++ b/client/allocrunner/taskrunner/template/template_test.go @@ -18,6 +18,7 @@ import ( templateconfig "github.com/hashicorp/consul-template/config" ctestutil "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/taskenv" @@ -230,7 +231,7 @@ func (h *testHarness) stop() { } func TestTaskTemplateManager_InvalidConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) hooks := NewMockTaskHooks() clientConfig := &config.Config{Region: "global"} taskDir := "foo" @@ -371,7 +372,7 @@ func TestTaskTemplateManager_InvalidConfig(t *testing.T) { } func TestTaskTemplateManager_HostPath(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately and write it to a tmp file f, err := ioutil.TempFile("", "") if err != nil { @@ -463,7 +464,7 @@ func TestTaskTemplateManager_HostPath(t *testing.T) { } func TestTaskTemplateManager_Unblock_Static(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately content := "hello, world!" file := "my.tmpl" @@ -497,7 +498,7 @@ func TestTaskTemplateManager_Unblock_Static(t *testing.T) { } func TestTaskTemplateManager_Permissions(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately content := "hello, world!"
file := "my.tmpl" @@ -532,7 +533,7 @@ func TestTaskTemplateManager_Permissions(t *testing.T) { } func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately content := `Hello Nomad Task: {{env "NOMAD_TASK_NAME"}}` expected := fmt.Sprintf("Hello Nomad Task: %s", TestTaskName) @@ -567,7 +568,7 @@ func TestTaskTemplateManager_Unblock_Static_NomadEnv(t *testing.T) { } func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately content := "hello, world!" file := "my.tmpl" @@ -608,7 +609,7 @@ func TestTaskTemplateManager_Unblock_Static_AlreadyRendered(t *testing.T) { } func TestTaskTemplateManager_Unblock_Consul(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render based on a key in Consul key := "foo" content := "barbaz" @@ -654,7 +655,7 @@ func TestTaskTemplateManager_Unblock_Consul(t *testing.T) { } func TestTaskTemplateManager_Unblock_Vault(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Make a template that will render based on a key in Vault vaultPath := "secret/data/password" @@ -704,7 +705,7 @@ func TestTaskTemplateManager_Unblock_Vault(t *testing.T) { } func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render immediately staticContent := "hello, world!" staticFile := "my.tmpl" @@ -772,7 +773,7 @@ func TestTaskTemplateManager_Unblock_Multi_Template(t *testing.T) { // TestTaskTemplateManager_FirstRender_Restored tests that a task that's been // restored renders and triggers its change mode if the template has changed func TestTaskTemplateManager_FirstRender_Restored(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Make a template that will render based on a key in Vault vaultPath := "secret/data/password" @@ -869,7 +870,7 @@ OUTER: } func TestTaskTemplateManager_Rerender_Noop(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will render based on a key in Consul key := "foo" content1 := "bar" @@ -938,7 +939,7 @@ func TestTaskTemplateManager_Rerender_Noop(t *testing.T) { } func TestTaskTemplateManager_Rerender_Signal(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that renders based on a key in Consul and sends SIGALRM key1 := "foo" content1_1 := "bar" @@ -1038,7 +1039,7 @@ OUTER: } func TestTaskTemplateManager_Rerender_Restart(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that renders based on a key in Consul and sends restart key1 := "bam" content1_1 := "cat" @@ -1102,7 +1103,7 @@ OUTER: } func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that will have its destination interpolated content := "hello, world!" file := "${node.unique.id}.tmpl" @@ -1137,7 +1138,7 @@ func TestTaskTemplateManager_Interpolate_Destination(t *testing.T) { } func TestTaskTemplateManager_Signal_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Make a template that renders based on a key in Consul and sends SIGALRM @@ -1189,7 +1190,7 @@ func TestTaskTemplateManager_Signal_Error(t *testing.T) { // process environment variables. nomad host process environment variables // are to be treated the same as not found environment variables. 
func TestTaskTemplateManager_FiltersEnvVars(t *testing.T) { - t.Parallel() + ci.Parallel(t) defer os.Setenv("NOMAD_TASK_NAME", os.Getenv("NOMAD_TASK_NAME")) os.Setenv("NOMAD_TASK_NAME", "should be overridden by task") @@ -1233,7 +1234,7 @@ TEST_ENV_NOT_FOUND: {{env "` + testenv + `_NOTFOUND" }}` // TestTaskTemplateManager_Env asserts templates with the env flag set are read // into the task's environment. func TestTaskTemplateManager_Env(t *testing.T) { - t.Parallel() + ci.Parallel(t) template := &structs.Template{ EmbeddedTmpl: ` # Comment lines are ok @@ -1276,7 +1277,7 @@ ANYTHING_goes=Spaces are=ok! // TestTaskTemplateManager_Env_Missing asserts the core env // template processing function returns errors when files don't exist func TestTaskTemplateManager_Env_Missing(t *testing.T) { - t.Parallel() + ci.Parallel(t) d, err := ioutil.TempDir("", "ct_env_missing") if err != nil { t.Fatalf("err: %v", err) @@ -1311,7 +1312,7 @@ func TestTaskTemplateManager_Env_Missing(t *testing.T) { // TestTaskTemplateManager_Env_InterpolatedDest asserts the core env // template processing function handles interpolated destinations func TestTaskTemplateManager_Env_InterpolatedDest(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d, err := ioutil.TempDir("", "ct_env_interpolated") @@ -1352,7 +1353,7 @@ func TestTaskTemplateManager_Env_InterpolatedDest(t *testing.T) { // template processing function returns combined env vars from multiple // templates correctly. func TestTaskTemplateManager_Env_Multi(t *testing.T) { - t.Parallel() + ci.Parallel(t) d, err := ioutil.TempDir("", "ct_env_missing") if err != nil { t.Fatalf("err: %v", err) @@ -1398,7 +1399,7 @@ func TestTaskTemplateManager_Env_Multi(t *testing.T) { } func TestTaskTemplateManager_Rerender_Env(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make a template that renders based on a key in Consul and sends restart key1 := "bam" key2 := "bar" @@ -1480,7 +1481,7 @@ OUTER: // TestTaskTemplateManager_Config_ServerName asserts the tls_server_name // setting is propagated to consul-template's configuration. See #2776 func TestTaskTemplateManager_Config_ServerName(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := config.DefaultConfig() c.VaultConfig = &sconfig.VaultConfig{ Enabled: helper.BoolToPtr(true), @@ -1504,7 +1505,7 @@ func TestTaskTemplateManager_Config_ServerName(t *testing.T) { // TestTaskTemplateManager_Config_VaultNamespace asserts the Vault namespace setting is // propagated to consul-template's configuration. func TestTaskTemplateManager_Config_VaultNamespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) testNS := "test-namespace" @@ -1535,7 +1536,7 @@ func TestTaskTemplateManager_Config_VaultNamespace(t *testing.T) { // TestTaskTemplateManager_Config_VaultNamespace_TaskOverride asserts a task-level Vault namespace // setting is propagated to consul-template's configuration. func TestTaskTemplateManager_Config_VaultNamespace_TaskOverride(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) testNS := "test-namespace" @@ -1570,7 +1571,7 @@ func TestTaskTemplateManager_Config_VaultNamespace_TaskOverride(t *testing.T) { // TestTaskTemplateManager_Escapes asserts that when sandboxing is enabled // interpolated paths are not incorrectly treated as escaping the alloc dir.
func TestTaskTemplateManager_Escapes(t *testing.T) { - t.Parallel() + ci.Parallel(t) clientConf := config.DefaultConfig() require.False(t, clientConf.TemplateConfig.DisableSandbox, "expected sandbox to be disabled") @@ -1822,7 +1823,7 @@ func TestTaskTemplateManager_BlockedEvents(t *testing.T) { // then subsequently sets 0, 1, 2 keys // then asserts that templates are still blocked on 3 and 4, // and checks that we got the relevant task events - t.Parallel() + ci.Parallel(t) require := require.New(t) // Make a template that will render based on a key in Consul @@ -1920,7 +1921,7 @@ WAIT_LOOP: // configuration is accurately mapped from the client to the TaskTemplateManager // and that any operator defined boundaries are enforced. func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { - t.Parallel() + ci.Parallel(t) testNS := "test-namespace" @@ -2126,7 +2127,7 @@ func TestTaskTemplateManager_ClientTemplateConfig_Set(t *testing.T) { // configuration is accurately mapped from the template to the TaskTemplateManager's // template config. func TestTaskTemplateManager_Template_Wait_Set(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := config.DefaultConfig() c.Node = mock.Node() diff --git a/client/allocrunner/taskrunner/validate_hook_test.go b/client/allocrunner/taskrunner/validate_hook_test.go index e71301456..d346b6c47 100644 --- a/client/allocrunner/taskrunner/validate_hook_test.go +++ b/client/allocrunner/taskrunner/validate_hook_test.go @@ -3,6 +3,7 @@ package taskrunner import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/nomad/structs" @@ -10,7 +11,7 @@ import ( ) func TestTaskRunner_Validate_UserEnforcement(t *testing.T) { - t.Parallel() + ci.Parallel(t) taskEnv := taskenv.NewEmptyBuilder().Build() conf := config.DefaultConfig() @@ -35,7 +36,7 @@ func TestTaskRunner_Validate_UserEnforcement(t *testing.T) { } func TestTaskRunner_Validate_ServiceName(t *testing.T) { - t.Parallel() + ci.Parallel(t) builder := taskenv.NewEmptyBuilder() conf := config.DefaultConfig() diff --git a/client/allocrunner/taskrunner/volume_hook_test.go b/client/allocrunner/taskrunner/volume_hook_test.go index 951e4d7a7..0bfff5edb 100644 --- a/client/allocrunner/taskrunner/volume_hook_test.go +++ b/client/allocrunner/taskrunner/volume_hook_test.go @@ -3,6 +3,7 @@ package taskrunner import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/pluginmanager/csimanager" cstructs "github.com/hashicorp/nomad/client/structs" @@ -16,6 +17,8 @@ import ( ) func TestVolumeHook_PartitionMountsByVolume_Works(t *testing.T) { + ci.Parallel(t) + mounts := []*structs.VolumeMount{ { Volume: "foo", @@ -68,6 +71,8 @@ func TestVolumeHook_PartitionMountsByVolume_Works(t *testing.T) { } func TestVolumeHook_prepareCSIVolumes(t *testing.T) { + ci.Parallel(t) req := &interfaces.TaskPrestartRequest{ Task: &structs.Task{ @@ -157,6 +161,7 @@ func TestVolumeHook_prepareCSIVolumes(t *testing.T) { } func TestVolumeHook_Interpolation(t *testing.T) { + ci.Parallel(t) alloc := mock.Alloc() task := alloc.Job.TaskGroups[0].Tasks[0] diff --git a/client/allocwatcher/alloc_watcher_test.go b/client/allocwatcher/alloc_watcher_test.go index 4c4b63702..4aa36433b 100644 --- a/client/allocwatcher/alloc_watcher_test.go +++ b/client/allocwatcher/alloc_watcher_test.go @@ -13,6 +13,7 @@ import ( "time" hclog "github.com/hashicorp/go-hclog" +
"github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/testlog" @@ -88,6 +89,8 @@ func newConfig(t *testing.T) (Config, func()) { // TestPrevAlloc_Noop asserts that when no previous allocation is set the noop // implementation is returned that does not block or perform migrations. func TestPrevAlloc_Noop(t *testing.T) { + ci.Parallel(t) + conf, cleanup := newConfig(t) defer cleanup() @@ -114,7 +117,8 @@ func TestPrevAlloc_Noop(t *testing.T) { // TestPrevAlloc_LocalPrevAlloc_Block asserts that when a previous alloc runner // is set a localPrevAlloc will block on it. func TestPrevAlloc_LocalPrevAlloc_Block(t *testing.T) { - t.Parallel() + ci.Parallel(t) + conf, cleanup := newConfig(t) defer cleanup() @@ -181,7 +185,8 @@ func TestPrevAlloc_LocalPrevAlloc_Block(t *testing.T) { // TestPrevAlloc_LocalPrevAlloc_Terminated asserts that when a previous alloc // runner has already terminated the watcher does not block on the broadcaster. func TestPrevAlloc_LocalPrevAlloc_Terminated(t *testing.T) { - t.Parallel() + ci.Parallel(t) + conf, cleanup := newConfig(t) defer cleanup() @@ -201,7 +206,8 @@ func TestPrevAlloc_LocalPrevAlloc_Terminated(t *testing.T) { // streaming a tar cause the migration to be cancelled and no files are written // (migrations are atomic). func TestPrevAlloc_StreamAllocDir_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) + dest, err := ioutil.TempDir("", "nomadtest-") if err != nil { t.Fatalf("err: %v", err) diff --git a/client/allocwatcher/alloc_watcher_unix_test.go b/client/allocwatcher/alloc_watcher_unix_test.go index 7967a69f0..79f8a2979 100644 --- a/client/allocwatcher/alloc_watcher_unix_test.go +++ b/client/allocwatcher/alloc_watcher_unix_test.go @@ -15,6 +15,7 @@ import ( "syscall" "testing" + "github.com/hashicorp/nomad/ci" ctestutil "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/testlog" ) @@ -22,8 +23,9 @@ import ( // TestPrevAlloc_StreamAllocDir_Ok asserts that streaming a tar to an alloc dir // works. 
func TestPrevAlloc_StreamAllocDir_Ok(t *testing.T) { + ci.Parallel(t) ctestutil.RequireRoot(t) - t.Parallel() + dir, err := ioutil.TempDir("", "") if err != nil { t.Fatalf("err: %v", err) diff --git a/client/allocwatcher/group_alloc_watcher_test.go b/client/allocwatcher/group_alloc_watcher_test.go index f992f3410..79eeaf07e 100644 --- a/client/allocwatcher/group_alloc_watcher_test.go +++ b/client/allocwatcher/group_alloc_watcher_test.go @@ -5,6 +5,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" ) // TestPrevAlloc_GroupPrevAllocWatcher_Block asserts that when prevAllocs // are set a groupPrevAllocWatcher will block on them func TestPrevAlloc_GroupPrevAllocWatcher_Block(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf, cleanup := newConfig(t) defer cleanup() @@ -80,7 +81,7 @@ func TestPrevAlloc_GroupPrevAllocWatcher_Block(t *testing.T) { // multiple prevAllocs are set a groupPrevAllocWatcher will block until all // are complete func TestPrevAlloc_GroupPrevAllocWatcher_BlockMulti(t *testing.T) { - t.Parallel() + ci.Parallel(t) + conf1, cleanup1 := newConfig(t) defer cleanup1() conf1.Alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{ diff --git a/client/client_stats_endpoint_test.go b/client/client_stats_endpoint_test.go index 9802d84e6..03f6b7717 100644 --- a/client/client_stats_endpoint_test.go +++ b/client/client_stats_endpoint_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad/mock" @@ -12,8 +13,9 @@ import ( ) func TestClientStats_Stats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + client, cleanup := TestClient(t, nil) defer cleanup() @@ -26,7 +28,7 @@ func TestClientStats_Stats(t *testing.T) { } func TestClientStats_Stats_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) server, addr, root, cleanupS := testACLServer(t, nil) diff --git a/client/client_test.go b/client/client_test.go index d27ab0ed0..bb99e6cc9 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -12,6 +12,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" "github.com/hashicorp/nomad/client/config" consulApi "github.com/hashicorp/nomad/client/consul" @@ -45,7 +46,8 @@ func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string, fu } func TestClient_StartStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) + client, cleanup := TestClient(t, nil) defer cleanup() if err := client.Shutdown(); err != nil { @@ -56,7 +58,7 @@ func TestClient_StartStop(t *testing.T) { // Certain labels for metrics are dependent on client initial setup.
This tests // that the client has properly initialized before we assign values to labels func TestClient_BaseLabels(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) client, cleanup := TestClient(t, nil) @@ -81,7 +83,7 @@ func TestClient_BaseLabels(t *testing.T) { } func TestClient_RPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) _, addr, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -102,7 +104,7 @@ func TestClient_RPC(t *testing.T) { } func TestClient_RPC_FireRetryWatchers(t *testing.T) { - t.Parallel() + ci.Parallel(t) _, addr, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -131,7 +133,7 @@ func TestClient_RPC_FireRetryWatchers(t *testing.T) { } func TestClient_RPC_Passthrough(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -152,7 +154,7 @@ func TestClient_RPC_Passthrough(t *testing.T) { } func TestClient_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, cleanup := TestClient(t, nil) defer cleanup() @@ -175,7 +177,7 @@ func TestClient_Fingerprint(t *testing.T) { // TestClient_Fingerprint_Periodic asserts that driver node attributes are // periodically fingerprinted. func TestClient_Fingerprint_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) c1, cleanup := TestClient(t, func(c *config.Config) { confs := []*nconfig.PluginConfig{ @@ -253,7 +255,7 @@ func TestClient_Fingerprint_Periodic(t *testing.T) { // TestClient_MixedTLS asserts that when a server is running with TLS enabled // it will reject any RPC connections from clients that lack TLS. See #2525 func TestClient_MixedTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) + const ( cafile = "../helper/tlsutil/testdata/ca.pem" foocert = "../helper/tlsutil/testdata/nomad-foo.pem" @@ -300,7 +303,7 @@ func TestClient_MixedTLS(t *testing.T) { // enabled -- but their certificates are signed by different CAs -- they're // unable to communicate. func TestClient_BadTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../helper/tlsutil/testdata/ca.pem" @@ -356,7 +359,7 @@ func TestClient_BadTLS(t *testing.T) { } func TestClient_Register(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -389,7 +392,7 @@ func TestClient_Register(t *testing.T) { } func TestClient_Heartbeat(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, func(c *nomad.Config) { c.MinHeartbeatTTL = 50 * time.Millisecond @@ -426,7 +429,7 @@ func TestClient_Heartbeat(t *testing.T) { // TestClient_UpdateAllocStatus asserts that once running allocations send updates to // the server.
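A few hunks back, TestPrevAlloc_StreamAllocDir_Ok received slightly more than the usual swap: the new ci.Parallel(t) call moved to the top of the test body, ahead of the ctestutil.RequireRoot(t) guard, instead of where the old t.Parallel() sat. Reduced to its shape, and assuming RequireRoot skips the test when not running as root, the convention is:

package allocwatcher

import (
    "testing"

    "github.com/hashicorp/nomad/ci"
    ctestutil "github.com/hashicorp/nomad/client/testutil"
)

// Hypothetical reduction of that hunk: the parallel marker comes first,
// then the environment guard, then the test body.
func TestGuardOrdering_Sketch(t *testing.T) {
    ci.Parallel(t)           // register for parallel execution first
    ctestutil.RequireRoot(t) // assumed to skip when not running as root
    // test body elided in this sketch
}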
func TestClient_UpdateAllocStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -452,7 +455,7 @@ func TestClient_UpdateAllocStatus(t *testing.T) { } func TestClient_WatchAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -552,7 +555,7 @@ func waitTilNodeReady(client *Client, t *testing.T) { } func TestClient_SaveRestoreState(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -653,7 +656,7 @@ func TestClient_SaveRestoreState(t *testing.T) { } func TestClient_AddAllocError(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, _, cleanupS1 := testServer(t, nil) @@ -729,7 +732,8 @@ func TestClient_AddAllocError(t *testing.T) { } func TestClient_Init(t *testing.T) { - t.Parallel() + ci.Parallel(t) + dir, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -759,7 +763,7 @@ func TestClient_Init(t *testing.T) { } func TestClient_BlockedAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := testServer(t, nil) defer cleanupS1() @@ -872,7 +876,7 @@ func TestClient_BlockedAllocations(t *testing.T) { } func TestClient_ValidateMigrateToken_ValidToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) c, cleanup := TestClient(t, func(c *config.Config) { @@ -888,7 +892,7 @@ func TestClient_ValidateMigrateToken_ValidToken(t *testing.T) { } func TestClient_ValidateMigrateToken_InvalidToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) c, cleanup := TestClient(t, func(c *config.Config) { @@ -904,7 +908,7 @@ func TestClient_ValidateMigrateToken_InvalidToken(t *testing.T) { } func TestClient_ValidateMigrateToken_ACLDisabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) c, cleanup := TestClient(t, func(c *config.Config) {}) @@ -914,7 +918,7 @@ func TestClient_ValidateMigrateToken_ACLDisabled(t *testing.T) { } func TestClient_ReloadTLS_UpgradePlaintextToTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, addr, cleanupS1 := testServer(t, func(c *nomad.Config) { @@ -990,7 +994,7 @@ func TestClient_ReloadTLS_UpgradePlaintextToTLS(t *testing.T) { } func TestClient_ReloadTLS_DowngradeTLSToPlaintext(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, addr, cleanupS1 := testServer(t, func(c *nomad.Config) { @@ -1067,7 +1071,8 @@ func TestClient_ReloadTLS_DowngradeTLSToPlaintext(t *testing.T) { // TestClient_ServerList tests client methods that interact with the internal // nomad server list. func TestClient_ServerList(t *testing.T) { - t.Parallel() + ci.Parallel(t) + client, cleanup := TestClient(t, func(c *config.Config) {}) defer cleanup() @@ -1090,7 +1095,8 @@ func TestClient_ServerList(t *testing.T) { } func TestClient_UpdateNodeFromDevicesAccumulates(t *testing.T) { - t.Parallel() + ci.Parallel(t) + client, cleanup := TestClient(t, func(c *config.Config) {}) defer cleanup() @@ -1188,7 +1194,7 @@ func TestClient_UpdateNodeFromDevicesAccumulates(t *testing.T) { // TestClient_UpdateNodeFromFingerprintKeepsConfig asserts manually configured // network interfaces take precedence over fingerprinted ones. 
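Several suites later in the diff (the CSI controller tests in client/csi_endpoint_test.go, the CNI and network fingerprint cases) are table-driven; there ci.Parallel(t) is called once at the top of the parent test rather than inside each t.Run case. A self-contained sketch of that placement, with made-up case data:

package example

import (
    "testing"

    "github.com/hashicorp/nomad/ci"
)

// TestTableDriven_Sketch is a hypothetical example: ci.Parallel marks
// the parent test once, and the subtests run within that single
// parallel registration.
func TestTableDriven_Sketch(t *testing.T) {
    ci.Parallel(t)

    cases := []struct {
        name string
        in   int
        want int
    }{
        {name: "zero", in: 0, want: 0},
        {name: "double", in: 2, want: 4},
    }

    for _, tc := range cases {
        t.Run(tc.name, func(t *testing.T) {
            if got := tc.in * 2; got != tc.want {
                t.Fatalf("got %d, want %d", got, tc.want)
            }
        })
    }
}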
func TestClient_UpdateNodeFromFingerprintKeepsConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" { t.Skip("assertions assume linux platform") } @@ -1266,7 +1272,7 @@ func TestClient_UpdateNodeFromFingerprintKeepsConfig(t *testing.T) { // Support multiple IP addresses (ipv4 vs. 6, e.g.) on the configured network interface func Test_UpdateNodeFromFingerprintMultiIP(t *testing.T) { - t.Parallel() + ci.Parallel(t) var dev string switch runtime.GOOS { @@ -1304,6 +1310,8 @@ func Test_UpdateNodeFromFingerprintMultiIP(t *testing.T) { } func TestClient_computeAllocatedDeviceStats(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) c := &Client{logger: logger} @@ -1400,8 +1408,9 @@ func TestClient_computeAllocatedDeviceStats(t *testing.T) { } func TestClient_getAllocatedResources(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + client, cleanup := TestClient(t, nil) defer cleanup() @@ -1515,7 +1524,8 @@ func TestClient_getAllocatedResources(t *testing.T) { } func TestClient_updateNodeFromDriverUpdatesAll(t *testing.T) { - t.Parallel() + ci.Parallel(t) + client, cleanup := TestClient(t, nil) defer cleanup() @@ -1598,7 +1608,7 @@ func TestClient_updateNodeFromDriverUpdatesAll(t *testing.T) { // COMPAT(0.12): remove once upgrading from 0.9.5 is no longer supported func TestClient_hasLocalState(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, cleanup := TestClient(t, nil) defer cleanup() @@ -1638,7 +1648,7 @@ func TestClient_hasLocalState(t *testing.T) { } func Test_verifiedTasks(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) // produce a result and check against expected tasks and/or error output diff --git a/client/config/config_test.go b/client/config/config_test.go index bef9995c6..88f5bd1b7 100644 --- a/client/config/config_test.go +++ b/client/config/config_test.go @@ -5,11 +5,14 @@ import ( "time" "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/require" ) func TestConfigRead(t *testing.T) { + ci.Parallel(t) + config := Config{} actual := config.Read("cake") @@ -26,6 +29,8 @@ func TestConfigRead(t *testing.T) { } func TestConfigReadDefault(t *testing.T) { + ci.Parallel(t) + config := Config{} expected := "vanilla" @@ -50,6 +55,8 @@ func mockWaitConfig() *WaitConfig { } func TestWaitConfig_Copy(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Wait *WaitConfig @@ -95,6 +102,8 @@ func TestWaitConfig_Copy(t *testing.T) { } func TestWaitConfig_IsEmpty(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Wait *WaitConfig @@ -127,6 +136,8 @@ func TestWaitConfig_IsEmpty(t *testing.T) { } func TestWaitConfig_IsEqual(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Wait *WaitConfig @@ -170,6 +181,8 @@ func TestWaitConfig_IsEqual(t *testing.T) { } func TestWaitConfig_IsValid(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Retry *WaitConfig @@ -223,6 +236,8 @@ func TestWaitConfig_IsValid(t *testing.T) { } func TestWaitConfig_Merge(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Target *WaitConfig @@ -280,6 +295,8 @@ func TestWaitConfig_Merge(t *testing.T) { } func TestWaitConfig_ToConsulTemplate(t *testing.T) { + ci.Parallel(t) + expected := config.WaitConfig{ Enabled: helper.BoolToPtr(true), Min: helper.TimeToPtr(5 * time.Second), @@ -307,6 +324,8 @@ func mockRetryConfig() *RetryConfig { } } func TestRetryConfig_Copy(t 
*testing.T) { + ci.Parallel(t) + cases := []struct { Name string Retry *RetryConfig @@ -382,6 +401,8 @@ func TestRetryConfig_Copy(t *testing.T) { } func TestRetryConfig_IsEmpty(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Retry *RetryConfig @@ -414,6 +435,8 @@ func TestRetryConfig_IsEmpty(t *testing.T) { } func TestRetryConfig_IsEqual(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Retry *RetryConfig @@ -502,6 +525,8 @@ func TestRetryConfig_IsEqual(t *testing.T) { } func TestRetryConfig_IsValid(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Retry *RetryConfig @@ -570,6 +595,8 @@ func TestRetryConfig_IsValid(t *testing.T) { } func TestRetryConfig_Merge(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Target *RetryConfig @@ -645,6 +672,8 @@ func TestRetryConfig_Merge(t *testing.T) { } func TestRetryConfig_ToConsulTemplate(t *testing.T) { + ci.Parallel(t) + expected := config.RetryConfig{ Enabled: helper.BoolToPtr(true), Attempts: helper.IntToPtr(5), diff --git a/client/consul/identities_test.go b/client/consul/identities_test.go index 0ac7ac275..b41f3520e 100644 --- a/client/consul/identities_test.go +++ b/client/consul/identities_test.go @@ -4,12 +4,15 @@ import ( "errors" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestSI_DeriveTokens(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) dFunc := func(alloc *structs.Allocation, taskNames []string) (map[string]string, error) { return map[string]string{"a": "b"}, nil @@ -21,6 +24,8 @@ func TestSI_DeriveTokens(t *testing.T) { } func TestSI_DeriveTokens_error(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) dFunc := func(alloc *structs.Allocation, taskNames []string) (map[string]string, error) { return nil, errors.New("some failure") diff --git a/client/csi_endpoint_test.go b/client/csi_endpoint_test.go index 5e8382eb9..7b6df1534 100644 --- a/client/csi_endpoint_test.go +++ b/client/csi_endpoint_test.go @@ -4,6 +4,7 @@ import ( "errors" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/structs" nstructs "github.com/hashicorp/nomad/nomad/structs" @@ -25,7 +26,7 @@ var fakeNodePlugin = &dynamicplugins.PluginInfo{ } func TestCSIController_AttachVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -172,7 +173,7 @@ func TestCSIController_AttachVolume(t *testing.T) { } func TestCSIController_ValidateVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -275,7 +276,7 @@ func TestCSIController_ValidateVolume(t *testing.T) { } func TestCSIController_DetachVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -358,7 +359,7 @@ func TestCSIController_DetachVolume(t *testing.T) { } func TestCSIController_CreateVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -460,7 +461,7 @@ func TestCSIController_CreateVolume(t *testing.T) { } func TestCSIController_DeleteVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -524,7 +525,7 @@ func TestCSIController_DeleteVolume(t *testing.T) { } func TestCSIController_ListVolumes(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -632,7 +633,7 @@ func TestCSIController_ListVolumes(t *testing.T) { 
} } func TestCSIController_CreateSnapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -725,7 +726,7 @@ func TestCSIController_CreateSnapshot(t *testing.T) { } func TestCSIController_DeleteSnapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -789,7 +790,7 @@ func TestCSIController_DeleteSnapshot(t *testing.T) { } func TestCSIController_ListSnapshots(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -893,7 +894,7 @@ func TestCSIController_ListSnapshots(t *testing.T) { } func TestCSINode_DetachVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string diff --git a/client/devicemanager/manager_test.go b/client/devicemanager/manager_test.go index 2cc78da5c..c3da419e2 100644 --- a/client/devicemanager/manager_test.go +++ b/client/devicemanager/manager_test.go @@ -9,6 +9,7 @@ import ( log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/state" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pluginutils/loader" @@ -234,7 +235,7 @@ func nvidiaAndIntelDefaultPlugins(catalog *loader.MockCatalog) { // Test collecting statistics from all devices func TestManager_AllStats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) config, _, catalog := baseTestConfig(t) @@ -283,7 +284,7 @@ func TestManager_AllStats(t *testing.T) { // Test collecting statistics from a particular device func TestManager_DeviceStats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) config, _, catalog := baseTestConfig(t) @@ -330,7 +331,7 @@ func TestManager_DeviceStats(t *testing.T) { // Test reserving a particular device func TestManager_Reserve(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) config, _, catalog := baseTestConfig(t) @@ -428,7 +429,7 @@ func TestManager_Reserve(t *testing.T) { // Test that shutdown shuts down the plugins func TestManager_Shutdown(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) config, _, catalog := baseTestConfig(t) @@ -455,7 +456,7 @@ func TestManager_Shutdown(t *testing.T) { // Test that startup shuts down previously launched plugins func TestManager_Run_ShutdownOld(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) config, _, catalog := baseTestConfig(t) diff --git a/client/driver_manager_test.go b/client/driver_manager_test.go index 8514749dd..8a930b75e 100644 --- a/client/driver_manager_test.go +++ b/client/driver_manager_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" "github.com/hashicorp/nomad/helper/pluginutils/catalog" @@ -16,7 +17,7 @@ import ( ) // TestDriverManager_Fingerprint_Run asserts that node is populated with // driver fingerprints func TestDriverManager_Fingerprint_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) testClient, cleanup := TestClient(t, nil) defer cleanup() @@ -54,7 +55,7 @@ func TestDriverManager_Fingerprint_Run(t *testing.T) { // TestDriverManager_Fingerprint_Periodic asserts that node is populated with // driver fingerprints and they are updated periodically func TestDriverManager_Fingerprint_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) testClient, cleanup := TestClient(t, func(c *config.Config) { pluginConfig := []*nconfig.PluginConfig{ @@ -124,7 +125,7 @@
func TestDriverManager_Fingerprint_Periodic(t *testing.T) { // TestDriverManager_NodeAttributes_Run asserts that node attributes are populated // in addition to node.Drivers until we fully deprecate it func TestDriverManager_NodeAttributes_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) testClient, cleanup := TestClient(t, func(c *config.Config) { c.Options = map[string]string{ diff --git a/client/dynamicplugins/registry_test.go b/client/dynamicplugins/registry_test.go index a820a675f..c55af5c5a 100644 --- a/client/dynamicplugins/registry_test.go +++ b/client/dynamicplugins/registry_test.go @@ -7,11 +7,13 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestPluginEventBroadcaster_SendsMessagesToAllClients(t *testing.T) { - t.Parallel() + ci.Parallel(t) + b := newPluginEventBroadcaster() defer close(b.stopCh) var rcv1, rcv2 bool @@ -37,7 +39,7 @@ func TestPluginEventBroadcaster_SendsMessagesToAllClients(t *testing.T) { } func TestPluginEventBroadcaster_UnsubscribeWorks(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := newPluginEventBroadcaster() defer close(b.stopCh) @@ -66,7 +68,8 @@ func TestPluginEventBroadcaster_UnsubscribeWorks(t *testing.T) { } func TestDynamicRegistry_RegisterPlugin_SendsUpdateEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) + r := NewRegistry(nil, nil) ctx, cancelFn := context.WithCancel(context.Background()) @@ -104,7 +107,8 @@ func TestDynamicRegistry_RegisterPlugin_SendsUpdateEvents(t *testing.T) { } func TestDynamicRegistry_DeregisterPlugin_SendsUpdateEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) + r := NewRegistry(nil, nil) ctx, cancelFn := context.WithCancel(context.Background()) @@ -147,6 +151,8 @@ func TestDynamicRegistry_DeregisterPlugin_SendsUpdateEvents(t *testing.T) { } func TestDynamicRegistry_DispensePlugin_Works(t *testing.T) { + ci.Parallel(t) + dispenseFn := func(i *PluginInfo) (interface{}, error) { return struct{}{}, nil } @@ -174,7 +180,8 @@ func TestDynamicRegistry_DispensePlugin_Works(t *testing.T) { } func TestDynamicRegistry_IsolatePluginTypes(t *testing.T) { - t.Parallel() + ci.Parallel(t) + r := NewRegistry(nil, nil) err := r.RegisterPlugin(&PluginInfo{ @@ -200,7 +207,8 @@ func TestDynamicRegistry_IsolatePluginTypes(t *testing.T) { } func TestDynamicRegistry_StateStore(t *testing.T) { - t.Parallel() + ci.Parallel(t) + dispenseFn := func(i *PluginInfo) (interface{}, error) { return i, nil } @@ -226,8 +234,8 @@ func TestDynamicRegistry_StateStore(t *testing.T) { } func TestDynamicRegistry_ConcurrentAllocs(t *testing.T) { + ci.Parallel(t) - t.Parallel() dispenseFn := func(i *PluginInfo) (interface{}, error) { return i, nil } diff --git a/client/fingerprint/arch_test.go b/client/fingerprint/arch_test.go index c5faa2fff..9861b95a7 100644 --- a/client/fingerprint/arch_test.go +++ b/client/fingerprint/arch_test.go @@ -3,12 +3,15 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestArchFingerprint(t *testing.T) { + ci.Parallel(t) + f := NewArchFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/bridge_linux_test.go b/client/fingerprint/bridge_linux_test.go index 8917598e2..739ef73f4 100644 --- a/client/fingerprint/bridge_linux_test.go +++ b/client/fingerprint/bridge_linux_test.go @@ -8,11 +8,14 @@ import ( "strings" "testing" + 
"github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/stretchr/testify/require" ) func TestBridgeFingerprint_detect(t *testing.T) { + ci.Parallel(t) + f := &BridgeFingerprint{logger: testlog.HCLogger(t)} require.NoError(t, f.detect("ip_tables")) @@ -73,6 +76,8 @@ kernel/net/bridge/bridgeRHEL.ko.xz: kernel/net/802/stp.ko.xz kernel/net/llc/llc. ) func TestBridgeFingerprint_search(t *testing.T) { + ci.Parallel(t) + f := &BridgeFingerprint{logger: testlog.HCLogger(t)} t.Run("dynamic loaded module", func(t *testing.T) { diff --git a/client/fingerprint/cgroup_test.go b/client/fingerprint/cgroup_test.go index d357c1e17..11119b1d0 100644 --- a/client/fingerprint/cgroup_test.go +++ b/client/fingerprint/cgroup_test.go @@ -7,6 +7,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -41,6 +42,8 @@ func (m *MountPointDetectorEmptyMountPoint) MountPoint() (string, error) { } func TestCGroupFingerprint(t *testing.T) { + ci.Parallel(t) + { f := &CGroupFingerprint{ logger: testlog.HCLogger(t), diff --git a/client/fingerprint/cni_test.go b/client/fingerprint/cni_test.go index 3fd125b7d..90186da0e 100644 --- a/client/fingerprint/cni_test.go +++ b/client/fingerprint/cni_test.go @@ -3,6 +3,7 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -13,6 +14,8 @@ import ( var _ ReloadableFingerprint = &CNIFingerprint{} func TestCNIFingerprint(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string req *FingerprintRequest diff --git a/client/fingerprint/consul_test.go b/client/fingerprint/consul_test.go index fb7ff3ca8..4b3887478 100644 --- a/client/fingerprint/consul_test.go +++ b/client/fingerprint/consul_test.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" agentconsul "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper/testlog" @@ -47,7 +48,7 @@ func newConsulFingerPrint(t *testing.T) *ConsulFingerprint { } func TestConsulFingerprint_server(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -83,7 +84,7 @@ func TestConsulFingerprint_server(t *testing.T) { } func TestConsulFingerprint_version(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -119,7 +120,7 @@ func TestConsulFingerprint_version(t *testing.T) { } func TestConsulFingerprint_sku(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -171,7 +172,7 @@ func TestConsulFingerprint_sku(t *testing.T) { } func TestConsulFingerprint_revision(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -199,7 +200,7 @@ func TestConsulFingerprint_revision(t *testing.T) { } func TestConsulFingerprint_dc(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -227,7 +228,7 @@ func TestConsulFingerprint_dc(t *testing.T) { } func TestConsulFingerprint_segment(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -262,7 +263,7 @@ func TestConsulFingerprint_segment(t *testing.T) { } func TestConsulFingerprint_connect(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -291,7 +292,7 @@ func TestConsulFingerprint_connect(t *testing.T) { } 
func TestConsulFingerprint_grpc(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -321,7 +322,7 @@ func TestConsulFingerprint_grpc(t *testing.T) { } func TestConsulFingerprint_namespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) fp := newConsulFingerPrint(t) @@ -362,6 +363,8 @@ func TestConsulFingerprint_namespaces(t *testing.T) { } func TestConsulFingerprint_Fingerprint_oss(t *testing.T) { + ci.Parallel(t) + cf := newConsulFingerPrint(t) ts, cfg := fakeConsul(fakeConsulPayload(t, "test_fixtures/consul/agent_self_oss.json")) @@ -449,6 +452,8 @@ func TestConsulFingerprint_Fingerprint_oss(t *testing.T) { } func TestConsulFingerprint_Fingerprint_ent(t *testing.T) { + ci.Parallel(t) + cf := newConsulFingerPrint(t) ts, cfg := fakeConsul(fakeConsulPayload(t, "test_fixtures/consul/agent_self_ent.json")) diff --git a/client/fingerprint/cpu_test.go b/client/fingerprint/cpu_test.go index b6f4fdaca..5d2e23e16 100644 --- a/client/fingerprint/cpu_test.go +++ b/client/fingerprint/cpu_test.go @@ -4,12 +4,15 @@ import ( "strconv" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestCPUFingerprint(t *testing.T) { + ci.Parallel(t) + f := NewCPUFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), @@ -58,6 +61,8 @@ func TestCPUFingerprint(t *testing.T) { // TestCPUFingerprint_OverrideCompute asserts that setting cpu_total_compute in // the client config overrides the detected CPU freq (if any). func TestCPUFingerprint_OverrideCompute(t *testing.T) { + ci.Parallel(t) + f := NewCPUFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/env_aws_test.go b/client/fingerprint/env_aws_test.go index fb3f0510b..caca5f4ba 100644 --- a/client/fingerprint/env_aws_test.go +++ b/client/fingerprint/env_aws_test.go @@ -6,6 +6,7 @@ import ( "net/http/httptest" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -13,6 +14,8 @@ import ( ) func TestEnvAWSFingerprint_nonAws(t *testing.T) { + ci.Parallel(t) + f := NewEnvAWSFingerprint(testlog.HCLogger(t)) f.(*EnvAWSFingerprint).endpoint = "http://127.0.0.1/latest" @@ -28,6 +31,8 @@ func TestEnvAWSFingerprint_nonAws(t *testing.T) { } func TestEnvAWSFingerprint_aws(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, awsStubs) defer cleanup() @@ -69,6 +74,8 @@ func TestEnvAWSFingerprint_aws(t *testing.T) { } func TestNetworkFingerprint_AWS(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, awsStubs) defer cleanup() @@ -97,6 +104,8 @@ func TestNetworkFingerprint_AWS(t *testing.T) { } func TestNetworkFingerprint_AWS_network(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, awsStubs) defer cleanup() @@ -158,6 +167,8 @@ func TestNetworkFingerprint_AWS_network(t *testing.T) { } func TestNetworkFingerprint_AWS_NoNetwork(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, noNetworkAWSStubs) defer cleanup() @@ -181,6 +192,8 @@ func TestNetworkFingerprint_AWS_NoNetwork(t *testing.T) { } func TestNetworkFingerprint_AWS_IncompleteImitation(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, incompleteAWSImitationStubs) defer cleanup() @@ -203,6 +216,8 @@ func 
TestNetworkFingerprint_AWS_IncompleteImitation(t *testing.T) { } func TestCPUFingerprint_AWS_InstanceFound(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, awsStubs) defer cleanup() @@ -224,6 +239,8 @@ func TestCPUFingerprint_AWS_InstanceFound(t *testing.T) { } func TestCPUFingerprint_AWS_OverrideCompute(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, awsStubs) defer cleanup() @@ -247,6 +264,8 @@ func TestCPUFingerprint_AWS_OverrideCompute(t *testing.T) { } func TestCPUFingerprint_AWS_InstanceNotFound(t *testing.T) { + ci.Parallel(t) + endpoint, cleanup := startFakeEC2Metadata(t, unknownInstanceType) defer cleanup() diff --git a/client/fingerprint/env_azure_test.go b/client/fingerprint/env_azure_test.go index 9bd0c9e02..91afb3229 100644 --- a/client/fingerprint/env_azure_test.go +++ b/client/fingerprint/env_azure_test.go @@ -9,12 +9,15 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestAzureFingerprint_nonAzure(t *testing.T) { + ci.Parallel(t) + os.Setenv("AZURE_ENV_URL", "http://127.0.0.1/metadata/instance/") f := NewEnvAzureFingerprint(testlog.HCLogger(t)) node := &structs.Node{ @@ -211,9 +214,13 @@ const AZURE_routes = ` ` func TestFingerprint_AzureWithExternalIp(t *testing.T) { + ci.Parallel(t) + testFingerprint_Azure(t, true) } func TestFingerprint_AzureWithoutExternalIp(t *testing.T) { + ci.Parallel(t) + testFingerprint_Azure(t, false) } diff --git a/client/fingerprint/env_digitalocean_test.go b/client/fingerprint/env_digitalocean_test.go index 8b0ccca5f..c70a08a97 100644 --- a/client/fingerprint/env_digitalocean_test.go +++ b/client/fingerprint/env_digitalocean_test.go @@ -9,6 +9,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -16,6 +17,8 @@ import ( ) func TestDigitalOceanFingerprint_nonDigitalOcean(t *testing.T) { + ci.Parallel(t) + os.Setenv("DO_ENV_URL", "http://127.0.0.1/metadata/v1/") f := NewEnvDigitalOceanFingerprint(testlog.HCLogger(t)) node := &structs.Node{ @@ -39,6 +42,8 @@ func TestDigitalOceanFingerprint_nonDigitalOcean(t *testing.T) { } func TestFingerprint_DigitalOcean(t *testing.T) { + ci.Parallel(t) + node := &structs.Node{ Attributes: make(map[string]string), } diff --git a/client/fingerprint/env_gce_test.go b/client/fingerprint/env_gce_test.go index 03f1f60f9..653d9d258 100644 --- a/client/fingerprint/env_gce_test.go +++ b/client/fingerprint/env_gce_test.go @@ -9,12 +9,15 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestGCEFingerprint_nonGCE(t *testing.T) { + ci.Parallel(t) + os.Setenv("GCE_ENV_URL", "http://127.0.0.1/computeMetadata/v1/instance/") f := NewEnvGCEFingerprint(testlog.HCLogger(t)) node := &structs.Node{ @@ -207,9 +210,13 @@ const GCE_routes = ` ` func TestFingerprint_GCEWithExternalIp(t *testing.T) { + ci.Parallel(t) + testFingerprint_GCE(t, true) } func TestFingerprint_GCEWithoutExternalIp(t *testing.T) { + ci.Parallel(t) + testFingerprint_GCE(t, false) } diff --git a/client/fingerprint/host_test.go b/client/fingerprint/host_test.go index 02f7b5d0e..5fd9dbf85 100644 --- a/client/fingerprint/host_test.go +++ 
b/client/fingerprint/host_test.go @@ -3,12 +3,15 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestHostFingerprint(t *testing.T) { + ci.Parallel(t) + f := NewHostFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/memory_test.go b/client/fingerprint/memory_test.go index 8635b55fd..e93599e9b 100644 --- a/client/fingerprint/memory_test.go +++ b/client/fingerprint/memory_test.go @@ -3,6 +3,7 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -11,6 +12,8 @@ import ( ) func TestMemoryFingerprint(t *testing.T) { + ci.Parallel(t) + require := require.New(t) f := NewMemoryFingerprint(testlog.HCLogger(t)) @@ -31,6 +34,8 @@ func TestMemoryFingerprint(t *testing.T) { } func TestMemoryFingerprint_Override(t *testing.T) { + ci.Parallel(t) + f := NewMemoryFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/network_test.go b/client/fingerprint/network_test.go index 7628b814b..08332ba09 100644 --- a/client/fingerprint/network_test.go +++ b/client/fingerprint/network_test.go @@ -7,6 +7,7 @@ import ( "sort" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -182,6 +183,8 @@ func (n *NetworkInterfaceDetectorMultipleInterfaces) Addrs(intf *net.Interface) } func TestNetworkFingerprint_basic(t *testing.T) { + ci.Parallel(t) + if v := os.Getenv(skipOnlineTestsEnvVar); v != "" { t.Skipf("Environment variable %+q not empty, skipping test", skipOnlineTestsEnvVar) } @@ -237,6 +240,8 @@ func TestNetworkFingerprint_basic(t *testing.T) { } func TestNetworkFingerprint_default_device_absent(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorOnlyLo{}} node := &structs.Node{ Attributes: make(map[string]string), @@ -260,6 +265,8 @@ func TestNetworkFingerprint_default_device_absent(t *testing.T) { } func TestNetworkFingerPrint_default_device(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorOnlyLo{}} node := &structs.Node{ Attributes: make(map[string]string), @@ -311,6 +318,8 @@ func TestNetworkFingerPrint_default_device(t *testing.T) { } func TestNetworkFingerPrint_LinkLocal_Allowed(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}} node := &structs.Node{ Attributes: make(map[string]string), @@ -358,6 +367,8 @@ func TestNetworkFingerPrint_LinkLocal_Allowed(t *testing.T) { } func TestNetworkFingerPrint_LinkLocal_Allowed_MixedIntf(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}} node := &structs.Node{ Attributes: make(map[string]string), @@ -412,6 +423,8 @@ func TestNetworkFingerPrint_LinkLocal_Allowed_MixedIntf(t *testing.T) { } func TestNetworkFingerPrint_LinkLocal_Disallowed(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), 
interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}} node := &structs.Node{ Attributes: make(map[string]string), @@ -441,6 +454,8 @@ func TestNetworkFingerPrint_LinkLocal_Disallowed(t *testing.T) { } func TestNetworkFingerPrint_MultipleAliases(t *testing.T) { + ci.Parallel(t) + f := &NetworkFingerprint{logger: testlog.HCLogger(t), interfaceDetector: &NetworkInterfaceDetectorMultipleInterfaces{}} node := &structs.Node{ Attributes: make(map[string]string), @@ -488,6 +503,8 @@ func TestNetworkFingerPrint_MultipleAliases(t *testing.T) { } func TestNetworkFingerPrint_HostNetworkReservedPorts(t *testing.T) { + ci.Parallel(t) + testCases := []struct { name string hostNetworks map[string]*structs.ClientHostNetworkConfig diff --git a/client/fingerprint/nomad_test.go b/client/fingerprint/nomad_test.go index c2dba204f..2e349ae5d 100644 --- a/client/fingerprint/nomad_test.go +++ b/client/fingerprint/nomad_test.go @@ -3,6 +3,7 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -10,6 +11,8 @@ import ( ) func TestNomadFingerprint(t *testing.T) { + ci.Parallel(t) + f := NewNomadFingerprint(testlog.HCLogger(t)) v := "foo" diff --git a/client/fingerprint/signal_test.go b/client/fingerprint/signal_test.go index d88c4a85a..4cdc3b01d 100644 --- a/client/fingerprint/signal_test.go +++ b/client/fingerprint/signal_test.go @@ -3,11 +3,14 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestSignalFingerprint(t *testing.T) { + ci.Parallel(t) + fp := NewSignalFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/storage_test.go b/client/fingerprint/storage_test.go index 3227d4d2b..1c00fcbc4 100644 --- a/client/fingerprint/storage_test.go +++ b/client/fingerprint/storage_test.go @@ -4,11 +4,14 @@ import ( "strconv" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" ) func TestStorageFingerprint(t *testing.T) { + ci.Parallel(t) + fp := NewStorageFingerprint(testlog.HCLogger(t)) node := &structs.Node{ Attributes: make(map[string]string), diff --git a/client/fingerprint/vault_test.go b/client/fingerprint/vault_test.go index 2056dda4d..2891d5057 100644 --- a/client/fingerprint/vault_test.go +++ b/client/fingerprint/vault_test.go @@ -3,6 +3,7 @@ package fingerprint import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -10,6 +11,8 @@ import ( ) func TestVaultFingerprint(t *testing.T) { + ci.Parallel(t) + tv := testutil.NewTestVault(t) defer tv.Stop() diff --git a/client/fingerprint_manager_test.go b/client/fingerprint_manager_test.go index a4ba0184a..576b6514b 100644 --- a/client/fingerprint_manager_test.go +++ b/client/fingerprint_manager_test.go @@ -3,13 +3,15 @@ package client import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/stretchr/testify/require" ) func TestFingerprintManager_Run_ResourcesFingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + testClient, cleanup := TestClient(t, nil) defer cleanup() @@ -33,7 +35,7 @@ func 
TestFingerprintManager_Run_ResourcesFingerprint(t *testing.T) { } func TestFimgerprintManager_Run_InWhitelist(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testClient, cleanup := TestClient(t, func(c *config.Config) { @@ -62,12 +64,13 @@ func TestFimgerprintManager_Run_InWhitelist(t *testing.T) { } func TestFingerprintManager_Run_InDenylist(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) + testClient, cleanup := TestClient(t, func(c *config.Config) { c.Options = map[string]string{ "fingerprint.allowlist": " arch,memory,foo,bar ", - "fingerprint.denylist": " cpu ", + "fingerprint.denylist": " cpu ", } }) defer cleanup() @@ -91,13 +94,13 @@ func TestFingerprintManager_Run_InDenylist(t *testing.T) { } func TestFingerprintManager_Run_Combination(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testClient, cleanup := TestClient(t, func(c *config.Config) { c.Options = map[string]string{ "fingerprint.allowlist": " arch,cpu,memory,foo,bar ", - "fingerprint.denylist": " memory,host ", + "fingerprint.denylist": " memory,host ", } }) defer cleanup() @@ -123,7 +126,7 @@ func TestFingerprintManager_Run_Combination(t *testing.T) { } func TestFingerprintManager_Run_CombinationLegacyNames(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testClient, cleanup := TestClient(t, func(c *config.Config) { diff --git a/client/fs_endpoint_test.go b/client/fs_endpoint_test.go index 76fad1847..8df47b90d 100644 --- a/client/fs_endpoint_test.go +++ b/client/fs_endpoint_test.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/config" sframer "github.com/hashicorp/nomad/client/lib/streamframer" @@ -50,7 +51,7 @@ func (n nopWriteCloser) Close() error { } func TestFS_Stat_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a client @@ -71,7 +72,7 @@ func TestFS_Stat_NoAlloc(t *testing.T) { } func TestFS_Stat(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -108,7 +109,7 @@ func TestFS_Stat(t *testing.T) { } func TestFS_Stat_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := nomad.TestACLServer(t, nil) @@ -183,7 +184,7 @@ func TestFS_Stat_ACL(t *testing.T) { } func TestFS_List_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a client @@ -204,7 +205,7 @@ func TestFS_List_NoAlloc(t *testing.T) { } func TestFS_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -241,7 +242,7 @@ func TestFS_List(t *testing.T) { } func TestFS_List_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := nomad.TestACLServer(t, nil) @@ -316,7 +317,7 @@ func TestFS_List_ACL(t *testing.T) { } func TestFS_Stream_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a client @@ -391,7 +392,7 @@ OUTER: } func TestFS_Stream_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := nomad.TestACLServer(t, nil) @@ -519,7 +520,7 @@ func TestFS_Stream_ACL(t *testing.T) { } func TestFS_Stream(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -635,7 +636,7 @@ func (r *ReadWriteCloseChecker) Close() error { 
} func TestFS_Stream_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -732,7 +733,7 @@ OUTER: } func TestFS_Stream_Limit(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -826,7 +827,7 @@ OUTER: } func TestFS_Logs_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a client @@ -904,7 +905,7 @@ OUTER: // TestFS_Logs_TaskPending asserts that trying to stream logs for tasks which // have not started returns a 404 error. func TestFS_Logs_TaskPending(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1019,7 +1020,7 @@ func TestFS_Logs_TaskPending(t *testing.T) { } func TestFS_Logs_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -1150,7 +1151,7 @@ func TestFS_Logs_ACL(t *testing.T) { } func TestFS_Logs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1251,7 +1252,7 @@ OUTER: } func TestFS_Logs_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1555,7 +1556,8 @@ func TestFS_findClosest(t *testing.T) { } func TestFS_streamFile_NoFile(t *testing.T) { - t.Parallel() + ci.Parallel(t) + c, cleanup := TestClient(t, nil) defer cleanup() @@ -1578,7 +1580,7 @@ func TestFS_streamFile_NoFile(t *testing.T) { } func TestFS_streamFile_Modify(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, cleanup := TestClient(t, nil) defer cleanup() @@ -1649,7 +1651,8 @@ func TestFS_streamFile_Modify(t *testing.T) { } func TestFS_streamFile_Truncate(t *testing.T) { - t.Parallel() + ci.Parallel(t) + c, cleanup := TestClient(t, nil) defer cleanup() @@ -1752,10 +1755,10 @@ func TestFS_streamFile_Truncate(t *testing.T) { } func TestFS_streamImpl_Delete(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Windows does not allow us to delete a file while it is open") } - t.Parallel() c, cleanup := TestClient(t, nil) defer cleanup() @@ -1828,7 +1831,7 @@ func TestFS_streamImpl_Delete(t *testing.T) { } func TestFS_logsImpl_NoFollow(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, cleanup := TestClient(t, nil) defer cleanup() @@ -1897,7 +1900,7 @@ func TestFS_logsImpl_NoFollow(t *testing.T) { } func TestFS_logsImpl_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, cleanup := TestClient(t, nil) defer cleanup() diff --git a/client/gc_test.go b/client/gc_test.go index fd0061f47..83a25c50a 100644 --- a/client/gc_test.go +++ b/client/gc_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/stats" @@ -37,7 +38,8 @@ func exitAllocRunner(runners ...AllocRunner) { } func TestIndexedGCAllocPQ(t *testing.T) { - t.Parallel() + ci.Parallel(t) + pq := NewIndexedGCAllocPQ() ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc()) @@ -122,7 +124,8 @@ func (m *MockStatsCollector) Stats() *stats.HostStats { } func TestAllocGarbageCollector_MarkForCollection(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) @@ -138,7 +141,8 @@ func TestAllocGarbageCollector_MarkForCollection(t *testing.T) { } func TestAllocGarbageCollector_Collect(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) @@ -164,7 +168,8 @@ func TestAllocGarbageCollector_Collect(t *testing.T) { } func TestAllocGarbageCollector_CollectAll(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig()) @@ -184,7 +189,8 @@ func TestAllocGarbageCollector_CollectAll(t *testing.T) { } func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} conf := gcConfig() @@ -226,7 +232,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) } func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} conf := gcConfig() @@ -269,7 +276,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) { } func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} conf := gcConfig() @@ -308,7 +316,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) { } func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} conf := gcConfig() @@ -348,6 +357,8 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) // TestAllocGarbageCollector_MakeRoomFor_MaxAllocs asserts that when making room for new // allocs, terminal allocs are GC'd until old_allocs + new_allocs <= limit func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { + ci.Parallel(t) + const maxAllocs = 6 require := require.New(t) @@ -494,7 +505,8 @@ func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) { } func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} conf := gcConfig() @@ -533,7 +545,8 @@ func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) { } func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) { - t.Parallel() + ci.Parallel(t) + logger := testlog.HCLogger(t) statsCollector := &MockStatsCollector{} conf := gcConfig() diff --git a/client/heartbeatstop_test.go b/client/heartbeatstop_test.go index 86638cb01..55c54c2bf 100644 --- a/client/heartbeatstop_test.go +++ b/client/heartbeatstop_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -12,7 +13,7 @@ import ( ) func TestHeartbeatStop_allocHook(t *testing.T) { - t.Parallel() + ci.Parallel(t) server, _, cleanupS1 := testServer(t, nil) defer cleanupS1() diff --git a/client/logmon/logmon_test.go b/client/logmon/logmon_test.go index a3e62408b..d8481cb7e 100644 --- a/client/logmon/logmon_test.go +++ b/client/logmon/logmon_test.go @@ -9,6 +9,7 @@ import ( "runtime" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/lib/fifo" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -17,6 
+18,8 @@ import ( ) func TestLogmon_Start_rotate(t *testing.T) { + ci.Parallel(t) + require := require.New(t) var stdoutFifoPath, stderrFifoPath string @@ -77,6 +80,8 @@ func TestLogmon_Start_rotate(t *testing.T) { // asserts that calling Start twice restarts the log rotator and that any logs // published while the listener was unavailable are received. func TestLogmon_Start_restart_flusheslogs(t *testing.T) { + ci.Parallel(t) + if runtime.GOOS == "windows" { t.Skip("windows does not support pushing data to a pipe with no servers") } @@ -184,6 +189,8 @@ func TestLogmon_Start_restart_flusheslogs(t *testing.T) { // asserts that calling Start twice restarts the log rotator func TestLogmon_Start_restart(t *testing.T) { + ci.Parallel(t) + require := require.New(t) var stdoutFifoPath, stderrFifoPath string @@ -280,7 +287,7 @@ func (panicWriter) Close() error { // TestLogmon_NewError asserts that newLogRotatorWrapper will return an error // if it's unable to create the necessary files. func TestLogmon_NewError(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Pick a path that does not exist path := filepath.Join(uuid.Generate(), uuid.Generate(), uuid.Generate()) diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go index af40227e3..94ef2cf06 100644 --- a/client/pluginmanager/csimanager/volume_test.go +++ b/client/pluginmanager/csimanager/volume_test.go @@ -8,6 +8,7 @@ import ( "runtime" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/mount" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" @@ -39,7 +40,7 @@ func TestVolumeManager_ensureStagingDir(t *testing.T) { if !checkMountSupport() { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -136,7 +137,7 @@ func TestVolumeManager_stageVolume(t *testing.T) { if !checkMountSupport() { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -217,7 +218,7 @@ func TestVolumeManager_unstageVolume(t *testing.T) { if !checkMountSupport() { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -280,7 +281,7 @@ func TestVolumeManager_publishVolume(t *testing.T) { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -406,7 +407,7 @@ func TestVolumeManager_unpublishVolume(t *testing.T) { if !checkMountSupport() { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -471,7 +472,7 @@ func TestVolumeManager_MountVolumeEvents(t *testing.T) { if !checkMountSupport() { t.Skip("mount point detection not supported for this platform") } - t.Parallel() + ci.Parallel(t) tmpPath := tmpDir(t) defer os.RemoveAll(tmpPath) diff --git a/client/pluginmanager/drivermanager/manager_test.go b/client/pluginmanager/drivermanager/manager_test.go index 1a4773343..e3f7798a4 100644 --- a/client/pluginmanager/drivermanager/manager_test.go +++ b/client/pluginmanager/drivermanager/manager_test.go @@ -9,6 +9,7 @@ import ( log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/pluginmanager" "github.com/hashicorp/nomad/client/state" "github.com/hashicorp/nomad/helper/pluginutils/loader" @@ -101,7
+102,7 @@ func noopUpdater(string, *structs.DriverInfo) {} func noopEventHandlerFactory(string, string) EventHandler { return nil } func TestManager_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fpChan, _, mgr := testSetup(t) var infos []*structs.DriverInfo @@ -168,7 +169,7 @@ func TestManager_Fingerprint(t *testing.T) { } func TestManager_TaskEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fpChan, evChan, mgr := testSetup(t) go mgr.Run() @@ -199,7 +200,7 @@ func TestManager_TaskEvents(t *testing.T) { } func TestManager_Run_AllowedDrivers(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fpChan, _, mgr := testSetup(t) mgr.allowedDrivers = map[string]struct{}{"foo": {}} @@ -219,7 +220,7 @@ func TestManager_Run_AllowedDrivers(t *testing.T) { } func TestManager_Run_BlockedDrivers(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fpChan, _, mgr := testSetup(t) mgr.blockedDrivers = map[string]struct{}{"mock": {}} @@ -239,7 +240,7 @@ func TestManager_Run_BlockedDrivers(t *testing.T) { } func TestManager_Run_AllowedBlockedDrivers_Combined(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) drvs := map[string]drivers.DriverPlugin{} fpChs := map[string]chan *drivers.Fingerprint{} diff --git a/client/pluginmanager/group_test.go b/client/pluginmanager/group_test.go index 07448d542..fc91824e8 100644 --- a/client/pluginmanager/group_test.go +++ b/client/pluginmanager/group_test.go @@ -6,12 +6,13 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/stretchr/testify/require" ) func TestPluginGroup_RegisterAndRun(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var hasRun bool @@ -29,7 +30,7 @@ func TestPluginGroup_RegisterAndRun(t *testing.T) { } func TestPluginGroup_Shutdown(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var stack []int @@ -66,7 +67,7 @@ func TestPluginGroup_Shutdown(t *testing.T) { } func TestPluginGroup_WaitForFirstFingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) managerCh := make(chan struct{}) @@ -95,7 +96,7 @@ func TestPluginGroup_WaitForFirstFingerprint(t *testing.T) { } func TestPluginGroup_WaitForFirstFingerprint_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) managerCh := make(chan struct{}) diff --git a/client/rpc_test.go b/client/rpc_test.go index 2f8c13378..162e4d790 100644 --- a/client/rpc_test.go +++ b/client/rpc_test.go @@ -4,6 +4,7 @@ import ( "errors" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/nomad" "github.com/hashicorp/nomad/nomad/structs" @@ -13,7 +14,7 @@ import ( ) func TestRpc_streamingRpcConn_badEndpoint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := nomad.TestServer(t, nil) @@ -51,7 +52,7 @@ func TestRpc_streamingRpcConn_badEndpoint(t *testing.T) { } func TestRpc_streamingRpcConn_badEndpoint_TLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) const ( diff --git a/client/servers/manager_internal_test.go b/client/servers/manager_internal_test.go index f28074dea..17cf29b58 100644 --- a/client/servers/manager_internal_test.go +++ b/client/servers/manager_internal_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" ) @@ -50,6 
+51,8 @@ func testManagerFailProb(t *testing.T, failPct float64) (m *Manager) { } func TestManagerInternal_cycleServer(t *testing.T) { + ci.Parallel(t) + server0 := &Server{Addr: &fauxAddr{"server1"}} server1 := &Server{Addr: &fauxAddr{"server2"}} server2 := &Server{Addr: &fauxAddr{"server3"}} @@ -81,6 +84,8 @@ func TestManagerInternal_cycleServer(t *testing.T) { } func TestManagerInternal_New(t *testing.T) { + ci.Parallel(t) + m := testManager(t) if m == nil { t.Fatalf("Manager nil") @@ -97,6 +102,8 @@ func TestManagerInternal_New(t *testing.T) { // func (l *serverList) refreshServerRebalanceTimer() { func TestManagerInternal_refreshServerRebalanceTimer(t *testing.T) { + ci.Parallel(t) + type clusterSizes struct { numNodes int32 numServers int diff --git a/client/servers/manager_test.go b/client/servers/manager_test.go index 1c13889a7..e219c3f4b 100644 --- a/client/servers/manager_test.go +++ b/client/servers/manager_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/servers" "github.com/hashicorp/nomad/helper/testlog" "github.com/stretchr/testify/require" @@ -47,6 +48,8 @@ func testManagerFailProb(t *testing.T, failPct float64) (m *servers.Manager) { } func TestServers_SetServers(t *testing.T) { + ci.Parallel(t) + require := require.New(t) m := testManager(t) var num int @@ -82,6 +85,8 @@ func TestServers_SetServers(t *testing.T) { } func TestServers_FindServer(t *testing.T) { + ci.Parallel(t) + m := testManager(t) if m.FindServer() != nil { @@ -126,6 +131,8 @@ func TestServers_FindServer(t *testing.T) { } func TestServers_New(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) shutdownCh := make(chan struct{}) m := servers.New(logger, shutdownCh, &fauxConnPool{}) @@ -135,6 +142,8 @@ func TestServers_New(t *testing.T) { } func TestServers_NotifyFailedServer(t *testing.T) { + ci.Parallel(t) + m := testManager(t) if m.NumServers() != 0 { @@ -194,6 +203,8 @@ func TestServers_NotifyFailedServer(t *testing.T) { } func TestServers_NumServers(t *testing.T) { + ci.Parallel(t) + m := testManager(t) var num int num = m.NumServers() @@ -210,6 +221,8 @@ func TestServers_NumServers(t *testing.T) { } func TestServers_RebalanceServers(t *testing.T) { + ci.Parallel(t) + const failPct = 0.5 m := testManagerFailProb(t, failPct) const maxServers = 100 diff --git a/client/state/db_test.go b/client/state/db_test.go index c4f92ce75..05081d14d 100644 --- a/client/state/db_test.go +++ b/client/state/db_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" "github.com/hashicorp/nomad/client/dynamicplugins" @@ -62,7 +63,7 @@ func testDB(t *testing.T, f func(*testing.T, StateDB)) { // TestStateDB_Allocations asserts the behavior of GetAllAllocations, PutAllocation, and // DeleteAllocationBucket for all operational StateDB implementations. func TestStateDB_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -147,7 +148,7 @@ func ceilDiv(a, b int) int { // TestStateDB_Batch asserts the behavior of PutAllocation, PutNetworkStatus and // DeleteAllocationBucket in batch mode, for all operational StateDB implementations. 
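This patch never includes the ci package's own source, so the exact semantics of ci.Parallel are not visible here; what the hunks show is that every direct t.Parallel() call is routed through a single chokepoint. A minimal sketch of what such a wrapper could look like follows, with a hypothetical opt-out environment variable that is an assumption, not the real hashicorp/nomad/ci implementation:

package ci

import (
	"os"
	"testing"
)

// Parallel is a drop-in replacement for t.Parallel() so that test
// parallelism is controlled in one place. The environment variable
// below is hypothetical; the real ci package may behave differently.
func Parallel(t *testing.T) {
	t.Helper()
	// Assumed knob for serializing tests on constrained CI runners.
	if os.Getenv("NOMAD_TEST_DISABLE_PARALLEL") != "" {
		return
	}
	t.Parallel()
}

With a helper like this, the mechanical rewrite in the hunks above and below is behavior-preserving by default while leaving one audit point for CI-specific tuning.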
func TestStateDB_Batch(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -255,7 +256,7 @@ func TestStateDB_Batch(t *testing.T) { // TestStateDB_TaskState asserts the behavior of task state related StateDB // methods. func TestStateDB_TaskState(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -307,7 +308,7 @@ func TestStateDB_TaskState(t *testing.T) { // TestStateDB_DeviceManager asserts the behavior of device manager state related StateDB // methods. func TestStateDB_DeviceManager(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -332,7 +333,7 @@ func TestStateDB_DeviceManager(t *testing.T) { // TestStateDB_DriverManager asserts the behavior of driver manager state related StateDB // methods. func TestStateDB_DriverManager(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -357,7 +358,7 @@ func TestStateDB_DriverManager(t *testing.T) { // TestStateDB_DynamicRegistry asserts the behavior of dynamic registry state related StateDB // methods. func TestStateDB_DynamicRegistry(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require := require.New(t) @@ -382,7 +383,7 @@ func TestStateDB_DynamicRegistry(t *testing.T) { // TestStateDB_Upgrade asserts calling Upgrade on new databases always // succeeds. func TestStateDB_Upgrade(t *testing.T) { - t.Parallel() + ci.Parallel(t) testDB(t, func(t *testing.T, db StateDB) { require.NoError(t, db.Upgrade()) diff --git a/client/state/upgrade_int_test.go b/client/state/upgrade_int_test.go index 96df3fbad..4cc8bdc40 100644 --- a/client/state/upgrade_int_test.go +++ b/client/state/upgrade_int_test.go @@ -11,6 +11,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocrunner" "github.com/hashicorp/nomad/client/allocwatcher" clientconfig "github.com/hashicorp/nomad/client/config" @@ -32,7 +33,7 @@ import ( // TestBoltStateDB_Upgrade_Ok asserts upgrading an old state db does not error // during upgrade and restore. func TestBoltStateDB_UpgradeOld_Ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) dbFromTestFile := func(t *testing.T, dir, fn string) *BoltStateDB { diff --git a/client/state/upgrade_test.go b/client/state/upgrade_test.go index 88cf6d112..5f248d787 100644 --- a/client/state/upgrade_test.go +++ b/client/state/upgrade_test.go @@ -7,6 +7,7 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/boltdd" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -32,7 +33,7 @@ func setupBoltDB(t *testing.T) (*bbolt.DB, func()) { // TestUpgrade_NeedsUpgrade_New asserts new state dbs do not need upgrading. func TestUpgrade_NeedsUpgrade_New(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Setting up a new StateDB should initialize it at the latest version. db, cleanup := setupBoltStateDB(t) @@ -47,7 +48,7 @@ func TestUpgrade_NeedsUpgrade_New(t *testing.T) { // TestUpgrade_NeedsUpgrade_Old asserts state dbs with just the allocations // bucket *do* need upgrading. func TestUpgrade_NeedsUpgrade_Old(t *testing.T) { - t.Parallel() + ci.Parallel(t) db, cleanup := setupBoltDB(t) defer cleanup() @@ -77,7 +78,7 @@ func TestUpgrade_NeedsUpgrade_Old(t *testing.T) { // NeedsUpgrade if an invalid db version is found.
This is a safety measure to // prevent invalid and unintentional upgrades when downgrading Nomad. func TestUpgrade_NeedsUpgrade_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := [][]byte{ {'"', '2', '"'}, // wrong type @@ -107,7 +108,7 @@ func TestUpgrade_NeedsUpgrade_Error(t *testing.T) { // TestUpgrade_DeleteInvalidAllocs asserts invalid allocations are deleted // during state upgrades instead of failing the entire agent. func TestUpgrade_DeleteInvalidAllocs_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) bdb, cleanup := setupBoltDB(t) defer cleanup() @@ -152,7 +153,7 @@ func TestUpgrade_DeleteInvalidAllocs_NoAlloc(t *testing.T) { // TestUpgrade_DeleteInvalidTaskEntries asserts invalid entries under a task // bucket are deleted. func TestUpgrade_upgradeTaskBucket_InvalidEntries(t *testing.T) { - t.Parallel() + ci.Parallel(t) db, cleanup := setupBoltDB(t) defer cleanup() diff --git a/client/stats/cpu_test.go b/client/stats/cpu_test.go index 024fba93a..4dc9b19b4 100644 --- a/client/stats/cpu_test.go +++ b/client/stats/cpu_test.go @@ -6,12 +6,15 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" shelpers "github.com/hashicorp/nomad/helper/stats" "github.com/hashicorp/nomad/helper/testlog" "github.com/stretchr/testify/assert" ) func TestCpuStatsPercent(t *testing.T) { + ci.Parallel(t) + cs := NewCpuStats() cs.Percent(79.7) time.Sleep(1 * time.Second) @@ -23,6 +26,8 @@ func TestCpuStatsPercent(t *testing.T) { } func TestHostStats_CPU(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) assert.Nil(shelpers.Init()) diff --git a/client/structs/broadcaster_test.go b/client/structs/broadcaster_test.go index f23de0b93..1bbc1006d 100644 --- a/client/structs/broadcaster_test.go +++ b/client/structs/broadcaster_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/stretchr/testify/require" @@ -13,7 +14,7 @@ import ( // TestAllocBroadcaster_SendRecv asserts the latest sends to a broadcaster are // received by listeners. func TestAllocBroadcaster_SendRecv(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := NewAllocBroadcaster(testlog.HCLogger(t)) defer b.Close() @@ -47,7 +48,7 @@ func TestAllocBroadcaster_SendRecv(t *testing.T) { // TestAllocBroadcaster_RecvBlocks asserts listeners are blocked until a send occurs. func TestAllocBroadcaster_RecvBlocks(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() b := NewAllocBroadcaster(testlog.HCLogger(t)) @@ -87,7 +88,7 @@ func TestAllocBroadcaster_RecvBlocks(t *testing.T) { // TestAllocBroadcaster_Concurrency asserts that the broadcaster behaves // correctly with concurrent listeners being added and closed. func TestAllocBroadcaster_Concurrency(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() b := NewAllocBroadcaster(testlog.HCLogger(t)) @@ -164,7 +165,7 @@ func TestAllocBroadcaster_Concurrency(t *testing.T) { // TestAllocBroadcaster_PrimeListener asserts that newly created listeners are // primed with the last sent alloc. func TestAllocBroadcaster_PrimeListener(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := NewAllocBroadcaster(testlog.HCLogger(t)) defer b.Close() @@ -188,7 +189,7 @@ func TestAllocBroadcaster_PrimeListener(t *testing.T) { // TestAllocBroadcaster_Closed asserts that newly created listeners are // primed with the last sent alloc even when the broadcaster is closed.
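A few of the conversions above (TestFS_streamImpl_Delete, TestLogmon_Start_restart_flusheslogs) also hoist the parallel marker above a platform skip rather than leaving it below. Either order works, since t.Skip ends the test whether or not it was already marked parallel, but putting ci.Parallel(t) on the first line keeps the conversions uniform. A minimal sketch of the resulting shape, with an illustrative test name and body:

package client

import (
	"runtime"
	"testing"

	"github.com/hashicorp/nomad/ci"
)

func TestDeleteWhileOpen(t *testing.T) {
	// Mark parallel first so every early return below, including the
	// platform skip, leaves the test in a consistent state.
	ci.Parallel(t)

	if runtime.GOOS == "windows" {
		t.Skip("Windows does not allow us to delete a file while it is open")
	}

	// ... open a file, delete it, and assert streaming continues ...
}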
func TestAllocBroadcaster_Closed(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := NewAllocBroadcaster(testlog.HCLogger(t)) diff --git a/client/taskenv/env_test.go b/client/taskenv/env_test.go index b4cde4029..7b3156e47 100644 --- a/client/taskenv/env_test.go +++ b/client/taskenv/env_test.go @@ -11,6 +11,7 @@ import ( hcl "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/gohcl" "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -62,6 +63,8 @@ func testEnvBuilder() *Builder { } func TestEnvironment_ParseAndReplace_Env(t *testing.T) { + ci.Parallel(t) + env := testEnvBuilder() input := []string{fmt.Sprintf(`"${%v}"!`, envOneKey), fmt.Sprintf("${%s}${%s}", envOneKey, envTwoKey)} @@ -74,6 +77,8 @@ func TestEnvironment_ParseAndReplace_Env(t *testing.T) { } func TestEnvironment_ParseAndReplace_Meta(t *testing.T) { + ci.Parallel(t) + input := []string{fmt.Sprintf("${%v%v}", nodeMetaPrefix, metaKey)} exp := []string{metaVal} env := testEnvBuilder() @@ -85,6 +90,8 @@ func TestEnvironment_ParseAndReplace_Meta(t *testing.T) { } func TestEnvironment_ParseAndReplace_Attr(t *testing.T) { + ci.Parallel(t) + input := []string{fmt.Sprintf("${%v%v}", nodeAttributePrefix, attrKey)} exp := []string{attrVal} env := testEnvBuilder() @@ -96,6 +103,8 @@ func TestEnvironment_ParseAndReplace_Attr(t *testing.T) { } func TestEnvironment_ParseAndReplace_Node(t *testing.T) { + ci.Parallel(t) + input := []string{fmt.Sprintf("${%v}", nodeNameKey), fmt.Sprintf("${%v}", nodeClassKey)} exp := []string{nodeName, nodeClass} env := testEnvBuilder() @@ -107,6 +116,8 @@ func TestEnvironment_ParseAndReplace_Node(t *testing.T) { } func TestEnvironment_ParseAndReplace_Mixed(t *testing.T) { + ci.Parallel(t) + input := []string{ fmt.Sprintf("${%v}${%v%v}", nodeNameKey, nodeAttributePrefix, attrKey), fmt.Sprintf("${%v}${%v%v}", nodeClassKey, nodeMetaPrefix, metaKey), @@ -126,6 +137,8 @@ func TestEnvironment_ParseAndReplace_Mixed(t *testing.T) { } func TestEnvironment_ReplaceEnv_Mixed(t *testing.T) { + ci.Parallel(t) + input := fmt.Sprintf("${%v}${%v%v}", nodeNameKey, nodeAttributePrefix, attrKey) exp := fmt.Sprintf("%v%v", nodeName, attrVal) env := testEnvBuilder() @@ -137,6 +150,8 @@ func TestEnvironment_ReplaceEnv_Mixed(t *testing.T) { } func TestEnvironment_AsList(t *testing.T) { + ci.Parallel(t) + n := mock.Node() n.Meta = map[string]string{ "metaKey": "metaVal", @@ -227,7 +242,7 @@ func TestEnvironment_AsList(t *testing.T) { } func TestEnvironment_AllValues(t *testing.T) { - t.Parallel() + ci.Parallel(t) n := mock.Node() n.Meta = map[string]string{ @@ -431,6 +446,8 @@ func TestEnvironment_AllValues(t *testing.T) { } func TestEnvironment_VaultToken(t *testing.T) { + ci.Parallel(t) + n := mock.Node() a := mock.Alloc() env := NewBuilder(n, a, a.Job.TaskGroups[0].Tasks[0], "global") @@ -491,6 +508,8 @@ func TestEnvironment_VaultToken(t *testing.T) { } func TestEnvironment_Envvars(t *testing.T) { + ci.Parallel(t) + envMap := map[string]string{"foo": "baz", "bar": "bang"} n := mock.Node() a := mock.Alloc() @@ -512,6 +531,8 @@ func TestEnvironment_Envvars(t *testing.T) { // TestEnvironment_HookVars asserts hook env vars are LWW and deletes of later // writes allow earlier hook's values to be visible. 
func TestEnvironment_HookVars(t *testing.T) { + ci.Parallel(t) + n := mock.Node() a := mock.Alloc() builder := NewBuilder(n, a, a.Job.TaskGroups[0].Tasks[0], "global") @@ -548,6 +569,8 @@ func TestEnvironment_HookVars(t *testing.T) { // TestEnvironment_DeviceHookVars asserts device hook env vars are accessible // separately. func TestEnvironment_DeviceHookVars(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := mock.Node() a := mock.Alloc() @@ -573,6 +596,8 @@ func TestEnvironment_DeviceHookVars(t *testing.T) { } func TestEnvironment_Interpolate(t *testing.T) { + ci.Parallel(t) + n := mock.Node() n.Attributes["arch"] = "x86" n.NodeClass = "test class" @@ -598,6 +623,8 @@ func TestEnvironment_Interpolate(t *testing.T) { } func TestEnvironment_AppendHostEnvvars(t *testing.T) { + ci.Parallel(t) + host := os.Environ() if len(host) < 2 { t.Skip("No host environment variables. Can't test") @@ -620,6 +647,8 @@ func TestEnvironment_AppendHostEnvvars(t *testing.T) { // converted to underscores in environment variables. // See: https://github.com/hashicorp/nomad/issues/2405 func TestEnvironment_DashesInTaskName(t *testing.T) { + ci.Parallel(t) + a := mock.Alloc() task := a.Job.TaskGroups[0].Tasks[0] task.Env = map[string]string{ @@ -639,6 +668,8 @@ func TestEnvironment_DashesInTaskName(t *testing.T) { // TestEnvironment_UpdateTask asserts env vars and task meta are updated when a // task is updated. func TestEnvironment_UpdateTask(t *testing.T) { + ci.Parallel(t) + a := mock.Alloc() a.Job.TaskGroups[0].Meta = map[string]string{"tgmeta": "tgmetaval"} task := a.Job.TaskGroups[0].Tasks[0] @@ -688,6 +719,8 @@ func TestEnvironment_UpdateTask(t *testing.T) { // job, if an optional meta field is not set, it will get interpolated as an // empty string. func TestEnvironment_InterpolateEmptyOptionalMeta(t *testing.T) { + ci.Parallel(t) + require := require.New(t) a := mock.Alloc() a.Job.ParameterizedJob = &structs.ParameterizedJobConfig{ @@ -704,7 +737,7 @@ func TestEnvironment_InterpolateEmptyOptionalMeta(t *testing.T) { // TestEnvironment_Upstreams asserts that group.service.upstreams entries are // added to the environment.
func TestEnvironment_Upstreams(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some upstreams to the mock alloc a := mock.Alloc() @@ -754,6 +787,8 @@ func TestEnvironment_Upstreams(t *testing.T) { } func TestEnvironment_SetPortMapEnvs(t *testing.T) { + ci.Parallel(t) + envs := map[string]string{ "foo": "bar", "NOMAD_PORT_ssh": "2342", @@ -774,6 +809,8 @@ func TestEnvironment_SetPortMapEnvs(t *testing.T) { } func TestEnvironment_TasklessBuilder(t *testing.T) { + ci.Parallel(t) + node := mock.Node() alloc := mock.Alloc() alloc.Job.Meta["jobt"] = "foo" @@ -789,6 +826,8 @@ func TestEnvironment_TasklessBuilder(t *testing.T) { } func TestTaskEnv_ClientPath(t *testing.T) { + ci.Parallel(t) + builder := testEnvBuilder() builder.SetAllocDir("/tmp/testAlloc") builder.SetClientSharedAllocDir("/tmp/testAlloc/alloc") diff --git a/client/taskenv/network_test.go b/client/taskenv/network_test.go index ec892d4d3..5c1f3deba 100644 --- a/client/taskenv/network_test.go +++ b/client/taskenv/network_test.go @@ -3,11 +3,14 @@ package taskenv import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" ) func Test_InterpolateNetworks(t *testing.T) { + ci.Parallel(t) + testCases := []struct { inputTaskEnv *TaskEnv inputNetworks structs.Networks diff --git a/client/taskenv/services_test.go b/client/taskenv/services_test.go index dc6a5593a..bc1ce6d46 100644 --- a/client/taskenv/services_test.go +++ b/client/taskenv/services_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -12,7 +13,7 @@ import ( // TestInterpolateServices asserts that all service // and check fields are properly interpolated. func TestInterpolateServices(t *testing.T) { - t.Parallel() + ci.Parallel(t) services := []*structs.Service{ { @@ -107,7 +108,7 @@ var testEnv = NewTaskEnv( nil, nil, "", "") func TestInterpolate_interpolateMapStringSliceString(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { require.Nil(t, interpolateMapStringSliceString(testEnv, nil)) @@ -125,7 +126,7 @@ func TestInterpolate_interpolateMapStringSliceString(t *testing.T) { } func TestInterpolate_interpolateMapStringString(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { require.Nil(t, interpolateMapStringString(testEnv, nil)) @@ -143,7 +144,7 @@ func TestInterpolate_interpolateMapStringString(t *testing.T) { } func TestInterpolate_interpolateMapStringInterface(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { require.Nil(t, interpolateMapStringInterface(testEnv, nil)) @@ -161,7 +162,7 @@ func TestInterpolate_interpolateMapStringInterface(t *testing.T) { } func TestInterpolate_interpolateConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) e := map[string]string{ "tag1": "_tag1", diff --git a/client/taskenv/util_test.go b/client/taskenv/util_test.go index e97cc5716..4f4538781 100644 --- a/client/taskenv/util_test.go +++ b/client/taskenv/util_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" "github.com/zclconf/go-cty/cty" ) @@ -11,6 +12,8 @@ import ( // TestAddNestedKey_Ok asserts test cases that succeed when passed to // addNestedKey. 
func TestAddNestedKey_Ok(t *testing.T) { + ci.Parallel(t) + cases := []struct { // M will be initialized if unset M map[string]interface{} @@ -209,7 +212,7 @@ func TestAddNestedKey_Ok(t *testing.T) { name = fmt.Sprintf("%s-%d", name, len(tc.M)) } t.Run(name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) if tc.M == nil { tc.M = map[string]interface{}{} } @@ -222,6 +225,8 @@ func TestAddNestedKey_Ok(t *testing.T) { // TestAddNestedKey_Bad asserts test cases return an error when passed to // addNestedKey. func TestAddNestedKey_Bad(t *testing.T) { + ci.Parallel(t) + cases := []struct { // M will be initialized if unset M func() map[string]interface{} @@ -320,7 +325,7 @@ func TestAddNestedKey_Bad(t *testing.T) { name += "-cleanup" } t.Run(name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Copy original M value to ensure it doesn't get altered if tc.M == nil { @@ -341,6 +346,8 @@ func TestAddNestedKey_Bad(t *testing.T) { } func TestCtyify_Ok(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string In map[string]interface{} @@ -402,7 +409,7 @@ func TestCtyify_Ok(t *testing.T) { for i := range cases { tc := cases[i] t.Run(tc.Name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) // ctyify and check for errors result, err := ctyify(tc.In) @@ -417,6 +424,8 @@ func TestCtyify_Ok(t *testing.T) { } func TestCtyify_Bad(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string In map[string]interface{} @@ -441,7 +450,7 @@ func TestCtyify_Bad(t *testing.T) { for i := range cases { tc := cases[i] t.Run(tc.Name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) // ctyify and check for errors result, err := ctyify(tc.In) diff --git a/client/util_test.go b/client/util_test.go deleted file mode 100644 index ca16bbeea..000000000 --- a/client/util_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package client - -/* -TODO(clientv2) -import ( - "reflect" - "testing" - - "github.com/hashicorp/nomad/helper/uuid" - "github.com/hashicorp/nomad/nomad/mock" - "github.com/hashicorp/nomad/nomad/structs" -) - -func TestDiffAllocs(t *testing.T) { - t.Parallel() - alloc1 := mock.Alloc() // Ignore - alloc2 := mock.Alloc() // Update - alloc2u := new(structs.Allocation) - *alloc2u = *alloc2 - alloc2u.AllocModifyIndex += 1 - alloc3 := mock.Alloc() // Remove - alloc4 := mock.Alloc() // Add - - exist := []*structs.Allocation{ - alloc1, - alloc2, - alloc3, - } - update := &allocUpdates{ - pulled: map[string]*structs.Allocation{ - alloc2u.ID: alloc2u, - alloc4.ID: alloc4, - }, - filtered: map[string]struct{}{ - alloc1.ID: {}, - }, - } - - result := diffAllocs(exist, update) - - if len(result.ignore) != 1 || result.ignore[0] != alloc1 { - t.Fatalf("Bad: %#v", result.ignore) - } - if len(result.added) != 1 || result.added[0] != alloc4 { - t.Fatalf("Bad: %#v", result.added) - } - if len(result.removed) != 1 || result.removed[0] != alloc3 { - t.Fatalf("Bad: %#v", result.removed) - } - if len(result.updated) != 1 { - t.Fatalf("Bad: %#v", result.updated) - } - if result.updated[0].exist != alloc2 || result.updated[0].updated != alloc2u { - t.Fatalf("Bad: %#v", result.updated) - } -} - -func TestShuffleStrings(t *testing.T) { - t.Parallel() - // Generate input - inp := make([]string, 10) - for idx := range inp { - inp[idx] = uuid.Generate() - } - - // Copy the input - orig := make([]string, len(inp)) - copy(orig, inp) - - // Shuffle - shuffleStrings(inp) - - // Ensure order is not the same - if reflect.DeepEqual(inp, orig) { - t.Fatalf("shuffle failed") - } -} -*/ diff --git
a/client/vaultclient/vaultclient_test.go b/client/vaultclient/vaultclient_test.go index 9128d14b3..66f711761 100644 --- a/client/vaultclient/vaultclient_test.go +++ b/client/vaultclient/vaultclient_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/testutil" @@ -15,7 +16,8 @@ import ( ) func TestVaultClient_TokenRenewals(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -103,7 +105,8 @@ func TestVaultClient_TokenRenewals(t *testing.T) { // TestVaultClient_NamespaceSupport tests that the Vault namespace config, if present, will result in the // namespace header being set on the created Vault client. func TestVaultClient_NamespaceSupport(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) tr := true testNs := "test-namespace" @@ -120,7 +123,8 @@ func TestVaultClient_NamespaceSupport(t *testing.T) { } func TestVaultClient_Heap(t *testing.T) { - t.Parallel() + ci.Parallel(t) + tr := true conf := config.DefaultConfig() conf.VaultConfig.Enabled = &tr @@ -226,7 +230,8 @@ func TestVaultClient_Heap(t *testing.T) { } func TestVaultClient_RenewNonRenewableLease(t *testing.T) { - t.Parallel() + ci.Parallel(t) + v := testutil.NewTestVault(t) defer v.Stop() @@ -275,7 +280,8 @@ func TestVaultClient_RenewNonRenewableLease(t *testing.T) { } func TestVaultClient_RenewNonexistentLease(t *testing.T) { - t.Parallel() + ci.Parallel(t) + v := testutil.NewTestVault(t) defer v.Stop() @@ -311,7 +317,7 @@ func TestVaultClient_RenewNonexistentLease(t *testing.T) { // TestVaultClient_RenewalTime_Long asserts that for leases over 1m the renewal // time is jittered. func TestVaultClient_RenewalTime_Long(t *testing.T) { - t.Parallel() + ci.Parallel(t) // highRoller is a randIntn func that always returns the max value highRoller := func(n int) int { @@ -337,7 +343,7 @@ func TestVaultClient_RenewalTime_Long(t *testing.T) { // TestVaultClient_RenewalTime_Short asserts that for leases under 1m the renewal // time is lease/2. func TestVaultClient_RenewalTime_Short(t *testing.T) { - t.Parallel() + ci.Parallel(t) dice := func(int) int { require.Fail(t, "dice should not have been called") diff --git a/command/acl_bootstrap_test.go b/command/acl_bootstrap_test.go index 9b029f07a..c972f4488 100644 --- a/command/acl_bootstrap_test.go +++ b/command/acl_bootstrap_test.go @@ -3,13 +3,14 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" ) func TestACLBootstrapCommand(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) // create a acl-enabled server without bootstrapping the token @@ -36,7 +37,7 @@ func TestACLBootstrapCommand(t *testing.T) { // If a bootstrap token has already been created, attempts to create more should // fail. func TestACLBootstrapCommand_ExistingBootstrapToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) config := func(c *agent.Config) { @@ -60,7 +61,7 @@ func TestACLBootstrapCommand_ExistingBootstrapToken(t *testing.T) { // Attempting to bootstrap a token on a non-ACL enabled server should fail. 
func TestACLBootstrapCommand_NonACLServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) srv, _, url := testServer(t, true, nil) diff --git a/command/acl_policy_apply_test.go b/command/acl_policy_apply_test.go index 608dbf0e6..076d0a551 100644 --- a/command/acl_policy_apply_test.go +++ b/command/acl_policy_apply_test.go @@ -6,6 +6,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/mitchellh/cli" @@ -13,8 +14,8 @@ import ( ) func TestACLPolicyApplyCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_policy_delete_test.go b/command/acl_policy_delete_test.go index 2ea171260..2ca293827 100644 --- a/command/acl_policy_delete_test.go +++ b/command/acl_policy_delete_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,8 +15,8 @@ import ( ) func TestACLPolicyDeleteCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_policy_info_test.go b/command/acl_policy_info_test.go index 1d8934303..828b4022b 100644 --- a/command/acl_policy_info_test.go +++ b/command/acl_policy_info_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -12,8 +13,8 @@ import ( ) func TestACLPolicyInfoCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_policy_list_test.go b/command/acl_policy_list_test.go index e18d3725f..ce3f2bcf7 100644 --- a/command/acl_policy_list_test.go +++ b/command/acl_policy_list_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -13,8 +14,8 @@ import ( ) func TestACLPolicyListCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_create_test.go b/command/acl_token_create_test.go index 7d8c59f6c..e24e4c507 100644 --- a/command/acl_token_create_test.go +++ b/command/acl_token_create_test.go @@ -4,14 +4,15 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" ) func TestACLTokenCreateCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_delete_test.go b/command/acl_token_delete_test.go index cbacdae59..8da29208d 100644 --- a/command/acl_token_delete_test.go +++ b/command/acl_token_delete_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,8 +15,8 @@ import ( ) func 
TestACLTokenDeleteCommand_ViaEnvVariable(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_info_test.go b/command/acl_token_info_test.go index 095621452..23a6e15b9 100644 --- a/command/acl_token_info_test.go +++ b/command/acl_token_info_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,10 +15,10 @@ import ( ) func TestACLTokenInfoCommand_ViaEnvVar(t *testing.T) { + ci.Parallel(t) defer os.Setenv("NOMAD_TOKEN", os.Getenv("NOMAD_TOKEN")) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_list_test.go b/command/acl_token_list_test.go index df0f5cb29..59622838b 100644 --- a/command/acl_token_list_test.go +++ b/command/acl_token_list_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -13,8 +14,8 @@ import ( ) func TestACLTokenListCommand(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_self_test.go b/command/acl_token_self_test.go index 30c66dab7..d907cd781 100644 --- a/command/acl_token_self_test.go +++ b/command/acl_token_self_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,10 +15,10 @@ import ( ) func TestACLTokenSelfCommand_ViaEnvVar(t *testing.T) { + ci.Parallel(t) defer os.Setenv("NOMAD_TOKEN", os.Getenv("NOMAD_TOKEN")) assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/acl_token_update_test.go b/command/acl_token_update_test.go index f8a573550..e98002217 100644 --- a/command/acl_token_update_test.go +++ b/command/acl_token_update_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -12,8 +13,9 @@ import ( ) func TestACLTokenUpdateCommand(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) - t.Parallel() config := func(c *agent.Config) { c.ACL.Enabled = true } diff --git a/command/agent/acl_endpoint_test.go b/command/agent/acl_endpoint_test.go index dc34a1e68..64f37a333 100644 --- a/command/agent/acl_endpoint_test.go +++ b/command/agent/acl_endpoint_test.go @@ -5,6 +5,7 @@ import ( "net/http/httptest" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" @@ -12,7 +13,7 @@ import ( ) func TestHTTP_ACLPolicyList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLPolicy() p2 := mock.ACLPolicy() @@ -63,7 +64,7 @@ func TestHTTP_ACLPolicyList(t *testing.T) { } func TestHTTP_ACLPolicyQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLPolicy() args := 
structs.ACLPolicyUpsertRequest{ @@ -112,7 +113,7 @@ func TestHTTP_ACLPolicyQuery(t *testing.T) { } func TestHTTP_ACLPolicyCreate(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { // Make the HTTP request p1 := mock.ACLPolicy() @@ -147,7 +148,7 @@ func TestHTTP_ACLPolicyCreate(t *testing.T) { } func TestHTTP_ACLPolicyDelete(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLPolicy() args := structs.ACLPolicyUpsertRequest{ @@ -189,7 +190,7 @@ func TestHTTP_ACLPolicyDelete(t *testing.T) { } func TestHTTP_ACLTokenBootstrap(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := func(c *Config) { c.ACL.Enabled = true c.ACL.PolicyTTL = 0 // Special flag to disable auto-bootstrap @@ -221,7 +222,7 @@ func TestHTTP_ACLTokenBootstrap(t *testing.T) { } func TestHTTP_ACLTokenList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLToken() p1.AccessorID = "" @@ -275,7 +276,7 @@ func TestHTTP_ACLTokenList(t *testing.T) { } func TestHTTP_ACLTokenQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLToken() p1.AccessorID = "" @@ -324,7 +325,7 @@ func TestHTTP_ACLTokenQuery(t *testing.T) { } func TestHTTP_ACLTokenSelf(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLToken() p1.AccessorID = "" @@ -373,7 +374,7 @@ func TestHTTP_ACLTokenSelf(t *testing.T) { } func TestHTTP_ACLTokenCreate(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { // Make the HTTP request p1 := mock.ACLToken() @@ -407,7 +408,7 @@ func TestHTTP_ACLTokenCreate(t *testing.T) { } func TestHTTP_ACLTokenDelete(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { p1 := mock.ACLToken() p1.AccessorID = "" @@ -451,7 +452,7 @@ func TestHTTP_ACLTokenDelete(t *testing.T) { } func TestHTTP_OneTimeToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { // Setup the ACL token diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go index efad48880..85bc61d44 100644 --- a/command/agent/agent_endpoint_test.go +++ b/command/agent/agent_endpoint_test.go @@ -21,6 +21,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pool" "github.com/hashicorp/nomad/nomad/mock" @@ -31,7 +32,7 @@ import ( ) func TestHTTP_AgentSelf(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { @@ -94,7 +95,7 @@ func TestHTTP_AgentSelf(t *testing.T) { } func TestHTTP_AgentSelf_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -150,7 +151,7 @@ func TestHTTP_AgentSelf_ACL(t *testing.T) { } func TestHTTP_AgentJoin(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Determine the join address member := s.Agent.Server().LocalMember() @@ -182,7 +183,7 @@ func TestHTTP_AgentJoin(t *testing.T) { } func TestHTTP_AgentMembers(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("GET", "/v1/agent/members", nil) @@ -206,7 +207,7 @@ func TestHTTP_AgentMembers(t *testing.T) { } 
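The table-driven tests converted in this patch (TestAddNestedKey_Ok and TestCtyify_Ok above, TestHTTP_XSS_Monitor below) call the helper both in the parent test and inside each t.Run closure, and they keep the tc := cases[i] copy before launching the subtest. Both details matter once subtests run in parallel: the parent returns before its parallel children finish, and without the per-iteration copy every closure would share one loop variable on the Go versions in use here (before 1.22). A condensed sketch of the pattern, with illustrative case data:

package command

import (
	"testing"

	"github.com/hashicorp/nomad/ci"
)

func TestPattern_ParallelSubtests(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		Name string
		In   int
		Out  int
	}{
		{Name: "zero", In: 0, Out: 0},
		{Name: "double", In: 2, Out: 4},
	}

	for i := range cases {
		tc := cases[i] // copy: parallel closures must not share the loop variable
		t.Run(tc.Name, func(t *testing.T) {
			ci.Parallel(t)
			if got := tc.In * 2; got != tc.Out {
				t.Fatalf("got %d, want %d", got, tc.Out)
			}
		})
	}
}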
func TestHTTP_AgentMembers_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -260,7 +261,7 @@ func TestHTTP_AgentMembers_ACL(t *testing.T) { } func TestHTTP_AgentMonitor(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("invalid log_json parameter", func(t *testing.T) { httpTest(t, nil, func(s *TestAgent) { @@ -452,6 +453,8 @@ func TestHTTP_AgentMonitor(t *testing.T) { // | /agent/pprof | `false` | on | **yes** | // +---------------+------------------+--------+------------------+ func TestAgent_PprofRequest_Permissions(t *testing.T) { + ci.Parallel(t) + trueP, falseP := helper.BoolToPtr(true), helper.BoolToPtr(false) cases := []struct { acl *bool @@ -524,6 +527,8 @@ func TestAgent_PprofRequest_Permissions(t *testing.T) { } func TestAgent_PprofRequest(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string url string @@ -634,7 +639,7 @@ func (r *closableRecorder) CloseNotify() <-chan bool { } func TestHTTP_AgentForceLeave(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("PUT", "/v1/agent/force-leave?node=foo", nil) @@ -652,7 +657,7 @@ func TestHTTP_AgentForceLeave(t *testing.T) { } func TestHTTP_AgentForceLeave_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -702,7 +707,7 @@ func TestHTTP_AgentForceLeave_ACL(t *testing.T) { } func TestHTTP_AgentSetServers(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { addr := s.Config.AdvertiseAddrs.RPC @@ -764,7 +769,7 @@ func TestHTTP_AgentSetServers(t *testing.T) { } func TestHTTP_AgentSetServers_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -836,7 +841,7 @@ func TestHTTP_AgentSetServers_ACL(t *testing.T) { } func TestHTTP_AgentListServers_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -901,7 +906,7 @@ func TestHTTP_AgentListServers_ACL(t *testing.T) { } func TestHTTP_AgentListKeys(t *testing.T) { - t.Parallel() + ci.Parallel(t) key1 := "HS5lJ+XuTlYKWaeGYyG+/A==" @@ -922,7 +927,7 @@ func TestHTTP_AgentListKeys(t *testing.T) { } func TestHTTP_AgentListKeys_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) key1 := "HS5lJ+XuTlYKWaeGYyG+/A==" @@ -982,7 +987,7 @@ func TestHTTP_AgentListKeys_ACL(t *testing.T) { } func TestHTTP_AgentInstallKey(t *testing.T) { - t.Parallel() + ci.Parallel(t) key1 := "HS5lJ+XuTlYKWaeGYyG+/A==" key2 := "wH1Bn9hlJ0emgWB1JttVRA==" @@ -1022,7 +1027,7 @@ func TestHTTP_AgentInstallKey(t *testing.T) { } func TestHTTP_AgentRemoveKey(t *testing.T) { - t.Parallel() + ci.Parallel(t) key1 := "HS5lJ+XuTlYKWaeGYyG+/A==" key2 := "wH1Bn9hlJ0emgWB1JttVRA==" @@ -1071,7 +1076,7 @@ func TestHTTP_AgentRemoveKey(t *testing.T) { } func TestHTTP_AgentHealth_Ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Enable ACLs to ensure they're not enforced @@ -1151,7 +1156,7 @@ func TestHTTP_AgentHealth_Ok(t *testing.T) { } func TestHTTP_AgentHealth_BadServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) serverAgent := NewTestAgent(t, "server", nil) @@ -1197,7 +1202,7 @@ func TestHTTP_AgentHealth_BadServer(t *testing.T) { } func TestHTTP_AgentHealth_BadClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := 
require.New(t) // Disable client to make server unhealthy if requested @@ -1350,7 +1355,7 @@ func NewFakeRW() *fakeRW { // TestHTTP_XSS_Monitor asserts /v1/agent/monitor is safe against XSS attacks // even when log output contains HTML+Javascript. func TestHTTP_XSS_Monitor(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -1382,7 +1387,7 @@ func TestHTTP_XSS_Monitor(t *testing.T) { for i := range cases { tc := cases[i] t.Run(tc.Name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -1594,6 +1599,8 @@ func schedulerWorkerInfoTest_testCases() []schedulerWorkerAPITest_testCase { } func TestHTTP_AgentSchedulerWorkerInfoRequest(t *testing.T) { + ci.Parallel(t) + configFn := func(c *Config) { var numSchedulers = 4 c.Server.NumSchedulers = &numSchedulers @@ -1886,6 +1893,8 @@ func schedulerWorkerConfigTest_testCases() []scheduleWorkerConfigTest_workerRequ } func TestHTTP_AgentSchedulerWorkerConfigRequest_NoACL(t *testing.T) { + ci.Parallel(t) + configFn := func(c *Config) { var numSchedulers = 8 c.Server.NumSchedulers = &numSchedulers @@ -1917,6 +1926,8 @@ func TestHTTP_AgentSchedulerWorkerConfigRequest_NoACL(t *testing.T) { } func TestHTTP_AgentSchedulerWorkerConfigRequest_ACL(t *testing.T) { + ci.Parallel(t) + configFn := func(c *Config) { var numSchedulers = 8 c.Server.NumSchedulers = &numSchedulers @@ -2002,6 +2013,8 @@ func schedulerWorkerTest_parseError(t *testing.T, isACLEnabled bool, tc schedule } func TestHTTP_AgentSchedulerWorkerInfoRequest_Client(t *testing.T) { + ci.Parallel(t) + verbs := []string{"GET", "POST", "PUT"} path := "schedulers" @@ -2026,6 +2039,8 @@ func TestHTTP_AgentSchedulerWorkerInfoRequest_Client(t *testing.T) { } func TestHTTP_AgentSchedulerWorkerConfigRequest_Client(t *testing.T) { + ci.Parallel(t) + verbs := []string{"GET", "POST", "PUT"} path := "schedulers/config" diff --git a/command/agent/agent_test.go b/command/agent/agent_test.go index f61e47c04..5318c75d5 100644 --- a/command/agent/agent_test.go +++ b/command/agent/agent_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" @@ -27,7 +28,7 @@ func tmpDir(t testing.TB) string { } func TestAgent_RPC_Ping(t *testing.T) { - t.Parallel() + ci.Parallel(t) agent := NewTestAgent(t, t.Name(), nil) defer agent.Shutdown() @@ -38,7 +39,7 @@ func TestAgent_RPC_Ping(t *testing.T) { } func TestAgent_ServerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := DefaultConfig() conf.DevMode = true // allow localhost for advertise addrs conf.Server.Enabled = true @@ -183,6 +184,8 @@ func TestAgent_ServerConfig(t *testing.T) { } func TestAgent_ServerConfig_SchedulerFlags(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string input *structs.SchedulerConfiguration @@ -249,7 +252,7 @@ func TestAgent_ServerConfig_SchedulerFlags(t *testing.T) { // cause errors. This is the server-only (RPC) counterpart to // TestHTTPServer_Limits_Error. func TestAgent_ServerConfig_Limits_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -316,7 +319,7 @@ func TestAgent_ServerConfig_Limits_Error(t *testing.T) { // cause errors. This is the server-only (RPC) counterpart to // TestHTTPServer_Limits_OK. 
func TestAgent_ServerConfig_Limits_OK(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -371,7 +374,7 @@ func TestAgent_ServerConfig_Limits_OK(t *testing.T) { } func TestAgent_ServerConfig_RaftMultiplier_Ok(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { multiplier *int @@ -456,7 +459,7 @@ func TestAgent_ServerConfig_RaftMultiplier_Ok(t *testing.T) { } func TestAgent_ServerConfig_RaftMultiplier_Bad(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []int{ -1, @@ -478,7 +481,7 @@ func TestAgent_ServerConfig_RaftMultiplier_Bad(t *testing.T) { } func TestAgent_ClientConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := DefaultConfig() conf.Client.Enabled = true @@ -522,7 +525,7 @@ func TestAgent_ClientConfig(t *testing.T) { } func TestAgent_ClientConfig_ReservedCores(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := DefaultConfig() conf.Client.Enabled = true conf.Client.ReserveableCores = "0-7" @@ -536,6 +539,8 @@ func TestAgent_ClientConfig_ReservedCores(t *testing.T) { // Clients should inherit telemetry configuration func TestAgent_Client_TelemetryConfiguration(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) conf := DefaultConfig() @@ -556,7 +561,7 @@ func TestAgent_Client_TelemetryConfiguration(t *testing.T) { // TestAgent_HTTPCheck asserts Agent.agentHTTPCheck properly alters the HTTP // API health check depending on configuration. func TestAgent_HTTPCheck(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) agent := func() *Agent { return &Agent{ @@ -634,7 +639,7 @@ func TestAgent_HTTPCheck(t *testing.T) { // TestAgent_HTTPCheckPath asserts clients and servers use different endpoints // for healthchecks. func TestAgent_HTTPCheckPath(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Agent.agentHTTPCheck only needs a config and logger a := &Agent{ config: DevConfig(nil), @@ -669,7 +674,7 @@ func TestAgent_HTTPCheckPath(t *testing.T) { // reloaded. I can't find a good way to fetch this from the logger itself, so // we pull it only from the agents configuration struct, not the logger. func TestAgent_Reload_LogLevel(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) agent := NewTestAgent(t, t.Name(), func(c *Config) { @@ -691,7 +696,7 @@ func TestAgent_Reload_LogLevel(t *testing.T) { // across the Agent, Server, and Client. This is essential for certificate // reloading to work. func TestServer_Reload_TLS_Shared_Keyloader(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) // We will start out with a bad cert and then reload with a good one. 
@@ -759,7 +764,7 @@ func TestServer_Reload_TLS_Shared_Keyloader(t *testing.T) { } func TestServer_Reload_TLS_Certificate(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -808,7 +813,7 @@ func TestServer_Reload_TLS_Certificate(t *testing.T) { } func TestServer_Reload_TLS_Certificate_Invalid(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -853,6 +858,8 @@ func TestServer_Reload_TLS_Certificate_Invalid(t *testing.T) { } func Test_GetConfig(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) agentConfig := &Config{ @@ -877,7 +884,7 @@ func Test_GetConfig(t *testing.T) { } func TestServer_Reload_TLS_WithNilConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) logger := testlog.HCLogger(t) @@ -893,7 +900,7 @@ func TestServer_Reload_TLS_WithNilConfiguration(t *testing.T) { } func TestServer_Reload_TLS_UpgradeToTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -936,7 +943,7 @@ func TestServer_Reload_TLS_UpgradeToTLS(t *testing.T) { } func TestServer_Reload_TLS_DowngradeFromTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -979,7 +986,7 @@ func TestServer_Reload_TLS_DowngradeFromTLS(t *testing.T) { } func TestServer_ShouldReload_ReturnFalseForNoChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -1019,7 +1026,7 @@ func TestServer_ShouldReload_ReturnFalseForNoChanges(t *testing.T) { } func TestServer_ShouldReload_ReturnTrueForOnlyHTTPChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) const ( @@ -1059,7 +1066,7 @@ func TestServer_ShouldReload_ReturnTrueForOnlyHTTPChanges(t *testing.T) { } func TestServer_ShouldReload_ReturnTrueForOnlyRPCChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -1099,7 +1106,7 @@ func TestServer_ShouldReload_ReturnTrueForOnlyRPCChanges(t *testing.T) { } func TestServer_ShouldReload_ReturnTrueForConfigChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -1141,7 +1148,7 @@ func TestServer_ShouldReload_ReturnTrueForConfigChanges(t *testing.T) { } func TestServer_ShouldReload_ReturnTrueForFileChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) oldCertificate := ` @@ -1244,7 +1251,7 @@ func TestServer_ShouldReload_ReturnTrueForFileChanges(t *testing.T) { } func TestServer_ShouldReload_ShouldHandleMultipleChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) const ( @@ -1297,7 +1304,7 @@ func TestServer_ShouldReload_ShouldHandleMultipleChanges(t *testing.T) { } func TestServer_ShouldReload_ReturnTrueForRPCUpgradeModeChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) sameAgentConfig := &Config{ TLSConfig: &config.TLSConfig{ @@ -1318,7 +1325,7 @@ func TestServer_ShouldReload_ReturnTrueForRPCUpgradeModeChanges(t *testing.T) { } func TestAgent_ProxyRPC_Dev(t *testing.T) { - t.Parallel() + ci.Parallel(t) agent := NewTestAgent(t, t.Name(), nil) defer agent.Shutdown() diff --git a/command/agent/alloc_endpoint_test.go b/command/agent/alloc_endpoint_test.go index ce1d994bc..9896cdbdd 100644 --- a/command/agent/alloc_endpoint_test.go +++ b/command/agent/alloc_endpoint_test.go @@ -15,6 +15,7 @@ import ( "github.com/golang/snappy" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/helper" 
"github.com/hashicorp/nomad/helper/uuid" @@ -25,7 +26,7 @@ import ( ) func TestHTTP_AllocsList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -86,7 +87,7 @@ func TestHTTP_AllocsList(t *testing.T) { } func TestHTTP_AllocsPrefixList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -157,7 +158,7 @@ func TestHTTP_AllocsPrefixList(t *testing.T) { } func TestHTTP_AllocQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -198,7 +199,7 @@ func TestHTTP_AllocQuery(t *testing.T) { } func TestHTTP_AllocQuery_Payload(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -255,7 +256,7 @@ func TestHTTP_AllocQuery_Payload(t *testing.T) { } func TestHTTP_AllocRestart(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Validates that all methods of forwarding the request are processed correctly @@ -323,7 +324,7 @@ func TestHTTP_AllocRestart(t *testing.T) { } func TestHTTP_AllocRestart_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -388,7 +389,7 @@ func TestHTTP_AllocRestart_ACL(t *testing.T) { } func TestHTTP_AllocStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -434,7 +435,7 @@ func TestHTTP_AllocStop(t *testing.T) { } func TestHTTP_AllocStats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { @@ -498,7 +499,7 @@ func TestHTTP_AllocStats(t *testing.T) { } func TestHTTP_AllocStats_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { @@ -553,7 +554,7 @@ func TestHTTP_AllocStats_ACL(t *testing.T) { } func TestHTTP_AllocSnapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("GET", "/v1/client/allocation/123/snapshot", nil) @@ -571,7 +572,7 @@ func TestHTTP_AllocSnapshot(t *testing.T) { } func TestHTTP_AllocSnapshot_WithMigrateToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { // Request without a token fails @@ -607,7 +608,7 @@ func TestHTTP_AllocSnapshot_WithMigrateToken(t *testing.T) { // TestHTTP_AllocSnapshot_Atomic ensures that when a client encounters an error // snapshotting a valid tar is not returned. 
func TestHTTP_AllocSnapshot_Atomic(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, func(c *Config) { // Disable the schedulers c.Server.NumSchedulers = helper.IntToPtr(0) @@ -716,7 +717,7 @@ func TestHTTP_AllocSnapshot_Atomic(t *testing.T) { } func TestHTTP_AllocGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) path := fmt.Sprintf("/v1/client/allocation/%s/gc", uuid.Generate()) httpTest(t, nil, func(s *TestAgent) { @@ -786,7 +787,7 @@ func TestHTTP_AllocGC(t *testing.T) { } func TestHTTP_AllocGC_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) path := fmt.Sprintf("/v1/client/allocation/%s/gc", uuid.Generate()) @@ -842,7 +843,7 @@ func TestHTTP_AllocGC_ACL(t *testing.T) { } func TestHTTP_AllocAllGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // Local node, local resp @@ -904,7 +905,7 @@ func TestHTTP_AllocAllGC(t *testing.T) { } func TestHTTP_AllocAllGC_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpACLTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -953,6 +954,8 @@ func TestHTTP_AllocAllGC_ACL(t *testing.T) { } func TestHTTP_ReadWsHandshake(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string token string diff --git a/command/agent/command_test.go b/command/agent/command_test.go index bd722d9fb..ae6ed0185 100644 --- a/command/agent/command_test.go +++ b/command/agent/command_test.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -17,12 +18,12 @@ import ( ) func TestCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &Command{} } func TestCommand_Args(t *testing.T) { - t.Parallel() + ci.Parallel(t) tmpDir, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -96,6 +97,8 @@ func TestCommand_Args(t *testing.T) { } func TestCommand_MetaConfigValidation(t *testing.T) { + ci.Parallel(t) + tmpDir, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -149,6 +152,8 @@ func TestCommand_MetaConfigValidation(t *testing.T) { } func TestCommand_NullCharInDatacenter(t *testing.T) { + ci.Parallel(t) + tmpDir, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -198,6 +203,8 @@ func TestCommand_NullCharInDatacenter(t *testing.T) { } func TestCommand_NullCharInRegion(t *testing.T) { + ci.Parallel(t) + tmpDir, err := ioutil.TempDir("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -248,6 +255,7 @@ func TestCommand_NullCharInRegion(t *testing.T) { // TestIsValidConfig asserts that invalid configurations return false. 
func TestIsValidConfig(t *testing.T) { + ci.Parallel(t) cases := []struct { name string diff --git a/command/agent/config_parse_test.go b/command/agent/config_parse_test.go index 4b6129e7d..d2567c7fc 100644 --- a/command/agent/config_parse_test.go +++ b/command/agent/config_parse_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs/config" @@ -411,7 +412,7 @@ var nonoptConfig = &Config{ } func TestConfig_ParseMerge(t *testing.T) { - t.Parallel() + ci.Parallel(t) path, err := filepath.Abs(filepath.Join(".", "testdata", "basic.hcl")) require.NoError(t, err) @@ -435,7 +436,7 @@ func TestConfig_ParseMerge(t *testing.T) { } func TestConfig_Parse(t *testing.T) { - t.Parallel() + ci.Parallel(t) basicConfig.addDefaults() pluginConfig.addDefaults() @@ -545,6 +546,8 @@ func (c *Config) addDefaults() { // length 1 described in // https://github.com/hashicorp/nomad/issues/1290 func TestConfig_ParsePanic(t *testing.T) { + ci.Parallel(t) + c, err := ParseConfigFile("./testdata/obj-len-one.hcl") if err != nil { t.Fatalf("parse error: %s\n", err) @@ -561,6 +564,8 @@ func TestConfig_ParsePanic(t *testing.T) { // Top level keys left by hcl when parsing slices in the config // structure should not be unexpected func TestConfig_ParseSliceExtra(t *testing.T) { + ci.Parallel(t) + c, err := ParseConfigFile("./testdata/config-slices.json") require.NoError(t, err) @@ -677,6 +682,8 @@ var sample0 = &Config{ } func TestConfig_ParseSample0(t *testing.T) { + ci.Parallel(t) + c, err := ParseConfigFile("./testdata/sample0.json") require.NoError(t, err) require.EqualValues(t, sample0, c) @@ -766,6 +773,8 @@ var sample1 = &Config{ } func TestConfig_ParseDir(t *testing.T) { + ci.Parallel(t) + c, err := LoadConfig("./testdata/sample1") require.NoError(t, err) @@ -798,6 +807,8 @@ func TestConfig_ParseDir(t *testing.T) { // that parsing a directory config is the equivalent of // parsing individual files in any order func TestConfig_ParseDir_Matches_IndividualParsing(t *testing.T) { + ci.Parallel(t) + dirConfig, err := LoadConfig("./testdata/sample1") require.NoError(t, err) diff --git a/command/agent/config_test.go b/command/agent/config_test.go index b795f8fad..213e2e40b 100644 --- a/command/agent/config_test.go +++ b/command/agent/config_test.go @@ -13,6 +13,7 @@ import ( "time" sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/nomad/ci" client "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper" @@ -29,6 +30,8 @@ var ( ) func TestConfig_Merge(t *testing.T) { + ci.Parallel(t) + c0 := &Config{} c1 := &Config{ @@ -437,6 +440,8 @@ func TestConfig_Merge(t *testing.T) { } func TestConfig_ParseConfigFile(t *testing.T) { + ci.Parallel(t) + // Fails if the file doesn't exist if _, err := ParseConfigFile("/unicorns/leprechauns"); err == nil { t.Fatalf("expected error, got nothing") @@ -477,6 +482,8 @@ func TestConfig_ParseConfigFile(t *testing.T) { } func TestConfig_LoadConfigDir(t *testing.T) { + ci.Parallel(t) + // Fails if the dir doesn't exist. 
if _, err := LoadConfigDir("/unicorns/leprechauns"); err == nil { t.Fatalf("expected error, got nothing") @@ -535,6 +542,8 @@ func TestConfig_LoadConfigDir(t *testing.T) { } func TestConfig_LoadConfig(t *testing.T) { + ci.Parallel(t) + // Fails if the target doesn't exist if _, err := LoadConfig("/unicorns/leprechauns"); err == nil { t.Fatalf("expected error, got nothing") @@ -594,6 +603,8 @@ func TestConfig_LoadConfig(t *testing.T) { } func TestConfig_LoadConfigsFileOrder(t *testing.T) { + ci.Parallel(t) + config1, err := LoadConfigDir("test-resources/etcnomad") if err != nil { t.Fatalf("Failed to load config: %s", err) @@ -620,6 +631,8 @@ func TestConfig_LoadConfigsFileOrder(t *testing.T) { } func TestConfig_Listener(t *testing.T) { + ci.Parallel(t) + config := DefaultConfig() // Fails on invalid input @@ -669,6 +682,8 @@ func TestConfig_Listener(t *testing.T) { } func TestConfig_DevModeFlag(t *testing.T) { + ci.Parallel(t) + cases := []struct { dev bool connect bool @@ -727,6 +742,8 @@ func TestConfig_DevModeFlag(t *testing.T) { // TestConfig_normalizeAddrs_DevMode asserts that normalizeAddrs allows // advertising localhost in dev mode. func TestConfig_normalizeAddrs_DevMode(t *testing.T) { + ci.Parallel(t) + // allow to advertise 127.0.0.1 if dev-mode is enabled c := &Config{ BindAddr: "127.0.0.1", @@ -777,6 +794,8 @@ func TestConfig_normalizeAddrs_DevMode(t *testing.T) { // TestConfig_normalizeAddrs_NoAdvertise asserts that normalizeAddrs will // fail if no valid advertise address available in non-dev mode. func TestConfig_normalizeAddrs_NoAdvertise(t *testing.T) { + ci.Parallel(t) + c := &Config{ BindAddr: "127.0.0.1", Ports: &Ports{ @@ -809,6 +828,8 @@ func TestConfig_normalizeAddrs_NoAdvertise(t *testing.T) { // TestConfig_normalizeAddrs_AdvertiseLocalhost asserts localhost can be // advertised if it's explicitly set in the config. func TestConfig_normalizeAddrs_AdvertiseLocalhost(t *testing.T) { + ci.Parallel(t) + c := &Config{ BindAddr: "127.0.0.1", Ports: &Ports{ @@ -846,6 +867,8 @@ func TestConfig_normalizeAddrs_AdvertiseLocalhost(t *testing.T) { // TestConfig_normalizeAddrs_IPv6Loopback asserts that an IPv6 loopback address // is normalized properly. See #2739 func TestConfig_normalizeAddrs_IPv6Loopback(t *testing.T) { + ci.Parallel(t) + c := &Config{ BindAddr: "::1", Ports: &Ports{ @@ -884,6 +907,8 @@ func TestConfig_normalizeAddrs_IPv6Loopback(t *testing.T) { // TestConfig_normalizeAddrs_MultipleInterface asserts that normalizeAddrs will // handle normalizing multiple interfaces in a single protocol. 
func TestConfig_normalizeAddrs_MultipleInterfaces(t *testing.T) { + ci.Parallel(t) + testCases := []struct { name string addressConfig *Addresses @@ -931,6 +956,8 @@ func TestConfig_normalizeAddrs_MultipleInterfaces(t *testing.T) { } func TestConfig_normalizeAddrs(t *testing.T) { + ci.Parallel(t) + c := &Config{ BindAddr: "169.254.1.5", Ports: &Ports{ @@ -1042,6 +1069,8 @@ func TestConfig_normalizeAddrs(t *testing.T) { } func TestConfig_templateNetworkInterface(t *testing.T) { + ci.Parallel(t) + // find the first interface ifaces, err := sockaddr.GetAllInterfaces() if err != nil { @@ -1139,6 +1168,8 @@ func TestConfig_templateNetworkInterface(t *testing.T) { } func TestIsMissingPort(t *testing.T) { + ci.Parallel(t) + _, _, err := net.SplitHostPort("localhost") if missing := isMissingPort(err); !missing { t.Errorf("expected missing port error, but got %v", err) @@ -1150,6 +1181,8 @@ func TestIsMissingPort(t *testing.T) { } func TestMergeServerJoin(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { @@ -1256,7 +1289,8 @@ func TestMergeServerJoin(t *testing.T) { } func TestTelemetry_PrefixFilters(t *testing.T) { - t.Parallel() + ci.Parallel(t) + cases := []struct { in []string expAllow []string @@ -1298,6 +1332,8 @@ func TestTelemetry_PrefixFilters(t *testing.T) { } func TestTelemetry_Parse(t *testing.T) { + ci.Parallel(t) + require := require.New(t) dir, err := ioutil.TempDir("", "nomad") require.NoError(err) @@ -1321,6 +1357,7 @@ func TestTelemetry_Parse(t *testing.T) { } func TestEventBroker_Parse(t *testing.T) { + ci.Parallel(t) require := require.New(t) { @@ -1367,6 +1404,8 @@ func TestEventBroker_Parse(t *testing.T) { } func TestConfig_LoadConsulTemplateConfig(t *testing.T) { + ci.Parallel(t) + defaultConfig := DefaultConfig() // Test that loading without template config didn't create load errors agentConfig, err := LoadConfig("test-resources/minimal_client.hcl") @@ -1414,6 +1453,8 @@ func TestConfig_LoadConsulTemplateConfig(t *testing.T) { } func TestConfig_LoadConsulTemplateBasic(t *testing.T) { + ci.Parallel(t) + defaultConfig := DefaultConfig() // hcl @@ -1449,6 +1490,8 @@ func TestConfig_LoadConsulTemplateBasic(t *testing.T) { } func TestParseMultipleIPTemplates(t *testing.T) { + ci.Parallel(t) + testCases := []struct { name string tmpl string diff --git a/command/agent/consul/check_watcher_test.go b/command/agent/consul/check_watcher_test.go index c24c4d10d..9323d32bb 100644 --- a/command/agent/consul/check_watcher_test.go +++ b/command/agent/consul/check_watcher_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -172,7 +173,7 @@ func testCheck() *structs.ServiceCheck { // TestCheckWatcher_Skip asserts unwatched checks are ignored. func TestCheckWatcher_Skip(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a check with restarting disabled check := testCheck() @@ -194,7 +195,7 @@ func TestCheckWatcher_Skip(t *testing.T) { // TestCheckWatcher_Healthy asserts healthy tasks are not restarted. func TestCheckWatcher_Healthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -228,7 +229,7 @@ func TestCheckWatcher_Healthy(t *testing.T) { // TestCheckWatcher_Unhealthy asserts unhealthy tasks are restarted exactly once. 
func TestCheckWatcher_Unhealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -251,7 +252,7 @@ func TestCheckWatcher_Unhealthy(t *testing.T) { // TestCheckWatcher_HealthyWarning asserts checks in warning with // ignore_warnings=true do not restart tasks. func TestCheckWatcher_HealthyWarning(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -279,7 +280,7 @@ func TestCheckWatcher_HealthyWarning(t *testing.T) { // TestCheckWatcher_Flapping asserts checks that flap from healthy to unhealthy // before the unhealthy limit is reached do not restart tasks. func TestCheckWatcher_Flapping(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -308,7 +309,7 @@ func TestCheckWatcher_Flapping(t *testing.T) { // TestCheckWatcher_Unwatch asserts unwatching checks prevents restarts. func TestCheckWatcher_Unwatch(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -337,7 +338,7 @@ func TestCheckWatcher_Unwatch(t *testing.T) { // for a single task, all checks should be removed when any of them restart the // task to avoid multiple restarts. func TestCheckWatcher_MultipleChecks(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) @@ -386,7 +387,7 @@ func TestCheckWatcher_MultipleChecks(t *testing.T) { // attempting to restart a task even if its update queue is full. // https://github.com/hashicorp/nomad/issues/5395 func TestCheckWatcher_Deadlock(t *testing.T) { - t.Parallel() + ci.Parallel(t) fakeAPI, cw := testWatcherSetup(t) diff --git a/command/agent/consul/connect_proxies_test.go b/command/agent/consul/connect_proxies_test.go index 8ebcee193..b9dad693c 100644 --- a/command/agent/consul/connect_proxies_test.go +++ b/command/agent/consul/connect_proxies_test.go @@ -3,10 +3,13 @@ package consul import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestConnectProxies_Proxies(t *testing.T) { + ci.Parallel(t) + pc := NewConnectProxiesClient(NewMockAgent(ossFeatures)) proxies, err := pc.Proxies() diff --git a/command/agent/consul/connect_test.go b/command/agent/consul/connect_test.go index 4f3430664..4e7f1a6d2 100644 --- a/command/agent/consul/connect_test.go +++ b/command/agent/consul/connect_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -30,7 +31,7 @@ var ( ) func TestConnect_newConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { asr, err := newConnect("", "", nil, nil, nil) @@ -111,7 +112,7 @@ func TestConnect_newConnect(t *testing.T) { } func TestConnect_connectSidecarRegistration(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { sidecarReg, err := connectSidecarRegistration("", nil, testConnectNetwork, testConnectPorts) @@ -172,7 +173,7 @@ func TestConnect_connectSidecarRegistration(t *testing.T) { } func TestConnect_connectProxy(t *testing.T) { - t.Parallel() + ci.Parallel(t) // If the input proxy is nil, we expect the output to be a proxy with its // config set to default values. 
@@ -243,7 +244,7 @@ func TestConnect_connectProxy(t *testing.T) { } func TestConnect_connectProxyExpose(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { exposeConfig, err := connectProxyExpose(nil, nil) @@ -284,7 +285,7 @@ func TestConnect_connectProxyExpose(t *testing.T) { } func TestConnect_connectProxyExposePaths(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { upstreams, err := connectProxyExposePaths(nil, nil) @@ -332,7 +333,7 @@ func TestConnect_connectProxyExposePaths(t *testing.T) { } func TestConnect_connectUpstreams(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { require.Nil(t, connectUpstreams(nil)) @@ -363,7 +364,7 @@ func TestConnect_connectUpstreams(t *testing.T) { } func TestConnect_connectProxyConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil map", func(t *testing.T) { require.Equal(t, map[string]interface{}{ @@ -384,7 +385,7 @@ func TestConnect_connectProxyConfig(t *testing.T) { } func TestConnect_getConnectPort(t *testing.T) { - t.Parallel() + ci.Parallel(t) networks := structs.Networks{{ IP: "192.168.30.1", @@ -432,7 +433,7 @@ func TestConnect_getConnectPort(t *testing.T) { } func TestConnect_getExposePathPort(t *testing.T) { - t.Parallel() + ci.Parallel(t) networks := structs.Networks{{ Device: "eth0", @@ -470,7 +471,7 @@ func TestConnect_getExposePathPort(t *testing.T) { } func TestConnect_newConnectGateway(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("not a gateway", func(t *testing.T) { result := newConnectGateway("s1", &structs.ConsulConnect{Native: true}) @@ -546,7 +547,7 @@ func TestConnect_newConnectGateway(t *testing.T) { } func Test_connectMeshGateway(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { result := connectMeshGateway(nil) diff --git a/command/agent/consul/group_test.go b/command/agent/consul/group_test.go index a76aac73e..43c82dd3a 100644 --- a/command/agent/consul/group_test.go +++ b/command/agent/consul/group_test.go @@ -7,6 +7,7 @@ import ( consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,6 +15,8 @@ import ( ) func TestConsul_Connect(t *testing.T) { + ci.Parallel(t) + // Create an embedded Consul server testconsul, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) { // If -v wasn't specified squelch consul logging diff --git a/command/agent/consul/int_test.go b/command/agent/consul/int_test.go index 12aa80f8f..b49e082eb 100644 --- a/command/agent/consul/int_test.go +++ b/command/agent/consul/int_test.go @@ -10,6 +10,7 @@ import ( consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/sdk/testutil" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/allocrunner/taskrunner" "github.com/hashicorp/nomad/client/config" @@ -35,6 +36,8 @@ func (m *mockUpdater) TaskStateUpdated() { // TestConsul_Integration asserts TaskRunner properly registers and deregisters // services and checks with Consul using an embedded Consul agent. 
func TestConsul_Integration(t *testing.T) { + ci.Parallel(t) + if testing.Short() { t.Skip("-short set; skipping") } diff --git a/command/agent/consul/namespaces_client_test.go b/command/agent/consul/namespaces_client_test.go index bc7ebdf37..6b8704920 100644 --- a/command/agent/consul/namespaces_client_test.go +++ b/command/agent/consul/namespaces_client_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestNamespacesClient_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("oss", func(t *testing.T) { c := NewNamespacesClient(NewMockNamespaces(nil), NewMockAgent(Features{ @@ -45,7 +46,7 @@ func TestNamespacesClient_List(t *testing.T) { } func TestNewNamespacesClient_stale(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("ok", func(t *testing.T) { now := time.Now() @@ -63,7 +64,7 @@ func TestNewNamespacesClient_stale(t *testing.T) { } func TestNewNamespacesClient_allowable(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(ent, feature, enabled, exp bool, updated, now time.Time) { expired := now.After(updated.Add(namespaceEnabledCacheTTL)) diff --git a/command/agent/consul/self_test.go b/command/agent/consul/self_test.go index 3089c2422..db274d7f0 100644 --- a/command/agent/consul/self_test.go +++ b/command/agent/consul/self_test.go @@ -3,6 +3,7 @@ package consul import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -14,7 +15,7 @@ var ( ) func TestSelf_SKU(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("oss", func(t *testing.T) { s, ok := SKU(Self{ @@ -64,7 +65,7 @@ func TestSelf_SKU(t *testing.T) { } func TestSelf_Namespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("supports namespaces", func(t *testing.T) { enabled := Namespaces(Self{ diff --git a/command/agent/consul/service_client_test.go b/command/agent/consul/service_client_test.go index 9cacaa38d..ffe5bc661 100644 --- a/command/agent/consul/service_client_test.go +++ b/command/agent/consul/service_client_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -13,7 +14,7 @@ import ( ) func TestSyncLogic_agentServiceUpdateRequired(t *testing.T) { - t.Parallel() + ci.Parallel(t) // the service as known by nomad wanted := func() api.AgentServiceRegistration { @@ -253,6 +254,8 @@ func TestSyncLogic_agentServiceUpdateRequired(t *testing.T) { } func TestSyncLogic_tagsDifferent(t *testing.T) { + ci.Parallel(t) + t.Run("nil nil", func(t *testing.T) { require.False(t, tagsDifferent(nil, nil)) }) @@ -284,6 +287,8 @@ func TestSyncLogic_tagsDifferent(t *testing.T) { } func TestSyncLogic_sidecarTagsDifferent(t *testing.T) { + ci.Parallel(t) + type tc struct { parent, wanted, sidecar []string expect bool @@ -310,7 +315,7 @@ func TestSyncLogic_sidecarTagsDifferent(t *testing.T) { } func TestSyncLogic_maybeTweakTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) differentPointers := func(a, b []string) bool { return &(a) != &(b) @@ -355,7 +360,7 @@ func TestSyncLogic_maybeTweakTags(t *testing.T) { } func TestSyncLogic_maybeTweakTags_emptySC(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Check the edge cases where the connect service is deleted on the nomad // side (i.e. are we checking multiple nil pointers). 
@@ -385,7 +390,7 @@ func TestSyncLogic_maybeTweakTags_emptySC(t *testing.T) { // TestServiceRegistration_CheckOnUpdate tests that a ServiceRegistrations // CheckOnUpdate is populated and updated properly func TestServiceRegistration_CheckOnUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) mockAgent := NewMockAgent(ossFeatures) namespacesClient := NewNamespacesClient(NewMockNamespaces(nil), mockAgent) @@ -467,7 +472,7 @@ func TestServiceRegistration_CheckOnUpdate(t *testing.T) { } func TestSyncLogic_proxyUpstreamsDifferent(t *testing.T) { - t.Parallel() + ci.Parallel(t) upstream1 := func() api.Upstream { return api.Upstream{ @@ -602,7 +607,7 @@ func TestSyncLogic_proxyUpstreamsDifferent(t *testing.T) { } func TestSyncReason_String(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Equal(t, "periodic", fmt.Sprintf("%s", syncPeriodic)) require.Equal(t, "shutdown", fmt.Sprintf("%s", syncShutdown)) @@ -611,7 +616,7 @@ func TestSyncReason_String(t *testing.T) { } func TestSyncOps_empty(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(ops *operations, exp bool) { require.Equal(t, exp, ops.empty()) @@ -626,6 +631,8 @@ func TestSyncOps_empty(t *testing.T) { } func TestSyncLogic_maybeSidecarProxyCheck(t *testing.T) { + ci.Parallel(t) + try := func(input string, exp bool) { result := maybeSidecarProxyCheck(input) require.Equal(t, exp, result) diff --git a/command/agent/consul/unit_test.go b/command/agent/consul/unit_test.go index b3f035ad3..2609ffd16 100644 --- a/command/agent/consul/unit_test.go +++ b/command/agent/consul/unit_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" @@ -121,7 +122,8 @@ func setupFake(t *testing.T) *testFakeCtx { } func TestConsul_ChangeTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) r := require.New(t) @@ -157,7 +159,8 @@ func TestConsul_ChangeTags(t *testing.T) { } func TestConsul_EnableTagOverride_Syncs(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) r := require.New(t) @@ -204,6 +207,8 @@ func TestConsul_EnableTagOverride_Syncs(t *testing.T) { // it in Consul. Pre-0.7.1 ports were not part of the service ID and this was a // slightly different code path than changing tags. func TestConsul_ChangePorts(t *testing.T) { + ci.Parallel(t) + ctx := setupFake(t) require := require.New(t) @@ -327,6 +332,8 @@ func TestConsul_ChangePorts(t *testing.T) { // TestConsul_ChangeChecks asserts that updating only the checks on a service // properly syncs with Consul. func TestConsul_ChangeChecks(t *testing.T) { + ci.Parallel(t) + ctx := setupFake(t) ctx.Workload.Services[0].Checks = []*structs.ServiceCheck{ { @@ -561,6 +568,8 @@ func TestConsul_ChangeChecks(t *testing.T) { // TestConsul_RegServices tests basic service registration. func TestConsul_RegServices(t *testing.T) { + ci.Parallel(t) + ctx := setupFake(t) // Add a check w/restarting @@ -697,6 +706,8 @@ func TestConsul_RegServices(t *testing.T) { // TestConsul_ShutdownOK tests the ok path for the shutdown logic in // ServiceClient. func TestConsul_ShutdownOK(t *testing.T) { + ci.Parallel(t) + require := require.New(t) ctx := setupFake(t) go ctx.ServiceClient.Run() @@ -735,8 +746,10 @@ func TestConsul_ShutdownOK(t *testing.T) { // TestConsul_ShutdownBlocked tests the blocked past deadline path for the // shutdown logic in ServiceClient. 
func TestConsul_ShutdownBlocked(t *testing.T) { + ci.Parallel(t) + require := require.New(t) - t.Parallel() ctx := setupFake(t) // can be short because we're intentionally blocking, but needs to // be longer than the time we'll block Consul so we can be sure @@ -802,7 +815,8 @@ func TestConsul_ShutdownBlocked(t *testing.T) { // auto-use set then services should advertise it unless explicitly set to // host. Checks should always use host. func TestConsul_DriverNetwork_AutoUse(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) ctx.Workload.Services = []*structs.Service{ @@ -929,7 +943,8 @@ func TestConsul_DriverNetwork_AutoUse(t *testing.T) { // set auto-use only services which request the driver's network should // advertise it. func TestConsul_DriverNetwork_NoAutoUse(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) ctx.Workload.Services = []*structs.Service{ @@ -1003,7 +1018,8 @@ func TestConsul_DriverNetwork_NoAutoUse(t *testing.T) { // TestConsul_DriverNetwork_Change asserts that if a driver network is // specified and a service updates its use its properly updated in Consul. func TestConsul_DriverNetwork_Change(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) ctx.Workload.Services = []*structs.Service{ @@ -1075,7 +1091,8 @@ func TestConsul_DriverNetwork_Change(t *testing.T) { // TestConsul_CanaryTags asserts CanaryTags are used when Canary=true func TestConsul_CanaryTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) ctx := setupFake(t) @@ -1108,7 +1125,8 @@ func TestConsul_CanaryTags(t *testing.T) { // TestConsul_CanaryTags_NoTags asserts Tags are used when Canary=true and there // are no specified canary tags func TestConsul_CanaryTags_NoTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) ctx := setupFake(t) @@ -1140,7 +1158,8 @@ func TestConsul_CanaryTags_NoTags(t *testing.T) { // TestConsul_CanaryMeta asserts CanaryMeta are used when Canary=true func TestConsul_CanaryMeta(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) ctx := setupFake(t) @@ -1174,7 +1193,8 @@ func TestConsul_CanaryMeta(t *testing.T) { // TestConsul_CanaryMeta_NoMeta asserts Meta are used when Canary=true and there // are no specified canary meta func TestConsul_CanaryMeta_NoMeta(t *testing.T) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) ctx := setupFake(t) @@ -1208,7 +1228,7 @@ func TestConsul_CanaryMeta_NoMeta(t *testing.T) { // TestConsul_PeriodicSync asserts that Nomad periodically reconciles with // Consul. func TestConsul_PeriodicSync(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctx := setupFake(t) defer ctx.ServiceClient.Shutdown() @@ -1235,7 +1255,7 @@ func TestConsul_PeriodicSync(t *testing.T) { // TestIsNomadService asserts the isNomadService helper returns true for Nomad // task IDs and false for unknown IDs and Nomad agent IDs (see #2827). func TestIsNomadService(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []struct { id string @@ -1268,7 +1288,8 @@ func TestIsNomadService(t *testing.T) { // TestCreateCheckReg_HTTP asserts Nomad ServiceCheck structs are properly // converted to Consul API AgentCheckRegistrations for HTTP checks.
func TestCreateCheckReg_HTTP(t *testing.T) { - t.Parallel() + ci.Parallel(t) + check := &structs.ServiceCheck{ Name: "name", Type: "http", @@ -1315,7 +1336,8 @@ func TestCreateCheckReg_HTTP(t *testing.T) { // TestCreateCheckReg_GRPC asserts Nomad ServiceCheck structs are properly // converted to Consul API AgentCheckRegistrations for GRPC checks. func TestCreateCheckReg_GRPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) + check := &structs.ServiceCheck{ Name: "name", Type: "grpc", @@ -1352,6 +1374,8 @@ func TestCreateCheckReg_GRPC(t *testing.T) { // TestGetAddress asserts Nomad uses the correct ip and port for services and // checks depending on port labels, driver networks, and address mode. func TestGetAddress(t *testing.T) { + ci.Parallel(t) + const HostIP = "127.0.0.1" cases := []struct { @@ -1705,7 +1729,8 @@ func TestGetAddress(t *testing.T) { } func TestConsul_ServiceName_Duplicates(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) require := require.New(t) @@ -1768,7 +1793,8 @@ func TestConsul_ServiceName_Duplicates(t *testing.T) { // TestConsul_ServiceDeregistration_OutOfProbation asserts that during in steady // state we remove any services we don't reconize locally func TestConsul_ServiceDeregistration_OutProbation(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) require := require.New(t) @@ -1877,7 +1903,8 @@ func TestConsul_ServiceDeregistration_OutProbation(t *testing.T) { // services untouched. This adds a grace period for restoring recovered tasks // before deregistering them func TestConsul_ServiceDeregistration_InProbation(t *testing.T) { - t.Parallel() + ci.Parallel(t) + ctx := setupFake(t) require := require.New(t) diff --git a/command/agent/consul/version_checker_test.go b/command/agent/consul/version_checker_test.go index 351c89702..e098cf0ea 100644 --- a/command/agent/consul/version_checker_test.go +++ b/command/agent/consul/version_checker_test.go @@ -3,10 +3,13 @@ package consul import ( "encoding/json" "testing" + + "github.com/hashicorp/nomad/ci" ) func TestConsulSupportsTLSSkipVerify(t *testing.T) { - t.Parallel() + ci.Parallel(t) + assertSupport := func(expected bool, blob string) { self := map[string]map[string]interface{}{} if err := json.Unmarshal([]byte("{"+blob+"}"), &self); err != nil { diff --git a/command/agent/csi_endpoint_test.go b/command/agent/csi_endpoint_test.go index bbb997857..7115027e2 100644 --- a/command/agent/csi_endpoint_test.go +++ b/command/agent/csi_endpoint_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -14,7 +15,7 @@ import ( ) func TestHTTP_CSIEndpointPlugin(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { server := s.Agent.Server() cleanup := state.CreateTestCSIPlugin(server.State(), "foo") @@ -45,7 +46,7 @@ func TestHTTP_CSIEndpointPlugin(t *testing.T) { } func TestHTTP_CSIParseSecrets(t *testing.T) { - t.Parallel() + ci.Parallel(t) testCases := []struct { val string expect structs.CSISecrets @@ -82,7 +83,7 @@ func TestHTTP_CSIEndpointUtils(t *testing.T) { } func TestHTTP_CSIEndpointRegisterVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { server := s.Agent.Server() cleanup := state.CreateTestCSIPluginNodeOnly(server.State(), "foo") @@ -124,7 +125,7 @@ func TestHTTP_CSIEndpointRegisterVolume(t *testing.T) { } func 
TestHTTP_CSIEndpointCreateVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { server := s.Agent.Server() cleanup := state.CreateTestCSIPlugin(server.State(), "foo") @@ -156,7 +157,7 @@ func TestHTTP_CSIEndpointCreateVolume(t *testing.T) { } func TestHTTP_CSIEndpointSnapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { server := s.Agent.Server() cleanup := state.CreateTestCSIPlugin(server.State(), "foo") @@ -181,7 +182,7 @@ func TestHTTP_CSIEndpointSnapshot(t *testing.T) { // TestHTTP_CSIEndpoint_Cast is a smoke test for converting from structs to // API structs func TestHTTP_CSIEndpoint_Cast(t *testing.T) { - t.Parallel() + ci.Parallel(t) plugin := mock.CSIPlugin() plugin.Nodes["node1"] = &structs.CSIInfo{ diff --git a/command/agent/deployment_endpoint_test.go b/command/agent/deployment_endpoint_test.go index 1332776a3..2a1742157 100644 --- a/command/agent/deployment_endpoint_test.go +++ b/command/agent/deployment_endpoint_test.go @@ -5,13 +5,14 @@ import ( "net/http/httptest" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" ) func TestHTTP_DeploymentList(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -42,7 +43,7 @@ func TestHTTP_DeploymentList(t *testing.T) { } func TestHTTP_DeploymentPrefixList(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -76,7 +77,7 @@ func TestHTTP_DeploymentPrefixList(t *testing.T) { } func TestHTTP_DeploymentAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -137,7 +138,7 @@ func TestHTTP_DeploymentAllocations(t *testing.T) { } func TestHTTP_DeploymentQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -166,7 +167,7 @@ func TestHTTP_DeploymentQuery(t *testing.T) { } func TestHTTP_DeploymentPause(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -207,7 +208,7 @@ func TestHTTP_DeploymentPause(t *testing.T) { } func TestHTTP_DeploymentPromote(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -248,7 +249,7 @@ func TestHTTP_DeploymentPromote(t *testing.T) { } func TestHTTP_DeploymentAllocHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state @@ -293,7 +294,7 @@ func TestHTTP_DeploymentAllocHealth(t *testing.T) { } func TestHTTP_DeploymentFail(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state diff --git a/command/agent/eval_endpoint_test.go b/command/agent/eval_endpoint_test.go index f45bc9ede..15217ea1b 100644 --- a/command/agent/eval_endpoint_test.go +++ b/command/agent/eval_endpoint_test.go @@ -6,14 +6,14 @@ import ( "net/http/httptest" "testing" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" + 
"github.com/stretchr/testify/require" ) func TestHTTP_EvalList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -60,7 +60,7 @@ func TestHTTP_EvalList(t *testing.T) { } func TestHTTP_EvalPrefixList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -111,7 +111,7 @@ func TestHTTP_EvalPrefixList(t *testing.T) { } func TestHTTP_EvalAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() @@ -159,7 +159,7 @@ func TestHTTP_EvalAllocations(t *testing.T) { } func TestHTTP_EvalQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Directly manipulate the state state := s.Agent.server.State() diff --git a/command/agent/event_endpoint_test.go b/command/agent/event_endpoint_test.go index c450a9179..acef9f08e 100644 --- a/command/agent/event_endpoint_test.go +++ b/command/agent/event_endpoint_test.go @@ -10,9 +10,9 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -23,7 +23,7 @@ type testEvent struct { } func TestEventStream(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { ctx, cancel := context.WithCancel(context.Background()) @@ -68,7 +68,7 @@ func TestEventStream(t *testing.T) { } func TestEventStream_NamespaceQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { ctx, cancel := context.WithCancel(context.Background()) @@ -120,7 +120,7 @@ func TestEventStream_NamespaceQuery(t *testing.T) { } func TestEventStream_QueryParse(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { desc string diff --git a/command/agent/fs_endpoint_test.go b/command/agent/fs_endpoint_test.go index 7e0f286b6..1acb468a2 100644 --- a/command/agent/fs_endpoint_test.go +++ b/command/agent/fs_endpoint_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -114,7 +115,7 @@ func mockFSAlloc(nodeID string, config map[string]interface{}) *structs.Allocati } func TestHTTP_FS_List_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/client/fs/ls/", nil) @@ -126,7 +127,7 @@ func TestHTTP_FS_List_MissingParams(t *testing.T) { } func TestHTTP_FS_Stat_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/client/fs/stat/", nil) @@ -146,7 +147,7 @@ func TestHTTP_FS_Stat_MissingParams(t *testing.T) { } func TestHTTP_FS_ReadAt_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/client/fs/readat/", nil) @@ -170,7 +171,7 @@ func TestHTTP_FS_ReadAt_MissingParams(t *testing.T) { } func TestHTTP_FS_Cat_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, 
func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/client/fs/cat/", nil) @@ -190,7 +191,7 @@ func TestHTTP_FS_Cat_MissingParams(t *testing.T) { } func TestHTTP_FS_Stream_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/client/fs/stream/", nil) @@ -220,7 +221,7 @@ func TestHTTP_FS_Stream_MissingParams(t *testing.T) { // TestHTTP_FS_Logs_MissingParams asserts proper error codes and messages are // returned for incorrect parameters (eg missing tasks). func TestHTTP_FS_Logs_MissingParams(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // AllocID Not Present @@ -262,7 +263,7 @@ func TestHTTP_FS_Logs_MissingParams(t *testing.T) { } func TestHTTP_FS_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -282,7 +283,7 @@ func TestHTTP_FS_List(t *testing.T) { } func TestHTTP_FS_Stat(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -303,7 +304,7 @@ func TestHTTP_FS_Stat(t *testing.T) { } func TestHTTP_FS_ReadAt(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -329,7 +330,7 @@ func TestHTTP_FS_ReadAt(t *testing.T) { // TestHTTP_FS_ReadAt_XSS asserts that the readat API is safe from XSS. func TestHTTP_FS_ReadAt_XSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), xssLoggerMockDriver) addAllocToClient(s, a, terminalClientAlloc) @@ -353,7 +354,7 @@ func TestHTTP_FS_ReadAt_XSS(t *testing.T) { } func TestHTTP_FS_Cat(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -375,7 +376,7 @@ func TestHTTP_FS_Cat(t *testing.T) { // TestHTTP_FS_Cat_XSS asserts that the cat API is safe from XSS. func TestHTTP_FS_Cat_XSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), xssLoggerMockDriver) addAllocToClient(s, a, terminalClientAlloc) @@ -398,7 +399,7 @@ func TestHTTP_FS_Cat_XSS(t *testing.T) { } func TestHTTP_FS_Stream_NoFollow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -443,7 +444,7 @@ func TestHTTP_FS_Stream_NoFollow(t *testing.T) { // TestHTTP_FS_Stream_NoFollow_XSS asserts that the stream API is safe from XSS. 
func TestHTTP_FS_Stream_NoFollow_XSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), xssLoggerMockDriver) addAllocToClient(s, a, terminalClientAlloc) @@ -462,7 +463,7 @@ func TestHTTP_FS_Stream_NoFollow_XSS(t *testing.T) { } func TestHTTP_FS_Stream_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -506,7 +507,7 @@ func TestHTTP_FS_Stream_Follow(t *testing.T) { } func TestHTTP_FS_Logs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -544,7 +545,7 @@ func TestHTTP_FS_Logs(t *testing.T) { // text/plain or application/json content regardless of whether the logs are // HTML+Javascript or not. func TestHTTP_FS_Logs_XSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), xssLoggerMockDriver) addAllocToClient(s, a, terminalClientAlloc) @@ -565,7 +566,7 @@ func TestHTTP_FS_Logs_XSS(t *testing.T) { } func TestHTTP_FS_Logs_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { a := mockFSAlloc(s.client.NodeID(), nil) @@ -607,7 +608,7 @@ func TestHTTP_FS_Logs_Follow(t *testing.T) { } func TestHTTP_FS_Logs_PropagatesErrors(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { path := fmt.Sprintf("/v1/client/fs/logs/%s?type=stdout&task=web&offset=0&origin=end&plain=true", uuid.Generate()) diff --git a/command/agent/helpers_test.go b/command/agent/helpers_test.go index 3dab65388..a53c6ea95 100644 --- a/command/agent/helpers_test.go +++ b/command/agent/helpers_test.go @@ -3,12 +3,13 @@ package agent import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/stretchr/testify/require" ) func TestHTTP_rpcHandlerForAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) agent := NewTestAgent(t, t.Name(), nil) defer agent.Shutdown() @@ -52,7 +53,7 @@ func TestHTTP_rpcHandlerForAlloc(t *testing.T) { } func TestHTTP_rpcHandlerForNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) agent := NewTestAgent(t, t.Name(), nil) defer agent.Shutdown() diff --git a/command/agent/host/host_test.go b/command/agent/host/host_test.go index 12131fa87..aebb09b8b 100644 --- a/command/agent/host/host_test.go +++ b/command/agent/host/host_test.go @@ -4,6 +4,7 @@ import ( "os" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -18,6 +19,8 @@ func TestHostUtils(t *testing.T) { } func TestMakeHostData(t *testing.T) { + ci.Parallel(t) + // setenv variables that should be redacted prev := os.Getenv("VAULT_TOKEN") os.Setenv("VAULT_TOKEN", "foo") diff --git a/command/agent/http_stdlog_test.go b/command/agent/http_stdlog_test.go index 523086b22..aa9430242 100644 --- a/command/agent/http_stdlog_test.go +++ b/command/agent/http_stdlog_test.go @@ -5,10 +5,13 @@ import ( "testing" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestHttpServerLoggerFilters_Level_Info(t *testing.T) { + ci.Parallel(t) + var buf bytes.Buffer hclogger := hclog.New(&hclog.LoggerOptions{ Name: "testlog", @@ -29,6 +32,8 @@ func TestHttpServerLoggerFilters_Level_Info(t *testing.T) { } func 
TestHttpServerLoggerFilters_Level_Trace(t *testing.T) { + ci.Parallel(t) + var buf bytes.Buffer hclogger := hclog.New(&hclog.LoggerOptions{ Name: "testlog", diff --git a/command/agent/http_test.go b/command/agent/http_test.go index dd1521387..e5cb11571 100644 --- a/command/agent/http_test.go +++ b/command/agent/http_test.go @@ -20,17 +20,17 @@ import ( "time" "github.com/hashicorp/go-msgpack/codec" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // makeHTTPServer returns a test server whose logs will be written to @@ -71,6 +71,8 @@ func BenchmarkHTTPRequests(b *testing.B) { } func TestMultipleInterfaces(t *testing.T) { + ci.Parallel(t) + httpIps := []string{"127.0.0.1", "127.0.0.2"} s := makeHTTPServer(t, func(c *Config) { @@ -91,7 +93,7 @@ func TestMultipleInterfaces(t *testing.T) { // TestRootFallthrough tests rootFallthrough handler to // verify redirect and 404 behavior func TestRootFallthrough(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { desc string @@ -150,7 +152,7 @@ func TestRootFallthrough(t *testing.T) { } func TestSetIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() setIndex(resp, 1000) header := resp.Header().Get("X-Nomad-Index") @@ -164,7 +166,7 @@ func TestSetIndex(t *testing.T) { } func TestSetKnownLeader(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() setKnownLeader(resp, true) header := resp.Header().Get("X-Nomad-KnownLeader") @@ -180,7 +182,7 @@ func TestSetKnownLeader(t *testing.T) { } func TestSetLastContact(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() setLastContact(resp, 123456*time.Microsecond) header := resp.Header().Get("X-Nomad-LastContact") @@ -190,7 +192,7 @@ func TestSetLastContact(t *testing.T) { } func TestSetMeta(t *testing.T) { - t.Parallel() + ci.Parallel(t) meta := structs.QueryMeta{ Index: 1000, KnownLeader: true, @@ -213,7 +215,7 @@ func TestSetMeta(t *testing.T) { } func TestSetHeaders(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) s.Agent.config.HTTPAPIResponseHeaders = map[string]string{"foo": "bar"} defer s.Shutdown() @@ -234,7 +236,7 @@ func TestSetHeaders(t *testing.T) { } func TestContentTypeIsJSON(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -255,7 +257,7 @@ func TestContentTypeIsJSON(t *testing.T) { } func TestWrapNonJSON(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -274,7 +276,7 @@ func TestWrapNonJSON(t *testing.T) { } func TestWrapNonJSON_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -309,17 +311,17 @@ func TestWrapNonJSON_Error(t *testing.T) { } func TestPrettyPrint(t *testing.T) { - t.Parallel() + ci.Parallel(t) testPrettyPrint("pretty=1", true, t) } func TestPrettyPrintOff(t *testing.T) { - t.Parallel() + ci.Parallel(t) testPrettyPrint("pretty=0", false, t) } func TestPrettyPrintBare(t *testing.T) { - t.Parallel() + ci.Parallel(t) testPrettyPrint("pretty", true, t) } @@ 
-409,7 +411,7 @@ func TestTokenNotFound(t *testing.T) { } func TestParseWait(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() var b structs.QueryOptions @@ -432,7 +434,7 @@ func TestParseWait(t *testing.T) { } func TestParseWait_InvalidTime(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() var b structs.QueryOptions @@ -452,7 +454,7 @@ func TestParseWait_InvalidTime(t *testing.T) { } func TestParseWait_InvalidIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) resp := httptest.NewRecorder() var b structs.QueryOptions @@ -472,7 +474,7 @@ func TestParseWait_InvalidIndex(t *testing.T) { } func TestParseConsistency(t *testing.T) { - t.Parallel() + ci.Parallel(t) var b structs.QueryOptions req, err := http.NewRequest("GET", @@ -500,7 +502,7 @@ func TestParseConsistency(t *testing.T) { } func TestParseRegion(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -529,7 +531,7 @@ func TestParseRegion(t *testing.T) { } func TestParseToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -547,7 +549,7 @@ func TestParseToken(t *testing.T) { } func TestParseBool(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Input string @@ -594,7 +596,7 @@ func TestParseBool(t *testing.T) { } func Test_parseInt(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Input string @@ -641,7 +643,7 @@ func Test_parseInt(t *testing.T) { } func TestParsePagination(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := makeHTTPServer(t, nil) defer s.Shutdown() @@ -686,7 +688,7 @@ func TestParsePagination(t *testing.T) { // TestHTTP_VerifyHTTPSClient asserts that a client certificate signed by the // appropriate CA is required when VerifyHTTPSClient=true. func TestHTTP_VerifyHTTPSClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../../helper/tlsutil/testdata/ca.pem" foocert = "../../helper/tlsutil/testdata/nomad-foo.pem" @@ -807,7 +809,7 @@ func TestHTTP_VerifyHTTPSClient(t *testing.T) { } func TestHTTP_VerifyHTTPSClient_AfterConfigReload(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -911,7 +913,7 @@ func TestHTTP_VerifyHTTPSClient_AfterConfigReload(t *testing.T) { // TestHTTPServer_Limits_Error asserts invalid Limits cause errors. This is the // HTTP counterpart to TestAgent_ServerConfig_Limits_Error. func TestHTTPServer_Limits_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { tls bool @@ -961,7 +963,7 @@ func TestHTTPServer_Limits_Error(t *testing.T) { tc := cases[i] name := fmt.Sprintf("%d-tls-%t-timeout-%s-limit-%v", i, tc.tls, tc.timeout, tc.limit) t.Run(name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := &Config{ normalizedAddrs: &NormalizedAddrs{ @@ -1001,7 +1003,7 @@ func limitStr(limit *int) string { // TestHTTPServer_Limits_OK asserts that all valid limits combinations // (tls/timeout/conns) work. 
func TestHTTPServer_Limits_OK(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../../helper/tlsutil/testdata/ca.pem" @@ -1274,7 +1276,7 @@ func TestHTTPServer_Limits_OK(t *testing.T) { tc := cases[i] name := fmt.Sprintf("%d-tls-%t-timeout-%s-limit-%v", i, tc.tls, tc.timeout, limitStr(tc.limit)) t.Run(name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) if tc.limit != nil && *tc.limit >= maxConns { t.Fatalf("test fixture failure: cannot assert limit (%d) >= max (%d)", *tc.limit, maxConns) @@ -1317,7 +1319,7 @@ func TestHTTPServer_Limits_OK(t *testing.T) { } func TestHTTPServer_ResolveToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Setup two servers, one with ACL enabled and another with ACL disabled. noACLServer := makeHTTPServer(t, func(c *Config) { @@ -1368,6 +1370,8 @@ func TestHTTPServer_ResolveToken(t *testing.T) { } func Test_IsAPIClientError(t *testing.T) { + ci.Parallel(t) + trueCases := []int{400, 403, 404, 499} for _, c := range trueCases { require.Truef(t, isAPIClientError(c), "code: %v", c) @@ -1380,6 +1384,7 @@ func Test_IsAPIClientError(t *testing.T) { } func Test_decodeBody(t *testing.T) { + ci.Parallel(t) testCases := []struct { inputReq *http.Request diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go index ff4d615a6..08b489bb1 100644 --- a/command/agent/job_endpoint_test.go +++ b/command/agent/job_endpoint_test.go @@ -9,18 +9,18 @@ import ( "time" "github.com/golang/snappy" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" api "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestHTTP_JobsList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { for i := 0; i < 3; i++ { // Create the job @@ -71,12 +71,13 @@ func TestHTTP_JobsList(t *testing.T) { } func TestHTTP_PrefixJobsList(t *testing.T) { + ci.Parallel(t) + ids := []string{ "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706", "aabbbbbb-e8f7-fd38-c855-ab94ceb89706", "aabbcccc-e8f7-fd38-c855-ab94ceb89706", } - t.Parallel() httpTest(t, nil, func(s *TestAgent) { for i := 0; i < 3; i++ { // Create the job @@ -129,7 +131,7 @@ func TestHTTP_PrefixJobsList(t *testing.T) { } func TestHTTP_JobsList_AllNamespaces_OSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { for i := 0; i < 3; i++ { // Create the job @@ -169,7 +171,7 @@ func TestHTTP_JobsList_AllNamespaces_OSS(t *testing.T) { } func TestHTTP_JobsRegister(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -223,7 +225,7 @@ func TestHTTP_JobsRegister(t *testing.T) { } func TestHTTP_JobsRegister_IgnoresParentID(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -286,7 +288,7 @@ func TestHTTP_JobsRegister_IgnoresParentID(t *testing.T) { // Test that ACL token is properly threaded through to the RPC endpoint func TestHTTP_JobsRegister_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -316,7 +318,7 @@ func TestHTTP_JobsRegister_ACL(t *testing.T) { } func TestHTTP_JobsRegister_Defaulting(t *testing.T) { - t.Parallel() + ci.Parallel(t)
httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -377,7 +379,7 @@ func TestHTTP_JobsRegister_Defaulting(t *testing.T) { } func TestHTTP_JobsParse(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { buf := encodeReq(api.JobsParseRequest{JobHCL: mock.HCL()}) req, err := http.NewRequest("POST", "/v1/jobs/parse", buf) @@ -410,7 +412,7 @@ func TestHTTP_JobsParse(t *testing.T) { } func TestHTTP_JobsParse_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpACLTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -531,7 +533,7 @@ func TestHTTP_JobsParse_ACL(t *testing.T) { } func TestHTTP_JobQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -580,7 +582,7 @@ func TestHTTP_JobQuery(t *testing.T) { } func TestHTTP_JobQuery_Payload(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -634,7 +636,7 @@ func TestHTTP_JobQuery_Payload(t *testing.T) { } func TestHTTP_jobUpdate_systemScaling(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -664,7 +666,7 @@ func TestHTTP_jobUpdate_systemScaling(t *testing.T) { } func TestHTTP_JobUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -721,7 +723,7 @@ func TestHTTP_JobUpdate(t *testing.T) { } func TestHTTP_JobUpdate_EvalPriority(t *testing.T) { - t.Parallel() + ci.Parallel(t) testCases := []struct { inputEvalPriority int @@ -816,7 +818,7 @@ func TestHTTP_JobUpdate_EvalPriority(t *testing.T) { } func TestHTTP_JobUpdateRegion(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -915,7 +917,7 @@ func TestHTTP_JobUpdateRegion(t *testing.T) { } func TestHTTP_JobDelete(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -1017,7 +1019,7 @@ func TestHTTP_JobDelete(t *testing.T) { } func TestHTTP_JobDelete_EvalPriority(t *testing.T) { - t.Parallel() + ci.Parallel(t) testCases := []struct { inputEvalPriority int @@ -1128,7 +1130,7 @@ func TestHTTP_JobDelete_EvalPriority(t *testing.T) { } func TestHTTP_Job_ScaleTaskGroup(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -1189,7 +1191,7 @@ func TestHTTP_Job_ScaleTaskGroup(t *testing.T) { } func TestHTTP_Job_ScaleStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -1228,7 +1230,7 @@ func TestHTTP_Job_ScaleStatus(t *testing.T) { } func TestHTTP_JobForceEvaluate(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -1271,7 +1273,7 @@ func TestHTTP_JobForceEvaluate(t *testing.T) { } func TestHTTP_JobEvaluate_ForceReschedule(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -1322,7 +1324,7 @@ func TestHTTP_JobEvaluate_ForceReschedule(t *testing.T) { } func TestHTTP_JobEvaluations(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -1373,7 +1375,7 @@ func TestHTTP_JobEvaluations(t *testing.T) { } func TestHTTP_JobAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job alloc1 := mock.Alloc() @@ -1438,8 +1440,8 @@ func 
TestHTTP_JobAllocations(t *testing.T) { } func TestHTTP_JobDeployments(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { // Create the job j := mock.Job() @@ -1482,8 +1484,8 @@ func TestHTTP_JobDeployments(t *testing.T) { } func TestHTTP_JobDeployment(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { // Create the job j := mock.Job() @@ -1525,7 +1527,7 @@ func TestHTTP_JobDeployment(t *testing.T) { } func TestHTTP_JobVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := mock.Job() @@ -1603,7 +1605,7 @@ func TestHTTP_JobVersions(t *testing.T) { } func TestHTTP_PeriodicForce(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create and register a periodic job. job := mock.PeriodicJob() @@ -1646,7 +1648,7 @@ func TestHTTP_PeriodicForce(t *testing.T) { } func TestHTTP_JobPlan(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := MockJob() @@ -1686,7 +1688,7 @@ func TestHTTP_JobPlan(t *testing.T) { } func TestHTTP_JobPlanRegion(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { Name string @@ -1761,7 +1763,7 @@ func TestHTTP_JobPlanRegion(t *testing.T) { } func TestHTTP_JobDispatch(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the parameterized job job := mock.BatchJob() @@ -1816,7 +1818,7 @@ func TestHTTP_JobDispatch(t *testing.T) { } func TestHTTP_JobRevert(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job and register it twice job := mock.Job() @@ -1875,7 +1877,7 @@ func TestHTTP_JobRevert(t *testing.T) { } func TestHTTP_JobStable(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job and register it twice job := mock.Job() @@ -1933,7 +1935,7 @@ func TestHTTP_JobStable(t *testing.T) { } func TestJobs_ParsingWriteRequest(t *testing.T) { - t.Parallel() + ci.Parallel(t) // defaults agentRegion := "agentRegion" @@ -2074,7 +2076,7 @@ func TestJobs_ParsingWriteRequest(t *testing.T) { } func TestJobs_RegionForJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) // defaults agentRegion := "agentRegion" @@ -2176,7 +2178,7 @@ func TestJobs_RegionForJob(t *testing.T) { } func TestJobs_NamespaceForJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) // test namespace for pointer inputs ns := "dev" @@ -2247,6 +2249,8 @@ func TestJobs_NamespaceForJob(t *testing.T) { } func TestJobs_ApiJobToStructsJob(t *testing.T) { + ci.Parallel(t) + apiJob := &api.Job{ Stop: helper.BoolToPtr(true), Region: helper.StringToPtr("global"), @@ -3284,6 +3288,8 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) { } func TestJobs_ApiJobToStructsJobUpdate(t *testing.T) { + ci.Parallel(t) + apiJob := &api.Job{ Update: &api.UpdateStrategy{ Stagger: helper.TimeToPtr(1 * time.Second), @@ -3362,7 +3368,7 @@ func TestJobs_ApiJobToStructsJobUpdate(t *testing.T) { // While this is an odd place to test that, this is where both are imported, // validated, and converted. 
func TestJobs_Matching_Resources(t *testing.T) { - t.Parallel() + ci.Parallel(t) // api.MinResources == structs.MinResources structsMinRes := ApiResourcesToStructs(api.MinResources()) @@ -3376,7 +3382,7 @@ func TestJobs_Matching_Resources(t *testing.T) { // TestHTTP_JobValidate_SystemMigrate asserts that a system job with a migrate // stanza fails to validate but does not panic (see #5477). func TestHTTP_JobValidate_SystemMigrate(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job job := &api.Job{ @@ -3417,13 +3423,13 @@ func TestHTTP_JobValidate_SystemMigrate(t *testing.T) { } func TestConversion_dereferenceInt(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Equal(t, 0, dereferenceInt(nil)) require.Equal(t, 42, dereferenceInt(helper.IntToPtr(42))) } func TestConversion_apiLogConfigToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiLogConfigToStructs(nil)) require.Equal(t, &structs.LogConfig{ MaxFiles: 2, @@ -3435,7 +3441,7 @@ func TestConversion_apiLogConfigToStructs(t *testing.T) { } func TestConversion_apiResourcesToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -3482,7 +3488,7 @@ func TestConversion_apiResourcesToStructs(t *testing.T) { } func TestConversion_apiConnectSidecarTaskToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiConnectSidecarTaskToStructs(nil)) delay := time.Duration(200) timeout := time.Duration(1000) @@ -3529,7 +3535,7 @@ func TestConversion_apiConnectSidecarTaskToStructs(t *testing.T) { } func TestConversion_apiConsulExposePathsToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiConsulExposePathsToStructs(nil)) require.Nil(t, apiConsulExposePathsToStructs(make([]*api.ConsulExposePath, 0))) require.Equal(t, []structs.ConsulExposePath{{ @@ -3546,7 +3552,7 @@ func TestConversion_apiConsulExposePathsToStructs(t *testing.T) { } func TestConversion_apiConsulExposeConfigToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiConsulExposeConfigToStructs(nil)) require.Equal(t, &structs.ConsulExposeConfig{ Paths: []structs.ConsulExposePath{{Path: "/health"}}, @@ -3556,7 +3562,7 @@ func TestConversion_apiConsulExposeConfigToStructs(t *testing.T) { } func TestConversion_apiUpstreamsToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiUpstreamsToStructs(nil)) require.Nil(t, apiUpstreamsToStructs(make([]*api.ConsulUpstream, 0))) require.Equal(t, []structs.ConsulUpstream{{ @@ -3575,14 +3581,14 @@ func TestConversion_apiUpstreamsToStructs(t *testing.T) { } func TestConversion_apiConsulMeshGatewayToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiMeshGatewayToStructs(nil)) require.Equal(t, &structs.ConsulMeshGateway{Mode: "remote"}, apiMeshGatewayToStructs(&api.ConsulMeshGateway{Mode: "remote"})) } func TestConversion_apiConnectSidecarServiceProxyToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiConnectSidecarServiceProxyToStructs(nil)) config := make(map[string]interface{}) require.Equal(t, &structs.ConsulProxy{ @@ -3611,7 +3617,7 @@ func TestConversion_apiConnectSidecarServiceProxyToStructs(t *testing.T) { } func TestConversion_apiConnectSidecarServiceToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, apiConnectSidecarTaskToStructs(nil)) require.Equal(t, &structs.ConsulSidecarService{ Tags: []string{"foo"}, @@ -3629,7 +3635,7 @@ func TestConversion_apiConnectSidecarServiceToStructs(t 
*testing.T) { } func TestConversion_ApiConsulConnectToStructs(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { require.Nil(t, ApiConsulConnectToStructs(nil)) diff --git a/command/agent/keyring_test.go b/command/agent/keyring_test.go index 3408d8df0..284a11314 100644 --- a/command/agent/keyring_test.go +++ b/command/agent/keyring_test.go @@ -8,10 +8,11 @@ import ( "testing" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" ) func TestAgent_LoadKeyrings(t *testing.T) { - t.Parallel() + ci.Parallel(t) key := "tbLJg26ZJyJ9pK3qhc9jig==" // Should be no configured keyring file by default @@ -45,7 +46,7 @@ func TestAgent_LoadKeyrings(t *testing.T) { } func TestAgent_InitKeyring(t *testing.T) { - t.Parallel() + ci.Parallel(t) key1 := "tbLJg26ZJyJ9pK3qhc9jig==" key2 := "4leC33rgtXKIVUr9Nr0snQ==" expected := fmt.Sprintf(`["%s"]`, key1) diff --git a/command/agent/log_file_test.go b/command/agent/log_file_test.go index 8bcc52a00..78e6ce5d9 100644 --- a/command/agent/log_file_test.go +++ b/command/agent/log_file_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/hashicorp/logutils" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -18,7 +19,7 @@ const ( ) func TestLogFile_timeRotation(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterTimeTest") @@ -43,7 +44,7 @@ func TestLogFile_timeRotation(t *testing.T) { } func TestLogFile_openNew(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterOpenTest") @@ -80,7 +81,7 @@ func TestLogFile_openNew(t *testing.T) { } func TestLogFile_byteRotation(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterByteTest") @@ -104,7 +105,7 @@ func TestLogFile_byteRotation(t *testing.T) { } func TestLogFile_logLevelFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterFilterTest") @@ -127,7 +128,7 @@ func TestLogFile_logLevelFiltering(t *testing.T) { } func TestLogFile_deleteArchives(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterDeleteArchivesTest") @@ -167,7 +168,7 @@ func TestLogFile_deleteArchives(t *testing.T) { } func TestLogFile_deleteArchivesDisabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tempDir, err := ioutil.TempDir("", "LogWriterDeleteArchivesDisabledTest") diff --git a/command/agent/log_levels_test.go b/command/agent/log_levels_test.go index a3e863465..ab4d03842 100644 --- a/command/agent/log_levels_test.go +++ b/command/agent/log_levels_test.go @@ -4,10 +4,11 @@ import ( "testing" "github.com/hashicorp/logutils" + "github.com/hashicorp/nomad/ci" ) func TestLevelFilter(t *testing.T) { - t.Parallel() + ci.Parallel(t) filt := LevelFilter() filt.Levels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERR"} @@ -24,5 +25,4 @@ func TestLevelFilter(t *testing.T) { if ValidateLevelFilter(level, filt) { t.Fatalf("expected invalid LogLevel, %s was valid", level) } - } diff --git a/command/agent/metrics_endpoint_test.go b/command/agent/metrics_endpoint_test.go index 712fdb5d8..787f114b5 100644 --- a/command/agent/metrics_endpoint_test.go +++ b/command/agent/metrics_endpoint_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/armon/go-metrics" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" 
"github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -16,9 +17,9 @@ import ( ) func TestHTTP_MetricsWithIllegalMethod(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("DELETE", "/v1/metrics", nil) assert.Nil(err) @@ -30,9 +31,9 @@ func TestHTTP_MetricsWithIllegalMethod(t *testing.T) { } func TestHTTP_MetricsPrometheusDisabled(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, func(c *Config) { c.Telemetry.PrometheusMetrics = false }, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/metrics?format=prometheus", nil) assert.Nil(err) @@ -44,9 +45,9 @@ func TestHTTP_MetricsPrometheusDisabled(t *testing.T) { } func TestHTTP_MetricsPrometheusEnabled(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("GET", "/v1/metrics?format=prometheus", nil) assert.Nil(err) @@ -64,9 +65,9 @@ func TestHTTP_MetricsPrometheusEnabled(t *testing.T) { } func TestHTTP_Metrics(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { // make a separate HTTP request first, to ensure Nomad has written metrics // and prevent a race condition @@ -101,6 +102,8 @@ func TestHTTP_Metrics(t *testing.T) { // // **Cannot** be run in parallel as metrics are global. func TestHTTP_FreshClientAllocMetrics(t *testing.T) { + ci.Parallel(t) + require := require.New(t) numTasks := 10 diff --git a/command/agent/monitor/monitor_test.go b/command/agent/monitor/monitor_test.go index 697b5bdc5..f005e4210 100644 --- a/command/agent/monitor/monitor_test.go +++ b/command/agent/monitor/monitor_test.go @@ -7,11 +7,12 @@ import ( "time" log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestMonitor_Start(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := log.NewInterceptLogger(&log.LoggerOptions{ Level: log.Error, @@ -42,7 +43,7 @@ func TestMonitor_Start(t *testing.T) { // Ensure number of dropped messages are logged func TestMonitor_DroppedMessages(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := log.NewInterceptLogger(&log.LoggerOptions{ Level: log.Warn, diff --git a/command/agent/namespace_endpoint_test.go b/command/agent/namespace_endpoint_test.go index 3c5b1bf2f..b488fe16a 100644 --- a/command/agent/namespace_endpoint_test.go +++ b/command/agent/namespace_endpoint_test.go @@ -5,14 +5,15 @@ import ( "net/http/httptest" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" ) func TestHTTP_NamespaceList(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { ns1 := mock.Namespace() ns2 := mock.Namespace() @@ -44,8 +45,8 @@ func TestHTTP_NamespaceList(t *testing.T) { } func TestHTTP_NamespaceQuery(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { ns1 := mock.Namespace() args := structs.NamespaceUpsertRequest{ @@ -75,8 +76,8 @@ func TestHTTP_NamespaceQuery(t *testing.T) { } func TestHTTP_NamespaceCreate(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request ns1 := mock.Namespace() @@ -106,8 +107,8 @@ func TestHTTP_NamespaceCreate(t *testing.T) { } func 
TestHTTP_NamespaceUpdate(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request ns1 := mock.Namespace() @@ -137,8 +138,8 @@ func TestHTTP_NamespaceUpdate(t *testing.T) { } func TestHTTP_NamespaceDelete(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { ns1 := mock.Namespace() args := structs.NamespaceUpsertRequest{ diff --git a/command/agent/node_endpoint_test.go b/command/agent/node_endpoint_test.go index c5ec234be..0b682b3ad 100644 --- a/command/agent/node_endpoint_test.go +++ b/command/agent/node_endpoint_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" @@ -14,7 +15,7 @@ import ( ) func TestHTTP_NodesList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { for i := 0; i < 3; i++ { // Create the node @@ -62,7 +63,7 @@ func TestHTTP_NodesList(t *testing.T) { } func TestHTTP_NodesPrefixList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { ids := []string{ "12345678-abcd-efab-cdef-123456789abc", @@ -119,7 +120,7 @@ func TestHTTP_NodesPrefixList(t *testing.T) { } func TestHTTP_NodeForceEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the node node := mock.Node() @@ -171,7 +172,7 @@ func TestHTTP_NodeForceEval(t *testing.T) { } func TestHTTP_NodeAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job node := mock.Node() @@ -240,7 +241,7 @@ func TestHTTP_NodeAllocations(t *testing.T) { } func TestHTTP_NodeDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // Create the node @@ -336,7 +337,7 @@ func TestHTTP_NodeDrain(t *testing.T) { } func TestHTTP_NodeEligible(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // Create the node @@ -390,7 +391,7 @@ func TestHTTP_NodeEligible(t *testing.T) { } func TestHTTP_NodePurge(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the node node := mock.Node() @@ -455,7 +456,7 @@ func TestHTTP_NodePurge(t *testing.T) { } func TestHTTP_NodeQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Create the job node := mock.Node() diff --git a/command/agent/operator_endpoint_test.go b/command/agent/operator_endpoint_test.go index b621d7d4e..e97ff6fcf 100644 --- a/command/agent/operator_endpoint_test.go +++ b/command/agent/operator_endpoint_test.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" @@ -25,7 +26,7 @@ import ( ) func TestHTTP_OperatorRaftConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { body := bytes.NewBuffer(nil) req, err := http.NewRequest("GET", "/v1/operator/raft/configuration", body) @@ -54,8 +55,8 @@ func TestHTTP_OperatorRaftConfiguration(t *testing.T) { } func TestHTTP_OperatorRaftPeer(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() httpTest(t, nil, func(s 
*TestAgent) { body := bytes.NewBuffer(nil) req, err := http.NewRequest("DELETE", "/v1/operator/raft/peer?address=nope", body) @@ -88,7 +89,7 @@ func TestHTTP_OperatorRaftPeer(t *testing.T) { } func TestOperator_AutopilotGetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { body := bytes.NewBuffer(nil) req, _ := http.NewRequest("GET", "/v1/operator/autopilot/configuration", body) @@ -111,7 +112,7 @@ func TestOperator_AutopilotGetConfiguration(t *testing.T) { } func TestOperator_AutopilotSetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { body := bytes.NewBuffer([]byte(`{"CleanupDeadServers": false}`)) req, _ := http.NewRequest("PUT", "/v1/operator/autopilot/configuration", body) @@ -140,7 +141,7 @@ func TestOperator_AutopilotSetConfiguration(t *testing.T) { } func TestOperator_AutopilotCASConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { body := bytes.NewBuffer([]byte(`{"CleanupDeadServers": false}`)) req, _ := http.NewRequest("PUT", "/v1/operator/autopilot/configuration", body) @@ -208,6 +209,8 @@ func TestOperator_AutopilotCASConfiguration(t *testing.T) { } func TestOperator_ServerHealth(t *testing.T) { + ci.Parallel(t) + httpTest(t, func(c *Config) { c.Server.RaftProtocol = 3 }, func(s *TestAgent) { @@ -238,7 +241,7 @@ func TestOperator_ServerHealth(t *testing.T) { } func TestOperator_ServerHealth_Unhealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, func(c *Config) { c.Server.RaftProtocol = 3 c.Autopilot.LastContactThreshold = -1 * time.Second @@ -268,7 +271,7 @@ func TestOperator_ServerHealth_Unhealthy(t *testing.T) { } func TestOperator_SchedulerGetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { require := require.New(t) body := bytes.NewBuffer(nil) @@ -290,7 +293,7 @@ func TestOperator_SchedulerGetConfiguration(t *testing.T) { } func TestOperator_SchedulerSetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { require := require.New(t) body := bytes.NewBuffer([]byte(` @@ -328,7 +331,7 @@ func TestOperator_SchedulerSetConfiguration(t *testing.T) { } func TestOperator_SchedulerCASConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { require := require.New(t) body := bytes.NewBuffer([]byte(`{"PreemptionConfig": { @@ -406,7 +409,7 @@ func TestOperator_SchedulerCASConfiguration(t *testing.T) { } func TestOperator_SnapshotRequests(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir, err := ioutil.TempDir("", "nomadtest-operator-") require.NoError(t, err) @@ -498,5 +501,4 @@ func TestOperator_SnapshotRequests(t *testing.T) { require.True(t, jobExists()) }) - } diff --git a/command/agent/pprof/pprof_test.go b/command/agent/pprof/pprof_test.go index cf68fc7a7..08b6596de 100644 --- a/command/agent/pprof/pprof_test.go +++ b/command/agent/pprof/pprof_test.go @@ -4,10 +4,13 @@ import ( "context" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestProfile(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string profile string @@ -58,6 +61,8 @@ func TestProfile(t *testing.T) { } func TestCPUProfile(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string expectedHeaders map[string]string @@ -84,6 +89,8 @@ func TestCPUProfile(t *testing.T) { } func TestTrace(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string 
expectedHeaders map[string]string @@ -110,6 +117,8 @@ func TestTrace(t *testing.T) { } func TestCmdline(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string expectedHeaders map[string]string diff --git a/command/agent/region_endpoint_test.go b/command/agent/region_endpoint_test.go index 2549c6a9f..30f84ed00 100644 --- a/command/agent/region_endpoint_test.go +++ b/command/agent/region_endpoint_test.go @@ -4,10 +4,12 @@ import ( "net/http" "net/http/httptest" "testing" + + "github.com/hashicorp/nomad/ci" ) func TestHTTP_RegionList(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("GET", "/v1/regions", nil) diff --git a/command/agent/retry_join_test.go b/command/agent/retry_join_test.go index 48a2e2514..a2c27aa3b 100644 --- a/command/agent/retry_join_test.go +++ b/command/agent/retry_join_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" @@ -29,7 +30,7 @@ func (m *MockDiscover) Names() []string { } func TestRetryJoin_Integration(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create two agents and have one retry join the other agent := NewTestAgent(t, t.Name(), nil) @@ -73,7 +74,7 @@ func TestRetryJoin_Integration(t *testing.T) { } func TestRetryJoin_Server_NonCloud(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) serverJoin := &ServerJoin{ @@ -103,7 +104,7 @@ func TestRetryJoin_Server_NonCloud(t *testing.T) { } func TestRetryJoin_Server_Cloud(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) serverJoin := &ServerJoin{ @@ -135,7 +136,7 @@ func TestRetryJoin_Server_Cloud(t *testing.T) { } func TestRetryJoin_Server_MixedProvider(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) serverJoin := &ServerJoin{ @@ -167,7 +168,7 @@ func TestRetryJoin_Server_MixedProvider(t *testing.T) { } func TestRetryJoin_Client(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) serverJoin := &ServerJoin{ @@ -197,7 +198,7 @@ func TestRetryJoin_Client(t *testing.T) { } func TestRetryJoin_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) type validateExpect struct { config *Config isValid bool diff --git a/command/agent/scaling_endpoint_test.go b/command/agent/scaling_endpoint_test.go index 0abb21c0d..fe7faa2e3 100644 --- a/command/agent/scaling_endpoint_test.go +++ b/command/agent/scaling_endpoint_test.go @@ -5,15 +5,15 @@ import ( "net/http/httptest" "testing" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/require" ) func TestHTTP_ScalingPoliciesList(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() httpTest(t, nil, func(s *TestAgent) { for i := 0; i < 3; i++ { // Create the job @@ -52,8 +52,8 @@ func TestHTTP_ScalingPoliciesList(t *testing.T) { } func TestHTTP_ScalingPoliciesList_Filter(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { var job *structs.Job for i := 0; i < 3; i++ { @@ -100,7 +100,7 @@ func TestHTTP_ScalingPoliciesList_Filter(t *testing.T) { } func TestHTTP_ScalingPolicyGet(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { // Create the job diff --git
a/command/agent/search_endpoint_test.go b/command/agent/search_endpoint_test.go index 68ca16bb9..da0140e1d 100644 --- a/command/agent/search_endpoint_test.go +++ b/command/agent/search_endpoint_test.go @@ -6,6 +6,7 @@ import ( "net/http/httptest" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -26,7 +27,7 @@ func createJobForTest(jobID string, s *TestAgent, t *testing.T) { } func TestHTTP_PrefixSearchWithIllegalMethod(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("DELETE", "/v1/search", nil) @@ -39,7 +40,7 @@ func TestHTTP_PrefixSearchWithIllegalMethod(t *testing.T) { } func TestHTTP_FuzzySearchWithIllegalMethod(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { req, err := http.NewRequest("DELETE", "/v1/search/fuzzy", nil) @@ -63,7 +64,7 @@ func createCmdJobForTest(name, cmd string, s *TestAgent, t *testing.T) *structs. } func TestHTTP_PrefixSearch_POST(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJob := "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706" testJobPrefix := "aaaaaaaa-e8f7-fd38" @@ -93,7 +94,7 @@ func TestHTTP_PrefixSearch_POST(t *testing.T) { } func TestHTTP_FuzzySearch_POST(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJobID := uuid.Generate() @@ -123,7 +124,7 @@ func TestHTTP_FuzzySearch_POST(t *testing.T) { } func TestHTTP_PrefixSearch_PUT(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJob := "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706" testJobPrefix := "aaaaaaaa-e8f7-fd38" @@ -153,7 +154,7 @@ func TestHTTP_PrefixSearch_PUT(t *testing.T) { } func TestHTTP_FuzzySearch_PUT(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJobID := uuid.Generate() @@ -183,7 +184,7 @@ func TestHTTP_FuzzySearch_PUT(t *testing.T) { } func TestHTTP_PrefixSearch_MultipleJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJobA := "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706" testJobB := "aaaaaaaa-e8f7-fd38-c855-ab94ceb89707" @@ -219,7 +220,7 @@ func TestHTTP_PrefixSearch_MultipleJobs(t *testing.T) { } func TestHTTP_FuzzySearch_MultipleJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { job1ID := createCmdJobForTest("job1", "/bin/yes", s, t).ID @@ -262,7 +263,7 @@ func TestHTTP_FuzzySearch_MultipleJobs(t *testing.T) { } func TestHTTP_PrefixSearch_Evaluation(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -294,7 +295,7 @@ func TestHTTP_PrefixSearch_Evaluation(t *testing.T) { } func TestHTTP_FuzzySearch_Evaluation(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -335,7 +336,7 @@ func mockAlloc() *structs.Allocation { } func TestHTTP_PrefixSearch_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -366,7 +367,7 @@ func TestHTTP_PrefixSearch_Allocations(t *testing.T) { } func TestHTTP_FuzzySearch_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -396,7 +397,7 @@ func TestHTTP_FuzzySearch_Allocations(t *testing.T) { } func TestHTTP_PrefixSearch_Nodes(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -427,7 +428,7 @@ func TestHTTP_PrefixSearch_Nodes(t *testing.T) { } func 
TestHTTP_FuzzySearch_Nodes(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -457,7 +458,7 @@ func TestHTTP_FuzzySearch_Nodes(t *testing.T) { } func TestHTTP_PrefixSearch_Deployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -485,7 +486,7 @@ func TestHTTP_PrefixSearch_Deployments(t *testing.T) { } func TestHTTP_FuzzySearch_Deployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() @@ -514,7 +515,7 @@ func TestHTTP_FuzzySearch_Deployments(t *testing.T) { } func TestHTTP_PrefixSearch_NoJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { data := structs.SearchRequest{Prefix: "12345", Context: structs.Jobs} @@ -534,7 +535,7 @@ func TestHTTP_PrefixSearch_NoJob(t *testing.T) { } func TestHTTP_FuzzySearch_NoJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { data := structs.FuzzySearchRequest{Text: "12345", Context: structs.Jobs} @@ -553,7 +554,7 @@ func TestHTTP_FuzzySearch_NoJob(t *testing.T) { } func TestHTTP_PrefixSearch_AllContext(t *testing.T) { - t.Parallel() + ci.Parallel(t) testJobID := "aaaaaaaa-e8f7-fd38-c855-ab94ceb89706" testJobPrefix := "aaaaaaaa-e8f7-fd38" @@ -588,7 +589,7 @@ func TestHTTP_PrefixSearch_AllContext(t *testing.T) { } func TestHTTP_FuzzySearch_AllContext(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { jobID := createCmdJobForTest("job1", "/bin/aardvark", s, t).ID diff --git a/command/agent/stats_endpoint_test.go b/command/agent/stats_endpoint_test.go index 2720b1a0c..ad8c6d550 100644 --- a/command/agent/stats_endpoint_test.go +++ b/command/agent/stats_endpoint_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -16,7 +17,7 @@ import ( ) func TestClientStatsRequest(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) httpTest(t, nil, func(s *TestAgent) { @@ -77,7 +78,7 @@ func TestClientStatsRequest(t *testing.T) { } func TestClientStatsRequest_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) httpACLTest(t, nil, func(s *TestAgent) { state := s.Agent.server.State() diff --git a/command/agent/status_endpoint_test.go b/command/agent/status_endpoint_test.go index 2dbe39cfd..9be35c4f2 100644 --- a/command/agent/status_endpoint_test.go +++ b/command/agent/status_endpoint_test.go @@ -4,10 +4,12 @@ import ( "net/http" "net/http/httptest" "testing" + + "github.com/hashicorp/nomad/ci" ) func TestHTTP_StatusLeader(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("GET", "/v1/status/leader", nil) @@ -30,7 +32,7 @@ func TestHTTP_StatusLeader(t *testing.T) { } func TestHTTP_StatusPeers(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("GET", "/v1/status/peers", nil) diff --git a/command/agent/syslog_test.go b/command/agent/syslog_test.go index 37c4e6fff..44fa82c5a 100644 --- a/command/agent/syslog_test.go +++ b/command/agent/syslog_test.go @@ -7,10 +7,11 @@ import ( gsyslog "github.com/hashicorp/go-syslog" "github.com/hashicorp/logutils" + 
"github.com/hashicorp/nomad/ci" ) func TestSyslogFilter(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Syslog not supported on Windows") } diff --git a/command/agent/system_endpoint_test.go b/command/agent/system_endpoint_test.go index f45c17c2c..504b44f54 100644 --- a/command/agent/system_endpoint_test.go +++ b/command/agent/system_endpoint_test.go @@ -4,10 +4,12 @@ import ( "net/http" "net/http/httptest" "testing" + + "github.com/hashicorp/nomad/ci" ) func TestHTTP_SystemGarbageCollect(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("PUT", "/v1/system/gc", nil) @@ -24,7 +26,7 @@ func TestHTTP_SystemGarbageCollect(t *testing.T) { } func TestHTTP_ReconcileJobSummaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) httpTest(t, nil, func(s *TestAgent) { // Make the HTTP request req, err := http.NewRequest("PUT", "/v1/system/reconcile/summaries", nil) diff --git a/command/agent_info_test.go b/command/agent_info_test.go index 5674a4f26..8b0869963 100644 --- a/command/agent_info_test.go +++ b/command/agent_info_test.go @@ -4,16 +4,17 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestAgentInfoCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &AgentInfoCommand{} } func TestAgentInfoCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -27,7 +28,7 @@ func TestAgentInfoCommand_Run(t *testing.T) { } func TestAgentInfoCommand_Run_JSON(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -44,7 +45,7 @@ func TestAgentInfoCommand_Run_JSON(t *testing.T) { } func TestAgentInfoCommand_Run_Gotemplate(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -61,7 +62,7 @@ func TestAgentInfoCommand_Run_Gotemplate(t *testing.T) { } func TestAgentInfoCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &AgentInfoCommand{Meta: Meta{Ui: ui}} diff --git a/command/agent_monitor_test.go b/command/agent_monitor_test.go index ba5a2a5b7..cf8b186a0 100644 --- a/command/agent_monitor_test.go +++ b/command/agent_monitor_test.go @@ -4,16 +4,17 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestMonitorCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &MonitorCommand{} } func TestMonitorCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() diff --git a/command/alloc_exec_test.go b/command/alloc_exec_test.go index f064aa404..e5586e4df 100644 --- a/command/alloc_exec_test.go +++ b/command/alloc_exec_test.go @@ -6,6 +6,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -20,7 +21,7 @@ import ( var _ cli.Command = &AllocExecCommand{} func TestAllocExecCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -143,8 +144,8 @@ func TestAllocExecCommand_Fails(t *testing.T) { } func TestAllocExecCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, 
nil) defer srv.Shutdown() @@ -167,7 +168,7 @@ func TestAllocExecCommand_AutocompleteArgs(t *testing.T) { } func TestAllocExecCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/alloc_fs_test.go b/command/alloc_fs_test.go index 86042eec0..967c3dd11 100644 --- a/command/alloc_fs_test.go +++ b/command/alloc_fs_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" @@ -12,12 +13,12 @@ import ( ) func TestFSCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &AllocFSCommand{} } func TestFSCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -88,8 +89,8 @@ func TestFSCommand_Fails(t *testing.T) { } func TestFSCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/alloc_logs_test.go b/command/alloc_logs_test.go index 6ac55bfb3..5698433fd 100644 --- a/command/alloc_logs_test.go +++ b/command/alloc_logs_test.go @@ -4,6 +4,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" @@ -12,12 +13,12 @@ import ( ) func TestLogsCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &AllocLogsCommand{} } func TestLogsCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, false, nil) defer srv.Shutdown() @@ -70,8 +71,8 @@ func TestLogsCommand_Fails(t *testing.T) { } func TestLogsCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/alloc_restart_test.go b/command/alloc_restart_test.go index a37a20aca..fb1e88762 100644 --- a/command/alloc_restart_test.go +++ b/command/alloc_restart_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -19,6 +20,8 @@ func TestAllocRestartCommand_Implements(t *testing.T) { } func TestAllocRestartCommand_Fails(t *testing.T) { + ci.Parallel(t) + srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -91,6 +94,8 @@ func TestAllocRestartCommand_Fails(t *testing.T) { } func TestAllocRestartCommand_Run(t *testing.T) { + ci.Parallel(t) + srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -152,6 +157,8 @@ func TestAllocRestartCommand_Run(t *testing.T) { } func TestAllocRestartCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) srv, _, url := testServer(t, true, nil) diff --git a/command/alloc_signal_test.go b/command/alloc_signal_test.go index 7142564cc..46e18d146 100644 --- a/command/alloc_signal_test.go +++ b/command/alloc_signal_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -15,12 +16,12 @@ import ( ) func TestAllocSignalCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) 
 	var _ cli.Command = &AllocSignalCommand{}
 }
 
 func TestAllocSignalCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -57,6 +58,8 @@ func TestAllocSignalCommand_Fails(t *testing.T) {
 }
 
 func TestAllocSignalCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
+
 	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
@@ -81,6 +84,8 @@ func TestAllocSignalCommand_AutocompleteArgs(t *testing.T) {
 }
 
 func TestAllocSignalCommand_Run(t *testing.T) {
+	ci.Parallel(t)
+
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
diff --git a/command/alloc_status_test.go b/command/alloc_status_test.go
index fca8a2a19..d377a16a8 100644
--- a/command/alloc_status_test.go
+++ b/command/alloc_status_test.go
@@ -9,6 +9,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/command/agent"
 	"github.com/hashicorp/nomad/helper/uuid"
 	"github.com/hashicorp/nomad/nomad/mock"
@@ -21,12 +22,12 @@ import (
 )
 
 func TestAllocStatusCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &AllocStatusCommand{}
 }
 
 func TestAllocStatusCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -88,7 +89,7 @@ func TestAllocStatusCommand_Fails(t *testing.T) {
 }
 
 func TestAllocStatusCommand_LifecycleInfo(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
@@ -151,7 +152,7 @@ func TestAllocStatusCommand_LifecycleInfo(t *testing.T) {
 }
 
 func TestAllocStatusCommand_Run(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
@@ -248,7 +249,7 @@ func TestAllocStatusCommand_Run(t *testing.T) {
 }
 
 func TestAllocStatusCommand_RescheduleInfo(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
@@ -297,7 +298,7 @@ func TestAllocStatusCommand_RescheduleInfo(t *testing.T) {
 }
 
 func TestAllocStatusCommand_ScoreMetrics(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
@@ -359,8 +360,8 @@ func TestAllocStatusCommand_ScoreMetrics(t *testing.T) {
 }
 
 func TestAllocStatusCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -383,7 +384,7 @@ func TestAllocStatusCommand_AutocompleteArgs(t *testing.T) {
 }
 
 func TestAllocStatusCommand_HostVolumes(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	// We have to create a tempdir for the host volume even though we're
 	// not going to use it b/c the server validates the config on startup
 	tmpDir, err := ioutil.TempDir("", "vol0")
@@ -451,7 +452,7 @@ func TestAllocStatusCommand_HostVolumes(t *testing.T) {
 }
 
 func TestAllocStatusCommand_CSIVolumes(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 	state := srv.Agent.Server().State()
diff --git a/command/alloc_stop_test.go b/command/alloc_stop_test.go
index 55d4cd1cc..54b0f7c1e 100644
--- a/command/alloc_stop_test.go
+++ b/command/alloc_stop_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
@@ -12,7 +13,7 @@ import (
 )
 
 func TestAllocStopCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &AllocStopCommand{}
 }
 
@@ -51,6 +52,8 @@ func TestAllocStop_Fails(t *testing.T) {
 }
 
 func TestAllocStop_Run(t *testing.T) {
+	ci.Parallel(t)
+
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
diff --git a/command/check_test.go b/command/check_test.go
index ecc47abdd..ea6af4f8d 100644
--- a/command/check_test.go
+++ b/command/check_test.go
@@ -4,11 +4,12 @@ import (
 	"fmt"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 )
 
 func TestAgentCheckCommand_ServerHealth(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -26,5 +27,4 @@ func TestAgentCheckCommand_ServerHealth(t *testing.T) {
 	if code != HealthCritical {
 		t.Fatalf("expected exitcode: %v, actual: %v", HealthCritical, code)
 	}
-
 }
diff --git a/command/config_validate_test.go b/command/config_validate_test.go
index 80ac91fe1..04cde7785 100644
--- a/command/config_validate_test.go
+++ b/command/config_validate_test.go
@@ -6,11 +6,12 @@ import (
 	"path/filepath"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 )
 
 func TestConfigValidateCommand_FailWithEmptyDir(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	fh, err := ioutil.TempDir("", "nomad")
 	if err != nil {
 		t.Fatalf("err: %s", err)
@@ -28,7 +29,7 @@ func TestConfigValidateCommand_FailWithEmptyDir(t *testing.T) {
 }
 
 func TestConfigValidateCommand_SucceedWithMinimalConfigFile(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	fh, err := ioutil.TempDir("", "nomad")
 	if err != nil {
 		t.Fatalf("err: %s", err)
@@ -55,7 +56,7 @@ func TestConfigValidateCommand_SucceedWithMinimalConfigFile(t *testing.T) {
 }
 
 func TestConfigValidateCommand_FailOnParseBadConfigFile(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	fh, err := ioutil.TempDir("", "nomad")
 	if err != nil {
 		t.Fatalf("err: %s", err)
@@ -79,7 +80,7 @@ func TestConfigValidateCommand_FailOnParseBadConfigFile(t *testing.T) {
 }
 
 func TestConfigValidateCommand_FailOnValidateParsableConfigFile(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	fh, err := ioutil.TempDir("", "nomad")
 	if err != nil {
 		t.Fatalf("err: %s", err)
diff --git a/command/data_format_test.go b/command/data_format_test.go
index 7765a0d72..103f8fb7c 100644
--- a/command/data_format_test.go
+++ b/command/data_format_test.go
@@ -3,6 +3,8 @@ package command
 import (
 	"strings"
 	"testing"
+
+	"github.com/hashicorp/nomad/ci"
 )
 
 type testData struct {
@@ -24,7 +26,7 @@ var (
 )
 
 func TestDataFormat(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	for k, v := range testFormat {
 		fm, err := DataFormat(k, v)
 		if err != nil {
@@ -43,7 +45,7 @@ func TestDataFormat(t *testing.T) {
 }
 
 func TestInvalidJSONTemplate(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	// Invalid template {{.foo}}
 	fm, err := DataFormat("template", "{{.foo}}")
 	if err != nil {
diff --git a/command/deployment_fail_test.go b/command/deployment_fail_test.go
index 0cc8af90c..463300c4a 100644
--- a/command/deployment_fail_test.go
+++ b/command/deployment_fail_test.go
@@ -4,6 +4,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
@@ -11,12 +12,12 @@ import (
 )
 
 func TestDeploymentFailCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &DeploymentFailCommand{}
 }
 
 func TestDeploymentFailCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &DeploymentFailCommand{Meta: Meta{Ui: ui}}
 
@@ -39,8 +40,8 @@ func TestDeploymentFailCommand_Fails(t *testing.T) {
 }
 
 func TestDeploymentFailCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/deployment_list_test.go b/command/deployment_list_test.go
index 433fa67a8..95099a1b3 100644
--- a/command/deployment_list_test.go
+++ b/command/deployment_list_test.go
@@ -4,16 +4,17 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 )
 
 func TestDeploymentListCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &DeploymentListCommand{}
 }
 
 func TestDeploymentListCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &DeploymentListCommand{Meta: Meta{Ui: ui}}
 
diff --git a/command/deployment_pause_test.go b/command/deployment_pause_test.go
index 1b0c5094e..a677461cd 100644
--- a/command/deployment_pause_test.go
+++ b/command/deployment_pause_test.go
@@ -4,6 +4,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
@@ -11,12 +12,12 @@ import (
 )
 
 func TestDeploymentPauseCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &DeploymentPauseCommand{}
 }
 
 func TestDeploymentPauseCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &DeploymentPauseCommand{Meta: Meta{Ui: ui}}
 
@@ -39,8 +40,8 @@ func TestDeploymentPauseCommand_Fails(t *testing.T) {
 }
 
 func TestDeploymentPauseCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/deployment_promote_test.go b/command/deployment_promote_test.go
index 9e9afd8e7..7259caca8 100644
--- a/command/deployment_promote_test.go
+++ b/command/deployment_promote_test.go
@@ -4,6 +4,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
@@ -11,12 +12,12 @@ import (
 )
 
 func TestDeploymentPromoteCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &DeploymentPromoteCommand{}
 }
 
 func TestDeploymentPromoteCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &DeploymentPromoteCommand{Meta: Meta{Ui: ui}}
 
@@ -39,8 +40,8 @@ func TestDeploymentPromoteCommand_Fails(t *testing.T) {
 }
 
 func TestDeploymentPromoteCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/deployment_resume_test.go b/command/deployment_resume_test.go
index 605f82f64..b136b34be 100644
--- a/command/deployment_resume_test.go
+++ b/command/deployment_resume_test.go
@@ -4,6 +4,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
@@ -11,12 +12,12 @@ import (
 )
 
 func TestDeploymentResumeCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &DeploymentResumeCommand{}
 }
 
 func TestDeploymentResumeCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &DeploymentResumeCommand{Meta: Meta{Ui: ui}}
 
@@ -39,8 +40,8 @@ func TestDeploymentResumeCommand_Fails(t *testing.T) {
 }
 
 func TestDeploymentResumeCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/deployment_status_test.go b/command/deployment_status_test.go
index 7a28d9967..777addd1e 100644
--- a/command/deployment_status_test.go
+++ b/command/deployment_status_test.go
@@ -3,6 +3,7 @@ package command
 import (
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
@@ -11,12 +12,12 @@ import (
 )
 
 func TestDeploymentStatusCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &DeploymentStatusCommand{}
 }
 
 func TestDeploymentStatusCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &DeploymentStatusCommand{Meta: Meta{Ui: ui}}
 
@@ -51,8 +52,8 @@ func TestDeploymentStatusCommand_Fails(t *testing.T) {
 }
 
 func TestDeploymentStatusCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/deployment_unblock_test.go b/command/deployment_unblock_test.go
index a55ec7259..ee1350b79 100644
--- a/command/deployment_unblock_test.go
+++ b/command/deployment_unblock_test.go
@@ -4,6 +4,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
@@ -11,12 +12,12 @@ import (
 )
 
 func TestDeploymentUnblockCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &DeploymentUnblockCommand{}
 }
 
 func TestDeploymentUnblockCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &DeploymentUnblockCommand{Meta: Meta{Ui: ui}}
 
@@ -39,8 +40,8 @@ func TestDeploymentUnblockCommand_Fails(t *testing.T) {
 }
 
 func TestDeploymentUnblockCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/eval_list_test.go b/command/eval_list_test.go
index 0984b3ac8..141e64a36 100644
--- a/command/eval_list_test.go
+++ b/command/eval_list_test.go
@@ -4,10 +4,12 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestEvalList_ArgsWithoutPageToken(t *testing.T) {
+	ci.Parallel(t)
 
 	cases := []struct {
 		cli string
diff --git a/command/eval_status_test.go b/command/eval_status_test.go
index f66d2c0f2..5315b4163 100644
--- a/command/eval_status_test.go
+++ b/command/eval_status_test.go
@@ -4,6 +4,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/mitchellh/cli"
@@ -12,12 +13,12 @@ import (
 )
 
 func TestEvalStatusCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &EvalStatusCommand{}
 }
 
 func TestEvalStatusCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -62,8 +63,8 @@ func TestEvalStatusCommand_Fails(t *testing.T) {
 }
 
 func TestEvalStatusCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/event_test.go b/command/event_test.go
index 5bc7c4dea..8c6a01651 100644
--- a/command/event_test.go
+++ b/command/event_test.go
@@ -3,12 +3,13 @@ package command
 import (
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
 )
 
 func TestEventCommand_BaseCommand(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
diff --git a/command/helper_devices_test.go b/command/helper_devices_test.go
index a54af1cad..062a4c1b1 100644
--- a/command/helper_devices_test.go
+++ b/command/helper_devices_test.go
@@ -4,12 +4,14 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
 func TestDeviceQualifiedID(t *testing.T) {
+	ci.Parallel(t)
 
 	require := require.New(t)
 
@@ -19,6 +21,8 @@ func TestDeviceQualifiedID(t *testing.T) {
 }
 
 func TestBuildDeviceStatsSummaryMap(t *testing.T) {
+	ci.Parallel(t)
+
 	hostDeviceStats := []*api.DeviceGroupStats{
 		{
 			Vendor: "vendor1",
@@ -74,6 +78,8 @@ func TestBuildDeviceStatsSummaryMap(t *testing.T) {
 }
 
 func TestFormatDeviceStats(t *testing.T) {
+	ci.Parallel(t)
+
 	statValue := func(v string) *api.StatValue {
 		return &api.StatValue{
 			StringVal: helper.StringToPtr(v),
@@ -129,6 +135,8 @@ func TestFormatDeviceStats(t *testing.T) {
 }
 
 func TestNodeStatusCommand_GetDeviceResourcesForNode(t *testing.T) {
+	ci.Parallel(t)
+
 	hostDeviceStats := []*api.DeviceGroupStats{
 		{
 			Vendor: "vendor1",
@@ -201,6 +209,8 @@ func TestNodeStatusCommand_GetDeviceResourcesForNode(t *testing.T) {
 }
 
 func TestNodeStatusCommand_GetDeviceResources(t *testing.T) {
+	ci.Parallel(t)
+
 	hostDeviceStats := []*api.DeviceGroupStats{
 		{
 			Vendor: "vendor1",
@@ -248,6 +258,8 @@ func TestNodeStatusCommand_GetDeviceResources(t *testing.T) {
 	assert.Equal(t, expected, formattedDevices)
 }
 
 func TestGetDeviceAttributes(t *testing.T) {
+	ci.Parallel(t)
+
 	d := &api.NodeDeviceResource{
 		Vendor: "Vendor",
 		Type:   "Type",
diff --git a/command/helpers_test.go b/command/helpers_test.go
index 275a5d249..7884b3abe 100644
--- a/command/helpers_test.go
+++ b/command/helpers_test.go
@@ -13,6 +13,7 @@ import (
 	"time"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/helper/flatmap"
 	"github.com/kr/pretty"
@@ -21,7 +22,7 @@ import (
 )
 
 func TestHelpers_FormatKV(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	in := []string{"alpha|beta", "charlie|delta", "echo|"}
 	out := formatKV(in)
 
@@ -35,7 +36,7 @@ func TestHelpers_FormatKV(t *testing.T) {
 }
 
 func TestHelpers_FormatList(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	in := []string{"alpha|beta||delta"}
 	out := formatList(in)
 
@@ -47,7 +48,7 @@ func TestHelpers_FormatList(t *testing.T) {
 }
 
 func TestHelpers_NodeID(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, _ := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -64,7 +65,7 @@ func TestHelpers_NodeID(t *testing.T) {
 }
 
 func TestHelpers_LineLimitReader_NoTimeLimit(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	helloString := `hello
world
this
@@ -166,7 +167,7 @@ func (t *testReadCloser) Close() error {
 }
 
 func TestHelpers_LineLimitReader_TimeLimit(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	// Create the test reader
 	in := &testReadCloser{data: make(chan []byte)}
 
@@ -256,7 +257,7 @@ var (
 )
 
 // Test APIJob with local jobfile
 func TestJobGetter_LocalFile(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	fh, err := ioutil.TempFile("", "nomad")
 	if err != nil {
 		t.Fatalf("err: %s", err)
@@ -283,7 +284,7 @@ func TestJobGetter_LocalFile(t *testing.T) {
 
 // TestJobGetter_LocalFile_InvalidHCL2 asserts that a custom message is emited
 // if the file is a valid HCL1 but not HCL2
 func TestJobGetter_LocalFile_InvalidHCL2(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	cases := []struct {
 		name string
@@ -331,7 +332,7 @@ func TestJobGetter_LocalFile_InvalidHCL2(t *testing.T) {
 
 // TestJobGetter_HCL2_Variables asserts variable arguments from CLI
 // and varfiles are both honored
 func TestJobGetter_HCL2_Variables(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	hcl := `
variables {
@@ -376,7 +377,7 @@ job "example" {
 }
 
 func TestJobGetter_HCL2_Variables_StrictFalse(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	hcl := `
variables {
@@ -396,7 +397,7 @@ job "example" {
 
 	// Both the CLI and var file contain variables that are not used with the
 	// template and therefore would error, if hcl2-strict was true.
-	cliArgs := []string{`var2=from-cli`,`unsedVar1=from-cli`}
+	cliArgs := []string{`var2=from-cli`, `unsedVar1=from-cli`}
 	fileVars := `
var3 = "from-varfile"
unsedVar2 = "from-varfile"
@@ -428,7 +429,7 @@ unsedVar2 = "from-varfile"
 }
 
 // Test StructJob with jobfile from HTTP Server
 func TestJobGetter_HTTPServer(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
 		fmt.Fprintf(w, job)
 	})
@@ -493,7 +494,7 @@ func TestPrettyTimeDiff(t *testing.T) {
 
 // TestUiErrorWriter asserts that writer buffers and
 func TestUiErrorWriter(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var outBuf, errBuf bytes.Buffer
 	ui := &cli.BasicUi{
 
diff --git a/command/integration_test.go b/command/integration_test.go
index f509b26cb..1cf207010 100644
--- a/command/integration_test.go
+++ b/command/integration_test.go
@@ -11,11 +11,12 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestIntegration_Command_NomadInit(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	tmpDir, err := ioutil.TempDir("", "nomadtest-rootsecretdir")
 	if err != nil {
 		t.Fatalf("unable to create tempdir for test: %v", err)
@@ -41,8 +42,8 @@ func TestIntegration_Command_NomadInit(t *testing.T) {
 }
 
 func TestIntegration_Command_RoundTripJob(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 	tmpDir, err := ioutil.TempDir("", "nomadtest-rootsecretdir")
 	assert.Nil(err)
 	defer os.RemoveAll(tmpDir)
@@ -91,5 +92,4 @@ func TestIntegration_Command_RoundTripJob(t *testing.T) {
 		_, err := cmd.Output()
 		assert.Nil(err)
 	}
-
 }
diff --git a/command/job_allocs_test.go b/command/job_allocs_test.go
index 449a12253..ce2cfea63 100644
--- a/command/job_allocs_test.go
+++ b/command/job_allocs_test.go
@@ -3,21 +3,21 @@ package command
 import (
 	"testing"
 
-	"github.com/hashicorp/nomad/nomad/structs"
-
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
+	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/stretchr/testify/require"
 )
 
 func TestJobAllocsCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobAllocsCommand{}
 }
 
 func TestJobAllocsCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
@@ -50,7 +50,7 @@ func TestJobAllocsCommand_Fails(t *testing.T) {
 }
 
 func TestJobAllocsCommand_Run(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
@@ -93,7 +93,7 @@ func TestJobAllocsCommand_Run(t *testing.T) {
 }
 
 func TestJobAllocsCommand_Template(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
@@ -152,7 +152,7 @@ func TestJobAllocsCommand_Template(t *testing.T) {
 }
 
 func TestJobAllocsCommand_AutocompleteArgs(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
diff --git a/command/job_deployments_test.go b/command/job_deployments_test.go
index eb764ba2b..d727ac2fa 100644
--- a/command/job_deployments_test.go
+++ b/command/job_deployments_test.go
@@ -4,21 +4,21 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/hashicorp/nomad/nomad/structs"
-
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
+	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestJobDeploymentsCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobDeploymentsCommand{}
 }
 
 func TestJobDeploymentsCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobDeploymentsCommand{Meta: Meta{Ui: ui}}
 
@@ -41,7 +41,8 @@ func TestJobDeploymentsCommand_Fails(t *testing.T) {
 }
 
 func TestJobDeploymentsCommand_Run(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
+
 	assert := assert.New(t)
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -85,7 +86,7 @@ func TestJobDeploymentsCommand_Run(t *testing.T) {
 }
 
 func TestJobDeploymentsCommand_Run_Latest(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	assert := assert.New(t)
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -129,8 +130,8 @@ func TestJobDeploymentsCommand_Run_Latest(t *testing.T) {
 }
 
 func TestJobDeploymentsCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/job_dispatch_test.go b/command/job_dispatch_test.go
index 37e4a3926..ff7410340 100644
--- a/command/job_dispatch_test.go
+++ b/command/job_dispatch_test.go
@@ -4,21 +4,21 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/hashicorp/nomad/nomad/structs"
-
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
+	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/stretchr/testify/require"
 )
 
 func TestJobDispatchCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobDispatchCommand{}
 }
 
 func TestJobDispatchCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobDispatchCommand{Meta: Meta{Ui: ui}}
 
@@ -50,7 +50,7 @@ func TestJobDispatchCommand_Fails(t *testing.T) {
 }
 
 func TestJobDispatchCommand_AutocompleteArgs(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
diff --git a/command/job_eval_test.go b/command/job_eval_test.go
index 6bc72df8a..74f627601 100644
--- a/command/job_eval_test.go
+++ b/command/job_eval_test.go
@@ -1,11 +1,11 @@
 package command
 
 import (
+	"fmt"
 	"strings"
 	"testing"
 
-	"fmt"
-
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"
@@ -16,12 +16,12 @@ import (
 )
 
 func TestJobEvalCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobEvalCommand{}
 }
 
 func TestJobEvalCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobEvalCommand{Meta: Meta{Ui: ui}}
 
@@ -46,7 +46,7 @@ func TestJobEvalCommand_Fails(t *testing.T) {
 }
 
 func TestJobEvalCommand_Run(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
@@ -103,8 +103,8 @@ func TestJobEvalCommand_Run(t *testing.T) {
 }
 
 func TestJobEvalCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/job_history_test.go b/command/job_history_test.go
index c5c613d48..c85fee2a9 100644
--- a/command/job_history_test.go
+++ b/command/job_history_test.go
@@ -4,21 +4,21 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/hashicorp/nomad/nomad/structs"
-
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
+	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestJobHistoryCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobDispatchCommand{}
 }
 
 func TestJobHistoryCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobHistoryCommand{Meta: Meta{Ui: ui}}
 
@@ -41,8 +41,8 @@ func TestJobHistoryCommand_Fails(t *testing.T) {
 }
 
 func TestJobHistoryCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/job_init_test.go b/command/job_init_test.go
index 35575c657..32e628a6a 100644
--- a/command/job_init_test.go
+++ b/command/job_init_test.go
@@ -6,17 +6,18 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
 )
 
 func TestInitCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobInitCommand{}
 }
 
 func TestInitCommand_Run(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobInitCommand{Meta: Meta{Ui: ui}}
 
@@ -79,7 +80,7 @@ func TestInitCommand_Run(t *testing.T) {
 }
 
 func TestInitCommand_defaultJob(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	// Ensure the job file is always written with spaces instead of tabs. Since
 	// the default job file is embedded in the go file, it's easy for tabs to
 	// slip in.
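
[Editor's note: every hunk in this patch swaps t.Parallel() for ci.Parallel(t), but the ci package itself is not shown in this section. A minimal sketch of what such a helper could look like follows, assuming it simply gates t.Parallel behind an environment check; the NOMAD_TEST_DISABLE_PARALLEL variable name is hypothetical and used only for illustration.]

package ci

import (
	"os"
	"testing"
)

// Parallel marks the test as eligible to run in parallel with other
// tests, unless parallelism has been disabled for the whole run (for
// example on a resource-constrained CI runner). Centralizing the call
// gives the test suite one switch instead of hundreds of call sites.
func Parallel(t *testing.T) {
	t.Helper()
	// NOMAD_TEST_DISABLE_PARALLEL is an assumed knob for illustration.
	if os.Getenv("NOMAD_TEST_DISABLE_PARALLEL") == "" {
		t.Parallel()
	}
}
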
@@ -90,7 +91,7 @@ func TestInitCommand_defaultJob(t *testing.T) {
 }
 
 func TestInitCommand_customFilename(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobInitCommand{Meta: Meta{Ui: ui}}
 	filename := "custom.nomad"
diff --git a/command/job_inspect_test.go b/command/job_inspect_test.go
index 6f72b4254..ac3809778 100644
--- a/command/job_inspect_test.go
+++ b/command/job_inspect_test.go
@@ -4,6 +4,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/mitchellh/cli"
@@ -12,12 +13,12 @@ import (
 )
 
 func TestInspectCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobInspectCommand{}
 }
 
 func TestInspectCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -61,8 +62,8 @@ func TestInspectCommand_Fails(t *testing.T) {
 }
 
 func TestInspectCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/job_periodic_force_test.go b/command/job_periodic_force_test.go
index 8197d0fed..b7ffc19e0 100644
--- a/command/job_periodic_force_test.go
+++ b/command/job_periodic_force_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
@@ -15,12 +16,12 @@ import (
 )
 
 func TestJobPeriodicForceCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobPeriodicForceCommand{}
 }
 
 func TestJobPeriodicForceCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobPeriodicForceCommand{Meta: Meta{Ui: ui}}
 
@@ -38,7 +39,7 @@ func TestJobPeriodicForceCommand_Fails(t *testing.T) {
 }
 
 func TestJobPeriodicForceCommand_AutocompleteArgs(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
@@ -76,7 +77,7 @@ func TestJobPeriodicForceCommand_AutocompleteArgs(t *testing.T) {
 }
 
 func TestJobPeriodicForceCommand_NonPeriodicJob(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 	testutil.WaitForResult(func() (bool, error) {
@@ -113,7 +114,7 @@ func TestJobPeriodicForceCommand_NonPeriodicJob(t *testing.T) {
 }
 
 func TestJobPeriodicForceCommand_SuccessfulPeriodicForceDetach(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 	testutil.WaitForResult(func() (bool, error) {
@@ -155,7 +156,7 @@ func TestJobPeriodicForceCommand_SuccessfulPeriodicForceDetach(t *testing.T) {
 }
 
 func TestJobPeriodicForceCommand_SuccessfulPeriodicForce(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 	testutil.WaitForResult(func() (bool, error) {
diff --git a/command/job_plan_test.go b/command/job_plan_test.go
index 317370519..ad8a28ba3 100644
--- a/command/job_plan_test.go
+++ b/command/job_plan_test.go
@@ -8,18 +8,19 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
 )
 
 func TestPlanCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobRunCommand{}
 }
 
 func TestPlanCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	// Create a server
 	s := testutil.NewTestServer(t, nil)
 
@@ -113,7 +114,7 @@ job "job1" {
 }
 
 func TestPlanCommand_From_STDIN(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	stdinR, stdinW, err := os.Pipe()
 	if err != nil {
 		t.Fatalf("err: %s", err)
@@ -156,7 +157,7 @@ job "job1" {
 }
 
 func TestPlanCommand_From_URL(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobPlanCommand{
 		Meta: Meta{Ui: ui},
@@ -173,7 +174,7 @@ func TestPlanCommand_From_URL(t *testing.T) {
 }
 
 func TestPlanCommad_Preemptions(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobPlanCommand{Meta: Meta{Ui: ui}}
 	require := require.New(t)
diff --git a/command/job_promote_test.go b/command/job_promote_test.go
index 43630cd73..ec036e9a1 100644
--- a/command/job_promote_test.go
+++ b/command/job_promote_test.go
@@ -4,6 +4,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/structs"
 
 	"github.com/hashicorp/nomad/nomad/mock"
@@ -13,12 +14,12 @@ import (
 )
 
 func TestJobPromoteCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobPromoteCommand{}
 }
 
 func TestJobPromoteCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobPromoteCommand{Meta: Meta{Ui: ui}}
 
@@ -41,8 +42,8 @@ func TestJobPromoteCommand_Fails(t *testing.T) {
 }
 
 func TestJobPromoteCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/job_revert_test.go b/command/job_revert_test.go
index dcab1851c..f7378f2ad 100644
--- a/command/job_revert_test.go
+++ b/command/job_revert_test.go
@@ -4,6 +4,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	structs "github.com/hashicorp/nomad/nomad/structs"
 	"github.com/mitchellh/cli"
@@ -12,12 +13,12 @@ import (
 )
 
 func TestJobRevertCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobDispatchCommand{}
 }
 
 func TestJobRevertCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobRevertCommand{Meta: Meta{Ui: ui}}
 
@@ -40,8 +41,8 @@ func TestJobRevertCommand_Fails(t *testing.T) {
 }
 
 func TestJobRevertCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/job_run_test.go b/command/job_run_test.go
index 80609aed9..7caf907ca 100644
--- a/command/job_run_test.go
+++ b/command/job_run_test.go
@@ -6,17 +6,18 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
 )
 
 func TestRunCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobRunCommand{}
 }
 
 func TestRunCommand_Output_Json(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobRunCommand{Meta: Meta{Ui: ui}}
 
@@ -52,7 +53,7 @@ job "job1" {
 }
 
 func TestRunCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	// Create a server
 	s := testutil.NewTestServer(t, nil)
 
@@ -156,7 +157,7 @@ job "job1" {
 }
 
 func TestRunCommand_From_STDIN(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	stdinR, stdinW, err := os.Pipe()
 	if err != nil {
 		t.Fatalf("err: %s", err)
@@ -199,7 +200,7 @@ job "job1" {
 }
 
 func TestRunCommand_From_URL(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobRunCommand{
 		Meta: Meta{Ui: ui},
diff --git a/command/job_scale_test.go b/command/job_scale_test.go
index c61546594..d3726c55c 100644
--- a/command/job_scale_test.go
+++ b/command/job_scale_test.go
@@ -6,13 +6,14 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
 )
 
 func TestJobScaleCommand_SingleGroup(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 	testutil.WaitForResult(func() (bool, error) {
@@ -53,7 +54,7 @@ func TestJobScaleCommand_SingleGroup(t *testing.T) {
 }
 
 func TestJobScaleCommand_MultiGroup(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 	testutil.WaitForResult(func() (bool, error) {
diff --git a/command/job_scaling_events_test.go b/command/job_scaling_events_test.go
index e9954baab..530021280 100644
--- a/command/job_scaling_events_test.go
+++ b/command/job_scaling_events_test.go
@@ -5,13 +5,14 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
 )
 
 func TestJobScalingEventsCommand_Run(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 	testutil.WaitForResult(func() (bool, error) {
diff --git a/command/job_status_test.go b/command/job_status_test.go
index fbd169cdc..5c0aac2b2 100644
--- a/command/job_status_test.go
+++ b/command/job_status_test.go
@@ -8,6 +8,7 @@ import (
 	"time"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/command/agent"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
@@ -19,12 +20,12 @@ import (
 )
 
 func TestJobStatusCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobStatusCommand{}
 }
 
 func TestJobStatusCommand_Run(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 	testutil.WaitForResult(func() (bool, error) {
@@ -225,7 +226,7 @@ func TestJobStatusCommand_Run(t *testing.T) {
 }
 
 func TestJobStatusCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobStatusCommand{Meta: Meta{Ui: ui}}
 
@@ -248,8 +249,8 @@ func TestJobStatusCommand_Fails(t *testing.T) {
 }
 
 func TestJobStatusCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -272,8 +273,8 @@ func TestJobStatusCommand_AutocompleteArgs(t *testing.T) {
 }
 
 func TestJobStatusCommand_WithAccessPolicy(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	config := func(c *agent.Config) {
 		c.ACL.Enabled = true
@@ -339,7 +340,7 @@ func TestJobStatusCommand_WithAccessPolicy(t *testing.T) {
 }
 
 func TestJobStatusCommand_RescheduleEvals(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
 
diff --git a/command/job_stop_test.go b/command/job_stop_test.go
index 4bfca4eb4..a73c08309 100644
--- a/command/job_stop_test.go
+++ b/command/job_stop_test.go
@@ -4,6 +4,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/mitchellh/cli"
@@ -12,12 +13,12 @@ import (
 )
 
 func TestStopCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobStopCommand{}
 }
 
 func TestStopCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -52,8 +53,8 @@ func TestStopCommand_Fails(t *testing.T) {
 }
 
 func TestStopCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/job_validate_test.go b/command/job_validate_test.go
index 6e51f33d2..12c3980ef 100644
--- a/command/job_validate_test.go
+++ b/command/job_validate_test.go
@@ -6,17 +6,18 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
 )
 
 func TestValidateCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &JobValidateCommand{}
 }
 
 func TestValidateCommand(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	// Create a server
 	s := testutil.NewTestServer(t, nil)
 	defer s.Stop()
@@ -56,7 +57,7 @@ job "job1" {
 }
 
 func TestValidateCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobValidateCommand{Meta: Meta{Ui: ui}}
 
@@ -114,7 +115,7 @@ func TestValidateCommand_Fails(t *testing.T) {
 }
 
 func TestValidateCommand_From_STDIN(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	stdinR, stdinW, err := os.Pipe()
 	if err != nil {
 		t.Fatalf("err: %s", err)
@@ -160,7 +161,7 @@ job "job1" {
 }
 
 func TestValidateCommand_From_URL(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobRunCommand{
 		Meta: Meta{Ui: ui},
diff --git a/command/license_get_test.go b/command/license_get_test.go
index 22ca08b25..f8f4de253 100644
--- a/command/license_get_test.go
+++ b/command/license_get_test.go
@@ -5,6 +5,7 @@ import (
 	"time"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
 )
@@ -12,7 +13,7 @@ import (
 var _ cli.Command = &LicenseGetCommand{}
 
 func TestCommand_LicenseGet_OSSErr(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -30,6 +31,8 @@ func TestCommand_LicenseGet_OSSErr(t *testing.T) {
 }
 
 func TestOutputLicenseReply(t *testing.T) {
+	ci.Parallel(t)
+
 	now := time.Now()
 	lic := &api.LicenseReply{
 		License: &api.License{
diff --git a/command/meta_test.go b/command/meta_test.go
index 7ebce9036..27724faaa 100644
--- a/command/meta_test.go
+++ b/command/meta_test.go
@@ -8,12 +8,13 @@ import (
 	"testing"
 
 	"github.com/creack/pty"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
 )
 
 func TestMeta_FlagSet(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	cases := []struct {
 		Flags    FlagSetFlags
 		Expected []string
@@ -61,6 +62,8 @@ func TestMeta_FlagSet(t *testing.T) {
 }
 
 func TestMeta_Colorize(t *testing.T) {
+	ci.Parallel(t)
+
 	type testCaseSetupFn func(*testing.T, *Meta)
 
 	cases := []struct {
diff --git a/command/metrics_test.go b/command/metrics_test.go
index a537f3997..4412f1e50 100644
--- a/command/metrics_test.go
+++ b/command/metrics_test.go
@@ -3,6 +3,7 @@ package command
 import (
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
 )
@@ -10,7 +11,7 @@ import (
 var _ cli.Command = &OperatorMetricsCommand{}
 
 func TestCommand_Metrics_Cases(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
diff --git a/command/monitor_test.go b/command/monitor_test.go
index 297560f06..0d54b567d 100644
--- a/command/monitor_test.go
+++ b/command/monitor_test.go
@@ -6,13 +6,14 @@ import (
 	"time"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
 )
 
 func TestMonitor_Update_Eval(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	mon := newMonitor(ui, nil, fullId)
 
@@ -66,7 +67,7 @@ func TestMonitor_Update_Eval(t *testing.T) {
 }
 
 func TestMonitor_Update_Allocs(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	mon := newMonitor(ui, nil, fullId)
 
@@ -137,7 +138,7 @@ func TestMonitor_Update_Allocs(t *testing.T) {
 }
 
 func TestMonitor_Update_AllocModification(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	mon := newMonitor(ui, nil, fullId)
 
@@ -173,7 +174,7 @@ func TestMonitor_Update_AllocModification(t *testing.T) {
 }
 
 func TestMonitor_Monitor(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, client, _ := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -220,6 +221,8 @@ func TestMonitor_Monitor(t *testing.T) {
 }
 
 func TestMonitor_formatAllocMetric(t *testing.T) {
+	ci.Parallel(t)
+
 	tests := []struct {
 		Name    string
 		Metrics *api.AllocationMetric
diff --git a/command/namespace_apply_test.go b/command/namespace_apply_test.go
index 95164b2ba..e27b93531 100644
--- a/command/namespace_apply_test.go
+++ b/command/namespace_apply_test.go
@@ -4,17 +4,18 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestNamespaceApplyCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &NamespaceApplyCommand{}
 }
 
 func TestNamespaceApplyCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &NamespaceApplyCommand{Meta: Meta{Ui: ui}}
 
@@ -37,7 +38,7 @@ func TestNamespaceApplyCommand_Fails(t *testing.T) {
 }
 
 func TestNamespaceApplyCommand_Good(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Create a server
 	srv, client, url := testServer(t, true, nil)
diff --git a/command/namespace_delete_test.go b/command/namespace_delete_test.go
index 95fe7c8ba..b806b2aa7 100644
--- a/command/namespace_delete_test.go
+++ b/command/namespace_delete_test.go
@@ -5,18 +5,19 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestNamespaceDeleteCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &NamespaceDeleteCommand{}
 }
 
 func TestNamespaceDeleteCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &NamespaceDeleteCommand{Meta: Meta{Ui: ui}}
 
@@ -39,7 +40,7 @@ func TestNamespaceDeleteCommand_Fails(t *testing.T) {
 }
 
 func TestNamespaceDeleteCommand_Good(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Create a server
 	srv, client, url := testServer(t, true, nil)
@@ -66,8 +67,8 @@ func TestNamespaceDeleteCommand_Good(t *testing.T) {
 }
 
 func TestNamespaceDeleteCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/namespace_inspect_test.go b/command/namespace_inspect_test.go
index 35bfef085..e81744051 100644
--- a/command/namespace_inspect_test.go
+++ b/command/namespace_inspect_test.go
@@ -5,18 +5,19 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestNamespaceInspectCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &NamespaceInspectCommand{}
 }
 
 func TestNamespaceInspectCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &NamespaceInspectCommand{Meta: Meta{Ui: ui}}
 
@@ -39,7 +40,7 @@ func TestNamespaceInspectCommand_Fails(t *testing.T) {
 }
 
 func TestNamespaceInspectCommand_Good(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Create a server
 	srv, client, url := testServer(t, true, nil)
@@ -67,8 +68,8 @@ func TestNamespaceInspectCommand_Good(t *testing.T) {
 }
 
 func TestNamespaceInspectCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -96,7 +97,7 @@ func TestNamespaceInspectCommand_AutocompleteArgs(t *testing.T) {
 // command should pull the matching namespace rather than
 // displaying the multiple match error
 func TestNamespaceInspectCommand_NamespaceMatchesPrefix(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Create a server
 	srv, client, url := testServer(t, true, nil)
diff --git a/command/namespace_list_test.go b/command/namespace_list_test.go
index b8e662bdd..0cc2d67a4 100644
--- a/command/namespace_list_test.go
+++ b/command/namespace_list_test.go
@@ -4,13 +4,14 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 )
 
 var _ cli.Command = (*NamespaceListCommand)(nil)
 
 func TestNamespaceListCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &NamespaceListCommand{Meta: Meta{Ui: ui}}
 
@@ -33,7 +34,7 @@ func TestNamespaceListCommand_Fails(t *testing.T) {
 }
 
 func TestNamespaceListCommand_List(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Create a server
 	srv, _, url := testServer(t, true, nil)
diff --git a/command/namespace_status_test.go b/command/namespace_status_test.go
index fa70ed218..084ef3233 100644
--- a/command/namespace_status_test.go
+++ b/command/namespace_status_test.go
@@ -5,18 +5,19 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestNamespaceStatusCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &NamespaceStatusCommand{}
 }
 
 func TestNamespaceStatusCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &NamespaceStatusCommand{Meta: Meta{Ui: ui}}
 
@@ -39,7 +40,7 @@ func TestNamespaceStatusCommand_Fails(t *testing.T) {
 }
 
 func TestNamespaceStatusCommand_Good(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Create a server
 	srv, client, url := testServer(t, true, nil)
@@ -68,7 +69,7 @@ func TestNamespaceStatusCommand_Good(t *testing.T) {
 }
 
 func TestNamespaceStatusCommand_Good_Quota(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Create a server
 	srv, client, url := testServer(t, true, nil)
@@ -112,8 +113,8 @@ func TestNamespaceStatusCommand_Good_Quota(t *testing.T) {
 }
 
 func TestNamespaceStatusCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -141,7 +142,7 @@ func TestNamespaceStatusCommand_AutocompleteArgs(t *testing.T) {
 // command should pull the matching namespace rather than
 // displaying the multiple match error
 func TestNamespaceStatusCommand_NamespaceMatchesPrefix(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Create a server
 	srv, client, url := testServer(t, true, nil)
diff --git a/command/node_config_test.go b/command/node_config_test.go
index c7dd3c57c..4583883a9 100644
--- a/command/node_config_test.go
+++ b/command/node_config_test.go
@@ -4,17 +4,18 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/command/agent"
 	"github.com/mitchellh/cli"
 )
 
 func TestClientConfigCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &NodeConfigCommand{}
 }
 
 func TestClientConfigCommand_UpdateServers(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, true, func(c *agent.Config) {
 		c.Server.BootstrapExpect = 0
 	})
@@ -47,7 +48,7 @@ func TestClientConfigCommand_UpdateServers(t *testing.T) {
 }
 
 func TestClientConfigCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &NodeConfigCommand{Meta: Meta{Ui: ui}}
 
diff --git a/command/node_drain_test.go b/command/node_drain_test.go
index 22aa5c2ed..02b3f11e0 100644
--- a/command/node_drain_test.go
+++ b/command/node_drain_test.go
@@ -8,6 +8,7 @@ import (
 	"time"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/command/agent"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/testutil"
@@ -18,12 +19,12 @@ import (
 )
 
 func TestNodeDrainCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &NodeDrainCommand{}
 }
 
 func TestNodeDrainCommand_Detach(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 	server, client, url := testServer(t, true, func(c *agent.Config) {
 		c.NodeName = "drain_detach_node"
@@ -96,7 +97,7 @@ func TestNodeDrainCommand_Detach(t *testing.T) {
 }
 
 func TestNodeDrainCommand_Monitor(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 	server, client, url := testServer(t, true, func(c *agent.Config) {
 		c.NodeName = "drain_monitor_node"
@@ -256,7 +257,7 @@ func TestNodeDrainCommand_Monitor(t *testing.T) {
 }
 
 func TestNodeDrainCommand_Monitor_NoDrainStrategy(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 	server, client, url := testServer(t, true, func(c *agent.Config) {
 		c.NodeName = "drain_monitor_node2"
@@ -298,7 +299,7 @@ func TestNodeDrainCommand_Monitor_NoDrainStrategy(t *testing.T) {
 }
 
 func TestNodeDrainCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -412,8 +413,8 @@ func TestNodeDrainCommand_Fails(t *testing.T) {
 }
 
 func TestNodeDrainCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/node_eligibility_test.go b/command/node_eligibility_test.go
index 2e9120d86..bf2b0e546 100644
--- a/command/node_eligibility_test.go
+++ b/command/node_eligibility_test.go
@@ -5,6 +5,7 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
@@ -12,12 +13,12 @@ import (
 )
 
 func TestNodeEligibilityCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &NodeEligibilityCommand{}
 }
 
 func TestNodeEligibilityCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -90,8 +91,8 @@ func TestNodeEligibilityCommand_Fails(t *testing.T) {
 }
 
 func TestNodeEligibilityCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
diff --git a/command/node_status_test.go b/command/node_status_test.go
index 58dfb3e0f..eccd9f773 100644
--- a/command/node_status_test.go
+++ b/command/node_status_test.go
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/command/agent"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
@@ -15,12 +16,12 @@ import (
 )
 
 func TestNodeStatusCommand_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &NodeStatusCommand{}
 }
 
 func TestNodeStatusCommand_Self(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	// Start in dev mode so we get a node registration
 	srv, client, url := testServer(t, true, func(c *agent.Config) {
 		c.NodeName = "mynode"
@@ -71,7 +72,7 @@ func TestNodeStatusCommand_Self(t *testing.T) {
 }
 
 func TestNodeStatusCommand_Run(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	// Start in dev mode so we get a node registration
 	srv, client, url := testServer(t, true, func(c *agent.Config) {
 		c.NodeName = "mynode"
@@ -163,7 +164,7 @@ func TestNodeStatusCommand_Run(t *testing.T) {
 }
 
 func TestNodeStatusCommand_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	defer srv.Shutdown()
 
@@ -216,8 +217,8 @@ func TestNodeStatusCommand_Fails(t *testing.T) {
 }
 
 func TestNodeStatusCommand_AutocompleteArgs(t *testing.T) {
+	ci.Parallel(t)
 	assert := assert.New(t)
-	t.Parallel()
 
 	srv, client, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -251,7 +252,7 @@ func TestNodeStatusCommand_AutocompleteArgs(t *testing.T) {
 }
 
 func TestNodeStatusCommand_FormatDrain(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	assert := assert.New(t)
 
 	node := &api.Node{}
diff --git a/command/operator_api_test.go b/command/operator_api_test.go
index 937db63aa..813534a8d 100644
--- a/command/operator_api_test.go
+++ b/command/operator_api_test.go
@@ -8,6 +8,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
 )
@@ -15,6 +16,8 @@ import (
 // TestOperatorAPICommand_Paths asserts that the op api command normalizes
 // various path formats to the proper full address.
 func TestOperatorAPICommand_Paths(t *testing.T) {
+	ci.Parallel(t)
+
 	hits := make(chan *url.URL, 1)
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		hits <- r.URL
@@ -74,6 +77,8 @@ func TestOperatorAPICommand_Paths(t *testing.T) {
 
 // TestOperatorAPICommand_Curl asserts that -dryrun outputs a valid curl
 // command.
 func TestOperatorAPICommand_Curl(t *testing.T) {
+	ci.Parallel(t)
+
 	buf := bytes.NewBuffer(nil)
 	ui := &cli.BasicUi{
 		ErrorWriter: buf,
diff --git a/command/operator_autopilot_get_test.go b/command/operator_autopilot_get_test.go
index f5410b71d..07b510bd4 100644
--- a/command/operator_autopilot_get_test.go
+++ b/command/operator_autopilot_get_test.go
@@ -4,16 +4,17 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 )
 
 func TestOperator_Autopilot_GetConfig_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &OperatorRaftListCommand{}
 }
 
 func TestOperatorAutopilotGetConfigCommand(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	s, _, addr := testServer(t, false, nil)
 	defer s.Shutdown()
 
diff --git a/command/operator_autopilot_set_test.go b/command/operator_autopilot_set_test.go
index 9977abc40..0bff571fa 100644
--- a/command/operator_autopilot_set_test.go
+++ b/command/operator_autopilot_set_test.go
@@ -5,17 +5,18 @@ import (
 	"testing"
 	"time"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
 )
 
 func TestOperator_Autopilot_SetConfig_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &OperatorRaftListCommand{}
 }
 
 func TestOperatorAutopilotSetConfigCommand(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	require := require.New(t)
 	s, _, addr := testServer(t, false, nil)
 	defer s.Shutdown()
diff --git a/command/operator_autopilot_test.go b/command/operator_autopilot_test.go
index 5bff69291..85fd99103 100644
--- a/command/operator_autopilot_test.go
+++ b/command/operator_autopilot_test.go
@@ -3,10 +3,11 @@ package command
 import (
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 )
 
 func TestOperator_Autopilot_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &OperatorAutopilotCommand{}
 }
diff --git a/command/operator_debug_test.go b/command/operator_debug_test.go
index caeb3f814..943aa46ed 100644
--- a/command/operator_debug_test.go
+++ b/command/operator_debug_test.go
@@ -15,6 +15,7 @@ import (
 	consulapi "github.com/hashicorp/consul/api"
 	consultest "github.com/hashicorp/consul/sdk/testutil"
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	clienttest "github.com/hashicorp/nomad/client/testutil"
 	"github.com/hashicorp/nomad/command/agent"
 	"github.com/hashicorp/nomad/helper"
@@ -74,7 +75,7 @@ func newClientAgentConfigFunc(region string, nodeClass string, srvRPCAddr string
 }
 
 func TestDebug_NodeClass(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Start test server and API client
 	srv, _, url := testServer(t, false, nil)
@@ -124,7 +125,7 @@ func TestDebug_NodeClass(t *testing.T) {
 }
 
 func TestDebug_ClientToServer(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Start test server and API client
 	srv, _, url := testServer(t, false, nil)
@@ -173,6 +174,8 @@ func TestDebug_ClientToServer(t *testing.T) {
 }
 
 func TestDebug_MultiRegion(t *testing.T) {
+	ci.Parallel(t)
+
 	region1 := "region1"
 	region2 := "region2"
 
@@ -269,7 +272,7 @@ func TestDebug_MultiRegion(t *testing.T) {
 }
 
 func TestDebug_SingleServer(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	testutil.WaitForLeader(t, srv.Agent.RPC)
 
@@ -303,7 +306,7 @@ func TestDebug_SingleServer(t *testing.T) {
 }
 
 func TestDebug_Failures(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	srv, _, url := testServer(t, false, nil)
 	testutil.WaitForLeader(t, srv.Agent.RPC)
 
@@ -356,7 +359,7 @@ func TestDebug_Failures(t *testing.T) {
 }
 
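[Editor's note: a pattern worth calling out across these hunks is that wherever a test previously interleaved assert := assert.New(t) and t.Parallel(), the rewrite consistently hoists the parallelism call to the first statement. A condensed illustration of the post-migration shape follows; TestExampleCommand_AutocompleteArgs is a made-up test name and the placeholder assertion stands in for real test logic.]

package command

import (
	"testing"

	"github.com/hashicorp/nomad/ci"
	"github.com/stretchr/testify/assert"
)

// TestExampleCommand_AutocompleteArgs illustrates the convention adopted
// throughout this patch: ci.Parallel(t) is always the first statement,
// followed by helper setup such as assert.New, so a reader can see at a
// glance that the test opts in to parallel execution.
func TestExampleCommand_AutocompleteArgs(t *testing.T) {
	ci.Parallel(t)
	a := assert.New(t)
	a.True(true) // placeholder assertion
}
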
 func TestDebug_Bad_CSIPlugin_Names(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Start test server and API client
 	srv, _, url := testServer(t, false, nil)
@@ -407,7 +410,7 @@ func buildPathSlice(path string, files []string) []string {
 }
 
 func TestDebug_CapturedFiles(t *testing.T) {
-	// t.Parallel()
+	// ci.Parallel(t)
 	srv, _, url := testServer(t, true, nil)
 	testutil.WaitForLeader(t, srv.Agent.RPC)
 
@@ -517,7 +520,7 @@ func TestDebug_CapturedFiles(t *testing.T) {
 }
 
 func TestDebug_ExistingOutput(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &OperatorDebugCommand{Meta: Meta{Ui: ui}}
 
@@ -534,7 +537,7 @@ func TestDebug_ExistingOutput(t *testing.T) {
 }
 
 func TestDebug_Fail_Pprof(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// Setup agent config with debug endpoints disabled
 	agentConfFunc := func(c *agent.Config) {
@@ -562,7 +565,7 @@ func TestDebug_Fail_Pprof(t *testing.T) {
 }
 
 func TestDebug_StringToSlice(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	cases := []struct {
 		input string
@@ -581,7 +584,7 @@ func TestDebug_StringToSlice(t *testing.T) {
 }
 
 func TestDebug_External(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	// address calculation honors CONSUL_HTTP_SSL
 	// ssl: true - Correct alignment
@@ -623,7 +626,7 @@ func TestDebug_External(t *testing.T) {
 }
 
 func TestDebug_WriteBytes_Nil(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	var testDir, testFile, testPath string
 	var testBytes []byte
@@ -646,7 +649,7 @@ func TestDebug_WriteBytes_Nil(t *testing.T) {
 }
 
 func TestDebug_WriteBytes_PathEscapesSandbox(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	var testDir, testFile string
 	var testBytes []byte
@@ -669,7 +672,7 @@ func TestDebug_WriteBytes_PathEscapesSandbox(t *testing.T) {
 }
 
 func TestDebug_CollectConsul(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	if testing.Short() {
 		t.Skip("-short set; skipping")
 	}
@@ -724,7 +727,7 @@ func TestDebug_CollectConsul(t *testing.T) {
 }
 
 func TestDebug_CollectVault(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	if testing.Short() {
 		t.Skip("-short set; skipping")
 	}
@@ -768,6 +771,8 @@ func TestDebug_CollectVault(t *testing.T) {
 
 // TestDebug_RedirectError asserts that redirect errors are detected so they
 // can be translated into more understandable output.
 func TestDebug_RedirectError(t *testing.T) {
+	ci.Parallel(t)
+
 	// Create a test server that always returns the error many versions of
 	// Nomad return instead of a 404 for unknown paths.
 	// 1st request redirects to /ui/
@@ -798,6 +803,8 @@ func TestDebug_RedirectError(t *testing.T) {
 // complete a debug run have their query options configured with the
 // -stale flag
 func TestDebug_StaleLeadership(t *testing.T) {
+	ci.Parallel(t)
+
 	srv, _, url := testServerWithoutLeader(t, false, nil)
 	addrServer := srv.HTTPAddr()
 
@@ -854,6 +861,8 @@ type testOutput struct {
 }
 
 func TestDebug_EventStream_TopicsFromString(t *testing.T) {
+	ci.Parallel(t)
+
 	cases := []struct {
 		name      string
 		topicList string
@@ -914,6 +923,8 @@ func TestDebug_EventStream_TopicsFromString(t *testing.T) {
 }
 
 func TestDebug_EventStream(t *testing.T) {
+	ci.Parallel(t)
+
 	// TODO dmay: specify output directory to allow inspection of eventstream.json
 	// TODO dmay: require specific events in the eventstream.json file(s)
 	// TODO dmay: scenario where no events are expected, verify "No events captured"
diff --git a/command/operator_keygen_test.go b/command/operator_keygen_test.go
index 1f12eb3c4..d003c7402 100644
--- a/command/operator_keygen_test.go
+++ b/command/operator_keygen_test.go
@@ -4,11 +4,13 @@ import (
 	"encoding/base64"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 )
 
 func TestKeygenCommand(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
+
 	ui := cli.NewMockUi()
 	c := &OperatorKeygenCommand{Meta: Meta{Ui: ui}}
 	code := c.Run(nil)
diff --git a/command/operator_raft_list_test.go b/command/operator_raft_list_test.go
index df283e88e..8c615bd3d 100644
--- a/command/operator_raft_list_test.go
+++ b/command/operator_raft_list_test.go
@@ -4,16 +4,17 @@ import (
 	"strings"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 )
 
 func TestOperator_Raft_ListPeers_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &OperatorRaftListCommand{}
 }
 
 func TestOperator_Raft_ListPeers(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	s, _, addr := testServer(t, false, nil)
 	defer s.Shutdown()
 
diff --git a/command/operator_raft_remove_test.go b/command/operator_raft_remove_test.go
index 886f07032..eea792459 100644
--- a/command/operator_raft_remove_test.go
+++ b/command/operator_raft_remove_test.go
@@ -3,17 +3,18 @@ package command
 import (
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestOperator_Raft_RemovePeers_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &OperatorRaftRemoveCommand{}
 }
 
 func TestOperator_Raft_RemovePeer(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	assert := assert.New(t)
 	s, _, addr := testServer(t, false, nil)
 	defer s.Shutdown()
@@ -41,7 +42,7 @@ func TestOperator_Raft_RemovePeer(t *testing.T) {
 }
 
 func TestOperator_Raft_RemovePeerAddress(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	assert := assert.New(t)
 	s, _, addr := testServer(t, false, nil)
 	defer s.Shutdown()
@@ -60,7 +61,7 @@ func TestOperator_Raft_RemovePeerAddress(t *testing.T) {
 }
 
 func TestOperator_Raft_RemovePeerID(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	assert := assert.New(t)
 	s, _, addr := testServer(t, false, nil)
 	defer s.Shutdown()
diff --git a/command/operator_raft_test.go b/command/operator_raft_test.go
index 73934acff..6ee6d2d9a 100644
--- a/command/operator_raft_test.go
+++ b/command/operator_raft_test.go
@@ -3,10 +3,11 @@ package command
 import (
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 )
 
 func TestOperator_Raft_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &OperatorRaftCommand{}
 }
diff --git a/command/operator_snapshot_inspect_test.go b/command/operator_snapshot_inspect_test.go
index 16a408942..a0203cbf1 100644
--- a/command/operator_snapshot_inspect_test.go
+++ b/command/operator_snapshot_inspect_test.go
@@ -7,13 +7,14 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/command/agent"
 	"github.com/mitchellh/cli"
 	"github.com/stretchr/testify/require"
 )
 
 func TestOperatorSnapshotInspect_Works(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	snapPath := generateSnapshotFile(t, nil)
 
@@ -33,10 +34,10 @@ func TestOperatorSnapshotInspect_Works(t *testing.T) {
 	} {
 		require.Contains(t, output, key)
 	}
-
 }
+
 func TestOperatorSnapshotInspect_HandlesFailure(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	tmpDir, err := ioutil.TempDir("", "nomad-clitests-")
 	require.NoError(t, err)
@@ -65,11 +66,9 @@ func TestOperatorSnapshotInspect_HandlesFailure(t *testing.T) {
 		require.NotZero(t, code)
 		require.Contains(t, ui.ErrorWriter.String(), "Error verifying snapshot")
 	})
-
 }
 
 func generateSnapshotFile(t *testing.T, prepare func(srv *agent.TestAgent, client *api.Client, url string)) string {
-
 	tmpDir, err := ioutil.TempDir("", "nomad-tempdir")
 	require.NoError(t, err)
 
diff --git a/command/operator_snapshot_restore_test.go b/command/operator_snapshot_restore_test.go
index be88fee6f..af1143d5d 100644
--- a/command/operator_snapshot_restore_test.go
+++ b/command/operator_snapshot_restore_test.go
@@ -8,6 +8,7 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/command/agent"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/mitchellh/cli"
@@ -15,7 +16,7 @@ import (
 )
 
 func TestOperatorSnapshotRestore_Works(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	tmpDir, err := ioutil.TempDir("", "nomad-tempdir")
 	require.NoError(t, err)
@@ -77,7 +78,7 @@ job "snapshot-test-job" {
 }
 
 func TestOperatorSnapshotRestore_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &OperatorSnapshotRestoreCommand{Meta: Meta{Ui: ui}}
 
diff --git a/command/operator_snapshot_save_test.go b/command/operator_snapshot_save_test.go
index 2e712db0c..fd0c9a086 100644
--- a/command/operator_snapshot_save_test.go
+++ b/command/operator_snapshot_save_test.go
@@ -6,6 +6,7 @@ import (
 	"path/filepath"
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/command/agent"
 	"github.com/hashicorp/nomad/helper/snapshot"
 	"github.com/mitchellh/cli"
@@ -13,7 +14,7 @@ import (
 )
 
 func TestOperatorSnapshotSave_Works(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 
 	tmpDir, err := ioutil.TempDir("", "nomad-tempdir")
 	require.NoError(t, err)
@@ -51,7 +52,7 @@ func TestOperatorSnapshotSave_Works(t *testing.T) {
 }
 
 func TestOperatorSnapshotSave_Fails(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &OperatorSnapshotSaveCommand{Meta: Meta{Ui: ui}}
 
diff --git a/command/operator_test.go b/command/operator_test.go
index 7364a7cfa..d1d9d92f4 100644
--- a/command/operator_test.go
+++ b/command/operator_test.go
@@ -3,10 +3,11 @@ package command
 import (
 	"testing"
 
+	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
 )
 
 func TestOperator_Implements(t *testing.T) {
-	t.Parallel()
+	ci.Parallel(t)
 	var _ cli.Command = &OperatorCommand{}
 }
diff --git a/command/plugin_status_test.go b/command/plugin_status_test.go
index 15f037c24..d33317953 100644
--- a/command/plugin_status_test.go
+++ b/command/plugin_status_test.go
@@ -4,6 +4,7 @@ import (
 	"testing"
 
"github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/state" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -11,12 +12,12 @@ import ( ) func TestPluginStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &PluginStatusCommand{} } func TestPluginStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &PluginStatusCommand{Meta: Meta{Ui: ui}} @@ -38,7 +39,7 @@ func TestPluginStatusCommand_Fails(t *testing.T) { } func TestPluginStatusCommand_AutocompleteArgs(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/quota_apply_test.go b/command/quota_apply_test.go index 57b1b195c..487111d04 100644 --- a/command/quota_apply_test.go +++ b/command/quota_apply_test.go @@ -4,16 +4,17 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestQuotaApplyCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaApplyCommand{} } func TestQuotaApplyCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaApplyCommand{Meta: Meta{Ui: ui}} diff --git a/command/quota_delete_test.go b/command/quota_delete_test.go index 351332c62..f8afb7079 100644 --- a/command/quota_delete_test.go +++ b/command/quota_delete_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/mitchellh/cli" "github.com/posener/complete" @@ -15,12 +16,12 @@ import ( ) func TestQuotaDeleteCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaDeleteCommand{} } func TestQuotaDeleteCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaDeleteCommand{Meta: Meta{Ui: ui}} @@ -43,7 +44,7 @@ func TestQuotaDeleteCommand_Fails(t *testing.T) { } func TestQuotaDeleteCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) @@ -68,8 +69,8 @@ func TestQuotaDeleteCommand_Good(t *testing.T) { } func TestQuotaDeleteCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/quota_init_test.go b/command/quota_init_test.go index 889d66a87..936e28988 100644 --- a/command/quota_init_test.go +++ b/command/quota_init_test.go @@ -5,17 +5,18 @@ import ( "os" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) func TestQuotaInitCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaInitCommand{} } func TestQuotaInitCommand_Run_HCL(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaInitCommand{Meta: Meta{Ui: ui}} @@ -64,7 +65,7 @@ func TestQuotaInitCommand_Run_HCL(t *testing.T) { } func TestQuotaInitCommand_Run_JSON(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaInitCommand{Meta: Meta{Ui: ui}} diff --git a/command/quota_inspect_test.go b/command/quota_inspect_test.go index 6fee89c60..73c941e07 100644 --- a/command/quota_inspect_test.go +++ b/command/quota_inspect_test.go @@ -7,18 +7,19 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" 
"github.com/stretchr/testify/assert" ) func TestQuotaInspectCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaInspectCommand{} } func TestQuotaInspectCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaInspectCommand{Meta: Meta{Ui: ui}} @@ -41,7 +42,7 @@ func TestQuotaInspectCommand_Fails(t *testing.T) { } func TestQuotaInspectCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) @@ -67,8 +68,8 @@ func TestQuotaInspectCommand_Good(t *testing.T) { } func TestQuotaInspectCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/quota_list_test.go b/command/quota_list_test.go index d50935e7c..90785e3e8 100644 --- a/command/quota_list_test.go +++ b/command/quota_list_test.go @@ -7,17 +7,18 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" ) func TestQuotaListCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaListCommand{} } func TestQuotaListCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaListCommand{Meta: Meta{Ui: ui}} @@ -40,7 +41,7 @@ func TestQuotaListCommand_Fails(t *testing.T) { } func TestQuotaListCommand_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) // Create a server diff --git a/command/quota_status_test.go b/command/quota_status_test.go index 53689320f..b580414bf 100644 --- a/command/quota_status_test.go +++ b/command/quota_status_test.go @@ -7,18 +7,19 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/assert" ) func TestQuotaStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &QuotaStatusCommand{} } func TestQuotaStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &QuotaStatusCommand{Meta: Meta{Ui: ui}} @@ -41,7 +42,7 @@ func TestQuotaStatusCommand_Fails(t *testing.T) { } func TestQuotaStatusCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, client, url := testServer(t, true, nil) @@ -73,8 +74,8 @@ func TestQuotaStatusCommand_Good(t *testing.T) { } func TestQuotaStatusCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/command/recommendation_apply_test.go b/command/recommendation_apply_test.go index e3538b6b2..1ed797d7b 100644 --- a/command/recommendation_apply_test.go +++ b/command/recommendation_apply_test.go @@ -4,16 +4,16 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" - - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/testutil" ) func TestRecommendationApplyCommand_Run(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -92,6 +92,8 @@ func TestRecommendationApplyCommand_Run(t *testing.T) { } func TestRecommendationApplyCommand_AutocompleteArgs(t *testing.T) { 
+ ci.Parallel(t) + srv, client, url := testServer(t, false, nil) defer srv.Shutdown() diff --git a/command/recommendation_dismiss_test.go b/command/recommendation_dismiss_test.go index 71553b7ae..ebe095369 100644 --- a/command/recommendation_dismiss_test.go +++ b/command/recommendation_dismiss_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/stretchr/testify/require" @@ -14,8 +15,8 @@ import ( ) func TestRecommendationDismissCommand_Run(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -109,8 +110,8 @@ func TestRecommendationDismissCommand_AutocompleteArgs(t *testing.T) { } func testRecommendationAutocompleteCommand(t *testing.T, client *api.Client, srv *agent.TestAgent, cmd *RecommendationAutocompleteCommand) { + ci.Parallel(t) require := require.New(t) - t.Parallel() // Register a test job to write a recommendation against. testJob := testJob("recommendation_autocomplete") diff --git a/command/recommendation_info_test.go b/command/recommendation_info_test.go index 1e1a31864..2529b5986 100644 --- a/command/recommendation_info_test.go +++ b/command/recommendation_info_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" @@ -12,8 +13,8 @@ import ( ) func TestRecommendationInfoCommand_Run(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -88,6 +89,8 @@ func TestRecommendationInfoCommand_Run(t *testing.T) { } func TestRecommendationInfoCommand_AutocompleteArgs(t *testing.T) { + ci.Parallel(t) + srv, client, url := testServer(t, false, nil) defer srv.Shutdown() diff --git a/command/recommendation_list_test.go b/command/recommendation_list_test.go index c7234bd7c..aef63191b 100644 --- a/command/recommendation_list_test.go +++ b/command/recommendation_list_test.go @@ -4,16 +4,16 @@ import ( "sort" "testing" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/hashicorp/nomad/api" ) func TestRecommendationListCommand_Run(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -74,6 +74,8 @@ func TestRecommendationListCommand_Run(t *testing.T) { } func TestRecommendationListCommand_Sort(t *testing.T) { + ci.Parallel(t) + testCases := []struct { inputRecommendationList []*api.Recommendation expectedOutputList []*api.Recommendation diff --git a/command/scaling_policy_info_test.go b/command/scaling_policy_info_test.go index 29c93784c..962460bb7 100644 --- a/command/scaling_policy_info_test.go +++ b/command/scaling_policy_info_test.go @@ -6,13 +6,14 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) func TestScalingPolicyInfoCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { diff --git a/command/scaling_policy_list_test.go b/command/scaling_policy_list_test.go 
index 860646559..20439379d 100644 --- a/command/scaling_policy_list_test.go +++ b/command/scaling_policy_list_test.go @@ -3,16 +3,16 @@ package command import ( "testing" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/helper" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" - - "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/helper" ) func TestScalingPolicyListCommand_Run(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() srv, client, url := testServer(t, false, nil) defer srv.Shutdown() diff --git a/command/scaling_policy_test.go b/command/scaling_policy_test.go index 8b7c62935..4aa4b35b2 100644 --- a/command/scaling_policy_test.go +++ b/command/scaling_policy_test.go @@ -3,10 +3,13 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" ) func Test_formatScalingPolicyTarget(t *testing.T) { + ci.Parallel(t) + testCases := []struct { inputMap map[string]string expectedOutput string diff --git a/command/sentinel_apply_test.go b/command/sentinel_apply_test.go index 65f979cc1..29169c356 100644 --- a/command/sentinel_apply_test.go +++ b/command/sentinel_apply_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSentinelApplyCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SentinelApplyCommand{} } diff --git a/command/sentinel_delete_test.go b/command/sentinel_delete_test.go index 313e438aa..88682930f 100644 --- a/command/sentinel_delete_test.go +++ b/command/sentinel_delete_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSentinelDeleteCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SentinelDeleteCommand{} } diff --git a/command/sentinel_list_test.go b/command/sentinel_list_test.go index 98d1a307b..bb109cc83 100644 --- a/command/sentinel_list_test.go +++ b/command/sentinel_list_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSentinelListCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SentinelListCommand{} } diff --git a/command/sentinel_read_test.go b/command/sentinel_read_test.go index 8abb9d0c8..5e874cce9 100644 --- a/command/sentinel_read_test.go +++ b/command/sentinel_read_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSentinelReadCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SentinelReadCommand{} } diff --git a/command/server_force_leave_test.go b/command/server_force_leave_test.go index 9f449b900..4c01769d7 100644 --- a/command/server_force_leave_test.go +++ b/command/server_force_leave_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestServerForceLeaveCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &ServerForceLeaveCommand{} } diff --git a/command/server_join_test.go b/command/server_join_test.go index f3ec540be..bf8c5ca05 100644 --- a/command/server_join_test.go +++ b/command/server_join_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func 
TestServerJoinCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &ServerJoinCommand{} } diff --git a/command/server_members_test.go b/command/server_members_test.go index 523b47932..bf2e423d7 100644 --- a/command/server_members_test.go +++ b/command/server_members_test.go @@ -5,17 +5,18 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/mitchellh/cli" ) func TestServerMembersCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &ServerMembersCommand{} } func TestServerMembersCommand_Run(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, client, url := testServer(t, false, nil) defer srv.Shutdown() @@ -47,7 +48,7 @@ func TestServerMembersCommand_Run(t *testing.T) { } func TestMembersCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &ServerMembersCommand{Meta: Meta{Ui: ui}} @@ -72,7 +73,7 @@ func TestMembersCommand_Fails(t *testing.T) { // Tests that a single server region that left should still // not return an error and list other members in other regions func TestServerMembersCommand_MultiRegion_Leave(t *testing.T) { - t.Parallel() + ci.Parallel(t) config1 := func(c *agent.Config) { c.Region = "r1" diff --git a/command/status_test.go b/command/status_test.go index 4b3ec1a66..0e9e635f7 100644 --- a/command/status_test.go +++ b/command/status_test.go @@ -5,6 +5,7 @@ import ( "regexp" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -16,8 +17,8 @@ import ( ) func TestStatusCommand_Run_JobStatus(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -42,8 +43,8 @@ func TestStatusCommand_Run_JobStatus(t *testing.T) { } func TestStatusCommand_Run_JobStatus_MultiMatch(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -72,7 +73,7 @@ func TestStatusCommand_Run_JobStatus_MultiMatch(t *testing.T) { func TestStatusCommand_Run_EvalStatus(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -98,7 +99,7 @@ func TestStatusCommand_Run_EvalStatus(t *testing.T) { func TestStatusCommand_Run_NodeStatus(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) // Start in dev mode so we get a node registration srv, client, url := testServer(t, true, func(c *agent.Config) { @@ -138,7 +139,7 @@ func TestStatusCommand_Run_NodeStatus(t *testing.T) { func TestStatusCommand_Run_AllocStatus(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -163,7 +164,7 @@ func TestStatusCommand_Run_AllocStatus(t *testing.T) { func TestStatusCommand_Run_DeploymentStatus(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -189,7 +190,7 @@ func TestStatusCommand_Run_DeploymentStatus(t *testing.T) { func TestStatusCommand_Run_NoPrefix(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -215,7 +216,7 @@ func TestStatusCommand_Run_NoPrefix(t *testing.T) { func TestStatusCommand_AutocompleteArgs(t *testing.T) { assert := 
assert.New(t) - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -237,7 +238,7 @@ func TestStatusCommand_AutocompleteArgs(t *testing.T) { } func TestStatusCommand_Run_HostNetwork(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() diff --git a/command/system_gc_test.go b/command/system_gc_test.go index 4ced96815..10e636e91 100644 --- a/command/system_gc_test.go +++ b/command/system_gc_test.go @@ -3,16 +3,17 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSystemGCCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SystemGCCommand{} } func TestSystemGCCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, _, url := testServer(t, true, nil) diff --git a/command/system_reconcile_summaries_test.go b/command/system_reconcile_summaries_test.go index ae50c299a..ca10e734b 100644 --- a/command/system_reconcile_summaries_test.go +++ b/command/system_reconcile_summaries_test.go @@ -3,16 +3,17 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSystemReconcileSummariesCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SystemReconcileSummariesCommand{} } func TestSystemReconcileSummariesCommand_Good(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a server srv, _, url := testServer(t, true, nil) diff --git a/command/system_reconcile_test.go b/command/system_reconcile_test.go index defb5cdc8..f1445e793 100644 --- a/command/system_reconcile_test.go +++ b/command/system_reconcile_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSystemReconcileCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SystemCommand{} } diff --git a/command/system_test.go b/command/system_test.go index fa4ca3ceb..44ae3e370 100644 --- a/command/system_test.go +++ b/command/system_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestSystemCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &SystemCommand{} } diff --git a/command/ui_test.go b/command/ui_test.go index aac4a1f69..8b0d049ae 100644 --- a/command/ui_test.go +++ b/command/ui_test.go @@ -5,12 +5,13 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/stretchr/testify/require" ) func TestCommand_Ui(t *testing.T) { - t.Parallel() + ci.Parallel(t) type testCaseSetupFn func(*testing.T) diff --git a/command/version_test.go b/command/version_test.go index 07af3a006..2a39e3bc5 100644 --- a/command/version_test.go +++ b/command/version_test.go @@ -3,10 +3,11 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" ) func TestVersionCommand_implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &VersionCommand{} } diff --git a/command/volume_register_test.go b/command/volume_register_test.go index b65b923cd..29bf025e5 100644 --- a/command/volume_register_test.go +++ b/command/volume_register_test.go @@ -5,11 +5,12 @@ import ( "github.com/hashicorp/hcl" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestVolumeDispatchParse(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := 
[]struct { hcl string @@ -43,7 +44,7 @@ rando = "bar" } func TestCSIVolumeDecode(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string diff --git a/command/volume_status_test.go b/command/volume_status_test.go index 313d57502..7eac9f0e0 100644 --- a/command/volume_status_test.go +++ b/command/volume_status_test.go @@ -3,6 +3,7 @@ package command import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" @@ -11,12 +12,12 @@ import ( ) func TestCSIVolumeStatusCommand_Implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var _ cli.Command = &VolumeStatusCommand{} } func TestCSIVolumeStatusCommand_Fails(t *testing.T) { - t.Parallel() + ci.Parallel(t) ui := cli.NewMockUi() cmd := &VolumeStatusCommand{Meta: Meta{Ui: ui}} @@ -30,7 +31,7 @@ func TestCSIVolumeStatusCommand_Fails(t *testing.T) { } func TestCSIVolumeStatusCommand_AutocompleteArgs(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() diff --git a/contributing/testing.md b/contributing/testing.md new file mode 100644 index 000000000..178273bf0 --- /dev/null +++ b/contributing/testing.md @@ -0,0 +1,24 @@ +# Writing Tests + +The Nomad repository strives to maintain comprehensive unit test coverage. Any new +features, bug fixes, or refactoring should include additional or updated test cases +demonstrating correct functionality. + +Each unit test should meet a few criteria: + +- Use testify + - Prefer using require.* functions + +- Undo any changes to the environment + - Environment variables that were set must be unset + - Scratch files/dirs must be removed (use t.TempDir) + - Consumed ports must be freed (e.g. TestServer.Cleanup, freeport.Return) + +- Able to run in parallel + - All package level Test* functions should start with ci.Parallel + - Always use dynamic scratch dirs, files + - Always get ports from helpers (TestServer, TestClient, TestAgent, freeport.Get) + +- Log control + - Logging must go through the testing.T (use helper/testlog.HCLogger) + - Avoid excessive logging in test cases - prefer failure messages \ No newline at end of file diff --git a/drivers/docker/config_test.go b/drivers/docker/config_test.go index b9deb9213..df237a440 100644 --- a/drivers/docker/config_test.go +++ b/drivers/docker/config_test.go @@ -3,12 +3,15 @@ package docker import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/pluginutils/hclutils" "github.com/hashicorp/nomad/plugins/drivers" "github.com/stretchr/testify/require" ) func TestConfig_ParseHCL(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string @@ -46,6 +49,8 @@ func TestConfig_ParseHCL(t *testing.T) { } func TestConfig_ParseJSON(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string input string @@ -113,6 +118,8 @@ func TestConfig_ParseJSON(t *testing.T) { } func TestConfig_PortMap_Deserialization(t *testing.T) { + ci.Parallel(t) + parser := hclutils.NewConfigParser(taskConfigSpec) expectedMap := map[string]int{ @@ -185,6 +192,8 @@ config { } func TestConfig_ParseAllHCL(t *testing.T) { + ci.Parallel(t) + cfgStr := ` config { image = "redis:3.2" @@ -493,6 +502,8 @@ config { // TestConfig_DriverConfig_GC asserts that gc is parsed // and populated with defaults as expected func TestConfig_DriverConfig_GC(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string config string @@ -598,6 +609,8 @@ func TestConfig_DriverConfig_GC(t *testing.T) { }
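Taken together, the criteria in the new contributing/testing.md map onto a small skeleton. The following is an illustrative sketch, not a test from this patch; the test name and file contents are invented:

    package command

    import (
        "os"
        "path/filepath"
        "testing"

        "github.com/hashicorp/nomad/ci"
        "github.com/stretchr/testify/require"
    )

    func TestExample_ScratchFile(t *testing.T) {
        ci.Parallel(t) // every package-level test starts with ci.Parallel

        dir := t.TempDir() // removed automatically, so nothing to undo

        path := filepath.Join(dir, "out.txt")
        require.NoError(t, os.WriteFile(path, []byte("ok"), 0o600))

        got, err := os.ReadFile(path)
        require.NoError(t, err) // require.* failure messages instead of logging
        require.Equal(t, "ok", string(got))
    }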
func TestConfig_InternalCapabilities(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string config string @@ -632,6 +645,8 @@ func TestConfig_InternalCapabilities(t *testing.T) { } func TestConfig_DriverConfig_InfraImagePullTimeout(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string config string @@ -659,6 +674,8 @@ func TestConfig_DriverConfig_InfraImagePullTimeout(t *testing.T) { } func TestConfig_DriverConfig_PullActivityTimeout(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string config string @@ -686,6 +703,8 @@ func TestConfig_DriverConfig_PullActivityTimeout(t *testing.T) { } func TestConfig_DriverConfig_AllowRuntimes(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string config string diff --git a/drivers/docker/coordinator_test.go b/drivers/docker/coordinator_test.go index eddda78bd..755c6b99e 100644 --- a/drivers/docker/coordinator_test.go +++ b/drivers/docker/coordinator_test.go @@ -8,6 +8,7 @@ import ( "time" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/testutil" @@ -55,7 +56,7 @@ func (m *mockImageClient) RemoveImage(id string) error { } func TestDockerCoordinator_ConcurrentPulls(t *testing.T) { - t.Parallel() + ci.Parallel(t) image := "foo" imageID := uuid.Generate() mapping := map[string]string{imageID: image} @@ -107,7 +108,7 @@ func TestDockerCoordinator_ConcurrentPulls(t *testing.T) { } func TestDockerCoordinator_Pull_Remove(t *testing.T) { - t.Parallel() + ci.Parallel(t) image := "foo" imageID := uuid.Generate() mapping := map[string]string{imageID: image} @@ -180,7 +181,7 @@ func TestDockerCoordinator_Pull_Remove(t *testing.T) { } func TestDockerCoordinator_Remove_Cancel(t *testing.T) { - t.Parallel() + ci.Parallel(t) image := "foo" imageID := uuid.Generate() mapping := map[string]string{imageID: image} @@ -229,7 +230,7 @@ func TestDockerCoordinator_Remove_Cancel(t *testing.T) { } func TestDockerCoordinator_No_Cleanup(t *testing.T) { - t.Parallel() + ci.Parallel(t) image := "foo" imageID := uuid.Generate() mapping := map[string]string{imageID: image} @@ -265,6 +266,7 @@ func TestDockerCoordinator_No_Cleanup(t *testing.T) { } func TestDockerCoordinator_Cleanup_HonorsCtx(t *testing.T) { + ci.Parallel(t) image1ID := uuid.Generate() image2ID := uuid.Generate() diff --git a/drivers/docker/docklog/docker_logger_test.go b/drivers/docker/docklog/docker_logger_test.go index 0397e4e98..611877f3d 100644 --- a/drivers/docker/docklog/docker_logger_test.go +++ b/drivers/docker/docklog/docker_logger_test.go @@ -9,6 +9,7 @@ import ( "time" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" ctu "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/testutil" @@ -34,9 +35,9 @@ func testContainerDetails() (image string, imageName string, imageTag string) { } func TestDockerLogger_Success(t *testing.T) { + ci.Parallel(t) ctu.DockerCompatible(t) - t.Parallel() require := require.New(t) containerImage, containerImageName, containerImageTag := testContainerDetails() @@ -115,9 +116,9 @@ func TestDockerLogger_Success(t *testing.T) { } func TestDockerLogger_Success_TTY(t *testing.T) { + ci.Parallel(t) ctu.DockerCompatible(t) - t.Parallel() require := require.New(t) containerImage, containerImageName, containerImageTag := testContainerDetails() @@ -212,9 +213,9 @@ func echoToContainer(t *testing.T, client 
*docker.Client, id string, line string } func TestDockerLogger_LoggingNotSupported(t *testing.T) { + ci.Parallel(t) ctu.DockerCompatible(t) - t.Parallel() require := require.New(t) containerImage, containerImageName, containerImageTag := testContainerDetails() @@ -303,6 +304,8 @@ func (*noopCloser) Close() error { } func TestNextBackoff(t *testing.T) { + ci.Parallel(t) + cases := []struct { currentBackoff float64 min float64 @@ -325,6 +328,8 @@ func TestNextBackoff(t *testing.T) { } func TestIsLoggingTerminalError(t *testing.T) { + ci.Parallel(t) + terminalErrs := []error{ errors.New("docker returned: configured logging driver does not support reading"), &docker.Error{ diff --git a/drivers/docker/driver_linux_test.go b/drivers/docker/driver_linux_test.go index ba79af839..006517b28 100644 --- a/drivers/docker/driver_linux_test.go +++ b/drivers/docker/driver_linux_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/freeport" @@ -17,6 +18,8 @@ import ( ) func TestDockerDriver_authFromHelper(t *testing.T) { + ci.Parallel(t) + dir, err := ioutil.TempDir("", "test-docker-driver_authfromhelper") require.NoError(t, err) defer os.RemoveAll(dir) @@ -47,9 +50,7 @@ func TestDockerDriver_authFromHelper(t *testing.T) { } func TestDockerDriver_PluginConfig_PidsLimit(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) dh := dockerDriverHarness(t, nil) driver := dh.Impl().(*Driver) @@ -72,9 +73,8 @@ func TestDockerDriver_PluginConfig_PidsLimit(t *testing.T) { } func TestDockerDriver_PidsLimit(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) + testutil.DockerCompatible(t) require := require.New(t) diff --git a/drivers/docker/driver_test.go b/drivers/docker/driver_test.go index a149da1b9..26cbf9916 100644 --- a/drivers/docker/driver_test.go +++ b/drivers/docker/driver_test.go @@ -17,6 +17,7 @@ import ( docker "github.com/fsouza/go-dockerclient" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/freeport" @@ -228,9 +229,7 @@ func newTestDockerClient(t *testing.T) *docker.Client { // If you want to checkout/revert those tests, please check commit: 41715b1860778aa80513391bd64abd721d768ab0 func TestDockerDriver_Start_Wait(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", busyboxLongRunningCmd) @@ -264,9 +263,7 @@ func TestDockerDriver_Start_Wait(t *testing.T) { } func TestDockerDriver_Start_WaitFinish(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", []string{"echo", "hello"}) @@ -307,9 +304,7 @@ func TestDockerDriver_Start_WaitFinish(t *testing.T) { // // See https://github.com/hashicorp/nomad/issues/3419 func TestDockerDriver_Start_StoppedContainer(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", []string{"sleep", "9001"}) @@ -368,9 +363,7 @@ func TestDockerDriver_Start_StoppedContainer(t *testing.T) { } func TestDockerDriver_Start_LoadImage(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", []string{"sh", "-c", "echo hello > $NOMAD_TASK_DIR/output"}) @@ -419,9 +412,7 @@ func 
TestDockerDriver_Start_LoadImage(t *testing.T) { // Tests that starting a task without an image fails func TestDockerDriver_Start_NoImage(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := TaskConfig{ @@ -448,9 +439,7 @@ func TestDockerDriver_Start_NoImage(t *testing.T) { } func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := TaskConfig{ @@ -486,9 +475,7 @@ func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) { } func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) // This test requires that the alloc dir be mounted into docker as a volume. // Because this cannot happen when docker is run remotely, e.g. when running // docker in a VM, we skip this when we detect Docker is being run remotely. @@ -549,9 +536,7 @@ func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) { } func TestDockerDriver_Start_Kill_Wait(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", busyboxLongRunningCmd) @@ -597,9 +582,7 @@ func TestDockerDriver_Start_Kill_Wait(t *testing.T) { } func TestDockerDriver_Start_KillTimeout(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { @@ -649,12 +632,10 @@ func TestDockerDriver_Start_KillTimeout(t *testing.T) { } func TestDockerDriver_StartN(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Windows Docker does not support SIGINT") } - if !tu.IsCI() { - t.Parallel() - } testutil.DockerCompatible(t) require := require.New(t) @@ -705,12 +686,10 @@ func TestDockerDriver_StartN(t *testing.T) { } func TestDockerDriver_StartNVersions(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Skipped on windows, we don't have image variants available") } - if !tu.IsCI() { - t.Parallel() - } testutil.DockerCompatible(t) require := require.New(t) @@ -776,9 +755,7 @@ func TestDockerDriver_StartNVersions(t *testing.T) { } func TestDockerDriver_Labels(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -807,9 +784,7 @@ func TestDockerDriver_Labels(t *testing.T) { } func TestDockerDriver_ExtraLabels(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -844,9 +819,7 @@ func TestDockerDriver_ExtraLabels(t *testing.T) { } func TestDockerDriver_LoggingConfiguration(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -873,9 +846,7 @@ func TestDockerDriver_LoggingConfiguration(t *testing.T) { } func TestDockerDriver_ForcePull(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -896,13 +867,10 @@ func TestDockerDriver_ForcePull(t *testing.T) { } func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("TODO: Skipped digest test on Windows") } - - if !tu.IsCI() { - t.Parallel() - } testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -925,12 +893,10 @@ func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) { } func TestDockerDriver_SecurityOptUnconfined(t *testing.T) { + ci.Parallel(t) if 
runtime.GOOS == "windows" { t.Skip("Windows does not support seccomp") } - if !tu.IsCI() { - t.Parallel() - } testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -951,13 +917,10 @@ func TestDockerDriver_SecurityOptUnconfined(t *testing.T) { } func TestDockerDriver_SecurityOptFromFile(t *testing.T) { - + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Windows does not support seccomp") } - if !tu.IsCI() { - t.Parallel() - } testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -976,9 +939,7 @@ func TestDockerDriver_SecurityOptFromFile(t *testing.T) { } func TestDockerDriver_Runtime(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -999,7 +960,7 @@ func TestDockerDriver_Runtime(t *testing.T) { } func TestDockerDriver_CreateContainerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1023,7 +984,7 @@ func TestDockerDriver_CreateContainerConfig(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1048,7 +1009,7 @@ func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) { - t.Parallel() + ci.Parallel(t) dh := dockerDriverHarness(t, nil) driver := dh.Impl().(*Driver) @@ -1087,7 +1048,7 @@ func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_User(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1105,7 +1066,7 @@ func TestDockerDriver_CreateContainerConfig_User(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1139,7 +1100,7 @@ func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -1218,7 +1179,7 @@ func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_Mounts(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1296,9 +1257,7 @@ func TestDockerDriver_CreateContainerConfig_Mounts(t *testing.T) { } func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testCases := []struct { description string gpuRuntimeSet bool @@ -1368,9 +1327,7 @@ func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) { } func TestDockerDriver_Capabilities(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Capabilities not supported on windows") @@ -1480,9 +1437,7 @@ func TestDockerDriver_Capabilities(t *testing.T) { } func TestDockerDriver_DNS(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) testutil.ExecCompatible(t) @@ -1527,9 +1482,7 @@ func TestDockerDriver_DNS(t *testing.T) { } func TestDockerDriver_Init(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Windows 
does not support init.") @@ -1552,9 +1505,7 @@ func TestDockerDriver_Init(t *testing.T) { } func TestDockerDriver_CPUSetCPUs(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Windows does not support CPUSetCPUs.") @@ -1599,9 +1550,7 @@ func TestDockerDriver_CPUSetCPUs(t *testing.T) { } func TestDockerDriver_MemoryHardLimit(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Windows does not support MemoryReservation") @@ -1625,9 +1574,7 @@ func TestDockerDriver_MemoryHardLimit(t *testing.T) { } func TestDockerDriver_MACAddress(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Windows docker does not support setting MacAddress") @@ -1649,9 +1596,7 @@ func TestDockerDriver_MACAddress(t *testing.T) { } func TestDockerWorkDir(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -1678,9 +1623,7 @@ func inSlice(needle string, haystack []string) bool { } func TestDockerDriver_PortsNoMap(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, _, ports := dockerTask(t) @@ -1722,9 +1665,7 @@ func TestDockerDriver_PortsNoMap(t *testing.T) { } func TestDockerDriver_PortsMapping(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -1774,7 +1715,7 @@ func TestDockerDriver_PortsMapping(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_Ports(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1817,7 +1758,7 @@ func TestDockerDriver_CreateContainerConfig_Ports(t *testing.T) { } func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, cfg, ports := dockerTask(t) defer freeport.Return(ports) @@ -1853,9 +1794,7 @@ func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) { } func TestDockerDriver_CleanupContainer(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -1893,6 +1832,7 @@ func TestDockerDriver_CleanupContainer(t *testing.T) { } func TestDockerDriver_EnableImageGC(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -1959,6 +1899,7 @@ func TestDockerDriver_EnableImageGC(t *testing.T) { } func TestDockerDriver_DisableImageGC(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -2021,6 +1962,7 @@ func TestDockerDriver_DisableImageGC(t *testing.T) { } func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -2087,9 +2029,7 @@ func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) { } func TestDockerDriver_Stats(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -2162,9 +2102,7 @@ func setupDockerVolumes(t *testing.T, cfg map[string]interface{}, hostpath strin } func TestDockerDriver_VolumesDisabled(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) cfg := map[string]interface{}{ @@ -2234,9 +2172,7 
@@ func TestDockerDriver_VolumesDisabled(t *testing.T) { } func TestDockerDriver_VolumesEnabled(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) cfg := map[string]interface{}{ @@ -2279,9 +2215,7 @@ func TestDockerDriver_VolumesEnabled(t *testing.T) { } func TestDockerDriver_Mounts(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) goodMount := DockerMount{ @@ -2351,9 +2285,7 @@ func TestDockerDriver_Mounts(t *testing.T) { } func TestDockerDriver_AuthConfiguration(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) path := "./test-resources/docker/auth.json" @@ -2402,9 +2334,7 @@ func TestDockerDriver_AuthConfiguration(t *testing.T) { } func TestDockerDriver_AuthFromTaskConfig(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) cases := []struct { Auth DockerAuth @@ -2456,9 +2386,7 @@ func TestDockerDriver_AuthFromTaskConfig(t *testing.T) { } func TestDockerDriver_OOMKilled(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { @@ -2507,9 +2435,7 @@ func TestDockerDriver_OOMKilled(t *testing.T) { } func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) brokenConfigs := []DockerDevice{ @@ -2547,9 +2473,7 @@ func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) { } func TestDockerDriver_Device_Success(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS != "linux" { @@ -2587,9 +2511,7 @@ func TestDockerDriver_Device_Success(t *testing.T) { } func TestDockerDriver_Entrypoint(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) entrypoint := []string{"sh", "-c"} @@ -2614,9 +2536,7 @@ func TestDockerDriver_Entrypoint(t *testing.T) { } func TestDockerDriver_ReadonlyRootfs(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) if runtime.GOOS == "windows" { @@ -2658,9 +2578,7 @@ func (fakeDockerClient) RemoveContainer(opts docker.RemoveContainerOptions) erro // TestDockerDriver_VolumeError asserts volume related errors when creating a // container are recoverable. 
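For context on the assertion above: Nomad code conventionally distinguishes retryable from terminal failures by wrapping errors with structs.NewRecoverableError, which the client consults when deciding whether to retry. The sketch below shows the shape only; the wrapVolumeErr helper and its substring check are illustrative, not the driver's actual logic:

    package docker

    import (
        "strings"

        "github.com/hashicorp/nomad/nomad/structs"
    )

    // wrapVolumeErr tags Docker volume failures as recoverable so container
    // creation is retried rather than failing the task outright.
    func wrapVolumeErr(err error) error {
        if err != nil && strings.Contains(err.Error(), "volume") {
            return structs.NewRecoverableError(err, true)
        }
        return err
    }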
func TestDockerDriver_VolumeError(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) // setup _, cfg, ports := dockerTask(t) @@ -2673,9 +2591,7 @@ func TestDockerDriver_VolumeError(t *testing.T) { } func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) expectedPrefix := "2001:db8:1::242:ac11" @@ -2725,6 +2641,8 @@ func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) { } func TestParseDockerImage(t *testing.T) { + ci.Parallel(t) + tests := []struct { Image string Repo string @@ -2745,6 +2663,7 @@ func TestParseDockerImage(t *testing.T) { } func TestDockerImageRef(t *testing.T) { + ci.Parallel(t) tests := []struct { Image string Repo string @@ -2781,9 +2700,7 @@ func waitForExist(t *testing.T, client *docker.Client, containerID string) { // and startContainers functions are idempotent, as we have some retry // logic there without ensureing we delete/destroy containers func TestDockerDriver_CreationIdempotent(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -2852,7 +2769,7 @@ func TestDockerDriver_CreationIdempotent(t *testing.T) { // TestDockerDriver_CreateContainerConfig_CPUHardLimit asserts that a default // CPU quota and period are set when cpu_hard_limit = true. func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) { - t.Parallel() + ci.Parallel(t) task, _, ports := dockerTask(t) defer freeport.Return(ports) @@ -2878,7 +2795,7 @@ func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) { } func TestDockerDriver_memoryLimits(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string @@ -2934,7 +2851,7 @@ func TestDockerDriver_memoryLimits(t *testing.T) { } func TestDockerDriver_parseSignal(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []struct { name string @@ -2983,11 +2900,12 @@ func TestDockerDriver_parseSignal(t *testing.T) { // This test asserts that Nomad isn't overriding the STOPSIGNAL in a Dockerfile func TestDockerDriver_StopSignal(t *testing.T) { + ci.Parallel(t) + testutil.DockerCompatible(t) if runtime.GOOS == "windows" { t.Skip("Skipped on windows, we don't have image variants available") } - testutil.DockerCompatible(t) cases := []struct { name string variant string diff --git a/drivers/docker/driver_unix_test.go b/drivers/docker/driver_unix_test.go index 701ed8985..fb4fbba25 100644 --- a/drivers/docker/driver_unix_test.go +++ b/drivers/docker/driver_unix_test.go @@ -16,6 +16,7 @@ import ( "time" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/freeport" @@ -28,10 +29,9 @@ import ( ) func TestDockerDriver_User(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) + task, cfg, ports := dockerTask(t) defer freeport.Return(ports) task.User = "alice" @@ -56,10 +56,9 @@ func TestDockerDriver_User(t *testing.T) { } func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) + require := require.New(t) // Because go-dockerclient doesn't provide api for query network aliases, just check that @@ -105,9 +104,7 @@ func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) { } func TestDockerDriver_NetworkMode_Host(t *testing.T) { - if !tu.IsCI() { - 
t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) expected := "host" @@ -149,9 +146,7 @@ func TestDockerDriver_NetworkMode_Host(t *testing.T) { } func TestDockerDriver_CPUCFSPeriod(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -172,7 +167,9 @@ func TestDockerDriver_CPUCFSPeriod(t *testing.T) { } func TestDockerDriver_Sysctl_Ulimit(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) + task, cfg, ports := dockerTask(t) defer freeport.Return(ports) expectedUlimits := map[string]string{ @@ -219,7 +216,9 @@ func TestDockerDriver_Sysctl_Ulimit(t *testing.T) { } func TestDockerDriver_Sysctl_Ulimit_Errors(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) + brokenConfigs := []map[string]string{ { "nofile": "", @@ -262,8 +261,7 @@ func TestDockerDriver_Sysctl_Ulimit_Errors(t *testing.T) { // negative case for non existent mount paths. We should write a similar test // for windows. func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) { - t.Parallel() - + ci.Parallel(t) testutil.DockerCompatible(t) allocDir := "/tmp/nomad/alloc-dir" @@ -398,7 +396,7 @@ func TestDockerDriver_BindMountsHonorVolumesEnabledFlag(t *testing.T) { // an absolute path, changing path expansion behaviour. A similar test should // be written for windows. func TestDockerDriver_MountsSerialization(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.DockerCompatible(t) allocDir := "/tmp/nomad/alloc-dir" @@ -567,7 +565,7 @@ func TestDockerDriver_MountsSerialization(t *testing.T) { // and present in docker.CreateContainerOptions, and that it is appended // to any devices/mounts a user sets in the task config. func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.DockerCompatible(t) task, cfg, ports := dockerTask(t) @@ -667,6 +665,7 @@ func TestDockerDriver_CreateContainerConfig_MountsCombined(t *testing.T) { // TestDockerDriver_Cleanup ensures Cleanup removes only downloaded images. 
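"Removes only downloaded images" rests on the reference counting exercised by the coordinator tests earlier in this diff: an image pulled for a task is tracked, and cleanup may remove it only once no task still uses it. A minimal sketch of that bookkeeping, with invented names and no claim about the real dockerCoordinator internals:

    package docker

    import "sync"

    // imageRefs counts live tasks per image ID; an image becomes eligible
    // for removal only when its count drops to zero.
    type imageRefs struct {
        mu   sync.Mutex
        refs map[string]int
    }

    func (r *imageRefs) acquire(imageID string) {
        r.mu.Lock()
        defer r.mu.Unlock()
        r.refs[imageID]++
    }

    // release reports whether the image is now unused and safe to remove.
    func (r *imageRefs) release(imageID string) bool {
        r.mu.Lock()
        defer r.mu.Unlock()
        r.refs[imageID]--
        return r.refs[imageID] <= 0
    }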
// Doesn't run on windows because it requires an image variant func TestDockerDriver_Cleanup(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) // using a small image and an specific point release to avoid accidental conflicts with other tasks @@ -711,9 +710,7 @@ func TestDockerDriver_Cleanup(t *testing.T) { // Tests that images prefixed with "https://" are supported func TestDockerDriver_Start_Image_HTTPS(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := TaskConfig{ @@ -788,9 +785,7 @@ func copyFile(src, dst string, t *testing.T) { } func TestDocker_ExecTaskStreaming(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) taskCfg := newTaskConfig("", []string{"/bin/sleep", "1000"}) @@ -818,10 +813,9 @@ func TestDocker_ExecTaskStreaming(t *testing.T) { // Tests that a given DNSConfig properly configures dns func Test_dnsConfig(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) + require := require.New(t) harness := dockerDriverHarness(t, nil) defer harness.Kill() diff --git a/drivers/docker/fingerprint_test.go b/drivers/docker/fingerprint_test.go index 52389dfb2..b6303164d 100644 --- a/drivers/docker/fingerprint_test.go +++ b/drivers/docker/fingerprint_test.go @@ -4,10 +4,10 @@ import ( "context" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/plugins/drivers" - tu "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" ) @@ -16,9 +16,7 @@ import ( // // In Linux CI and AppVeyor Windows environment, it should be enabled. func TestDockerDriver_FingerprintHealth(t *testing.T) { - if !tu.IsCI() { - t.Parallel() - } + ci.Parallel(t) testutil.DockerCompatible(t) ctx, cancel := context.WithCancel(context.Background()) diff --git a/drivers/docker/network_test.go b/drivers/docker/network_test.go index 9fada5bf4..4b1ccd517 100644 --- a/drivers/docker/network_test.go +++ b/drivers/docker/network_test.go @@ -1,14 +1,16 @@ package docker import ( - "github.com/hashicorp/nomad/plugins/drivers" "testing" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/plugins/drivers" "github.com/stretchr/testify/assert" ) func TestDriver_createSandboxContainerConfig(t *testing.T) { + ci.Parallel(t) testCases := []struct { inputAllocID string inputNetworkCreateRequest *drivers.NetworkCreateRequest diff --git a/drivers/docker/ports_test.go b/drivers/docker/ports_test.go index 2500f19ff..ca7e13573 100644 --- a/drivers/docker/ports_test.go +++ b/drivers/docker/ports_test.go @@ -3,12 +3,14 @@ package docker import ( "testing" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" + "github.com/stretchr/testify/require" ) func TestPublishedPorts_add(t *testing.T) { + ci.Parallel(t) + p := newPublishedPorts(testlog.HCLogger(t)) p.add("label", "10.0.0.1", 1234, 80) p.add("label", "10.0.0.1", 5678, 80) diff --git a/drivers/docker/progress_test.go b/drivers/docker/progress_test.go index 7f5b5dc46..4df095415 100644 --- a/drivers/docker/progress_test.go +++ b/drivers/docker/progress_test.go @@ -4,10 +4,12 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func Test_DockerImageProgressManager(t *testing.T) { + ci.Parallel(t) pm := &imageProgressManager{ imageProgress: 
&imageProgress{ diff --git a/drivers/docker/reconciler_test.go b/drivers/docker/reconciler_test.go index ff5284cf1..dc6e45e27 100644 --- a/drivers/docker/reconciler_test.go +++ b/drivers/docker/reconciler_test.go @@ -7,6 +7,7 @@ import ( "time" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/freeport" "github.com/hashicorp/nomad/helper/uuid" @@ -31,6 +32,8 @@ func fakeContainerList(t *testing.T) (nomadContainer, nonNomadContainer docker.A } func Test_HasMount(t *testing.T) { + ci.Parallel(t) + nomadContainer, nonNomadContainer := fakeContainerList(t) require.True(t, hasMount(nomadContainer, "/alloc")) @@ -45,6 +48,8 @@ func Test_HasMount(t *testing.T) { } func Test_HasNomadName(t *testing.T) { + ci.Parallel(t) + nomadContainer, nonNomadContainer := fakeContainerList(t) require.True(t, hasNomadName(nomadContainer)) @@ -54,6 +59,7 @@ func Test_HasNomadName(t *testing.T) { // TestDanglingContainerRemoval asserts containers without corresponding tasks // are removed after the creation grace period. func TestDanglingContainerRemoval(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) // start two containers: one tracked nomad container, and one unrelated container @@ -157,6 +163,7 @@ func TestDanglingContainerRemoval(t *testing.T) { // TestDanglingContainerRemoval_Stopped asserts stopped containers without // corresponding tasks are not removed even if after creation grace period. func TestDanglingContainerRemoval_Stopped(t *testing.T) { + ci.Parallel(t) testutil.DockerCompatible(t) _, cfg, ports := dockerTask(t) diff --git a/drivers/docker/stats_test.go b/drivers/docker/stats_test.go index 4ae932bf4..f17a80c84 100644 --- a/drivers/docker/stats_test.go +++ b/drivers/docker/stats_test.go @@ -7,14 +7,15 @@ import ( "time" docker "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/stretchr/testify/require" ) func TestDriver_DockerStatsCollector(t *testing.T) { - t.Parallel() - + ci.Parallel(t) require := require.New(t) + src := make(chan *docker.Stats) defer close(src) dst, recvCh := newStatsChanPipe() @@ -69,7 +70,7 @@ func TestDriver_DockerStatsCollector(t *testing.T) { // TestDriver_DockerUsageSender asserts that the TaskResourceUsage chan wrapper // supports closing and sending on a chan from concurrent goroutines. 
func TestDriver_DockerUsageSender(t *testing.T) { - t.Parallel() + ci.Parallel(t) // sample payload res := &cstructs.TaskResourceUsage{} diff --git a/drivers/docker/utils_test.go b/drivers/docker/utils_test.go index c6ae1995f..c99cf69c9 100644 --- a/drivers/docker/utils_test.go +++ b/drivers/docker/utils_test.go @@ -3,10 +3,12 @@ package docker import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestIsParentPath(t *testing.T) { + ci.Parallel(t) require.True(t, isParentPath("/a/b/c", "/a/b/c")) require.True(t, isParentPath("/a/b/c", "/a/b/c/d")) require.True(t, isParentPath("/a/b/c", "/a/b/c/d/e")) @@ -18,6 +20,7 @@ func TestIsParentPath(t *testing.T) { } func TestParseVolumeSpec_Linux(t *testing.T) { + ci.Parallel(t) validCases := []struct { name string bindSpec string diff --git a/drivers/docker/utils_unix_test.go b/drivers/docker/utils_unix_test.go index 29f526964..e53c72bec 100644 --- a/drivers/docker/utils_unix_test.go +++ b/drivers/docker/utils_unix_test.go @@ -7,10 +7,13 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestValidateCgroupPermission(t *testing.T) { + ci.Parallel(t) + positiveCases := []string{ "r", "rw", @@ -40,6 +43,8 @@ func TestValidateCgroupPermission(t *testing.T) { } func TestExpandPath(t *testing.T) { + ci.Parallel(t) + cases := []struct { base string target string diff --git a/drivers/exec/driver_test.go b/drivers/exec/driver_test.go index 2188dc8be..f5c8d9a2b 100644 --- a/drivers/exec/driver_test.go +++ b/drivers/exec/driver_test.go @@ -16,6 +16,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" ctestutils "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/drivers/shared/executor" "github.com/hashicorp/nomad/helper/pluginutils/hclutils" @@ -52,9 +53,7 @@ var testResources = &drivers.Resources{ } func TestExecDriver_Fingerprint_NonLinux(t *testing.T) { - if !testutil.IsCI() { - t.Parallel() - } + ci.Parallel(t) require := require.New(t) if runtime.GOOS == "linux" { t.Skip("Test only available not on Linux") @@ -77,7 +76,7 @@ func TestExecDriver_Fingerprint_NonLinux(t *testing.T) { } func TestExecDriver_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -100,7 +99,7 @@ func TestExecDriver_Fingerprint(t *testing.T) { } func TestExecDriver_StartWait(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -135,7 +134,7 @@ func TestExecDriver_StartWait(t *testing.T) { } func TestExecDriver_StartWaitStopKill(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -199,7 +198,7 @@ func TestExecDriver_StartWaitStopKill(t *testing.T) { } func TestExecDriver_StartWaitRecover(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -274,7 +273,7 @@ func TestExecDriver_StartWaitRecover(t *testing.T) { // TestExecDriver_NoOrphans asserts that when the main // task dies, the orphans in the PID namespaces are killed by the kernel func TestExecDriver_NoOrphans(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) ctestutils.ExecCompatible(t) @@ -390,7 +389,7 @@ func TestExecDriver_NoOrphans(t *testing.T) { } func TestExecDriver_Stats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -436,7 +435,7 @@ func TestExecDriver_Stats(t *testing.T) { } func 
TestExecDriver_Start_Wait_AllocDir(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -488,7 +487,7 @@ func TestExecDriver_Start_Wait_AllocDir(t *testing.T) { } func TestExecDriver_User(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -525,7 +524,7 @@ func TestExecDriver_User(t *testing.T) { // TestExecDriver_HandlerExec ensures the exec driver's handle properly // executes commands inside the container. func TestExecDriver_HandlerExec(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -609,7 +608,7 @@ func TestExecDriver_HandlerExec(t *testing.T) { } func TestExecDriver_DevicesAndMounts(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -705,6 +704,8 @@ touch: cannot touch '/tmp/task-path-ro/testfile-from-ro': Read-only file system` } func TestConfig_ParseAllHCL(t *testing.T) { + ci.Parallel(t) + cfgStr := ` config { command = "/bin/bash" @@ -723,7 +724,7 @@ config { } func TestExecDriver_NoPivotRoot(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) ctestutils.ExecCompatible(t) @@ -764,6 +765,7 @@ func TestExecDriver_NoPivotRoot(t *testing.T) { } func TestDriver_Config_validate(t *testing.T) { + ci.Parallel(t) t.Run("pid/ipc", func(t *testing.T) { for _, tc := range []struct { pidMode, ipcMode string @@ -804,6 +806,7 @@ func TestDriver_Config_validate(t *testing.T) { } func TestDriver_TaskConfig_validate(t *testing.T) { + ci.Parallel(t) t.Run("pid/ipc", func(t *testing.T) { for _, tc := range []struct { pidMode, ipcMode string diff --git a/drivers/exec/driver_unix_test.go b/drivers/exec/driver_unix_test.go index a9da57c02..6d62902f8 100644 --- a/drivers/exec/driver_unix_test.go +++ b/drivers/exec/driver_unix_test.go @@ -10,22 +10,22 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" + ctestutils "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/drivers/shared/capabilities" "github.com/hashicorp/nomad/drivers/shared/executor" - basePlug "github.com/hashicorp/nomad/plugins/base" - "github.com/stretchr/testify/require" - "golang.org/x/sys/unix" - - ctestutils "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" + basePlug "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/drivers" dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils" "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/require" + "golang.org/x/sys/unix" ) func TestExecDriver_StartWaitStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctestutils.ExecCompatible(t) @@ -87,7 +87,7 @@ func TestExecDriver_StartWaitStop(t *testing.T) { } func TestExec_ExecTaskStreaming(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -121,7 +121,7 @@ func TestExec_ExecTaskStreaming(t *testing.T) { // Tests that a given DNSConfig properly configures dns func TestExec_dnsConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctestutils.RequireRoot(t) ctestutils.ExecCompatible(t) require := require.New(t) @@ -181,6 +181,7 @@ func TestExec_dnsConfig(t *testing.T) { } func TestExecDriver_Capabilities(t *testing.T) { + ci.Parallel(t) ctestutils.ExecCompatible(t) task := &drivers.TaskConfig{ diff --git a/drivers/java/driver_test.go 
b/drivers/java/driver_test.go index e407e3c19..63cb4c120 100644 --- a/drivers/java/driver_test.go +++ b/drivers/java/driver_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils" ctestutil "github.com/hashicorp/nomad/client/testutil" @@ -33,10 +34,8 @@ func javaCompatible(t *testing.T) { } func TestJavaDriver_Fingerprint(t *testing.T) { + ci.Parallel(t) javaCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -58,10 +57,8 @@ func TestJavaDriver_Fingerprint(t *testing.T) { } func TestJavaDriver_Jar_Start_Wait(t *testing.T) { + ci.Parallel(t) javaCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -101,10 +98,8 @@ func TestJavaDriver_Jar_Start_Wait(t *testing.T) { } func TestJavaDriver_Jar_Stop_Wait(t *testing.T) { + ci.Parallel(t) javaCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -165,10 +160,8 @@ func TestJavaDriver_Jar_Stop_Wait(t *testing.T) { } func TestJavaDriver_Class_Start_Wait(t *testing.T) { + ci.Parallel(t) javaCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -207,6 +200,8 @@ func TestJavaDriver_Class_Start_Wait(t *testing.T) { } func TestJavaCmdArgs(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string cfg TaskConfig @@ -256,10 +251,8 @@ func TestJavaCmdArgs(t *testing.T) { } func TestJavaDriver_ExecTaskStreaming(t *testing.T) { + ci.Parallel(t) javaCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -335,6 +328,8 @@ func copyFile(src, dst string, t *testing.T) { } func TestConfig_ParseAllHCL(t *testing.T) { + ci.Parallel(t) + cfgStr := ` config { class = "java.main" @@ -360,7 +355,7 @@ config { // Tests that a given DNSConfig properly configures dns func Test_dnsConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctestutil.RequireRoot(t) javaCompatible(t) require := require.New(t) @@ -415,6 +410,8 @@ func Test_dnsConfig(t *testing.T) { } func TestDriver_Config_validate(t *testing.T) { + ci.Parallel(t) + t.Run("pid/ipc", func(t *testing.T) { for _, tc := range []struct { pidMode, ipcMode string @@ -455,6 +452,8 @@ func TestDriver_Config_validate(t *testing.T) { } func TestDriver_TaskConfig_validate(t *testing.T) { + ci.Parallel(t) + t.Run("pid/ipc", func(t *testing.T) { for _, tc := range []struct { pidMode, ipcMode string diff --git a/drivers/java/utils_test.go b/drivers/java/utils_test.go index 8f2d7ad5b..f07acd4df 100644 --- a/drivers/java/utils_test.go +++ b/drivers/java/utils_test.go @@ -5,10 +5,13 @@ import ( "runtime" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestDriver_parseJavaVersionOutput(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string output string @@ -88,6 +91,7 @@ func TestDriver_parseJavaVersionOutput(t *testing.T) { } func TestDriver_javaVersionInfo(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("test requires bash to run") } @@ -114,6 +118,7 @@ func TestDriver_javaVersionInfo(t *testing.T) { } func TestDriver_javaVersionInfo_UnexpectedOutput(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("test requires 
bash to run") } @@ -136,6 +141,7 @@ func TestDriver_javaVersionInfo_UnexpectedOutput(t *testing.T) { } func TestDriver_javaVersionInfo_JavaVersionFails(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("test requires bash to run") } diff --git a/drivers/mock/utils_test.go b/drivers/mock/utils_test.go index 42458c978..8cd8cd471 100644 --- a/drivers/mock/utils_test.go +++ b/drivers/mock/utils_test.go @@ -4,10 +4,13 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestParseDuration(t *testing.T) { + ci.Parallel(t) + t.Run("valid case", func(t *testing.T) { v, err := parseDuration("10m") require.NoError(t, err) diff --git a/drivers/qemu/driver_test.go b/drivers/qemu/driver_test.go index 8777c7de5..5d2b71538 100644 --- a/drivers/qemu/driver_test.go +++ b/drivers/qemu/driver_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" ctestutil "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/pluginutils/hclutils" "github.com/hashicorp/nomad/helper/testlog" @@ -26,10 +27,8 @@ import ( // Verifies starting a qemu image and stopping it func TestQemuDriver_Start_Wait_Stop(t *testing.T) { + ci.Parallel(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -91,10 +90,8 @@ func TestQemuDriver_Start_Wait_Stop(t *testing.T) { // Verifies monitor socket path for old qemu func TestQemuDriver_GetMonitorPathOldQemu(t *testing.T) { + ci.Parallel(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -149,10 +146,8 @@ func TestQemuDriver_GetMonitorPathOldQemu(t *testing.T) { // Verifies monitor socket path for new qemu version func TestQemuDriver_GetMonitorPathNewQemu(t *testing.T) { + ci.Parallel(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -232,10 +227,8 @@ func copyFile(src, dst string, t *testing.T) { // Verifies starting a qemu image and stopping it func TestQemuDriver_User(t *testing.T) { + ci.Parallel(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -292,10 +285,8 @@ func TestQemuDriver_User(t *testing.T) { // Verifies getting resource usage stats // TODO(preetha) this test needs random sleeps to pass func TestQemuDriver_Stats(t *testing.T) { + ci.Parallel(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -371,12 +362,10 @@ func TestQemuDriver_Stats(t *testing.T) { } func TestQemuDriver_Fingerprint(t *testing.T) { + ci.Parallel(t) require := require.New(t) ctestutil.QemuCompatible(t) - if !testutil.IsCI() { - t.Parallel() - } ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -396,6 +385,8 @@ func TestQemuDriver_Fingerprint(t *testing.T) { } func TestConfig_ParseAllHCL(t *testing.T) { + ci.Parallel(t) + cfgStr := ` config { image_path = "/tmp/image_path" @@ -426,6 +417,8 @@ config { } func TestIsAllowedImagePath(t *testing.T) { + ci.Parallel(t) + allowedPaths := []string{"/tmp", "/opt/qemu"} allocDir := "/opt/nomad/some-alloc-dir" @@ -455,7 +448,8 @@ func TestIsAllowedImagePath(t *testing.T) { } func 
TestArgsAllowList(t *testing.T) { - + ci.Parallel(t) + pluginConfigAllowList := []string{"-drive", "-net", "-snapshot"} validArgs := [][]string{ diff --git a/drivers/rawexec/driver_test.go b/drivers/rawexec/driver_test.go index d6bf2f0bf..4b7635098 100644 --- a/drivers/rawexec/driver_test.go +++ b/drivers/rawexec/driver_test.go @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" ctestutil "github.com/hashicorp/nomad/client/testutil" "github.com/hashicorp/nomad/helper/pluginutils/hclutils" "github.com/hashicorp/nomad/helper/testlog" @@ -42,7 +43,7 @@ func newEnabledRawExecDriver(t *testing.T) *Driver { } func TestRawExecDriver_SetConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -80,7 +81,7 @@ func TestRawExecDriver_SetConfig(t *testing.T) { } func TestRawExecDriver_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) fingerprintTest := func(config *Config, expected *drivers.Fingerprint) func(t *testing.T) { return func(t *testing.T) { @@ -142,7 +143,7 @@ func TestRawExecDriver_Fingerprint(t *testing.T) { } func TestRawExecDriver_StartWait(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := newEnabledRawExecDriver(t) @@ -184,7 +185,7 @@ func TestRawExecDriver_StartWait(t *testing.T) { } func TestRawExecDriver_StartWaitRecoverWaitStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := newEnabledRawExecDriver(t) @@ -267,7 +268,7 @@ func TestRawExecDriver_StartWaitRecoverWaitStop(t *testing.T) { } func TestRawExecDriver_Start_Wait_AllocDir(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := newEnabledRawExecDriver(t) @@ -320,8 +321,8 @@ func TestRawExecDriver_Start_Wait_AllocDir(t *testing.T) { // processes cleanup of the children would not be possible. Thus the test // asserts that the processes get killed properly when using cgroups. 
func TestRawExecDriver_Start_Kill_Wait_Cgroup(t *testing.T) { + ci.Parallel(t) ctestutil.ExecCompatible(t) - t.Parallel() require := require.New(t) pidFile := "pid" @@ -412,7 +413,7 @@ func TestRawExecDriver_Start_Kill_Wait_Cgroup(t *testing.T) { } func TestRawExecDriver_Exec(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := newEnabledRawExecDriver(t) @@ -467,6 +468,8 @@ func TestRawExecDriver_Exec(t *testing.T) { } func TestConfig_ParseAllHCL(t *testing.T) { + ci.Parallel(t) + cfgStr := ` config { command = "/bin/bash" @@ -485,7 +488,7 @@ config { } func TestRawExecDriver_Disabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := newEnabledRawExecDriver(t) diff --git a/drivers/rawexec/driver_unix_test.go b/drivers/rawexec/driver_unix_test.go index 9d6d6d70a..bf7e0c4e2 100644 --- a/drivers/rawexec/driver_unix_test.go +++ b/drivers/rawexec/driver_unix_test.go @@ -5,19 +5,19 @@ package rawexec import ( "context" + "fmt" + "io/ioutil" "os" + "path/filepath" "regexp" "runtime" "strconv" + "strings" "syscall" "testing" - - "fmt" - "io/ioutil" - "path/filepath" - "strings" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testtask" "github.com/hashicorp/nomad/helper/uuid" basePlug "github.com/hashicorp/nomad/plugins/base" @@ -29,7 +29,7 @@ import ( ) func TestRawExecDriver_User(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" { t.Skip("Linux only test") } @@ -61,7 +61,7 @@ func TestRawExecDriver_User(t *testing.T) { } func TestRawExecDriver_Signal(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" { t.Skip("Linux only test") } @@ -135,7 +135,7 @@ done } func TestRawExecDriver_StartWaitStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) d := newEnabledRawExecDriver(t) @@ -204,7 +204,7 @@ func TestRawExecDriver_StartWaitStop(t *testing.T) { // TestRawExecDriver_DestroyKillsAll asserts that when TaskDestroy is called all // task processes are cleaned up. 
func TestRawExecDriver_DestroyKillsAll(t *testing.T) { - t.Parallel() + ci.Parallel(t) // This only works reliably with cgroup PID tracking, happens in linux only if runtime.GOOS != "linux" { @@ -307,7 +307,7 @@ func TestRawExecDriver_DestroyKillsAll(t *testing.T) { } func TestRawExec_ExecTaskStreaming(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS == "darwin" { t.Skip("skip running exec tasks on darwin as darwin has restrictions on starting tty shells") } @@ -341,7 +341,7 @@ func TestRawExec_ExecTaskStreaming(t *testing.T) { } func TestRawExec_ExecTaskStreaming_User(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" { t.Skip("skip, requires running on Linux for testing custom user setting") } @@ -380,7 +380,7 @@ func TestRawExec_ExecTaskStreaming_User(t *testing.T) { } func TestRawExecDriver_NoCgroup(t *testing.T) { - t.Parallel() + ci.Parallel(t) if runtime.GOOS != "linux" { t.Skip("Linux only test") } diff --git a/drivers/shared/capabilities/defaults_test.go b/drivers/shared/capabilities/defaults_test.go index 7fd03513e..4b4e238cb 100644 --- a/drivers/shared/capabilities/defaults_test.go +++ b/drivers/shared/capabilities/defaults_test.go @@ -5,10 +5,13 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestSet_NomadDefaults(t *testing.T) { + ci.Parallel(t) + result := NomadDefaults() require.Len(t, result.Slice(false), 13) defaults := strings.ToLower(HCLSpecLiteral) @@ -18,12 +21,16 @@ func TestSet_NomadDefaults(t *testing.T) { } func TestSet_DockerDefaults(t *testing.T) { + ci.Parallel(t) + result := DockerDefaults() require.Len(t, result.Slice(false), 14) require.Contains(t, result.String(), "net_raw") } func TestCaps_Calculate(t *testing.T) { + ci.Parallel(t) + for _, tc := range []struct { name string @@ -149,6 +156,8 @@ func TestCaps_Calculate(t *testing.T) { } func TestCaps_Delta(t *testing.T) { + ci.Parallel(t) + for _, tc := range []struct { name string diff --git a/drivers/shared/capabilities/set_test.go b/drivers/shared/capabilities/set_test.go index 2134719f2..b6cbaf2d2 100644 --- a/drivers/shared/capabilities/set_test.go +++ b/drivers/shared/capabilities/set_test.go @@ -3,11 +3,12 @@ package capabilities import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestSet_Empty(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { result := New(nil).Empty() @@ -26,7 +27,7 @@ func TestSet_Empty(t *testing.T) { } func TestSet_New(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("duplicates", func(t *testing.T) { result := New([]string{"chown", "sys_time", "chown"}) @@ -46,7 +47,7 @@ func TestSet_New(t *testing.T) { } func TestSet_Slice(t *testing.T) { - t.Parallel() + ci.Parallel(t) exp := []string{"chown", "net_raw", "sys_time"} @@ -67,7 +68,7 @@ func TestSet_Slice(t *testing.T) { } func TestSet_String(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("empty", func(t *testing.T) { result := New(nil).String() @@ -83,7 +84,7 @@ func TestSet_String(t *testing.T) { } func TestSet_Add(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("add one", func(t *testing.T) { s := New([]string{"chown", "net_raw"}) @@ -114,7 +115,7 @@ func TestSet_Add(t *testing.T) { } func TestSet_Remove(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("remove one", func(t *testing.T) { s := New([]string{"af_net", "chown", "net_raw", "seteuid", "sys_time"}) @@ -137,7 +138,7 @@ func TestSet_Remove(t *testing.T) { } func 
TestSet_Difference(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("a is empty", func(t *testing.T) { a := New(nil) @@ -162,7 +163,7 @@ func TestSet_Difference(t *testing.T) { } func TestSet_Intersect(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("empty", func(t *testing.T) { a := New(nil) @@ -188,7 +189,7 @@ func TestSet_Intersect(t *testing.T) { } func TestSet_Union(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("empty", func(t *testing.T) { a := New(nil) diff --git a/drivers/shared/eventer/eventer_test.go b/drivers/shared/eventer/eventer_test.go index ce81bcca9..7ca0234f7 100644 --- a/drivers/shared/eventer/eventer_test.go +++ b/drivers/shared/eventer/eventer_test.go @@ -6,13 +6,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/plugins/drivers" "github.com/stretchr/testify/require" ) func TestEventer(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ctx, cancel := context.WithCancel(context.Background()) @@ -78,7 +79,7 @@ func TestEventer(t *testing.T) { } func TestEventer_iterateConsumers(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) e := &Eventer{ diff --git a/drivers/shared/executor/executor_linux_test.go b/drivers/shared/executor/executor_linux_test.go index 687c64635..67a991111 100644 --- a/drivers/shared/executor/executor_linux_test.go +++ b/drivers/shared/executor/executor_linux_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/client/testutil" @@ -90,6 +91,7 @@ func testExecutorCommandWithChroot(t *testing.T) *testExecCmd { } func TestExecutor_configureNamespaces(t *testing.T) { + ci.Parallel(t) t.Run("host host", func(t *testing.T) { require.Equal(t, lconfigs.Namespaces{ {Type: lconfigs.NEWNS}, @@ -120,7 +122,7 @@ func TestExecutor_configureNamespaces(t *testing.T) { } func TestExecutor_Isolation_PID_and_IPC_hostMode(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) testutil.ExecCompatible(t) @@ -161,7 +163,7 @@ func TestExecutor_Isolation_PID_and_IPC_hostMode(t *testing.T) { } func TestExecutor_IsolationAndConstraints(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) testutil.ExecCompatible(t) @@ -253,7 +255,7 @@ passwd` // TestExecutor_CgroupPaths asserts that process starts with independent cgroups // hierarchy created for this process func TestExecutor_CgroupPaths(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testutil.ExecCompatible(t) @@ -308,7 +310,7 @@ func TestExecutor_CgroupPaths(t *testing.T) { // TestExecutor_CgroupPaths asserts that all cgroups created for a task // are destroyed on shutdown func TestExecutor_CgroupPathsAreDestroyed(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testutil.ExecCompatible(t) @@ -388,7 +390,7 @@ func TestExecutor_CgroupPathsAreDestroyed(t *testing.T) { } func TestUniversalExecutor_LookupTaskBin(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a temp dir @@ -430,7 +432,7 @@ func TestUniversalExecutor_LookupTaskBin(t *testing.T) { // Exec Launch looks for the binary only inside the chroot func TestExecutor_EscapeContainer(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) testutil.ExecCompatible(t) @@ -468,7 +470,7 @@ func TestExecutor_EscapeContainer(t *testing.T) { // 
TestExecutor_DoesNotInheritOomScoreAdj asserts that the exec processes do not // inherit the oom_score_adj value of Nomad agent/executor process func TestExecutor_DoesNotInheritOomScoreAdj(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.ExecCompatible(t) oomPath := "/proc/self/oom_score_adj" @@ -522,7 +524,7 @@ func TestExecutor_DoesNotInheritOomScoreAdj(t *testing.T) { } func TestExecutor_Capabilities(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.ExecCompatible(t) cases := []struct { @@ -602,7 +604,7 @@ CapAmb: 0000000000000000`, } func TestExecutor_ClientCleanup(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.ExecCompatible(t) require := require.New(t) @@ -647,6 +649,7 @@ func TestExecutor_ClientCleanup(t *testing.T) { } func TestExecutor_cmdDevices(t *testing.T) { + ci.Parallel(t) input := []*drivers.DeviceConfig{ { HostPath: "/dev/null", @@ -680,6 +683,7 @@ func TestExecutor_cmdDevices(t *testing.T) { } func TestExecutor_cmdMounts(t *testing.T) { + ci.Parallel(t) input := []*drivers.MountConfig{ { HostPath: "/host/path-ro", @@ -716,7 +720,7 @@ func TestExecutor_cmdMounts(t *testing.T) { // TestUniversalExecutor_NoCgroup asserts that commands are executed in the // same cgroup as parent process func TestUniversalExecutor_NoCgroup(t *testing.T) { - t.Parallel() + ci.Parallel(t) testutil.ExecCompatible(t) expectedBytes, err := ioutil.ReadFile("/proc/self/cgroup") diff --git a/drivers/shared/executor/executor_test.go b/drivers/shared/executor/executor_test.go index 0b7e8b000..4d930843c 100644 --- a/drivers/shared/executor/executor_test.go +++ b/drivers/shared/executor/executor_test.go @@ -16,6 +16,7 @@ import ( "time" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/helper/testlog" @@ -129,11 +130,11 @@ func configureTLogging(t *testing.T, testcmd *testExecCmd) { return } -func TestExecutor_Start_Invalid(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Invalid(t *testing.T) { + ci.Parallel(t) invalid := "/bin/foobar" for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -150,10 +151,10 @@ func TestExecutor_Start_Invalid(pt *testing.T) { } } -func TestExecutor_Start_Wait_Failure_Code(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Wait_Failure_Code(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -174,10 +175,10 @@ func TestExecutor_Start_Wait_Failure_Code(pt *testing.T) { } } -func TestExecutor_Start_Wait(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Wait(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -211,10 +212,10 @@ func TestExecutor_Start_Wait(pt *testing.T) { } } -func TestExecutor_Start_Wait_Children(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Wait_Children(t *testing.T) { + ci.Parallel(t) for name, factory := range 
executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -250,10 +251,10 @@ func TestExecutor_Start_Wait_Children(pt *testing.T) { } } -func TestExecutor_WaitExitSignal(pt *testing.T) { - pt.Parallel() +func TestExecutor_WaitExitSignal(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir execCmd.Cmd = "/bin/sleep" @@ -308,10 +309,10 @@ func TestExecutor_WaitExitSignal(pt *testing.T) { } } -func TestExecutor_Start_Kill(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Kill(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -341,8 +342,8 @@ func TestExecutor_Start_Kill(pt *testing.T) { } func TestExecutor_Shutdown_Exit(t *testing.T) { + ci.Parallel(t) require := require.New(t) - t.Parallel() testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir execCmd.Cmd = "/bin/sleep" @@ -372,7 +373,7 @@ func TestExecutor_Shutdown_Exit(t *testing.T) { } func TestUniversalExecutor_MakeExecutable(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a temp file f, err := ioutil.TempFile("", "") if err != nil { @@ -403,7 +404,7 @@ func TestUniversalExecutor_MakeExecutable(t *testing.T) { } func TestUniversalExecutor_LookupPath(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a temp dir tmpDir, err := ioutil.TempDir("", "") @@ -515,10 +516,10 @@ func copyFile(t *testing.T, src, dst string) { // TestExecutor_Start_Kill_Immediately_NoGrace asserts that executors shutdown // immediately when sent a kill signal with no grace period. 
-func TestExecutor_Start_Kill_Immediately_NoGrace(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Kill_Immediately_NoGrace(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -551,10 +552,10 @@ func TestExecutor_Start_Kill_Immediately_NoGrace(pt *testing.T) { } } -func TestExecutor_Start_Kill_Immediately_WithGrace(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_Kill_Immediately_WithGrace(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) testExecCmd := testExecutorCommand(t) execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir @@ -589,11 +590,11 @@ func TestExecutor_Start_Kill_Immediately_WithGrace(pt *testing.T) { // TestExecutor_Start_NonExecutableBinaries asserts that executor marks binary as executable // before starting -func TestExecutor_Start_NonExecutableBinaries(pt *testing.T) { - pt.Parallel() +func TestExecutor_Start_NonExecutableBinaries(t *testing.T) { + ci.Parallel(t) for name, factory := range executorFactories { - pt.Run(name, func(t *testing.T) { + t.Run(name, func(t *testing.T) { require := require.New(t) tmpDir, err := ioutil.TempDir("", "nomad-executor-tests") @@ -645,5 +646,4 @@ func TestExecutor_Start_NonExecutableBinaries(pt *testing.T) { }) }) } - } diff --git a/drivers/shared/executor/pid_collector_test.go b/drivers/shared/executor/pid_collector_test.go index c42397b68..f71c4d827 100644 --- a/drivers/shared/executor/pid_collector_test.go +++ b/drivers/shared/executor/pid_collector_test.go @@ -3,11 +3,12 @@ package executor import ( "testing" - ps "github.com/mitchellh/go-ps" + "github.com/hashicorp/nomad/ci" + "github.com/mitchellh/go-ps" ) func TestScanPids(t *testing.T) { - t.Parallel() + ci.Parallel(t) p1 := NewFakeProcess(2, 5) p2 := NewFakeProcess(10, 2) p3 := NewFakeProcess(15, 6) diff --git a/helper/boltdd/boltdd_test.go b/helper/boltdd/boltdd_test.go index 19c4e6ec2..838b7ba6a 100644 --- a/helper/boltdd/boltdd_test.go +++ b/helper/boltdd/boltdd_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -46,7 +47,7 @@ func setupBoltDB(t testingT) (*DB, func()) { } func TestDB_Open(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) db, cleanup := setupBoltDB(t) @@ -56,7 +57,7 @@ func TestDB_Open(t *testing.T) { } func TestDB_Close(t *testing.T) { - t.Parallel() + ci.Parallel(t) db, cleanup := setupBoltDB(t) defer cleanup() @@ -75,7 +76,7 @@ func TestDB_Close(t *testing.T) { } func TestBucket_Create(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) db, cleanup := setupBoltDB(t) @@ -112,7 +113,7 @@ func TestBucket_Create(t *testing.T) { } func TestBucket_DedupeWrites(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) db, cleanup := setupBoltDB(t) @@ -166,7 +167,7 @@ func TestBucket_DedupeWrites(t *testing.T) { } func TestBucket_Delete(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) db, cleanup := setupBoltDB(t) diff --git a/helper/envoy/envoy_test.go b/helper/envoy/envoy_test.go index 87a979c85..90baa2d88 
100644 --- a/helper/envoy/envoy_test.go +++ b/helper/envoy/envoy_test.go @@ -4,12 +4,13 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestEnvoy_PortLabel(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, tc := range []struct { prefix string diff --git a/helper/flags/autopilot_flags_test.go b/helper/flags/autopilot_flags_test.go index ac9dbe092..8d44e7219 100644 --- a/helper/flags/autopilot_flags_test.go +++ b/helper/flags/autopilot_flags_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestFlagHelper_Pointers_Set(t *testing.T) { - t.Parallel() + ci.Parallel(t) var ( B BoolValue @@ -44,7 +45,7 @@ func TestFlagHelper_Pointers_Set(t *testing.T) { } func TestFlagHelper_Pointers_Ignored(t *testing.T) { - t.Parallel() + ci.Parallel(t) var ( B BoolValue diff --git a/helper/flags/flag_test.go b/helper/flags/flag_test.go index 03b064824..9f94c601d 100644 --- a/helper/flags/flag_test.go +++ b/helper/flags/flag_test.go @@ -5,11 +5,12 @@ import ( "reflect" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestStringFlag_implements(t *testing.T) { - t.Parallel() + ci.Parallel(t) var raw interface{} raw = new(StringFlag) @@ -19,7 +20,7 @@ func TestStringFlag_implements(t *testing.T) { } func TestStringFlagSet(t *testing.T) { - t.Parallel() + ci.Parallel(t) sv := new(StringFlag) err := sv.Set("foo") @@ -38,7 +39,7 @@ func TestStringFlagSet(t *testing.T) { } } func TestStringFlagSet_Append(t *testing.T) { - t.Parallel() + ci.Parallel(t) var ( // A test to make sure StringFlag can replace AppendSliceValue diff --git a/helper/freeport/freeport_test.go b/helper/freeport/freeport_test.go index 6d8b1f4c5..4f70491ad 100644 --- a/helper/freeport/freeport_test.go +++ b/helper/freeport/freeport_test.go @@ -61,7 +61,7 @@ func stats() (numTotal, numPending, numFree int) { func TestTakeReturn(t *testing.T) { // NOTE: for global var reasons this cannot execute in parallel - // t.Parallel() + // ci.Parallel(t) // Since this test is destructive (i.e. it leaks all ports) it means that // any other test cases in this package will not function after it runs. 
To diff --git a/helper/pluginutils/hclspecutils/dec_test.go b/helper/pluginutils/hclspecutils/dec_test.go index 067739024..82dcd4ee8 100644 --- a/helper/pluginutils/hclspecutils/dec_test.go +++ b/helper/pluginutils/hclspecutils/dec_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/plugins/shared/hclspec" "github.com/stretchr/testify/require" "github.com/zclconf/go-cty/cty" @@ -38,7 +39,7 @@ func testSpecConversions(t *testing.T, cases []testConversions) { } func TestDec_Convert_Object(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -98,7 +99,7 @@ func TestDec_Convert_Object(t *testing.T) { } func TestDec_Convert_Array(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -148,7 +149,7 @@ func TestDec_Convert_Array(t *testing.T) { } func TestDec_Convert_Attr(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -206,7 +207,7 @@ func TestDec_Convert_Attr(t *testing.T) { } func TestDec_Convert_Block(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -283,7 +284,7 @@ func TestDec_Convert_Block(t *testing.T) { } func TestDec_Convert_BlockAttrs(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -321,7 +322,7 @@ func TestDec_Convert_BlockAttrs(t *testing.T) { } func TestDec_Convert_BlockList(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -380,7 +381,7 @@ func TestDec_Convert_BlockList(t *testing.T) { } func TestDec_Convert_BlockSet(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -439,7 +440,7 @@ func TestDec_Convert_BlockSet(t *testing.T) { } func TestDec_Convert_BlockMap(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -514,7 +515,7 @@ func TestDec_Convert_BlockMap(t *testing.T) { } func TestDec_Convert_Default(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { @@ -558,7 +559,7 @@ func TestDec_Convert_Default(t *testing.T) { } func TestDec_Convert_Literal(t *testing.T) { - t.Parallel() + ci.Parallel(t) tests := []testConversions{ { diff --git a/helper/pluginutils/hclutils/testing.go b/helper/pluginutils/hclutils/testing.go index d5b67eab2..469cec7d5 100644 --- a/helper/pluginutils/hclutils/testing.go +++ b/helper/pluginutils/hclutils/testing.go @@ -6,14 +6,13 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" - "github.com/mitchellh/mapstructure" - "github.com/stretchr/testify/require" - "github.com/zclconf/go-cty/cty" - "github.com/hashicorp/nomad/helper/pluginutils/hclspecutils" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" "github.com/hashicorp/nomad/plugins/shared/hclspec" + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/require" + "github.com/zclconf/go-cty/cty" ) type HCLParser struct { diff --git a/helper/pluginutils/loader/loader_test.go b/helper/pluginutils/loader/loader_test.go index 1825104d7..de99d012e 100644 --- a/helper/pluginutils/loader/loader_test.go +++ b/helper/pluginutils/loader/loader_test.go @@ -12,6 +12,7 @@ import ( log "github.com/hashicorp/go-hclog" version "github.com/hashicorp/go-version" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/nomad/plugins/base" @@ -107,7 +108,7 @@ func (h 
*harness) cleanup() { } func TestPluginLoader_External(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -167,7 +168,7 @@ func TestPluginLoader_External(t *testing.T) { } func TestPluginLoader_External_ApiVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -271,7 +272,7 @@ func TestPluginLoader_External_ApiVersions(t *testing.T) { } func TestPluginLoader_External_NoApiVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -301,7 +302,7 @@ func TestPluginLoader_External_NoApiVersion(t *testing.T) { } func TestPluginLoader_External_Config(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -368,7 +369,7 @@ func TestPluginLoader_External_Config(t *testing.T) { // Pass a config but make sure it is fatal func TestPluginLoader_External_Config_Bad(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a plugin @@ -403,7 +404,7 @@ func TestPluginLoader_External_Config_Bad(t *testing.T) { } func TestPluginLoader_External_VersionOverlap(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -455,7 +456,7 @@ func TestPluginLoader_External_VersionOverlap(t *testing.T) { } func TestPluginLoader_Internal(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create the harness @@ -517,7 +518,7 @@ func TestPluginLoader_Internal(t *testing.T) { } func TestPluginLoader_Internal_ApiVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -599,7 +600,7 @@ func TestPluginLoader_Internal_ApiVersions(t *testing.T) { } func TestPluginLoader_Internal_NoApiVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -630,7 +631,7 @@ func TestPluginLoader_Internal_NoApiVersion(t *testing.T) { } func TestPluginLoader_Internal_Config(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create the harness @@ -701,7 +702,7 @@ func TestPluginLoader_Internal_Config(t *testing.T) { // Tests that an external config can override the config of an internal plugin func TestPluginLoader_Internal_ExternalConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create the harness @@ -772,7 +773,7 @@ func TestPluginLoader_Internal_ExternalConfig(t *testing.T) { // Pass a config but make sure it is fatal func TestPluginLoader_Internal_Config_Bad(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create the harness @@ -810,7 +811,7 @@ func TestPluginLoader_Internal_Config_Bad(t *testing.T) { } func TestPluginLoader_InternalOverrideExternal(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -867,7 +868,7 @@ func TestPluginLoader_InternalOverrideExternal(t *testing.T) { } func TestPluginLoader_ExternalOverrideInternal(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -924,7 +925,7 @@ func TestPluginLoader_ExternalOverrideInternal(t *testing.T) { } func TestPluginLoader_Dispense_External(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -971,7 +972,7 @@ func TestPluginLoader_Dispense_External(t *testing.T) { } func TestPluginLoader_Dispense_Internal(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // 
Create two plugins @@ -1030,7 +1031,7 @@ func TestPluginLoader_Dispense_Internal(t *testing.T) { } func TestPluginLoader_Dispense_NoConfigSchema_External(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -1078,7 +1079,7 @@ func TestPluginLoader_Dispense_NoConfigSchema_External(t *testing.T) { } func TestPluginLoader_Dispense_NoConfigSchema_Internal(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create two plugins @@ -1129,7 +1130,7 @@ func TestPluginLoader_Dispense_NoConfigSchema_Internal(t *testing.T) { } func TestPluginLoader_Reattach_External(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a plugin @@ -1193,7 +1194,7 @@ func TestPluginLoader_Reattach_External(t *testing.T) { // Test the loader trying to launch a non-plugin binary func TestPluginLoader_Bad_Executable(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a plugin @@ -1222,10 +1223,10 @@ func TestPluginLoader_Bad_Executable(t *testing.T) { // Test that we skip directories, non-executables and follow symlinks func TestPluginLoader_External_SkipBadFiles(t *testing.T) { + ci.Parallel(t) if runtime.GOOS == "windows" { t.Skip("Windows currently does not skip non exe files") } - t.Parallel() require := require.New(t) // Create two plugins @@ -1285,6 +1286,8 @@ func TestPluginLoader_External_SkipBadFiles(t *testing.T) { } func TestPluginLoader_ConvertVersions(t *testing.T) { + ci.Parallel(t) + v010 := version.Must(version.NewVersion("v0.1.0")) v020 := version.Must(version.NewVersion("v0.2.0")) v021 := version.Must(version.NewVersion("v0.2.1")) diff --git a/helper/pluginutils/singleton/singleton_test.go b/helper/pluginutils/singleton/singleton_test.go index cfe067844..6252ec0e0 100644 --- a/helper/pluginutils/singleton/singleton_test.go +++ b/helper/pluginutils/singleton/singleton_test.go @@ -8,6 +8,7 @@ import ( log "github.com/hashicorp/go-hclog" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/pluginutils/loader" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/plugins/base" @@ -22,7 +23,7 @@ func harness(t *testing.T) (*SingletonLoader, *loader.MockCatalog) { // Test that multiple dispenses return the same instance func TestSingleton_Dispense(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dispenseCalled := 0 @@ -71,7 +72,7 @@ func TestSingleton_Dispense(t *testing.T) { // Test that after a plugin is dispensed, if it exits, an error is returned on // the next dispense func TestSingleton_Dispense_Exit_Dispense(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) exited := false @@ -121,7 +122,7 @@ func TestSingleton_Dispense_Exit_Dispense(t *testing.T) { // Test that if a plugin errors while being dispensed, the error is returned but // not saved func TestSingleton_DispenseError_Dispense(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dispenseCalled := 0 @@ -164,7 +165,7 @@ func TestSingleton_DispenseError_Dispense(t *testing.T) { // Test that if a plugin errors while being reattached, the error is returned but // not saved func TestSingleton_ReattachError_Dispense(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dispenseCalled, reattachCalled := 0, 0 @@ -204,7 +205,7 @@ func TestSingleton_ReattachError_Dispense(t *testing.T) { // Test that after reattaching, dispense returns the same instance func 
TestSingleton_Reattach_Dispense(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dispenseCalled, reattachCalled := 0, 0 diff --git a/helper/raftutil/msgpack_test.go b/helper/raftutil/msgpack_test.go index f75488292..111e08146 100644 --- a/helper/raftutil/msgpack_test.go +++ b/helper/raftutil/msgpack_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) @@ -28,6 +29,8 @@ func TestMaybeDecodeTimeIgnoresASCII(t *testing.T) { } func TestDecodesTime(t *testing.T) { + ci.Parallel(t) + type Value struct { CreateTime time.Time Mode string @@ -59,6 +62,8 @@ func TestDecodesTime(t *testing.T) { } func TestMyDate(t *testing.T) { + ci.Parallel(t) + handler := &codec.MsgpackHandle{} handler.TimeNotBuiltin = true diff --git a/helper/raftutil/state_test.go b/helper/raftutil/state_test.go index 3d653fbc9..d4384c8ab 100644 --- a/helper/raftutil/state_test.go +++ b/helper/raftutil/state_test.go @@ -4,6 +4,7 @@ import ( "path/filepath" "testing" + "github.com/hashicorp/nomad/ci" raftboltdb "github.com/hashicorp/raft-boltdb/v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -14,7 +15,7 @@ import ( // logs" fail with a helpful error message when called on an inuse // database. func TestRaftStateInfo_InUse(t *testing.T) { - t.Parallel() // since there's a 1s timeout. + ci.Parallel(t) // since there's a 1s timeout. // First create an empty raft db dir := filepath.Join(t.TempDir(), "raft.db") diff --git a/helper/tlsutil/config_test.go b/helper/tlsutil/config_test.go index f1837db7b..4a5afdb28 100644 --- a/helper/tlsutil/config_test.go +++ b/helper/tlsutil/config_test.go @@ -11,6 +11,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs/config" "github.com/hashicorp/yamux" "github.com/stretchr/testify/assert" @@ -27,6 +28,8 @@ const ( ) func TestConfig_AppendCA_None(t *testing.T) { + ci.Parallel(t) + require := require.New(t) conf := &Config{} @@ -37,6 +40,8 @@ func TestConfig_AppendCA_None(t *testing.T) { } func TestConfig_AppendCA_Valid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) conf := &Config{ @@ -49,6 +54,8 @@ func TestConfig_AppendCA_Valid(t *testing.T) { } func TestConfig_AppendCA_Valid_MultipleCerts(t *testing.T) { + ci.Parallel(t) + require := require.New(t) certs := ` @@ -102,6 +109,8 @@ TttDu+g2VdbcBwVDZ49X2Md6OY2N3G8Irdlj+n+mCQJaHwVt52DRzz0= // TestConfig_AppendCA_Valid_Whitespace asserts that a PEM file containing // trailing whitespace is valid. func TestConfig_AppendCA_Valid_Whitespace(t *testing.T) { + ci.Parallel(t) + require := require.New(t) const cacertWhitespace = "./testdata/ca-whitespace.pem" @@ -117,6 +126,8 @@ func TestConfig_AppendCA_Valid_Whitespace(t *testing.T) { // TestConfig_AppendCA_Invalid_MultipleCerts_Whitespace asserts that a PEM file // containing non-PEM data between certificate blocks is still valid. func TestConfig_AppendCA_Valid_MultipleCerts_ExtraData(t *testing.T) { + ci.Parallel(t) + require := require.New(t) certs := ` @@ -176,6 +187,8 @@ TttDu+g2VdbcBwVDZ49X2Md6OY2N3G8Irdlj+n+mCQJaHwVt52DRzz0= // TestConfig_AppendCA_Invalid_MultipleCerts asserts only the valid certificate // is returned. 
func TestConfig_AppendCA_Invalid_MultipleCerts(t *testing.T) { + ci.Parallel(t) + require := require.New(t) certs := ` @@ -214,6 +227,8 @@ Invalid } func TestConfig_AppendCA_Invalid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { conf := &Config{ @@ -245,6 +260,8 @@ func TestConfig_AppendCA_Invalid(t *testing.T) { } func TestConfig_CACertificate_Valid(t *testing.T) { + ci.Parallel(t) + conf := &Config{ CAFile: cacert, } @@ -259,6 +276,8 @@ func TestConfig_CACertificate_Valid(t *testing.T) { } func TestConfig_LoadKeyPair_None(t *testing.T) { + ci.Parallel(t) + conf := &Config{ KeyLoader: &config.KeyLoader{}, } @@ -272,6 +291,8 @@ func TestConfig_LoadKeyPair_None(t *testing.T) { } func TestConfig_LoadKeyPair_Valid(t *testing.T) { + ci.Parallel(t) + conf := &Config{ CertFile: foocert, KeyFile: fookey, @@ -287,6 +308,8 @@ func TestConfig_LoadKeyPair_Valid(t *testing.T) { } func TestConfig_OutgoingTLS_MissingCA(t *testing.T) { + ci.Parallel(t) + conf := &Config{ VerifyOutgoing: true, } @@ -300,6 +323,8 @@ func TestConfig_OutgoingTLS_MissingCA(t *testing.T) { } func TestConfig_OutgoingTLS_OnlyCA(t *testing.T) { + ci.Parallel(t) + conf := &Config{ CAFile: cacert, } @@ -313,6 +338,8 @@ func TestConfig_OutgoingTLS_OnlyCA(t *testing.T) { } func TestConfig_OutgoingTLS_VerifyOutgoing(t *testing.T) { + ci.Parallel(t) + conf := &Config{ VerifyOutgoing: true, CAFile: cacert, @@ -333,6 +360,8 @@ func TestConfig_OutgoingTLS_VerifyOutgoing(t *testing.T) { } func TestConfig_OutgoingTLS_VerifyHostname(t *testing.T) { + ci.Parallel(t) + conf := &Config{ VerifyServerHostname: true, CAFile: cacert, @@ -353,6 +382,8 @@ func TestConfig_OutgoingTLS_VerifyHostname(t *testing.T) { } func TestConfig_OutgoingTLS_WithKeyPair(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) conf := &Config{ @@ -375,6 +406,8 @@ func TestConfig_OutgoingTLS_WithKeyPair(t *testing.T) { } func TestConfig_OutgoingTLS_PreferServerCipherSuites(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { @@ -399,6 +432,8 @@ func TestConfig_OutgoingTLS_PreferServerCipherSuites(t *testing.T) { } func TestConfig_OutgoingTLS_TLSCipherSuites(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { @@ -466,6 +501,8 @@ func startTLSServer(config *Config) (net.Conn, chan error) { // TODO sign the certificates for "server.regionFoo.nomad func TestConfig_outgoingWrapper_OK(t *testing.T) { + ci.Parallel(t) + config := &Config{ CAFile: cacert, CertFile: foocert, @@ -501,6 +538,7 @@ func TestConfig_outgoingWrapper_OK(t *testing.T) { } func TestConfig_outgoingWrapper_BadCert(t *testing.T) { + ci.Parallel(t) // TODO this test is currently hanging, need to investigate more. 
t.SkipNow() config := &Config{ @@ -536,6 +574,8 @@ func TestConfig_outgoingWrapper_BadCert(t *testing.T) { } func TestConfig_wrapTLS_OK(t *testing.T) { + ci.Parallel(t) + config := &Config{ CAFile: cacert, CertFile: foocert, @@ -567,6 +607,8 @@ func TestConfig_wrapTLS_OK(t *testing.T) { } func TestConfig_wrapTLS_BadCert(t *testing.T) { + ci.Parallel(t) + serverConfig := &Config{ CAFile: cacert, CertFile: badcert, @@ -604,6 +646,8 @@ func TestConfig_wrapTLS_BadCert(t *testing.T) { } func TestConfig_IncomingTLS(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) conf := &Config{ @@ -634,6 +678,8 @@ func TestConfig_IncomingTLS(t *testing.T) { } func TestConfig_IncomingTLS_MissingCA(t *testing.T) { + ci.Parallel(t) + conf := &Config{ VerifyIncoming: true, CertFile: foocert, @@ -647,6 +693,8 @@ func TestConfig_IncomingTLS_MissingCA(t *testing.T) { } func TestConfig_IncomingTLS_MissingKey(t *testing.T) { + ci.Parallel(t) + conf := &Config{ VerifyIncoming: true, CAFile: cacert, @@ -658,6 +706,8 @@ func TestConfig_IncomingTLS_MissingKey(t *testing.T) { } func TestConfig_IncomingTLS_NoVerify(t *testing.T) { + ci.Parallel(t) + conf := &Config{} tlsC, err := conf.IncomingTLSConfig() if err != nil { @@ -678,6 +728,8 @@ func TestConfig_IncomingTLS_NoVerify(t *testing.T) { } func TestConfig_IncomingTLS_PreferServerCipherSuites(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { @@ -697,6 +749,8 @@ func TestConfig_IncomingTLS_PreferServerCipherSuites(t *testing.T) { } func TestConfig_IncomingTLS_TLSCipherSuites(t *testing.T) { + ci.Parallel(t) + require := require.New(t) { @@ -725,6 +779,8 @@ func TestConfig_IncomingTLS_TLSCipherSuites(t *testing.T) { // This test relies on the fact that the specified certificate has an ECDSA // signature algorithm func TestConfig_ParseCiphers_Valid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) tlsConfig := &config.TLSConfig{ @@ -780,6 +836,8 @@ func TestConfig_ParseCiphers_Valid(t *testing.T) { // This test relies on the fact that the specified certificate has an ECDSA // signature algorithm func TestConfig_ParseCiphers_Default(t *testing.T) { + ci.Parallel(t) + require := require.New(t) expectedCiphers := []uint16{ @@ -808,6 +866,8 @@ func TestConfig_ParseCiphers_Default(t *testing.T) { // This test relies on the fact that the specified certificate has an ECDSA // signature algorithm func TestConfig_ParseCiphers_Invalid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) invalidCiphers := []string{ @@ -832,6 +892,8 @@ func TestConfig_ParseCiphers_Invalid(t *testing.T) { // This test relies on the fact that the specified certificate has an ECDSA // signature algorithm func TestConfig_ParseCiphers_SupportedSignature(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Supported signature @@ -862,6 +924,8 @@ func TestConfig_ParseCiphers_SupportedSignature(t *testing.T) { } func TestConfig_ParseMinVersion_Valid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) validVersions := []string{"tls10", @@ -883,6 +947,8 @@ func TestConfig_ParseMinVersion_Valid(t *testing.T) { } func TestConfig_ParseMinVersion_Invalid(t *testing.T) { + ci.Parallel(t) + require := require.New(t) invalidVersions := []string{"tls13", @@ -898,6 +964,8 @@ func TestConfig_ParseMinVersion_Invalid(t *testing.T) { } func TestConfig_NewTLSConfiguration(t *testing.T) { + ci.Parallel(t) + require := require.New(t) conf := &config.TLSConfig{ @@ -920,6 +988,8 @@ func TestConfig_NewTLSConfiguration(t *testing.T) { } func 
TestConfig_ShouldReloadRPCConnections(t *testing.T) { + ci.Parallel(t) + require := require.New(t) type shouldReloadTestInput struct { diff --git a/helper/tlsutil/generate_test.go b/helper/tlsutil/generate_test.go index 5be9f7e2b..8df3fb270 100644 --- a/helper/tlsutil/generate_test.go +++ b/helper/tlsutil/generate_test.go @@ -10,11 +10,11 @@ import ( "encoding/pem" "io" "net" + "strings" "testing" "time" - "strings" - + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -34,7 +34,7 @@ func TestSerialNumber(t *testing.T) { } func TestGeneratePrivateKey(t *testing.T) { - t.Parallel() + ci.Parallel(t) _, p, err := GeneratePrivateKey() require.Nil(t, err) require.NotEmpty(t, p) @@ -62,6 +62,8 @@ func (s *TestSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) } func TestGenerateCA(t *testing.T) { + ci.Parallel(t) + t.Run("no signer", func(t *testing.T) { ca, pk, err := GenerateCA(CAOpts{Signer: &TestSigner{}}) require.Error(t, err) @@ -114,7 +116,8 @@ func TestGenerateCA(t *testing.T) { } func TestGenerateCert(t *testing.T) { - t.Parallel() + ci.Parallel(t) + signer, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) require.Nil(t, err) ca, _, err := GenerateCA(CAOpts{Signer: signer}) diff --git a/internal/testing/apitests/jobs_test.go b/internal/testing/apitests/jobs_test.go index fd7001930..7c373ae4c 100644 --- a/internal/testing/apitests/jobs_test.go +++ b/internal/testing/apitests/jobs_test.go @@ -4,12 +4,13 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/stretchr/testify/assert" ) func TestJobs_Parse(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -46,7 +47,7 @@ func TestJobs_Parse(t *testing.T) { } func TestJobs_Summary_WithACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) c, s, root := makeACLClient(t, nil, nil) diff --git a/internal/testing/apitests/nodes_test.go b/internal/testing/apitests/nodes_test.go index 2bc67b0c9..becba2f2d 100644 --- a/internal/testing/apitests/nodes_test.go +++ b/internal/testing/apitests/nodes_test.go @@ -3,13 +3,14 @@ package apitests import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestNodes_GC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -21,7 +22,7 @@ func TestNodes_GC(t *testing.T) { } func TestNodes_GcAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() diff --git a/internal/testing/apitests/operator_autopilot_test.go b/internal/testing/apitests/operator_autopilot_test.go index 8c22bf644..84945ec55 100644 --- a/internal/testing/apitests/operator_autopilot_test.go +++ b/internal/testing/apitests/operator_autopilot_test.go @@ -1,18 +1,18 @@ package apitests import ( - "testing" - "fmt" + "testing" "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" ) func TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -37,7 +37,7 @@ func TestAPI_OperatorAutopilotGetSetConfiguration(t *testing.T) { } func 
TestAPI_OperatorAutopilotCASConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -75,7 +75,7 @@ func TestAPI_OperatorAutopilotCASConfiguration(t *testing.T) { } func TestAPI_OperatorAutopilotServerHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.Server.RaftProtocol = 3 }) diff --git a/internal/testing/apitests/operator_test.go b/internal/testing/apitests/operator_test.go index bdf7477ad..fe0f1f173 100644 --- a/internal/testing/apitests/operator_test.go +++ b/internal/testing/apitests/operator_test.go @@ -5,11 +5,12 @@ import ( "github.com/hashicorp/consul/sdk/testutil/retry" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestAPI_OperatorSchedulerGetSetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -47,7 +48,7 @@ func TestAPI_OperatorSchedulerGetSetConfiguration(t *testing.T) { } func TestAPI_OperatorSchedulerCASConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() diff --git a/internal/testing/apitests/streamingsync_test.go b/internal/testing/apitests/streamingsync_test.go index e17853331..edf110594 100644 --- a/internal/testing/apitests/streamingsync_test.go +++ b/internal/testing/apitests/streamingsync_test.go @@ -5,12 +5,15 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/plugins/drivers" "github.com/stretchr/testify/require" ) // TestExecStreamingInputIsInSync asserts that a roundtrip of exec streaming input doesn't lose any data func TestExecStreamingInputIsInSync(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string input api.ExecStreamingInput @@ -52,6 +55,8 @@ func TestExecStreamingInputIsInSync(t *testing.T) { // TestExecStreamingOutputIsInSync asserts that a roundtrip of exec streaming output doesn't lose any data func TestExecStreamingOutputIsInSync(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string input api.ExecStreamingOutput diff --git a/internal/testing/apitests/structsync_test.go b/internal/testing/apitests/structsync_test.go index 6a3858c41..a40c2c718 100644 --- a/internal/testing/apitests/structsync_test.go +++ b/internal/testing/apitests/structsync_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) @@ -16,6 +17,8 @@ import ( // such dependency without affecting api clients. 
func TestDefaultResourcesAreInSync(t *testing.T) { + ci.Parallel(t) + apiR := api.DefaultResources() structsR := structs.DefaultResources() @@ -28,6 +31,8 @@ func TestDefaultResourcesAreInSync(t *testing.T) { } func TestMinResourcesAreInSync(t *testing.T) { + ci.Parallel(t) + apiR := api.MinResources() structsR := structs.MinResources() @@ -40,6 +45,8 @@ func TestMinResourcesAreInSync(t *testing.T) { } func TestNewDefaultRescheulePolicyInSync(t *testing.T) { + ci.Parallel(t) + cases := []struct { typ string expected structs.ReschedulePolicy @@ -62,6 +69,8 @@ func TestNewDefaultRescheulePolicyInSync(t *testing.T) { } func TestNewDefaultRestartPolicyInSync(t *testing.T) { + ci.Parallel(t) + cases := []struct { typ string expected structs.RestartPolicy diff --git a/internal/testing/apitests/tasks_test.go b/internal/testing/apitests/tasks_test.go index 7c9a45d80..b4cbe8cf9 100644 --- a/internal/testing/apitests/tasks_test.go +++ b/internal/testing/apitests/tasks_test.go @@ -5,12 +5,15 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" ) // Verifies that reschedule policy is merged correctly func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string jobReschedulePolicy *api.ReschedulePolicy diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index aa219c117..389d01245 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -8,6 +8,7 @@ import ( capi "github.com/hashicorp/consul/api" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -24,6 +25,8 @@ const ( ) func TestParse(t *testing.T) { + ci.Parallel(t) + cases := []struct { File string Result *api.Job @@ -1784,6 +1787,8 @@ func TestParse(t *testing.T) { } func TestBadPorts(t *testing.T) { + ci.Parallel(t) + path, err := filepath.Abs(filepath.Join("./test-fixtures", "bad-ports.hcl")) if err != nil { t.Fatalf("Can't get absolute path for file: %s", err) @@ -1797,6 +1802,8 @@ func TestBadPorts(t *testing.T) { } func TestOverlappingPorts(t *testing.T) { + ci.Parallel(t) + path, err := filepath.Abs(filepath.Join("./test-fixtures", "overlapping-ports.hcl")) if err != nil { t.Fatalf("Can't get absolute path for file: %s", err) @@ -1814,6 +1821,8 @@ func TestOverlappingPorts(t *testing.T) { } func TestIncorrectKey(t *testing.T) { + ci.Parallel(t) + path, err := filepath.Abs(filepath.Join("./test-fixtures", "basic_wrong_key.hcl")) if err != nil { t.Fatalf("Can't get absolute path for file: %s", err) diff --git a/jobspec/utils_test.go b/jobspec/utils_test.go index c571b7cba..810f6d0cc 100644 --- a/jobspec/utils_test.go +++ b/jobspec/utils_test.go @@ -3,13 +3,14 @@ package jobspec import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) // TestFlattenMapSlice asserts flattenMapSlice recursively flattens a slice of maps into a // single map. 
func TestFlattenMapSlice(t *testing.T) { - t.Parallel() + ci.Parallel(t) input := map[string]interface{}{ "foo": 123, diff --git a/jobspec2/parse_test.go b/jobspec2/parse_test.go index 2cb0496a9..b3e7e9963 100644 --- a/jobspec2/parse_test.go +++ b/jobspec2/parse_test.go @@ -8,11 +8,14 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/jobspec" "github.com/stretchr/testify/require" ) func TestEquivalentToHCL1(t *testing.T) { + ci.Parallel(t) + hclSpecDir := "../jobspec/test-fixtures/" fis, err := ioutil.ReadDir(hclSpecDir) require.NoError(t, err) @@ -41,6 +44,8 @@ func TestEquivalentToHCL1(t *testing.T) { } func TestEquivalentToHCL1_ComplexConfig(t *testing.T) { + ci.Parallel(t) + name := "./test-fixtures/config-compatibility.hcl" f, err := os.Open(name) require.NoError(t, err) @@ -58,6 +63,8 @@ func TestEquivalentToHCL1_ComplexConfig(t *testing.T) { } func TestParse_VarsAndFunctions(t *testing.T) { + ci.Parallel(t) + hcl := ` variables { region_var = "default" @@ -82,6 +89,8 @@ job "example" { } func TestParse_VariablesDefaultsAndSet(t *testing.T) { + ci.Parallel(t) + hcl := ` variables { region_var = "default_region" @@ -179,6 +188,8 @@ job "example" { // TestParse_UnknownVariables asserts that unknown variables are left intact for further processing func TestParse_UnknownVariables(t *testing.T) { + ci.Parallel(t) + hcl := ` variables { region_var = "default" @@ -212,6 +223,8 @@ job "example" { // TestParse_UnsetVariables asserts that variables that have neither types nor // values return early instead of panicking. func TestParse_UnsetVariables(t *testing.T) { + ci.Parallel(t) + hcl := ` variable "region_var" {} job "example" { @@ -232,6 +245,8 @@ job "example" { } func TestParse_Locals(t *testing.T) { + ci.Parallel(t) + hcl := ` variables { region_var = "default_region" @@ -279,6 +294,8 @@ job "example" { } func TestParse_FileOperators(t *testing.T) { + ci.Parallel(t) + hcl := ` job "example" { region = file("parse_test.go") @@ -314,6 +331,8 @@ job "example" { } func TestParseDynamic(t *testing.T) { + ci.Parallel(t) + hcl := ` job "example" { @@ -375,6 +394,8 @@ job "example" { } func TestParse_InvalidHCL(t *testing.T) { + ci.Parallel(t) + t.Run("invalid body", func(t *testing.T) { hcl := `invalid{hcl` @@ -418,6 +439,8 @@ job "example" { } func TestParse_InvalidScalingSyntax(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string expectedErr string @@ -582,6 +605,8 @@ job "example" { } func TestParseJob_JobWithFunctionsAndLookups(t *testing.T) { + ci.Parallel(t) + hcl := ` variable "env" { description = "target environment for the job" @@ -711,6 +736,8 @@ job "job-webserver" { } func TestParse_TaskEnvs(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string envSnippet string @@ -784,6 +811,8 @@ job "example" { } func TestParse_TaskEnvs_Multiple(t *testing.T) { + ci.Parallel(t) + hcl := ` job "example" { group "group" { @@ -808,6 +837,8 @@ job "example" { } func Test_TaskEnvs_Invalid(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string envSnippet string @@ -856,6 +887,8 @@ job "example" { } func TestParse_Meta_Alternatives(t *testing.T) { + ci.Parallel(t) + hcl := ` job "example" { group "group" { task "task" { @@ -904,6 +937,7 @@ func TestParse_Meta_Alternatives(t *testing.T) { // TestParse_UndefinedVariables asserts that values with undefined variables are left // intact in the job representation func TestParse_UndefinedVariables(t *testing.T) { + ci.Parallel(t) cases := []string{ 
"plain", @@ -947,6 +981,8 @@ func TestParse_UndefinedVariables(t *testing.T) { } func TestParseServiceCheck(t *testing.T) { + ci.Parallel(t) + hcl := ` job "group_service_check_script" { group "group" { service { @@ -996,6 +1032,8 @@ func TestParseServiceCheck(t *testing.T) { } func TestWaitConfig(t *testing.T) { + ci.Parallel(t) + hclBytes, err := os.ReadFile("test-fixtures/template-wait-config.hcl") require.NoError(t, err) diff --git a/lib/circbufwriter/writer_test.go b/lib/circbufwriter/writer_test.go index e4075b496..d48a03ddf 100644 --- a/lib/circbufwriter/writer_test.go +++ b/lib/circbufwriter/writer_test.go @@ -6,11 +6,14 @@ import ( "io/ioutil" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" ) func TestWriter_NonBlockingWrite(t *testing.T) { + ci.Parallel(t) + require := require.New(t) var buf bytes.Buffer w := New(&buf, 64) @@ -40,6 +43,8 @@ func (b *blockingWriter) Write(p []byte) (nn int, err error) { } func TestWriter_BlockingWrite(t *testing.T) { + ci.Parallel(t) + require := require.New(t) blockCh := make(chan struct{}) bw := &blockingWriter{unblock: blockCh} @@ -64,6 +69,8 @@ func TestWriter_BlockingWrite(t *testing.T) { } func TestWriter_CloseClose(t *testing.T) { + ci.Parallel(t) + require := require.New(t) w := New(ioutil.Discard, 64) require.NoError(w.Close()) diff --git a/lib/cpuset/cpuset_test.go b/lib/cpuset/cpuset_test.go index ea7e72108..178517680 100644 --- a/lib/cpuset/cpuset_test.go +++ b/lib/cpuset/cpuset_test.go @@ -3,16 +3,21 @@ package cpuset import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestCPUSet_Size(t *testing.T) { + ci.Parallel(t) + set := New(0, 1, 2, 3) require.Equal(t, 4, set.Size()) require.Equal(t, 0, New().Size()) } func TestCPUSet_ToSlice(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string in CPUSet @@ -41,6 +46,8 @@ func TestCPUSet_ToSlice(t *testing.T) { } func TestCPUSet_Equals(t *testing.T) { + ci.Parallel(t) + cases := []struct { a CPUSet b CPUSet @@ -64,6 +71,8 @@ func TestCPUSet_Equals(t *testing.T) { } func TestCPUSet_Union(t *testing.T) { + ci.Parallel(t) + cases := []struct { a CPUSet b CPUSet @@ -87,6 +96,8 @@ func TestCPUSet_Union(t *testing.T) { } func TestCPUSet_Difference(t *testing.T) { + ci.Parallel(t) + cases := []struct { a CPUSet b CPUSet @@ -109,6 +120,8 @@ func TestCPUSet_Difference(t *testing.T) { } func TestCPUSet_IsSubsetOf(t *testing.T) { + ci.Parallel(t) + cases := []struct { a CPUSet b CPUSet @@ -128,6 +141,8 @@ func TestCPUSet_IsSubsetOf(t *testing.T) { } func TestCPUSet_IsSupersetOf(t *testing.T) { + ci.Parallel(t) + cases := []struct { a CPUSet b CPUSet @@ -147,6 +162,8 @@ func TestCPUSet_IsSupersetOf(t *testing.T) { } func TestCPUSet_ContainsAny(t *testing.T) { + ci.Parallel(t) + cases := []struct { a CPUSet b CPUSet @@ -166,6 +183,8 @@ func TestCPUSet_ContainsAny(t *testing.T) { } func TestParse(t *testing.T) { + ci.Parallel(t) + cases := []struct { cpuset string expected CPUSet @@ -187,6 +206,8 @@ func TestParse(t *testing.T) { } func TestCPUSet_String(t *testing.T) { + ci.Parallel(t) + cases := []struct { cpuset CPUSet expected string diff --git a/lib/delayheap/delay_heap_test.go b/lib/delayheap/delay_heap_test.go index 3b44acc19..28d524591 100644 --- a/lib/delayheap/delay_heap_test.go +++ b/lib/delayheap/delay_heap_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -27,6 +28,8 @@ func (d 
*heapNodeImpl) Namespace() string { } func TestDelayHeap_PushPop(t *testing.T) { + ci.Parallel(t) + delayHeap := NewDelayHeap() now := time.Now() require := require.New(t) @@ -72,6 +75,8 @@ func TestDelayHeap_PushPop(t *testing.T) { } func TestDelayHeap_Update(t *testing.T) { + ci.Parallel(t) + delayHeap := NewDelayHeap() now := time.Now() require := require.New(t) diff --git a/lib/kheap/score_heap_test.go b/lib/kheap/score_heap_test.go index 932c08ccf..ca500bc86 100644 --- a/lib/kheap/score_heap_test.go +++ b/lib/kheap/score_heap_test.go @@ -4,6 +4,7 @@ import ( "container/heap" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -21,6 +22,8 @@ func (h *heapItem) Score() float64 { } func TestScoreHeap(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string items map[string]float64 diff --git a/nomad/acl_endpoint_test.go b/nomad/acl_endpoint_test.go index 6e934b004..4485aa4c4 100644 --- a/nomad/acl_endpoint_test.go +++ b/nomad/acl_endpoint_test.go @@ -10,6 +10,7 @@ import ( "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -19,7 +20,7 @@ import ( ) func TestACLEndpoint_GetPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -105,7 +106,7 @@ func TestACLEndpoint_GetPolicy(t *testing.T) { } func TestACLEndpoint_GetPolicy_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -185,7 +186,7 @@ func TestACLEndpoint_GetPolicy_Blocking(t *testing.T) { } func TestACLEndpoint_GetPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -225,7 +226,7 @@ func TestACLEndpoint_GetPolicies(t *testing.T) { } func TestACLEndpoint_GetPolicies_TokenSubset(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -266,7 +267,7 @@ func TestACLEndpoint_GetPolicies_TokenSubset(t *testing.T) { } func TestACLEndpoint_GetPolicies_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -346,8 +347,8 @@ func TestACLEndpoint_GetPolicies_Blocking(t *testing.T) { } func TestACLEndpoint_ListPolicies(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -417,7 +418,7 @@ func TestACLEndpoint_ListPolicies(t *testing.T) { // unauthenticated ListPolicies returns anonymous policy if one // exists, otherwise, empty func TestACLEndpoint_ListPolicies_Unauthenticated(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -466,7 +467,7 @@ func TestACLEndpoint_ListPolicies_Unauthenticated(t *testing.T) { } func TestACLEndpoint_ListPolicies_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -527,7 +528,7 @@ func TestACLEndpoint_ListPolicies_Blocking(t *testing.T) { } func TestACLEndpoint_DeletePolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -554,7 +555,7 @@ func TestACLEndpoint_DeletePolicies(t *testing.T) { } func TestACLEndpoint_UpsertPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := 
TestACLServer(t, nil) defer cleanupS1() @@ -585,7 +586,7 @@ func TestACLEndpoint_UpsertPolicies(t *testing.T) { } func TestACLEndpoint_UpsertPolicies_Invalid(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -613,7 +614,7 @@ func TestACLEndpoint_UpsertPolicies_Invalid(t *testing.T) { } func TestACLEndpoint_GetToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -659,7 +660,7 @@ func TestACLEndpoint_GetToken(t *testing.T) { } func TestACLEndpoint_GetToken_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -739,7 +740,7 @@ func TestACLEndpoint_GetToken_Blocking(t *testing.T) { } func TestACLEndpoint_GetTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -778,7 +779,7 @@ func TestACLEndpoint_GetTokens(t *testing.T) { } func TestACLEndpoint_GetTokens_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -858,7 +859,7 @@ func TestACLEndpoint_GetTokens_Blocking(t *testing.T) { } func TestACLEndpoint_ListTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -920,7 +921,7 @@ func TestACLEndpoint_ListTokens(t *testing.T) { } func TestACLEndpoint_ListTokens_PaginationFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.ACLEnabled = true }) @@ -1108,7 +1109,7 @@ func TestACLEndpoint_ListTokens_PaginationFiltering(t *testing.T) { } func TestACLEndpoint_ListTokens_Order(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.ACLEnabled = true @@ -1200,7 +1201,7 @@ func TestACLEndpoint_ListTokens_Order(t *testing.T) { } func TestACLEndpoint_ListTokens_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1261,7 +1262,7 @@ func TestACLEndpoint_ListTokens_Blocking(t *testing.T) { } func TestACLEndpoint_DeleteTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1288,7 +1289,7 @@ func TestACLEndpoint_DeleteTokens(t *testing.T) { } func TestACLEndpoint_DeleteTokens_WithNonexistentToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -1315,7 +1316,7 @@ func TestACLEndpoint_DeleteTokens_WithNonexistentToken(t *testing.T) { } func TestACLEndpoint_Bootstrap(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.ACLEnabled = true }) @@ -1350,7 +1351,7 @@ func TestACLEndpoint_Bootstrap(t *testing.T) { } func TestACLEndpoint_Bootstrap_Reset(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir := tmpDir(t) defer os.RemoveAll(dir) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -1412,7 +1413,7 @@ func TestACLEndpoint_Bootstrap_Reset(t *testing.T) { } func TestACLEndpoint_UpsertTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1469,7 +1470,7 @@ func TestACLEndpoint_UpsertTokens(t *testing.T) { } func TestACLEndpoint_UpsertTokens_Invalid(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1497,7 +1498,7 @@ func 
TestACLEndpoint_UpsertTokens_Invalid(t *testing.T) { } func TestACLEndpoint_ResolveToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -1529,7 +1530,7 @@ func TestACLEndpoint_ResolveToken(t *testing.T) { } func TestACLEndpoint_OneTimeToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() diff --git a/nomad/acl_test.go b/nomad/acl_test.go index 43337295b..867150639 100644 --- a/nomad/acl_test.go +++ b/nomad/acl_test.go @@ -5,6 +5,7 @@ import ( lru "github.com/hashicorp/golang-lru" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -14,7 +15,7 @@ import ( ) func TestResolveACLToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create mock state store and cache state := state.TestStateStore(t) @@ -93,7 +94,7 @@ func TestResolveACLToken(t *testing.T) { } func TestResolveACLToken_LeaderToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -109,7 +110,7 @@ func TestResolveACLToken_LeaderToken(t *testing.T) { } func TestResolveSecretToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() diff --git a/nomad/alloc_endpoint_test.go b/nomad/alloc_endpoint_test.go index 9efdc06bb..259a6311d 100644 --- a/nomad/alloc_endpoint_test.go +++ b/nomad/alloc_endpoint_test.go @@ -6,19 +6,19 @@ import ( "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAllocEndpoint_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -92,7 +92,7 @@ func TestAllocEndpoint_List(t *testing.T) { } func TestAllocEndpoint_List_PaginationFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -313,7 +313,7 @@ func TestAllocEndpoint_List_PaginationFiltering(t *testing.T) { } func TestAllocEndpoint_List_order(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -401,7 +401,7 @@ func TestAllocEndpoint_List_order(t *testing.T) { } func TestAllocEndpoint_List_Fields(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -502,7 +502,7 @@ func TestAllocEndpoint_List_Fields(t *testing.T) { } func TestAllocEndpoint_List_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -559,7 +559,7 @@ func TestAllocEndpoint_List_ACL(t *testing.T) { } func TestAllocEndpoint_List_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -637,7 +637,7 @@ func TestAllocEndpoint_List_Blocking(t *testing.T) { // TestAllocEndpoint_List_AllNamespaces_OSS asserts that server // returns all allocations across namespaces. 
func TestAllocEndpoint_List_AllNamespaces_OSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -718,7 +718,7 @@ func TestAllocEndpoint_List_AllNamespaces_OSS(t *testing.T) { } func TestAllocEndpoint_GetAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -759,7 +759,7 @@ func TestAllocEndpoint_GetAlloc(t *testing.T) { } func TestAllocEndpoint_GetAlloc_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -870,7 +870,7 @@ func TestAllocEndpoint_GetAlloc_ACL(t *testing.T) { } func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -926,7 +926,7 @@ func TestAllocEndpoint_GetAlloc_Blocking(t *testing.T) { } func TestAllocEndpoint_GetAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -974,7 +974,7 @@ func TestAllocEndpoint_GetAllocs(t *testing.T) { } func TestAllocEndpoint_GetAllocs_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1030,7 +1030,7 @@ func TestAllocEndpoint_GetAllocs_Blocking(t *testing.T) { } func TestAllocEndpoint_UpdateDesiredTransition(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, _, cleanupS1 := TestACLServer(t, nil) @@ -1114,7 +1114,7 @@ func TestAllocEndpoint_UpdateDesiredTransition(t *testing.T) { } func TestAllocEndpoint_Stop_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, _, cleanupS1 := TestACLServer(t, nil) @@ -1176,7 +1176,7 @@ func TestAllocEndpoint_Stop_ACL(t *testing.T) { } func TestAllocEndpoint_List_AllNamespaces_ACL_OSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() diff --git a/nomad/autopilot_test.go b/nomad/autopilot_test.go index 6e8dae543..8be118a58 100644 --- a/nomad/autopilot_test.go +++ b/nomad/autopilot_test.go @@ -1,13 +1,13 @@ package nomad import ( + "fmt" "testing" "time" - "fmt" - "github.com/hashicorp/consul/agent/consul/autopilot" "github.com/hashicorp/consul/sdk/testutil/retry" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/hashicorp/raft" "github.com/hashicorp/serf/serf" @@ -67,7 +67,7 @@ func wantRaft(servers []*Server) error { } func TestAutopilot_CleanupDeadServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("raft_v2", func(t *testing.T) { testCleanupDeadServer(t, 2) }) t.Run("raft_v3", func(t *testing.T) { testCleanupDeadServer(t, 3) }) } @@ -143,7 +143,7 @@ func testCleanupDeadServer(t *testing.T, raftVersion int) { } func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := func(c *Config) { c.BootstrapExpect = 5 @@ -193,7 +193,7 @@ func TestAutopilot_CleanupDeadServerPeriodic(t *testing.T) { } func TestAutopilot_RollingUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := func(c *Config) { c.BootstrapExpect = 3 @@ -270,7 +270,7 @@ func TestAutopilot_RollingUpdate(t *testing.T) { func TestAutopilot_CleanupStaleRaftServer(t *testing.T) { t.Skip("TestAutopilot_CleanupDeadServer is very flaky, removing it for now") - t.Parallel() + ci.Parallel(t) conf := func(c *Config) { c.BootstrapExpect = 3 @@ -319,7 +319,7 @@ func TestAutopilot_CleanupStaleRaftServer(t *testing.T) { } func TestAutopilot_PromoteNonVoter(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 3 diff --git a/nomad/blocked_evals_stats_test.go b/nomad/blocked_evals_stats_test.go index 235e451c1..7e6fcd525 100644 --- a/nomad/blocked_evals_stats_test.go +++ b/nomad/blocked_evals_stats_test.go @@ -8,6 +8,7 @@ import ( "testing/quick" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" ) @@ -98,7 +99,7 @@ func clearTimestampFromBlockedResourceStats(b *BlockedResourcesStats) { // TestBlockedEvalsStats_BlockedResources generates random evals and processes // them using the expected code paths and a manual check of the expected result. func TestBlockedEvalsStats_BlockedResources(t *testing.T) { - t.Parallel() + ci.Parallel(t) blocked, _ := testBlockedEvals(t) // evalHistory stores all evals generated during the test. diff --git a/nomad/blocked_evals_test.go b/nomad/blocked_evals_test.go index 244308fba..cb96a6f89 100644 --- a/nomad/blocked_evals_test.go +++ b/nomad/blocked_evals_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -21,7 +22,7 @@ func testBlockedEvals(t *testing.T) (*BlockedEvals, *EvalBroker) { } func TestBlockedEvals_Block_Disabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -40,7 +41,7 @@ func TestBlockedEvals_Block_Disabled(t *testing.T) { } func TestBlockedEvals_Block_SameJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -60,7 +61,7 @@ func TestBlockedEvals_Block_SameJob(t *testing.T) { } func TestBlockedEvals_Block_Quota(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -78,7 +79,7 @@ func TestBlockedEvals_Block_Quota(t *testing.T) { } func TestBlockedEvals_Block_PriorUnblocks(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -101,7 +102,7 @@ func TestBlockedEvals_Block_PriorUnblocks(t *testing.T) { } func TestBlockedEvals_GetDuplicates(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -163,7 +164,7 @@ func TestBlockedEvals_GetDuplicates(t *testing.T) { } func TestBlockedEvals_UnblockEscaped(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -210,7 +211,7 @@ func requireBlockedEvalsEnqueued(t *testing.T, blocked *BlockedEvals, broker *Ev } func TestBlockedEvals_UnblockEligible(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -231,7 +232,7 @@ func TestBlockedEvals_UnblockEligible(t *testing.T) { } func TestBlockedEvals_UnblockIneligible(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -275,7 +276,7 @@ func TestBlockedEvals_UnblockIneligible(t *testing.T) { } func TestBlockedEvals_UnblockUnknown(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -298,7 +299,7 @@ func TestBlockedEvals_UnblockUnknown(t *testing.T) { } func TestBlockedEvals_UnblockEligible_Quota(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := 
testBlockedEvals(t) @@ -319,7 +320,7 @@ func TestBlockedEvals_UnblockEligible_Quota(t *testing.T) { } func TestBlockedEvals_UnblockIneligible_Quota(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -363,7 +364,7 @@ func TestBlockedEvals_UnblockIneligible_Quota(t *testing.T) { } func TestBlockedEvals_Reblock(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -404,7 +405,7 @@ func TestBlockedEvals_Reblock(t *testing.T) { // Test the block case in which the eval should be immediately unblocked since // it is escaped and old func TestBlockedEvals_Block_ImmediateUnblock_Escaped(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -432,7 +433,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_Escaped(t *testing.T) { // there is an unblock on an unseen class that occurred while it was in the // scheduler func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_After(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -460,7 +461,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_After(t *testing.T) { // there is an unblock on an unseen class that occurred before it was in the // scheduler func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_Before(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -485,7 +486,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_UnseenClass_Before(t *testing.T) { // Test the block case in which the eval should be immediately unblocked since // it a class it is eligible for has been unblocked func TestBlockedEvals_Block_ImmediateUnblock_SeenClass(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -512,7 +513,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_SeenClass(t *testing.T) { // Test the block case in which the eval should be immediately unblocked since // it a quota has changed that it is using func TestBlockedEvals_Block_ImmediateUnblock_Quota(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -538,7 +539,7 @@ func TestBlockedEvals_Block_ImmediateUnblock_Quota(t *testing.T) { } func TestBlockedEvals_UnblockFailed(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -584,7 +585,7 @@ func TestBlockedEvals_UnblockFailed(t *testing.T) { } func TestBlockedEvals_Untrack(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -612,7 +613,7 @@ func TestBlockedEvals_Untrack(t *testing.T) { } func TestBlockedEvals_Untrack_Quota(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -640,7 +641,7 @@ func TestBlockedEvals_Untrack_Quota(t *testing.T) { } func TestBlockedEvals_UnblockNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, broker := testBlockedEvals(t) @@ -670,7 +671,7 @@ func TestBlockedEvals_UnblockNode(t *testing.T) { } func TestBlockedEvals_SystemUntrack(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) @@ -699,7 +700,7 @@ func TestBlockedEvals_SystemUntrack(t *testing.T) { } func TestBlockedEvals_SystemDisableFlush(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) require := require.New(t) blocked, _ := testBlockedEvals(t) diff --git a/nomad/client_agent_endpoint_test.go b/nomad/client_agent_endpoint_test.go index e3c11ee83..2b7578600 100644 --- a/nomad/client_agent_endpoint_test.go +++ b/nomad/client_agent_endpoint_test.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" sframer "github.com/hashicorp/nomad/client/lib/streamframer" @@ -27,7 +28,7 @@ import ( ) func TestMonitor_Monitor_Remote_Client(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server and client @@ -124,7 +125,7 @@ OUTER: } func TestMonitor_Monitor_RemoteServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) foreignRegion := "foo" // start servers @@ -325,7 +326,7 @@ func TestMonitor_Monitor_RemoteServer(t *testing.T) { } func TestMonitor_MonitorServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server @@ -419,7 +420,7 @@ OUTER: } func TestMonitor_Monitor_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server @@ -524,7 +525,7 @@ func TestMonitor_Monitor_ACL(t *testing.T) { } func TestAgentProfile_RemoteClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server and client @@ -648,7 +649,7 @@ func TestAgentProfile_RemoteRegion(t *testing.T) { } func TestAgentProfile_Server(t *testing.T) { - + ci.Parallel(t) // start servers s1, cleanup := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 @@ -750,7 +751,7 @@ func TestAgentProfile_Server(t *testing.T) { } func TestAgentProfile_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server @@ -809,7 +810,7 @@ func TestAgentProfile_ACL(t *testing.T) { } func TestAgentHost_Server(t *testing.T) { - t.Parallel() + ci.Parallel(t) // start servers s1, cleanup := TestServer(t, func(c *Config) { @@ -932,7 +933,7 @@ func TestAgentHost_Server(t *testing.T) { } func TestAgentHost_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // start server s, root, cleanupS := TestACLServer(t, nil) @@ -989,7 +990,7 @@ func TestAgentHost_ACL(t *testing.T) { } func TestAgentHost_ACLDebugRequired(t *testing.T) { - t.Parallel() + ci.Parallel(t) // start server s, cleanupS := TestServer(t, func(c *Config) { diff --git a/nomad/client_alloc_endpoint_test.go b/nomad/client_alloc_endpoint_test.go index 24916904e..68cbfe276 100644 --- a/nomad/client_alloc_endpoint_test.go +++ b/nomad/client_alloc_endpoint_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" @@ -25,7 +26,7 @@ import ( ) func TestClientAllocations_GarbageCollectAll_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -65,7 +66,7 @@ func TestClientAllocations_GarbageCollectAll_Local(t *testing.T) { } func TestClientAllocations_GarbageCollectAll_Local_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -125,7 +126,7 @@ func TestClientAllocations_GarbageCollectAll_Local_ACL(t *testing.T) { } func 
TestClientAllocations_GarbageCollectAll_NoNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -148,7 +149,7 @@ func TestClientAllocations_GarbageCollectAll_NoNode(t *testing.T) { } func TestClientAllocations_GarbageCollectAll_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and fake an old client @@ -181,7 +182,7 @@ func TestClientAllocations_GarbageCollectAll_OldNode(t *testing.T) { } func TestClientAllocations_GarbageCollectAll_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -241,7 +242,7 @@ func TestClientAllocations_GarbageCollectAll_Remote(t *testing.T) { } func TestClientAllocations_GarbageCollect_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and fake an old client @@ -281,7 +282,7 @@ func TestClientAllocations_GarbageCollect_OldNode(t *testing.T) { } func TestClientAllocations_GarbageCollect_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -363,7 +364,7 @@ func TestClientAllocations_GarbageCollect_Local(t *testing.T) { } func TestClientAllocations_GarbageCollect_Local_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -429,7 +430,7 @@ func TestClientAllocations_GarbageCollect_Local_ACL(t *testing.T) { } func TestClientAllocations_GarbageCollect_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -532,7 +533,7 @@ func TestClientAllocations_GarbageCollect_Remote(t *testing.T) { } func TestClientAllocations_Stats_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and fake an old client @@ -571,7 +572,7 @@ func TestClientAllocations_Stats_OldNode(t *testing.T) { } func TestClientAllocations_Stats_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -653,7 +654,7 @@ func TestClientAllocations_Stats_Local(t *testing.T) { } func TestClientAllocations_Stats_Local_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -719,7 +720,7 @@ func TestClientAllocations_Stats_Local_ACL(t *testing.T) { } func TestClientAllocations_Stats_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -810,7 +811,7 @@ func TestClientAllocations_Stats_Remote(t *testing.T) { } func TestClientAllocations_Restart_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -916,7 +917,7 @@ func TestClientAllocations_Restart_Local(t *testing.T) { } func TestClientAllocations_Restart_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1009,6 +1010,8 @@ func TestClientAllocations_Restart_Remote(t *testing.T) { } func TestClientAllocations_Restart_ACL(t *testing.T) { + ci.Parallel(t) + // Start a server s, root, cleanupS := TestACLServer(t, nil) defer cleanupS() @@ -1075,7 +1078,7 @@ func TestClientAllocations_Restart_ACL(t *testing.T) { // TestAlloc_ExecStreaming asserts that exec task requests are forwarded // to appropriate server or remote regions func TestAlloc_ExecStreaming(t *testing.T) { - t.Parallel() + ci.Parallel(t) ////// Nomad clusters topology - not specific to 
test localServer, cleanupLS := TestServer(t, func(c *Config) { diff --git a/nomad/client_csi_endpoint_test.go b/nomad/client_csi_endpoint_test.go index 45c288f9a..9291b2fcf 100644 --- a/nomad/client_csi_endpoint_test.go +++ b/nomad/client_csi_endpoint_test.go @@ -8,6 +8,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" @@ -97,7 +98,7 @@ func (c *MockClientCSI) NodeDetachVolume(req *cstructs.ClientCSINodeDetachVolume } func TestClientCSIController_AttachVolume_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -113,7 +114,7 @@ func TestClientCSIController_AttachVolume_Local(t *testing.T) { } func TestClientCSIController_AttachVolume_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -129,7 +130,7 @@ func TestClientCSIController_AttachVolume_Forwarded(t *testing.T) { } func TestClientCSIController_DetachVolume_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -145,7 +146,7 @@ func TestClientCSIController_DetachVolume_Local(t *testing.T) { } func TestClientCSIController_DetachVolume_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -161,7 +162,7 @@ func TestClientCSIController_DetachVolume_Forwarded(t *testing.T) { } func TestClientCSIController_ValidateVolume_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -178,7 +179,7 @@ func TestClientCSIController_ValidateVolume_Local(t *testing.T) { } func TestClientCSIController_ValidateVolume_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -195,7 +196,7 @@ func TestClientCSIController_ValidateVolume_Forwarded(t *testing.T) { } func TestClientCSIController_CreateVolume_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -211,7 +212,7 @@ func TestClientCSIController_CreateVolume_Local(t *testing.T) { } func TestClientCSIController_CreateVolume_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -227,7 +228,7 @@ func TestClientCSIController_CreateVolume_Forwarded(t *testing.T) { } func TestClientCSIController_DeleteVolume_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -244,7 +245,7 @@ func TestClientCSIController_DeleteVolume_Local(t *testing.T) { } func TestClientCSIController_DeleteVolume_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -261,7 +262,7 @@ func TestClientCSIController_DeleteVolume_Forwarded(t *testing.T) { } func TestClientCSIController_ListVolumes_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -277,7 +278,7 @@ func TestClientCSIController_ListVolumes_Local(t *testing.T) { } func 
TestClientCSIController_ListVolumes_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -293,7 +294,7 @@ func TestClientCSIController_ListVolumes_Forwarded(t *testing.T) { } func TestClientCSIController_CreateSnapshot_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -309,7 +310,7 @@ func TestClientCSIController_CreateSnapshot_Local(t *testing.T) { } func TestClientCSIController_CreateSnapshot_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -325,7 +326,7 @@ func TestClientCSIController_CreateSnapshot_Forwarded(t *testing.T) { } func TestClientCSIController_DeleteSnapshot_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -342,7 +343,7 @@ func TestClientCSIController_DeleteSnapshot_Local(t *testing.T) { } func TestClientCSIController_DeleteSnapshot_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -359,7 +360,7 @@ func TestClientCSIController_DeleteSnapshot_Forwarded(t *testing.T) { } func TestClientCSIController_ListSnapshots_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupLocal(t) defer cleanup() @@ -375,7 +376,7 @@ func TestClientCSIController_ListSnapshots_Local(t *testing.T) { } func TestClientCSIController_ListSnapshots_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) codec, cleanup := setupForward(t) defer cleanup() @@ -391,7 +392,7 @@ func TestClientCSIController_ListSnapshots_Forwarded(t *testing.T) { } func TestClientCSI_NodeForControllerPlugin(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) {}) testutil.WaitForLeader(t, srv.RPC) defer shutdown() diff --git a/nomad/client_fs_endpoint_test.go b/nomad/client_fs_endpoint_test.go index fc5a68e48..709e0d77b 100644 --- a/nomad/client_fs_endpoint_test.go +++ b/nomad/client_fs_endpoint_test.go @@ -11,6 +11,7 @@ import ( codec "github.com/hashicorp/go-msgpack/codec" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" @@ -22,7 +23,7 @@ import ( ) func TestClientFS_List_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -106,7 +107,7 @@ func TestClientFS_List_Local(t *testing.T) { } func TestClientFS_List_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -173,7 +174,7 @@ func TestClientFS_List_ACL(t *testing.T) { } func TestClientFS_List_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -267,7 +268,7 @@ func TestClientFS_List_Remote(t *testing.T) { } func TestClientFS_Stat_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -298,7 +299,7 @@ func TestClientFS_Stat_OldNode(t *testing.T) { } func TestClientFS_Stat_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -382,7 +383,7 @@ func 
TestClientFS_Stat_Local(t *testing.T) { } func TestClientFS_Stat_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -449,7 +450,7 @@ func TestClientFS_Stat_ACL(t *testing.T) { } func TestClientFS_Stat_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -543,7 +544,7 @@ func TestClientFS_Stat_Remote(t *testing.T) { } func TestClientFS_Streaming_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -614,7 +615,7 @@ OUTER: } func TestClientFS_Streaming_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -730,7 +731,7 @@ func TestClientFS_Streaming_ACL(t *testing.T) { } func TestClientFS_Streaming_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -862,7 +863,7 @@ OUTER: } func TestClientFS_Streaming_Local_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1000,7 +1001,7 @@ OUTER: } func TestClientFS_Streaming_Remote_Server(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1148,7 +1149,7 @@ OUTER: } func TestClientFS_Streaming_Remote_Region(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1292,7 +1293,7 @@ OUTER: } func TestClientFS_Logs_NoAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1363,7 +1364,7 @@ OUTER: } func TestClientFS_Logs_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -1443,7 +1444,7 @@ OUTER: } func TestClientFS_Logs_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Start a server s, root, cleanupS := TestACLServer(t, nil) @@ -1559,7 +1560,7 @@ func TestClientFS_Logs_ACL(t *testing.T) { } func TestClientFS_Logs_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1692,7 +1693,7 @@ OUTER: } func TestClientFS_Logs_Local_Follow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1831,7 +1832,7 @@ OUTER: } func TestClientFS_Logs_Remote_Server(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -1980,7 +1981,7 @@ OUTER: } func TestClientFS_Logs_Remote_Region(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client diff --git a/nomad/client_rpc_test.go b/nomad/client_rpc_test.go index a65344028..efb3ce658 100644 --- a/nomad/client_rpc_test.go +++ b/nomad/client_rpc_test.go @@ -5,6 +5,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/helper/uuid" @@ -28,7 +29,7 @@ func (n namedConnWrapper) LocalAddr() net.Addr { } func TestServer_removeNodeConn_differentAddrs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -85,7 +86,7 @@ func TestServer_removeNodeConn_differentAddrs(t *testing.T) { } func TestServerWithNodeConn_NoPath(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -107,7 +108,7 @@ func 
TestServerWithNodeConn_NoPath(t *testing.T) { } func TestServerWithNodeConn_NoPath_Region(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -121,7 +122,7 @@ func TestServerWithNodeConn_NoPath_Region(t *testing.T) { } func TestServerWithNodeConn_Path(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -149,7 +150,7 @@ func TestServerWithNodeConn_Path(t *testing.T) { } func TestServerWithNodeConn_Path_Region(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -175,7 +176,7 @@ func TestServerWithNodeConn_Path_Region(t *testing.T) { } func TestServerWithNodeConn_Path_Newest(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -211,7 +212,7 @@ func TestServerWithNodeConn_Path_Newest(t *testing.T) { } func TestServerWithNodeConn_PathAndErr(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -247,7 +248,7 @@ func TestServerWithNodeConn_PathAndErr(t *testing.T) { } func TestServerWithNodeConn_NoPathAndErr(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -281,7 +282,7 @@ func TestServerWithNodeConn_NoPathAndErr(t *testing.T) { } func TestNodeStreamingRpc_badEndpoint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() diff --git a/nomad/client_stats_endpoint_test.go b/nomad/client_stats_endpoint_test.go index f36c838d2..de6da4bec 100644 --- a/nomad/client_stats_endpoint_test.go +++ b/nomad/client_stats_endpoint_test.go @@ -6,6 +6,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" @@ -17,7 +18,7 @@ import ( ) func TestClientStats_Stats_Local(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -58,7 +59,7 @@ func TestClientStats_Stats_Local(t *testing.T) { } func TestClientStats_Stats_Local_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -118,7 +119,7 @@ func TestClientStats_Stats_Local_ACL(t *testing.T) { } func TestClientStats_Stats_NoNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client @@ -142,7 +143,7 @@ func TestClientStats_Stats_NoNode(t *testing.T) { } func TestClientStats_Stats_OldNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server @@ -169,7 +170,7 @@ func TestClientStats_Stats_OldNode(t *testing.T) { } func TestClientStats_Stats_Remote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Start a server and client diff --git a/nomad/consul_oss_test.go b/nomad/consul_oss_test.go index 520467d71..c1cd375b4 100644 --- a/nomad/consul_oss_test.go +++ b/nomad/consul_oss_test.go @@ -8,6 +8,7 @@ import ( "errors" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" @@ -15,13 +16,12 @@ import ( ) func TestConsulACLsAPI_CheckPermissions_oss(t *testing.T) 
{ + ci.Parallel(t) // In Nomad OSS, CheckPermissions will only receive "" as input for the // namespace parameter - as the ConsulUsage map from namespace to usages will // always contain one key - the empty string. - t.Parallel() - try := func(t *testing.T, namespace string, usage *structs.ConsulUsage, secretID string, exp error) { logger := testlog.HCLogger(t) aclAPI := consul.NewMockACLsAPI(logger) diff --git a/nomad/consul_policy_oss_test.go b/nomad/consul_policy_oss_test.go index 464bfcf09..9936c4fe9 100644 --- a/nomad/consul_policy_oss_test.go +++ b/nomad/consul_policy_oss_test.go @@ -7,13 +7,14 @@ import ( "testing" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper/testlog" "github.com/stretchr/testify/require" ) func TestConsulACLsAPI_hasSufficientPolicy_oss(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, namespace, task string, token *api.ACLToken, exp bool) { logger := testlog.HCLogger(t) diff --git a/nomad/consul_policy_test.go b/nomad/consul_policy_test.go index 150780d15..c0e648399 100644 --- a/nomad/consul_policy_test.go +++ b/nomad/consul_policy_test.go @@ -4,12 +4,13 @@ import ( "testing" "github.com/hashicorp/consul/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/stretchr/testify/require" ) func TestConsulPolicy_ParseConsulPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, text string, expPolicy *ConsulPolicy, expErr string) { policy, err := parseConsulPolicy(text) @@ -103,7 +104,7 @@ namespace "foo" { } func TestConsulACLsAPI_allowsServiceWrite(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, matches bool, namespace, task string, cp *ConsulPolicy, exp bool) { // If matches is false, the implication is that the consul acl token is in @@ -342,6 +343,8 @@ func TestConsulACLsAPI_allowsServiceWrite(t *testing.T) { } func TestConsulPolicy_isManagementToken(t *testing.T) { + ci.Parallel(t) + aclsAPI := new(consulACLsAPI) t.Run("nil", func(t *testing.T) { @@ -394,6 +397,8 @@ func TestConsulPolicy_isManagementToken(t *testing.T) { } func TestConsulPolicy_namespaceCheck(t *testing.T) { + ci.Parallel(t) + withoutNS := &api.ACLToken{Namespace: ""} withDefault := &api.ACLToken{Namespace: "default"} withOther := &api.ACLToken{Namespace: "other"} @@ -455,6 +460,8 @@ func TestConsulPolicy_namespaceCheck(t *testing.T) { } func TestConsulPolicy_allowKeystoreRead(t *testing.T) { + ci.Parallel(t) + t.Run("empty", func(t *testing.T) { require.False(t, new(ConsulPolicy).allowsKeystoreRead(true, "default")) }) diff --git a/nomad/consul_test.go b/nomad/consul_test.go index f1561d651..5c6b5a834 100644 --- a/nomad/consul_test.go +++ b/nomad/consul_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" @@ -21,7 +22,7 @@ var _ ConsulACLsAPI = (*mockConsulACLsAPI)(nil) var _ ConsulConfigsAPI = (*consulConfigsAPI)(nil) func TestConsulConfigsAPI_SetCE(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, expect error, f func(ConsulConfigsAPI) error) { logger := testlog.HCLogger(t) @@ -140,7 +141,7 @@ func (m *mockConsulACLsAPI) storeForRevocation(accessors []*structs.SITokenAcces } func TestConsulACLsAPI_CreateToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, expErr error) { 
logger := testlog.HCLogger(t) @@ -182,7 +183,7 @@ func TestConsulACLsAPI_CreateToken(t *testing.T) { } func TestConsulACLsAPI_RevokeTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) setup := func(t *testing.T, exp error) (context.Context, ConsulACLsAPI, *structs.SIToken) { logger := testlog.HCLogger(t) @@ -237,7 +238,7 @@ func TestConsulACLsAPI_RevokeTokens(t *testing.T) { } func TestConsulACLsAPI_MarkForRevocation(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) aclAPI := consul.NewMockACLsAPI(logger) @@ -266,7 +267,7 @@ func TestConsulACLsAPI_MarkForRevocation(t *testing.T) { } func TestConsulACLsAPI_bgRetryRevoke(t *testing.T) { - t.Parallel() + ci.Parallel(t) // manually create so the bg daemon does not run, letting us explicitly // call and test bgRetryRevoke @@ -327,7 +328,7 @@ func TestConsulACLsAPI_bgRetryRevoke(t *testing.T) { } func TestConsulACLsAPI_Stop(t *testing.T) { - t.Parallel() + ci.Parallel(t) setup := func(t *testing.T) *consulACLsAPI { logger := testlog.HCLogger(t) diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 095975a31..120c058d0 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -7,6 +7,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -17,7 +18,7 @@ import ( ) func TestCoreScheduler_EvalGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -111,7 +112,7 @@ func TestCoreScheduler_EvalGC(t *testing.T) { // Tests GC behavior on allocations being rescheduled func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -216,7 +217,7 @@ func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) { // Tests GC behavior on stopped job with reschedulable allocs func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -292,7 +293,7 @@ func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) { // An EvalGC should never reap a batch job that has not been stopped func TestCoreScheduler_EvalGC_Batch(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -396,7 +397,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) { // An EvalGC should reap allocations from jobs with an older modify index func TestCoreScheduler_EvalGC_Batch_OldVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -519,7 +520,7 @@ func TestCoreScheduler_EvalGC_Batch_OldVersion(t *testing.T) { // An EvalGC should reap a batch job that has been stopped func TestCoreScheduler_EvalGC_BatchStopped(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -616,7 +617,7 @@ func TestCoreScheduler_EvalGC_BatchStopped(t *testing.T) { } func TestCoreScheduler_EvalGC_Partial(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -733,7 +734,7 @@ func TestCoreScheduler_EvalGC_Partial(t *testing.T) { } func TestCoreScheduler_EvalGC_Force(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, withAcl := range []bool{false, true} { t.Run(fmt.Sprintf("with acl %v", withAcl), 
func(t *testing.T) { require := require.New(t) @@ -817,7 +818,7 @@ func TestCoreScheduler_EvalGC_Force(t *testing.T) { } func TestCoreScheduler_NodeGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, withAcl := range []bool{false, true} { t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) { var server *Server @@ -874,7 +875,7 @@ func TestCoreScheduler_NodeGC(t *testing.T) { } func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -930,7 +931,7 @@ func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) { } func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -988,7 +989,7 @@ func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) { } func TestCoreScheduler_NodeGC_Force(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1032,7 +1033,7 @@ func TestCoreScheduler_NodeGC_Force(t *testing.T) { } func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1156,7 +1157,7 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { } func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1302,7 +1303,7 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { // This test ensures that batch jobs are GC'd in one shot, meaning it all // allocs/evals and job or nothing func TestCoreScheduler_JobGC_OneShot(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1415,7 +1416,7 @@ func TestCoreScheduler_JobGC_OneShot(t *testing.T) { // This test ensures that stopped jobs are GCd func TestCoreScheduler_JobGC_Stopped(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1516,7 +1517,7 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) { } func TestCoreScheduler_JobGC_Force(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, withAcl := range []bool{false, true} { t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) { var server *Server @@ -1588,7 +1589,7 @@ func TestCoreScheduler_JobGC_Force(t *testing.T) { // This test ensures parameterized jobs only get gc'd when stopped func TestCoreScheduler_JobGC_Parameterized(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1668,7 +1669,7 @@ func TestCoreScheduler_JobGC_Parameterized(t *testing.T) { // This test ensures periodic jobs don't get GCd until they are stopped func TestCoreScheduler_JobGC_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1742,7 +1743,7 @@ func TestCoreScheduler_JobGC_Periodic(t *testing.T) { } func TestCoreScheduler_DeploymentGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1793,7 +1794,7 @@ func TestCoreScheduler_DeploymentGC(t *testing.T) { } func TestCoreScheduler_DeploymentGC_Force(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, withAcl := range []bool{false, true} { t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) { var server *Server @@ -1839,7 +1840,7 @@ func TestCoreScheduler_DeploymentGC_Force(t *testing.T) { } func TestCoreScheduler_PartitionEvalReap(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1882,7 +1883,7 @@ func TestCoreScheduler_PartitionEvalReap(t *testing.T) { } func TestCoreScheduler_PartitionDeploymentReap(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1919,7 +1920,7 @@ func TestCoreScheduler_PartitionDeploymentReap(t *testing.T) { } func TestCoreScheduler_PartitionJobReap(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -2197,7 +2198,7 @@ func TestAllocation_GCEligible(t *testing.T) { } func TestCoreScheduler_CSIPluginGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, cleanupSRV := TestServer(t, nil) defer cleanupSRV() @@ -2251,7 +2252,7 @@ func TestCoreScheduler_CSIPluginGC(t *testing.T) { } func TestCoreScheduler_CSIVolumeClaimGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv, shutdown := TestServer(t, func(c *Config) { @@ -2402,7 +2403,7 @@ func TestCoreScheduler_CSIVolumeClaimGC(t *testing.T) { } func TestCoreScheduler_CSIBadState_ClaimGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv, shutdown := TestServer(t, func(c *Config) { @@ -2442,7 +2443,7 @@ func TestCoreScheduler_CSIBadState_ClaimGC(t *testing.T) { } func TestCoreScheduler_FailLoop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv, cleanupSrv := TestServer(t, func(c *Config) { diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index c8591e38e..6e91c4e78 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -7,6 +7,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" cconfig "github.com/hashicorp/nomad/client/config" cstructs "github.com/hashicorp/nomad/client/structs" @@ -19,7 +20,7 @@ import ( ) func TestCSIVolumeEndpoint_Get(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -65,7 +66,7 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) { } func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -116,7 +117,7 @@ func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) { } func TestCSIVolumeEndpoint_Register(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -216,7 +217,7 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { // are honored only if the volume exists, the mode is permitted, and the volume // is schedulable according to its count of claims. func TestCSIVolumeEndpoint_Claim(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -372,7 +373,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { // TestCSIVolumeEndpoint_ClaimWithController exercises the VolumeClaim RPC // when a controller is required. 
func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.ACLEnabled = true c.NumSchedulers = 0 // Prevent automatic dequeue @@ -458,7 +459,7 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { } func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) defer shutdown() testutil.WaitForLeader(t, srv.RPC) @@ -590,7 +591,7 @@ func TestCSIVolumeEndpoint_Unpublish(t *testing.T) { } func TestCSIVolumeEndpoint_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -672,7 +673,7 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { } func TestCSIVolumeEndpoint_ListAllNamespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -754,7 +755,7 @@ func TestCSIVolumeEndpoint_ListAllNamespaces(t *testing.T) { } func TestCSIVolumeEndpoint_List_PaginationFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -948,7 +949,7 @@ func TestCSIVolumeEndpoint_List_PaginationFiltering(t *testing.T) { } func TestCSIVolumeEndpoint_Create(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1093,7 +1094,7 @@ func TestCSIVolumeEndpoint_Create(t *testing.T) { } func TestCSIVolumeEndpoint_Delete(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1207,7 +1208,7 @@ func TestCSIVolumeEndpoint_Delete(t *testing.T) { } func TestCSIVolumeEndpoint_ListExternal(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1312,7 +1313,7 @@ func TestCSIVolumeEndpoint_ListExternal(t *testing.T) { } func TestCSIVolumeEndpoint_CreateSnapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1411,7 +1412,7 @@ func TestCSIVolumeEndpoint_CreateSnapshot(t *testing.T) { } func TestCSIVolumeEndpoint_DeleteSnapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1491,7 +1492,7 @@ func TestCSIVolumeEndpoint_DeleteSnapshot(t *testing.T) { } func TestCSIVolumeEndpoint_ListSnapshots(t *testing.T) { - t.Parallel() + ci.Parallel(t) var err error srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1587,7 +1588,7 @@ func TestCSIVolumeEndpoint_ListSnapshots(t *testing.T) { } func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -1659,7 +1660,7 @@ func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { } func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, nil) defer shutdown() testutil.WaitForLeader(t, srv.RPC) @@ -1736,7 +1737,7 @@ func 
TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { } func TestCSIPluginEndpoint_DeleteViaGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -1817,6 +1818,8 @@ func TestCSIPluginEndpoint_DeleteViaGC(t *testing.T) { } func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { + ci.Parallel(t) + srv, shutdown := TestServer(t, func(c *Config) {}) defer shutdown() testutil.WaitForLeader(t, srv.RPC) diff --git a/nomad/deployment_endpoint_test.go b/nomad/deployment_endpoint_test.go index 08982aeda..e58c62da3 100644 --- a/nomad/deployment_endpoint_test.go +++ b/nomad/deployment_endpoint_test.go @@ -7,6 +7,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -17,7 +18,7 @@ import ( ) func TestDeploymentEndpoint_GetDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -49,7 +50,7 @@ func TestDeploymentEndpoint_GetDeployment(t *testing.T) { } func TestDeploymentEndpoint_GetDeployment_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -103,7 +104,7 @@ func TestDeploymentEndpoint_GetDeployment_ACL(t *testing.T) { } func TestDeploymentEndpoint_GetDeployment_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -153,7 +154,7 @@ func TestDeploymentEndpoint_GetDeployment_Blocking(t *testing.T) { } func TestDeploymentEndpoint_Fail(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -203,7 +204,7 @@ func TestDeploymentEndpoint_Fail(t *testing.T) { } func TestDeploymentEndpoint_Fail_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -279,7 +280,7 @@ func TestDeploymentEndpoint_Fail_ACL(t *testing.T) { } func TestDeploymentEndpoint_Fail_Rollback(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -357,7 +358,7 @@ func TestDeploymentEndpoint_Fail_Rollback(t *testing.T) { } func TestDeploymentEndpoint_Pause(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -400,7 +401,7 @@ func TestDeploymentEndpoint_Pause(t *testing.T) { } func TestDeploymentEndpoint_Pause_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -469,7 +470,7 @@ func TestDeploymentEndpoint_Pause_ACL(t *testing.T) { } func TestDeploymentEndpoint_Promote(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -534,7 +535,7 @@ func TestDeploymentEndpoint_Promote(t *testing.T) { } func TestDeploymentEndpoint_Promote_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -625,7 +626,7 @@ func TestDeploymentEndpoint_Promote_ACL(t *testing.T) { } 
func TestDeploymentEndpoint_SetAllocHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -693,7 +694,7 @@ func TestDeploymentEndpoint_SetAllocHealth(t *testing.T) { } func TestDeploymentEndpoint_SetAllocHealth_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -787,7 +788,7 @@ func TestDeploymentEndpoint_SetAllocHealth_ACL(t *testing.T) { } func TestDeploymentEndpoint_SetAllocHealth_Rollback(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -877,7 +878,7 @@ func TestDeploymentEndpoint_SetAllocHealth_Rollback(t *testing.T) { // tests rollback upon alloc health failure to job with identical spec does not succeed func TestDeploymentEndpoint_SetAllocHealth_NoRollback(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -964,7 +965,7 @@ func TestDeploymentEndpoint_SetAllocHealth_NoRollback(t *testing.T) { } func TestDeploymentEndpoint_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1033,7 +1034,7 @@ func TestDeploymentEndpoint_List(t *testing.T) { } func TestDeploymentEndpoint_List_order(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1121,7 +1122,7 @@ func TestDeploymentEndpoint_List_order(t *testing.T) { } func TestDeploymentEndpoint_List_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1190,7 +1191,7 @@ func TestDeploymentEndpoint_List_ACL(t *testing.T) { } func TestDeploymentEndpoint_List_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1248,7 +1249,7 @@ func TestDeploymentEndpoint_List_Blocking(t *testing.T) { } func TestDeploymentEndpoint_List_Pagination(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -1464,7 +1465,7 @@ func TestDeploymentEndpoint_List_Pagination(t *testing.T) { } func TestDeploymentEndpoint_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1502,7 +1503,7 @@ func TestDeploymentEndpoint_Allocations(t *testing.T) { } func TestDeploymentEndpoint_Allocations_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1577,7 +1578,7 @@ func TestDeploymentEndpoint_Allocations_ACL(t *testing.T) { } func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1645,7 +1646,7 @@ func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) { } func TestDeploymentEndpoint_Reap(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() diff --git a/nomad/deploymentwatcher/deployments_watcher_test.go b/nomad/deploymentwatcher/deployments_watcher_test.go index 50835b74c..467ffcca2 100644 --- a/nomad/deploymentwatcher/deployments_watcher_test.go +++ b/nomad/deploymentwatcher/deployments_watcher_test.go @@ -6,6 +6,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + 
"github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -29,7 +30,7 @@ func defaultTestDeploymentWatcher(t *testing.T) (*Watcher, *mockBackend) { // Tests that the watcher properly watches for deployments and reconciles them func TestWatcher_WatchDeployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -84,7 +85,7 @@ func TestWatcher_WatchDeployments(t *testing.T) { // Tests that calls against an unknown deployment fail func TestWatcher_UnknownDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -141,7 +142,7 @@ func TestWatcher_UnknownDeployment(t *testing.T) { // Test setting an unknown allocation's health func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -186,7 +187,7 @@ func TestWatcher_SetAllocHealth_Unknown(t *testing.T) { // Test setting allocation health func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -231,7 +232,7 @@ func TestWatcher_SetAllocHealth_Healthy(t *testing.T) { // Test setting allocation unhealthy func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -283,7 +284,7 @@ func TestWatcher_SetAllocHealth_Unhealthy(t *testing.T) { // Test setting allocation unhealthy and that there should be a rollback func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -350,7 +351,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_Rollback(t *testing.T) { // Test setting allocation unhealthy on job with identical spec and there should be no rollback func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -415,7 +416,7 @@ func TestWatcher_SetAllocHealth_Unhealthy_NoRollback(t *testing.T) { // Test promoting a deployment func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -475,7 +476,7 @@ func TestWatcher_PromoteDeployment_HealthyCanaries(t *testing.T) { // Test promoting a deployment with unhealthy canaries func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -531,7 +532,7 @@ func TestWatcher_PromoteDeployment_UnhealthyCanaries(t *testing.T) { } func TestWatcher_AutoPromoteDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) w, m := defaultTestDeploymentWatcher(t) now := time.Now() @@ -697,7 +698,7 @@ func TestWatcher_AutoPromoteDeployment(t *testing.T) { // Test pausing a deployment that is running func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -739,7 +740,7 @@ func TestWatcher_PauseDeployment_Pause_Running(t *testing.T) { // Test pausing a deployment that is paused func TestWatcher_PauseDeployment_Pause_Paused(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -782,7 +783,7 @@ func TestWatcher_PauseDeployment_Pause_Paused(t *testing.T) { // Test unpausing a deployment that is paused func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -823,7 +824,7 @@ func TestWatcher_PauseDeployment_Unpause_Paused(t *testing.T) { // Test unpausing a deployment that is running func TestWatcher_PauseDeployment_Unpause_Running(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -863,7 +864,7 @@ func TestWatcher_PauseDeployment_Unpause_Running(t *testing.T) { // Test failing a deployment that is running func TestWatcher_FailDeployment_Running(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := defaultTestDeploymentWatcher(t) @@ -903,7 +904,7 @@ func TestWatcher_FailDeployment_Running(t *testing.T) { // Tests that the watcher properly watches for allocation changes and takes the // proper actions func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1023,7 +1024,7 @@ func TestDeploymentWatcher_Watch_NoProgressDeadline(t *testing.T) { } func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1099,7 +1100,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline(t *testing.T) { // Test that progress deadline handling works when there are multiple groups func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1205,7 +1206,7 @@ func TestDeploymentWatcher_ProgressCutoff(t *testing.T) { // Test that we will allow the progress deadline to be reached when the canaries // are healthy but we haven't promoted func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1287,7 +1288,7 @@ func TestDeploymentWatcher_Watch_ProgressDeadline_Canaries(t *testing.T) { // Test that a promoted deployment with alloc healthy updates create // evals to move the deployment forward func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1376,7 +1377,7 @@ func TestDeploymentWatcher_PromotedCanary_UpdatedAllocs(t *testing.T) { } func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) mtype := structs.MsgTypeTestSetup @@ -1584,7 +1585,7 @@ func TestDeploymentWatcher_ProgressDeadline_LatePromote(t *testing.T) { // Test scenario where deployment initially has no progress deadline // After the deployment is updated, a failed alloc's DesiredTransition should be set func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1646,7 +1647,7 @@ func TestDeploymentWatcher_Watch_StartWithoutProgressDeadline(t *testing.T) 
{ // Tests that the watcher fails rollback when the spec hasn't changed func TestDeploymentWatcher_RollbackFailed(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Millisecond) @@ -1756,7 +1757,7 @@ func TestDeploymentWatcher_RollbackFailed(t *testing.T) { // Test allocation updates and evaluation creation is batched between watchers func TestWatcher_BatchAllocUpdates(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, m := testDeploymentWatcher(t, 1000.0, 1*time.Second) diff --git a/nomad/drainer/drain_heap_test.go b/nomad/drainer/drain_heap_test.go index 930801d92..22ed77d96 100644 --- a/nomad/drainer/drain_heap_test.go +++ b/nomad/drainer/drain_heap_test.go @@ -5,19 +5,20 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" ) func TestDeadlineHeap_Interface(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 1*time.Second) require.Implements((*DrainDeadlineNotifier)(nil), h) } func TestDeadlineHeap_WatchAndGet(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 1*time.Second) @@ -39,7 +40,7 @@ func TestDeadlineHeap_WatchAndGet(t *testing.T) { } func TestDeadlineHeap_WatchThenUpdateAndGet(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 1*time.Second) @@ -66,7 +67,7 @@ func TestDeadlineHeap_WatchThenUpdateAndGet(t *testing.T) { } func TestDeadlineHeap_MultiwatchAndDelete(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 1*time.Second) @@ -94,7 +95,7 @@ func TestDeadlineHeap_MultiwatchAndDelete(t *testing.T) { } func TestDeadlineHeap_WatchCoalesce(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 100*time.Millisecond) @@ -150,7 +151,7 @@ func TestDeadlineHeap_WatchCoalesce(t *testing.T) { } func TestDeadlineHeap_MultipleForce(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) h := NewDeadlineHeap(context.Background(), 1*time.Second) diff --git a/nomad/drainer/drainer_util_test.go b/nomad/drainer/drainer_util_test.go index d18a22d1b..75fc531ff 100644 --- a/nomad/drainer/drainer_util_test.go +++ b/nomad/drainer/drainer_util_test.go @@ -3,12 +3,13 @@ package drainer import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestDrainer_PartitionAllocDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Set the max ids per reap to something lower. maxIdsPerTxn := 2 @@ -32,7 +33,7 @@ func TestDrainer_PartitionAllocDrain(t *testing.T) { } func TestDrainer_PartitionIds(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Set the max ids per reap to something lower. 
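
The ci.Parallel(t) calls introduced throughout this patch come from the new github.com/hashicorp/nomad/ci package, whose definition does not appear in these hunks. A minimal sketch of such a helper, assuming it simply wraps t.Parallel() and falls back to serial execution when a CI environment variable parses as true, might look like:

package ci

import (
	"os"
	"strconv"
	"testing"
)

// Parallel is a drop-in replacement for t.Parallel(). It marks the test
// as parallel unless the CI environment variable parses as true, in which
// case the test runs serially to reduce contention on shared CI resources.
// (Sketch under stated assumptions; the real helper may differ.)
func Parallel(t *testing.T) {
	t.Helper()
	if isCI, err := strconv.ParseBool(os.Getenv("CI")); err == nil && isCI {
		return // run serially in CI
	}
	t.Parallel()
}

Because the helper is an ordinary function taking a *testing.T, it works from subtests as well as top-level tests, which is why table-driven tests such as TestDrainingNode_Table below can pass the t from inside t.Run straight into ci.Parallel.
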
diff --git a/nomad/drainer/draining_node_test.go b/nomad/drainer/draining_node_test.go index b508fd702..b93efcb77 100644 --- a/nomad/drainer/draining_node_test.go +++ b/nomad/drainer/draining_node_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -44,6 +45,8 @@ func assertDrainingNode(t *testing.T, dn *drainingNode, isDone bool, remaining, } func TestDrainingNode_Table(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string isDone bool @@ -206,7 +209,7 @@ func TestDrainingNode_Table(t *testing.T) { for _, tc := range cases { tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) dn := testDrainingNode(t) tc.setup(t, dn) assertDrainingNode(t, dn, tc.isDone, tc.remaining, tc.running) diff --git a/nomad/drainer/watch_jobs_test.go b/nomad/drainer/watch_jobs_test.go index f15f4e0ed..192fb1124 100644 --- a/nomad/drainer/watch_jobs_test.go +++ b/nomad/drainer/watch_jobs_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -47,6 +48,8 @@ func testDrainingJobWatcher(t *testing.T, state *state.StateStore) (*drainingJob // TestDrainingJobWatcher_Interface is a compile-time assertion that we // implement the intended interface. func TestDrainingJobWatcher_Interface(t *testing.T) { + ci.Parallel(t) + w, cancel := testDrainingJobWatcher(t, state.TestStateStore(t)) cancel() var _ DrainingJobWatcher = w @@ -99,7 +102,7 @@ func assertJobWatcherOps(t *testing.T, jw DrainingJobWatcher, drained, migrated // TestDrainingJobWatcher_DrainJobs asserts DrainingJobWatcher batches // allocation changes from multiple jobs. 
func TestDrainingJobWatcher_DrainJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := state.TestStateStore(t) @@ -326,6 +329,8 @@ type handleTaskGroupTestCase struct { } func TestHandeTaskGroup_Table(t *testing.T) { + ci.Parallel(t) + cases := []handleTaskGroupTestCase{ { // All allocs on draining node @@ -543,7 +548,8 @@ func TestHandeTaskGroup_Table(t *testing.T) { } func testHandleTaskGroup(t *testing.T, tc handleTaskGroupTestCase) { - t.Parallel() + ci.Parallel(t) + require := require.New(t) assert := assert.New(t) @@ -599,7 +605,7 @@ func testHandleTaskGroup(t *testing.T, tc handleTaskGroupTestCase) { } func TestHandleTaskGroup_Migrations(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a draining node @@ -668,7 +674,7 @@ func TestHandleTaskGroup_Migrations(t *testing.T) { // This test asserts that handle task group works when an allocation is on a // garbage collected node func TestHandleTaskGroup_GarbageCollectedNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Create a draining node diff --git a/nomad/drainer/watch_nodes_test.go b/nomad/drainer/watch_nodes_test.go index 6484c1750..f1154c460 100644 --- a/nomad/drainer/watch_nodes_test.go +++ b/nomad/drainer/watch_nodes_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -25,14 +26,14 @@ func testNodeDrainWatcher(t *testing.T) (*nodeDrainWatcher, *state.StateStore, * } func TestNodeDrainWatcher_Interface(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) w, _, _ := testNodeDrainWatcher(t) require.Implements((*DrainingNodeWatcher)(nil), w) } func TestNodeDrainWatcher_AddDraining(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) _, state, m := testNodeDrainWatcher(t) @@ -62,7 +63,7 @@ func TestNodeDrainWatcher_AddDraining(t *testing.T) { } func TestNodeDrainWatcher_Remove(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) _, state, m := testNodeDrainWatcher(t) @@ -100,7 +101,7 @@ func TestNodeDrainWatcher_Remove(t *testing.T) { } func TestNodeDrainWatcher_Remove_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) _, state, m := testNodeDrainWatcher(t) @@ -138,7 +139,7 @@ func TestNodeDrainWatcher_Remove_Nonexistent(t *testing.T) { } func TestNodeDrainWatcher_Update(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) _, state, m := testNodeDrainWatcher(t) diff --git a/nomad/drainer_int_test.go b/nomad/drainer_int_test.go index cb8aca2ea..5c23ac891 100644 --- a/nomad/drainer_int_test.go +++ b/nomad/drainer_int_test.go @@ -10,6 +10,7 @@ import ( log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" @@ -120,7 +121,7 @@ func getNodeAllocsImpl(nodeID string) func(ws memdb.WatchSet, state *state.State } func TestDrainer_Simple_ServiceOnly(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -225,7 +226,7 @@ func TestDrainer_Simple_ServiceOnly(t *testing.T) { } func TestDrainer_Simple_ServiceOnly_Deadline(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -322,7 
+323,7 @@ func TestDrainer_Simple_ServiceOnly_Deadline(t *testing.T) { } func TestDrainer_DrainEmptyNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -373,7 +374,7 @@ func TestDrainer_DrainEmptyNode(t *testing.T) { } func TestDrainer_AllTypes_Deadline(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -540,7 +541,7 @@ func TestDrainer_AllTypes_Deadline(t *testing.T) { // Test that drain is unset when batch jobs naturally finish func TestDrainer_AllTypes_NoDeadline(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -707,7 +708,7 @@ func TestDrainer_AllTypes_NoDeadline(t *testing.T) { } func TestDrainer_AllTypes_Deadline_GarbageCollectedNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -883,7 +884,7 @@ func TestDrainer_AllTypes_Deadline_GarbageCollectedNode(t *testing.T) { // TestDrainer_MultipleNSes_ServiceOnly asserts that all jobs on an alloc, even // when they belong to different namespaces and share the same ID func TestDrainer_MultipleNSes_ServiceOnly(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -1011,7 +1012,7 @@ func TestDrainer_MultipleNSes_ServiceOnly(t *testing.T) { // Test that transitions to force drain work. func TestDrainer_Batch_TransitionToForce(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, inf := range []bool{true, false} { name := "Infinite" diff --git a/nomad/eval_broker_test.go b/nomad/eval_broker_test.go index af1f9a88f..3b0988eae 100644 --- a/nomad/eval_broker_test.go +++ b/nomad/eval_broker_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -52,7 +53,7 @@ func testBrokerFromConfig(t *testing.T, c *Config) *EvalBroker { } func TestEvalBroker_Enqueue_Dequeue_Nack_Ack(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) // Enqueue, but broker is disabled! @@ -228,7 +229,7 @@ func TestEvalBroker_Enqueue_Dequeue_Nack_Ack(t *testing.T) { } func TestEvalBroker_Nack_Delay(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) // Enqueue, but broker is disabled! 
@@ -386,7 +387,7 @@ func TestEvalBroker_Nack_Delay(t *testing.T) { } func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -625,7 +626,7 @@ func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) { } func TestEvalBroker_Enqueue_Disable(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) // Enqueue @@ -650,7 +651,7 @@ func TestEvalBroker_Enqueue_Disable(t *testing.T) { } func TestEvalBroker_Enqueue_Disable_Delay(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) baseEval := mock.Eval() b.SetEnabled(true) @@ -708,7 +709,7 @@ func TestEvalBroker_Enqueue_Disable_Delay(t *testing.T) { } func TestEvalBroker_Dequeue_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -729,7 +730,7 @@ func TestEvalBroker_Dequeue_Timeout(t *testing.T) { } func TestEvalBroker_Dequeue_Empty_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -773,7 +774,7 @@ func TestEvalBroker_Dequeue_Empty_Timeout(t *testing.T) { // Ensure higher priority dequeued first func TestEvalBroker_Dequeue_Priority(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -807,7 +808,7 @@ func TestEvalBroker_Dequeue_Priority(t *testing.T) { // Ensure FIFO at fixed priority func TestEvalBroker_Dequeue_FIFO(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) NUM := 100 @@ -829,7 +830,7 @@ func TestEvalBroker_Dequeue_FIFO(t *testing.T) { // Ensure fairness between schedulers func TestEvalBroker_Dequeue_Fairness(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) NUM := 1000 @@ -871,7 +872,7 @@ func TestEvalBroker_Dequeue_Fairness(t *testing.T) { // Ensure we get unblocked func TestEvalBroker_Dequeue_Blocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -928,7 +929,7 @@ func TestEvalBroker_Dequeue_Blocked(t *testing.T) { // Ensure we nack in a timely manner func TestEvalBroker_Nack_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 5*time.Millisecond) b.SetEnabled(true) @@ -964,7 +965,7 @@ func TestEvalBroker_Nack_Timeout(t *testing.T) { // Ensure we nack in a timely manner func TestEvalBroker_Nack_TimeoutReset(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 50*time.Millisecond) b.SetEnabled(true) @@ -1005,7 +1006,7 @@ func TestEvalBroker_Nack_TimeoutReset(t *testing.T) { } func TestEvalBroker_PauseResumeNackTimeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 50*time.Millisecond) b.SetEnabled(true) @@ -1065,7 +1066,7 @@ func TestEvalBroker_PauseResumeNackTimeout(t *testing.T) { } func TestEvalBroker_DeliveryLimit(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1155,7 +1156,7 @@ func TestEvalBroker_DeliveryLimit(t *testing.T) { } func TestEvalBroker_AckAtDeliveryLimit(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1199,7 +1200,7 @@ func TestEvalBroker_AckAtDeliveryLimit(t *testing.T) { // TestEvalBroker_Wait asserts delayed evaluations cannot be dequeued until // their wait duration has elapsed. 
func TestEvalBroker_Wait(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1247,7 +1248,7 @@ func TestEvalBroker_Wait(t *testing.T) { // Ensure that delayed evaluations work as expected func TestEvalBroker_WaitUntil(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1293,7 +1294,7 @@ func TestEvalBroker_WaitUntil(t *testing.T) { // Ensure that priority is taken into account when enqueueing many evaluations. func TestEvalBroker_EnqueueAll_Dequeue_Fair(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1355,7 +1356,7 @@ func TestEvalBroker_EnqueueAll_Dequeue_Fair(t *testing.T) { } func TestEvalBroker_EnqueueAll_Requeue_Ack(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1412,7 +1413,7 @@ func TestEvalBroker_EnqueueAll_Requeue_Ack(t *testing.T) { } func TestEvalBroker_EnqueueAll_Requeue_Nack(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) @@ -1465,7 +1466,7 @@ func TestEvalBroker_EnqueueAll_Requeue_Nack(t *testing.T) { } func TestEvalBroker_NamespacedJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) b := testBroker(t, 0) b.SetEnabled(true) diff --git a/nomad/eval_endpoint_test.go b/nomad/eval_endpoint_test.go index 6a2bf4575..500b3502c 100644 --- a/nomad/eval_endpoint_test.go +++ b/nomad/eval_endpoint_test.go @@ -10,6 +10,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -20,7 +21,7 @@ import ( ) func TestEvalEndpoint_GetEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -62,7 +63,7 @@ func TestEvalEndpoint_GetEval(t *testing.T) { } func TestEvalEndpoint_GetEval_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -123,7 +124,7 @@ func TestEvalEndpoint_GetEval_ACL(t *testing.T) { } func TestEvalEndpoint_GetEval_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -202,7 +203,7 @@ func TestEvalEndpoint_GetEval_Blocking(t *testing.T) { } func TestEvalEndpoint_Dequeue(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -247,7 +248,7 @@ func TestEvalEndpoint_Dequeue(t *testing.T) { // TestEvalEndpoint_Dequeue_WaitIndex_Snapshot asserts that an eval's wait // index will be equal to the highest eval modify index in the state store. func TestEvalEndpoint_Dequeue_WaitIndex_Snapshot(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -298,7 +299,7 @@ func TestEvalEndpoint_Dequeue_WaitIndex_Snapshot(t *testing.T) { // indexes in the state store. This can happen if Dequeue receives an eval that // has not yet been applied from the Raft log to the local node's state store. 
func TestEvalEndpoint_Dequeue_WaitIndex_Eval(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -337,7 +338,7 @@ func TestEvalEndpoint_Dequeue_WaitIndex_Eval(t *testing.T) { func TestEvalEndpoint_Dequeue_UpdateWaitIndex(t *testing.T) { // test enqueuing an eval, updating a plan result for the same eval and de-queueing the eval - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -403,7 +404,7 @@ func TestEvalEndpoint_Dequeue_UpdateWaitIndex(t *testing.T) { } func TestEvalEndpoint_Dequeue_Version_Mismatch(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -430,7 +431,7 @@ func TestEvalEndpoint_Dequeue_Version_Mismatch(t *testing.T) { } func TestEvalEndpoint_Ack(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -471,7 +472,7 @@ func TestEvalEndpoint_Ack(t *testing.T) { } func TestEvalEndpoint_Nack(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { // Disable all of the schedulers so we can manually dequeue @@ -525,7 +526,7 @@ func TestEvalEndpoint_Nack(t *testing.T) { } func TestEvalEndpoint_Update(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -574,7 +575,7 @@ func TestEvalEndpoint_Update(t *testing.T) { } func TestEvalEndpoint_Create(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -627,7 +628,7 @@ func TestEvalEndpoint_Create(t *testing.T) { } func TestEvalEndpoint_Reap(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -663,7 +664,7 @@ func TestEvalEndpoint_Reap(t *testing.T) { } func TestEvalEndpoint_List(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -718,7 +719,7 @@ func TestEvalEndpoint_List(t *testing.T) { } func TestEvalEndpoint_List_order(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -806,7 +807,7 @@ func TestEvalEndpoint_List_order(t *testing.T) { } func TestEvalEndpoint_ListAllNamespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -841,7 +842,7 @@ func TestEvalEndpoint_ListAllNamespaces(t *testing.T) { } func TestEvalEndpoint_List_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -907,7 +908,7 @@ func TestEvalEndpoint_List_ACL(t *testing.T) { } func TestEvalEndpoint_List_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -974,7 +975,7 @@ func TestEvalEndpoint_List_Blocking(t *testing.T) { } func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -1299,7 +1300,7 @@ func TestEvalEndpoint_List_PaginationFiltering(t *testing.T) { } func TestEvalEndpoint_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1337,7 +1338,7 @@ func TestEvalEndpoint_Allocations(t *testing.T) { } func 
TestEvalEndpoint_Allocations_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1402,7 +1403,7 @@ func TestEvalEndpoint_Allocations_ACL(t *testing.T) { } func TestEvalEndpoint_Allocations_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1458,7 +1459,7 @@ func TestEvalEndpoint_Allocations_Blocking(t *testing.T) { } func TestEvalEndpoint_Reblock_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1495,7 +1496,7 @@ func TestEvalEndpoint_Reblock_Nonexistent(t *testing.T) { } func TestEvalEndpoint_Reblock_NonBlocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1538,7 +1539,7 @@ func TestEvalEndpoint_Reblock_NonBlocked(t *testing.T) { } func TestEvalEndpoint_Reblock(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue diff --git a/nomad/event_endpoint_test.go b/nomad/event_endpoint_test.go index a314444c9..31dddfa7e 100644 --- a/nomad/event_endpoint_test.go +++ b/nomad/event_endpoint_test.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" @@ -23,7 +24,7 @@ import ( ) func TestEventStream(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.EnableEventBroker = true @@ -127,7 +128,7 @@ OUTER: // TestEventStream_StreamErr asserts an error is returned when an event publisher // closes its subscriptions func TestEventStream_StreamErr(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.EnableEventBroker = true @@ -209,7 +210,7 @@ OUTER: // TestEventStream_RegionForward tests event streaming from one server // to another in a different region func TestEventStream_RegionForward(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.EnableEventBroker = true @@ -307,7 +308,7 @@ OUTER: } func TestEventStream_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // start server @@ -507,7 +508,7 @@ func TestEventStream_ACL(t *testing.T) { // TestEventStream_ACL_Update_Close_Stream asserts that an active subscription // is closed after the token is no longer valid func TestEventStream_ACL_Update_Close_Stream(t *testing.T) { - t.Parallel() + ci.Parallel(t) // start server s1, root, cleanupS := TestACLServer(t, nil) diff --git a/nomad/fsm_test.go b/nomad/fsm_test.go index 42b3a7e25..292a43f26 100644 --- a/nomad/fsm_test.go +++ b/nomad/fsm_test.go @@ -11,11 +11,7 @@ import ( "github.com/google/go-cmp/cmp" memdb "github.com/hashicorp/go-memdb" - "github.com/hashicorp/raft" - "github.com/kr/pretty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -24,6 +20,10 @@ import ( "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" 
"github.com/hashicorp/nomad/testutil" + "github.com/hashicorp/raft" + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type MockSink struct { @@ -81,7 +81,7 @@ func makeLog(buf []byte) *raft.Log { } func TestFSM_UpsertNodeEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) state := fsm.State() @@ -125,7 +125,7 @@ func TestFSM_UpsertNodeEvents(t *testing.T) { } func TestFSM_UpsertNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.blockedEvals.SetEnabled(true) @@ -182,7 +182,7 @@ func TestFSM_UpsertNode(t *testing.T) { } func TestFSM_UpsertNode_Canonicalize(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -209,7 +209,7 @@ func TestFSM_UpsertNode_Canonicalize(t *testing.T) { } func TestFSM_UpsertNode_Canonicalize_Ineligible(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -236,7 +236,7 @@ func TestFSM_UpsertNode_Canonicalize_Ineligible(t *testing.T) { } func TestFSM_DeregisterNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) node := mock.Node() @@ -278,7 +278,7 @@ func TestFSM_DeregisterNode(t *testing.T) { } func TestFSM_UpdateNodeStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) fsm.blockedEvals.SetEnabled(true) @@ -335,7 +335,7 @@ func TestFSM_UpdateNodeStatus(t *testing.T) { } func TestFSM_BatchUpdateNodeDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -384,7 +384,7 @@ func TestFSM_BatchUpdateNodeDrain(t *testing.T) { } func TestFSM_UpdateNodeDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -427,7 +427,7 @@ func TestFSM_UpdateNodeDrain(t *testing.T) { } func TestFSM_UpdateNodeEligibility(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -497,7 +497,7 @@ func TestFSM_UpdateNodeEligibility(t *testing.T) { } func TestFSM_UpdateNodeEligibility_Unblock(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fsm := testFSM(t) @@ -551,7 +551,7 @@ func TestFSM_UpdateNodeEligibility_Unblock(t *testing.T) { } func TestFSM_RegisterJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.PeriodicJob() @@ -607,7 +607,7 @@ func TestFSM_RegisterJob(t *testing.T) { } func TestFSM_RegisterPeriodicJob_NonLeader(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) // Disable the dispatcher @@ -666,7 +666,7 @@ func TestFSM_RegisterPeriodicJob_NonLeader(t *testing.T) { } func TestFSM_RegisterJob_BadNamespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.Job() @@ -706,7 +706,7 @@ func TestFSM_RegisterJob_BadNamespace(t *testing.T) { } func TestFSM_DeregisterJob_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.Job() @@ -729,7 +729,7 @@ func TestFSM_DeregisterJob_Error(t *testing.T) { } func TestFSM_DeregisterJob_Purge(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.PeriodicJob() @@ -796,7 +796,7 @@ func TestFSM_DeregisterJob_Purge(t *testing.T) { } func TestFSM_DeregisterJob_NoPurge(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.PeriodicJob() @@ -866,7 +866,7 @@ func TestFSM_DeregisterJob_NoPurge(t *testing.T) { } func TestFSM_BatchDeregisterJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) require 
:= require.New(t) fsm := testFSM(t) @@ -944,7 +944,7 @@ func TestFSM_BatchDeregisterJob(t *testing.T) { } func TestFSM_UpdateEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) @@ -982,7 +982,7 @@ func TestFSM_UpdateEval(t *testing.T) { } func TestFSM_UpdateEval_Blocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) fsm.blockedEvals.SetEnabled(true) @@ -1031,7 +1031,7 @@ func TestFSM_UpdateEval_Blocked(t *testing.T) { } func TestFSM_UpdateEval_Untrack(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) fsm.blockedEvals.SetEnabled(true) @@ -1086,7 +1086,7 @@ func TestFSM_UpdateEval_Untrack(t *testing.T) { } func TestFSM_UpdateEval_NoUntrack(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) fsm.blockedEvals.SetEnabled(true) @@ -1143,7 +1143,7 @@ func TestFSM_UpdateEval_NoUntrack(t *testing.T) { } func TestFSM_DeleteEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) eval := mock.Eval() @@ -1185,7 +1185,7 @@ func TestFSM_DeleteEval(t *testing.T) { } func TestFSM_UpsertAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) alloc := mock.Alloc() @@ -1244,7 +1244,7 @@ func TestFSM_UpsertAllocs(t *testing.T) { } func TestFSM_UpsertAllocs_SharedJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) alloc := mock.Alloc() @@ -1317,7 +1317,7 @@ func TestFSM_UpsertAllocs_SharedJob(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) alloc := mock.Alloc() @@ -1398,7 +1398,7 @@ func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) { // TestFSM_UpsertAllocs_Canonicalize asserts that allocations are Canonicalized // to handle logs emitted by servers running old versions func TestFSM_UpsertAllocs_Canonicalize(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) alloc := mock.Alloc() @@ -1435,7 +1435,7 @@ func TestFSM_UpsertAllocs_Canonicalize(t *testing.T) { } func TestFSM_UpdateAllocFromClient_Unblock(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.blockedEvals.SetEnabled(true) state := fsm.State() @@ -1520,7 +1520,7 @@ func TestFSM_UpdateAllocFromClient_Unblock(t *testing.T) { } func TestFSM_UpdateAllocFromClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) state := fsm.State() require := require.New(t) @@ -1568,7 +1568,7 @@ func TestFSM_UpdateAllocFromClient(t *testing.T) { } func TestFSM_UpdateAllocDesiredTransition(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) state := fsm.State() require := require.New(t) @@ -1625,7 +1625,7 @@ func TestFSM_UpdateAllocDesiredTransition(t *testing.T) { } func TestFSM_UpsertVaultAccessor(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.blockedEvals.SetEnabled(true) @@ -1675,7 +1675,7 @@ func TestFSM_UpsertVaultAccessor(t *testing.T) { } func TestFSM_DeregisterVaultAccessor(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.blockedEvals.SetEnabled(true) @@ -1718,7 +1718,7 @@ func TestFSM_DeregisterVaultAccessor(t *testing.T) { } func TestFSM_UpsertSITokenAccessor(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) fsm := testFSM(t) @@ -1753,7 +1753,7 @@ func TestFSM_UpsertSITokenAccessor(t *testing.T) { } func TestFSM_DeregisterSITokenAccessor(t *testing.T) { - t.Parallel() + ci.Parallel(t) r
:= require.New(t) fsm := testFSM(t) @@ -1791,7 +1791,7 @@ func TestFSM_DeregisterSITokenAccessor(t *testing.T) { } func TestFSM_ApplyPlanResults(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) // Create the request and create a deployment @@ -1934,7 +1934,7 @@ func TestFSM_ApplyPlanResults(t *testing.T) { } func TestFSM_DeploymentStatusUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) state := fsm.State() @@ -2003,7 +2003,7 @@ func TestFSM_DeploymentStatusUpdate(t *testing.T) { } func TestFSM_JobStabilityUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) state := fsm.State() @@ -2044,7 +2044,7 @@ func TestFSM_JobStabilityUpdate(t *testing.T) { } func TestFSM_DeploymentPromotion(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) state := fsm.State() @@ -2149,7 +2149,7 @@ func TestFSM_DeploymentPromotion(t *testing.T) { } func TestFSM_DeploymentAllocHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) fsm.evalBroker.SetEnabled(true) state := fsm.State() @@ -2256,7 +2256,7 @@ func TestFSM_DeploymentAllocHealth(t *testing.T) { } func TestFSM_DeleteDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) state := fsm.State() @@ -2291,7 +2291,7 @@ func TestFSM_DeleteDeployment(t *testing.T) { } func TestFSM_UpsertACLPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) policy := mock.ACLPolicy() @@ -2316,7 +2316,7 @@ func TestFSM_UpsertACLPolicies(t *testing.T) { } func TestFSM_DeleteACLPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) policy := mock.ACLPolicy() @@ -2344,7 +2344,7 @@ func TestFSM_DeleteACLPolicies(t *testing.T) { } func TestFSM_BootstrapACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) token := mock.ACLToken() @@ -2389,7 +2389,7 @@ func TestFSM_BootstrapACLTokens(t *testing.T) { } func TestFSM_UpsertACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) token := mock.ACLToken() @@ -2414,7 +2414,7 @@ func TestFSM_UpsertACLTokens(t *testing.T) { } func TestFSM_DeleteACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) token := mock.ACLToken() @@ -2481,7 +2481,7 @@ func testSnapshotRestore(t *testing.T, fsm *nomadFSM) *nomadFSM { } func TestFSM_SnapshotRestore_Nodes(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2498,7 +2498,7 @@ func TestFSM_SnapshotRestore_Nodes(t *testing.T) { } func TestFSM_SnapshotRestore_Jobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2522,7 +2522,7 @@ func TestFSM_SnapshotRestore_Jobs(t *testing.T) { } func TestFSM_SnapshotRestore_Evals(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2546,7 +2546,7 @@ func TestFSM_SnapshotRestore_Evals(t *testing.T) { } func TestFSM_SnapshotRestore_Allocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2572,7 +2572,7 @@ func TestFSM_SnapshotRestore_Allocs(t *testing.T) { } func TestFSM_SnapshotRestore_Allocs_Canonicalize(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2599,7 +2599,7 @@ func TestFSM_SnapshotRestore_Allocs_Canonicalize(t *testing.T) { } func 
TestFSM_SnapshotRestore_Indexes(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2620,7 +2620,7 @@ func TestFSM_SnapshotRestore_Indexes(t *testing.T) { } func TestFSM_SnapshotRestore_TimeTable(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) @@ -2642,7 +2642,7 @@ func TestFSM_SnapshotRestore_TimeTable(t *testing.T) { } func TestFSM_SnapshotRestore_PeriodicLaunches(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2677,7 +2677,7 @@ func TestFSM_SnapshotRestore_PeriodicLaunches(t *testing.T) { } func TestFSM_SnapshotRestore_JobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2705,7 +2705,7 @@ func TestFSM_SnapshotRestore_JobSummary(t *testing.T) { } func TestFSM_SnapshotRestore_VaultAccessors(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2728,7 +2728,7 @@ func TestFSM_SnapshotRestore_VaultAccessors(t *testing.T) { } func TestFSM_SnapshotRestore_JobVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2756,7 +2756,7 @@ func TestFSM_SnapshotRestore_JobVersions(t *testing.T) { } func TestFSM_SnapshotRestore_Deployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2786,7 +2786,7 @@ func TestFSM_SnapshotRestore_Deployments(t *testing.T) { } func TestFSM_SnapshotRestore_ACLPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2805,7 +2805,7 @@ func TestFSM_SnapshotRestore_ACLPolicy(t *testing.T) { } func TestFSM_SnapshotRestore_ACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2824,7 +2824,7 @@ func TestFSM_SnapshotRestore_ACLTokens(t *testing.T) { } func TestFSM_SnapshotRestore_SchedulerConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2847,7 +2847,7 @@ func TestFSM_SnapshotRestore_SchedulerConfiguration(t *testing.T) { } func TestFSM_SnapshotRestore_ClusterMetadata(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) state := fsm.State() @@ -2866,7 +2866,7 @@ func TestFSM_SnapshotRestore_ClusterMetadata(t *testing.T) { } func TestFSM_ReconcileSummaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -2946,7 +2946,7 @@ func TestFSM_ReconcileSummaries(t *testing.T) { // COMPAT: Remove in 0.11 func TestFSM_ReconcileParentJobSummary(t *testing.T) { // This test exercises code to handle https://github.com/hashicorp/nomad/issues/3886 - t.Parallel() + ci.Parallel(t) require := require.New(t) // Add some state @@ -3016,7 +3016,7 @@ func TestFSM_ReconcileParentJobSummary(t *testing.T) { } func TestFSM_LeakedDeployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Add some state @@ -3034,7 +3034,7 @@ func TestFSM_LeakedDeployments(t *testing.T) { } func TestFSM_Autopilot(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) // Set the autopilot config using a request. 
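The ci package referenced throughout these hunks is presumably introduced elsewhere in this series; the diff only shows call sites. A minimal sketch of what such a helper could look like, assuming it simply gates t.Parallel() behind an environment check (the NOMAD_TEST_SERIAL variable below is a placeholder, not the actual implementation):

package ci

import (
	"os"
	"strconv"
	"testing"
)

// Parallel is a drop-in replacement for t.Parallel() that lets CI
// disable test parallelism globally. NOMAD_TEST_SERIAL is hypothetical;
// the real package may key off a different variable or mechanism.
func Parallel(t *testing.T) {
	if serial, err := strconv.ParseBool(os.Getenv("NOMAD_TEST_SERIAL")); err == nil && serial {
		return // run this test serially
	}
	t.Parallel()
}

Centralizing the call this way would let resource-heavy suites such as fsm_test.go be serialized on constrained runners without touching thousands of call sites again.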
@@ -3097,7 +3097,7 @@ func TestFSM_Autopilot(t *testing.T) { } func TestFSM_SchedulerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) require := require.New(t) @@ -3146,7 +3146,7 @@ func TestFSM_SchedulerConfig(t *testing.T) { } func TestFSM_ClusterMetadata(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) fsm := testFSM(t) @@ -3186,7 +3186,7 @@ func TestFSM_ClusterMetadata(t *testing.T) { func TestFSM_UpsertNamespaces(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) ns1 := mock.Namespace() @@ -3211,7 +3211,7 @@ func TestFSM_UpsertNamespaces(t *testing.T) { func TestFSM_DeleteNamespaces(t *testing.T) { assert := assert.New(t) - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) ns1 := mock.Namespace() @@ -3237,7 +3237,7 @@ func TestFSM_DeleteNamespaces(t *testing.T) { } func TestFSM_SnapshotRestore_Namespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Add some state fsm := testFSM(t) state := fsm.State() @@ -3260,7 +3260,7 @@ func TestFSM_SnapshotRestore_Namespaces(t *testing.T) { } func TestFSM_ACLEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { desc string @@ -3408,7 +3408,7 @@ func TestFSM_ACLEvents(t *testing.T) { // TestFSM_EventBroker_JobRegisterFSMEvents asserts that only a single job // register event is emitted when registering a job func TestFSM_EventBroker_JobRegisterFSMEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) fsm := testFSM(t) job := mock.Job() diff --git a/nomad/heartbeat_test.go b/nomad/heartbeat_test.go index a7f9223f9..ec7aaa165 100644 --- a/nomad/heartbeat_test.go +++ b/nomad/heartbeat_test.go @@ -7,6 +7,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -14,7 +15,7 @@ import ( ) func TestHeartbeat_InitializeHeartbeatTimers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -41,7 +42,7 @@ func TestHeartbeat_InitializeHeartbeatTimers(t *testing.T) { } func TestHeartbeat_ResetHeartbeatTimer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -64,7 +65,7 @@ func TestHeartbeat_ResetHeartbeatTimer(t *testing.T) { } func TestHeartbeat_ResetHeartbeatTimer_Nonleader(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -81,7 +82,7 @@ func TestHeartbeat_ResetHeartbeatTimer_Nonleader(t *testing.T) { } func TestHeartbeat_ResetHeartbeatTimerLocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -103,7 +104,7 @@ func TestHeartbeat_ResetHeartbeatTimerLocked(t *testing.T) { } func TestHeartbeat_ResetHeartbeatTimerLocked_Renew(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -143,7 +144,7 @@ func TestHeartbeat_ResetHeartbeatTimerLocked_Renew(t *testing.T) { } func TestHeartbeat_InvalidateHeartbeat(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -168,7 +169,7 @@ func TestHeartbeat_InvalidateHeartbeat(t *testing.T) { } func TestHeartbeat_ClearHeartbeatTimer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -189,7 +190,7 @@ func 
TestHeartbeat_ClearHeartbeatTimer(t *testing.T) { } func TestHeartbeat_ClearAllHeartbeatTimers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -212,7 +213,7 @@ func TestHeartbeat_ClearAllHeartbeatTimers(t *testing.T) { } func TestHeartbeat_Server_HeartbeatTTL_Failover(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 diff --git a/nomad/job_endpoint_hook_connect_test.go b/nomad/job_endpoint_hook_connect_test.go index 75253ce13..ed6b28e95 100644 --- a/nomad/job_endpoint_hook_connect_test.go +++ b/nomad/job_endpoint_hook_connect_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" @@ -13,7 +14,7 @@ import ( ) func TestJobEndpointConnect_isSidecarForService(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { task *structs.Task @@ -54,7 +55,7 @@ func TestJobEndpointConnect_isSidecarForService(t *testing.T) { } func TestJobEndpointConnect_groupConnectHook(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Test that connect-proxy task is inserted for backend service job := mock.Job() @@ -113,7 +114,7 @@ func TestJobEndpointConnect_groupConnectHook(t *testing.T) { } func TestJobEndpointConnect_groupConnectHook_IngressGateway_BridgeNetwork(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Test that the connect ingress gateway task is inserted if a gateway service // exists and since this is a bridge network, will rewrite the default gateway proxy @@ -146,7 +147,7 @@ func TestJobEndpointConnect_groupConnectHook_IngressGateway_BridgeNetwork(t *tes } func TestJobEndpointConnect_groupConnectHook_IngressGateway_HostNetwork(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Test that the connect ingress gateway task is inserted if a gateway service // exists. In host network mode, the default values are used. 
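Each file in this patch follows the same two-step migration: the ci import joins the github.com/hashicorp/nomad import group, and the t.Parallel() call (typically the first statement of each test) becomes ci.Parallel(t). Sketched on a hypothetical test file for illustration:

package nomad

import (
	"testing"

	"github.com/hashicorp/nomad/ci"
)

func TestExample(t *testing.T) {
	ci.Parallel(t) // previously: t.Parallel()

	// ... test body unchanged ...
}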
@@ -178,7 +179,7 @@ func TestJobEndpointConnect_groupConnectHook_IngressGateway_HostNetwork(t *testi } func TestJobEndpointConnect_groupConnectHook_IngressGateway_CustomTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Test that the connect gateway task is inserted if a gateway service exists // and since this is a bridge network, will rewrite the default gateway proxy @@ -247,7 +248,7 @@ func TestJobEndpointConnect_groupConnectHook_IngressGateway_CustomTask(t *testin } func TestJobEndpointConnect_groupConnectHook_TerminatingGateway(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Tests that the connect terminating gateway task is inserted if a gateway // service exists and since this is a bridge network, will rewrite the default @@ -280,7 +281,7 @@ func TestJobEndpointConnect_groupConnectHook_TerminatingGateway(t *testing.T) { } func TestJobEndpointConnect_groupConnectHook_MeshGateway(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Test that the connect mesh gateway task is inserted if a gateway service // exists and since this is a bridge network, will rewrite the default gateway @@ -326,7 +327,7 @@ func TestJobEndpointConnect_groupConnectHook_MeshGateway(t *testing.T) { // // See https://github.com/hashicorp/nomad/issues/6853 func TestJobEndpointConnect_ConnectInterpolation(t *testing.T) { - t.Parallel() + ci.Parallel(t) server := &Server{logger: testlog.HCLogger(t)} jobEndpoint := NewJobEndpoints(server) @@ -342,7 +343,7 @@ func TestJobEndpointConnect_ConnectInterpolation(t *testing.T) { } func TestJobEndpointConnect_groupConnectSidecarValidate(t *testing.T) { - t.Parallel() + ci.Parallel(t) // network validation @@ -457,6 +458,8 @@ func TestJobEndpointConnect_groupConnectSidecarValidate(t *testing.T) { } func TestJobEndpointConnect_groupConnectUpstreamsValidate(t *testing.T) { + ci.Parallel(t) + t.Run("no connect services", func(t *testing.T) { err := groupConnectUpstreamsValidate("group", []*structs.Service{{Name: "s1"}, {Name: "s2"}}) @@ -543,7 +546,7 @@ func TestJobEndpointConnect_groupConnectUpstreamsValidate(t *testing.T) { } func TestJobEndpointConnect_getNamedTaskForNativeService(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("named exists", func(t *testing.T) { task, err := getNamedTaskForNativeService(&structs.TaskGroup{ @@ -583,7 +586,7 @@ func TestJobEndpointConnect_getNamedTaskForNativeService(t *testing.T) { } func TestJobEndpointConnect_groupConnectGatewayValidate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no group network", func(t *testing.T) { err := groupConnectGatewayValidate(&structs.TaskGroup{ @@ -605,7 +608,7 @@ func TestJobEndpointConnect_groupConnectGatewayValidate(t *testing.T) { } func TestJobEndpointConnect_newConnectGatewayTask_host(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("ingress", func(t *testing.T) { task := newConnectGatewayTask(structs.ConnectIngressPrefix, "foo", true) @@ -627,14 +630,14 @@ func TestJobEndpointConnect_newConnectGatewayTask_host(t *testing.T) { } func TestJobEndpointConnect_newConnectGatewayTask_bridge(t *testing.T) { - t.Parallel() + ci.Parallel(t) task := newConnectGatewayTask(structs.ConnectIngressPrefix, "service1", false) require.NotContains(t, task.Config, "network_mode") } func TestJobEndpointConnect_hasGatewayTaskForService(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no gateway task", func(t *testing.T) { result := hasGatewayTaskForService(&structs.TaskGroup{ @@ -682,7 +685,7 @@ func TestJobEndpointConnect_hasGatewayTaskForService(t *testing.T) { } func 
TestJobEndpointConnect_gatewayProxyIsDefault(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { result := gatewayProxyIsDefault(nil) @@ -725,7 +728,7 @@ func TestJobEndpointConnect_gatewayProxyIsDefault(t *testing.T) { } func TestJobEndpointConnect_gatewayBindAddressesForBridge(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { @@ -792,7 +795,7 @@ func TestJobEndpointConnect_gatewayBindAddressesForBridge(t *testing.T) { } func TestJobEndpointConnect_gatewayProxy(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { result := gatewayProxy(nil, "bridge") @@ -986,5 +989,4 @@ func TestJobEndpointConnect_gatewayProxy(t *testing.T) { EnvoyGatewayBindAddresses: nil, }, result) }) - } diff --git a/nomad/job_endpoint_hook_expose_check_test.go b/nomad/job_endpoint_hook_expose_check_test.go index f3a2cbe3a..8e71883ba 100644 --- a/nomad/job_endpoint_hook_expose_check_test.go +++ b/nomad/job_endpoint_hook_expose_check_test.go @@ -3,18 +3,19 @@ package nomad import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestJobExposeCheckHook_Name(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Equal(t, "expose-check", new(jobExposeCheckHook).Name()) } func TestJobExposeCheckHook_tgUsesExposeCheck(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no check.expose", func(t *testing.T) { require.False(t, tgUsesExposeCheck(&structs.TaskGroup{ @@ -40,7 +41,7 @@ func TestJobExposeCheckHook_tgUsesExposeCheck(t *testing.T) { } func TestJobExposeCheckHook_tgValidateUseOfBridgeMode(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1 := &structs.Service{ Name: "s1", @@ -88,7 +89,7 @@ func TestJobExposeCheckHook_tgValidateUseOfBridgeMode(t *testing.T) { } func TestJobExposeCheckHook_tgValidateUseOfCheckExpose(t *testing.T) { - t.Parallel() + ci.Parallel(t) withCustomProxyTask := &structs.Service{ Name: "s1", @@ -138,6 +139,8 @@ func TestJobExposeCheckHook_tgValidateUseOfCheckExpose(t *testing.T) { } func TestJobExposeCheckHook_Validate(t *testing.T) { + ci.Parallel(t) + s1 := &structs.Service{ Name: "s1", Checks: []*structs.ServiceCheck{{ @@ -224,7 +227,7 @@ func TestJobExposeCheckHook_Validate(t *testing.T) { } func TestJobExposeCheckHook_exposePathForCheck(t *testing.T) { - t.Parallel() + ci.Parallel(t) const checkIdx = 0 @@ -314,7 +317,7 @@ func TestJobExposeCheckHook_exposePathForCheck(t *testing.T) { Name: "group1", Services: []*structs.Service{s}, Networks: structs.Networks{{ - Mode: "bridge", + Mode: "bridge", DynamicPorts: []structs.Port{ // service declares "sPort", but does not exist }, @@ -400,7 +403,7 @@ func TestJobExposeCheckHook_exposePathForCheck(t *testing.T) { } func TestJobExposeCheckHook_containsExposePath(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("contains path", func(t *testing.T) { require.True(t, containsExposePath([]structs.ConsulExposePath{{ @@ -442,7 +445,7 @@ func TestJobExposeCheckHook_containsExposePath(t *testing.T) { } func TestJobExposeCheckHook_serviceExposeConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("proxy is nil", func(t *testing.T) { require.NotNil(t, serviceExposeConfig(&structs.Service{ @@ -521,7 +524,7 @@ func TestJobExposeCheckHook_serviceExposeConfig(t *testing.T) { } func TestJobExposeCheckHook_checkIsExposable(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("grpc", func(t *testing.T) { require.True(t, checkIsExposable(&structs.ServiceCheck{ @@ -561,7 +564,7 @@ 
func TestJobExposeCheckHook_checkIsExposable(t *testing.T) { } func TestJobExposeCheckHook_Mutate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("typical", func(t *testing.T) { result, warnings, err := new(jobExposeCheckHook).Mutate(&structs.Job{ diff --git a/nomad/job_endpoint_oss_test.go b/nomad/job_endpoint_oss_test.go index e90e7cb82..304422d81 100644 --- a/nomad/job_endpoint_oss_test.go +++ b/nomad/job_endpoint_oss_test.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" @@ -21,7 +22,7 @@ import ( // submission fails allow_unauthenticated is false, and either an invalid or no // operator Consul token is provided. func TestJobEndpoint_Register_Connect_AllowUnauthenticatedFalse_oss(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 8cb936399..3cf238b35 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -10,21 +10,21 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" - "github.com/hashicorp/raft" - "github.com/kr/pretty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + "github.com/hashicorp/raft" + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestJobEndpoint_Register(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -110,7 +110,7 @@ func TestJobEndpoint_Register(t *testing.T) { } func TestJobEndpoint_Register_PreserveCounts(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -170,7 +170,7 @@ func TestJobEndpoint_Register_PreserveCounts(t *testing.T) { } func TestJobEndpoint_Register_EvalPriority(t *testing.T) { - t.Parallel() + ci.Parallel(t) requireAssert := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -202,7 +202,7 @@ func TestJobEndpoint_Register_EvalPriority(t *testing.T) { } func TestJobEndpoint_Register_Connect(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -271,7 +271,7 @@ func TestJobEndpoint_Register_Connect(t *testing.T) { } func TestJobEndpoint_Register_ConnectIngressGateway_minimum(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -346,7 +346,7 @@ func TestJobEndpoint_Register_ConnectIngressGateway_minimum(t *testing.T) { } func TestJobEndpoint_Register_ConnectIngressGateway_full(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -473,7 +473,7 @@ func TestJobEndpoint_Register_ConnectIngressGateway_full(t *testing.T) { } func TestJobEndpoint_Register_ConnectExposeCheck(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := 
require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -587,7 +587,7 @@ func TestJobEndpoint_Register_ConnectExposeCheck(t *testing.T) { } func TestJobEndpoint_Register_ConnectWithSidecarTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -684,7 +684,7 @@ func TestJobEndpoint_Register_ConnectWithSidecarTask(t *testing.T) { } func TestJobEndpoint_Register_Connect_ValidatesWithoutSidecarTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -735,7 +735,7 @@ func TestJobEndpoint_Register_Connect_ValidatesWithoutSidecarTask(t *testing.T) } func TestJobEndpoint_Register_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -899,7 +899,7 @@ func TestJobEndpoint_Register_ACL(t *testing.T) { } func TestJobEndpoint_Register_InvalidNamespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -939,7 +939,7 @@ func TestJobEndpoint_Register_InvalidNamespace(t *testing.T) { } func TestJobEndpoint_Register_Payload(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -973,7 +973,7 @@ func TestJobEndpoint_Register_Payload(t *testing.T) { } func TestJobEndpoint_Register_Existing(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1096,7 +1096,7 @@ func TestJobEndpoint_Register_Existing(t *testing.T) { } func TestJobEndpoint_Register_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1149,7 +1149,7 @@ func TestJobEndpoint_Register_Periodic(t *testing.T) { } func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1197,7 +1197,7 @@ func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) { } func TestJobEndpoint_Register_Dispatched(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -1227,7 +1227,7 @@ func TestJobEndpoint_Register_Dispatched(t *testing.T) { } func TestJobEndpoint_Register_EnforceIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1359,7 +1359,7 @@ func TestJobEndpoint_Register_EnforceIndex(t *testing.T) { // TestJobEndpoint_Register_Vault_Disabled asserts that submitting a job that // uses Vault when Vault is *disabled* results in an error. func TestJobEndpoint_Register_Vault_Disabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1396,7 +1396,7 @@ func TestJobEndpoint_Register_Vault_Disabled(t *testing.T) { // with a Vault policy but without a Vault token *succeeds* if // allow_unauthenticated=true.
func TestJobEndpoint_Register_Vault_AllowUnauthenticated(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1453,7 +1453,7 @@ func TestJobEndpoint_Register_Vault_AllowUnauthenticated(t *testing.T) { // submitters can specify their own Vault constraint to override the // automatically injected one. func TestJobEndpoint_Register_Vault_OverrideConstraint(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1510,7 +1510,7 @@ func TestJobEndpoint_Register_Vault_OverrideConstraint(t *testing.T) { } func TestJobEndpoint_Register_Vault_NoToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1551,7 +1551,7 @@ func TestJobEndpoint_Register_Vault_NoToken(t *testing.T) { } func TestJobEndpoint_Register_Vault_Policies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1693,7 +1693,7 @@ func TestJobEndpoint_Register_Vault_Policies(t *testing.T) { } func TestJobEndpoint_Register_Vault_MultiNamespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1747,7 +1747,7 @@ func TestJobEndpoint_Register_Vault_MultiNamespaces(t *testing.T) { // TestJobEndpoint_Register_SemverConstraint asserts that semver ordering is // used when evaluating semver constraints. func TestJobEndpoint_Register_SemverConstraint(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1829,7 +1829,7 @@ func TestJobEndpoint_Register_SemverConstraint(t *testing.T) { // TestJobEndpoint_Register_EvalCreation_Modern asserts that job register creates an eval // atomically with the registration func TestJobEndpoint_Register_EvalCreation_Modern(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -1950,7 +1950,7 @@ func TestJobEndpoint_Register_EvalCreation_Modern(t *testing.T) { // TestJobEndpoint_Register_EvalCreation_Legacy asserts that job register creates an eval // atomically with the registration, but handle legacy clients by adding a new eval update func TestJobEndpoint_Register_EvalCreation_Legacy(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 @@ -2092,7 +2092,7 @@ func TestJobEndpoint_Register_EvalCreation_Legacy(t *testing.T) { } func TestJobEndpoint_Register_ValidateMemoryMax(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2172,7 +2172,7 @@ func evalUpdateFromRaft(t *testing.T, s *Server, evalID string) *structs.Evaluat } func TestJobEndpoint_Register_ACL_Namespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -2249,7 +2249,7 @@ func TestJobEndpoint_Register_ACL_Namespace(t *testing.T) { } func TestJobRegister_ACL_RejectedBySchedulerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue }) @@ -2343,7 +2343,7 @@ func 
TestJobRegister_ACL_RejectedBySchedulerConfig(t *testing.T) { } func TestJobEndpoint_Revert(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2513,7 +2513,7 @@ func TestJobEndpoint_Revert(t *testing.T) { } func TestJobEndpoint_Revert_Vault_NoToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2613,7 +2613,7 @@ func TestJobEndpoint_Revert_Vault_NoToken(t *testing.T) { // TestJobEndpoint_Revert_Vault_Policies asserts that job revert uses the // revert request's Vault token when authorizing policies. func TestJobEndpoint_Revert_Vault_Policies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2726,7 +2726,7 @@ func TestJobEndpoint_Revert_Vault_Policies(t *testing.T) { } func TestJobEndpoint_Revert_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -2791,7 +2791,7 @@ func TestJobEndpoint_Revert_ACL(t *testing.T) { } func TestJobEndpoint_Stable(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2855,7 +2855,7 @@ func TestJobEndpoint_Stable(t *testing.T) { } func TestJobEndpoint_Stable_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -2922,7 +2922,7 @@ func TestJobEndpoint_Stable_ACL(t *testing.T) { } func TestJobEndpoint_Evaluate(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3008,7 +3008,7 @@ func TestJobEndpoint_Evaluate(t *testing.T) { } func TestJobEndpoint_ForceRescheduleEvaluate(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -3086,7 +3086,7 @@ func TestJobEndpoint_ForceRescheduleEvaluate(t *testing.T) { } func TestJobEndpoint_Evaluate_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -3160,7 +3160,7 @@ func TestJobEndpoint_Evaluate_ACL(t *testing.T) { } func TestJobEndpoint_Evaluate_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3204,7 +3204,7 @@ func TestJobEndpoint_Evaluate_Periodic(t *testing.T) { } func TestJobEndpoint_Evaluate_ParameterizedJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3249,7 +3249,7 @@ func TestJobEndpoint_Evaluate_ParameterizedJob(t *testing.T) { } func TestJobEndpoint_Deregister(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -3340,7 +3340,7 @@ func TestJobEndpoint_Deregister(t *testing.T) { } func TestJobEndpoint_Deregister_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -3425,7 +3425,7 @@ func TestJobEndpoint_Deregister_ACL(t *testing.T) { } func TestJobEndpoint_Deregister_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 
:= TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3492,7 +3492,7 @@ func TestJobEndpoint_Deregister_Nonexistent(t *testing.T) { } func TestJobEndpoint_Deregister_EvalPriority(t *testing.T) { - t.Parallel() + ci.Parallel(t) requireAssert := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -3534,7 +3534,7 @@ func TestJobEndpoint_Deregister_EvalPriority(t *testing.T) { } func TestJobEndpoint_Deregister_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3593,7 +3593,7 @@ func TestJobEndpoint_Deregister_Periodic(t *testing.T) { } func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3655,7 +3655,7 @@ func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) { // TestJobEndpoint_Deregister_EvalCreation_Modern asserts that job deregister creates an eval // atomically with the registration func TestJobEndpoint_Deregister_EvalCreation_Modern(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -3736,7 +3736,7 @@ func TestJobEndpoint_Deregister_EvalCreation_Modern(t *testing.T) { // creates an eval atomically with the registration, but handle legacy clients // by adding a new eval update func TestJobEndpoint_Deregister_EvalCreation_Legacy(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 @@ -3832,7 +3832,7 @@ func TestJobEndpoint_Deregister_EvalCreation_Legacy(t *testing.T) { } func TestJobEndpoint_Deregister_NoShutdownDelay(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -3923,7 +3923,7 @@ func TestJobEndpoint_Deregister_NoShutdownDelay(t *testing.T) { } func TestJobEndpoint_BatchDeregister(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -4016,7 +4016,7 @@ func TestJobEndpoint_BatchDeregister(t *testing.T) { } func TestJobEndpoint_BatchDeregister_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -4085,7 +4085,7 @@ func TestJobEndpoint_BatchDeregister_ACL(t *testing.T) { } func TestJobEndpoint_Deregister_Priority(t *testing.T) { - t.Parallel() + ci.Parallel(t) requireAssertion := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -4135,7 +4135,7 @@ func TestJobEndpoint_Deregister_Priority(t *testing.T) { } func TestJobEndpoint_GetJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4213,7 +4213,7 @@ func TestJobEndpoint_GetJob(t *testing.T) { } func TestJobEndpoint_GetJob_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -4271,7 +4271,7 @@ func TestJobEndpoint_GetJob_ACL(t *testing.T) { } func TestJobEndpoint_GetJob_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4348,7 +4348,7 @@ func TestJobEndpoint_GetJob_Blocking(t *testing.T) { } func TestJobEndpoint_GetJobVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ 
-4421,7 +4421,7 @@ func TestJobEndpoint_GetJobVersions(t *testing.T) { } func TestJobEndpoint_GetJobVersions_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -4488,7 +4488,7 @@ func TestJobEndpoint_GetJobVersions_ACL(t *testing.T) { } func TestJobEndpoint_GetJobVersions_Diff(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4585,7 +4585,7 @@ func TestJobEndpoint_GetJobVersions_Diff(t *testing.T) { } func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4671,7 +4671,7 @@ func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) { } func TestJobEndpoint_GetJobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -4733,7 +4733,7 @@ func TestJobEndpoint_GetJobSummary(t *testing.T) { } func TestJobEndpoint_Summary_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -4820,7 +4820,7 @@ func TestJobEndpoint_Summary_ACL(t *testing.T) { } func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4914,7 +4914,7 @@ func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) { } func TestJobEndpoint_ListJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -4963,7 +4963,7 @@ func TestJobEndpoint_ListJobs(t *testing.T) { // returns all jobs across namespaces. // func TestJobEndpoint_ListJobs_AllNamespaces_OSS(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5025,7 +5025,7 @@ func TestJobEndpoint_ListJobs_AllNamespaces_OSS(t *testing.T) { } func TestJobEndpoint_ListJobs_WithACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -5085,7 +5085,7 @@ func TestJobEndpoint_ListJobs_WithACL(t *testing.T) { } func TestJobEndpoint_ListJobs_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5152,7 +5152,7 @@ func TestJobEndpoint_ListJobs_Blocking(t *testing.T) { } func TestJobEndpoint_ListJobs_PaginationFiltering(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, _, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -5330,7 +5330,7 @@ func TestJobEndpoint_ListJobs_PaginationFiltering(t *testing.T) { } func TestJobEndpoint_Allocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5371,7 +5371,7 @@ func TestJobEndpoint_Allocations(t *testing.T) { } func TestJobEndpoint_Allocations_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -5433,7 +5433,7 @@ func TestJobEndpoint_Allocations_ACL(t *testing.T) { } func TestJobEndpoint_Allocations_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5493,7 +5493,7 @@ func TestJobEndpoint_Allocations_Blocking(t *testing.T) { // TestJobEndpoint_Allocations_NoJobID asserts not setting a JobID in the // request returns an error.
func TestJobEndpoint_Allocations_NoJobID(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5515,7 +5515,7 @@ func TestJobEndpoint_Allocations_NoJobID(t *testing.T) { } func TestJobEndpoint_Evaluations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5554,7 +5554,7 @@ func TestJobEndpoint_Evaluations(t *testing.T) { } func TestJobEndpoint_Evaluations_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -5614,7 +5614,7 @@ func TestJobEndpoint_Evaluations_ACL(t *testing.T) { } func TestJobEndpoint_Evaluations_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5670,7 +5670,7 @@ func TestJobEndpoint_Evaluations_Blocking(t *testing.T) { } func TestJobEndpoint_Deployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5707,7 +5707,7 @@ func TestJobEndpoint_Deployments(t *testing.T) { } func TestJobEndpoint_Deployments_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -5771,7 +5771,7 @@ func TestJobEndpoint_Deployments_ACL(t *testing.T) { } func TestJobEndpoint_Deployments_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5818,7 +5818,7 @@ func TestJobEndpoint_Deployments_Blocking(t *testing.T) { } func TestJobEndpoint_LatestDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5857,7 +5857,7 @@ func TestJobEndpoint_LatestDeployment(t *testing.T) { } func TestJobEndpoint_LatestDeployment_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -5926,7 +5926,7 @@ func TestJobEndpoint_LatestDeployment_ACL(t *testing.T) { } func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -5974,7 +5974,7 @@ func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) { } func TestJobEndpoint_Plan_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -6008,7 +6008,7 @@ func TestJobEndpoint_Plan_ACL(t *testing.T) { } func TestJobEndpoint_Plan_WithDiff(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -6068,7 +6068,7 @@ func TestJobEndpoint_Plan_WithDiff(t *testing.T) { } func TestJobEndpoint_Plan_NoDiff(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -6130,7 +6130,7 @@ func TestJobEndpoint_Plan_NoDiff(t *testing.T) { // TestJobEndpoint_Plan_Scaling asserts that the plan endpoint handles // jobs with scaling stanza func TestJobEndpoint_Plan_Scaling(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -6164,7 +6164,7 @@ func TestJobEndpoint_Plan_Scaling(t *testing.T) { } func TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic 
dequeue @@ -6234,7 +6234,7 @@ func TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) { } func TestJobEndpoint_ValidateJob_ConsulConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -6324,7 +6324,7 @@ func TestJobEndpoint_ValidateJob_ConsulConnect(t *testing.T) { } func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -6397,7 +6397,7 @@ func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) { } func TestJobEndpoint_ValidateJobUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) old := mock.Job() new := mock.Job() @@ -6441,7 +6441,7 @@ func TestJobEndpoint_ValidateJobUpdate(t *testing.T) { } func TestJobEndpoint_ValidateJobUpdate_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -6477,7 +6477,7 @@ func TestJobEndpoint_ValidateJobUpdate_ACL(t *testing.T) { } func TestJobEndpoint_Dispatch_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { @@ -6554,7 +6554,7 @@ func TestJobEndpoint_Dispatch_ACL(t *testing.T) { } func TestJobEndpoint_Dispatch(t *testing.T) { - t.Parallel() + ci.Parallel(t) // No requirements d1 := mock.BatchJob() @@ -6901,7 +6901,7 @@ func TestJobEndpoint_Dispatch(t *testing.T) { // TestJobEndpoint_Dispatch_JobChildrenSummary asserts that the job summary is updated // appropriately as its dispatched/children jobs status are updated. func TestJobEndpoint_Dispatch_JobChildrenSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -7010,7 +7010,7 @@ func TestJobEndpoint_Dispatch_JobChildrenSummary(t *testing.T) { } func TestJobEndpoint_Dispatch_ACL_RejectedBySchedulerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -7099,7 +7099,7 @@ func TestJobEndpoint_Dispatch_ACL_RejectedBySchedulerConfig(t *testing.T) { } func TestJobEndpoint_Scale(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7146,7 +7146,7 @@ func TestJobEndpoint_Scale(t *testing.T) { } func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7225,7 +7225,7 @@ func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) { } func TestJobEndpoint_Scale_InformationalEventsShouldNotBeBlocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7302,7 +7302,7 @@ func TestJobEndpoint_Scale_InformationalEventsShouldNotBeBlocked(t *testing.T) { } func TestJobEndpoint_Scale_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -7387,7 +7387,7 @@ func TestJobEndpoint_Scale_ACL(t *testing.T) { } func TestJobEndpoint_Scale_ACL_RejectedBySchedulerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -7478,7 +7478,7 @@ func TestJobEndpoint_Scale_ACL_RejectedBySchedulerConfig(t *testing.T) { } func TestJobEndpoint_Scale_Invalid(t *testing.T) { 
- t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7529,7 +7529,7 @@ func TestJobEndpoint_Scale_Invalid(t *testing.T) { } func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7572,7 +7572,7 @@ func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) { } func TestJobEndpoint_Scale_NoEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7637,7 +7637,7 @@ func TestJobEndpoint_Scale_NoEval(t *testing.T) { } func TestJobEndpoint_Scale_Priority(t *testing.T) { - t.Parallel() + ci.Parallel(t) requireAssertion := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7687,7 +7687,7 @@ func TestJobEndpoint_Scale_Priority(t *testing.T) { } func TestJobEndpoint_InvalidCount(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7717,7 +7717,7 @@ func TestJobEndpoint_InvalidCount(t *testing.T) { } func TestJobEndpoint_GetScaleStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -7833,7 +7833,7 @@ func TestJobEndpoint_GetScaleStatus(t *testing.T) { } func TestJobEndpoint_GetScaleStatus_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) diff --git a/nomad/job_endpoint_validators_test.go b/nomad/job_endpoint_validators_test.go index f84fd90e2..de8acaa90 100644 --- a/nomad/job_endpoint_validators_test.go +++ b/nomad/job_endpoint_validators_test.go @@ -3,6 +3,7 @@ package nomad import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -10,13 +11,13 @@ import ( ) func TestJobNamespaceConstraintCheckHook_Name(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Equal(t, "namespace-constraint-check", new(jobNamespaceConstraintCheckHook).Name()) } func TestJobNamespaceConstraintCheckHook_taskValidateDriver(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { description string @@ -85,7 +86,7 @@ func TestJobNamespaceConstraintCheckHook_taskValidateDriver(t *testing.T) { } func TestJobNamespaceConstraintCheckHook_validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) diff --git a/nomad/leader_test.go b/nomad/leader_test.go index 07f854d09..b244273b0 100644 --- a/nomad/leader_test.go +++ b/nomad/leader_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-version" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -24,6 +25,8 @@ import ( ) func TestLeader_LeftServer(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 }) @@ -86,6 +89,8 @@ func TestLeader_LeftServer(t *testing.T) { } func TestLeader_LeftLeader(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 }) @@ -132,6 +137,8 @@ func TestLeader_LeftLeader(t *testing.T) { } func TestLeader_MultiBootstrap(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -162,6 +169,8 @@ func TestLeader_MultiBootstrap(t *testing.T) { } 
func TestLeader_PlanQueue_Reset(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 }) @@ -218,6 +227,8 @@ func TestLeader_PlanQueue_Reset(t *testing.T) { } func TestLeader_EvalBroker_Reset(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -276,6 +287,8 @@ func TestLeader_EvalBroker_Reset(t *testing.T) { } func TestLeader_PeriodicDispatcher_Restore_Adds(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -366,6 +379,8 @@ func TestLeader_PeriodicDispatcher_Restore_Adds(t *testing.T) { } func TestLeader_PeriodicDispatcher_Restore_NoEvals(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -422,6 +437,8 @@ func TestLeader_PeriodicDispatcher_Restore_NoEvals(t *testing.T) { } func TestLeader_PeriodicDispatcher_Restore_Evals(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -479,6 +496,8 @@ func TestLeader_PeriodicDispatcher_Restore_Evals(t *testing.T) { } func TestLeader_PeriodicDispatch(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 c.EvalGCInterval = 5 * time.Millisecond @@ -499,6 +518,8 @@ func TestLeader_PeriodicDispatch(t *testing.T) { } func TestLeader_ReapFailedEval(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 c.EvalDeliveryLimit = 1 @@ -577,6 +598,8 @@ func TestLeader_ReapFailedEval(t *testing.T) { } func TestLeader_ReapDuplicateEval(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -607,6 +630,8 @@ func TestLeader_ReapDuplicateEval(t *testing.T) { } func TestLeader_revokeVaultAccessorsOnRestore(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) @@ -635,7 +660,7 @@ func TestLeader_revokeVaultAccessorsOnRestore(t *testing.T) { } func TestLeader_revokeSITokenAccessorsOnRestore(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -667,7 +692,7 @@ func TestLeader_revokeSITokenAccessorsOnRestore(t *testing.T) { } func TestLeader_ClusterID(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -683,7 +708,7 @@ func TestLeader_ClusterID(t *testing.T) { } func TestLeader_ClusterID_upgradePath(t *testing.T) { - t.Parallel() + ci.Parallel(t) before := version.Must(version.NewVersion("0.10.1")).String() after := minClusterIDVersion.String() @@ -791,7 +816,7 @@ func TestLeader_ClusterID_upgradePath(t *testing.T) { } func TestLeader_ClusterID_noUpgrade(t *testing.T) { - t.Parallel() + ci.Parallel(t) type server struct { s *Server @@ -856,7 +881,7 @@ func agreeClusterID(t *testing.T, servers []*Server) { } func TestLeader_ReplicateACLPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.Region = "region1" @@ -893,7 +918,7 @@ func TestLeader_ReplicateACLPolicies(t *testing.T) { } func TestLeader_DiffACLPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := state.TestStateStore(t) @@ -925,7 +950,7 @@ func TestLeader_DiffACLPolicies(t *testing.T) { } func TestLeader_ReplicateACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { 
c.Region = "region1" @@ -963,7 +988,7 @@ func TestLeader_ReplicateACLTokens(t *testing.T) { } func TestLeader_DiffACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := state.TestStateStore(t) @@ -1001,7 +1026,7 @@ func TestLeader_DiffACLTokens(t *testing.T) { } func TestLeader_UpgradeRaftVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.Datacenter = "dc1" @@ -1091,6 +1116,8 @@ func TestLeader_UpgradeRaftVersion(t *testing.T) { } func TestLeader_Reelection(t *testing.T) { + ci.Parallel(t) + raftProtocols := []int{1, 2, 3} for _, p := range raftProtocols { t.Run(fmt.Sprintf("Leader Election - Protocol version %d", p), func(t *testing.T) { @@ -1158,7 +1185,7 @@ func leaderElectionTest(t *testing.T, raftProtocol raft.ProtocolVersion) { } func TestLeader_RollRaftServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 2 @@ -1284,6 +1311,8 @@ func TestLeader_RollRaftServer(t *testing.T) { } func TestLeader_RevokeLeadership_MultipleTimes(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -1300,6 +1329,8 @@ func TestLeader_RevokeLeadership_MultipleTimes(t *testing.T) { } func TestLeader_TransitionsUpdateConsistencyRead(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() testutil.WaitForLeader(t, s1.RPC) @@ -1321,6 +1352,8 @@ func TestLeader_TransitionsUpdateConsistencyRead(t *testing.T) { // TestLeader_PausingWorkers asserts that scheduling workers are paused // (and unpaused) upon leader elections (and step downs). func TestLeader_PausingWorkers(t *testing.T) { + ci.Parallel(t) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 12 }) @@ -1359,7 +1392,7 @@ func TestLeader_PausingWorkers(t *testing.T) { // This verifies that removing the server and adding it back with a uuid works // even if the server's address stays the same. 
func TestServer_ReconcileMember(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Create a three node cluster s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -1457,7 +1490,7 @@ func TestServer_ReconcileMember(t *testing.T) { } func TestLeader_ReplicateNamespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.Region = "region1" @@ -1504,7 +1537,7 @@ func TestLeader_ReplicateNamespaces(t *testing.T) { } func TestLeader_DiffNamespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := state.TestStateStore(t) diff --git a/nomad/namespace_endpoint_test.go b/nomad/namespace_endpoint_test.go index eec1c50bd..f56e4cc8e 100644 --- a/nomad/namespace_endpoint_test.go +++ b/nomad/namespace_endpoint_test.go @@ -7,6 +7,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -15,8 +16,8 @@ import ( ) func TestNamespaceEndpoint_GetNamespace(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -44,8 +45,8 @@ func TestNamespaceEndpoint_GetNamespace(t *testing.T) { } func TestNamespaceEndpoint_GetNamespace_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -105,8 +106,8 @@ func TestNamespaceEndpoint_GetNamespace_ACL(t *testing.T) { } func TestNamespaceEndpoint_GetNamespace_Blocking(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() state := s1.fsm.State() @@ -164,8 +165,8 @@ func TestNamespaceEndpoint_GetNamespace_Blocking(t *testing.T) { } func TestNamespaceEndpoint_GetNamespaces(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -190,8 +191,8 @@ func TestNamespaceEndpoint_GetNamespaces(t *testing.T) { } func TestNamespaceEndpoint_GetNamespaces_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -239,8 +240,8 @@ func TestNamespaceEndpoint_GetNamespaces_ACL(t *testing.T) { } func TestNamespaceEndpoint_GetNamespaces_Blocking(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() state := s1.fsm.State() @@ -298,8 +299,8 @@ func TestNamespaceEndpoint_GetNamespaces_Blocking(t *testing.T) { } func TestNamespaceEndpoint_List(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -336,8 +337,8 @@ func TestNamespaceEndpoint_List(t *testing.T) { } func TestNamespaceEndpoint_List_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -410,8 +411,8 @@ func TestNamespaceEndpoint_List_ACL(t *testing.T) { } func TestNamespaceEndpoint_List_Blocking(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() state := s1.fsm.State() @@ -460,8 +461,8 @@ func 
TestNamespaceEndpoint_List_Blocking(t *testing.T) { } func TestNamespaceEndpoint_DeleteNamespaces(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -483,8 +484,8 @@ func TestNamespaceEndpoint_DeleteNamespaces(t *testing.T) { } func TestNamespaceEndpoint_DeleteNamespaces_NonTerminal_Local(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -513,8 +514,8 @@ func TestNamespaceEndpoint_DeleteNamespaces_NonTerminal_Local(t *testing.T) { } func TestNamespaceEndpoint_DeleteNamespaces_NonTerminal_Federated_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.Region = "region1" c.AuthoritativeRegion = "region1" @@ -574,8 +575,8 @@ func TestNamespaceEndpoint_DeleteNamespaces_NonTerminal_Federated_ACL(t *testing } func TestNamespaceEndpoint_DeleteNamespaces_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -649,8 +650,8 @@ func TestNamespaceEndpoint_DeleteNamespaces_ACL(t *testing.T) { } func TestNamespaceEndpoint_DeleteNamespaces_Default(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -666,8 +667,8 @@ func TestNamespaceEndpoint_DeleteNamespaces_Default(t *testing.T) { } func TestNamespaceEndpoint_UpsertNamespaces(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -697,8 +698,8 @@ func TestNamespaceEndpoint_UpsertNamespaces(t *testing.T) { } func TestNamespaceEndpoint_UpsertNamespaces_ACL(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index c91dafc1a..8d1cd12cc 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -11,12 +11,8 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" - vapi "github.com/hashicorp/vault/api" - "github.com/kr/pretty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" @@ -24,10 +20,14 @@ import ( "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + vapi "github.com/hashicorp/vault/api" + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestClientEndpoint_Register(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -90,7 +90,7 @@ func TestClientEndpoint_Register(t *testing.T) { // forwarded RPCs. This is essential otherwise we will think a Yamux session to // a Nomad server is actually the session to the node. 
func TestClientEndpoint_Register_NodeConn_Forwarded(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -181,7 +181,7 @@ func TestClientEndpoint_Register_NodeConn_Forwarded(t *testing.T) { } func TestClientEndpoint_Register_SecretMismatch(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -211,7 +211,7 @@ func TestClientEndpoint_Register_SecretMismatch(t *testing.T) { // Test the deprecated single node deregistration path func TestClientEndpoint_DeregisterOne(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -257,7 +257,7 @@ func TestClientEndpoint_DeregisterOne(t *testing.T) { } func TestClientEndpoint_Deregister_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -323,7 +323,7 @@ func TestClientEndpoint_Deregister_ACL(t *testing.T) { } func TestClientEndpoint_Deregister_Vault(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -385,7 +385,7 @@ func TestClientEndpoint_Deregister_Vault(t *testing.T) { } func TestClientEndpoint_UpdateStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -465,7 +465,7 @@ func TestClientEndpoint_UpdateStatus(t *testing.T) { } func TestClientEndpoint_UpdateStatus_Vault(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -524,7 +524,7 @@ func TestClientEndpoint_UpdateStatus_Vault(t *testing.T) { } func TestClientEndpoint_UpdateStatus_HeartbeatRecovery(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -575,7 +575,7 @@ func TestClientEndpoint_UpdateStatus_HeartbeatRecovery(t *testing.T) { } func TestClientEndpoint_Register_GetEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -670,7 +670,7 @@ func TestClientEndpoint_Register_GetEvals(t *testing.T) { } func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -754,7 +754,7 @@ func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) { } func TestClientEndpoint_UpdateStatus_HeartbeatOnly(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 3 @@ -832,7 +832,7 @@ func TestClientEndpoint_UpdateStatus_HeartbeatOnly(t *testing.T) { } func TestClientEndpoint_UpdateStatus_HeartbeatOnly_Advertise(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) advAddr := "127.0.1.1:1234" @@ -875,7 +875,7 @@ func TestClientEndpoint_UpdateStatus_HeartbeatOnly_Advertise(t *testing.T) { // * an evaluation is created when the node becomes eligible // * drain metadata is properly persisted in Node.LastDrain func TestClientEndpoint_UpdateDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -992,7 +992,7 @@ func TestClientEndpoint_UpdateDrain(t *testing.T) { // is properly persisted in Node.LastDrain as the node drain is updated and // completes. 
func TestClientEndpoint_UpdatedDrainAndCompleted(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -1099,7 +1099,7 @@ func TestClientEndpoint_UpdatedDrainAndCompleted(t *testing.T) { // persisted in Node.LastDrain when calls to Node.UpdateDrain() don't affect // the drain status. func TestClientEndpoint_UpdatedDrainNoop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -1174,7 +1174,7 @@ func TestClientEndpoint_UpdatedDrainNoop(t *testing.T) { // node.write ACLs, and that token accessor ID is properly persisted in // Node.LastDrain.AccessorID func TestClientEndpoint_UpdateDrain_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1243,7 +1243,7 @@ func TestClientEndpoint_UpdateDrain_ACL(t *testing.T) { // This test ensures that Nomad marks client state of allocations which are in // pending/running state to lost when a node is marked as down. func TestClientEndpoint_Drain_Down(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1374,7 +1374,7 @@ func TestClientEndpoint_Drain_Down(t *testing.T) { } func TestClientEndpoint_UpdateEligibility(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -1432,7 +1432,7 @@ func TestClientEndpoint_UpdateEligibility(t *testing.T) { } func TestClientEndpoint_UpdateEligibility_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1488,7 +1488,7 @@ func TestClientEndpoint_UpdateEligibility_ACL(t *testing.T) { } func TestClientEndpoint_GetNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1555,7 +1555,7 @@ func TestClientEndpoint_GetNode(t *testing.T) { } func TestClientEndpoint_GetNode_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1619,7 +1619,7 @@ func TestClientEndpoint_GetNode_ACL(t *testing.T) { } func TestClientEndpoint_GetNode_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1722,7 +1722,7 @@ func TestClientEndpoint_GetNode_Blocking(t *testing.T) { } func TestClientEndpoint_GetAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -1785,7 +1785,7 @@ func TestClientEndpoint_GetAllocs(t *testing.T) { } func TestClientEndpoint_GetAllocs_ACL_Basic(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -1860,7 +1860,7 @@ func TestClientEndpoint_GetAllocs_ACL_Basic(t *testing.T) { } func TestClientEndpoint_GetAllocs_ACL_Namespaces(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() codec := rpcClient(t, s1) @@ -1956,7 +1956,7 @@ func TestClientEndpoint_GetAllocs_ACL_Namespaces(t *testing.T) { } func TestClientEndpoint_GetClientAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -2036,7 +2036,7 @@ func TestClientEndpoint_GetClientAllocs(t *testing.T) { } func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2158,7 +2158,7 @@ func 
TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) { } func TestClientEndpoint_GetClientAllocs_Blocking_GC(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -2235,7 +2235,7 @@ func TestClientEndpoint_GetClientAllocs_Blocking_GC(t *testing.T) { // A MigrateToken should not be created if an allocation shares the same node // with its previous allocation func TestClientEndpoint_GetClientAllocs_WithoutMigrateTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -2288,7 +2288,7 @@ func TestClientEndpoint_GetClientAllocs_WithoutMigrateTokens(t *testing.T) { } func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2381,7 +2381,7 @@ func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) { } func TestClientEndpoint_UpdateAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { // Disabling scheduling in this test so that we can @@ -2479,7 +2479,7 @@ func TestClientEndpoint_UpdateAlloc(t *testing.T) { } func TestClientEndpoint_BatchUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2537,7 +2537,7 @@ func TestClientEndpoint_BatchUpdate(t *testing.T) { } func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2623,7 +2623,7 @@ func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) { } func TestClientEndpoint_CreateNodeEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2708,7 +2708,7 @@ func TestClientEndpoint_CreateNodeEvals(t *testing.T) { // TestClientEndpoint_CreateNodeEvals_MultipleNSes asserts that evals are made // for all jobs across namespaces func TestClientEndpoint_CreateNodeEvals_MultipleNSes(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2767,7 +2767,7 @@ func TestClientEndpoint_CreateNodeEvals_MultipleNSes(t *testing.T) { } func TestClientEndpoint_Evaluate(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -2855,7 +2855,7 @@ func TestClientEndpoint_Evaluate(t *testing.T) { } func TestClientEndpoint_Evaluate_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -2914,7 +2914,7 @@ func TestClientEndpoint_Evaluate_ACL(t *testing.T) { } func TestClientEndpoint_ListNodes(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -2986,7 +2986,7 @@ func TestClientEndpoint_ListNodes(t *testing.T) { } func TestClientEndpoint_ListNodes_Fields(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -3023,7 +3023,7 @@ func TestClientEndpoint_ListNodes_Fields(t *testing.T) { } func TestClientEndpoint_ListNodes_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -3078,7 +3078,7 @@ func TestClientEndpoint_ListNodes_ACL(t *testing.T) { } func TestClientEndpoint_ListNodes_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -3210,7 +3210,7 @@ func TestClientEndpoint_ListNodes_Blocking(t 
*testing.T) { } func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -3292,7 +3292,7 @@ func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) { } func TestClientEndpoint_DeriveVaultToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -3385,7 +3385,7 @@ func TestClientEndpoint_DeriveVaultToken(t *testing.T) { } func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -3443,7 +3443,7 @@ func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) { } func TestClientEndpoint_taskUsesConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, task *structs.Task, exp bool) { result := taskUsesConnect(task) @@ -3471,7 +3471,7 @@ func TestClientEndpoint_taskUsesConnect(t *testing.T) { } func TestClientEndpoint_tasksNotUsingConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) taskGroup := &structs.TaskGroup{ Name: "testgroup", @@ -3523,7 +3523,7 @@ func mutateConnectJob(t *testing.T, job *structs.Job) { } func TestClientEndpoint_DeriveSIToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) s1, cleanupS1 := TestServer(t, nil) // already sets consul mocks @@ -3576,7 +3576,7 @@ func TestClientEndpoint_DeriveSIToken(t *testing.T) { } func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -3624,7 +3624,7 @@ func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) { } func TestClientEndpoint_EmitEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -3663,6 +3663,8 @@ func TestClientEndpoint_EmitEvents(t *testing.T) { } func TestClientEndpoint_ShouldCreateNodeEval(t *testing.T) { + ci.Parallel(t) + t.Run("spurious changes don't require eval", func(t *testing.T) { n1 := mock.Node() n2 := n1.Copy() diff --git a/nomad/operator_endpoint_test.go b/nomad/operator_endpoint_test.go index 47c632351..48ad4d6ae 100644 --- a/nomad/operator_endpoint_test.go +++ b/nomad/operator_endpoint_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/freeport" "github.com/hashicorp/nomad/helper/snapshot" @@ -30,7 +31,7 @@ import ( ) func TestOperator_RaftGetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -74,7 +75,7 @@ func TestOperator_RaftGetConfiguration(t *testing.T) { } func TestOperator_RaftGetConfiguration_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -138,7 +139,7 @@ func TestOperator_RaftGetConfiguration_ACL(t *testing.T) { } func TestOperator_RaftRemovePeerByAddress(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = raft.ProtocolVersion(2) @@ -200,7 +201,7 @@ func TestOperator_RaftRemovePeerByAddress(t *testing.T) { } func TestOperator_RaftRemovePeerByAddress_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 
raft.ProtocolVersion(2) @@ -255,7 +256,7 @@ func TestOperator_RaftRemovePeerByAddress_ACL(t *testing.T) { } func TestOperator_RaftRemovePeerByID(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 3 @@ -317,7 +318,7 @@ func TestOperator_RaftRemovePeerByID(t *testing.T) { } func TestOperator_RaftRemovePeerByID_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 3 @@ -371,7 +372,7 @@ func TestOperator_RaftRemovePeerByID_ACL(t *testing.T) { } func TestOperator_SchedulerGetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.Build = "0.9.0+unittest" @@ -395,7 +396,7 @@ func TestOperator_SchedulerGetConfiguration(t *testing.T) { } func TestOperator_SchedulerSetConfiguration(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.Build = "0.9.0+unittest" @@ -437,7 +438,7 @@ func TestOperator_SchedulerSetConfiguration(t *testing.T) { } func TestOperator_SchedulerGetConfiguration_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 3 @@ -484,7 +485,7 @@ func TestOperator_SchedulerGetConfiguration_ACL(t *testing.T) { } func TestOperator_SchedulerSetConfiguration_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.RaftConfig.ProtocolVersion = 3 @@ -535,7 +536,7 @@ func TestOperator_SchedulerSetConfiguration_ACL(t *testing.T) { } func TestOperator_SnapshotSave(t *testing.T) { - t.Parallel() + ci.Parallel(t) ////// Nomad clusters topology - not specific to test dir, err := ioutil.TempDir("", "nomadtest-operator-") @@ -642,7 +643,7 @@ func TestOperator_SnapshotSave(t *testing.T) { } func TestOperator_SnapshotSave_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) ////// Nomad clusters topology - not specific to test dir, err := ioutil.TempDir("", "nomadtest-operator-") @@ -718,6 +719,8 @@ func TestOperator_SnapshotSave_ACL(t *testing.T) { } func TestOperator_SnapshotRestore(t *testing.T) { + ci.Parallel(t) + targets := []string{"leader", "non_leader", "remote_region"} for _, c := range targets { @@ -881,7 +884,7 @@ func testRestoreSnapshot(t *testing.T, req *structs.SnapshotRestoreRequest, snap } func TestOperator_SnapshotRestore_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir, err := ioutil.TempDir("", "nomadtest-operator-") require.NoError(t, err) diff --git a/nomad/periodic_endpoint_test.go b/nomad/periodic_endpoint_test.go index 5f2a5ed8e..2fb2a38cb 100644 --- a/nomad/periodic_endpoint_test.go +++ b/nomad/periodic_endpoint_test.go @@ -6,6 +6,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -13,7 +14,7 @@ import ( ) func TestPeriodicEndpoint_Force(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -64,7 +65,7 @@ func TestPeriodicEndpoint_Force(t *testing.T) { } func TestPeriodicEndpoint_Force_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic 
dequeue @@ -163,7 +164,7 @@ func TestPeriodicEndpoint_Force_ACL(t *testing.T) { } func TestPeriodicEndpoint_Force_NonPeriodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue diff --git a/nomad/periodic_test.go b/nomad/periodic_test.go index 605032cd3..862a2c5e7 100644 --- a/nomad/periodic_test.go +++ b/nomad/periodic_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -119,7 +120,7 @@ func testPeriodicJob(times ...time.Time) *structs.Job { // TestPeriodicDispatch_SetEnabled test that setting enabled twice is a no-op. // This tests the reported issue: https://github.com/hashicorp/nomad/issues/2829 func TestPeriodicDispatch_SetEnabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) // SetEnabled has been called once but do it again. @@ -142,7 +143,7 @@ func TestPeriodicDispatch_SetEnabled(t *testing.T) { } func TestPeriodicDispatch_Add_NonPeriodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.Job() if err := p.Add(job); err != nil { @@ -156,7 +157,7 @@ func TestPeriodicDispatch_Add_NonPeriodic(t *testing.T) { } func TestPeriodicDispatch_Add_Periodic_Parameterized(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() job.ParameterizedJob = &structs.ParameterizedJobConfig{} @@ -171,7 +172,7 @@ func TestPeriodicDispatch_Add_Periodic_Parameterized(t *testing.T) { } func TestPeriodicDispatch_Add_Periodic_Stopped(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() job.Stop = true @@ -186,7 +187,7 @@ func TestPeriodicDispatch_Add_Periodic_Stopped(t *testing.T) { } func TestPeriodicDispatch_Add_UpdateJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() err := p.Add(job) @@ -208,8 +209,8 @@ func TestPeriodicDispatch_Add_UpdateJob(t *testing.T) { } func TestPeriodicDispatch_Add_Remove_Namespaced(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) - t.Parallel() p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() job2 := mock.PeriodicJob() @@ -226,7 +227,7 @@ func TestPeriodicDispatch_Add_Remove_Namespaced(t *testing.T) { } func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() if err := p.Add(job); err != nil { @@ -251,7 +252,7 @@ func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) { } func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create a job that won't be evaluated for a while. 
@@ -294,7 +295,7 @@ func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) { } func TestPeriodicDispatch_Remove_Untracked(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) if err := p.Remove("ns", "foo"); err != nil { t.Fatalf("Remove failed %v; expected a no-op", err) @@ -302,7 +303,7 @@ func TestPeriodicDispatch_Remove_Untracked(t *testing.T) { } func TestPeriodicDispatch_Remove_Tracked(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) job := mock.PeriodicJob() @@ -326,7 +327,7 @@ func TestPeriodicDispatch_Remove_Tracked(t *testing.T) { } func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) // Create a job that will be evaluated soon. @@ -356,7 +357,7 @@ func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) { } func TestPeriodicDispatch_ForceRun_Untracked(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, _ := testPeriodicDispatcher(t) if _, err := p.ForceRun("ns", "foo"); err == nil { @@ -365,7 +366,7 @@ func TestPeriodicDispatch_ForceRun_Untracked(t *testing.T) { } func TestPeriodicDispatch_ForceRun_Tracked(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create a job that won't be evaluated for a while. @@ -394,7 +395,7 @@ func TestPeriodicDispatch_ForceRun_Tracked(t *testing.T) { } func TestPeriodicDispatch_Run_DisallowOverlaps(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create a job that will trigger two launches but disallows overlapping. @@ -424,7 +425,7 @@ func TestPeriodicDispatch_Run_DisallowOverlaps(t *testing.T) { } func TestPeriodicDispatch_Run_Multiple(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create a job that will be launched twice. @@ -456,7 +457,7 @@ func TestPeriodicDispatch_Run_Multiple(t *testing.T) { } func TestPeriodicDispatch_Run_SameTime(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create two job that will be launched at the same time. @@ -494,7 +495,7 @@ func TestPeriodicDispatch_Run_SameTime(t *testing.T) { } func TestPeriodicDispatch_Run_SameID_Different_Namespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create two job that will be launched at the same time. @@ -541,7 +542,7 @@ func TestPeriodicDispatch_Run_SameID_Different_Namespace(t *testing.T) { // some after each other and some invalid times, and ensures the correct // behavior. func TestPeriodicDispatch_Complex(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) // Create some jobs launching at different times. 
@@ -625,7 +626,7 @@ func shuffle(jobs []*structs.Job) { } func TestPeriodicHeap_Order(t *testing.T) { - t.Parallel() + ci.Parallel(t) h := NewPeriodicHeap() j1 := mock.PeriodicJob() j2 := mock.PeriodicJob() @@ -663,7 +664,7 @@ func deriveChildJob(parent *structs.Job) *structs.Job { } func TestPeriodicDispatch_RunningChildren_NoEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -687,7 +688,7 @@ func TestPeriodicDispatch_RunningChildren_NoEvals(t *testing.T) { } func TestPeriodicDispatch_RunningChildren_ActiveEvals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -724,7 +725,7 @@ func TestPeriodicDispatch_RunningChildren_ActiveEvals(t *testing.T) { } func TestPeriodicDispatch_RunningChildren_ActiveAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -772,7 +773,7 @@ func TestPeriodicDispatch_RunningChildren_ActiveAllocs(t *testing.T) { // TestPeriodicDispatch_JobEmptyStatus asserts that dispatched // job will always has an empty status func TestPeriodicDispatch_JobEmptyStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) p, m := testPeriodicDispatcher(t) job := testPeriodicJob(time.Now().Add(1 * time.Second)) diff --git a/nomad/plan_apply_pool_test.go b/nomad/plan_apply_pool_test.go index 4743dc25e..1f88a5ccc 100644 --- a/nomad/plan_apply_pool_test.go +++ b/nomad/plan_apply_pool_test.go @@ -3,12 +3,13 @@ package nomad import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" ) func TestEvaluatePool(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -41,7 +42,7 @@ func TestEvaluatePool(t *testing.T) { } func TestEvaluatePool_Resize(t *testing.T) { - t.Parallel() + ci.Parallel(t) pool := NewEvaluatePool(1, 4) defer pool.Shutdown() if n := pool.Size(); n != 1 { diff --git a/nomad/plan_apply_test.go b/nomad/plan_apply_test.go index 7550baf41..984708010 100644 --- a/nomad/plan_apply_test.go +++ b/nomad/plan_apply_test.go @@ -6,6 +6,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -65,7 +66,7 @@ func testRegisterJob(t *testing.T, s *Server, j *structs.Job) { // COMPAT 0.11: Tests the older unoptimized code path for applyPlan func TestPlanApply_applyPlan(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -239,7 +240,7 @@ func TestPlanApply_applyPlan(t *testing.T) { // Verifies that applyPlan properly updates the constituent objects in MemDB, // when the plan contains normalized allocs. 
func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.Build = "0.9.2" @@ -390,7 +391,7 @@ func TestPlanApply_applyPlanWithNormalizedAllocs(t *testing.T) { } func TestPlanApply_EvalPlan_Simple(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -434,7 +435,7 @@ func TestPlanApply_EvalPlan_Simple(t *testing.T) { } func TestPlanApply_EvalPlan_Preemption(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() node.NodeResources = &structs.NodeResources{ @@ -548,7 +549,7 @@ func TestPlanApply_EvalPlan_Preemption(t *testing.T) { } func TestPlanApply_EvalPlan_Partial(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -606,7 +607,7 @@ func TestPlanApply_EvalPlan_Partial(t *testing.T) { } func TestPlanApply_EvalPlan_Partial_AllAtOnce(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -657,7 +658,7 @@ func TestPlanApply_EvalPlan_Partial_AllAtOnce(t *testing.T) { } func TestPlanApply_EvalNodePlan_Simple(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -684,7 +685,7 @@ func TestPlanApply_EvalNodePlan_Simple(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeNotReady(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() node.Status = structs.NodeStatusInit @@ -712,7 +713,7 @@ func TestPlanApply_EvalNodePlan_NodeNotReady(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.DrainNode() state.UpsertNode(structs.MsgTypeTestSetup, 1000, node) @@ -739,7 +740,7 @@ func TestPlanApply_EvalNodePlan_NodeDrain(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeNotExist(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) snap, _ := state.Snapshot() @@ -765,7 +766,7 @@ func TestPlanApply_EvalNodePlan_NodeNotExist(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() state := testStateStore(t) node := mock.Node() @@ -802,7 +803,7 @@ func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) { // Test that we detect device oversubscription func TestPlanApply_EvalNodePlan_NodeFull_Device(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) alloc := mock.Alloc() state := testStateStore(t) @@ -855,7 +856,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_Device(t *testing.T) { } func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() state := testStateStore(t) node := mock.Node() @@ -887,7 +888,7 @@ func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() state := testStateStore(t) node := mock.Node() @@ -925,7 +926,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() state := testStateStore(t) node := mock.Node() @@ -958,7 
+959,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) { } func TestPlanApply_EvalNodePlan_NodeDown_EvictOnly(t *testing.T) { - t.Parallel() + ci.Parallel(t) alloc := mock.Alloc() state := testStateStore(t) node := mock.Node() diff --git a/nomad/plan_endpoint_test.go b/nomad/plan_endpoint_test.go index a3fb596a6..8c02c2ba9 100644 --- a/nomad/plan_endpoint_test.go +++ b/nomad/plan_endpoint_test.go @@ -5,6 +5,7 @@ import ( "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -12,7 +13,7 @@ import ( ) func TestPlanEndpoint_Submit(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -54,7 +55,7 @@ func TestPlanEndpoint_Submit(t *testing.T) { // TestPlanEndpoint_Submit_Bad asserts that the Plan.Submit endpoint rejects // bad data with an error instead of panicking. func TestPlanEndpoint_Submit_Bad(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 diff --git a/nomad/plan_normalization_test.go b/nomad/plan_normalization_test.go index ba427d423..6dbe18b22 100644 --- a/nomad/plan_normalization_test.go +++ b/nomad/plan_normalization_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/assert" @@ -17,6 +18,8 @@ import ( // Whenever this test is changed, care should be taken to ensure the older msgpack size // is recalculated when new fields are introduced in ApplyPlanResultsRequest func TestPlanNormalize(t *testing.T) { + ci.Parallel(t) + // This size was calculated using the older ApplyPlanResultsRequest format, in which allocations // didn't use OmitEmpty and only the job was normalized in the stopped and preempted allocs. 
// The newer format uses OmitEmpty and uses a minimal set of fields for the diff of the diff --git a/nomad/plan_queue_test.go b/nomad/plan_queue_test.go index 933bd1f39..42877846b 100644 --- a/nomad/plan_queue_test.go +++ b/nomad/plan_queue_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" ) @@ -17,7 +18,7 @@ func testPlanQueue(t *testing.T) *PlanQueue { } func TestPlanQueue_Enqueue_Dequeue(t *testing.T) { - t.Parallel() + ci.Parallel(t) pq := testPlanQueue(t) if pq.Enabled() { t.Fatalf("should not be enabled") @@ -84,7 +85,7 @@ func TestPlanQueue_Enqueue_Dequeue(t *testing.T) { } func TestPlanQueue_Enqueue_Disable(t *testing.T) { - t.Parallel() + ci.Parallel(t) pq := testPlanQueue(t) // Enqueue @@ -115,7 +116,7 @@ func TestPlanQueue_Enqueue_Disable(t *testing.T) { } func TestPlanQueue_Dequeue_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) pq := testPlanQueue(t) pq.SetEnabled(true) @@ -137,7 +138,7 @@ func TestPlanQueue_Dequeue_Timeout(t *testing.T) { // Ensure higher priority dequeued first func TestPlanQueue_Dequeue_Priority(t *testing.T) { - t.Parallel() + ci.Parallel(t) pq := testPlanQueue(t) pq.SetEnabled(true) @@ -171,7 +172,7 @@ func TestPlanQueue_Dequeue_Priority(t *testing.T) { // Ensure FIFO at fixed priority func TestPlanQueue_Dequeue_FIFO(t *testing.T) { - t.Parallel() + ci.Parallel(t) pq := testPlanQueue(t) pq.SetEnabled(true) diff --git a/nomad/regions_endpoint_test.go b/nomad/regions_endpoint_test.go index 7f3e216de..97c7d1b04 100644 --- a/nomad/regions_endpoint_test.go +++ b/nomad/regions_endpoint_test.go @@ -5,12 +5,13 @@ import ( "testing" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" ) func TestRegionList(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make the servers s1, cleanupS1 := TestServer(t, func(c *Config) { diff --git a/nomad/rpc_test.go b/nomad/rpc_test.go index bd738f279..d7c0dc910 100644 --- a/nomad/rpc_test.go +++ b/nomad/rpc_test.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/pool" "github.com/hashicorp/nomad/helper/testlog" @@ -47,7 +48,7 @@ func rpcClient(t *testing.T, s *Server) rpc.ClientCodec { } func TestRPC_forwardLeader(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 @@ -89,7 +90,7 @@ func TestRPC_forwardLeader(t *testing.T) { } func TestRPC_WaitForConsistentReads(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS2 := TestServer(t, func(c *Config) { c.RPCHoldTimeout = 20 * time.Millisecond @@ -131,7 +132,7 @@ func TestRPC_WaitForConsistentReads(t *testing.T) { } func TestRPC_forwardRegion(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -156,7 +157,7 @@ func TestRPC_forwardRegion(t *testing.T) { } func TestRPC_getServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -182,7 +183,7 @@ func TestRPC_getServer(t *testing.T) { } func TestRPC_PlaintextRPCSucceedsWhenInUpgradeMode(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -225,7 +226,7 @@ func 
TestRPC_PlaintextRPCSucceedsWhenInUpgradeMode(t *testing.T) { } func TestRPC_PlaintextRPCFailsWhenNotInUpgradeMode(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -262,7 +263,7 @@ func TestRPC_PlaintextRPCFailsWhenNotInUpgradeMode(t *testing.T) { } func TestRPC_streamingRpcConn_badMethod(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -292,7 +293,7 @@ func TestRPC_streamingRpcConn_badMethod(t *testing.T) { } func TestRPC_streamingRpcConn_badMethod_TLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) const ( @@ -352,7 +353,7 @@ func TestRPC_streamingRpcConn_badMethod_TLS(t *testing.T) { } func TestRPC_streamingRpcConn_goodMethod_Plaintext(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) dir := tmpDir(t) defer os.RemoveAll(dir) @@ -404,7 +405,7 @@ func TestRPC_streamingRpcConn_goodMethod_Plaintext(t *testing.T) { } func TestRPC_streamingRpcConn_goodMethod_TLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) const ( @@ -484,7 +485,7 @@ func TestRPC_streamingRpcConn_goodMethod_TLS(t *testing.T) { // switch the conn pool to establishing v2 connections and we can deprecate this // test. func TestRPC_handleMultiplexV2(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s, cleanupS := TestServer(t, nil) @@ -544,7 +545,7 @@ func TestRPC_handleMultiplexV2(t *testing.T) { // TestRPC_TLS_in_TLS asserts that trying to nest TLS connections fails. func TestRPC_TLS_in_TLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../helper/tlsutil/testdata/ca.pem" @@ -604,7 +605,7 @@ func TestRPC_TLS_in_TLS(t *testing.T) { // // Invalid limits are tested in command/agent/agent_test.go func TestRPC_Limits_OK(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../helper/tlsutil/testdata/ca.pem" @@ -847,7 +848,7 @@ func TestRPC_Limits_OK(t *testing.T) { tc := cases[i] name := fmt.Sprintf("%d-tls-%t-timeout-%s-limit-%v", i, tc.tls, tc.timeout, tc.limit) t.Run(name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) if tc.limit >= maxConns { t.Fatalf("test fixture failure: cannot assert limit (%d) >= max (%d)", tc.limit, maxConns) @@ -898,7 +899,7 @@ func TestRPC_Limits_OK(t *testing.T) { // the overall connection limit to prevent DOS via server-routed streaming API // calls. 
func TestRPC_Limits_Streaming(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanup := TestServer(t, func(c *Config) { limits := config.DefaultLimits() @@ -1019,7 +1020,7 @@ func TestRPC_Limits_Streaming(t *testing.T) { } func TestRPC_TLS_Enforcement_Raft(t *testing.T) { - t.Parallel() + ci.Parallel(t) defer func() { //TODO Avoid panics from logging during shutdown @@ -1102,7 +1103,7 @@ func TestRPC_TLS_Enforcement_Raft(t *testing.T) { } func TestRPC_TLS_Enforcement_RPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) defer func() { //TODO Avoid panics from logging during shutdown diff --git a/nomad/scaling_endpoint_test.go b/nomad/scaling_endpoint_test.go index 6dabf3087..673d3be14 100644 --- a/nomad/scaling_endpoint_test.go +++ b/nomad/scaling_endpoint_test.go @@ -5,17 +5,18 @@ import ( "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestScalingEndpoint_StaleReadSupport(t *testing.T) { + ci.Parallel(t) assert := assert.New(t) list := &structs.ScalingPolicyListRequest{} assert.True(list.IsRead()) @@ -24,7 +25,7 @@ func TestScalingEndpoint_StaleReadSupport(t *testing.T) { } func TestScalingEndpoint_GetPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) @@ -63,7 +64,7 @@ func TestScalingEndpoint_GetPolicy(t *testing.T) { } func TestScalingEndpoint_GetPolicy_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -138,7 +139,7 @@ func TestScalingEndpoint_GetPolicy_ACL(t *testing.T) { } func TestScalingEndpoint_ListPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -230,7 +231,7 @@ func TestScalingEndpoint_ListPolicies(t *testing.T) { } func TestScalingEndpoint_ListPolicies_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, root, cleanupS1 := TestACLServer(t, nil) @@ -309,7 +310,7 @@ func TestScalingEndpoint_ListPolicies_ACL(t *testing.T) { } func TestScalingEndpoint_ListPolicies_Blocking(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) s1, cleanupS1 := TestServer(t, nil) diff --git a/nomad/search_endpoint_test.go b/nomad/search_endpoint_test.go index cbd634b3e..071520ca5 100644 --- a/nomad/search_endpoint_test.go +++ b/nomad/search_endpoint_test.go @@ -8,6 +8,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -37,7 +38,7 @@ func mockAlloc() *structs.Allocation { } func TestSearch_PrefixSearch_Job(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ -70,7 +71,7 @@ func TestSearch_PrefixSearch_Job(t *testing.T) { } func TestSearch_PrefixSearch_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) jobID := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ -179,7 +180,7 @@ func TestSearch_PrefixSearch_ACL(t *testing.T) { } func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) { - t.Parallel() + 
ci.Parallel(t) prefix := "example-test-------" // Assert that a job with more than 4 hyphens works @@ -221,7 +222,7 @@ func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) { } func TestSearch_PrefixSearch_All_LongJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := strings.Repeat("a", 100) @@ -261,7 +262,7 @@ func TestSearch_PrefixSearch_All_LongJob(t *testing.T) { // truncate should limit results to 20 func TestSearch_PrefixSearch_Truncate(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ -294,7 +295,7 @@ func TestSearch_PrefixSearch_Truncate(t *testing.T) { } func TestSearch_PrefixSearch_AllWithJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ -330,7 +331,7 @@ func TestSearch_PrefixSearch_AllWithJob(t *testing.T) { } func TestSearch_PrefixSearch_Evals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -363,7 +364,7 @@ func TestSearch_PrefixSearch_Evals(t *testing.T) { } func TestSearch_PrefixSearch_Allocation(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -400,7 +401,7 @@ func TestSearch_PrefixSearch_Allocation(t *testing.T) { } func TestSearch_PrefixSearch_All_UUID(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -443,7 +444,7 @@ func TestSearch_PrefixSearch_All_UUID(t *testing.T) { } func TestSearch_PrefixSearch_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -480,7 +481,7 @@ func TestSearch_PrefixSearch_Node(t *testing.T) { } func TestSearch_PrefixSearch_Deployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -512,7 +513,7 @@ func TestSearch_PrefixSearch_Deployment(t *testing.T) { } func TestSearch_PrefixSearch_AllContext(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -553,7 +554,7 @@ func TestSearch_PrefixSearch_AllContext(t *testing.T) { // Tests that the top 20 matches are returned when no prefix is set func TestSearch_PrefixSearch_NoPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ -585,7 +586,7 @@ func TestSearch_PrefixSearch_NoPrefix(t *testing.T) { // Tests that the zero matches are returned when a prefix has no matching // results func TestSearch_PrefixSearch_NoMatches(t *testing.T) { - t.Parallel() + ci.Parallel(t) prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970" @@ -614,7 +615,7 @@ func TestSearch_PrefixSearch_NoMatches(t *testing.T) { // Prefixes can only be looked up if their length is a power of two. For // prefixes which are an odd length, use the length-1 characters. 
func TestSearch_PrefixSearch_RoundDownToEven(t *testing.T) { - t.Parallel() + ci.Parallel(t) id1 := "aaafaaaa-e8f7-fd38-c855-ab94ceb89" id2 := "aaafeaaa-e8f7-fd38-c855-ab94ceb89" @@ -646,7 +647,7 @@ func TestSearch_PrefixSearch_RoundDownToEven(t *testing.T) { } func TestSearch_PrefixSearch_MultiRegion(t *testing.T) { - t.Parallel() + ci.Parallel(t) jobName := "exampleexample" @@ -687,7 +688,7 @@ func TestSearch_PrefixSearch_MultiRegion(t *testing.T) { } func TestSearch_PrefixSearch_CSIPlugin(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -718,7 +719,7 @@ func TestSearch_PrefixSearch_CSIPlugin(t *testing.T) { } func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -755,7 +756,7 @@ func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { } func TestSearch_PrefixSearch_Namespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanup := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -787,7 +788,7 @@ func TestSearch_PrefixSearch_Namespace(t *testing.T) { } func TestSearch_PrefixSearch_Namespace_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, root, cleanup := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -914,7 +915,7 @@ func TestSearch_PrefixSearch_Namespace_ACL(t *testing.T) { } func TestSearch_PrefixSearch_ScalingPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -952,7 +953,7 @@ func TestSearch_PrefixSearch_ScalingPolicy(t *testing.T) { } func TestSearch_FuzzySearch_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, root, cleanupS := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1057,7 +1058,7 @@ func TestSearch_FuzzySearch_ACL(t *testing.T) { } func TestSearch_FuzzySearch_NotEnabled(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1085,7 +1086,7 @@ func TestSearch_FuzzySearch_NotEnabled(t *testing.T) { } func TestSearch_FuzzySearch_ShortText(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1113,7 +1114,7 @@ func TestSearch_FuzzySearch_ShortText(t *testing.T) { } func TestSearch_FuzzySearch_TruncateLimitQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1146,7 +1147,7 @@ func TestSearch_FuzzySearch_TruncateLimitQuery(t *testing.T) { } func TestSearch_FuzzySearch_TruncateLimitResults(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1181,7 +1182,7 @@ func TestSearch_FuzzySearch_TruncateLimitResults(t *testing.T) { } func TestSearch_FuzzySearch_Evals(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1213,7 +1214,7 @@ func TestSearch_FuzzySearch_Evals(t *testing.T) { } func TestSearch_FuzzySearch_Allocation(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1248,7 +1249,7 @@ func TestSearch_FuzzySearch_Allocation(t *testing.T) { } func TestSearch_FuzzySearch_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1280,7 +1281,7 @@ func TestSearch_FuzzySearch_Node(t *testing.T) { } func TestSearch_FuzzySearch_Deployment(t 
*testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1310,7 +1311,7 @@ func TestSearch_FuzzySearch_Deployment(t *testing.T) { } func TestSearch_FuzzySearch_CSIPlugin(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1338,7 +1339,7 @@ func TestSearch_FuzzySearch_CSIPlugin(t *testing.T) { } func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1373,7 +1374,7 @@ func TestSearch_FuzzySearch_CSIVolume(t *testing.T) { } func TestSearch_FuzzySearch_Namespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanup := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1403,7 +1404,7 @@ func TestSearch_FuzzySearch_Namespace(t *testing.T) { } func TestSearch_FuzzySearch_Namespace_caseInsensitive(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanup := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1434,7 +1435,7 @@ func TestSearch_FuzzySearch_Namespace_caseInsensitive(t *testing.T) { } func TestSearch_FuzzySearch_ScalingPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1471,7 +1472,7 @@ func TestSearch_FuzzySearch_ScalingPolicy(t *testing.T) { } func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, root, cleanup := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1585,7 +1586,7 @@ func TestSearch_FuzzySearch_Namespace_ACL(t *testing.T) { } func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, root, cleanupS := TestACLServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -1879,7 +1880,7 @@ func TestSearch_FuzzySearch_MultiNamespace_ACL(t *testing.T) { } func TestSearch_FuzzySearch_Job(t *testing.T) { - t.Parallel() + ci.Parallel(t) s, cleanupS := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -2025,6 +2026,8 @@ func TestSearch_FuzzySearch_Job(t *testing.T) { } func TestSearch_FuzzySearch_fuzzyIndex(t *testing.T) { + ci.Parallel(t) + for _, tc := range []struct { name, text string exp int diff --git a/nomad/serf_test.go b/nomad/serf_test.go index 7444368b1..938360196 100644 --- a/nomad/serf_test.go +++ b/nomad/serf_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/hashicorp/raft" "github.com/hashicorp/serf/serf" @@ -17,7 +18,7 @@ import ( ) func TestNomad_JoinPeer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -59,7 +60,7 @@ func TestNomad_JoinPeer(t *testing.T) { } func TestNomad_RemovePeer(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -99,7 +100,7 @@ func TestNomad_RemovePeer(t *testing.T) { } func TestNomad_ReapPeer(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir := tmpDir(t) defer os.RemoveAll(dir) @@ -195,7 +196,7 @@ func TestNomad_ReapPeer(t *testing.T) { } func TestNomad_BootstrapExpect(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir := tmpDir(t) defer os.RemoveAll(dir) @@ -272,7 +273,7 @@ func TestNomad_BootstrapExpect(t *testing.T) { } func TestNomad_BootstrapExpect_NonVoter(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir := t.TempDir() @@ -343,7 +344,7 @@ func TestNomad_BootstrapExpect_NonVoter(t *testing.T) { } func TestNomad_BadExpect(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 @@ -386,7 +387,7 @@ func TestNomad_BadExpect(t *testing.T) { // TestNomad_NonBootstraping_ShouldntBootstap asserts that if BootstrapExpect is zero, // the server shouldn't bootstrap func TestNomad_NonBootstraping_ShouldntBootstap(t *testing.T) { - t.Parallel() + ci.Parallel(t) dir := tmpDir(t) defer os.RemoveAll(dir) diff --git a/nomad/server_test.go b/nomad/server_test.go index 5872f3ceb..db1b1091e 100644 --- a/nomad/server_test.go +++ b/nomad/server_test.go @@ -11,6 +11,7 @@ import ( "time" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -31,7 +32,7 @@ func tmpDir(t *testing.T) string { } func TestServer_RPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -43,7 +44,7 @@ func TestServer_RPC(t *testing.T) { } func TestServer_RPC_TLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../helper/tlsutil/testdata/ca.pem" @@ -109,7 +110,7 @@ func TestServer_RPC_TLS(t *testing.T) { } func TestServer_RPC_MixedTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) const ( cafile = "../helper/tlsutil/testdata/ca.pem" @@ -178,7 +179,7 @@ func TestServer_RPC_MixedTLS(t *testing.T) { } func TestServer_Regions(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Make the servers s1, cleanupS1 := TestServer(t, func(c *Config) { @@ -211,7 +212,7 @@ func TestServer_Regions(t *testing.T) { } func TestServer_Reload_Vault(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.Region = "global" @@ -243,7 +244,7 @@ func connectionReset(msg string) bool { // Tests that the server will successfully reload its network connections, // upgrading from plaintext to TLS if the server's TLS configuration changes. func TestServer_Reload_TLSConnections_PlaintextToTLS(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -292,7 +293,7 @@ func TestServer_Reload_TLSConnections_PlaintextToTLS(t *testing.T) { // Tests that the server will successfully reload its network connections, // downgrading from TLS to plaintext if the server's TLS configuration changes. 
func TestServer_Reload_TLSConnections_TLSToPlaintext_RPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -339,7 +340,7 @@ func TestServer_Reload_TLSConnections_TLSToPlaintext_RPC(t *testing.T) { // Tests that the server will successfully reload its network connections, // downgrading only RPC connections func TestServer_Reload_TLSConnections_TLSToPlaintext_OnlyRPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -393,7 +394,7 @@ func TestServer_Reload_TLSConnections_TLSToPlaintext_OnlyRPC(t *testing.T) { // Tests that the server will successfully reload its network connections, // upgrading only RPC connections func TestServer_Reload_TLSConnections_PlaintextToTLS_OnlyRPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -449,7 +450,7 @@ func TestServer_Reload_TLSConnections_PlaintextToTLS_OnlyRPC(t *testing.T) { // Test that Raft connections are reloaded as expected when a Nomad server is // upgraded from plaintext to TLS func TestServer_Reload_TLSConnections_Raft(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) const ( @@ -529,7 +530,7 @@ func TestServer_Reload_TLSConnections_Raft(t *testing.T) { } func TestServer_InvalidSchedulers(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Set the config to not have the core scheduler @@ -553,7 +554,7 @@ func TestServer_InvalidSchedulers(t *testing.T) { } func TestServer_RPCNameAndRegionValidation(t *testing.T) { - t.Parallel() + ci.Parallel(t) for _, tc := range []struct { name string region string @@ -580,7 +581,7 @@ func TestServer_RPCNameAndRegionValidation(t *testing.T) { } func TestServer_ReloadSchedulers_NumSchedulers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 8 @@ -598,7 +599,7 @@ func TestServer_ReloadSchedulers_NumSchedulers(t *testing.T) { } func TestServer_ReloadSchedulers_EnabledSchedulers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.EnabledSchedulers = []string{structs.JobTypeCore, structs.JobTypeSystem} @@ -618,7 +619,7 @@ func TestServer_ReloadSchedulers_EnabledSchedulers(t *testing.T) { } func TestServer_ReloadSchedulers_InvalidSchedulers(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Set the config to not have the core scheduler config := DefaultConfig() diff --git a/nomad/state/autopilot_test.go b/nomad/state/autopilot_test.go index f1805e0f3..9379f9ba8 100644 --- a/nomad/state/autopilot_test.go +++ b/nomad/state/autopilot_test.go @@ -5,10 +5,13 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" ) func TestStateStore_Autopilot(t *testing.T) { + ci.Parallel(t) + s := testStateStore(t) expected := &structs.AutopilotConfig{ @@ -39,6 +42,8 @@ func TestStateStore_Autopilot(t *testing.T) { } func TestStateStore_AutopilotCAS(t *testing.T) { + ci.Parallel(t) + s := testStateStore(t) expected := &structs.AutopilotConfig{ diff --git a/nomad/state/deployment_events_test.go b/nomad/state/deployment_events_test.go index 223812449..7bda62063 100644 --- a/nomad/state/deployment_events_test.go +++ b/nomad/state/deployment_events_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/stream" "github.com/hashicorp/nomad/nomad/structs" @@ -12,7 +13,7 @@ import ( ) func TestDeploymentEventFromChanges(t *testing.T) { 
- t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() diff --git a/nomad/state/events_test.go b/nomad/state/events_test.go index 078ba43ce..61ffa65b4 100644 --- a/nomad/state/events_test.go +++ b/nomad/state/events_test.go @@ -5,6 +5,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -15,7 +16,7 @@ import ( // TestEventFromChange_SingleEventPerTable ensures that only a single event is // created per table per memdb.Change func TestEventFromChange_SingleEventPerTable(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -42,7 +43,7 @@ func TestEventFromChange_SingleEventPerTable(t *testing.T) { } func TestEventFromChange_ACLTokenSecretID(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -95,7 +96,7 @@ func TestEventFromChange_ACLTokenSecretID(t *testing.T) { } func TestEventsFromChanges_DeploymentUpdate(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -141,7 +142,7 @@ func TestEventsFromChanges_DeploymentUpdate(t *testing.T) { } func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -218,7 +219,7 @@ func TestEventsFromChanges_DeploymentPromotion(t *testing.T) { } func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -314,7 +315,7 @@ func TestEventsFromChanges_DeploymentAllocHealthRequestType(t *testing.T) { } func TestEventsFromChanges_UpsertNodeEventsType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -355,7 +356,7 @@ func TestEventsFromChanges_UpsertNodeEventsType(t *testing.T) { } func TestEventsFromChanges_NodeUpdateStatusRequest(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -386,7 +387,7 @@ func TestEventsFromChanges_NodeUpdateStatusRequest(t *testing.T) { } func TestEventsFromChanges_EvalUpdateRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -420,7 +421,7 @@ func TestEventsFromChanges_EvalUpdateRequestType(t *testing.T) { } func TestEventsFromChanges_ApplyPlanResultsRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -481,7 +482,7 @@ func TestEventsFromChanges_ApplyPlanResultsRequestType(t *testing.T) { } func TestEventsFromChanges_BatchNodeUpdateDrainRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -536,7 +537,7 @@ func TestEventsFromChanges_BatchNodeUpdateDrainRequestType(t *testing.T) { } func TestEventsFromChanges_NodeUpdateEligibilityRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -576,7 +577,7 @@ func 
TestEventsFromChanges_NodeUpdateEligibilityRequestType(t *testing.T) { } func TestEventsFromChanges_AllocUpdateDesiredTransitionRequestType(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() @@ -647,7 +648,7 @@ func TestEventsFromChanges_JobDeregisterRequestType(t *testing.T) { } func TestEventsFromChanges_WithDeletion(t *testing.T) { - t.Parallel() + ci.Parallel(t) changes := Changes{ Index: uint64(1), @@ -673,7 +674,7 @@ func TestEventsFromChanges_WithDeletion(t *testing.T) { } func TestEventsFromChanges_WithNodeDeregistration(t *testing.T) { - t.Parallel() + ci.Parallel(t) before := &structs.Node{ ID: "some-id", @@ -712,6 +713,8 @@ func TestEventsFromChanges_WithNodeDeregistration(t *testing.T) { } func TestNodeEventsFromChanges(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string MsgType structs.MessageType @@ -904,7 +907,7 @@ func TestNodeEventsFromChanges(t *testing.T) { } func TestNodeDrainEventFromChanges(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := TestStateStoreCfg(t, TestStateStorePublisher(t)) defer s.StopEventBroker() diff --git a/nomad/state/paginator/filter_test.go b/nomad/state/paginator/filter_test.go index d94f49a57..20e94bb95 100644 --- a/nomad/state/paginator/filter_test.go +++ b/nomad/state/paginator/filter_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/hashicorp/go-bexpr" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -12,7 +13,7 @@ import ( ) func TestGenericFilter(t *testing.T) { - t.Parallel() + ci.Parallel(t) ids := []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"} filters := []Filter{GenericFilter{ @@ -45,7 +46,7 @@ func TestGenericFilter(t *testing.T) { } func TestNamespaceFilter(t *testing.T) { - t.Parallel() + ci.Parallel(t) mocks := []*mockObject{ {namespace: "default"}, diff --git a/nomad/state/paginator/paginator_test.go b/nomad/state/paginator/paginator_test.go index e3678da53..2d7daa2a2 100644 --- a/nomad/state/paginator/paginator_test.go +++ b/nomad/state/paginator/paginator_test.go @@ -4,13 +4,13 @@ import ( "errors" "testing" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/require" ) func TestPaginator(t *testing.T) { - t.Parallel() + ci.Parallel(t) ids := []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"} cases := []struct { diff --git a/nomad/state/paginator/tokenizer_test.go b/nomad/state/paginator/tokenizer_test.go index c74fe8a67..174f1f1d3 100644 --- a/nomad/state/paginator/tokenizer_test.go +++ b/nomad/state/paginator/tokenizer_test.go @@ -4,11 +4,14 @@ import ( "fmt" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/stretchr/testify/require" ) func TestStructsTokenizer(t *testing.T) { + ci.Parallel(t) + j := mock.Job() cases := []struct { diff --git a/nomad/state/schema_test.go b/nomad/state/schema_test.go index f5b1b620f..2131ffa5e 100644 --- a/nomad/state/schema_test.go +++ b/nomad/state/schema_test.go @@ -4,11 +4,14 @@ import ( "testing" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/stretchr/testify/require" ) func TestStateStoreSchema(t *testing.T) { + ci.Parallel(t) + schema := stateStoreSchema() _, err := memdb.NewMemDB(schema) if err != nil { @@ -17,6 +20,8 @@ func 
TestStateStoreSchema(t *testing.T) { } func TestState_singleRecord(t *testing.T) { + ci.Parallel(t) + require := require.New(t) const ( @@ -87,6 +92,8 @@ func TestState_singleRecord(t *testing.T) { } func TestState_ScalingPolicyTargetFieldIndex_FromObject(t *testing.T) { + ci.Parallel(t) + require := require.New(t) policy := mock.ScalingPolicy() diff --git a/nomad/state/state_store_restore_test.go b/nomad/state/state_store_restore_test.go index a69f2c620..7c4e18e85 100644 --- a/nomad/state/state_store_restore_test.go +++ b/nomad/state/state_store_restore_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,7 +15,7 @@ import ( ) func TestStateStore_RestoreNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() @@ -42,7 +43,7 @@ func TestStateStore_RestoreNode(t *testing.T) { } func TestStateStore_RestoreJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -70,7 +71,7 @@ func TestStateStore_RestoreJob(t *testing.T) { } func TestStateStore_RestorePeriodicLaunch(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -107,7 +108,7 @@ func TestStateStore_RestorePeriodicLaunch(t *testing.T) { } func TestStateStore_RestoreJobVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -139,7 +140,7 @@ func TestStateStore_RestoreJobVersion(t *testing.T) { } func TestStateStore_RestoreDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) d := mock.Deployment() @@ -171,7 +172,7 @@ func TestStateStore_RestoreDeployment(t *testing.T) { } func TestStateStore_RestoreJobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -207,7 +208,7 @@ func TestStateStore_RestoreJobSummary(t *testing.T) { } func TestStateStore_RestoreCSIPlugin(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -227,7 +228,7 @@ func TestStateStore_RestoreCSIPlugin(t *testing.T) { } func TestStateStore_RestoreCSIVolume(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -248,7 +249,7 @@ func TestStateStore_RestoreCSIVolume(t *testing.T) { } func TestStateStore_RestoreIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -276,7 +277,7 @@ func TestStateStore_RestoreIndex(t *testing.T) { } func TestStateStore_RestoreEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) eval := mock.Eval() @@ -304,7 +305,7 @@ func TestStateStore_RestoreEval(t *testing.T) { } func TestStateStore_RestoreAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -337,7 +338,7 @@ func TestStateStore_RestoreAlloc(t *testing.T) { } func TestStateStore_RestoreVaultAccessor(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) a := mock.VaultAccessor() @@ -369,7 +370,7 @@ func TestStateStore_RestoreVaultAccessor(t *testing.T) { } func TestStateStore_RestoreSITokenAccessor(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) state := testStateStore(t) @@ -393,7 +394,7 @@ func TestStateStore_RestoreSITokenAccessor(t *testing.T) { } func TestStateStore_RestoreACLPolicy(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) state := testStateStore(t) policy := mock.ACLPolicy() @@ -418,7 +419,7 @@ func TestStateStore_RestoreACLPolicy(t *testing.T) { } func TestStateStore_RestoreACLToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) token := mock.ACLToken() @@ -465,7 +466,7 @@ func TestStateStore_ClusterMetadataRestore(t *testing.T) { } func TestStateStore_RestoreScalingPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -485,7 +486,7 @@ func TestStateStore_RestoreScalingPolicy(t *testing.T) { } func TestStateStore_RestoreScalingEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -515,7 +516,7 @@ func TestStateStore_RestoreScalingEvents(t *testing.T) { } func TestStateStore_RestoreSchedulerConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) schedConfig := &structs.SchedulerConfiguration{ diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 441344a28..5f1c8e49c 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -10,14 +10,14 @@ import ( "time" "github.com/hashicorp/go-memdb" - "github.com/kr/pretty" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func testStateStore(t *testing.T) *StateStore { @@ -25,7 +25,7 @@ func testStateStore(t *testing.T) *StateStore { } func TestStateStore_Blocking_Error(t *testing.T) { - t.Parallel() + ci.Parallel(t) expected := fmt.Errorf("test error") errFn := func(memdb.WatchSet, *StateStore) (interface{}, uint64, error) { @@ -39,7 +39,7 @@ func TestStateStore_Blocking_Error(t *testing.T) { } func TestStateStore_Blocking_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) noopFn := func(memdb.WatchSet, *StateStore) (interface{}, uint64, error) { return nil, 5, nil @@ -57,7 +57,7 @@ func TestStateStore_Blocking_Timeout(t *testing.T) { } func TestStateStore_Blocking_MinQuery(t *testing.T) { - t.Parallel() + ci.Parallel(t) node := mock.Node() count := 0 @@ -99,7 +99,7 @@ func TestStateStore_Blocking_MinQuery(t *testing.T) { // 1) The job is denormalized // 2) Allocations are created func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -154,7 +154,7 @@ func TestStateStore_UpsertPlanResults_AllocationsCreated_Denormalized(t *testing // 2) Allocations are denormalized and updated with the diff // That stopped allocs Job is unmodified func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -249,7 +249,7 @@ func TestStateStore_UpsertPlanResults_AllocationsDenormalized(t *testing.T) { // This test checks that the deployment is created and allocations count towards // the deployment func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -356,7 +356,7 @@ func TestStateStore_UpsertPlanResults_Deployment(t *testing.T) { // 1) Preempted allocations in plan results are updated // 2) Evals 
are inserted for preempted jobs func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -437,7 +437,7 @@ func TestStateStore_UpsertPlanResults_PreemptedAllocs(t *testing.T) { // This test checks that deployment updates are applied correctly func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) // Create a job that applies to all @@ -520,7 +520,7 @@ func TestStateStore_UpsertPlanResults_DeploymentUpdates(t *testing.T) { } func TestStateStore_UpsertDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) deployment := mock.Deployment() @@ -565,7 +565,7 @@ func TestStateStore_UpsertDeployment(t *testing.T) { // Tests that deployments of older create index and same job id are not returned func TestStateStore_OldDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -603,7 +603,7 @@ func TestStateStore_OldDeployment(t *testing.T) { } func TestStateStore_DeleteDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) d1 := mock.Deployment() @@ -656,7 +656,7 @@ func TestStateStore_DeleteDeployment(t *testing.T) { } func TestStateStore_Deployments(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var deployments []*structs.Deployment @@ -687,7 +687,7 @@ func TestStateStore_Deployments(t *testing.T) { } func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) deploy := mock.Deployment() @@ -773,7 +773,7 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { } func TestStateStore_UpsertNode_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -817,7 +817,7 @@ func TestStateStore_UpsertNode_Node(t *testing.T) { } func TestStateStore_DeleteNode_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -862,7 +862,7 @@ func TestStateStore_DeleteNode_Node(t *testing.T) { } func TestStateStore_UpdateNodeStatus_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -900,7 +900,7 @@ func TestStateStore_UpdateNodeStatus_Node(t *testing.T) { } func TestStateStore_BatchUpdateNodeDrain(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -962,7 +962,7 @@ func TestStateStore_BatchUpdateNodeDrain(t *testing.T) { } func TestStateStore_UpdateNodeDrain_Node(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -1007,7 +1007,7 @@ func TestStateStore_UpdateNodeDrain_Node(t *testing.T) { } func TestStateStore_AddSingleNodeEvent(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -1051,7 +1051,7 @@ func TestStateStore_AddSingleNodeEvent(t *testing.T) { // To prevent stale node events from accumulating, we limit the number of // stored node events to 10. 
func TestStateStore_NodeEvents_RetentionWindow(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -1100,7 +1100,7 @@ func TestStateStore_NodeEvents_RetentionWindow(t *testing.T) { } func TestStateStore_UpdateNodeDrain_ResetEligiblity(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -1154,7 +1154,7 @@ func TestStateStore_UpdateNodeDrain_ResetEligiblity(t *testing.T) { } func TestStateStore_UpdateNodeEligibility(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -1210,7 +1210,7 @@ func TestStateStore_UpdateNodeEligibility(t *testing.T) { } func TestStateStore_Nodes(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var nodes []*structs.Node @@ -1254,7 +1254,7 @@ func TestStateStore_Nodes(t *testing.T) { } func TestStateStore_NodesByIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() @@ -1342,7 +1342,7 @@ func TestStateStore_NodesByIDPrefix(t *testing.T) { } func TestStateStore_UpsertJob_Job(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -1422,7 +1422,7 @@ func TestStateStore_UpsertJob_Job(t *testing.T) { } func TestStateStore_UpdateUpsertJob_Job(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -1527,7 +1527,7 @@ func TestStateStore_UpdateUpsertJob_Job(t *testing.T) { } func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.PeriodicJob() @@ -1583,7 +1583,7 @@ func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) { } func TestStateStore_UpsertJob_BadNamespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) assert := assert.New(t) state := testStateStore(t) @@ -1602,7 +1602,7 @@ func TestStateStore_UpsertJob_BadNamespace(t *testing.T) { // Upsert a job that is the child of a parent job and ensures its summary gets // updated. 
func TestStateStore_UpsertJob_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -1647,7 +1647,7 @@ func TestStateStore_UpsertJob_ChildJob(t *testing.T) { } func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -1738,7 +1738,7 @@ func TestStateStore_UpdateUpsertJob_JobVersion(t *testing.T) { } func TestStateStore_DeleteJob_Job(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -1819,7 +1819,7 @@ func TestStateStore_DeleteJob_Job(t *testing.T) { } func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -1890,7 +1890,7 @@ func TestStateStore_DeleteJobTxn_BatchDeletes(t *testing.T) { } func TestStateStore_DeleteJob_MultipleVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) assert := assert.New(t) @@ -1947,7 +1947,7 @@ func TestStateStore_DeleteJob_MultipleVersions(t *testing.T) { } func TestStateStore_DeleteJob_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -2001,7 +2001,7 @@ func TestStateStore_DeleteJob_ChildJob(t *testing.T) { } func TestStateStore_Jobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var jobs []*structs.Job @@ -2043,7 +2043,7 @@ func TestStateStore_Jobs(t *testing.T) { } func TestStateStore_JobVersions(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var jobs []*structs.Job @@ -2085,7 +2085,7 @@ func TestStateStore_JobVersions(t *testing.T) { } func TestStateStore_JobsByIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -2169,7 +2169,7 @@ func TestStateStore_JobsByIDPrefix(t *testing.T) { } func TestStateStore_JobsByPeriodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var periodic, nonPeriodic []*structs.Job @@ -2241,7 +2241,7 @@ func TestStateStore_JobsByPeriodic(t *testing.T) { } func TestStateStore_JobsByScheduler(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var serviceJobs []*structs.Job @@ -2315,7 +2315,7 @@ func TestStateStore_JobsByScheduler(t *testing.T) { } func TestStateStore_JobsByGC(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) gc, nonGc := make(map[string]struct{}), make(map[string]struct{}) @@ -2389,7 +2389,7 @@ func TestStateStore_JobsByGC(t *testing.T) { } func TestStateStore_UpsertPeriodicLaunch(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -2444,7 +2444,7 @@ func TestStateStore_UpsertPeriodicLaunch(t *testing.T) { } func TestStateStore_UpdateUpsertPeriodicLaunch(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -2509,7 +2509,7 @@ func TestStateStore_UpdateUpsertPeriodicLaunch(t *testing.T) { } func TestStateStore_DeletePeriodicLaunch(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -2563,7 +2563,7 @@ func TestStateStore_DeletePeriodicLaunch(t *testing.T) { } func TestStateStore_PeriodicLaunches(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var launches []*structs.PeriodicLaunch @@ -2839,7 +2839,7 @@ func TestStateStore_CSIVolume(t *testing.T) { } func TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { - t.Parallel() + ci.Parallel(t) store := testStateStore(t) plugID := "foo" @@ -3209,7 +3209,7 @@ func 
TestStateStore_CSIPlugin_Lifecycle(t *testing.T) { } func TestStateStore_Indexes(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() @@ -3254,7 +3254,7 @@ func TestStateStore_Indexes(t *testing.T) { } func TestStateStore_LatestIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -3278,7 +3278,7 @@ func TestStateStore_LatestIndex(t *testing.T) { } func TestStateStore_UpsertEvals_Eval(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) eval := mock.Eval() @@ -3322,7 +3322,7 @@ func TestStateStore_UpsertEvals_Eval(t *testing.T) { } func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -3401,7 +3401,7 @@ func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) { } func TestStateStore_Update_UpsertEvals_Eval(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) eval := mock.Eval() @@ -3468,7 +3468,7 @@ func TestStateStore_Update_UpsertEvals_Eval(t *testing.T) { } func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -3559,7 +3559,7 @@ func TestStateStore_UpsertEvals_Eval_ChildJob(t *testing.T) { } func TestStateStore_DeleteEval_Eval(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) eval1 := mock.Eval() @@ -3693,7 +3693,7 @@ func TestStateStore_DeleteEval_Eval(t *testing.T) { } func TestStateStore_DeleteEval_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -3764,7 +3764,7 @@ func TestStateStore_DeleteEval_ChildJob(t *testing.T) { } func TestStateStore_EvalsByJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -3802,7 +3802,7 @@ func TestStateStore_EvalsByJob(t *testing.T) { } func TestStateStore_Evals(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var evals []*structs.Evaluation @@ -3845,7 +3845,7 @@ func TestStateStore_Evals(t *testing.T) { } func TestStateStore_EvalsByIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var evals []*structs.Evaluation @@ -3938,7 +3938,7 @@ func TestStateStore_EvalsByIDPrefix(t *testing.T) { } func TestStateStore_UpdateAllocsFromClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) parent := mock.Job() @@ -4028,7 +4028,7 @@ func TestStateStore_UpdateAllocsFromClient(t *testing.T) { } func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc1 := mock.Alloc() @@ -4164,7 +4164,7 @@ func TestStateStore_UpdateAllocsFromClient_ChildJob(t *testing.T) { } func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4235,7 +4235,7 @@ func TestStateStore_UpdateMultipleAllocsFromClient(t *testing.T) { } func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -4279,7 +4279,7 @@ func TestStateStore_UpdateAllocsFromClient_Deployment(t *testing.T) { // This tests that the deployment state is merged correctly func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -4320,7 +4320,7 @@ func TestStateStore_UpdateAllocsFromClient_DeploymentStateMerges(t *testing.T) { } func 
TestStateStore_UpsertAlloc_Alloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4395,7 +4395,7 @@ func TestStateStore_UpsertAlloc_Alloc(t *testing.T) { } func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -4449,7 +4449,7 @@ func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { // Testing to ensure we keep issue // https://github.com/hashicorp/nomad/issues/2583 fixed func TestStateStore_UpsertAlloc_No_Job(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4462,7 +4462,7 @@ func TestStateStore_UpsertAlloc_No_Job(t *testing.T) { } func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -4505,7 +4505,7 @@ func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) { } func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4606,7 +4606,7 @@ func TestStateStore_UpdateAlloc_Alloc(t *testing.T) { // This test ensures that the state store will mark the client's status as lost // when set rather than preferring the existing status. func TestStateStore_UpdateAlloc_Lost(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4643,7 +4643,7 @@ func TestStateStore_UpdateAlloc_Lost(t *testing.T) { // associated with it. This will happen when a job is stopped by a user who // has non-terminal allocations on clients func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -4687,7 +4687,7 @@ func TestStateStore_UpdateAlloc_NoJob(t *testing.T) { } func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -4755,7 +4755,7 @@ func TestStateStore_UpdateAllocDesiredTransition(t *testing.T) { } func TestStateStore_JobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -4879,7 +4879,7 @@ func TestStateStore_JobSummary(t *testing.T) { } func TestStateStore_ReconcileJobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -4970,7 +4970,7 @@ func TestStateStore_ReconcileJobSummary(t *testing.T) { } func TestStateStore_ReconcileParentJobSummary(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -5060,7 +5060,7 @@ func TestStateStore_ReconcileParentJobSummary(t *testing.T) { } func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -5111,7 +5111,7 @@ func TestStateStore_UpdateAlloc_JobNotPresent(t *testing.T) { } func TestStateStore_EvictAlloc_Alloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -5150,7 +5150,7 @@ func TestStateStore_EvictAlloc_Alloc(t *testing.T) { } func TestStateStore_AllocsByNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5189,7 +5189,7 @@ func TestStateStore_AllocsByNode(t *testing.T) { } func TestStateStore_AllocsByNodeTerminal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs, term, nonterm []*structs.Allocation @@ -5248,7 +5248,7 @@ func TestStateStore_AllocsByNodeTerminal(t *testing.T) { } func 
TestStateStore_AllocsByJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5287,7 +5287,7 @@ func TestStateStore_AllocsByJob(t *testing.T) { } func TestStateStore_AllocsForRegisteredJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5352,7 +5352,7 @@ func TestStateStore_AllocsForRegisteredJob(t *testing.T) { } func TestStateStore_AllocsByIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5451,7 +5451,7 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { } func TestStateStore_Allocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5497,7 +5497,7 @@ func TestStateStore_Allocs(t *testing.T) { } func TestStateStore_Allocs_PrevAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) var allocs []*structs.Allocation @@ -5551,7 +5551,7 @@ func TestStateStore_Allocs_PrevAlloc(t *testing.T) { } func TestStateStore_SetJobStatus_ForceStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) index := uint64(0) state := testStateStore(t) @@ -5587,7 +5587,7 @@ func TestStateStore_SetJobStatus_ForceStatus(t *testing.T) { } func TestStateStore_SetJobStatus_NoOp(t *testing.T) { - t.Parallel() + ci.Parallel(t) index := uint64(0) state := testStateStore(t) @@ -5618,7 +5618,7 @@ func TestStateStore_SetJobStatus_NoOp(t *testing.T) { } func TestStateStore_SetJobStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) txn := state.db.WriteTxn(uint64(0)) @@ -5653,7 +5653,7 @@ func TestStateStore_SetJobStatus(t *testing.T) { } func TestStateStore_GetJobStatus_NoEvalsOrAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) job := mock.Job() state := testStateStore(t) @@ -5669,7 +5669,7 @@ func TestStateStore_GetJobStatus_NoEvalsOrAllocs(t *testing.T) { } func TestStateStore_GetJobStatus_NoEvalsOrAllocs_Periodic(t *testing.T) { - t.Parallel() + ci.Parallel(t) job := mock.PeriodicJob() state := testStateStore(t) @@ -5685,7 +5685,7 @@ func TestStateStore_GetJobStatus_NoEvalsOrAllocs_Periodic(t *testing.T) { } func TestStateStore_GetJobStatus_NoEvalsOrAllocs_EvalDelete(t *testing.T) { - t.Parallel() + ci.Parallel(t) job := mock.Job() state := testStateStore(t) @@ -5701,7 +5701,7 @@ func TestStateStore_GetJobStatus_NoEvalsOrAllocs_EvalDelete(t *testing.T) { } func TestStateStore_GetJobStatus_DeadEvalsAndAllocs(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -5735,7 +5735,7 @@ func TestStateStore_GetJobStatus_DeadEvalsAndAllocs(t *testing.T) { } func TestStateStore_GetJobStatus_RunningAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -5761,7 +5761,7 @@ func TestStateStore_GetJobStatus_RunningAlloc(t *testing.T) { } func TestStateStore_GetJobStatus_PeriodicJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.PeriodicJob() @@ -5789,7 +5789,7 @@ func TestStateStore_GetJobStatus_PeriodicJob(t *testing.T) { } func TestStateStore_GetJobStatus_ParameterizedJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -5818,7 +5818,7 @@ func TestStateStore_GetJobStatus_ParameterizedJob(t *testing.T) { } func TestStateStore_SetJobStatus_PendingEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.Job() @@ -5845,7 
+5845,7 @@ func TestStateStore_SetJobStatus_PendingEval(t *testing.T) { // TestStateStore_SetJobStatus_SystemJob asserts that system jobs are still // considered running until explicitly stopped. func TestStateStore_SetJobStatus_SystemJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) job := mock.SystemJob() @@ -5882,7 +5882,7 @@ func TestStateStore_SetJobStatus_SystemJob(t *testing.T) { } func TestStateJobSummary_UpdateJobCount(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -6013,7 +6013,7 @@ func TestStateJobSummary_UpdateJobCount(t *testing.T) { } func TestJobSummary_UpdateClientStatus(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -6089,7 +6089,7 @@ func TestJobSummary_UpdateClientStatus(t *testing.T) { // Test that nonexistent deployment can't be updated func TestStateStore_UpsertDeploymentStatusUpdate_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6108,7 +6108,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Nonexistent(t *testing.T) { // Test that terminal deployment can't be updated func TestStateStore_UpsertDeploymentStatusUpdate_Terminal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6136,7 +6136,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Terminal(t *testing.T) { // Test that a non terminal deployment is updated and that a job and eval are // created. func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6198,7 +6198,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_NonTerminal(t *testing.T) { // Test that when a deployment is updated to successful the job is updated to // stable func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6255,7 +6255,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) { } func TestStateStore_UpdateJobStability(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6291,7 +6291,7 @@ func TestStateStore_UpdateJobStability(t *testing.T) { // Test that nonexistent deployment can't be promoted func TestStateStore_UpsertDeploymentPromotion_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6310,7 +6310,7 @@ func TestStateStore_UpsertDeploymentPromotion_Nonexistent(t *testing.T) { // Test that terminal deployment can't be updated func TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6337,7 +6337,7 @@ func TestStateStore_UpsertDeploymentPromotion_Terminal(t *testing.T) { // Test promoting unhealthy canaries in a deployment. func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) require := require.New(t) @@ -6386,7 +6386,7 @@ func TestStateStore_UpsertDeploymentPromotion_Unhealthy(t *testing.T) { // Test promoting a deployment with no canaries func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) require := require.New(t) @@ -6415,7 +6415,7 @@ func TestStateStore_UpsertDeploymentPromotion_NoCanaries(t *testing.T) { // Test promoting all canaries in a deployment. 
func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6514,7 +6514,7 @@ func TestStateStore_UpsertDeploymentPromotion_All(t *testing.T) { // Test promoting a subset of canaries in a deployment. func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -6617,7 +6617,7 @@ func TestStateStore_UpsertDeploymentPromotion_Subset(t *testing.T) { // Test that allocation health can't be set against a nonexistent deployment func TestStateStore_UpsertDeploymentAllocHealth_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6636,7 +6636,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_Nonexistent(t *testing.T) { // Test that allocation health can't be set against a terminal deployment func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6663,7 +6663,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_Terminal(t *testing.T) { // Test that allocation health can't be set against a nonexistent alloc func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_Nonexistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6688,7 +6688,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_Nonexistent(t *testing. // Test that a deployment's PlacedCanaries is properly updated func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6758,7 +6758,7 @@ func TestStateStore_UpsertDeploymentAlloc_Canaries(t *testing.T) { } func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6792,7 +6792,7 @@ func TestStateStore_UpsertDeploymentAlloc_NoCanaries(t *testing.T) { // Test that allocation health can't be set for an alloc with mismatched // deployment ids func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6828,7 +6828,7 @@ func TestStateStore_UpsertDeploymentAllocHealth_BadAlloc_MismatchDeployment(t *t // Test that allocation health is properly set func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) @@ -6936,7 +6936,7 @@ func TestStateStore_UpsertDeploymentAllocHealth(t *testing.T) { } func TestStateStore_UpsertVaultAccessors(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) a := mock.VaultAccessor() @@ -7017,7 +7017,7 @@ func TestStateStore_UpsertVaultAccessors(t *testing.T) { } func TestStateStore_DeleteVaultAccessors(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) a1 := mock.VaultAccessor() @@ -7073,7 +7073,7 @@ func TestStateStore_DeleteVaultAccessors(t *testing.T) { } func TestStateStore_VaultAccessorsByAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -7121,7 +7121,7 @@ func TestStateStore_VaultAccessorsByAlloc(t *testing.T) { } func TestStateStore_VaultAccessorsByNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) node := mock.Node() @@ -7169,7 +7169,7 @@ func TestStateStore_VaultAccessorsByNode(t *testing.T) { } func TestStateStore_UpsertSITokenAccessors(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) state := 
testStateStore(t) @@ -7222,7 +7222,7 @@ func TestStateStore_UpsertSITokenAccessors(t *testing.T) { } func TestStateStore_DeleteSITokenAccessors(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) state := testStateStore(t) @@ -7263,7 +7263,7 @@ func TestStateStore_DeleteSITokenAccessors(t *testing.T) { } func TestStateStore_SITokenAccessorsByAlloc(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) state := testStateStore(t) @@ -7301,7 +7301,7 @@ func TestStateStore_SITokenAccessorsByAlloc(t *testing.T) { } func TestStateStore_SITokenAccessorsByNode(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) state := testStateStore(t) @@ -7340,7 +7340,7 @@ func TestStateStore_SITokenAccessorsByNode(t *testing.T) { } func TestStateStore_UpsertACLPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) policy := mock.ACLPolicy() @@ -7402,7 +7402,7 @@ func TestStateStore_UpsertACLPolicy(t *testing.T) { } func TestStateStore_DeleteACLPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) policy := mock.ACLPolicy() @@ -7469,7 +7469,7 @@ func TestStateStore_DeleteACLPolicy(t *testing.T) { } func TestStateStore_ACLPolicyByNamePrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) names := []string{ @@ -7518,7 +7518,7 @@ func TestStateStore_ACLPolicyByNamePrefix(t *testing.T) { } func TestStateStore_BootstrapACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) tk1 := mock.ACLToken() @@ -7602,7 +7602,7 @@ func TestStateStore_BootstrapACLTokens(t *testing.T) { } func TestStateStore_UpsertACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) tk1 := mock.ACLToken() @@ -7672,7 +7672,7 @@ func TestStateStore_UpsertACLTokens(t *testing.T) { } func TestStateStore_DeleteACLTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) tk1 := mock.ACLToken() @@ -7739,7 +7739,7 @@ func TestStateStore_DeleteACLTokens(t *testing.T) { } func TestStateStore_ACLTokenByAccessorIDPrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) prefixes := []string{ @@ -7806,7 +7806,7 @@ func TestStateStore_ACLTokenByAccessorIDPrefix(t *testing.T) { } func TestStateStore_ACLTokensByGlobal(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) tk1 := mock.ACLToken() @@ -7858,7 +7858,7 @@ func TestStateStore_ACLTokensByGlobal(t *testing.T) { } func TestStateStore_OneTimeTokens(t *testing.T) { - t.Parallel() + ci.Parallel(t) index := uint64(100) state := testStateStore(t) @@ -8009,7 +8009,7 @@ func TestStateStore_ClusterMetadata(t *testing.T) { } func TestStateStore_UpsertScalingPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -8092,7 +8092,7 @@ func TestStateStore_UpsertScalingPolicy(t *testing.T) { } func TestStateStore_UpsertScalingPolicy_Namespace(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) otherNamespace := "not-default-namespace" @@ -8142,7 +8142,7 @@ func TestStateStore_UpsertScalingPolicy_Namespace(t *testing.T) { } func TestStateStore_UpsertScalingPolicy_Namespace_PrefixBug(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ns1 := "name" @@ -8197,7 +8197,7 @@ func TestStateStore_UpsertScalingPolicy_Namespace_PrefixBug(t *testing.T) { // Subsequent updates of the job should preserve the ID for the scaling policy // associated with a given target. 
func TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8242,7 +8242,7 @@ func TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex(t *testing.T) { // Updating the scaling policy for a job should update the index table and fire the watch. // This test is the converse of TestStateStore_UpsertJob_PreserveScalingPolicyIDsAndIndex func TestStateStore_UpsertJob_UpdateScalingPolicy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8286,7 +8286,7 @@ func TestStateStore_UpsertJob_UpdateScalingPolicy(t *testing.T) { } func TestStateStore_DeleteScalingPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8341,7 +8341,7 @@ func TestStateStore_DeleteScalingPolicies(t *testing.T) { } func TestStateStore_StopJob_DeleteScalingPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8388,7 +8388,7 @@ func TestStateStore_StopJob_DeleteScalingPolicies(t *testing.T) { } func TestStateStore_UnstopJob_UpsertScalingPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8431,7 +8431,7 @@ func TestStateStore_UnstopJob_UpsertScalingPolicies(t *testing.T) { } func TestStateStore_DeleteJob_DeleteScalingPolicies(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8462,7 +8462,7 @@ func TestStateStore_DeleteJob_DeleteScalingPolicies(t *testing.T) { } func TestStateStore_DeleteJob_DeleteScalingPoliciesPrefixBug(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8496,7 +8496,7 @@ func TestStateStore_DeleteJob_DeleteScalingPoliciesPrefixBug(t *testing.T) { // will not cause the scaling_policy table index to increase, on either job // registration or deletion. 
func TestStateStore_DeleteJob_ScalingPolicyIndexNoop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8524,7 +8524,7 @@ func TestStateStore_DeleteJob_ScalingPolicyIndexNoop(t *testing.T) { } func TestStateStore_ScalingPoliciesByType(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8581,7 +8581,7 @@ func TestStateStore_ScalingPoliciesByType(t *testing.T) { } func TestStateStore_ScalingPoliciesByTypePrefix(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8654,7 +8654,7 @@ func TestStateStore_ScalingPoliciesByTypePrefix(t *testing.T) { } func TestStateStore_ScalingPoliciesByJob(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8718,7 +8718,7 @@ func TestStateStore_ScalingPoliciesByJob(t *testing.T) { } func TestStateStore_ScalingPoliciesByJob_PrefixBug(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8757,7 +8757,7 @@ func TestStateStore_ScalingPoliciesByJob_PrefixBug(t *testing.T) { } func TestStateStore_ScalingPolicyByTargetAndType(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) @@ -8799,7 +8799,7 @@ func TestStateStore_ScalingPolicyByTargetAndType(t *testing.T) { } func TestStateStore_UpsertScalingEvent(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -8868,7 +8868,7 @@ func TestStateStore_UpsertScalingEvent(t *testing.T) { } func TestStateStore_UpsertScalingEvent_LimitAndOrder(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) state := testStateStore(t) @@ -8935,7 +8935,7 @@ func TestStateStore_UpsertScalingEvent_LimitAndOrder(t *testing.T) { } func TestStateStore_Abandon(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := testStateStore(t) abandonCh := s.AbandonCh() @@ -8949,7 +8949,7 @@ func TestStateStore_Abandon(t *testing.T) { // Verifies that an error is returned when an allocation doesn't exist in the state store. func TestStateSnapshot_DenormalizeAllocationDiffSlice_AllocDoesNotExist(t *testing.T) { - t.Parallel() + ci.Parallel(t) state := testStateStore(t) alloc := mock.Alloc() @@ -8977,7 +8977,7 @@ func TestStateSnapshot_DenormalizeAllocationDiffSlice_AllocDoesNotExist(t *testi // TestStateStore_SnapshotMinIndex_OK asserts StateStore.SnapshotMinIndex blocks // until the StateStore's latest index is >= the requested index. func TestStateStore_SnapshotMinIndex_OK(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := testStateStore(t) index, err := s.LatestIndex() @@ -9054,7 +9054,7 @@ func TestStateStore_SnapshotMinIndex_OK(t *testing.T) { // TestStateStore_SnapshotMinIndex_Timeout asserts StateStore.SnapshotMinIndex // returns an error if the desired index is not reached within the deadline. 
func TestStateStore_SnapshotMinIndex_Timeout(t *testing.T) { - t.Parallel() + ci.Parallel(t) s := testStateStore(t) index, err := s.LatestIndex() diff --git a/nomad/stats_fetcher_test.go b/nomad/stats_fetcher_test.go index 3c508c73a..36362d604 100644 --- a/nomad/stats_fetcher_test.go +++ b/nomad/stats_fetcher_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" ) func TestStatsFetcher(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := func(c *Config) { c.Region = "region-a" diff --git a/nomad/status_endpoint_test.go b/nomad/status_endpoint_test.go index acae7235a..8f5e95287 100644 --- a/nomad/status_endpoint_test.go +++ b/nomad/status_endpoint_test.go @@ -5,6 +5,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,7 +15,7 @@ import ( ) func TestStatusPing(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -28,7 +29,7 @@ func TestStatusPing(t *testing.T) { } func TestStatusLeader(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -51,7 +52,7 @@ func TestStatusLeader(t *testing.T) { } func TestStatusPeers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -73,7 +74,7 @@ func TestStatusPeers(t *testing.T) { } func TestStatusMembers(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -93,7 +94,7 @@ func TestStatusMembers(t *testing.T) { } func TestStatusMembers_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -147,7 +148,7 @@ func TestStatusMembers_ACL(t *testing.T) { } func TestStatus_HasClientConn(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() diff --git a/nomad/stream/event_broker_test.go b/nomad/stream/event_broker_test.go index a136031cb..ea7457aa1 100644 --- a/nomad/stream/event_broker_test.go +++ b/nomad/stream/event_broker_test.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-memdb" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -15,6 +16,8 @@ ) func TestEventBroker_PublishChangesAndSubscribe(t *testing.T) { + ci.Parallel(t) + subscription := &SubscribeRequest{ Topics: map[structs.Topic][]string{ "Test": {"sub-key"}, @@ -66,6 +69,8 @@ func TestEventBroker_PublishChangesAndSubscribe(t *testing.T) { } func TestEventBroker_ShutdownClosesSubscriptions(t *testing.T) { + ci.Parallel(t) + ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -95,6 +100,8 @@ func TestEventBroker_ShutdownClosesSubscriptions(t *testing.T) { // the subscriptions should still be handled independently of each other when // unsubscribing. 
func TestEventBroker_EmptyReqToken_DistinctSubscriptions(t *testing.T) { + ci.Parallel(t) + ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -117,6 +124,8 @@ func TestEventBroker_EmptyReqToken_DistinctSubscriptions(t *testing.T) { } func TestEventBroker_handleACLUpdates_TokenDeleted(t *testing.T) { + ci.Parallel(t) + ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -176,6 +185,8 @@ func (p *fakeACLTokenProvider) ACLPolicyByName(ws memdb.WatchSet, policyName str } func TestEventBroker_handleACLUpdates_policyupdated(t *testing.T) { + ci.Parallel(t) + ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) diff --git a/nomad/stream/event_buffer_test.go b/nomad/stream/event_buffer_test.go index 3b52927fe..f9ec7bd22 100644 --- a/nomad/stream/event_buffer_test.go +++ b/nomad/stream/event_buffer_test.go @@ -7,13 +7,15 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestEventBufferFuzz(t *testing.T) { + ci.Parallel(t) + nReaders := 1000 nMessages := 1000 @@ -85,6 +87,8 @@ func TestEventBufferFuzz(t *testing.T) { } func TestEventBuffer_Slow_Reader(t *testing.T) { + ci.Parallel(t) + b := newEventBuffer(10) for i := 1; i < 11; i++ { @@ -116,6 +120,8 @@ func TestEventBuffer_Slow_Reader(t *testing.T) { } func TestEventBuffer_Size(t *testing.T) { + ci.Parallel(t) + b := newEventBuffer(100) for i := 0; i < 10; i++ { @@ -129,6 +135,8 @@ func TestEventBuffer_Size(t *testing.T) { } func TestEventBuffer_MaxSize(t *testing.T) { + ci.Parallel(t) + b := newEventBuffer(10) var events []structs.Event @@ -144,6 +152,8 @@ func TestEventBuffer_MaxSize(t *testing.T) { // are removed, the event buffer should advance its head down to the last message // and insert a placeholder sentinel value. 
func TestEventBuffer_Emptying_Buffer(t *testing.T) { + ci.Parallel(t) + b := newEventBuffer(10) for i := 0; i < 10; i++ { @@ -184,6 +194,8 @@ func TestEventBuffer_Emptying_Buffer(t *testing.T) { } func TestEventBuffer_StartAt_CurrentIdx_Past_Start(t *testing.T) { + ci.Parallel(t) + cases := []struct { desc string req uint64 diff --git a/nomad/stream/ndjson_test.go b/nomad/stream/ndjson_test.go index 95bc2b23a..5da4f08cd 100644 --- a/nomad/stream/ndjson_test.go +++ b/nomad/stream/ndjson_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -13,7 +14,7 @@ type testObj struct { } func TestJsonStream(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -42,7 +43,7 @@ func TestJsonStream(t *testing.T) { } func TestJson_Send_After_Stop(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -57,7 +58,7 @@ func TestJson_Send_After_Stop(t *testing.T) { } func TestJson_HeartBeat(t *testing.T) { - t.Parallel() + ci.Parallel(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/nomad/stream/subscription_test.go b/nomad/stream/subscription_test.go index d1d40f350..d7bb9be36 100644 --- a/nomad/stream/subscription_test.go +++ b/nomad/stream/subscription_test.go @@ -3,12 +3,14 @@ package stream import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" - "github.com/stretchr/testify/require" ) func TestFilter_AllTopics(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One"}, structs.Event{Topic: "Test", Key: "Two"}) @@ -22,6 +24,8 @@ func TestFilter_AllTopics(t *testing.T) { } func TestFilter_AllKeys(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One"}, structs.Event{Topic: "Test", Key: "Two"}) @@ -35,6 +39,8 @@ func TestFilter_AllKeys(t *testing.T) { } func TestFilter_PartialMatch_Topic(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One"}, structs.Event{Topic: "Test", Key: "Two"}, structs.Event{Topic: "Exclude", Key: "Two"}) @@ -51,6 +57,8 @@ func TestFilter_PartialMatch_Topic(t *testing.T) { } func TestFilter_Match_TopicAll_SpecificKey(t *testing.T) { + ci.Parallel(t) + events := []structs.Event{ {Topic: "Match", Key: "Two"}, {Topic: "NoMatch", Key: "One"}, @@ -72,6 +80,8 @@ func TestFilter_Match_TopicAll_SpecificKey(t *testing.T) { } func TestFilter_Match_TopicAll_SpecificKey_Plus(t *testing.T) { + ci.Parallel(t) + events := []structs.Event{ {Topic: "FirstTwo", Key: "Two"}, {Topic: "Test", Key: "One"}, @@ -95,6 +105,8 @@ func TestFilter_Match_TopicAll_SpecificKey_Plus(t *testing.T) { } func TestFilter_PartialMatch_Key(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One"}, structs.Event{Topic: "Test", Key: "Two"}) @@ -111,6 +123,8 @@ func TestFilter_PartialMatch_Key(t *testing.T) { } func TestFilter_NoMatch(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One"}, structs.Event{Topic: "Test", Key: "Two"}) @@ -128,6 +142,8 @@ func TestFilter_NoMatch(t *testing.T) { } func TestFilter_Namespace(t *testing.T) { + 
ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One", Namespace: "foo"}, structs.Event{Topic: "Test", Key: "Two"}, structs.Event{Topic: "Test", Key: "Two", Namespace: "bar"}) @@ -148,6 +164,8 @@ func TestFilter_Namespace(t *testing.T) { } func TestFilter_NamespaceAll(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One", Namespace: "foo"}, @@ -171,6 +189,8 @@ func TestFilter_NamespaceAll(t *testing.T) { } func TestFilter_FilterKeys(t *testing.T) { + ci.Parallel(t) + events := make([]structs.Event, 0, 5) events = append(events, structs.Event{Topic: "Test", Key: "One", FilterKeys: []string{"extra-key"}}, structs.Event{Topic: "Test", Key: "Two"}, structs.Event{Topic: "Test", Key: "Two"}) diff --git a/nomad/structs/batch_future_test.go b/nomad/structs/batch_future_test.go index 52ff12563..84693fe59 100644 --- a/nomad/structs/batch_future_test.go +++ b/nomad/structs/batch_future_test.go @@ -4,10 +4,12 @@ import ( "fmt" "testing" "time" + + "github.com/hashicorp/nomad/ci" ) func TestBatchFuture(t *testing.T) { - t.Parallel() + ci.Parallel(t) bf := NewBatchFuture() // Async respond to the future diff --git a/nomad/structs/bitmap_test.go b/nomad/structs/bitmap_test.go index 42b2c635e..e28d831d6 100644 --- a/nomad/structs/bitmap_test.go +++ b/nomad/structs/bitmap_test.go @@ -3,9 +3,13 @@ package structs import ( "reflect" "testing" + + "github.com/hashicorp/nomad/ci" ) func TestBitmap(t *testing.T) { + ci.Parallel(t) + // Check invalid sizes _, err := NewBitmap(0) if err == nil { diff --git a/nomad/structs/config/audit_test.go b/nomad/structs/config/audit_test.go index 4efc3bbfa..7cd9d930a 100644 --- a/nomad/structs/config/audit_test.go +++ b/nomad/structs/config/audit_test.go @@ -4,11 +4,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/require" ) func TestAuditConfig_Merge(t *testing.T) { + ci.Parallel(t) + c1 := &AuditConfig{ Enabled: helper.BoolToPtr(true), Sinks: []*AuditSink{ diff --git a/nomad/structs/config/autopilot_test.go b/nomad/structs/config/autopilot_test.go index e379ff8de..053bfe51f 100644 --- a/nomad/structs/config/autopilot_test.go +++ b/nomad/structs/config/autopilot_test.go @@ -4,9 +4,13 @@ import ( "reflect" "testing" "time" + + "github.com/hashicorp/nomad/ci" ) func TestAutopilotConfig_Merge(t *testing.T) { + ci.Parallel(t) + trueValue, falseValue := true, false c1 := &AutopilotConfig{ diff --git a/nomad/structs/config/consul_test.go b/nomad/structs/config/consul_test.go index d2243e418..e66bae2cf 100644 --- a/nomad/structs/config/consul_test.go +++ b/nomad/structs/config/consul_test.go @@ -11,6 +11,7 @@ import ( consulapi "github.com/hashicorp/consul/api" sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -31,6 +32,8 @@ func TestMain(m *testing.M) { } func TestConsulConfig_Merge(t *testing.T) { + ci.Parallel(t) + yes, no := true, false c1 := &ConsulConfig{ @@ -121,7 +124,7 @@ func TestConsulConfig_Merge(t *testing.T) { // TestConsulConfig_Defaults asserts Consul defaults are copied from their // upstream API package defaults. 
func TestConsulConfig_Defaults(t *testing.T) { - t.Parallel() + ci.Parallel(t) nomadDef := DefaultConsulConfig() consulDef := consulapi.DefaultConfig() @@ -136,7 +139,7 @@ func TestConsulConfig_Defaults(t *testing.T) { // TestConsulConfig_Exec asserts Consul defaults use env vars when they are // set by forking a subprocess. func TestConsulConfig_Exec(t *testing.T) { - t.Parallel() + ci.Parallel(t) self, err := os.Executable() if err != nil { @@ -171,7 +174,7 @@ func TestConsulConfig_Exec(t *testing.T) { } func TestConsulConfig_IpTemplateParse(t *testing.T) { - t.Parallel() + ci.Parallel(t) privateIps, err := sockaddr.GetPrivateIP() require.NoError(t, err) @@ -182,16 +185,16 @@ func TestConsulConfig_IpTemplateParse(t *testing.T) { tmpl string expectedOut string expectErr bool - } { - { name: "string address keeps working", tmpl: "10.0.1.0:8500", expectedOut: "10.0.1.0:8500", expectErr: false }, - { name: "single ip sock-addr template", tmpl: "{{ GetPrivateIP }}:8500", expectedOut: privateIp+":8500", expectErr: false }, - { name: "multi ip sock-addr template", tmpl: "{{ GetPrivateIPs }}:8500", expectedOut: "", expectErr: true }, + }{ + {name: "string address keeps working", tmpl: "10.0.1.0:8500", expectedOut: "10.0.1.0:8500", expectErr: false}, + {name: "single ip sock-addr template", tmpl: "{{ GetPrivateIP }}:8500", expectedOut: privateIp + ":8500", expectErr: false}, + {name: "multi ip sock-addr template", tmpl: "{{ GetPrivateIPs }}:8500", expectedOut: "", expectErr: true}, } for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := ConsulConfig{ Addr: tc.tmpl, } diff --git a/nomad/structs/config/limits_test.go b/nomad/structs/config/limits_test.go index e4bd9d598..7a4082f3d 100644 --- a/nomad/structs/config/limits_test.go +++ b/nomad/structs/config/limits_test.go @@ -4,13 +4,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/require" ) // TestLimits_Defaults asserts the default limits are valid. func TestLimits_Defaults(t *testing.T) { - t.Parallel() + ci.Parallel(t) l := DefaultLimits() d, err := time.ParseDuration(l.HTTPSHandshakeTimeout) @@ -24,7 +25,7 @@ func TestLimits_Defaults(t *testing.T) { // TestLimits_Copy asserts Limits structs are deep copied. func TestLimits_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) o := DefaultLimits() c := o.Copy() @@ -52,7 +53,7 @@ func TestLimits_Copy(t *testing.T) { // TestLimits_Merge asserts non-zero fields from the method argument take // precedence over the existing limits. 
func TestLimits_Merge(t *testing.T) { - t.Parallel() + ci.Parallel(t) l := Limits{} o := DefaultLimits() diff --git a/nomad/structs/config/plugins_test.go b/nomad/structs/config/plugins_test.go index e0e98d108..e380ba622 100644 --- a/nomad/structs/config/plugins_test.go +++ b/nomad/structs/config/plugins_test.go @@ -4,11 +4,12 @@ import ( "sort" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestPluginConfig_Merge(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) a := &PluginConfig{ Name: "foo", @@ -56,7 +57,7 @@ func TestPluginConfig_Merge(t *testing.T) { } func TestPluginConfigSet_Merge(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) a := &PluginConfig{ diff --git a/nomad/structs/config/tls_test.go b/nomad/structs/config/tls_test.go index b57b4fa25..59bcfed18 100644 --- a/nomad/structs/config/tls_test.go +++ b/nomad/structs/config/tls_test.go @@ -3,11 +3,14 @@ package config import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestTLSConfig_Merge(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) a := &TLSConfig{ CAFile: "test-ca-file", @@ -31,6 +34,8 @@ func TestTLSConfig_Merge(t *testing.T) { } func TestTLS_CertificateInfoIsEqual_TrueWhenEmpty(t *testing.T) { + ci.Parallel(t) + require := require.New(t) a := &TLSConfig{} b := &TLSConfig{} @@ -40,6 +45,8 @@ func TestTLS_CertificateInfoIsEqual_TrueWhenEmpty(t *testing.T) { } func TestTLS_CertificateInfoIsEqual_FalseWhenUnequal(t *testing.T) { + ci.Parallel(t) + require := require.New(t) const ( cafile = "../../../helper/tlsutil/testdata/ca.pem" @@ -143,6 +150,8 @@ func TestTLS_CertificateInfoIsEqual_FalseWhenUnequal(t *testing.T) { // Certificate info should be equal when the CA file, certificate file, and key // file all are equal func TestTLS_CertificateInfoIsEqual_TrueWhenEqual(t *testing.T) { + ci.Parallel(t) + require := require.New(t) const ( cafile = "../../../helper/tlsutil/testdata/ca.pem" @@ -167,6 +176,8 @@ func TestTLS_CertificateInfoIsEqual_TrueWhenEqual(t *testing.T) { } func TestTLS_Copy(t *testing.T) { + ci.Parallel(t) + require := require.New(t) const ( cafile = "../../../helper/tlsutil/testdata/ca.pem" @@ -192,6 +203,8 @@ func TestTLS_Copy(t *testing.T) { // GetKeyLoader should always return an initialized KeyLoader for a TLSConfig // object func TestTLS_GetKeyloader(t *testing.T) { + ci.Parallel(t) + require := require.New(t) a := &TLSConfig{} require.NotNil(a.GetKeyLoader()) diff --git a/nomad/structs/config/ui_test.go b/nomad/structs/config/ui_test.go index af040b519..d310403b5 100644 --- a/nomad/structs/config/ui_test.go +++ b/nomad/structs/config/ui_test.go @@ -3,10 +3,12 @@ package config import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestUIConfig_Merge(t *testing.T) { + ci.Parallel(t) fullConfig := &UIConfig{ Enabled: true, @@ -69,7 +71,7 @@ func TestUIConfig_Merge(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) result := tc.left.Merge(tc.right) require.Equal(t, tc.expect, result) }) diff --git a/nomad/structs/config/vault_test.go b/nomad/structs/config/vault_test.go index e48b6ef02..c4eda801c 100644 --- a/nomad/structs/config/vault_test.go +++ b/nomad/structs/config/vault_test.go @@ -4,10 +4,13 @@ import ( "reflect" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func 
TestVaultConfig_Merge(t *testing.T) { + ci.Parallel(t) + trueValue, falseValue := true, false c1 := &VaultConfig{ Enabled: &falseValue, @@ -61,6 +64,8 @@ func TestVaultConfig_Merge(t *testing.T) { } func TestVaultConfig_IsEqual(t *testing.T) { + ci.Parallel(t) + require := require.New(t) trueValue, falseValue := true, false diff --git a/nomad/structs/connect_test.go b/nomad/structs/connect_test.go index 385716fe8..11f93e894 100644 --- a/nomad/structs/connect_test.go +++ b/nomad/structs/connect_test.go @@ -3,10 +3,13 @@ package structs import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestTaskKind_IsAnyConnectGateway(t *testing.T) { + ci.Parallel(t) + t.Run("gateways", func(t *testing.T) { require.True(t, NewTaskKind(ConnectIngressPrefix, "foo").IsAnyConnectGateway()) require.True(t, NewTaskKind(ConnectTerminatingPrefix, "foo").IsAnyConnectGateway()) diff --git a/nomad/structs/consul_oss_test.go b/nomad/structs/consul_oss_test.go index de3359c34..a1cabf60b 100644 --- a/nomad/structs/consul_oss_test.go +++ b/nomad/structs/consul_oss_test.go @@ -6,11 +6,12 @@ package structs import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestJob_ConfigEntries(t *testing.T) { - t.Parallel() + ci.Parallel(t) ingress := &ConsulConnect{ Gateway: &ConsulGateway{ diff --git a/nomad/structs/consul_test.go b/nomad/structs/consul_test.go index 43801c933..66e15584c 100644 --- a/nomad/structs/consul_test.go +++ b/nomad/structs/consul_test.go @@ -3,10 +3,13 @@ package structs import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestConsul_Copy(t *testing.T) { + ci.Parallel(t) + t.Run("nil", func(t *testing.T) { result := (*Consul)(nil).Copy() require.Nil(t, result) @@ -21,6 +24,8 @@ func TestConsul_Copy(t *testing.T) { } func TestConsul_Equals(t *testing.T) { + ci.Parallel(t) + t.Run("nil and nil", func(t *testing.T) { result := (*Consul)(nil).Equals((*Consul)(nil)) require.True(t, result) @@ -43,6 +48,8 @@ func TestConsul_Equals(t *testing.T) { } func TestConsul_Validate(t *testing.T) { + ci.Parallel(t) + t.Run("empty ns", func(t *testing.T) { result := (&Consul{Namespace: ""}).Validate() require.Nil(t, result) diff --git a/nomad/structs/csi_test.go b/nomad/structs/csi_test.go index 8f84d6226..9ef5a7f8b 100644 --- a/nomad/structs/csi_test.go +++ b/nomad/structs/csi_test.go @@ -5,11 +5,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) // TestCSIVolumeClaim ensures that volume claim workflows work as expected.
func TestCSIVolumeClaim(t *testing.T) { + ci.Parallel(t) + vol := NewCSIVolume("vol0", 0) vol.Schedulable = true vol.AccessMode = CSIVolumeAccessModeUnknown @@ -187,6 +190,8 @@ func TestCSIVolumeClaim(t *testing.T) { // // COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0 func TestCSIVolumeClaim_CompatOldClaims(t *testing.T) { + ci.Parallel(t) + vol := NewCSIVolume("vol0", 0) vol.Schedulable = true vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter @@ -283,6 +288,8 @@ func TestCSIVolumeClaim_CompatOldClaims(t *testing.T) { // // COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0 func TestCSIVolumeClaim_CompatNewClaimsOK(t *testing.T) { + ci.Parallel(t) + vol := NewCSIVolume("vol0", 0) vol.Schedulable = true vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter @@ -388,6 +395,8 @@ func TestCSIVolumeClaim_CompatNewClaimsOK(t *testing.T) { // // COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0 func TestCSIVolumeClaim_CompatNewClaimsNoUpgrade(t *testing.T) { + ci.Parallel(t) + vol := NewCSIVolume("vol0", 0) vol.Schedulable = true vol.AccessMode = CSIVolumeAccessModeMultiNodeReader @@ -471,6 +480,7 @@ func TestCSIVolumeClaim_CompatNewClaimsNoUpgrade(t *testing.T) { } func TestVolume_Copy(t *testing.T) { + ci.Parallel(t) a1 := MockAlloc() a2 := MockAlloc() @@ -555,6 +565,8 @@ func TestVolume_Copy(t *testing.T) { } func TestCSIVolume_Validate(t *testing.T) { + ci.Parallel(t) + vol := &CSIVolume{ ID: "test", PluginID: "test", @@ -570,6 +582,7 @@ func TestCSIVolume_Validate(t *testing.T) { } func TestCSIVolume_Merge(t *testing.T) { + ci.Parallel(t) testCases := []struct { name string @@ -739,6 +752,8 @@ func TestCSIVolume_Merge(t *testing.T) { } func TestCSIPluginJobs(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) controller := &Job{ ID: "job", @@ -789,6 +804,8 @@ func TestCSIPluginJobs(t *testing.T) { } func TestCSIPluginCleanup(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) plug.AddPlugin("n0", &CSIInfo{ PluginID: "foo", @@ -824,6 +841,8 @@ func TestCSIPluginCleanup(t *testing.T) { } func TestDeleteNodeForType_Controller(t *testing.T) { + ci.Parallel(t) + info := &CSIInfo{ PluginID: "foo", AllocID: "a0", @@ -847,6 +866,8 @@ func TestDeleteNodeForType_Controller(t *testing.T) { } func TestDeleteNodeForType_NilController(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) plug.Controllers["n0"] = nil @@ -861,6 +882,8 @@ func TestDeleteNodeForType_NilController(t *testing.T) { } func TestDeleteNodeForType_Node(t *testing.T) { + ci.Parallel(t) + info := &CSIInfo{ PluginID: "foo", AllocID: "a0", @@ -884,6 +907,8 @@ func TestDeleteNodeForType_Node(t *testing.T) { } func TestDeleteNodeForType_NilNode(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) plug.Nodes["n0"] = nil @@ -898,6 +923,8 @@ func TestDeleteNodeForType_NilNode(t *testing.T) { } func TestDeleteNodeForType_Monolith(t *testing.T) { + ci.Parallel(t) + controllerInfo := &CSIInfo{ PluginID: "foo", AllocID: "a0", @@ -940,6 +967,8 @@ func TestDeleteNodeForType_Monolith(t *testing.T) { } func TestDeleteNodeForType_Monolith_NilController(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) plug.Controllers["n0"] = nil @@ -972,6 +1001,8 @@ func TestDeleteNodeForType_Monolith_NilController(t *testing.T) { } func TestDeleteNodeForType_Monolith_NilNode(t *testing.T) { + ci.Parallel(t) + plug := NewCSIPlugin("foo", 1000) plug.Nodes["n0"] = nil diff --git a/nomad/structs/devices_test.go 
b/nomad/structs/devices_test.go index 013b6fcec..8d43a45ad 100644 --- a/nomad/structs/devices_test.go +++ b/nomad/structs/devices_test.go @@ -3,6 +3,7 @@ package structs import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" "github.com/stretchr/testify/require" @@ -54,6 +55,8 @@ func devNode() *Node { // Make sure that the device accounter works even if the node has no devices func TestDeviceAccounter_AddAllocs_NoDeviceNode(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := MockNode() d := NewDeviceAccounter(n) @@ -71,6 +74,8 @@ func TestDeviceAccounter_AddAllocs_NoDeviceNode(t *testing.T) { // Add allocs to a node with a device func TestDeviceAccounter_AddAllocs(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := devNode() d := NewDeviceAccounter(n) @@ -109,6 +114,8 @@ func TestDeviceAccounter_AddAllocs(t *testing.T) { // operate on previous allocs even if the device has changed to unhealthy and we // don't track it func TestDeviceAccounter_AddAllocs_UnknownID(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := devNode() d := NewDeviceAccounter(n) @@ -137,6 +144,8 @@ func TestDeviceAccounter_AddAllocs_UnknownID(t *testing.T) { // Test that collision detection works func TestDeviceAccounter_AddAllocs_Collision(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := devNode() d := NewDeviceAccounter(n) @@ -155,6 +164,8 @@ func TestDeviceAccounter_AddAllocs_Collision(t *testing.T) { // Make sure that the device allocator works even if the node has no devices func TestDeviceAccounter_AddReserved_NoDeviceNode(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := MockNode() d := NewDeviceAccounter(n) @@ -166,6 +177,8 @@ func TestDeviceAccounter_AddReserved_NoDeviceNode(t *testing.T) { // Add reserved to a node with a device func TestDeviceAccounter_AddReserved(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := devNode() d := NewDeviceAccounter(n) @@ -197,6 +210,8 @@ func TestDeviceAccounter_AddReserved(t *testing.T) { // Test that collision detection works func TestDeviceAccounter_AddReserved_Collision(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := devNode() d := NewDeviceAccounter(n) diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go index ef950b90d..e5409b17c 100644 --- a/nomad/structs/diff_test.go +++ b/nomad/structs/diff_test.go @@ -5,11 +5,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/require" ) func TestJobDiff(t *testing.T) { + ci.Parallel(t) + cases := []struct { Old, New *Job Expected *JobDiff @@ -1340,6 +1343,8 @@ func TestJobDiff(t *testing.T) { } func TestTaskGroupDiff(t *testing.T) { + ci.Parallel(t) + cases := []struct { TestCase string Old, New *TaskGroup @@ -3918,6 +3923,8 @@ func TestTaskGroupDiff(t *testing.T) { } func TestTaskDiff(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Old, New *Task @@ -7257,6 +7264,8 @@ func TestTaskDiff(t *testing.T) { } func TestServicesDiff(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Old, New []*Service diff --git a/nomad/structs/errors_test.go b/nomad/structs/errors_test.go index 08e5fb716..0fb67753d 100644 --- a/nomad/structs/errors_test.go +++ b/nomad/structs/errors_test.go @@ -4,10 +4,13 @@ import ( "errors" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" ) func 
TestRPCCodedErrors(t *testing.T) { + ci.Parallel(t) + cases := []struct { err error code int diff --git a/nomad/structs/funcs_test.go b/nomad/structs/funcs_test.go index 24211c67a..a36a36c58 100644 --- a/nomad/structs/funcs_test.go +++ b/nomad/structs/funcs_test.go @@ -7,12 +7,15 @@ import ( "testing" lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestRemoveAllocs(t *testing.T) { + ci.Parallel(t) + l := []*Allocation{ {ID: "foo"}, {ID: "bar"}, @@ -30,6 +33,8 @@ func TestRemoveAllocs(t *testing.T) { } func TestFilterTerminalAllocs(t *testing.T) { + ci.Parallel(t) + l := []*Allocation{ { ID: "bar", @@ -81,6 +86,8 @@ func TestFilterTerminalAllocs(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestAllocsFit_PortsOvercommitted_Old(t *testing.T) { + ci.Parallel(t) + n := &Node{ Resources: &Resources{ Networks: []*NetworkResource{ @@ -137,6 +144,8 @@ func TestAllocsFit_PortsOvercommitted_Old(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestAllocsFit_Old(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := &Node{ @@ -200,6 +209,8 @@ func TestAllocsFit_Old(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestAllocsFit_TerminalAlloc_Old(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := &Node{ @@ -264,6 +275,8 @@ func TestAllocsFit_TerminalAlloc_Old(t *testing.T) { } func TestAllocsFit(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := &Node{ @@ -405,6 +418,8 @@ func TestAllocsFit(t *testing.T) { } func TestAllocsFit_TerminalAlloc(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := &Node{ @@ -488,6 +503,8 @@ func TestAllocsFit_TerminalAlloc(t *testing.T) { // Tests that AllocsFit detects device collisions func TestAllocsFit_Devices(t *testing.T) { + ci.Parallel(t) + require := require.New(t) n := MockNvidiaNode() @@ -555,6 +572,8 @@ func TestAllocsFit_Devices(t *testing.T) { // TestAllocsFit_MemoryOversubscription asserts that only reserved memory is // used for capacity func TestAllocsFit_MemoryOversubscription(t *testing.T) { + ci.Parallel(t) + n := &Node{ NodeResources: &NodeResources{ Cpu: NodeCpuResources{ @@ -609,6 +628,8 @@ func TestAllocsFit_MemoryOversubscription(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestScoreFitBinPack_Old(t *testing.T) { + ci.Parallel(t) + node := &Node{} node.Resources = &Resources{ CPU: 4096, @@ -669,6 +690,8 @@ func TestScoreFitBinPack_Old(t *testing.T) { } func TestScoreFitBinPack(t *testing.T) { + ci.Parallel(t) + node := &Node{} node.NodeResources = &NodeResources{ Cpu: NodeCpuResources{ @@ -738,6 +761,8 @@ func TestScoreFitBinPack(t *testing.T) { } func TestACLPolicyListHash(t *testing.T) { + ci.Parallel(t) + h1 := ACLPolicyListHash(nil) assert.NotEqual(t, "", h1) @@ -784,6 +809,8 @@ func TestACLPolicyListHash(t *testing.T) { } func TestCompileACLObject(t *testing.T) { + ci.Parallel(t) + p1 := &ACLPolicy{ Name: fmt.Sprintf("policy-%s", uuid.Generate()), Description: "Super cool policy!", @@ -843,6 +870,8 @@ func TestCompileACLObject(t *testing.T) { // TestGenerateMigrateToken asserts the migrate token is valid for use in HTTP // headers and CompareMigrateToken works as expected. 
func TestGenerateMigrateToken(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) allocID := uuid.Generate() nodeSecret := uuid.Generate() @@ -863,6 +892,8 @@ func TestGenerateMigrateToken(t *testing.T) { } func TestMergeMultierrorWarnings(t *testing.T) { + ci.Parallel(t) + var errs []error // empty @@ -883,6 +914,8 @@ func TestMergeMultierrorWarnings(t *testing.T) { // TestParsePortRanges asserts ParsePortRanges errors on invalid port ranges. func TestParsePortRanges(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string spec string diff --git a/nomad/structs/network_test.go b/nomad/structs/network_test.go index 277f36f1a..ccb2900c1 100644 --- a/nomad/structs/network_test.go +++ b/nomad/structs/network_test.go @@ -6,10 +6,13 @@ import ( "reflect" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestNetworkIndex_Copy(t *testing.T) { + ci.Parallel(t) + n := &Node{ NodeResources: &NodeResources{ Networks: []*NetworkResource{ @@ -123,6 +126,7 @@ func TestNetworkIndex_Copy(t *testing.T) { func TestNetworkIndex_Overcommitted(t *testing.T) { t.Skip() + ci.Parallel(t) idx := NewNetworkIndex() // Consume some network @@ -165,6 +169,8 @@ func TestNetworkIndex_Overcommitted(t *testing.T) { } func TestNetworkIndex_SetNode(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() n := &Node{ NodeResources: &NodeResources{ @@ -200,6 +206,8 @@ func TestNetworkIndex_SetNode(t *testing.T) { } func TestNetworkIndex_AddAllocs(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() allocs := []*Allocation{ { @@ -255,6 +263,8 @@ func TestNetworkIndex_AddAllocs(t *testing.T) { } func TestNetworkIndex_AddReserved(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() reserved := &NetworkResource{ @@ -288,6 +298,8 @@ func TestNetworkIndex_AddReserved(t *testing.T) { // XXX Reserving ports doesn't work when yielding from a CIDR block. This is // okay for now since we do not actually fingerprint CIDR blocks. func TestNetworkIndex_yieldIP(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() n := &Node{ NodeResources: &NodeResources{ @@ -316,6 +328,7 @@ func TestNetworkIndex_yieldIP(t *testing.T) { } func TestNetworkIndex_AssignNetwork(t *testing.T) { + ci.Parallel(t) idx := NewNetworkIndex() n := &Node{ NodeResources: &NodeResources{ @@ -419,6 +432,7 @@ func TestNetworkIndex_AssignNetwork(t *testing.T) { // This test ensures that even with a small domain of available ports we are // able to make a dynamic port allocation. 
func TestNetworkIndex_AssignNetwork_Dynamic_Contention(t *testing.T) { + ci.Parallel(t) // Create a node that only has one free port idx := NewNetworkIndex() @@ -465,6 +479,8 @@ func TestNetworkIndex_AssignNetwork_Dynamic_Contention(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestNetworkIndex_SetNode_Old(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() n := &Node{ Resources: &Resources{ @@ -508,6 +524,8 @@ func TestNetworkIndex_SetNode_Old(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestNetworkIndex_AddAllocs_Old(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() allocs := []*Allocation{ { @@ -560,6 +578,8 @@ func TestNetworkIndex_AddAllocs_Old(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestNetworkIndex_yieldIP_Old(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() n := &Node{ Resources: &Resources{ @@ -599,6 +619,8 @@ func TestNetworkIndex_yieldIP_Old(t *testing.T) { // COMPAT(0.11): Remove in 0.11 func TestNetworkIndex_AssignNetwork_Old(t *testing.T) { + ci.Parallel(t) + idx := NewNetworkIndex() n := &Node{ Resources: &Resources{ @@ -735,6 +757,7 @@ func TestNetworkIndex_AssignNetwork_Old(t *testing.T) { // This test ensures that even with a small domain of available ports we are // able to make a dynamic port allocation. func TestNetworkIndex_AssignNetwork_Dynamic_Contention_Old(t *testing.T) { + ci.Parallel(t) // Create a node that only has one free port idx := NewNetworkIndex() @@ -787,6 +810,8 @@ func TestNetworkIndex_AssignNetwork_Dynamic_Contention_Old(t *testing.T) { } func TestIntContains(t *testing.T) { + ci.Parallel(t) + l := []int{1, 2, 10, 20} if isPortReserved(l, 50) { t.Fatalf("bad") diff --git a/nomad/structs/node_class_test.go b/nomad/structs/node_class_test.go index 1c7ffc3f6..77faca36b 100644 --- a/nomad/structs/node_class_test.go +++ b/nomad/structs/node_class_test.go @@ -4,6 +4,7 @@ import ( "reflect" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" "github.com/stretchr/testify/require" @@ -52,6 +53,8 @@ func testNode() *Node { } func TestNode_ComputedClass(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Create a node and get its computed class @@ -81,6 +84,8 @@ func TestNode_ComputedClass(t *testing.T) { } func TestNode_ComputedClass_Ignore(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Create a node and get its computed class @@ -98,6 +103,8 @@ func TestNode_ComputedClass_Ignore(t *testing.T) { } func TestNode_ComputedClass_Device_Attr(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Create a node and get its computed class @@ -122,6 +129,8 @@ func TestNode_ComputedClass_Device_Attr(t *testing.T) { } func TestNode_ComputedClass_Attr(t *testing.T) { + ci.Parallel(t) + // Create a node and get its computed class n := testNode() if err := n.ComputeClass(); err != nil { @@ -168,6 +177,8 @@ func TestNode_ComputedClass_Attr(t *testing.T) { } func TestNode_ComputedClass_Meta(t *testing.T) { + ci.Parallel(t) + // Create a node and get its computed class n := testNode() if err := n.ComputeClass(); err != nil { @@ -205,6 +216,8 @@ func TestNode_ComputedClass_Meta(t *testing.T) { } func TestNode_EscapedConstraints(t *testing.T) { + ci.Parallel(t) + // Non-escaped constraints ne1 := &Constraint{ LTarget: "${attr.kernel.name}", diff --git a/nomad/structs/node_test.go b/nomad/structs/node_test.go index aee21accb..6c829dca3 100644 --- a/nomad/structs/node_test.go +++ 
b/nomad/structs/node_test.go @@ -3,10 +3,13 @@ package structs import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) func TestDriverInfoEquals(t *testing.T) { + ci.Parallel(t) + require := require.New(t) var driverInfoTest = []struct { input []*DriverInfo diff --git a/nomad/structs/services_test.go b/nomad/structs/services_test.go index 9375366d7..6965f2421 100644 --- a/nomad/structs/services_test.go +++ b/nomad/structs/services_test.go @@ -4,12 +4,13 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/require" ) func TestServiceCheck_Hash(t *testing.T) { - t.Parallel() + ci.Parallel(t) original := &ServiceCheck{ Name: "check", @@ -53,7 +54,7 @@ func TestServiceCheck_Hash(t *testing.T) { } func TestServiceCheck_validate_PassingTypes(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("valid", func(t *testing.T) { for _, checkType := range []string{"tcp", "http", "grpc"} { @@ -83,7 +84,7 @@ func TestServiceCheck_validate_PassingTypes(t *testing.T) { } func TestServiceCheck_validate_FailingTypes(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("valid", func(t *testing.T) { for _, checkType := range []string{"tcp", "http", "grpc"} { @@ -114,7 +115,7 @@ func TestServiceCheck_validate_FailingTypes(t *testing.T) { } func TestServiceCheck_validate_PassFailZero_on_scripts(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("invalid", func(t *testing.T) { err := (&ServiceCheck{ @@ -131,7 +132,7 @@ func TestServiceCheck_validate_PassFailZero_on_scripts(t *testing.T) { } func TestServiceCheck_validate_OnUpdate_CheckRestart_Conflict(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("invalid", func(t *testing.T) { err := (&ServiceCheck{ @@ -186,7 +187,7 @@ func TestServiceCheck_validate_OnUpdate_CheckRestart_Conflict(t *testing.T) { } func TestService_Hash(t *testing.T) { - t.Parallel() + ci.Parallel(t) original := &Service{ Name: "myService", @@ -293,7 +294,7 @@ func TestService_Hash(t *testing.T) { } func TestConsulConnect_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := &ConsulConnect{} @@ -312,7 +313,7 @@ func TestConsulConnect_Validate(t *testing.T) { } func TestConsulConnect_CopyEquals(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := &ConsulConnect{ SidecarService: &ConsulSidecarService{ @@ -349,7 +350,7 @@ func TestConsulConnect_CopyEquals(t *testing.T) { } func TestConsulConnect_GatewayProxy_CopyEquals(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := &ConsulGatewayProxy{ ConnectTimeout: helper.TimeToPtr(1 * time.Second), @@ -366,7 +367,7 @@ func TestConsulConnect_GatewayProxy_CopyEquals(t *testing.T) { } func TestSidecarTask_MergeIntoTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) task := MockJob().TaskGroups[0].Tasks[0] sTask := &SidecarTask{ @@ -422,7 +423,7 @@ func TestSidecarTask_MergeIntoTask(t *testing.T) { } func TestSidecarTask_Equals(t *testing.T) { - t.Parallel() + ci.Parallel(t) original := &SidecarTask{ Name: "sidecar-task-1", @@ -501,7 +502,7 @@ func TestSidecarTask_Equals(t *testing.T) { } func TestConsulUpstream_upstreamEquals(t *testing.T) { - t.Parallel() + ci.Parallel(t) up := func(name string, port int) ConsulUpstream { return ConsulUpstream{ @@ -542,7 +543,7 @@ func TestConsulUpstream_upstreamEquals(t *testing.T) { } func TestConsulExposePath_exposePathsEqual(t *testing.T) { - t.Parallel() + ci.Parallel(t) expose := func(path, protocol, listen string, local int) ConsulExposePath { return ConsulExposePath{ @@ -579,7 +580,7 
@@ func TestConsulExposePath_exposePathsEqual(t *testing.T) { } func TestConsulExposeConfig_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, (*ConsulExposeConfig)(nil).Copy()) require.Equal(t, &ConsulExposeConfig{ @@ -594,7 +595,7 @@ func TestConsulExposeConfig_Copy(t *testing.T) { } func TestConsulExposeConfig_Equals(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.True(t, (*ConsulExposeConfig)(nil).Equals(nil)) require.True(t, (&ConsulExposeConfig{ @@ -609,7 +610,7 @@ func TestConsulExposeConfig_Equals(t *testing.T) { } func TestConsulSidecarService_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { s := (*ConsulSidecarService)(nil) @@ -700,6 +701,8 @@ var ( ) func TestConsulGateway_Prefix(t *testing.T) { + ci.Parallel(t) + t.Run("ingress", func(t *testing.T) { result := (&ConsulGateway{Ingress: new(ConsulIngressConfigEntry)}).Prefix() require.Equal(t, ConnectIngressPrefix, result) @@ -717,7 +720,7 @@ func TestConsulGateway_Prefix(t *testing.T) { } func TestConsulGateway_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { g := (*ConsulGateway)(nil) @@ -748,7 +751,7 @@ func TestConsulGateway_Copy(t *testing.T) { } func TestConsulGateway_Equals_mesh(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { a := (*ConsulGateway)(nil) @@ -764,7 +767,7 @@ func TestConsulGateway_Equals_mesh(t *testing.T) { } func TestConsulGateway_Equals_ingress(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { a := (*ConsulGateway)(nil) @@ -858,7 +861,7 @@ func TestConsulGateway_Equals_ingress(t *testing.T) { } func TestConsulGateway_Equals_terminating(t *testing.T) { - t.Parallel() + ci.Parallel(t) original := consulTerminatingGateway1.Copy() @@ -911,7 +914,7 @@ func TestConsulGateway_Equals_terminating(t *testing.T) { } func TestConsulGateway_ingressServicesEqual(t *testing.T) { - t.Parallel() + ci.Parallel(t) igs1 := []*ConsulIngressService{{ Name: "service1", @@ -942,7 +945,7 @@ func TestConsulGateway_ingressServicesEqual(t *testing.T) { } func TestConsulGateway_ingressListenersEqual(t *testing.T) { - t.Parallel() + ci.Parallel(t) ils1 := []*ConsulIngressListener{{ Port: 2000, @@ -969,7 +972,7 @@ func TestConsulGateway_ingressListenersEqual(t *testing.T) { } func TestConsulGateway_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("bad proxy", func(t *testing.T) { err := (&ConsulGateway{ @@ -1037,7 +1040,7 @@ func TestConsulGateway_Validate(t *testing.T) { } func TestConsulGatewayBindAddress_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no address", func(t *testing.T) { err := (&ConsulGatewayBindAddress{ @@ -1065,7 +1068,7 @@ func TestConsulGatewayBindAddress_Validate(t *testing.T) { } func TestConsulGatewayProxy_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no timeout", func(t *testing.T) { err := (&ConsulGatewayProxy{ @@ -1117,7 +1120,7 @@ func TestConsulGatewayProxy_Validate(t *testing.T) { } func TestConsulIngressService_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("invalid name", func(t *testing.T) { err := (&ConsulIngressService{ @@ -1172,7 +1175,7 @@ func TestConsulIngressService_Validate(t *testing.T) { } func TestConsulIngressListener_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("invalid port", func(t *testing.T) { err := (&ConsulIngressListener{ @@ -1229,7 +1232,7 @@ func TestConsulIngressListener_Validate(t *testing.T) { } func 
TestConsulIngressConfigEntry_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("no listeners", func(t *testing.T) { err := (&ConsulIngressConfigEntry{}).Validate() @@ -1264,7 +1267,7 @@ func TestConsulIngressConfigEntry_Validate(t *testing.T) { } func TestConsulLinkedService_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { err := (*ConsulLinkedService)(nil).Validate() @@ -1349,7 +1352,7 @@ func TestConsulLinkedService_Validate(t *testing.T) { } func TestConsulLinkedService_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, (*ConsulLinkedService)(nil).Copy()) require.Equal(t, &ConsulLinkedService{ @@ -1368,7 +1371,7 @@ func TestConsulLinkedService_Copy(t *testing.T) { } func TestConsulLinkedService_linkedServicesEqual(t *testing.T) { - t.Parallel() + ci.Parallel(t) services := []*ConsulLinkedService{{ Name: "service1", @@ -1399,7 +1402,7 @@ func TestConsulLinkedService_linkedServicesEqual(t *testing.T) { } func TestConsulTerminatingConfigEntry_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { err := (*ConsulTerminatingConfigEntry)(nil).Validate() @@ -1433,7 +1436,7 @@ func TestConsulTerminatingConfigEntry_Validate(t *testing.T) { } func TestConsulMeshGateway_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require.Nil(t, (*ConsulMeshGateway)(nil)) require.Equal(t, &ConsulMeshGateway{ @@ -1444,7 +1447,7 @@ func TestConsulMeshGateway_Copy(t *testing.T) { } func TestConsulMeshGateway_Equals(t *testing.T) { - t.Parallel() + ci.Parallel(t) c := &ConsulMeshGateway{Mode: "local"} require.False(t, c.Equals(nil)) @@ -1455,7 +1458,7 @@ func TestConsulMeshGateway_Equals(t *testing.T) { } func TestConsulMeshGateway_Validate(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("nil", func(t *testing.T) { err := (*ConsulMeshGateway)(nil).Validate() diff --git a/nomad/structs/structs_periodic_test.go b/nomad/structs/structs_periodic_test.go index de795296c..f828bced4 100644 --- a/nomad/structs/structs_periodic_test.go +++ b/nomad/structs/structs_periodic_test.go @@ -6,11 +6,14 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestPeriodicConfig_DSTChange_Transitions(t *testing.T) { + ci.Parallel(t) + locName := "America/Los_Angeles" loc, err := time.LoadLocation(locName) require.NoError(t, err) @@ -221,6 +224,8 @@ func TestPeriodicConfig_DSTChange_Transitions(t *testing.T) { } func TestPeriodConfig_DSTSprintForward_Property(t *testing.T) { + ci.Parallel(t) + locName := "America/Los_Angeles" loc, err := time.LoadLocation(locName) require.NoError(t, err) diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index 40b343910..b7a1e7a01 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" @@ -19,6 +20,8 @@ import ( ) func TestJob_Validate(t *testing.T) { + ci.Parallel(t) + j := &Job{} err := j.Validate() requireErrors(t, err, @@ -98,6 +101,8 @@ func TestJob_Validate(t *testing.T) { } func TestJob_ValidateScaling(t *testing.T) { + ci.Parallel(t) + require := require.New(t) p := &ScalingPolicy{ @@ -142,6 +147,8 @@ func TestJob_ValidateScaling(t *testing.T) { } func TestJob_ValidateNullChar(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) // 
job id should not allow null characters @@ -166,6 +173,8 @@ func TestJob_ValidateNullChar(t *testing.T) { } func TestJob_Warnings(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Job *Job @@ -270,6 +279,8 @@ func TestJob_Warnings(t *testing.T) { } func TestJob_SpecChanged(t *testing.T) { + ci.Parallel(t) + // Get a base test job base := testJob() @@ -412,6 +423,8 @@ func testJob() *Job { } func TestJob_Copy(t *testing.T) { + ci.Parallel(t) + j := testJob() c := j.Copy() if !reflect.DeepEqual(j, c) { @@ -420,6 +433,8 @@ func TestJob_Copy(t *testing.T) { } func TestJob_IsPeriodic(t *testing.T) { + ci.Parallel(t) + j := &Job{ Type: JobTypeService, Periodic: &PeriodicConfig{ @@ -439,6 +454,8 @@ func TestJob_IsPeriodic(t *testing.T) { } func TestJob_IsPeriodicActive(t *testing.T) { + ci.Parallel(t) + cases := []struct { job *Job active bool @@ -549,6 +566,8 @@ func TestJob_SystemJob_Validate(t *testing.T) { } func TestJob_VaultPolicies(t *testing.T) { + ci.Parallel(t) + j0 := &Job{} e0 := make(map[string]map[string]*Vault, 0) @@ -632,7 +651,7 @@ func TestJob_VaultPolicies(t *testing.T) { } func TestJob_ConnectTasks(t *testing.T) { - t.Parallel() + ci.Parallel(t) r := require.New(t) j0 := &Job{ @@ -697,6 +716,8 @@ func TestJob_ConnectTasks(t *testing.T) { } func TestJob_RequiredSignals(t *testing.T) { + ci.Parallel(t) + j0 := &Job{} e0 := make(map[string]map[string][]string, 0) @@ -809,6 +830,8 @@ func TestJob_RequiredSignals(t *testing.T) { // test new Equal comparisons for components of Jobs func TestJob_PartEqual(t *testing.T) { + ci.Parallel(t) + ns := &Networks{} require.True(t, ns.Equals(&Networks{})) @@ -854,7 +877,7 @@ func TestJob_PartEqual(t *testing.T) { } func TestTask_UsesConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) t.Run("normal task", func(t *testing.T) { task := testJob().TaskGroups[0].Tasks[0] @@ -900,7 +923,7 @@ func TestTask_UsesConnect(t *testing.T) { } func TestTaskGroup_UsesConnect(t *testing.T) { - t.Parallel() + ci.Parallel(t) try := func(t *testing.T, tg *TaskGroup, exp bool) { result := tg.UsesConnect() @@ -948,6 +971,8 @@ func TestTaskGroup_UsesConnect(t *testing.T) { } func TestTaskGroup_Validate(t *testing.T) { + ci.Parallel(t) + j := testJob() tg := &TaskGroup{ Count: -1, @@ -1228,6 +1253,8 @@ func TestTaskGroup_Validate(t *testing.T) { } func TestTaskGroupNetwork_Validate(t *testing.T) { + ci.Parallel(t) + cases := []struct { TG *TaskGroup ErrContains string @@ -1489,6 +1516,8 @@ func TestTaskGroupNetwork_Validate(t *testing.T) { } func TestTask_Validate(t *testing.T) { + ci.Parallel(t) + task := &Task{} ephemeralDisk := DefaultEphemeralDisk() err := task.Validate(ephemeralDisk, JobTypeBatch, nil, nil) @@ -1534,6 +1563,8 @@ func TestTask_Validate(t *testing.T) { } func TestTask_Validate_Resources(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string res *Resources @@ -1628,6 +1659,8 @@ func TestTask_Validate_Resources(t *testing.T) { } func TestNetworkResource_Copy(t *testing.T) { + ci.Parallel(t) + testCases := []struct { inputNetworkResource *NetworkResource name string @@ -1687,6 +1720,8 @@ func TestNetworkResource_Copy(t *testing.T) { } func TestTask_Validate_Services(t *testing.T) { + ci.Parallel(t) + s1 := &Service{ Name: "service-name", PortLabel: "bar", @@ -1785,6 +1820,8 @@ func TestTask_Validate_Services(t *testing.T) { } func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) { + ci.Parallel(t) + ephemeralDisk := DefaultEphemeralDisk() getTask := func(s *Service) *Task { task := &Task{ @@ -1846,6 +1883,8 @@ 
func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) { } func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) { + ci.Parallel(t) + ephemeralDisk := DefaultEphemeralDisk() getTask := func(s *Service) *Task { return &Task{ @@ -1899,6 +1938,7 @@ func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) { } func TestTask_Validate_Service_Check(t *testing.T) { + ci.Parallel(t) invalidCheck := ServiceCheck{ Name: "check-name", @@ -2006,6 +2046,8 @@ func TestTask_Validate_Service_Check(t *testing.T) { // TestTask_Validate_Service_Check_AddressMode asserts that checks do not // inherit address mode but do inherit ports. func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { + ci.Parallel(t) + getTask := func(s *Service) (*Task, *TaskGroup) { return &Task{ Services: []*Service{s}, @@ -2184,7 +2226,7 @@ func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { } func TestTask_Validate_Service_Check_GRPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Bad (no port) invalidGRPC := &ServiceCheck{ Type: ServiceCheckGRPC, @@ -2210,7 +2252,7 @@ func TestTask_Validate_Service_Check_GRPC(t *testing.T) { } func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) { - t.Parallel() + ci.Parallel(t) invalidCheckRestart := &CheckRestart{ Limit: -1, Grace: -1, @@ -2229,6 +2271,8 @@ func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) { } func TestTask_Validate_ConnectProxyKind(t *testing.T) { + ci.Parallel(t) + ephemeralDisk := DefaultEphemeralDisk() getTask := func(kind TaskKind, leader bool) *Task { task := &Task{ @@ -2336,6 +2380,8 @@ func TestTask_Validate_ConnectProxyKind(t *testing.T) { } func TestTask_Validate_LogConfig(t *testing.T) { + ci.Parallel(t) + task := &Task{ LogConfig: DefaultLogConfig(), } @@ -2348,6 +2394,8 @@ func TestTask_Validate_LogConfig(t *testing.T) { } func TestLogConfig_Equals(t *testing.T) { + ci.Parallel(t) + t.Run("both nil", func(t *testing.T) { a := (*LogConfig)(nil) b := (*LogConfig)(nil) @@ -2380,6 +2428,8 @@ func TestLogConfig_Equals(t *testing.T) { } func TestTask_Validate_CSIPluginConfig(t *testing.T) { + ci.Parallel(t) + table := []struct { name string pc *TaskCSIPluginConfig @@ -2426,6 +2476,7 @@ func TestTask_Validate_CSIPluginConfig(t *testing.T) { } func TestTask_Validate_Template(t *testing.T) { + ci.Parallel(t) bad := &Template{} task := &Task{ @@ -2471,6 +2522,8 @@ func TestTask_Validate_Template(t *testing.T) { } func TestTemplate_Validate(t *testing.T) { + ci.Parallel(t) + cases := []struct { Tmpl *Template Fail bool @@ -2613,6 +2666,8 @@ func TestTemplate_Validate(t *testing.T) { } func TestTaskWaitConfig_Equals(t *testing.T) { + ci.Parallel(t) + testCases := []struct { name string config *WaitConfig @@ -2662,6 +2717,8 @@ func TestTaskWaitConfig_Equals(t *testing.T) { } func TestConstraint_Validate(t *testing.T) { + ci.Parallel(t) + c := &Constraint{} err := c.Validate() require.Error(t, err, "Missing constraint operand") @@ -2734,6 +2791,8 @@ func TestConstraint_Validate(t *testing.T) { } func TestAffinity_Validate(t *testing.T) { + ci.Parallel(t) + type tc struct { affinity *Affinity err error @@ -2820,6 +2879,8 @@ func TestAffinity_Validate(t *testing.T) { } func TestUpdateStrategy_Validate(t *testing.T) { + ci.Parallel(t) + u := &UpdateStrategy{ MaxParallel: -1, HealthCheck: "foo", @@ -2844,6 +2905,8 @@ func TestUpdateStrategy_Validate(t *testing.T) { } func TestResource_NetIndex(t *testing.T) { + ci.Parallel(t) + r := &Resources{ Networks: []*NetworkResource{ {Device: "eth0"}, @@ -2863,6 +2926,8 @@ func 
TestResource_NetIndex(t *testing.T) { } func TestResource_Add(t *testing.T) { + ci.Parallel(t) + r1 := &Resources{ CPU: 2000, MemoryMB: 2048, @@ -2909,6 +2974,8 @@ func TestResource_Add(t *testing.T) { } func TestResource_Add_Network(t *testing.T) { + ci.Parallel(t) + r1 := &Resources{} r2 := &Resources{ Networks: []*NetworkResource{ @@ -2945,6 +3012,8 @@ func TestResource_Add_Network(t *testing.T) { } func TestComparableResources_Subtract(t *testing.T) { + ci.Parallel(t) + r1 := &ComparableResources{ Flattened: AllocatedTaskResources{ Cpu: AllocatedCpuResources{ @@ -3020,6 +3089,8 @@ func TestComparableResources_Subtract(t *testing.T) { } func TestMemoryResources_Add(t *testing.T) { + ci.Parallel(t) + r := &AllocatedMemoryResources{} // adding plain no max @@ -3043,6 +3114,8 @@ func TestMemoryResources_Add(t *testing.T) { } func TestNodeNetworkResource_Copy(t *testing.T) { + ci.Parallel(t) + netResource := &NodeNetworkResource{ Mode: "host", Device: "eth0", @@ -3073,6 +3146,8 @@ func TestNodeNetworkResource_Copy(t *testing.T) { } func TestEncodeDecode(t *testing.T) { + ci.Parallel(t) + type FooRequest struct { Foo string Bar int @@ -3117,6 +3192,8 @@ func BenchmarkEncodeDecode(b *testing.B) { } func TestInvalidServiceCheck(t *testing.T) { + ci.Parallel(t) + s := Service{ Name: "service-name", PortLabel: "bar", @@ -3227,6 +3304,8 @@ func TestInvalidServiceCheck(t *testing.T) { } func TestDistinctCheckID(t *testing.T) { + ci.Parallel(t) + c1 := ServiceCheck{ Name: "web-health", Type: "http", @@ -3261,6 +3340,8 @@ func TestDistinctCheckID(t *testing.T) { } func TestService_Canonicalize(t *testing.T) { + ci.Parallel(t) + job := "example" taskGroup := "cache" task := "redis" @@ -3295,6 +3376,8 @@ func TestService_Canonicalize(t *testing.T) { } func TestService_Validate(t *testing.T) { + ci.Parallel(t) + s := Service{ Name: "testservice", } @@ -3320,6 +3403,8 @@ func TestService_Validate(t *testing.T) { } func TestService_Equals(t *testing.T) { + ci.Parallel(t) + s := Service{ Name: "testservice", } @@ -3365,6 +3450,8 @@ func TestService_Equals(t *testing.T) { } func TestJob_ExpandServiceNames(t *testing.T) { + ci.Parallel(t) + j := &Job{ Name: "my-job", TaskGroups: []*TaskGroup{ @@ -3410,6 +3497,8 @@ func TestJob_ExpandServiceNames(t *testing.T) { } func TestJob_CombinedTaskMeta(t *testing.T) { + ci.Parallel(t) + j := &Job{ Meta: map[string]string{ "job_test": "job", @@ -3455,6 +3544,8 @@ func TestJob_CombinedTaskMeta(t *testing.T) { } func TestPeriodicConfig_EnabledInvalid(t *testing.T) { + ci.Parallel(t) + // Create a config that is enabled but with no interval specified. 
p := &PeriodicConfig{Enabled: true} if err := p.Validate(); err == nil { @@ -3481,6 +3572,8 @@ func TestPeriodicConfig_EnabledInvalid(t *testing.T) { } func TestPeriodicConfig_InvalidCron(t *testing.T) { + ci.Parallel(t) + specs := []string{"foo", "* *", "@foo"} for _, spec := range specs { p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} @@ -3492,6 +3585,8 @@ func TestPeriodicConfig_InvalidCron(t *testing.T) { } func TestPeriodicConfig_ValidCron(t *testing.T) { + ci.Parallel(t) + specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"} for _, spec := range specs { p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} @@ -3503,6 +3598,8 @@ func TestPeriodicConfig_ValidCron(t *testing.T) { } func TestPeriodicConfig_NextCron(t *testing.T) { + ci.Parallel(t) + from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC) cases := []struct { @@ -3543,6 +3640,8 @@ func TestPeriodicConfig_NextCron(t *testing.T) { } func TestPeriodicConfig_ValidTimeZone(t *testing.T) { + ci.Parallel(t) + zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"} for _, zone := range zones { p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone} @@ -3554,6 +3653,8 @@ func TestPeriodicConfig_ValidTimeZone(t *testing.T) { } func TestPeriodicConfig_DST(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // On Sun, Mar 12, 2:00 am 2017: +1 hour UTC @@ -3583,6 +3684,8 @@ func TestPeriodicConfig_DST(t *testing.T) { } func TestTaskLifecycleConfig_Validate(t *testing.T) { + ci.Parallel(t) + testCases := []struct { name string tlc *TaskLifecycleConfig @@ -3628,6 +3731,8 @@ func TestTaskLifecycleConfig_Validate(t *testing.T) { } func TestRestartPolicy_Validate(t *testing.T) { + ci.Parallel(t) + // Policy with acceptable restart options passes p := &RestartPolicy{ Mode: RestartPolicyModeFail, @@ -3682,6 +3787,8 @@ func TestRestartPolicy_Validate(t *testing.T) { } func TestReschedulePolicy_Validate(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string ReschedulePolicy *ReschedulePolicy @@ -3873,6 +3980,8 @@ func TestReschedulePolicy_Validate(t *testing.T) { } func TestAllocation_Index(t *testing.T) { + ci.Parallel(t) + a1 := Allocation{ Name: "example.cache[1]", TaskGroup: "cache", @@ -3892,6 +4001,8 @@ func TestAllocation_Index(t *testing.T) { } func TestTaskArtifact_Validate_Source(t *testing.T) { + ci.Parallel(t) + valid := &TaskArtifact{GetterSource: "google.com"} if err := valid.Validate(); err != nil { t.Fatalf("unexpected error: %v", err) @@ -3899,6 +4010,8 @@ func TestTaskArtifact_Validate_Source(t *testing.T) { } func TestTaskArtifact_Validate_Dest(t *testing.T) { + ci.Parallel(t) + valid := &TaskArtifact{GetterSource: "google.com"} if err := valid.Validate(); err != nil { t.Fatalf("unexpected error: %v", err) @@ -3923,7 +4036,7 @@ func TestTaskArtifact_Validate_Dest(t *testing.T) { // TestTaskArtifact_Hash asserts an artifact's hash changes when any of the // fields change. 
func TestTaskArtifact_Hash(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []TaskArtifact{ {}, @@ -4008,6 +4121,8 @@ func TestTaskArtifact_Hash(t *testing.T) { } func TestAllocation_ShouldMigrate(t *testing.T) { + ci.Parallel(t) + alloc := Allocation{ PreviousAllocation: "123", TaskGroup: "foo", @@ -4103,6 +4218,8 @@ func TestAllocation_ShouldMigrate(t *testing.T) { } func TestTaskArtifact_Validate_Checksum(t *testing.T) { + ci.Parallel(t) + cases := []struct { Input *TaskArtifact Err bool @@ -4155,7 +4272,7 @@ func TestTaskArtifact_Validate_Checksum(t *testing.T) { } func TestPlan_NormalizeAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) plan := &Plan{ NodeUpdate: make(map[string][]*Allocation), NodePreemptions: make(map[string][]*Allocation), @@ -4186,7 +4303,7 @@ func TestPlan_NormalizeAllocations(t *testing.T) { } func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { - t.Parallel() + ci.Parallel(t) plan := &Plan{ NodeUpdate: make(map[string][]*Allocation), } @@ -4215,7 +4332,7 @@ func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { } func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { - t.Parallel() + ci.Parallel(t) plan := &Plan{ NodePreemptions: make(map[string][]*Allocation), } @@ -4240,7 +4357,7 @@ func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { } func TestAllocation_MsgPackTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) planType := reflect.TypeOf(Allocation{}) msgPackTags, _ := planType.FieldByName("_struct") @@ -4249,7 +4366,7 @@ func TestAllocation_MsgPackTags(t *testing.T) { } func TestEvaluation_MsgPackTags(t *testing.T) { - t.Parallel() + ci.Parallel(t) planType := reflect.TypeOf(Evaluation{}) msgPackTags, _ := planType.FieldByName("_struct") @@ -4258,6 +4375,8 @@ func TestEvaluation_MsgPackTags(t *testing.T) { } func TestAllocation_Terminated(t *testing.T) { + ci.Parallel(t) + type desiredState struct { ClientStatus string DesiredStatus string @@ -4297,6 +4416,8 @@ func TestAllocation_Terminated(t *testing.T) { } func TestAllocation_ShouldReschedule(t *testing.T) { + ci.Parallel(t) + type testCase struct { Desc string FailTime time.Time @@ -4433,6 +4554,8 @@ func TestAllocation_ShouldReschedule(t *testing.T) { } func TestAllocation_LastEventTime(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string taskState map[string]*TaskState @@ -4495,6 +4618,8 @@ func TestAllocation_LastEventTime(t *testing.T) { } func TestAllocation_NextDelay(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string reschedulePolicy *ReschedulePolicy @@ -4978,6 +5103,8 @@ func TestAllocation_NextDelay(t *testing.T) { } func TestAllocation_WaitClientStop(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string stop time.Duration @@ -5037,6 +5164,8 @@ func TestAllocation_WaitClientStop(t *testing.T) { } func TestAllocation_Canonicalize_Old(t *testing.T) { + ci.Parallel(t) + alloc := MockAlloc() alloc.AllocatedResources = nil alloc.TaskResources = map[string]*Resources{ @@ -5090,6 +5219,8 @@ func TestAllocation_Canonicalize_Old(t *testing.T) { // TestAllocation_Canonicalize_New asserts that an alloc with latest // schema isn't modified with Canonicalize func TestAllocation_Canonicalize_New(t *testing.T) { + ci.Parallel(t) + alloc := MockAlloc() copy := alloc.Copy() @@ -5098,6 +5229,8 @@ func TestAllocation_Canonicalize_New(t *testing.T) { } func TestRescheduleTracker_Copy(t *testing.T) { + ci.Parallel(t) + type testCase struct { original 
*RescheduleTracker expected *RescheduleTracker @@ -5125,6 +5258,8 @@ func TestRescheduleTracker_Copy(t *testing.T) { } func TestVault_Validate(t *testing.T) { + ci.Parallel(t) + v := &Vault{ Env: true, ChangeMode: VaultChangeModeNoop, @@ -5151,6 +5286,8 @@ func TestVault_Validate(t *testing.T) { } func TestParameterizedJobConfig_Validate(t *testing.T) { + ci.Parallel(t) + d := &ParameterizedJobConfig{ Payload: "foo", } @@ -5169,6 +5306,8 @@ func TestParameterizedJobConfig_Validate(t *testing.T) { } func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { + ci.Parallel(t) + job := testJob() job.ParameterizedJob = &ParameterizedJobConfig{ Payload: DispatchPayloadOptional, @@ -5181,6 +5320,8 @@ func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { } func TestJobConfig_Validate_StopAferClientDisconnect(t *testing.T) { + ci.Parallel(t) + // Setup a system Job with stop_after_client_disconnect set, which is invalid job := testJob() job.Type = JobTypeSystem @@ -5208,6 +5349,8 @@ func TestJobConfig_Validate_StopAferClientDisconnect(t *testing.T) { } func TestParameterizedJobConfig_Canonicalize(t *testing.T) { + ci.Parallel(t) + d := &ParameterizedJobConfig{} d.Canonicalize() if d.Payload != DispatchPayloadOptional { @@ -5216,6 +5359,8 @@ func TestParameterizedJobConfig_Canonicalize(t *testing.T) { } func TestDispatchPayloadConfig_Validate(t *testing.T) { + ci.Parallel(t) + d := &DispatchPayloadConfig{ File: "foo", } @@ -5239,6 +5384,8 @@ func TestDispatchPayloadConfig_Validate(t *testing.T) { } func TestScalingPolicy_Canonicalize(t *testing.T) { + ci.Parallel(t) + cases := []struct { name string input *ScalingPolicy @@ -5267,6 +5414,8 @@ func TestScalingPolicy_Canonicalize(t *testing.T) { } func TestScalingPolicy_Validate(t *testing.T) { + ci.Parallel(t) + type testCase struct { name string input *ScalingPolicy @@ -5404,6 +5553,8 @@ func TestScalingPolicy_Validate(t *testing.T) { } func TestIsRecoverable(t *testing.T) { + ci.Parallel(t) + if IsRecoverable(nil) { t.Errorf("nil should not be recoverable") } @@ -5422,6 +5573,8 @@ func TestIsRecoverable(t *testing.T) { } func TestACLTokenValidate(t *testing.T) { + ci.Parallel(t) + tk := &ACLToken{} // Missing a type @@ -5467,6 +5620,8 @@ func TestACLTokenValidate(t *testing.T) { } func TestACLTokenPolicySubset(t *testing.T) { + ci.Parallel(t) + tk := &ACLToken{ Type: ACLClientToken, Policies: []string{"foo", "bar", "baz"}, @@ -5492,6 +5647,8 @@ func TestACLTokenPolicySubset(t *testing.T) { } func TestACLTokenSetHash(t *testing.T) { + ci.Parallel(t) + tk := &ACLToken{ Name: "foo", Type: ACLClientToken, @@ -5512,6 +5669,8 @@ func TestACLTokenSetHash(t *testing.T) { } func TestACLPolicySetHash(t *testing.T) { + ci.Parallel(t) + ap := &ACLPolicy{ Name: "foo", Description: "great policy", @@ -5531,6 +5690,8 @@ func TestACLPolicySetHash(t *testing.T) { } func TestTaskEventPopulate(t *testing.T) { + ci.Parallel(t) + prepopulatedEvent := NewTaskEvent(TaskSetup) prepopulatedEvent.DisplayMessage = "Hola" testcases := []struct { @@ -5583,6 +5744,8 @@ func TestTaskEventPopulate(t *testing.T) { } func TestNetworkResourcesEquals(t *testing.T) { + ci.Parallel(t) + require := require.New(t) var networkResourcesTest = []struct { input []*NetworkResource @@ -5742,7 +5905,7 @@ func TestNetworkResourcesEquals(t *testing.T) { } func TestNode_Canonicalize(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // Make sure the eligiblity is set properly @@ -5762,7 +5925,7 @@ func TestNode_Canonicalize(t *testing.T) { } func 
TestNode_Copy(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) node := &Node{ @@ -5873,6 +6036,8 @@ func TestNode_Copy(t *testing.T) { } func TestNode_Sanitize(t *testing.T) { + ci.Parallel(t) + require := require.New(t) testCases := []*Node{ @@ -5898,6 +6063,8 @@ func TestNode_Sanitize(t *testing.T) { } func TestSpread_Validate(t *testing.T) { + ci.Parallel(t) + type tc struct { spread *Spread err error @@ -6013,6 +6180,8 @@ func TestSpread_Validate(t *testing.T) { } func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) { + ci.Parallel(t) + require := require.New(t) cases := []struct { Input string @@ -6059,6 +6228,8 @@ func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) { } func TestMultiregion_CopyCanonicalize(t *testing.T) { + ci.Parallel(t) + require := require.New(t) emptyOld := &Multiregion{} @@ -6100,6 +6271,8 @@ func TestMultiregion_CopyCanonicalize(t *testing.T) { } func TestNodeResources_Copy(t *testing.T) { + ci.Parallel(t) + orig := &NodeResources{ Cpu: NodeCpuResources{ CpuShares: int64(32000), @@ -6146,6 +6319,8 @@ func TestNodeResources_Copy(t *testing.T) { } func TestNodeResources_Merge(t *testing.T) { + ci.Parallel(t) + res := &NodeResources{ Cpu: NodeCpuResources{ CpuShares: int64(32000), @@ -6194,6 +6369,8 @@ func TestNodeResources_Merge(t *testing.T) { } func TestAllocatedResources_Canonicalize(t *testing.T) { + ci.Parallel(t) + cases := map[string]struct { input *AllocatedResources expected *AllocatedResources @@ -6294,6 +6471,8 @@ func TestAllocatedResources_Canonicalize(t *testing.T) { } func TestAllocatedSharedResources_Canonicalize(t *testing.T) { + ci.Parallel(t) + a := &AllocatedSharedResources{ Networks: []*NetworkResource{ { @@ -6334,6 +6513,8 @@ func TestAllocatedSharedResources_Canonicalize(t *testing.T) { } func TestTaskGroup_validateScriptChecksInGroupServices(t *testing.T) { + ci.Parallel(t) + t.Run("service task not set", func(t *testing.T) { tg := &TaskGroup{ Name: "group1", @@ -6400,6 +6581,8 @@ func TestTaskGroup_validateScriptChecksInGroupServices(t *testing.T) { } func TestComparableResources_Superset(t *testing.T) { + ci.Parallel(t) + base := &ComparableResources{ Flattened: AllocatedTaskResources{ Cpu: AllocatedCpuResources{ diff --git a/nomad/system_endpoint_test.go b/nomad/system_endpoint_test.go index ee5eabd46..d0651a121 100644 --- a/nomad/system_endpoint_test.go +++ b/nomad/system_endpoint_test.go @@ -8,6 +8,7 @@ import ( memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -15,7 +16,7 @@ import ( ) func TestSystemEndpoint_GarbageCollect(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -66,7 +67,7 @@ func TestSystemEndpoint_GarbageCollect(t *testing.T) { } func TestSystemEndpoint_GarbageCollect_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() @@ -111,7 +112,7 @@ func TestSystemEndpoint_GarbageCollect_ACL(t *testing.T) { } func TestSystemEndpoint_ReconcileSummaries(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, nil) defer cleanupS1() @@ -174,7 +175,7 @@ func TestSystemEndpoint_ReconcileSummaries(t *testing.T) { } func TestSystemEndpoint_ReconcileJobSummaries_ACL(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, 
root, cleanupS1 := TestACLServer(t, nil) defer cleanupS1() diff --git a/nomad/timetable_test.go b/nomad/timetable_test.go index b5377fbf3..eeacd08f0 100644 --- a/nomad/timetable_test.go +++ b/nomad/timetable_test.go @@ -9,11 +9,12 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" ) func TestTimeTable(t *testing.T) { - t.Parallel() + ci.Parallel(t) tt := NewTimeTable(time.Second, time.Minute) index := tt.NearestIndex(time.Now()) @@ -90,7 +91,7 @@ func TestTimeTable(t *testing.T) { } func TestTimeTable_SerializeDeserialize(t *testing.T) { - t.Parallel() + ci.Parallel(t) tt := NewTimeTable(time.Second, time.Minute) // Witness some data @@ -132,7 +133,7 @@ func TestTimeTable_SerializeDeserialize(t *testing.T) { } func TestTimeTable_Overflow(t *testing.T) { - t.Parallel() + ci.Parallel(t) tt := NewTimeTable(time.Second, 3*time.Second) // Witness some data diff --git a/nomad/util_test.go b/nomad/util_test.go index 7f36e6850..af7583fd0 100644 --- a/nomad/util_test.go +++ b/nomad/util_test.go @@ -6,13 +6,14 @@ import ( "testing" version "github.com/hashicorp/go-version" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/serf/serf" "github.com/stretchr/testify/require" ) func TestIsNomadServer(t *testing.T) { - t.Parallel() + ci.Parallel(t) m := serf.Member{ Name: "foo", Addr: net.IP([]byte{127, 0, 0, 1}), @@ -84,7 +85,7 @@ func TestIsNomadServer(t *testing.T) { } func TestServersMeetMinimumVersionExcludingFailed(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { members []serf.Member @@ -153,7 +154,7 @@ func TestServersMeetMinimumVersionExcludingFailed(t *testing.T) { } func TestServersMeetMinimumVersionIncludingFailed(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { members []serf.Member @@ -206,7 +207,7 @@ func makeMember(version string, status serf.MemberStatus) serf.Member { } func TestShuffleStrings(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Generate input inp := make([]string, 10) for idx := range inp { @@ -242,7 +243,7 @@ func Test_partitionAll(t *testing.T) { } func TestMaxUint64(t *testing.T) { - t.Parallel() + ci.Parallel(t) if maxUint64(1, 2) != 2 { t.Fatalf("bad") } diff --git a/nomad/vault_test.go b/nomad/vault_test.go index 243a5a6b6..86532858e 100644 --- a/nomad/vault_test.go +++ b/nomad/vault_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -153,7 +154,7 @@ func testVaultRoleAndToken(v *testutil.TestVault, t *testing.T, vaultPolicies ma } func TestVaultClient_BadConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) conf := &config.VaultConfig{} logger := testlog.HCLogger(t) @@ -180,7 +181,7 @@ func TestVaultClient_BadConfig(t *testing.T) { // TestVaultClient_WithNamespaceSupport tests that the Vault namespace config, if present, will result in the // namespace header being set on the created Vault client. func TestVaultClient_WithNamespaceSupport(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tr := true testNs := "test-namespace" @@ -206,7 +207,7 @@ func TestVaultClient_WithNamespaceSupport(t *testing.T) { // TestVaultClient_WithoutNamespaceSupport tests that the Vault namespace config, if present, will result in the // namespace header being set on the created Vault client. 
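
As context for the two namespace tests here, a minimal sketch of what "namespace header being set" means against the stock Vault API client (standalone, not this test's harness):

    package main

    import (
        "fmt"

        vapi "github.com/hashicorp/vault/api"
    )

    func main() {
        client, err := vapi.NewClient(vapi.DefaultConfig())
        if err != nil {
            panic(err)
        }
        // Once set, the client sends X-Vault-Namespace on every request;
        // leaving the config empty leaves the header unset.
        client.SetNamespace("test-namespace")
        fmt.Println(client.Namespace())
    }
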
func TestVaultClient_WithoutNamespaceSupport(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) tr := true conf := &config.VaultConfig{ @@ -232,7 +233,7 @@ func TestVaultClient_WithoutNamespaceSupport(t *testing.T) { // Test that the Vault Client can establish a connection even if it is started // before Vault is available. func TestVaultClient_EstablishConnection(t *testing.T) { - t.Parallel() + ci.Parallel(t) for i := 10; i >= 0; i-- { v := testutil.NewTestVaultDelayed(t) logger := testlog.HCLogger(t) @@ -286,7 +287,7 @@ func TestVaultClient_EstablishConnection(t *testing.T) { } func TestVaultClient_ValidateRole(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -335,7 +336,7 @@ func TestVaultClient_ValidateRole(t *testing.T) { // TestVaultClient_ValidateRole_Success asserts that a valid token role // gets marked as valid func TestVaultClient_ValidateRole_Success(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -381,7 +382,7 @@ func TestVaultClient_ValidateRole_Success(t *testing.T) { // TestVaultClient_ValidateRole_Deprecated_Success asserts that a valid token // role gets marked as valid, even if it uses deprecated field, period func TestVaultClient_ValidateRole_Deprecated_Success(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -425,7 +426,7 @@ func TestVaultClient_ValidateRole_Deprecated_Success(t *testing.T) { } func TestVaultClient_ValidateRole_NonExistent(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -465,7 +466,7 @@ func TestVaultClient_ValidateRole_NonExistent(t *testing.T) { } func TestVaultClient_ValidateToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -519,7 +520,7 @@ func TestVaultClient_ValidateToken(t *testing.T) { } func TestVaultClient_SetActive(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -549,7 +550,7 @@ func TestVaultClient_SetActive(t *testing.T) { // Test that we can update the config and things keep working func TestVaultClient_SetConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -612,7 +613,7 @@ func TestVaultClient_SetConfig(t *testing.T) { // TestVaultClient_SetConfig_Deadlock asserts that calling SetConfig // concurrently with establishConnection does not deadlock. 
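
Before the test body below, a minimal standalone illustration of the invariant it checks: the config-swap path must never wait on a lock that the connection loop holds across slow I/O. The real vaultClient locking is more involved; names here are made up.

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    type client struct {
        mu   sync.Mutex
        addr string
    }

    // SetConfig swaps state under the lock and returns immediately.
    func (c *client) SetConfig(addr string) {
        c.mu.Lock()
        c.addr = addr
        c.mu.Unlock()
    }

    // establishConnection copies what it needs under the lock, then
    // releases it before anything slow, so SetConfig can't be starved.
    func (c *client) establishConnection() {
        for i := 0; i < 3; i++ {
            c.mu.Lock()
            addr := c.addr
            c.mu.Unlock()
            _ = addr // a real dial would happen here, lock not held
            time.Sleep(10 * time.Millisecond)
        }
    }

    func main() {
        c := &client{addr: "http://127.0.0.1:8200"}
        go c.establishConnection()
        c.SetConfig("http://127.0.0.1:8201") // returns promptly
        fmt.Println("no deadlock")
    }
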
func TestVaultClient_SetConfig_Deadlock(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -643,7 +644,7 @@ func TestVaultClient_SetConfig_Deadlock(t *testing.T) { // Test that we can disable vault func TestVaultClient_SetConfig_Disable(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -677,7 +678,7 @@ func TestVaultClient_SetConfig_Disable(t *testing.T) { } func TestVaultClient_RenewalLoop(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -713,7 +714,7 @@ func TestVaultClient_RenewalLoop(t *testing.T) { } func TestVaultClientRenewUpdatesExpiration(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -752,7 +753,7 @@ func TestVaultClientRenewUpdatesExpiration(t *testing.T) { } func TestVaultClient_StopsAfterPermissionError(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -786,7 +787,7 @@ func TestVaultClient_StopsAfterPermissionError(t *testing.T) { }) } func TestVaultClient_LoopsUntilCannotRenew(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -847,7 +848,7 @@ func parseTTLFromLookup(s *vapi.Secret, t *testing.T) int64 { } func TestVaultClient_LookupToken_Invalid(t *testing.T) { - t.Parallel() + ci.Parallel(t) tr := true conf := &config.VaultConfig{ Enabled: &tr, @@ -871,7 +872,7 @@ func TestVaultClient_LookupToken_Invalid(t *testing.T) { } func TestVaultClient_LookupToken_Root(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -933,7 +934,7 @@ func TestVaultClient_LookupToken_Root(t *testing.T) { } func TestVaultClient_LookupToken_Role(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -998,7 +999,7 @@ func TestVaultClient_LookupToken_Role(t *testing.T) { } func TestVaultClient_LookupToken_RateLimit(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1059,7 +1060,7 @@ func TestVaultClient_LookupToken_RateLimit(t *testing.T) { } func TestVaultClient_CreateToken_Root(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1103,7 +1104,7 @@ func TestVaultClient_CreateToken_Root(t *testing.T) { } func TestVaultClient_CreateToken_Whitelist_Role(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1151,7 +1152,7 @@ func TestVaultClient_CreateToken_Whitelist_Role(t *testing.T) { } func TestVaultClient_CreateToken_Root_Target_Role(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1202,7 +1203,7 @@ func TestVaultClient_CreateToken_Root_Target_Role(t *testing.T) { } func TestVaultClient_CreateToken_Blacklist_Role(t *testing.T) { - t.Parallel() + ci.Parallel(t) // Need to skip if test is 0.6.4 version, err := testutil.VaultVersion() if err != nil { @@ -1261,7 +1262,7 @@ func TestVaultClient_CreateToken_Blacklist_Role(t *testing.T) { } func TestVaultClient_CreateToken_Role_InvalidToken(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1300,7 +1301,7 @@ func TestVaultClient_CreateToken_Role_InvalidToken(t *testing.T) { } func TestVaultClient_CreateToken_Role_Unrecoverable(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1335,7 +1336,7 @@ func 
TestVaultClient_CreateToken_Role_Unrecoverable(t *testing.T) { } func TestVaultClient_CreateToken_Prestart(t *testing.T) { - t.Parallel() + ci.Parallel(t) vconfig := &config.VaultConfig{ Enabled: helper.BoolToPtr(true), Token: uuid.Generate(), @@ -1395,7 +1396,7 @@ func TestVaultClient_MarkForRevocation(t *testing.T) { } func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) { - t.Parallel() + ci.Parallel(t) vconfig := &config.VaultConfig{ Enabled: helper.BoolToPtr(true), Token: uuid.Generate(), @@ -1441,7 +1442,7 @@ func TestVaultClient_RevokeTokens_PreEstablishs(t *testing.T) { // TestVaultClient_RevokeTokens_Failures_TTL asserts that // the registered TTL doesn't get extended on retries func TestVaultClient_RevokeTokens_Failures_TTL(t *testing.T) { - t.Parallel() + ci.Parallel(t) vconfig := &config.VaultConfig{ Enabled: helper.BoolToPtr(true), Token: uuid.Generate(), @@ -1485,7 +1486,7 @@ func TestVaultClient_RevokeTokens_Failures_TTL(t *testing.T) { } func TestVaultClient_RevokeTokens_Root(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1550,7 +1551,7 @@ func TestVaultClient_RevokeTokens_Root(t *testing.T) { } func TestVaultClient_RevokeTokens_Role(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1619,7 +1620,7 @@ func TestVaultClient_RevokeTokens_Role(t *testing.T) { // TestVaultClient_RevokeTokens_Idempotent asserts that token revocation // is idempotent, and can cope with cases if token was deleted out of band. func TestVaultClient_RevokeTokens_Idempotent(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1699,7 +1700,7 @@ func TestVaultClient_RevokeTokens_Idempotent(t *testing.T) { // TestVaultClient_RevokeDaemon_Bounded asserts that token revocation // batches are bounded in size. 
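
A standalone sketch of the bounding behavior named in the comment above, with an illustrative batch size (the real limit and RPC plumbing live in the code under test, not in this hunk):

    package main

    import "fmt"

    const maxBatchSize = 3 // illustrative; not the real bound

    // nextBatch drains at most maxBatchSize tokens per revocation pass,
    // so a backlog can never produce one unbounded request.
    func nextBatch(pending []string) (batch, rest []string) {
        if len(pending) > maxBatchSize {
            return pending[:maxBatchSize], pending[maxBatchSize:]
        }
        return pending, nil
    }

    func main() {
        pending := []string{"t1", "t2", "t3", "t4", "t5"}
        for len(pending) > 0 {
            var batch []string
            batch, pending = nextBatch(pending)
            fmt.Println("revoking", batch) // revoke RPC would go here
        }
    }
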
func TestVaultClient_RevokeDaemon_Bounded(t *testing.T) { - t.Parallel() + ci.Parallel(t) v := testutil.NewTestVault(t) defer v.Stop() @@ -1780,6 +1781,8 @@ func waitForConnection(v *vaultClient, t *testing.T) { } func TestVaultClient_nextBackoff(t *testing.T) { + ci.Parallel(t) + simpleCases := []struct { name string initBackoff float64 diff --git a/nomad/volumewatcher/volume_watcher_test.go b/nomad/volumewatcher/volume_watcher_test.go index 4bb4ddae4..5c6b39c8d 100644 --- a/nomad/volumewatcher/volume_watcher_test.go +++ b/nomad/volumewatcher/volume_watcher_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -12,7 +13,7 @@ import ( ) func TestVolumeWatch_Reap(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv := &MockRPCServer{ @@ -81,6 +82,7 @@ func TestVolumeWatch_Reap(t *testing.T) { } func TestVolumeReapBadState(t *testing.T) { + ci.Parallel(t) store := state.TestStateStore(t) err := state.TestBadCSIState(t, store) diff --git a/nomad/volumewatcher/volumes_watcher_test.go b/nomad/volumewatcher/volumes_watcher_test.go index 47f1c970a..c5c900674 100644 --- a/nomad/volumewatcher/volumes_watcher_test.go +++ b/nomad/volumewatcher/volumes_watcher_test.go @@ -5,6 +5,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/state" @@ -15,7 +16,7 @@ import ( // TestVolumeWatch_EnableDisable tests the watcher registration logic that needs // to happen during leader step-up/step-down func TestVolumeWatch_EnableDisable(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv := &MockRPCServer{} @@ -55,7 +56,7 @@ func TestVolumeWatch_EnableDisable(t *testing.T) { // TestVolumeWatch_LeadershipTransition tests the correct behavior of // claim reaping across leader step-up/step-down func TestVolumeWatch_LeadershipTransition(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv := &MockRPCServer{} @@ -139,7 +140,7 @@ func TestVolumeWatch_LeadershipTransition(t *testing.T) { // TestVolumeWatch_StartStop tests the start and stop of the watcher when // it receives notifcations and has completed its work func TestVolumeWatch_StartStop(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv := &MockStatefulRPCServer{} @@ -234,7 +235,7 @@ func TestVolumeWatch_StartStop(t *testing.T) { // TestVolumeWatch_RegisterDeregister tests the start and stop of // watchers around registration func TestVolumeWatch_RegisterDeregister(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) srv := &MockStatefulRPCServer{} diff --git a/nomad/worker_test.go b/nomad/worker_test.go index 0e872dedd..d8d5f4481 100644 --- a/nomad/worker_test.go +++ b/nomad/worker_test.go @@ -10,6 +10,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" "github.com/hashicorp/nomad/helper/testlog" @@ -64,7 +65,7 @@ func NewTestWorker(shutdownCtx context.Context, srv *Server) *Worker { } func TestWorker_dequeueEvaluation(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -102,7 +103,7 @@ func TestWorker_dequeueEvaluation(t *testing.T) { // Test that the worker 
picks up the correct wait index when there are multiple // evals for the same job. func TestWorker_dequeueEvaluation_SerialJobs(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -172,7 +173,7 @@ func TestWorker_dequeueEvaluation_SerialJobs(t *testing.T) { } func TestWorker_dequeueEvaluation_paused(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -221,7 +222,7 @@ func TestWorker_dequeueEvaluation_paused(t *testing.T) { } func TestWorker_dequeueEvaluation_shutdown(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -252,7 +253,7 @@ func TestWorker_dequeueEvaluation_shutdown(t *testing.T) { } func TestWorker_Shutdown(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -276,7 +277,7 @@ func TestWorker_Shutdown(t *testing.T) { } func TestWorker_Shutdown_paused(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -303,7 +304,7 @@ func TestWorker_Shutdown_paused(t *testing.T) { } func TestWorker_sendAck(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -352,7 +353,7 @@ func TestWorker_sendAck(t *testing.T) { } func TestWorker_waitForIndex(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -392,7 +393,7 @@ func TestWorker_waitForIndex(t *testing.T) { } func TestWorker_invokeScheduler(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -413,7 +414,7 @@ func TestWorker_invokeScheduler(t *testing.T) { } func TestWorker_SubmitPlan(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -482,7 +483,7 @@ func TestWorker_SubmitPlan(t *testing.T) { } func TestWorker_SubmitPlanNormalizedAllocations(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -535,7 +536,7 @@ func TestWorker_SubmitPlanNormalizedAllocations(t *testing.T) { } func TestWorker_SubmitPlan_MissingNodeRefresh(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -609,7 +610,7 @@ func TestWorker_SubmitPlan_MissingNodeRefresh(t *testing.T) { } func TestWorker_UpdateEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -660,7 +661,7 @@ func TestWorker_UpdateEval(t *testing.T) { } func TestWorker_CreateEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -712,7 +713,7 @@ func TestWorker_CreateEval(t *testing.T) { } func TestWorker_ReblockEval(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -805,7 +806,7 @@ func TestWorker_ReblockEval(t *testing.T) { } func TestWorker_Info(t *testing.T) { - t.Parallel() + ci.Parallel(t) s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 @@ -830,7 +831,7 @@ const ( ) func TestWorker_SetPause(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) srv := &Server{ logger: logger, @@ -869,7 +870,7 @@ func TestWorker_SetPause(t 
*testing.T) { } func TestWorker_SetPause_OutOfOrderEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) logger := testlog.HCLogger(t) srv := &Server{ logger: logger, diff --git a/plugins/base/plugin_test.go b/plugins/base/plugin_test.go index 19172f009..1f7954684 100644 --- a/plugins/base/plugin_test.go +++ b/plugins/base/plugin_test.go @@ -5,6 +5,7 @@ import ( pb "github.com/golang/protobuf/proto" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/shared/hclspec" "github.com/stretchr/testify/require" @@ -13,7 +14,7 @@ import ( ) func TestBasePlugin_PluginInfo_GRPC(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var ( @@ -79,7 +80,7 @@ func TestBasePlugin_PluginInfo_GRPC(t *testing.T) { } func TestBasePlugin_ConfigSchema(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) mock := &MockPlugin{ @@ -110,7 +111,7 @@ func TestBasePlugin_ConfigSchema(t *testing.T) { } func TestBasePlugin_SetConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var receivedData []byte diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index 6554662c7..35abf626a 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -9,6 +9,7 @@ import ( csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/protobuf/ptypes/wrappers" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" fake "github.com/hashicorp/nomad/plugins/csi/testing" "github.com/stretchr/testify/require" @@ -41,6 +42,8 @@ func newTestClient(t *testing.T) (*fake.IdentityClient, *fake.ControllerClient, } func TestClient_RPC_PluginProbe(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -99,6 +102,8 @@ func TestClient_RPC_PluginProbe(t *testing.T) { } func TestClient_RPC_PluginInfo(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -152,6 +157,8 @@ func TestClient_RPC_PluginInfo(t *testing.T) { } func TestClient_RPC_PluginGetCapabilities(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -215,6 +222,8 @@ func TestClient_RPC_PluginGetCapabilities(t *testing.T) { } func TestClient_RPC_ControllerGetCapabilities(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -313,6 +322,8 @@ func TestClient_RPC_ControllerGetCapabilities(t *testing.T) { } func TestClient_RPC_NodeGetCapabilities(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -371,6 +382,8 @@ func TestClient_RPC_NodeGetCapabilities(t *testing.T) { } func TestClient_RPC_ControllerPublishVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Request *ControllerPublishVolumeRequest @@ -436,6 +449,8 @@ func TestClient_RPC_ControllerPublishVolume(t *testing.T) { } func TestClient_RPC_ControllerUnpublishVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Request *ControllerUnpublishVolumeRequest @@ -482,6 +497,7 @@ func TestClient_RPC_ControllerUnpublishVolume(t *testing.T) { } func TestClient_RPC_ControllerValidateVolume(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -706,6 +722,7 @@ func TestClient_RPC_ControllerValidateVolume(t *testing.T) { } func TestClient_RPC_ControllerCreateVolume(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -833,6 +850,7 @@ func 
TestClient_RPC_ControllerCreateVolume(t *testing.T) { } func TestClient_RPC_ControllerDeleteVolume(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -875,6 +893,7 @@ func TestClient_RPC_ControllerDeleteVolume(t *testing.T) { } func TestClient_RPC_ControllerListVolume(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -969,6 +988,7 @@ func TestClient_RPC_ControllerListVolume(t *testing.T) { } func TestClient_RPC_ControllerCreateSnapshot(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -1030,6 +1050,7 @@ func TestClient_RPC_ControllerCreateSnapshot(t *testing.T) { } func TestClient_RPC_ControllerDeleteSnapshot(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -1072,6 +1093,7 @@ func TestClient_RPC_ControllerDeleteSnapshot(t *testing.T) { } func TestClient_RPC_ControllerListSnapshots(t *testing.T) { + ci.Parallel(t) cases := []struct { Name string @@ -1136,6 +1158,8 @@ func TestClient_RPC_ControllerListSnapshots(t *testing.T) { } func TestClient_RPC_NodeStageVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -1177,6 +1201,8 @@ func TestClient_RPC_NodeStageVolume(t *testing.T) { } func TestClient_RPC_NodeUnstageVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ResponseErr error @@ -1214,6 +1240,8 @@ func TestClient_RPC_NodeUnstageVolume(t *testing.T) { } func TestClient_RPC_NodePublishVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string Request *NodePublishVolumeRequest @@ -1269,6 +1297,8 @@ func TestClient_RPC_NodePublishVolume(t *testing.T) { } } func TestClient_RPC_NodeUnpublishVolume(t *testing.T) { + ci.Parallel(t) + cases := []struct { Name string ExternalID string diff --git a/plugins/device/plugin_test.go b/plugins/device/plugin_test.go index a07fa329b..52629489f 100644 --- a/plugins/device/plugin_test.go +++ b/plugins/device/plugin_test.go @@ -8,6 +8,7 @@ import ( pb "github.com/golang/protobuf/proto" plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/base" @@ -21,7 +22,7 @@ import ( ) func TestDevicePlugin_PluginInfo(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var ( @@ -90,7 +91,7 @@ func TestDevicePlugin_PluginInfo(t *testing.T) { } func TestDevicePlugin_ConfigSchema(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) mock := &MockDevicePlugin{ @@ -124,7 +125,7 @@ func TestDevicePlugin_ConfigSchema(t *testing.T) { } func TestDevicePlugin_SetConfig(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) var receivedData []byte @@ -184,7 +185,7 @@ func TestDevicePlugin_SetConfig(t *testing.T) { } func TestDevicePlugin_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) devices1 := []*DeviceGroup{ @@ -288,7 +289,7 @@ func TestDevicePlugin_Fingerprint(t *testing.T) { } func TestDevicePlugin_Fingerprint_StreamErr(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ferr := fmt.Errorf("mock fingerprinting failed") @@ -348,7 +349,7 @@ func TestDevicePlugin_Fingerprint_StreamErr(t *testing.T) { } func TestDevicePlugin_Fingerprint_CancelCtx(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) mock := &MockDevicePlugin{ @@ -408,7 +409,7 @@ func TestDevicePlugin_Fingerprint_CancelCtx(t *testing.T) { } func TestDevicePlugin_Reserve(t *testing.T) { - 
t.Parallel() + ci.Parallel(t) require := require.New(t) reservation := &ContainerReservation{ @@ -464,7 +465,7 @@ func TestDevicePlugin_Reserve(t *testing.T) { } func TestDevicePlugin_Stats(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) devices1 := []*DeviceGroupStats{ @@ -605,7 +606,7 @@ func TestDevicePlugin_Stats(t *testing.T) { } func TestDevicePlugin_Stats_StreamErr(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) ferr := fmt.Errorf("mock stats failed") @@ -665,7 +666,7 @@ func TestDevicePlugin_Stats_StreamErr(t *testing.T) { } func TestDevicePlugin_Stats_CancelCtx(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) mock := &MockDevicePlugin{ diff --git a/plugins/drivers/testutils/testing_test.go b/plugins/drivers/testutils/testing_test.go index 0c0c9efe2..cd368a3bd 100644 --- a/plugins/drivers/testutils/testing_test.go +++ b/plugins/drivers/testutils/testing_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" pstructs "github.com/hashicorp/nomad/plugins/shared/structs" @@ -19,6 +20,8 @@ var _ drivers.DriverPlugin = (*MockDriver)(nil) // Very simple test to ensure the test harness works as expected func TestDriverHarness(t *testing.T) { + ci.Parallel(t) + handle := &drivers.TaskHandle{Config: &drivers.TaskConfig{Name: "mock"}} d := &MockDriver{ StartTaskF: func(task *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) { @@ -38,7 +41,7 @@ type testDriverState struct { } func TestBaseDriver_Fingerprint(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) fingerprints := []*drivers.Fingerprint{ @@ -100,7 +103,7 @@ func TestBaseDriver_Fingerprint(t *testing.T) { } func TestBaseDriver_RecoverTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) // build driver state and encode it into proto msg @@ -130,7 +133,7 @@ func TestBaseDriver_RecoverTask(t *testing.T) { } func TestBaseDriver_StartTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) cfg := &drivers.TaskConfig{ @@ -162,7 +165,7 @@ func TestBaseDriver_StartTask(t *testing.T) { } func TestBaseDriver_WaitTask(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) result := &drivers.ExitResult{ExitCode: 1, Signal: 9} @@ -200,7 +203,7 @@ func TestBaseDriver_WaitTask(t *testing.T) { } func TestBaseDriver_TaskEvents(t *testing.T) { - t.Parallel() + ci.Parallel(t) require := require.New(t) now := time.Now().UTC().Truncate(time.Millisecond) @@ -263,6 +266,8 @@ func TestBaseDriver_TaskEvents(t *testing.T) { } func TestBaseDriver_Capabilities(t *testing.T) { + ci.Parallel(t) + capabilities := &drivers.Capabilities{ NetIsolationModes: []drivers.NetIsolationMode{ drivers.NetIsolationModeHost, diff --git a/scheduler/annotate_test.go b/scheduler/annotate_test.go index 57c95ce6e..9f651fc70 100644 --- a/scheduler/annotate_test.go +++ b/scheduler/annotate_test.go @@ -4,10 +4,13 @@ import ( "reflect" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" ) func TestAnnotateTaskGroup_Updates(t *testing.T) { + ci.Parallel(t) + annotations := &structs.PlanAnnotations{ DesiredTGUpdates: map[string]*structs.DesiredUpdates{ "foo": { @@ -50,6 +53,8 @@ func TestAnnotateTaskGroup_Updates(t *testing.T) { } func TestAnnotateCountChange_NonEdited(t *testing.T) { + ci.Parallel(t) + tg := 
&structs.TaskGroupDiff{} tgOrig := &structs.TaskGroupDiff{} annotateCountChange(tg) @@ -59,6 +64,8 @@ func TestAnnotateCountChange_NonEdited(t *testing.T) { } func TestAnnotateCountChange(t *testing.T) { + ci.Parallel(t) + up := &structs.FieldDiff{ Type: structs.DiffTypeEdited, Name: "Count", @@ -100,6 +107,8 @@ func TestAnnotateCountChange(t *testing.T) { } func TestAnnotateTask_NonEdited(t *testing.T) { + ci.Parallel(t) + tgd := &structs.TaskGroupDiff{Type: structs.DiffTypeNone} td := &structs.TaskDiff{Type: structs.DiffTypeNone} tdOrig := &structs.TaskDiff{Type: structs.DiffTypeNone} @@ -110,6 +119,8 @@ func TestAnnotateTask_NonEdited(t *testing.T) { } func TestAnnotateTask(t *testing.T) { + ci.Parallel(t) + cases := []struct { Diff *structs.TaskDiff Parent *structs.TaskGroupDiff diff --git a/scheduler/context_test.go b/scheduler/context_test.go index 8187e9cbf..37afc149a 100644 --- a/scheduler/context_test.go +++ b/scheduler/context_test.go @@ -3,6 +3,7 @@ package scheduler import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -27,6 +28,8 @@ func testContext(t testing.TB) (*state.StateStore, *EvalContext) { } func TestEvalContext_ProposedAlloc(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*RankedNode{ { @@ -156,7 +159,7 @@ func TestEvalContext_ProposedAlloc(t *testing.T) { // See https://github.com/hashicorp/nomad/issues/6787 // func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) { - t.Parallel() + ci.Parallel(t) state, ctx := testContext(t) nodes := []*RankedNode{ { @@ -261,6 +264,8 @@ func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) { } func TestEvalEligibility_JobStatus(t *testing.T) { + ci.Parallel(t) + e := NewEvalEligibility() cc := "v1:100" @@ -282,6 +287,8 @@ func TestEvalEligibility_JobStatus(t *testing.T) { } func TestEvalEligibility_TaskGroupStatus(t *testing.T) { + ci.Parallel(t) + e := NewEvalEligibility() cc := "v1:100" tg := "foo" @@ -304,6 +311,8 @@ func TestEvalEligibility_TaskGroupStatus(t *testing.T) { } func TestEvalEligibility_SetJob(t *testing.T) { + ci.Parallel(t) + e := NewEvalEligibility() ne1 := &structs.Constraint{ LTarget: "${attr.kernel.name}", @@ -349,6 +358,8 @@ func TestEvalEligibility_SetJob(t *testing.T) { } func TestEvalEligibility_GetClasses(t *testing.T) { + ci.Parallel(t) + e := NewEvalEligibility() e.SetJobEligibility(true, "v1:1") e.SetJobEligibility(false, "v1:2") @@ -372,6 +383,8 @@ func TestEvalEligibility_GetClasses(t *testing.T) { require.Equal(t, expClasses, actClasses) } func TestEvalEligibility_GetClasses_JobEligible_TaskGroupIneligible(t *testing.T) { + ci.Parallel(t) + e := NewEvalEligibility() e.SetJobEligibility(true, "v1:1") e.SetTaskGroupEligibility(false, "foo", "v1:1") @@ -395,6 +408,8 @@ func TestEvalEligibility_GetClasses_JobEligible_TaskGroupIneligible(t *testing.T } func TestPortCollisionEvent_Copy(t *testing.T) { + ci.Parallel(t) + ev := &PortCollisionEvent{ Reason: "original", Node: mock.Node(), @@ -425,6 +440,8 @@ func TestPortCollisionEvent_Copy(t *testing.T) { } func TestPortCollisionEvent_Sanitize(t *testing.T) { + ci.Parallel(t) + ev := &PortCollisionEvent{ Reason: "original", Node: mock.Node(), diff --git a/scheduler/device_test.go b/scheduler/device_test.go index 332165c4b..7bf6319bc 100644 --- a/scheduler/device_test.go +++ b/scheduler/device_test.go @@ -3,6 +3,7 @@ package scheduler import ( "testing" + "github.com/hashicorp/nomad/ci" 
"github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -88,6 +89,8 @@ func collectInstanceIDs(devices ...*structs.NodeDeviceResource) []string { // Test that asking for a device that isn't fully specified works. func TestDeviceAllocator_Allocate_GenericRequest(t *testing.T) { + ci.Parallel(t) + require := require.New(t) _, ctx := testContext(t) n := devNode() @@ -109,6 +112,8 @@ func TestDeviceAllocator_Allocate_GenericRequest(t *testing.T) { // Test that asking for a device that is fully specified works. func TestDeviceAllocator_Allocate_FullyQualifiedRequest(t *testing.T) { + ci.Parallel(t) + require := require.New(t) _, ctx := testContext(t) n := devNode() @@ -130,6 +135,8 @@ func TestDeviceAllocator_Allocate_FullyQualifiedRequest(t *testing.T) { // Test that asking for a device with too much count doesn't place func TestDeviceAllocator_Allocate_NotEnoughInstances(t *testing.T) { + ci.Parallel(t) + require := require.New(t) _, ctx := testContext(t) n := devNode() @@ -147,6 +154,8 @@ func TestDeviceAllocator_Allocate_NotEnoughInstances(t *testing.T) { // Test that asking for a device with constraints works func TestDeviceAllocator_Allocate_Constraints(t *testing.T) { + ci.Parallel(t) + n := multipleNvidiaNode() nvidia0 := n.NodeResources.Devices[0] nvidia1 := n.NodeResources.Devices[1] @@ -257,6 +266,8 @@ func TestDeviceAllocator_Allocate_Constraints(t *testing.T) { // Test that asking for a device with affinities works func TestDeviceAllocator_Allocate_Affinities(t *testing.T) { + ci.Parallel(t) + n := multipleNvidiaNode() nvidia0 := n.NodeResources.Devices[0] nvidia1 := n.NodeResources.Devices[1] diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index 1689fd415..16a0ada00 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -15,6 +16,8 @@ import ( ) func TestStaticIterator_Reset(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) var nodes []*structs.Node for i := 0; i < 3; i++ { @@ -46,6 +49,8 @@ func TestStaticIterator_Reset(t *testing.T) { } func TestStaticIterator_SetNodes(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) var nodes []*structs.Node for i := 0; i < 3; i++ { @@ -63,6 +68,8 @@ func TestStaticIterator_SetNodes(t *testing.T) { } func TestRandomIterator(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) var nodes []*structs.Node for i := 0; i < 10; i++ { @@ -83,6 +90,8 @@ func TestRandomIterator(t *testing.T) { } func TestHostVolumeChecker(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -165,6 +174,8 @@ func TestHostVolumeChecker(t *testing.T) { } func TestHostVolumeChecker_ReadOnly(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -233,7 +244,7 @@ func TestHostVolumeChecker_ReadOnly(t *testing.T) { } func TestCSIVolumeChecker(t *testing.T) { - t.Parallel() + ci.Parallel(t) state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -494,6 +505,8 @@ func TestCSIVolumeChecker(t *testing.T) { } func TestNetworkChecker(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) node := func(mode string) *structs.Node { @@ -638,6 +651,8 @@ func TestNetworkChecker(t *testing.T) { } func TestNetworkChecker_bridge_upgrade_path(t 
*testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) t.Run("older client", func(t *testing.T) { @@ -668,6 +683,8 @@ func TestNetworkChecker_bridge_upgrade_path(t *testing.T) { } func TestDriverChecker_DriverInfo(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -717,6 +734,8 @@ func TestDriverChecker_DriverInfo(t *testing.T) { } } func TestDriverChecker_Compatibility(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -768,6 +787,8 @@ func TestDriverChecker_Compatibility(t *testing.T) { } func Test_HealthChecks(t *testing.T) { + ci.Parallel(t) + require := require.New(t) _, ctx := testContext(t) @@ -831,6 +852,8 @@ func Test_HealthChecks(t *testing.T) { } func TestConstraintChecker(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -891,6 +914,8 @@ func TestConstraintChecker(t *testing.T) { } func TestResolveConstraintTarget(t *testing.T) { + ci.Parallel(t) + type tcase struct { target string node *structs.Node @@ -966,6 +991,8 @@ func TestResolveConstraintTarget(t *testing.T) { } func TestCheckConstraint(t *testing.T) { + ci.Parallel(t) + type tcase struct { op string lVal, rVal interface{} @@ -1103,6 +1130,8 @@ func TestCheckConstraint(t *testing.T) { } func TestCheckLexicalOrder(t *testing.T) { + ci.Parallel(t) + type tcase struct { op string lVal, rVal interface{} @@ -1143,7 +1172,7 @@ func TestCheckLexicalOrder(t *testing.T) { } func TestCheckVersionConstraint(t *testing.T) { - t.Parallel() + ci.Parallel(t) type tcase struct { lVal, rVal interface{} @@ -1196,7 +1225,7 @@ func TestCheckVersionConstraint(t *testing.T) { } func TestCheckSemverConstraint(t *testing.T) { - t.Parallel() + ci.Parallel(t) type tcase struct { name string @@ -1258,6 +1287,8 @@ func TestCheckSemverConstraint(t *testing.T) { } func TestCheckRegexpConstraint(t *testing.T) { + ci.Parallel(t) + type tcase struct { lVal, rVal interface{} result bool @@ -1295,6 +1326,8 @@ func TestCheckRegexpConstraint(t *testing.T) { // This test puts allocations on the node to test if it detects infeasibility of // nodes correctly and picks the only feasible one func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1369,6 +1402,8 @@ func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) { } func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1420,6 +1455,8 @@ func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) { } func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1488,6 +1525,8 @@ func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) { // value to detect if the constraint at the job level properly considers all // task groups. 
func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1668,6 +1707,8 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) { // detect if the constraint at the job level properly considers all task groups // when the constraint allows a count greater than one func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1875,6 +1916,8 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) { // there is a plan to re-use that for a new allocation, that the next select // won't select that node. func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -1957,6 +2000,8 @@ func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testin // test if it detects infeasibility of property values correctly and picks the // only feasible one func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -2034,6 +2079,8 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) { // test if it detects infeasibility of property values correctly and picks the // only feasible one func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -2129,6 +2176,8 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin // test if it detects infeasibility of property values correctly and picks the // only feasible one when the constraint is at the task group. 
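
All of these distinct_property cases reduce to the same bookkeeping, sketched standalone below: count existing allocations per property value and refuse values already at the limit (1 when the constraint gives no count). Names are illustrative; the real iterator is in the code under test.

    package main

    import "fmt"

    // feasible returns the property values that can still accept an
    // allocation given current usage and the per-value limit.
    func feasible(used map[string]int, limit int) []string {
        var ok []string
        for val, n := range used {
            if n < limit {
                ok = append(ok, val)
            }
        }
        return ok
    }

    func main() {
        used := map[string]int{"rack1": 2, "rack2": 1}
        fmt.Println(feasible(used, 2)) // [rack2]
    }
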
func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -2290,6 +2339,8 @@ func (c *mockFeasibilityChecker) Feasible(*structs.Node) bool { func (c *mockFeasibilityChecker) calls() int { return c.i } func TestFeasibilityWrapper_JobIneligible(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) @@ -2308,6 +2359,8 @@ func TestFeasibilityWrapper_JobIneligible(t *testing.T) { } func TestFeasibilityWrapper_JobEscapes(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) @@ -2333,6 +2386,8 @@ func TestFeasibilityWrapper_JobEscapes(t *testing.T) { } func TestFeasibilityWrapper_JobAndTg_Eligible(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) @@ -2355,6 +2410,8 @@ func TestFeasibilityWrapper_JobAndTg_Eligible(t *testing.T) { } func TestFeasibilityWrapper_JobEligible_TgIneligible(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) @@ -2377,6 +2434,8 @@ func TestFeasibilityWrapper_JobEligible_TgIneligible(t *testing.T) { } func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) @@ -2404,6 +2463,8 @@ func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) { } func TestSetContainsAny(t *testing.T) { + ci.Parallel(t) + require.True(t, checkSetContainsAny("a", "a")) require.True(t, checkSetContainsAny("a,b", "a")) require.True(t, checkSetContainsAny(" a,b ", "a ")) @@ -2412,6 +2473,8 @@ func TestSetContainsAny(t *testing.T) { } func TestDeviceChecker(t *testing.T) { + ci.Parallel(t) + getTg := func(devices ...*structs.RequestedDevice) *structs.TaskGroup { return &structs.TaskGroup{ Name: "example", @@ -2750,6 +2813,8 @@ func TestDeviceChecker(t *testing.T) { } func TestCheckAttributeConstraint(t *testing.T) { + ci.Parallel(t) + type tcase struct { op string lVal, rVal *psstructs.Attribute diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index 713c6a94e..08d1ebc5a 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -8,6 +8,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -18,6 +19,8 @@ import ( ) func TestServiceSched_JobRegister(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -106,6 +109,7 @@ func TestServiceSched_JobRegister(t *testing.T) { } func TestServiceSched_JobRegister_MemoryMaxHonored(t *testing.T) { + ci.Parallel(t) cases := []struct { name string @@ -218,6 +222,8 @@ func TestServiceSched_JobRegister_MemoryMaxHonored(t *testing.T) { } func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -310,6 +316,8 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { } func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -384,6 +392,8 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { } func 
TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -466,6 +476,8 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { } func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -563,6 +575,8 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { } func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -642,6 +656,8 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { } func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) assert := assert.New(t) @@ -724,6 +740,8 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) // Test job registration with spread configured func TestServiceSched_Spread(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) start := uint8(100) @@ -818,6 +836,8 @@ func TestServiceSched_Spread(t *testing.T) { // Test job registration with even spread across dc func TestServiceSched_EvenSpread(t *testing.T) { + ci.Parallel(t) + assert := assert.New(t) h := NewHarness(t) @@ -891,6 +911,8 @@ func TestServiceSched_EvenSpread(t *testing.T) { } func TestServiceSched_JobRegister_Annotate(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -970,6 +992,8 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) { } func TestServiceSched_JobRegister_CountZero(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1019,6 +1043,8 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) { } func TestServiceSched_JobRegister_AllocFail(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create NO nodes @@ -1093,6 +1119,8 @@ func TestServiceSched_JobRegister_AllocFail(t *testing.T) { } func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a full node @@ -1191,6 +1219,8 @@ func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { } func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create one node @@ -1285,6 +1315,8 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { // This test just ensures the scheduler handles the eval type to avoid // regressions. func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a job and set the task group count to zero. 
@@ -1320,6 +1352,8 @@ func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { } func TestServiceSched_Plan_Partial_Progress(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -1390,6 +1424,8 @@ func TestServiceSched_Plan_Partial_Progress(t *testing.T) { } func TestServiceSched_EvaluateBlockedEval(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a job @@ -1435,6 +1471,8 @@ func TestServiceSched_EvaluateBlockedEval(t *testing.T) { } func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1519,6 +1557,8 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { } func TestServiceSched_JobModify(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1621,6 +1661,8 @@ func TestServiceSched_JobModify(t *testing.T) { } func TestServiceSched_JobModify_Datacenters(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) require := require.New(t) @@ -1701,6 +1743,8 @@ func TestServiceSched_JobModify_Datacenters(t *testing.T) { // on the node but the node doesn't have enough resources to fit the new count + // 1. This tests that we properly discount the resources of existing allocs. func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create one node @@ -1793,6 +1837,8 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { } func TestServiceSched_JobModify_CountZero(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1893,6 +1939,8 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) { } func TestServiceSched_JobModify_Rolling(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1999,6 +2047,8 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) { // allocations as this allows us to assert that destructive changes are done // first. func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node and clear the reserved resources @@ -2119,6 +2169,8 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { } func TestServiceSched_JobModify_Canaries(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -2243,6 +2295,8 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) { } func TestServiceSched_JobModify_InPlace(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -2395,6 +2449,8 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) { // Safe to remove in 0.11.0 as no one should ever be trying to upgrade from 0.8 // to 0.11! func TestServiceSched_JobModify_InPlace08(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create node @@ -2472,6 +2528,8 @@ func TestServiceSched_JobModify_InPlace08(t *testing.T) { } func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -2584,6 +2642,8 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { // a failing allocation gets rescheduled with a penalty to the old // node, but an updated job doesn't apply the penalty. 
func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) require := require.New(t) @@ -2711,6 +2771,8 @@ func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) { } func TestServiceSched_JobDeregister_Purged(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Generate a fake job with allocations @@ -2778,6 +2840,8 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) { } func TestServiceSched_JobDeregister_Stopped(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) require := require.New(t) @@ -2849,6 +2913,8 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) { } func TestServiceSched_NodeDown(t *testing.T) { + ci.Parallel(t) + cases := []struct { desired string client string @@ -2966,6 +3032,8 @@ func TestServiceSched_NodeDown(t *testing.T) { } func TestServiceSched_StopAfterClientDisconnect(t *testing.T) { + ci.Parallel(t) + cases := []struct { stop time.Duration when time.Time @@ -3127,6 +3195,8 @@ func TestServiceSched_StopAfterClientDisconnect(t *testing.T) { } func TestServiceSched_NodeUpdate(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a node @@ -3181,6 +3251,8 @@ func TestServiceSched_NodeUpdate(t *testing.T) { } func TestServiceSched_NodeDrain(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -3262,6 +3334,8 @@ func TestServiceSched_NodeDrain(t *testing.T) { } func TestServiceSched_NodeDrain_Down(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -3374,6 +3448,8 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) { } func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -3427,6 +3503,8 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { // TestServiceSched_NodeDrain_TaskHandle asserts that allocations with task // handles have them propagated to replacement allocations when drained. func TestServiceSched_NodeDrain_TaskHandle(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node := mock.Node() @@ -3517,6 +3595,8 @@ func TestServiceSched_NodeDrain_TaskHandle(t *testing.T) { } func TestServiceSched_RetryLimit(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) h.Planner = &RejectPlan{h} @@ -3567,6 +3647,8 @@ func TestServiceSched_RetryLimit(t *testing.T) { } func TestServiceSched_Reschedule_OnceNow(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -3679,6 +3761,8 @@ func TestServiceSched_Reschedule_OnceNow(t *testing.T) { // Tests that alloc reschedulable at a future time creates a follow up eval func TestServiceSched_Reschedule_Later(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) require := require.New(t) // Create some nodes @@ -3767,6 +3851,8 @@ func TestServiceSched_Reschedule_Later(t *testing.T) { } func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -3908,6 +3994,8 @@ func TestServiceSched_Reschedule_MultipleNow(t *testing.T) { // Tests that old reschedule attempts are pruned func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -4039,6 +4127,8 @@ func TestServiceSched_Reschedule_PruneEvents(t *testing.T) { // Tests that deployments with failed allocs result in placements as long as the // deployment is running. 
func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { + ci.Parallel(t) + for _, failedDeployment := range []bool{false, true} { t.Run(fmt.Sprintf("Failed Deployment: %v", failedDeployment), func(t *testing.T) { h := NewHarness(t) @@ -4125,6 +4215,8 @@ func TestDeployment_FailedAllocs_Reschedule(t *testing.T) { } func TestBatchSched_Run_CompleteAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -4182,6 +4274,8 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) { } func TestBatchSched_Run_FailedAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -4252,6 +4346,8 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) { } func TestBatchSched_Run_LostAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -4339,6 +4435,8 @@ func TestBatchSched_Run_LostAlloc(t *testing.T) { } func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node := mock.DrainNode() @@ -4391,6 +4489,8 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { } func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create two nodes, one that is drained and has a successfully finished @@ -4464,6 +4564,8 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { // This test checks that terminal allocations that receive an in-place updated // are not added to the plan func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -4516,6 +4618,8 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { // This test ensures that terminal jobs from older versions are ignored. func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -4600,6 +4704,8 @@ func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { // This test asserts that an allocation from an old job that is running on a // drained node is cleaned up. func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create two nodes, one that is drained and has a successfully finished @@ -4671,6 +4777,8 @@ func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { // This test asserts that an allocation from a job that is complete on a // drained node is ignored up. func TestBatchSched_NodeDrain_Complete(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create two nodes, one that is drained and has a successfully finished @@ -4735,6 +4843,8 @@ func TestBatchSched_NodeDrain_Complete(t *testing.T) { // task group's count and that it works even if all the allocs have the same // name. 
func TestBatchSched_ScaleDown_SameName(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -4816,6 +4926,8 @@ func TestBatchSched_ScaleDown_SameName(t *testing.T) { } func TestGenericSched_AllocFit_Lifecycle(t *testing.T) { + ci.Parallel(t) + testCases := []struct { Name string NodeCpu int64 @@ -4934,6 +5046,8 @@ func TestGenericSched_AllocFit_Lifecycle(t *testing.T) { } func TestGenericSched_AllocFit_MemoryOversubscription(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node := mock.Node() node.NodeResources.Cpu.CpuShares = 10000 @@ -4979,6 +5093,8 @@ func TestGenericSched_AllocFit_MemoryOversubscription(t *testing.T) { } func TestGenericSched_ChainedAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -5068,6 +5184,8 @@ func TestGenericSched_ChainedAlloc(t *testing.T) { } func TestServiceSched_NodeDrain_Sticky(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -5129,6 +5247,8 @@ func TestServiceSched_NodeDrain_Sticky(t *testing.T) { // This test ensures that when a job is stopped, the scheduler properly cancels // an outstanding deployment. func TestServiceSched_CancelDeployment_Stopped(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Generate a fake job @@ -5203,6 +5323,8 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) { // This test ensures that when a job is updated and had an old deployment, the scheduler properly cancels // the deployment. func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Generate a fake job @@ -5274,6 +5396,7 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) { // Various table driven tests for carry forward // of past reschedule events func Test_updateRescheduleTracker(t *testing.T) { + ci.Parallel(t) t1 := time.Now().UTC() alloc := mock.Alloc() @@ -5509,6 +5632,8 @@ func Test_updateRescheduleTracker(t *testing.T) { } func TestServiceSched_Preemption(t *testing.T) { + ci.Parallel(t) + require := require.New(t) h := NewHarness(t) @@ -5670,6 +5795,8 @@ func TestServiceSched_Preemption(t *testing.T) { // TestServiceSched_Migrate_NonCanary asserts that when rescheduling // non-canary allocations, a single allocation is migrated func TestServiceSched_Migrate_NonCanary(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node1 := mock.Node() @@ -5740,6 +5867,8 @@ func TestServiceSched_Migrate_NonCanary(t *testing.T) { // Canaries should be replaced by canaries, and non-canaries should be replaced // with the latest promoted version. func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node1 := mock.Node() @@ -5907,6 +6036,8 @@ func TestServiceSched_Migrate_CanaryStatus(t *testing.T) { // picks the latest deployment that have either been marked as promoted or is considered // non-destructive so it doesn't use canaries. 
func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // This test tests downgradedJobForPlacement directly to ease testing many different scenarios @@ -6024,6 +6155,8 @@ func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) { // TestServiceSched_RunningWithNextAllocation asserts that if a running allocation has // NextAllocation Set, the allocation is not ignored and will be stopped func TestServiceSched_RunningWithNextAllocation(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) node1 := mock.Node() @@ -6096,6 +6229,8 @@ func TestServiceSched_RunningWithNextAllocation(t *testing.T) { } func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) require := require.New(t) @@ -6265,7 +6400,7 @@ func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) { // TestPropagateTaskState asserts that propagateTaskState only copies state // when the previous allocation is lost or draining. func TestPropagateTaskState(t *testing.T) { - t.Parallel() + ci.Parallel(t) const taskName = "web" taskHandle := &structs.TaskHandle{ diff --git a/scheduler/preemption_test.go b/scheduler/preemption_test.go index 0c3784498..9179405ba 100644 --- a/scheduler/preemption_test.go +++ b/scheduler/preemption_test.go @@ -2,10 +2,10 @@ package scheduler import ( "fmt" + "strconv" "testing" - "strconv" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,6 +14,8 @@ import ( ) func TestResourceDistance(t *testing.T) { + ci.Parallel(t) + resourceAsk := &structs.ComparableResources{ Flattened: structs.AllocatedTaskResources{ Cpu: structs.AllocatedCpuResources{ @@ -142,6 +144,8 @@ func TestResourceDistance(t *testing.T) { } func TestPreemption(t *testing.T) { + ci.Parallel(t) + type testCase struct { desc string currentAllocations []*structs.Allocation @@ -1383,6 +1387,8 @@ func TestPreemption(t *testing.T) { // TestPreemptionMultiple tests evicting multiple allocations in the same time func TestPreemptionMultiple(t *testing.T) { + ci.Parallel(t) + // The test setup: // * a node with 4 GPUs // * a low priority job with 4 allocs, each is using 1 GPU diff --git a/scheduler/reconcile_test.go b/scheduler/reconcile_test.go index c80252d3b..1a9fc37f7 100644 --- a/scheduler/reconcile_test.go +++ b/scheduler/reconcile_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" @@ -289,6 +290,8 @@ func assertResults(t *testing.T, r *reconcileResults, exp *resultExpectation) { // Tests the reconciler properly handles placements for a job that has no // existing allocations func TestReconciler_Place_NoExisting(t *testing.T) { + ci.Parallel(t) + job := mock.Job() reconciler := NewAllocReconciler( testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, @@ -315,6 +318,8 @@ func TestReconciler_Place_NoExisting(t *testing.T) { // Tests the reconciler properly handles placements for a job that has some // existing allocations func TestReconciler_Place_Existing(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 3 existing allocations @@ -353,6 +358,8 @@ func TestReconciler_Place_Existing(t *testing.T) { // Tests the reconciler properly handles stopping allocations for a job that has // scaled down func TestReconciler_ScaleDown_Partial(t *testing.T) { + ci.Parallel(t) + // Has 
desired 10 job := mock.Job() @@ -392,6 +399,8 @@ func TestReconciler_ScaleDown_Partial(t *testing.T) { // Tests the reconciler properly handles stopping allocations for a job that has // scaled down to zero desired func TestReconciler_ScaleDown_Zero(t *testing.T) { + ci.Parallel(t) + // Set desired 0 job := mock.Job() job.TaskGroups[0].Count = 0 @@ -431,6 +440,8 @@ func TestReconciler_ScaleDown_Zero(t *testing.T) { // Tests the reconciler properly handles stopping allocations for a job that has // scaled down to zero desired where allocs have duplicate names func TestReconciler_ScaleDown_Zero_DuplicateNames(t *testing.T) { + ci.Parallel(t) + // Set desired 0 job := mock.Job() job.TaskGroups[0].Count = 0 @@ -471,6 +482,8 @@ func TestReconciler_ScaleDown_Zero_DuplicateNames(t *testing.T) { // Tests the reconciler properly handles inplace upgrading allocations func TestReconciler_Inplace(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 10 existing allocations @@ -508,6 +521,8 @@ func TestReconciler_Inplace(t *testing.T) { // Tests the reconciler properly handles inplace upgrading allocations while // scaling up func TestReconciler_Inplace_ScaleUp(t *testing.T) { + ci.Parallel(t) + // Set desired 15 job := mock.Job() job.TaskGroups[0].Count = 15 @@ -549,6 +564,8 @@ func TestReconciler_Inplace_ScaleUp(t *testing.T) { // Tests the reconciler properly handles inplace upgrading allocations while // scaling down func TestReconciler_Inplace_ScaleDown(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -591,6 +608,8 @@ func TestReconciler_Inplace_ScaleDown(t *testing.T) { // generates the expected placements for any already-running allocations of // that version. func TestReconciler_Inplace_Rollback(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Count = 4 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ @@ -657,6 +676,8 @@ func TestReconciler_Inplace_Rollback(t *testing.T) { // Tests the reconciler properly handles destructive upgrading allocations func TestReconciler_Destructive(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 10 existing allocations @@ -691,6 +712,8 @@ func TestReconciler_Destructive(t *testing.T) { // Tests the reconciler properly handles destructive upgrading allocations when max_parallel=0 func TestReconciler_DestructiveMaxParallel(t *testing.T) { + ci.Parallel(t) + job := mock.MaxParallelJob() // Create 10 existing allocations @@ -726,6 +749,8 @@ func TestReconciler_DestructiveMaxParallel(t *testing.T) { // Tests the reconciler properly handles destructive upgrading allocations while // scaling up func TestReconciler_Destructive_ScaleUp(t *testing.T) { + ci.Parallel(t) + // Set desired 15 job := mock.Job() job.TaskGroups[0].Count = 15 @@ -766,6 +791,8 @@ func TestReconciler_Destructive_ScaleUp(t *testing.T) { // Tests the reconciler properly handles destructive upgrading allocations while // scaling down func TestReconciler_Destructive_ScaleDown(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -805,6 +832,8 @@ func TestReconciler_Destructive_ScaleDown(t *testing.T) { // Tests the reconciler properly handles lost nodes with allocations func TestReconciler_LostNode(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 10 existing allocations @@ -854,6 +883,8 @@ func TestReconciler_LostNode(t *testing.T) { // Tests the reconciler properly handles lost nodes with allocations while // scaling up func 
TestReconciler_LostNode_ScaleUp(t *testing.T) { + ci.Parallel(t) + // Set desired 15 job := mock.Job() job.TaskGroups[0].Count = 15 @@ -905,6 +936,8 @@ func TestReconciler_LostNode_ScaleUp(t *testing.T) { // Tests the reconciler properly handles lost nodes with allocations while // scaling down func TestReconciler_LostNode_ScaleDown(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -953,6 +986,8 @@ func TestReconciler_LostNode_ScaleDown(t *testing.T) { // Tests the reconciler properly handles draining nodes with allocations func TestReconciler_DrainNode(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 10 existing allocations @@ -1004,6 +1039,8 @@ func TestReconciler_DrainNode(t *testing.T) { // Tests the reconciler properly handles draining nodes with allocations while // scaling up func TestReconciler_DrainNode_ScaleUp(t *testing.T) { + ci.Parallel(t) + // Set desired 15 job := mock.Job() job.TaskGroups[0].Count = 15 @@ -1058,6 +1095,8 @@ func TestReconciler_DrainNode_ScaleUp(t *testing.T) { // Tests the reconciler properly handles draining nodes with allocations while // scaling down func TestReconciler_DrainNode_ScaleDown(t *testing.T) { + ci.Parallel(t) + // Set desired 8 job := mock.Job() job.TaskGroups[0].Count = 8 @@ -1111,6 +1150,8 @@ func TestReconciler_DrainNode_ScaleDown(t *testing.T) { // Tests the reconciler properly handles a task group being removed func TestReconciler_RemovedTG(t *testing.T) { + ci.Parallel(t) + job := mock.Job() // Create 10 allocations for a tg that no longer exists @@ -1155,6 +1196,8 @@ func TestReconciler_RemovedTG(t *testing.T) { // Tests the reconciler properly handles a job in stopped states func TestReconciler_JobStopped(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.Stop = true @@ -1217,6 +1260,8 @@ func TestReconciler_JobStopped(t *testing.T) { // Tests the reconciler doesn't update allocs in terminal state // when job is stopped or nil func TestReconciler_JobStopped_TerminalAllocs(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.Stop = true @@ -1279,6 +1324,8 @@ func TestReconciler_JobStopped_TerminalAllocs(t *testing.T) { // Tests the reconciler properly handles jobs with multiple task groups func TestReconciler_MultiTG(t *testing.T) { + ci.Parallel(t) + job := mock.Job() tg2 := job.TaskGroups[0].Copy() tg2.Name = "foo" @@ -1323,6 +1370,8 @@ func TestReconciler_MultiTG(t *testing.T) { // Tests the reconciler properly handles jobs with multiple task groups with // only one having an update stanza and a deployment already being created func TestReconciler_MultiTG_SingleUpdateStanza(t *testing.T) { + ci.Parallel(t) + job := mock.Job() tg2 := job.TaskGroups[0].Copy() tg2.Name = "foo" @@ -1372,6 +1421,8 @@ func TestReconciler_MultiTG_SingleUpdateStanza(t *testing.T) { // Tests delayed rescheduling of failed batch allocations func TestReconciler_RescheduleLater_Batch(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 4 @@ -1466,6 +1517,8 @@ func TestReconciler_RescheduleLater_Batch(t *testing.T) { // Tests delayed rescheduling of failed batch allocations and batching of allocs // with fail times that are close together func TestReconciler_RescheduleLaterWithBatchedEvals_Batch(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 4 @@ -1553,6 +1606,8 @@ func TestReconciler_RescheduleLaterWithBatchedEvals_Batch(t *testing.T) { // Tests rescheduling failed batch allocations func TestReconciler_RescheduleNow_Batch(t *testing.T) 
{ + ci.Parallel(t) + require := require.New(t) // Set desired 4 job := mock.Job() @@ -1635,6 +1690,8 @@ func TestReconciler_RescheduleNow_Batch(t *testing.T) { // Tests rescheduling failed service allocations with desired state stop func TestReconciler_RescheduleLater_Service(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -1718,6 +1775,8 @@ func TestReconciler_RescheduleLater_Service(t *testing.T) { // Tests service allocations with client status complete func TestReconciler_Service_ClientStatusComplete(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -1773,6 +1832,8 @@ func TestReconciler_Service_ClientStatusComplete(t *testing.T) { // Tests service job placement with desired stop and client status complete func TestReconciler_Service_DesiredStop_ClientStatusComplete(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -1833,6 +1894,8 @@ func TestReconciler_Service_DesiredStop_ClientStatusComplete(t *testing.T) { // Tests rescheduling failed service allocations with desired state stop func TestReconciler_RescheduleNow_Service(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -1914,6 +1977,8 @@ func TestReconciler_RescheduleNow_Service(t *testing.T) { // Tests rescheduling failed service allocations when there's clock drift (upto a second) func TestReconciler_RescheduleNow_WithinAllowedTimeWindow(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -1994,6 +2059,8 @@ func TestReconciler_RescheduleNow_WithinAllowedTimeWindow(t *testing.T) { // Tests rescheduling failed service allocations when the eval ID matches and there's a large clock drift func TestReconciler_RescheduleNow_EvalIDMatch(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -2076,6 +2143,8 @@ func TestReconciler_RescheduleNow_EvalIDMatch(t *testing.T) { // Tests rescheduling failed service allocations when there are canaries func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -2185,6 +2254,8 @@ func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) { // Tests rescheduling failed canary service allocations func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -2311,6 +2382,8 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { // Tests rescheduling failed canary service allocations when one has reached its // reschedule limit func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -2438,6 +2511,8 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { // Tests failed service allocations that were already rescheduled won't be rescheduled again func TestReconciler_DontReschedule_PreviouslyRescheduled(t *testing.T) { + ci.Parallel(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -2497,6 +2572,8 @@ func TestReconciler_DontReschedule_PreviouslyRescheduled(t *testing.T) { // Tests the reconciler cancels an old deployment when the job is being stopped func TestReconciler_CancelDeployment_JobStop(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.Stop = true @@ -2595,6 +2672,8 @@ func TestReconciler_CancelDeployment_JobStop(t *testing.T) { // Tests the reconciler cancels an old deployment when the 
job is updated func TestReconciler_CancelDeployment_JobUpdate(t *testing.T) { + ci.Parallel(t) + // Create a base job job := mock.Job() @@ -2672,6 +2751,8 @@ func TestReconciler_CancelDeployment_JobUpdate(t *testing.T) { // Tests the reconciler creates a deployment and does a rolling upgrade with // destructive changes func TestReconciler_CreateDeployment_RollingUpgrade_Destructive(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -2714,6 +2795,8 @@ func TestReconciler_CreateDeployment_RollingUpgrade_Destructive(t *testing.T) { // Tests the reconciler creates a deployment for inplace updates func TestReconciler_CreateDeployment_RollingUpgrade_Inplace(t *testing.T) { + ci.Parallel(t) + jobOld := mock.Job() job := jobOld.Copy() job.Version++ @@ -2757,6 +2840,8 @@ func TestReconciler_CreateDeployment_RollingUpgrade_Inplace(t *testing.T) { // Tests the reconciler creates a deployment when the job has a newer create index func TestReconciler_CreateDeployment_NewerCreateIndex(t *testing.T) { + ci.Parallel(t) + jobOld := mock.Job() job := jobOld.Copy() job.TaskGroups[0].Update = noCanaryUpdate @@ -2804,6 +2889,8 @@ func TestReconciler_CreateDeployment_NewerCreateIndex(t *testing.T) { // Tests the reconciler doesn't creates a deployment if there are no changes func TestReconciler_DontCreateDeployment_NoChanges(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -2842,6 +2929,8 @@ func TestReconciler_DontCreateDeployment_NoChanges(t *testing.T) { // Tests the reconciler doesn't place any more canaries when the deployment is // paused or failed func TestReconciler_PausedOrFailedDeployment_NoMoreCanaries(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -2923,6 +3012,8 @@ func TestReconciler_PausedOrFailedDeployment_NoMoreCanaries(t *testing.T) { // Tests the reconciler doesn't place any more allocs when the deployment is // paused or failed func TestReconciler_PausedOrFailedDeployment_NoMorePlacements(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate job.TaskGroups[0].Count = 15 @@ -2988,6 +3079,8 @@ func TestReconciler_PausedOrFailedDeployment_NoMorePlacements(t *testing.T) { // Tests the reconciler doesn't do any more destructive updates when the // deployment is paused or failed func TestReconciler_PausedOrFailedDeployment_NoMoreDestructiveUpdates(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -3062,6 +3155,8 @@ func TestReconciler_PausedOrFailedDeployment_NoMoreDestructiveUpdates(t *testing // Tests the reconciler handles migrating a canary correctly on a draining node func TestReconciler_DrainNode_Canary(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3135,6 +3230,8 @@ func TestReconciler_DrainNode_Canary(t *testing.T) { // Tests the reconciler handles migrating a canary correctly on a lost node func TestReconciler_LostNode_Canary(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3209,6 +3306,8 @@ func TestReconciler_LostNode_Canary(t *testing.T) { // Tests the reconciler handles stopping canaries from older deployments func TestReconciler_StopOldCanaries(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3290,6 +3389,8 @@ func TestReconciler_StopOldCanaries(t *testing.T) { // Tests the reconciler creates new canaries when 
the job changes func TestReconciler_NewCanaries(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3337,6 +3438,8 @@ func TestReconciler_NewCanaries(t *testing.T) { // Tests the reconciler creates new canaries when the job changes and the // canary count is greater than the task group count func TestReconciler_NewCanaries_CountGreater(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Count = 3 job.TaskGroups[0].Update = canaryUpdate.Copy() @@ -3387,6 +3490,8 @@ func TestReconciler_NewCanaries_CountGreater(t *testing.T) { // Tests the reconciler creates new canaries when the job changes for multiple // task groups func TestReconciler_NewCanaries_MultiTG(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate job.TaskGroups = append(job.TaskGroups, job.TaskGroups[0].Copy()) @@ -3443,6 +3548,8 @@ func TestReconciler_NewCanaries_MultiTG(t *testing.T) { // Tests the reconciler creates new canaries when the job changes and scales up func TestReconciler_NewCanaries_ScaleUp(t *testing.T) { + ci.Parallel(t) + // Scale the job up to 15 job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3492,6 +3599,8 @@ func TestReconciler_NewCanaries_ScaleUp(t *testing.T) { // Tests the reconciler creates new canaries when the job changes and scales // down func TestReconciler_NewCanaries_ScaleDown(t *testing.T) { + ci.Parallel(t) + // Scale the job down to 5 job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3542,6 +3651,8 @@ func TestReconciler_NewCanaries_ScaleDown(t *testing.T) { // Tests the reconciler handles filling the names of partially placed canaries func TestReconciler_NewCanaries_FillNames(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = &structs.UpdateStrategy{ Canary: 4, @@ -3611,6 +3722,8 @@ func TestReconciler_NewCanaries_FillNames(t *testing.T) { // Tests the reconciler handles canary promotion by unblocking max_parallel func TestReconciler_PromoteCanaries_Unblock(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -3684,6 +3797,8 @@ func TestReconciler_PromoteCanaries_Unblock(t *testing.T) { // Tests the reconciler handles canary promotion when the canary count equals // the total correctly func TestReconciler_PromoteCanaries_CanariesEqualCount(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate job.TaskGroups[0].Count = 2 @@ -3766,6 +3881,8 @@ func TestReconciler_PromoteCanaries_CanariesEqualCount(t *testing.T) { // Tests the reconciler checks the health of placed allocs to determine the // limit func TestReconciler_DeploymentLimit_HealthAccounting(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -3859,6 +3976,8 @@ func TestReconciler_DeploymentLimit_HealthAccounting(t *testing.T) { // Tests the reconciler handles an alloc on a tainted node during a rolling // update func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -3944,6 +4063,8 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) { // Tests the reconciler handles a failed deployment with allocs on tainted // nodes func TestReconciler_FailedDeployment_TaintedNodes(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4028,6 +4149,8 @@ func TestReconciler_FailedDeployment_TaintedNodes(t *testing.T) { // Tests 
the reconciler handles a run after a deployment is complete // successfully. func TestReconciler_CompleteDeployment(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -4080,6 +4203,8 @@ func TestReconciler_CompleteDeployment(t *testing.T) { // nothing left to place even if there are failed allocations that are part of // the deployment. func TestReconciler_MarkDeploymentComplete_FailedAllocations(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4142,6 +4267,8 @@ func TestReconciler_MarkDeploymentComplete_FailedAllocations(t *testing.T) { // Test that a failed deployment cancels non-promoted canaries func TestReconciler_FailedDeployment_CancelCanaries(t *testing.T) { + ci.Parallel(t) + // Create a job with two task groups job := mock.Job() job.TaskGroups[0].Update = canaryUpdate @@ -4236,6 +4363,8 @@ func TestReconciler_FailedDeployment_CancelCanaries(t *testing.T) { // Test that a failed deployment and updated job works func TestReconciler_FailedDeployment_NewJob(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4306,6 +4435,8 @@ func TestReconciler_FailedDeployment_NewJob(t *testing.T) { // Tests the reconciler marks a deployment as complete func TestReconciler_MarkDeploymentComplete(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4363,6 +4494,8 @@ func TestReconciler_MarkDeploymentComplete(t *testing.T) { // Tests the reconciler handles changing a job such that a deployment is created // while doing a scale up but as the second eval. func TestReconciler_JobChange_ScaleUp_SecondEval(t *testing.T) { + ci.Parallel(t) + // Scale the job up to 15 job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4424,6 +4557,8 @@ func TestReconciler_JobChange_ScaleUp_SecondEval(t *testing.T) { // Tests the reconciler doesn't stop allocations when doing a rolling upgrade // where the count of the old job allocs is < desired count. func TestReconciler_RollingUpgrade_MissingAllocs(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4470,6 +4605,8 @@ func TestReconciler_RollingUpgrade_MissingAllocs(t *testing.T) { // Tests that the reconciler handles rerunning a batch job in the case that the // allocations are from an older instance of the job. func TestReconciler_Batch_Rerun(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.Type = structs.JobTypeBatch job.TaskGroups[0].Update = nil @@ -4516,6 +4653,8 @@ func TestReconciler_Batch_Rerun(t *testing.T) { // Test that a failed deployment will not result in rescheduling failed allocations func TestReconciler_FailedDeployment_DontReschedule(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate @@ -4574,6 +4713,8 @@ func TestReconciler_FailedDeployment_DontReschedule(t *testing.T) { // Test that a running deployment with failed allocs will not result in // rescheduling failed allocations unless they are marked as reschedulable. 
func TestReconciler_DeploymentWithFailedAllocs_DontReschedule(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate tgName := job.TaskGroups[0].Name @@ -4632,6 +4773,8 @@ func TestReconciler_DeploymentWithFailedAllocs_DontReschedule(t *testing.T) { // Test that a failed deployment cancels non-promoted canaries func TestReconciler_FailedDeployment_AutoRevert_CancelCanaries(t *testing.T) { + ci.Parallel(t) + // Create a job job := mock.Job() job.TaskGroups[0].Count = 3 @@ -4728,6 +4871,8 @@ func TestReconciler_FailedDeployment_AutoRevert_CancelCanaries(t *testing.T) { // Test that a successful deployment with failed allocs will result in // rescheduling failed allocations func TestReconciler_SuccessfulDeploymentWithFailedAllocs_Reschedule(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Update = noCanaryUpdate tgName := job.TaskGroups[0].Name @@ -4782,6 +4927,8 @@ func TestReconciler_SuccessfulDeploymentWithFailedAllocs_Reschedule(t *testing.T // Tests force rescheduling a failed alloc that is past its reschedule limit func TestReconciler_ForceReschedule_Service(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -4858,6 +5005,8 @@ func TestReconciler_ForceReschedule_Service(t *testing.T) { // new allocs should be placed to satisfy the job count, and current allocations are // left unmodified func TestReconciler_RescheduleNot_Service(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 5 @@ -4940,6 +5089,8 @@ func TestReconciler_RescheduleNot_Service(t *testing.T) { // Tests behavior of batch failure with rescheduling policy preventing rescheduling: // current allocations are left unmodified and no follow up func TestReconciler_RescheduleNot_Batch(t *testing.T) { + ci.Parallel(t) + require := require.New(t) // Set desired 4 job := mock.Job() @@ -5017,5 +5168,4 @@ func TestReconciler_RescheduleNot_Batch(t *testing.T) { }, }, }) - } diff --git a/scheduler/reconcile_util_test.go b/scheduler/reconcile_util_test.go index 59772a349..17617cf3c 100644 --- a/scheduler/reconcile_util_test.go +++ b/scheduler/reconcile_util_test.go @@ -3,6 +3,7 @@ package scheduler import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -14,6 +15,8 @@ import ( // aligned. 
// Ensure no regression from: https://github.com/hashicorp/nomad/issues/3008 func TestBitmapFrom(t *testing.T) { + ci.Parallel(t) + input := map[string]*structs.Allocation{ "8": { JobID: "foo", @@ -34,6 +37,8 @@ func TestBitmapFrom(t *testing.T) { } func TestAllocSet_filterByTainted(t *testing.T) { + ci.Parallel(t) + require := require.New(t) nodes := map[string]*structs.Node{ diff --git a/scheduler/scheduler_sysbatch_test.go b/scheduler/scheduler_sysbatch_test.go index acee0affd..dcc3d6ea3 100644 --- a/scheduler/scheduler_sysbatch_test.go +++ b/scheduler/scheduler_sysbatch_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -15,6 +16,8 @@ import ( ) func TestSysBatch_JobRegister(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -74,6 +77,8 @@ func TestSysBatch_JobRegister(t *testing.T) { } func TestSysBatch_JobRegister_AddNode_Running(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -149,6 +154,8 @@ func TestSysBatch_JobRegister_AddNode_Running(t *testing.T) { } func TestSysBatch_JobRegister_AddNode_Dead(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -225,6 +232,8 @@ func TestSysBatch_JobRegister_AddNode_Dead(t *testing.T) { } func TestSysBatch_JobModify(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -313,6 +322,8 @@ func TestSysBatch_JobModify(t *testing.T) { } func TestSysBatch_JobModify_InPlace(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -385,6 +396,8 @@ func TestSysBatch_JobModify_InPlace(t *testing.T) { } func TestSysBatch_JobDeregister_Purged(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -444,6 +457,8 @@ func TestSysBatch_JobDeregister_Purged(t *testing.T) { } func TestSysBatch_JobDeregister_Stopped(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -505,6 +520,8 @@ func TestSysBatch_JobDeregister_Stopped(t *testing.T) { } func TestSysBatch_NodeDown(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a down node @@ -564,6 +581,8 @@ func TestSysBatch_NodeDown(t *testing.T) { } func TestSysBatch_NodeDrain_Down(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -616,6 +635,8 @@ func TestSysBatch_NodeDrain_Down(t *testing.T) { } func TestSysBatch_NodeDrain(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -671,6 +692,8 @@ func TestSysBatch_NodeDrain(t *testing.T) { } func TestSysBatch_NodeUpdate(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a node @@ -713,6 +736,8 @@ func TestSysBatch_NodeUpdate(t *testing.T) { } func TestSysBatch_RetryLimit(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) h.Planner = &RejectPlan{h} @@ -757,6 +782,8 @@ func TestSysBatch_RetryLimit(t *testing.T) { // count for a task group when allocations can't be created on currently // available nodes because of constraint mismatches. 
func TestSysBatch_Queued_With_Constraints(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) nodes := createNodes(t, h, 3) @@ -802,6 +829,8 @@ func TestSysBatch_Queued_With_Constraints(t *testing.T) { } func TestSysBatch_Queued_With_Constraints_PartialMatch(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // linux machines @@ -850,6 +879,8 @@ func TestSysBatch_Queued_With_Constraints_PartialMatch(t *testing.T) { // should be that the TaskGroup constrained to the newly added node class is // added and that the TaskGroup constrained to the ineligible node is ignored. func TestSysBatch_JobConstraint_AddNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create two nodes @@ -995,6 +1026,8 @@ func TestSysBatch_JobConstraint_AddNode(t *testing.T) { // No errors reported when no available nodes prevent placement func TestSysBatch_ExistingAllocNoNodes(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) var node *structs.Node @@ -1074,6 +1107,8 @@ func TestSysBatch_ExistingAllocNoNodes(t *testing.T) { } func TestSysBatch_ConstraintErrors(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) var node *structs.Node @@ -1147,6 +1182,8 @@ func TestSysBatch_ConstraintErrors(t *testing.T) { } func TestSysBatch_ChainedAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1234,6 +1271,8 @@ func TestSysBatch_ChainedAlloc(t *testing.T) { } func TestSysBatch_PlanWithDrainedNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register two nodes with two different classes @@ -1314,6 +1353,8 @@ func TestSysBatch_PlanWithDrainedNode(t *testing.T) { } func TestSysBatch_QueuedAllocsMultTG(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register two nodes with two different classes @@ -1370,6 +1411,8 @@ func TestSysBatch_QueuedAllocsMultTG(t *testing.T) { } func TestSysBatch_Preemption(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create nodes @@ -1654,6 +1697,8 @@ func TestSysBatch_Preemption(t *testing.T) { } func TestSysBatch_canHandle(t *testing.T) { + ci.Parallel(t) + s := SystemScheduler{sysbatch: true} t.Run("sysbatch register", func(t *testing.T) { require.True(t, s.canHandle(structs.EvalTriggerJobRegister)) diff --git a/scheduler/scheduler_system_test.go b/scheduler/scheduler_system_test.go index fc94a6180..63c4c1869 100644 --- a/scheduler/scheduler_system_test.go +++ b/scheduler/scheduler_system_test.go @@ -8,6 +8,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -16,6 +17,8 @@ import ( ) func TestSystemSched_JobRegister(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -75,6 +78,8 @@ func TestSystemSched_JobRegister(t *testing.T) { } func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -148,6 +153,8 @@ func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) { } func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -217,6 +224,8 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) { } func TestSystemSched_ExhaustResources(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create a node @@ -295,6 +304,8 @@ func TestSystemSched_ExhaustResources(t *testing.T) { } func TestSystemSched_JobRegister_Annotate(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create 
some nodes @@ -391,6 +402,8 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) { } func TestSystemSched_JobRegister_AddNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -469,6 +482,8 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) { } func TestSystemSched_JobRegister_AllocFail(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create NO nodes @@ -501,6 +516,8 @@ func TestSystemSched_JobRegister_AllocFail(t *testing.T) { } func TestSystemSched_JobModify(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -588,6 +605,8 @@ func TestSystemSched_JobModify(t *testing.T) { } func TestSystemSched_JobModify_Rolling(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -686,6 +705,8 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) { } func TestSystemSched_JobModify_InPlace(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -766,6 +787,8 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) { } func TestSystemSched_JobModify_RemoveDC(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -851,6 +874,8 @@ func TestSystemSched_JobModify_RemoveDC(t *testing.T) { } func TestSystemSched_JobDeregister_Purged(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -910,6 +935,8 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) { } func TestSystemSched_JobDeregister_Stopped(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -971,6 +998,8 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) { } func TestSystemSched_NodeDown(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a down node @@ -1030,6 +1059,8 @@ func TestSystemSched_NodeDown(t *testing.T) { } func TestSystemSched_NodeDrain_Down(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -1082,6 +1113,8 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) { } func TestSystemSched_NodeDrain(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a draining node @@ -1137,6 +1170,8 @@ func TestSystemSched_NodeDrain(t *testing.T) { } func TestSystemSched_NodeUpdate(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a node @@ -1179,6 +1214,8 @@ func TestSystemSched_NodeUpdate(t *testing.T) { } func TestSystemSched_RetryLimit(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) h.Planner = &RejectPlan{h} @@ -1223,6 +1260,8 @@ func TestSystemSched_RetryLimit(t *testing.T) { // count for a task group when allocations can't be created on currently // available nodes because of constraint mismatches. func TestSystemSched_Queued_With_Constraints(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register a node @@ -1262,6 +1301,8 @@ func TestSystemSched_Queued_With_Constraints(t *testing.T) { // should be that the TaskGroup constrained to the newly added node class is // added and that the TaskGroup constrained to the ineligible node is ignored. 
func TestSystemSched_JobConstraint_AddNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create two nodes @@ -1409,6 +1450,8 @@ func TestSystemSched_JobConstraint_AddNode(t *testing.T) { // No errors reported when no available nodes prevent placement func TestSystemSched_ExistingAllocNoNodes(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) var node *structs.Node @@ -1488,6 +1531,8 @@ func TestSystemSched_ExistingAllocNoNodes(t *testing.T) { // No errors reported when constraints prevent placement func TestSystemSched_ConstraintErrors(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) var node *structs.Node @@ -1559,6 +1604,8 @@ func TestSystemSched_ConstraintErrors(t *testing.T) { } func TestSystemSched_ChainedAlloc(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create some nodes @@ -1647,6 +1694,8 @@ func TestSystemSched_ChainedAlloc(t *testing.T) { } func TestSystemSched_PlanWithDrainedNode(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register two nodes with two different classes @@ -1727,6 +1776,8 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) { } func TestSystemSched_QueuedAllocsMultTG(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Register two nodes with two different classes @@ -1783,6 +1834,8 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) { } func TestSystemSched_Preemption(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) // Create nodes @@ -2066,6 +2119,8 @@ func TestSystemSched_Preemption(t *testing.T) { } func TestSystemSched_canHandle(t *testing.T) { + ci.Parallel(t) + s := SystemScheduler{sysbatch: false} t.Run("system register", func(t *testing.T) { require.True(t, s.canHandle(structs.EvalTriggerJobRegister)) diff --git a/scheduler/select_test.go b/scheduler/select_test.go index 7625acdff..b553882a6 100644 --- a/scheduler/select_test.go +++ b/scheduler/select_test.go @@ -3,12 +3,15 @@ package scheduler import ( "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" ) func TestLimitIterator(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*RankedNode{ { @@ -53,6 +56,8 @@ func TestLimitIterator(t *testing.T) { } func TestLimitIterator_ScoreThreshold(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) type testCase struct { desc string @@ -317,6 +322,8 @@ func TestLimitIterator_ScoreThreshold(t *testing.T) { } func TestMaxScoreIterator(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*RankedNode{ { diff --git a/scheduler/spread_test.go b/scheduler/spread_test.go index bf9059ecb..adba6ffa5 100644 --- a/scheduler/spread_test.go +++ b/scheduler/spread_test.go @@ -1,14 +1,14 @@ package scheduler import ( + "fmt" "math" "math/rand" "sort" "testing" "time" - "fmt" - + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" @@ -17,6 +17,8 @@ import ( ) func TestSpreadIterator_SingleAttribute(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) dcs := []string{"dc1", "dc2", "dc1", "dc1"} var nodes []*RankedNode @@ -175,6 +177,8 @@ func TestSpreadIterator_SingleAttribute(t *testing.T) { } func TestSpreadIterator_MultipleAttributes(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) dcs := []string{"dc1", "dc2", "dc1", "dc1"} rack := []string{"r1", "r1", "r2", "r2"} @@ -276,6 +280,8 @@ func TestSpreadIterator_MultipleAttributes(t *testing.T) 
{ } func TestSpreadIterator_EvenSpread(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) dcs := []string{"dc1", "dc2", "dc1", "dc2", "dc1", "dc2", "dc2", "dc1", "dc1", "dc1"} var nodes []*RankedNode @@ -464,6 +470,8 @@ func TestSpreadIterator_EvenSpread(t *testing.T) { // Test scenarios where the spread iterator sets maximum penalty (-1.0) func TestSpreadIterator_MaxPenalty(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) var nodes []*RankedNode @@ -551,6 +559,8 @@ func TestSpreadIterator_MaxPenalty(t *testing.T) { } func Test_evenSpreadScoreBoost(t *testing.T) { + ci.Parallel(t) + pset := &propertySet{ existingValues: map[string]uint64{}, proposedValues: map[string]uint64{ @@ -580,7 +590,7 @@ func Test_evenSpreadScoreBoost(t *testing.T) { // can prevent quadratic performance but then we need this test to // verify we have satisfactory spread results. func TestSpreadOnLargeCluster(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string nodeCount int @@ -640,7 +650,7 @@ func TestSpreadOnLargeCluster(t *testing.T) { for i := range cases { tc := cases[i] t.Run(tc.name, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) h := NewHarness(t) err := upsertNodes(h, tc.nodeCount, tc.racks) require.NoError(t, err) @@ -814,6 +824,7 @@ func validateEqualSpread(h *Harness) error { } func TestSpreadPanicDowngrade(t *testing.T) { + ci.Parallel(t) h := NewHarness(t) diff --git a/scheduler/stack_test.go b/scheduler/stack_test.go index 2f36e0014..458f27f36 100644 --- a/scheduler/stack_test.go +++ b/scheduler/stack_test.go @@ -6,6 +6,7 @@ import ( "runtime" "testing" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" @@ -55,6 +56,8 @@ func benchmarkServiceStack_MetaKeyConstraint(b *testing.B, key string, numNodes, } func TestServiceStack_SetNodes(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) stack := NewGenericStack(false, ctx) @@ -82,6 +85,8 @@ func TestServiceStack_SetNodes(t *testing.T) { } func TestServiceStack_SetJob(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) stack := NewGenericStack(false, ctx) @@ -97,6 +102,8 @@ func TestServiceStack_SetJob(t *testing.T) { } func TestServiceStack_Select_Size(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -122,6 +129,8 @@ func TestServiceStack_Select_Size(t *testing.T) { } func TestServiceStack_Select_PreferringNodes(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -166,6 +175,8 @@ func TestServiceStack_Select_PreferringNodes(t *testing.T) { } func TestServiceStack_Select_MetricsReset(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -202,6 +213,8 @@ func TestServiceStack_Select_MetricsReset(t *testing.T) { } func TestServiceStack_Select_DriverFilter(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -232,6 +245,8 @@ func TestServiceStack_Select_DriverFilter(t *testing.T) { } func TestServiceStack_Select_CSI(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -308,6 +323,8 @@ func TestServiceStack_Select_CSI(t *testing.T) { } func TestServiceStack_Select_ConstraintFilter(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -348,6 +365,8 @@ func 
TestServiceStack_Select_ConstraintFilter(t *testing.T) { } func TestServiceStack_Select_BinPack_Overflow(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -391,6 +410,8 @@ func TestServiceStack_Select_BinPack_Overflow(t *testing.T) { } func TestSystemStack_SetNodes(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) stack := NewSystemStack(false, ctx) @@ -413,6 +434,8 @@ func TestSystemStack_SetNodes(t *testing.T) { } func TestSystemStack_SetJob(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) stack := NewSystemStack(false, ctx) @@ -428,6 +451,8 @@ func TestSystemStack_SetJob(t *testing.T) { } func TestSystemStack_Select_Size(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{mock.Node()} stack := NewSystemStack(false, ctx) @@ -451,6 +476,8 @@ func TestSystemStack_Select_Size(t *testing.T) { } func TestSystemStack_Select_MetricsReset(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -487,6 +514,8 @@ func TestSystemStack_Select_MetricsReset(t *testing.T) { } func TestSystemStack_Select_DriverFilter(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -526,6 +555,8 @@ func TestSystemStack_Select_DriverFilter(t *testing.T) { } func TestSystemStack_Select_ConstraintFilter(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), @@ -567,6 +598,8 @@ func TestSystemStack_Select_ConstraintFilter(t *testing.T) { } func TestSystemStack_Select_BinPack_Overflow(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) nodes := []*structs.Node{ mock.Node(), diff --git a/scheduler/util_test.go b/scheduler/util_test.go index f03114ba7..fec7d8ad4 100644 --- a/scheduler/util_test.go +++ b/scheduler/util_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" "github.com/hashicorp/nomad/helper" @@ -17,6 +18,8 @@ import ( ) func TestMaterializeTaskGroups(t *testing.T) { + ci.Parallel(t) + job := mock.Job() index := materializeTaskGroups(job) require.Equal(t, 10, len(index)) @@ -35,6 +38,8 @@ func newNode(name string) *structs.Node { } func TestDiffSystemAllocsForNode_Sysbatch_terminal(t *testing.T) { + ci.Parallel(t) + // For a sysbatch job, the scheduler should not re-place an allocation // that has become terminal, unless the job has been updated. 
@@ -99,6 +104,8 @@ func TestDiffSystemAllocsForNode_Sysbatch_terminal(t *testing.T) { } func TestDiffSystemAllocsForNode(t *testing.T) { + ci.Parallel(t) + job := mock.Job() required := materializeTaskGroups(job) @@ -233,6 +240,8 @@ func TestDiffSystemAllocsForNode(t *testing.T) { // Test the desired diff for an updated system job running on a // ineligible node func TestDiffSystemAllocsForNode_ExistingAllocIneligibleNode(t *testing.T) { + ci.Parallel(t) + job := mock.Job() job.TaskGroups[0].Count = 1 required := materializeTaskGroups(job) @@ -284,6 +293,8 @@ func TestDiffSystemAllocsForNode_ExistingAllocIneligibleNode(t *testing.T) { } func TestDiffSystemAllocs(t *testing.T) { + ci.Parallel(t) + job := mock.SystemJob() drainNode := mock.DrainNode() @@ -391,6 +402,8 @@ func TestDiffSystemAllocs(t *testing.T) { } func TestReadyNodesInDCs(t *testing.T) { + ci.Parallel(t) + state := state.TestStateStore(t) node1 := mock.Node() node2 := mock.Node() @@ -421,6 +434,8 @@ func TestReadyNodesInDCs(t *testing.T) { } func TestRetryMax(t *testing.T) { + ci.Parallel(t) + calls := 0 bad := func() (bool, error) { calls += 1 @@ -454,6 +469,8 @@ func TestRetryMax(t *testing.T) { } func TestTaintedNodes(t *testing.T) { + ci.Parallel(t) + state := state.TestStateStore(t) node1 := mock.Node() node2 := mock.Node() @@ -491,6 +508,8 @@ func TestTaintedNodes(t *testing.T) { } func TestShuffleNodes(t *testing.T) { + ci.Parallel(t) + // Use a large number of nodes to make the probability of shuffling to the // original order very low. nodes := []*structs.Node{ @@ -521,6 +540,8 @@ func TestShuffleNodes(t *testing.T) { } func TestTaskUpdatedAffinity(t *testing.T) { + ci.Parallel(t) + j1 := mock.Job() j2 := mock.Job() name := j1.TaskGroups[0].Name @@ -589,6 +610,8 @@ func TestTaskUpdatedAffinity(t *testing.T) { } func TestTaskUpdatedSpread(t *testing.T) { + ci.Parallel(t) + j1 := mock.Job() j2 := mock.Job() name := j1.TaskGroups[0].Name @@ -654,6 +677,8 @@ func TestTaskUpdatedSpread(t *testing.T) { require.False(t, tasksUpdated(j5, j6, name)) } func TestTasksUpdated(t *testing.T) { + ci.Parallel(t) + j1 := mock.Job() j2 := mock.Job() name := j1.TaskGroups[0].Name @@ -789,6 +814,8 @@ func TestTasksUpdated(t *testing.T) { } func TestTasksUpdated_connectServiceUpdated(t *testing.T) { + ci.Parallel(t) + servicesA := []*structs.Service{{ Name: "service1", PortLabel: "1111", @@ -868,7 +895,7 @@ func TestTasksUpdated_connectServiceUpdated(t *testing.T) { } func TestNetworkUpdated(t *testing.T) { - t.Parallel() + ci.Parallel(t) cases := []struct { name string a []*structs.NetworkResource @@ -935,6 +962,8 @@ func TestNetworkUpdated(t *testing.T) { } func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) allocs := []allocTuple{ {Alloc: &structs.Allocation{ID: uuid.Generate()}}, @@ -951,6 +980,8 @@ func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) { } func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) allocs := []allocTuple{ {Alloc: &structs.Allocation{ID: uuid.Generate()}}, @@ -967,6 +998,8 @@ func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) { } func TestSetStatus(t *testing.T) { + ci.Parallel(t) + h := NewHarness(t) logger := testlog.HCLogger(t) eval := mock.Eval() @@ -1027,6 +1060,8 @@ func TestSetStatus(t *testing.T) { } func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) eval := mock.Eval() job := mock.Job() @@ -1082,6 +1117,8 @@ func 
TestInplaceUpdate_ChangedTaskGroup(t *testing.T) { } func TestInplaceUpdate_AllocatedResources(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) eval := mock.Eval() job := mock.Job() @@ -1139,6 +1176,8 @@ func TestInplaceUpdate_AllocatedResources(t *testing.T) { } func TestInplaceUpdate_NoMatch(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) eval := mock.Eval() job := mock.Job() @@ -1190,6 +1229,8 @@ func TestInplaceUpdate_NoMatch(t *testing.T) { } func TestInplaceUpdate_Success(t *testing.T) { + ci.Parallel(t) + state, ctx := testContext(t) eval := mock.Eval() job := mock.Job() @@ -1279,6 +1320,8 @@ func TestInplaceUpdate_Success(t *testing.T) { } func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) { + ci.Parallel(t) + _, ctx := testContext(t) allocs := []allocTuple{ {Alloc: &structs.Allocation{ID: uuid.Generate()}}, @@ -1295,6 +1338,8 @@ func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) { } func TestTaskGroupConstraints(t *testing.T) { + ci.Parallel(t) + constr := &structs.Constraint{RTarget: "bar"} constr2 := &structs.Constraint{LTarget: "foo"} constr3 := &structs.Constraint{Operand: "<"} @@ -1336,6 +1381,8 @@ func TestTaskGroupConstraints(t *testing.T) { } func TestProgressMade(t *testing.T) { + ci.Parallel(t) + noopPlan := &structs.PlanResult{} require.False(t, progressMade(nil) || progressMade(noopPlan), "no progress plan marked as making progress") @@ -1360,6 +1407,8 @@ func TestProgressMade(t *testing.T) { } func TestDesiredUpdates(t *testing.T) { + ci.Parallel(t) + tg1 := &structs.TaskGroup{Name: "foo"} tg2 := &structs.TaskGroup{Name: "bar"} a2 := &structs.Allocation{TaskGroup: "bar"} @@ -1416,6 +1465,8 @@ func TestDesiredUpdates(t *testing.T) { } func TestUtil_AdjustQueuedAllocations(t *testing.T) { + ci.Parallel(t) + logger := testlog.HCLogger(t) alloc1 := mock.Alloc() alloc2 := mock.Alloc() @@ -1451,6 +1502,8 @@ func TestUtil_AdjustQueuedAllocations(t *testing.T) { } func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) { + ci.Parallel(t) + node := mock.Node() node.Status = structs.NodeStatusDown alloc1 := mock.Alloc() @@ -1503,6 +1556,8 @@ func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) { } func TestUtil_connectUpdated(t *testing.T) { + ci.Parallel(t) + t.Run("both nil", func(t *testing.T) { require.False(t, connectUpdated(nil, nil)) }) @@ -1555,6 +1610,8 @@ func TestUtil_connectUpdated(t *testing.T) { } func TestUtil_connectSidecarServiceUpdated(t *testing.T) { + ci.Parallel(t) + t.Run("both nil", func(t *testing.T) { require.False(t, connectSidecarServiceUpdated(nil, nil)) }) diff --git a/testutil/slow.go b/testutil/slow.go deleted file mode 100644 index 1a8088024..000000000 --- a/testutil/slow.go +++ /dev/null @@ -1,15 +0,0 @@ -package testutil - -import ( - "os" - - testing "github.com/mitchellh/go-testing-interface" -) - -// SkipSlow skips a slow test unless the NOMAD_SLOW_TEST environment variable -// is set. -func SkipSlow(t testing.T) { - if os.Getenv("NOMAD_SLOW_TEST") == "" { - t.Skip("Skipping slow test. Set NOMAD_SLOW_TEST=1 to run.") - } -} From 98e743008616f3be76025202a6c2c30e2ffe08de Mon Sep 17 00:00:00 2001 From: James Rasell Date: Wed, 16 Mar 2022 09:42:57 +0100 Subject: [PATCH 65/89] client: avoid double group lookup within groupservice hook setup. 
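The hook constructor already looks up the allocation's task group — the result
is presumably bound to tg earlier in the function, which is what makes the new
line compile — so reuse it instead of walking the job a second time. The change,
in short:

    // before: a second, redundant lookup
    services: cfg.alloc.Job.LookupTaskGroup(cfg.alloc.TaskGroup).Services,

    // after: reuse the tg already resolved earlier in the constructor
    services: tg.Services,

No behavior change; one redundant LookupTaskGroup call removed.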
--- client/allocrunner/groupservice_hook.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/allocrunner/groupservice_hook.go b/client/allocrunner/groupservice_hook.go index 69eae41e8..87bf8ba91 100644 --- a/client/allocrunner/groupservice_hook.go +++ b/client/allocrunner/groupservice_hook.go @@ -78,7 +78,7 @@ func newGroupServiceHook(cfg groupServiceHookConfig) *groupServiceHook { delay: shutdownDelay, networkStatusGetter: cfg.networkStatusGetter, logger: cfg.logger.Named(groupServiceHookName), - services: cfg.alloc.Job.LookupTaskGroup(cfg.alloc.TaskGroup).Services, + services: tg.Services, shutdownDelayCtx: cfg.shutdownDelayCtx, } From bca014b2cb4b8f7587641dfd81b07abd9fc79e09 Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Wed, 16 Mar 2022 08:38:42 -0500 Subject: [PATCH 66/89] ci: explain why ci runs tests in serial now --- ci/slow.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/slow.go b/ci/slow.go index 5becb90b7..f6b8d066a 100644 --- a/ci/slow.go +++ b/ci/slow.go @@ -16,6 +16,9 @@ func SkipSlow(t *testing.T, reason string) { } // Parallel runs t in parallel, unless CI is set to a true value. +// +// In CI (CircleCI / GitHub Actions) we get better performance by running tests +// in serial while not restricting GOMAXPROCS. func Parallel(t *testing.T) { value := os.Getenv("CI") isCI, err := strconv.ParseBool(value) From 8f2b21ab95b4b930393d1ebd44e6845a8877ece4 Mon Sep 17 00:00:00 2001 From: Luiz Aoqui Date: Wed, 16 Mar 2022 11:56:11 -0400 Subject: [PATCH 67/89] tests: move state store namespace tests from ENT (#12308) --- nomad/state/state_store_test.go | 792 ++++++++++++++++++++++++++++++++ 1 file changed, 792 insertions(+) diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 5f1c8e49c..fa2a2759e 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -686,6 +686,81 @@ func TestStateStore_Deployments(t *testing.T) { require.False(t, watchFired(ws)) } +func TestStateStore_Deployments_Namespace(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + + ns1 := mock.Namespace() + ns1.Name = "namespaced" + deploy1 := mock.Deployment() + deploy2 := mock.Deployment() + deploy1.Namespace = ns1.Name + deploy2.Namespace = ns1.Name + + ns2 := mock.Namespace() + ns2.Name = "new-namespace" + deploy3 := mock.Deployment() + deploy4 := mock.Deployment() + deploy3.Namespace = ns2.Name + deploy4.Namespace = ns2.Name + + require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + + // Create watchsets so we can test that update fires the watch + watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()} + _, err := state.DeploymentsByNamespace(watches[0], ns1.Name) + require.NoError(t, err) + _, err = state.DeploymentsByNamespace(watches[1], ns2.Name) + require.NoError(t, err) + + require.NoError(t, state.UpsertDeployment(1001, deploy1)) + require.NoError(t, state.UpsertDeployment(1002, deploy2)) + require.NoError(t, state.UpsertDeployment(1003, deploy3)) + require.NoError(t, state.UpsertDeployment(1004, deploy4)) + require.True(t, watchFired(watches[0])) + require.True(t, watchFired(watches[1])) + + ws := memdb.NewWatchSet() + iter1, err := state.DeploymentsByNamespace(ws, ns1.Name) + require.NoError(t, err) + iter2, err := state.DeploymentsByNamespace(ws, ns2.Name) + require.NoError(t, err) + + var out1 []*structs.Deployment + for { + raw := iter1.Next() + if raw == nil { + break + } + out1 = append(out1, raw.(*structs.Deployment)) + } + + var out2 
[]*structs.Deployment + for { + raw := iter2.Next() + if raw == nil { + break + } + out2 = append(out2, raw.(*structs.Deployment)) + } + + require.Len(t, out1, 2) + require.Len(t, out2, 2) + + for _, deploy := range out1 { + require.Equal(t, ns1.Name, deploy.Namespace) + } + for _, deploy := range out2 { + require.Equal(t, ns2.Name, deploy.Namespace) + } + + index, err := state.Index("deployment") + require.NoError(t, err) + require.EqualValues(t, 1004, index) + require.False(t, watchFired(ws)) +} + func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { ci.Parallel(t) @@ -772,6 +847,313 @@ func TestStateStore_DeploymentsByIDPrefix(t *testing.T) { }) } +func TestStateStore_DeploymentsByIDPrefix_Namespaces(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + deploy1 := mock.Deployment() + deploy1.ID = "aabbbbbb-7bfb-395d-eb95-0685af2176b2" + deploy2 := mock.Deployment() + deploy2.ID = "aabbcbbb-7bfb-395d-eb95-0685af2176b2" + sharedPrefix := "aabb" + + ns1 := mock.Namespace() + ns1.Name = "namespace1" + ns2 := mock.Namespace() + ns2.Name = "namespace2" + deploy1.Namespace = ns1.Name + deploy2.Namespace = ns2.Name + + require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + require.NoError(t, state.UpsertDeployment(1000, deploy1)) + require.NoError(t, state.UpsertDeployment(1001, deploy2)) + + gatherDeploys := func(iter memdb.ResultIterator) []*structs.Deployment { + var deploys []*structs.Deployment + for { + raw := iter.Next() + if raw == nil { + break + } + deploy := raw.(*structs.Deployment) + deploys = append(deploys, deploy) + } + return deploys + } + + ws := memdb.NewWatchSet() + iter1, err := state.DeploymentsByIDPrefix(ws, ns1.Name, sharedPrefix, SortDefault) + require.NoError(t, err) + iter2, err := state.DeploymentsByIDPrefix(ws, ns2.Name, sharedPrefix, SortDefault) + require.NoError(t, err) + + deploysNs1 := gatherDeploys(iter1) + deploysNs2 := gatherDeploys(iter2) + require.Len(t, deploysNs1, 1) + require.Len(t, deploysNs2, 1) + + iter1, err = state.DeploymentsByIDPrefix(ws, ns1.Name, deploy1.ID[:8], SortDefault) + require.NoError(t, err) + + deploysNs1 = gatherDeploys(iter1) + require.Len(t, deploysNs1, 1) + require.False(t, watchFired(ws)) +} + +func TestStateStore_UpsertNamespaces(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + ns1 := mock.Namespace() + ns2 := mock.Namespace() + + // Create a watchset so we can test that upsert fires the watch + ws := memdb.NewWatchSet() + _, err := state.NamespaceByName(ws, ns1.Name) + require.NoError(t, err) + + require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns1, ns2})) + require.True(t, watchFired(ws)) + + ws = memdb.NewWatchSet() + out, err := state.NamespaceByName(ws, ns1.Name) + require.NoError(t, err) + require.Equal(t, ns1, out) + + out, err = state.NamespaceByName(ws, ns2.Name) + require.NoError(t, err) + require.Equal(t, ns2, out) + + index, err := state.Index(TableNamespaces) + require.NoError(t, err) + require.EqualValues(t, 1000, index) + require.False(t, watchFired(ws)) +} + +func TestStateStore_DeleteNamespaces(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + ns1 := mock.Namespace() + ns2 := mock.Namespace() + + require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns1, ns2})) + + // Create a watchset so we can test that delete fires the watch + ws := memdb.NewWatchSet() + _, err := state.NamespaceByName(ws, ns1.Name) + require.NoError(t, err) + + require.NoError(t, state.DeleteNamespaces(1001, []string{ns1.Name, 
ns2.Name})) + require.True(t, watchFired(ws)) + + ws = memdb.NewWatchSet() + out, err := state.NamespaceByName(ws, ns1.Name) + require.NoError(t, err) + require.Nil(t, out) + + out, err = state.NamespaceByName(ws, ns2.Name) + require.NoError(t, err) + require.Nil(t, out) + + index, err := state.Index(TableNamespaces) + require.NoError(t, err) + require.EqualValues(t, 1001, index) + require.False(t, watchFired(ws)) +} + +func TestStateStore_DeleteNamespaces_Default(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + + ns := mock.Namespace() + ns.Name = structs.DefaultNamespace + require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) + + err := state.DeleteNamespaces(1002, []string{ns.Name}) + require.Error(t, err) + require.Contains(t, err.Error(), "can not be deleted") +} + +func TestStateStore_DeleteNamespaces_NonTerminalJobs(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + + ns := mock.Namespace() + require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) + + job := mock.Job() + job.Namespace = ns.Name + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, job)) + + // Create a watchset so we can test that delete fires the watch + ws := memdb.NewWatchSet() + _, err := state.NamespaceByName(ws, ns.Name) + require.NoError(t, err) + + err = state.DeleteNamespaces(1002, []string{ns.Name}) + require.Error(t, err) + require.Contains(t, err.Error(), "one non-terminal") + require.False(t, watchFired(ws)) + + ws = memdb.NewWatchSet() + out, err := state.NamespaceByName(ws, ns.Name) + require.NoError(t, err) + require.NotNil(t, out) + + index, err := state.Index(TableNamespaces) + require.NoError(t, err) + require.EqualValues(t, 1000, index) + require.False(t, watchFired(ws)) +} + +func TestStateStore_Namespaces(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + var namespaces []*structs.Namespace + + for i := 0; i < 10; i++ { + ns := mock.Namespace() + namespaces = append(namespaces, ns) + } + + require.NoError(t, state.UpsertNamespaces(1000, namespaces)) + + // Create a watchset so we can test that getters don't cause it to fire + ws := memdb.NewWatchSet() + iter, err := state.Namespaces(ws) + require.NoError(t, err) + + var out []*structs.Namespace + for { + raw := iter.Next() + if raw == nil { + break + } + ns := raw.(*structs.Namespace) + if ns.Name == structs.DefaultNamespace { + continue + } + out = append(out, ns) + } + + namespaceSort(namespaces) + namespaceSort(out) + require.Equal(t, namespaces, out) + require.False(t, watchFired(ws)) +} + +func TestStateStore_NamespaceNames(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + var namespaces []*structs.Namespace + expectedNames := []string{structs.DefaultNamespace} + + for i := 0; i < 10; i++ { + ns := mock.Namespace() + namespaces = append(namespaces, ns) + expectedNames = append(expectedNames, ns.Name) + } + + err := state.UpsertNamespaces(1000, namespaces) + require.NoError(t, err) + + found, err := state.NamespaceNames() + require.NoError(t, err) + + sort.Strings(expectedNames) + sort.Strings(found) + + require.Equal(t, expectedNames, found) +} + +func TestStateStore_NamespaceByNamePrefix(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + ns := mock.Namespace() + + ns.Name = "foobar" + require.NoError(t, state.UpsertNamespaces(1000, []*structs.Namespace{ns})) + + // Create a watchset so we can test that getters don't cause it to fire + ws := memdb.NewWatchSet() + iter, err := state.NamespacesByNamePrefix(ws, 
ns.Name) + require.NoError(t, err) + + gatherNamespaces := func(iter memdb.ResultIterator) []*structs.Namespace { + var namespaces []*structs.Namespace + for { + raw := iter.Next() + if raw == nil { + break + } + ns := raw.(*structs.Namespace) + namespaces = append(namespaces, ns) + } + return namespaces + } + + namespaces := gatherNamespaces(iter) + require.Len(t, namespaces, 1) + require.False(t, watchFired(ws)) + + iter, err = state.NamespacesByNamePrefix(ws, "foo") + require.NoError(t, err) + + namespaces = gatherNamespaces(iter) + require.Len(t, namespaces, 1) + + ns = mock.Namespace() + ns.Name = "foozip" + err = state.UpsertNamespaces(1001, []*structs.Namespace{ns}) + require.NoError(t, err) + require.True(t, watchFired(ws)) + + ws = memdb.NewWatchSet() + iter, err = state.NamespacesByNamePrefix(ws, "foo") + require.NoError(t, err) + + namespaces = gatherNamespaces(iter) + require.Len(t, namespaces, 2) + + iter, err = state.NamespacesByNamePrefix(ws, "foob") + require.NoError(t, err) + + namespaces = gatherNamespaces(iter) + require.Len(t, namespaces, 1) + require.False(t, watchFired(ws)) +} + +func TestStateStore_RestoreNamespace(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + ns := mock.Namespace() + + restore, err := state.Restore() + require.NoError(t, err) + + require.NoError(t, restore.NamespaceRestore(ns)) + restore.Commit() + + ws := memdb.NewWatchSet() + out, err := state.NamespaceByName(ws, ns.Name) + require.NoError(t, err) + require.Equal(t, out, ns) +} + +// namespaceSort is used to sort namespaces by name +func namespaceSort(namespaces []*structs.Namespace) { + sort.Slice(namespaces, func(i, j int) bool { + return namespaces[i].Name < namespaces[j].Name + }) +} + func TestStateStore_UpsertNode_Node(t *testing.T) { ci.Parallel(t) @@ -2168,6 +2550,163 @@ func TestStateStore_JobsByIDPrefix(t *testing.T) { } } +func TestStateStore_JobsByIDPrefix_Namespaces(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + job1 := mock.Job() + job2 := mock.Job() + + ns1 := mock.Namespace() + ns1.Name = "namespace1" + ns2 := mock.Namespace() + ns2.Name = "namespace2" + + jobID := "redis" + job1.ID = jobID + job2.ID = jobID + job1.Namespace = ns1.Name + job2.Namespace = ns2.Name + + require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, job1)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, job2)) + + gatherJobs := func(iter memdb.ResultIterator) []*structs.Job { + var jobs []*structs.Job + for { + raw := iter.Next() + if raw == nil { + break + } + jobs = append(jobs, raw.(*structs.Job)) + } + return jobs + } + + // Try full match + ws := memdb.NewWatchSet() + iter1, err := state.JobsByIDPrefix(ws, ns1.Name, jobID) + require.NoError(t, err) + iter2, err := state.JobsByIDPrefix(ws, ns2.Name, jobID) + require.NoError(t, err) + + jobsNs1 := gatherJobs(iter1) + require.Len(t, jobsNs1, 1) + + jobsNs2 := gatherJobs(iter2) + require.Len(t, jobsNs2, 1) + + // Try prefix + iter1, err = state.JobsByIDPrefix(ws, ns1.Name, "re") + require.NoError(t, err) + iter2, err = state.JobsByIDPrefix(ws, ns2.Name, "re") + require.NoError(t, err) + + jobsNs1 = gatherJobs(iter1) + jobsNs2 = gatherJobs(iter2) + require.Len(t, jobsNs1, 1) + require.Len(t, jobsNs2, 1) + + job3 := mock.Job() + job3.ID = "riak" + job3.Namespace = ns1.Name + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, job3)) + require.True(t, watchFired(ws)) + + ws = memdb.NewWatchSet() 
+ iter1, err = state.JobsByIDPrefix(ws, ns1.Name, "r") + require.NoError(t, err) + iter2, err = state.JobsByIDPrefix(ws, ns2.Name, "r") + require.NoError(t, err) + + jobsNs1 = gatherJobs(iter1) + jobsNs2 = gatherJobs(iter2) + require.Len(t, jobsNs1, 2) + require.Len(t, jobsNs2, 1) + + iter1, err = state.JobsByIDPrefix(ws, ns1.Name, "ri") + require.NoError(t, err) + + jobsNs1 = gatherJobs(iter1) + require.Len(t, jobsNs1, 1) + require.False(t, watchFired(ws)) +} + +func TestStateStore_JobsByNamespace(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + ns1 := mock.Namespace() + ns1.Name = "new" + job1 := mock.Job() + job2 := mock.Job() + job1.Namespace = ns1.Name + job2.Namespace = ns1.Name + + ns2 := mock.Namespace() + ns2.Name = "new-namespace" + job3 := mock.Job() + job4 := mock.Job() + job3.Namespace = ns2.Name + job4.Namespace = ns2.Name + + require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + + // Create watchsets so we can test that update fires the watch + watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()} + _, err := state.JobsByNamespace(watches[0], ns1.Name) + require.NoError(t, err) + _, err = state.JobsByNamespace(watches[1], ns2.Name) + require.NoError(t, err) + + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, job1)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1002, job2)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1003, job3)) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1004, job4)) + require.True(t, watchFired(watches[0])) + require.True(t, watchFired(watches[1])) + + ws := memdb.NewWatchSet() + iter1, err := state.JobsByNamespace(ws, ns1.Name) + require.NoError(t, err) + iter2, err := state.JobsByNamespace(ws, ns2.Name) + require.NoError(t, err) + + var out1 []*structs.Job + for { + raw := iter1.Next() + if raw == nil { + break + } + out1 = append(out1, raw.(*structs.Job)) + } + + var out2 []*structs.Job + for { + raw := iter2.Next() + if raw == nil { + break + } + out2 = append(out2, raw.(*structs.Job)) + } + + require.Len(t, out1, 2) + require.Len(t, out2, 2) + + for _, job := range out1 { + require.Equal(t, ns1.Name, job.Namespace) + } + for _, job := range out2 { + require.Equal(t, ns2.Name, job.Namespace) + } + + index, err := state.Index("jobs") + require.NoError(t, err) + require.EqualValues(t, 1004, index) + require.False(t, watchFired(ws)) +} + func TestStateStore_JobsByPeriodic(t *testing.T) { ci.Parallel(t) @@ -3400,6 +3939,77 @@ func TestStateStore_UpsertEvals_CancelBlocked(t *testing.T) { } } +func TestStateStore_UpsertEvals_Namespace(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + ns1 := mock.Namespace() + ns1.Name = "new" + eval1 := mock.Eval() + eval2 := mock.Eval() + eval1.Namespace = ns1.Name + eval2.Namespace = ns1.Name + + ns2 := mock.Namespace() + ns2.Name = "new-namespace" + eval3 := mock.Eval() + eval4 := mock.Eval() + eval3.Namespace = ns2.Name + eval4.Namespace = ns2.Name + + require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + + // Create watchsets so we can test that update fires the watch + watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()} + _, err := state.EvalsByNamespace(watches[0], ns1.Name) + require.NoError(t, err) + _, err = state.EvalsByNamespace(watches[1], ns2.Name) + require.NoError(t, err) + + require.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval1, eval2, eval3, eval4})) + require.True(t, 
watchFired(watches[0])) + require.True(t, watchFired(watches[1])) + + ws := memdb.NewWatchSet() + iter1, err := state.EvalsByNamespace(ws, ns1.Name) + require.NoError(t, err) + iter2, err := state.EvalsByNamespace(ws, ns2.Name) + require.NoError(t, err) + + var out1 []*structs.Evaluation + for { + raw := iter1.Next() + if raw == nil { + break + } + out1 = append(out1, raw.(*structs.Evaluation)) + } + + var out2 []*structs.Evaluation + for { + raw := iter2.Next() + if raw == nil { + break + } + out2 = append(out2, raw.(*structs.Evaluation)) + } + + require.Len(t, out1, 2) + require.Len(t, out2, 2) + + for _, eval := range out1 { + require.Equal(t, ns1.Name, eval.Namespace) + } + for _, eval := range out2 { + require.Equal(t, ns2.Name, eval.Namespace) + } + + index, err := state.Index("evals") + require.NoError(t, err) + require.EqualValues(t, 1001, index) + require.False(t, watchFired(ws)) +} + func TestStateStore_Update_UpsertEvals_Eval(t *testing.T) { ci.Parallel(t) @@ -3937,6 +4547,57 @@ func TestStateStore_EvalsByIDPrefix(t *testing.T) { }) } +func TestStateStore_EvalsByIDPrefix_Namespaces(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + eval1 := mock.Eval() + eval1.ID = "aabbbbbb-7bfb-395d-eb95-0685af2176b2" + eval2 := mock.Eval() + eval2.ID = "aabbcbbb-7bfb-395d-eb95-0685af2176b2" + sharedPrefix := "aabb" + + ns1 := mock.Namespace() + ns1.Name = "namespace1" + ns2 := mock.Namespace() + ns2.Name = "namespace2" + eval1.Namespace = ns1.Name + eval2.Namespace = ns2.Name + + require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + require.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1, eval2})) + + gatherEvals := func(iter memdb.ResultIterator) []*structs.Evaluation { + var evals []*structs.Evaluation + for { + raw := iter.Next() + if raw == nil { + break + } + evals = append(evals, raw.(*structs.Evaluation)) + } + return evals + } + + ws := memdb.NewWatchSet() + iter1, err := state.EvalsByIDPrefix(ws, ns1.Name, sharedPrefix, SortDefault) + require.NoError(t, err) + iter2, err := state.EvalsByIDPrefix(ws, ns2.Name, sharedPrefix, SortDefault) + require.NoError(t, err) + + evalsNs1 := gatherEvals(iter1) + evalsNs2 := gatherEvals(iter2) + require.Len(t, evalsNs1, 1) + require.Len(t, evalsNs2, 1) + + iter1, err = state.EvalsByIDPrefix(ws, ns1.Name, eval1.ID[:8], SortDefault) + require.NoError(t, err) + + evalsNs1 = gatherEvals(iter1) + require.Len(t, evalsNs1, 1) + require.False(t, watchFired(ws)) +} + func TestStateStore_UpdateAllocsFromClient(t *testing.T) { ci.Parallel(t) @@ -4446,6 +5107,84 @@ func TestStateStore_UpsertAlloc_Deployment(t *testing.T) { require.True(now.Add(pdeadline).Equal(dstate.RequireProgressBy)) } +func TestStateStore_UpsertAlloc_AllocsByNamespace(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + + ns1 := mock.Namespace() + ns1.Name = "namespaced" + alloc1 := mock.Alloc() + alloc2 := mock.Alloc() + alloc1.Namespace = ns1.Name + alloc1.Job.Namespace = ns1.Name + alloc2.Namespace = ns1.Name + alloc2.Job.Namespace = ns1.Name + + ns2 := mock.Namespace() + ns2.Name = "new-namespace" + alloc3 := mock.Alloc() + alloc4 := mock.Alloc() + alloc3.Namespace = ns2.Name + alloc3.Job.Namespace = ns2.Name + alloc4.Namespace = ns2.Name + alloc4.Job.Namespace = ns2.Name + + require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 999, alloc1.Job)) + require.NoError(t, 
state.UpsertJob(structs.MsgTypeTestSetup, 1000, alloc3.Job)) + + // Create watchsets so we can test that update fires the watch + watches := []memdb.WatchSet{memdb.NewWatchSet(), memdb.NewWatchSet()} + _, err := state.AllocsByNamespace(watches[0], ns1.Name) + require.NoError(t, err) + _, err = state.AllocsByNamespace(watches[1], ns2.Name) + require.NoError(t, err) + + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc1, alloc2, alloc3, alloc4})) + require.True(t, watchFired(watches[0])) + require.True(t, watchFired(watches[1])) + + ws := memdb.NewWatchSet() + iter1, err := state.AllocsByNamespace(ws, ns1.Name) + require.NoError(t, err) + iter2, err := state.AllocsByNamespace(ws, ns2.Name) + require.NoError(t, err) + + var out1 []*structs.Allocation + for { + raw := iter1.Next() + if raw == nil { + break + } + out1 = append(out1, raw.(*structs.Allocation)) + } + + var out2 []*structs.Allocation + for { + raw := iter2.Next() + if raw == nil { + break + } + out2 = append(out2, raw.(*structs.Allocation)) + } + + require.Len(t, out1, 2) + require.Len(t, out2, 2) + + for _, alloc := range out1 { + require.Equal(t, ns1.Name, alloc.Namespace) + } + for _, alloc := range out2 { + require.Equal(t, ns2.Name, alloc.Namespace) + } + + index, err := state.Index("allocs") + require.NoError(t, err) + require.EqualValues(t, 1001, index) + require.False(t, watchFired(ws)) +} + // Testing to ensure we keep issue // https://github.com/hashicorp/nomad/issues/2583 fixed func TestStateStore_UpsertAlloc_No_Job(t *testing.T) { @@ -5450,6 +6189,59 @@ func TestStateStore_AllocsByIDPrefix(t *testing.T) { }) } +func TestStateStore_AllocsByIDPrefix_Namespaces(t *testing.T) { + ci.Parallel(t) + + state := testStateStore(t) + alloc1 := mock.Alloc() + alloc1.ID = "aabbbbbb-7bfb-395d-eb95-0685af2176b2" + alloc2 := mock.Alloc() + alloc2.ID = "aabbcbbb-7bfb-395d-eb95-0685af2176b2" + sharedPrefix := "aabb" + + ns1 := mock.Namespace() + ns1.Name = "namespace1" + ns2 := mock.Namespace() + ns2.Name = "namespace2" + + alloc1.Namespace = ns1.Name + alloc2.Namespace = ns2.Name + + require.NoError(t, state.UpsertNamespaces(998, []*structs.Namespace{ns1, ns2})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})) + + gatherAllocs := func(iter memdb.ResultIterator) []*structs.Allocation { + var allocs []*structs.Allocation + for { + raw := iter.Next() + if raw == nil { + break + } + alloc := raw.(*structs.Allocation) + allocs = append(allocs, alloc) + } + return allocs + } + + ws := memdb.NewWatchSet() + iter1, err := state.AllocsByIDPrefix(ws, ns1.Name, sharedPrefix, SortDefault) + require.NoError(t, err) + iter2, err := state.AllocsByIDPrefix(ws, ns2.Name, sharedPrefix, SortDefault) + require.NoError(t, err) + + allocsNs1 := gatherAllocs(iter1) + allocsNs2 := gatherAllocs(iter2) + require.Len(t, allocsNs1, 1) + require.Len(t, allocsNs2, 1) + + iter1, err = state.AllocsByIDPrefix(ws, ns1.Name, alloc1.ID[:8], SortDefault) + require.NoError(t, err) + + allocsNs1 = gatherAllocs(iter1) + require.Len(t, allocsNs1, 1) + require.False(t, watchFired(ws)) +} + func TestStateStore_Allocs(t *testing.T) { ci.Parallel(t) From 3695901441c2471745f0ed2acdab19933d468e98 Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Thu, 17 Mar 2022 08:34:57 -0500 Subject: [PATCH 68/89] ci: use serial testing for api in CI This is a followup to running tests in serial in CI. 
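ci.Parallel calls t.Parallel() only when the CI environment variable does not
parse as a true value, so local runs keep their parallelism while CI runs
serially without restricting GOMAXPROCS. The copied helper keeps the same
shape (the full version is in api/internal/testutil/slow.go below):

    func Parallel(t *testing.T) {
        isCI, err := strconv.ParseBool(os.Getenv("CI"))
        if !isCI || err != nil {
            t.Parallel() // CI unset or unparseable: parallelize as before
        }
    }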
Since the API package cannot import anything outside of api/, copy the ci.Parallel function into api/internal/testutil, and have api tests use that. --- api/acl_test.go | 19 +++++----- api/agent_test.go | 37 +++++++++--------- api/allocations_test.go | 16 ++++---- api/api_test.go | 25 ++++++------- api/compose_test.go | 4 +- api/constraint_test.go | 4 +- api/consul_test.go | 4 ++ api/csi_test.go | 3 +- api/evaluations_test.go | 10 ++--- api/event_stream_test.go | 10 ++--- api/fs_test.go | 8 ++-- api/internal/testutil/slow.go | 29 +++++++++++---- api/ioutil_test.go | 4 +- api/jobs_test.go | 70 +++++++++++++++++------------------ api/namespace_test.go | 11 +++--- api/nodes_test.go | 30 +++++++-------- api/operator_ent_test.go | 2 +- api/operator_metrics_test.go | 4 +- api/operator_test.go | 8 ++-- api/quota_test.go | 15 ++++---- api/regions_test.go | 2 +- api/resources_test.go | 2 + api/scaling_test.go | 5 ++- api/search_test.go | 5 ++- api/sentinel_test.go | 7 ++-- api/services_test.go | 45 +++++++++++----------- api/status_test.go | 4 +- api/system_test.go | 4 +- api/tasks_test.go | 38 ++++++++++++------- api/utils_test.go | 2 + 30 files changed, 239 insertions(+), 188 deletions(-) diff --git a/api/acl_test.go b/api/acl_test.go index 2a460aa8e..6e32df71c 100644 --- a/api/acl_test.go +++ b/api/acl_test.go @@ -3,11 +3,12 @@ package api import ( "testing" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/assert" ) func TestACLPolicies_ListUpsert(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() ap := c.ACLPolicies() @@ -49,7 +50,7 @@ func TestACLPolicies_ListUpsert(t *testing.T) { } func TestACLPolicies_Delete(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() ap := c.ACLPolicies() @@ -84,7 +85,7 @@ func TestACLPolicies_Delete(t *testing.T) { } func TestACLPolicies_Info(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() ap := c.ACLPolicies() @@ -110,7 +111,7 @@ func TestACLPolicies_Info(t *testing.T) { } func TestACLTokens_List(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() at := c.ACLTokens() @@ -129,7 +130,7 @@ func TestACLTokens_List(t *testing.T) { } func TestACLTokens_CreateUpdate(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() at := c.ACLTokens() @@ -158,7 +159,7 @@ func TestACLTokens_CreateUpdate(t *testing.T) { } func TestACLTokens_Info(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() at := c.ACLTokens() @@ -183,7 +184,7 @@ func TestACLTokens_Info(t *testing.T) { } func TestACLTokens_Self(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() at := c.ACLTokens() @@ -213,7 +214,7 @@ func TestACLTokens_Self(t *testing.T) { } func TestACLTokens_Delete(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() at := c.ACLTokens() @@ -237,7 +238,7 @@ func TestACLTokens_Delete(t *testing.T) { } func TestACL_OneTimeToken(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() at := c.ACLTokens() diff --git a/api/agent_test.go b/api/agent_test.go index cd13bab18..9598ba88b 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -9,15 +9,14 @@ import ( "testing" "time" - 
"github.com/kr/pretty" - "github.com/stretchr/testify/require" - "github.com/hashicorp/nomad/api/internal/testutil" + "github.com/kr/pretty" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAgent_Self(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -42,7 +41,7 @@ func TestAgent_Self(t *testing.T) { } func TestAgent_NodeName(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() a := c.Agent() @@ -58,7 +57,7 @@ func TestAgent_NodeName(t *testing.T) { } func TestAgent_Datacenter(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() a := c.Agent() @@ -74,7 +73,7 @@ func TestAgent_Datacenter(t *testing.T) { } func TestAgent_Join(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c1, s1 := makeClient(t, nil, nil) defer s1.Stop() a1 := c1.Agent() @@ -104,7 +103,7 @@ func TestAgent_Join(t *testing.T) { } func TestAgent_Members(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() a := c.Agent() @@ -125,7 +124,7 @@ func TestAgent_Members(t *testing.T) { } func TestAgent_ForceLeave(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() a := c.Agent() @@ -143,7 +142,7 @@ func (a *AgentMember) String() string { } func TestAgents_Sort(t *testing.T) { - t.Parallel() + testutil.Parallel(t) var sortTests = []struct { in []*AgentMember out []*AgentMember @@ -254,7 +253,7 @@ func TestAgents_Sort(t *testing.T) { } func TestAgent_Health(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -269,7 +268,7 @@ func TestAgent_Health(t *testing.T) { // passing in a log level and node ie, which tests monitor // functionality for a specific client node func TestAgent_MonitorWithNode(t *testing.T) { - t.Parallel() + testutil.Parallel(t) rpcPort := 0 c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { rpcPort = c.Ports.RPC @@ -339,7 +338,7 @@ OUTER: // passing in only a log level, which tests the servers // monitor functionality func TestAgent_Monitor(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -379,7 +378,7 @@ OUTER: } func TestAgentCPUProfile(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, token := makeACLClient(t, nil, nil) defer s.Stop() @@ -415,7 +414,7 @@ func TestAgentCPUProfile(t *testing.T) { } func TestAgentTrace(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, token := makeACLClient(t, nil, nil) defer s.Stop() @@ -432,7 +431,7 @@ func TestAgentTrace(t *testing.T) { } func TestAgentProfile(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, token := makeACLClient(t, nil, nil) defer s.Stop() @@ -459,7 +458,7 @@ func TestAgentProfile(t *testing.T) { } func TestAgent_SchedulerWorkerConfig(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -475,7 +474,7 @@ func TestAgent_SchedulerWorkerConfig(t *testing.T) { } func TestAgent_SchedulerWorkerConfig_BadRequest(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -491,7 +490,7 @@ func TestAgent_SchedulerWorkerConfig_BadRequest(t *testing.T) { } func TestAgent_SchedulerWorkersInfo(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() a := c.Agent() diff --git 
a/api/allocations_test.go b/api/allocations_test.go index cc306ec88..c36fe3721 100644 --- a/api/allocations_test.go +++ b/api/allocations_test.go @@ -14,7 +14,7 @@ import ( ) func TestAllocations_List(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true }) @@ -58,7 +58,7 @@ func TestAllocations_List(t *testing.T) { } func TestAllocations_PrefixList(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() a := c.Allocations() @@ -106,7 +106,7 @@ func TestAllocations_PrefixList(t *testing.T) { } func TestAllocations_List_Resources(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true }) @@ -137,7 +137,7 @@ func TestAllocations_List_Resources(t *testing.T) { } func TestAllocations_CreateIndexSort(t *testing.T) { - t.Parallel() + testutil.Parallel(t) allocs := []*AllocationListStub{ {CreateIndex: 2}, {CreateIndex: 1}, @@ -156,7 +156,7 @@ func TestAllocations_CreateIndexSort(t *testing.T) { } func TestAllocations_RescheduleInfo(t *testing.T) { - t.Parallel() + testutil.Parallel(t) // Create a job, task group and alloc job := &Job{ Name: stringToPtr("foo"), @@ -317,7 +317,7 @@ func TestAllocations_ExecErrors(t *testing.T) { } func TestAllocation_ServerTerminalStatus(t *testing.T) { - t.Parallel() + testutil.Parallel(t) testCases := []struct { inputAllocation *Allocation @@ -349,7 +349,7 @@ func TestAllocation_ServerTerminalStatus(t *testing.T) { } func TestAllocation_ClientTerminalStatus(t *testing.T) { - t.Parallel() + testutil.Parallel(t) testCases := []struct { inputAllocation *Allocation @@ -391,7 +391,7 @@ func TestAllocation_ClientTerminalStatus(t *testing.T) { } func TestAllocations_ShouldMigrate(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require.True(t, DesiredTransition{Migrate: boolToPtr(true)}.ShouldMigrate()) require.False(t, DesiredTransition{}.ShouldMigrate()) require.False(t, DesiredTransition{Migrate: boolToPtr(false)}.ShouldMigrate()) diff --git a/api/api_test.go b/api/api_test.go index 6ab82526d..7b5d35346 100644 --- a/api/api_test.go +++ b/api/api_test.go @@ -16,10 +16,9 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/hashicorp/nomad/api/internal/testutil" ) type configCallback func(c *Config) @@ -71,7 +70,7 @@ func makeClient(t *testing.T, cb1 configCallback, } func TestRequestTime(t *testing.T) { - t.Parallel() + testutil.Parallel(t) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { time.Sleep(100 * time.Millisecond) d, err := json.Marshal(struct{ Done bool }{true}) @@ -119,7 +118,7 @@ func TestRequestTime(t *testing.T) { } func TestDefaultConfig_env(t *testing.T) { - t.Parallel() + testutil.Parallel(t) url := "http://1.2.3.4:5678" auth := []string{"nomaduser", "12345"} region := "test" @@ -169,7 +168,7 @@ func TestDefaultConfig_env(t *testing.T) { } func TestSetQueryOptions(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -203,7 +202,7 @@ func TestSetQueryOptions(t *testing.T) { } func TestQueryOptionsContext(t *testing.T) { - t.Parallel() + testutil.Parallel(t) ctx, cancel := context.WithCancel(context.Background()) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -227,7 +226,7 @@ func TestQueryOptionsContext(t *testing.T) { func 
TestWriteOptionsContext(t *testing.T) { // No blocking query to test a real cancel of a pending request so // just test that if we pass a pre-canceled context, writes fail quickly - t.Parallel() + testutil.Parallel(t) c, err := NewClient(DefaultConfig()) if err != nil { @@ -250,7 +249,7 @@ func TestWriteOptionsContext(t *testing.T) { } func TestSetWriteOptions(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -278,7 +277,7 @@ func TestSetWriteOptions(t *testing.T) { } func TestRequestToHTTP(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -306,7 +305,7 @@ func TestRequestToHTTP(t *testing.T) { } func TestParseQueryMeta(t *testing.T) { - t.Parallel() + testutil.Parallel(t) resp := &http.Response{ Header: make(map[string][]string), } @@ -331,7 +330,7 @@ func TestParseQueryMeta(t *testing.T) { } func TestParseWriteMeta(t *testing.T) { - t.Parallel() + testutil.Parallel(t) resp := &http.Response{ Header: make(map[string][]string), } @@ -348,7 +347,7 @@ func TestParseWriteMeta(t *testing.T) { } func TestClientHeader(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, func(c *Config) { c.Headers = http.Header{ "Hello": []string{"World"}, @@ -364,7 +363,7 @@ func TestClientHeader(t *testing.T) { } func TestQueryString(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() diff --git a/api/compose_test.go b/api/compose_test.go index 2a5ec265c..c70b244cd 100644 --- a/api/compose_test.go +++ b/api/compose_test.go @@ -3,10 +3,12 @@ package api import ( "reflect" "testing" + + "github.com/hashicorp/nomad/api/internal/testutil" ) func TestCompose(t *testing.T) { - t.Parallel() + testutil.Parallel(t) // Compose a task task := NewTask("task1", "exec"). SetConfig("foo", "bar"). diff --git a/api/constraint_test.go b/api/constraint_test.go index c619c0153..498da0f9f 100644 --- a/api/constraint_test.go +++ b/api/constraint_test.go @@ -3,10 +3,12 @@ package api import ( "reflect" "testing" + + "github.com/hashicorp/nomad/api/internal/testutil" ) func TestCompose_Constraints(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c := NewConstraint("kernel.name", "=", "darwin") expect := &Constraint{ LTarget: "kernel.name", diff --git a/api/consul_test.go b/api/consul_test.go index 2b1145fe9..81ebaf48b 100644 --- a/api/consul_test.go +++ b/api/consul_test.go @@ -3,10 +3,12 @@ package api import ( "testing" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/require" ) func TestConsul_Canonicalize(t *testing.T) { + testutil.Parallel(t) t.Run("missing ns", func(t *testing.T) { c := new(Consul) c.Canonicalize() @@ -21,6 +23,7 @@ func TestConsul_Canonicalize(t *testing.T) { } func TestConsul_Copy(t *testing.T) { + testutil.Parallel(t) t.Run("complete", func(t *testing.T) { result := (&Consul{ Namespace: "foo", @@ -32,6 +35,7 @@ func TestConsul_Copy(t *testing.T) { } func TestConsul_MergeNamespace(t *testing.T) { + testutil.Parallel(t) t.Run("already set", func(t *testing.T) { a := &Consul{Namespace: "foo"} ns := stringToPtr("bar") diff --git a/api/csi_test.go b/api/csi_test.go index 2c88d5a3e..6c75de4c0 100644 --- a/api/csi_test.go +++ b/api/csi_test.go @@ -3,6 +3,7 @@ package api import ( "testing" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/require" ) @@ -12,7 +13,7 @@ import ( // 1. Expose the test server RPC server and force a Node.Update to fingerprint a plugin // 2. 
Build and deploy a dummy CSI plugin via a job, and have it really fingerprint func TestCSIVolumes_CRUD(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, root := makeACLClient(t, nil, nil) defer s.Stop() v := c.CSIVolumes() diff --git a/api/evaluations_test.go b/api/evaluations_test.go index 226db8460..b82c9fed6 100644 --- a/api/evaluations_test.go +++ b/api/evaluations_test.go @@ -9,7 +9,7 @@ import ( ) func TestEvaluations_List(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() e := c.Evaluations() @@ -73,7 +73,7 @@ func TestEvaluations_List(t *testing.T) { } func TestEvaluations_PrefixList(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() e := c.Evaluations() @@ -102,7 +102,7 @@ func TestEvaluations_PrefixList(t *testing.T) { } func TestEvaluations_Info(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() e := c.Evaluations() @@ -129,7 +129,7 @@ func TestEvaluations_Info(t *testing.T) { } func TestEvaluations_Allocations(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() e := c.Evaluations() @@ -142,7 +142,7 @@ func TestEvaluations_Allocations(t *testing.T) { } func TestEvaluations_Sort(t *testing.T) { - t.Parallel() + testutil.Parallel(t) evals := []*Evaluation{ {CreateIndex: 2}, {CreateIndex: 1}, diff --git a/api/event_stream_test.go b/api/event_stream_test.go index c2ea454d7..d3e29fbe1 100644 --- a/api/event_stream_test.go +++ b/api/event_stream_test.go @@ -12,7 +12,7 @@ import ( ) func TestEvent_Stream(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -50,7 +50,7 @@ func TestEvent_Stream(t *testing.T) { } func TestEvent_Stream_Err_InvalidQueryParam(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -79,7 +79,7 @@ func TestEvent_Stream_Err_InvalidQueryParam(t *testing.T) { } func TestEvent_Stream_CloseCtx(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -116,7 +116,7 @@ func TestEvent_Stream_CloseCtx(t *testing.T) { } func TestEventStream_PayloadValue(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true @@ -175,7 +175,7 @@ func TestEventStream_PayloadValue(t *testing.T) { } func TestEventStream_PayloadValueHelpers(t *testing.T) { - t.Parallel() + testutil.Parallel(t) testCases := []struct { desc string diff --git a/api/fs_test.go b/api/fs_test.go index a9e1efb0c..9204484cb 100644 --- a/api/fs_test.go +++ b/api/fs_test.go @@ -17,7 +17,7 @@ import ( ) func TestFS_Logs(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) rpcPort := 0 c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { @@ -160,7 +160,7 @@ func TestFS_Logs(t *testing.T) { } func TestFS_FrameReader(t *testing.T) { - t.Parallel() + testutil.Parallel(t) // Create a channel of the frames and a cancel channel framesCh := make(chan *StreamFrame, 3) errCh := make(chan error) @@ -230,7 +230,7 @@ func TestFS_FrameReader(t *testing.T) { } func TestFS_FrameReader_Unblock(t *testing.T) { - t.Parallel() + testutil.Parallel(t) // Create a channel of the frames and a cancel channel framesCh := make(chan *StreamFrame, 3) errCh := make(chan error) @@ -268,7 +268,7 @@ func TestFS_FrameReader_Unblock(t *testing.T) { } func TestFS_FrameReader_Error(t 
*testing.T) { - t.Parallel() + testutil.Parallel(t) // Create a channel of the frames and a cancel channel framesCh := make(chan *StreamFrame, 3) errCh := make(chan error, 1) diff --git a/api/internal/testutil/slow.go b/api/internal/testutil/slow.go index 1a8088024..d4776326f 100644 --- a/api/internal/testutil/slow.go +++ b/api/internal/testutil/slow.go @@ -2,14 +2,29 @@ package testutil import ( "os" - - testing "github.com/mitchellh/go-testing-interface" + "strconv" + "testing" ) -// SkipSlow skips a slow test unless the NOMAD_SLOW_TEST environment variable -// is set. -func SkipSlow(t testing.T) { - if os.Getenv("NOMAD_SLOW_TEST") == "" { - t.Skip("Skipping slow test. Set NOMAD_SLOW_TEST=1 to run.") +// Copy of ci/slow.go for API. + +// SkipSlow skips a slow test unless NOMAD_SLOW_TEST is set to a true value. +func SkipSlow(t *testing.T, reason string) { + value := os.Getenv("NOMAD_SLOW_TEST") + run, err := strconv.ParseBool(value) + if !run || err != nil { + t.Skipf("Skipping slow test: %s", reason) + } +} + +// Parallel runs t in parallel, unless CI is set to a true value. +// +// In CI (CircleCI / GitHub Actions) we get better performance by running tests +// in serial while not restricting GOMAXPROCS. +func Parallel(t *testing.T) { + value := os.Getenv("CI") + isCI, err := strconv.ParseBool(value) + if !isCI || err != nil { + t.Parallel() } } diff --git a/api/ioutil_test.go b/api/ioutil_test.go index 1871f410c..97e43f694 100644 --- a/api/ioutil_test.go +++ b/api/ioutil_test.go @@ -13,10 +13,12 @@ import ( "testing" "testing/iotest" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/require" ) func TestChecksumValidatingReader(t *testing.T) { + testutil.Parallel(t) data := make([]byte, 4096) _, err := rand.Read(data) require.NoError(t, err) @@ -66,7 +68,7 @@ func TestChecksumValidatingReader(t *testing.T) { } func TestChecksumValidatingReader_PropagatesError(t *testing.T) { - + testutil.Parallel(t) pr, pw := io.Pipe() defer pr.Close() defer pw.Close() diff --git a/api/jobs_test.go b/api/jobs_test.go index f21f7cab2..c882aa445 100644 --- a/api/jobs_test.go +++ b/api/jobs_test.go @@ -13,7 +13,7 @@ import ( ) func TestJobs_Register(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) @@ -45,7 +45,7 @@ func TestJobs_Register(t *testing.T) { } func TestJobs_Register_PreserveCounts(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) @@ -117,7 +117,7 @@ func TestJobs_Register_PreserveCounts(t *testing.T) { } func TestJobs_Register_NoPreserveCounts(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) @@ -188,7 +188,7 @@ func TestJobs_Register_NoPreserveCounts(t *testing.T) { } func TestJobs_Register_EvalPriority(t *testing.T) { - t.Parallel() + testutil.Parallel(t) requireAssert := require.New(t) c, s := makeClient(t, nil, nil) @@ -215,7 +215,7 @@ func TestJobs_Register_EvalPriority(t *testing.T) { } func TestJobs_Register_NoEvalPriority(t *testing.T) { - t.Parallel() + testutil.Parallel(t) requireAssert := require.New(t) c, s := makeClient(t, nil, nil) @@ -242,7 +242,7 @@ func TestJobs_Register_NoEvalPriority(t *testing.T) { } func TestJobs_Validate(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -270,7 +270,7 @@ func TestJobs_Validate(t *testing.T) { } func TestJobs_Canonicalize(t *testing.T) { - t.Parallel() + 
testutil.Parallel(t) testCases := []struct { name string expected *Job @@ -1282,7 +1282,7 @@ func TestJobs_Canonicalize(t *testing.T) { } func TestJobs_EnforceRegister(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -1328,7 +1328,7 @@ func TestJobs_EnforceRegister(t *testing.T) { } func TestJobs_Revert(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1378,7 +1378,7 @@ func TestJobs_Revert(t *testing.T) { } func TestJobs_Info(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1414,7 +1414,7 @@ func TestJobs_Info(t *testing.T) { } func TestJobs_ScaleInvalidAction(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) @@ -1454,7 +1454,7 @@ func TestJobs_ScaleInvalidAction(t *testing.T) { } func TestJobs_Versions(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1487,7 +1487,7 @@ func TestJobs_Versions(t *testing.T) { } func TestJobs_PrefixList(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1523,7 +1523,7 @@ func TestJobs_PrefixList(t *testing.T) { } func TestJobs_List(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1559,7 +1559,7 @@ func TestJobs_List(t *testing.T) { } func TestJobs_Allocations(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1581,7 +1581,7 @@ func TestJobs_Allocations(t *testing.T) { } func TestJobs_Evaluations(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1623,7 +1623,7 @@ func TestJobs_Evaluations(t *testing.T) { } func TestJobs_Deregister(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1683,7 +1683,7 @@ func TestJobs_Deregister(t *testing.T) { } func TestJobs_Deregister_EvalPriority(t *testing.T) { - t.Parallel() + testutil.Parallel(t) requireAssert := require.New(t) c, s := makeClient(t, nil, nil) @@ -1714,7 +1714,7 @@ func TestJobs_Deregister_EvalPriority(t *testing.T) { } func TestJobs_Deregister_NoEvalPriority(t *testing.T) { - t.Parallel() + testutil.Parallel(t) requireAssert := require.New(t) c, s := makeClient(t, nil, nil) @@ -1745,7 +1745,7 @@ func TestJobs_Deregister_NoEvalPriority(t *testing.T) { } func TestJobs_ForceEvaluate(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1785,7 +1785,7 @@ func TestJobs_ForceEvaluate(t *testing.T) { } func TestJobs_PeriodicForce(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1838,7 +1838,7 @@ func TestJobs_PeriodicForce(t *testing.T) { } func TestJobs_Plan(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1910,7 +1910,7 @@ func TestJobs_Plan(t *testing.T) { } func TestJobs_JobSummary(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() jobs := c.Jobs() @@ -1948,7 +1948,7 @@ func TestJobs_JobSummary(t *testing.T) { } func TestJobs_NewBatchJob(t 
*testing.T) { - t.Parallel() + testutil.Parallel(t) job := NewBatchJob("job1", "myjob", "global", 5) expect := &Job{ Region: stringToPtr("global"), @@ -1963,7 +1963,7 @@ func TestJobs_NewBatchJob(t *testing.T) { } func TestJobs_NewServiceJob(t *testing.T) { - t.Parallel() + testutil.Parallel(t) job := NewServiceJob("job1", "myjob", "global", 5) expect := &Job{ Region: stringToPtr("global"), @@ -1978,7 +1978,7 @@ func TestJobs_NewServiceJob(t *testing.T) { } func TestJobs_NewSystemJob(t *testing.T) { - t.Parallel() + testutil.Parallel(t) job := NewSystemJob("job1", "myjob", "global", 5) expect := &Job{ Region: stringToPtr("global"), @@ -1993,7 +1993,7 @@ func TestJobs_NewSystemJob(t *testing.T) { } func TestJobs_SetMeta(t *testing.T) { - t.Parallel() + testutil.Parallel(t) job := &Job{Meta: nil} // Initializes a nil map @@ -2016,7 +2016,7 @@ func TestJobs_SetMeta(t *testing.T) { } func TestJobs_Constrain(t *testing.T) { - t.Parallel() + testutil.Parallel(t) job := &Job{Constraints: nil} // Create and add a constraint @@ -2050,7 +2050,7 @@ func TestJobs_Constrain(t *testing.T) { } func TestJobs_AddAffinity(t *testing.T) { - t.Parallel() + testutil.Parallel(t) job := &Job{Affinities: nil} // Create and add an affinity @@ -2086,7 +2086,7 @@ func TestJobs_AddAffinity(t *testing.T) { } func TestJobs_Sort(t *testing.T) { - t.Parallel() + testutil.Parallel(t) jobs := []*JobListStub{ {ID: "job2"}, {ID: "job0"}, @@ -2105,7 +2105,7 @@ func TestJobs_Sort(t *testing.T) { } func TestJobs_AddSpread(t *testing.T) { - t.Parallel() + testutil.Parallel(t) job := &Job{Spreads: nil} // Create and add a Spread @@ -2157,7 +2157,7 @@ func TestJobs_AddSpread(t *testing.T) { // TestJobs_ScaleAction tests the scale target for task group count func TestJobs_ScaleAction(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) @@ -2218,7 +2218,7 @@ func TestJobs_ScaleAction(t *testing.T) { } func TestJobs_ScaleAction_Error(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) @@ -2270,7 +2270,7 @@ func TestJobs_ScaleAction_Error(t *testing.T) { } func TestJobs_ScaleAction_Noop(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) @@ -2323,7 +2323,7 @@ func TestJobs_ScaleAction_Noop(t *testing.T) { // TestJobs_ScaleStatus tests the /scale status endpoint for task group count func TestJobs_ScaleStatus(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) diff --git a/api/namespace_test.go b/api/namespace_test.go index 80266b366..6bb90484f 100644 --- a/api/namespace_test.go +++ b/api/namespace_test.go @@ -3,11 +3,12 @@ package api import ( "testing" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/assert" ) func TestNamespaces_Register(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -29,7 +30,7 @@ func TestNamespaces_Register(t *testing.T) { } func TestNamespaces_Register_Invalid(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -43,7 +44,7 @@ func TestNamespaces_Register_Invalid(t *testing.T) { } func TestNamespace_Info(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -69,7 +70,7 @@ func TestNamespace_Info(t *testing.T) { } func TestNamespaces_Delete(t 
*testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -103,7 +104,7 @@ func TestNamespaces_Delete(t *testing.T) { } func TestNamespaces_List(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() diff --git a/api/nodes_test.go b/api/nodes_test.go index ebe85f14f..a811826ed 100644 --- a/api/nodes_test.go +++ b/api/nodes_test.go @@ -14,7 +14,7 @@ import ( ) func TestNodes_List(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true }) @@ -43,7 +43,7 @@ func TestNodes_List(t *testing.T) { } func TestNodes_PrefixList(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true }) @@ -86,7 +86,7 @@ func TestNodes_PrefixList(t *testing.T) { // TestNodes_List_Resources asserts that ?resources=true includes allocated and // reserved resources in the response. func TestNodes_List_Resources(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true }) @@ -123,7 +123,7 @@ func TestNodes_List_Resources(t *testing.T) { } func TestNodes_Info(t *testing.T) { - t.Parallel() + testutil.Parallel(t) startTime := time.Now().Unix() c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true @@ -182,7 +182,7 @@ func TestNodes_Info(t *testing.T) { } func TestNodes_NoSecretID(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true }) @@ -216,7 +216,7 @@ func TestNodes_NoSecretID(t *testing.T) { } func TestNodes_ToggleDrain(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true @@ -319,7 +319,7 @@ func TestNodes_ToggleDrain(t *testing.T) { } func TestNodes_ToggleEligibility(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true }) @@ -388,7 +388,7 @@ func TestNodes_ToggleEligibility(t *testing.T) { } func TestNodes_Allocations(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() nodes := c.Nodes() @@ -407,7 +407,7 @@ func TestNodes_Allocations(t *testing.T) { } func TestNodes_ForceEvaluate(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true }) @@ -446,7 +446,7 @@ func TestNodes_ForceEvaluate(t *testing.T) { } func TestNodes_Sort(t *testing.T) { - t.Parallel() + testutil.Parallel(t) nodes := []*NodeListStub{ {CreateIndex: 2}, {CreateIndex: 1}, @@ -466,7 +466,7 @@ func TestNodes_Sort(t *testing.T) { // Unittest monitorDrainMultiplex when an error occurs func TestNodes_MonitorDrain_Multiplex_Bad(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) ctx := context.Background() @@ -518,7 +518,7 @@ func TestNodes_MonitorDrain_Multiplex_Bad(t *testing.T) { // Unittest monitorDrainMultiplex when drain finishes func TestNodes_MonitorDrain_Multiplex_Good(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) ctx := context.Background() @@ -583,7 +583,7 @@ func TestNodes_MonitorDrain_Multiplex_Good(t *testing.T) { } func TestNodes_DrainStrategy_Equal(t *testing.T) { - t.Parallel() + 
testutil.Parallel(t) require := require.New(t) // nil @@ -620,7 +620,7 @@ func TestNodes_DrainStrategy_Equal(t *testing.T) { } func TestNodes_Purge(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.DevMode = true @@ -661,7 +661,7 @@ func TestNodes_Purge(t *testing.T) { } func TestNodeStatValueFormatting(t *testing.T) { - t.Parallel() + testutil.Parallel(t) cases := []struct { expected string diff --git a/api/operator_ent_test.go b/api/operator_ent_test.go index 6be12845b..842ae6790 100644 --- a/api/operator_ent_test.go +++ b/api/operator_ent_test.go @@ -10,7 +10,7 @@ import ( ) func TestOperator_LicenseGet(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() diff --git a/api/operator_metrics_test.go b/api/operator_metrics_test.go index a53e9d13a..f8b149f50 100644 --- a/api/operator_metrics_test.go +++ b/api/operator_metrics_test.go @@ -8,7 +8,7 @@ import ( ) func TestOperator_MetricsSummary(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -31,7 +31,7 @@ func TestOperator_MetricsSummary(t *testing.T) { } func TestOperator_Metrics_Prometheus(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.Telemetry = &testutil.Telemetry{PrometheusMetrics: true} }) diff --git a/api/operator_test.go b/api/operator_test.go index 5b13fc66c..276aefb18 100644 --- a/api/operator_test.go +++ b/api/operator_test.go @@ -3,10 +3,12 @@ package api import ( "strings" "testing" + + "github.com/hashicorp/nomad/api/internal/testutil" ) func TestOperator_RaftGetConfiguration(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -23,7 +25,7 @@ func TestOperator_RaftGetConfiguration(t *testing.T) { } func TestOperator_RaftRemovePeerByAddress(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -38,7 +40,7 @@ func TestOperator_RaftRemovePeerByAddress(t *testing.T) { } func TestOperator_RaftRemovePeerByID(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() diff --git a/api/quota_test.go b/api/quota_test.go index 88cb12260..3de5fd157 100644 --- a/api/quota_test.go +++ b/api/quota_test.go @@ -6,11 +6,12 @@ package api import ( "testing" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/assert" ) func TestQuotas_Register(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -31,7 +32,7 @@ func TestQuotas_Register(t *testing.T) { } func TestQuotas_Register_Invalid(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -45,7 +46,7 @@ func TestQuotas_Register_Invalid(t *testing.T) { } func TestQuotas_Info(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -71,7 +72,7 @@ func TestQuotas_Info(t *testing.T) { } func TestQuotas_Usage(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -97,7 +98,7 @@ func TestQuotas_Usage(t *testing.T) { } func TestQuotas_Delete(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -129,7 
+130,7 @@ func TestQuotas_Delete(t *testing.T) { } func TestQuotas_List(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -169,7 +170,7 @@ func TestQuotas_List(t *testing.T) { } func TestQuotas_ListUsages(t *testing.T) { - t.Parallel() + testutil.Parallel(t) assert := assert.New(t) c, s := makeClient(t, nil, nil) defer s.Stop() diff --git a/api/regions_test.go b/api/regions_test.go index efa1d4871..b500eb1bc 100644 --- a/api/regions_test.go +++ b/api/regions_test.go @@ -8,7 +8,7 @@ import ( ) func TestRegionsList(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c1, s1 := makeClient(t, nil, func(c *testutil.TestServerConfig) { c.Region = "regionA" }) diff --git a/api/resources_test.go b/api/resources_test.go index 9db95a179..2f9904ba6 100644 --- a/api/resources_test.go +++ b/api/resources_test.go @@ -4,10 +4,12 @@ import ( "reflect" "testing" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/kr/pretty" ) func TestResources_Canonicalize(t *testing.T) { + testutil.Parallel(t) testCases := []struct { name string input *Resources diff --git a/api/scaling_test.go b/api/scaling_test.go index 386274267..0d4a703c6 100644 --- a/api/scaling_test.go +++ b/api/scaling_test.go @@ -3,11 +3,12 @@ package api import ( "testing" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/require" ) func TestScalingPolicies_ListPolicies(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) @@ -55,7 +56,7 @@ func TestScalingPolicies_ListPolicies(t *testing.T) { } func TestScalingPolicies_GetPolicy(t *testing.T) { - t.Parallel() + testutil.Parallel(t) require := require.New(t) c, s := makeClient(t, nil, nil) diff --git a/api/search_test.go b/api/search_test.go index 62f0bd4a5..f262faaf6 100644 --- a/api/search_test.go +++ b/api/search_test.go @@ -4,11 +4,12 @@ import ( "testing" "github.com/hashicorp/nomad/api/contexts" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/require" ) func TestSearch_PrefixSearch(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() @@ -30,7 +31,7 @@ func TestSearch_PrefixSearch(t *testing.T) { } func TestSearch_FuzzySearch(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() diff --git a/api/sentinel_test.go b/api/sentinel_test.go index 79a7c2da8..638c4191e 100644 --- a/api/sentinel_test.go +++ b/api/sentinel_test.go @@ -6,11 +6,12 @@ package api import ( "testing" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/assert" ) func TestSentinelPolicies_ListUpsert(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() ap := c.SentinelPolicies() @@ -51,7 +52,7 @@ func TestSentinelPolicies_ListUpsert(t *testing.T) { } func TestSentinelPolicies_Delete(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() ap := c.SentinelPolicies() @@ -85,7 +86,7 @@ func TestSentinelPolicies_Delete(t *testing.T) { } func TestSentinelPolicies_Info(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s, _ := makeACLClient(t, nil, nil) defer s.Stop() ap := c.SentinelPolicies() diff --git a/api/services_test.go b/api/services_test.go index 648c975d8..775a9f1e1 100644 --- a/api/services_test.go +++ b/api/services_test.go @@ -5,11 +5,12 @@ import ( "testing" "time" + 
"github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/require" ) func TestService_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) j := &Job{Name: stringToPtr("job")} tg := &TaskGroup{Name: stringToPtr("group")} @@ -24,7 +25,7 @@ func TestService_Canonicalize(t *testing.T) { } func TestServiceCheck_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) j := &Job{Name: stringToPtr("job")} tg := &TaskGroup{Name: stringToPtr("group")} @@ -43,7 +44,7 @@ func TestServiceCheck_Canonicalize(t *testing.T) { } func TestService_Check_PassFail(t *testing.T) { - t.Parallel() + testutil.Parallel(t) job := &Job{Name: stringToPtr("job")} tg := &TaskGroup{Name: stringToPtr("group")} @@ -79,7 +80,7 @@ func TestService_Check_PassFail(t *testing.T) { // TestService_CheckRestart asserts Service.CheckRestart settings are properly // inherited by Checks. func TestService_CheckRestart(t *testing.T) { - t.Parallel() + testutil.Parallel(t) job := &Job{Name: stringToPtr("job")} tg := &TaskGroup{Name: stringToPtr("group")} @@ -127,7 +128,7 @@ func TestService_CheckRestart(t *testing.T) { } func TestService_Connect_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil connect", func(t *testing.T) { cc := (*ConsulConnect)(nil) @@ -145,7 +146,7 @@ func TestService_Connect_Canonicalize(t *testing.T) { } func TestService_Connect_ConsulSidecarService_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil sidecar_service", func(t *testing.T) { css := (*ConsulSidecarService)(nil) @@ -181,7 +182,7 @@ func TestService_Connect_ConsulSidecarService_Canonicalize(t *testing.T) { } func TestService_Connect_ConsulProxy_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil proxy", func(t *testing.T) { cp := (*ConsulProxy)(nil) @@ -217,7 +218,7 @@ func TestService_Connect_ConsulProxy_Canonicalize(t *testing.T) { } func TestService_Connect_ConsulUpstream_Copy(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil upstream", func(t *testing.T) { cu := (*ConsulUpstream)(nil) @@ -239,7 +240,7 @@ func TestService_Connect_ConsulUpstream_Copy(t *testing.T) { } func TestService_Connect_ConsulUpstream_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil upstream", func(t *testing.T) { cu := (*ConsulUpstream)(nil) @@ -267,7 +268,7 @@ func TestService_Connect_ConsulUpstream_Canonicalize(t *testing.T) { } func TestService_Connect_proxy_settings(t *testing.T) { - t.Parallel() + testutil.Parallel(t) job := &Job{Name: stringToPtr("job")} tg := &TaskGroup{Name: stringToPtr("group")} @@ -300,7 +301,7 @@ func TestService_Connect_proxy_settings(t *testing.T) { } func TestService_Tags(t *testing.T) { - t.Parallel() + testutil.Parallel(t) r := require.New(t) // canonicalize does not modify eto or tags @@ -320,7 +321,7 @@ func TestService_Tags(t *testing.T) { } func TestService_Connect_SidecarTask_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil sidecar_task", func(t *testing.T) { st := (*SidecarTask)(nil) @@ -352,7 +353,7 @@ func TestService_Connect_SidecarTask_Canonicalize(t *testing.T) { } func TestService_ConsulGateway_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil", func(t *testing.T) { cg := (*ConsulGateway)(nil) @@ -388,7 +389,7 @@ func TestService_ConsulGateway_Canonicalize(t *testing.T) { } func TestService_ConsulGateway_Copy(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil", func(t *testing.T) { result := 
(*ConsulGateway)(nil).Copy() @@ -439,7 +440,7 @@ func TestService_ConsulGateway_Copy(t *testing.T) { } func TestService_ConsulIngressConfigEntry_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil", func(t *testing.T) { c := (*ConsulIngressConfigEntry)(nil) @@ -485,7 +486,7 @@ func TestService_ConsulIngressConfigEntry_Canonicalize(t *testing.T) { } func TestService_ConsulIngressConfigEntry_Copy(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil", func(t *testing.T) { result := (*ConsulIngressConfigEntry)(nil).Copy() @@ -516,7 +517,7 @@ func TestService_ConsulIngressConfigEntry_Copy(t *testing.T) { } func TestService_ConsulTerminatingConfigEntry_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil", func(t *testing.T) { c := (*ConsulTerminatingConfigEntry)(nil) @@ -534,7 +535,7 @@ func TestService_ConsulTerminatingConfigEntry_Canonicalize(t *testing.T) { } func TestService_ConsulTerminatingConfigEntry_Copy(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil", func(t *testing.T) { result := (*ConsulIngressConfigEntry)(nil).Copy() @@ -560,7 +561,7 @@ func TestService_ConsulTerminatingConfigEntry_Copy(t *testing.T) { } func TestService_ConsulMeshConfigEntry_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil", func(t *testing.T) { ce := (*ConsulMeshConfigEntry)(nil) @@ -576,7 +577,7 @@ func TestService_ConsulMeshConfigEntry_Canonicalize(t *testing.T) { } func TestService_ConsulMeshConfigEntry_Copy(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil", func(t *testing.T) { ce := (*ConsulMeshConfigEntry)(nil) @@ -592,7 +593,7 @@ func TestService_ConsulMeshConfigEntry_Copy(t *testing.T) { } func TestService_ConsulMeshGateway_Canonicalize(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil", func(t *testing.T) { c := (*ConsulMeshGateway)(nil) @@ -614,7 +615,7 @@ func TestService_ConsulMeshGateway_Canonicalize(t *testing.T) { } func TestService_ConsulMeshGateway_Copy(t *testing.T) { - t.Parallel() + testutil.Parallel(t) t.Run("nil", func(t *testing.T) { c := (*ConsulMeshGateway)(nil) diff --git a/api/status_test.go b/api/status_test.go index ab48ad203..bbb19a599 100644 --- a/api/status_test.go +++ b/api/status_test.go @@ -2,10 +2,12 @@ package api import ( "testing" + + "github.com/hashicorp/nomad/api/internal/testutil" ) func TestStatus_Leader(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() status := c.Status() diff --git a/api/system_test.go b/api/system_test.go index cc24615fa..ae34f6848 100644 --- a/api/system_test.go +++ b/api/system_test.go @@ -2,10 +2,12 @@ package api import ( "testing" + + "github.com/hashicorp/nomad/api/internal/testutil" ) func TestSystem_GarbageCollect(t *testing.T) { - t.Parallel() + testutil.Parallel(t) c, s := makeClient(t, nil, nil) defer s.Stop() e := c.System() diff --git a/api/tasks_test.go b/api/tasks_test.go index 14ccf837e..e9781d1aa 100644 --- a/api/tasks_test.go +++ b/api/tasks_test.go @@ -6,12 +6,13 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestTaskGroup_NewTaskGroup(t *testing.T) { - t.Parallel() + testutil.Parallel(t) grp := NewTaskGroup("grp1", 2) expect := &TaskGroup{ Name: stringToPtr("grp1"), @@ -23,7 +24,7 @@ func TestTaskGroup_NewTaskGroup(t *testing.T) { } func TestTaskGroup_Constrain(t *testing.T) { - t.Parallel() + testutil.Parallel(t) grp := 
NewTaskGroup("grp1", 1) // Add a constraint to the group @@ -57,7 +58,7 @@ func TestTaskGroup_Constrain(t *testing.T) { } func TestTaskGroup_AddAffinity(t *testing.T) { - t.Parallel() + testutil.Parallel(t) grp := NewTaskGroup("grp1", 1) // Add an affinity to the group @@ -93,7 +94,7 @@ func TestTaskGroup_AddAffinity(t *testing.T) { } func TestTaskGroup_SetMeta(t *testing.T) { - t.Parallel() + testutil.Parallel(t) grp := NewTaskGroup("grp1", 1) // Initializes an empty map @@ -116,7 +117,7 @@ func TestTaskGroup_SetMeta(t *testing.T) { } func TestTaskGroup_AddSpread(t *testing.T) { - t.Parallel() + testutil.Parallel(t) grp := NewTaskGroup("grp1", 1) // Create and add spread @@ -167,7 +168,7 @@ func TestTaskGroup_AddSpread(t *testing.T) { } func TestTaskGroup_AddTask(t *testing.T) { - t.Parallel() + testutil.Parallel(t) grp := NewTaskGroup("grp1", 1) // Add the task to the task group @@ -199,7 +200,7 @@ func TestTaskGroup_AddTask(t *testing.T) { } func TestTask_NewTask(t *testing.T) { - t.Parallel() + testutil.Parallel(t) task := NewTask("task1", "exec") expect := &Task{ Name: "task1", @@ -211,7 +212,7 @@ func TestTask_NewTask(t *testing.T) { } func TestTask_SetConfig(t *testing.T) { - t.Parallel() + testutil.Parallel(t) task := NewTask("task1", "exec") // Initializes an empty map @@ -234,7 +235,7 @@ func TestTask_SetConfig(t *testing.T) { } func TestTask_SetMeta(t *testing.T) { - t.Parallel() + testutil.Parallel(t) task := NewTask("task1", "exec") // Initializes an empty map @@ -257,7 +258,7 @@ func TestTask_SetMeta(t *testing.T) { } func TestTask_Require(t *testing.T) { - t.Parallel() + testutil.Parallel(t) task := NewTask("task1", "exec") // Create some require resources @@ -285,7 +286,7 @@ func TestTask_Require(t *testing.T) { } func TestTask_Constrain(t *testing.T) { - t.Parallel() + testutil.Parallel(t) task := NewTask("task1", "exec") // Add a constraint to the task @@ -319,7 +320,7 @@ func TestTask_Constrain(t *testing.T) { } func TestTask_AddAffinity(t *testing.T) { - t.Parallel() + testutil.Parallel(t) task := NewTask("task1", "exec") // Add an affinity to the task @@ -354,7 +355,7 @@ func TestTask_AddAffinity(t *testing.T) { } func TestTask_Artifact(t *testing.T) { - t.Parallel() + testutil.Parallel(t) a := TaskArtifact{ GetterSource: stringToPtr("http://localhost/foo.txt"), GetterMode: stringToPtr("file"), @@ -369,7 +370,7 @@ func TestTask_Artifact(t *testing.T) { } func TestTask_VolumeMount(t *testing.T) { - t.Parallel() + testutil.Parallel(t) vm := &VolumeMount{} vm.Canonicalize() require.NotNil(t, vm.PropagationMode) @@ -377,6 +378,7 @@ func TestTask_VolumeMount(t *testing.T) { } func TestTask_Canonicalize_TaskLifecycle(t *testing.T) { + testutil.Parallel(t) testCases := []struct { name string expected *TaskLifecycle @@ -407,6 +409,7 @@ func TestTask_Canonicalize_TaskLifecycle(t *testing.T) { } func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) { + testutil.Parallel(t) taskWithWait := func(wc *WaitConfig) *Task { return &Task{ Templates: []*Template{ @@ -493,6 +496,7 @@ func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) { // Ensures no regression on https://github.com/hashicorp/nomad/issues/3132 func TestTaskGroup_Canonicalize_Update(t *testing.T) { + testutil.Parallel(t) // Job with an Empty() Update job := &Job{ ID: stringToPtr("test"), @@ -518,6 +522,7 @@ func TestTaskGroup_Canonicalize_Update(t *testing.T) { } func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { + testutil.Parallel(t) require := require.New(t) job := &Job{ @@ -574,6 
+579,7 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) { } func TestTaskGroup_Merge_Update(t *testing.T) { + testutil.Parallel(t) job := &Job{ ID: stringToPtr("test"), Update: &UpdateStrategy{}, @@ -606,6 +612,7 @@ func TestTaskGroup_Merge_Update(t *testing.T) { // Verifies that migrate strategy is merged correctly func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) { + testutil.Parallel(t) type testCase struct { desc string jobType string @@ -758,6 +765,7 @@ func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) { // TestSpread_Canonicalize asserts that the spread stanza is canonicalized correctly func TestSpread_Canonicalize(t *testing.T) { + testutil.Parallel(t) job := &Job{ ID: stringToPtr("test"), Type: stringToPtr("batch"), @@ -810,6 +818,7 @@ func TestSpread_Canonicalize(t *testing.T) { } func Test_NewDefaultReschedulePolicy(t *testing.T) { + testutil.Parallel(t) testCases := []struct { desc string inputJobType string @@ -874,6 +883,7 @@ func Test_NewDefaultReschedulePolicy(t *testing.T) { } func TestTaskGroup_Canonicalize_Consul(t *testing.T) { + testutil.Parallel(t) t.Run("override job consul in group", func(t *testing.T) { job := &Job{ ID: stringToPtr("job"), diff --git a/api/utils_test.go b/api/utils_test.go index 14c50544e..7e0d789bd 100644 --- a/api/utils_test.go +++ b/api/utils_test.go @@ -3,10 +3,12 @@ package api import ( "testing" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/require" ) func TestFormatRoundedFloat(t *testing.T) { + testutil.Parallel(t) cases := []struct { input float64 expected string From 7e6767ddf8f44d2df47f652f38fc544654485ca5 Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Thu, 17 Mar 2022 08:37:34 -0500 Subject: [PATCH 69/89] e2e: have e2e use ci.Parallel This is a follow-up to having tests run in serial in CI. The e2e package isn't in CI, but let's use the helper anyway so we can set up semgrep rules covering the entire repository. --- e2e/clientstate/clientstate.go | 7 ++++--- e2e/framework/framework.go | 3 ++- e2e/nomad09upgrade/upgrade.go | 6 +++--- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/e2e/clientstate/clientstate.go b/e2e/clientstate/clientstate.go index 71ac22a28..9fadccf55 100644 --- a/e2e/clientstate/clientstate.go +++ b/e2e/clientstate/clientstate.go @@ -14,6 +14,7 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/state" "github.com/hashicorp/nomad/e2e/e2eutil" "github.com/hashicorp/nomad/e2e/execagent" @@ -82,7 +83,7 @@ func getPID(client *api.Client, alloc *api.Allocation, path string) (int, error) // loop to assert Nomad is crash safe. func (tc *ClientStateTC) TestClientState_Kill(f *framework.F) { t := f.T() - t.Parallel() + ci.Parallel(t) serverOut := testlog.NewPrefixWriter(t, "SERVER: ") clientOut := testlog.NewPrefixWriter(t, "CLIENT: ") @@ -223,7 +224,7 @@ func (tc *ClientStateTC) TestClientState_Kill(f *framework.F) { // in a tight loop to assert Nomad is crash safe while a task is restarting. func (tc *ClientStateTC) TestClientState_KillDuringRestart(f *framework.F) { t := f.T() - t.Parallel() + ci.Parallel(t) serverOut := testlog.NewPrefixWriter(t, "SERVER: ") clientOut := testlog.NewPrefixWriter(t, "CLIENT: ") @@ -345,7 +346,7 @@ func (tc *ClientStateTC) TestClientState_KillDuringRestart(f *framework.F) { // assert it recovers.
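The `ci.Parallel` helper that these e2e tests adopt is not defined in this patch. A minimal sketch of such a helper, assuming it gates `t.Parallel()` on an environment variable so CI can force serial execution (the variable name here is hypothetical, not the real implementation), might look like:

```go
// Package ci is sketched here for illustration only; the real Nomad
// helper may be implemented differently.
package ci

import (
	"os"
	"testing"
)

// Parallel marks the test as parallel unless the environment requests
// serial execution.
func Parallel(t *testing.T) {
	t.Helper()
	// NOMAD_TEST_SERIAL is a hypothetical gate used for this sketch.
	if os.Getenv("NOMAD_TEST_SERIAL") == "" {
		t.Parallel()
	}
}
```

Because the helper takes the `*testing.T` directly, converting a test is the purely mechanical `t.Parallel()` → `ci.Parallel(t)` substitution these diffs perform.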
func (tc *ClientStateTC) TestClientState_Corrupt(f *framework.F) { t := f.T() - t.Parallel() + ci.Parallel(t) serverOut := testlog.NewPrefixWriter(t, "SERVER: ") clientOut := testlog.NewPrefixWriter(t, "CLIENT: ") diff --git a/e2e/framework/framework.go b/e2e/framework/framework.go index aff77ac3f..b783a2396 100644 --- a/e2e/framework/framework.go +++ b/e2e/framework/framework.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/hashicorp/nomad/ci" "github.com/stretchr/testify/require" ) @@ -212,7 +213,7 @@ func (f *Framework) runCase(t *testing.T, s *TestSuite, c TestCase) { t.Run(c.Name(), func(t *testing.T) { // If the TestSuite has Parallel set, all cases run in parallel if s.Parallel { - t.Parallel() + ci.Parallel(t) } f := newF(t) diff --git a/e2e/nomad09upgrade/upgrade.go b/e2e/nomad09upgrade/upgrade.go index 88f2fc88d..a2d204975 100644 --- a/e2e/nomad09upgrade/upgrade.go +++ b/e2e/nomad09upgrade/upgrade.go @@ -119,7 +119,7 @@ func (tc *UpgradePathTC) TestRawExecTaskUpgrade(f *framework.F) { for _, ver := range nomadVersions { ver := ver f.T().Run(ver, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) tc.testUpgradeForJob(t, ver, "nomad09upgrade/rawexec.nomad") }) } @@ -129,7 +129,7 @@ func (tc *UpgradePathTC) TestExecTaskUpgrade(f *framework.F) { for _, ver := range nomadVersions { ver := ver f.T().Run(ver, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) tc.testUpgradeForJob(t, ver, "nomad09upgrade/exec.nomad") }) } @@ -139,7 +139,7 @@ func (tc *UpgradePathTC) TestDockerTaskUpgrade(f *framework.F) { for _, ver := range nomadVersions { ver := ver f.T().Run(ver, func(t *testing.T) { - t.Parallel() + ci.Parallel(t) tc.testUpgradeForJob(t, ver, "nomad09upgrade/docker.nomad") }) } From ae21af4f9bb983ec1303eafa89d25b37365ea5a4 Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Thu, 17 Mar 2022 08:43:37 -0500 Subject: [PATCH 70/89] ci: semgrep rule for parallel tests Adds a semgrep rule that warns about using t.Parallel instead of ci.Parallel --- .semgrep/go_tests.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/.semgrep/go_tests.yml b/.semgrep/go_tests.yml index e1f085bb6..9790a93bd 100644 --- a/.semgrep/go_tests.yml +++ b/.semgrep/go_tests.yml @@ -119,3 +119,15 @@ rules: exclude: ["*"] include: - "*_test.go" + - id: "tests-no-parallel" + patterns: + - pattern: "t.Parallel()" + message: "Use ci.Parallel(t) instead of t.Parallel()" + languages: + - "go" + severity: "WARNING" + fix: "ci.Parallel(t)" + paths: + exclude: ["*"] + include: + - "*_test.go" From bee4c07fe276df1257920ce44bc299a809b44126 Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Thu, 17 Mar 2022 08:49:15 -0500 Subject: [PATCH 71/89] ci: missing import for nomad09upgrade --- e2e/nomad09upgrade/upgrade.go | 1 + 1 file changed, 1 insertion(+) diff --git a/e2e/nomad09upgrade/upgrade.go b/e2e/nomad09upgrade/upgrade.go index a2d204975..a428512b2 100644 --- a/e2e/nomad09upgrade/upgrade.go +++ b/e2e/nomad09upgrade/upgrade.go @@ -13,6 +13,7 @@ import ( "time" getter "github.com/hashicorp/go-getter" + "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/e2e/e2eutil" "github.com/hashicorp/nomad/e2e/execagent" "github.com/hashicorp/nomad/e2e/framework" From dfe520a9d8706957a45bbd819b6091e71211563f Mon Sep 17 00:00:00 2001 From: Luiz Aoqui Date: Thu, 17 Mar 2022 11:10:57 -0400 Subject: [PATCH 72/89] server: transfer leadership in case of error (#12293) When a Nomad server becomes the Raft leader, it must perform several actions defined in the establishLeadership function.
If any of these actions fail, Raft will think the node is the leader, but it will not actually be able to act as a Nomad leader. In this scenario, leadership must be revoked and transferred to another server if possible, or the node should retry the establishLeadership steps. --- .changelog/12293.txt | 3 + nomad/leader.go | 59 ++++++++++++++++++- .../content/docs/upgrade/upgrade-specific.mdx | 8 +++ 3 files changed, 68 insertions(+), 2 deletions(-) create mode 100644 .changelog/12293.txt diff --git a/.changelog/12293.txt b/.changelog/12293.txt new file mode 100644 index 000000000..6248b9e53 --- /dev/null +++ b/.changelog/12293.txt @@ -0,0 +1,3 @@ +```release-note:improvement +server: Transfer Raft leadership in case the Nomad server fails to establish leadership +``` diff --git a/nomad/leader.go b/nomad/leader.go index 5a50995a4..9dd3792e3 100644 --- a/nomad/leader.go +++ b/nomad/leader.go @@ -120,6 +120,30 @@ func (s *Server) monitorLeadership() { } } +func (s *Server) leadershipTransfer() error { + retryCount := 3 + for i := 0; i < retryCount; i++ { + err := s.raft.LeadershipTransfer().Error() + if err == nil { + s.logger.Info("successfully transferred leadership") + return nil + } + + // Don't retry if the Raft version doesn't support leadership transfer + // since this will never succeed. + if err == raft.ErrUnsupportedProtocol { + return fmt.Errorf("leadership transfer not supported with Raft version lower than 3") + } + + s.logger.Error("failed to transfer leadership attempt, will retry", + "attempt", i, + "retry_limit", retryCount, + "error", err, + ) + } + return fmt.Errorf("failed to transfer leadership in %d attempts", retryCount) +} + // leaderLoop runs as long as we are the leader to run various // maintenance activities func (s *Server) leaderLoop(stopCh chan struct{}) { @@ -151,7 +175,15 @@ RECONCILE: s.logger.Error("failed to revoke leadership", "error", err) } - goto WAIT + // Attempt to transfer leadership. If successful, leave the + // leaderLoop since this node is no longer the leader. Otherwise + // try to establish leadership again after 5 seconds. + if err := s.leadershipTransfer(); err != nil { + s.logger.Error("failed to transfer leadership", "error", err) + interval = time.After(5 * time.Second) + goto WAIT + } + return } establishedLeader = true @@ -182,10 +214,12 @@ RECONCILE: } WAIT: - // Wait until leadership is lost + // Wait until leadership is lost or periodically reconcile as long as we + // are the leader, or when Serf events arrive. for { select { case <-stopCh: + // Lost leadership. return case <-s.shutdownCh: return @@ -213,6 +247,27 @@ WAIT: s.revokeLeadership() err := s.establishLeadership(stopCh) errCh <- err + + // In case establishLeadership fails, try to transfer leadership. + // At this point Raft thinks we are the leader, but Nomad did not + // complete the required steps to act as the leader. + if err != nil { + if err := s.leadershipTransfer(); err != nil { + // establishedLeader was true before, but it no longer is + // since we revoked leadership and leadershipTransfer also + // failed. + // Stay in the leaderLoop with establishedLeader set to + // false so we try to establish leadership again in the + // next loop. + establishedLeader = false + interval = time.After(5 * time.Second) + goto WAIT + } + + // leadershipTransfer was successful and it is + // time to leave the leaderLoop. 
+ return + } } } } diff --git a/website/content/docs/upgrade/upgrade-specific.mdx b/website/content/docs/upgrade/upgrade-specific.mdx index baa5703bd..c60a61777 100644 --- a/website/content/docs/upgrade/upgrade-specific.mdx +++ b/website/content/docs/upgrade/upgrade-specific.mdx @@ -51,6 +51,14 @@ The volume staging directory for new CSI plugin tasks will now be mounted to the task's `NOMAD_TASK_DIR` instead of the `csi_plugin.mount_config`. +#### Raft leadership transfer on error + +Starting with Nomad 1.3.0, when a Nomad server is elected the Raft leader but +fails to complete the process to start acting as the Nomad leader it will +attempt to gracefully transfer its Raft leadership status to another eligible +server in the cluster. This operation is only supported when using Raft +Protocol Version 3. + #### Server Raft Database The server raft database in `raft.db` will be automatically migrated to a new From 81687c1ce5ca78718503cd4315ceaf5894f57e51 Mon Sep 17 00:00:00 2001 From: Luiz Aoqui Date: Thu, 17 Mar 2022 13:56:14 -0400 Subject: [PATCH 73/89] api: add related evals to eval details (#12305) The `related` query param is used to indicate that the request should return a list of related (next, previous, and blocked) evaluations. Co-authored-by: Jasmine Dahilig --- .changelog/12305.txt | 3 + api/evaluations.go | 24 +++++ api/evaluations_test.go | 19 ++++ command/agent/eval_endpoint.go | 3 + command/agent/eval_endpoint_test.go | 41 ++++++++ nomad/eval_endpoint.go | 34 +++++-- nomad/eval_endpoint_test.go | 78 ++++++++++----- nomad/state/state_store.go | 49 ++++++++++ nomad/state/state_store_test.go | 119 +++++++++++++++++++++++ nomad/structs/structs.go | 72 +++++++++++++- website/content/api-docs/evaluations.mdx | 85 +++++++++++----- 11 files changed, 466 insertions(+), 61 deletions(-) create mode 100644 .changelog/12305.txt diff --git a/.changelog/12305.txt b/.changelog/12305.txt new file mode 100644 index 000000000..8d9425eed --- /dev/null +++ b/.changelog/12305.txt @@ -0,0 +1,3 @@ +```release-note:improvement +api: Add `related` query parameter to the Evaluation details endpoint +``` diff --git a/api/evaluations.go b/api/evaluations.go index c8404dfc4..62d699ef3 100644 --- a/api/evaluations.go +++ b/api/evaluations.go @@ -71,6 +71,7 @@ type Evaluation struct { NextEval string PreviousEval string BlockedEval string + RelatedEvals []*EvaluationStub FailedTGAllocs map[string]*AllocationMetric ClassEligibility map[string]bool EscapedComputedClass bool @@ -84,6 +85,29 @@ type Evaluation struct { ModifyTime int64 } +// EvaluationStub is used to serialize parts of an evaluation returned in the +// RelatedEvals field of an Evaluation. +type EvaluationStub struct { + ID string + Priority int + Type string + TriggeredBy string + Namespace string + JobID string + NodeID string + DeploymentID string + Status string + StatusDescription string + WaitUntil time.Time + NextEval string + PreviousEval string + BlockedEval string + CreateIndex uint64 + ModifyIndex uint64 + CreateTime int64 + ModifyTime int64 +} + // EvalIndexSort is a wrapper to sort evaluations by CreateIndex. // We reverse the test so that we get the highest index first. 
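For orientation, here is a hedged client-side sketch of how the new `related` query parameter can be consumed through the Go `api` package once this patch is applied; it assumes a Nomad agent reachable at the default address, and the evaluation ID is a placeholder taken from the documentation example further below:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// Assumes a Nomad agent is reachable at the default address.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Request related evaluations, mirroring the query parameter used by
	// the tests in this patch. The evaluation ID is a placeholder.
	opts := &api.QueryOptions{Params: map[string]string{"related": "true"}}
	eval, _, err := client.Evaluations().Info("2deb5f06-a100-f01a-3316-5e501a4965e7", opts)
	if err != nil {
		log.Fatal(err)
	}

	for _, related := range eval.RelatedEvals {
		fmt.Printf("%s (%s) triggered by %s\n", related.ID, related.Status, related.TriggeredBy)
	}
}
```

Note that `RelatedEvals` is nil unless `related=true` is sent, so callers that do not need the extra lookups pay no cost.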
type EvalIndexSort []*Evaluation diff --git a/api/evaluations_test.go b/api/evaluations_test.go index 226db8460..b084e4fbb 100644 --- a/api/evaluations_test.go +++ b/api/evaluations_test.go @@ -126,6 +126,25 @@ func TestEvaluations_Info(t *testing.T) { // Check that we got the right result require.NotNil(t, result) require.Equal(t, resp.EvalID, result.ID) + + // Register the job again to get a related eval + resp, wm, err = jobs.Register(job, nil) + evals, _, err := e.List(nil) + require.NoError(t, err) + + // Find an eval that should have related evals + for _, eval := range evals { + if eval.NextEval != "" || eval.PreviousEval != "" || eval.BlockedEval != "" { + result, qm, err := e.Info(eval.ID, &QueryOptions{ + Params: map[string]string{ + "related": "true", + }, + }) + require.NoError(t, err) + assertQueryMeta(t, qm) + require.NotNil(t, result.RelatedEvals) + } + } } func TestEvaluations_Allocations(t *testing.T) { diff --git a/command/agent/eval_endpoint.go b/command/agent/eval_endpoint.go index a51c9e940..1be0e24ba 100644 --- a/command/agent/eval_endpoint.go +++ b/command/agent/eval_endpoint.go @@ -80,6 +80,9 @@ func (s *HTTPServer) evalQuery(resp http.ResponseWriter, req *http.Request, eval return nil, nil } + query := req.URL.Query() + args.IncludeRelated = query.Get("related") == "true" + var out structs.SingleEvalResponse if err := s.agent.RPC("Eval.GetEval", &args, &out); err != nil { return nil, err diff --git a/command/agent/eval_endpoint_test.go b/command/agent/eval_endpoint_test.go index 15217ea1b..df651d462 100644 --- a/command/agent/eval_endpoint_test.go +++ b/command/agent/eval_endpoint_test.go @@ -200,3 +200,44 @@ func TestHTTP_EvalQuery(t *testing.T) { } }) } + +func TestHTTP_EvalQueryWithRelated(t *testing.T) { + t.Parallel() + httpTest(t, nil, func(s *TestAgent) { + // Directly manipulate the state + state := s.Agent.server.State() + eval1 := mock.Eval() + eval2 := mock.Eval() + + // Link related evals + eval1.NextEval = eval2.ID + eval2.PreviousEval = eval1.ID + + err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1, eval2}) + require.NoError(t, err) + + // Make the HTTP request + req, err := http.NewRequest("GET", fmt.Sprintf("/v1/evaluation/%s?related=true", eval1.ID), nil) + require.NoError(t, err) + respW := httptest.NewRecorder() + + // Make the request + obj, err := s.Server.EvalSpecificRequest(respW, req) + require.NoError(t, err) + + // Check for the index + require.NotEmpty(t, respW.Result().Header.Get("X-Nomad-Index")) + require.NotEmpty(t, respW.Result().Header.Get("X-Nomad-KnownLeader")) + require.NotEmpty(t, respW.Result().Header.Get("X-Nomad-LastContact")) + + // Check the eval + e := obj.(*structs.Evaluation) + require.Equal(t, eval1.ID, e.ID) + + // Check for the related evals + expected := []*structs.EvaluationStub{ + eval2.Stub(), + } + require.Equal(t, expected, e.RelatedEvals) + }) +} diff --git a/nomad/eval_endpoint.go b/nomad/eval_endpoint.go index b3e4d371e..2d6af727c 100644 --- a/nomad/eval_endpoint.go +++ b/nomad/eval_endpoint.go @@ -53,23 +53,39 @@ func (e *Eval) GetEval(args *structs.EvalSpecificRequest, queryOpts: &args.QueryOptions, queryMeta: &reply.QueryMeta, run: func(ws memdb.WatchSet, state *state.StateStore) error { - // Look for the job - out, err := state.EvalByID(ws, args.EvalID) + var related []*structs.EvaluationStub + + // Look for the eval + eval, err := state.EvalByID(ws, args.EvalID) if err != nil { - return err + return fmt.Errorf("failed to lookup eval: %v", err) } - // Setup the output - 
reply.Eval = out - if out != nil { + if eval != nil { // Re-check namespace in case it differs from request. - if !allowNsOp(aclObj, out.Namespace) { + if !allowNsOp(aclObj, eval.Namespace) { return structs.ErrPermissionDenied } - reply.Index = out.ModifyIndex + // Lookup related evals if requested. + if args.IncludeRelated { + related, err = state.EvalsRelatedToID(ws, eval.ID) + if err != nil { + return fmt.Errorf("failed to lookup related evals: %v", err) + } + + // Use a copy to avoid modifying the original eval. + eval = eval.Copy() + eval.RelatedEvals = related + } + } + + // Setup the output. + reply.Eval = eval + if eval != nil { + reply.Index = eval.ModifyIndex } else { - // Use the last index that affected the nodes table + // Use the last index that affected the evals table index, err := state.Index("evals") if err != nil { return err diff --git a/nomad/eval_endpoint_test.go b/nomad/eval_endpoint_test.go index 500b3502c..b23a9f2b2 100644 --- a/nomad/eval_endpoint_test.go +++ b/nomad/eval_endpoint_test.go @@ -30,36 +30,60 @@ func TestEvalEndpoint_GetEval(t *testing.T) { // Create the register request eval1 := mock.Eval() - s1.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1}) + eval2 := mock.Eval() - // Lookup the eval - get := &structs.EvalSpecificRequest{ - EvalID: eval1.ID, - QueryOptions: structs.QueryOptions{Region: "global"}, - } - var resp structs.SingleEvalResponse - if err := msgpackrpc.CallWithCodec(codec, "Eval.GetEval", get, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if resp.Index != 1000 { - t.Fatalf("Bad index: %d %d", resp.Index, 1000) - } + // Link the evals + eval1.NextEval = eval2.ID + eval2.PreviousEval = eval1.ID - if !reflect.DeepEqual(eval1, resp.Eval) { - t.Fatalf("bad: %#v %#v", eval1, resp.Eval) - } + err := s1.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1, eval2}) + require.NoError(t, err) - // Lookup non-existing node - get.EvalID = uuid.Generate() - if err := msgpackrpc.CallWithCodec(codec, "Eval.GetEval", get, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if resp.Index != 1000 { - t.Fatalf("Bad index: %d %d", resp.Index, 1000) - } - if resp.Eval != nil { - t.Fatalf("unexpected eval") - } + t.Run("lookup eval", func(t *testing.T) { + get := &structs.EvalSpecificRequest{ + EvalID: eval1.ID, + QueryOptions: structs.QueryOptions{Region: "global"}, + } + var resp structs.SingleEvalResponse + err := msgpackrpc.CallWithCodec(codec, "Eval.GetEval", get, &resp) + require.NoError(t, err) + require.EqualValues(t, 1000, resp.Index, "bad index") + require.Equal(t, eval1, resp.Eval) + }) + + t.Run("lookup non-existing eval", func(t *testing.T) { + get := &structs.EvalSpecificRequest{ + EvalID: uuid.Generate(), + QueryOptions: structs.QueryOptions{Region: "global"}, + } + var resp structs.SingleEvalResponse + err := msgpackrpc.CallWithCodec(codec, "Eval.GetEval", get, &resp) + require.NoError(t, err) + require.EqualValues(t, 1000, resp.Index, "bad index") + require.Nil(t, resp.Eval, "unexpected eval") + }) + + t.Run("lookup related evals", func(t *testing.T) { + get := &structs.EvalSpecificRequest{ + EvalID: eval1.ID, + QueryOptions: structs.QueryOptions{Region: "global"}, + IncludeRelated: true, + } + var resp structs.SingleEvalResponse + err := msgpackrpc.CallWithCodec(codec, "Eval.GetEval", get, &resp) + require.NoError(t, err) + require.EqualValues(t, 1000, resp.Index, "bad index") + require.Equal(t, eval1.ID, resp.Eval.ID) + + // Make sure we didn't modify the eval on a 
read request. + require.Nil(t, eval1.RelatedEvals) + + // Check for the related evals + expected := []*structs.EvaluationStub{ + eval2.Stub(), + } + require.Equal(t, expected, resp.Eval.RelatedEvals) + }) } func TestEvalEndpoint_GetEval_ACL(t *testing.T) { diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 42f2ebf1c..24b9e6f8c 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -3177,6 +3177,55 @@ func (s *StateStore) EvalByID(ws memdb.WatchSet, id string) (*structs.Evaluation return nil, nil } +// EvalsRelatedToID is used to retrieve the evals that are related (next, +// previous, or blocked) to the provided eval ID. +func (s *StateStore) EvalsRelatedToID(ws memdb.WatchSet, id string) ([]*structs.EvaluationStub, error) { + txn := s.db.ReadTxn() + + raw, err := txn.First("evals", "id", id) + if err != nil { + return nil, fmt.Errorf("eval lookup failed: %v", err) + } + if raw == nil { + return nil, nil + } + eval := raw.(*structs.Evaluation) + + relatedEvals := []*structs.EvaluationStub{} + todo := eval.RelatedIDs() + done := map[string]bool{ + eval.ID: true, // don't place the requested eval in the related list. + } + + for len(todo) > 0 { + // Pop the first value from the todo list. + current := todo[0] + todo = todo[1:] + if current == "" { + continue + } + + // Skip value if we already have it in the results. + if done[current] { + continue + } + + eval, err := s.EvalByID(ws, current) + if err != nil { + return nil, err + } + if eval == nil { + continue + } + + todo = append(todo, eval.RelatedIDs()...) + relatedEvals = append(relatedEvals, eval.Stub()) + done[eval.ID] = true + } + + return relatedEvals, nil +} + // EvalsByIDPrefix is used to lookup evaluations by prefix in a particular // namespace func (s *StateStore) EvalsByIDPrefix(ws memdb.WatchSet, namespace, id string, sort SortOption) (memdb.ResultIterator, error) { diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index fa2a2759e..0859b6e54 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -4598,6 +4598,125 @@ func TestStateStore_EvalsByIDPrefix_Namespaces(t *testing.T) { require.False(t, watchFired(ws)) } +func TestStateStore_EvalsRelatedToID(t *testing.T) { + t.Parallel() + + state := testStateStore(t) + + // Create sample evals. + e1 := mock.Eval() + e2 := mock.Eval() + e3 := mock.Eval() + e4 := mock.Eval() + e5 := mock.Eval() + e6 := mock.Eval() + + // Link evals. + // This is not accurate for a real scenario, but it's helpful for testing + // the general approach. + // + // e1 -> e2 -> e3 -> e5 + // └─-> e4 (blocked) -> e6 + e1.NextEval = e2.ID + e2.PreviousEval = e1.ID + + e2.NextEval = e3.ID + e3.PreviousEval = e2.ID + + e3.BlockedEval = e4.ID + e4.PreviousEval = e3.ID + + e3.NextEval = e5.ID + e5.PreviousEval = e3.ID + + e4.NextEval = e6.ID + e6.PreviousEval = e4.ID + + // Create eval not in chain. + e7 := mock.Eval() + + // Create eval with GC'ed related eval. 
+ e8 := mock.Eval() + e8.NextEval = uuid.Generate() + + err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{e1, e2, e3, e4, e5, e6, e7, e8}) + require.NoError(t, err) + + testCases := []struct { + name string + id string + expected []string + }{ + { + name: "linear history", + id: e1.ID, + expected: []string{ + e2.ID, + e3.ID, + e4.ID, + e5.ID, + e6.ID, + }, + }, + { + name: "linear history from middle", + id: e4.ID, + expected: []string{ + e1.ID, + e2.ID, + e3.ID, + e5.ID, + e6.ID, + }, + }, + { + name: "eval not in chain", + id: e7.ID, + expected: []string{}, + }, + { + name: "eval with gc", + id: e8.ID, + expected: []string{}, + }, + { + name: "non-existing eval", + id: uuid.Generate(), + expected: []string{}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ws := memdb.NewWatchSet() + related, err := state.EvalsRelatedToID(ws, tc.id) + require.NoError(t, err) + + got := []string{} + for _, e := range related { + got = append(got, e.ID) + } + require.ElementsMatch(t, tc.expected, got) + }) + } + + t.Run("blocking query", func(t *testing.T) { + ws := memdb.NewWatchSet() + _, err := state.EvalsRelatedToID(ws, e2.ID) + require.NoError(t, err) + + // Update an eval off the chain and make sure watchset doesn't fire. + e7.Status = structs.EvalStatusComplete + state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{e7}) + require.False(t, watchFired(ws)) + + // Update an eval in the chain and make sure watchset does fire. + e3.Status = structs.EvalStatusComplete + state.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{e3}) + require.True(t, watchFired(ws)) + }) +} + func TestStateStore_UpdateAllocsFromClient(t *testing.T) { ci.Parallel(t) diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 8f2974a3b..325c07f76 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -828,7 +828,8 @@ type EvalDeleteRequest struct { // EvalSpecificRequest is used when we just need to specify a target evaluation type EvalSpecificRequest struct { - EvalID string + EvalID string + IncludeRelated bool QueryOptions } @@ -10586,6 +10587,10 @@ type Evaluation struct { // to constraints or lacking resources. BlockedEval string + // RelatedEvals is a list of all the evaluations that are related (next, + // previous, or blocked) to this one. It may be nil if not requested. + RelatedEvals []*EvaluationStub + // FailedTGAllocs are task groups which have allocations that could not be // made, but the metrics are persisted so that the user can use the feedback // to determine the cause. @@ -10632,6 +10637,27 @@ type Evaluation struct { ModifyTime int64 } +type EvaluationStub struct { + ID string + Namespace string + Priority int + Type string + TriggeredBy string + JobID string + NodeID string + DeploymentID string + Status string + StatusDescription string + WaitUntil time.Time + NextEval string + PreviousEval string + BlockedEval string + CreateIndex uint64 + ModifyIndex uint64 + CreateTime int64 + ModifyTime int64 +} + // GetID implements the IDGetter interface, required for pagination. 
func (e *Evaluation) GetID() string { if e == nil { @@ -10664,6 +10690,50 @@ func (e *Evaluation) GoString() string { return fmt.Sprintf("", e.ID, e.JobID, e.Namespace) } +func (e *Evaluation) RelatedIDs() []string { + if e == nil { + return nil + } + + ids := []string{e.NextEval, e.PreviousEval, e.BlockedEval} + related := make([]string, 0, len(ids)) + + for _, id := range ids { + if id != "" { + related = append(related, id) + } + } + + return related +} + +func (e *Evaluation) Stub() *EvaluationStub { + if e == nil { + return nil + } + + return &EvaluationStub{ + ID: e.ID, + Namespace: e.Namespace, + Priority: e.Priority, + Type: e.Type, + TriggeredBy: e.TriggeredBy, + JobID: e.JobID, + NodeID: e.NodeID, + DeploymentID: e.DeploymentID, + Status: e.Status, + StatusDescription: e.StatusDescription, + WaitUntil: e.WaitUntil, + NextEval: e.NextEval, + PreviousEval: e.PreviousEval, + BlockedEval: e.BlockedEval, + CreateIndex: e.CreateIndex, + ModifyIndex: e.ModifyIndex, + CreateTime: e.CreateTime, + ModifyTime: e.ModifyTime, + } +} + func (e *Evaluation) Copy() *Evaluation { if e == nil { return nil diff --git a/website/content/api-docs/evaluations.mdx b/website/content/api-docs/evaluations.mdx index 6cb2f34d4..bd501342e 100644 --- a/website/content/api-docs/evaluations.mdx +++ b/website/content/api-docs/evaluations.mdx @@ -129,41 +129,78 @@ The table below shows this endpoint's support for must be the full UUID, not the short 8-character one. This is specified as part of the path. +- `related` `(bool: false)` - Specifies if related evaluations should be + returned. Related evaluations are the ones that can be reached by following + the trail of IDs for `NextEval`, `PreviousEval`, and `BlockedEval`. This is + specified as a query parameter. + ### Sample Request ```shell-session $ curl \ - https://localhost:4646/v1/evaluation/5456bd7a-9fc0-c0dd-6131-cbee77f57577 + https://localhost:4646/v1/evaluation/2deb5f06-a100-f01a-3316-5e501a4965e7?related=true ``` ### Sample Response ```json { - "ID": "5456bd7a-9fc0-c0dd-6131-cbee77f57577", - "Priority": 50, - "Type": "service", - "TriggeredBy": "job-register", - "JobID": "example", - "JobModifyIndex": 52, - "NodeID": "", - "NodeModifyIndex": 0, - "Status": "complete", - "StatusDescription": "", - "Wait": 0, - "NextEval": "", - "PreviousEval": "", - "BlockedEval": "", - "FailedTGAllocs": null, - "ClassEligibility": null, - "EscapedComputedClass": false, - "AnnotatePlan": false, - "SnapshotIndex": 53, - "QueuedAllocations": { - "cache": 0 + "CreateIndex": 28, + "CreateTime": 1647394818583344000, + "FailedTGAllocs": { + "cache": { + "AllocationTime": 4111, + "ClassExhausted": null, + "ClassFiltered": null, + "CoalescedFailures": 0, + "ConstraintFiltered": null, + "DimensionExhausted": null, + "NodesAvailable": { + "dc1": 0 + }, + "NodesEvaluated": 0, + "NodesExhausted": 0, + "NodesFiltered": 0, + "QuotaExhausted": null, + "ResourcesExhausted": null, + "ScoreMetaData": null, + "Scores": null + } }, - "CreateIndex": 53, - "ModifyIndex": 55 + "ID": "2deb5f06-a100-f01a-3316-5e501a4965e7", + "JobID": "example", + "ModifyIndex": 28, + "ModifyTime": 1647394818583344000, + "Namespace": "default", + "PreviousEval": "0f98f7ea-59ae-4d90-d9bd-b8ce80b9e100", + "Priority": 50, + "RelatedEvals": [ + { + "BlockedEval": "2deb5f06-a100-f01a-3316-5e501a4965e7", + "CreateIndex": 27, + "CreateTime": 1647394818582736000, + "DeploymentID": "79ae0a49-acf6-0fcf-183f-8646f3167b88", + "ID": "0f98f7ea-59ae-4d90-d9bd-b8ce80b9e100", + "JobID": "example", + "ModifyIndex": 30, + 
"ModifyTime": 1647394818583565000, + "Namespace": "default", + "NextEval": "", + "NodeID": "", + "PreviousEval": "", + "Priority": 50, + "Status": "complete", + "StatusDescription": "", + "TriggeredBy": "node-drain", + "Type": "service", + "WaitUntil": null + } + ], + "SnapshotIndex": 27, + "Status": "blocked", + "StatusDescription": "created to place remaining allocations", + "TriggeredBy": "queued-allocs", + "Type": "service" } ``` From eca4ac67f3ff43443a6ce03d545c77aac22680cc Mon Sep 17 00:00:00 2001 From: Luiz Aoqui Date: Thu, 17 Mar 2022 14:15:10 -0400 Subject: [PATCH 74/89] cli: display Raft version in `server members` (#12317) The previous output of the `nomad server members` command would output a column named `Protocol` that displayed the Serf protocol being currently used by servers. This is not a configurable option, so it holds very little value to operators. It is also easy to confuse it with the Raft Protocol version, which is configurable and highly relevant to operators. This commit replaces the previous `Protocol` column with the new `Raft Version`. It also updates the `-detailed` flag to be called `-verbose` so it matches other commands. The detailed output now also outputs the same information as the standard output with the addition of the previous `Protocol` column and `Tags`. --- .changelog/12317.txt | 7 +++ command/server_members.go | 60 +++++++++++-------- command/server_members_test.go | 6 +- .../content/docs/commands/server/members.mdx | 29 +++++---- .../content/docs/upgrade/upgrade-specific.mdx | 13 ++++ 5 files changed, 78 insertions(+), 37 deletions(-) create mode 100644 .changelog/12317.txt diff --git a/.changelog/12317.txt b/.changelog/12317.txt new file mode 100644 index 000000000..940df0c71 --- /dev/null +++ b/.changelog/12317.txt @@ -0,0 +1,7 @@ +```release-note:improvement +cli: display the Raft version instead of the Serf protocol in the `nomad server members` command +``` + +```release-note:improvement +cli: rename the `nomad server members` `-detailed` flag to `-verbose` so it matches other commands +``` diff --git a/command/server_members.go b/command/server_members.go index 540796123..769b20b81 100644 --- a/command/server_members.go +++ b/command/server_members.go @@ -32,10 +32,9 @@ General Options: Server Members Options: - -detailed - Show detailed information about each member. This dumps - a raw set of tags which shows more information than the - default output format. + -verbose + Show detailed information about each member. This dumps a raw set of tags + which shows more information than the default output format. 
` return strings.TrimSpace(helpText) } @@ -58,11 +57,12 @@ func (c *ServerMembersCommand) Synopsis() string { func (c *ServerMembersCommand) Name() string { return "server members" } func (c *ServerMembersCommand) Run(args []string) int { - var detailed bool + var detailed, verbose bool flags := c.Meta.FlagSet(c.Name(), FlagSetClient) flags.Usage = func() { c.Ui.Output(c.Help()) } flags.BoolVar(&detailed, "detailed", false, "Show detailed output") + flags.BoolVar(&verbose, "verbose", false, "Show detailed output") if err := flags.Parse(args); err != nil { return 1 @@ -76,6 +76,11 @@ func (c *ServerMembersCommand) Run(args []string) int { return 1 } + // Keep support for previous flag name + if detailed { + verbose = true + } + // Get the HTTP client client, err := c.Meta.Client() if err != nil { @@ -103,8 +108,8 @@ func (c *ServerMembersCommand) Run(args []string) int { // Format the list var out []string - if detailed { - out = detailedOutput(srvMembers.Members) + if verbose { + out = verboseOutput(srvMembers.Members, leaders) } else { out = standardOutput(srvMembers.Members, leaders) } @@ -125,25 +130,15 @@ func (c *ServerMembersCommand) Run(args []string) int { func standardOutput(mem []*api.AgentMember, leaders map[string]string) []string { // Format the members list members := make([]string, len(mem)+1) - members[0] = "Name|Address|Port|Status|Leader|Protocol|Build|Datacenter|Region" + members[0] = "Name|Address|Port|Status|Leader|Raft Version|Build|Datacenter|Region" for i, member := range mem { - reg := member.Tags["region"] - regLeader, ok := leaders[reg] - isLeader := false - if ok { - if regLeader == net.JoinHostPort(member.Addr, member.Tags["port"]) { - - isLeader = true - } - } - - members[i+1] = fmt.Sprintf("%s|%s|%d|%s|%t|%d|%s|%s|%s", + members[i+1] = fmt.Sprintf("%s|%s|%d|%s|%t|%s|%s|%s|%s", member.Name, member.Addr, member.Port, member.Status, - isLeader, - member.ProtocolCur, + isLeader(member, leaders), + member.Tags["raft_vsn"], member.Tags["build"], member.Tags["dc"], member.Tags["region"]) @@ -151,10 +146,10 @@ func standardOutput(mem []*api.AgentMember, leaders map[string]string) []string return members } -func detailedOutput(mem []*api.AgentMember) []string { +func verboseOutput(mem []*api.AgentMember, leaders map[string]string) []string { // Format the members list members := make([]string, len(mem)+1) - members[0] = "Name|Address|Port|Tags" + members[0] = "Name|Address|Port|Status|Leader|Protocol|Raft Version|Build|Datacenter|Region|Tags" for i, member := range mem { // Format the tags tagPairs := make([]string, 0, len(member.Tags)) @@ -163,11 +158,19 @@ func detailedOutput(mem []*api.AgentMember) []string { } tags := strings.Join(tagPairs, ",") - members[i+1] = fmt.Sprintf("%s|%s|%d|%s", + members[i+1] = fmt.Sprintf("%s|%s|%d|%s|%t|%d|%s|%s|%s|%s|%s", member.Name, member.Addr, member.Port, - tags) + member.Status, + isLeader(member, leaders), + member.ProtocolCur, + member.Tags["raft_vsn"], + member.Tags["build"], + member.Tags["dc"], + member.Tags["region"], + tags, + ) } return members } @@ -206,3 +209,10 @@ func regionLeaders(client *api.Client, mem []*api.AgentMember) (map[string]strin return leaders, mErr.ErrorOrNil() } + +func isLeader(member *api.AgentMember, leaders map[string]string) bool { + addr := net.JoinHostPort(member.Addr, member.Tags["port"]) + reg := member.Tags["region"] + regLeader, ok := leaders[reg] + return ok && regLeader == addr +} diff --git a/command/server_members_test.go b/command/server_members_test.go index bf2e423d7..9959157b5 
100644 --- a/command/server_members_test.go +++ b/command/server_members_test.go @@ -38,7 +38,11 @@ func TestServerMembersCommand_Run(t *testing.T) { } ui.OutputWriter.Reset() - // Query members with detailed output + // Query members with verbose output + if code := cmd.Run([]string{"-address=" + url, "-verbose"}); code != 0 { + t.Fatalf("expected exit 0, got: %d", code) + } + // Still support previous detailed flag if code := cmd.Run([]string{"-address=" + url, "-detailed"}); code != 0 { t.Fatalf("expected exit 0, got: %d", code) } diff --git a/website/content/docs/commands/server/members.mdx b/website/content/docs/commands/server/members.mdx index f40c835d2..bfd9ce255 100644 --- a/website/content/docs/commands/server/members.mdx +++ b/website/content/docs/commands/server/members.mdx @@ -27,9 +27,14 @@ capability. ## Server Members Options -- `-detailed`: Dump the basic member information as well as the raw set of tags - for each member. This mode reveals additional information not displayed in the - standard output format. +- `-detailed` (_deprecated_ use `-verbose` instead): Dump the + basic member information as well as the raw set of tags for each member. This + mode reveals additional information not displayed in the standard output + format. + +- `-verbose`: Dump the basic member information as well as the raw set of tags + for each member. This mode reveals additional information not displayed in + the standard output format. ## Examples @@ -37,16 +42,18 @@ Default view: ```shell-session $ nomad server members -Name Addr Port Status Proto Build DC Region -node1.global 10.0.0.8 4648 alive 2 0.1.0dev dc1 global -node2.global 10.0.0.9 4648 alive 2 0.1.0dev dc1 global +Name Address Port Status Leader Raft Version Build Datacenter Region +server-1.global 10.0.0.8 4648 alive true 3 1.3.0 dc1 global +server-2.global 10.0.0.9 4648 alive false 3 1.3.0 dc1 global +server-3.global 10.0.0.10 4648 alive false 3 1.3.0 dc1 global ``` -Detailed view: +Verbose view: ```shell-session -$ nomad server members -detailed -Name Addr Port Tags -node1 10.0.0.8 4648 bootstrap=1,build=0.1.0dev,vsn=1,vsn_max=1,dc=dc1,port=4647,region=global,role=nomad,vsn_min=1 -node2 10.0.0.9 4648 bootstrap=0,build=0.1.0dev,vsn=1,vsn_max=1,dc=dc1,port=4647,region=global,role=nomad,vsn_min=1 +$ nomad server members -verbose +Name Address Port Status Leader Protocol Raft Version Build Datacenter Region Tags +server-1.global 10.0.0.8 4648 alive true 2 3 1.3.0 dc1 global id=46122039-7c4d-4647-673a-81786bce2c23,rpc_addr=10.0.0.8,role=nomad,region=global,raft_vsn=3,expect=3,dc=dc1,build=1.3.0,port=4647 +server-2.global 10.0.0.9 4648 alive false 2 3 1.3.0 dc1 global id=04594bee-fec9-4cec-f308-eebe82025ae7,dc=dc1,expect=3,rpc_addr=10.0.0.9,raft_vsn=3,port=4647,role=nomad,region=global,build=1.3.0 +server-3.global 10.0.0.10 4648 alive false 2 3 1.3.0 dc1 global region=global,dc=dc1,rpc_addr=10.0.0.10,raft_vsn=3,build=1.3.0,expect=3,id=59542f6c-fb0e-50f1-4c9f-98bb593e9fe8,role=nomad,port=4647 ``` diff --git a/website/content/docs/upgrade/upgrade-specific.mdx b/website/content/docs/upgrade/upgrade-specific.mdx index c60a61777..56d2d7c44 100644 --- a/website/content/docs/upgrade/upgrade-specific.mdx +++ b/website/content/docs/upgrade/upgrade-specific.mdx @@ -81,6 +81,19 @@ server { } ``` +#### Changes to the `nomad server members` command + +The standard output of the `nomad server members` command replaces the previous +`Protocol` column that indicated the Serf protocol version with a new column +named `Raft Version` which outputs the 
Raft protocol version defined in each +server. + +The `-detailed` flag is now called `-verbose` and outputs the standard values +in addition to extra information. The previous name is still supported but may +be removed in future releases. + +The previous `Protocol` value can be viewed using the `-verbose` flag. + ## Nomad 1.2.6, 1.1.12, and 1.0.18 #### ACL requirement for the job parse endpoint From fec8d6e030c206e20db6e9edb09dd6ca7e68a2a7 Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Thu, 17 Mar 2022 13:45:56 -0500 Subject: [PATCH 75/89] ci: do not exclude Parallel semgrep rule --- .semgrep/go_tests.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.semgrep/go_tests.yml b/.semgrep/go_tests.yml index 9790a93bd..07878c82a 100644 --- a/.semgrep/go_tests.yml +++ b/.semgrep/go_tests.yml @@ -128,6 +128,5 @@ rules: severity: "WARNING" fix: "ci.Parallel(t)" paths: - exclude: ["*"] include: - "*_test.go" From 020fa6f8ba422dfb8097488afb10b5e1497dfe17 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Fri, 18 Mar 2022 09:27:28 -0400 Subject: [PATCH 76/89] E2E with HCP Consul/Vault (#12267) Use HCP Consul and HCP Vault for the Consul and Vault clusters used in E2E testing. This has the following benefits: * Without the need to support mTLS bootstrapping for Consul and Vault, we can simplify the mTLS configuration by leaning on Terraform instead of janky bash shell scripting. * Vault bootstrapping is no longer required, so we can eliminate even more janky shell scripting * Our E2E exercises HCP, which is important to us as an organization * With the reduction in configurability, we can simplify the Terraform configuration and drop the complicated `provision.sh`/`provision.ps1` scripts we were using previously. We can template Nomad configuration files and upload them with the `file` provisioner. * Packer builds for Linux and Windows become much simpler. tl;dr way less janky shell scripting! 
--- e2e/connect/acls.go | 68 ++-- e2e/connect/connect.go | 25 +- e2e/consul/consul.go | 1 - e2e/consul/namespaces.go | 28 +- e2e/consul/namespaces_oss.go | 21 ++ e2e/metrics/input/prometheus.nomad | 1 - e2e/nodedrain/nodedrain.go | 15 +- e2e/terraform/.terraform.lock.hcl | 54 +++ e2e/terraform/Makefile | 68 ++-- e2e/terraform/README.md | 125 +++---- e2e/terraform/compute.tf | 5 +- e2e/terraform/config/.gitignore | 1 - .../consul/client-linux/indexed/.gitkeep | 0 .../consul/client-windows/indexed/.gitkeep | 0 .../custom/consul/server/indexed/.gitkeep | 0 .../nomad/client-linux/indexed/.gitkeep | 0 .../nomad/client-windows/indexed/.gitkeep | 0 .../custom/nomad/server/indexed/.gitkeep | 0 .../config/dev-cluster/consul/aws.json | 4 - .../config/dev-cluster/consul/base.json | 15 - .../dev-cluster/consul/server/server.json | 8 - .../config/dev-cluster/nomad/base.hcl | 19 -- .../dev-cluster/nomad/client-linux/client.hcl | 51 --- .../nomad/client-windows/client-windows.hcl | 36 -- .../config/dev-cluster/vault/server/vault.hcl | 15 - .../config/full-cluster/consul/aws.json | 4 - .../full-cluster/consul/server/server.json | 8 - .../nomad/client-linux/indexed/client-0.hcl | 50 --- .../nomad/client-linux/indexed/client-2.hcl | 39 --- .../nomad/client-linux/indexed/client-3.hcl | 30 -- .../nomad/client-windows/client-windows.hcl | 36 -- .../full-cluster/nomad/server/server.hcl | 4 - .../full-cluster/vault/server/vault.hcl | 15 - e2e/terraform/config/shared/README.md | 5 - e2e/terraform/config/shared/consul-tls.json | 11 - e2e/terraform/config/shared/nomad-acl.hcl | 3 - e2e/terraform/config/shared/nomad-tls.hcl | 29 -- e2e/terraform/config/shared/vault-tls.hcl | 26 -- .../etc/acls/consul/consul-agent-policy.hcl | 32 ++ .../etc/acls/consul/nomad-client-policy.hcl | 31 ++ .../etc/acls/consul/nomad-server-policy.hcl | 27 ++ .../acls/vault/nomad-policy.hcl} | 10 +- e2e/terraform/etc/consul.d/client_acl.json | 8 + .../base.json => etc/consul.d/clients.json} | 1 - e2e/terraform/etc/consul.d/consul.service | 17 + e2e/terraform/etc/nomad.d/.environment | 1 + .../nomad => etc/nomad.d}/base.hcl | 13 +- e2e/terraform/etc/nomad.d/client-linux-0.hcl | 9 + e2e/terraform/etc/nomad.d/client-linux-1.hcl | 5 + e2e/terraform/etc/nomad.d/client-linux-2.hcl | 7 + e2e/terraform/etc/nomad.d/client-linux-3.hcl | 7 + .../nomad.d/client-linux.hcl} | 12 +- e2e/terraform/etc/nomad.d/client-windows.hcl | 12 + e2e/terraform/etc/nomad.d/consul.hcl | 6 + e2e/terraform/etc/nomad.d/index.hcl | 1 + .../etc/nomad.d/nomad-client.service | 21 ++ .../etc/nomad.d/nomad-server.service | 21 ++ .../nomad.d/server-linux.hcl} | 4 + e2e/terraform/etc/nomad.d/tls.hcl | 11 + e2e/terraform/etc/nomad.d/vault.hcl | 8 + e2e/terraform/hcp-vault-auth/main.tf | 47 +++ e2e/terraform/hcp_consul.tf | 127 +++++++ e2e/terraform/hcp_vault.tf | 49 +++ e2e/terraform/nomad-acls.tf | 4 +- e2e/terraform/nomad.tf | 83 +---- e2e/terraform/outputs.tf | 25 +- .../packer/ubuntu-bionic-amd64.pkr.hcl | 2 +- .../packer/ubuntu-bionic-amd64/provision.sh | 256 -------------- .../packer/ubuntu-bionic-amd64/setup.sh | 47 +-- .../packer/ubuntu-bionic-amd64/vault.service | 33 -- .../packer/windows-2016-amd64.pkr.hcl | 14 +- .../windows-2016-amd64/install-consul.ps1 | 10 +- .../windows-2016-amd64/install-nomad.ps1 | 46 +++ .../packer/windows-2016-amd64/provision.ps1 | 263 --------------- .../packer/windows-2016-amd64/userdata.ps1 | 13 +- .../provision-nomad/install-linux.tf | 129 +++++++ .../provision-nomad/install-windows.tf | 123 +++++++ e2e/terraform/provision-nomad/main.tf | 314 
++++++------------ e2e/terraform/provision-nomad/tls.tf | 42 +++ e2e/terraform/provision-nomad/variables.tf | 64 +--- e2e/terraform/scripts/bootstrap-vault.sh | 40 --- .../scripts/vault-nomad-cluster-role.json | 8 - e2e/terraform/terraform.full.tfvars | 19 -- e2e/terraform/terraform.tfvars | 25 +- e2e/terraform/tests/1-expected.json | 12 - e2e/terraform/tests/1-test.tfvars | 8 - e2e/terraform/tests/2-expected.json | 12 - e2e/terraform/tests/2-test.tfvars | 20 -- e2e/terraform/tests/3-expected.json | 12 - e2e/terraform/tests/3-test.tfvars | 8 - e2e/terraform/tests/4-expected.json | 12 - e2e/terraform/tests/4-test.tfvars | 20 -- e2e/terraform/tests/5-expected.json | 12 - e2e/terraform/tests/5-test.tfvars | 8 - e2e/terraform/tests/6-expected.json | 12 - e2e/terraform/tests/6-test.tfvars | 18 - e2e/terraform/tests/7-expected.json | 12 - e2e/terraform/tests/7-test.tfvars | 20 -- e2e/terraform/tests/config | 1 - e2e/terraform/tests/mock-1 | 1 - e2e/terraform/tests/mock-2 | 1 - e2e/terraform/tests/nomad.tf | 61 ---- e2e/terraform/tests/test.sh | 54 --- e2e/terraform/tests/variables.tf | 1 - e2e/terraform/tls_ca.tf | 43 +-- e2e/terraform/tls_client.tf | 42 +++ e2e/terraform/uploads/README.md | 6 + e2e/terraform/variables.tf | 115 ++----- e2e/terraform/vault.tf | 69 ---- 109 files changed, 1285 insertions(+), 2162 deletions(-) delete mode 100644 e2e/terraform/config/.gitignore delete mode 100644 e2e/terraform/config/custom/consul/client-linux/indexed/.gitkeep delete mode 100644 e2e/terraform/config/custom/consul/client-windows/indexed/.gitkeep delete mode 100644 e2e/terraform/config/custom/consul/server/indexed/.gitkeep delete mode 100644 e2e/terraform/config/custom/nomad/client-linux/indexed/.gitkeep delete mode 100644 e2e/terraform/config/custom/nomad/client-windows/indexed/.gitkeep delete mode 100644 e2e/terraform/config/custom/nomad/server/indexed/.gitkeep delete mode 100644 e2e/terraform/config/dev-cluster/consul/aws.json delete mode 100644 e2e/terraform/config/dev-cluster/consul/base.json delete mode 100644 e2e/terraform/config/dev-cluster/consul/server/server.json delete mode 100644 e2e/terraform/config/dev-cluster/nomad/base.hcl delete mode 100644 e2e/terraform/config/dev-cluster/nomad/client-linux/client.hcl delete mode 100644 e2e/terraform/config/dev-cluster/nomad/client-windows/client-windows.hcl delete mode 100644 e2e/terraform/config/dev-cluster/vault/server/vault.hcl delete mode 100644 e2e/terraform/config/full-cluster/consul/aws.json delete mode 100644 e2e/terraform/config/full-cluster/consul/server/server.json delete mode 100644 e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-0.hcl delete mode 100644 e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-2.hcl delete mode 100644 e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-3.hcl delete mode 100644 e2e/terraform/config/full-cluster/nomad/client-windows/client-windows.hcl delete mode 100644 e2e/terraform/config/full-cluster/nomad/server/server.hcl delete mode 100644 e2e/terraform/config/full-cluster/vault/server/vault.hcl delete mode 100644 e2e/terraform/config/shared/README.md delete mode 100644 e2e/terraform/config/shared/consul-tls.json delete mode 100644 e2e/terraform/config/shared/nomad-acl.hcl delete mode 100644 e2e/terraform/config/shared/nomad-tls.hcl delete mode 100644 e2e/terraform/config/shared/vault-tls.hcl create mode 100644 e2e/terraform/etc/acls/consul/consul-agent-policy.hcl create mode 100644 e2e/terraform/etc/acls/consul/nomad-client-policy.hcl create mode 
100644 e2e/terraform/etc/acls/consul/nomad-server-policy.hcl rename e2e/terraform/{scripts/vault-nomad-server-policy.hcl => etc/acls/vault/nomad-policy.hcl} (81%) create mode 100644 e2e/terraform/etc/consul.d/client_acl.json rename e2e/terraform/{config/full-cluster/consul/base.json => etc/consul.d/clients.json} (90%) create mode 100644 e2e/terraform/etc/consul.d/consul.service create mode 100644 e2e/terraform/etc/nomad.d/.environment rename e2e/terraform/{config/full-cluster/nomad => etc/nomad.d}/base.hcl (67%) create mode 100644 e2e/terraform/etc/nomad.d/client-linux-0.hcl create mode 100644 e2e/terraform/etc/nomad.d/client-linux-1.hcl create mode 100644 e2e/terraform/etc/nomad.d/client-linux-2.hcl create mode 100644 e2e/terraform/etc/nomad.d/client-linux-3.hcl rename e2e/terraform/{config/full-cluster/nomad/client-linux/indexed/client-1.hcl => etc/nomad.d/client-linux.hcl} (79%) create mode 100644 e2e/terraform/etc/nomad.d/client-windows.hcl create mode 100644 e2e/terraform/etc/nomad.d/consul.hcl create mode 100644 e2e/terraform/etc/nomad.d/index.hcl create mode 100644 e2e/terraform/etc/nomad.d/nomad-client.service create mode 100644 e2e/terraform/etc/nomad.d/nomad-server.service rename e2e/terraform/{config/dev-cluster/nomad/server/server.hcl => etc/nomad.d/server-linux.hcl} (69%) create mode 100644 e2e/terraform/etc/nomad.d/tls.hcl create mode 100644 e2e/terraform/etc/nomad.d/vault.hcl create mode 100644 e2e/terraform/hcp-vault-auth/main.tf create mode 100644 e2e/terraform/hcp_consul.tf create mode 100644 e2e/terraform/hcp_vault.tf delete mode 100755 e2e/terraform/packer/ubuntu-bionic-amd64/provision.sh delete mode 100644 e2e/terraform/packer/ubuntu-bionic-amd64/vault.service create mode 100755 e2e/terraform/packer/windows-2016-amd64/install-nomad.ps1 delete mode 100755 e2e/terraform/packer/windows-2016-amd64/provision.ps1 create mode 100644 e2e/terraform/provision-nomad/install-linux.tf create mode 100644 e2e/terraform/provision-nomad/install-windows.tf create mode 100644 e2e/terraform/provision-nomad/tls.tf delete mode 100755 e2e/terraform/scripts/bootstrap-vault.sh delete mode 100644 e2e/terraform/scripts/vault-nomad-cluster-role.json delete mode 100644 e2e/terraform/terraform.full.tfvars delete mode 100644 e2e/terraform/tests/1-expected.json delete mode 100644 e2e/terraform/tests/1-test.tfvars delete mode 100644 e2e/terraform/tests/2-expected.json delete mode 100644 e2e/terraform/tests/2-test.tfvars delete mode 100644 e2e/terraform/tests/3-expected.json delete mode 100644 e2e/terraform/tests/3-test.tfvars delete mode 100644 e2e/terraform/tests/4-expected.json delete mode 100644 e2e/terraform/tests/4-test.tfvars delete mode 100644 e2e/terraform/tests/5-expected.json delete mode 100644 e2e/terraform/tests/5-test.tfvars delete mode 100644 e2e/terraform/tests/6-expected.json delete mode 100644 e2e/terraform/tests/6-test.tfvars delete mode 100644 e2e/terraform/tests/7-expected.json delete mode 100644 e2e/terraform/tests/7-test.tfvars delete mode 120000 e2e/terraform/tests/config delete mode 100644 e2e/terraform/tests/mock-1 delete mode 100644 e2e/terraform/tests/mock-2 delete mode 100644 e2e/terraform/tests/nomad.tf delete mode 100755 e2e/terraform/tests/test.sh delete mode 120000 e2e/terraform/tests/variables.tf create mode 100644 e2e/terraform/tls_client.tf create mode 100644 e2e/terraform/uploads/README.md delete mode 100644 e2e/terraform/vault.tf diff --git a/e2e/connect/acls.go b/e2e/connect/acls.go index 2a455c937..a81fa97f9 100644 --- a/e2e/connect/acls.go +++ 
b/e2e/connect/acls.go @@ -7,8 +7,8 @@ import ( "time" consulapi "github.com/hashicorp/consul/api" + uuidparse "github.com/hashicorp/go-uuid" nomadapi "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/e2e/consulacls" "github.com/hashicorp/nomad/e2e/e2eutil" "github.com/hashicorp/nomad/e2e/framework" "github.com/hashicorp/nomad/helper/uuid" @@ -19,12 +19,8 @@ import ( type ConnectACLsE2ETest struct { framework.TC - // manageConsulACLs is used to 'enable' and 'disable' Consul ACLs in the - // Consul Cluster that has been setup for e2e testing. - manageConsulACLs consulacls.Manager - - // consulManagementToken is set to the generated Consul ACL token after using - // the consul-acls-manage.sh script to enable ACLs. + // used to store the root token so we can reset the client back to + // it as needed consulManagementToken string // things to cleanup after each test case @@ -38,47 +34,12 @@ func (tc *ConnectACLsE2ETest) BeforeAll(f *framework.F) { e2eutil.WaitForLeader(f.T(), tc.Nomad()) e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 2) - // Now enable Consul ACLs, the bootstrapping process for which will be - // managed automatically if needed. - var err error - tc.manageConsulACLs, err = consulacls.New(consulacls.DefaultTFStateFile) - require.NoError(f.T(), err) - tc.enableConsulACLs(f) - - // Validate the consul master token exists, otherwise tests are just + // Validate the consul root token exists, otherwise tests are just // going to be a train wreck. - tokenLength := len(tc.consulManagementToken) - require.Equal(f.T(), 36, tokenLength, "consul master token wrong length") + tc.consulManagementToken = os.Getenv(envConsulToken) - // Validate the CONSUL_HTTP_TOKEN is NOT set, because that will cause - // the agent checks to fail (which do not allow having a token set (!)). - consulTokenEnv := os.Getenv(envConsulToken) - require.Empty(f.T(), consulTokenEnv) - - // Wait for Nomad to be ready _again_, since everything was restarted during - // the bootstrap process. - e2eutil.WaitForLeader(f.T(), tc.Nomad()) - e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 2) -} - -// enableConsulACLs effectively executes `consul-acls-manage.sh enable`, which -// will activate Consul ACLs, going through the bootstrap process if necessary. -func (tc *ConnectACLsE2ETest) enableConsulACLs(f *framework.F) { - tc.consulManagementToken = tc.manageConsulACLs.Enable(f.T()) -} - -// AfterAll runs after all tests are complete. -// -// We disable ConsulACLs in here to isolate the use of Consul ACLs only to -// test suites that explicitly want to test with them enabled. -func (tc *ConnectACLsE2ETest) AfterAll(f *framework.F) { - tc.disableConsulACLs(f) -} - -// disableConsulACLs effectively executes `consul-acls-manage.sh disable`, which -// will de-activate Consul ACLs. -func (tc *ConnectACLsE2ETest) disableConsulACLs(f *framework.F) { - tc.manageConsulACLs.Disable(f.T()) + _, err := uuidparse.ParseUUID(tc.consulManagementToken) + f.NoError(err, "CONSUL_HTTP_TOKEN not set") } // AfterEach does cleanup of Consul ACL objects that were created during each @@ -175,6 +136,7 @@ func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterMasterToken(f *framework.F) // One should never do this in practice, but, it should work. 
// https://www.consul.io/docs/acl/acl-system.html#builtin-tokens job.ConsulToken = &tc.consulManagementToken + job.ID = &jobID // Avoid using Register here, because that would actually create and run the // Job which runs the task, creates the SI token, which all needs to be @@ -188,15 +150,20 @@ func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterMasterToken(f *framework.F) func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterMissingOperatorToken(f *framework.F) { t := f.T() + t.Skip("we don't have consul.allow_unauthenticated=false set because it would require updating every E2E test to pass a Consul token") + t.Log("test register Connect job w/ ACLs enabled w/o operator token") + jobID := "connect" + uuid.Short() + tc.jobIDs = append(tc.jobIDs, jobID) // need to clean up if the test fails + job, err := jobspec.ParseFile(demoConnectJob) f.NoError(err) - jobAPI := tc.Nomad().Jobs() // Explicitly show the ConsulToken is not set job.ConsulToken = nil + job.ID = &jobID _, _, err = jobAPI.Register(job, nil) f.Error(err) @@ -207,6 +174,8 @@ func (tc *ConnectACLsE2ETest) TestConnectACLsRegisterFakeOperatorToken(f *framework.F) { t := f.T() + t.Skip("we don't have consul.allow_unauthenticated=false set because it would require updating every E2E test to pass a Consul token") + t.Log("test register Connect job w/ ACLs enabled w/ operator token") policyID := tc.createConsulPolicy(consulPolicy{ @@ -217,12 +186,17 @@ // generate a fake consul token fakeToken := uuid.Generate() + + jobID := "connect" + uuid.Short() + tc.jobIDs = append(tc.jobIDs, jobID) // need to clean up if the test fails + job := tc.parseJobSpecFile(t, demoConnectJob) jobAPI := tc.Nomad().Jobs() // deliberately set the fake Consul token job.ConsulToken = &fakeToken + job.ID = &jobID // should fail, because the token is fake _, _, err := jobAPI.Register(job, nil) diff --git a/e2e/connect/connect.go b/e2e/connect/connect.go index d224b0161..189ca7220 100644 --- a/e2e/connect/connect.go +++ b/e2e/connect/connect.go @@ -49,22 +49,15 @@ func init() { }, }) - // Connect tests with Consul ACLs enabled. These are now gated behind the - // NOMAD_TEST_CONSUL_ACLS environment variable, because they cause lots of - // problems for e2e test flakiness (due to restarting consul, nomad, etc.). - // - // Run these tests locally when working on Connect. 
- if os.Getenv("NOMAD_TEST_CONSUL_ACLS") == "1" { - framework.AddSuites(&framework.TestSuite{ - Component: "ConnectACLs", - CanRunLocal: false, - Consul: true, - Parallel: false, - Cases: []framework.TestCase{ - new(ConnectACLsE2ETest), - }, - }) - } + framework.AddSuites(&framework.TestSuite{ + Component: "ConnectACLs", + CanRunLocal: false, + Consul: true, + Parallel: false, + Cases: []framework.TestCase{ + new(ConnectACLsE2ETest), + }, + }) } func (tc *ConnectE2ETest) BeforeAll(f *framework.F) { diff --git a/e2e/consul/consul.go b/e2e/consul/consul.go index 1a68ff770..a277f81c8 100644 --- a/e2e/consul/consul.go +++ b/e2e/consul/consul.go @@ -42,7 +42,6 @@ func init() { new(ScriptChecksE2ETest), new(CheckRestartE2ETest), new(OnUpdateChecksTest), - new(ConsulNamespacesE2ETest), }, }) } diff --git a/e2e/consul/namespaces.go b/e2e/consul/namespaces.go index 407e6dfb0..9429ecd6f 100644 --- a/e2e/consul/namespaces.go +++ b/e2e/consul/namespaces.go @@ -2,6 +2,7 @@ package consul import ( "fmt" + "os" "sort" capi "github.com/hashicorp/consul/api" @@ -37,20 +38,40 @@ var ( allConsulNamespaces = append(consulNamespaces, "default") ) +func init() { + framework.AddSuites(&framework.TestSuite{ + Component: "ConsulNamespaces", + CanRunLocal: true, + Consul: true, + Cases: []framework.TestCase{ + new(ConsulNamespacesE2ETest), + }, + }) +} + type ConsulNamespacesE2ETest struct { framework.TC jobIDs []string - // cToken contains the Consul global-management token during ACL enabled - // tests (i.e. ConsulNamespacesE2ETestACLs which embeds ConsulNamespacesE2ETest). + // cToken contains the Consul global-management token cToken string + + // created policy and token IDs should be set here so they can be cleaned + // up after each test case, organized by namespace + policyIDs map[string][]string + tokenIDs map[string][]string } func (tc *ConsulNamespacesE2ETest) BeforeAll(f *framework.F) { + tc.policyIDs = make(map[string][]string) + tc.tokenIDs = make(map[string][]string) + e2eutil.WaitForLeader(f.T(), tc.Nomad()) e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 1) + tc.cToken = os.Getenv("CONSUL_HTTP_TOKEN") + // create a set of consul namespaces in which to register services e2eutil.CreateConsulNamespaces(f.T(), tc.Consul(), consulNamespaces) @@ -61,9 +82,6 @@ func (tc *ConsulNamespacesE2ETest) BeforeAll(f *framework.F) { value := fmt.Sprintf("ns_%s", namespace) e2eutil.PutConsulKey(f.T(), tc.Consul(), namespace, "ns-kv-example", value) } - - // make the unused variable linter happy in oss - f.T().Log("Consul global-management token:", tc.cToken) } func (tc *ConsulNamespacesE2ETest) AfterAll(f *framework.F) { diff --git a/e2e/consul/namespaces_oss.go b/e2e/consul/namespaces_oss.go index ce41810ae..f0049606c 100644 --- a/e2e/consul/namespaces_oss.go +++ b/e2e/consul/namespaces_oss.go @@ -9,6 +9,7 @@ package consul import ( + "os" "sort" capi "github.com/hashicorp/consul/api" @@ -17,6 +18,26 @@ import ( "github.com/stretchr/testify/require" ) +func (tc *ConsulNamespacesE2ETest) AfterEach(f *framework.F) { + if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" { + return + } + + // cleanup jobs + for _, id := range tc.jobIDs { + _, _, err := tc.Nomad().Jobs().Deregister(id, true, nil) + f.NoError(err) + } + + // do garbage collection + err := tc.Nomad().System().GarbageCollect() + f.NoError(err) + + // reset accumulators + tc.tokenIDs = make(map[string][]string) + tc.policyIDs = make(map[string][]string) +} + func (tc *ConsulNamespacesE2ETest) TestConsulRegisterGroupServices(f *framework.F) { nomadClient := 
tc.Nomad() jobID := "cns-group-services" diff --git a/e2e/metrics/input/prometheus.nomad b/e2e/metrics/input/prometheus.nomad index f191490bf..e54d5d42b 100644 --- a/e2e/metrics/input/prometheus.nomad +++ b/e2e/metrics/input/prometheus.nomad @@ -44,7 +44,6 @@ scrape_configs: consul_sd_configs: - server: '{{ env "NOMAD_IP_prometheus_ui" }}:8500' - services: ['nomad-client', 'nomad'] relabel_configs: - source_labels: ['__meta_consul_tags'] diff --git a/e2e/nodedrain/nodedrain.go b/e2e/nodedrain/nodedrain.go index 3aab90060..bd9cbb154 100644 --- a/e2e/nodedrain/nodedrain.go +++ b/e2e/nodedrain/nodedrain.go @@ -51,6 +51,8 @@ func (tc *NodeDrainE2ETest) AfterEach(f *framework.F) { for _, id := range tc.nodeIDs { _, err := e2e.Command("nomad", "node", "drain", "-disable", "-yes", id) f.Assert().NoError(err) + _, err = e2e.Command("nomad", "node", "eligibility", "-enable", id) + f.Assert().NoError(err) } tc.nodeIDs = []string{} @@ -140,7 +142,7 @@ func (tc *NodeDrainE2ETest) TestNodeDrainEphemeralMigrate(f *framework.F) { // match the old allocation, not the running one var got string var fsErr error - testutil.WaitForResultRetries(500, func() (bool, error) { + testutil.WaitForResultRetries(10, func() (bool, error) { time.Sleep(time.Millisecond * 100) for _, alloc := range allocs { if alloc["Status"] == "running" && alloc["Node ID"] != nodeID && alloc["ID"] != oldAllocID { @@ -149,18 +151,15 @@ func (tc *NodeDrainE2ETest) TestNodeDrainEphemeralMigrate(f *framework.F) { if err != nil { return false, err } - if strings.TrimSpace(got) == oldAllocID { - return true, nil - } else { - return false, fmt.Errorf("expected %q, got %q", oldAllocID, got) - } + return true, nil } } - return false, fmt.Errorf("did not find a migrated alloc") + return false, fmt.Errorf("missing expected allocation") }, func(e error) { fsErr = e }) - f.NoError(fsErr, "node drained but migration failed") + f.NoError(fsErr, "could not get allocation data") + f.Equal(oldAllocID, strings.TrimSpace(got), "node drained but migration failed") } // TestNodeDrainIgnoreSystem tests that system jobs are left behind when the diff --git a/e2e/terraform/.terraform.lock.hcl b/e2e/terraform/.terraform.lock.hcl index 0b3477d0d..8e46278d6 100644 --- a/e2e/terraform/.terraform.lock.hcl +++ b/e2e/terraform/.terraform.lock.hcl @@ -19,6 +19,24 @@ provider "registry.terraform.io/hashicorp/aws" { ] } +provider "registry.terraform.io/hashicorp/consul" { + version = "2.14.0" + hashes = [ + "h1:fbE0ZM8D8Q9m+BsHiYMAO+DQLwXOJoAlg8XXUq5FIrY=", + "zh:06dcca1f76b839af8f86c7b6f65b944003a7a35b30b865b3884f48e2c42f9aee", + "zh:16111df6a485e21cee6ca33cb863434baa1ca360c819c8e2af85e465c1361d2b", + "zh:26b59c82ac2861b2651c1fa31955c3e7790e3c2d5d097f22aa34d3c294da63cf", + "zh:70fd6853099126a602d5ac26caa80214a4a8a38f0cad8a5e3b7bef49923419d3", + "zh:7d4f0061d6fb86e0a5639ed02381063b868245082ec4e3a461bcda964ed00fcc", + "zh:a48cbf57d6511922362d5b0f76f449fba7a550c9d0702635fabb43b4f0a09fc0", + "zh:bb54994a53dd8e1ff84ca50742ce893863dc166fd41b91d951f4cb89fe6a6bc0", + "zh:bc61b19ee3c8d55a9915a3ad84203c87bfd0d57eca8eec788524b14e8b67f090", + "zh:cbe3238e756ada23c1e7c97c42a5c72bf810dc5bd1265c9f074c3e739d1090b0", + "zh:e30198054239eab46493e59956b9cd8c376c3bbd9515ac102a96d1fbd32e423f", + "zh:e74365dba529a0676107e413986d7be81c2125c197754ce69e3e89d8daa53153", + ] +} + provider "registry.terraform.io/hashicorp/external" { version = "2.1.0" hashes = [ @@ -37,6 +55,24 @@ provider "registry.terraform.io/hashicorp/external" { ] } +provider "registry.terraform.io/hashicorp/hcp" { + version = 
"0.23.1" + hashes = [ + "h1:OeCY9pcVhlaVbONZ8fQ7Dgm/hFmkhmXXWJaAnLitkqM=", + "zh:02c661913643a56ba640432a0bcdf2824218a3598a243da4fd6079238164e7f6", + "zh:2359656d097fb1164bfe961314dafdac80f272c9bc0e359a6e43f5467a231e8f", + "zh:2463ac7e40702cbb4ebd4a397964b87de1b65dcb6982eab32f2bd40c9a5b1294", + "zh:420ef5061b936741a469b4e02dfe9ee047d928c294647e8c5f93e4a8890997a3", + "zh:5eba99a60a3366cd97b70a4ee26cb4489ca320699010bd03ca726772a10089c1", + "zh:82419028e8691acbb2c3f7e7d8c2c931ee03d6b3df6b97f5b965365f0a90392f", + "zh:93b7eecff21055c8b46d5a69ba982abc76479f73a78f67fc86fc86ba56f630cd", + "zh:c151238e96c30126529ccc42bf06d84f73fcd87ee40dbb493be8d85ef0efd453", + "zh:d476ebe1a628abd08d11354a13e5b8aa708d820dcad78587b8440d12f0e219ef", + "zh:e48130a57cf930755983b861768b8e88767e11df33640386d03496d551fb64ce", + "zh:ed9cf5173ea09010ef5ecae452dd3da52054a659e23af8d8e1ed6a45270cd531", + ] +} + provider "registry.terraform.io/hashicorp/http" { version = "2.1.0" hashes = [ @@ -143,3 +179,21 @@ provider "registry.terraform.io/hashicorp/tls" { "zh:fc1e12b713837b85daf6c3bb703d7795eaf1c5177aebae1afcf811dd7009f4b0", ] } + +provider "registry.terraform.io/hashicorp/vault" { + version = "3.3.1" + hashes = [ + "h1:4u5bqCcflSWqJgr3+/02HtP+ZuF4tUaEIUMTW0nv98k=", + "zh:3e1866037f43c1083ff825dce2a9e3853c757bb0121c5ae528ee3cf3f99b4113", + "zh:49636cc5c4939134e098c4ec0163c41fae103f24d7e1e8fc0432f8ad93d596a0", + "zh:5258a7001719c4aeb84f4c4da7115b795da4794754938a3c4176a4b578fe93a1", + "zh:7461738691e2e8ea91aba73d4351cfbc30fcaedcf0e332c9d35ef215f93aa282", + "zh:815529478e33a6727273b08340a4c62c9aeb3da02abf8f091bb4f545c8451fce", + "zh:8e6fede9f5e25b507faf6cacd61b997035b8b62859245861149ddb2990ada8eb", + "zh:9acc2387084b9c411e264c4351633bc82f9c4e420f8e6bbad9f87b145351f929", + "zh:b9e4af3b06386ceed720f0163a1496088c154aa1430ae072c525ffefa4b37891", + "zh:c7d5dfb8f8536694db6740e2a4afd2d681b60b396ded469282524c62ce154861", + "zh:d0850be710c6fd682634a2f823beed0164231cc873b1dc09038aa477c926f57c", + "zh:e90c2cba9d89db5eab295b2f046f24a53f23002bcfe008633d398fb3fa16d941", + ] +} diff --git a/e2e/terraform/Makefile b/e2e/terraform/Makefile index 19b42b34e..3f2f97137 100644 --- a/e2e/terraform/Makefile +++ b/e2e/terraform/Makefile @@ -1,32 +1,52 @@ -NOMAD_SHA ?= $(shell git rev-parse HEAD) PKG_PATH = $(shell pwd)/../../pkg/linux_amd64/nomad +LICENSE_PATH ?= -# The version of nomad that gets deployed depends on an order of precedence -# linked below -# https://github.com/hashicorp/nomad/blob/main/e2e/terraform/README.md#nomad-version -dev-cluster: +# deploy for quick local development testing + +plan: + terraform plan \ + -var="nomad_local_binary=$(PKG_PATH)" \ + -var="volumes=false" \ + -var="client_count_ubuntu_bionic_amd64=2" \ + -var="client_count_windows_2016_amd64=0" + +apply: terraform apply -auto-approve \ - -var="nomad_sha=$(NOMAD_SHA)" - terraform output environment + -var="nomad_local_binary=$(PKG_PATH)" \ + -var="volumes=false" \ + -var="client_count_ubuntu_bionic_amd64=2" \ + -var="client_count_windows_2016_amd64=0" -dev-cluster-from-local: - terraform apply -auto-approve \ - -var="nomad_local_binary=$(PKG_PATH)" - terraform output environment +clean: destroy tidy -clean: +destroy: + terraform destroy -auto-approve \ + -var="nomad_local_binary=$(PKG_PATH)" \ + -var="client_count_ubuntu_bionic_amd64=2" \ + -var="client_count_windows_2016_amd64=0" + +# deploy what's in E2E nightly + +plan_full: + terraform plan + +apply_full: + @terraform apply -auto-approve \ + -var="nomad_license=$(shell cat $(LICENSE_PATH))" + +clean_full: 
destroy_full tidy + +destroy_full: terraform destroy -auto-approve -full-cluster: - terraform apply -auto-approve \ - -var-file=terraform.full.tfvars \ - -var="nomad_sha=$(NOMAD_SHA)" +# util -plan-dev-cluster: - terraform plan \ - -var="nomad_sha=$(NOMAD_SHA)" - -plan-full-cluster: - terraform plan \ - -var-file=terraform.full.tfvars \ - -var="nomad_sha=$(NOMAD_SHA)" +# don't run this by default in plan/apply because it prevents you from +# updating a running cluster +tidy: + rm -rf keys + mkdir keys + chmod 0700 keys + rm -rf uploads/* + git checkout uploads/README.md + rm -f terraform.tfstate.*.backup diff --git a/e2e/terraform/README.md b/e2e/terraform/README.md index 33f5654a9..7ad9d66b8 100644 --- a/e2e/terraform/README.md +++ b/e2e/terraform/README.md @@ -1,23 +1,46 @@ # Terraform infrastructure -This folder contains Terraform resources for provisioning a Nomad cluster on -EC2 instances on AWS to use as the target of end-to-end tests. +This folder contains Terraform resources for provisioning a Nomad +cluster on EC2 instances on AWS to use as the target of end-to-end +tests. -Terraform provisions the AWS infrastructure assuming that EC2 AMIs have -already been built via Packer. It deploys a specific build of Nomad to the -cluster along with configuration files for Nomad, Consul, and Vault. +Terraform provisions the AWS infrastructure assuming that EC2 AMIs +have already been built via Packer and HCP Consul and HCP Vault +clusters are already running. It deploys a build of Nomad from your +local machine along with configuration files. ## Setup -You'll need Terraform 0.14.7+, as well as AWS credentials to create the Nomad -cluster. This Terraform stack assumes that an appropriate instance role has -been configured elsewhere and that you have the ability to `AssumeRole` into -the AWS account. +You'll need a recent version of Terraform (1.1+ recommended), as well +as AWS credentials to create the Nomad cluster and credentials for +HCP. This Terraform stack assumes that an appropriate instance role +has been configured elsewhere and that you have the ability to +`AssumeRole` into the AWS account. -Optionally, edit the `terraform.tfvars` file to change the number of Linux -clients or Windows clients. The Terraform variables file -`terraform.full.tfvars` is for the nightly E2E test run and deploys a larger, -more diverse set of test targets. +Configure the following environment variables. For HashiCorp Nomad +developers, this configuration can be found in 1Pass in the Nomad +team's vault under `nomad-e2e`. + +``` +export HCP_CLIENT_ID= +export HCP_CLIENT_SECRET= +export CONSUL_HTTP_TOKEN= +export CONSUL_HTTP_ADDR= +``` + +The Vault admin token will expire after 6 hours. If you haven't +created one already use the separate Terraform configuration found in +the `hcp-vault-auth` directory. The following will set the correct +values for `VAULT_TOKEN`, `VAULT_ADDR`, and `VAULT_NAMESPACE`: + +``` +cd ./hcp-vault-auth +terraform apply --auto-approve +$(terraform output environment --raw) +``` + +Optionally, edit the `terraform.tfvars` file to change the number of +Linux clients or Windows clients. ```hcl region = "us-east-1" @@ -25,9 +48,12 @@ instance_type = "t2.medium" server_count = "3" client_count_ubuntu_bionic_amd64 = "4" client_count_windows_2016_amd64 = "1" -profile = "dev-cluster" ``` +Optionally, edit the `nomad_local_binary` variable in the +`terraform.tfvars` file to change the path to the local binary of +Nomad you'd like to upload. 
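The E2E suites consume these same variables at run time; the Connect ACL changes earlier in this patch read `CONSUL_HTTP_TOKEN` directly and fail fast when it is not a UUID. A sketch of that guard as a standalone helper, with a hypothetical name and package placement:

```go
package e2eutil

import (
	"fmt"
	"os"

	uuidparse "github.com/hashicorp/go-uuid"
)

// RequireConsulToken mirrors the guard in the Connect ACL suite: fail
// fast when CONSUL_HTTP_TOKEN is unset or not a UUID, rather than
// letting every test case fail with an opaque ACL error.
func RequireConsulToken() (string, error) {
	token := os.Getenv("CONSUL_HTTP_TOKEN")
	if _, err := uuidparse.ParseUUID(token); err != nil {
		return "", fmt.Errorf("CONSUL_HTTP_TOKEN is not a valid UUID: %w", err)
	}
	return token, nil
}
```

Validating the token shape up front in `BeforeAll` turns a confusing cascade of ACL failures into a single clear error message.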
+ Run Terraform apply to deploy the infrastructure: ```sh @@ -40,66 +66,23 @@ terraform apply > where the ssh service isn't yet ready. That's ok and expected; they'll get > retried. In particular, Windows instances can take a few minutes before ssh > is ready. +> +> Also note: When ACLs are being bootstrapped, you may see "No cluster +> leader" in the output several times while the ACL bootstrap script +> polls the cluster to start and elect a leader. -## Nomad Version +## Configuration -You'll need to pass one of the following variables in either your -`terraform.tfvars` file or as a command line argument (ex. `terraform apply --var 'nomad_version=0.10.2+ent'`) +The files in `etc` are template configuration files for Nomad and the +Consul agent. Terraform will render these files to the `uploads` +folder and upload them to the cluster during provisioning. -* `nomad_local_binary`: provision this specific local binary of Nomad. This is - a path to a Nomad binary on your own host. Ex. `nomad_local_binary = - "/home/me/nomad"`. This setting overrides `nomad_version`. -* `nomad_url`: provision this version from a remote archived binary, e.g. `build-binaries` CircleCI artifacts zip file urls. -* `nomad_version`: provision this version from - [releases.hashicorp.com](https://releases.hashicorp.com/nomad). Ex. `nomad_version - = "0.10.2+ent"` -If you want to deploy the Enterprise build, include `-var -'nomad_enterprise=true'`. -If you want to bootstrap Nomad ACLs, include `-var 'nomad_acls=true'`. -> Note: If you bootstrap ACLs you will see "No cluster leader" in the output -> several times while the ACL bootstrap script polls the cluster to start and -> and elect a leader. -## Profiles -The `profile` field selects from a set of configuration files for Nomad, -Consul, and Vault by uploading the files found in `./config/`. The -standard profiles are as follows: -* `full-cluster`: This profile is used for nightly E2E testing. It assumes at - least 3 servers and includes a unique config for each Nomad client. -* `dev-cluster`: This profile is used for developer testing of a more limited - set of clients. It assumes at least 3 servers but uses the one config for - all the Linux Nomad clients and one config for all the Windows Nomad - clients. -You may create additional profiles for testing more complex interactions between features. -You can build your own custom profile by writing config files to the -`./config/` directory. -For each profile, application (Nomad, Consul, Vault), and agent type -(`server`, `client_linux`, or `client_windows`), the agent gets the following -configuration files, ignoring any that are missing. -* `./config///*`: base configurations shared between all - servers and clients. -* `./config////*`: base configurations shared - between all agents of this type. -* `./config////indexed/*.`: a - configuration for that particular agent, where the index value is the index - of that agent within the total count. -For example, with the `full-cluster` profile, 2nd Nomad server would get the -following configuration files: -* `./config/full-cluster/nomad/base.hcl` -* `./config/full-cluster/nomad/server/indexed/server-1.hcl` -The directory `./config/full-cluster/nomad/server` has no configuration files, -so that's safely skipped. +* `etc/nomad.d` are the Nomad configuration files. + * `base.hcl`, `tls.hcl`, `consul.hcl`, and `vault.hcl` are shared. + * `server-linux.hcl`, `client-linux.hcl`, and `client-windows.hcl` are role and platform specific. 
+ * `client-linux-0.hcl`, etc. are specific to individual instances. +* `etc/consul.d` are the Consul agent configuration files. +* `etc/acls` are ACL policy files for Consul and Vault. ## Outputs diff --git a/e2e/terraform/compute.tf b/e2e/terraform/compute.tf index 5d171f18c..b7ce0aaed 100644 --- a/e2e/terraform/compute.tf +++ b/e2e/terraform/compute.tf @@ -1,5 +1,5 @@ locals { - ami_prefix = "nomad-e2e-v2" + ami_prefix = "nomad-e2e-v3" } resource "aws_instance" "server" { @@ -15,7 +15,6 @@ resource "aws_instance" "server" { tags = { Name = "${local.random_name}-server-${count.index}" ConsulAutoJoin = "auto-join-${local.random_name}" - SHA = var.nomad_sha User = data.aws_caller_identity.current.arn } } @@ -33,7 +32,6 @@ resource "aws_instance" "client_ubuntu_bionic_amd64" { tags = { Name = "${local.random_name}-client-ubuntu-bionic-amd64-${count.index}" ConsulAutoJoin = "auto-join-${local.random_name}" - SHA = var.nomad_sha User = data.aws_caller_identity.current.arn } } @@ -53,7 +51,6 @@ resource "aws_instance" "client_windows_2016_amd64" { tags = { Name = "${local.random_name}-client-windows-2016-${count.index}" ConsulAutoJoin = "auto-join-${local.random_name}" - SHA = var.nomad_sha User = data.aws_caller_identity.current.arn } } diff --git a/e2e/terraform/config/.gitignore b/e2e/terraform/config/.gitignore deleted file mode 100644 index 6dd9bdd41..000000000 --- a/e2e/terraform/config/.gitignore +++ /dev/null @@ -1 +0,0 @@ -custom/* diff --git a/e2e/terraform/config/custom/consul/client-linux/indexed/.gitkeep b/e2e/terraform/config/custom/consul/client-linux/indexed/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/e2e/terraform/config/custom/consul/client-windows/indexed/.gitkeep b/e2e/terraform/config/custom/consul/client-windows/indexed/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/e2e/terraform/config/custom/consul/server/indexed/.gitkeep b/e2e/terraform/config/custom/consul/server/indexed/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/e2e/terraform/config/custom/nomad/client-linux/indexed/.gitkeep b/e2e/terraform/config/custom/nomad/client-linux/indexed/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/e2e/terraform/config/custom/nomad/client-windows/indexed/.gitkeep b/e2e/terraform/config/custom/nomad/client-windows/indexed/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/e2e/terraform/config/custom/nomad/server/indexed/.gitkeep b/e2e/terraform/config/custom/nomad/server/indexed/.gitkeep deleted file mode 100644 index e69de29bb..000000000 diff --git a/e2e/terraform/config/dev-cluster/consul/aws.json b/e2e/terraform/config/dev-cluster/consul/aws.json deleted file mode 100644 index 2dda95435..000000000 --- a/e2e/terraform/config/dev-cluster/consul/aws.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "recursors": ["172.31.0.2"], - "retry_join": ["provider=aws tag_key=ConsulAutoJoin tag_value=auto-join"] -} diff --git a/e2e/terraform/config/dev-cluster/consul/base.json b/e2e/terraform/config/dev-cluster/consul/base.json deleted file mode 100644 index 2ebc01c14..000000000 --- a/e2e/terraform/config/dev-cluster/consul/base.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "log_level": "INFO", - "data_dir": "/opt/consul/data", - "bind_addr": "0.0.0.0", - "client_addr": "0.0.0.0", - "advertise_addr": "{{ GetPrivateIP }}", - "connect": { - "enabled": true - }, - "ports": { - "http": -1, - "https": 8501, - "grpc": 8502 - } -} diff --git 
a/e2e/terraform/config/dev-cluster/consul/server/server.json b/e2e/terraform/config/dev-cluster/consul/server/server.json deleted file mode 100644 index 46c390c7c..000000000 --- a/e2e/terraform/config/dev-cluster/consul/server/server.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "server": true, - "ui": true, - "bootstrap_expect": 3, - "service": { - "name": "consul" - } -} diff --git a/e2e/terraform/config/dev-cluster/nomad/base.hcl b/e2e/terraform/config/dev-cluster/nomad/base.hcl deleted file mode 100644 index 00eee928c..000000000 --- a/e2e/terraform/config/dev-cluster/nomad/base.hcl +++ /dev/null @@ -1,19 +0,0 @@ -enable_debug = true - -log_level = "debug" - -data_dir = "/opt/nomad/data" - -bind_addr = "0.0.0.0" - -consul { - address = "127.0.0.1:8500" -} - -telemetry { - collection_interval = "1s" - disable_hostname = true - prometheus_metrics = true - publish_allocation_metrics = true - publish_node_metrics = true -} diff --git a/e2e/terraform/config/dev-cluster/nomad/client-linux/client.hcl b/e2e/terraform/config/dev-cluster/nomad/client-linux/client.hcl deleted file mode 100644 index cb3c09ae5..000000000 --- a/e2e/terraform/config/dev-cluster/nomad/client-linux/client.hcl +++ /dev/null @@ -1,51 +0,0 @@ -plugin_dir = "/opt/nomad/plugins" - -client { - enabled = true - - options { - # Allow jobs to run as root - "user.denylist" = "" - } - - host_volume "shared_data" { - path = "/srv/data" - } -} - -plugin "nomad-driver-podman" { - config { - volumes { - enabled = true - } - } -} - -plugin "nomad-driver-ecs" { - config { - enabled = true - cluster = "nomad-rtd-e2e" - region = "us-east-1" - } -} - -plugin "raw_exec" { - config { - enabled = true - } -} - -plugin "docker" { - config { - allow_privileged = true - - volumes { - enabled = true - } - } -} - -vault { - enabled = true - address = "http://active.vault.service.consul:8200" -} diff --git a/e2e/terraform/config/dev-cluster/nomad/client-windows/client-windows.hcl b/e2e/terraform/config/dev-cluster/nomad/client-windows/client-windows.hcl deleted file mode 100644 index 408d70934..000000000 --- a/e2e/terraform/config/dev-cluster/nomad/client-windows/client-windows.hcl +++ /dev/null @@ -1,36 +0,0 @@ -enable_debug = true - -log_level = "debug" - -log_file = "C:\\opt\\nomad\\nomad.log" - -data_dir = "C:\\opt\\nomad\\data" - -bind_addr = "0.0.0.0" - -# Enable the client -client { - enabled = true - - options { - # Allow rawexec jobs - "driver.raw_exec.enable" = "1" - } -} - -consul { - address = "127.0.0.1:8500" -} - -vault { - enabled = true - address = "http://active.vault.service.consul:8200" -} - -telemetry { - collection_interval = "1s" - disable_hostname = true - prometheus_metrics = true - publish_allocation_metrics = true - publish_node_metrics = true -} diff --git a/e2e/terraform/config/dev-cluster/vault/server/vault.hcl b/e2e/terraform/config/dev-cluster/vault/server/vault.hcl deleted file mode 100644 index 1e04a13e9..000000000 --- a/e2e/terraform/config/dev-cluster/vault/server/vault.hcl +++ /dev/null @@ -1,15 +0,0 @@ -listener "tcp" { - address = "0.0.0.0:8200" - tls_disable = 1 -} - -# this autounseal key is created by Terraform in the E2E infrastructure repo -# and should be used only for these tests -seal "awskms" { - region = "us-east-1" - kms_key_id = "74b7e226-c745-4ddd-9b7f-2371024ee37d" -} - -# Vault 1.5.4 doesn't have autodiscovery for retry_join on its -# integrated storage yet so we'll just use consul for storage -storage "consul" {} diff --git a/e2e/terraform/config/full-cluster/consul/aws.json 
b/e2e/terraform/config/full-cluster/consul/aws.json deleted file mode 100644 index 2dda95435..000000000 --- a/e2e/terraform/config/full-cluster/consul/aws.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "recursors": ["172.31.0.2"], - "retry_join": ["provider=aws tag_key=ConsulAutoJoin tag_value=auto-join"] -} diff --git a/e2e/terraform/config/full-cluster/consul/server/server.json b/e2e/terraform/config/full-cluster/consul/server/server.json deleted file mode 100644 index 46c390c7c..000000000 --- a/e2e/terraform/config/full-cluster/consul/server/server.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "server": true, - "ui": true, - "bootstrap_expect": 3, - "service": { - "name": "consul" - } -} diff --git a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-0.hcl b/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-0.hcl deleted file mode 100644 index cbc2b036e..000000000 --- a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-0.hcl +++ /dev/null @@ -1,50 +0,0 @@ -# Enable the client -client { - enabled = true - - meta { - "rack" = "r1" - } - - host_volume "shared_data" { - path = "/srv/data" - } -} - -plugin_dir = "/opt/nomad/plugins" -plugin "nomad-driver-podman" { - config { - volumes { - enabled = true - } - } -} - -plugin "nomad-driver-ecs" { - config { - enabled = true - cluster = "nomad-rtd-e2e" - region = "us-east-1" - } -} - -plugin "raw_exec" { - config { - enabled = true - } -} - -plugin "docker" { - config { - allow_privileged = true - - volumes { - enabled = true - } - } -} - -vault { - enabled = true - address = "http://active.vault.service.consul:8200" -} diff --git a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-2.hcl b/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-2.hcl deleted file mode 100644 index 8fc1d63fd..000000000 --- a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-2.hcl +++ /dev/null @@ -1,39 +0,0 @@ -datacenter = "dc2" - -client { - enabled = true - - meta { - "rack" = "r1" - } -} - -plugin_dir = "/opt/nomad/plugins" -plugin "nomad-driver-podman" { - config { - volumes { - enabled = true - } - } -} - -plugin "raw_exec" { - config { - enabled = true - } -} - -plugin "docker" { - config { - allow_privileged = true - - volumes { - enabled = true - } - } -} - -vault { - enabled = true - address = "http://active.vault.service.consul:8200" -} diff --git a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-3.hcl b/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-3.hcl deleted file mode 100644 index 8135c486d..000000000 --- a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-3.hcl +++ /dev/null @@ -1,30 +0,0 @@ -datacenter = "dc2" - -client { - enabled = true - - meta { - "rack" = "r2" - } -} - -plugin "raw_exec" { - config { - enabled = true - } -} - -plugin "docker" { - config { - allow_privileged = true - - volumes { - enabled = true - } - } -} - -vault { - enabled = true - address = "http://active.vault.service.consul:8200" -} diff --git a/e2e/terraform/config/full-cluster/nomad/client-windows/client-windows.hcl b/e2e/terraform/config/full-cluster/nomad/client-windows/client-windows.hcl deleted file mode 100644 index 408d70934..000000000 --- a/e2e/terraform/config/full-cluster/nomad/client-windows/client-windows.hcl +++ /dev/null @@ -1,36 +0,0 @@ -enable_debug = true - -log_level = "debug" - -log_file = "C:\\opt\\nomad\\nomad.log" - -data_dir = "C:\\opt\\nomad\\data" - -bind_addr = "0.0.0.0" - -# Enable the client 
-client { - enabled = true - - options { - # Allow rawexec jobs - "driver.raw_exec.enable" = "1" - } -} - -consul { - address = "127.0.0.1:8500" -} - -vault { - enabled = true - address = "http://active.vault.service.consul:8200" -} - -telemetry { - collection_interval = "1s" - disable_hostname = true - prometheus_metrics = true - publish_allocation_metrics = true - publish_node_metrics = true -} diff --git a/e2e/terraform/config/full-cluster/nomad/server/server.hcl b/e2e/terraform/config/full-cluster/nomad/server/server.hcl deleted file mode 100644 index 385e208f1..000000000 --- a/e2e/terraform/config/full-cluster/nomad/server/server.hcl +++ /dev/null @@ -1,4 +0,0 @@ -server { - enabled = true - bootstrap_expect = 3 -} diff --git a/e2e/terraform/config/full-cluster/vault/server/vault.hcl b/e2e/terraform/config/full-cluster/vault/server/vault.hcl deleted file mode 100644 index 1e04a13e9..000000000 --- a/e2e/terraform/config/full-cluster/vault/server/vault.hcl +++ /dev/null @@ -1,15 +0,0 @@ -listener "tcp" { - address = "0.0.0.0:8200" - tls_disable = 1 -} - -# this autounseal key is created by Terraform in the E2E infrastructure repo -# and should be used only for these tests -seal "awskms" { - region = "us-east-1" - kms_key_id = "74b7e226-c745-4ddd-9b7f-2371024ee37d" -} - -# Vault 1.5.4 doesn't have autodiscovery for retry_join on its -# integrated storage yet so we'll just use consul for storage -storage "consul" {} diff --git a/e2e/terraform/config/shared/README.md b/e2e/terraform/config/shared/README.md deleted file mode 100644 index 26032d151..000000000 --- a/e2e/terraform/config/shared/README.md +++ /dev/null @@ -1,5 +0,0 @@ -### Shared configs - -The only configurations that should go here are ones that we want to be able -to toggle on/off for any profile. Adding a new configuration here requires -adding a flag to the provision scripts as well to symlink it. 
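With the self-managed Vault server configuration removed, anything that needs Vault reaches the HCP cluster through the standard `VAULT_ADDR`, `VAULT_TOKEN`, and `VAULT_NAMESPACE` environment variables described in the README. A minimal sketch, assuming those variables are exported, using the Vault Go client:

```go
package main

import (
	"fmt"
	"log"
	"os"

	vaultapi "github.com/hashicorp/vault/api"
)

func main() {
	// DefaultConfig honors VAULT_ADDR; NewClient also picks up VAULT_TOKEN.
	client, err := vaultapi.NewClient(vaultapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// HCP Vault is namespaced; set the namespace explicitly in case the
	// client library version in use does not read VAULT_NAMESPACE itself.
	if ns := os.Getenv("VAULT_NAMESPACE"); ns != "" {
		client.SetNamespace(ns)
	}

	health, err := client.Sys().Health()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("vault reachable, initialized:", health.Initialized)
}
```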
diff --git a/e2e/terraform/config/shared/consul-tls.json b/e2e/terraform/config/shared/consul-tls.json deleted file mode 100644 index b80238a85..000000000 --- a/e2e/terraform/config/shared/consul-tls.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "verify_incoming": true, - "verify_outgoing": true, - "verify_server_hostname": true, - "ca_file": "/etc/consul.d/tls/ca.crt", - "cert_file": "/etc/consul.d/tls/agent.crt", - "key_file": "/etc/consul.d/tls/agent.key", - "ports": { - "https": 8501 - } -} diff --git a/e2e/terraform/config/shared/nomad-acl.hcl b/e2e/terraform/config/shared/nomad-acl.hcl deleted file mode 100644 index f88f1d244..000000000 --- a/e2e/terraform/config/shared/nomad-acl.hcl +++ /dev/null @@ -1,3 +0,0 @@ -acl { - enabled = true -} diff --git a/e2e/terraform/config/shared/nomad-tls.hcl b/e2e/terraform/config/shared/nomad-tls.hcl deleted file mode 100644 index 61066ac1e..000000000 --- a/e2e/terraform/config/shared/nomad-tls.hcl +++ /dev/null @@ -1,29 +0,0 @@ -tls { - http = true - rpc = true - - ca_file = "/etc/nomad.d/tls/ca.crt" - cert_file = "/etc/nomad.d/tls/agent.crt" - key_file = "/etc/nomad.d/tls/agent.key" - - verify_server_hostname = true - verify_https_client = true -} - -consul { - address = "127.0.0.1:8501" - ssl = true - - ca_file = "/etc/nomad.d/tls/ca.crt" - cert_file = "/etc/nomad.d/tls/agent.crt" - key_file = "/etc/nomad.d/tls/agent.key" -} - -vault { - enabled = true - address = "https://active.vault.service.consul:8200" - - ca_file = "/etc/nomad.d/tls/ca.crt" - cert_file = "/etc/nomad.d/tls/agent.crt" - key_file = "/etc/nomad.d/tls/agent.key" -} \ No newline at end of file diff --git a/e2e/terraform/config/shared/vault-tls.hcl b/e2e/terraform/config/shared/vault-tls.hcl deleted file mode 100644 index 4de3e1713..000000000 --- a/e2e/terraform/config/shared/vault-tls.hcl +++ /dev/null @@ -1,26 +0,0 @@ -listener "tcp" { - address = "0.0.0.0:8200" - - tls_disable = false - tls_require_and_verify_client_cert = true - - tls_client_ca_file = "/etc/vault.d/tls/ca.crt" - tls_cert_file = "/etc/vault.d/tls/agent.crt" - tls_key_file = "/etc/vault.d/tls/agent.key" -} - -# this autounseal key is created by Terraform in the E2E infrastructure repo -# and should be used only for these tests -seal "awskms" { - region = "us-east-1" - kms_key_id = "74b7e226-c745-4ddd-9b7f-2371024ee37d" -} - -storage "consul" { - address = "127.0.0.1:8501" - scheme = "https" - - tls_ca_file = "/etc/vault.d/tls/ca.crt" - tls_cert_file = "/etc/vault.d/tls/agent.crt" - tls_key_file = "/etc/vault.d/tls/agent.key" -} diff --git a/e2e/terraform/etc/acls/consul/consul-agent-policy.hcl b/e2e/terraform/etc/acls/consul/consul-agent-policy.hcl new file mode 100644 index 000000000..1eda9d2ae --- /dev/null +++ b/e2e/terraform/etc/acls/consul/consul-agent-policy.hcl @@ -0,0 +1,32 @@ +# TODO: because Nomad should own most of these interactions, I think +# it might be possible to reduce this to: +# +# node_prefix "" { +# policy = write +# } + +acl = "write" + +agent_prefix "" { + policy = "write" +} + +event_prefix "" { + policy = "write" +} + +key_prefix "" { + policy = "write" +} + +node_prefix "" { + policy = "write" +} + +query_prefix "" { + policy = "write" +} + +service_prefix "" { + policy = "write" +} diff --git a/e2e/terraform/etc/acls/consul/nomad-client-policy.hcl b/e2e/terraform/etc/acls/consul/nomad-client-policy.hcl new file mode 100644 index 000000000..3125fb59f --- /dev/null +++ b/e2e/terraform/etc/acls/consul/nomad-client-policy.hcl @@ -0,0 +1,31 @@ +// The Nomad Client will be registering things 
into its buddy Consul Client. +// Note: because we also test the use of Consul namespaces, this token must be +// able to register services, read the keystore, and read node data for any +// namespace. +// The operator=write permission is required for creating config entries for +// connect ingress gateways. operator ACLs are not namespaced, though the +// config entries they can generate are. +operator = "write" + +agent_prefix "" { + policy = "read" +} + +namespace_prefix "" { + // The acl=write permission is required for generating Consul Service Identity + // tokens for consul connect services. Those services could be configured for + // any Consul namespace the job-submitter has access to. + acl = "write" + + key_prefix "" { + policy = "read" + } + + node_prefix "" { + policy = "read" + } + + service_prefix "" { + policy = "write" + } +} diff --git a/e2e/terraform/etc/acls/consul/nomad-server-policy.hcl b/e2e/terraform/etc/acls/consul/nomad-server-policy.hcl new file mode 100644 index 000000000..3c40cfb0d --- /dev/null +++ b/e2e/terraform/etc/acls/consul/nomad-server-policy.hcl @@ -0,0 +1,27 @@ +// The operator=write permission is required for creating config entries for +// connect ingress gateways. operator ACLs are not namespaced, though the +// config entries they can generate are. +operator = "write" + +agent_prefix "" { + policy = "read" +} + +namespace_prefix "" { + // The acl=write permission is required for generating Consul Service Identity + // tokens for consul connect services. Those services could be configured for + // any Consul namespace the job-submitter has access to. + acl = "write" +} + +service_prefix "" { + policy = "write" +} + +agent_prefix "" { + policy = "read" +} + +node_prefix "" { + policy = "read" +} diff --git a/e2e/terraform/scripts/vault-nomad-server-policy.hcl b/e2e/terraform/etc/acls/vault/nomad-policy.hcl similarity index 81% rename from e2e/terraform/scripts/vault-nomad-server-policy.hcl rename to e2e/terraform/etc/acls/vault/nomad-policy.hcl index d93a537d1..5da5f2200 100644 --- a/e2e/terraform/scripts/vault-nomad-server-policy.hcl +++ b/e2e/terraform/etc/acls/vault/nomad-policy.hcl @@ -1,12 +1,12 @@ # Allow creating tokens under "nomad-cluster" role. The role name should be -# updated if "nomad-cluster" is not used. -path "auth/token/create/nomad-cluster" { +# updated if "nomad-tasks" is not used. +path "auth/token/create/nomad-tasks" { capabilities = ["update"] } -# Allow looking up "nomad-cluster" role. The role name should be updated if -# "nomad-cluster" is not used. -path "auth/token/roles/nomad-cluster" { +# Allow looking up "nomad-tasks" role. The role name should be updated if +# "nomad-tasks" is not used. 
diff --git a/e2e/terraform/etc/consul.d/client_acl.json b/e2e/terraform/etc/consul.d/client_acl.json
new file mode 100644
index 000000000..c9a5f8e8b
--- /dev/null
+++ b/e2e/terraform/etc/consul.d/client_acl.json
@@ -0,0 +1,8 @@
+{
+  "acl": {
+    "tokens": {
+      "agent": "${token}",
+      "default": "${token}"
+    }
+  }
+}
diff --git a/e2e/terraform/config/full-cluster/consul/base.json b/e2e/terraform/etc/consul.d/clients.json
similarity index 90%
rename from e2e/terraform/config/full-cluster/consul/base.json
rename to e2e/terraform/etc/consul.d/clients.json
index 763eb06ad..9fbc9928e 100644
--- a/e2e/terraform/config/full-cluster/consul/base.json
+++ b/e2e/terraform/etc/consul.d/clients.json
@@ -1,5 +1,4 @@
 {
-  "log_level": "INFO",
   "data_dir": "/opt/consul/data",
   "bind_addr": "0.0.0.0",
   "client_addr": "0.0.0.0",
diff --git a/e2e/terraform/etc/consul.d/consul.service b/e2e/terraform/etc/consul.d/consul.service
new file mode 100644
index 000000000..2f1e9f24e
--- /dev/null
+++ b/e2e/terraform/etc/consul.d/consul.service
@@ -0,0 +1,17 @@
+[Unit]
+Description=Consul Agent
+Requires=network-online.target
+After=network-online.target
+
+[Service]
+Restart=on-failure
+Environment=CONSUL_ALLOW_PRIVILEGED_PORTS=true
+WorkingDirectory=/etc/consul.d
+ExecStart=/usr/bin/consul agent -config-dir="/etc/consul.d"
+ExecReload=/bin/kill -HUP $MAINPID
+KillSignal=SIGTERM
+User=consul
+Group=consul
+
+[Install]
+WantedBy=multi-user.target
diff --git a/e2e/terraform/etc/nomad.d/.environment b/e2e/terraform/etc/nomad.d/.environment
new file mode 100644
index 000000000..a7dbdc0e9
--- /dev/null
+++ b/e2e/terraform/etc/nomad.d/.environment
@@ -0,0 +1 @@
+NOMAD_LICENSE=${license}
diff --git a/e2e/terraform/config/full-cluster/nomad/base.hcl b/e2e/terraform/etc/nomad.d/base.hcl
similarity index 67%
rename from e2e/terraform/config/full-cluster/nomad/base.hcl
rename to e2e/terraform/etc/nomad.d/base.hcl
index b31559918..578603ba8 100644
--- a/e2e/terraform/config/full-cluster/nomad/base.hcl
+++ b/e2e/terraform/etc/nomad.d/base.hcl
@@ -1,14 +1,7 @@
+bind_addr = "0.0.0.0"
+data_dir = "${data_dir}"
 enable_debug = true
-
-log_level = "debug"
-
-data_dir = "/opt/nomad/data"
-
-bind_addr = "0.0.0.0"
-
-consul {
-  address = "127.0.0.1:8500"
-}
+log_level = "debug"
 
 audit {
   enabled = true
diff --git a/e2e/terraform/etc/nomad.d/client-linux-0.hcl b/e2e/terraform/etc/nomad.d/client-linux-0.hcl
new file mode 100644
index 000000000..f0f9ff2fb
--- /dev/null
+++ b/e2e/terraform/etc/nomad.d/client-linux-0.hcl
@@ -0,0 +1,9 @@
+client {
+  meta {
+    "rack" = "r1"
+  }
+
+  host_volume "shared_data" {
+    path = "/srv/data"
+  }
+}
diff --git a/e2e/terraform/etc/nomad.d/client-linux-1.hcl b/e2e/terraform/etc/nomad.d/client-linux-1.hcl
new file mode 100644
index 000000000..78f1be395
--- /dev/null
+++ b/e2e/terraform/etc/nomad.d/client-linux-1.hcl
@@ -0,0 +1,5 @@
+client {
+  meta {
+    "rack" = "r2"
+  }
+}
diff --git a/e2e/terraform/etc/nomad.d/client-linux-2.hcl b/e2e/terraform/etc/nomad.d/client-linux-2.hcl
new file mode 100644
index 000000000..772ee825f
--- /dev/null
+++ b/e2e/terraform/etc/nomad.d/client-linux-2.hcl
@@ -0,0 +1,7 @@
+datacenter = "dc2"
+
+client {
+  meta {
+    "rack" = "r1"
+  }
+}
diff --git a/e2e/terraform/etc/nomad.d/client-linux-3.hcl b/e2e/terraform/etc/nomad.d/client-linux-3.hcl
new file mode 100644
index 000000000..51ff5ad6c
--- /dev/null
+++ b/e2e/terraform/etc/nomad.d/client-linux-3.hcl
@@ -0,0 +1,7 @@
+datacenter = "dc2"
+
+client {
+  meta {
+    "rack" = "r2"
+  }
+}
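Note that several of the files above are templates rather than literal agent configuration: client_acl.json, .environment, and base.hcl carry ${token}, ${license}, and ${data_dir} placeholders that Terraform fills in with templatefile, just as the provision-nomad module later in this patch does for base.hcl. A minimal sketch of that rendering step, with placeholder values that are not from this patch:

    # Sketch: render a templated config into a per-instance upload directory.
    # The token value and the upload path here are placeholders.
    resource "local_file" "consul_client_acl" {
      sensitive_content = templatefile("etc/consul.d/client_acl.json", {
        token = "11111111-2222-3333-4444-555555555555"
      })
      filename        = "uploads/192.0.2.10/consul.d/client_acl.json"
      file_permission = "0600"
    }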
diff --git a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-1.hcl b/e2e/terraform/etc/nomad.d/client-linux.hcl similarity index 79% rename from e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-1.hcl rename to e2e/terraform/etc/nomad.d/client-linux.hcl index c36957389..cefae0748 100644 --- a/e2e/terraform/config/full-cluster/nomad/client-linux/indexed/client-1.hcl +++ b/e2e/terraform/etc/nomad.d/client-linux.hcl @@ -1,12 +1,9 @@ +plugin_dir = "/opt/nomad/plugins" + client { enabled = true - - meta { - "rack" = "r2" - } } -plugin_dir = "/opt/nomad/plugins" plugin "nomad-driver-podman" { config { volumes { @@ -38,8 +35,3 @@ plugin "docker" { } } } - -vault { - enabled = true - address = "http://active.vault.service.consul:8200" -} diff --git a/e2e/terraform/etc/nomad.d/client-windows.hcl b/e2e/terraform/etc/nomad.d/client-windows.hcl new file mode 100644 index 000000000..9ab2f87c5 --- /dev/null +++ b/e2e/terraform/etc/nomad.d/client-windows.hcl @@ -0,0 +1,12 @@ +log_file = "C:\\opt\\nomad\\nomad.log" +plugin_dir = "C:\\opt\\nomad\\plugins" + +client { + enabled = true +} + +plugin "raw_exec" { + config { + enabled = true + } +} diff --git a/e2e/terraform/etc/nomad.d/consul.hcl b/e2e/terraform/etc/nomad.d/consul.hcl new file mode 100644 index 000000000..cff8dbb99 --- /dev/null +++ b/e2e/terraform/etc/nomad.d/consul.hcl @@ -0,0 +1,6 @@ +consul { + address = "127.0.0.1:8500" + token = "${token}" + client_service_name = "${client_service_name}" + server_service_name = "${server_service_name}" +} diff --git a/e2e/terraform/etc/nomad.d/index.hcl b/e2e/terraform/etc/nomad.d/index.hcl new file mode 100644 index 000000000..76ff8e9e6 --- /dev/null +++ b/e2e/terraform/etc/nomad.d/index.hcl @@ -0,0 +1 @@ +# This is an empty placeholder for indexed configuration diff --git a/e2e/terraform/etc/nomad.d/nomad-client.service b/e2e/terraform/etc/nomad.d/nomad-client.service new file mode 100644 index 000000000..8490fc9c8 --- /dev/null +++ b/e2e/terraform/etc/nomad.d/nomad-client.service @@ -0,0 +1,21 @@ +[Unit] +Description=Nomad Agent +Requires=network-online.target +After=network-online.target +StartLimitIntervalSec=0 +StartLimitBurst=3 + +[Service] +ExecReload=/bin/kill -HUP $MAINPID +ExecStart=/usr/local/bin/nomad agent -config /etc/nomad.d +EnvironmentFile=-/etc/nomad.d/.environment +KillMode=process +KillSignal=SIGINT +LimitNOFILE=65536 +LimitNPROC=infinity +TasksMax=infinity +Restart=on-failure +RestartSec=2 + +[Install] +WantedBy=multi-user.target diff --git a/e2e/terraform/etc/nomad.d/nomad-server.service b/e2e/terraform/etc/nomad.d/nomad-server.service new file mode 100644 index 000000000..8490fc9c8 --- /dev/null +++ b/e2e/terraform/etc/nomad.d/nomad-server.service @@ -0,0 +1,21 @@ +[Unit] +Description=Nomad Agent +Requires=network-online.target +After=network-online.target +StartLimitIntervalSec=0 +StartLimitBurst=3 + +[Service] +ExecReload=/bin/kill -HUP $MAINPID +ExecStart=/usr/local/bin/nomad agent -config /etc/nomad.d +EnvironmentFile=-/etc/nomad.d/.environment +KillMode=process +KillSignal=SIGINT +LimitNOFILE=65536 +LimitNPROC=infinity +TasksMax=infinity +Restart=on-failure +RestartSec=2 + +[Install] +WantedBy=multi-user.target diff --git a/e2e/terraform/config/dev-cluster/nomad/server/server.hcl b/e2e/terraform/etc/nomad.d/server-linux.hcl similarity index 69% rename from e2e/terraform/config/dev-cluster/nomad/server/server.hcl rename to e2e/terraform/etc/nomad.d/server-linux.hcl index 385e208f1..1446a7516 100644 --- 
a/e2e/terraform/config/dev-cluster/nomad/server/server.hcl
+++ b/e2e/terraform/etc/nomad.d/server-linux.hcl
@@ -2,3 +2,7 @@ server {
   enabled = true
   bootstrap_expect = 3
 }
+
+acl {
+  enabled = true
+}
diff --git a/e2e/terraform/etc/nomad.d/tls.hcl b/e2e/terraform/etc/nomad.d/tls.hcl
new file mode 100644
index 000000000..4c51801a9
--- /dev/null
+++ b/e2e/terraform/etc/nomad.d/tls.hcl
@@ -0,0 +1,11 @@
+tls {
+  http = true
+  rpc = true
+
+  ca_file = "/etc/nomad.d/tls/ca.crt"
+  cert_file = "/etc/nomad.d/tls/agent.crt"
+  key_file = "/etc/nomad.d/tls/agent.key"
+
+  verify_server_hostname = true
+  verify_https_client = true
+}
diff --git a/e2e/terraform/etc/nomad.d/vault.hcl b/e2e/terraform/etc/nomad.d/vault.hcl
new file mode 100644
index 000000000..bb8a4a495
--- /dev/null
+++ b/e2e/terraform/etc/nomad.d/vault.hcl
@@ -0,0 +1,8 @@
+vault {
+  enabled = true
+  address = "${url}"
+  task_token_ttl = "1h"
+  create_from_role = "nomad-tasks"
+  namespace = "${namespace}"
+  token = "${token}"
+}
diff --git a/e2e/terraform/hcp-vault-auth/main.tf b/e2e/terraform/hcp-vault-auth/main.tf
new file mode 100644
index 000000000..eb8569b75
--- /dev/null
+++ b/e2e/terraform/hcp-vault-auth/main.tf
@@ -0,0 +1,47 @@
+# Vault cluster admin tokens expire after 6 hours, so we need to
+# generate them fresh for test runs. But we can't generate the token
+# and then use that token with the vault provider in the same
+# Terraform run. So you'll need to apply this TF config separately
+# from the root configuration.
+
+variable "hcp_vault_cluster_id" {
+  description = "The ID of the HCP Vault cluster"
+  type = string
+  default = "nomad-e2e-shared-hcp-vault"
+}
+
+variable "hcp_vault_namespace" {
+  description = "The namespace where the HCP Vault cluster policy works"
+  type = string
+  default = "admin"
+}
+
+data "hcp_vault_cluster" "e2e_shared_vault" {
+  cluster_id = var.hcp_vault_cluster_id
+}
+
+resource "hcp_vault_cluster_admin_token" "admin" {
+  cluster_id = data.hcp_vault_cluster.e2e_shared_vault.cluster_id
+}
+
+output "message" {
+  value = < ${path.root}/keys/nomad_root_token"
+    template = "${local.nomad_env} ./scripts/bootstrap-nomad.sh"
 }
 
 data "local_file" "nomad_token" {
diff --git a/e2e/terraform/nomad.tf b/e2e/terraform/nomad.tf
index 5151838ad..de87bae83 100644
--- a/e2e/terraform/nomad.tf
+++ b/e2e/terraform/nomad.tf
@@ -1,36 +1,19 @@
 module "nomad_server" {
-  source = "./provision-nomad"
   depends_on = [aws_instance.server]
   count = var.server_count
-  platform = "linux_amd64"
-  profile = var.profile
+  platform = "linux"
+  arch = "linux_amd64"
   role = "server"
   index = count.index
-
-  # The specific version of Nomad deployed will default to whichever one of
-  # nomad_sha, nomad_version, or nomad_local_binary is set, but if you want to
-  # deploy multiple versions you can use the nomad_*_server variables to
-  # provide a list of builds
-  nomad_version = count.index < length(var.nomad_version_server) ? var.nomad_version_server[count.index] : var.nomad_version
-
-  nomad_sha = count.index < length(var.nomad_sha_server) ? var.nomad_sha_server[count.index] : var.nomad_sha
+  instance = aws_instance.server[count.index]
 
   nomad_local_binary = count.index < length(var.nomad_local_binary_server) ? var.nomad_local_binary_server[count.index] : var.nomad_local_binary
 
-  nomad_url = count.index < length(var.nomad_url_server) ?
var.nomad_url_server[count.index] : var.nomad_url - - nomad_enterprise = var.nomad_enterprise - nomad_license = var.nomad_license - nomad_acls = var.nomad_acls - cluster_name = local.random_name - - tls = var.tls - tls_ca_key = tls_private_key.ca.private_key_pem - tls_ca_cert = tls_self_signed_cert.ca.cert_pem - - instance = aws_instance.server[count.index] + nomad_license = var.nomad_license + tls_ca_key = tls_private_key.ca.private_key_pem + tls_ca_cert = tls_self_signed_cert.ca.cert_pem connection = { type = "ssh" @@ -43,38 +26,21 @@ module "nomad_server" { # TODO: split out the different Linux targets (ubuntu, centos, arm, etc.) when # they're available module "nomad_client_ubuntu_bionic_amd64" { - source = "./provision-nomad" depends_on = [aws_instance.client_ubuntu_bionic_amd64] count = var.client_count_ubuntu_bionic_amd64 - platform = "linux_amd64" - profile = var.profile - role = "client-linux" + platform = "linux" + arch = "linux_amd64" + role = "client" index = count.index - - # The specific version of Nomad deployed will default to whichever one of - # nomad_sha, nomad_version, or nomad_local_binary is set, but if you want to - # deploy multiple versions you can use the nomad_*_client_linux - # variables to provide a list of builds - nomad_version = count.index < length(var.nomad_version_client_ubuntu_bionic_amd64) ? var.nomad_version_client_ubuntu_bionic_amd64[count.index] : var.nomad_version - - nomad_sha = count.index < length(var.nomad_sha_client_ubuntu_bionic_amd64) ? var.nomad_sha_client_ubuntu_bionic_amd64[count.index] : var.nomad_sha + instance = aws_instance.client_ubuntu_bionic_amd64[count.index] nomad_local_binary = count.index < length(var.nomad_local_binary_client_ubuntu_bionic_amd64) ? var.nomad_local_binary_client_ubuntu_bionic_amd64[count.index] : var.nomad_local_binary - nomad_url = count.index < length(var.nomad_url_client_ubuntu_bionic_amd64) ? var.nomad_url_client_ubuntu_bionic_amd64[count.index] : var.nomad_url - - nomad_enterprise = var.nomad_enterprise - nomad_acls = false - cluster_name = local.random_name - - tls = var.tls tls_ca_key = tls_private_key.ca.private_key_pem tls_ca_cert = tls_self_signed_cert.ca.cert_pem - instance = aws_instance.client_ubuntu_bionic_amd64[count.index] - connection = { type = "ssh" user = "ubuntu" @@ -83,44 +49,25 @@ module "nomad_client_ubuntu_bionic_amd64" { } } + # TODO: split out the different Windows targets (2016, 2019) when they're # available module "nomad_client_windows_2016_amd64" { - source = "./provision-nomad" depends_on = [aws_instance.client_windows_2016_amd64] count = var.client_count_windows_2016_amd64 - platform = "windows_amd64" - profile = var.profile - role = "client-windows" + platform = "windows" + arch = "windows_amd64" + role = "client" index = count.index + instance = aws_instance.client_windows_2016_amd64[count.index] - # The specific version of Nomad deployed will default to whichever one of - # nomad_sha, nomad_version, or nomad_local_binary is set, but if you want to - # deploy multiple versions you can use the nomad_*_client_windows - # variables to provide a list of builds - nomad_version = count.index < length(var.nomad_version_client_windows_2016_amd64) ? var.nomad_version_client_windows_2016_amd64[count.index] : var.nomad_version - - nomad_sha = count.index < length(var.nomad_sha_client_windows_2016_amd64) ? var.nomad_sha_client_windows_2016_amd64[count.index] : var.nomad_sha - - # if nomad_local_binary is in use, you must pass a nomad_local_binary_client_windows_2016_amd64! 
nomad_local_binary = count.index < length(var.nomad_local_binary_client_windows_2016_amd64) ? var.nomad_local_binary_client_windows_2016_amd64[count.index] : "" - # if nomad_url is in use, you must pass a nomad_url_client_windows_2016_amd64! - nomad_url = count.index < length(var.nomad_url_client_windows_2016_amd64) ? var.nomad_url_client_windows_2016_amd64[count.index] : "" - - nomad_enterprise = var.nomad_enterprise - nomad_acls = false - cluster_name = local.random_name - - - tls = var.tls tls_ca_key = tls_private_key.ca.private_key_pem tls_ca_cert = tls_self_signed_cert.ca.cert_pem - instance = aws_instance.client_windows_2016_amd64[count.index] - connection = { type = "ssh" user = "Administrator" diff --git a/e2e/terraform/outputs.tf b/e2e/terraform/outputs.tf index ee69f69f0..8c97056ca 100644 --- a/e2e/terraform/outputs.tf +++ b/e2e/terraform/outputs.tf @@ -38,34 +38,19 @@ ssh into clients with: EOM } +# Note: Consul and Vault environment needs to be set in test +# environment before the Terraform run, so we don't have that output +# here output "environment" { description = "get connection config by running: $(terraform output environment)" + sensitive = true value = </dev/null \ - | sudo xargs -I % ln -fs % "$3" -} - -install_config_profile() { - - if [ -d /tmp/custom ]; then - rm -rf /opt/config/custom - sudo mv /tmp/custom /opt/config/ - fi - - # we're removing the whole directory and recreating to avoid - # any quirks around dotfiles that might show up here. - sudo rm -rf /etc/nomad.d - sudo rm -rf /etc/consul.d - sudo rm -rf /etc/vault.d - - sudo mkdir -p /etc/nomad.d - sudo mkdir -p /etc/consul.d - sudo mkdir -p /etc/vault.d - - sym "${NOMAD_PROFILE}/nomad/" '*' /etc/nomad.d - sym "${NOMAD_PROFILE}/consul/" '*' /etc/consul.d - sym "${NOMAD_PROFILE}/vault/" '*' /etc/vault.d - - if [ -n "$NOMAD_ROLE" ]; then - sym "${NOMAD_PROFILE}/nomad/${NOMAD_ROLE}/" '*' /etc/nomad.d - sym "${NOMAD_PROFILE}/consul/${NOMAD_ROLE}/" '*' /etc/consul.d - sym "${NOMAD_PROFILE}/vault/${NOMAD_ROLE}/" '*' /etc/vault.d - fi - if [ -n "$NOMAD_INDEX" ]; then - sym "${NOMAD_PROFILE}/nomad/${NOMAD_ROLE}/indexed/" "*${NOMAD_INDEX}*" /etc/nomad.d - sym "${NOMAD_PROFILE}/consul/${NOMAD_ROLE}/indexed/" "*${NOMAD_INDEX}*" /etc/consul.d - sym "${NOMAD_PROFILE}/vault/${NOMAD_ROLE}/indexed/" "*${NOMAD_INDEX}*" /etc/vault.d - fi - - if [ $ACLS == "1" ]; then - sudo ln -fs /opt/config/shared/nomad-acl.hcl /etc/nomad.d/acl.hcl - fi - - if [ $TLS == "1" ]; then - sudo ln -fs /opt/config/shared/nomad-tls.hcl /etc/nomad.d/tls.hcl - sudo ln -fs /opt/config/shared/consul-tls.json /etc/consul.d/tls.json - sudo cp /opt/config/shared/vault-tls.hcl /etc/vault.d/vault.hcl - - sudo cp -r /tmp/nomad-tls /etc/nomad.d/tls - sudo cp -r /tmp/nomad-tls /etc/consul.d/tls - sudo cp -r /tmp/nomad-tls /etc/vault.d/tls - fi -} - -update_consul_autojoin() { - sudo sed -i'' -e "s|tag_key=ConsulAutoJoin tag_value=auto-join|tag_key=ConsulAutoJoin tag_value=${CONSUL_AUTOJOIN}|g" /etc/consul.d/*.json -} - -while [[ $# -gt 0 ]] -do -opt="$1" - case $opt in - --nomad_sha) - if [ -z "$2" ]; then echo "Missing sha parameter"; usage; fi - NOMAD_SHA="$2" - install_fn=install_from_s3 - shift 2 - ;; - --nomad_release | --nomad_version) - if [ -z "$2" ]; then echo "Missing version parameter"; usage; fi - NOMAD_VERSION="$2" - install_fn=install_from_release - shift 2 - ;; - --nomad_binary) - if [ -z "$2" ]; then echo "Missing file parameter"; usage; fi - NOMAD_UPLOADED_BINARY="$2" - install_fn=install_from_uploaded_binary - shift 2 - ;; - --nomad_url) - 
if [ -z "$2" ]; then echo "Missing URL parameter"; usage; fi - NOMAD_URL="$2" - install_fn=install_from_url - shift 2 - ;; - --config_profile) - if [ -z "$2" ]; then echo "Missing profile parameter"; usage; fi - NOMAD_PROFILE="/opt/config/${2}" - shift 2 - ;; - --role) - if [ -z "$2" ]; then echo "Missing role parameter"; usage; fi - NOMAD_ROLE="$2" - shift 2 - ;; - --index) - if [ -z "$2" ]; then echo "Missing index parameter"; usage; fi - NOMAD_INDEX="$2" - shift 2 - ;; - --autojoin) - if [ -z "$2" ]; then ehco "Missing autojoin parameter"; usage; fi - CONSUL_AUTOJOIN="$2" - shift 2 - ;; - --nostart) - # for initial packer builds, we don't want to start Nomad - START=0 - shift - ;; - --enterprise) - BUILD_FOLDER="builds-ent" - shift - ;; - --nomad_license) - if [ -z "$2" ]; then echo "Missing license parameter"; usage; fi - NOMAD_LICENSE="$2" - shift 2 - ;; - --nomad_acls) - ACLS=1 - shift - ;; - --tls) - TLS=1 - shift - ;; - *) usage ;; - esac -done - -# call the appropriate installation function -if [ -n "$install_fn" ]; then - $install_fn -fi -if [ -n "$NOMAD_PROFILE" ]; then - install_config_profile -fi - -if [ -n "$CONSUL_AUTOJOIN" ]; then - update_consul_autojoin -fi - -sudo touch /etc/nomad.d/.environment -if [ -n "$NOMAD_LICENSE" ]; then - echo "NOMAD_LICENSE=${NOMAD_LICENSE}" > /tmp/.nomad-environment - sudo mv /tmp/.nomad-environment /etc/nomad.d/.environment -fi - -if [ $START == "1" ]; then - if [ "$NOMAD_ROLE" == "server" ]; then - sudo systemctl restart vault - fi - sudo systemctl restart consul - sudo systemctl restart nomad -fi diff --git a/e2e/terraform/packer/ubuntu-bionic-amd64/setup.sh b/e2e/terraform/packer/ubuntu-bionic-amd64/setup.sh index cb8f9db33..1fe8f20c6 100755 --- a/e2e/terraform/packer/ubuntu-bionic-amd64/setup.sh +++ b/e2e/terraform/packer/ubuntu-bionic-amd64/setup.sh @@ -4,11 +4,6 @@ set -e -# Will be overwritten at test time with the version specified -NOMADVERSION=0.12.7 -CONSULVERSION=1.9.4+ent -VAULTVERSION=1.5.4 - NOMAD_PLUGIN_DIR=/opt/nomad/plugins/ mkdir_for_root() { @@ -20,9 +15,6 @@ mkdir_for_root() { export DEBIAN_FRONTEND=noninteractive echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections -sudo mkdir -p /ops/shared -sudo chown -R ubuntu:ubuntu /ops/shared - mkdir_for_root /opt mkdir_for_root /srv/data # for host volumes @@ -43,44 +35,31 @@ sudo chown root:root /usr/local/bin/sockaddr # Disable the firewall sudo ufw disable || echo "ufw not installed" -echo "Install Consul" -curl -fsL -o /tmp/consul.zip \ - "https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip" -sudo unzip -q /tmp/consul.zip -d /usr/local/bin -sudo chmod 0755 /usr/local/bin/consul -sudo chown root:root /usr/local/bin/consul +echo "Install HashiCorp apt repositories" +curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo apt-key add - +sudo apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" +sudo apt-get update + +echo "Install Consul and Nomad" +sudo apt-get install -y \ + consul-enterprise \ + nomad + +# Note: neither service will start on boot because we haven't enabled +# the systemd unit file and we haven't uploaded any configuration +# files for Consul and Nomad echo "Configure Consul" mkdir_for_root /etc/consul.d mkdir_for_root /opt/consul sudo mv /tmp/linux/consul.service /etc/systemd/system/consul.service -echo "Install Vault" -curl -fsL -o /tmp/vault.zip \ - "https://releases.hashicorp.com/vault/${VAULTVERSION}/vault_${VAULTVERSION}_linux_amd64.zip" 
-sudo unzip -q /tmp/vault.zip -d /usr/local/bin -sudo chmod 0755 /usr/local/bin/vault -sudo chown root:root /usr/local/bin/vault - -echo "Configure Vault" -mkdir_for_root /etc/vault.d -mkdir_for_root /opt/vault -sudo mv /tmp/linux/vault.service /etc/systemd/system/vault.service - -sudo setcap cap_ipc_lock=+ep /usr/local/bin/vault -sudo useradd --system --home /etc/vault.d --shell /bin/false vault - echo "Configure Nomad" mkdir_for_root /etc/nomad.d mkdir_for_root /opt/nomad mkdir_for_root $NOMAD_PLUGIN_DIR sudo mv /tmp/linux/nomad.service /etc/systemd/system/nomad.service -echo "Install Nomad" -sudo mv /tmp/linux/provision.sh /opt/provision.sh -sudo chmod +x /opt/provision.sh -/opt/provision.sh --nomad_version $NOMADVERSION --nostart - echo "Installing third-party apt repositories" # Docker diff --git a/e2e/terraform/packer/ubuntu-bionic-amd64/vault.service b/e2e/terraform/packer/ubuntu-bionic-amd64/vault.service deleted file mode 100644 index 36cb2a53e..000000000 --- a/e2e/terraform/packer/ubuntu-bionic-amd64/vault.service +++ /dev/null @@ -1,33 +0,0 @@ -[Unit] -Description="HashiCorp Vault - A tool for managing secrets" -Documentation=https://www.vaultproject.io/docs/ -Requires=network-online.target -After=network-online.target -ConditionFileNotEmpty=/etc/vault.d/vault.hcl -StartLimitIntervalSec=60 -StartLimitBurst=3 - -[Service] -User=vault -Group=vault -ProtectSystem=full -ProtectHome=read-only -PrivateTmp=yes -PrivateDevices=yes -SecureBits=keep-caps -AmbientCapabilities=CAP_IPC_LOCK -Capabilities=CAP_IPC_LOCK+ep -CapabilityBoundingSet=CAP_SYSLOG CAP_IPC_LOCK -NoNewPrivileges=yes -ExecStart=/usr/local/bin/vault server -config=/etc/vault.d/vault.hcl -ExecReload=/bin/kill --signal HUP $MAINPID -KillMode=process -KillSignal=SIGINT -Restart=on-failure -RestartSec=5 -TimeoutStopSec=30 -LimitNOFILE=65536 -LimitMEMLOCK=infinity - -[Install] -WantedBy=multi-user.target diff --git a/e2e/terraform/packer/windows-2016-amd64.pkr.hcl b/e2e/terraform/packer/windows-2016-amd64.pkr.hcl index 21bc7f1ae..af6cdad99 100644 --- a/e2e/terraform/packer/windows-2016-amd64.pkr.hcl +++ b/e2e/terraform/packer/windows-2016-amd64.pkr.hcl @@ -5,7 +5,7 @@ variable "build_sha" { locals { timestamp = regex_replace(timestamp(), "[- TZ:]", "") - version = "v2" + version = "v3" } source "amazon-ebs" "latest_windows_2016" { @@ -42,19 +42,11 @@ build { "windows-2016-amd64/fix-tls.ps1", "windows-2016-amd64/install-nuget.ps1", "windows-2016-amd64/install-docker.ps1", - "windows-2016-amd64/install-consul.ps1" + "windows-2016-amd64/install-consul.ps1", + "windows-2016-amd64/install-nomad.ps1" ] } - provisioner "file" { - destination = "/opt/provision.ps1" - source = "./windows-2016-amd64/provision.ps1" - } - - provisioner "powershell" { - inline = ["/opt/provision.ps1 -nomad_version 0.12.7 -nostart"] - } - # this restart is required for adding the "containers feature", but we can # wait to do it until right before we do sysprep, which makes debugging # builds slightly faster diff --git a/e2e/terraform/packer/windows-2016-amd64/install-consul.ps1 b/e2e/terraform/packer/windows-2016-amd64/install-consul.ps1 index 5765c4eab..5d567d1f4 100755 --- a/e2e/terraform/packer/windows-2016-amd64/install-consul.ps1 +++ b/e2e/terraform/packer/windows-2016-amd64/install-consul.ps1 @@ -8,11 +8,11 @@ Set-Location C:\opt Try { $releases = "https://releases.hashicorp.com" - $version = "1.9.4+ent" + $version = "1.11.4+ent" $url = "${releases}/consul/${version}/consul_${version}_windows_amd64.zip" New-Item -ItemType Directory -Force -Path 
C:\opt\consul - New-Item -ItemType Directory -Force -Path C:\opt\consul.d + New-Item -ItemType Directory -Force -Path C:\etc\consul.d # TODO: check sha! Write-Output "Downloading Consul from: $url" @@ -22,6 +22,12 @@ Try { C:\opt\consul.exe version rm consul.zip + New-Service ` + -Name "Consul" ` + -BinaryPathName "C:\opt\consul.exe agent -config-dir C:\etc\consul.d" ` + -StartupType "Automatic" ` + -ErrorAction Ignore + } Catch { Write-Output "Failed to install Consul." Write-Output $_ diff --git a/e2e/terraform/packer/windows-2016-amd64/install-nomad.ps1 b/e2e/terraform/packer/windows-2016-amd64/install-nomad.ps1 new file mode 100755 index 000000000..7fe3e3d26 --- /dev/null +++ b/e2e/terraform/packer/windows-2016-amd64/install-nomad.ps1 @@ -0,0 +1,46 @@ +Set-StrictMode -Version latest +$ErrorActionPreference = "Stop" + +# Force TLS1.2 +[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 + +Set-Location C:\opt + +Try { + $releases = "https://releases.hashicorp.com" + $version = "1.2.6" + $url = "${releases}/nomad/${version}/nomad_${version}_windows_amd64.zip" + + New-Item -ItemType Directory -Force -Path C:\opt\nomad + New-Item -ItemType Directory -Force -Path C:\etc\nomad.d + + # TODO: check sha! + Write-Output "Downloading Nomad from: $url" + Invoke-WebRequest -Uri $url -Outfile nomad.zip -ErrorAction Stop + Expand-Archive .\nomad.zip .\ -ErrorAction Stop + Move-Item nomad.exe C:\opt\nomad.exe -Force -ErrorAction Stop + C:\opt\nomad.exe version + rm nomad.zip + + New-NetFirewallRule ` + -DisplayName 'Nomad HTTP Inbound' ` + -Profile @('Public', 'Domain', 'Private') ` + -Direction Inbound ` + -Action Allow ` + -Protocol TCP ` + -LocalPort @('4646') + + New-Service ` + -Name "Nomad" ` + -BinaryPathName "C:\opt\nomad.exe agent -config C:\etc\nomad.d" ` + -StartupType "Automatic" ` + -ErrorAction Ignore + +} Catch { + Write-Output "Failed to install Nomad." + Write-Output $_ + $host.SetShouldExit(-1) + throw +} + +Write-Output "Installed Nomad." diff --git a/e2e/terraform/packer/windows-2016-amd64/provision.ps1 b/e2e/terraform/packer/windows-2016-amd64/provision.ps1 deleted file mode 100755 index 508ff6baf..000000000 --- a/e2e/terraform/packer/windows-2016-amd64/provision.ps1 +++ /dev/null @@ -1,263 +0,0 @@ -param( - [string]$nomad_sha, - [string]$nomad_version, - [string]$nomad_binary, - [string]$nomad_url, - [switch]$enterprise = $false, - [switch]$nomad_acls = $false, - [string]$config_profile, - [string]$role, - [string]$index, - [string]$autojoin, - [switch]$nostart = $false -) - -Set-StrictMode -Version latest -$ErrorActionPreference = "Stop" - -$usage = @" -Usage: provision.ps1 [options...] -Options (use one of the following): - -nomad_sha SHA full git sha to install from S3 - -nomad_version VERSION release version number (ex. 0.12.4+ent) - -nomad_binary FILEPATH path to file on host - -nomad_url URL url path to nomad binary archive - -Options for configuration: - -config_profile FILEPATH path to config profile directory - -role ROLE role within config profile directory - -index INDEX count of instance, for profiles with per-instance config - -nostart do not start or restart Nomad - -enterprise if nomad_sha is passed, use the ENT version ---autojoin the AWS ConsulAutoJoin tag value - -"@ - -$RunningAsAdmin = ([Security.Principal.WindowsPrincipal] [Security.Principal.WindowsIdentity]::GetCurrent()).IsInRole([Security.Principal.WindowsBuiltInRole] "Administrator") -if (!$RunningAsAdmin) { - Write-Error "Must be executed in Administrator level shell." 
- exit 1 -} - -# Force TLS1.2 -[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 - - -$install_path = "C:\opt\nomad.exe" -$platform = "windows_amd64" - -Set-Location C:\opt - -function Usage { - Write-Output "${usage}" -} - -function InstallFromS3 { - Stop-Service -Name nomad -ErrorAction Ignore - - $build_folder = "builds-oss" - if ($enterprise) { - $build_folder = "builds-ent" - } - $key = "${build_folder}/nomad_${platform}_${nomad_sha}.zip" - - Write-Output "Downloading Nomad from s3: $key" - Try { - Remove-Item -Path ./nomad.zip -Force -ErrorAction Ignore - Read-S3Object -BucketName nomad-team-dev-test-binaries ` - -Key $key -File ./nomad.zip -ErrorAction Stop - - Remove-Item -Path $install_path -Force -ErrorAction Stop - Expand-Archive ./nomad.zip ./ -Force -ErrorAction Stop - Remove-Item -Path nomad.zip -Force -ErrorAction Ignore - - New-Item -ItemType Directory -Force -Path C:\opt\nomad.d -ErrorAction Stop - New-Item -ItemType Directory -Force -Path C:\opt\nomad -ErrorAction Stop - } Catch { - Write-Output "Failed to install Nomad." - Write-Output $_ - Write-Host $_.ScriptStackTrace - $host.SetShouldExit(-1) - throw - } - - Write-Output "Installed Nomad." -} - -function InstallFromUploadedBinary { - - Stop-Service -Name nomad -ErrorAction Ignore - - Try { - Remove-Item -Path $install_path -Force -ErrorAction Ignore - Move-Item -Path $nomad_binary -Destination $install_path -Force -ErrorAction Stop - - New-Item -ItemType Directory -Force -Path C:\opt\nomad.d -ErrorAction Stop - New-Item -ItemType Directory -Force -Path C:\opt\nomad -ErrorAction Stop - } Catch { - Write-Output "Failed to install Nomad." - Write-Output $_ - $host.SetShouldExit(-1) - throw - } - - Write-Output "Installed Nomad." -} - -function InstallFromRelease { - Try { - # check that we don't already have this version - if (C:\opt\nomad.exe -version ` - | Select-String -Pattern $nomad_version -SimpleMatch -Quiet) { - if (C:\opt\nomad.exe -version ` - | Select-String -Pattern dev -SimpleMatch -Quiet -NotMatch) { - Write-Output "${nomad_version} already installed" - return - } - } - } Catch { - Write-Output "${nomad_version} not previously installed" - } - - Stop-Service -Name nomad -ErrorAction Ignore - - $releases = "https://releases.hashicorp.com" - $url = "${releases}/nomad/${nomad_version}/nomad_${nomad_version}_${platform}.zip" - - Write-Output "Downloading Nomad from: $url" - Try { - Remove-Item -Path ./nomad.zip -Force -ErrorAction Ignore - Invoke-WebRequest -Uri $url -Outfile nomad.zip -ErrorAction Stop - - Remove-Item -Path $install_path -Force -ErrorAction Ignore - Expand-Archive .\nomad.zip .\ -ErrorAction Stop - Remove-Item -Path nomad.zip -Force -ErrorAction Ignore - - New-Item -ItemType Directory -Force -Path C:\opt\nomad.d -ErrorAction Stop - New-Item -ItemType Directory -Force -Path C:\opt\nomad -ErrorAction Stop - } Catch { - Write-Output "Failed to install Nomad." - Write-Output $_ - $host.SetShouldExit(-1) - throw - } - - Write-Output "Installed Nomad." 
-} - -function InstallFromURL { - Stop-Service -Name nomad -ErrorAction Ignore - - Write-Output "Downloading Nomad from: $nomad_url" - Try { - Remove-Item -Path ./nomad.zip -Force -ErrorAction Ignore - Invoke-WebRequest -Uri $nomad_url -Outfile nomad.zip -ErrorAction Stop - - Remove-Item -Path $install_path -Force -ErrorAction Ignore - Expand-Archive .\nomad.zip .\ -ErrorAction Stop - Remove-Item -Path nomad.zip -Force -ErrorAction Ignore - - New-Item -ItemType Directory -Force -Path C:\opt\nomad.d -ErrorAction Stop - New-Item -ItemType Directory -Force -Path C:\opt\nomad -ErrorAction Stop - } Catch { - Write-Output "Failed to install Nomad." - Write-Output $_ - $host.SetShouldExit(-1) - throw - } - - Write-Output "Installed Nomad." -} - - -function ConfigFiles($src, $dest) { - Get-ChildItem -Path "$src" -Name -Attributes !Directory -ErrorAction Ignore` - | ForEach-Object { ` - New-Item -ItemType SymbolicLink -Path "${dest}\$_" -Target "${src}\$_" } -} - -function InstallConfigProfile { - - if ( Test-Path -Path 'C:\tmp\custom' -PathType Container ) { - Remote-Item 'C:\opt\config\custom' -Force -ErrorAction Ignore - Move-Item -Path 'C:\tmp\custom' -Destination 'C:\opt\config\custom' -Force - } - - $cfg = "C:\opt\config\${config_profile}" - - Remove-Item "C:\opt\nomad.d\*" -Force -ErrorAction Ignore - Remove-Item "C:\opt\consul.d\*" -Force -ErrorAction Ignore - - ConfigFiles "${cfg}\nomad" "C:\opt\nomad.d" - ConfigFiles "${cfg}\consul" "C:\opt\consul.d" - - if ( "" -ne $role ) { - ConfigFiles "${cfg}\nomad\${role}" "C:\opt\nomad.d" - ConfigFiles "${cfg}\consul\${role}" "C:\opt\consul.d" - } - - if ( "" -ne $index ) { - ConfigFiles "${cfg}\nomad\${role}\indexed\*${index}*" "C:\opt\nomad.d" - ConfigFiles "${cfg}\consul\${role}\indexed\*${index}*" "C:\opt\consul.d" - } -} - -function UpdateConsulAutojoin { - (Get-Content C:\opt\consul.d\aws.json).replace("tag_key=ConsulAutoJoin tag_value=auto-join", "tag_key=ConsulAutoJoin tag_value=${autojoin}") | ` - Set-Content C:\opt\consul.d\aws.json -} - -function CreateConsulService { - New-Service ` - -Name "Consul" ` - -BinaryPathName "C:\opt\consul.exe agent -config-dir C:\opt\consul.d" ` - -StartupType "Automatic" ` - -ErrorAction Ignore -} - -function CreateNomadService { - New-NetFirewallRule ` - -DisplayName 'Nomad HTTP Inbound' ` - -Profile @('Public', 'Domain', 'Private') ` - -Direction Inbound ` - -Action Allow ` - -Protocol TCP ` - -LocalPort @('4646') - - # idempotently enable as a service - New-Service ` - -Name "Nomad" ` - -BinaryPathName "C:\opt\nomad.exe agent -config C:\opt\nomad.d" ` - -StartupType "Automatic" ` - -ErrorAction Ignore -} - -if ( "" -ne $nomad_sha ) { - InstallFromS3 - CreateNomadService -} -if ( "" -ne $nomad_version ) { - InstallFromRelease - CreateNomadService -} -if ( "" -ne $nomad_binary ) { - InstallFromUploadedBinary - CreateNomadService -} -if ( "" -ne $nomad_url ) { - InstallFromURL - CreateNomadService -} -if ( "" -ne $config_profile) { - InstallConfigProfile -} -if ( "" -ne $autojoin) { - UpdateConsulAutojoin -} - -if (!($nostart)) { - CreateConsulService - CreateNomadService - Restart-Service "Consul" - Restart-Service "Nomad" -} diff --git a/e2e/terraform/packer/windows-2016-amd64/userdata.ps1 b/e2e/terraform/packer/windows-2016-amd64/userdata.ps1 index 0956d0267..2cf500c87 100755 --- a/e2e/terraform/packer/windows-2016-amd64/userdata.ps1 +++ b/e2e/terraform/packer/windows-2016-amd64/userdata.ps1 @@ -95,11 +95,16 @@ Try { New-NetFirewallRule -Name sshd -DisplayName 'OpenSSH Server (sshd)' ` -Enabled True 
-Direction Inbound -Protocol TCP -Action Allow -LocalPort 22 -ErrorAction Stop + # Note: there appears to be a regression in recent versions of + # Terraform for file provisioning over ssh for Windows with + # powershell as the default shell + # See: https://github.com/hashicorp/terraform/issues/30661 + # # Set powershell as the OpenSSH login shell - New-ItemProperty -Path "HKLM:\SOFTWARE\OpenSSH" ` - -Name DefaultShell ` - -Value "C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe" ` - -PropertyType String -Force -ErrorAction Stop + # New-ItemProperty -Path "HKLM:\SOFTWARE\OpenSSH" ` + # -Name DefaultShell ` + # -Value "C:\Windows\System32\WindowsPowerShell\v1.0\powershell.exe" ` + # -PropertyType String -Force -ErrorAction Stop Write-Output "Installed OpenSSH." diff --git a/e2e/terraform/provision-nomad/install-linux.tf b/e2e/terraform/provision-nomad/install-linux.tf new file mode 100644 index 000000000..2f425bde9 --- /dev/null +++ b/e2e/terraform/provision-nomad/install-linux.tf @@ -0,0 +1,129 @@ +resource "local_file" "nomad_systemd_unit_file" { + sensitive_content = templatefile("etc/nomad.d/nomad-${var.role}.service", {}) + filename = "${local.upload_dir}/nomad.d/nomad.service" + file_permission = "0700" +} + +resource "null_resource" "install_nomad_binary_linux" { + count = var.platform == "linux" ? 1 : 0 + triggers = { nomad_binary_sha = filemd5(var.nomad_local_binary) } + + connection { + type = "ssh" + user = var.connection.user + host = var.instance.public_ip + port = var.connection.port + private_key = file(var.connection.private_key) + timeout = "5m" + } + + provisioner "file" { + source = var.nomad_local_binary + destination = "/tmp/nomad" + } + provisioner "remote-exec" { + inline = [ + "sudo mv /tmp/nomad /usr/local/bin/nomad", + "sudo chmod +x /usr/local/bin/nomad", + ] + } +} + +resource "null_resource" "install_consul_configs_linux" { + count = var.platform == "linux" ? 1 : 0 + + depends_on = [ + null_resource.upload_consul_configs, + ] + + connection { + type = "ssh" + user = var.connection.user + host = var.instance.public_ip + port = var.connection.port + private_key = file(var.connection.private_key) + timeout = "5m" + } + + provisioner "remote-exec" { + inline = [ + "mkdir -p /etc/consul.d", + "sudo rm -rf /etc/consul.d/*", + "sudo mv /tmp/consul_ca.pem /etc/consul.d/ca.pem", + "sudo mv /tmp/consul_client_acl.json /etc/consul.d/acl.json", + "sudo mv /tmp/consul_client.json /etc/consul.d/consul_client.json", + "sudo mv /tmp/consul_client_base.json /etc/consul.d/consul_client_base.json", + "sudo mv /tmp/consul.service /etc/systemd/system/consul.service", + ] + } +} + +resource "null_resource" "install_nomad_configs_linux" { + count = var.platform == "linux" ? 
1 : 0 + + depends_on = [ + null_resource.upload_nomad_configs, + ] + + connection { + type = "ssh" + user = var.connection.user + host = var.instance.public_ip + port = var.connection.port + private_key = file(var.connection.private_key) + timeout = "5m" + } + + provisioner "remote-exec" { + inline = [ + "mkdir -p /etc/nomad.d", + "mkdir -p /opt/nomad/data", + "sudo rm -rf /etc/nomad.d/*", + "sudo mv /tmp/consul.hcl /etc/nomad.d/consul.hcl", + "sudo mv /tmp/vault.hcl /etc/nomad.d/vault.hcl", + "sudo mv /tmp/base.hcl /etc/nomad.d/base.hcl", + "sudo mv /tmp/${var.role}-${var.platform}.hcl /etc/nomad.d/${var.role}-${var.platform}.hcl", + "sudo mv /tmp/${var.role}-${var.platform}-${var.index}.hcl /etc/nomad.d/${var.role}-${var.platform}-${var.index}.hcl", + "sudo mv /tmp/.environment /etc/nomad.d/.environment", + + # TLS + "sudo mkdir /etc/nomad.d/tls", + "sudo mv /tmp/tls.hcl /etc/nomad.d/tls.hcl", + "sudo mv /tmp/agent-${var.instance.public_ip}.key /etc/nomad.d/tls/agent.key", + "sudo mv /tmp/agent-${var.instance.public_ip}.crt /etc/nomad.d/tls/agent.crt", + "sudo mv /tmp/ca.crt /etc/nomad.d/tls/ca.crt", + + "sudo mv /tmp/nomad.service /etc/systemd/system/nomad.service", + ] + } + +} + +resource "null_resource" "restart_linux_services" { + count = var.platform == "linux" ? 1 : 0 + + depends_on = [ + null_resource.install_nomad_binary_linux, + null_resource.install_consul_configs_linux, + null_resource.install_nomad_configs_linux, + ] + + connection { + type = "ssh" + user = var.connection.user + host = var.instance.public_ip + port = var.connection.port + private_key = file(var.connection.private_key) + timeout = "5m" + } + + provisioner "remote-exec" { + inline = [ + "sudo systemctl daemon-reload", + "sudo systemctl enable consul", + "sudo systemctl restart consul", + "sudo systemctl enable nomad", + "sudo systemctl restart nomad", + ] + } +} diff --git a/e2e/terraform/provision-nomad/install-windows.tf b/e2e/terraform/provision-nomad/install-windows.tf new file mode 100644 index 000000000..035b96eee --- /dev/null +++ b/e2e/terraform/provision-nomad/install-windows.tf @@ -0,0 +1,123 @@ +resource "null_resource" "install_nomad_binary_windows" { + count = var.platform == "windows" ? 1 : 0 + triggers = { nomad_binary_sha = filemd5(var.nomad_local_binary) } + + connection { + type = "ssh" + user = var.connection.user + host = var.instance.public_ip + port = var.connection.port + private_key = file(var.connection.private_key) + target_platform = "windows" + timeout = "10m" + } + + provisioner "file" { + source = var.nomad_local_binary + destination = "/tmp/nomad" + } + provisioner "remote-exec" { + inline = [ + "powershell Move-Item -Force -Path C://tmp/nomad -Destination C:/opt/nomad.exe", + ] + } +} + +resource "null_resource" "install_consul_configs_windows" { + count = var.platform == "windows" ? 
1 : 0 + + depends_on = [ + null_resource.upload_consul_configs, + ] + + connection { + type = "ssh" + user = var.connection.user + host = var.instance.public_ip + port = var.connection.port + private_key = file(var.connection.private_key) + target_platform = "windows" + timeout = "10m" + } + + provisioner "remote-exec" { + inline = [ + "powershell Remove-Item -Force -Recurse -Path C://etc/consul.d", + "powershell New-Item -Force -Path C:// -Name opt -ItemType directory", + "powershell New-Item -Force -Path C://etc -Name consul.d -ItemType directory", + "powershell Move-Item -Force -Path C://tmp/consul_ca.pem C://Windows/System32/ca.pem", + "powershell Move-Item -Force -Path C://tmp/consul_client_acl.json C://etc/consul.d/acl.json", + "powershell Move-Item -Force -Path C://tmp/consul_client.json C://etc/consul.d/consul_client.json", + "powershell Move-Item -Force -Path C://tmp/consul_client_base.json C://etc/consul.d/consul_client_base.json", + ] + } +} + +resource "null_resource" "install_nomad_configs_windows" { + count = var.platform == "windows" ? 1 : 0 + + depends_on = [ + null_resource.upload_nomad_configs, + ] + + connection { + type = "ssh" + user = var.connection.user + host = var.instance.public_ip + port = var.connection.port + private_key = file(var.connection.private_key) + target_platform = "windows" + timeout = "10m" + } + + provisioner "remote-exec" { + inline = [ + "powershell Remove-Item -Force -Recurse -Path C://etc/nomad.d", + "powershell New-Item -Force -Path C:// -Name opt -ItemType directory", + "powershell New-Item -Force -Path C:// -Name etc -ItemType directory", + "powershell New-Item -Force -Path C://etc/ -Name nomad.d -ItemType directory", + "powershell New-Item -Force -Path C://opt/ -Name nomad -ItemType directory", + "powershell New-Item -Force -Path C://opt/nomad -Name data -ItemType directory", + "powershell Move-Item -Force -Path C://tmp/consul.hcl C://etc/nomad.d/consul.hcl", + "powershell Move-Item -Force -Path C://tmp/vault.hcl C://etc/nomad.d/vault.hcl", + "powershell Move-Item -Force -Path C://tmp/base.hcl C://etc/nomad.d/base.hcl", + "powershell Move-Item -Force -Path C://tmp/${var.role}-${var.platform}.hcl C://etc/nomad.d/${var.role}-${var.platform}.hcl", + "powershell Move-Item -Force -Path C://tmp/${var.role}-${var.platform}-${var.index}.hcl C://etc/nomad.d/${var.role}-${var.platform}-${var.index}.hcl", + "powershell Move-Item -Force -Path C://tmp/.environment C://etc/nomad.d/.environment", + + # TLS + "powershell New-Item -Force -Path C://etc/nomad.d -Name tls -ItemType directory", + "powershell Move-Item -Force -Path C://tmp/tls.hcl C://etc/nomad.d/tls.hcl", + "powershell Move-Item -Force -Path C://tmp/agent-${var.instance.public_ip}.key C://etc/nomad.d/tls/agent.key", + "powershell Move-Item -Force -Path C://tmp/agent-${var.instance.public_ip}.crt C://etc/nomad.d/tls/agent.crt", + "powershell Move-Item -Force -Path C://tmp/ca.crt C://etc/nomad.d/tls/ca.crt", + ] + } +} + +resource "null_resource" "restart_windows_services" { + count = var.platform == "windows" ? 
1 : 0 + + depends_on = [ + null_resource.install_nomad_binary_windows, + null_resource.install_consul_configs_windows, + null_resource.install_nomad_configs_windows, + ] + + connection { + type = "ssh" + user = var.connection.user + host = var.instance.public_ip + port = var.connection.port + private_key = file(var.connection.private_key) + target_platform = "windows" + timeout = "10m" + } + + provisioner "remote-exec" { + inline = [ + "powershell Restart-Service Consul", + "powershell Restart-Service Nomad" + ] + } +} diff --git a/e2e/terraform/provision-nomad/main.tf b/e2e/terraform/provision-nomad/main.tf index ba8130b7c..fb1a17d61 100644 --- a/e2e/terraform/provision-nomad/main.tf +++ b/e2e/terraform/provision-nomad/main.tf @@ -1,33 +1,46 @@ locals { - provision_script = var.platform == "windows_amd64" ? "powershell C:/opt/provision.ps1" : "/opt/provision.sh" + upload_dir = "uploads/${var.instance.public_ip}" - config_path = dirname("${path.root}/config/") + indexed_config_path = fileexists("etc/nomad.d/${var.role}-${var.platform}-${var.index}.hcl") ? "etc/nomad.d/${var.role}-${var.platform}-${var.index}.hcl" : "etc/nomad.d/index.hcl" - config_files = compact(setunion( - fileset(local.config_path, "**"), - )) - - update_config_command = var.platform == "windows_amd64" ? "powershell -Command \"& { if (test-path /opt/config) { Remove-Item -Path /opt/config -Force -Recurse }; cp -r C:/tmp/config /opt/config }\"" : "sudo rm -rf /opt/config; sudo mv /tmp/config /opt/config" - - # abstract-away platform-specific parameter expectations - _arg = var.platform == "windows_amd64" ? "-" : "--" - - tls_role = var.role == "server" ? "server" : "client" } -resource "null_resource" "provision_nomad" { +# if nomad_license is unset, it'll be a harmless empty license file +resource "local_file" "nomad_environment" { + sensitive_content = templatefile("etc/nomad.d/.environment", { + license = var.nomad_license + }) + filename = "${local.upload_dir}/nomad.d/.environment" + file_permission = "0600" +} - depends_on = [ - null_resource.upload_configs, - null_resource.upload_nomad_binary, - null_resource.generate_instance_tls_certs - ] +resource "local_file" "nomad_base_config" { + sensitive_content = templatefile("etc/nomad.d/base.hcl", { + data_dir = var.platform != "windows" ? "/opt/nomad/data" : "C://opt/nomad/data" + }) + filename = "${local.upload_dir}/nomad.d/base.hcl" + file_permission = "0600" +} - # no need to re-run if nothing changes - triggers = { - script = data.template_file.provision_script.rendered - } +resource "local_file" "nomad_role_config" { + sensitive_content = templatefile("etc/nomad.d/${var.role}-${var.platform}.hcl", {}) + filename = "${local.upload_dir}/nomad.d/${var.role}.hcl" + file_permission = "0600" +} +resource "local_file" "nomad_indexed_config" { + sensitive_content = templatefile(local.indexed_config_path, {}) + filename = "${local.upload_dir}/nomad.d/${var.role}-${var.platform}-${var.index}.hcl" + file_permission = "0600" +} + +resource "local_file" "nomad_tls_config" { + sensitive_content = templatefile("etc/nomad.d/tls.hcl", {}) + filename = "${local.upload_dir}/nomad.d/tls.hcl" + file_permission = "0600" +} + +resource "null_resource" "upload_consul_configs" { connection { type = "ssh" @@ -35,75 +48,33 @@ resource "null_resource" "provision_nomad" { host = var.instance.public_ip port = var.connection.port private_key = file(var.connection.private_key) - target_platform = var.platform == "windows_amd64" ? 
"windows" : "unix" + target_platform = var.arch == "windows_amd64" ? "windows" : "unix" timeout = "15m" } - provisioner "remote-exec" { - inline = [data.template_file.provision_script.rendered] + provisioner "file" { + source = "uploads/shared/consul.d/ca.pem" + destination = "/tmp/consul_ca.pem" } - -} - -data "template_file" "provision_script" { - template = "${local.provision_script}${data.template_file.arg_nomad_url.rendered}${data.template_file.arg_nomad_sha.rendered}${data.template_file.arg_nomad_version.rendered}${data.template_file.arg_nomad_binary.rendered}${data.template_file.arg_nomad_enterprise.rendered}${data.template_file.arg_nomad_license.rendered}${data.template_file.arg_nomad_acls.rendered}${data.template_file.arg_nomad_tls.rendered}${data.template_file.arg_profile.rendered}${data.template_file.arg_role.rendered}${data.template_file.arg_index.rendered}${data.template_file.autojoin_tag.rendered}" -} - -data "template_file" "arg_nomad_sha" { - template = var.nomad_sha != "" && var.nomad_local_binary == "" && var.nomad_url == "" ? " ${local._arg}nomad_sha ${var.nomad_sha}" : "" -} - -data "template_file" "arg_nomad_version" { - template = var.nomad_version != "" && var.nomad_sha == "" && var.nomad_url == "" && var.nomad_local_binary == "" ? " ${local._arg}nomad_version ${var.nomad_version}" : "" -} - -data "template_file" "arg_nomad_url" { - template = var.nomad_url != "" && var.nomad_local_binary == "" ? " ${local._arg}nomad_url '${var.nomad_url}'" : "" -} - -data "template_file" "arg_nomad_binary" { - template = var.nomad_local_binary != "" ? " ${local._arg}nomad_binary /tmp/nomad" : "" -} - -data "template_file" "arg_nomad_enterprise" { - template = var.nomad_enterprise ? " ${local._arg}enterprise" : "" -} - -data "template_file" "arg_nomad_license" { - template = var.nomad_license != "" ? " ${local._arg}nomad_license ${var.nomad_license}" : "" -} - -data "template_file" "arg_nomad_acls" { - template = var.nomad_acls ? " ${local._arg}nomad_acls" : "" -} - -data "template_file" "arg_nomad_tls" { - template = var.tls ? " ${local._arg}tls" : "" -} - -data "template_file" "arg_profile" { - template = var.profile != "" ? " ${local._arg}config_profile ${var.profile}" : "" -} - -data "template_file" "arg_role" { - template = var.role != "" ? " ${local._arg}role ${var.role}" : "" -} - -data "template_file" "arg_index" { - template = var.index != "" ? " ${local._arg}index ${var.index}" : "" -} - -data "template_file" "autojoin_tag" { - template = var.cluster_name != "" ? " ${local._arg}autojoin auto-join-${var.cluster_name}" : "" -} - -resource "null_resource" "upload_nomad_binary" { - - count = var.nomad_local_binary != "" ? 
1 : 0 - depends_on = [null_resource.upload_configs] - triggers = { - nomad_binary_sha = filemd5(var.nomad_local_binary) + provisioner "file" { + source = "uploads/shared/consul.d/consul_client.json" + destination = "/tmp/consul_client.json" } + provisioner "file" { + source = "uploads/shared/consul.d/client_acl.json" + destination = "/tmp/consul_client_acl.json" + } + provisioner "file" { + source = "uploads/shared/consul.d/consul_client_base.json" + destination = "/tmp/consul_client_base.json" + } + provisioner "file" { + source = "uploads/shared/consul.d/consul.service" + destination = "/tmp/consul.service" + } +} + +resource "null_resource" "upload_nomad_configs" { connection { type = "ssh" @@ -111,134 +82,55 @@ resource "null_resource" "upload_nomad_binary" { host = var.instance.public_ip port = var.connection.port private_key = file(var.connection.private_key) - target_platform = var.platform == "windows_amd64" ? "windows" : "unix" + target_platform = var.arch == "windows_amd64" ? "windows" : "unix" timeout = "15m" } + # created in hcp_consul.tf provisioner "file" { - source = var.nomad_local_binary - destination = "/tmp/nomad" + source = "uploads/shared/nomad.d/${var.role}-consul.hcl" + destination = "/tmp/consul.hcl" + } + # created in hcp_vault.tf + provisioner "file" { + source = "uploads/shared/nomad.d/vault.hcl" + destination = "/tmp/vault.hcl" + } + + provisioner "file" { + source = local_file.nomad_environment.filename + destination = "/tmp/.environment" + } + provisioner "file" { + source = local_file.nomad_base_config.filename + destination = "/tmp/base.hcl" + } + provisioner "file" { + source = local_file.nomad_role_config.filename + destination = "/tmp/${var.role}-${var.platform}.hcl" + } + provisioner "file" { + source = local_file.nomad_indexed_config.filename + destination = "/tmp/${var.role}-${var.platform}-${var.index}.hcl" + } + provisioner "file" { + source = local_file.nomad_tls_config.filename + destination = "/tmp/tls.hcl" + } + provisioner "file" { + source = local_file.nomad_systemd_unit_file.filename + destination = "/tmp/nomad.service" + } + provisioner "file" { + source = local_file.nomad_client_key.filename + destination = "/tmp/agent-${var.instance.public_ip}.key" + } + provisioner "file" { + source = local_file.nomad_client_cert.filename + destination = "/tmp/agent-${var.instance.public_ip}.crt" + } + provisioner "file" { + source = "keys/tls_ca.crt" + destination = "/tmp/ca.crt" } } - -resource "null_resource" "upload_configs" { - - triggers = { - hashes = join(",", [for file in local.config_files : filemd5("${local.config_path}/${file}")]) - } - - connection { - type = "ssh" - user = var.connection.user - host = var.instance.public_ip - port = var.connection.port - private_key = file(var.connection.private_key) - target_platform = var.platform == "windows_amd64" ? "windows" : "unix" - timeout = "15m" - } - - provisioner "file" { - source = local.config_path - destination = "/tmp/" - } - - provisioner "remote-exec" { - inline = [local.update_config_command] - } - -} - -// TODO: Create separate certs. -// This creates one set of certs to manage Nomad, Consul, and Vault and therefore -// puts all the required SAN entries to enable sharing certs. This is an anti-pattern -// that we should clean up. -resource "null_resource" "generate_instance_tls_certs" { - count = var.tls ? 
1 : 0 - depends_on = [null_resource.upload_configs] - - connection { - type = "ssh" - user = var.connection.user - host = var.instance.public_ip - port = var.connection.port - private_key = file(var.connection.private_key) - timeout = "15m" - } - - provisioner "local-exec" { - command = < keys/ca.crt -${var.tls_ca_cert} -EOT - -cat <<'EOT' > keys/ca.key -${var.tls_ca_key} -EOT - -openssl req -newkey rsa:2048 -nodes \ - -subj "/CN=${local.tls_role}.global.nomad" \ - -keyout keys/agent-${var.instance.public_ip}.key \ - -out keys/agent-${var.instance.public_ip}.csr - -cat <<'NEOY' > keys/agent-${var.instance.public_ip}.conf - -subjectAltName=DNS:${local.tls_role}.global.nomad,DNS:${local.tls_role}.dc1.consul,DNS:localhost,DNS:${var.instance.public_dns},DNS:vault.service.consul,DNS:active.vault.service.consul,IP:127.0.0.1,IP:${var.instance.private_ip},IP:${var.instance.public_ip} -extendedKeyUsage = serverAuth, clientAuth -basicConstraints = CA:FALSE -keyUsage = digitalSignature, keyEncipherment -NEOY - -openssl x509 -req -CAcreateserial \ - -extfile ./keys/agent-${var.instance.public_ip}.conf \ - -days 365 \ - -sha256 \ - -CA keys/ca.crt \ - -CAkey keys/ca.key \ - -in keys/agent-${var.instance.public_ip}.csr \ - -out keys/agent-${var.instance.public_ip}.crt - -EOF - } - - provisioner "remote-exec" { - inline = [ - "mkdir -p /tmp/nomad-tls", - ] - } - provisioner "file" { - source = "keys/ca.crt" - destination = "/tmp/nomad-tls/ca.crt" - } - provisioner "file" { - source = "keys/agent-${var.instance.public_ip}.crt" - destination = "/tmp/nomad-tls/agent.crt" - } - provisioner "file" { - source = "keys/agent-${var.instance.public_ip}.key" - destination = "/tmp/nomad-tls/agent.key" - } - # workaround to avoid updating packer - provisioner "file" { - source = "packer/ubuntu-bionic-amd64/provision.sh" - destination = "/opt/provision.sh" - } - provisioner "file" { - source = "config" - destination = "/tmp/config" - } - - provisioner "remote-exec" { - inline = [ - "sudo cp -r /tmp/nomad-tls /opt/config/${var.profile}/nomad/tls", - "sudo cp -r /tmp/nomad-tls /opt/config/${var.profile}/consul/tls", - "sudo cp -r /tmp/nomad-tls /opt/config/${var.profile}/vault/tls", - - # more workaround - "sudo rm -rf /opt/config", - "sudo mv /tmp/config /opt/config" - ] - } - -} diff --git a/e2e/terraform/provision-nomad/tls.tf b/e2e/terraform/provision-nomad/tls.tf new file mode 100644 index 000000000..4b04e76f4 --- /dev/null +++ b/e2e/terraform/provision-nomad/tls.tf @@ -0,0 +1,42 @@ +resource "tls_private_key" "nomad" { + algorithm = "ECDSA" + ecdsa_curve = "P384" +} + +resource "tls_cert_request" "nomad" { + key_algorithm = "ECDSA" + private_key_pem = tls_private_key.nomad.private_key_pem + ip_addresses = [var.instance.public_ip, var.instance.private_ip, "127.0.0.1"] + dns_names = ["${var.role}.global.nomad"] + + subject { + common_name = "${var.role}.global.nomad" + } +} + +resource "tls_locally_signed_cert" "nomad" { + cert_request_pem = tls_cert_request.nomad.cert_request_pem + ca_key_algorithm = var.tls_ca_algorithm + ca_private_key_pem = var.tls_ca_key + ca_cert_pem = var.tls_ca_cert + + validity_period_hours = 720 + + # Reasonable set of uses for a server SSL certificate. 
+ allowed_uses = [ + "key_encipherment", + "digital_signature", + "client_auth", + "server_auth", + ] +} + +resource "local_file" "nomad_client_key" { + sensitive_content = tls_private_key.nomad.private_key_pem + filename = "keys/agent-${var.instance.public_ip}.key" +} + +resource "local_file" "nomad_client_cert" { + sensitive_content = tls_locally_signed_cert.nomad.cert_pem + filename = "keys/agent-${var.instance.public_ip}.crt" +} diff --git a/e2e/terraform/provision-nomad/variables.tf b/e2e/terraform/provision-nomad/variables.tf index 0fc1583f8..093711f26 100644 --- a/e2e/terraform/provision-nomad/variables.tf +++ b/e2e/terraform/provision-nomad/variables.tf @@ -1,55 +1,19 @@ -variable "platform" { - type = string - description = "Platform ID (ex. \"linux_amd64\" or \"windows_amd64\")" - default = "linux_amd64" -} - -variable "nomad_version" { - type = string - description = "Nomad release version (ex. \"0.10.3\")" - default = "" -} - -variable "nomad_sha" { - type = string - description = "Nomad build full SHA (ex. \"fef22bdbfa094b5d076710354275e360867261aa\")" - default = "" -} - variable "nomad_local_binary" { type = string description = "Path to local Nomad build (ex. \"/home/me/bin/nomad\")" default = "" } -variable "nomad_url" { - type = string - description = "URL to Nomad binary (ex. \"https://circleci.com/.../linux_amd64.zip\")" - default = "" -} - -variable "nomad_enterprise" { - type = bool - description = "If nomad_sha is used, deploy Nomad Enterprise" - default = false -} - variable "nomad_license" { type = string description = "The enterprise license to use. overrides Nomad temporary license" default = "" } -variable "nomad_acls" { - type = bool - description = "Bootstrap ACLs" - default = false -} - -variable "tls" { - type = bool - description = "Bootstrap TLS" - default = false +variable "tls_ca_algorithm" { + type = string + description = "CA private key algorithm" + default = "ECDSA" } variable "tls_ca_key" { @@ -64,15 +28,21 @@ variable "tls_ca_cert" { default = "" } -variable "profile" { +variable "arch" { type = string - description = "The name of the configuration profile (ex. 'full-cluster')" - default = "" + description = "The architecture for this instance (ex. 'linux_amd64' or 'windows_amd64')" + default = "linux_amd64" +} + +variable "platform" { + type = string + description = "The platform for this instance (ex. 'windows' or 'linux')" + default = "linux" } variable "role" { type = string - description = "The role in the configuration profile for this instance (ex. 'client-linux')" + description = "The role for this instance (ex. 'client' or 'server')" default = "" } @@ -82,12 +52,6 @@ variable "index" { default = "" } -variable "cluster_name" { - type = string - description = "The random name assigned to the cluster" - default = "" -} - variable "instance" { type = object({ id = string diff --git a/e2e/terraform/scripts/bootstrap-vault.sh b/e2e/terraform/scripts/bootstrap-vault.sh deleted file mode 100755 index 5f1edf30d..000000000 --- a/e2e/terraform/scripts/bootstrap-vault.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" - -# unseal vault and get a root operator token; the vault is configured to -# autounseal with AWS KMS -while true : -do - ROOT_TOKEN=$(vault operator init -recovery-shares=1 -recovery-threshold=1 | awk '/Initial Root Token/{print $4}') - if [ ! 
-z $ROOT_TOKEN ]; then break; fi - sleep 5 -done -set -e - -export VAULT_TOKEN="$ROOT_TOKEN" - -mkdir -p ../keys -echo $VAULT_TOKEN > "${DIR}/../keys/vault_root_token" - -# write policies for Nomad to Vault, and then configure Nomad to use the -# token from those policies - -vault policy write nomad-server "${DIR}/vault-nomad-server-policy.hcl" -vault write /auth/token/roles/nomad-cluster "@${DIR}/vault-nomad-cluster-role.json" - -NOMAD_VAULT_TOKEN=$(vault token create -policy nomad-server -period 72h -orphan | awk '/token /{print $2}') - -cat < "${DIR}/../keys/nomad_vault.hcl" -vault { - enabled = true - address = "https://active.vault.service.consul:8200" - task_token_ttl = "1h" - create_from_role = "nomad-cluster" - token = "$NOMAD_VAULT_TOKEN" - ca_file = "/etc/vault.d/tls/ca.crt" - cert_file = "/etc/vault.d/tls/agent.crt" - key_file = "/etc/vault.d/tls/agent.key" -} - -EOF diff --git a/e2e/terraform/scripts/vault-nomad-cluster-role.json b/e2e/terraform/scripts/vault-nomad-cluster-role.json deleted file mode 100644 index 033ea30c1..000000000 --- a/e2e/terraform/scripts/vault-nomad-cluster-role.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "disallowed_policies": "nomad-server", - "token_explicit_max_ttl": 0, - "name": "nomad-cluster", - "orphan": true, - "token_period": 259200, - "renewable": true -} diff --git a/e2e/terraform/terraform.full.tfvars b/e2e/terraform/terraform.full.tfvars deleted file mode 100644 index fc83633e1..000000000 --- a/e2e/terraform/terraform.full.tfvars +++ /dev/null @@ -1,19 +0,0 @@ -region = "us-east-1" -instance_type = "t3.medium" -server_count = "3" -client_count_ubuntu_bionic_amd64 = "4" -client_count_windows_2016_amd64 = "1" -profile = "full-cluster" -nomad_enterprise = true -nomad_acls = true -vault = true -volumes = true -tls = true - -# required to avoid picking up defaults from terraform.tfvars file -nomad_version = "" # default version for deployment -nomad_local_binary = "" # overrides nomad_version if set - -# The nightly E2E runner will set a nomad_sha flag; this should not be used -# outside of the nightly E2E runner and will usually fail because the build -# will not be available diff --git a/e2e/terraform/terraform.tfvars b/e2e/terraform/terraform.tfvars index c1cc871c6..e1474ebd0 100644 --- a/e2e/terraform/terraform.tfvars +++ b/e2e/terraform/terraform.tfvars @@ -1,23 +1,12 @@ region = "us-east-1" instance_type = "t3.medium" server_count = "3" -client_count_ubuntu_bionic_amd64 = "2" -client_count_windows_2016_amd64 = "0" -profile = "dev-cluster" -nomad_acls = false -nomad_enterprise = false -vault = true -volumes = false -tls = true +client_count_ubuntu_bionic_amd64 = "4" +client_count_windows_2016_amd64 = "1" +volumes = true -nomad_version = "1.0.1" # default version for deployment -nomad_local_binary = "" # overrides nomad_version if set -nomad_url = "" # overrides nomad_version if set +nomad_local_binary = "../../pkg/linux_amd64/nomad" +nomad_local_binary_client_windows_2016_amd64 = ["../../pkg/windows_amd64/nomad.exe"] -# Example overrides: -# nomad_local_binary = "../../pkg/linux_amd64/nomad" -# nomad_local_binary_client_windows_2016_amd64 = ["../../pkg/windows_amd64/nomad.exe"] - -# The nightly E2E runner will set a nomad_sha flag; this should not be used -# outside of the nightly E2E runner and will usually fail because the build -# will not be available +# For testing enterprise, set via --var: +# nomad_license = diff --git a/e2e/terraform/tests/1-expected.json b/e2e/terraform/tests/1-expected.json deleted file mode 100644 index 
fa12a5fab..000000000 --- a/e2e/terraform/tests/1-expected.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "provisioning": { - "nomad_client_linux[0]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster", - "nomad_client_linux[1]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster", - "nomad_client_linux[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster", - "nomad_client_linux[3]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster", - "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_version 0.12.1 -config_profile dev-cluster", - "nomad_server[0]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster", - "nomad_server[1]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster", - "nomad_server[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster" - } -} diff --git a/e2e/terraform/tests/1-test.tfvars b/e2e/terraform/tests/1-test.tfvars deleted file mode 100644 index 28b86fca5..000000000 --- a/e2e/terraform/tests/1-test.tfvars +++ /dev/null @@ -1,8 +0,0 @@ -# test: install a public Nomad release -profile = "dev-cluster" - -server_count = 3 -client_count = 4 -windows_client_count = 1 - -nomad_version = "0.12.1" diff --git a/e2e/terraform/tests/2-expected.json b/e2e/terraform/tests/2-expected.json deleted file mode 100644 index 47b6e0dde..000000000 --- a/e2e/terraform/tests/2-expected.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "provisioning": { - "nomad_client_linux[0]": "/opt/provision.sh --nomad_version 0.12.0 --config_profile dev-cluster", - "nomad_client_linux[1]": "/opt/provision.sh --nomad_version 0.12.3 --config_profile dev-cluster", - "nomad_client_linux[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster", - "nomad_client_linux[3]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster", - "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_version 0.12.3 -config_profile dev-cluster", - "nomad_server[0]": "/opt/provision.sh --nomad_version 0.12.0 --config_profile dev-cluster", - "nomad_server[1]": "/opt/provision.sh --nomad_version 0.12.3 --config_profile dev-cluster", - "nomad_server[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile dev-cluster" - } -} diff --git a/e2e/terraform/tests/2-test.tfvars b/e2e/terraform/tests/2-test.tfvars deleted file mode 100644 index 5ddb530ce..000000000 --- a/e2e/terraform/tests/2-test.tfvars +++ /dev/null @@ -1,20 +0,0 @@ -# test: install a public Nomad release with overrides -profile = "dev-cluster" - -server_count = 3 -client_count = 4 -windows_client_count = 1 - -nomad_version = "0.12.1" -nomad_version_server = [ - "0.12.0", # override servers 1 and 2 - "0.12.3", -] -nomad_version_client_linux = [ - "0.12.0", # override linux client 1 and 2 - "0.12.3" -] -nomad_version_client_windows = [ - "0.12.3", # override windows client 1 - "0.12.4" # ignored -] diff --git a/e2e/terraform/tests/3-expected.json b/e2e/terraform/tests/3-expected.json deleted file mode 100644 index 07d5e5bc5..000000000 --- a/e2e/terraform/tests/3-expected.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "provisioning": { - "nomad_client_linux[0]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster", - "nomad_client_linux[1]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster", - "nomad_client_linux[2]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile 
dev-cluster", - "nomad_client_linux[3]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster", - "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf -config_profile dev-cluster", - "nomad_server[0]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster", - "nomad_server[1]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster", - "nomad_server[2]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster" - } -} diff --git a/e2e/terraform/tests/3-test.tfvars b/e2e/terraform/tests/3-test.tfvars deleted file mode 100644 index 61bc5894f..000000000 --- a/e2e/terraform/tests/3-test.tfvars +++ /dev/null @@ -1,8 +0,0 @@ -# test: install a specific Nomad sha -profile = "dev-cluster" - -server_count = 3 -client_count = 4 -windows_client_count = 1 - -nomad_sha = "2a6e62be00a0db228d8add74ceca6ca83c8efdcf" diff --git a/e2e/terraform/tests/4-expected.json b/e2e/terraform/tests/4-expected.json deleted file mode 100644 index 51d71322a..000000000 --- a/e2e/terraform/tests/4-expected.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "provisioning": { - "nomad_client_linux[0]": "/opt/provision.sh --nomad_sha 920f00da22726914e504d016bb588ca9c18240f2 --config_profile dev-cluster", - "nomad_client_linux[1]": "/opt/provision.sh --nomad_sha 568c4aa72b51050913365dae6b3b1d089d39b2a5 --config_profile dev-cluster", - "nomad_client_linux[2]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster", - "nomad_client_linux[3]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster", - "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_sha 920f00da22726914e504d016bb588ca9c18240f2 -config_profile dev-cluster", - "nomad_server[0]": "/opt/provision.sh --nomad_sha 920f00da22726914e504d016bb588ca9c18240f2 --config_profile dev-cluster", - "nomad_server[1]": "/opt/provision.sh --nomad_sha 568c4aa72b51050913365dae6b3b1d089d39b2a5 --config_profile dev-cluster", - "nomad_server[2]": "/opt/provision.sh --nomad_sha 2a6e62be00a0db228d8add74ceca6ca83c8efdcf --config_profile dev-cluster" - } -} diff --git a/e2e/terraform/tests/4-test.tfvars b/e2e/terraform/tests/4-test.tfvars deleted file mode 100644 index 4c87ef9fa..000000000 --- a/e2e/terraform/tests/4-test.tfvars +++ /dev/null @@ -1,20 +0,0 @@ -# test: install a specific Nomad sha with overrides -profile = "dev-cluster" - -server_count = 3 -client_count = 4 -windows_client_count = 1 - -nomad_sha = "2a6e62be00a0db228d8add74ceca6ca83c8efdcf" -nomad_sha_server = [ - "920f00da22726914e504d016bb588ca9c18240f2", # override server 1 and 2 - "568c4aa72b51050913365dae6b3b1d089d39b2a5", -] -nomad_sha_client_linux = [ - "920f00da22726914e504d016bb588ca9c18240f2", # override client 1 and 2 - "568c4aa72b51050913365dae6b3b1d089d39b2a5", -] -nomad_sha_client_windows = [ - "920f00da22726914e504d016bb588ca9c18240f2", # override windows client - "568c4aa72b51050913365dae6b3b1d089d39b2a5", # ignored -] diff --git a/e2e/terraform/tests/5-expected.json b/e2e/terraform/tests/5-expected.json deleted file mode 100644 index 3ca48f9a7..000000000 --- a/e2e/terraform/tests/5-expected.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "provisioning": { - "nomad_client_linux[0]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster", - "nomad_client_linux[1]": "/opt/provision.sh 
--nomad_binary ./mock-1 --config_profile dev-cluster", - "nomad_client_linux[2]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster", - "nomad_client_linux[3]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster", - "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_binary ./mock-1 -config_profile dev-cluster", - "nomad_server[0]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster", - "nomad_server[1]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster", - "nomad_server[2]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster" - } -} diff --git a/e2e/terraform/tests/5-test.tfvars b/e2e/terraform/tests/5-test.tfvars deleted file mode 100644 index 3f6c8c67a..000000000 --- a/e2e/terraform/tests/5-test.tfvars +++ /dev/null @@ -1,8 +0,0 @@ -# test: install a local Nomad binary -profile = "dev-cluster" - -server_count = 3 -client_count = 4 -windows_client_count = 1 - -nomad_local_binary = "./mock-1" diff --git a/e2e/terraform/tests/6-expected.json b/e2e/terraform/tests/6-expected.json deleted file mode 100644 index 5cad1ae57..000000000 --- a/e2e/terraform/tests/6-expected.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "provisioning": { - "nomad_client_linux[0]": "/opt/provision.sh --nomad_binary ./mock-2 --config_profile dev-cluster", - "nomad_client_linux[1]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster", - "nomad_client_linux[2]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster", - "nomad_client_linux[3]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster", - "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_binary ./mock-2 -config_profile dev-cluster", - "nomad_server[0]": "/opt/provision.sh --nomad_binary ./mock-2 --config_profile dev-cluster", - "nomad_server[1]": "/opt/provision.sh --nomad_binary ./mock-2 --config_profile dev-cluster", - "nomad_server[2]": "/opt/provision.sh --nomad_binary ./mock-1 --config_profile dev-cluster" - } -} diff --git a/e2e/terraform/tests/6-test.tfvars b/e2e/terraform/tests/6-test.tfvars deleted file mode 100644 index 8d0c17922..000000000 --- a/e2e/terraform/tests/6-test.tfvars +++ /dev/null @@ -1,18 +0,0 @@ -# test: install a local Nomad binary, with overrides -profile = "dev-cluster" - -server_count = 3 -client_count = 4 -windows_client_count = 1 - -nomad_local_binary = "./mock-1" -nomad_local_binary_server = [ - "./mock-2", # override servers 1 and 2 - "./mock-2", -] -nomad_local_binary_client_linux = [ - "./mock-2" # override client 1 -] -nomad_local_binary_client_windows = [ - "./mock-2" # override windows client -] diff --git a/e2e/terraform/tests/7-expected.json b/e2e/terraform/tests/7-expected.json deleted file mode 100644 index 718bb6a53..000000000 --- a/e2e/terraform/tests/7-expected.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "provisioning": { - "nomad_client_linux[0]": "/opt/provision.sh --nomad_version 0.12.0 --config_profile full-cluster", - "nomad_client_linux[1]": "/opt/provision.sh --nomad_version 0.12.3 --config_profile full-cluster", - "nomad_client_linux[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile full-cluster", - "nomad_client_linux[3]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile full-cluster", - "nomad_client_windows[0]": "C:/opt/provision.ps1 -nomad_version 0.12.0 -config_profile full-cluster", - "nomad_server[0]": "/opt/provision.sh --nomad_version 0.12.0 --config_profile full-cluster", - "nomad_server[1]": 
"/opt/provision.sh --nomad_version 0.12.3 --config_profile full-cluster", - "nomad_server[2]": "/opt/provision.sh --nomad_version 0.12.1 --config_profile full-cluster" - } -} diff --git a/e2e/terraform/tests/7-test.tfvars b/e2e/terraform/tests/7-test.tfvars deleted file mode 100644 index 5656e465a..000000000 --- a/e2e/terraform/tests/7-test.tfvars +++ /dev/null @@ -1,20 +0,0 @@ -# test: install a specific Nomad version with indexed configs -profile = "full-cluster" - -server_count = 3 -client_count = 4 -windows_client_count = 1 - -nomad_version = "0.12.1" -nomad_version_server = [ - "0.12.0", # override servers 1 and 2 - "0.12.3", -] -nomad_version_client_linux = [ - "0.12.0", # override clients 1 and 2 - "0.12.3", -] -nomad_version_client_windows = [ - "0.12.0", # override windows client - "0.12.3", # ignored -] diff --git a/e2e/terraform/tests/config b/e2e/terraform/tests/config deleted file mode 120000 index 3ca249e06..000000000 --- a/e2e/terraform/tests/config +++ /dev/null @@ -1 +0,0 @@ -../config \ No newline at end of file diff --git a/e2e/terraform/tests/mock-1 b/e2e/terraform/tests/mock-1 deleted file mode 100644 index 489f0cb15..000000000 --- a/e2e/terraform/tests/mock-1 +++ /dev/null @@ -1 +0,0 @@ -this is a mock file so that we can verify the checksum of a known file for testing diff --git a/e2e/terraform/tests/mock-2 b/e2e/terraform/tests/mock-2 deleted file mode 100644 index 37bda5903..000000000 --- a/e2e/terraform/tests/mock-2 +++ /dev/null @@ -1 +0,0 @@ -this is a different mock file so that we can verify the checksum of a known file for testing diff --git a/e2e/terraform/tests/nomad.tf b/e2e/terraform/tests/nomad.tf deleted file mode 100644 index 15ad2af02..000000000 --- a/e2e/terraform/tests/nomad.tf +++ /dev/null @@ -1,61 +0,0 @@ -locals { - # fake connection to satisfy module requirements - connection = { - type = "ssh" - user = "ubuntu" - host = "192.168.1.1" - port = 22 - private_key = "example" - } -} - -module "nomad_server" { - - source = "../provision-nomad" - count = var.server_count - platform = "linux_amd64" - profile = var.profile - connection = local.connection - - nomad_version = count.index < length(var.nomad_version_server) ? var.nomad_version_server[count.index] : var.nomad_version - - nomad_sha = count.index < length(var.nomad_sha_server) ? var.nomad_sha_server[count.index] : var.nomad_sha - - nomad_local_binary = count.index < length(var.nomad_local_binary_server) ? var.nomad_local_binary_server[count.index] : var.nomad_local_binary - - nomad_enterprise = var.nomad_enterprise -} - -module "nomad_client_linux" { - - source = "../provision-nomad" - count = var.client_count - platform = "linux_amd64" - profile = var.profile - connection = local.connection - - nomad_version = count.index < length(var.nomad_version_client_linux) ? var.nomad_version_client_linux[count.index] : var.nomad_version - - nomad_sha = count.index < length(var.nomad_sha_client_linux) ? var.nomad_sha_client_linux[count.index] : var.nomad_sha - - nomad_local_binary = count.index < length(var.nomad_local_binary_client_linux) ? var.nomad_local_binary_client_linux[count.index] : var.nomad_local_binary - - nomad_enterprise = var.nomad_enterprise -} - -module "nomad_client_windows" { - - source = "../provision-nomad" - count = var.windows_client_count - platform = "windows_amd64" - profile = var.profile - connection = local.connection - - nomad_version = count.index < length(var.nomad_version_client_windows) ? 
var.nomad_version_client_windows[count.index] : var.nomad_version - - nomad_sha = count.index < length(var.nomad_sha_client_windows) ? var.nomad_sha_client_windows[count.index] : var.nomad_sha - - nomad_local_binary = count.index < length(var.nomad_local_binary_client_windows) ? var.nomad_local_binary_client_windows[count.index] : var.nomad_local_binary - - nomad_enterprise = var.nomad_enterprise -} diff --git a/e2e/terraform/tests/test.sh b/e2e/terraform/tests/test.sh deleted file mode 100755 index cc84add56..000000000 --- a/e2e/terraform/tests/test.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash -# test the profiles by running Terraform plans and extracting the -# plans into JSON for comparison -set -eu - -command -v jq > /dev/null || (echo "jq required"; exit 1 ) -command -v terraform > /dev/null || (echo "terraform required"; exit 1 ) - -tempdir=$(mktemp -d) - -plan() { - vars_file="$1" - out_file="$2" - terraform plan --var-file="$vars_file" -out="$out_file" > /dev/null -} - -# read the plan file to extract the bits we care about into JSON, and -# then compare this to the expected file. -check() { - plan_file="$1" - expected_file="$2" - - got=$(terraform show -json "$plan_file" \ - | jq --sort-keys --raw-output ' -([.resource_changes[] - | select(.name == "provision_nomad") - | select(.change.actions[0] == "create")] - | reduce .[] as $i ({}; - .[($i.module_address|ltrimstr("module."))] = - .[($i.module_address|ltrimstr("module."))] - + $i.change.after.triggers.script) -) as $provisioning | -{ - provisioning: $provisioning -} -') - - # leaves behind the temp plan file for debugging - diff "$expected_file" <(echo "$got") - -} - -run() { - echo -n "testing $1-test.tfvars... " - plan "$1-test.tfvars" "${tempdir}/$1.plan" - check "${tempdir}/$1.plan" "$1-expected.json" - echo "ok!" -} - -for t in *-test.tfvars; do - run $(echo $t | grep -o '[0-9]\+') -done - -rm -r "${tempdir}" diff --git a/e2e/terraform/tests/variables.tf b/e2e/terraform/tests/variables.tf deleted file mode 120000 index 3a65dccd2..000000000 --- a/e2e/terraform/tests/variables.tf +++ /dev/null @@ -1 +0,0 @@ -../variables.tf \ No newline at end of file diff --git a/e2e/terraform/tls_ca.tf b/e2e/terraform/tls_ca.tf index d1cd16d26..03dd3c4da 100644 --- a/e2e/terraform/tls_ca.tf +++ b/e2e/terraform/tls_ca.tf @@ -1,3 +1,5 @@ +# tls_ca.tf defines the certificate authority we use for mTLS + resource "tls_private_key" "ca" { algorithm = "ECDSA" ecdsa_curve = "P384" @@ -27,44 +29,3 @@ resource "local_file" "ca_cert" { filename = "keys/tls_ca.crt" content = tls_self_signed_cert.ca.cert_pem } - -resource "tls_private_key" "api_client" { - algorithm = "ECDSA" - ecdsa_curve = "P384" -} - -resource "tls_cert_request" "api_client" { - key_algorithm = "ECDSA" - private_key_pem = tls_private_key.api_client.private_key_pem - - subject { - common_name = "${local.random_name} api client" - } -} - -resource "tls_locally_signed_cert" "api_client" { - cert_request_pem = tls_cert_request.api_client.cert_request_pem - ca_key_algorithm = tls_private_key.ca.algorithm - ca_private_key_pem = tls_private_key.ca.private_key_pem - ca_cert_pem = tls_self_signed_cert.ca.cert_pem - - - validity_period_hours = 720 - - # Reasonable set of uses for a server SSL certificate. 
- allowed_uses = [ - "key_encipherment", - "digital_signature", - "client_auth", - ] -} - -resource "local_file" "api_client_key" { - filename = "keys/tls_api_client.key" - content = tls_private_key.api_client.private_key_pem -} - -resource "local_file" "api_client_cert" { - filename = "keys/tls_api_client.crt" - content = tls_locally_signed_cert.api_client.cert_pem -} diff --git a/e2e/terraform/tls_client.tf b/e2e/terraform/tls_client.tf new file mode 100644 index 000000000..f9abd3852 --- /dev/null +++ b/e2e/terraform/tls_client.tf @@ -0,0 +1,42 @@ +# tls_client.tf defines the mTLS certs that'll be used by the E2E test +# runner + +resource "tls_private_key" "api_client" { + algorithm = "ECDSA" + ecdsa_curve = "P384" +} + +resource "tls_cert_request" "api_client" { + key_algorithm = "ECDSA" + private_key_pem = tls_private_key.api_client.private_key_pem + + subject { + common_name = "${local.random_name} api client" + } +} + +resource "tls_locally_signed_cert" "api_client" { + cert_request_pem = tls_cert_request.api_client.cert_request_pem + ca_key_algorithm = tls_private_key.ca.algorithm + ca_private_key_pem = tls_private_key.ca.private_key_pem + ca_cert_pem = tls_self_signed_cert.ca.cert_pem + + validity_period_hours = 720 + + # Reasonable set of uses for a server SSL certificate. + allowed_uses = [ + "key_encipherment", + "digital_signature", + "client_auth", + ] +} + +resource "local_file" "api_client_key" { + sensitive_content = tls_private_key.api_client.private_key_pem + filename = "keys/tls_api_client.key" +} + +resource "local_file" "api_client_cert" { + sensitive_content = tls_locally_signed_cert.api_client.cert_pem + filename = "keys/tls_api_client.crt" +} diff --git a/e2e/terraform/uploads/README.md b/e2e/terraform/uploads/README.md new file mode 100644 index 000000000..acbc7a8dc --- /dev/null +++ b/e2e/terraform/uploads/README.md @@ -0,0 +1,6 @@ +# Uploads Directory + +Terraform renders a bunch of configuration files for each host and +service, and then uploads the files to the appropriate locations on +the remote host. None of the files in this directory get committed to +source control. diff --git a/e2e/terraform/variables.tf b/e2e/terraform/variables.tf index 49f8f8790..19e2449f1 100644 --- a/e2e/terraform/variables.tf +++ b/e2e/terraform/variables.tf @@ -64,60 +64,39 @@ variable "restrict_ingress_cidrblock" { # The specific version of Nomad deployed will default to whichever one of # nomad_sha, nomad_version, or nomad_local_binary is set -variable "nomad_sha" { - description = "The sha of Nomad to provision; only used for automated nightly testing" - default = "" -} - -variable "nomad_version" { - description = "The release version of Nomad to provision" - default = "" -} - variable "nomad_local_binary" { description = "The path to a local binary to provision" default = "" } -variable "nomad_url" { - description = "the URL to Nomad binary archives e.g. 
CircleCI artifacts" - default = "" -} - -variable "nomad_enterprise" { - type = bool - description = "If nomad_sha is used, deploy Nomad Enterprise" - default = false -} - variable "nomad_license" { type = string description = "If nomad_license is set, deploy a license to override the temporary license" default = "" } -variable "nomad_acls" { - type = bool - description = "Bootstrap ACLs" - default = false -} - -variable "tls" { - type = bool - description = "Bootstrap TLS" - default = false -} - -variable "vault" { - type = bool - description = "Bootstrap Vault" - default = false -} - variable "volumes" { type = bool - description = "Include external EBS and EFS volumes (for CSI)" - default = false + description = "Include external EFS volumes (for CSI)" + default = true +} + +variable "hcp_consul_cluster_id" { + description = "The ID of the HCP Consul cluster" + type = string + default = "nomad-e2e-shared-hcp-consul" +} + +variable "hcp_vault_cluster_id" { + description = "The ID of the HCP Vault cluster" + type = string + default = "nomad-e2e-shared-hcp-vault" +} + +variable "hcp_vault_namespace" { + description = "The namespace where the HCP Vault cluster policy works" + type = string + default = "admin" } # ---------------------------------------- @@ -125,74 +104,20 @@ variable "volumes" { # provide a list of builds to override the values of nomad_sha, nomad_version, # or nomad_local_binary. Most of the time you can ignore these variables! -variable "nomad_version_server" { - description = "A list of Nomad versions to deploy to servers, to override nomad_version" - type = list(string) - default = [] -} - -variable "nomad_sha_server" { - description = "A list of Nomad SHAs to deploy to servers, to override nomad_sha" - type = list(string) - default = [] -} - variable "nomad_local_binary_server" { description = "A list of nomad local binary paths to deploy to servers, to override nomad_local_binary" type = list(string) default = [] } -variable "nomad_url_server" { - description = "A list of Nomad binary archive URLs to deploy to servers, to override nomad_url" - type = list(string) - default = [] -} - -variable "nomad_version_client_ubuntu_bionic_amd64" { - description = "A list of Nomad versions to deploy to Ubuntu Bionic clients, to override nomad_version" - type = list(string) - default = [] -} - -variable "nomad_sha_client_ubuntu_bionic_amd64" { - description = "A list of Nomad SHAs to deploy to Ubuntu Bionic clients, to override nomad_sha" - type = list(string) - default = [] -} - variable "nomad_local_binary_client_ubuntu_bionic_amd64" { description = "A list of nomad local binary paths to deploy to Ubuntu Bionic clients, to override nomad_local_binary" type = list(string) default = [] } -variable "nomad_url_client_ubuntu_bionic_amd64" { - description = "A list of Nomad binary archive URLs to deploy to Ubuntu Bionic clients, to override nomad_url" - type = list(string) - default = [] -} - -variable "nomad_version_client_windows_2016_amd64" { - description = "A list of Nomad versions to deploy to Windows 2016 clients, to override nomad_version" - type = list(string) - default = [] -} - -variable "nomad_sha_client_windows_2016_amd64" { - description = "A list of Nomad SHAs to deploy to Windows 2016 clients, to override nomad_sha" - type = list(string) - default = [] -} - variable "nomad_local_binary_client_windows_2016_amd64" { description = "A list of nomad local binary paths to deploy to Windows 2016 clients, to override nomad_local_binary" type = list(string) default = [] } - 
-variable "nomad_url_client_windows_2016_amd64" { - description = "A list of Nomad binary archive URLs to deploy to Windows 2016 clients, to override nomad_url" - type = list(string) - default = [] -} diff --git a/e2e/terraform/vault.tf b/e2e/terraform/vault.tf deleted file mode 100644 index ac067b003..000000000 --- a/e2e/terraform/vault.tf +++ /dev/null @@ -1,69 +0,0 @@ -locals { - - vault_env = var.tls ? "VAULT_ADDR=https://${aws_instance.server.0.public_ip}:8200 VAULT_CACERT=keys/tls_ca.crt VAULT_CLIENT_CERT=keys/tls_api_client.crt VAULT_CLIENT_KEY=keys/tls_api_client.key" : "VAULT_ADDR=http://${aws_instance.server.0.public_ip}:8200" -} - -resource "null_resource" "bootstrap_vault" { - depends_on = [ - aws_instance.server, - module.nomad_server - ] - triggers = { - script = data.template_file.bootstrap_vault_script.rendered - } - - provisioner "local-exec" { - command = data.template_file.bootstrap_vault_script.rendered - } -} - -# write the bootstrap token to the keys/ directory (where the ssh key is) -# so that we can read it into the data.local_file later. If not set, -# ensure that it's empty. -data "template_file" "bootstrap_vault_script" { - template = var.vault ? "${local.vault_env} ./scripts/bootstrap-vault.sh" : "mkdir -p ${path.root}/keys; echo > ${path.root}/keys/vault_root_token; echo ${path.root}/keys/nomad_vault.hcl" -} - -data "local_file" "vault_token" { - depends_on = [null_resource.bootstrap_vault] - filename = "${path.root}/keys/vault_root_token" -} - -data "local_file" "nomad_vault_config" { - depends_on = [null_resource.bootstrap_vault] - filename = "${path.root}/keys/nomad_vault.hcl" -} - -resource "null_resource" "nomad_vault_config" { - - depends_on = [ - aws_instance.server, - null_resource.bootstrap_vault - ] - - triggers = { - data = data.local_file.nomad_vault_config.content - } - - count = var.server_count - - provisioner "file" { - source = "${path.root}/keys/nomad_vault.hcl" - destination = "./nomad_vault.hcl" - } - - provisioner "remote-exec" { - inline = [ - "sudo mv ./nomad_vault.hcl /etc/nomad.d/nomad_vault.hcl", - "sudo systemctl restart nomad" - ] - } - - connection { - type = "ssh" - user = "ubuntu" - host = aws_instance.server[count.index].public_ip - port = 22 - private_key = file("${path.root}/keys/${local.random_name}.pem") - } -} From 09cdaca5bab9531652c6445979e1c507fdf49453 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Fri, 18 Mar 2022 10:12:28 -0400 Subject: [PATCH 77/89] api: fix ENT-only test imports for moved testutil package (#12320) The `api/testutil` package was moved to `api/internal/testutil` but this wasn't caught in the ENT tests because they're not run here in the OSS repo. --- api/operator_ent_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/api/operator_ent_test.go b/api/operator_ent_test.go index 842ae6790..7e315a5d7 100644 --- a/api/operator_ent_test.go +++ b/api/operator_ent_test.go @@ -6,6 +6,7 @@ package api import ( "testing" + "github.com/hashicorp/nomad/api/internal/testutil" "github.com/stretchr/testify/require" ) From a44c55ae84ee7c877edd1502fa43f581d8e9e851 Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Fri, 18 Mar 2022 07:48:08 -0500 Subject: [PATCH 78/89] ci: limit gotestsum to circle ci Part 2 of breaking up https://github.com/hashicorp/nomad/pull/12255 This PR makes it so gotestsum is invoked only in CircleCI. Also the HCLogger(t) is plumbed more correctly in TestServer and TestAgent so that they respect NOMAD_TEST_LOG_LEVEL. 
The reason for these is we'll want to disable logging in GHA, where spamming the disk with logs really drags performance. --- .circleci/config.yml | 2 +- GNUmakefile | 12 +++++------- command/agent/testagent.go | 9 +++++++++ helper/testlog/testlog.go | 36 ++++++++++++++++++++++++++++++------ nomad/testing.go | 30 ++++++++---------------------- 5 files changed, 53 insertions(+), 36 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index c97299c45..c84171ed5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -544,7 +544,7 @@ executors: GOBIN: c:\gopath\bin GOTESTSUM_PATH: c:\tmp\test-reports GOLANG_VERSION: 1.17.5 - GOTESTSUM_VERSION: 0.4.2 + GOTESTSUM_VERSION: 1.7.0 VAULT_VERSION: 1.4.1 workflows: diff --git a/GNUmakefile b/GNUmakefile index 6a0c88f7d..fa36f852c 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -32,7 +32,11 @@ ifndef NOMAD_NO_UI GO_TAGS := ui $(GO_TAGS) endif +ifeq ($(CIRCLECI),true) GO_TEST_CMD = $(if $(shell command -v gotestsum 2>/dev/null),gotestsum --,go test) +else +GO_TEST_CMD = go test +endif ifeq ($(origin GOTEST_PKGS_EXCLUDE), undefined) GOTEST_PKGS ?= "./..." @@ -49,12 +53,6 @@ LAST_RELEASE ?= v1.2.6 default: help -ifeq ($(CI),true) - $(info Running in a CI environment, verbose mode is disabled) -else - VERBOSE="true" -endif - ifeq (Linux,$(THIS_OS)) ALL_TARGETS = linux_386 \ linux_amd64 \ @@ -131,7 +129,7 @@ deps: ## Install build and development dependencies go install github.com/hashicorp/go-bindata/go-bindata@bf7910af899725e4938903fb32048c7c0b15f12e go install github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs@234c15e7648ff35458026de92b34c637bae5e6f7 go install github.com/a8m/tree/cmd/tree@fce18e2a750ea4e7f53ee706b1c3d9cbb22de79c - go install gotest.tools/gotestsum@v0.4.2 + go install gotest.tools/gotestsum@v1.7.0 go install github.com/hashicorp/hcl/v2/cmd/hclfmt@v2.5.1 go install github.com/golang/protobuf/protoc-gen-go@v1.3.4 go install github.com/hashicorp/go-msgpack/codec/codecgen@v1.1.5 diff --git a/command/agent/testagent.go b/command/agent/testagent.go index 5ab554fbe..b69459c85 100644 --- a/command/agent/testagent.go +++ b/command/agent/testagent.go @@ -16,6 +16,7 @@ import ( metrics "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/api" + client "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/fingerprint" "github.com/hashicorp/nomad/helper/freeport" "github.com/hashicorp/nomad/helper/testlog" @@ -357,6 +358,14 @@ func (a *TestAgent) config() *Config { config := nomad.DefaultConfig() conf.NomadConfig = config + // Setup client config + conf.ClientConfig = client.DefaultConfig() + + logger := testlog.HCLogger(a.T) + conf.LogLevel = testlog.HCLoggerTestLevel().String() + conf.NomadConfig.Logger = logger + conf.ClientConfig.Logger = logger + // Set the name conf.NodeName = a.Name diff --git a/helper/testlog/testlog.go b/helper/testlog/testlog.go index 28977001f..73a4a12b2 100644 --- a/helper/testlog/testlog.go +++ b/helper/testlog/testlog.go @@ -5,6 +5,7 @@ package testlog import ( "bytes" + "fmt" "io" "log" "os" @@ -39,25 +40,48 @@ func WithPrefix(t LogPrinter, prefix string) *log.Logger { return New(t, prefix, log.Lmicroseconds) } -// Logger returns a new test logger with the Lmicroseconds flag set and no -// prefix. +// Logger returns a new test logger with the Lmicroseconds flag set and no prefix. +// +// Note: only use this where HCLogger cannot be used (i.e. RPC yamux configuration). 
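+//
+// Unlike HCLogger, this logger applies no level filtering, so
+// NOMAD_TEST_LOG_LEVEL has no effect on its output.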
func Logger(t LogPrinter) *log.Logger { return WithPrefix(t, "") } -//HCLogger returns a new test hc-logger. +// HCLogger returns a new test hc-logger. +// +// Default log level is TRACE. Set NOMAD_TEST_LOG_LEVEL for custom log level. func HCLogger(t LogPrinter) hclog.InterceptLogger { + logger, _ := HCLoggerNode(t, -1) + return logger +} + +// HCLoggerTestLevel returns the level in which hc log should emit logs. +// +// Default log level is TRACE. Set NOMAD_TEST_LOG_LEVEL for custom log level. +func HCLoggerTestLevel() hclog.Level { level := hclog.Trace envLogLevel := os.Getenv("NOMAD_TEST_LOG_LEVEL") if envLogLevel != "" { level = hclog.LevelFromString(envLogLevel) } + return level +} + +// HCLoggerNode returns a new hc-logger, but with a prefix indicating the node number +// on each log line. Useful for TestServer in tests with more than one server. +// +// Default log level is TRACE. Set NOMAD_TEST_LOG_LEVEL for custom log level. +func HCLoggerNode(t LogPrinter, node int32) (hclog.InterceptLogger, io.Writer) { + var output io.Writer = os.Stderr + if node > -1 { + output = NewPrefixWriter(t, fmt.Sprintf("node-%03d", node)) + } opts := &hclog.LoggerOptions{ - Level: level, - Output: os.Stderr, + Level: HCLoggerTestLevel(), + Output: output, IncludeLocation: true, } - return hclog.NewInterceptLogger(opts) + return hclog.NewInterceptLogger(opts), output } type prefixStderr struct { diff --git a/nomad/testing.go b/nomad/testing.go index 7c86f91e8..9fbe2ca02 100644 --- a/nomad/testing.go +++ b/nomad/testing.go @@ -4,14 +4,10 @@ import ( "fmt" "math/rand" "net" - "os" "sync/atomic" + "testing" "time" - testing "github.com/mitchellh/go-testing-interface" - "github.com/pkg/errors" - - "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper/freeport" "github.com/hashicorp/nomad/helper/pluginutils/catalog" @@ -20,13 +16,14 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/version" + "github.com/pkg/errors" ) var ( - nodeNumber uint32 = 0 + nodeNumber int32 = 0 ) -func TestACLServer(t testing.T, cb func(*Config)) (*Server, *structs.ACLToken, func()) { +func TestACLServer(t *testing.T, cb func(*Config)) (*Server, *structs.ACLToken, func()) { server, cleanup := TestServer(t, func(c *Config) { c.ACLEnabled = true if cb != nil { @@ -41,33 +38,22 @@ func TestACLServer(t testing.T, cb func(*Config)) (*Server, *structs.ACLToken, f return server, token, cleanup } -func TestServer(t testing.T, cb func(*Config)) (*Server, func()) { +func TestServer(t *testing.T, cb func(*Config)) (*Server, func()) { // Setup the default settings config := DefaultConfig() // Setup default enterprise-specific settings, including license defaultEnterpriseTestConfig(config) - config.Logger = testlog.HCLogger(t) config.Build = version.Version + "+unittest" config.DevMode = true config.EnableEventBroker = true config.BootstrapExpect = 1 - nodeNum := atomic.AddUint32(&nodeNumber, 1) + nodeNum := atomic.AddInt32(&nodeNumber, 1) config.NodeName = fmt.Sprintf("nomad-%03d", nodeNum) // configure logger - level := hclog.Trace - if envLogLevel := os.Getenv("NOMAD_TEST_LOG_LEVEL"); envLogLevel != "" { - level = hclog.LevelFromString(envLogLevel) - } - opts := &hclog.LoggerOptions{ - Level: level, - Output: testlog.NewPrefixWriter(t, config.NodeName+" "), - IncludeLocation: true, - } - config.Logger = hclog.NewInterceptLogger(opts) - config.LogOutput = opts.Output + config.Logger, config.LogOutput = 
testlog.HCLoggerNode(t, nodeNum) // Tighten the Serf timing config.SerfConfig.MemberlistConfig.BindAddr = "127.0.0.1" @@ -168,7 +154,7 @@ func TestServer(t testing.T, cb func(*Config)) (*Server, func()) { return nil, nil } -func TestJoin(t testing.T, servers ...*Server) { +func TestJoin(t *testing.T, servers ...*Server) { for i := 0; i < len(servers)-1; i++ { addr := fmt.Sprintf("127.0.0.1:%d", servers[i].config.SerfConfig.MemberlistConfig.BindPort) From 5c2aa93d6e24018dd5ec4ddf9a2e2a2b59694d35 Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Fri, 18 Mar 2022 10:39:46 -0500 Subject: [PATCH 79/89] ci: turn on testing in github actions --- .github/workflows/test-core.yaml | 152 +++++++++++++++++++++++++++++++ GNUmakefile | 1 + 2 files changed, 153 insertions(+) create mode 100644 .github/workflows/test-core.yaml diff --git a/.github/workflows/test-core.yaml b/.github/workflows/test-core.yaml new file mode 100644 index 000000000..3a7da21d3 --- /dev/null +++ b/.github/workflows/test-core.yaml @@ -0,0 +1,152 @@ +name: Core CI Tests +on: + pull_request: + branches: + - main + paths-ignore: + - 'README.md' + - 'CHANGELOG.md' + - '.changelog/*' + - '.tours/*' + - 'contributing/*' + - 'demo/*' + - 'pkg/*' + - 'scripts/*' + - 'terraform/*' + - 'ui/*' + - 'website/*' + push: + branches-ignore: + - main + - release-** + paths-ignore: + - 'README.md' + - 'CHANGELOG.md' + - '.changelog/*' + - '.tours/*' + - 'contributing/*' + - 'demo/*' + - 'pkg/*' + - 'scripts/*' + - 'terraform/*' + - 'ui/*' + - 'website/*' +env: + GO_VERSION: 1.17.7 + GOBIN: /usr/local/bin + GOTESTARCH: amd64 + CONSUL_VERSION: 1.11.3 + VAULT_VERSION: 1.9.3 + NOMAD_SLOW_TEST: 0 + NOMAD_TEST_LOG_LEVEL: ERROR +jobs: + checks: + runs-on: ubuntu-20.04 + timeout-minutes: 10 + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 # needs tags for checkproto + - uses: magnetikonline/action-golang-cache@v1 + with: + go-version: ${{env.GO_VERSION}} + cache-key-suffix: -checks + - name: Run make check + run: | + make bootstrap + make check + compile: + strategy: + fail-fast: false + matrix: + os: [ubuntu-20.04, macos-11, windows-2019] + runs-on: ${{matrix.os}} + timeout-minutes: 20 + steps: + - uses: actions/checkout@v2 + - uses: magnetikonline/action-golang-cache@v1 + with: + go-version: ${{env.GO_VERSION}} + cache-key-suffix: -compile + - name: Run make dev + env: + GOBIN: ${{env.GOROOT}}/bin # windows kludge + run: | + make bootstrap + make dev + tests-api: + runs-on: ubuntu-20.04 + timeout-minutes: 30 + steps: + - uses: actions/checkout@v2 + - uses: magnetikonline/action-golang-cache@v1 + with: + go-version: ${{env.GO_VERSION}} + cache-key-suffix: -api + - name: Run API tests + env: + GOTEST_MOD: api + run: | + make bootstrap + make generate-all + make test-nomad-module + tests-pkgs: + runs-on: ubuntu-20.04 + timeout-minutes: 30 + strategy: + fail-fast: false + matrix: + pkg: + - acl + - client + - client/allocdir + - client/allochealth + - client/allocrunner + - client/allocwatcher + - client/config + - client/consul + - client/devicemanager + - client/dynamicplugins + - client/fingerprint + # - client/lib/... + - client/logmon + - client/pluginmanager + - client/state + - client/stats + - client/structs + - client/taskenv + - command + - command/agent + # - drivers/docker + # - drivers/exec + - drivers/java + - drivers/rawexec + - helper/... + - internal/... + - jobspec/... + - lib/... + - nomad + - nomad/deploymentwatcher + - nomad/stream + - nomad/structs + - nomad/volumewatcher + - plugins/... + - scheduler/... 
+ - testutil + steps: + - uses: actions/checkout@v2 + - uses: magnetikonline/action-golang-cache@v1 + with: + go-version: ${{env.GO_VERSION}} + cache-key-suffix: -pkgs + - name: Run Matrix Tests + env: + GOTEST_PKGS: ./${{matrix.pkg}} + run: | + make bootstrap + make generate-all + hc-install vault ${{env.VAULT_VERSION}} + hc-install consul ${{env.CONSUL_VERSION}} + sudo sed -i 's!Defaults!#Defaults!g' /etc/sudoers + sudo -E env "PATH=$PATH" make test-nomad + diff --git a/GNUmakefile b/GNUmakefile index fa36f852c..64cd3856f 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -136,6 +136,7 @@ deps: ## Install build and development dependencies go install github.com/bufbuild/buf/cmd/buf@v0.36.0 go install github.com/hashicorp/go-changelog/cmd/changelog-build@latest go install golang.org/x/tools/cmd/stringer@v0.1.8 + go install gophers.dev/cmds/hc-install/cmd/hc-install@v1.0.1 .PHONY: lint-deps lint-deps: ## Install linter dependencies From 665967cc313072f979db491225d6b2850327bdb9 Mon Sep 17 00:00:00 2001 From: Seth Hoenig Date: Fri, 18 Mar 2022 12:47:38 -0500 Subject: [PATCH 80/89] ci: scope to push, ignore more dirs, update go update script --- .github/workflows/test-core.yaml | 18 +++--------------- scripts/update_golang_version.sh | 3 +++ 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/.github/workflows/test-core.yaml b/.github/workflows/test-core.yaml index 3a7da21d3..5f2a66d3b 100644 --- a/.github/workflows/test-core.yaml +++ b/.github/workflows/test-core.yaml @@ -1,20 +1,5 @@ name: Core CI Tests on: - pull_request: - branches: - - main - paths-ignore: - - 'README.md' - - 'CHANGELOG.md' - - '.changelog/*' - - '.tours/*' - - 'contributing/*' - - 'demo/*' - - 'pkg/*' - - 'scripts/*' - - 'terraform/*' - - 'ui/*' - - 'website/*' push: branches-ignore: - main @@ -26,6 +11,9 @@ on: - '.tours/*' - 'contributing/*' - 'demo/*' + - 'dev/*' + - 'e2e/terraform/*' + - 'integrations/*' - 'pkg/*' - 'scripts/*' - 'terraform/*' diff --git a/scripts/update_golang_version.sh b/scripts/update_golang_version.sh index 89854b0ee..bd44a8075 100755 --- a/scripts/update_golang_version.sh +++ b/scripts/update_golang_version.sh @@ -23,6 +23,9 @@ sed -i'' -e "s|/golang:[.0-9]*|/golang:${golang_version}|g" .circleci/config.yml sed -i'' -e "s|GOLANG_VERSION:[ \"]*[.0-9]*\"*|GOLANG_VERSION: ${golang_version}|g" \ .circleci/config.yml +sed -i'' -e "s|GO_VERSION:[ \"]*[.0-9]*\"*|GO_VERSION: ${golang_version}|g" \ + .github/workflows/test-core.yaml + sed -i'' -e "s|\\(Install .Go\\) [.0-9]*|\\1 ${golang_version}|g" \ contributing/README.md From 0a2981fcba0ad8472a839cd98d0cbd8bfc123c2b Mon Sep 17 00:00:00 2001 From: James Rasell Date: Mon, 21 Mar 2022 13:37:08 +0100 Subject: [PATCH 81/89] core: fixup node drain update message spelling. 
--- nomad/node_endpoint.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nomad/node_endpoint.go b/nomad/node_endpoint.go index 3dabf9c19..2a6b7dc94 100644 --- a/nomad/node_endpoint.go +++ b/nomad/node_endpoint.go @@ -35,7 +35,7 @@ const ( // NodeDrainEvents are the various drain messages NodeDrainEventDrainSet = "Node drain strategy set" NodeDrainEventDrainDisabled = "Node drain disabled" - NodeDrainEventDrainUpdated = "Node drain stategy updated" + NodeDrainEventDrainUpdated = "Node drain strategy updated" // NodeEligibilityEventEligible is used when the nodes eligiblity is marked // eligible From c3f53fa8d0bd461b6ce40b059d0f614852a4bf3a Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 21 Mar 2022 11:05:02 -0400 Subject: [PATCH 82/89] E2E: ensure `ConnectACLsE2ETest` has clean state before starting (#12334) The `ConnectACLsE2ETest` checks that the SI tokens have been properly cleaned up between tests, but following the change to use HCP the previous `Connect` test suite will often have SI tokens that haven't been cleaned up by the time this test suite runs. Wait for the SI tokens to be cleaned up at the start of the test to ensure we have a clean state. --- e2e/connect/acls.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/e2e/connect/acls.go b/e2e/connect/acls.go index a81fa97f9..364107905 100644 --- a/e2e/connect/acls.go +++ b/e2e/connect/acls.go @@ -40,6 +40,13 @@ func (tc *ConnectACLsE2ETest) BeforeAll(f *framework.F) { _, err := uuidparse.ParseUUID(tc.consulManagementToken) f.NoError(err, "CONSUL_HTTP_TOKEN not set") + + // ensure SI tokens from previous test cases were removed + f.Eventually(func() bool { + siTokens := tc.countSITokens(f.T()) + f.T().Log("cleanup: checking for remaining SI tokens:", siTokens) + return len(siTokens) == 0 + }, 2*time.Minute, 2*time.Second, "SI tokens did not get removed") } // AfterEach does cleanup of Consul ACL objects that were created during each From 02d26ceb1a35c40af88eb7b1583d67474ece90ef Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 21 Mar 2022 11:48:47 -0400 Subject: [PATCH 83/89] CSI: set plugin `CSI_ENDPOINT` env var only if unset by user (#12257) * Use unix:// prefix for CSI_ENDPOINT variable by default * Some plugins have strict validation over the format of the `CSI_ENDPOINT` variable, and unfortunately not all plugins agree. Allow the user to override the `CSI_ENDPOINT` to workaround those cases. 
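  As a hypothetical example, a plugin task whose driver rejects the scheme
  prefix could pin a bare path in its env stanza (the value must still
  point at the socket path under the task's csi_plugin mount_dir, where
  Nomad expects to dial the plugin):

      env {
        CSI_ENDPOINT = "/csi/csi.sock"
      }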
* Update all demos and tests with CSI_ENDPOINT --- .../taskrunner/plugin_supervisor_hook.go | 28 ++++++++++--------- demo/csi/ceph-csi-plugin/README.md | 3 +- demo/csi/cinder-csi-plugin/README.md | 23 +++++++-------- .../cinder-csi-plugin/cinder-csi-plugin.hcl | 5 ++-- demo/csi/digitalocean/plugin.nomad | 2 +- demo/csi/hostpath/plugin.nomad | 2 +- e2e/csi/input/plugin-aws-ebs-controller.nomad | 2 +- e2e/csi/input/plugin-aws-ebs-nodes.nomad | 2 +- e2e/csi/input/plugin-aws-efs-nodes.nomad | 10 +++---- 9 files changed, 38 insertions(+), 39 deletions(-) diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go index 122386fd7..415b6e20b 100644 --- a/client/allocrunner/taskrunner/plugin_supervisor_hook.go +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -170,19 +170,21 @@ func (h *csiPluginSupervisorHook) Prestart(ctx context.Context, h.setSocketHook() - switch h.caps.FSIsolation { - case drivers.FSIsolationNone: - // Plugin tasks with no filesystem isolation won't have the - // plugin dir bind-mounted to their alloc dir, but we can - // provide them the path to the socket. These Nomad-only - // plugins will need to be aware of the csi directory layout - // in the client data dir - resp.Env = map[string]string{ - "CSI_ENDPOINT": h.socketPath} - default: - resp.Env = map[string]string{ - "CSI_ENDPOINT": filepath.Join( - h.task.CSIPluginConfig.MountDir, structs.CSISocketName)} + if _, ok := h.task.Env["CSI_ENDPOINT"]; !ok { + switch h.caps.FSIsolation { + case drivers.FSIsolationNone: + // Plugin tasks with no filesystem isolation won't have the + // plugin dir bind-mounted to their alloc dir, but we can + // provide them the path to the socket. These Nomad-only + // plugins will need to be aware of the csi directory layout + // in the client data dir + resp.Env = map[string]string{ + "CSI_ENDPOINT": "unix://" + h.socketPath} + default: + resp.Env = map[string]string{ + "CSI_ENDPOINT": "unix://" + filepath.Join( + h.task.CSIPluginConfig.MountDir, structs.CSISocketName)} + } } mounts := ensureMountpointInserted(h.runner.hookResources.getMounts(), configMount) diff --git a/demo/csi/ceph-csi-plugin/README.md b/demo/csi/ceph-csi-plugin/README.md index 0eaa1aa63..f00b30ed6 100644 --- a/demo/csi/ceph-csi-plugin/README.md +++ b/demo/csi/ceph-csi-plugin/README.md @@ -19,7 +19,8 @@ Refer to the official plugin * `--type=rbd`: driver type `rbd` (or alternately `cephfs`) -* `--endpoint=unix:///csi/csi.sock`: this option must match the `mount_dir` +* `--endpoint=${CSI_ENDPOINT}`: if you don't use the `CSI_ENDPOINT` + environment variable, this option must match the `mount_dir` specified in the `csi_plugin` stanza for the task. * `--nodeid=${node.unique.id}`: a unique ID for the node the task is running diff --git a/demo/csi/cinder-csi-plugin/README.md b/demo/csi/cinder-csi-plugin/README.md index a6c133441..43b1652d0 100644 --- a/demo/csi/cinder-csi-plugin/README.md +++ b/demo/csi/cinder-csi-plugin/README.md @@ -3,7 +3,7 @@ ## Requirements The containers that run the Node/Controller applications require a cloud-config file be mounted in the containers and the path specified in the containers `args`. - + The example plugin job creates a file at `local/cloud.conf` using a [`template`](https://www.nomadproject.io/docs/job-specification/template) stanza which pulls the necessary credentials from a [Vault kv-v2](https://www.vaultproject.io/docs/secrets/kv/kv-v2) secrets store. 
However, other methods, such as using the [`artifact`](https://www.nomadproject.io/docs/job-specification/artifact) stanza, will work as well for delivering the `cloud.conf` file to the CSI drivers. ### Example cloud.conf @@ -24,21 +24,18 @@ The Cinder CSI Node task requires that [`privileged = true`](https://www.nomadpr ## Container Arguments -- `--endpoint=unix:///csi/csi.sock` +* `--endpoint=${CSI_ENDPOINT}`: If you don't use the `CSI_ENDPOINT` + environment variable, this option must match the `mount_dir` + specified in the `csi_plugin` stanza for the task. - - This option must match the `mount_dir` specified in the `csi_plugin` stanza for the task. +* `--cloud-config=/etc/config/cloud.conf`: The location that the + cloud.conf file was mounted inside the container -- `--cloud-config=/etc/config/cloud.conf` - - - The location that the cloud.conf file was mounted inside the container - -- `--nodeid=${node.unique.name}` +* `--nodeid=${node.unique.name}`: A unique ID for the node the task is + running on. Recommend using `${node.unique.name}` - - A unique ID for the node the task is running on. Recommend using `${node.unique.name}` - -- `--cluster=${NOMAD_DC}` - - - The cluster the Controller/Node is a part of. Recommend using `${NOMAD_DC}` +* `--cluster=${NOMAD_DC}`: The cluster the Controller/Node is a part + of. Recommend using `${NOMAD_DC}` ## Deployment diff --git a/demo/csi/cinder-csi-plugin/cinder-csi-plugin.hcl b/demo/csi/cinder-csi-plugin/cinder-csi-plugin.hcl index 92235bf4b..e35b95334 100644 --- a/demo/csi/cinder-csi-plugin/cinder-csi-plugin.hcl +++ b/demo/csi/cinder-csi-plugin/cinder-csi-plugin.hcl @@ -34,7 +34,7 @@ EOF args = [ "/bin/cinder-csi-plugin", "-v=4", - "--endpoint=unix:///csi/csi.sock", + "--endpoint=${CSI_ENDPOINT}", "--cloud-config=/etc/config/cloud.conf", "--nodeid=${node.unique.name}", ] @@ -73,7 +73,7 @@ EOF args = [ "/bin/cinder-csi-plugin", "-v=4", - "--endpoint=unix:///csi/csi.sock", + "--endpoint=${CSI_ENDPOINT}", "--cloud-config=/etc/config/cloud.conf", "--nodeid=${node.unique.name}", "--cluster=${NOMAD_DC}" @@ -88,4 +88,3 @@ EOF } } } - diff --git a/demo/csi/digitalocean/plugin.nomad b/demo/csi/digitalocean/plugin.nomad index 998142948..321234e16 100644 --- a/demo/csi/digitalocean/plugin.nomad +++ b/demo/csi/digitalocean/plugin.nomad @@ -10,7 +10,7 @@ job "digitalocean" { config { image = "digitalocean/do-csi-plugin:v2.1.1" args = [ - "--endpoint=unix://csi/csi.sock", + "--endpoint=${CSI_ENDPOINT}", "--token=${token}", "--url=https://api.digitalocean.com/", ] diff --git a/demo/csi/hostpath/plugin.nomad b/demo/csi/hostpath/plugin.nomad index a1b43f326..60637d10a 100644 --- a/demo/csi/hostpath/plugin.nomad +++ b/demo/csi/hostpath/plugin.nomad @@ -13,7 +13,7 @@ job "csi-plugin" { args = [ "--drivername=csi-hostpath", "--v=5", - "--endpoint=unix://csi/csi.sock", + "--endpoint=${CSI_ENDPOINT}", "--nodeid=node-${NOMAD_ALLOC_INDEX}", ] diff --git a/e2e/csi/input/plugin-aws-ebs-controller.nomad b/e2e/csi/input/plugin-aws-ebs-controller.nomad index dd0b675c7..2f022c6e0 100644 --- a/e2e/csi/input/plugin-aws-ebs-controller.nomad +++ b/e2e/csi/input/plugin-aws-ebs-controller.nomad @@ -26,7 +26,7 @@ job "plugin-aws-ebs-controller" { args = [ "controller", - "--endpoint=unix://csi/csi.sock", + "--endpoint=${CSI_ENDPOINT}", "--logtostderr", "--v=5", ] diff --git a/e2e/csi/input/plugin-aws-ebs-nodes.nomad b/e2e/csi/input/plugin-aws-ebs-nodes.nomad index 206b1df81..8bb9691f6 100644 --- a/e2e/csi/input/plugin-aws-ebs-nodes.nomad +++ 
b/e2e/csi/input/plugin-aws-ebs-nodes.nomad @@ -23,7 +23,7 @@ job "plugin-aws-ebs-nodes" { args = [ "node", - "--endpoint=unix://csi/csi.sock", + "--endpoint=${CSI_ENDPOINT}", "--logtostderr", "--v=5", ] diff --git a/e2e/csi/input/plugin-aws-efs-nodes.nomad b/e2e/csi/input/plugin-aws-efs-nodes.nomad index 3cf5358e9..67601f78a 100644 --- a/e2e/csi/input/plugin-aws-efs-nodes.nomad +++ b/e2e/csi/input/plugin-aws-efs-nodes.nomad @@ -19,13 +19,10 @@ job "plugin-aws-efs-nodes" { driver = "docker" config { - image = "amazon/aws-efs-csi-driver:v1.2.0" - - # note: the EFS driver doesn't seem to respect the --endpoint - # flag and always sets up the listener at '/tmp/csi.sock' + image = "amazon/aws-efs-csi-driver:v1.3.6" args = [ "node", - "--endpoint=unix://tmp/csi.sock", + "--endpoint=${CSI_ENDPOINT}", "--logtostderr", "--v=5", ] @@ -33,6 +30,9 @@ job "plugin-aws-efs-nodes" { privileged = true } + # note: the EFS driver doesn't seem to respect the --endpoint + # flag or CSI_ENDPOINT env var and always sets up the listener + # at '/tmp/csi.sock' csi_plugin { id = "aws-efs0" type = "node" From 70b752736ad2281ec4ac6e9091ed49708a58993a Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 22 Mar 2022 09:48:38 -0400 Subject: [PATCH 84/89] CSI: presentation improvements (#12325) * Fix plugin capability sorting. The `sort.StringSlice` method in the stdlib doesn't actually sort, but instead constructs a sorting type which you call `Sort()` on. * Sort allocations for plugins by modify index. Present allocations in modify index order so that newest allocations show up at the top of the list. This results in sorted allocs in `nomad plugin status :id`, just like `nomad job status :id`. * Sort allocations for volumes in HTTP response. Present allocations in modify index order so that newest allocations show up at the top of the list. This results in sorted allocs in `nomad volume status :id`, just like `nomad job status :id`. This is implemented in the HTTP response and not in the state store because the state store maintains two separate lists of allocs that are merged before sending over the API. 
* Fix length of alloc IDs in `nomad volume status` output --- command/agent/csi_endpoint.go | 5 +++++ command/plugin_status_csi.go | 6 ++++-- command/volume_status.go | 1 - nomad/state/state_store.go | 3 +++ 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/command/agent/csi_endpoint.go b/command/agent/csi_endpoint.go index a8e6d599f..8ecc989ba 100644 --- a/command/agent/csi_endpoint.go +++ b/command/agent/csi_endpoint.go @@ -2,6 +2,7 @@ package agent import ( "net/http" + "sort" "strconv" "strings" @@ -548,6 +549,10 @@ func structsCSIVolumeToApi(vol *structs.CSIVolume) *api.CSIVolume { } } + sort.Slice(out.Allocations, func(i, j int) bool { + return out.Allocations[i].ModifyIndex > out.Allocations[j].ModifyIndex + }) + return out } diff --git a/command/plugin_status_csi.go b/command/plugin_status_csi.go index cdf8e03c5..1ee5eaa4f 100644 --- a/command/plugin_status_csi.go +++ b/command/plugin_status_csi.go @@ -206,7 +206,8 @@ func (c *PluginStatusCommand) formatControllerCaps(controllers map[string]*api.C return "" } - return " " + strings.Join(sort.StringSlice(caps), "\n ") + sort.StringSlice(caps).Sort() + return " " + strings.Join(caps, "\n ") } func (c *PluginStatusCommand) formatNodeCaps(nodes map[string]*api.CSIInfo) string { @@ -237,7 +238,8 @@ func (c *PluginStatusCommand) formatNodeCaps(nodes map[string]*api.CSIInfo) stri return "" } - return " " + strings.Join(sort.StringSlice(caps), "\n ") + sort.StringSlice(caps).Sort() + return " " + strings.Join(caps, "\n ") } func (c *PluginStatusCommand) formatTopology(nodes map[string]*api.CSIInfo) string { diff --git a/command/volume_status.go b/command/volume_status.go index 39157af39..40360f312 100644 --- a/command/volume_status.go +++ b/command/volume_status.go @@ -114,7 +114,6 @@ func (c *VolumeStatusCommand) Run(args []string) int { if c.verbose { c.length = fullId } - c.length = fullId // Get the HTTP client client, err := c.Meta.Client() diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 24b9e6f8c..d1a355481 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -2793,6 +2793,9 @@ func (s *StateStore) CSIPluginDenormalizeTxn(txn Txn, ws memdb.WatchSet, plug *s } plug.Allocations = append(plug.Allocations, alloc.Stub(nil)) } + sort.Slice(plug.Allocations, func(i, j int) bool { + return plug.Allocations[i].ModifyIndex > plug.Allocations[j].ModifyIndex + }) return plug, nil } From 65e41fdb5462027a6d18ae2e37cba607715d1323 Mon Sep 17 00:00:00 2001 From: Jonathan Tey Date: Tue, 22 Mar 2022 21:50:50 +0800 Subject: [PATCH 85/89] demo: add missing file for Kadalu CSI demo (#12336) --- demo/csi/kadalu-csi/volume.hcl | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 demo/csi/kadalu-csi/volume.hcl diff --git a/demo/csi/kadalu-csi/volume.hcl b/demo/csi/kadalu-csi/volume.hcl new file mode 100644 index 000000000..8fb218daf --- /dev/null +++ b/demo/csi/kadalu-csi/volume.hcl @@ -0,0 +1,30 @@ +# Unfortunately 'variable' interpolation isn't supported in volume spec +# so, parameters has to be supplied again + +id = "csi-test" + +name = "csi-test" + +type = "csi" + +plugin_id = "kadalu-csi" + +capacity_min = "200M" + +capacity_max = "1G" + +capability { + access_mode = "multi-node-multi-writer" + attachment_mode = "file-system" +} + +parameters { + kadalu_format = "native" + + # Below parameters needs to be replaced correctly based on + # json file supplied during controller/nodeplugin job + storage_name = "POOL" + + gluster_hosts = "GHOST" + 

From 879e137457f8414e5ed34d189fdaa6f7029a9680 Mon Sep 17 00:00:00 2001
From: Tim Gross
Date: Tue, 22 Mar 2022 10:26:56 -0400
Subject: [PATCH 86/89] drainer: defer CSI plugins until last (#12324)

When a node is drained, system jobs are left until last so that
operators can rely on things like log shippers running even as their
applications are getting drained off. Include CSI plugins in this set
so that Controller plugins deployed as services can be handled as
gracefully as Node plugins that are running as system jobs.
---
 .changelog/12324.txt                         |  3 +++
 nomad/drainer/draining_node.go               |  2 +-
 nomad/structs/structs.go                     | 12 ++++++++++++
 website/content/docs/commands/node/drain.mdx |  5 ++---
 4 files changed, 18 insertions(+), 4 deletions(-)
 create mode 100644 .changelog/12324.txt

diff --git a/.changelog/12324.txt b/.changelog/12324.txt
new file mode 100644
index 000000000..c7086c644
--- /dev/null
+++ b/.changelog/12324.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+drainer: defer draining CSI plugin jobs until system jobs are drained
+```
diff --git a/nomad/drainer/draining_node.go b/nomad/drainer/draining_node.go
index 5a9ee1c15..348556e4f 100644
--- a/nomad/drainer/draining_node.go
+++ b/nomad/drainer/draining_node.go
@@ -139,7 +139,7 @@ func (n *drainingNode) DrainingJobs() ([]structs.NamespacedID, error) {
 	jobIDs := make(map[structs.NamespacedID]struct{})
 	var jobs []structs.NamespacedID
 	for _, alloc := range allocs {
-		if alloc.TerminalStatus() || alloc.Job.Type == structs.JobTypeSystem {
+		if alloc.TerminalStatus() || alloc.Job.Type == structs.JobTypeSystem || alloc.Job.IsPlugin() {
 			continue
 		}
 
diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go
index 325c07f76..c4ac6a652 100644
--- a/nomad/structs/structs.go
+++ b/nomad/structs/structs.go
@@ -4566,6 +4566,18 @@ func (j *Job) IsMultiregion() bool {
 	return j.Multiregion != nil && j.Multiregion.Regions != nil && len(j.Multiregion.Regions) > 0
 }
 
+// IsPlugin returns whether a job implements a plugin (currently just CSI)
+func (j *Job) IsPlugin() bool {
+	for _, tg := range j.TaskGroups {
+		for _, task := range tg.Tasks {
+			if task.CSIPluginConfig != nil {
+				return true
+			}
+		}
+	}
+	return false
+}
+
 // VaultPolicies returns the set of Vault policies per task group, per task
 func (j *Job) VaultPolicies() map[string]map[string]*Vault {
 	policies := make(map[string]map[string]*Vault, len(j.TaskGroups))
diff --git a/website/content/docs/commands/node/drain.mdx b/website/content/docs/commands/node/drain.mdx
index 171304bed..83a87d2ef 100644
--- a/website/content/docs/commands/node/drain.mdx
+++ b/website/content/docs/commands/node/drain.mdx
@@ -70,9 +70,8 @@ capability.
   without being force stopped after a certain deadline.
 
 - `-ignore-system`: Ignore system allows the drain to complete without
-  stopping system job allocations. By default system jobs are stopped
-  last. You should always use this flag when draining a node running
-  [CSI node plugins][internals-csi].
+  stopping system job allocations. By default system jobs (and CSI
+  plugins) are stopped last, after the `deadline` time has expired.
 
 - `-keep-ineligible`: Keep ineligible will maintain the node's scheduling
   ineligibility even if the drain is being disabled. This is useful when an
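
For reviewers, a minimal test sketch of the new helper (illustrative only, not part of the patch; it would live in the `structs` package):

```go
package structs

import "testing"

// TestJob_IsPlugin sketches the contract the drainer now relies on:
// any task carrying a csi_plugin block marks the whole job as a plugin
// job, so it is deferred along with system jobs during a drain.
func TestJob_IsPlugin(t *testing.T) {
	job := &Job{
		TaskGroups: []*TaskGroup{{
			Tasks: []*Task{{Name: "web"}},
		}},
	}
	if job.IsPlugin() {
		t.Fatal("job without a CSIPluginConfig should not be a plugin")
	}

	job.TaskGroups[0].Tasks[0].CSIPluginConfig = &TaskCSIPluginConfig{
		ID:   "aws-efs0",
		Type: CSIPluginTypeNode,
	}
	if !job.IsPlugin() {
		t.Fatal("job with a CSIPluginConfig should be a plugin")
	}
}
```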

From bf5006ec01d8024eb2dfd234bdcd559b6e8cdcc5 Mon Sep 17 00:00:00 2001
From: Luiz Aoqui
Date: Tue, 22 Mar 2022 15:06:25 -0400
Subject: [PATCH 87/89] set raft v3 as the default config (#12341)

---
 nomad/config.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/nomad/config.go b/nomad/config.go
index 2396900c1..6ea8cfd09 100644
--- a/nomad/config.go
+++ b/nomad/config.go
@@ -446,8 +446,8 @@ func DefaultConfig() *Config {
 	// Disable shutdown on removal
 	c.RaftConfig.ShutdownOnRemove = false
 
-	// Default to Raft v2, update to v3 to enable new Raft and autopilot features.
-	c.RaftConfig.ProtocolVersion = 2
+	// Default to Raft v3 since Nomad 1.3
+	c.RaftConfig.ProtocolVersion = 3
 
 	return c
 }
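
For context on what this knob controls (an illustration, not from the patch): the value is handed straight to `hashicorp/raft`, where v3 is the highest supported protocol and switches peer tracking to stable server IDs rather than addresses, which the next patch relies on.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/raft"
)

func main() {
	conf := raft.DefaultConfig()

	// The same assignment Nomad now makes in DefaultConfig(); v3 peers
	// are identified by ServerID instead of network address.
	conf.ProtocolVersion = 3

	fmt.Println(conf.ProtocolVersion == raft.ProtocolVersionMax) // true
}
```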

From 717053adbb91aa204a02c186557576e0eb863f82 Mon Sep 17 00:00:00 2001
From: Luiz Aoqui
Date: Tue, 22 Mar 2022 15:07:31 -0400
Subject: [PATCH 88/89] core: use the new Raft API when removing peers (#12340)

Raft v3 introduced a new API for adding and removing peers that takes
the peer ID instead of the address. Prior to this change, Nomad would
use the remote peer's Raft version to decide which API to use, but this
does not work when a Raft v3 server tries to remove a Raft v2 server:
the code doing the removal runs at v3, so it cannot call the v2 API.

This change uses the Raft version of the server running the code to
decide which API to use. If the remote peer is a Raft v2 server, the
server address is used as the ID.
---
 nomad/leader.go      | 36 +++++++++++++++++++---------------
 nomad/leader_test.go | 46 +++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 61 insertions(+), 21 deletions(-)

diff --git a/nomad/leader.go b/nomad/leader.go
index 9dd3792e3..7db6cc57e 100644
--- a/nomad/leader.go
+++ b/nomad/leader.go
@@ -1314,22 +1314,26 @@ func (s *Server) removeRaftPeer(m serf.Member, parts *serverParts) error {
 
 	// Pick which remove API to use based on how the server was added.
 	for _, server := range configFuture.Configuration().Servers {
-		// If we understand the new add/remove APIs and the server was added by ID, use the new remove API
-		if minRaftProtocol >= 2 && server.ID == raft.ServerID(parts.ID) {
-			s.logger.Info("removing server by ID", "id", server.ID)
-			future := s.raft.RemoveServer(raft.ServerID(parts.ID), 0, 0)
-			if err := future.Error(); err != nil {
-				s.logger.Error("failed to remove raft peer", "id", server.ID, "error", err)
-				return err
-			}
-			break
-		} else if server.Address == raft.ServerAddress(addr) {
-			// If not, use the old remove API
-			s.logger.Info("removing server by address", "address", server.Address)
-			future := s.raft.RemovePeer(raft.ServerAddress(addr))
-			if err := future.Error(); err != nil {
-				s.logger.Error("failed to remove raft peer", "address", addr, "error", err)
-				return err
+		// Check if this is the server to remove based on how it was registered.
+		// Raft v2 servers are registered by address.
+		// Raft v3 servers are registered by ID.
+		if server.ID == raft.ServerID(parts.ID) || server.Address == raft.ServerAddress(addr) {
+			// Use the new add/remove APIs if we understand them.
+			if minRaftProtocol >= 2 {
+				s.logger.Info("removing server by ID", "id", server.ID)
+				future := s.raft.RemoveServer(server.ID, 0, 0)
+				if err := future.Error(); err != nil {
+					s.logger.Error("failed to remove raft peer", "id", server.ID, "error", err)
+					return err
+				}
+			} else {
+				// If not, use the old remove API
+				s.logger.Info("removing server by address", "address", server.Address)
+				future := s.raft.RemovePeer(raft.ServerAddress(addr))
+				if err := future.Error(); err != nil {
+					s.logger.Error("failed to remove raft peer", "address", addr, "error", err)
+					return err
+				}
 			}
 			break
 		}
diff --git a/nomad/leader_test.go b/nomad/leader_test.go
index b244273b0..7ee5e90fd 100644
--- a/nomad/leader_test.go
+++ b/nomad/leader_test.go
@@ -1216,7 +1216,9 @@ func TestLeader_RollRaftServer(t *testing.T) {
 
 	// Kill the first v2 server
 	s1.Shutdown()
-	for _, s := range []*Server{s1, s3} {
+	for _, s := range []*Server{s2, s3} {
+		s.RemoveFailedNode(s1.config.NodeID)
+
 		retry.Run(t, func(r *retry.R) {
 			minVer, err := s.autopilot.MinRaftProtocol()
 			if err != nil {
@@ -1225,6 +1227,14 @@ func TestLeader_RollRaftServer(t *testing.T) {
 			if got, want := minVer, 2; got != want {
 				r.Fatalf("got min raft version %d want %d", got, want)
 			}
+
+			configFuture := s.raft.GetConfiguration()
+			if err := configFuture.Error(); err != nil {
+				r.Fatal(err)
+			}
+			if len(configFuture.Configuration().Servers) != 2 {
+				r.Fatalf("expected 2 servers, got %d", len(configFuture.Configuration().Servers))
+			}
 		})
 	}
 
@@ -1234,14 +1244,19 @@ func TestLeader_RollRaftServer(t *testing.T) {
 	// Replace the dead server with one running raft protocol v3
 	s4, cleanupS4 := TestServer(t, func(c *Config) {
 		c.RaftConfig.ProtocolVersion = 3
 	})
 	defer cleanupS4()
-	TestJoin(t, s4, s2)
+	TestJoin(t, s2, s3, s4)
 	servers[0] = s4
 	// Kill the second v2 server
 	s2.Shutdown()
 	for _, s := range []*Server{s3, s4} {
-		retry.Run(t, func(r *retry.R) {
+		s.RemoveFailedNode(s2.config.NodeID)
+
+		retry.RunWith(&retry.Counter{
+			Count: int(10 * testutil.TestMultiplier()),
+			Wait:  time.Duration(testutil.TestMultiplier()) * time.Second,
+		}, t, func(r *retry.R) {
 			minVer, err := s.autopilot.MinRaftProtocol()
 			if err != nil {
 				r.Fatal(err)
@@ -1249,6 +1264,14 @@ func TestLeader_RollRaftServer(t *testing.T) {
 			if got, want := minVer, 2; got != want {
 				r.Fatalf("got min raft version %d want %d", got, want)
 			}
+
+			configFuture := s.raft.GetConfiguration()
+			if err := configFuture.Error(); err != nil {
+				r.Fatal(err)
+			}
+			if len(configFuture.Configuration().Servers) != 2 {
+				r.Fatalf("expected 2 servers, got %d", len(configFuture.Configuration().Servers))
+			}
 		})
 	}
 
@@ -1257,14 +1272,19 @@ func TestLeader_RollRaftServer(t *testing.T) {
 	// Replace another dead server with one running raft protocol v3
 	s5, cleanupS5 := TestServer(t, func(c *Config) {
 		c.RaftConfig.ProtocolVersion = 3
 	})
 	defer cleanupS5()
-	TestJoin(t, s5, s4)
+	TestJoin(t, s3, s4, s5)
 	servers[1] = s5
 	// Kill the last v2 server, now minRaftProtocol should be 3
 	s3.Shutdown()
 	for _, s := range []*Server{s4, s5} {
-		retry.Run(t, func(r *retry.R) {
+		s.RemoveFailedNode(s3.config.NodeID)
+
+		retry.RunWith(&retry.Counter{
+			Count: int(10 * testutil.TestMultiplier()),
+			Wait:  time.Duration(testutil.TestMultiplier()) * time.Second,
+		}, t, func(r *retry.R) {
 			minVer, err := s.autopilot.MinRaftProtocol()
 			if err != nil {
 				r.Fatal(err)
@@ -1272,6 +1300,14 @@ func TestLeader_RollRaftServer(t *testing.T) {
 			if got, want := minVer, 3; got != want {
 				r.Fatalf("got min raft version %d want %d", got, want)
 			}
+
+			configFuture := s.raft.GetConfiguration()
+			if err := configFuture.Error(); err != nil {
+				r.Fatal(err)
+			}
+			if len(configFuture.Configuration().Servers) != 2 {
+				r.Fatalf("expected 2 servers, got %d", len(configFuture.Configuration().Servers))
+			}
 		})
 	}
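
Distilled from the change above, a standalone sketch of the decision (illustrative; `removePeer` is a hypothetical name, not Nomad's actual function):

```go
package sketch

import "github.com/hashicorp/raft"

// removePeer picks the removal API from the *local* server's minimum
// Raft protocol, as the patch does; for a v2 peer the address doubles
// as the ID, so the ID-based API still finds it.
func removePeer(r *raft.Raft, minRaftProtocol int, id raft.ServerID, addr raft.ServerAddress) error {
	if minRaftProtocol >= 2 {
		// v3-style removal by stable server ID.
		return r.RemoveServer(id, 0, 0).Error()
	}
	// Legacy removal by address, deprecated in raft v3.
	return r.RemovePeer(addr).Error()
}
```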

From 73afdea08d7582c326eeeb9aeefa467518110627 Mon Sep 17 00:00:00 2001
From: Tim Gross
Date: Tue, 22 Mar 2022 15:40:24 -0400
Subject: [PATCH 89/89] csi: fix handling of garbage collected node in node
 unpublish (#12350)

When a node is garbage collected, we assume that the volume is no
longer attached to it and ignore the `ErrUnknownNode` error. But we
used `errors.Is` to check for a wrapped error, and RPC flattens the
errors during serialization. This results in an error check that works
in automated testing but not in real clusters. Use a string contains
check instead.
---
 .changelog/12350.txt  | 3 +++
 nomad/csi_endpoint.go | 6 ++++--
 2 files changed, 7 insertions(+), 2 deletions(-)
 create mode 100644 .changelog/12350.txt

diff --git a/.changelog/12350.txt b/.changelog/12350.txt
new file mode 100644
index 000000000..a70ffae56
--- /dev/null
+++ b/.changelog/12350.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+csi: Fixed a bug where garbage collected nodes would block releasing a volume
+```
diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go
index e46351bbf..add9c3cd8 100644
--- a/nomad/csi_endpoint.go
+++ b/nomad/csi_endpoint.go
@@ -1,9 +1,9 @@
 package nomad
 
 import (
-	"errors"
 	"fmt"
 	"net/http"
+	"strings"
 	"time"
 
 	metrics "github.com/armon/go-metrics"
@@ -741,7 +741,9 @@ func (v *CSIVolume) nodeUnpublishVolumeImpl(vol *structs.CSIVolume, claim *struc
 		// we should only get this error if the Nomad node disconnects and
 		// is garbage-collected, so at this point we don't have any reason
 		// to operate as though the volume is attached to it.
-		if !errors.Is(err, structs.ErrUnknownNode) {
+		// note: errors.Is cannot be used because the RPC call breaks
+		// error wrapping.
+		if !strings.Contains(err.Error(), structs.ErrUnknownNode.Error()) {
 			return fmt.Errorf("could not detach from node: %w", err)
 		}
 	}
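
To see why the `errors.Is` check passed unit tests but failed in real clusters, here is a small self-contained sketch (not part of the patch; `ErrUnknownNode` is redeclared locally for illustration) of an error losing its chain across a string round trip, which is what RPC serialization effectively does:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

var ErrUnknownNode = errors.New("Unknown node")

func main() {
	// In-process: a wrapped error preserves the error chain.
	wrapped := fmt.Errorf("could not detach from node: %w", ErrUnknownNode)
	fmt.Println(errors.Is(wrapped, ErrUnknownNode)) // true

	// After an RPC round trip only the message survives: the error is
	// re-created from its string, so the chain is gone.
	flattened := errors.New(wrapped.Error())
	fmt.Println(errors.Is(flattened, ErrUnknownNode)) // false

	// The string check still matches, which is why the patch uses it.
	fmt.Println(strings.Contains(flattened.Error(), ErrUnknownNode.Error())) // true
}
```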