From 2dadf9fe6ce4dd2af575550a852774040469e1e1 Mon Sep 17 00:00:00 2001
From: Juana De La Cuesta
Date: Tue, 4 Mar 2025 16:23:07 +0100
Subject: [PATCH] Improve stability (#25244)

* func: add dependencies to avoid race conditions and move the per-client
  update into the main upgrade scenario

* Update enos/enos-scenario-upgrade.hcl

Co-authored-by: Tim Gross

* Update enos/enos-scenario-upgrade.hcl

Co-authored-by: Tim Gross

---------

Co-authored-by: Tim Gross
---
 enos/enos-modules.hcl                        |   4 +-
 enos/enos-scenario-upgrade.hcl               | 191 ++++++++++++++--
 enos/modules/run_workloads/main.tf           |   2 +-
 enos/modules/run_workloads/outputs.tf        |   5 +
 enos/modules/test_cluster_health/main.tf     |   2 +
 .../test_cluster_health/scripts/allocs.sh    |  15 +-
 .../test_cluster_health/scripts/clients.sh   |   4 +-
 .../test_cluster_health/scripts/jobs.sh      |   2 +-
 .../test_cluster_health/scripts/servers.sh   |   2 +-
 .../test_cluster_health/scripts/versions.sh  |   1 -
 enos/modules/upgrade_client/main.tf          |  84 +++++++
 .../scripts/set_metadata.sh                  |   0
 .../scripts/verify_metadata.sh               |   6 +-
 .../scripts/wait_for_nomad_api.sh            |   0
 .../variables.tf                             |   6 +-
 enos/modules/upgrade_clients/main.tf         | 211 ------------------
 16 files changed, 292 insertions(+), 243 deletions(-)
 create mode 100644 enos/modules/upgrade_client/main.tf
 rename enos/modules/{upgrade_clients => upgrade_client}/scripts/set_metadata.sh (100%)
 rename enos/modules/{upgrade_clients => upgrade_client}/scripts/verify_metadata.sh (92%)
 rename enos/modules/{upgrade_clients => upgrade_client}/scripts/wait_for_nomad_api.sh (100%)
 rename enos/modules/{upgrade_clients => upgrade_client}/variables.tf (93%)
 delete mode 100644 enos/modules/upgrade_clients/main.tf

diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl
index 2241c70b1..a590a1e4d 100644
--- a/enos/enos-modules.hcl
+++ b/enos/enos-modules.hcl
@@ -21,6 +21,6 @@ module "upgrade_servers" {
   source = "./modules/upgrade_servers"
 }

-module "upgrade_clients" {
-  source = "./modules/upgrade_clients"
+module "upgrade_client" {
+  source = "./modules/upgrade_client"
 }
diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl
index 3fb34682e..5adab22e6 100644
--- a/enos/enos-scenario-upgrade.hcl
+++ b/enos/enos-scenario-upgrade.hcl
@@ -81,9 +81,46 @@ scenario "upgrade" {
     }
   }

-  step "run_initial_workloads" {
+  step "initial_test_cluster_health" {
     depends_on = [step.provision_cluster]

+    description = <<-EOF
+    Verify the health of the cluster by checking the status of all servers, nodes,
+    jobs and allocs, and by stopping random allocs to check for correct reschedules
+    EOF
+
+    module = module.test_cluster_health
+    variables {
+      # connecting to the Nomad API
+      nomad_addr  = step.provision_cluster.nomad_addr
+      ca_file     = step.provision_cluster.ca_file
+      cert_file   = step.provision_cluster.cert_file
+      key_file    = step.provision_cluster.key_file
+      nomad_token = step.provision_cluster.nomad_token
+
+      # configuring assertions
+      server_count    = var.server_count
+      client_count    = local.clients_count
+      jobs_count      = 0
+      alloc_count     = 0
+      servers         = step.provision_cluster.servers
+      clients_version = local.test_product_version
+      servers_version = local.test_product_version
+    }
+
+    verifies = [
+      quality.nomad_agent_info,
+      quality.nomad_agent_info_self,
+      quality.nomad_nodes_status,
+      quality.nomad_job_status,
+      quality.nomad_allocs_status,
+      quality.nomad_reschedule_alloc,
+    ]
+  }
+
+  step "run_initial_workloads" {
+    depends_on = [step.initial_test_cluster_health]
+
     description = <<-EOF
     Verify the health of the cluster by running new workloads
     EOF
@@ -127,7 +164,7 @@ scenario "upgrade" {
     ]
   }

-  step "initial_test_cluster_health" {
+  step "workloads_test_cluster_health" {
     depends_on = [step.run_initial_workloads]

     description = <<-EOF
@@ -165,13 +202,13 @@ scenario "upgrade" {
   }

   step "fetch_upgrade_binary" {
-    depends_on = [step.provision_cluster, step.initial_test_cluster_health]
+    depends_on = [step.provision_cluster, step.workloads_test_cluster_health]

     description = <<-EOF
     Determine which Nomad artifact we want to use for the scenario, depending
     on the 'arch', 'edition' and 'os' and fetches the URL and SHA to identify
     the upgraded binary.
-EOF
+    EOF

     module = module.fetch_binaries

@@ -266,12 +303,12 @@ EOF
     ]
   }

-  step "upgrade_clients" {
+  step "upgrade_first_client" {
     depends_on = [step.server_upgrade_test_cluster_health]

     description = <<-EOF
-    Takes the clients one by one, writes some dynamic metadata to them,
-    updates the binary with the new one previously fetched and restarts them.
+    Takes a client, writes some dynamic metadata to it,
+    updates the binary with the new one previously fetched and restarts it.

     Important: The path where the binary will be placed is hardcoded to match
     what the provision-cluster module does. It can be configurable in the future
@@ -279,12 +316,9 @@ EOF

      * "C:/opt/nomad.exe" for windows
      * "/usr/local/bin/nomad" for linux
-
-    To ensure the clients are upgraded one by one, they use the depends_on meta,
-    there are ONLY 4 CLIENTS being upgraded in the module.
     EOF

-    module = module.upgrade_clients
+    module = module.upgrade_client

     verifies = [
       quality.nomad_nodes_status,
@@ -301,7 +335,130 @@ EOF
       nomad_token = step.provision_cluster.nomad_token

       # configuring assertions
-      clients = step.provision_cluster.clients
+      client               = step.provision_cluster.clients[0]
+      ssh_key_path         = step.provision_cluster.ssh_key_file
+      artifactory_username = var.artifactory_username
+      artifactory_token    = var.artifactory_token
+      artifact_url         = step.fetch_upgrade_binary.artifact_url[matrix.os]
+      artifact_sha         = step.fetch_upgrade_binary.artifact_sha[matrix.os]
+    }
+  }
+
+  step "upgrade_second_client" {
+    depends_on = [step.upgrade_first_client]
+
+    description = <<-EOF
+    Takes a client, writes some dynamic metadata to it,
+    updates the binary with the new one previously fetched and restarts it.
+
+    Important: The path where the binary will be placed is hardcoded to match
+    what the provision-cluster module does. It can be configurable in the future
+    but for now it is:
+
+     * "C:/opt/nomad.exe" for windows
+     * "/usr/local/bin/nomad" for linux
+    EOF
+
+    module = module.upgrade_client
+
+    verifies = [
+      quality.nomad_nodes_status,
+      quality.nomad_job_status,
+      quality.nomad_node_metadata
+    ]
+
+    variables {
+      # connecting to the Nomad API
+      nomad_addr  = step.provision_cluster.nomad_addr
+      ca_file     = step.provision_cluster.ca_file
+      cert_file   = step.provision_cluster.cert_file
+      key_file    = step.provision_cluster.key_file
+      nomad_token = step.provision_cluster.nomad_token
+
+      # configuring assertions
+      client               = step.provision_cluster.clients[1]
+      ssh_key_path         = step.provision_cluster.ssh_key_file
+      artifactory_username = var.artifactory_username
+      artifactory_token    = var.artifactory_token
+      artifact_url         = step.fetch_upgrade_binary.artifact_url[matrix.os]
+      artifact_sha         = step.fetch_upgrade_binary.artifact_sha[matrix.os]
+    }
+  }
+
+  step "upgrade_third_client" {
+    depends_on = [step.upgrade_second_client]
+
+    description = <<-EOF
+    Takes a client, writes some dynamic metadata to it,
+    updates the binary with the new one previously fetched and restarts it.
+
+    Important: The path where the binary will be placed is hardcoded to match
+    what the provision-cluster module does. It can be configurable in the future
+    but for now it is:
+
+     * "C:/opt/nomad.exe" for windows
+     * "/usr/local/bin/nomad" for linux
+    EOF
+
+    module = module.upgrade_client
+
+    verifies = [
+      quality.nomad_nodes_status,
+      quality.nomad_job_status,
+      quality.nomad_node_metadata
+    ]
+
+    variables {
+      # connecting to the Nomad API
+      nomad_addr  = step.provision_cluster.nomad_addr
+      ca_file     = step.provision_cluster.ca_file
+      cert_file   = step.provision_cluster.cert_file
+      key_file    = step.provision_cluster.key_file
+      nomad_token = step.provision_cluster.nomad_token
+
+      # configuring assertions
+      client               = step.provision_cluster.clients[2]
+      ssh_key_path         = step.provision_cluster.ssh_key_file
+      artifactory_username = var.artifactory_username
+      artifactory_token    = var.artifactory_token
+      artifact_url         = step.fetch_upgrade_binary.artifact_url[matrix.os]
+      artifact_sha         = step.fetch_upgrade_binary.artifact_sha[matrix.os]
+    }
+  }
+
+  step "upgrade_fourth_client" {
+    depends_on = [step.upgrade_third_client]
+
+    description = <<-EOF
+    Takes a client, writes some dynamic metadata to it,
+    updates the binary with the new one previously fetched and restarts it.
+
+    Important: The path where the binary will be placed is hardcoded to match
+    what the provision-cluster module does. It can be configurable in the future
+    but for now it is:
+
+     * "C:/opt/nomad.exe" for windows
+     * "/usr/local/bin/nomad" for linux
+    EOF
+
+    module = module.upgrade_client
+
+    verifies = [
+      quality.nomad_nodes_status,
+      quality.nomad_job_status,
+      quality.nomad_node_metadata
+    ]
+
+    variables {
+      # connecting to the Nomad API
+      nomad_addr  = step.provision_cluster.nomad_addr
+      ca_file     = step.provision_cluster.ca_file
+      cert_file   = step.provision_cluster.cert_file
+      key_file    = step.provision_cluster.key_file
+      nomad_token = step.provision_cluster.nomad_token
+
+      # configuring assertions
+      client               = step.provision_cluster.clients[3]
       ssh_key_path         = step.provision_cluster.ssh_key_file
       artifactory_username = var.artifactory_username
       artifactory_token    = var.artifactory_token
@@ -311,7 +468,7 @@
   }

   step "client_upgrade_test_cluster_health" {
-    depends_on = [step.upgrade_clients]
+    depends_on = [step.upgrade_fourth_client]

     description = <<-EOF
     Verify the health of the cluster by checking the status of all servers, nodes,
@@ -395,4 +552,12 @@ EOF
   output "allocs" {
     value = step.run_initial_workloads.allocs_count
   }
+
+  output "new_allocs" {
+    value = step.run_initial_workloads.new_allocs_count
+  }
+
+  output "nodes" {
+    value = step.run_initial_workloads.nodes
+  }
 }
diff --git a/enos/modules/run_workloads/main.tf b/enos/modules/run_workloads/main.tf
index 83b77f2e4..6a47e2dfd 100644
--- a/enos/modules/run_workloads/main.tf
+++ b/enos/modules/run_workloads/main.tf
@@ -33,7 +33,7 @@ resource "enos_local_exec" "get_nodes" {
   depends_on = [enos_local_exec.wait_for_nomad_api]

   environment = local.nomad_env
-  inline = ["nomad node status -json | jq '[.[] | select(.Status == \"ready\")] | length'"]
+  inline = ["nomad node status -json | jq '[.[] | select(.SchedulingEligibility == \"eligible\" and .Status == \"ready\")] | length'"]
 }

 resource "enos_local_exec" "get_jobs" {
diff --git a/enos/modules/run_workloads/outputs.tf b/enos/modules/run_workloads/outputs.tf
index 76c7814d0..ecb970cca 100644
--- a/enos/modules/run_workloads/outputs.tf
+++ b/enos/modules/run_workloads/outputs.tf
@@ -25,3 +25,8 @@ output "new_allocs_count" {
   description = "The number of allocs that will be added to the cluster after all the workloads are run"
   value       = local.system_job_count * tonumber(coalesce(chomp(enos_local_exec.get_nodes.stdout), "0")) + local.service_batch_allocs
 }
+
+output "system_job_count" {
+  description = "The number of jobs that were triggered by the module"
+  value       = local.system_job_count
+}
diff --git a/enos/modules/test_cluster_health/main.tf b/enos/modules/test_cluster_health/main.tf
index f7c356d7b..2a3185250 100644
--- a/enos/modules/test_cluster_health/main.tf
+++ b/enos/modules/test_cluster_health/main.tf
@@ -27,6 +27,7 @@ resource "enos_local_exec" "wait_for_nomad_api" {
 }

 resource "enos_local_exec" "run_tests" {
+  depends_on = [enos_local_exec.wait_for_nomad_api]
   environment = merge(
     local.nomad_env, {
       SERVER_COUNT = var.server_count
@@ -45,6 +46,7 @@ resource "enos_local_exec" "run_tests" {
 }

 resource "enos_local_exec" "verify_versions" {
+  depends_on = [enos_local_exec.wait_for_nomad_api, enos_local_exec.run_tests]
   environment = merge(
     local.nomad_env, {
       SERVERS_VERSION = var.servers_version
diff --git a/enos/modules/test_cluster_health/scripts/allocs.sh b/enos/modules/test_cluster_health/scripts/allocs.sh
index fad3572cc..911017c27 100755
--- a/enos/modules/test_cluster_health/scripts/allocs.sh
+++ b/enos/modules/test_cluster_health/scripts/allocs.sh
@@ -41,12 +41,12 @@ while true; do
         error_exit "Some allocs are not running:\n$(nomad alloc status -json | jq -r '.[] | select(.ClientStatus != "running") | .ID')"
     fi

-    echo "Running allocs: $running_allocs, expected $ALLOC_COUNT. Waiting for $elapsed_time Retrying in $POLL_INTERVAL seconds..."
+    echo "Running allocs: $allocs_length, expected $ALLOC_COUNT. Waiting for $elapsed_time Retrying in $POLL_INTERVAL seconds..."
     sleep $POLL_INTERVAL
     elapsed_time=$((elapsed_time + POLL_INTERVAL))
 done

-echo "All ALLOCS are running."
+echo "All $ALLOC_COUNT ALLOCS are running."

 if [ "$allocs_length" -eq 0 ]; then
     exit 0
@@ -54,8 +54,10 @@ fi

 # Quality: nomad_reschedule_alloc: A POST / PUT call to /v1/allocation/:alloc_id/stop results in the stopped allocation being rescheduled

-random_index=$((RANDOM % allocs_length))
-random_alloc_id=$(echo "$running_allocs" | jq -r ".[${random_index}].ID")
+service_batch_allocs=$(echo "$running_allocs" | jq '[.[] | select(.JobType != "system")]')
+service_batch_allocs_length=$(echo "$service_batch_allocs" | jq 'length')
+random_index=$((RANDOM % service_batch_allocs_length))
+random_alloc_id=$(echo "$service_batch_allocs" | jq -r ".[${random_index}].ID")

 nomad alloc stop "$random_alloc_id" \
     || error_exit "Failed to stop allocation $random_alloc_id"
@@ -89,10 +91,11 @@ while true; do
     checkAllocsCount && break

     if [ "$elapsed_time" -ge "$MAX_WAIT_TIME" ]; then
-        error_exit "Expected $ALLOC_COUNT running allocations, found $running_allocs after $elapsed_time seconds"
+        nomad alloc status -json > allocs.json
+        error_exit "Expected $ALLOC_COUNT running allocations, found $allocs_length after $elapsed_time seconds"
     fi

-    echo "Expected $ALLOC_COUNT running allocations, found $running_allocs Retrying in $POLL_INTERVAL seconds..."
+    echo "Expected $ALLOC_COUNT running allocations, found $allocs_length Retrying in $POLL_INTERVAL seconds..."
     sleep $POLL_INTERVAL
     elapsed_time=$((elapsed_time + POLL_INTERVAL))
 done
diff --git a/enos/modules/test_cluster_health/scripts/clients.sh b/enos/modules/test_cluster_health/scripts/clients.sh
index 3a5e480ff..b916860cf 100755
--- a/enos/modules/test_cluster_health/scripts/clients.sh
+++ b/enos/modules/test_cluster_health/scripts/clients.sh
@@ -21,7 +21,7 @@ last_error=
 checkReadyClients() {
     local clients_length

-    ready_clients=$(nomad node status -json | jq '[.[] | select(.Status == "ready")]') ||
+    ready_clients=$(nomad node status -json | jq '[.[] | select(.Status == "ready" and .SchedulingEligibility == "eligible")]') ||
         error_exit "Could not query node status"

     clients_length=$(echo "$ready_clients" | jq 'length')
@@ -54,4 +54,4 @@ while true; do
     elapsed_time=$((elapsed_time + POLL_INTERVAL))
 done

-echo "All clients are eligible and running."
+echo "All $CLIENT_COUNT clients are eligible and running."
diff --git a/enos/modules/test_cluster_health/scripts/jobs.sh b/enos/modules/test_cluster_health/scripts/jobs.sh
index dbb46c578..dfe42e16c 100755
--- a/enos/modules/test_cluster_health/scripts/jobs.sh
+++ b/enos/modules/test_cluster_health/scripts/jobs.sh
@@ -21,4 +21,4 @@ if [ "$jobs_length" -ne "$JOB_COUNT" ]; then
     error_exit "The number of running jobs ($jobs_length) does not match the expected count ($JOB_COUNT) $(nomad job status | awk 'NR > 1 && $4 != "running" {print $4}') "
 fi

-echo "All JOBS are running."
+echo "All $JOB_COUNT JOBS are running."
diff --git a/enos/modules/test_cluster_health/scripts/servers.sh b/enos/modules/test_cluster_health/scripts/servers.sh
index 7c5bcc7ba..1e58fc626 100755
--- a/enos/modules/test_cluster_health/scripts/servers.sh
+++ b/enos/modules/test_cluster_health/scripts/servers.sh
@@ -92,4 +92,4 @@ for ip in $SERVERS; do
     done
 done

-echo "All servers are alive and up to date."
+echo "All $SERVER_COUNT SERVERS are alive and up to date."
diff --git a/enos/modules/test_cluster_health/scripts/versions.sh b/enos/modules/test_cluster_health/scripts/versions.sh
index 54f580c3e..425a3bd54 100755
--- a/enos/modules/test_cluster_health/scripts/versions.sh
+++ b/enos/modules/test_cluster_health/scripts/versions.sh
@@ -32,7 +32,6 @@ echo "All servers are running Nomad version $SERVERS_VERSION"

 # Clients version
 clients_versions=$(nomad node status -json | jq -r '[.[] | select(.Status == "ready") | .Version] | unique')
-
 if [ "$(echo "$clients_versions" | jq 'length')" -eq 0 ]; then
     error_exit "Unable to get clients version"
 fi
diff --git a/enos/modules/upgrade_client/main.tf b/enos/modules/upgrade_client/main.tf
new file mode 100644
index 000000000..661a9ee4f
--- /dev/null
+++ b/enos/modules/upgrade_client/main.tf
@@ -0,0 +1,84 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+terraform {
+  required_providers {
+    enos = {
+      source = "registry.terraform.io/hashicorp-forge/enos"
+    }
+  }
+}
+
+locals {
+  nomad_env = {
+    NOMAD_ADDR        = var.nomad_addr
+    NOMAD_CACERT      = var.ca_file
+    NOMAD_CLIENT_CERT = var.cert_file
+    NOMAD_CLIENT_KEY  = var.key_file
+    NOMAD_TOKEN       = var.nomad_token
+  }
+
+  artifactory = {
+    username = var.artifactory_username
+    token    = var.artifactory_token
+    url      = var.artifact_url
+    sha256   = var.artifact_sha
+  }
+
+  tls = {
+    ca_file   = var.ca_file
+    cert_file = var.cert_file
+    key_file  = var.key_file
+  }
+}
+
+resource "enos_local_exec" "wait_for_nomad_api" {
+  environment = local.nomad_env
+
+  scripts = [abspath("${path.module}/scripts/wait_for_nomad_api.sh")]
+}
+
+resource "enos_local_exec" "set_metadata" {
+  depends_on = [enos_local_exec.wait_for_nomad_api]
+
+  environment = merge(
+    local.nomad_env,
+    {
+      CLIENT_IP = var.client
+    }
+  )
+
+  scripts = [abspath("${path.module}/scripts/set_metadata.sh")]
+}
+
+module upgrade_client {
+  depends_on = [enos_local_exec.set_metadata]
+
+  source = "../upgrade_instance"
+
+  nomad_addr          = var.nomad_addr
+  tls                 = local.tls
+  nomad_token         = var.nomad_token
+  platform            = var.platform
+  instance_address    = var.client
+  ssh_key_path        = var.ssh_key_path
+  artifactory_release = local.artifactory
+}
+
+resource "enos_local_exec" "wait_for_nomad_api_post_update" {
+  environment = local.nomad_env
+
+  scripts = [abspath("${path.module}/scripts/wait_for_nomad_api.sh")]
+}
+
+resource "enos_local_exec" "verify_metadata" {
+  depends_on = [enos_local_exec.wait_for_nomad_api_post_update]
+
+  environment = merge(
+    local.nomad_env,
+    {
+      CLIENT_IP = var.client
+    })
+
+  scripts = [abspath("${path.module}/scripts/verify_metadata.sh")]
+}
diff --git a/enos/modules/upgrade_clients/scripts/set_metadata.sh b/enos/modules/upgrade_client/scripts/set_metadata.sh
similarity index 100%
rename from enos/modules/upgrade_clients/scripts/set_metadata.sh
rename to enos/modules/upgrade_client/scripts/set_metadata.sh
diff --git a/enos/modules/upgrade_clients/scripts/verify_metadata.sh b/enos/modules/upgrade_client/scripts/verify_metadata.sh
similarity index 92%
rename from enos/modules/upgrade_clients/scripts/verify_metadata.sh
rename to enos/modules/upgrade_client/scripts/verify_metadata.sh
index ab992e5e8..3ab78a91a 100755
--- a/enos/modules/upgrade_clients/scripts/verify_metadata.sh
+++ b/enos/modules/upgrade_client/scripts/verify_metadata.sh
@@ -9,7 +9,7 @@ error_exit() {
     exit 1
 }

-MAX_WAIT_TIME=10 # Maximum wait time in seconds
+MAX_WAIT_TIME=60 # Maximum wait time in seconds
 POLL_INTERVAL=2 # Interval between status checks

 elapsed_time=0
@@ -47,8 +47,10 @@ while true; do
     elapsed_time=$((elapsed_time + POLL_INTERVAL))
 done

-# Quality: "nomad_node_metadata: A GET call to /v1/node/:node-id returns the same node.Meta for each node before and after a node upgrade"
+echo "Client $client_id at $CLIENT_IP is ready"

+# Quality: "nomad_node_metadata: A GET call to /v1/node/:node-id returns the same node.Meta for each node before and after a node upgrade"
+echo "Reading metadata for client at $CLIENT_IP"
 if ! client_meta=$(nomad node meta read -json -node-id "$client_id"); then
     echo "Failed to read metadata for node: $client_id"
     exit 1
diff --git a/enos/modules/upgrade_clients/scripts/wait_for_nomad_api.sh b/enos/modules/upgrade_client/scripts/wait_for_nomad_api.sh
similarity index 100%
rename from enos/modules/upgrade_clients/scripts/wait_for_nomad_api.sh
rename to enos/modules/upgrade_client/scripts/wait_for_nomad_api.sh
diff --git a/enos/modules/upgrade_clients/variables.tf b/enos/modules/upgrade_client/variables.tf
similarity index 93%
rename from enos/modules/upgrade_clients/variables.tf
rename to enos/modules/upgrade_client/variables.tf
index 71f0bdbb1..dad698832 100644
--- a/enos/modules/upgrade_clients/variables.tf
+++ b/enos/modules/upgrade_client/variables.tf
@@ -44,9 +44,9 @@ variable "ssh_key_path" {
   type        = string
 }

-variable "clients" {
-  description = "List of public IP address of the nomad clients that will be updated"
-  type        = list
+variable "client" {
+  description = "Public IP address of the nomad client that will be updated"
+  type        = string
 }

 variable "artifactory_username" {
diff --git a/enos/modules/upgrade_clients/main.tf b/enos/modules/upgrade_clients/main.tf
deleted file mode 100644
index a0ae56a39..000000000
--- a/enos/modules/upgrade_clients/main.tf
+++ /dev/null
@@ -1,211 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: BUSL-1.1
-
-terraform {
-  required_providers {
-    enos = {
-      source = "registry.terraform.io/hashicorp-forge/enos"
-    }
-  }
-}
-
-locals {
-  nomad_env = {
-    NOMAD_ADDR        = var.nomad_addr
-    NOMAD_CACERT      = var.ca_file
-    NOMAD_CLIENT_CERT = var.cert_file
-    NOMAD_CLIENT_KEY  = var.key_file
-    NOMAD_TOKEN       = var.nomad_token
-  }
-
-  artifactory = {
-    username = var.artifactory_username
-    token    = var.artifactory_token
-    url      = var.artifact_url
-    sha256   = var.artifact_sha
-  }
-
-  tls = {
-    ca_file   = var.ca_file
-    cert_file = var.cert_file
-    key_file  = var.key_file
-  }
-}
-
-resource "enos_local_exec" "wait_for_nomad_api" {
-  environment = local.nomad_env
-
-  scripts = [abspath("${path.module}/scripts/wait_for_nomad_api.sh")]
-}
-
-#////////////////////////////////////////////////////////////////////////////////
-#// Upgrading the first client
-#////////////////////////////////////////////////////////////////////////////////
-
-resource "enos_local_exec" "set_metadata_on_first_client" {
-  depends_on = [enos_local_exec.wait_for_nomad_api]
-
-  environment = merge(
-    local.nomad_env,
-    {
-      CLIENT_IP = var.clients[0]
-    }
-  )
-
-  scripts = [abspath("${path.module}/scripts/set_metadata.sh")]
-}
-
-module upgrade_first_client {
-  depends_on = [enos_local_exec.set_metadata_on_first_client]
-
-  source = "../upgrade_instance"
-
-  nomad_addr          = var.nomad_addr
-  tls                 = local.tls
-  nomad_token         = var.nomad_token
-  platform            = var.platform
-  instance_address    = var.clients[0]
-  ssh_key_path        = var.ssh_key_path
-  artifactory_release = local.artifactory
-}
-
-resource "enos_local_exec" "verify_metadata_from_first_client" {
-  depends_on = [module.upgrade_first_client]
-
-  environment = merge(
-    local.nomad_env,
-    {
-      CLIENT_IP = var.clients[0]
-    })
-
-  scripts = [abspath("${path.module}/scripts/verify_metadata.sh")]
-}
-
-#////////////////////////////////////////////////////////////////////////////////
-#// Upgrading the second client
-#////////////////////////////////////////////////////////////////////////////////
-
-resource "enos_local_exec" "set_metadata_on_second_client" {
-  depends_on = [enos_local_exec.verify_metadata_from_first_client]
-
-  environment = merge(
-    local.nomad_env,
-    {
-      CLIENT_IP = var.clients[1]
-    }
-  )
-
-  scripts = [abspath("${path.module}/scripts/set_metadata.sh")]
-}
-
-module upgrade_second_client {
-  depends_on = [enos_local_exec.set_metadata_on_second_client]
-
-  source = "../upgrade_instance"
-
-  nomad_addr          = var.nomad_addr
-  tls                 = local.tls
-  nomad_token         = var.nomad_token
-  platform            = var.platform
-  instance_address    = var.clients[1]
-  ssh_key_path        = var.ssh_key_path
-  artifactory_release = local.artifactory
-}
-
-resource "enos_local_exec" "verify_metadata_from_second_client" {
-  depends_on = [module.upgrade_second_client]
-
-  environment = merge(
-    local.nomad_env,
-    {
-      CLIENT_IP = var.clients[1]
-    })
-
-  scripts = [abspath("${path.module}/scripts/verify_metadata.sh")]
-}
-
-#////////////////////////////////////////////////////////////////////////////////
-#// Upgrading the third client
-#////////////////////////////////////////////////////////////////////////////////
-
-resource "enos_local_exec" "set_metadata_on_third_client" {
-  depends_on = [enos_local_exec.verify_metadata_from_second_client]
-
-  environment = merge(
-    local.nomad_env,
-    {
-      CLIENT_IP = var.clients[2]
-    }
-  )
-
-  scripts = [abspath("${path.module}/scripts/set_metadata.sh")]
-}
-
-module upgrade_third_client {
-  depends_on = [enos_local_exec.set_metadata_on_third_client]
-
-  source = "../upgrade_instance"
-
-  nomad_addr          = var.nomad_addr
-  tls                 = local.tls
-  nomad_token         = var.nomad_token
-  platform            = var.platform
-  instance_address    = var.clients[2]
-  ssh_key_path        = var.ssh_key_path
-  artifactory_release = local.artifactory
-}
-
-resource "enos_local_exec" "verify_metadata_from_third_client" {
-  depends_on = [module.upgrade_third_client]
-
-  environment = merge(
-    local.nomad_env,
-    {
-      CLIENT_IP = var.clients[2]
-    })
-
-  scripts = [abspath("${path.module}/scripts/verify_metadata.sh")]
-}
-
-#////////////////////////////////////////////////////////////////////////////////
-#// Upgrading the forth client
-#////////////////////////////////////////////////////////////////////////////////
-
-resource "enos_local_exec" "set_metadata_on_forth_client" {
-  depends_on = [enos_local_exec.verify_metadata_from_third_client]
-
-  environment = merge(
-    local.nomad_env,
-    {
-      CLIENT_IP = var.clients[3]
-    }
-  )
-
-  scripts = [abspath("${path.module}/scripts/set_metadata.sh")]
-}
-
-module upgrade_forth_client {
-  depends_on = [enos_local_exec.set_metadata_on_forth_client]
-
-  source = "../upgrade_instance"
-
-  nomad_addr          = var.nomad_addr
-  tls                 = local.tls
-  nomad_token         = var.nomad_token
-  platform            = var.platform
-  instance_address    = var.clients[3]
-  ssh_key_path        = var.ssh_key_path
-  artifactory_release = local.artifactory
-}
-
-resource "enos_local_exec" "verify_metadata_from_forth_client" {
-  depends_on = [module.upgrade_forth_client]
-
-  environment = merge(
-    local.nomad_env,
-    {
-      CLIENT_IP = var.clients[3]
-    })
-
-  scripts = [abspath("${path.module}/scripts/verify_metadata.sh")]
-}