Files
nomad/client/allocrunner/checks_hook_test.go
Tim Gross e168548341 provide allocrunner hooks with prebuilt taskenv and fix mutation bugs (#25373)
Some of our allocrunner hooks require a task environment for interpolating values based on the node or allocation. Several of these hooks accept an already-built environment or builder and keep it in memory. Either way, they retain a copy of all the node attributes and allocation metadata, which balloons memory usage until the allocation is GC'd.

While we'd like to look into ways to avoid keeping the allocrunner around entirely (see #25372), for now we can significantly reduce memory usage by creating the task environment on-demand when calling allocrunner methods, rather than persisting it in the allocrunner hooks.
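The shape of the change is easiest to see in the tests below: the caller builds the environment immediately before invoking the hook and hands it in, so the hook never holds onto a builder. A minimal sketch of that calling convention (variable names here are illustrative, not the exact allocrunner code):

```go
// Build the environment on demand, right before the hook call, instead of
// storing a builder on the hook for the lifetime of the allocation.
env := taskenv.NewBuilder(node, alloc, nil, alloc.Job.Region).Build()
if err := hook.Prerun(env); err != nil {
	return err
}

// On in-place updates, a freshly built environment travels with the request.
req := &interfaces.RunnerUpdateRequest{
	Alloc:    updatedAlloc,
	AllocEnv: updatedEnv,
}
if err := hook.Update(req); err != nil {
	return err
}
```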

In doing so, we uncover two other bugs:
* The WID manager, the group service hook, and the checks hook all need to interpolate services for specific tasks. They did so by mutating a taskenv builder, but every mutation writes to the same environment map. When a group has multiple tasks, one task could set an environment variable that would then be interpolated into the service definition of another task that never defined it. Only service definition interpolation is affected; this does not leak env vars across running tasks, because each taskrunner has its own builder.

  To fix this, we move the `UpdateTask` method off the builder and onto the taskenv as the `WithTask` method. This makes a shallow copy of the taskenv with a deep clone of the environment map used for interpolation, and then overwrites the environment from the task (see the sketch after this list).

* The checks hook interpolated Nomad native service checks only in `Prerun` and not in `Update`. This could cause checks to be unexpectedly deregistered and re-registered during in-place updates. To fix this, we now interpolate in the `Update` method as well.
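A minimal sketch of the per-task interpolation after the first fix; `WithTask`'s signature is inferred from the description above (and the old `Builder.UpdateTask`), and the helper names are illustrative rather than quoted from the implementation:

```go
// Each task interpolates against its own copy of the environment map, so one
// task's variables can no longer bleed into another task's service definition.
for _, task := range tg.Tasks {
	// Shallow copy of the alloc-level env with a deep-cloned env map,
	// overwritten from this task (assumed signature).
	taskEnv := allocEnv.WithTask(alloc, task)
	services := taskenv.InterpolateServices(taskEnv, task.Services)
	_ = services // handed to the service/check registration path by the hook
}
```

The second fix is exercised by `TestCheckHook_Checks_UpdateSet` below, which passes a freshly built `AllocEnv` in the `RunnerUpdateRequest` and asserts that the check results reflect the updated, re-interpolated service definition.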

I also bumped into an incorrectly implemented interface in the CSI hook. I've pulled that and some better guardrails out to https://github.com/hashicorp/nomad/pull/25472.

Fixes: https://github.com/hashicorp/nomad/issues/25269
Fixes: https://hashicorp.atlassian.net/browse/NET-12310
Ref: https://github.com/hashicorp/nomad/issues/25372
2025-03-24 12:05:04 -04:00


// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

package allocrunner

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocrunner/interfaces"
	"github.com/hashicorp/nomad/client/serviceregistration/checks/checkstore"
	"github.com/hashicorp/nomad/client/state"
	"github.com/hashicorp/nomad/client/taskenv"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/shoenig/test/must"
)

func makeCheckStore(logger hclog.Logger) checkstore.Shim {
	db := state.NewMemDB(logger)
	checkStore := checkstore.NewStore(logger, db)
	return checkStore
}

func allocWithNomadChecks(addr, port string, onGroup bool) *structs.Allocation {
	alloc := mock.Alloc()
	group := alloc.Job.LookupTaskGroup(alloc.TaskGroup)

	task := "task-one"
	if onGroup {
		task = ""
	}

	services := []*structs.Service{
		{
			Name:        "service-one",
			TaskName:    "web",
			PortLabel:   port,
			AddressMode: "auto",
			Address:     addr,
			Provider:    "nomad",
			Checks: []*structs.ServiceCheck{
				{
					Name:        "check-ok",
					Type:        "http",
					Path:        "/",
					Protocol:    "http",
					PortLabel:   port,
					AddressMode: "auto",
					Interval:    250 * time.Millisecond,
					Timeout:     1 * time.Second,
					Method:      "GET",
					TaskName:    task,
				},
				{
					Name:        "check-error",
					Type:        "http",
					Path:        "/fail",
					Protocol:    "http",
					PortLabel:   port,
					AddressMode: "auto",
					Interval:    250 * time.Millisecond,
					Timeout:     1 * time.Second,
					Method:      "GET",
					TaskName:    task,
				},
				{
					Name:        "check-hang",
					Type:        "http",
					Path:        "/hang",
					Protocol:    "http",
					PortLabel:   port,
					AddressMode: "auto",
					Interval:    250 * time.Millisecond,
					Timeout:     500 * time.Millisecond,
					Method:      "GET",
					TaskName:    task,
				},
			},
		},
	}

	switch onGroup {
	case true:
		group.Tasks[0].Services = nil
		group.Services = services
	case false:
		group.Services = nil
		group.Tasks[0].Services = services
	}
	return alloc
}

func allocWithDifferentNomadChecks(id, addr, port string) *structs.Allocation {
	alloc := allocWithNomadChecks(addr, port, true)
	alloc.ID = id

	group := alloc.Job.LookupTaskGroup(alloc.TaskGroup)

	group.Services[0].Checks[2].Path = "/" // the hanging check is now ok

	// append 4th check, this one is failing
	group.Services[0].Checks = append(group.Services[0].Checks, &structs.ServiceCheck{
		Name:        "check-error-2",
		Type:        "http",
		Path:        "/fail",
		Protocol:    "http",
		PortLabel:   port,
		AddressMode: "auto",
		Interval:    250 * time.Millisecond,
		Timeout:     1 * time.Second,
		Method:      "GET",
	})
	return alloc
}

var checkHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	switch r.URL.Path {
	case "/fail":
		w.WriteHeader(http.StatusInternalServerError)
		_, _ = io.WriteString(w, "500 problem")
	case "/hang":
		time.Sleep(2 * time.Second)
		_, _ = io.WriteString(w, "too slow")
	default:
		w.WriteHeader(http.StatusOK)
		_, _ = io.WriteString(w, "200 ok")
	}
})

func TestCheckHook_Checks_ResultsSet(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)

	// create an http server with various responses
	ts := httptest.NewServer(checkHandler)
	defer ts.Close()

	cases := []struct {
		name    string
		onGroup bool
	}{
		{name: "group-level", onGroup: true},
		{name: "task-level", onGroup: false},
	}

	for _, tc := range cases {
		checkStore := makeCheckStore(logger)

		// get the address and port for http server
		tokens := strings.Split(ts.URL, ":")
		addr, port := strings.TrimPrefix(tokens[1], "//"), tokens[2]

		network := mock.NewNetworkStatus(addr)

		alloc := allocWithNomadChecks(addr, port, tc.onGroup)
		env := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build()

		h := newChecksHook(logger, alloc, checkStore, network)

		// initialize is called; observers are created but not started yet
		must.MapEmpty(t, h.observers)

		// calling pre-run starts the observers
		err := h.Prerun(env)
		must.NoError(t, err)

		testutil.WaitForResultUntil(
			2*time.Second,
			func() (bool, error) {
				results := checkStore.List(alloc.ID)
				passing, failing, pending := 0, 0, 0
				for _, result := range results {
					switch result.Status {
					case structs.CheckSuccess:
						passing++
					case structs.CheckFailure:
						failing++
					case structs.CheckPending:
						pending++
					}
				}
				if passing != 1 || failing != 2 || pending != 0 {
					fmt.Printf("results %v\n", results)
					return false, fmt.Errorf(
						"expected 1 passing, 2 failing, 0 pending, got %d passing, %d failing, %d pending",
						passing, failing, pending,
					)
				}
				return true, nil
			},
			func(err error) {
				t.Fatal(err)
			},
		)

		h.PreKill() // stop observers, cleanup

		// assert shim no longer contains results for the alloc
		results := checkStore.List(alloc.ID)
		must.MapEmpty(t, results)
	}
}

func TestCheckHook_Checks_UpdateSet(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)

	// create an http server with various responses
	ts := httptest.NewServer(checkHandler)
	defer ts.Close()

	// get the address and port for http server
	tokens := strings.Split(ts.URL, ":")
	addr, port := strings.TrimPrefix(tokens[1], "//"), tokens[2]

	shim := makeCheckStore(logger)

	network := mock.NewNetworkStatus(addr)

	alloc := allocWithNomadChecks(addr, port, true)
	env := taskenv.NewBuilder(mock.Node(), alloc, nil, alloc.Job.Region).Build()

	h := newChecksHook(logger, alloc, shim, network)

	// calling pre-run starts the observers
	err := h.Prerun(env)
	must.NoError(t, err)

	// initial set of checks
	testutil.WaitForResultUntil(
		2*time.Second,
		func() (bool, error) {
			results := shim.List(alloc.ID)
			passing, failing, pending := 0, 0, 0
			for _, result := range results {
				switch result.Status {
				case structs.CheckSuccess:
					passing++
				case structs.CheckFailure:
					failing++
				case structs.CheckPending:
					pending++
				}
			}
			if passing != 1 || failing != 2 || pending != 0 {
				fmt.Printf("results %v\n", results)
				return false, fmt.Errorf(
					"(initial set) expected 1 passing, 2 failing, 0 pending, got %d passing, %d failing, %d pending",
					passing, failing, pending,
				)
			}
			return true, nil
		},
		func(err error) {
			t.Fatal(err)
		},
	)

	updatedAlloc := allocWithDifferentNomadChecks(alloc.ID, addr, port)
	updatedEnv := taskenv.NewBuilder(mock.Node(), updatedAlloc, nil, alloc.Job.Region).Build()

	request := &interfaces.RunnerUpdateRequest{
		Alloc:    updatedAlloc,
		AllocEnv: updatedEnv,
	}

	err = h.Update(request)
	must.NoError(t, err)

	// updated set of checks
	testutil.WaitForResultUntil(
		2*time.Second,
		func() (bool, error) {
			results := shim.List(alloc.ID)
			passing, failing, pending := 0, 0, 0
			for _, result := range results {
				switch result.Status {
				case structs.CheckSuccess:
					passing++
				case structs.CheckFailure:
					failing++
				case structs.CheckPending:
					pending++
				}
			}
			if passing != 2 || failing != 2 || pending != 0 {
				fmt.Printf("results %v\n", results)
				return false, fmt.Errorf(
					"(updated set) expected 2 passing, 2 failing, 0 pending, got %d passing, %d failing, %d pending",
					passing, failing, pending,
				)
			}
			return true, nil
		},
		func(err error) {
			t.Fatal(err)
		},
	)

	h.PreKill() // stop observers, cleanup

	// assert shim no longer contains results for the alloc
	results := shim.List(alloc.ID)
	must.MapEmpty(t, results)
}