nomad/nomad/node_endpoint_test.go
Tim Gross (6563d0ec3c): wait for service registration cleanup until allocs marked lost (#26424)
When a node misses a heartbeat and is marked down, Nomad deletes service
registration instances for that node. But if the node then successfully
heartbeats before its allocations are marked lost, the services are never
restored. The node is unaware that it has missed a heartbeat and there's no
anti-entropy on the node in any case.

We already delete services when the plan applier marks allocations as stopped,
so deleting the services when the node goes down is only an optimization to more
quickly divert service traffic. But because the state after a plan apply is the
"canonical" view of allocation health, this breaks correctness.

Remove the code path that deletes a node's services when the node goes down. Retain
the state store code that deletes services when allocs are marked terminal by
the plan applier. Also add a path in the state store to delete services when
allocs are marked terminal by the client. This gets back some of the
optimization but avoids the correctness bug because marking the allocation
client-terminal is a one-way operation.

Fixes: https://github.com/hashicorp/nomad/issues/16983
2025-08-06 13:40:37 -04:00
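
As a rough illustration of the shape of the fix, the sketch below models the one-way client-terminal transition the commit relies on. It is a standalone, hypothetical example: the types and names (Alloc, serviceStore, updateAllocFromClient, clientTerminal) are illustrative stand-ins, not Nomad's actual state store API. The point is that service registrations are removed only when the client reports an allocation terminal, never merely because the node missed a heartbeat, so a late heartbeat can no longer leave services deleted for still-running allocations.

// Hypothetical sketch; not Nomad's real state store API.
package main

import "fmt"

// Alloc is a stand-in for an allocation status update as reported by a client.
type Alloc struct {
	ID           string
	ClientStatus string // e.g. "running", "complete", "failed", "lost"
}

// clientTerminal mirrors the idea of a client-terminal status: once a client
// reports one of these, the allocation never transitions back to running.
func clientTerminal(status string) bool {
	switch status {
	case "complete", "failed", "lost":
		return true
	}
	return false
}

// serviceStore is a stand-in for the state that tracks service registrations
// per allocation.
type serviceStore struct {
	servicesByAlloc map[string][]string
}

// updateAllocFromClient deletes an allocation's services only when the client
// itself marks the alloc terminal; a node being marked down does not touch them.
func (s *serviceStore) updateAllocFromClient(alloc Alloc) {
	if clientTerminal(alloc.ClientStatus) {
		delete(s.servicesByAlloc, alloc.ID)
	}
}

func main() {
	s := &serviceStore{servicesByAlloc: map[string][]string{
		"alloc-1": {"web"},
	}}

	// A client update for a still-running alloc leaves its services in place,
	// and a missed heartbeat no longer deletes them at all.
	s.updateAllocFromClient(Alloc{ID: "alloc-1", ClientStatus: "running"})
	fmt.Println(len(s.servicesByAlloc["alloc-1"])) // 1

	// Once the client reports the alloc terminal, the services are removed.
	s.updateAllocFromClient(Alloc{ID: "alloc-1", ClientStatus: "complete"})
	fmt.Println(len(s.servicesByAlloc["alloc-1"])) // 0
}

In the real change, the deletion-on-node-down path is removed entirely, and the plan applier and client-update paths in the state store own service cleanup.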


// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package nomad
import (
"context"
"errors"
"fmt"
"net"
"net/rpc"
"reflect"
"strings"
"testing"
"time"
"github.com/go-jose/go-jose/v3/jwt"
memdb "github.com/hashicorp/go-memdb"
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/kr/pretty"
"github.com/shoenig/test/must"
"github.com/shoenig/test/wait"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestClientEndpoint_Register(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Check that we have no client connections
require.Empty(s1.connectedNodes())
// Create the register request
node := mock.Node()
req := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Check that we have the client connections
nodes := s1.connectedNodes()
require.Len(nodes, 1)
require.Contains(nodes, node.ID)
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.NodeByID(ws, node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected node")
}
if out.CreateIndex != resp.Index {
t.Fatalf("index mis-match")
}
if out.ComputedClass == "" {
t.Fatal("ComputedClass not set")
}
// Close the connection and check that we remove the client connections
require.Nil(codec.Close())
testutil.WaitForResult(func() (bool, error) {
nodes := s1.connectedNodes()
return len(nodes) == 0, nil
}, func(err error) {
t.Fatalf("should have no clients")
})
}
func TestNode_Register_Identity(t *testing.T) {
ci.Parallel(t)
// This helper function verifies the identity token generated by the server
// in the Node.Register RPC call.
verifyIdentityFn := func(
t *testing.T,
testServer *Server,
token string,
node *structs.Node,
ttl time.Duration,
) {
t.Helper()
identityClaims, err := testServer.encrypter.VerifyClaim(token)
must.NoError(t, err)
must.Eq(t, ttl, identityClaims.Expiry.Time().Sub(identityClaims.NotBefore.Time()))
must.True(t, identityClaims.IsNode())
must.Eq(t, identityClaims.NodeIdentityClaims, &structs.NodeIdentityClaims{
NodeID: node.ID,
NodeDatacenter: node.Datacenter,
NodeClass: node.NodeClass,
NodePool: node.NodePool,
})
// Identify the active encrypter key ID, which would have been used to
// sign the identity token.
_, keyID, err := testServer.encrypter.GetActiveKey()
must.NoError(t, err)
// Perform a lookup of the node in state. The IdentitySigningKeyID field
// should be populated with the active encrypter key ID.
stateNodeResp, err := testServer.State().NodeByID(nil, node.ID)
must.NoError(t, err)
must.NotNil(t, stateNodeResp)
must.Eq(t, keyID, stateNodeResp.IdentitySigningKeyID)
}
testCases := []struct {
name string
testFn func(t *testing.T, srv *Server, codec rpc.ClientCodec)
}{
{
// Test the initial registration flow, where a node will not include
// an authentication token in the request.
//
// A later registration will not generate a new identity, as the
// included identity is still valid.
name: "identity generation and node reregister",
testFn: func(t *testing.T, srv *Server, codec rpc.ClientCodec) {
node := mock.Node()
req := structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{
Region: "global",
},
}
var resp structs.NodeUpdateResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", &req, &resp))
must.NotNil(t, resp.SignedIdentity)
verifyIdentityFn(t, srv, *resp.SignedIdentity, req.Node, structs.DefaultNodePoolNodeIdentityTTL)
req.WriteRequest.AuthToken = *resp.SignedIdentity
var resp2 structs.NodeUpdateResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", &req, &resp2))
must.Nil(t, resp2.SignedIdentity)
},
},
{
// A node can register with a node pool that does not exist, and the
// server will create it on FSM write. In this case, the server
// should generate an identity with the default node pool identity
// TTL.
name: "create on register node pool",
testFn: func(t *testing.T, srv *Server, codec rpc.ClientCodec) {
node := mock.Node()
node.NodePool = "custom-pool"
req := structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{
Region: "global",
},
}
var resp structs.NodeUpdateResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", &req, &resp))
must.NotNil(t, resp.SignedIdentity)
verifyIdentityFn(t, srv, *resp.SignedIdentity, req.Node, structs.DefaultNodePoolNodeIdentityTTL)
},
},
{
// A node can register with a node pool that exists, and the server
// will generate an identity with the node pool's identity TTL.
name: "non-default identity ttl",
testFn: func(t *testing.T, srv *Server, codec rpc.ClientCodec) {
nodePool := mock.NodePool()
nodePool.NodeIdentityTTL = 168 * time.Hour
must.NoError(t, srv.State().UpsertNodePools(structs.MsgTypeTestSetup, 1000, []*structs.NodePool{nodePool}))
node := mock.Node()
node.NodePool = nodePool.Name
req := structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{
Region: "global",
},
}
var resp structs.NodeUpdateResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", &req, &resp))
must.NotNil(t, resp.SignedIdentity)
verifyIdentityFn(t, srv, *resp.SignedIdentity, req.Node, nodePool.NodeIdentityTTL)
},
},
{
// Ensure a new identity is generated if the identity within the
// request is close to expiration.
name: "identity close to expiration",
testFn: func(t *testing.T, srv *Server, codec rpc.ClientCodec) {
timeNow := time.Now().UTC().Add(-20 * time.Hour)
timeJWTNow := jwt.NewNumericDate(timeNow)
node := mock.Node()
must.NoError(t, srv.State().UpsertNode(structs.MsgTypeTestSetup, 1000, node))
claims := structs.GenerateNodeIdentityClaims(
node,
srv.Region(),
structs.DefaultNodePoolNodeIdentityTTL,
)
claims.IssuedAt = timeJWTNow
claims.NotBefore = timeJWTNow
claims.Expiry = jwt.NewNumericDate(time.Now().UTC().Add(4 * time.Hour))
signedToken, _, err := srv.encrypter.SignClaims(claims)
must.NoError(t, err)
req := structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{
Region: "global",
AuthToken: signedToken,
},
}
var resp structs.NodeUpdateResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", &req, &resp))
must.NotNil(t, resp.SignedIdentity)
verifyIdentityFn(t, srv, *resp.SignedIdentity, req.Node, structs.DefaultNodePoolNodeIdentityTTL)
},
},
{
// A node could disconnect from the cluster for long enough for the
// identity to expire. When it reconnects and performs its
// reregistration, the server should generate a new identity.
name: "identity expired",
testFn: func(t *testing.T, srv *Server, codec rpc.ClientCodec) {
node := mock.Node()
must.NoError(t, srv.State().UpsertNode(structs.MsgTypeTestSetup, 1000, node))
claims := structs.GenerateNodeIdentityClaims(
node,
srv.Region(),
structs.DefaultNodePoolNodeIdentityTTL,
)
claims.Expiry = jwt.NewNumericDate(time.Now().UTC().Add(-1 * time.Hour))
signedToken, _, err := srv.encrypter.SignClaims(claims)
must.NoError(t, err)
req := structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{
Region: "global",
AuthToken: signedToken,
},
}
var resp structs.NodeUpdateResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", &req, &resp))
must.NotNil(t, resp.SignedIdentity)
verifyIdentityFn(t, srv, *resp.SignedIdentity, req.Node, structs.DefaultNodePoolNodeIdentityTTL)
},
},
{
// Ensure that if the node's SecretID is tampered with, the server
// rejects any attempt to register. This test guards against potential
// regressions in how we handle identities within this
// RPC.
name: "identity expired secret ID tampered",
testFn: func(t *testing.T, srv *Server, codec rpc.ClientCodec) {
node := mock.Node()
must.NoError(t, srv.State().UpsertNode(structs.MsgTypeTestSetup, 1000, node.Copy()))
claims := structs.GenerateNodeIdentityClaims(
node,
srv.Region(),
structs.DefaultNodePoolNodeIdentityTTL,
)
claims.Expiry = jwt.NewNumericDate(time.Now().UTC().Add(-1 * time.Hour))
signedToken, _, err := srv.encrypter.SignClaims(claims)
must.NoError(t, err)
node.SecretID = uuid.Generate()
req := structs.NodeRegisterRequest{
Node: node.Copy(),
WriteRequest: structs.WriteRequest{
Region: "global",
AuthToken: signedToken,
},
}
var resp structs.NodeUpdateResponse
must.ErrorContains(
t,
msgpackrpc.CallWithCodec(codec, "Node.Register", &req, &resp),
"node secret ID does not match",
)
},
},
}
// ACL enabled server test run.
testACLServer, _, aclServerCleanup := TestACLServer(t, func(c *Config) {})
defer aclServerCleanup()
testACLCodec := rpcClient(t, testACLServer)
testutil.WaitForLeader(t, testACLServer.RPC)
testutil.WaitForKeyring(t, testACLServer.RPC, testACLServer.config.Region)
// ACL disabled server test run.
testServer, serverCleanup := TestServer(t, func(c *Config) {})
defer serverCleanup()
testCodec := rpcClient(t, testServer)
testutil.WaitForLeader(t, testServer.RPC)
testutil.WaitForKeyring(t, testServer.RPC, testServer.config.Region)
for _, tc := range testCases {
t.Run("ACL_enabled_"+tc.name, func(t *testing.T) {
tc.testFn(t, testACLServer, testACLCodec)
})
t.Run("ACL_disabled_"+tc.name, func(t *testing.T) {
tc.testFn(t, testServer, testCodec)
})
}
}
// This test asserts that we only track node connections if they are not from
// forwarded RPCs. This is essential; otherwise we would mistake a Yamux session
// to a Nomad server for the session to the node.
func TestClientEndpoint_Register_NodeConn_Forwarded(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 2
})
defer cleanupS1()
s2, cleanupS2 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 2
})
defer cleanupS2()
TestJoin(t, s1, s2)
testutil.WaitForLeaders(t, s1.RPC, s2.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Determine the non-leader server
var leader, nonLeader *Server
if s1.IsLeader() {
leader = s1
nonLeader = s2
} else {
leader = s2
nonLeader = s1
}
// Send the requests to the non-leader
codec := rpcClient(t, nonLeader)
// Check that we have no client connections
require.Empty(nonLeader.connectedNodes())
require.Empty(leader.connectedNodes())
// Create the register request
node := mock.Node()
req := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Check that we have the client connections on the non leader
nodes := nonLeader.connectedNodes()
require.Len(nodes, 1)
require.Contains(nodes, node.ID)
// Check that we have no client connections on the leader
nodes = leader.connectedNodes()
require.Empty(nodes)
// Check for the node in the FSM
state := leader.State()
testutil.WaitForResult(func() (bool, error) {
out, err := state.NodeByID(nil, node.ID)
if err != nil {
return false, err
}
if out == nil {
return false, fmt.Errorf("expected node")
}
if out.CreateIndex != resp.Index {
return false, fmt.Errorf("index mis-match")
}
if out.ComputedClass == "" {
return false, fmt.Errorf("ComputedClass not set")
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
// Close the connection and check that we remove the client connections
require.Nil(codec.Close())
testutil.WaitForResult(func() (bool, error) {
nodes := nonLeader.connectedNodes()
return len(nodes) == 0, nil
}, func(err error) {
t.Fatalf("should have no clients")
})
}
func TestClientEndpoint_Register_SecretMismatch(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
req := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Update the nodes SecretID
node.SecretID = uuid.Generate()
err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp)
if err == nil || !strings.Contains(err.Error(), "Not registering") {
t.Fatalf("Expecting error regarding mismatching secret id: %v", err)
}
}
func TestClientEndpoint_Register_NodePool(t *testing.T) {
ci.Parallel(t)
s, cleanupS := TestServer(t, nil)
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
testutil.WaitForKeyring(t, s.RPC, s.config.Region)
testCases := []struct {
name string
pool string
expectedErr string
validateFn func(*testing.T, *structs.Node)
}{
{
name: "invalid node pool name",
pool: "not@valid",
expectedErr: "invalid node pool: invalid name",
},
{
name: "built-in pool all not allowed",
pool: structs.NodePoolAll,
expectedErr: `node is not allowed to register in node pool "all"`,
},
{
name: "set default node pool when empty",
pool: "",
validateFn: func(t *testing.T, node *structs.Node) {
state := s.fsm.State()
ws := memdb.NewWatchSet()
// Verify node was registered with default node pool.
got, err := state.NodeByID(ws, node.ID)
must.NoError(t, err)
must.NotNil(t, got)
must.Eq(t, structs.NodePoolDefault, got.NodePool)
},
},
{
name: "set node pool requested",
pool: "my-pool",
validateFn: func(t *testing.T, node *structs.Node) {
state := s.fsm.State()
ws := memdb.NewWatchSet()
// Verify node was registered.
got, err := state.NodeByID(ws, node.ID)
must.NoError(t, err)
must.NotNil(t, got)
// Verify node pool was created.
pool, err := state.NodePoolByName(ws, "my-pool")
must.NoError(t, err)
must.NotNil(t, pool)
// Verify node was added to the pool.
must.Eq(t, "my-pool", got.NodePool)
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
node := mock.Node()
node.NodePool = tc.pool
req := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
var resp structs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp)
if tc.expectedErr != "" {
must.ErrorContains(t, err, tc.expectedErr)
} else {
must.NoError(t, err)
if tc.validateFn != nil {
tc.validateFn(t, req.Node)
}
}
})
}
}
func TestClientEndpoint_Register_NodePool_Multiregion(t *testing.T) {
ci.Parallel(t)
// Helper function to setup client heartbeat.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
heartbeat := func(ctx context.Context, codec rpc.ClientCodec, req *structs.NodeUpdateStatusRequest) {
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
}
var resp structs.NodeUpdateResponse
msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", req, &resp)
}
}
// Create servers in two regions.
s1, rootToken1, cleanupS1 := TestACLServer(t, func(c *Config) {
c.Region = "region-1"
c.AuthoritativeRegion = "region-1"
})
defer cleanupS1()
codec1 := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
s2, _, cleanupS2 := TestACLServer(t, func(c *Config) {
c.Region = "region-2"
c.AuthoritativeRegion = "region-1"
// Speed-up replication for testing.
c.ReplicationBackoff = 500 * time.Millisecond
c.ReplicationToken = rootToken1.SecretID
})
defer cleanupS2()
codec2 := rpcClient(t, s2)
testutil.WaitForLeader(t, s2.RPC)
testutil.WaitForKeyring(t, s2.RPC, s2.config.Region)
// Verify that registering a node with a new node pool in the authoritative
// region creates the node pool.
node1 := mock.Node()
node1.Status = ""
node1.NodePool = "new-pool-region-1"
// Register node in region-1.
req := &structs.NodeRegisterRequest{
Node: node1,
WriteRequest: structs.WriteRequest{Region: "region-1"},
}
var resp structs.GenericResponse
err := msgpackrpc.CallWithCodec(codec1, "Node.Register", req, &resp)
must.NoError(t, err)
// Setup heartbeat for node in region-1.
go heartbeat(ctx, rpcClient(t, s1), &structs.NodeUpdateStatusRequest{
NodeID: node1.ID,
Status: structs.NodeStatusReady,
WriteRequest: structs.WriteRequest{Region: "region-1", AuthToken: node1.SecretID},
})
// Verify client becomes ready.
must.Wait(t, wait.InitialSuccess(
wait.ErrorFunc(func() error {
n, err := s1.State().NodeByID(nil, node1.ID)
if err != nil {
return err
}
if n.Status != structs.NodeStatusReady {
return fmt.Errorf("expected node to be %s, got %s", structs.NodeStatusReady, n.Status)
}
return nil
}),
wait.Timeout(10*time.Second),
wait.Gap(time.Second),
))
// Verify that registering a node with a new node pool in the
// non-authoritative region does not create the node pool and the client is
// kept in the initializing status.
node2 := mock.Node()
node2.Status = ""
node2.NodePool = "new-pool-region-2"
// Register node in region-2.
req = &structs.NodeRegisterRequest{
Node: node2,
WriteRequest: structs.WriteRequest{Region: "region-2"},
}
err = msgpackrpc.CallWithCodec(codec2, "Node.Register", req, &resp)
must.NoError(t, err)
// Setup heartbeat for node in region-2.
go heartbeat(ctx, rpcClient(t, s2), &structs.NodeUpdateStatusRequest{
NodeID: node2.ID,
Status: structs.NodeStatusReady,
WriteRequest: structs.WriteRequest{Region: "region-2", AuthToken: node2.SecretID},
})
// Verify client is kept at the initializing status and has a node pool
// missing event.
must.Wait(t, wait.InitialSuccess(
wait.ErrorFunc(func() error {
n, err := s2.State().NodeByID(nil, node2.ID)
if err != nil {
return err
}
if !n.HasEvent(NodeWaitingForNodePool) {
return fmt.Errorf("node pool missing event not found:\n%v", n.Events)
}
return nil
}),
wait.Timeout(10*time.Second),
wait.Gap(time.Second),
))
must.Wait(t, wait.ContinualSuccess(
wait.ErrorFunc(func() error {
n, err := s2.State().NodeByID(nil, node2.ID)
if err != nil {
return err
}
if n.Status != structs.NodeStatusInit {
return fmt.Errorf("expected node to be %s, got %s", structs.NodeStatusInit, n.Status)
}
return nil
}),
wait.Timeout(time.Second),
wait.Gap(time.Second),
))
// Federate regions.
TestJoin(t, s1, s2)
// Verify node pool from authoritative region is replicated.
must.Wait(t, wait.InitialSuccess(
wait.ErrorFunc(func() error {
poolName := node1.NodePool
pool, err := s2.State().NodePoolByName(nil, poolName)
if err != nil {
return err
}
if pool == nil {
return fmt.Errorf("node pool %s not found in region-2", poolName)
}
return nil
}),
wait.Timeout(10*time.Second),
wait.Gap(time.Second),
))
// Create node pool for region-2.
nodePoolReq := &structs.NodePoolUpsertRequest{
NodePools: []*structs.NodePool{{Name: node2.NodePool}},
WriteRequest: structs.WriteRequest{
Region: "region-2",
AuthToken: rootToken1.SecretID,
},
}
var nodePoolResp *structs.GenericResponse
err = msgpackrpc.CallWithCodec(codec2, "NodePool.UpsertNodePools", nodePoolReq, &nodePoolResp)
must.NoError(t, err)
// Verify node pool exists in both regions and the node in region-2 is now
// ready.
must.Wait(t, wait.InitialSuccess(
wait.ErrorFunc(func() error {
for region, s := range map[string]*state.StateStore{
"region-1": s1.State(),
"region-2": s2.State(),
} {
poolName := node2.NodePool
pool, err := s.NodePoolByName(nil, poolName)
if err != nil {
return err
}
if pool == nil {
return fmt.Errorf("expected node pool %s to exist in region %s", poolName, region)
}
}
n, err := s2.State().NodeByID(nil, node2.ID)
if err != nil {
return err
}
if n.Status != structs.NodeStatusReady {
return fmt.Errorf("expected node to be %s, got %s", structs.NodeStatusReady, n.Status)
}
return nil
}),
wait.Timeout(10*time.Second),
wait.Gap(time.Second),
))
}
// Test the deprecated single node deregistration path
func TestClientEndpoint_DeregisterOne(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Deregister
dereg := &structs.NodeDeregisterRequest{
NodeID: node.ID,
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
var resp2 structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Deregister", dereg, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index == 0 {
t.Fatalf("bad index: %d", resp2.Index)
}
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.NodeByID(ws, node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("unexpected node")
}
}
func TestClientEndpoint_Deregister_ACL(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the node
node := mock.Node()
node1 := mock.Node()
state := s1.fsm.State()
if err := state.UpsertNode(structs.MsgTypeTestSetup, 1, node); err != nil {
t.Fatalf("err: %v", err)
}
if err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node1); err != nil {
t.Fatalf("err: %v", err)
}
// Create the policy and tokens
validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
// Deregister without any token and expect it to fail
dereg := &structs.NodeBatchDeregisterRequest{
NodeIDs: []string{node.ID},
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg, &resp); err == nil {
t.Fatalf("node de-register succeeded")
}
// Deregister with a valid token
dereg.AuthToken = validToken.SecretID
if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Check for the node in the FSM
ws := memdb.NewWatchSet()
out, err := state.NodeByID(ws, node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("unexpected node")
}
// Deregister with an invalid token.
dereg1 := &structs.NodeBatchDeregisterRequest{
NodeIDs: []string{node1.ID},
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node1.SecretID},
}
dereg1.AuthToken = invalidToken.SecretID
if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg1, &resp); err == nil {
t.Fatalf("rpc should not have succeeded")
}
// Try with a root token
dereg1.AuthToken = root.SecretID
if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg1, &resp); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestClientEndpoint_UpdateStatus(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Check that we have no client connections
require.Empty(s1.connectedNodes())
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Check for heartbeat interval
ttl := resp.HeartbeatTTL
if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
t.Fatalf("bad: %#v", ttl)
}
// Update the status
dereg := &structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusInit,
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
var resp2 structs.NodeUpdateResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index == 0 {
t.Fatalf("bad index: %d", resp2.Index)
}
// Check for heartbeat interval
ttl = resp2.HeartbeatTTL
if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
t.Fatalf("bad: %#v", ttl)
}
// Check that we have the client connections
nodes := s1.connectedNodes()
require.Len(nodes, 1)
require.Contains(nodes, node.ID)
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.NodeByID(ws, node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected node")
}
if out.ModifyIndex != resp2.Index {
t.Fatalf("index mis-match")
}
// Close the connection and check that we remove the client connections
require.Nil(codec.Close())
testutil.WaitForResult(func() (bool, error) {
nodes := s1.connectedNodes()
return len(nodes) == 0, nil
}, func(err error) {
t.Fatalf("should have no clients")
})
}
func TestClientEndpoint_UpdateStatus_Reconnect(t *testing.T) {
ci.Parallel(t)
jobVersions := []struct {
name string
jobSpec func(time.Duration) *structs.Job
}{
{
name: "job-with-disconnect-block",
jobSpec: func(lostAfter time.Duration) *structs.Job {
job := mock.Job()
job.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{
LostAfter: lostAfter,
}
return job
},
},
}
for _, version := range jobVersions {
t.Run(version.name, func(t *testing.T) {
// Setup server with tighter heartbeat so we don't have to wait so long
// for nodes to go down.
heartbeatTTL := time.Duration(500*testutil.TestMultiplier()) * time.Millisecond
s, rootToken, cleanupS := TestACLServer(t, func(c *Config) {
c.MinHeartbeatTTL = heartbeatTTL
c.HeartbeatGrace = 2 * heartbeatTTL
})
codec := rpcClient(t, s)
defer cleanupS()
testutil.WaitForLeader(t, s.RPC)
testutil.WaitForKeyring(t, s.RPC, s.config.Region)
// Register node.
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
var nodeUpdateResp structs.NodeUpdateResponse
err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &nodeUpdateResp)
must.NoError(t, err)
// Start heartbeat.
heartbeat := func(ctx context.Context) {
ticker := time.NewTicker(heartbeatTTL / 2)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
if t.Failed() {
return
}
req := &structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusReady,
WriteRequest: structs.WriteRequest{
Region: "global",
AuthToken: node.SecretID},
}
var resp structs.NodeUpdateResponse
// Ignore errors since an unexpected failed heartbeat will cause
// the test conditions to fail.
msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", req, &resp)
}
}
}
heartbeatCtx, cancelHeartbeat := context.WithCancel(context.Background())
defer cancelHeartbeat()
go heartbeat(heartbeatCtx)
// Wait for node to be ready.
testutil.WaitForClientStatusWithToken(t, s.RPC, node.ID, "global",
structs.NodeStatusReady, rootToken.SecretID)
// Register job with Disconnect.LostAfter
job := version.jobSpec(time.Hour)
job.Constraints = []*structs.Constraint{}
job.TaskGroups[0].Count = 1
job.TaskGroups[0].Constraints = []*structs.Constraint{}
job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
"run_for": "10m",
}
jobReq := &structs.JobRegisterRequest{
Job: job.Copy(),
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
AuthToken: rootToken.SecretID,
},
}
var jobResp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq, &jobResp)
must.NoError(t, err)
// Wait for alloc to be pending in the server.
testutil.WaitForJobAllocStatusWithToken(t, s.RPC, job, map[string]int{
structs.AllocClientStatusPending: 1,
}, rootToken.SecretID)
// Get allocs that node should run.
allocsReq := &structs.NodeSpecificRequest{
NodeID: node.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
AuthToken: rootToken.SecretID,
},
}
var allocsResp structs.NodeAllocsResponse
err = msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", allocsReq, &allocsResp)
must.NoError(t, err)
must.Len(t, 1, allocsResp.Allocs)
// Tell server the alloc is running.
// Save the alloc so we can reuse the request later.
alloc := allocsResp.Allocs[0].Copy()
alloc.ClientStatus = structs.AllocClientStatusRunning
allocUpdateReq := &structs.AllocUpdateRequest{
Alloc: []*structs.Allocation{alloc},
WriteRequest: structs.WriteRequest{
Region: "global",
AuthToken: node.SecretID,
},
}
var resp structs.GenericResponse
err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", allocUpdateReq, &resp)
must.NoError(t, err)
// Wait for alloc to be running in the server.
testutil.WaitForJobAllocStatusWithToken(t, s.RPC, job, map[string]int{
structs.AllocClientStatusRunning: 1,
}, rootToken.SecretID)
// Stop heartbeat and wait for the client to be disconnected and the alloc
// to be unknown.
cancelHeartbeat()
testutil.WaitForClientStatusWithToken(t, s.RPC, node.ID, "global",
structs.NodeStatusDisconnected, rootToken.SecretID)
testutil.WaitForJobAllocStatusWithToken(t, s.RPC, job, map[string]int{
structs.AllocClientStatusUnknown: 1,
}, rootToken.SecretID)
// Restart heartbeat to reconnect node.
heartbeatCtx, cancelHeartbeat = context.WithCancel(context.Background())
defer cancelHeartbeat()
go heartbeat(heartbeatCtx)
// Wait a few heartbeats and check that the node is still initializing.
//
// The heartbeat should not update the node to ready until it updates its
// allocs status with the server so the scheduler has the necessary
// information to avoid unnecessary placements.
time.Sleep(3 * heartbeatTTL)
testutil.WaitForClientStatusWithToken(t, s.RPC, node.ID, "global",
structs.NodeStatusInit, rootToken.SecretID)
// Get allocs that node should run.
// The node should only have one alloc assigned until it updates its allocs
// status with the server.
allocsReq = &structs.NodeSpecificRequest{
NodeID: node.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
AuthToken: rootToken.SecretID,
},
}
err = msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", allocsReq, &allocsResp)
must.NoError(t, err)
must.Len(t, 1, allocsResp.Allocs)
// Tell server the alloc is still running.
err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", allocUpdateReq, &resp)
must.NoError(t, err)
// The client must end in the same state as before it disconnected:
// - client status is ready.
// - only 1 alloc and the alloc is running.
// - all evals are terminal, so cluster is in a stable state.
testutil.WaitForClientStatusWithToken(t, s.RPC, node.ID, "global",
structs.NodeStatusReady, rootToken.SecretID)
testutil.WaitForJobAllocStatusWithToken(t, s.RPC, job, map[string]int{
structs.AllocClientStatusRunning: 1,
}, rootToken.SecretID)
testutil.WaitForResult(func() (bool, error) {
state := s.fsm.State()
ws := memdb.NewWatchSet()
evals, err := state.EvalsByJob(ws, job.Namespace, job.ID)
if err != nil {
return false, fmt.Errorf("failed to read evals: %v", err)
}
for _, eval := range evals {
// TODO: remove this check once the disconnect process stops
// leaking a max-disconnect-timeout eval.
// https://github.com/hashicorp/nomad/issues/12809
if eval.TriggeredBy == structs.EvalTriggerMaxDisconnectTimeout {
continue
}
if !eval.TerminalStatus() {
return false, fmt.Errorf("found %s eval", eval.Status)
}
}
return true, nil
}, func(err error) {
must.NoError(t, err)
})
})
}
}
func TestClientEndpoint_UpdateStatus_HeartbeatRecovery(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Check that we have no client connections
require.Empty(s1.connectedNodes())
// Create the register request but make the node down
node := mock.Node()
node.Status = structs.NodeStatusDown
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
// Update the status
dereg := &structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusInit,
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
var resp2 structs.NodeUpdateResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2))
require.NotZero(resp2.Index)
// Check for heartbeat interval
ttl := resp2.HeartbeatTTL
if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
t.Fatalf("bad: %#v", ttl)
}
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.NodeByID(ws, node.ID)
require.NoError(err)
require.NotNil(out)
require.EqualValues(resp2.Index, out.ModifyIndex)
require.Len(out.Events, 2)
require.Equal(NodeHeartbeatEventReregistered, out.Events[1].Message)
}
func TestClientEndpoint_Register_GetEvals(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Register a system job.
job := mock.SystemJob()
state := s1.fsm.State()
if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job); err != nil {
t.Fatalf("err: %v", err)
}
// Create the register request going directly to ready
node := mock.Node()
node.Status = structs.NodeStatusReady
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Check for heartbeat interval
ttl := resp.HeartbeatTTL
if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
t.Fatalf("bad: %#v", ttl)
}
// Check for an eval caused by the system job.
if len(resp.EvalIDs) != 1 {
t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
}
evalID := resp.EvalIDs[0]
ws := memdb.NewWatchSet()
eval, err := state.EvalByID(ws, evalID)
if err != nil {
t.Fatalf("could not get eval %v", evalID)
}
if eval.Type != "system" {
t.Fatalf("unexpected eval type; got %v; want %q", eval.Type, "system")
}
// Check for the node in the FSM
out, err := state.NodeByID(ws, node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected node")
}
if out.ModifyIndex != resp.Index {
t.Fatalf("index mis-match")
}
// Transition it to down and then ready
req := &structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusDown,
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if len(resp.EvalIDs) != 1 {
t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
}
req = &structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusReady,
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if len(resp.EvalIDs) != 1 {
t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
}
}
func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Register a system job.
job := mock.SystemJob()
state := s1.fsm.State()
if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, nil, job); err != nil {
t.Fatalf("err: %v", err)
}
// Create the register request
node := mock.Node()
node.Status = structs.NodeStatusInit
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Check for heartbeat interval
ttl := resp.HeartbeatTTL
if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
t.Fatalf("bad: %#v", ttl)
}
// Update the status
update := &structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusReady,
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
var resp2 structs.NodeUpdateResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", update, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index == 0 {
t.Fatalf("bad index: %d", resp2.Index)
}
// Check for an eval caused by the system job.
if len(resp2.EvalIDs) != 1 {
t.Fatalf("expected one eval; got %#v", resp2.EvalIDs)
}
evalID := resp2.EvalIDs[0]
ws := memdb.NewWatchSet()
eval, err := state.EvalByID(ws, evalID)
if err != nil {
t.Fatalf("could not get eval %v", evalID)
}
if eval.Type != "system" {
t.Fatalf("unexpected eval type; got %v; want %q", eval.Type, "system")
}
// Check for heartbeat interval
ttl = resp2.HeartbeatTTL
if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
t.Fatalf("bad: %#v", ttl)
}
// Check for the node in the FSM
out, err := state.NodeByID(ws, node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected node")
}
if out.ModifyIndex != resp2.Index {
t.Fatalf("index mis-match")
}
}
func TestClientEndpoint_UpdateStatus_HeartbeatOnly(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 3
})
defer cleanupS1()
s2, cleanupS2 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 3
})
defer cleanupS2()
s3, cleanupS3 := TestServer(t, func(c *Config) {
c.BootstrapExpect = 3
})
defer cleanupS3()
servers := []*Server{s1, s2, s3}
TestJoin(t, s1, s2, s3)
for _, s := range servers {
testutil.WaitForResult(func() (bool, error) {
peers, _ := s.numPeers()
return peers == 3, nil
}, func(err error) {
t.Fatalf("should have 3 peers")
})
}
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Check for heartbeat interval
ttl := resp.HeartbeatTTL
if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
t.Fatalf("bad: %#v", ttl)
}
// Check for heartbeat servers
serverAddrs := resp.Servers
if len(serverAddrs) == 0 {
t.Fatalf("bad: %#v", serverAddrs)
}
// Update the status, static state
dereg := &structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: node.Status,
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
var resp2 structs.NodeUpdateResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != 0 {
t.Fatalf("bad index: %d", resp2.Index)
}
// Check for heartbeat interval
ttl = resp2.HeartbeatTTL
if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
t.Fatalf("bad: %#v", ttl)
}
}
func TestClientEndpoint_UpdateStatus_HeartbeatOnly_Advertise(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
advAddr := "127.0.1.1:1234"
adv, err := net.ResolveTCPAddr("tcp", advAddr)
require.Nil(err)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.ClientRPCAdvertise = adv
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Check for heartbeat interval
ttl := resp.HeartbeatTTL
if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
t.Fatalf("bad: %#v", ttl)
}
// Check for heartbeat servers
require.Len(resp.Servers, 1)
require.Equal(resp.Servers[0].RPCAdvertiseAddr, advAddr)
}
func TestNode_UpdateStatus_Identity(t *testing.T) {
ci.Parallel(t)
// This helper function verifies the identity token generated by the server
// in the Node.UpdateStatus RPC call.
verifyIdentityFn := func(
t *testing.T,
testServer *Server,
token string,
node *structs.Node,
ttl time.Duration,
) {
t.Helper()
identityClaims, err := testServer.encrypter.VerifyClaim(token)
must.NoError(t, err)
must.Eq(t, ttl, identityClaims.Expiry.Time().Sub(identityClaims.NotBefore.Time()))
must.True(t, identityClaims.IsNode())
must.Eq(t, identityClaims.NodeIdentityClaims, &structs.NodeIdentityClaims{
NodeID: node.ID,
NodeDatacenter: node.Datacenter,
NodeClass: node.NodeClass,
NodePool: node.NodePool,
})
// Identify the active encrypter key ID, which would have been used to
// sign the identity token.
_, keyID, err := testServer.encrypter.GetActiveKey()
must.NoError(t, err)
// Perform a lookup of the node in state. The IdentitySigningKeyID field
// should be populated with the active encrypter key ID.
stateNodeResp, err := testServer.State().NodeByID(nil, node.ID)
must.NoError(t, err)
must.NotNil(t, stateNodeResp)
must.Eq(t, keyID, stateNodeResp.IdentitySigningKeyID)
}
testCases := []struct {
name string
testFn func(t *testing.T, srv *Server, codec rpc.ClientCodec)
}{
{
// Ensure that the Node.UpdateStatus RPC generates a new identity
// for a client authenticating using its secret ID.
name: "node secret ID authenticated default pool",
testFn: func(t *testing.T, srv *Server, codec rpc.ClientCodec) {
node := mock.Node()
must.Eq(t, "", node.IdentitySigningKeyID)
must.NoError(t, srv.State().UpsertNode(structs.MsgTypeTestSetup, srv.raft.LastIndex(), node))
req := &structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusReady,
WriteRequest: structs.WriteRequest{
Region: srv.Region(),
AuthToken: node.SecretID,
},
}
var resp structs.NodeUpdateResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", req, &resp))
must.NotNil(t, resp.SignedIdentity)
verifyIdentityFn(t, srv, *resp.SignedIdentity, node, structs.DefaultNodePoolNodeIdentityTTL)
},
},
{
// Ensure that the Node.UpdateStatus RPC generates a new identity
// for a client authenticating using its secret ID which belongs to
// a non-default node pool.
name: "node secret ID authenticated non-default pool",
testFn: func(t *testing.T, srv *Server, codec rpc.ClientCodec) {
nodePool := mock.NodePool()
nodePool.NodeIdentityTTL = 168 * time.Hour
must.NoError(t, srv.State().UpsertNodePools(
structs.MsgTypeTestSetup,
srv.raft.LastIndex(),
[]*structs.NodePool{nodePool},
))
node := mock.Node()
node.NodePool = nodePool.Name
must.Eq(t, "", node.IdentitySigningKeyID)
must.NoError(t, srv.State().UpsertNode(structs.MsgTypeTestSetup, srv.raft.LastIndex(), node))
req := &structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusReady,
WriteRequest: structs.WriteRequest{
Region: srv.Region(),
AuthToken: node.SecretID,
},
}
var resp structs.NodeUpdateResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", req, &resp))
must.NotNil(t, resp.SignedIdentity)
verifyIdentityFn(t, srv, *resp.SignedIdentity, node, nodePool.NodeIdentityTTL)
},
},
{
// Nomad servers often call the Node.UpdateStatus RPC to notify that
// a node has missed its heartbeat. In this case, we should write
// the update to state, but not generate an identity token.
name: "leader acl token authenticated",
testFn: func(t *testing.T, srv *Server, codec rpc.ClientCodec) {
node := mock.Node()
must.NoError(t, srv.State().UpsertNode(structs.MsgTypeTestSetup, srv.raft.LastIndex(), node))
req := &structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusDown,
WriteRequest: structs.WriteRequest{
Region: srv.Region(),
AuthToken: srv.getLeaderAcl(),
},
}
var resp structs.NodeUpdateResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", req, &resp))
must.Nil(t, resp.SignedIdentity)
stateNode, err := srv.State().NodeByID(nil, node.ID)
must.NoError(t, err)
must.NotNil(t, stateNode)
must.Eq(t, structs.NodeStatusDown, stateNode.Status)
must.Greater(t, stateNode.CreateIndex, stateNode.ModifyIndex)
},
},
{
// Ensure a new identity is generated if the identity within the
// request is close to expiration.
name: "identity close to expiration",
testFn: func(t *testing.T, srv *Server, codec rpc.ClientCodec) {
timeNow := time.Now().UTC().Add(-20 * time.Hour)
timeJWTNow := jwt.NewNumericDate(timeNow)
node := mock.Node()
must.NoError(t, srv.State().UpsertNode(structs.MsgTypeTestSetup, srv.raft.LastIndex(), node))
claims := structs.GenerateNodeIdentityClaims(
node,
srv.Region(),
structs.DefaultNodePoolNodeIdentityTTL,
)
claims.IssuedAt = timeJWTNow
claims.NotBefore = timeJWTNow
claims.Expiry = jwt.NewNumericDate(time.Now().UTC().Add(4 * time.Hour))
signedToken, _, err := srv.encrypter.SignClaims(claims)
must.NoError(t, err)
req := structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusReady,
WriteRequest: structs.WriteRequest{
Region: "global",
AuthToken: signedToken,
},
}
var resp structs.NodeUpdateResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", &req, &resp))
must.NotNil(t, resp.SignedIdentity)
verifyIdentityFn(t, srv, *resp.SignedIdentity, node, structs.DefaultNodePoolNodeIdentityTTL)
},
},
{
// Ensure a new identity is generated if the identity within the
// request is close to expiration and the new identity has a TTL set
// by its custom node pool configuration.
name: "identity close to expiration custom pool",
testFn: func(t *testing.T, srv *Server, codec rpc.ClientCodec) {
nodePool := mock.NodePool()
nodePool.NodeIdentityTTL = 168 * time.Hour
must.NoError(t, srv.State().UpsertNodePools(
structs.MsgTypeTestSetup,
srv.raft.LastIndex(),
[]*structs.NodePool{nodePool},
))
timeNow := time.Now().UTC().Add(-135 * time.Hour)
timeJWTNow := jwt.NewNumericDate(timeNow)
node := mock.Node()
node.NodePool = nodePool.Name
must.NoError(t, srv.State().UpsertNode(structs.MsgTypeTestSetup, srv.raft.LastIndex(), node))
claims := structs.GenerateNodeIdentityClaims(
node,
srv.Region(),
structs.DefaultNodePoolNodeIdentityTTL,
)
claims.IssuedAt = timeJWTNow
claims.NotBefore = timeJWTNow
claims.Expiry = jwt.NewNumericDate(time.Now().UTC().Add(4 * time.Hour))
signedToken, _, err := srv.encrypter.SignClaims(claims)
must.NoError(t, err)
req := structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusReady,
WriteRequest: structs.WriteRequest{
Region: "global",
AuthToken: signedToken,
},
}
var resp structs.NodeUpdateResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", &req, &resp))
must.NotNil(t, resp.SignedIdentity)
verifyIdentityFn(t, srv, *resp.SignedIdentity, node, nodePool.NodeIdentityTTL)
},
},
}
// ACL enabled server test run.
testACLServer, _, aclServerCleanup := TestACLServer(t, func(c *Config) {})
defer aclServerCleanup()
testACLCodec := rpcClient(t, testACLServer)
testutil.WaitForLeader(t, testACLServer.RPC)
testutil.WaitForKeyring(t, testACLServer.RPC, testACLServer.config.Region)
// ACL disabled server test run.
testServer, serverCleanup := TestServer(t, func(c *Config) {})
defer serverCleanup()
testCodec := rpcClient(t, testServer)
testutil.WaitForLeader(t, testServer.RPC)
testutil.WaitForKeyring(t, testServer.RPC, testServer.config.Region)
for _, tc := range testCases {
t.Run("ACL_enabled_"+tc.name, func(t *testing.T) {
tc.testFn(t, testACLServer, testACLCodec)
})
t.Run("ACL_disabled_"+tc.name, func(t *testing.T) {
tc.testFn(t, testServer, testCodec)
})
}
}
// TestClientEndpoint_UpdateDrain asserts the ability to initiate drain
// against a node and cancel that drain. It also asserts:
// * an evaluation is created when the node becomes eligible
// * drain metadata is properly persisted in Node.LastDrain
func TestClientEndpoint_UpdateDrain(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Disable drainer to prevent drain from completing during test
s1.nodeDrainer.SetEnabled(false, nil)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
beforeUpdate := time.Now()
strategy := &structs.DrainStrategy{
DrainSpec: structs.DrainSpec{
Deadline: 10 * time.Second,
},
}
// Update the drain strategy
dereg := &structs.NodeUpdateDrainRequest{
NodeID: node.ID,
DrainStrategy: strategy,
Meta: map[string]string{"message": "this node is not needed"},
WriteRequest: structs.WriteRequest{Region: "global"},
}
var resp2 structs.NodeDrainUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
require.NotZero(resp2.Index)
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.NodeByID(ws, node.ID)
require.Nil(err)
require.NotNil(out.DrainStrategy)
require.Equal(strategy.Deadline, out.DrainStrategy.Deadline)
require.Len(out.Events, 2)
require.Equal(NodeDrainEventDrainSet, out.Events[1].Message)
require.NotNil(out.LastDrain)
require.Equal(structs.DrainMetadata{
StartedAt: out.LastDrain.UpdatedAt,
UpdatedAt: out.LastDrain.StartedAt,
Status: structs.DrainStatusDraining,
Meta: map[string]string{"message": "this node is not needed"},
AccessorID: "token:acls-disabled",
}, *out.LastDrain)
// before+deadline should be before the forced deadline
require.True(beforeUpdate.Add(strategy.Deadline).Before(out.DrainStrategy.ForceDeadline))
// now+deadline should be after the forced deadline
require.True(time.Now().Add(strategy.Deadline).After(out.DrainStrategy.ForceDeadline))
drainStartedAt := out.DrainStrategy.StartedAt
// StartedAt should be close to the time the drain started
require.WithinDuration(beforeUpdate, drainStartedAt, 1*time.Second)
// StartedAt shouldn't change if a new request comes while still draining
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
ws = memdb.NewWatchSet()
out, err = state.NodeByID(ws, node.ID)
require.NoError(err)
require.True(out.DrainStrategy.StartedAt.Equal(drainStartedAt))
// Register a system job
job := mock.SystemJob()
require.Nil(s1.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job))
// Update the eligibility and expect evals
dereg.DrainStrategy = nil
dereg.MarkEligible = true
dereg.Meta = map[string]string{"cancelled": "yes"}
var resp3 structs.NodeDrainUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp3))
require.NotZero(resp3.Index)
require.NotZero(resp3.EvalCreateIndex)
require.Len(resp3.EvalIDs, 1)
// Check for updated node in the FSM
ws = memdb.NewWatchSet()
out, err = state.NodeByID(ws, node.ID)
require.NoError(err)
require.Len(out.Events, 4)
require.Equal(NodeDrainEventDrainDisabled, out.Events[3].Message)
require.NotNil(out.LastDrain)
require.False(out.LastDrain.UpdatedAt.Before(out.LastDrain.StartedAt))
require.Equal(structs.DrainMetadata{
StartedAt: out.LastDrain.StartedAt,
UpdatedAt: out.LastDrain.UpdatedAt,
Status: structs.DrainStatusCanceled,
Meta: map[string]string{"cancelled": "yes"},
AccessorID: "token:acls-disabled",
}, *out.LastDrain)
// Check that calling UpdateDrain with the same DrainStrategy does not emit
// a node event.
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp3))
ws = memdb.NewWatchSet()
out, err = state.NodeByID(ws, node.ID)
require.NoError(err)
require.Len(out.Events, 4)
}
// TestClientEndpoint_UpdatedDrainAndCompleted asserts that drain metadata
// is properly persisted in Node.LastDrain as the node drain is updated and
// completes.
func TestClientEndpoint_UpdatedDrainAndCompleted(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
state := s1.fsm.State()
// Disable drainer for now
s1.nodeDrainer.SetEnabled(false, nil)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
strategy := &structs.DrainStrategy{
DrainSpec: structs.DrainSpec{
Deadline: 10 * time.Second,
},
}
// Update the drain strategy
dereg := &structs.NodeUpdateDrainRequest{
NodeID: node.ID,
DrainStrategy: strategy,
Meta: map[string]string{
"message": "first drain",
},
WriteRequest: structs.WriteRequest{Region: "global"},
}
var resp2 structs.NodeDrainUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
require.NotZero(resp2.Index)
// Check for the node in the FSM
out, err := state.NodeByID(nil, node.ID)
require.Nil(err)
require.NotNil(out.DrainStrategy)
require.NotNil(out.LastDrain)
firstDrainUpdate := out.LastDrain.UpdatedAt
require.Equal(structs.DrainMetadata{
StartedAt: firstDrainUpdate,
UpdatedAt: firstDrainUpdate,
Status: structs.DrainStatusDraining,
Meta: map[string]string{"message": "first drain"},
AccessorID: "token:acls-disabled",
}, *out.LastDrain)
time.Sleep(1 * time.Second)
// Update the drain
dereg.DrainStrategy.DrainSpec.Deadline *= 2
dereg.Meta["message"] = "second drain"
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
require.NotZero(resp2.Index)
out, err = state.NodeByID(nil, node.ID)
require.Nil(err)
require.NotNil(out.DrainStrategy)
require.NotNil(out.LastDrain)
secondDrainUpdate := out.LastDrain.UpdatedAt
require.True(secondDrainUpdate.After(firstDrainUpdate))
require.Equal(structs.DrainMetadata{
StartedAt: firstDrainUpdate,
UpdatedAt: secondDrainUpdate,
Status: structs.DrainStatusDraining,
Meta: map[string]string{"message": "second drain"},
AccessorID: "token:acls-disabled",
}, *out.LastDrain)
time.Sleep(1 * time.Second)
// Enable the drainer, wait for completion
s1.nodeDrainer.SetEnabled(true, state)
testutil.WaitForResult(func() (bool, error) {
out, err = state.NodeByID(nil, node.ID)
if err != nil {
return false, err
}
if out == nil {
return false, fmt.Errorf("could not find node")
}
return out.DrainStrategy == nil, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
require.True(out.LastDrain.UpdatedAt.After(secondDrainUpdate))
require.Equal(structs.DrainMetadata{
StartedAt: firstDrainUpdate,
UpdatedAt: out.LastDrain.UpdatedAt,
Status: structs.DrainStatusComplete,
Meta: map[string]string{"message": "second drain"},
AccessorID: "token:acls-disabled",
}, *out.LastDrain)
}
// TestClientEndpoint_UpdatedDrainNoop asserts that drain metadata is properly
// persisted in Node.LastDrain when calls to Node.UpdateDrain() don't affect
// the drain status.
func TestClientEndpoint_UpdatedDrainNoop(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
state := s1.fsm.State()
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
// Update the status
dereg := &structs.NodeUpdateDrainRequest{
NodeID: node.ID,
DrainStrategy: &structs.DrainStrategy{
DrainSpec: structs.DrainSpec{
Deadline: 10 * time.Second,
},
},
Meta: map[string]string{
"message": "drain",
},
WriteRequest: structs.WriteRequest{Region: "global"},
}
var drainResp structs.NodeDrainUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &drainResp))
require.NotZero(drainResp.Index)
var out *structs.Node
testutil.WaitForResult(func() (bool, error) {
var err error
out, err = state.NodeByID(nil, node.ID)
if err != nil {
return false, err
}
if out == nil {
return false, fmt.Errorf("could not find node")
}
return out.DrainStrategy == nil && out.SchedulingEligibility == structs.NodeSchedulingIneligible, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
require.Equal(structs.DrainStatusComplete, out.LastDrain.Status)
require.Equal(map[string]string{"message": "drain"}, out.LastDrain.Meta)
prevDrain := out.LastDrain
// Call again with a nil DrainStrategy; this should be a no-op because the drain is already complete
dereg.DrainStrategy = nil
dereg.Meta = map[string]string{
"new_message": "is new",
}
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &drainResp))
require.NotZero(drainResp.Index)
out, err := state.NodeByID(nil, node.ID)
require.Nil(err)
require.Nil(out.DrainStrategy)
require.NotNil(out.LastDrain)
require.Equal(prevDrain, out.LastDrain)
}
// TestClientEndpoint_UpdateDrain_ACL asserts that Node.UpdateDrain() enforces
// node.write ACLs, and that token accessor ID is properly persisted in
// Node.LastDrain.AccessorID
func TestClientEndpoint_UpdateDrain_ACL(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
require := require.New(t)
// Create the node
node := mock.Node()
state := s1.fsm.State()
require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
// Create the policy and tokens
validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
// Update the status without a token and expect failure
dereg := &structs.NodeUpdateDrainRequest{
NodeID: node.ID,
DrainStrategy: &structs.DrainStrategy{
DrainSpec: structs.DrainSpec{
Deadline: 10 * time.Second,
},
},
WriteRequest: structs.WriteRequest{Region: "global"},
}
{
var resp structs.NodeDrainUpdateResponse
err := msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp)
require.NotNil(err, "RPC")
require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a valid token
dereg.AuthToken = validToken.SecretID
{
var resp structs.NodeDrainUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp), "RPC")
out, err := state.NodeByID(nil, node.ID)
require.NoError(err)
require.Equal("token:"+validToken.AccessorID, out.LastDrain.AccessorID)
}
// Try with an invalid token
dereg.AuthToken = invalidToken.SecretID
{
var resp structs.NodeDrainUpdateResponse
err := msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp)
require.NotNil(err, "RPC")
require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a root token
dereg.DrainStrategy.DrainSpec.Deadline = 20 * time.Second
dereg.AuthToken = root.SecretID
{
var resp structs.NodeDrainUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp), "RPC")
out, err := state.NodeByID(nil, node.ID)
require.NoError(err)
require.Equal("token:"+root.AccessorID, out.LastDrain.AccessorID)
}
}
// This test ensures that Nomad marks the client state of pending/running
// allocations as lost when a node is marked as down.
func TestClientEndpoint_Drain_Down(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
require := require.New(t)
// Register a node
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
// Register a service job
var jobResp structs.JobRegisterResponse
job := mock.Job()
job.TaskGroups[0].Count = 1
jobReq := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq, &jobResp))
// Register a system job
var jobResp1 structs.JobRegisterResponse
job1 := mock.SystemJob()
job1.TaskGroups[0].Count = 1
jobReq1 := &structs.JobRegisterRequest{
Job: job1,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job1.Namespace,
},
}
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq1, &jobResp1))
// Wait for the scheduler to create an allocation
testutil.WaitForResult(func() (bool, error) {
ws := memdb.NewWatchSet()
allocs, err := s1.fsm.state.AllocsByJob(ws, job.Namespace, job.ID, true)
if err != nil {
return false, err
}
allocs1, err := s1.fsm.state.AllocsByJob(ws, job1.Namespace, job1.ID, true)
if err != nil {
return false, err
}
return len(allocs) > 0 && len(allocs1) > 0, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
// Drain the node
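// Use a negative deadline to force an immediate drain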
dereg := &structs.NodeUpdateDrainRequest{
NodeID: node.ID,
DrainStrategy: &structs.DrainStrategy{
DrainSpec: structs.DrainSpec{
Deadline: -1 * time.Second,
},
},
WriteRequest: structs.WriteRequest{Region: "global"},
}
var resp2 structs.NodeDrainUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
// Mark the node as down
req := &structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusDown,
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", req, &resp))
// Ensure that the allocation has transitioned to lost
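// The service job should report its alloc as lost with a replacement queued,
// while the system job only reports the lost alloc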
testutil.WaitForResult(func() (bool, error) {
ws := memdb.NewWatchSet()
summary, err := s1.fsm.state.JobSummaryByID(ws, job.Namespace, job.ID)
if err != nil {
return false, err
}
expectedSummary := &structs.JobSummary{
JobID: job.ID,
Namespace: job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": {
Queued: 1,
Lost: 1,
},
},
Children: new(structs.JobChildrenSummary),
CreateIndex: jobResp.JobModifyIndex,
ModifyIndex: summary.ModifyIndex,
}
if !reflect.DeepEqual(summary, expectedSummary) {
return false, fmt.Errorf("Service: expected: %#v, actual: %#v", expectedSummary, summary)
}
summary1, err := s1.fsm.state.JobSummaryByID(ws, job1.Namespace, job1.ID)
if err != nil {
return false, err
}
expectedSummary1 := &structs.JobSummary{
JobID: job1.ID,
Namespace: job1.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": {
Lost: 1,
},
},
Children: new(structs.JobChildrenSummary),
CreateIndex: jobResp1.JobModifyIndex,
ModifyIndex: summary1.ModifyIndex,
}
if !reflect.DeepEqual(summary1, expectedSummary1) {
return false, fmt.Errorf("System: expected: %#v, actual: %#v", expectedSummary1, summary1)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %v", err)
})
}
func TestClientEndpoint_UpdateEligibility(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
// Update the eligibility
elig := &structs.NodeUpdateEligibilityRequest{
NodeID: node.ID,
Eligibility: structs.NodeSchedulingIneligible,
WriteRequest: structs.WriteRequest{Region: "global"},
}
var resp2 structs.NodeEligibilityUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", elig, &resp2))
require.NotZero(resp2.Index)
require.Zero(resp2.EvalCreateIndex)
require.Empty(resp2.EvalIDs)
// Check for the node in the FSM
state := s1.fsm.State()
out, err := state.NodeByID(nil, node.ID)
require.Nil(err)
require.Equal(out.SchedulingEligibility, structs.NodeSchedulingIneligible)
require.Len(out.Events, 2)
require.Equal(NodeEligibilityEventIneligible, out.Events[1].Message)
// Register a system job
job := mock.SystemJob()
require.Nil(s1.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job))
// Update the eligibility and expect evals
elig.Eligibility = structs.NodeSchedulingEligible
var resp3 structs.NodeEligibilityUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", elig, &resp3))
require.NotZero(resp3.Index)
require.NotZero(resp3.EvalCreateIndex)
require.Len(resp3.EvalIDs, 1)
out, err = state.NodeByID(nil, node.ID)
require.Nil(err)
require.Len(out.Events, 3)
require.Equal(NodeEligibilityEventEligible, out.Events[2].Message)
}
func TestClientEndpoint_UpdateEligibility_ACL(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
require := require.New(t)
// Create the node
node := mock.Node()
state := s1.fsm.State()
require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
// Create the policy and tokens
validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
// Update the status without a token and expect failure
dereg := &structs.NodeUpdateEligibilityRequest{
NodeID: node.ID,
Eligibility: structs.NodeSchedulingIneligible,
WriteRequest: structs.WriteRequest{Region: "global"},
}
{
var resp structs.NodeEligibilityUpdateResponse
err := msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp)
require.NotNil(err, "RPC")
require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a valid token
dereg.AuthToken = validToken.SecretID
{
var resp structs.NodeEligibilityUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp), "RPC")
}
// Try with an invalid token
dereg.AuthToken = invalidToken.SecretID
{
var resp structs.NodeEligibilityUpdateResponse
err := msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp)
require.NotNil(err, "RPC")
require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a root token
dereg.AuthToken = root.SecretID
{
var resp structs.NodeEligibilityUpdateResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp), "RPC")
}
}
func TestClientEndpoint_GetNode(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
node.CreateIndex = resp.Index
node.ModifyIndex = resp.Index
// Lookup the node
get := &structs.NodeSpecificRequest{
NodeID: node.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
}
var resp2 structs.SingleNodeResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != resp.Index {
t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
}
if resp2.Node.ComputedClass == "" {
t.Fatalf("bad ComputedClass: %#v", resp2.Node)
}
_, keyID, err := s1.encrypter.GetActiveKey()
must.NoError(t, err)
// Normalize server-managed fields before comparing the nodes
node.StatusUpdatedAt = resp2.Node.StatusUpdatedAt
node.SecretID = ""
node.Events = resp2.Node.Events
node.IdentitySigningKeyID = keyID
must.Eq(t, node, resp2.Node)
// assert that the node register event was set correctly
if len(resp2.Node.Events) != 1 {
t.Fatalf("Did not set node events: %#v", resp2.Node)
}
if resp2.Node.Events[0].Message != state.NodeRegisterEventRegistered {
t.Fatalf("Did not set node register event correctly: %#v", resp2.Node)
}
// Lookup non-existing node
get.NodeID = "12345678-abcd-efab-cdef-123456789abc"
if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != resp.Index {
t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
}
if resp2.Node != nil {
t.Fatalf("unexpected node")
}
}
func TestClientEndpoint_GetNode_ACL(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
assert := assert.New(t)
// Create the node
node := mock.Node()
state := s1.fsm.State()
assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
// Create the policy and tokens
validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyRead))
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyDeny))
// Lookup the node without a token and expect failure
req := &structs.NodeSpecificRequest{
NodeID: node.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
}
{
var resp structs.SingleNodeResponse
err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp)
assert.NotNil(err, "RPC")
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a valid token
req.AuthToken = validToken.SecretID
{
var resp structs.SingleNodeResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
assert.Equal(node.ID, resp.Node.ID)
}
// Try with a Node.SecretID
req.AuthToken = node.SecretID
{
var resp structs.SingleNodeResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
assert.Equal(node.ID, resp.Node.ID)
}
// Try with an invalid token
req.AuthToken = invalidToken.SecretID
{
var resp structs.SingleNodeResponse
err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp)
assert.NotNil(err, "RPC")
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a root token
req.AuthToken = root.SecretID
{
var resp structs.SingleNodeResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
assert.Equal(node.ID, resp.Node.ID)
}
}
func TestClientEndpoint_GetNode_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
state := s1.fsm.State()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the node
node1 := mock.Node()
node2 := mock.Node()
// First create an unrelated node.
time.AfterFunc(100*time.Millisecond, func() {
if err := state.UpsertNode(structs.MsgTypeTestSetup, 100, node1); err != nil {
t.Fatalf("err: %v", err)
}
})
// Upsert the node we are watching later
time.AfterFunc(200*time.Millisecond, func() {
if err := state.UpsertNode(structs.MsgTypeTestSetup, 200, node2); err != nil {
t.Fatalf("err: %v", err)
}
})
// Lookup the node
req := &structs.NodeSpecificRequest{
NodeID: node2.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
MinQueryIndex: 150,
},
}
var resp structs.SingleNodeResponse
start := time.Now()
if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
if resp.Index != 200 {
t.Fatalf("Bad index: %d %d", resp.Index, 200)
}
if resp.Node == nil || resp.Node.ID != node2.ID {
t.Fatalf("bad: %#v", resp.Node)
}
// Node update triggers watches
time.AfterFunc(100*time.Millisecond, func() {
nodeUpdate := mock.Node()
nodeUpdate.ID = node2.ID
nodeUpdate.Status = structs.NodeStatusDown
if err := state.UpsertNode(structs.MsgTypeTestSetup, 300, nodeUpdate); err != nil {
t.Fatalf("err: %v", err)
}
})
req.QueryOptions.MinQueryIndex = 250
var resp2 structs.SingleNodeResponse
start = time.Now()
if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
if resp2.Index != 300 {
t.Fatalf("Bad index: %d %d", resp2.Index, 300)
}
if resp2.Node == nil || resp2.Node.Status != structs.NodeStatusDown {
t.Fatalf("bad: %#v", resp2.Node)
}
// Node delete triggers watches
time.AfterFunc(100*time.Millisecond, func() {
if err := state.DeleteNode(structs.MsgTypeTestSetup, 400, []string{node2.ID}); err != nil {
t.Fatalf("err: %v", err)
}
})
req.QueryOptions.MinQueryIndex = 350
var resp3 structs.SingleNodeResponse
start = time.Now()
if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp3); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
if resp3.Index != 400 {
t.Fatalf("Bad index: %d %d", resp2.Index, 400)
}
if resp3.Node != nil {
t.Fatalf("bad: %#v", resp3.Node)
}
}
func TestClientEndpoint_GetAllocs(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
node.CreateIndex = resp.Index
node.ModifyIndex = resp.Index
// Inject a fake allocation
alloc := mock.Alloc()
alloc.NodeID = node.ID
state := s1.fsm.State()
state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
if err != nil {
t.Fatalf("err: %v", err)
}
// Lookup the allocs
get := &structs.NodeSpecificRequest{
NodeID: node.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
}
var resp2 structs.NodeAllocsResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != 100 {
t.Fatalf("Bad index: %d %d", resp2.Index, 100)
}
if len(resp2.Allocs) != 1 || resp2.Allocs[0].ID != alloc.ID {
t.Fatalf("bad: %#v", resp2.Allocs)
}
// Lookup non-existing node
get.NodeID = "foobarbaz"
if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != 100 {
t.Fatalf("Bad index: %d %d", resp2.Index, 100)
}
if len(resp2.Allocs) != 0 {
t.Fatalf("unexpected node")
}
}
func TestClientEndpoint_GetAllocs_ACL_Basic(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
assert := assert.New(t)
// Create the node
allocDefaultNS := mock.Alloc()
node := mock.Node()
allocDefaultNS.NodeID = node.ID
state := s1.fsm.State()
assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(allocDefaultNS.JobID)), "UpsertJobSummary")
allocs := []*structs.Allocation{allocDefaultNS}
assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 5, allocs), "UpsertAllocs")
// Create the namespace policy and tokens
validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
invalidToken := mock.CreatePolicyAndToken(t, state, 1004, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
req := &structs.NodeSpecificRequest{
NodeID: node.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
},
}
// Lookup the node without a token and expect failure
{
var resp structs.NodeAllocsResponse
err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
assert.NotNil(err, "RPC")
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a valid token for the default namespace
req.AuthToken = validDefaultToken.SecretID
{
var resp structs.NodeAllocsResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
assert.Len(resp.Allocs, 1)
assert.Equal(allocDefaultNS.ID, resp.Allocs[0].ID)
}
// Try with an invalid token
req.AuthToken = invalidToken.SecretID
{
var resp structs.NodeAllocsResponse
err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
assert.NotNil(err, "RPC")
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a root token
req.AuthToken = root.SecretID
{
var resp structs.NodeAllocsResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
assert.Len(resp.Allocs, 1)
for _, alloc := range resp.Allocs {
switch alloc.ID {
case allocDefaultNS.ID:
// expected
default:
t.Errorf("unexpected alloc %q for namespace %q", alloc.ID, alloc.Namespace)
}
}
}
}
func TestClientEndpoint_GetAllocs_ACL_Namespaces(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
assert := assert.New(t)
// Create the namespaces
ns1 := mock.Namespace()
ns2 := mock.Namespace()
ns1.Name = "altnamespace"
ns2.Name = "should-only-be-displayed-for-root-ns"
// Create the allocs
allocDefaultNS := mock.Alloc()
allocAltNS := mock.Alloc()
allocAltNS.Namespace = ns1.Name
allocOtherNS := mock.Alloc()
allocOtherNS.Namespace = ns2.Name
node := mock.Node()
allocDefaultNS.NodeID = node.ID
allocAltNS.NodeID = node.ID
allocOtherNS.NodeID = node.ID
state := s1.fsm.State()
assert.Nil(state.UpsertNamespaces(1, []*structs.Namespace{ns1, ns2}), "UpsertNamespaces")
assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 2, node), "UpsertNode")
assert.Nil(state.UpsertJobSummary(3, mock.JobSummary(allocDefaultNS.JobID)), "UpsertJobSummary")
assert.Nil(state.UpsertJobSummary(4, mock.JobSummary(allocAltNS.JobID)), "UpsertJobSummary")
assert.Nil(state.UpsertJobSummary(5, mock.JobSummary(allocOtherNS.JobID)), "UpsertJobSummary")
allocs := []*structs.Allocation{allocDefaultNS, allocAltNS, allocOtherNS}
assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 6, allocs), "UpsertAllocs")
// Create the namespace policy and tokens
validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
validNoNSToken := mock.CreatePolicyAndToken(t, state, 1003, "test-alt-valid", mock.NodePolicy(acl.PolicyRead))
invalidToken := mock.CreatePolicyAndToken(t, state, 1004, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
// Lookup the node without a token and expect failure
req := &structs.NodeSpecificRequest{
NodeID: node.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
}
{
var resp structs.NodeAllocsResponse
err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
assert.NotNil(err, "RPC")
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a valid token for the default namespace
req.AuthToken = validDefaultToken.SecretID
{
var resp structs.NodeAllocsResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
assert.Len(resp.Allocs, 1)
assert.Equal(allocDefaultNS.ID, resp.Allocs[0].ID)
}
// Try with a valid token for a namespace with no allocs on this node
req.AuthToken = validNoNSToken.SecretID
{
var resp structs.NodeAllocsResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
assert.Len(resp.Allocs, 0)
}
// Try with an invalid token
req.AuthToken = invalidToken.SecretID
{
var resp structs.NodeAllocsResponse
err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
assert.NotNil(err, "RPC")
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a root token
req.AuthToken = root.SecretID
{
var resp structs.NodeAllocsResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
assert.Len(resp.Allocs, 3)
for _, alloc := range resp.Allocs {
switch alloc.ID {
case allocDefaultNS.ID, allocAltNS.ID, allocOtherNS.ID:
// expected
default:
t.Errorf("unexpected alloc %q for namespace %q", alloc.ID, alloc.Namespace)
}
}
}
}
func TestClientEndpoint_GetClientAllocs(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Check that we have no client connections
require.Empty(s1.connectedNodes())
// The RPC is client only, so perform a test using the leader ACL token to
// ensure that even this powerful token cannot access the endpoint.
leaderACLReq := structs.NodeSpecificRequest{
NodeID: uuid.Generate(),
QueryOptions: structs.QueryOptions{
Region: "global",
AuthToken: s1.leaderAcl,
},
}
var leaderACLResp structs.NodeClientAllocsResponse
err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", &leaderACLReq, &leaderACLResp)
must.ErrorContains(t, err, "Permission denied")
// Create the register request
node := mock.Node()
state := s1.fsm.State()
require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 98, node))
// Inject a fake allocation
alloc := mock.Alloc()
alloc.NodeID = node.ID
state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
err = state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
if err != nil {
t.Fatalf("err: %v", err)
}
// Lookup the allocs
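// Node.GetClientAllocs authenticates with the node secret and returns a map
// of alloc IDs to their modify indexes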
get := &structs.NodeSpecificRequest{
NodeID: node.ID,
SecretID: node.SecretID,
QueryOptions: structs.QueryOptions{
Region: "global",
AuthToken: node.SecretID,
},
}
var resp2 structs.NodeClientAllocsResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != 100 {
t.Fatalf("Bad index: %d %d", resp2.Index, 100)
}
if len(resp2.Allocs) != 1 || resp2.Allocs[alloc.ID] != 100 {
t.Fatalf("bad: %#v", resp2.Allocs)
}
// Check that we have the client connections
nodes := s1.connectedNodes()
require.Len(nodes, 1)
require.Contains(nodes, node.ID)
// Lookup node with bad SecretID
get.SecretID = "foobarbaz"
var resp3 structs.NodeClientAllocsResponse
err = msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp3)
if err == nil || !strings.Contains(err.Error(), "does not match") {
t.Fatalf("err: %v", err)
}
// Lookup non-existing node
get.NodeID = uuid.Generate()
var resp4 structs.NodeClientAllocsResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp4); err != nil {
t.Fatalf("err: %v", err)
}
if resp4.Index != 100 {
t.Fatalf("Bad index: %d %d", resp3.Index, 100)
}
if len(resp4.Allocs) != 0 {
t.Fatalf("unexpected node %#v", resp3.Allocs)
}
// Close the connection and check that we remove the client connections
require.Nil(codec.Close())
testutil.WaitForResult(func() (bool, error) {
nodes := s1.connectedNodes()
return len(nodes) == 0, nil
}, func(err error) {
t.Fatalf("should have no clients")
})
}
func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
node.CreateIndex = resp.Index
node.ModifyIndex = resp.Index
// Inject a fake allocation async
now := time.Now().UTC().UnixNano()
alloc := mock.Alloc()
alloc.NodeID = node.ID
alloc.ModifyTime = now
store := s1.fsm.State()
store.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
start := time.Now()
time.AfterFunc(100*time.Millisecond, func() {
err := store.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
if err != nil {
t.Fatalf("err: %v", err)
}
})
// Lookup the allocs in a blocking query
req := &structs.NodeSpecificRequest{
NodeID: node.ID,
SecretID: node.SecretID,
QueryOptions: structs.QueryOptions{
AuthToken: node.SecretID,
Region: "global",
MinQueryIndex: 50,
MaxQueryTime: time.Second,
},
}
var resp2 structs.NodeClientAllocsResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
// Should block at least 100ms
if time.Since(start) < 100*time.Millisecond {
t.Fatalf("too fast")
}
if resp2.Index != 100 {
t.Fatalf("Bad index: %d %d", resp2.Index, 100)
}
if len(resp2.Allocs) != 1 || resp2.Allocs[alloc.ID] != 100 {
t.Fatalf("bad: %#v", resp2.Allocs)
}
iter, err := store.AllocsByIDPrefix(nil, structs.DefaultNamespace, alloc.ID, state.SortDefault)
if err != nil {
t.Fatalf("err: %v", err)
}
getAllocs := func(iter memdb.ResultIterator) []*structs.Allocation {
var allocs []*structs.Allocation
for {
raw := iter.Next()
if raw == nil {
break
}
allocs = append(allocs, raw.(*structs.Allocation))
}
return allocs
}
out := getAllocs(iter)
if len(out) != 1 {
t.Fatalf("Expected to get one allocation but got:%v", out)
}
if out[0].ModifyTime != now {
t.Fatalf("Invalid modify time %v", out[0].ModifyTime)
}
// Alloc updates fire watches
time.AfterFunc(100*time.Millisecond, func() {
allocUpdate := mock.Alloc()
allocUpdate.NodeID = alloc.NodeID
allocUpdate.ID = alloc.ID
allocUpdate.ClientStatus = structs.AllocClientStatusRunning
store.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID))
err := store.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate})
if err != nil {
t.Fatalf("err: %v", err)
}
})
req.QueryOptions.MinQueryIndex = 150
var resp3 structs.NodeClientAllocsResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp3); err != nil {
t.Fatalf("err: %v", err)
}
if time.Since(start) < 100*time.Millisecond {
t.Fatalf("too fast")
}
if resp3.Index != 200 {
t.Fatalf("Bad index: %d %d", resp3.Index, 200)
}
if len(resp3.Allocs) != 1 || resp3.Allocs[alloc.ID] != 200 {
t.Fatalf("bad: %#v", resp3.Allocs)
}
}
func TestClientEndpoint_GetClientAllocs_Blocking_GC(t *testing.T) {
ci.Parallel(t)
assert := assert.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
node.CreateIndex = resp.Index
node.ModifyIndex = resp.Index
// Inject fake allocations async
alloc1 := mock.Alloc()
alloc1.NodeID = node.ID
alloc2 := mock.Alloc()
alloc2.NodeID = node.ID
state := s1.fsm.State()
state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID))
start := time.Now()
time.AfterFunc(100*time.Millisecond, func() {
assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1, alloc2}))
})
// Lookup the allocs in a blocking query
req := &structs.NodeSpecificRequest{
NodeID: node.ID,
SecretID: node.SecretID,
QueryOptions: structs.QueryOptions{
AuthToken: node.SecretID,
Region: "global",
MinQueryIndex: 50,
MaxQueryTime: time.Second,
},
}
var resp2 structs.NodeClientAllocsResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp2))
// Should block at least 100ms
if time.Since(start) < 100*time.Millisecond {
t.Fatalf("too fast")
}
assert.EqualValues(100, resp2.Index)
if assert.Len(resp2.Allocs, 2) {
assert.EqualValues(100, resp2.Allocs[alloc1.ID])
}
// Delete an allocation
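// DeleteEval with an alloc ID removes the alloc from state, which unblocks the query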
time.AfterFunc(100*time.Millisecond, func() {
assert.Nil(state.DeleteEval(200, nil, []string{alloc2.ID}, false))
})
req.QueryOptions.MinQueryIndex = 150
var resp3 structs.NodeClientAllocsResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp3))
if time.Since(start) < 100*time.Millisecond {
t.Fatalf("too fast")
}
assert.EqualValues(200, resp3.Index)
if assert.Len(resp3.Allocs, 1) {
assert.EqualValues(100, resp3.Allocs[alloc1.ID])
}
}
// A MigrateToken should not be created if an allocation shares the same node
// with its previous allocation
func TestClientEndpoint_GetClientAllocs_WithoutMigrateTokens(t *testing.T) {
ci.Parallel(t)
assert := assert.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
node.CreateIndex = resp.Index
node.ModifyIndex = resp.Index
// Inject fake allocations
prevAlloc := mock.Alloc()
prevAlloc.NodeID = node.ID
alloc := mock.Alloc()
alloc.NodeID = node.ID
alloc.PreviousAllocation = prevAlloc.ID
alloc.DesiredStatus = structs.AllocClientStatusComplete
state := s1.fsm.State()
state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{prevAlloc, alloc})
assert.Nil(err)
// Lookup the allocs
get := &structs.NodeSpecificRequest{
NodeID: node.ID,
SecretID: node.SecretID,
QueryOptions: structs.QueryOptions{
AuthToken: node.SecretID,
Region: "global",
},
}
var resp2 structs.NodeClientAllocsResponse
err = msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp2)
assert.Nil(err)
assert.Equal(uint64(100), resp2.Index)
assert.Equal(2, len(resp2.Allocs))
assert.Equal(uint64(100), resp2.Allocs[alloc.ID])
assert.Equal(0, len(resp2.MigrateTokens))
}
func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
node.CreateIndex = resp.Index
node.ModifyIndex = resp.Index
// Inject a fake allocation async
alloc := mock.Alloc()
alloc.NodeID = node.ID
state := s1.fsm.State()
state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
start := time.Now()
time.AfterFunc(100*time.Millisecond, func() {
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
if err != nil {
t.Fatalf("err: %v", err)
}
})
// Lookup the allocs in a blocking query
req := &structs.NodeSpecificRequest{
NodeID: node.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
MinQueryIndex: 50,
MaxQueryTime: time.Second,
},
}
var resp2 structs.NodeAllocsResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
// Should block at least 100ms
if time.Since(start) < 100*time.Millisecond {
t.Fatalf("too fast")
}
if resp2.Index != 100 {
t.Fatalf("Bad index: %d %d", resp2.Index, 100)
}
if len(resp2.Allocs) != 1 || resp2.Allocs[0].ID != alloc.ID {
t.Fatalf("bad: %#v", resp2.Allocs)
}
// Alloc updates fire watches
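// Apply the update through the client-update path (UpdateAllocsFromClient)
// rather than a plan apply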
time.AfterFunc(100*time.Millisecond, func() {
allocUpdate := mock.Alloc()
allocUpdate.NodeID = alloc.NodeID
allocUpdate.ID = alloc.ID
allocUpdate.ClientStatus = structs.AllocClientStatusRunning
state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID))
err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate})
if err != nil {
t.Fatalf("err: %v", err)
}
})
req.QueryOptions.MinQueryIndex = 150
var resp3 structs.NodeAllocsResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp3); err != nil {
t.Fatalf("err: %v", err)
}
if time.Since(start) < 100*time.Millisecond {
t.Fatalf("too fast")
}
if resp3.Index != 200 {
t.Fatalf("Bad index: %d %d", resp3.Index, 200)
}
if len(resp3.Allocs) != 1 || resp3.Allocs[0].ClientStatus != structs.AllocClientStatusRunning {
t.Fatalf("bad: %#v", resp3.Allocs[0])
}
}
func TestNode_UpdateAlloc(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
// Disable scheduling in this test so that the state store doesn't
// accumulate more evals than the unit test expects to add
c.NumSchedulers = 0
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
store := s1.fsm.State()
// Inject mock job
job := mock.Job()
job.ID = "mytestjob"
must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job))
// Inject fake allocations
alloc := mock.Alloc()
alloc.JobID = job.ID
alloc.NodeID = node.ID
must.NoError(t, store.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)))
alloc.TaskGroup = job.TaskGroups[0].Name
alloc2 := mock.Alloc()
alloc2.JobID = job.ID
alloc2.NodeID = node.ID
must.NoError(t, store.UpsertJobSummary(99, mock.JobSummary(alloc2.JobID)))
alloc2.TaskGroup = job.TaskGroups[0].Name
must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc, alloc2}))
// Attempt updates of more than one alloc for the same job
clientAlloc1 := new(structs.Allocation)
*clientAlloc1 = *alloc
clientAlloc1.ClientStatus = structs.AllocClientStatusFailed
clientAlloc2 := new(structs.Allocation)
*clientAlloc2 = *alloc2
clientAlloc2.ClientStatus = structs.AllocClientStatusFailed
// Update the alloc
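// Node.UpdateAlloc batches client updates on the server, so the RPC should
// take at least batchUpdateInterval to return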
update := &structs.AllocUpdateRequest{
Alloc: []*structs.Allocation{clientAlloc1, clientAlloc2},
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
var resp2 structs.NodeAllocsResponse
start := time.Now()
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2))
must.Greater(t, 0, resp2.Index)
must.GreaterEq(t, batchUpdateInterval, time.Since(start))
// Lookup the alloc
ws := memdb.NewWatchSet()
out, err := store.AllocByID(ws, alloc.ID)
must.NoError(t, err)
must.Eq(t, structs.AllocClientStatusFailed, out.ClientStatus)
must.Greater(t, 0, out.ModifyTime)
// Assert that exactly one eval with TriggeredBy EvalTriggerRetryFailedAlloc exists
evaluations, err := store.EvalsByJob(ws, job.Namespace, job.ID)
must.NoError(t, err)
must.Greater(t, 0, len(evaluations))
foundCount := 0
for _, resultEval := range evaluations {
if resultEval.TriggeredBy == structs.EvalTriggerRetryFailedAlloc && resultEval.WaitUntil.IsZero() {
foundCount++
}
}
must.Eq(t, 1, foundCount, must.Sprint("Should create exactly one eval for failed allocs"))
}
func TestNode_UpdateAlloc_NodeNotReady(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Register node.
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
var resp structs.GenericResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
// Inject mock job and allocation.
store := s1.fsm.State()
job := mock.Job()
must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job))
alloc := mock.Alloc()
alloc.JobID = job.ID
alloc.NodeID = node.ID
alloc.TaskGroup = job.TaskGroups[0].Name
alloc.ClientStatus = structs.AllocClientStatusRunning
must.NoError(t, store.UpsertJobSummary(99, mock.JobSummary(alloc.JobID)))
must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}))
downReq := structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusDown,
UpdatedAt: time.Now().UnixNano(),
}
must.NoError(t, store.UpdateNodeStatus(structs.MsgTypeTestSetup, 101, &downReq))
// Try to update alloc.
updatedAlloc := new(structs.Allocation)
*updatedAlloc = *alloc
updatedAlloc.ClientStatus = structs.AllocClientStatusFailed
allocUpdateReq := &structs.AllocUpdateRequest{
Alloc: []*structs.Allocation{updatedAlloc},
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
var allocUpdateResp structs.NodeAllocsResponse
err := msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", allocUpdateReq, &allocUpdateResp)
must.ErrorContains(t, err, "not allowed to update allocs")
// Send request without an explicit node ID.
updatedAlloc.NodeID = ""
err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", allocUpdateReq, &allocUpdateResp)
must.ErrorContains(t, err, "missing node ID")
// Send request with invalid node ID.
updatedAlloc.NodeID = "not-valid"
err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", allocUpdateReq, &allocUpdateResp)
must.ErrorContains(t, err, "node lookup failed")
// Send request with non-existing node ID.
updatedAlloc.NodeID = uuid.Generate()
err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", allocUpdateReq, &allocUpdateResp)
must.ErrorContains(t, err, "not found")
// Mark node as ready and try again.
readyReq := structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusReady,
UpdatedAt: time.Now().UnixNano(),
}
must.NoError(t, store.UpdateNodeStatus(structs.MsgTypeTestSetup, 102, &readyReq))
updatedAlloc.NodeID = node.ID
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", allocUpdateReq, &allocUpdateResp))
}
func TestNode_UpdateAllocServiceRegistrations(t *testing.T) {
ci.Parallel(t)
srv, cleanup := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanup()
codec := rpcClient(t, srv)
testutil.WaitForLeader(t, srv.RPC)
store := srv.fsm.State()
index := uint64(100)
// Inject mock node, job, allocations for that job, and service
// registrations for those allocs
node := mock.Node()
index++
must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node))
job := mock.Job()
job.ID = "mytestjob"
index++
must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, index, nil, job))
alloc0 := mock.Alloc()
alloc0.JobID = job.ID
alloc0.NodeID = node.ID
index++
must.NoError(t, store.UpsertJobSummary(index, mock.JobSummary(alloc0.JobID)))
alloc0.TaskGroup = job.TaskGroups[0].Name
alloc1 := mock.Alloc()
alloc1.JobID = job.ID
alloc1.NodeID = node.ID
index++
must.NoError(t, store.UpsertJobSummary(index, mock.JobSummary(alloc1.JobID)))
alloc1.TaskGroup = job.TaskGroups[0].Name
alloc2 := mock.Alloc() // will have no service registration
alloc2.JobID = job.ID
alloc2.NodeID = node.ID
index++
must.NoError(t, store.UpsertJobSummary(index, mock.JobSummary(alloc2.JobID)))
alloc2.TaskGroup = job.TaskGroups[0].Name
index++
must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc0, alloc1, alloc2}))
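// serviceFor builds a service registration owned by the given alloc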
serviceFor := func(allocID string, port int) *structs.ServiceRegistration {
return &structs.ServiceRegistration{
ID: fmt.Sprintf("_nomad-task-%s-group-api-countdash-api-http", allocID),
ServiceName: "countdash-api",
Namespace: job.Namespace,
NodeID: node.ID,
Datacenter: node.Datacenter,
JobID: job.ID,
AllocID: allocID,
Tags: []string{"bar"},
Address: "192.168.200.200",
Port: port,
}
}
service0 := serviceFor(alloc0.ID, 29001)
service1 := serviceFor(alloc1.ID, 29002)
index++
must.NoError(t, store.UpsertServiceRegistrations(
structs.MsgTypeTestSetup, index, []*structs.ServiceRegistration{service0, service1}))
// A no-op update (no client status changes) must not delete any services
update := &structs.AllocUpdateRequest{
Alloc: []*structs.Allocation{alloc0, alloc1, alloc2},
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
var resp structs.NodeAllocsResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp))
services, err := store.GetServiceRegistrationsByNodeID(nil, node.ID)
must.NoError(t, err)
must.Len(t, 2, services, must.Sprint("no-op update should not have deleted services"))
// fail one allocation
alloc0 = alloc0.Copy()
alloc0.ClientStatus = structs.AllocClientStatusFailed
update = &structs.AllocUpdateRequest{
Alloc: []*structs.Allocation{alloc0, alloc1, alloc2},
WriteRequest: structs.WriteRequest{Region: "global", AuthToken: node.SecretID},
}
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp))
services, err = store.GetServiceRegistrationsByNodeID(nil, node.ID)
must.NoError(t, err)
must.Eq(t, []*structs.ServiceRegistration{service1}, services,
must.Sprint("failing an allocation should result in its service being deleted"))
}
func TestClientEndpoint_BatchUpdate(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Inject a fake allocation
alloc := mock.Alloc()
alloc.NodeID = node.ID
state := s1.fsm.State()
state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
if err != nil {
t.Fatalf("err: %v", err)
}
// Attempt update
clientAlloc := new(structs.Allocation)
*clientAlloc = *alloc
clientAlloc.ClientStatus = structs.AllocClientStatusFailed
// Call to do the batch update
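// Calling batchUpdate directly on the endpoint bypasses the RPC layer; the
// batch future completes once the update has been applied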
bf := structs.NewBatchFuture()
endpoint := NewNodeEndpoint(s1, nil)
endpoint.batchUpdate(bf, []*structs.Allocation{clientAlloc}, nil)
if err := bf.Wait(); err != nil {
t.Fatalf("err: %v", err)
}
if bf.Index() == 0 {
t.Fatalf("Bad index: %d", bf.Index())
}
// Lookup the alloc
ws := memdb.NewWatchSet()
out, err := state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out.ClientStatus != structs.AllocClientStatusFailed {
t.Fatalf("Bad: %#v", out)
}
}
func TestClientEndpoint_CreateNodeEvals(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
idx, err := state.LatestIndex()
must.NoError(t, err)
node := mock.Node()
err = state.UpsertNode(structs.MsgTypeTestSetup, idx, node)
must.NoError(t, err)
idx++
// Inject fake allocations
alloc := mock.Alloc()
alloc.NodeID = node.ID
state.UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, idx, []*structs.Allocation{alloc}))
idx++
sysBatchAlloc := mock.SysBatchAlloc()
sysBatchAlloc.NodeID = node.ID
state.UpsertJobSummary(1, mock.JobSummary(sysBatchAlloc.JobID))
must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, idx, []*structs.Allocation{sysBatchAlloc}))
idx++
// Inject a fake system job.
job := mock.SystemJob()
must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, job))
idx++
// Create some evaluations
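// createNodeEvals should produce one eval for the service alloc's job and one
// for the system job, but none for the sysbatch alloc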
nodeEndpoint := NewNodeEndpoint(s1, nil)
ids, index, err := nodeEndpoint.createNodeEvals(node, 1)
if err != nil {
t.Fatalf("err: %v", err)
}
if index == 0 {
t.Fatalf("bad: %d", index)
}
if len(ids) != 2 {
t.Fatalf("bad: %s", ids)
}
// Lookup the evaluations
ws := memdb.NewWatchSet()
evalByType := make(map[string]*structs.Evaluation, 2)
for _, id := range ids {
eval, err := state.EvalByID(ws, id)
if err != nil {
t.Fatalf("err: %v", err)
}
if eval == nil {
t.Fatalf("expected eval")
}
if old, ok := evalByType[eval.Type]; ok {
t.Fatalf("multiple evals of the same type: %v and %v", old, eval)
}
evalByType[eval.Type] = eval
}
if len(evalByType) != 2 {
t.Fatalf("Expected a service and system job; got %#v", evalByType)
}
// Ensure the evals are correct.
for schedType, eval := range evalByType {
expPriority := alloc.Job.Priority
expJobID := alloc.JobID
if schedType == "system" {
expPriority = job.Priority
expJobID = job.ID
}
// we do not expect a sysbatch eval
must.NotEq(t, structs.JobTypeSysBatch, eval.Type)
t.Logf("checking eval: %v", pretty.Sprint(eval))
must.Eq(t, index, eval.CreateIndex)
must.Eq(t, structs.EvalTriggerNodeUpdate, eval.TriggeredBy)
must.Eq(t, alloc.NodeID, eval.NodeID)
must.Eq(t, uint64(1), eval.NodeModifyIndex)
switch eval.Status {
case structs.EvalStatusPending, structs.EvalStatusComplete:
// success
default:
t.Fatalf("expected pending or complete, found %v", eval.Status)
}
must.Eq(t, expPriority, eval.Priority)
must.Eq(t, expJobID, eval.JobID)
must.NonZero(t, eval.CreateTime)
must.NonZero(t, eval.ModifyTime)
}
}
// TestClientEndpoint_CreateNodeEvals_MultipleNSes asserts that evals are made
// for all jobs across namespaces
func TestClientEndpoint_CreateNodeEvals_MultipleNSes(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
idx := uint64(3)
ns1 := mock.Namespace()
err := state.UpsertNamespaces(idx, []*structs.Namespace{ns1})
require.NoError(t, err)
idx++
node := mock.Node()
err = state.UpsertNode(structs.MsgTypeTestSetup, idx, node)
require.NoError(t, err)
idx++
// Inject a fake system job.
defaultJob := mock.SystemJob()
err = state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, defaultJob)
require.NoError(t, err)
idx++
nsJob := mock.SystemJob()
nsJob.ID = defaultJob.ID
nsJob.Namespace = ns1.Name
err = state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, nsJob)
require.NoError(t, err)
idx++
// Create some evaluations
nodeEndpoint := NewNodeEndpoint(s1, nil)
evalIDs, index, err := nodeEndpoint.createNodeEvals(node, 1)
require.NoError(t, err)
require.NotZero(t, index)
require.Len(t, evalIDs, 2)
byNS := map[string]*structs.Evaluation{}
for _, evalID := range evalIDs {
eval, err := state.EvalByID(nil, evalID)
require.NoError(t, err)
byNS[eval.Namespace] = eval
}
require.Len(t, byNS, 2)
defaultNSEval := byNS[defaultJob.Namespace]
require.NotNil(t, defaultNSEval)
require.Equal(t, defaultJob.ID, defaultNSEval.JobID)
require.Equal(t, defaultJob.Namespace, defaultNSEval.Namespace)
otherNSEval := byNS[nsJob.Namespace]
require.NotNil(t, otherNSEval)
require.Equal(t, nsJob.ID, otherNSEval.JobID)
require.Equal(t, nsJob.Namespace, otherNSEval.Namespace)
}
// TestClientEndpoint_CreateNodeEvals_MultipleDCes asserts that evals are made
// only for the DC the node is in.
func TestClientEndpoint_CreateNodeEvals_MultipleDCes(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
idx, err := state.LatestIndex()
require.NoError(t, err)
node := mock.Node()
node.Datacenter = "test1"
err = state.UpsertNode(structs.MsgTypeTestSetup, idx, node)
require.NoError(t, err)
idx++
// Inject a fake system job in the same dc
defaultJob := mock.SystemJob()
defaultJob.Datacenters = []string{"test1", "test2"}
err = state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, defaultJob)
require.NoError(t, err)
idx++
// Inject a fake system job in a different dc
otherDCJob := mock.SystemJob()
otherDCJob.Datacenters = []string{"test2", "test3"}
err = state.UpsertJob(structs.MsgTypeTestSetup, idx, nil, otherDCJob)
require.NoError(t, err)
idx++
// Create evaluations
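// Only the system job whose datacenters include the node's DC should get an eval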
nodeEndpoint := NewNodeEndpoint(s1, nil)
evalIDs, index, err := nodeEndpoint.createNodeEvals(node, 1)
require.NoError(t, err)
require.NotZero(t, index)
require.Len(t, evalIDs, 1)
eval, err := state.EvalByID(nil, evalIDs[0])
require.NoError(t, err)
require.Equal(t, defaultJob.ID, eval.JobID)
}
func TestClientEndpoint_Evaluate(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Inject a fake node and allocation
alloc := mock.Alloc()
node := mock.Node()
node.ID = alloc.NodeID
state := s1.fsm.State()
err := state.UpsertNode(structs.MsgTypeTestSetup, 1, node)
if err != nil {
t.Fatalf("err: %v", err)
}
state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID))
err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc})
if err != nil {
t.Fatalf("err: %v", err)
}
// Re-evaluate
req := &structs.NodeEvaluateRequest{
NodeID: alloc.NodeID,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.NodeUpdateResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Create some evaluations
ids := resp.EvalIDs
if len(ids) != 1 {
t.Fatalf("bad: %s", ids)
}
// Lookup the evaluation
ws := memdb.NewWatchSet()
eval, err := state.EvalByID(ws, ids[0])
if err != nil {
t.Fatalf("err: %v", err)
}
if eval == nil {
t.Fatalf("expected eval")
}
if eval.CreateIndex != resp.Index {
t.Fatalf("index mis-match")
}
if eval.Priority != alloc.Job.Priority {
t.Fatalf("bad: %#v", eval)
}
if eval.Type != alloc.Job.Type {
t.Fatalf("bad: %#v", eval)
}
if eval.TriggeredBy != structs.EvalTriggerNodeUpdate {
t.Fatalf("bad: %#v", eval)
}
if eval.JobID != alloc.JobID {
t.Fatalf("bad: %#v", eval)
}
if eval.NodeID != alloc.NodeID {
t.Fatalf("bad: %#v", eval)
}
if eval.NodeModifyIndex != 1 {
t.Fatalf("bad: %#v", eval)
}
if eval.Status != structs.EvalStatusPending {
t.Fatalf("bad: %#v", eval)
}
if eval.CreateTime == 0 {
t.Fatalf("CreateTime is unset: %#v", eval)
}
if eval.ModifyTime == 0 {
t.Fatalf("ModifyTime is unset: %#v", eval)
}
}
func TestClientEndpoint_Evaluate_ACL(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
assert := assert.New(t)
// Create the node with an alloc
alloc := mock.Alloc()
node := mock.Node()
node.ID = alloc.NodeID
state := s1.fsm.State()
assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID)), "UpsertJobSummary")
assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}), "UpsertAllocs")
// Create the policy and tokens
validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
// Re-evaluate without a token and expect failure
req := &structs.NodeEvaluateRequest{
NodeID: alloc.NodeID,
WriteRequest: structs.WriteRequest{Region: "global"},
}
{
var resp structs.NodeUpdateResponse
err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp)
assert.NotNil(err, "RPC")
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a valid token
req.AuthToken = validToken.SecretID
{
var resp structs.NodeUpdateResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp), "RPC")
}
// Try with an invalid token
req.AuthToken = invalidToken.SecretID
{
var resp structs.NodeUpdateResponse
err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp)
assert.NotNil(err, "RPC")
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a root token
req.AuthToken = root.SecretID
{
var resp structs.NodeUpdateResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp), "RPC")
}
}

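// TestClientEndpoint_ListNodes asserts that Node.List returns node stubs that
// include host volumes but exclude resources by default, and that ID prefix
// lookups work.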
func TestClientEndpoint_ListNodes(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
node.HostVolumes = map[string]*structs.ClientHostVolumeConfig{
"foo": {
Name: "foo",
Path: "/",
ReadOnly: true,
},
}
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
node.CreateIndex = resp.Index
node.ModifyIndex = resp.Index
// Lookup the node
get := &structs.NodeListRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
}
var resp2 structs.NodeListResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != resp.Index {
t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
}
require.Len(t, resp2.Nodes, 1)
require.Equal(t, node.ID, resp2.Nodes[0].ID)
// #7344 - Assert HostVolumes are included in stub
require.Equal(t, node.HostVolumes, resp2.Nodes[0].HostVolumes)
// #9055 - Assert Resources are *not* included by default
require.Nil(t, resp2.Nodes[0].NodeResources)
require.Nil(t, resp2.Nodes[0].ReservedResources)
// Lookup the node with prefix
get = &structs.NodeListRequest{
QueryOptions: structs.QueryOptions{Region: "global", Prefix: node.ID[:4]},
}
var resp3 structs.NodeListResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp3); err != nil {
t.Fatalf("err: %v", err)
}
if resp3.Index != resp.Index {
t.Fatalf("Bad index: %d %d", resp3.Index, resp2.Index)
}
if len(resp3.Nodes) != 1 {
t.Fatalf("bad: %#v", resp3.Nodes)
}
if resp3.Nodes[0].ID != node.ID {
t.Fatalf("bad: %#v", resp3.Nodes[0])
}
}

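// TestClientEndpoint_ListNodes_Fields asserts that Node.List includes node
// and reserved resources in the stubs when the Resources field is requested.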
func TestClientEndpoint_ListNodes_Fields(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var resp structs.GenericResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
node.CreateIndex = resp.Index
node.ModifyIndex = resp.Index
// Lookup the node with fields
get := &structs.NodeListRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
Fields: &structs.NodeStubFields{
Resources: true,
},
}
var resp2 structs.NodeListResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp2))
require.Equal(t, resp.Index, resp2.Index)
require.Len(t, resp2.Nodes, 1)
require.Equal(t, node.ID, resp2.Nodes[0].ID)
require.NotNil(t, resp2.Nodes[0].NodeResources)
require.NotNil(t, resp2.Nodes[0].ReservedResources)
}

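// TestClientEndpoint_ListNodes_ACL asserts that Node.List requires node read
// permissions: anonymous and deny tokens are rejected while read and root
// tokens succeed.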
func TestClientEndpoint_ListNodes_ACL(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
assert := assert.New(t)
// Create the node
node := mock.Node()
state := s1.fsm.State()
assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
// Create the node policy and tokens
validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyRead))
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyDeny))
// Lookup the node without a token and expect failure
req := &structs.NodeListRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
}
{
var resp structs.NodeListResponse
err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
assert.NotNil(err, "RPC")
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a valid token
req.AuthToken = validToken.SecretID
{
var resp structs.NodeListResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp), "RPC")
assert.Equal(node.ID, resp.Nodes[0].ID)
}
// Try with an invalid token
req.AuthToken = invalidToken.SecretID
{
var resp structs.NodeListResponse
err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
assert.NotNil(err, "RPC")
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a root token
req.AuthToken = root.SecretID
{
var resp structs.NodeListResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp), "RPC")
assert.Equal(node.ID, resp.Nodes[0].ID)
}
}

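// TestClientEndpoint_ListNodes_Blocking asserts that blocking queries against
// Node.List unblock on node registration, drain, status update, and deletion.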
func TestClientEndpoint_ListNodes_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
state := s1.fsm.State()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Disable drainer to prevent drain from completing during test
s1.nodeDrainer.SetEnabled(false, nil)
// Create the node
node := mock.Node()
// Node upsert triggers watches
errCh := make(chan error, 1)
timer := time.AfterFunc(100*time.Millisecond, func() {
errCh <- state.UpsertNode(structs.MsgTypeTestSetup, 2, node)
})
defer timer.Stop()
req := &structs.NodeListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
MinQueryIndex: 1,
},
}
start := time.Now()
var resp structs.NodeListResponse
if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if err := <-errCh; err != nil {
t.Fatalf("error from timer: %v", err)
}
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
if resp.Index != 2 {
t.Fatalf("Bad index: %d %d", resp.Index, 2)
}
if len(resp.Nodes) != 1 || resp.Nodes[0].ID != node.ID {
t.Fatalf("bad: %#v", resp.Nodes)
}
// Node drain updates trigger watches.
time.AfterFunc(100*time.Millisecond, func() {
s := &structs.DrainStrategy{
DrainSpec: structs.DrainSpec{
Deadline: 10 * time.Second,
},
}
errCh <- state.UpdateNodeDrain(structs.MsgTypeTestSetup, 3, node.ID, s, false, 0, nil, nil, "")
})
req.MinQueryIndex = 2
var resp2 structs.NodeListResponse
start = time.Now()
if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if err := <-errCh; err != nil {
t.Fatalf("error from timer: %v", err)
}
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
}
if resp2.Index != 3 {
t.Fatalf("Bad index: %d %d", resp2.Index, 3)
}
if len(resp2.Nodes) != 1 || !resp2.Nodes[0].Drain {
t.Fatalf("bad: %#v", resp2.Nodes)
}
// Node status update triggers watches
triggerReq := structs.NodeUpdateStatusRequest{
NodeID: node.ID,
Status: structs.NodeStatusDown,
}
time.AfterFunc(100*time.Millisecond, func() {
errCh <- state.UpdateNodeStatus(structs.MsgTypeTestSetup, 40, &triggerReq)
})
req.MinQueryIndex = 38
var resp3 structs.NodeListResponse
start = time.Now()
if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp3); err != nil {
t.Fatalf("err: %v", err)
}
if err := <-errCh; err != nil {
t.Fatalf("error from timer: %v", err)
}
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp3)
}
if resp3.Index != 40 {
t.Fatalf("Bad index: %d %d", resp3.Index, 40)
}
if len(resp3.Nodes) != 1 || resp3.Nodes[0].Status != structs.NodeStatusDown {
t.Fatalf("bad: %#v", resp3.Nodes)
}
// Node delete triggers watches.
time.AfterFunc(100*time.Millisecond, func() {
errCh <- state.DeleteNode(structs.MsgTypeTestSetup, 50, []string{node.ID})
})
req.MinQueryIndex = 45
var resp4 structs.NodeListResponse
start = time.Now()
if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp4); err != nil {
t.Fatalf("err: %v", err)
}
if err := <-errCh; err != nil {
t.Fatalf("error from timer: %v", err)
}
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp4)
}
if resp4.Index != 50 {
t.Fatalf("Bad index: %d %d", resp4.Index, 50)
}
if len(resp4.Nodes) != 0 {
t.Fatalf("bad: %#v", resp4.Nodes)
}
}

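// TestClientEndpoint_ShouldCreateNodeEval asserts that spurious node changes
// do not require a new node evaluation while scheduling-relevant changes do.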
func TestClientEndpoint_ShouldCreateNodeEval(t *testing.T) {
ci.Parallel(t)
t.Run("spurious changes don't require eval", func(t *testing.T) {
n1 := mock.Node()
n2 := n1.Copy()
n2.SecretID = uuid.Generate()
n2.Links["vault"] = "links don't get interpolated"
n2.ModifyIndex++
require.False(t, shouldCreateNodeEval(n1, n2))
})
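// Each of the following node changes should require a new node evaluation.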
positiveCases := []struct {
name string
updateFn func(n *structs.Node)
}{
{
"data center changes",
func(n *structs.Node) { n.Datacenter += "u" },
},
{
"attribute change",
func(n *structs.Node) { n.Attributes["test.attribute"] = "something" },
},
{
"meta change",
func(n *structs.Node) { n.Meta["test.meta"] = "something" },
},
{
"drivers health changed",
func(n *structs.Node) { n.Drivers["exec"].Detected = false },
},
{
"new drivers",
func(n *structs.Node) {
n.Drivers["newdriver"] = &structs.DriverInfo{
Detected: true,
Healthy: true,
}
},
},
}
for _, c := range positiveCases {
t.Run(c.name, func(t *testing.T) {
n1 := mock.Node()
n2 := n1.Copy()
c.updateFn(n2)
require.Truef(t, shouldCreateNodeEval(n1, n2), "node changed but without node eval: %v", pretty.Diff(n1, n2))
})
}
}

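// TestClientEndpoint_UpdateAlloc_Evals_ByTrigger asserts which client
// allocation updates cause Node.UpdateAlloc to create an evaluation and that
// the resulting eval carries the expected TriggeredBy reason.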
func TestClientEndpoint_UpdateAlloc_Evals_ByTrigger(t *testing.T) {
ci.Parallel(t)
type testCase struct {
name string
clientStatus string
serverClientStatus string
triggerBy string
missingJob bool
missingAlloc bool
invalidTaskGroup bool
}
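// Each case describes the allocation status reported by the client, the
// status stored on the server, and the eval trigger (if any) expected after
// the update.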
testCases := []testCase{
{
name: "failed-alloc",
clientStatus: structs.AllocClientStatusFailed,
serverClientStatus: structs.AllocClientStatusRunning,
triggerBy: structs.EvalTriggerRetryFailedAlloc,
missingJob: false,
missingAlloc: false,
invalidTaskGroup: false,
},
{
name: "unknown-alloc",
clientStatus: structs.AllocClientStatusRunning,
serverClientStatus: structs.AllocClientStatusUnknown,
triggerBy: structs.EvalTriggerReconnect,
missingJob: false,
missingAlloc: false,
invalidTaskGroup: false,
},
{
name: "orphaned-unknown-alloc",
clientStatus: structs.AllocClientStatusRunning,
serverClientStatus: structs.AllocClientStatusUnknown,
triggerBy: structs.EvalTriggerJobDeregister,
missingJob: true,
missingAlloc: false,
invalidTaskGroup: false,
},
{
name: "running-job",
clientStatus: structs.AllocClientStatusRunning,
serverClientStatus: structs.AllocClientStatusRunning,
triggerBy: "",
missingJob: false,
missingAlloc: false,
invalidTaskGroup: false,
},
{
name: "complete-job",
clientStatus: structs.AllocClientStatusComplete,
serverClientStatus: structs.AllocClientStatusComplete,
triggerBy: "",
missingJob: false,
missingAlloc: false,
invalidTaskGroup: false,
},
{
name: "no-alloc-at-server",
clientStatus: structs.AllocClientStatusUnknown,
serverClientStatus: "",
triggerBy: "",
missingJob: false,
missingAlloc: true,
invalidTaskGroup: false,
},
{
name: "invalid-task-group",
clientStatus: structs.AllocClientStatusUnknown,
serverClientStatus: structs.AllocClientStatusRunning,
triggerBy: "",
missingJob: false,
missingAlloc: false,
invalidTaskGroup: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
s1, cleanupS1 := TestServer(t, func(c *Config) {
// Disable scheduling in this test so the state store doesn't
// accumulate more evals than the unit test expects to add.
c.NumSchedulers = 0
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
testutil.WaitForKeyring(t, s1.RPC, s1.config.Region)
// Create the register request
node := mock.Node()
reg := &structs.NodeRegisterRequest{
Node: node,
WriteRequest: structs.WriteRequest{Region: "global"},
}
// Fetch the response
var nodeResp structs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &nodeResp)
require.NoError(t, err)
fsmState := s1.fsm.State()
job := mock.Job()
job.ID = tc.name + "-test-job"
if !tc.missingJob {
err = fsmState.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job)
require.NoError(t, err)
}
serverAlloc := mock.Alloc()
serverAlloc.JobID = job.ID
serverAlloc.NodeID = node.ID
serverAlloc.ClientStatus = tc.serverClientStatus
serverAlloc.TaskGroup = job.TaskGroups[0].Name
// Create the incoming client alloc.
clientAlloc := serverAlloc.Copy()
clientAlloc.ClientStatus = tc.clientStatus
err = fsmState.UpsertJobSummary(99, mock.JobSummary(serverAlloc.JobID))
require.NoError(t, err)
if tc.invalidTaskGroup {
serverAlloc.TaskGroup = "invalid"
}
if !tc.missingAlloc {
err = fsmState.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{serverAlloc})
require.NoError(t, err)
}
updateReq := &structs.AllocUpdateRequest{
Alloc: []*structs.Allocation{clientAlloc},
WriteRequest: structs.WriteRequest{
Region: "global",
AuthToken: node.SecretID,
},
}
var nodeAllocResp structs.NodeAllocsResponse
err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", updateReq, &nodeAllocResp)
require.NoError(t, err)
require.NotEqual(t, uint64(0), nodeAllocResp.Index)
// If no eval should be created, validate that none were and return.
if tc.triggerBy == "" {
evaluations, err := fsmState.EvalsByJob(nil, job.Namespace, job.ID)
require.NoError(t, err)
require.Len(t, evaluations, 0)
return
}
// Lookup the alloc
updatedAlloc, err := fsmState.AllocByID(nil, serverAlloc.ID)
require.NoError(t, err)
require.Equal(t, tc.clientStatus, updatedAlloc.ClientStatus)
// Assert that exactly one eval with test case TriggeredBy exists
evaluations, err := fsmState.EvalsByJob(nil, job.Namespace, job.ID)
require.NoError(t, err)
require.Equal(t, 1, len(evaluations))
foundCount := 0
for _, resultEval := range evaluations {
if resultEval.TriggeredBy == tc.triggerBy && resultEval.WaitUntil.IsZero() {
foundCount++
}
}
require.Equal(t, 1, foundCount, "should create exactly one eval triggered by %s", tc.triggerBy)
})
}
}

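// TestNode_Register_Introduction asserts Node.Register behavior under each
// node introduction enforcement level when the request carries no auth token,
// a valid introduction identity, or an identity whose claims no longer match
// the node.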
func TestNode_Register_Introduction(t *testing.T) {
ci.Parallel(t)
testServer, _, testServerCleanup := TestACLServer(t, nil)
t.Cleanup(testServerCleanup)
rpcCodec := rpcClient(t, testServer)
testutil.WaitForLeader(t, testServer.RPC)
testutil.WaitForKeyring(t, testServer.RPC, testServer.config.Region)
t.Run("empty auth enforcement none", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementNone
registerReq := structs.NodeRegisterRequest{
Node: mock.Node(),
WriteRequest: structs.WriteRequest{
Region: testServer.Region(),
},
}
var resp structs.NodeUpdateResponse
must.NoError(
t,
msgpackrpc.CallWithCodec(rpcCodec, "Node.Register", &registerReq, &resp),
)
nodeResp, err := testServer.State().NodeByID(nil, registerReq.Node.ID)
must.NoError(t, err)
must.NotNil(t, nodeResp)
must.Eq(t, registerReq.Node.SecretID, nodeResp.SecretID)
})
t.Run("empty auth enforcement warn", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementWarn
registerReq := structs.NodeRegisterRequest{
Node: mock.Node(),
WriteRequest: structs.WriteRequest{
Region: testServer.Region(),
},
}
var resp structs.NodeUpdateResponse
must.NoError(
t,
msgpackrpc.CallWithCodec(rpcCodec, "Node.Register", &registerReq, &resp),
)
nodeResp, err := testServer.State().NodeByID(nil, registerReq.Node.ID)
must.NoError(t, err)
must.NotNil(t, nodeResp)
must.Eq(t, registerReq.Node.SecretID, nodeResp.SecretID)
})
t.Run("empty auth enforcement strict", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementStrict
registerReq := structs.NodeRegisterRequest{
Node: mock.Node(),
WriteRequest: structs.WriteRequest{
Region: testServer.Region(),
},
}
var resp structs.NodeUpdateResponse
must.ErrorContains(
t,
msgpackrpc.CallWithCodec(rpcCodec, "Node.Register", &registerReq, &resp),
"Permission denied",
)
})
t.Run("valid jwt enforcement none", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementNone
mockNode := mock.Node()
introClaims := structs.GenerateNodeIntroductionIdentityClaims(
mockNode.Name,
mockNode.NodePool,
testServer.Region(),
testServer.config.NodeIntroductionConfig.DefaultIdentityTTL,
)
signedJWT, _, err := testServer.encrypter.SignClaims(introClaims)
must.NoError(t, err)
registerReq := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
Region: testServer.Region(),
},
}
var resp structs.NodeUpdateResponse
must.NoError(
t,
msgpackrpc.CallWithCodec(rpcCodec, "Node.Register", &registerReq, &resp),
)
nodeResp, err := testServer.State().NodeByID(nil, registerReq.Node.ID)
must.NoError(t, err)
must.NotNil(t, nodeResp)
must.Eq(t, registerReq.Node.SecretID, nodeResp.SecretID)
})
t.Run("valid jwt enforcement warn", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementWarn
mockNode := mock.Node()
introClaims := structs.GenerateNodeIntroductionIdentityClaims(
mockNode.Name,
mockNode.NodePool,
testServer.Region(),
testServer.config.NodeIntroductionConfig.DefaultIdentityTTL,
)
signedJWT, _, err := testServer.encrypter.SignClaims(introClaims)
must.NoError(t, err)
registerReq := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
Region: testServer.Region(),
},
}
var resp structs.NodeUpdateResponse
must.NoError(
t,
msgpackrpc.CallWithCodec(rpcCodec, "Node.Register", &registerReq, &resp),
)
nodeResp, err := testServer.State().NodeByID(nil, registerReq.Node.ID)
must.NoError(t, err)
must.NotNil(t, nodeResp)
must.Eq(t, registerReq.Node.SecretID, nodeResp.SecretID)
})
t.Run("valid jwt enforcement strict", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementStrict
mockNode := mock.Node()
introClaims := structs.GenerateNodeIntroductionIdentityClaims(
mockNode.Name,
mockNode.NodePool,
testServer.Region(),
testServer.config.NodeIntroductionConfig.DefaultIdentityTTL,
)
signedJWT, _, err := testServer.encrypter.SignClaims(introClaims)
must.NoError(t, err)
registerReq := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
Region: testServer.Region(),
},
}
var resp structs.NodeUpdateResponse
must.NoError(
t,
msgpackrpc.CallWithCodec(rpcCodec, "Node.Register", &registerReq, &resp),
)
nodeResp, err := testServer.State().NodeByID(nil, registerReq.Node.ID)
must.NoError(t, err)
must.NotNil(t, nodeResp)
must.Eq(t, registerReq.Node.SecretID, nodeResp.SecretID)
})
t.Run("invalid jwt enforcement none", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementNone
mockNode := mock.Node()
introClaims := structs.GenerateNodeIntroductionIdentityClaims(
mockNode.Name,
mockNode.NodePool,
testServer.Region(),
testServer.config.NodeIntroductionConfig.DefaultIdentityTTL,
)
signedJWT, _, err := testServer.encrypter.SignClaims(introClaims)
must.NoError(t, err)
mockNode.Name = "changed-name"
registerReq := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
Region: testServer.Region(),
},
}
var resp structs.NodeUpdateResponse
must.NoError(
t,
msgpackrpc.CallWithCodec(rpcCodec, "Node.Register", &registerReq, &resp),
)
nodeResp, err := testServer.State().NodeByID(nil, registerReq.Node.ID)
must.NoError(t, err)
must.NotNil(t, nodeResp)
must.Eq(t, registerReq.Node.SecretID, nodeResp.SecretID)
})
t.Run("invalid jwt enforcement warn", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementWarn
mockNode := mock.Node()
introClaims := structs.GenerateNodeIntroductionIdentityClaims(
mockNode.Name,
mockNode.NodePool,
testServer.Region(),
testServer.config.NodeIntroductionConfig.DefaultIdentityTTL,
)
signedJWT, _, err := testServer.encrypter.SignClaims(introClaims)
must.NoError(t, err)
mockNode.Name = "changed-name"
registerReq := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
Region: testServer.Region(),
},
}
var resp structs.NodeUpdateResponse
must.ErrorContains(
t,
msgpackrpc.CallWithCodec(rpcCodec, "Node.Register", &registerReq, &resp),
"Permission denied",
)
})
t.Run("invalid jwt enforcement strict", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementStrict
mockNode := mock.Node()
introClaims := structs.GenerateNodeIntroductionIdentityClaims(
mockNode.Name,
mockNode.NodePool,
testServer.Region(),
testServer.config.NodeIntroductionConfig.DefaultIdentityTTL,
)
signedJWT, _, err := testServer.encrypter.SignClaims(introClaims)
must.NoError(t, err)
mockNode.Name = "changed-name"
registerReq := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
Region: testServer.Region(),
},
}
var resp structs.NodeUpdateResponse
must.ErrorContains(
t,
msgpackrpc.CallWithCodec(rpcCodec, "Node.Register", &registerReq, &resp),
"Permission denied",
)
})
}

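// TestNode_newRegistrationAllowed exercises the enforcement matrix for new
// node registrations: anonymous requests, authentication errors, and
// introduction claims that match or mismatch the node name and pool.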
func TestNode_newRegistrationAllowed(t *testing.T) {
ci.Parallel(t)
// Generate a stable mock node for testing that includes a populated node
// pool field.
mockNode := structs.MockNode()
mockNode.NodePool = "monitoring"
// Create a test server, so we can sign JWTs.
testServer, _, testServerCleanup := TestACLServer(t, nil)
t.Cleanup(testServerCleanup)
testutil.WaitForLeader(t, testServer.RPC)
testutil.WaitForKeyring(t, testServer.RPC, testServer.config.Region)
nodeEndpoint := &Node{
ctx: &RPCContext{},
logger: testServer.logger,
srv: testServer,
}
t.Run("enforcement none anonymous", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementNone
nodeRegisterRequest := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{},
}
require.True(t, nodeEndpoint.newRegistrationAllowed(
&nodeRegisterRequest,
testServer.auth.AuthenticateNodeIdentityGenerator(nodeEndpoint.ctx, &nodeRegisterRequest),
))
})
t.Run("enforcement warn anonymous", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementWarn
nodeRegisterRequest := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: structs.AnonymousACLToken.SecretID,
},
}
require.True(t, nodeEndpoint.newRegistrationAllowed(
&nodeRegisterRequest,
testServer.auth.AuthenticateNodeIdentityGenerator(nodeEndpoint.ctx, &nodeRegisterRequest),
))
})
t.Run("enforcement strict anonymous", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementStrict
nodeRegisterRequest := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{},
}
require.False(t, nodeEndpoint.newRegistrationAllowed(
&nodeRegisterRequest,
testServer.auth.AuthenticateNodeIdentityGenerator(nodeEndpoint.ctx, &nodeRegisterRequest),
))
})
t.Run("enforcement warn auth error", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementWarn
nodeRegisterRequest := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{},
}
require.False(t, nodeEndpoint.newRegistrationAllowed(
&nodeRegisterRequest,
errors.New("jwt: token is expired"),
))
})
t.Run("enforcement strict auth error", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementStrict
nodeRegisterRequest := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{},
}
require.False(t, nodeEndpoint.newRegistrationAllowed(
&nodeRegisterRequest,
errors.New("jwt: token is expired"),
))
})
t.Run("enforcement warn claims pool mismatch", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementWarn
claims := structs.GenerateNodeIntroductionIdentityClaims(
mockNode.Name,
"wrong-node-pool",
testServer.Region(),
10*time.Minute,
)
signedJWT, _, err := testServer.encrypter.SignClaims(claims)
must.NoError(t, err)
nodeRegisterRequest := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
},
}
require.False(t, nodeEndpoint.newRegistrationAllowed(
&nodeRegisterRequest,
testServer.auth.AuthenticateNodeIdentityGenerator(nodeEndpoint.ctx, &nodeRegisterRequest),
))
})
t.Run("enforcement strict claims pool mismatch", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementStrict
claims := structs.GenerateNodeIntroductionIdentityClaims(
mockNode.Name,
"wrong-node-pool",
testServer.Region(),
10*time.Minute,
)
signedJWT, _, err := testServer.encrypter.SignClaims(claims)
must.NoError(t, err)
nodeRegisterRequest := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
},
}
require.False(t, nodeEndpoint.newRegistrationAllowed(
&nodeRegisterRequest,
testServer.auth.AuthenticateNodeIdentityGenerator(nodeEndpoint.ctx, &nodeRegisterRequest),
))
})
t.Run("enforcement warn claims name mismatch", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementWarn
claims := structs.GenerateNodeIntroductionIdentityClaims(
"wrong-node-name",
mockNode.NodePool,
testServer.Region(),
10*time.Minute,
)
signedJWT, _, err := testServer.encrypter.SignClaims(claims)
must.NoError(t, err)
nodeRegisterRequest := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
},
}
require.False(t, nodeEndpoint.newRegistrationAllowed(
&nodeRegisterRequest,
testServer.auth.AuthenticateNodeIdentityGenerator(nodeEndpoint.ctx, &nodeRegisterRequest),
))
})
t.Run("enforcement strict claims name mismatch", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementStrict
claims := structs.GenerateNodeIntroductionIdentityClaims(
"wrong-node-name",
mockNode.NodePool,
testServer.Region(),
10*time.Minute,
)
signedJWT, _, err := testServer.encrypter.SignClaims(claims)
must.NoError(t, err)
nodeRegisterRequest := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
},
}
require.False(t, nodeEndpoint.newRegistrationAllowed(
&nodeRegisterRequest,
testServer.auth.AuthenticateNodeIdentityGenerator(nodeEndpoint.ctx, &nodeRegisterRequest),
))
})
t.Run("enforcement warn claims match", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementWarn
claims := structs.GenerateNodeIntroductionIdentityClaims(
mockNode.Name,
mockNode.NodePool,
testServer.Region(),
10*time.Minute,
)
signedJWT, _, err := testServer.encrypter.SignClaims(claims)
must.NoError(t, err)
nodeRegisterRequest := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
},
}
require.True(t, nodeEndpoint.newRegistrationAllowed(
&nodeRegisterRequest,
testServer.auth.AuthenticateNodeIdentityGenerator(nodeEndpoint.ctx, &nodeRegisterRequest),
))
})
t.Run("enforcement strict claims match", func(t *testing.T) {
testServer.config.NodeIntroductionConfig.Enforcement = structs.NodeIntroductionEnforcementStrict
claims := structs.GenerateNodeIntroductionIdentityClaims(
mockNode.Name,
mockNode.NodePool,
testServer.Region(),
10*time.Minute,
)
signedJWT, _, err := testServer.encrypter.SignClaims(claims)
must.NoError(t, err)
nodeRegisterRequest := structs.NodeRegisterRequest{
Node: mockNode,
WriteRequest: structs.WriteRequest{
AuthToken: signedJWT,
},
}
require.True(t, nodeEndpoint.newRegistrationAllowed(
&nodeRegisterRequest,
testServer.auth.AuthenticateNodeIdentityGenerator(nodeEndpoint.ctx, &nodeRegisterRequest),
))
})
}

// TestNode_List_PaginationFiltering asserts that API pagination and filtering
// works against the Node.List RPC.
func TestNode_List_PaginationFiltering(t *testing.T) {
ci.Parallel(t)
s1, _, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Build a set of nodes in various datacenters and states. This allows us
// to test different filter queries along with pagination.
mocks := []struct {
id string
dc string
status string
meta map[string]string
}{
{
id: "aaaa1111-3350-4b4b-d185-0e1992ed43e9",
dc: "dc2",
status: structs.NodeStatusDisconnected,
meta: map[string]string{"foo": "bar"},
},
{
id: "aaaaaa22-3350-4b4b-d185-0e1992ed43e9",
dc: "dc1",
status: structs.NodeStatusReady,
meta: map[string]string{"foo": "bar"},
},
{
id: "aaaaaa33-3350-4b4b-d185-0e1992ed43e9",
dc: "dc3",
status: structs.NodeStatusReady,
meta: map[string]string{"foo": "something else"},
},
{
id: "aaaaaaaa-3350-4b4b-d185-0e1992ed43e9",
dc: "dc2",
status: structs.NodeStatusDown,
},
{
id: "aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
dc: "dc3",
status: structs.NodeStatusDown,
},
{
id: "aaaaaacc-3350-4b4b-d185-0e1992ed43e9",
dc: "dc1",
status: structs.NodeStatusReady,
},
}
testState := s1.fsm.State()
for i, m := range mocks {
index := 1000 + uint64(i)
mockNode := mock.Node()
mockNode.ID = m.id
mockNode.Datacenter = m.dc
mockNode.Status = m.status
mockNode.Meta = m.meta
mockNode.CreateIndex = index
require.NoError(t, testState.UpsertNode(structs.MsgTypeTestSetup, index, mockNode))
}
// The server is running with ACLs enabled, so generate a token with node
// read permissions to use for the requests.
aclToken := mock.CreatePolicyAndToken(t, testState, 1100, "test-valid-read",
mock.NodePolicy(acl.PolicyRead)).SecretID
cases := []struct {
name string
filter string
nextToken string
pageSize int32
expectedNextToken string
expectedIDs []string
expectedError string
}{
{
name: "pagination no filter",
pageSize: 2,
expectedNextToken: "aaaaaa33-3350-4b4b-d185-0e1992ed43e9",
expectedIDs: []string{
"aaaa1111-3350-4b4b-d185-0e1992ed43e9",
"aaaaaa22-3350-4b4b-d185-0e1992ed43e9",
},
},
{
name: "pagination no filter with next token",
pageSize: 2,
nextToken: "aaaaaa33-3350-4b4b-d185-0e1992ed43e9",
expectedNextToken: "aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
expectedIDs: []string{
"aaaaaa33-3350-4b4b-d185-0e1992ed43e9",
"aaaaaaaa-3350-4b4b-d185-0e1992ed43e9",
},
},
{
name: "pagination no filter with next token end of pages",
pageSize: 2,
nextToken: "aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
expectedNextToken: "",
expectedIDs: []string{
"aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
"aaaaaacc-3350-4b4b-d185-0e1992ed43e9",
},
},
{
name: "filter no pagination",
filter: `Datacenter == "dc3"`,
expectedIDs: []string{
"aaaaaa33-3350-4b4b-d185-0e1992ed43e9",
"aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
},
},
{
name: "filter and pagination",
filter: `Status != "ready"`,
pageSize: 2,
expectedNextToken: "aaaaaabb-3350-4b4b-d185-0e1992ed43e9",
expectedIDs: []string{
"aaaa1111-3350-4b4b-d185-0e1992ed43e9",
"aaaaaaaa-3350-4b4b-d185-0e1992ed43e9",
},
},
{
name: "filter on meta key",
filter: `Meta["foo"] == "bar"`,
expectedIDs: []string{
"aaaa1111-3350-4b4b-d185-0e1992ed43e9",
"aaaaaa22-3350-4b4b-d185-0e1992ed43e9",
},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
req := &structs.NodeListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Filter: tc.filter,
PerPage: tc.pageSize,
NextToken: tc.nextToken,
},
}
req.AuthToken = aclToken
var resp structs.NodeListResponse
err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
if tc.expectedError == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedError)
return
}
actualIDs := []string{}
for _, node := range resp.Nodes {
actualIDs = append(actualIDs, node.ID)
}
require.Equal(t, tc.expectedIDs, actualIDs, "unexpected page of nodes")
require.Equal(t, tc.expectedNextToken, resp.QueryMeta.NextToken, "unexpected NextToken")
})
}
}

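// TestNode_constructNodeServerInfoResponse_MissingNode asserts that
// constructNodeServerInfoResponse does not panic or error when the node is
// not found in the state snapshot (see GH #17316).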
func TestNode_constructNodeServerInfoResponse_MissingNode(t *testing.T) {
ci.Parallel(t)
s, cleanup := TestServer(t, nil)
defer cleanup()
testutil.WaitForLeader(t, s.RPC)
// Create a node that isn't in the state store to force a not-found lookup
node := mock.Node()
var reply structs.NodeUpdateResponse
nE := NewNodeEndpoint(s, nil)
snap, err := s.State().Snapshot()
must.NoError(t, err)
// Call constructNodeServerInfoResponse. Before GH #17316 this would panic.
require.NoError(t, nE.constructNodeServerInfoResponse(node.ID, snap, &reply))
must.NotNil(t, &reply)
}