From 970e998b00aa09012b99ad643a9fef3a95b78520 Mon Sep 17 00:00:00 2001 From: Luiz Aoqui Date: Thu, 1 Jun 2023 15:55:49 -0400 Subject: [PATCH] node pools: add CRUD API (#17384) --- command/agent/http.go | 3 + command/agent/node_pool_endpoint.go | 121 ++ command/agent/node_pool_endpoint_test.go | 314 +++++ nomad/fsm.go | 36 +- nomad/fsm_test.go | 100 ++ nomad/mock/acl.go | 36 + nomad/node_pool_endpoint.go | 254 ++++ nomad/node_pool_endpoint_test.go | 1096 +++++++++++++++++ nomad/server.go | 1 + nomad/state/state_store_node_pools.go | 32 +- nomad/state/state_store_node_pools_test.go | 67 +- nomad/structs/node_pool.go | 41 + website/content/api-docs/node-pools.mdx | 213 ++++ .../docs/operations/metrics-reference.mdx | 6 + .../docs/other-specifications/acl-policy.mdx | 6 +- website/data/api-docs-nav-data.json | 4 + 16 files changed, 2316 insertions(+), 14 deletions(-) create mode 100644 command/agent/node_pool_endpoint.go create mode 100644 command/agent/node_pool_endpoint_test.go create mode 100644 nomad/node_pool_endpoint.go create mode 100644 nomad/node_pool_endpoint_test.go create mode 100644 website/content/api-docs/node-pools.mdx diff --git a/command/agent/http.go b/command/agent/http.go index 5ccba35d6..e27992ae0 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -386,6 +386,9 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) { s.mux.HandleFunc("/v1/nodes", s.wrap(s.NodesRequest)) s.mux.HandleFunc("/v1/node/", s.wrap(s.NodeSpecificRequest)) + s.mux.HandleFunc("/v1/node/pools", s.wrap(s.NodePoolsRequest)) + s.mux.HandleFunc("/v1/node/pool/", s.wrap(s.NodePoolSpecificRequest)) + s.mux.HandleFunc("/v1/allocations", s.wrap(s.AllocsRequest)) s.mux.HandleFunc("/v1/allocation/", s.wrap(s.AllocSpecificRequest)) diff --git a/command/agent/node_pool_endpoint.go b/command/agent/node_pool_endpoint.go new file mode 100644 index 000000000..8f83bde75 --- /dev/null +++ b/command/agent/node_pool_endpoint.go @@ -0,0 +1,121 @@ +// Copyright (c) HashiCorp, 
Inc. +// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "net/http" + "strings" + + "github.com/hashicorp/nomad/nomad/structs" +) + +func (s *HTTPServer) NodePoolsRequest(resp http.ResponseWriter, req *http.Request) (any, error) { + switch req.Method { + case "GET": + return s.nodePoolList(resp, req) + case "PUT", "POST": + return s.nodePoolUpsert(resp, req, "") + default: + return nil, CodedError(http.StatusMethodNotAllowed, ErrInvalidMethod) + } +} + +func (s *HTTPServer) NodePoolSpecificRequest(resp http.ResponseWriter, req *http.Request) (any, error) { + path := strings.TrimPrefix(req.URL.Path, "/v1/node/pool/") + switch { + default: + return s.nodePoolCRUD(resp, req, path) + } +} + +func (s *HTTPServer) nodePoolCRUD(resp http.ResponseWriter, req *http.Request, poolName string) (any, error) { + switch req.Method { + case "GET": + return s.nodePoolQuery(resp, req, poolName) + case "PUT", "POST": + return s.nodePoolUpsert(resp, req, poolName) + case "DELETE": + return s.nodePoolDelete(resp, req, poolName) + default: + return nil, CodedError(http.StatusMethodNotAllowed, ErrInvalidMethod) + } +} + +func (s *HTTPServer) nodePoolList(resp http.ResponseWriter, req *http.Request) (any, error) { + args := structs.NodePoolListRequest{} + if s.parse(resp, req, &args.Region, &args.QueryOptions) { + return nil, nil + } + + var out structs.NodePoolListResponse + if err := s.agent.RPC("NodePool.List", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + if out.NodePools == nil { + out.NodePools = make([]*structs.NodePool, 0) + } + return out.NodePools, nil +} + +func (s *HTTPServer) nodePoolQuery(resp http.ResponseWriter, req *http.Request, poolName string) (any, error) { + args := structs.NodePoolSpecificRequest{ + Name: poolName, + } + if s.parse(resp, req, &args.Region, &args.QueryOptions) { + return nil, nil + } + + var out structs.SingleNodePoolResponse + if err := s.agent.RPC("NodePool.GetNodePool", &args, &out); err != 
nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + if out.NodePool == nil { + return nil, CodedError(http.StatusNotFound, "node pool not found") + } + + return out.NodePool, nil +} + +func (s *HTTPServer) nodePoolUpsert(resp http.ResponseWriter, req *http.Request, poolName string) (any, error) { + var pool structs.NodePool + if err := decodeBody(req, &pool); err != nil { + return nil, CodedError(http.StatusBadRequest, err.Error()) + } + + if poolName != "" && pool.Name != poolName { + return nil, CodedError(http.StatusBadRequest, "Node pool name does not match request path") + } + + args := structs.NodePoolUpsertRequest{ + NodePools: []*structs.NodePool{&pool}, + } + s.parseWriteRequest(req, &args.WriteRequest) + + var out structs.GenericResponse + if err := s.agent.RPC("NodePool.UpsertNodePools", &args, &out); err != nil { + return nil, err + } + + setIndex(resp, out.Index) + return nil, nil +} + +func (s *HTTPServer) nodePoolDelete(resp http.ResponseWriter, req *http.Request, poolName string) (any, error) { + args := structs.NodePoolDeleteRequest{ + Names: []string{poolName}, + } + s.parseWriteRequest(req, &args.WriteRequest) + + var out structs.GenericResponse + if err := s.agent.RPC("NodePool.DeleteNodePools", &args, &out); err != nil { + return nil, err + } + + setIndex(resp, out.Index) + return nil, nil +} diff --git a/command/agent/node_pool_endpoint_test.go b/command/agent/node_pool_endpoint_test.go new file mode 100644 index 000000000..c8a69ea85 --- /dev/null +++ b/command/agent/node_pool_endpoint_test.go @@ -0,0 +1,314 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package agent + +import ( + "fmt" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/shoenig/test/must" +) + +func TestHTTP_NodePool_List(t *testing.T) { + ci.Parallel(t) + httpTest(t, nil, func(s *TestAgent) { + // Populate state with test data. + pool1 := mock.NodePool() + pool2 := mock.NodePool() + pool3 := mock.NodePool() + args := structs.NodePoolUpsertRequest{ + NodePools: []*structs.NodePool{pool1, pool2, pool3}, + } + var resp structs.GenericResponse + err := s.Agent.RPC("NodePool.UpsertNodePools", &args, &resp) + must.NoError(t, err) + + // Make HTTP request. + req, err := http.NewRequest("GET", "/v1/node/pools", nil) + must.NoError(t, err) + respW := httptest.NewRecorder() + + obj, err := s.Server.NodePoolsRequest(respW, req) + must.NoError(t, err) + + // Expect 5 node pools: 3 created + 2 built-in. + must.SliceLen(t, 5, obj.([]*structs.NodePool)) + + // Verify response index. + gotIndex, err := strconv.ParseUint(respW.HeaderMap.Get("X-Nomad-Index"), 10, 64) + must.NoError(t, err) + must.NonZero(t, gotIndex) + }) +} + +func TestHTTP_NodePool_Info(t *testing.T) { + ci.Parallel(t) + httpTest(t, nil, func(s *TestAgent) { + // Populate state with test data. + pool := mock.NodePool() + args := structs.NodePoolUpsertRequest{ + NodePools: []*structs.NodePool{pool}, + } + var resp structs.GenericResponse + err := s.Agent.RPC("NodePool.UpsertNodePools", &args, &resp) + must.NoError(t, err) + + t.Run("test pool", func(t *testing.T) { + // Make HTTP request for test pool. + req, err := http.NewRequest("GET", fmt.Sprintf("/v1/node/pool/%s", pool.Name), nil) + must.NoError(t, err) + respW := httptest.NewRecorder() + + obj, err := s.Server.NodePoolSpecificRequest(respW, req) + must.NoError(t, err) + + // Verify expected pool is returned. 
+ must.Eq(t, pool, obj.(*structs.NodePool), must.Cmp(cmpopts.IgnoreFields( + structs.NodePool{}, + "CreateIndex", + "ModifyIndex", + ))) + + // Verify response index. + gotIndex, err := strconv.ParseUint(respW.HeaderMap.Get("X-Nomad-Index"), 10, 64) + must.NoError(t, err) + must.NonZero(t, gotIndex) + }) + + t.Run("built-in pool", func(t *testing.T) { + // Make HTTP request for built-in pool. + req, err := http.NewRequest("GET", fmt.Sprintf("/v1/node/pool/%s", structs.NodePoolAll), nil) + must.NoError(t, err) + respW := httptest.NewRecorder() + + obj, err := s.Server.NodePoolSpecificRequest(respW, req) + must.NoError(t, err) + + // Verify expected pool is returned. + must.Eq(t, structs.NodePoolAll, obj.(*structs.NodePool).Name) + + // Verify response index. + gotIndex, err := strconv.ParseUint(respW.HeaderMap.Get("X-Nomad-Index"), 10, 64) + must.NoError(t, err) + must.NonZero(t, gotIndex) + }) + + t.Run("invalid pool", func(t *testing.T) { + // Make HTTP request for built-in pool. + req, err := http.NewRequest("GET", "/v1/node/pool/doesn-exist", nil) + must.NoError(t, err) + respW := httptest.NewRecorder() + + // Verify error. + _, err = s.Server.NodePoolSpecificRequest(respW, req) + must.ErrorContains(t, err, "not found") + + // Verify response index. + gotIndex, err := strconv.ParseUint(respW.HeaderMap.Get("X-Nomad-Index"), 10, 64) + must.NoError(t, err) + must.NonZero(t, gotIndex) + }) + }) +} + +func TestHTTP_NodePool_Create(t *testing.T) { + ci.Parallel(t) + httpTest(t, nil, func(s *TestAgent) { + // Create test node pool. + pool := mock.NodePool() + buf := encodeReq(pool) + req, err := http.NewRequest("PUT", "/v1/node/pools", buf) + must.NoError(t, err) + + respW := httptest.NewRecorder() + obj, err := s.Server.NodePoolsRequest(respW, req) + must.NoError(t, err) + must.Nil(t, obj) + + // Verify response index. 
+ gotIndex, err := strconv.ParseUint(respW.HeaderMap.Get("X-Nomad-Index"), 10, 64) + must.NoError(t, err) + must.NonZero(t, gotIndex) + + // Verify test node pool is in state. + got, err := s.Agent.server.State().NodePoolByName(nil, pool.Name) + must.NoError(t, err) + must.Eq(t, pool, got, must.Cmp(cmpopts.IgnoreFields( + structs.NodePool{}, + "CreateIndex", + "ModifyIndex", + ))) + must.Eq(t, gotIndex, got.CreateIndex) + must.Eq(t, gotIndex, got.ModifyIndex) + }) +} + +func TestHTTP_NodePool_Update(t *testing.T) { + ci.Parallel(t) + httpTest(t, nil, func(s *TestAgent) { + t.Run("success", func(t *testing.T) { + // Populate state with test node pool. + pool := mock.NodePool() + args := structs.NodePoolUpsertRequest{ + NodePools: []*structs.NodePool{pool}, + } + var resp structs.GenericResponse + err := s.Agent.RPC("NodePool.UpsertNodePools", &args, &resp) + must.NoError(t, err) + + // Update node pool. + updated := pool.Copy() + updated.Description = "updated node pool" + updated.Meta = map[string]string{ + "updated": "true", + } + updated.SchedulerConfiguration = &structs.NodePoolSchedulerConfiguration{ + SchedulerAlgorithm: structs.SchedulerAlgorithmBinpack, + } + + buf := encodeReq(updated) + req, err := http.NewRequest("PUT", fmt.Sprintf("/v1/node/pool/%s", updated.Name), buf) + must.NoError(t, err) + + respW := httptest.NewRecorder() + obj, err := s.Server.NodePoolsRequest(respW, req) + must.NoError(t, err) + must.Nil(t, obj) + + // Verify response index. + gotIndex, err := strconv.ParseUint(respW.HeaderMap.Get("X-Nomad-Index"), 10, 64) + must.NoError(t, err) + must.NonZero(t, gotIndex) + + // Verify node pool was updated. 
+ got, err := s.Agent.server.State().NodePoolByName(nil, pool.Name) + must.NoError(t, err) + must.Eq(t, updated, got, must.Cmp(cmpopts.IgnoreFields( + structs.NodePool{}, + "CreateIndex", + "ModifyIndex", + ))) + must.NotEq(t, gotIndex, got.CreateIndex) + must.Eq(t, gotIndex, got.ModifyIndex) + }) + + t.Run("no name in path", func(t *testing.T) { + // Populate state with test node pool. + pool := mock.NodePool() + args := structs.NodePoolUpsertRequest{ + NodePools: []*structs.NodePool{pool}, + } + var resp structs.GenericResponse + err := s.Agent.RPC("NodePool.UpsertNodePools", &args, &resp) + must.NoError(t, err) + + // Update node pool with no name in path. + updated := pool.Copy() + updated.Description = "updated node pool" + updated.Meta = map[string]string{ + "updated": "true", + } + updated.SchedulerConfiguration = &structs.NodePoolSchedulerConfiguration{ + SchedulerAlgorithm: structs.SchedulerAlgorithmBinpack, + } + + buf := encodeReq(updated) + req, err := http.NewRequest("PUT", "/v1/node/pool/", buf) + must.NoError(t, err) + + respW := httptest.NewRecorder() + obj, err := s.Server.NodePoolsRequest(respW, req) + must.NoError(t, err) + must.Nil(t, obj) + + // Verify response index. + gotIndex, err := strconv.ParseUint(respW.HeaderMap.Get("X-Nomad-Index"), 10, 64) + must.NoError(t, err) + must.NonZero(t, gotIndex) + + // Verify node pool was updated. + got, err := s.Agent.server.State().NodePoolByName(nil, pool.Name) + must.NoError(t, err) + must.Eq(t, updated, got, must.Cmp(cmpopts.IgnoreFields( + structs.NodePool{}, + "CreateIndex", + "ModifyIndex", + ))) + }) + + t.Run("wrong name in path", func(t *testing.T) { + // Populate state with test node pool. + pool := mock.NodePool() + args := structs.NodePoolUpsertRequest{ + NodePools: []*structs.NodePool{pool}, + } + var resp structs.GenericResponse + err := s.Agent.RPC("NodePool.UpsertNodePools", &args, &resp) + must.NoError(t, err) + + // Update node pool. 
+ updated := pool.Copy() + updated.Description = "updated node pool" + updated.Meta = map[string]string{ + "updated": "true", + } + updated.SchedulerConfiguration = &structs.NodePoolSchedulerConfiguration{ + SchedulerAlgorithm: structs.SchedulerAlgorithmBinpack, + } + + // Make request with the wrong path. + buf := encodeReq(updated) + req, err := http.NewRequest("PUT", "/v1/node/pool/wrong", buf) + must.NoError(t, err) + + respW := httptest.NewRecorder() + _, err = s.Server.NodePoolSpecificRequest(respW, req) + must.ErrorContains(t, err, "name does not match request path") + + // Verify node pool was NOT updated. + got, err := s.Agent.server.State().NodePoolByName(nil, pool.Name) + must.NoError(t, err) + must.Eq(t, pool, got, must.Cmp(cmpopts.IgnoreFields( + structs.NodePool{}, + "CreateIndex", + "ModifyIndex", + ))) + }) + }) +} + +func TestHTTP_NodePool_Delete(t *testing.T) { + ci.Parallel(t) + httpTest(t, nil, func(s *TestAgent) { + // Populate state with test node pool. + pool := mock.NodePool() + args := structs.NodePoolUpsertRequest{ + NodePools: []*structs.NodePool{pool}, + } + var resp structs.GenericResponse + err := s.Agent.RPC("NodePool.UpsertNodePools", &args, &resp) + must.NoError(t, err) + + // Delete test node pool. + req, err := http.NewRequest("DELETE", fmt.Sprintf("/v1/node/pool/%s", pool.Name), nil) + must.NoError(t, err) + + respW := httptest.NewRecorder() + obj, err := s.Server.NodePoolSpecificRequest(respW, req) + must.NoError(t, err) + must.Nil(t, obj) + + // Verify node pool was deleted. 
+ got, err := s.Agent.server.State().NodePoolByName(nil, pool.Name) + must.NoError(t, err) + must.Nil(t, got) + }) +} diff --git a/nomad/fsm.go b/nomad/fsm.go index 7d726cceb..5de959fb9 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -228,6 +228,10 @@ func (n *nomadFSM) Apply(log *raft.Log) interface{} { return n.applyStatusUpdate(msgType, buf[1:], log.Index) case structs.NodeUpdateDrainRequestType: return n.applyDrainUpdate(msgType, buf[1:], log.Index) + case structs.NodePoolUpsertRequestType: + return n.applyNodePoolUpsert(msgType, buf[1:], log.Index) + case structs.NodePoolDeleteRequestType: + return n.applyNodePoolDelete(msgType, buf[1:], log.Index) case structs.JobRegisterRequestType: return n.applyUpsertJob(msgType, buf[1:], log.Index) case structs.JobDeregisterRequestType: @@ -541,6 +545,36 @@ func (n *nomadFSM) applyNodeEligibilityUpdate(msgType structs.MessageType, buf [ return nil } +func (n *nomadFSM) applyNodePoolUpsert(msgType structs.MessageType, buf []byte, index uint64) interface{} { + defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_node_pool_upsert"}, time.Now()) + var req structs.NodePoolUpsertRequest + if err := structs.Decode(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode request: %v", err)) + } + + if err := n.state.UpsertNodePools(msgType, index, req.NodePools); err != nil { + n.logger.Error("UpsertNodePool failed", "error", err) + return err + } + + return nil +} + +func (n *nomadFSM) applyNodePoolDelete(msgType structs.MessageType, buf []byte, index uint64) interface{} { + defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_node_pool_delete"}, time.Now()) + var req structs.NodePoolDeleteRequest + if err := structs.Decode(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode request: %v", err)) + } + + if err := n.state.DeleteNodePools(msgType, index, req.Names); err != nil { + n.logger.Error("DeleteNodePools failed", "error", err) + return err + } + + return nil +} + func (n *nomadFSM) 
applyUpsertJob(msgType structs.MessageType, buf []byte, index uint64) interface{} { defer metrics.MeasureSince([]string{"nomad", "fsm", "register_job"}, time.Now()) var req structs.JobRegisterRequest @@ -2462,7 +2496,7 @@ func (s *nomadSnapshot) persistNodePools(sink raft.SnapshotSink, encoder *codec.Encoder) error { // Get all node pools. ws := memdb.NewWatchSet() - pools, err := s.snap.NodePools(ws) + pools, err := s.snap.NodePools(ws, state.SortDefault) if err != nil { return err } diff --git a/nomad/fsm_test.go b/nomad/fsm_test.go index 5d3539316..32b6d74d3 100644 --- a/nomad/fsm_test.go +++ b/nomad/fsm_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/raft" "github.com/kr/pretty" @@ -555,6 +556,105 @@ func TestFSM_UpdateNodeEligibility_Unblock(t *testing.T) { }) } +func TestFSM_NodePoolDelete(t *testing.T) { + ci.Parallel(t) + + // Create FSM and populate state. + fsm := testFSM(t) + pools := []*structs.NodePool{ + mock.NodePool(), + mock.NodePool(), + mock.NodePool(), + mock.NodePool(), + } + err := fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1000, pools) + must.NoError(t, err) + + // Delete some of the node pools. + req := structs.NodePoolDeleteRequest{ + Names: []string{pools[0].Name, pools[1].Name}, + } + buf, err := structs.Encode(structs.NodePoolDeleteRequestType, req) + must.NoError(t, err) + + resp := fsm.Apply(makeLog(buf)) + must.Nil(t, resp) + + // Verify selected node pools were deleted. + ws := memdb.NewWatchSet() + for i, pool := range pools { + got, err := fsm.State().NodePoolByName(ws, pool.Name) + must.NoError(t, err) + + switch i { + // Node pools 0 and 1 were deleted. + case 0, 1: + must.Nil(t, got) + default: + must.NotNil(t, got) + } + } +} + +func TestFSM_NodePoolUpsert(t *testing.T) { + ci.Parallel(t) + + // Create FSM and create some node pools. 
+ fsm := testFSM(t) + pools := []*structs.NodePool{ + mock.NodePool(), + mock.NodePool(), + mock.NodePool(), + } + req := structs.NodePoolUpsertRequest{ + NodePools: pools, + } + buf, err := structs.Encode(structs.NodePoolUpsertRequestType, req) + must.NoError(t, err) + + resp := fsm.Apply(makeLog(buf)) + must.Nil(t, resp) + + // Verify node pools were created. + ws := memdb.NewWatchSet() + for _, pool := range pools { + got, err := fsm.State().NodePoolByName(ws, pool.Name) + + must.NoError(t, err) + must.Eq(t, pool, got, must.Cmp(cmpopts.IgnoreFields( + structs.NodePool{}, + "CreateIndex", + "ModifyIndex", + ))) + } + + // Update one of the node pools. + updatedPool := pools[0].Copy() + updatedPool.Description = "updated" + updatedPool.Meta = map[string]string{ + "update": "true", + } + + req = structs.NodePoolUpsertRequest{ + NodePools: []*structs.NodePool{updatedPool}, + } + buf, err = structs.Encode(structs.NodePoolUpsertRequestType, req) + must.NoError(t, err) + + resp = fsm.Apply(makeLog(buf)) + must.Nil(t, resp) + + // Verify node pool was updated. 
+ ws = memdb.NewWatchSet() + got, err := fsm.State().NodePoolByName(ws, updatedPool.Name) + must.NoError(t, err) + must.Eq(t, updatedPool, got, must.Cmp(cmpopts.IgnoreFields( + structs.NodePool{}, + "CreateIndex", + "ModifyIndex", + ))) +} + func TestFSM_RegisterJob(t *testing.T) { ci.Parallel(t) fsm := testFSM(t) diff --git a/nomad/mock/acl.go b/nomad/mock/acl.go index 37d39c47a..6d2d676cb 100644 --- a/nomad/mock/acl.go +++ b/nomad/mock/acl.go @@ -4,6 +4,7 @@ package mock import ( + "bytes" "crypto/rand" "crypto/rsa" "crypto/x509" @@ -11,6 +12,7 @@ import ( "fmt" "strconv" "strings" + "text/template" "time" "github.com/golang-jwt/jwt/v5" @@ -69,6 +71,40 @@ func NamespacePolicyWithVariables(namespace string, policy string, capabilities return policyHCL } +func NodePoolPolicy(pool string, policy string, capabilities []string) string { + tmplStr := ` +node_pool "{{.Label}}" { + {{- if .Policy}} + policy = "{{.Policy}}" + {{end -}} + + {{if gt (len .Capabilities) 0}} + capabilities = [ + {{- range .Capabilities}} + "{{.}}", + {{- end}} + ] + {{- end}} +}` + + tmpl, err := template.New(pool).Parse(tmplStr) + if err != nil { + panic(err) + } + + var buf bytes.Buffer + err = tmpl.Execute(&buf, struct { + Label string + Policy string + Capabilities []string + }{pool, policy, capabilities}) + if err != nil { + panic(err) + } + + return buf.String() +} + // VariablePolicy is a helper for generating the policy hcl for a given // variable block inside of a namespace. func VariablePolicy(svars map[string][]string) string { diff --git a/nomad/node_pool_endpoint.go b/nomad/node_pool_endpoint.go new file mode 100644 index 000000000..708117b4f --- /dev/null +++ b/nomad/node_pool_endpoint.go @@ -0,0 +1,254 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package nomad + +import ( + "net/http" + "time" + + metrics "github.com/armon/go-metrics" + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/nomad/state" + "github.com/hashicorp/nomad/nomad/state/paginator" + "github.com/hashicorp/nomad/nomad/structs" +) + +// NodePool endpoint is used for node pool management and interaction. +type NodePool struct { + srv *Server + ctx *RPCContext +} + +func NewNodePoolEndpoint(srv *Server, ctx *RPCContext) *NodePool { + return &NodePool{srv: srv, ctx: ctx} +} + +// List is used to retrieve multiple node pools. It supports prefix listing, +// pagination, and filtering. +func (n *NodePool) List(args *structs.NodePoolListRequest, reply *structs.NodePoolListResponse) error { + authErr := n.srv.Authenticate(n.ctx, args) + if done, err := n.srv.forward("NodePool.List", args, args, reply); done { + return err + } + n.srv.MeasureRPCRate("node_pool", structs.RateMetricList, args) + if authErr != nil { + return structs.ErrPermissionDenied + } + defer metrics.MeasureSince([]string{"nomad", "node_pool", "list"}, time.Now()) + + // Resolve ACL token to only return node pools it has access to. + aclObj, err := n.srv.ResolveACL(args) + if err != nil { + return err + } + + // Setup blocking query. 
+ sort := state.SortOption(args.Reverse) + opts := blockingOptions{ + queryOpts: &args.QueryOptions, + queryMeta: &reply.QueryMeta, + run: func(ws memdb.WatchSet, store *state.StateStore) error { + var err error + var iter memdb.ResultIterator + + if prefix := args.QueryOptions.Prefix; prefix != "" { + iter, err = store.NodePoolsByNamePrefix(ws, prefix, sort) + } else { + iter, err = store.NodePools(ws, sort) + } + if err != nil { + return err + } + + pageOpts := paginator.StructsTokenizerOptions{WithID: true} + tokenizer := paginator.NewStructsTokenizer(iter, pageOpts) + filters := []paginator.Filter{ + // Filter out node pools based on ACL token capabilities. + paginator.GenericFilter{ + Allow: func(raw interface{}) (bool, error) { + pool := raw.(*structs.NodePool) + return aclObj.AllowNodePoolOperation(pool.Name, acl.NodePoolCapabilityRead), nil + }, + }, + } + + var pools []*structs.NodePool + pager, err := paginator.NewPaginator(iter, tokenizer, filters, args.QueryOptions, + func(raw interface{}) error { + pool := raw.(*structs.NodePool) + pools = append(pools, pool) + return nil + }) + if err != nil { + return structs.NewErrRPCCodedf(http.StatusBadRequest, "failed to create result paginator: %v", err) + } + + nextToken, err := pager.Page() + if err != nil { + return structs.NewErrRPCCodedf(http.StatusBadRequest, "failed to read result page: %v", err) + } + + reply.QueryMeta.NextToken = nextToken + reply.NodePools = pools + + // Use the last index that affected the node pools table. + index, err := store.Index("node_pools") + if err != nil { + return err + } + reply.Index = helper.Max(1, index) + + // Set the query response. + n.srv.setQueryMeta(&reply.QueryMeta) + return nil + }} + return n.srv.blockingRPC(&opts) +} + +// GetNodePool returns the specific node pool requested or nil if the node pool +// doesn't exist. 
+func (n *NodePool) GetNodePool(args *structs.NodePoolSpecificRequest, reply *structs.SingleNodePoolResponse) error { + authErr := n.srv.Authenticate(n.ctx, args) + if done, err := n.srv.forward("NodePool.GetNodePool", args, args, reply); done { + return err + } + n.srv.MeasureRPCRate("node_pool", structs.RateMetricRead, args) + if authErr != nil { + return structs.ErrPermissionDenied + } + defer metrics.MeasureSince([]string{"nomad", "node_pool", "get_node_pool"}, time.Now()) + + // Resolve ACL token and verify it has read capability for the pool. + aclObj, err := n.srv.ResolveACL(args) + if err != nil { + return err + } + if !aclObj.AllowNodePoolOperation(args.Name, acl.NodePoolCapabilityRead) { + return structs.ErrPermissionDenied + } + + // Setup the blocking query. + opts := blockingOptions{ + queryOpts: &args.QueryOptions, + queryMeta: &reply.QueryMeta, + run: func(ws memdb.WatchSet, store *state.StateStore) error { + // Fetch node pool. + pool, err := store.NodePoolByName(ws, args.Name) + if err != nil { + return err + } + + reply.NodePool = pool + if pool != nil { + reply.Index = pool.ModifyIndex + } else { + // Return the last index that affected the node pools table if + // the requested node pool doesn't exist. + index, err := store.Index(state.TableNodePools) + if err != nil { + return err + } + reply.Index = helper.Max(1, index) + } + return nil + }} + return n.srv.blockingRPC(&opts) +} + +// UpsertNodePools creates or updates the given node pools. Built-in node pools +// cannot be updated. 
+func (n *NodePool) UpsertNodePools(args *structs.NodePoolUpsertRequest, reply *structs.GenericResponse) error { + authErr := n.srv.Authenticate(n.ctx, args) + args.Region = n.srv.config.AuthoritativeRegion + if done, err := n.srv.forward("NodePool.UpsertNodePools", args, args, reply); done { + return err + } + n.srv.MeasureRPCRate("node_pool", structs.RateMetricWrite, args) + if authErr != nil { + return structs.ErrPermissionDenied + } + defer metrics.MeasureSince([]string{"nomad", "node_pool", "upsert_node_pools"}, time.Now()) + + // Resolve ACL token and verify it has write capability to all pools in the + // request. + aclObj, err := n.srv.ResolveACL(args) + if err != nil { + return err + } + for _, pool := range args.NodePools { + if !aclObj.AllowNodePoolOperation(pool.Name, acl.NodePoolCapabilityWrite) { + return structs.ErrPermissionDenied + } + } + + // Validate request. + if len(args.NodePools) == 0 { + return structs.NewErrRPCCodedf(http.StatusBadRequest, "must specify at least one node pool") + } + for _, pool := range args.NodePools { + if err := pool.Validate(); err != nil { + return structs.NewErrRPCCodedf(http.StatusBadRequest, "invalid node pool %q: %v", pool.Name, err) + } + if pool.IsBuiltIn() { + return structs.NewErrRPCCodedf(http.StatusBadRequest, "modifying node pool %q is not allowed", pool.Name) + } + } + + // Update via Raft. + _, index, err := n.srv.raftApply(structs.NodePoolUpsertRequestType, args) + if err != nil { + return err + } + reply.Index = index + return nil +} + +// DeleteNodePools deletes the given node pools. Built-in node pools cannot be +// deleted. 
+func (n *NodePool) DeleteNodePools(args *structs.NodePoolDeleteRequest, reply *structs.GenericResponse) error { + authErr := n.srv.Authenticate(n.ctx, args) + args.Region = n.srv.config.AuthoritativeRegion + if done, err := n.srv.forward("NodePool.DeleteNodePools", args, args, reply); done { + return err + } + n.srv.MeasureRPCRate("node_pool", structs.RateMetricWrite, args) + if authErr != nil { + return structs.ErrPermissionDenied + } + defer metrics.MeasureSince([]string{"nomad", "node_pool", "delete_node_pools"}, time.Now()) + + // Resolve ACL token and verify it has delete capability to all pools in + // the request. + aclObj, err := n.srv.ResolveACL(args) + if err != nil { + return err + } + for _, name := range args.Names { + if !aclObj.AllowNodePoolOperation(name, acl.NodePoolCapabilityDelete) { + return structs.ErrPermissionDenied + } + } + + // Validate request. + if len(args.Names) == 0 { + return structs.NewErrRPCCodedf(http.StatusBadRequest, "must specify at least one node pool to delete") + } + for _, name := range args.Names { + if name == "" { + return structs.NewErrRPCCodedf(http.StatusBadRequest, "node pool name is empty") + } + } + + // TODO(luiz): verify that the node pool is not being used. + + // Delete via Raft. + _, index, err := n.srv.raftApply(structs.NodePoolDeleteRequestType, args) + if err != nil { + return err + } + reply.Index = index + return nil +} diff --git a/nomad/node_pool_endpoint_test.go b/nomad/node_pool_endpoint_test.go new file mode 100644 index 000000000..6965f8838 --- /dev/null +++ b/nomad/node_pool_endpoint_test.go @@ -0,0 +1,1096 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package nomad + +import ( + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/hashicorp/go-memdb" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/ci" + "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/testutil" + "github.com/shoenig/test/must" +) + +func TestNodePoolEndpoint_List(t *testing.T) { + ci.Parallel(t) + + s, cleanupS := TestServer(t, nil) + defer cleanupS() + + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + // Populate state with some node pools. + poolDev1 := &structs.NodePool{ + Name: "dev-1", + Description: "test node pool for dev-1", + Meta: map[string]string{ + "env": "dev", + "index": "1", + }, + } + poolDev2 := &structs.NodePool{ + Name: "dev-2", + Description: "test node pool for dev-2", + Meta: map[string]string{ + "env": "dev", + "index": "2", + }, + } + poolDevNoMeta := &structs.NodePool{ + Name: "dev-no-meta", + Description: "test node pool for dev without meta", + } + poolProd1 := &structs.NodePool{ + Name: "prod-1", + Description: "test node pool for prod-1", + Meta: map[string]string{ + "env": "prod", + "index": "1", + }, + } + poolProd2 := &structs.NodePool{ + Name: "prod-2", + Description: "test node pool for prod-2", + Meta: map[string]string{ + "env": "prod", + "index": "2", + }, + } + err := s.fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1000, []*structs.NodePool{ + poolDev1, + poolDev2, + poolDevNoMeta, + poolProd1, + poolProd2, + }) + must.NoError(t, err) + + testCases := []struct { + name string + req *structs.NodePoolListRequest + expectedErr string + expected []string + expectedNextToken string + }{ + { + name: "list all", + req: &structs.NodePoolListRequest{}, + expected: []string{ + "all", "default", + "dev-1", "dev-2", "dev-no-meta", + "prod-1", "prod-2", + }, + }, + { + name: "list all reverse", + req: 
&structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + Reverse: true, + }, + }, + expected: []string{ + "prod-2", "prod-1", + "dev-no-meta", "dev-2", "dev-1", + "default", "all", + }, + }, + { + name: "filter by prefix", + req: &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + Prefix: "prod-", + }, + }, + expected: []string{"prod-1", "prod-2"}, + }, + { + name: "filter by prefix reverse", + req: &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + Prefix: "prod-", + Reverse: true, + }, + }, + expected: []string{"prod-2", "prod-1"}, + }, + { + name: "filter by prefix no match", + req: &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + Prefix: "invalid-", + }, + }, + expected: []string{}, + }, + { + name: "filter by expression", + req: &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + Filter: `Meta.env == "dev"`, + }, + }, + expected: []string{"dev-1", "dev-2"}, + }, + { + name: "filter by expression reverse", + req: &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + Filter: `Meta.env == "dev"`, + Reverse: true, + }, + }, + expected: []string{"dev-2", "dev-1"}, + }, + { + name: "paginate per-page=2 page=1", + req: &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + PerPage: 2, + }, + }, + expected: []string{"all", "default"}, + expectedNextToken: "dev-1", + }, + { + name: "paginate per-page=2 page=2", + req: &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + PerPage: 2, + NextToken: "dev-1", + }, + }, + expected: []string{"dev-1", "dev-2"}, + expectedNextToken: "dev-no-meta", + }, + { + name: "paginate per-page=2 page=last", + req: &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + PerPage: 2, + NextToken: "prod-2", + }, + }, + expected: []string{"prod-2"}, + expectedNextToken: "", + }, + { + name: "paginate reverse per-page=2 page=2", + req: &structs.NodePoolListRequest{ + QueryOptions: 
structs.QueryOptions{ + PerPage: 2, + NextToken: "dev-no-meta", + Reverse: true, + }, + }, + expected: []string{"dev-no-meta", "dev-2"}, + expectedNextToken: "dev-1", + }, + { + name: "paginate prefix per-page=1 page=2", + req: &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + PerPage: 1, + NextToken: "dev-2", + Prefix: "dev", + }, + }, + expected: []string{"dev-2"}, + expectedNextToken: "dev-no-meta", + }, + { + name: "paginate filter per-page=1 page=2", + req: &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + PerPage: 1, + NextToken: "dev-2", + Filter: "Meta is not empty", + }, + }, + expected: []string{"dev-2"}, + expectedNextToken: "prod-1", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Always send the request to the global region. + tc.req.Region = "global" + + // Make node pool list request. + var resp structs.NodePoolListResponse + err := msgpackrpc.CallWithCodec(codec, "NodePool.List", tc.req, &resp) + + // Check response. + if tc.expectedErr != "" { + must.ErrorContains(t, err, tc.expectedErr) + } else { + must.NoError(t, err) + + got := make([]string, len(resp.NodePools)) + for i, pool := range resp.NodePools { + got[i] = pool.Name + } + must.Eq(t, tc.expected, got) + must.Eq(t, tc.expectedNextToken, resp.NextToken) + } + }) + } +} + +func TestNodePoolEndpoint_List_ACL(t *testing.T) { + ci.Parallel(t) + + s, root, cleanupS := TestACLServer(t, nil) + defer cleanupS() + + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + // Populate state with some node pools. 
+ poolDev1 := &structs.NodePool{ + Name: "dev-1", + Description: "test node pool for dev-1", + Meta: map[string]string{ + "env": "dev", + "index": "1", + }, + } + poolDev2 := &structs.NodePool{ + Name: "dev-2", + Description: "test node pool for dev-2", + Meta: map[string]string{ + "env": "dev", + "index": "2", + }, + } + poolProd1 := &structs.NodePool{ + Name: "prod-1", + Description: "test node pool for prod-1", + Meta: map[string]string{ + "env": "prod", + "index": "1", + }, + } + err := s.fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1000, []*structs.NodePool{ + poolDev1, + poolDev2, + poolProd1, + }) + must.NoError(t, err) + + // Create test ACL tokens. + devToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1001, "dev-node-pools", + mock.NodePoolPolicy("dev-*", "read", nil), + ) + prodToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1003, "prod-node-pools", + mock.NodePoolPolicy("prod-*", "read", nil), + ) + noPolicyToken := mock.CreateToken(t, s.fsm.State(), 1005, nil) + allPoolsToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1007, "all-node-pools", + mock.NodePoolPolicy("*", "read", nil), + ) + + testCases := []struct { + name string + token string + expected []string + }{ + { + name: "management token lists all", + token: root.SecretID, + expected: []string{ + "all", "default", + "dev-1", "dev-2", "prod-1", + }, + }, + { + name: "dev token lists dev", + token: devToken.SecretID, + expected: []string{"dev-1", "dev-2"}, + }, + { + name: "prod token lists prod", + token: prodToken.SecretID, + expected: []string{"prod-1"}, + }, + { + name: "all pools token lists all", + token: allPoolsToken.SecretID, + expected: []string{ + "all", "default", + "dev-1", "dev-2", "prod-1", + }, + }, + { + name: "no policy token", + token: noPolicyToken.SecretID, + expected: []string{}, + }, + { + name: "no token", + token: "", + expected: []string{}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Make node pool list 
request. + req := &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: tc.token, + }, + } + var resp structs.NodePoolListResponse + err := msgpackrpc.CallWithCodec(codec, "NodePool.List", req, &resp) + must.NoError(t, err) + + // Check response. + got := make([]string, len(resp.NodePools)) + for i, pool := range resp.NodePools { + got[i] = pool.Name + } + must.Eq(t, tc.expected, got) + }) + } +} + +func TestNodePoolEndpoint_List_BlockingQuery(t *testing.T) { + ci.Parallel(t) + + s, cleanupS := TestServer(t, nil) + defer cleanupS() + + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + // Populate state with some node pools. + // Insert triggers watchers. + pool := mock.NodePool() + time.AfterFunc(100*time.Millisecond, func() { + s.fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1000, []*structs.NodePool{pool}) + }) + + req := &structs.NodePoolListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + MinQueryIndex: 999, + }, + } + var resp structs.NodePoolListResponse + err := msgpackrpc.CallWithCodec(codec, "NodePool.List", req, &resp) + must.NoError(t, err) + must.Eq(t, 1000, resp.Index) + + // Delete triggers watchers. + time.AfterFunc(100*time.Millisecond, func() { + s.fsm.State().DeleteNodePools(structs.MsgTypeTestSetup, 1001, []string{pool.Name}) + }) + + req.MinQueryIndex = 1000 + err = msgpackrpc.CallWithCodec(codec, "NodePool.List", req, &resp) + must.NoError(t, err) + must.Eq(t, 1001, resp.Index) +} + +func TestNodePoolEndpoint_GetNodePool(t *testing.T) { + ci.Parallel(t) + + s, cleanupS := TestServer(t, nil) + defer cleanupS() + + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + // Populate state with some node pools. 
+ pool := mock.NodePool() + err := s.fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1000, []*structs.NodePool{pool}) + must.NoError(t, err) + + testCases := []struct { + name string + pool string + expected *structs.NodePool + }{ + { + name: "get pool", + pool: pool.Name, + expected: pool, + }, + { + name: "non-existing", + pool: "does-not-exist", + expected: nil, + }, + { + name: "empty", + pool: "", + expected: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Make node pool list request. + req := &structs.NodePoolSpecificRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + }, + Name: tc.pool, + } + var resp structs.SingleNodePoolResponse + err := msgpackrpc.CallWithCodec(codec, "NodePool.GetNodePool", req, &resp) + must.NoError(t, err) + + // Check response. + must.Eq(t, tc.expected, resp.NodePool) + }) + } +} + +func TestNodePoolEndpoint_GetNodePool_ACL(t *testing.T) { + ci.Parallel(t) + + s, root, cleanupS := TestACLServer(t, nil) + defer cleanupS() + + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + // Populate state with some node pools. + pool := mock.NodePool() + err := s.fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1000, []*structs.NodePool{pool}) + must.NoError(t, err) + + // Create test ACL tokens. 
+ allowedToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1001, "allow", + mock.NodePoolPolicy(pool.Name, "read", nil), + ) + deniedToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1003, "deny", + mock.NodePoolPolicy(pool.Name, "deny", nil), + ) + noPolicyToken := mock.CreateToken(t, s.fsm.State(), 1005, nil) + allPoolsToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1007, "all-node-pools", + mock.NodePoolPolicy("*", "read", nil), + ) + + testCases := []struct { + name string + token string + pool string + expectedErr string + expected string + }{ + { + name: "management token", + token: root.SecretID, + pool: pool.Name, + expected: pool.Name, + }, + { + name: "allowed token", + token: allowedToken.SecretID, + pool: pool.Name, + expected: pool.Name, + }, + { + name: "all pools token", + token: allPoolsToken.SecretID, + pool: pool.Name, + expected: pool.Name, + }, + { + name: "denied token", + token: deniedToken.SecretID, + pool: pool.Name, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + { + name: "no policy token", + token: noPolicyToken.SecretID, + pool: pool.Name, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + { + name: "invalid token", + token: "invalid", + pool: pool.Name, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + { + name: "no token", + token: "", + pool: pool.Name, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Make node pool list request. 
+ req := &structs.NodePoolSpecificRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: tc.token, + }, + Name: tc.pool, + } + var resp structs.SingleNodePoolResponse + err := msgpackrpc.CallWithCodec(codec, "NodePool.GetNodePool", req, &resp) + + if tc.expectedErr != "" { + must.ErrorContains(t, err, tc.expectedErr) + must.Nil(t, resp.NodePool) + } else { + must.NoError(t, err) + must.Eq(t, tc.expected, resp.NodePool.Name) + } + }) + } +} + +func TestNodePoolEndpoint_GetNodePool_BlockingQuery(t *testing.T) { + ci.Parallel(t) + + s, cleanupS := TestServer(t, nil) + defer cleanupS() + + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + // Upsert triggers watchers. + // Populate state with a node pools. + pool1 := mock.NodePool() + time.AfterFunc(100*time.Millisecond, func() { + s.fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1000, []*structs.NodePool{pool1}) + }) + + // Insert node pool that should not trigger watcher. + pool2 := mock.NodePool() + time.AfterFunc(200*time.Millisecond, func() { + s.fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1001, []*structs.NodePool{pool2}) + }) + + // Update first node pool to trigger watcher. + pool1.Meta["updated"] = "true" + time.AfterFunc(300*time.Millisecond, func() { + s.fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1002, []*structs.NodePool{pool1}) + }) + + req := &structs.NodePoolSpecificRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + MinQueryIndex: 1000, + }, + Name: pool1.Name, + } + var resp structs.SingleNodePoolResponse + err := msgpackrpc.CallWithCodec(codec, "NodePool.GetNodePool", req, &resp) + must.NoError(t, err) + must.Eq(t, 1002, resp.Index) + + // Delete triggers watchers. + // Delete pool that is not being watched. + time.AfterFunc(100*time.Millisecond, func() { + s.fsm.State().DeleteNodePools(structs.MsgTypeTestSetup, 1003, []string{pool2.Name}) + }) + + // Delete pool that is being watched. 
+ time.AfterFunc(200*time.Millisecond, func() { + s.fsm.State().DeleteNodePools(structs.MsgTypeTestSetup, 1004, []string{pool1.Name}) + }) + + req.MinQueryIndex = 1002 + err = msgpackrpc.CallWithCodec(codec, "NodePool.GetNodePool", req, &resp) + must.NoError(t, err) + must.Eq(t, 1004, resp.Index) +} + +func TestNodePoolEndpoint_UpsertNodePools(t *testing.T) { + ci.Parallel(t) + + s, cleanupS := TestServer(t, nil) + defer cleanupS() + + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + // Insert a node pool that we can update. + existing := mock.NodePool() + err := s.fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1000, []*structs.NodePool{existing}) + must.NoError(t, err) + + testCases := []struct { + name string + pools []*structs.NodePool + expectedErr string + }{ + { + name: "insert new pool", + pools: []*structs.NodePool{ + mock.NodePool(), + }, + }, + { + name: "insert multiple pools", + pools: []*structs.NodePool{ + mock.NodePool(), + mock.NodePool(), + }, + }, + { + name: "update pool", + pools: []*structs.NodePool{ + { + Name: existing.Name, + Description: "updated pool", + Meta: map[string]string{ + "updated": "true", + }, + }, + }, + }, + { + name: "invalid pool name", + pools: []*structs.NodePool{ + { + Name: "%invalid%", + }, + }, + expectedErr: "invalid node pool", + }, + { + name: "missing pool name", + pools: []*structs.NodePool{ + { + Name: "", + Description: "no name", + }, + }, + expectedErr: "invalid node pool", + }, + { + name: "empty request", + pools: []*structs.NodePool{}, + expectedErr: "must specify at least one node pool", + }, + { + name: "fail to update built-in pool all", + pools: []*structs.NodePool{ + { + Name: structs.NodePoolAll, + Description: "trying to update built-in pool", + }, + }, + expectedErr: "not allowed", + }, + { + name: "fail to update built-in pool default", + pools: []*structs.NodePool{ + { + Name: structs.NodePoolDefault, + Description: "trying to update built-in pool", + }, + }, + expectedErr: 
"not allowed", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + req := &structs.NodePoolUpsertRequest{ + WriteRequest: structs.WriteRequest{ + Region: "global", + }, + NodePools: tc.pools, + } + var resp structs.GenericResponse + err := msgpackrpc.CallWithCodec(codec, "NodePool.UpsertNodePools", req, &resp) + + if tc.expectedErr != "" { + must.ErrorContains(t, err, tc.expectedErr) + } else { + must.NoError(t, err) + + for _, pool := range tc.pools { + ws := memdb.NewWatchSet() + got, err := s.fsm.State().NodePoolByName(ws, pool.Name) + must.NoError(t, err) + must.Eq(t, pool, got, must.Cmp(cmpopts.IgnoreFields( + structs.NodePool{}, + "CreateIndex", + "ModifyIndex", + ))) + } + } + }) + } +} + +func TestNodePoolEndpoint_UpsertNodePool_ACL(t *testing.T) { + ci.Parallel(t) + + s, root, cleanupS := TestACLServer(t, nil) + defer cleanupS() + + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + // Create test ACL tokens. + devToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1001, "dev-node-pools", + mock.NodePoolPolicy("dev-*", "write", nil), + ) + devSpecificToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1003, "dev-1-node-pools", + mock.NodePoolPolicy("dev-1", "write", nil), + ) + prodToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1005, "prod-node-pools", + mock.NodePoolPolicy("prod-*", "", []string{"write"}), + ) + noPolicyToken := mock.CreateToken(t, s.fsm.State(), 1007, nil) + readOnlyToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1009, "node-pools-read-only", + mock.NodePoolPolicy("*", "read", nil), + ) + + testCases := []struct { + name string + token string + pools []*structs.NodePool + expectedErr string + }{ + { + name: "management token has full access", + token: root.SecretID, + pools: []*structs.NodePool{ + {Name: "dev-1"}, + {Name: "prod-1"}, + {Name: "qa-1"}, + }, + }, + { + name: "allowed by policy", + token: devToken.SecretID, + pools: []*structs.NodePool{ + {Name: "dev-1"}, + }, + }, + 
{ + name: "allowed by capability", + token: prodToken.SecretID, + pools: []*structs.NodePool{ + {Name: "prod-1"}, + }, + }, + { + name: "allowed by exact match", + token: devSpecificToken.SecretID, + pools: []*structs.NodePool{ + {Name: "dev-1"}, + }, + }, + { + name: "token restricted to wildcard", + token: devToken.SecretID, + pools: []*structs.NodePool{ + {Name: "dev-1"}, // ok + {Name: "prod-1"}, // not ok + }, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + { + name: "token restricted if not exact match", + token: devSpecificToken.SecretID, + pools: []*structs.NodePool{ + {Name: "dev-2"}, + }, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + { + name: "no token", + token: "", + pools: []*structs.NodePool{ + {Name: "dev-2"}, + }, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + { + name: "no policy", + token: noPolicyToken.SecretID, + pools: []*structs.NodePool{ + {Name: "dev-1"}, + }, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + { + name: "no write", + token: readOnlyToken.SecretID, + pools: []*structs.NodePool{ + {Name: "dev-1"}, + }, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + req := &structs.NodePoolUpsertRequest{ + WriteRequest: structs.WriteRequest{ + Region: "global", + AuthToken: tc.token, + }, + NodePools: tc.pools, + } + var resp structs.GenericResponse + err := msgpackrpc.CallWithCodec(codec, "NodePool.UpsertNodePools", req, &resp) + + if tc.expectedErr != "" { + must.ErrorContains(t, err, tc.expectedErr) + } else { + must.NoError(t, err) + + for _, pool := range tc.pools { + ws := memdb.NewWatchSet() + got, err := s.fsm.State().NodePoolByName(ws, pool.Name) + must.NoError(t, err) + must.Eq(t, pool, got, must.Cmp(cmpopts.IgnoreFields( + structs.NodePool{}, + "CreateIndex", + "ModifyIndex", + ))) + } + } + }) + } +} + +func TestNodePoolEndpoint_DeleteNodePools(t *testing.T) { + ci.Parallel(t) + + s, cleanupS := 
TestServer(t, nil) + defer cleanupS() + + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + // Insert a few node pools that we can delete. + var pools []*structs.NodePool + for i := 0; i < 10; i++ { + pools = append(pools, mock.NodePool()) + } + + testCases := []struct { + name string + pools []string + expectedErr string + }{ + { + name: "delete existing pool", + pools: []string{pools[0].Name}, + }, + { + name: "delete multiple pools", + pools: []string{ + pools[1].Name, + pools[2].Name, + }, + }, + { + name: "pool doesn't exist", + pools: []string{"doesnt-exist"}, + expectedErr: "not found", + }, + { + name: "empty request", + pools: []string{}, + expectedErr: "must specify at least one node pool to delete", + }, + { + name: "empty name", + pools: []string{""}, + expectedErr: "node pool name is empty", + }, + { + name: "can't delete built-in pool all", + pools: []string{"all"}, + expectedErr: "not allowed", + }, + { + name: "can't delete built-in pool default", + pools: []string{"default"}, + expectedErr: "not allowed", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := s.fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1000, pools) + must.NoError(t, err) + + req := &structs.NodePoolDeleteRequest{ + WriteRequest: structs.WriteRequest{ + Region: "global", + }, + Names: tc.pools, + } + var resp structs.GenericResponse + err = msgpackrpc.CallWithCodec(codec, "NodePool.DeleteNodePools", req, &resp) + + if tc.expectedErr != "" { + must.ErrorContains(t, err, tc.expectedErr) + } else { + must.NoError(t, err) + + for _, pool := range tc.pools { + ws := memdb.NewWatchSet() + got, err := s.fsm.State().NodePoolByName(ws, pool) + must.NoError(t, err) + must.Nil(t, got) + } + } + }) + } +} + +func TestNodePoolEndpoint_DeleteNodePools_ACL(t *testing.T) { + ci.Parallel(t) + + s, root, cleanupS := TestACLServer(t, nil) + defer cleanupS() + + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + // Create test 
ACL tokens. + devToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1001, "dev-node-pools", + mock.NodePoolPolicy("dev-*", "write", nil), + ) + devSpecificToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1003, "dev-1-node-pools", + mock.NodePoolPolicy("dev-1", "write", nil), + ) + prodToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1005, "prod-node-pools", + mock.NodePoolPolicy("prod-*", "", []string{"delete"}), + ) + noPolicyToken := mock.CreateToken(t, s.fsm.State(), 1007, nil) + noDeleteToken := mock.CreatePolicyAndToken(t, s.fsm.State(), 1009, "node-pools-no-delete", + mock.NodePoolPolicy("*", "", []string{"read", "write"}), + ) + + // Insert a few node pools that we can delete. + var pools []*structs.NodePool + for i := 0; i < 5; i++ { + devPool := mock.NodePool() + devPool.Name = fmt.Sprintf("dev-%d", i) + pools = append(pools, devPool) + + prodPool := mock.NodePool() + prodPool.Name = fmt.Sprintf("prod-%d", i) + pools = append(pools, prodPool) + + qaPool := mock.NodePool() + qaPool.Name = fmt.Sprintf("qa-%d", i) + pools = append(pools, qaPool) + } + + testCases := []struct { + name string + token string + pools []string + expectedErr string + }{ + { + name: "management token has full access", + token: root.SecretID, + pools: []string{ + "dev-1", + "prod-1", + "qa-1", + }, + }, + { + name: "allowed by write policy", + token: devToken.SecretID, + pools: []string{"dev-1"}, + }, + { + name: "allowed by delete capability", + token: prodToken.SecretID, + pools: []string{"prod-1"}, + }, + { + name: "allowed by exact match", + token: devSpecificToken.SecretID, + pools: []string{"dev-1"}, + }, + { + name: "restricted by wildcard", + token: devToken.SecretID, + pools: []string{ + "dev-1", // ok + "prod-1", // not ok + }, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + { + name: "restricted if not exact match", + token: devSpecificToken.SecretID, + pools: []string{"dev-2"}, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + { + name: "no 
token", + token: "", + pools: []string{"dev-1"}, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + { + name: "no policy", + token: noPolicyToken.SecretID, + pools: []string{"dev-1"}, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + { + name: "no delete", + token: noDeleteToken.SecretID, + pools: []string{"dev-1"}, + expectedErr: structs.ErrPermissionDenied.Error(), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := s.fsm.State().UpsertNodePools(structs.MsgTypeTestSetup, 1000, pools) + must.NoError(t, err) + + req := &structs.NodePoolDeleteRequest{ + WriteRequest: structs.WriteRequest{ + Region: "global", + AuthToken: tc.token, + }, + Names: tc.pools, + } + var resp structs.GenericResponse + err = msgpackrpc.CallWithCodec(codec, "NodePool.DeleteNodePools", req, &resp) + + if tc.expectedErr != "" { + must.ErrorContains(t, err, tc.expectedErr) + } else { + must.NoError(t, err) + + for _, pool := range tc.pools { + ws := memdb.NewWatchSet() + got, err := s.fsm.State().NodePoolByName(ws, pool) + must.NoError(t, err) + must.Nil(t, got) + } + } + }) + } +} diff --git a/nomad/server.go b/nomad/server.go index 4fec6a678..bd819f11f 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -1286,6 +1286,7 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { _ = server.Register(NewKeyringEndpoint(s, ctx, s.encrypter)) _ = server.Register(NewNamespaceEndpoint(s, ctx)) _ = server.Register(NewNodeEndpoint(s, ctx)) + _ = server.Register(NewNodePoolEndpoint(s, ctx)) _ = server.Register(NewPeriodicEndpoint(s, ctx)) _ = server.Register(NewPlanEndpoint(s, ctx)) _ = server.Register(NewRegionEndpoint(s, ctx)) diff --git a/nomad/state/state_store_node_pools.go b/nomad/state/state_store_node_pools.go index 0a481c024..c6a56d271 100644 --- a/nomad/state/state_store_node_pools.go +++ b/nomad/state/state_store_node_pools.go @@ -31,10 +31,18 @@ func (s *StateStore) nodePoolInit() error { } // NodePools returns an 
iterator over all node pools. -func (s *StateStore) NodePools(ws memdb.WatchSet) (memdb.ResultIterator, error) { +func (s *StateStore) NodePools(ws memdb.WatchSet, sort SortOption) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() - iter, err := txn.Get(TableNodePools, "id") + var iter memdb.ResultIterator + var err error + + switch sort { + case SortReverse: + iter, err = txn.GetReverse(TableNodePools, "id") + default: + iter, err = txn.Get(TableNodePools, "id") + } if err != nil { return nil, fmt.Errorf("node pools lookup failed: %w", err) } @@ -63,15 +71,23 @@ func (s *StateStore) NodePoolByName(ws memdb.WatchSet, name string) (*structs.No // NodePoolsByNamePrefix returns an interator over all node pools that match // the given name prefix. -func (s *StateStore) NodePoolsByNamePrefix(ws memdb.WatchSet, namePrefix string) (memdb.ResultIterator, error) { +func (s *StateStore) NodePoolsByNamePrefix(ws memdb.WatchSet, namePrefix string, sort SortOption) (memdb.ResultIterator, error) { txn := s.db.ReadTxn() - iter, err := txn.Get(TableNodePools, "id_prefix", namePrefix) - if err != nil { - return nil, fmt.Errorf("node pool lookup failed: %w", err) - } - ws.Add(iter.WatchCh()) + var iter memdb.ResultIterator + var err error + switch sort { + case SortReverse: + iter, err = txn.GetReverse(TableNodePools, "id_prefix", namePrefix) + default: + iter, err = txn.Get(TableNodePools, "id_prefix", namePrefix) + } + if err != nil { + return nil, fmt.Errorf("node pools prefix lookup failed: %w", err) + } + + ws.Add(iter.WatchCh()) return iter, nil } diff --git a/nomad/state/state_store_node_pools_test.go b/nomad/state/state_store_node_pools_test.go index b208800db..e1841ab3f 100644 --- a/nomad/state/state_store_node_pools_test.go +++ b/nomad/state/state_store_node_pools_test.go @@ -4,6 +4,7 @@ package state import ( + "fmt" "testing" memdb "github.com/hashicorp/go-memdb" @@ -26,7 +27,7 @@ func TestStateStore_NodePools(t *testing.T) { // Create a watchset to test that getters 
don't cause it to fire. ws := memdb.NewWatchSet() - iter, err := state.NodePools(ws) + iter, err := state.NodePools(ws, SortDefault) must.NoError(t, err) // Verify all pools are returned. @@ -55,6 +56,57 @@ func TestStateStore_NodePools(t *testing.T) { } } +func TestStateStore_NodePools_Ordering(t *testing.T) { + ci.Parallel(t) + + // Create test node pools with stable sortable names. + state := testStateStore(t) + pools := make([]*structs.NodePool, 10) + for i := 0; i < 5; i++ { + pool := mock.NodePool() + pool.Name = fmt.Sprintf("%02d", i+1) + pools[i] = pool + } + must.NoError(t, state.UpsertNodePools(structs.MsgTypeTestSetup, 1000, pools)) + + testCases := []struct { + name string + order SortOption + expected []string + }{ + { + name: "default order", + order: SortDefault, + expected: []string{"01", "02", "03", "04", "05"}, + }, + { + name: "reverse order", + order: SortReverse, + expected: []string{"05", "04", "03", "02", "01"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ws := memdb.NewWatchSet() + iter, err := state.NodePools(ws, tc.order) + must.NoError(t, err) + + var got []string + for raw := iter.Next(); raw != nil; raw = iter.Next() { + pool := raw.(*structs.NodePool) + if pool.IsBuiltIn() { + continue + } + + got = append(got, pool.Name) + } + + must.Eq(t, got, tc.expected) + }) + } +} + func TestStateStore_NodePool_ByName(t *testing.T) { ci.Parallel(t) @@ -145,25 +197,30 @@ func TestStateStore_NodePool_ByNamePrefix(t *testing.T) { name string prefix string expected []string + order SortOption }{ { name: "multiple prefix match", prefix: "prod", + order: SortDefault, expected: []string{"prod-1", "prod-2", "prod-3"}, }, { name: "single prefix match", prefix: "qa", + order: SortDefault, expected: []string{"qa"}, }, { name: "no match", prefix: "nope", + order: SortDefault, expected: []string{}, }, { name: "empty prefix", prefix: "", + order: SortDefault, expected: []string{ "all", "default", @@ -175,12 +232,18 @@ 
func TestStateStore_NodePool_ByNamePrefix(t *testing.T) { "qa", }, }, + { + name: "reverse order", + prefix: "prod", + order: SortReverse, + expected: []string{"prod-3", "prod-2", "prod-1"}, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { ws := memdb.NewWatchSet() - iter, err := state.NodePoolsByNamePrefix(ws, tc.prefix) + iter, err := state.NodePoolsByNamePrefix(ws, tc.prefix, tc.order) must.NoError(t, err) got := []string{} diff --git a/nomad/structs/node_pool.go b/nomad/structs/node_pool.go index 37189ac69..d6af4c0c3 100644 --- a/nomad/structs/node_pool.go +++ b/nomad/structs/node_pool.go @@ -52,6 +52,11 @@ type NodePool struct { ModifyIndex uint64 } +// GetID implements the IDGetter interface required for pagination. +func (n *NodePool) GetID() string { + return n.Name +} + // Validate returns an error if the node pool is invalid. func (n *NodePool) Validate() error { var mErr *multierror.Error @@ -133,3 +138,39 @@ func (n *NodePoolSchedulerConfiguration) Validate() error { return mErr.ErrorOrNil() } + +// NodePoolListRequest is used to list node pools. +type NodePoolListRequest struct { + QueryOptions +} + +// NodePoolListResponse is the response to node pools list request. +type NodePoolListResponse struct { + NodePools []*NodePool + QueryMeta +} + +// NodePoolSpecificRequest is used to make a request for a specific node pool. +type NodePoolSpecificRequest struct { + Name string + QueryOptions +} + +// SingleNodePoolResponse is the response to a specific node pool request. +type SingleNodePoolResponse struct { + NodePool *NodePool + QueryMeta +} + +// NodePoolUpsertRequest is used to make a request to insert or update a node +// pool. +type NodePoolUpsertRequest struct { + NodePools []*NodePool + WriteRequest +} + +// NodePoolDeleteRequest is used to make a request to delete a node pool. 
+type NodePoolDeleteRequest struct { + Names []string + WriteRequest +} diff --git a/website/content/api-docs/node-pools.mdx b/website/content/api-docs/node-pools.mdx new file mode 100644 index 000000000..3b00286dd --- /dev/null +++ b/website/content/api-docs/node-pools.mdx @@ -0,0 +1,213 @@ +--- +layout: api +page_title: Node Pools - HTTP API +description: The /node/pool endpoints are used to query for and interact with node pools. +--- + +# Node Pools HTTP API + +The `/node/pool` endpoints are used to query for and interact with node pools. + +## List Node Pools + +This endpoint lists all node pools. + +| Method | Path | Produces | +| ------ | ---------------- | ------------------ | +| `GET` | `/v1/node/pools` | `application/json` | + +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). + +| Blocking Queries | ACL Required | +| ---------------- | ----------------- | +| `YES` | `node_pool:read` | + +### Parameters + +- `prefix` `(string: "")`- Specifies a string to filter node pools based on a + name prefix. This is specified as a query string parameter. + +- `next_token` `(string: "")` - This endpoint supports paging. The `next_token` + parameter accepts a string which identifies the next expected node pool. This + value can be obtained from the `X-Nomad-NextToken` header from the previous + response. + +- `per_page` `(int: 0)` - Specifies a maximum number of node pools to return + for this request. If omitted, the response is not paginated. The value of the + `X-Nomad-NextToken` header of the last response can be used as the + `next_token` of the next request to fetch additional pages. + +- `filter` `(string: "")` - Specifies the [expression](/nomad/api-docs#filtering) + used to filter the results. Consider using pagination to reduce resource used + to serve the request.
+ +### Sample Request + +```shell-session +$ nomad operator api '/v1/node/pools' +``` + +```shell-session +$ nomad operator api '/v1/node/pools?prefix=prod' +``` + +```shell-session +$ nomad operator api -filter 'Meta.env == "production"' '/v1/node/pools?per_page=5' +``` + +### Sample Response + +```json +[ + { + "CreateIndex": 11, + "Description": "Production workloads", + "Meta": { + "env": "production", + "team": "engineering" + }, + "ModifyIndex": 11, + "Name": "prod-eng", + "SchedulerConfiguration": { + "SchedulerAlgorithm": "spread" + } + } +] +``` + +## Read Node Pool + +This endpoint queries information about a node pool. + +| Method | Path | Produces | +| ------ | -------------------------- | ------------------ | +| `GET` | `/v1/node/pool/:node_pool` | `application/json` | + +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). + +| Blocking Queries | ACL Required | +| ---------------- | ----------------- | +| `YES` | `node_pool:read` | + +### Parameters + +- `:node_pool` `(string: )`- Specifies the node pool to query. + +### Sample Request + +```shell-session +$ nomad operator api /v1/node/pool/prod-eng +``` + +### Sample Response + +```json +{ + "CreateIndex": 11, + "Description": "Production workloads", + "Meta": { + "env": "production", + "team": "engineering" + }, + "ModifyIndex": 11, + "Name": "prod-eng", + "SchedulerConfiguration": { + "SchedulerAlgorithm": "spread" + } +} +``` + +## Create or Update Node Pool + +This endpoint is used to create or update a node pool. + +| Method | Path | Produces | +| ------ | ------------------------------------------------- | ------------------ | +| `POST` | `/v1/node/pool/:node_pool`
`/v1/node/pool` | `application/json` | + +The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). + +| Blocking Queries | ACL Required | +| ---------------- | ----------------- | +| `NO` | `node_pool:write` | + + +### Parameters + +- `Name` `(string: )` - Specifies the node pool to create or update. + Must have fewer than 128 characters. Only alphanumeric, `-`, and `_` are + allowed. + +- `Description` `(string: "")` - Specifies the optional human-readable + description of the node pool. Must have fewer than 256 characters. + +- `Meta` `(object: null)` - Optional object with string keys and values of + metadata to attach to the node pool. Node pool metadata is not used by + Nomad and is intended for use by operators and third party tools. + +- `SchedulerConfiguration` `(SchedulerConfiguration: )` - Specifies + custom configuration applied when scheduling allocations in the node pool. + + - `SchedulerAlgorithm` `(string: ""`) - The algorithm used by the scheduler + when scoring nodes. Possible values are `binpack` or `spread`. If not + specified the [global cluster configuration value][api_scheduler_algo] is used. + +### Sample Payload + +```json +{ + "Name": "prod-eng", + "Description": "Production workloads", + "Meta": { + "env": "production", + "team": "engineering" + }, + "SchedulerConfiguration": { + "SchedulerAlgorithm": "spread" + } +} +``` + +### Sample Request + +```shell-session +$ cat pool.json | nomad operator api /v1/node/pools +``` + +```shell-session +$ cat pool.json | nomad operator api /v1/node/pool/prod-eng +``` + +## Delete Node Pool + +This endpoint is used to delete a node pool. 
+ +| Method | Path | Produces | +| -------- | -------------------------- | ------------------ | +| `DELETE` | `/v1/node/pool/:node_pool` | `application/json` | +
+The table below shows this endpoint's support for +[blocking queries](/nomad/api-docs#blocking-queries) and +[required ACLs](/nomad/api-docs#acls). + +| Blocking Queries | ACL Required | +| ---------------- | ------------ | +| `NO` | `management` | + +### Parameters + +- `:node_pool` `(string: <required>)` - Specifies the node pool to delete. + +### Sample Request + +```shell-session +$ nomad operator api -X DELETE /v1/node/pool/prod-eng +``` + +[api_scheduler_algo]: /nomad/api-docs/operator/scheduler#scheduleralgorithm diff --git a/website/content/docs/operations/metrics-reference.mdx b/website/content/docs/operations/metrics-reference.mdx index 7ec7bcbcb..48df00c51 100644 --- a/website/content/docs/operations/metrics-reference.mdx +++ b/website/content/docs/operations/metrics-reference.mdx @@ -339,6 +339,8 @@ those listed in [Key Metrics](#key-metrics) above.
| `nomad.nomad.fsm.apply_job_stability` | Time elapsed to apply `ApplyJobStability` raft entry | Nanoseconds | Summary | host | | `nomad.nomad.fsm.apply_namespace_delete` | Time elapsed to apply `ApplyNamespaceDelete` raft entry | Nanoseconds | Summary | host | | `nomad.nomad.fsm.apply_namespace_upsert` | Time elapsed to apply `ApplyNamespaceUpsert` raft entry | Nanoseconds | Summary | host | +| `nomad.nomad.fsm.apply_node_pool_upsert` | Time elapsed to apply `ApplyNodePoolUpsert` raft entry | Nanoseconds | Summary | host | +| `nomad.nomad.fsm.apply_node_pool_delete` | Time elapsed to apply `ApplyNodePoolDelete` raft entry | Nanoseconds | Summary | host | | `nomad.nomad.fsm.apply_plan_results` | Time elapsed to apply `ApplyPlanResults` raft entry | Nanoseconds | Summary | host | | `nomad.nomad.fsm.apply_scheduler_config` | Time elapsed to apply `ApplySchedulerConfig` raft entry | Nanoseconds | Summary | host | | `nomad.nomad.fsm.autopilot` | Time elapsed to apply `Autopilot` raft entry | Nanoseconds | Summary | host | @@ -389,6 +391,10 @@ those listed in [Key Metrics](#key-metrics) above. 
| `nomad.nomad.namespace.get_namespaces` | Time elapsed for `Namespace.GetNamespaces` | Nanoseconds | Summary | host | | `nomad.nomad.namespace.list_namespace` | Time elapsed for `Namespace.ListNamespaces` | Nanoseconds | Summary | host | | `nomad.nomad.namespace.upsert_namespaces` | Time elapsed for `Namespace.UpsertNamespaces` | Nanoseconds | Summary | host | +| `nomad.nomad.node_pool.list` | Time elapsed for `NodePool.List` RPC call | Nanoseconds | Summary | host | +| `nomad.nomad.node_pool.get_node_pool` | Time elapsed for `NodePool.GetNodePool` RPC call | Nanoseconds | Summary | host | +| `nomad.nomad.node_pool.upsert_node_pools` | Time elapsed for `NodePool.UpsertNodePools` RPC call | Nanoseconds | Summary | host | +| `nomad.nomad.node_pool.delete_node_pools` | Time elapsed for `NodePool.DeleteNodePools` RPC call | Nanoseconds | Summary | host | | `nomad.nomad.periodic.force` | Time elapsed for `Periodic.Force` RPC call | Nanoseconds | Summary | host | | `nomad.nomad.plan.apply` | Time elapsed to apply a plan | Nanoseconds | Summary | host | | `nomad.nomad.plan.evaluate` | Time elapsed to evaluate a plan | Nanoseconds | Summary | host | diff --git a/website/content/docs/other-specifications/acl-policy.mdx b/website/content/docs/other-specifications/acl-policy.mdx index f59dec3e4..8e1a46a65 100644 --- a/website/content/docs/other-specifications/acl-policy.mdx +++ b/website/content/docs/other-specifications/acl-policy.mdx @@ -253,9 +253,8 @@ The `policy` field for the node rule can have one of the following values: Node pool rules are defined with a `node_pool` block. An ACL policy can include zero, one, or more node pool rules. - -Node pool rule controls access to the Node Pool API such as create, update, and -list node pools. +Node pool rule controls access to the [Node Pools API][api_node_pool] such as +create, update, and list node pools. Each node pool rule is labeled with the node pool name it applies to. 
You may use wildcard globs (`"*"`) in the label to apply a rule to multiple node pools. @@ -470,6 +469,7 @@ agent { [api_search]: /nomad/api-docs/search [api_agent]: /nomad/api-docs/agent/ [api_node]: /nomad/api-docs/nodes/ +[api_node_pool]: /nomad/api-docs/node-pools/ [api_operator]: /nomad/api-docs/operator/ [api_quota]: /nomad/api-docs/quotas/ [host_volumes]: /nomad/docs/configuration/client#host_volume-block diff --git a/website/data/api-docs-nav-data.json b/website/data/api-docs-nav-data.json index 8437b610b..f70fa8587 100644 --- a/website/data/api-docs-nav-data.json +++ b/website/data/api-docs-nav-data.json @@ -90,6 +90,10 @@ "title": "Nodes", "path": "nodes" }, + { + "title": "Node Pools", + "path": "node-pools" + }, { "title": "Metrics", "path": "metrics"