dynamic host volumes: node selection via constraints (#24518)

When making a request to create a dynamic host volume, users can pass a node
pool and constraints instead of a specific node ID.
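
For illustration only, such a request might look roughly like the following in
Go. Only the NodePool and Constraints fields are taken from the diffs below;
the package name, the Name value, and the helper are assumptions rather than
the actual API surface:

package example

import "github.com/hashicorp/nomad/nomad/structs"

// buildVolumeRequest is a hypothetical example of a create request that names
// a node pool and constraints rather than a specific node ID.
func buildVolumeRequest() *structs.HostVolume {
	return &structs.HostVolume{
		Name:     "example", // assumed volume name
		NodePool: "default",
		Constraints: []*structs.Constraint{
			{LTarget: "${attr.kernel.name}", RTarget: "linux", Operand: "="},
			{LTarget: "${meta.rack}", RTarget: "foo", Operand: "="},
		},
	}
}

The matching constraint blocks appear in HCL form in the volume spec used by
the command test diff below.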

This changeset implements the node scheduling logic by instantiating a node
pool filter and a constraint checker borrowed from the scheduler package.
Because host volumes with the same name can't land on the same host, we don't
need to support `distinct_hosts`/`distinct_property`; supporting them would be
challenging anyway without building out a much larger node iteration mechanism
to keep track of usage across multiple hosts.
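
To make that flow concrete, here is a minimal sketch of the selection pass,
assuming the structs.HostVolume fields shown in the diffs below; the function
names and the equality-only constraint check are simplifications rather than
the actual scheduler checker:

package example

import (
	"fmt"
	"strings"

	"github.com/hashicorp/nomad/nomad/structs"
)

// pickVolumeNode is a hypothetical sketch: filter candidate nodes by node
// pool, then by the request's constraints, and take the first feasible node.
func pickVolumeNode(nodes []*structs.Node, vol *structs.HostVolume) (*structs.Node, error) {
	for _, node := range nodes {
		if vol.NodePool != "" && node.NodePool != vol.NodePool {
			continue // wrong node pool
		}
		if meetsConstraints(node, vol.Constraints) {
			return node, nil
		}
	}
	return nil, fmt.Errorf("no node in pool %q satisfies the volume constraints", vol.NodePool)
}

// meetsConstraints is a hypothetical helper: it resolves "${attr.*}" and
// "${meta.*}" targets against the node and checks equality only; the real
// constraint checker supports the full operator set.
func meetsConstraints(node *structs.Node, constraints []*structs.Constraint) bool {
	for _, c := range constraints {
		target := strings.TrimSuffix(strings.TrimPrefix(c.LTarget, "${"), "}")
		var got string
		switch {
		case strings.HasPrefix(target, "attr."):
			got = node.Attributes[strings.TrimPrefix(target, "attr.")]
		case strings.HasPrefix(target, "meta."):
			got = node.Meta[strings.TrimPrefix(target, "meta.")]
		}
		if got != c.RTarget {
			return false
		}
	}
	return true
}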

Ref: https://github.com/hashicorp/nomad/pull/24479
Author: Tim Gross
Date: 2024-11-21 09:28:13 -05:00
Parent: c2dd97dee7
Commit: bbf49a9050
9 changed files with 239 additions and 46 deletions

@@ -21,6 +21,8 @@ func TestHostVolumeEndpoint_CRUD(t *testing.T) {
 	// Create a volume on the test node
 	vol := mock.HostVolumeRequest(structs.DefaultNamespace)
+	vol.NodePool = ""
+	vol.Constraints = nil
 	reqBody := struct {
 		Volumes []*structs.HostVolume
 	}{Volumes: []*structs.HostVolume{vol}}

@@ -11,13 +11,16 @@ import (
"github.com/hashicorp/hcl"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/command/agent"
"github.com/mitchellh/cli"
"github.com/shoenig/test/must"
)
func TestHostVolumeCreateCommand_Run(t *testing.T) {
ci.Parallel(t)
srv, client, url := testServer(t, true, nil)
srv, client, url := testServer(t, true, func(c *agent.Config) {
c.Client.Meta = map[string]string{"rack": "foo"}
})
t.Cleanup(srv.Shutdown)
waitForNodes(t, client)
@@ -38,11 +41,6 @@ node_pool = "default"
capacity_min = "10GiB"
capacity_max = "20G"
constraint {
  attribute = "${attr.kernel.name}"
  value = "linux"
}
constraint {
  attribute = "${meta.rack}"
  value = "foo"