Mirror of https://github.com/kemko/nomad.git, synced 2026-01-01 16:05:42 +03:00
CSI: fix namespace ACL bypass on create/register APIs (#24396)
When creating or registering a CSI volume, the RPC handler uses the volume specification's namespace rather than the request namespace. That is intended behavior, but the ACL check runs only against the request namespace. The result is a cross-namespace ACL bypass for authenticated users who hold the `csi-write-volume` capability in one namespace but not in another: such a user can point the volume specification at a forbidden namespace while passing a permitted namespace via the `-namespace` flag in the CLI or API. The ACL check passes against the permitted namespace, but the volume is created in the forbidden one.

This changeset fixes the bug by moving the namespace check into the loop over the volumes being written by the RPCs. It also updates the tests to better cover ACL checking in these two RPCs.

Ref: CVE-2024-10975
Ref: https://hashicorp.atlassian.net/browse/SECVULN-15463
Fixes: https://github.com/hashicorp/nomad/issues/24397
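To make the before/after check order concrete before reading the diff, here is a self-contained Go sketch. The types and helpers (`aclChecker`, `registerRequest`, `registerBuggy`, `registerFixed`) are simplified stand-ins invented for illustration and do not match the real Nomad structs or handlers; the actual change is in the `CSIVolume.Register` and `CSIVolume.Create` hunks below, which also retain the plugin-read check and state snapshot omitted here.

```go
package main

import (
	"errors"
	"fmt"
)

// Illustrative stand-ins only; not the nomad package's real types.
var errPermissionDenied = errors.New("Permission denied")

type aclChecker struct {
	// namespaces in which the token holds csi-write-volume
	writableNamespaces map[string]bool
}

func (a *aclChecker) allowVolume(ns string) bool { return a.writableNamespaces[ns] }

type volume struct {
	ID        string
	Namespace string
}

type registerRequest struct {
	RequestNamespace string // what -namespace on the CLI or the API query supplies
	Volumes          []*volume
}

// registerBuggy mirrors the pre-fix shape: the ACL check runs once against the
// request namespace, but each volume is written to vol.Namespace.
func registerBuggy(acl *aclChecker, req *registerRequest) error {
	if !acl.allowVolume(req.RequestNamespace) {
		return errPermissionDenied
	}
	for _, vol := range req.Volumes {
		if vol.Namespace == "" {
			vol.Namespace = req.RequestNamespace
		}
		// volume is written to vol.Namespace with no further ACL check
	}
	return nil
}

// registerFixed mirrors the post-fix shape: the namespace check moves into the
// loop and runs against the namespace each volume will actually be written to.
func registerFixed(acl *aclChecker, req *registerRequest) error {
	for _, vol := range req.Volumes {
		if vol.Namespace == "" {
			vol.Namespace = req.RequestNamespace
		}
		if !acl.allowVolume(vol.Namespace) {
			return errPermissionDenied
		}
	}
	return nil
}

func main() {
	// Token can write volumes in "default" but not in "prod".
	acl := &aclChecker{writableNamespaces: map[string]bool{"default": true}}
	req := &registerRequest{
		RequestNamespace: "default",                                  // passes the old, request-level check
		Volumes:          []*volume{{ID: "vol1", Namespace: "prod"}}, // lands in the forbidden namespace
	}
	fmt.Println("pre-fix: ", registerBuggy(acl, req)) // <nil>: bypass succeeds
	fmt.Println("post-fix:", registerFixed(acl, req)) // Permission denied
}
```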
.changelog/24396.txt (new file, +3 lines)
@@ -0,0 +1,3 @@
+```release-note:security
+csi: Fixed a bug where a user with csi-write-volume permissions to one namespace can create volumes in another namespace (CVE-2024-10975)
+```
@@ -304,7 +304,8 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru
 	defer metrics.MeasureSince([]string{"nomad", "volume", "register"}, time.Now())

-	if !allowVolume(aclObj, args.RequestNamespace()) || !aclObj.AllowPluginRead() {
+	// permission for the volume namespaces will be checked below
+	if !aclObj.AllowPluginRead() {
 		return structs.ErrPermissionDenied
 	}

@@ -312,24 +313,25 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru
 		return fmt.Errorf("missing volume definition")
 	}

-	// This is the only namespace we ACL checked, force all the volumes to use it.
-	// We also validate that the plugin exists for each plugin, and validate the
+	snap, err := v.srv.State().Snapshot()
+	if err != nil {
+		return err
+	}
+
+	// Validate ACLs, that the plugin exists for each volume, and validate the
 	// capabilities when the plugin has a controller.
 	for _, vol := range args.Volumes {
-
-		snap, err := v.srv.State().Snapshot()
-		if err != nil {
-			return err
-		}
 		if vol.Namespace == "" {
 			vol.Namespace = args.RequestNamespace()
 		}
+		if !allowVolume(aclObj, vol.Namespace) {
+			return structs.ErrPermissionDenied
+		}
 		if err = vol.Validate(); err != nil {
 			return err
 		}

-		ws := memdb.NewWatchSet()
-		existingVol, err := snap.CSIVolumeByID(ws, vol.Namespace, vol.ID)
+		existingVol, err := snap.CSIVolumeByID(nil, vol.Namespace, vol.ID)
 		if err != nil {
 			return err
 		}
@@ -1044,7 +1046,8 @@ func (v *CSIVolume) Create(args *structs.CSIVolumeCreateRequest, reply *structs.
 		return err
 	}

-	if !allowVolume(aclObj, args.RequestNamespace()) || !aclObj.AllowPluginRead() {
+	// permission for the volume namespaces will be checked below
+	if !aclObj.AllowPluginRead() {
 		return structs.ErrPermissionDenied
 	}

@@ -1062,13 +1065,20 @@ func (v *CSIVolume) Create(args *structs.CSIVolumeCreateRequest, reply *structs.
 	}
 	validatedVols := []validated{}

-	// This is the only namespace we ACL checked, force all the volumes to use it.
-	// We also validate that the plugin exists for each plugin, and validate the
+	snap, err := v.srv.State().Snapshot()
+	if err != nil {
+		return err
+	}
+
+	// Validate ACLs, that the plugin exists for each volume, and validate the
 	// capabilities when the plugin has a controller.
 	for _, vol := range args.Volumes {
 		if vol.Namespace == "" {
 			vol.Namespace = args.RequestNamespace()
 		}
+		if !allowVolume(aclObj, vol.Namespace) {
+			return structs.ErrPermissionDenied
+		}
 		if err = vol.Validate(); err != nil {
 			return err
 		}

@@ -1084,10 +1094,6 @@ func (v *CSIVolume) Create(args *structs.CSIVolumeCreateRequest, reply *structs.
 		}

 		// if the volume already exists, we'll update it instead
-		snap, err := v.srv.State().Snapshot()
-		if err != nil {
-			return err
-		}
 		// current will be nil if it does not exist.
 		current, err := snap.CSIVolumeByID(nil, vol.Namespace, vol.ID)
 		if err != nil {
@@ -206,7 +206,7 @@ func TestCSIVolume_pluginValidateVolume(t *testing.T) {

 func TestCSIVolumeEndpoint_Register(t *testing.T) {
 	ci.Parallel(t)
-	srv, shutdown := TestServer(t, func(c *Config) {
+	srv, _, shutdown := TestACLServer(t, func(c *Config) {
 		c.NumSchedulers = 0 // Prevent automatic dequeue
 	})
 	defer shutdown()
@@ -217,9 +217,10 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) {

 	id0 := uuid.Generate()

-	// Create the register request
-	ns := mock.Namespace()
-	store.UpsertNamespaces(900, []*structs.Namespace{ns})
+	ns := "prod"
+	otherNS := "other"
+	index := uint64(1000)
+	must.NoError(t, store.UpsertNamespaces(index, []*structs.Namespace{{Name: ns}, {Name: otherNS}}))

 	// Create the node and plugin
 	node := mock.Node()
@@ -231,12 +232,24 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) {
 			NodeInfo: &structs.CSINodeInfo{},
 		},
 	}
-	require.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1000, node))
+	must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node))

+	index++
+	validToken := mock.CreatePolicyAndToken(t, store, index, "csi-access-ns",
+		`namespace "prod" { capabilities = ["csi-write-volume", "csi-read-volume"] }
+		namespace "default" { capabilities = ["csi-write-volume"] }
+		plugin { policy = "read" }
+		node { policy = "read" }`).SecretID
+
+	index++
+	invalidToken := mock.CreatePolicyAndToken(t, store, index, "csi-access-other",
+		`namespace "other" { capabilities = ["csi-write-volume"] }
+		plugin { policy = "read" }
+		node { policy = "read" }`).SecretID
+
 	// Create the volume
 	vols := []*structs.CSIVolume{{
 		ID: id0,
-		Namespace: ns.Name,
+		Namespace: ns,
 		PluginID: "minnie",
 		AccessMode: structs.CSIVolumeAccessModeSingleNodeReader, // legacy field ignored
 		AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, // legacy field ignored
@@ -252,57 +265,68 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) {
 	}}

-	// Create the register request
+	// The token has access to the request namespace but not the volume namespace
 	req1 := &structs.CSIVolumeRegisterRequest{
 		Volumes: vols,
 		WriteRequest: structs.WriteRequest{
 			Region:    "global",
-			Namespace: "",
+			AuthToken: invalidToken,
+			Namespace: otherNS,
 		},
 	}
 	resp1 := &structs.CSIVolumeRegisterResponse{}
 	err := msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", req1, resp1)
-	require.NoError(t, err)
-	require.NotEqual(t, uint64(0), resp1.Index)
+	must.EqError(t, err, "Permission denied")
+
+	// Switch to a token that has access to the volume's namespace, but switch
+	// the request namespace to one that will be overwritten by the vol spec
+	req1.AuthToken = validToken
+	req1.Namespace = structs.DefaultNamespace
+	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", req1, resp1)
+	must.NoError(t, err)
+	must.NotEq(t, uint64(0), resp1.Index)

 	// Get the volume back out
 	req2 := &structs.CSIVolumeGetRequest{
 		ID: id0,
 		QueryOptions: structs.QueryOptions{
 			Region:    "global",
-			Namespace: ns.Name,
+			Namespace: ns,
+			AuthToken: validToken,
 		},
 	}
 	resp2 := &structs.CSIVolumeGetResponse{}
 	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req2, resp2)
-	require.NoError(t, err)
-	require.Equal(t, resp1.Index, resp2.Index)
-	require.Equal(t, vols[0].ID, resp2.Volume.ID)
-	require.Equal(t, "csi.CSISecrets(map[mysecret:[REDACTED]])",
+	must.NoError(t, err)
+	must.Eq(t, resp1.Index, resp2.Index)
+	must.Eq(t, vols[0].ID, resp2.Volume.ID)
+	must.Eq(t, "csi.CSISecrets(map[mysecret:[REDACTED]])",
 		resp2.Volume.Secrets.String())
-	require.Equal(t, "csi.CSIOptions(FSType: ext4, MountFlags: [REDACTED])",
+	must.Eq(t, "csi.CSIOptions(FSType: ext4, MountFlags: [REDACTED])",
 		resp2.Volume.MountOptions.String())

 	// Registration does not update
 	req1.Volumes[0].PluginID = "adam"
 	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", req1, resp1)
-	require.Error(t, err, "exists")
+	must.ErrorContains(t, err, "no CSI plugin named")

 	// Deregistration works
 	req3 := &structs.CSIVolumeDeregisterRequest{
 		VolumeIDs: []string{id0},
 		WriteRequest: structs.WriteRequest{
 			Region:    "global",
-			Namespace: ns.Name,
+			Namespace: ns,
+			AuthToken: validToken,
 		},
 	}
 	resp3 := &structs.CSIVolumeDeregisterResponse{}
 	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Deregister", req3, resp3)
-	require.NoError(t, err)
+	must.NoError(t, err)

 	// Volume is missing
 	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req2, resp2)
-	require.NoError(t, err)
-	require.Nil(t, resp2.Volume)
+	must.NoError(t, err)
+	must.Nil(t, resp2.Volume)
 }

 // TestCSIVolumeEndpoint_Claim exercises the VolumeClaim RPC, verifying that claims
@@ -1098,7 +1122,7 @@ func TestCSIVolumeEndpoint_List_PaginationFiltering(t *testing.T) {
 func TestCSIVolumeEndpoint_Create(t *testing.T) {
 	ci.Parallel(t)
 	var err error
-	srv, shutdown := TestServer(t, func(c *Config) {
+	srv, rootToken, shutdown := TestACLServer(t, func(c *Config) {
 		c.NumSchedulers = 0 // Prevent automatic dequeue
 	})
 	defer shutdown()
@@ -1132,11 +1156,11 @@ func TestCSIVolumeEndpoint_Create(t *testing.T) {

 	req0 := &structs.NodeRegisterRequest{
 		Node: node,
-		WriteRequest: structs.WriteRequest{Region: "global"},
+		WriteRequest: structs.WriteRequest{Region: "global", AuthToken: rootToken.SecretID},
 	}
 	var resp0 structs.NodeUpdateResponse
 	err = client.RPC("Node.Register", req0, &resp0)
-	require.NoError(t, err)
+	must.NoError(t, err)

 	testutil.WaitForResult(func() (bool, error) {
 		nodes := srv.connectedNodes()
@@ -1145,9 +1169,10 @@ func TestCSIVolumeEndpoint_Create(t *testing.T) {
 		t.Fatalf("should have a client")
 	})

-	ns := structs.DefaultNamespace
+	ns := "prod"
+	otherNS := "other"

-	state := srv.fsm.State()
+	store := srv.fsm.State()
 	codec := rpcClient(t, srv)
 	index := uint64(1000)

@@ -1172,14 +1197,17 @@ func TestCSIVolumeEndpoint_Create(t *testing.T) {
 		}
 	}).Node
 	index++
-	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, index, node))
+	must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, index, node))

+	index++
+	must.NoError(t, store.UpsertNamespaces(index, []*structs.Namespace{{Name: ns}, {Name: otherNS}}))
+
 	// Create the volume
 	volID := uuid.Generate()
 	vols := []*structs.CSIVolume{{
 		ID: volID,
 		Name: "vol",
-		Namespace: "", // overriden by WriteRequest
+		Namespace: ns,
 		PluginID: "minnie",
 		AccessMode: structs.CSIVolumeAccessModeSingleNodeReader, // legacy field ignored
 		AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, // legacy field ignored
@@ -1200,48 +1228,73 @@ func TestCSIVolumeEndpoint_Create(t *testing.T) {
 		},
 	}}

+	index++
+	validToken := mock.CreatePolicyAndToken(t, store, index, "csi-access-ns",
+		`namespace "prod" { capabilities = ["csi-write-volume", "csi-read-volume"] }
+		namespace "default" { capabilities = ["csi-write-volume"] }
+		plugin { policy = "read" }
+		node { policy = "read" }`).SecretID
+
+	index++
+	invalidToken := mock.CreatePolicyAndToken(t, store, index, "csi-access-other",
+		`namespace "other" { capabilities = ["csi-write-volume"] }
+		plugin { policy = "read" }
+		node { policy = "read" }`).SecretID
+
-	// Create the create request
+	// The token has access to the request namespace but not the volume namespace
 	req1 := &structs.CSIVolumeCreateRequest{
 		Volumes: vols,
 		WriteRequest: structs.WriteRequest{
 			Region:    "global",
-			Namespace: ns,
+			AuthToken: invalidToken,
+			Namespace: otherNS,
 		},
 	}
 	resp1 := &structs.CSIVolumeCreateResponse{}
 	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Create", req1, resp1)
-	require.NoError(t, err)
+	must.EqError(t, err, "Permission denied")
+
+	// Switch to a token that has access to the volume's namespace, but switch
+	// the request namespace to one that will be overwritten by the vol spec
+	req1.AuthToken = validToken
+	req1.Namespace = structs.DefaultNamespace
+	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Create", req1, resp1)
+	must.NoError(t, err)
+	must.NotEq(t, uint64(0), resp1.Index)

 	// Get the volume back out
 	req2 := &structs.CSIVolumeGetRequest{
 		ID: volID,
 		QueryOptions: structs.QueryOptions{
-			Region: "global",
+			Region:    "global",
+			Namespace: ns,
+			AuthToken: validToken,
 		},
 	}
 	resp2 := &structs.CSIVolumeGetResponse{}
 	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req2, resp2)
-	require.NoError(t, err)
-	require.Equal(t, resp1.Index, resp2.Index)
+	must.NoError(t, err)
+	must.Eq(t, resp1.Index, resp2.Index)

 	vol := resp2.Volume
-	require.NotNil(t, vol)
-	require.Equal(t, volID, vol.ID)
+	must.NotNil(t, vol)
+	must.Eq(t, volID, vol.ID)

 	// these fields are set from the args
-	require.Equal(t, "csi.CSISecrets(map[mysecret:[REDACTED]])",
+	must.Eq(t, "csi.CSISecrets(map[mysecret:[REDACTED]])",
 		vol.Secrets.String())
-	require.Equal(t, "csi.CSIOptions(FSType: ext4, MountFlags: [REDACTED])",
+	must.Eq(t, "csi.CSIOptions(FSType: ext4, MountFlags: [REDACTED])",
 		vol.MountOptions.String())
-	require.Equal(t, ns, vol.Namespace)
-	require.Len(t, vol.RequestedCapabilities, 1)
+	must.Eq(t, ns, vol.Namespace)
+	must.Len(t, 1, vol.RequestedCapabilities)

 	// these fields are set from the plugin and should have been written to raft
-	require.Equal(t, "vol-12345", vol.ExternalID)
-	require.Equal(t, int64(42), vol.Capacity)
-	require.Equal(t, "bar", vol.Context["plugincontext"])
-	require.Equal(t, "", vol.Context["mycontext"])
-	require.Equal(t, map[string]string{"rack": "R1"}, vol.Topologies[0].Segments)
+	must.Eq(t, "vol-12345", vol.ExternalID)
+	must.Eq(t, int64(42), vol.Capacity)
+	must.Eq(t, "bar", vol.Context["plugincontext"])
+	must.Eq(t, "", vol.Context["mycontext"])
+	must.Eq(t, map[string]string{"rack": "R1"}, vol.Topologies[0].Segments)
 }

 func TestCSIVolumeEndpoint_Delete(t *testing.T) {