Merge branch 'CVE-2020-27781' into master

* CVE-2020-27781:
  tasks/cephfs/test_volume_client: Add tests for authorize/deauthorize
  pybind/ceph_volume_client: Optionally authorize existing auth-ids
  pybind/ceph_volume_client: Preserve existing caps while authorize/deauthorize auth-id
  pybind/ceph_volume_client: Disallow authorize on existing auth_ids

Reviewed-by: Ramana Raja <rraja@redhat.com>
Reviewed-by: Kotresh Hiremath Ravishankar <khiremat@redhat.com>
Patrick Donnelly 2020-12-16 08:19:21 -08:00
commit c6121e48ec
2 changed files with 283 additions and 44 deletions

tasks/cephfs/test_volume_client.py

@@ -58,7 +58,7 @@ vc.disconnect()
def _configure_guest_auth(self, volumeclient_mount, guest_mount,
guest_entity, cephfs_mntpt,
namespace_prefix=None, readonly=False,
- tenant_id=None):
+ tenant_id=None, allow_existing_id=False):
"""
Set up auth credentials for the guest client to mount a volume.
@@ -83,14 +83,16 @@ vc.disconnect()
key = self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly},
tenant_id="{tenant_id}")
tenant_id="{tenant_id}",
allow_existing_id="{allow_existing_id}")
print(auth_result['auth_key'])
""".format(
group_id=group_id,
volume_id=volume_id,
guest_entity=guest_entity,
readonly=readonly,
- tenant_id=tenant_id)), volume_prefix, namespace_prefix
+ tenant_id=tenant_id,
+ allow_existing_id=allow_existing_id)), volume_prefix, namespace_prefix
)
# CephFSVolumeClient's authorize() does not return the secret
@@ -827,6 +829,209 @@ vc.disconnect()
)))
self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes"))
def test_authorize_auth_id_not_created_by_ceph_volume_client(self):
"""
If the auth_id already exists and was not created by
ceph_volume_client, authorizing that auth_id is
disallowed by default.
"""
volumeclient_mount = self.mounts[1]
volumeclient_mount.umount_wait()
# Configure volumeclient_mount as the handle for driving volumeclient.
self._configure_vc_auth(volumeclient_mount, "manila")
group_id = "groupid"
volume_id = "volumeid"
# Create auth_id
out = self.fs.mon_manager.raw_cluster_cmd(
"auth", "get-or-create", "client.guest1",
"mds", "allow *",
"osd", "allow rw",
"mon", "allow *"
)
auth_id = "guest1"
guestclient_1 = {
"auth_id": auth_id,
"tenant_id": "tenant1",
}
# Create a volume.
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.create_volume(vp, 1024*1024*10)
""".format(
group_id=group_id,
volume_id=volume_id,
)))
# Cannot authorize 'guestclient_1' to access the volume.
# It uses the auth ID 'guest1', which already exists and was
# not created by ceph_volume_client.
with self.assertRaises(CommandFailedError):
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
""".format(
group_id=group_id,
volume_id=volume_id,
auth_id=guestclient_1["auth_id"],
tenant_id=guestclient_1["tenant_id"]
)))
# Delete volume
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.delete_volume(vp)
""".format(
group_id=group_id,
volume_id=volume_id,
)))
def test_authorize_allow_existing_id_option(self):
"""
If the auth_id already exists and was not created by
ceph_volume_client, authorizing that auth_id is
disallowed by default but can be permitted with the
allow_existing_id option.
"""
volumeclient_mount = self.mounts[1]
volumeclient_mount.umount_wait()
# Configure volumeclient_mount as the handle for driving volumeclient.
self._configure_vc_auth(volumeclient_mount, "manila")
group_id = "groupid"
volume_id = "volumeid"
# Create auth_id
out = self.fs.mon_manager.raw_cluster_cmd(
"auth", "get-or-create", "client.guest1",
"mds", "allow *",
"osd", "allow rw",
"mon", "allow *"
)
auth_id = "guest1"
guestclient_1 = {
"auth_id": auth_id,
"tenant_id": "tenant1",
}
# Create a volume.
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.create_volume(vp, 1024*1024*10)
""".format(
group_id=group_id,
volume_id=volume_id,
)))
# Authorize 'guestclient_1' to access the volume. Its auth ID
# 'guest1' already exists and was not created by ceph_volume_client,
# so this would fail by default but succeeds with the option
# 'allow_existing_id'.
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}",
allow_existing_id="{allow_existing_id}")
""".format(
group_id=group_id,
volume_id=volume_id,
auth_id=guestclient_1["auth_id"],
tenant_id=guestclient_1["tenant_id"],
allow_existing_id=True
)))
# Delete volume
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.delete_volume(vp)
""".format(
group_id=group_id,
volume_id=volume_id,
)))
def test_deauthorize_auth_id_after_out_of_band_update(self):
"""
If an auth_id authorized by ceph_volume_client is updated
out of band, the auth_id should not be deleted after a
deauthorize; only the caps associated with it should be removed.
"""
volumeclient_mount = self.mounts[1]
volumeclient_mount.umount_wait()
# Configure volumeclient_mount as the handle for driving volumeclient.
self._configure_vc_auth(volumeclient_mount, "manila")
group_id = "groupid"
volume_id = "volumeid"
auth_id = "guest1"
guestclient_1 = {
"auth_id": auth_id,
"tenant_id": "tenant1",
}
# Create a volume.
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.create_volume(vp, 1024*1024*10)
""".format(
group_id=group_id,
volume_id=volume_id,
)))
# Authorize 'guestclient_1' to access the volume.
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}")
""".format(
group_id=group_id,
volume_id=volume_id,
auth_id=guestclient_1["auth_id"],
tenant_id=guestclient_1["tenant_id"]
)))
# Update caps for guestclient_1 out of band
out = self.fs.mon_manager.raw_cluster_cmd(
"auth", "caps", "client.guest1",
"mds", "allow rw path=/volumes/groupid, allow rw path=/volumes/groupid/volumeid",
"osd", "allow rw pool=cephfs_data namespace=fsvolumens_volumeid",
"mon", "allow r",
"mgr", "allow *"
)
# Deauthorize guestclient_1
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.deauthorize(vp, "{guest_entity}")
""".format(
group_id=group_id,
volume_id=volume_id,
guest_entity=guestclient_1["auth_id"]
)))
# Validate the caps of guestclient_1 after deauthorize. The entity
# client.guest1 should not have been deleted, and the mgr and mds caps
# that were updated out of band should still be present.
out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty"))
self.assertEqual("client.guest1", out[0]["entity"])
self.assertEqual("allow rw path=/volumes/groupid", out[0]["caps"]["mds"])
self.assertEqual("allow *", out[0]["caps"]["mgr"])
self.assertNotIn("osd", out[0]["caps"])
# Delete volume
self._volume_client_python(volumeclient_mount, dedent("""
vp = VolumePath("{group_id}", "{volume_id}")
vc.delete_volume(vp)
""".format(
group_id=group_id,
volume_id=volume_id,
)))
def test_recover_metadata(self):
"""
That volume client can recover from partial auth updates using
@@ -1045,9 +1250,9 @@ vc.disconnect()
guest_mount = self.mounts[2]
guest_mount.umount_wait()
- # Set auth caps for the auth ID using the volumeclient
- self._configure_guest_auth(vc_mount, guest_mount, guest_id, cephfs_mntpt)
+ # Set auth caps for the auth ID using the volumeclient
+ self._configure_guest_auth(vc_mount, guest_mount, guest_id, cephfs_mntpt,
+                            allow_existing_id=True)
# Mount the volume in the guest using the auth ID to assert that the
# auth caps are valid

pybind/ceph_volume_client.py

@@ -215,6 +215,7 @@ CEPHFSVOLUMECLIENT_VERSION_HISTORY = """
* 2 - Added get_object, put_object, delete_object methods to CephFSVolumeClient
* 3 - Allow volumes to be created without RADOS namespace isolation
* 4 - Added get_object_and_version, put_object_versioned method to CephFSVolumeClient
+ * 5 - Disallow authorize API for users not created by CephFSVolumeClient
"""
@@ -238,7 +239,7 @@ class CephFSVolumeClient(object):
"""
# Current version
- version = 4
+ version = 5
# Where shall we create our volumes?
POOL_PREFIX = "fsvolume_"
@@ -379,7 +380,18 @@ class CephFSVolumeClient(object):
continue
readonly = access_level == 'r'
- self._authorize_volume(volume_path, auth_id, readonly)
+ client_entity = "client.{0}".format(auth_id)
+ try:
+     existing_caps = self._rados_command(
+         'auth get',
+         {
+             'entity': client_entity
+         }
+     )
+ # FIXME: rados raising Error instead of ObjectNotFound in auth get failure
+ except rados.Error:
+     existing_caps = None
+ self._authorize_volume(volume_path, auth_id, readonly, existing_caps)
# Recovered from partial auth updates for the auth ID's access
# to a volume.
@@ -940,7 +952,27 @@ class CephFSVolumeClient(object):
data['version'] = self.version
return self._metadata_set(self._volume_metadata_path(volume_path), data)
- def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None):
+ def _prepare_updated_caps_list(self, existing_caps, mds_cap_str, osd_cap_str, authorize=True):
+     caps_list = []
+     for k, v in existing_caps['caps'].items():
+         if k == 'mds' or k == 'osd':
+             continue
+         elif k == 'mon':
+             if not authorize and v == 'allow r':
+                 continue
+         caps_list.extend((k,v))
+     if mds_cap_str:
+         caps_list.extend(('mds', mds_cap_str))
+     if osd_cap_str:
+         caps_list.extend(('osd', osd_cap_str))
+     if authorize and 'mon' not in caps_list:
+         caps_list.extend(('mon', 'allow r'))
+     return caps_list
+
+ def authorize(self, volume_path, auth_id, readonly=False, tenant_id=None, allow_existing_id=False):
"""
Get-or-create a Ceph auth identity for `auth_id` and grant them access
to
@@ -950,10 +982,24 @@ class CephFSVolumeClient(object):
:param tenant_id: Optionally provide a stringizable object to
restrict any created cephx IDs to other callers
passing the same tenant ID.
+ :param allow_existing_id: Optionally authorize existing auth-ids not
+                           created by ceph_volume_client
:return:
"""
with self._auth_lock(auth_id):
client_entity = "client.{0}".format(auth_id)
try:
existing_caps = self._rados_command(
'auth get',
{
'entity': client_entity
}
)
# FIXME: rados raising Error instead of ObjectNotFound in auth get failure
except rados.Error:
existing_caps = None
# Existing meta, or None, to be updated
auth_meta = self._auth_metadata_get(auth_id)
@@ -967,7 +1013,14 @@ class CephFSVolumeClient(object):
'dirty': True,
}
}
if auth_meta is None:
+ if not allow_existing_id and existing_caps is not None:
+     msg = "auth ID: {0} exists and not created by ceph_volume_client. Not allowed to modify".format(auth_id)
+     log.error(msg)
+     raise CephFSVolumeClientError(msg)
+ # non-existent auth IDs
sys.stderr.write("Creating meta for ID {0} with tenant {1}\n".format(
auth_id, tenant_id
))
@@ -977,14 +1030,6 @@ class CephFSVolumeClient(object):
'tenant_id': tenant_id.__str__() if tenant_id else None,
'volumes': volume
}
- # Note: this is *not* guaranteeing that the key doesn't already
- # exist in Ceph: we are allowing VolumeClient tenants to
- # 'claim' existing Ceph keys. In order to prevent VolumeClient
- # tenants from reading e.g. client.admin keys, you need to
- # have configured your VolumeClient user (e.g. Manila) to
- # have mon auth caps that prevent it from accessing those keys
- # (e.g. limit it to only access keys with a manila.* prefix)
else:
# Disallow tenants to share auth IDs
if auth_meta['tenant_id'].__str__() != tenant_id.__str__():
@@ -1004,7 +1049,7 @@ class CephFSVolumeClient(object):
self._auth_metadata_set(auth_id, auth_meta)
with self._volume_lock(volume_path):
- key = self._authorize_volume(volume_path, auth_id, readonly)
+ key = self._authorize_volume(volume_path, auth_id, readonly, existing_caps)
auth_meta['dirty'] = False
auth_meta['volumes'][volume_path_str]['dirty'] = False
@@ -1021,7 +1066,7 @@ class CephFSVolumeClient(object):
'auth_key': None
}
- def _authorize_volume(self, volume_path, auth_id, readonly):
+ def _authorize_volume(self, volume_path, auth_id, readonly, existing_caps):
vol_meta = self._volume_metadata_get(volume_path)
access_level = 'r' if readonly else 'rw'
@@ -1040,14 +1085,14 @@ class CephFSVolumeClient(object):
vol_meta['auths'].update(auth)
self._volume_metadata_set(volume_path, vol_meta)
- key = self._authorize_ceph(volume_path, auth_id, readonly)
+ key = self._authorize_ceph(volume_path, auth_id, readonly, existing_caps)
vol_meta['auths'][auth_id]['dirty'] = False
self._volume_metadata_set(volume_path, vol_meta)
return key
- def _authorize_ceph(self, volume_path, auth_id, readonly):
+ def _authorize_ceph(self, volume_path, auth_id, readonly, existing_caps):
def _authorize_ceph(self, volume_path, auth_id, readonly, existing_caps):
path = self._get_path(volume_path)
log.debug("Authorizing Ceph id '{0}' for path '{1}'".format(
auth_id, path
@@ -1075,15 +1120,7 @@ class CephFSVolumeClient(object):
want_osd_cap = 'allow {0} pool={1}'.format(want_access_level,
pool_name)
- try:
-     existing = self._rados_command(
-         'auth get',
-         {
-             'entity': client_entity
-         }
-     )
- # FIXME: rados raising Error instead of ObjectNotFound in auth get failure
- except rados.Error:
+ if existing_caps is None:
caps = self._rados_command(
'auth get-or-create',
{
@@ -1095,7 +1132,7 @@ class CephFSVolumeClient(object):
})
else:
# entity exists, update it
- cap = existing[0]
+ cap = existing_caps[0]
# Construct auth caps that if present might conflict with the desired
# auth caps.
@@ -1115,8 +1152,8 @@ class CephFSVolumeClient(object):
if not orig_mds_caps:
return want_mds_cap, want_osd_cap
mds_cap_tokens = orig_mds_caps.split(",")
osd_cap_tokens = orig_osd_caps.split(",")
mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")]
osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")]
if want_mds_cap in mds_cap_tokens:
return orig_mds_caps, orig_osd_caps
@@ -1137,15 +1174,14 @@ class CephFSVolumeClient(object):
orig_mds_caps, orig_osd_caps, want_mds_cap, want_osd_cap,
unwanted_mds_cap, unwanted_osd_cap)
+ caps_list = self._prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str)
caps = self._rados_command(
'auth caps',
{
'entity': client_entity,
-     'caps': [
-         'mds', mds_cap_str,
-         'osd', osd_cap_str,
-         'mon', cap['caps'].get('mon', 'allow r')]
+     'caps': caps_list
})
caps = self._rados_command(
'auth get',
{
@@ -1270,8 +1306,8 @@ class CephFSVolumeClient(object):
)
def cap_remove(orig_mds_caps, orig_osd_caps, want_mds_caps, want_osd_caps):
mds_cap_tokens = orig_mds_caps.split(",")
osd_cap_tokens = orig_osd_caps.split(",")
mds_cap_tokens = [x.strip() for x in orig_mds_caps.split(",")]
osd_cap_tokens = [x.strip() for x in orig_osd_caps.split(",")]
for want_mds_cap, want_osd_cap in zip(want_mds_caps, want_osd_caps):
if want_mds_cap in mds_cap_tokens:
@@ -1287,17 +1323,15 @@ class CephFSVolumeClient(object):
mds_cap_str, osd_cap_str = cap_remove(orig_mds_caps, orig_osd_caps,
want_mds_caps, want_osd_caps)
- if not mds_cap_str:
+ caps_list = self._prepare_updated_caps_list(cap, mds_cap_str, osd_cap_str, authorize=False)
+ if not caps_list:
self._rados_command('auth del', {'entity': client_entity}, decode=False)
else:
self._rados_command(
'auth caps',
{
'entity': client_entity,
-     'caps': [
-         'mds', mds_cap_str,
-         'osd', osd_cap_str,
-         'mon', cap['caps'].get('mon', 'allow r')]
+     'caps': caps_list
})
# FIXME: rados raising Error instead of ObjectNotFound in auth get failure