mgr/volumes: Allow/deny auth IDs access to FS subvolumes

... via the `ceph fs subvolume authorize/deauthorize` command.

Fixes: https://tracker.ceph.com/issues/40401
Signed-off-by: Ramana Raja <rraja@redhat.com>
Signed-off-by: Kotresh HR <khiremat@redhat.com>

parent d3aea55797
commit 6c3b7547fb
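
As a usage sketch (the volume, subvolume, and auth ID names below are hypothetical), the commands added in module.py at the end of this diff are invoked like so; access_level defaults to 'rw', and authorize prints the client's cephx secret key:

    ceph fs subvolume authorize a sv1 guest
    ceph fs subvolume deauthorize a sv1 guest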
src/pybind/mgr/volumes/fs/async_cloner.py

@@ -43,13 +43,13 @@ def get_next_clone_entry(volume_client, volname, running_jobs):
 def open_at_volume(volume_client, volname, groupname, subvolname, op_type):
     with open_volume(volume_client, volname) as fs_handle:
         with open_group(fs_handle, volume_client.volspec, groupname) as group:
-            with open_subvol(fs_handle, volume_client.volspec, group, subvolname, op_type) as subvolume:
+            with open_subvol(volume_client.mgr, fs_handle, volume_client.volspec, group, subvolname, op_type) as subvolume:
                 yield subvolume

 @contextmanager
 def open_at_group(volume_client, fs_handle, groupname, subvolname, op_type):
     with open_group(fs_handle, volume_client.volspec, groupname) as group:
-        with open_subvol(fs_handle, volume_client.volspec, group, subvolname, op_type) as subvolume:
+        with open_subvol(volume_client.mgr, fs_handle, volume_client.volspec, group, subvolname, op_type) as subvolume:
             yield subvolume

 @contextmanager
@@ -311,7 +311,7 @@ class Cloner(AsyncJobs):
         try:
             with open_volume(self.vc, volname) as fs_handle:
                 with open_group(fs_handle, self.vc.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.vc.volspec, group, clonename, SubvolumeOpType.CLONE_CANCEL) as clone_subvolume:
+                    with open_subvol(self.vc.mgr, fs_handle, self.vc.volspec, group, clonename, SubvolumeOpType.CLONE_CANCEL) as clone_subvolume:
                         status = clone_subvolume.status
                         clone_state = SubvolumeStates.from_value(status['state'])
                         if not self.is_clone_cancelable(clone_state):
@@ -331,7 +331,7 @@ class Cloner(AsyncJobs):
             with self.lock:
                 with open_volume_lockless(self.vc, volname) as fs_handle:
                     with open_group(fs_handle, self.vc.volspec, groupname) as group:
-                        with open_subvol(fs_handle, self.vc.volspec, group, clonename, SubvolumeOpType.CLONE_CANCEL) as clone_subvolume:
+                        with open_subvol(self.vc.mgr, fs_handle, self.vc.volspec, group, clonename, SubvolumeOpType.CLONE_CANCEL) as clone_subvolume:
                             if not self._cancel_job(volname, (track_idx, clone_subvolume.base_path)):
                                 raise VolumeException(-errno.EINVAL, "cannot cancel -- clone finished (check clone status)")
         except (IndexException, MetadataMgrException) as e:
src/pybind/mgr/volumes/fs/operations/access.py (new file, 126 lines)

@@ -0,0 +1,126 @@
import errno
import json


def allow_access(mgr, client_entity, want_mds_cap, want_osd_cap,
                 unwanted_mds_cap, unwanted_osd_cap):
    ret, out, err = mgr.mon_command({
        "prefix": "auth get",
        "entity": client_entity,
        "format": "json"})

    if ret == -errno.ENOENT:
        ret, out, err = mgr.mon_command({
            "prefix": "auth get-or-create",
            "entity": client_entity,
            "caps": ['mds', want_mds_cap, 'osd', want_osd_cap, 'mon', 'allow r'],
            "format": "json"})
    else:
        cap = json.loads(out)[0]

        def cap_update(
                orig_mds_caps, orig_osd_caps, want_mds_cap,
                want_osd_cap, unwanted_mds_cap, unwanted_osd_cap):

            if not orig_mds_caps:
                return want_mds_cap, want_osd_cap

            mds_cap_tokens = orig_mds_caps.split(",")
            osd_cap_tokens = orig_osd_caps.split(",")

            if want_mds_cap in mds_cap_tokens:
                return orig_mds_caps, orig_osd_caps

            if unwanted_mds_cap in mds_cap_tokens:
                mds_cap_tokens.remove(unwanted_mds_cap)
                osd_cap_tokens.remove(unwanted_osd_cap)

            mds_cap_tokens.append(want_mds_cap)
            osd_cap_tokens.append(want_osd_cap)

            return ",".join(mds_cap_tokens), ",".join(osd_cap_tokens)

        orig_mds_caps = cap['caps'].get('mds', "")
        orig_osd_caps = cap['caps'].get('osd', "")

        mds_cap_str, osd_cap_str = cap_update(
            orig_mds_caps, orig_osd_caps, want_mds_cap, want_osd_cap,
            unwanted_mds_cap, unwanted_osd_cap)

        mgr.mon_command(
            {
                "prefix": "auth caps",
                'entity': client_entity,
                'caps': [
                    'mds', mds_cap_str,
                    'osd', osd_cap_str,
                    'mon', cap['caps'].get('mon', 'allow r')],
            })
    ret, out, err = mgr.mon_command(
        {
            'prefix': 'auth get',
            'entity': client_entity,
            'format': 'json'
        })

    # Result expected like this:
    # [
    #     {
    #         "entity": "client.foobar",
    #         "key": "AQBY0\/pViX\/wBBAAUpPs9swy7rey1qPhzmDVGQ==",
    #         "caps": {
    #             "mds": "allow *",
    #             "mon": "allow *"
    #         }
    #     }
    # ]
    caps = json.loads(out)
    assert len(caps) == 1
    assert caps[0]['entity'] == client_entity
    return caps[0]['key']


def deny_access(mgr, client_entity, want_mds_caps, want_osd_caps):
    ret, out, err = mgr.mon_command({
        "prefix": "auth get",
        "entity": client_entity,
        "format": "json",
    })

    if ret == -errno.ENOENT:
        # Already gone, great.
        return

    def cap_remove(orig_mds_caps, orig_osd_caps, want_mds_caps, want_osd_caps):
        mds_cap_tokens = orig_mds_caps.split(",")
        osd_cap_tokens = orig_osd_caps.split(",")

        for want_mds_cap, want_osd_cap in zip(want_mds_caps, want_osd_caps):
            if want_mds_cap in mds_cap_tokens:
                mds_cap_tokens.remove(want_mds_cap)
                osd_cap_tokens.remove(want_osd_cap)
                break

        return ",".join(mds_cap_tokens), ",".join(osd_cap_tokens)

    cap = json.loads(out)[0]
    orig_mds_caps = cap['caps'].get('mds', "")
    orig_osd_caps = cap['caps'].get('osd', "")
    mds_cap_str, osd_cap_str = cap_remove(orig_mds_caps, orig_osd_caps,
                                          want_mds_caps, want_osd_caps)

    if not mds_cap_str:
        mgr.mon_command(
            {
                'prefix': 'auth rm',
                'entity': client_entity
            })
    else:
        mgr.mon_command(
            {
                "prefix": "auth caps",
                'entity': client_entity,
                'caps': [
                    'mds', mds_cap_str,
                    'osd', osd_cap_str,
                    'mon', cap['caps'].get('mon', 'allow r')],
            })
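
The nested cap_update/cap_remove helpers above treat an entity's existing mds and osd cap strings as comma-separated token lists and edit them in lockstep. A minimal single-list rehearsal of the merge rule, with made-up cap strings (not taken from a real cluster):

    def merge_cap(orig_caps, unwanted_cap, want_cap):
        # mirrors cap_update: no-op if the wanted grant already exists;
        # otherwise drop a conflicting grant and append the new one
        if not orig_caps:
            return want_cap
        tokens = orig_caps.split(",")
        if want_cap in tokens:
            return orig_caps
        if unwanted_cap in tokens:
            tokens.remove(unwanted_cap)
        tokens.append(want_cap)
        return ",".join(tokens)

    # upgrading an existing read-only path grant to read-write:
    merge_cap("allow r path=/volumes/_nogroup/sv1,allow rw path=/other",
              "allow r path=/volumes/_nogroup/sv1",
              "allow rw path=/volumes/_nogroup/sv1")
    # -> "allow rw path=/other,allow rw path=/volumes/_nogroup/sv1"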
src/pybind/mgr/volumes/fs/operations/subvolume.py

@@ -7,7 +7,7 @@ from .template import SubvolumeOpType

 from .versions import loaded_subvolumes

-def create_subvol(fs, vol_spec, group, subvolname, size, isolate_nspace, pool, mode, uid, gid):
+def create_subvol(mgr, fs, vol_spec, group, subvolname, size, isolate_nspace, pool, mode, uid, gid):
     """
     create a subvolume (create a subvolume with the max known version).

@@ -22,10 +22,10 @@ def create_subvol(fs, vol_spec, group, subvolname, size, isolate_nspace, pool, m
     :param gid: the group identifier
     :return: None
     """
-    subvolume = loaded_subvolumes.get_subvolume_object_max(fs, vol_spec, group, subvolname)
+    subvolume = loaded_subvolumes.get_subvolume_object_max(mgr, fs, vol_spec, group, subvolname)
     subvolume.create(size, isolate_nspace, pool, mode, uid, gid)

-def create_clone(fs, vol_spec, group, subvolname, pool, source_volume, source_subvolume, snapname):
+def create_clone(mgr, fs, vol_spec, group, subvolname, pool, source_volume, source_subvolume, snapname):
     """
     create a cloned subvolume.

@@ -39,10 +39,10 @@ def create_clone(fs, vol_spec, group, subvolname, pool, source_volume, source_su
     :param snapname: source subvolume snapshot
     :return: None
     """
-    subvolume = loaded_subvolumes.get_subvolume_object_max(fs, vol_spec, group, subvolname)
+    subvolume = loaded_subvolumes.get_subvolume_object_max(mgr, fs, vol_spec, group, subvolname)
     subvolume.create_clone(pool, source_volume, source_subvolume, snapname)

-def remove_subvol(fs, vol_spec, group, subvolname, force=False, retainsnaps=False):
+def remove_subvol(mgr, fs, vol_spec, group, subvolname, force=False, retainsnaps=False):
     """
     remove a subvolume.

@@ -54,11 +54,11 @@ def remove_subvol(fs, vol_spec, group, subvolname, force=False, retainsnaps=Fals
     :return: None
     """
     op_type = SubvolumeOpType.REMOVE if not force else SubvolumeOpType.REMOVE_FORCE
-    with open_subvol(fs, vol_spec, group, subvolname, op_type) as subvolume:
+    with open_subvol(mgr, fs, vol_spec, group, subvolname, op_type) as subvolume:
         subvolume.remove(retainsnaps)

 @contextmanager
-def open_subvol(fs, vol_spec, group, subvolname, op_type):
+def open_subvol(mgr, fs, vol_spec, group, subvolname, op_type):
     """
     open a subvolume. This API is to be used as a context manager.

@@ -69,6 +69,6 @@ def open_subvol(fs, vol_spec, group, subvolname, op_type):
     :param op_type: operation type for which subvolume is being opened
     :return: yields a subvolume object (subclass of SubvolumeTemplate)
     """
-    subvolume = loaded_subvolumes.get_subvolume_object(fs, vol_spec, group, subvolname)
+    subvolume = loaded_subvolumes.get_subvolume_object(mgr, fs, vol_spec, group, subvolname)
     subvolume.open(op_type)
     yield subvolume
src/pybind/mgr/volumes/fs/operations/template.py

@@ -56,6 +56,8 @@ class SubvolumeOpType(Enum):
     CLONE_STATUS   = 'clone-status'
     CLONE_CANCEL   = 'clone-cancel'
     CLONE_INTERNAL = 'clone_internal'
+    ALLOW_ACCESS   = 'allow-access'
+    DENY_ACCESS    = 'deny-access'

 class SubvolumeTemplate(object):
     VERSION = None # type: int
src/pybind/mgr/volumes/fs/operations/versions/__init__.py

@@ -46,8 +46,8 @@ class SubvolumeLoader(object):
         except KeyError:
             raise VolumeException(-errno.EINVAL, "subvolume class v{0} does not exist".format(version))

-    def get_subvolume_object_max(self, fs, vol_spec, group, subvolname):
-        return self._get_subvolume_version(self.max_version)(fs, vol_spec, group, subvolname)
+    def get_subvolume_object_max(self, mgr, fs, vol_spec, group, subvolname):
+        return self._get_subvolume_version(self.max_version)(mgr, fs, vol_spec, group, subvolname)

     def upgrade_to_v2_subvolume(self, subvolume):
         # legacy mode subvolumes cannot be upgraded to v2

@@ -58,7 +58,7 @@ class SubvolumeLoader(object):
         if version >= SubvolumeV2.version():
             return

-        v1_subvolume = self._get_subvolume_version(version)(subvolume.fs, subvolume.vol_spec, subvolume.group, subvolume.subvolname)
+        v1_subvolume = self._get_subvolume_version(version)(subvolume.mgr, subvolume.fs, subvolume.vol_spec, subvolume.group, subvolume.subvolname)
         try:
             v1_subvolume.open(SubvolumeOpType.SNAP_LIST)
         except VolumeException as ve:

@@ -89,17 +89,17 @@ class SubvolumeLoader(object):
         # legacy is only upgradable to v1
         subvolume.init_config(SubvolumeV1.version(), subvolume_type, qpath, initial_state)

-    def get_subvolume_object(self, fs, vol_spec, group, subvolname, upgrade=True):
-        subvolume = SubvolumeBase(fs, vol_spec, group, subvolname)
+    def get_subvolume_object(self, mgr, fs, vol_spec, group, subvolname, upgrade=True):
+        subvolume = SubvolumeBase(mgr, fs, vol_spec, group, subvolname)
         try:
             subvolume.discover()
             self.upgrade_to_v2_subvolume(subvolume)
             version = int(subvolume.metadata_mgr.get_global_option('version'))
-            return self._get_subvolume_version(version)(fs, vol_spec, group, subvolname, legacy=subvolume.legacy_mode)
+            return self._get_subvolume_version(version)(mgr, fs, vol_spec, group, subvolname, legacy=subvolume.legacy_mode)
         except MetadataMgrException as me:
             if me.errno == -errno.ENOENT and upgrade:
                 self.upgrade_legacy_subvolume(fs, subvolume)
-                return self.get_subvolume_object(fs, vol_spec, group, subvolname, upgrade=False)
+                return self.get_subvolume_object(mgr, fs, vol_spec, group, subvolname, upgrade=False)
             else:
                 # log the actual error and generalize error string returned to user
                 log.error("error accessing subvolume metadata for '{0}' ({1})".format(subvolname, me))
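
A hypothetical illustration of the loader's version dispatch above, with the mgr reference now threaded through every subvolume constructor:

    # argument names are assumptions; "sv1" is a made-up subvolume name
    subvol = loaded_subvolumes.get_subvolume_object_max(mgr, fs_handle, vol_spec, group, "sv1")
    # -> SubvolumeV2(mgr, fs_handle, vol_spec, group, "sv1"), SubvolumeV2 being
    #    the highest registered metadata version at the time of this commit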
src/pybind/mgr/volumes/fs/operations/versions/subvolume_base.py

@@ -21,7 +21,8 @@ log = logging.getLogger(__name__)
 class SubvolumeBase(object):
     LEGACY_CONF_DIR = "_legacy"

-    def __init__(self, fs, vol_spec, group, subvolname, legacy=False):
+    def __init__(self, mgr, fs, vol_spec, group, subvolname, legacy=False):
+        self.mgr = mgr
         self.fs = fs
         self.cmode = None
         self.user_id = None
src/pybind/mgr/volumes/fs/operations/versions/subvolume_v1.py

@@ -3,6 +3,7 @@ import stat
 import uuid
 import errno
 import logging
+import json
 from datetime import datetime

 import cephfs

@@ -13,6 +14,7 @@ from .op_sm import SubvolumeOpSm
 from .subvolume_base import SubvolumeBase
 from ..template import SubvolumeTemplate
 from ..snapshot_util import mksnap, rmsnap
+from ..access import allow_access, deny_access
 from ...exception import IndexException, OpSmException, VolumeException, MetadataMgrException
 from ...fs_util import listdir
 from ..template import SubvolumeOpType

@@ -229,6 +231,65 @@ class SubvolumeV1(SubvolumeBase, SubvolumeTemplate):
         except cephfs.Error as e:
             raise VolumeException(-e.args[0], e.args[1])

+    def authorize(self, auth_id, access_level):
+        subvol_path = self.path
+        log.debug("Authorizing Ceph id '{0}' for path '{1}'".format(auth_id, subvol_path))
+
+        # First I need to work out what the data pool is for this share:
+        # read the layout
+        try:
+            pool = self.fs.getxattr(subvol_path, 'ceph.dir.layout.pool').decode('utf-8')
+        except cephfs.Error as e:
+            raise VolumeException(-e.args[0], e.args[1])
+
+        try:
+            namespace = self.fs.getxattr(subvol_path, 'ceph.dir.layout.pool_namespace').decode('utf-8')
+        except cephfs.NoData:
+            namespace = None
+
+        # Now construct auth capabilities that give the guest just enough
+        # permissions to access the share
+        client_entity = "client.{0}".format(auth_id)
+        want_mds_cap = "allow {0} path={1}".format(access_level, subvol_path.decode('utf-8'))
+        want_osd_cap = "allow {0} pool={1}{2}".format(
+            access_level, pool, " namespace={0}".format(namespace) if namespace else "")
+
+        # Construct auth caps that if present might conflict with the desired
+        # auth caps.
+        unwanted_access_level = 'r' if access_level == 'rw' else 'rw'
+        unwanted_mds_cap = 'allow {0} path={1}'.format(unwanted_access_level, subvol_path.decode('utf-8'))
+        unwanted_osd_cap = "allow {0} pool={1}{2}".format(
+            unwanted_access_level, pool, " namespace={0}".format(namespace) if namespace else "")
+
+        return allow_access(self.mgr, client_entity, want_mds_cap, want_osd_cap,
+                            unwanted_mds_cap, unwanted_osd_cap)
+
+    def deauthorize(self, auth_id):
+        """
+        The volume must still exist.
+        """
+        client_entity = "client.{0}".format(auth_id)
+        subvol_path = self.path
+        try:
+            pool_name = self.fs.getxattr(subvol_path, 'ceph.dir.layout.pool').decode('utf-8')
+        except cephfs.Error as e:
+            raise VolumeException(-e.args[0], e.args[1])
+
+        try:
+            namespace = self.fs.getxattr(subvol_path, 'ceph.dir.layout.pool_namespace').decode('utf-8')
+        except cephfs.NoData:
+            namespace = None
+
+        # The auth_id might have read-only or read-write mount access for the
+        # subvolume path.
+        access_levels = ('r', 'rw')
+        want_mds_caps = ['allow {0} path={1}'.format(access_level, subvol_path.decode('utf-8'))
+                         for access_level in access_levels]
+        want_osd_caps = ['allow {0} pool={1}{2}'.format(
+            access_level, pool_name, " namespace={0}".format(namespace) if namespace else "")
+                         for access_level in access_levels]
+        deny_access(self.mgr, client_entity, want_mds_caps, want_osd_caps)
+
     def _get_clone_source(self):
         try:
             clone_source = {
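
For concreteness: with a hypothetical v1 subvolume path and data pool, and no pool namespace set, authorize('guest', 'r') above would assemble caps along these lines before delegating to allow_access (the path is illustrative; real v1 subvolume paths end in a UUID directory):

    client_entity    = "client.guest"
    want_mds_cap     = "allow r path=/volumes/_nogroup/sv1/<uuid>"
    want_osd_cap     = "allow r pool=cephfs_data"
    unwanted_mds_cap = "allow rw path=/volumes/_nogroup/sv1/<uuid>"
    unwanted_osd_cap = "allow rw pool=cephfs_data"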
src/pybind/mgr/volumes/fs/purge_queue.py

@@ -41,7 +41,7 @@ def subvolume_purge(volume_client, volname, trashcan, subvolume_trash_entry, sho
     try:
         with open_volume(volume_client, volname) as fs_handle:
             with open_group(fs_handle, volume_client.volspec, groupname) as group:
-                with open_subvol(fs_handle, volume_client.volspec, group, subvolname, SubvolumeOpType.REMOVE) as subvolume:
+                with open_subvol(volume_client.mgr, fs_handle, volume_client.volspec, group, subvolname, SubvolumeOpType.REMOVE) as subvolume:
                     log.debug("subvolume.path={0}, purgeable={1}".format(subvolume.path, subvolume.purgeable))
                     if not subvolume.purgeable:
                         return
src/pybind/mgr/volumes/fs/volume.py

@@ -22,6 +22,8 @@ from .operations.template import SubvolumeOpType

 log = logging.getLogger(__name__)

+ALLOWED_ACCESS_LEVELS = ('r', 'rw')
+

 def octal_str_to_decimal_int(mode):
     try:
@@ -136,7 +138,7 @@ class VolumeClient(CephfsClient):
             oct_mode = octal_str_to_decimal_int(mode)
             try:
                 create_subvol(
-                    fs_handle, self.volspec, group, subvolname, size, isolate_nspace, pool, oct_mode, uid, gid)
+                    self.mgr, fs_handle, self.volspec, group, subvolname, size, isolate_nspace, pool, oct_mode, uid, gid)
             except VolumeException as ve:
                 # kick the purge threads for async removal -- note that this
                 # assumes that the subvolume is moved to trashcan for cleanup on error.
@@ -158,7 +160,7 @@ class VolumeClient(CephfsClient):
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
                     try:
-                        with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.CREATE) as subvolume:
+                        with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.CREATE) as subvolume:
                             # idempotent creation -- valid. Attributes set is supported.
                             attrs = {
                                 'uid': uid if uid else subvolume.uid,
@@ -189,7 +191,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    remove_subvol(fs_handle, self.volspec, group, subvolname, force, retainsnaps)
+                    remove_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, force, retainsnaps)
                     # kick the purge threads for async removal -- note that this
                     # assumes that the subvolume is moved to trash can.
                     # TODO: make purge queue as singleton so that trash can kicks
@@ -203,6 +205,40 @@ class VolumeClient(CephfsClient):
             ret = self.volume_exception_to_retval(ve)
         return ret

+    def authorize_subvolume(self, **kwargs):
+        ret = 0, "", ""
+        volname = kwargs['vol_name']
+        subvolname = kwargs['sub_name']
+        authid = kwargs['auth_id']
+        groupname = kwargs['group_name']
+        accesslevel = kwargs['access_level']
+
+        try:
+            with open_volume(self, volname) as fs_handle:
+                with open_group(fs_handle, self.volspec, groupname) as group:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.ALLOW_ACCESS) as subvolume:
+                        key = subvolume.authorize(authid, accesslevel)
+                        ret = 0, key, ""
+        except VolumeException as ve:
+            ret = self.volume_exception_to_retval(ve)
+        return ret
+
+    def deauthorize_subvolume(self, **kwargs):
+        ret = 0, "", ""
+        volname = kwargs['vol_name']
+        subvolname = kwargs['sub_name']
+        authid = kwargs['auth_id']
+        groupname = kwargs['group_name']
+
+        try:
+            with open_volume(self, volname) as fs_handle:
+                with open_group(fs_handle, self.volspec, groupname) as group:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.DENY_ACCESS) as subvolume:
+                        subvolume.deauthorize(authid)
+        except VolumeException as ve:
+            ret = self.volume_exception_to_retval(ve)
+        return ret
+
     def resize_subvolume(self, **kwargs):
         ret = 0, "", ""
         volname = kwargs['vol_name']
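
A sketch of calling these entry points directly (volume and auth ID names are hypothetical). Both return the usual (retcode, stdout, stderr) triple, with authorize surfacing the client's cephx secret key on stdout:

    # within the mgr module, self.vc is the VolumeClient instance
    ret, key, err = self.vc.authorize_subvolume(vol_name="a", sub_name="sv1",
                                                auth_id="guest", group_name=None,
                                                access_level="r")
    ret, out, err = self.vc.deauthorize_subvolume(vol_name="a", sub_name="sv1",
                                                  auth_id="guest", group_name=None)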
@@ -214,7 +250,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.RESIZE) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.RESIZE) as subvolume:
                         nsize, usedbytes = subvolume.resize(newsize, noshrink)
                         ret = 0, json.dumps(
                             [{'bytes_used': usedbytes},{'bytes_quota': nsize},

@@ -235,7 +271,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.PIN) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.PIN) as subvolume:
                         subvolume.pin(pin_type, pin_setting)
                         ret = 0, json.dumps({}), ""
         except VolumeException as ve:

@@ -251,7 +287,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.GETPATH) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.GETPATH) as subvolume:
                         subvolpath = subvolume.path
                         ret = 0, subvolpath.decode("utf-8"), ""
         except VolumeException as ve:

@@ -267,7 +303,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.INFO) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.INFO) as subvolume:
                         mon_addr_lst = []
                         mon_map_mons = self.mgr.get('mon_map')['mons']
                         for mon in mon_map_mons:

@@ -307,7 +343,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_CREATE) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_CREATE) as subvolume:
                         subvolume.create_snapshot(snapname)
         except VolumeException as ve:
             ret = self.volume_exception_to_retval(ve)

@@ -324,7 +360,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_REMOVE) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_REMOVE) as subvolume:
                         subvolume.remove_snapshot(snapname)
         except VolumeException as ve:
             # ESTALE serves as an error to state that subvolume is currently stale due to internal removal and,

@@ -345,7 +381,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_INFO) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_INFO) as subvolume:
                         snap_info_dict = subvolume.snapshot_info(snapname)
                         ret = 0, json.dumps(snap_info_dict, indent=4, sort_keys=True), ""
         except VolumeException as ve:

@@ -361,7 +397,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_LIST) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_LIST) as subvolume:
                         snapshots = subvolume.list_snapshots()
                         ret = 0, name_to_json(snapshots), ""
         except VolumeException as ve:

@@ -377,7 +413,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_PROTECT) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_PROTECT) as subvolume:
                         log.warning("snapshot protect call is deprecated and will be removed in a future release")
         except VolumeException as ve:
             ret = self.volume_exception_to_retval(ve)

@@ -392,7 +428,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_UNPROTECT) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, subvolname, SubvolumeOpType.SNAP_UNPROTECT) as subvolume:
                         log.warning("snapshot unprotect call is deprecated and will be removed in a future release")
         except VolumeException as ve:
             ret = self.volume_exception_to_retval(ve)

@@ -404,8 +440,8 @@ class VolumeClient(CephfsClient):
         s_groupname = kwargs['group_name']
         t_groupname = kwargs['target_group_name']

-        create_clone(fs_handle, self.volspec, t_group, t_subvolname, t_pool, volname, s_subvolume, s_snapname)
-        with open_subvol(fs_handle, self.volspec, t_group, t_subvolname, SubvolumeOpType.CLONE_INTERNAL) as t_subvolume:
+        create_clone(self.mgr, fs_handle, self.volspec, t_group, t_subvolname, t_pool, volname, s_subvolume, s_snapname)
+        with open_subvol(self.mgr, fs_handle, self.volspec, t_group, t_subvolname, SubvolumeOpType.CLONE_INTERNAL) as t_subvolume:
             try:
                 if t_groupname == s_groupname and t_subvolname == s_subvolname:
                     t_subvolume.attach_snapshot(s_snapname, t_subvolume)

@@ -431,7 +467,7 @@ class VolumeClient(CephfsClient):

         with open_group_unique(fs_handle, self.volspec, target_groupname, s_group, s_groupname) as target_group:
             try:
-                with open_subvol(fs_handle, self.volspec, target_group, target_subvolname, SubvolumeOpType.CLONE_CREATE):
+                with open_subvol(self.mgr, fs_handle, self.volspec, target_group, target_subvolname, SubvolumeOpType.CLONE_CREATE):
                     raise VolumeException(-errno.EEXIST, "subvolume '{0}' exists".format(target_subvolname))
             except VolumeException as ve:
                 if ve.errno == -errno.ENOENT:

@@ -449,7 +485,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, s_groupname) as s_group:
-                    with open_subvol(fs_handle, self.volspec, s_group, s_subvolname, SubvolumeOpType.CLONE_SOURCE) as s_subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, s_group, s_subvolname, SubvolumeOpType.CLONE_SOURCE) as s_subvolume:
                         self._clone_subvolume_snapshot(fs_handle, volname, s_group, s_subvolume, **kwargs)
         except VolumeException as ve:
             ret = self.volume_exception_to_retval(ve)

@@ -464,7 +500,7 @@ class VolumeClient(CephfsClient):
         try:
             with open_volume(self, volname) as fs_handle:
                 with open_group(fs_handle, self.volspec, groupname) as group:
-                    with open_subvol(fs_handle, self.volspec, group, clonename, SubvolumeOpType.CLONE_STATUS) as subvolume:
+                    with open_subvol(self.mgr, fs_handle, self.volspec, group, clonename, SubvolumeOpType.CLONE_STATUS) as subvolume:
                         ret = 0, json.dumps({'status' : subvolume.status}, indent=2), ""
         except VolumeException as ve:
             ret = self.volume_exception_to_retval(ve)
src/pybind/mgr/volumes/module.py

@@ -119,6 +119,25 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
                    "clone, and retaining existing subvolume snapshots",
            'perm': 'rw'
        },
+       {
+           'cmd': 'fs subvolume authorize '
+                  'name=vol_name,type=CephString '
+                  'name=sub_name,type=CephString '
+                  'name=auth_id,type=CephString '
+                  'name=group_name,type=CephString,req=false '
+                  'name=access_level,type=CephString,req=false ',
+           'desc': "Allow a cephx auth ID access to a subvolume",
+           'perm': 'rw'
+       },
+       {
+           'cmd': 'fs subvolume deauthorize '
+                  'name=vol_name,type=CephString '
+                  'name=sub_name,type=CephString '
+                  'name=auth_id,type=CephString '
+                  'name=group_name,type=CephString,req=false ',
+           'desc': "Deny a cephx auth ID access to a subvolume",
+           'perm': 'rw'
+       },
        {
            'cmd': 'fs subvolumegroup getpath '
                   'name=vol_name,type=CephString '
@@ -494,6 +513,27 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
                                           force=cmd.get('force', False),
                                           retain_snapshots=cmd.get('retain_snapshots', False))

+    @mgr_cmd_wrap
+    def _cmd_fs_subvolume_authorize(self, inbuf, cmd):
+        """
+        :return: a 3-tuple of return code(int), secret key(str), error message (str)
+        """
+        return self.vc.authorize_subvolume(vol_name=cmd['vol_name'],
+                                           sub_name=cmd['sub_name'],
+                                           auth_id=cmd['auth_id'],
+                                           group_name=cmd.get('group_name', None),
+                                           access_level=cmd.get('access_level', 'rw'))
+
+    @mgr_cmd_wrap
+    def _cmd_fs_subvolume_deauthorize(self, inbuf, cmd):
+        """
+        :return: a 3-tuple of return code(int), empty string(str), error message (str)
+        """
+        return self.vc.deauthorize_subvolume(vol_name=cmd['vol_name'],
+                                             sub_name=cmd['sub_name'],
+                                             auth_id=cmd['auth_id'],
+                                             group_name=cmd.get('group_name', None))
+
     @mgr_cmd_wrap
     def _cmd_fs_subvolume_ls(self, inbuf, cmd):
         return self.vc.list_subvolumes(vol_name=cmd['vol_name'],