Mirror of https://github.com/ceph/ceph
mgr/volumes: Add interface to get subvolume metadata
The following interface is added:

    "ceph fs subvolume info <vol_name> <sub_name> [<group_name>]"

The output is in JSON format with the following fields:

1. atime: access time of subvolume path in the format "YYYY-MM-DD HH:MM:SS"
2. mtime: modification time of subvolume path in the format "YYYY-MM-DD HH:MM:SS"
3. ctime: change time of subvolume path in the format "YYYY-MM-DD HH:MM:SS"
4. uid: uid of subvolume path
5. gid: gid of subvolume path
6. mode: mode of subvolume path
7. mon_addrs: list of monitor addresses
8. bytes_pcent: quota used in percentage if quota is set, else displays "undefined"
9. bytes_quota: quota size in bytes if quota is set, else displays "infinite"
10. bytes_used: current used size of the subvolume in bytes
11. created_at: time of creation of subvolume in the format "YYYY-MM-DD HH:MM:SS"
12. data_pool: data pool the subvolume belongs to
13. path: absolute path of a subvolume
14. type: subvolume type indicating whether it's clone or subvolume

Fixes: https://tracker.ceph.com/issues/44277
Signed-off-by: Kotresh HR <khiremat@redhat.com>
parent 558853be66
commit 70659ffee1
@@ -152,6 +152,27 @@ Fetch the absolute path of a subvolume using::

    $ ceph fs subvolume getpath <vol_name> <subvol_name> [--group_name <subvol_group_name>]

Fetch the metadata of a subvolume using::

    $ ceph fs subvolume info <vol_name> <subvol_name> [--group_name <subvol_group_name>]

The output format is JSON and contains the following fields:

* atime: access time of subvolume path in the format "YYYY-MM-DD HH:MM:SS"
* mtime: modification time of subvolume path in the format "YYYY-MM-DD HH:MM:SS"
* ctime: change time of subvolume path in the format "YYYY-MM-DD HH:MM:SS"
* uid: uid of subvolume path
* gid: gid of subvolume path
* mode: mode of subvolume path
* mon_addrs: list of monitor addresses
* bytes_pcent: quota used in percentage if quota is set, else displays "undefined"
* bytes_quota: quota size in bytes if quota is set, else displays "infinite"
* bytes_used: current used size of the subvolume in bytes
* created_at: time of creation of subvolume in the format "YYYY-MM-DD HH:MM:SS"
* data_pool: data pool the subvolume belongs to
* path: absolute path of a subvolume
* type: subvolume type indicating whether it's clone or subvolume
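For illustration, a hypothetical invocation and reply (not part of the diff; volume name, subvolume name, addresses and all values below are made up, and the fields match the list above, dumped with sort_keys=True)::

    $ ceph fs subvolume info cephfs sub_0
    {
        "atime": "2020-02-19 10:12:31",
        "bytes_pcent": "undefined",
        "bytes_quota": "infinite",
        "bytes_used": 0,
        "created_at": "2020-02-19 10:12:31",
        "ctime": "2020-02-19 10:12:31",
        "data_pool": "cephfs_data",
        "gid": 0,
        "mode": 16877,
        "mon_addrs": [
            "192.168.1.7:6789"
        ],
        "mtime": "2020-02-19 10:12:31",
        "path": "/volumes/_nogroup/sub_0",
        "type": "subvolume",
        "uid": 0
    }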

List subvolumes using::

    $ ceph fs subvolume ls <vol_name> [--group_name <subvol_group_name>]
@@ -156,6 +156,14 @@ class TestVolumes(CephFSTestCase):
        # remove the leading '/', and trailing whitespaces
        return path[1:].rstrip()

    def _get_subvolume_info(self, vol_name, subvol_name, group_name=None):
        args = ["subvolume", "info", vol_name, subvol_name]
        if group_name:
            args.append(group_name)
        args = tuple(args)
        subvol_md = self._fs_cmd(*args)
        return subvol_md

    def _delete_test_volume(self):
        self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it")
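Outside the test framework, the metadata fetch this helper performs through self._fs_cmd amounts to running the new mgr command and parsing its JSON reply. A minimal standalone sketch (assumes the ceph CLI is on PATH and a reachable cluster; the names in the usage comment are hypothetical):

import json
import subprocess

def get_subvolume_info(vol_name, subvol_name, group_name=None):
    # Mirrors the test helper: group_name is an optional trailing positional argument.
    args = ["ceph", "fs", "subvolume", "info", vol_name, subvol_name]
    if group_name:
        args.append(group_name)
    # The command prints the subvolume metadata as JSON on stdout.
    return json.loads(subprocess.check_output(args))

# Usage (hypothetical names):
#   info = get_subvolume_info("cephfs", "sub_0")
#   print(info["bytes_quota"], info["bytes_pcent"])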
@@ -733,6 +741,106 @@ class TestVolumes(CephFSTestCase):
            raise RuntimeError("expected filling subvolume {0} with {1} file of size {2}MB "
                               "to succeed".format(subvolname, number_of_files, file_size))

    def test_subvolume_info(self):
        # tests the 'fs subvolume info' command

        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "type", "uid"]

        # create subvolume
        subvolume = self._generate_random_subvolume_name()
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # get subvolume metadata
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        if len(subvol_info) == 0:
            raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
        for md in subvol_md:
            if md not in subvol_info.keys():
                raise RuntimeError("%s not present in the metadata of subvolume" % md)

        if subvol_info["bytes_pcent"] != "undefined":
            raise RuntimeError("bytes_pcent should be set to undefined if quota is not set")

        if subvol_info["bytes_quota"] != "infinite":
            raise RuntimeError("bytes_quota should be set to infinite if quota is not set")

        nsize = self.DEFAULT_FILE_SIZE*1024*1024
        try:
            self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
        except CommandFailedError:
            raise RuntimeError("expected the 'fs subvolume resize' command to succeed")

        # get subvolume metadata after quota set
        subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
        if len(subvol_info) == 0:
            raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
        if subvol_info["bytes_pcent"] == "undefined":
            raise RuntimeError("bytes_pcent should not be set to undefined if quota is set")

        if subvol_info["bytes_quota"] == "infinite":
            raise RuntimeError("bytes_quota should not be set to infinite if quota is set")

        if subvol_info["type"] != "subvolume":
            raise RuntimeError("type should be set to subvolume")

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)

        # verify trash dir is clean
        self._wait_for_trash_empty()

    def test_clone_subvolume_info(self):

        # tests the 'fs subvolume info' command for a clone
        subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
                     "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "type", "uid"]

        subvolume = self._generate_random_subvolume_name()
        snapshot = self._generate_random_snapshot_name()
        clone = self._generate_random_clone_name()

        # create subvolume
        self._fs_cmd("subvolume", "create", self.volname, subvolume)

        # do some IO
        self._do_subvolume_io(subvolume, number_of_files=1)

        # snapshot subvolume
        self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)

        # now, protect snapshot
        self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot)

        # schedule a clone
        self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)

        # check clone status
        self._wait_for_clone_to_complete(clone)

        # now, unprotect snapshot
        self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot)

        # remove snapshot
        self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)

        subvol_info = json.loads(self._get_subvolume_info(self.volname, clone))
        if len(subvol_info) == 0:
            raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume")
        for md in subvol_md:
            if md not in subvol_info.keys():
                raise RuntimeError("%s not present in the metadata of subvolume" % md)
        if subvol_info["type"] != "clone":
            raise RuntimeError("type should be set to clone")

        # remove subvolumes
        self._fs_cmd("subvolume", "rm", self.volname, subvolume)
        self._fs_cmd("subvolume", "rm", self.volname, clone)

        # verify trash dir is clean
        self._wait_for_trash_empty()


    ### subvolume group operations

    def test_subvolume_create_and_rm_in_group(self):
@@ -227,3 +227,28 @@ class SubvolumeBase(object):
            self.fs.mkdirs(self.base_path, mode)
        except cephfs.Error as e:
            raise VolumeException(-e.args[0], e.args[1])

    def info(self):
        subvolpath = self.metadata_mgr.get_global_option('path')
        etype = self.metadata_mgr.get_global_option(MetadataManager.GLOBAL_META_KEY_TYPE)
        st = self.fs.statx(subvolpath, cephfs.CEPH_STATX_BTIME | cephfs.CEPH_STATX_SIZE |
                                       cephfs.CEPH_STATX_UID | cephfs.CEPH_STATX_GID |
                                       cephfs.CEPH_STATX_MODE | cephfs.CEPH_STATX_ATIME |
                                       cephfs.CEPH_STATX_MTIME | cephfs.CEPH_STATX_CTIME,
                           cephfs.AT_SYMLINK_NOFOLLOW)
        usedbytes = st["size"]
        try:
            nsize = int(self.fs.getxattr(subvolpath, 'ceph.quota.max_bytes').decode('utf-8'))
        except cephfs.NoData:
            nsize = 0

        try:
            data_pool = self.fs.getxattr(subvolpath, 'ceph.dir.layout.pool').decode('utf-8')
        except cephfs.Error as e:
            raise VolumeException(-e.args[0], e.args[1])

        return {'path': subvolpath, 'type': etype, 'uid': int(st["uid"]), 'gid': int(st["gid"]),
                'atime': str(st["atime"]), 'mtime': str(st["mtime"]), 'ctime': str(st["ctime"]),
                'mode': int(st["mode"]), 'data_pool': data_pool, 'created_at': str(st["btime"]),
                'bytes_quota': "infinite" if nsize == 0 else nsize, 'bytes_used': int(usedbytes),
                'bytes_pcent': "undefined" if nsize == 0 else '{0:.2f}'.format((float(usedbytes) / nsize) * 100.0)}
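The quota-dependent fields above follow a simple rule: nsize == 0 means no ceph.quota.max_bytes xattr was found, so bytes_quota and bytes_pcent fall back to the strings "infinite" and "undefined"; otherwise the percentage of the quota in use is formatted to two decimals. A toy sketch of just that formatting (not part of the diff), with a worked example:

def quota_fields(usedbytes, nsize):
    # nsize is the quota in bytes; 0 means no quota is set.
    return {
        'bytes_quota': "infinite" if nsize == 0 else nsize,
        'bytes_used': int(usedbytes),
        'bytes_pcent': "undefined" if nsize == 0 else '{0:.2f}'.format((float(usedbytes) / nsize) * 100.0),
    }

# quota_fields(2621440, 0)         -> bytes_quota "infinite", bytes_pcent "undefined"
# quota_fields(2621440, 10485760)  -> bytes_quota 10485760,   bytes_pcent "25.00"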
@@ -2,6 +2,8 @@ import json
import errno
import logging

import cephfs

from mgr_util import CephfsClient

from .fs_util import listdir
@@ -209,6 +211,29 @@ class VolumeClient(CephfsClient):
            ret = self.volume_exception_to_retval(ve)
        return ret

    def subvolume_info(self, **kwargs):
        ret = None
        volname = kwargs['vol_name']
        subvolname = kwargs['sub_name']
        groupname = kwargs['group_name']

        try:
            with open_volume(self, volname) as fs_handle:
                with open_group(fs_handle, self.volspec, groupname) as group:
                    with open_subvol(fs_handle, self.volspec, group, subvolname) as subvolume:
                        mon_addr_lst = []
                        mon_map_mons = self.mgr.get('mon_map')['mons']
                        for mon in mon_map_mons:
                            ip_port = mon['addr'].split("/")[0]
                            mon_addr_lst.append(ip_port)

                        subvol_info_dict = subvolume.info()
                        subvol_info_dict["mon_addrs"] = mon_addr_lst
                        ret = 0, json.dumps(subvol_info_dict, indent=4, sort_keys=True), ""
        except VolumeException as ve:
            ret = self.volume_exception_to_retval(ve)
        return ret

    def list_subvolumes(self, **kwargs):
        ret = 0, "", ""
        volname = kwargs['vol_name']
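The mon_addrs handling above assumes each mon map entry carries a legacy-style 'addr' of the form "host:port/nonce" and keeps only the "host:port" part. A quick sketch of that extraction with made-up data (not part of the diff):

# Made-up mon map entries; only the 'addr' field is used here.
mon_map_mons = [
    {'name': 'a', 'addr': '192.168.1.7:6789/0'},
    {'name': 'b', 'addr': '192.168.1.8:6789/0'},
]

mon_addr_lst = [mon['addr'].split("/")[0] for mon in mon_map_mons]
assert mon_addr_lst == ['192.168.1.7:6789', '192.168.1.8:6789']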
@@ -101,6 +101,15 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
                    "and optionally, in a specific subvolume group",
            'perm': 'rw'
        },
        {
            'cmd': 'fs subvolume info '
                   'name=vol_name,type=CephString '
                   'name=sub_name,type=CephString '
                   'name=group_name,type=CephString,req=false ',
            'desc': "Get the metadata of a CephFS subvolume in a volume, "
                    "and optionally, in a specific subvolume group",
            'perm': 'r'
        },
        {
            'cmd': 'fs subvolumegroup snapshot ls '
                   'name=vol_name,type=CephString '
@@ -317,6 +326,11 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
                                        sub_name=cmd['sub_name'],
                                        group_name=cmd.get('group_name', None))

    def _cmd_fs_subvolume_info(self, inbuf, cmd):
        return self.vc.subvolume_info(vol_name=cmd['vol_name'],
                                      sub_name=cmd['sub_name'],
                                      group_name=cmd.get('group_name', None))

    def _cmd_fs_subvolumegroup_snapshot_create(self, inbuf, cmd):
        return self.vc.create_subvolume_group_snapshot(vol_name=cmd['vol_name'],
                                                       group_name=cmd['group_name'],
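No explicit dispatch change is needed for the new handler because the module is assumed to resolve handlers by name: the command prefix 'fs subvolume info' maps to the method _cmd_fs_subvolume_info by replacing spaces with underscores. A simplified sketch of that convention (an assumption about the module's dispatch, not code from this diff):

import errno

def handle_command(module, inbuf, cmd):
    # Derive the handler name from the command prefix, e.g.
    # 'fs subvolume info' -> '_cmd_fs_subvolume_info'.
    handler_name = "_cmd_" + cmd['prefix'].replace(" ", "_")
    try:
        handler = getattr(module, handler_name)
    except AttributeError:
        return -errno.EINVAL, "", "unknown command"
    return handler(inbuf, cmd)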