Merge pull request #48196 from neesingh-rh/wip-human-readable-volume-info

mgr/volumes: Add human-readable flag to volume info command

Reviewed-by: Venky Shankar <vshankar@redhat.com>
Reviewed-by: Dhairya Parmar <dparmar@redhat.com>
Reviewed-by: Kotresh HR <khiremat@redhat.com>
Reviewed-by: Anthony D'Atri <anthony.datri@gmail.com>
Reviewed-by: Ramana Raja <rraja@redhat.com>

commit 296ba0641a
@@ -99,32 +99,11 @@ expected to be disabled on the volume.

 Fetch the information of a CephFS volume using::

-    $ ceph fs volume info vol_name
-    {
-        "mon_addrs": [
-            "192.168.1.7:40977"
-        ],
-        "pending_subvolume_deletions": 0,
-        "pools": {
-            "data": [
-                {
-                    "avail": 106288709632,
-                    "name": "cephfs.vol_name.data",
-                    "used": 4096
-                }
-            ],
-            "metadata": [
-                {
-                    "avail": 106288709632,
-                    "name": "cephfs.vol_name.meta",
-                    "used": 155648
-                }
-            ]
-        },
-        "used_size": 0
-    }
+    $ ceph fs volume info vol_name [--human_readable]

-The output format is json and contains fields as follows.
+The ``--human_readable`` flag shows used and available pool capacities in KB/MB/GB.
+
+The output format is JSON and contains fields as follows:

 * pools: Attributes of data and metadata pools
 * avail: The amount of free space available in bytes
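As a rough illustration of driving this command from a script (not part of the change; it assumes a ``ceph`` CLI on PATH and an existing volume, and the wrapper name is made up here), a thin Python wrapper might look like::

    import json
    import subprocess

    def get_volume_info(vol_name: str, human_readable: bool = False) -> dict:
        """Invoke the command documented above and parse its JSON output."""
        cmd = ["ceph", "fs", "volume", "info", vol_name]
        if human_readable:
            # With the flag, byte counts come back as strings carrying a
            # unit suffix instead of plain integers.
            cmd.append("--human_readable")
        return json.loads(subprocess.check_output(cmd))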
@@ -134,6 +113,33 @@ The output format is json and contains fields as follows.
 * used_size: Current used size of the CephFS volume in bytes
 * pending_subvolume_deletions: Number of subvolumes pending deletion

+Sample output of volume info command::
+
+    $ ceph fs volume info vol_name
+    {
+        "mon_addrs": [
+            "192.168.1.7:40977"
+        ],
+        "pending_subvolume_deletions": 0,
+        "pools": {
+            "data": [
+                {
+                    "avail": 106288709632,
+                    "name": "cephfs.vol_name.data",
+                    "used": 4096
+                }
+            ],
+            "metadata": [
+                {
+                    "avail": 106288709632,
+                    "name": "cephfs.vol_name.meta",
+                    "used": 155648
+                }
+            ]
+        },
+        "used_size": 0
+    }
+
 FS Subvolume groups
 -------------------
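To connect the sample to the flag described earlier: ``avail`` is a raw byte count, and 106288709632 bytes is roughly 99.0 GiB, which is the kind of value ``--human_readable`` renders with a unit suffix. A quick check with the sample's own numbers::

    sample_avail = 106288709632       # bytes, from the sample output above
    print(sample_avail / 1024**3)     # -> 98.99..., i.e. about 99.0 GiB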
@@ -194,8 +200,9 @@ Check the presence of any subvolume group using::

     $ ceph fs subvolumegroup exist <vol_name>

 The strings returned by the 'exist' command:
-  * "subvolumegroup exists": if any subvolumegroup is present
-  * "no subvolumegroup exists": if no subvolumegroup is present
+
+  * "subvolumegroup exists": if any subvolumegroup is present
+  * "no subvolumegroup exists": if no subvolumegroup is present

 .. note:: It checks for the presence of custom groups and not the default one. To validate the emptiness of the volume, subvolumegroup existence check alone is not sufficient. The subvolume existence also needs to be checked as there might be subvolumes in the default group.
@@ -344,8 +351,9 @@ Check the presence of any subvolume using::

     $ ceph fs subvolume exist <vol_name> [--group_name <subvol_group_name>]

 The strings returned by the 'exist' command:
-  * "subvolume exists": if any subvolume of given group_name is present
-  * "no subvolume exists": if no subvolume of given group_name is present
+
+  * "subvolume exists": if any subvolume of given group_name is present
+  * "no subvolume exists": if no subvolume of given group_name is present

 Set custom metadata on the subvolume as a key-value pair using::
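The note above implies a two-step emptiness check. A minimal sketch, assuming a ``ceph`` CLI on PATH and comparing against the exact strings documented for the two 'exist' commands (the helper names here are illustrative, not from the change)::

    import subprocess

    def _fs_cmd(*args: str) -> str:
        # Illustrative helper: run `ceph fs ...` and return trimmed stdout.
        return subprocess.check_output(["ceph", "fs", *args], text=True).strip()

    def volume_is_empty(vol_name: str) -> bool:
        # A volume is empty only if there are no custom subvolumegroups AND
        # no subvolumes in the default group (see the note above).
        if _fs_cmd("subvolumegroup", "exist", vol_name) != "no subvolumegroup exists":
            return False
        return _fs_cmd("subvolume", "exist", vol_name) == "no subvolume exists"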
@@ -207,8 +207,11 @@ class TestVolumesHelper(CephFSTestCase):
         else:
             self.volname = result[0]['name']

-    def _get_volume_info(self, vol_name):
-        args = ["volume", "info", vol_name]
+    def _get_volume_info(self, vol_name, human_readable=False):
+        if human_readable:
+            args = ["volume", "info", vol_name, human_readable]
+        else:
+            args = ["volume", "info", vol_name]
         args = tuple(args)
         vol_md = self._fs_cmd(*args)
         return vol_md
@@ -662,6 +665,49 @@ class TestVolumes(TestVolumesHelper):
                          "'pending_subvolume_deletions' should not be present in absence"
                          " of subvolumegroup")

+    def test_volume_info_with_human_readable_flag(self):
+        """
+        Tests the 'fs volume info --human_readable' command
+        """
+        vol_fields = ["pools", "used_size", "pending_subvolume_deletions", "mon_addrs"]
+        group = self._generate_random_group_name()
+        # create subvolumegroup
+        self._fs_cmd("subvolumegroup", "create", self.volname, group)
+        # get volume metadata
+        vol_info = json.loads(self._get_volume_info(self.volname, "--human_readable"))
+        for md in vol_fields:
+            self.assertIn(md, vol_info,
+                          f"'{md}' key not present in metadata of volume")
+        units = [' ', 'k', 'M', 'G', 'T', 'P', 'E']
+        assert vol_info["used_size"][-1] in units, "unit suffix in used_size is absent"
+        assert vol_info["pools"]["data"][0]["avail"][-1] in units, "unit suffix in avail data is absent"
+        assert vol_info["pools"]["data"][0]["used"][-1] in units, "unit suffix in used data is absent"
+        assert vol_info["pools"]["metadata"][0]["avail"][-1] in units, "unit suffix in avail metadata is absent"
+        assert vol_info["pools"]["metadata"][0]["used"][-1] in units, "unit suffix in used metadata is absent"
+        self.assertEqual(int(vol_info["used_size"]), 0,
+                         "Size should be zero when volumes directory is empty")
+
+    def test_volume_info_with_human_readable_flag_without_subvolumegroup(self):
+        """
+        Tests the 'fs volume info --human_readable' command without subvolume group
+        """
+        vol_fields = ["pools", "mon_addrs"]
+        # get volume metadata
+        vol_info = json.loads(self._get_volume_info(self.volname, "--human_readable"))
+        for md in vol_fields:
+            self.assertIn(md, vol_info,
+                          f"'{md}' key not present in metadata of volume")
+        units = [' ', 'k', 'M', 'G', 'T', 'P', 'E']
+        assert vol_info["pools"]["data"][0]["avail"][-1] in units, "unit suffix in avail data is absent"
+        assert vol_info["pools"]["data"][0]["used"][-1] in units, "unit suffix in used data is absent"
+        assert vol_info["pools"]["metadata"][0]["avail"][-1] in units, "unit suffix in avail metadata is absent"
+        assert vol_info["pools"]["metadata"][0]["used"][-1] in units, "unit suffix in used metadata is absent"
+        self.assertNotIn("used_size", vol_info,
+                         "'used_size' should not be present in absence of subvolumegroup")
+        self.assertNotIn("pending_subvolume_deletions", vol_info,
+                         "'pending_subvolume_deletions' should not be present in absence"
+                         " of subvolumegroup")
+

 class TestSubvolumeGroups(TestVolumesHelper):
     """Tests for FS subvolume group operations."""
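One subtlety in the first test: it asserts both that ``used_size`` ends in a unit suffix and that ``int(vol_info["used_size"])`` equals zero. Both can only hold if the formatter renders zero bytes as the digit plus the blank unit (which is what the ``' '`` entry in ``units`` implies), because ``int()`` tolerates surrounding whitespace. Note also that the tests pass the literal string ``"--human_readable"`` to ``_get_volume_info``, which both satisfies its truthiness check and lands in the argument list verbatim. A quick illustration of the ``int()`` behavior::

    int("   0 ")    # -> 0: int() ignores leading/trailing whitespace
    # int("99.0G")  # would raise ValueError; the assertion relies on the
    #               # volumes directory being empty, so the value is zero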
@@ -2,6 +2,7 @@ import json
 import errno
 import logging
 import os
+import mgr_util
 from typing import TYPE_CHECKING

 import cephfs
@@ -152,6 +153,7 @@ class VolumeClient(CephfsClient["Module"]):
     def volume_info(self, **kwargs):
         ret = None
         volname = kwargs['vol_name']
+        human_readable = kwargs['human_readable']

         try:
             with open_volume(self, volname) as fs_handle:
@@ -163,7 +165,10 @@ class VolumeClient(CephfsClient["Module"]):

                 usedbytes = st['size']
                 vol_info_dict = get_pending_subvol_deletions_count(path)
-                vol_info_dict['used_size'] = int(usedbytes)
+                if human_readable:
+                    vol_info_dict['used_size'] = mgr_util.format_bytes(int(usedbytes), 5)
+                else:
+                    vol_info_dict['used_size'] = int(usedbytes)
             except cephfs.Error as e:
                 if e.args[0] == errno.ENOENT:
                     pass
@@ -178,10 +183,16 @@ class VolumeClient(CephfsClient["Module"]):
                 pool_type = "metadata"
             else:
                 pool_type = "data"
-            vol_info_dict["pools"][pool_type].append({
-                'name': pools[pool_id]['pool_name'],
-                'used': pool_stats[pool_id]['bytes_used'],
-                'avail': pool_stats[pool_id]['max_avail']})
+            if human_readable:
+                vol_info_dict["pools"][pool_type].append({
+                    'name': pools[pool_id]['pool_name'],
+                    'used': mgr_util.format_bytes(pool_stats[pool_id]['bytes_used'], 5),
+                    'avail': mgr_util.format_bytes(pool_stats[pool_id]['max_avail'], 5)})
+            else:
+                vol_info_dict["pools"][pool_type].append({
+                    'name': pools[pool_id]['pool_name'],
+                    'used': pool_stats[pool_id]['bytes_used'],
+                    'avail': pool_stats[pool_id]['max_avail']})

         mon_addr_lst = []
         mon_map_mons = self.mgr.get('mon_map')['mons']
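``mgr_util.format_bytes`` is Ceph's own helper and its exact output is not reproduced here; as a hedged sketch of the behavior the tests earlier in this diff rely on (a 1024-based scale with a one-character suffix drawn from ``' ', 'k', 'M', 'G', 'T', 'P', 'E'``), an equivalent function might look like::

    def format_bytes_sketch(n: int, width: int) -> str:
        # Illustrative only -- not the real mgr_util.format_bytes. Scale the
        # byte count down by factors of 1024 and append the matching suffix.
        units = [' ', 'k', 'M', 'G', 'T', 'P', 'E']
        value, i = float(n), 0
        while value >= 1024 and i < len(units) - 1:
            value /= 1024.0
            i += 1
        text = (f"{int(value)}" if i == 0 else f"{value:.1f}") + units[i]
        return text.rjust(width)

    # format_bytes_sketch(106288709632, 5) -> "99.0G" (the sample pool's avail)
    # format_bytes_sketch(0, 5)            -> "   0 " (int() can still parse it)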
@@ -70,7 +70,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):
         },
         {
             'cmd': 'fs volume info '
-                   'name=vol_name,type=CephString ',
+                   'name=vol_name,type=CephString '
+                   'name=human_readable,type=CephBool,req=false ',
             'desc': "Get the information of a CephFS volume",
             'perm': 'r'
         },
@@ -555,7 +556,8 @@ class Module(orchestrator.OrchestratorClientMixin, MgrModule):

     @mgr_cmd_wrap
     def _cmd_fs_volume_info(self, inbuf, cmd):
-        return self.vc.volume_info(vol_name=cmd['vol_name'])
+        return self.vc.volume_info(vol_name=cmd['vol_name'],
+                                   human_readable=cmd.get('human_readable', False))

     @mgr_cmd_wrap
     def _cmd_fs_subvolumegroup_create(self, inbuf, cmd):