mgr/volumes: Add subvolume v2 test cases

Added test cases for v2 subvolumes

Signed-off-by: Shyamsundar Ranganathan <srangana@redhat.com>
Shyamsundar Ranganathan 2020-07-14 17:41:54 -04:00
parent 0b80de9b6a
commit 63599e27f5


@@ -5,9 +5,12 @@ import errno
import random
import logging
import collections
import uuid
from hashlib import md5
from tasks.cephfs.cephfs_test_case import CephFSTestCase
from teuthology.exceptions import CommandFailedError
from teuthology.misc import sudo_write_file
log = logging.getLogger(__name__)
@@ -56,8 +59,15 @@ class TestVolumes(CephFSTestCase):
def _check_clone_canceled(self, clone, clone_group=None):
self.__check_clone_state("canceled", clone, clone_group, timo=1)
def _verify_clone_attrs(self, subvolume, clone, source_group=None, clone_group=None):
path1 = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
def _get_subvolume_snapshot_path(self, snapshot, subvol_path):
(base_path, uuid_str) = os.path.split(subvol_path)
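# snapshots live in the ".snap" dir beside the subvolume's uuid dir; e.g.
# (hypothetical names) subvol_path "/volumes/_nogroup/sv1/8fd6-uuid" with
# snapshot "snap1" yields "/volumes/_nogroup/sv1/.snap/snap1/8fd6-uuid"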
return os.path.join(base_path, ".snap", snapshot, uuid_str)
def _verify_clone_attrs(self, subvolume, clone, source_group=None, clone_group=None, snapshot=None, subvol_path=None):
if snapshot and subvol_path:
path1 = self._get_subvolume_snapshot_path(snapshot, subvol_path)
else:
path1 = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)
p = self.mount_a.run_shell(["find", path1])
@@ -92,12 +102,18 @@ class TestVolumes(CephFSTestCase):
cval = int(self.mount_a.run_shell(['stat', '-c' '%Y', sink_path]).stdout.getvalue().strip())
self.assertEqual(sval, cval)
def _verify_clone(self, subvolume, clone, source_group=None, clone_group=None, timo=120):
path1 = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
def _verify_clone(self, subvolume, clone, source_group=None, clone_group=None, snapshot=None, subvol_path=None, timo=120):
# pass in snapshot and subvol_path (subvolume path when snapshot was taken) when subvolume is removed
# but snapshots are retained for clone verification
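# e.g. (hypothetical names): _verify_clone("sv1", "clone1", snapshot="snap1",
#                                          subvol_path="/volumes/_nogroup/sv1/8fd6-uuid")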
if snapshot and subvol_path:
path1 = self._get_subvolume_snapshot_path(snapshot, subvol_path)
else:
path1 = self._get_subvolume_path(self.volname, subvolume, group_name=source_group)
path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group)
check = 0
while check < timo:
# TODO: rentries are currently not returned for snapshots, so skip the rentries
# comparison when the source subvolume has been removed (i.e. subvol_path is set)
while check < timo and subvol_path is None:
val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries"))
val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries"))
if val1 == val2:
@@ -106,7 +122,8 @@ class TestVolumes(CephFSTestCase):
time.sleep(1)
self.assertTrue(check < timo)
self._verify_clone_attrs(subvolume, clone, source_group=source_group, clone_group=clone_group)
self._verify_clone_attrs(subvolume, clone, source_group=source_group, clone_group=clone_group,
snapshot=snapshot, subvol_path=subvol_path)
def _generate_random_volume_name(self, count=1):
n = self.volume_start
@@ -226,6 +243,52 @@ class TestVolumes(CephFSTestCase):
trashdir = os.path.join("./", "volumes", "_deleting")
self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout)
def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False):
if legacy:
subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group)
m = md5()
m.update(("/"+subvol_path).encode('utf-8'))
meta_filename = "{0}.meta".format(m.digest().hex())
metapath = os.path.join(".", "volumes", "_legacy", meta_filename)
else:
group = subvol_group if subvol_group is not None else '_nogroup'
metapath = os.path.join(".", "volumes", group, subvol_name, ".meta")
out = self.mount_a.run_shell(['cat', metapath])
lines = out.stdout.getvalue().strip().split('\n')
sv_version = -1
for line in lines:
if line == "version = " + str(version):
sv_version = version
break
self.assertEqual(sv_version, version, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format(
version, sv_version, metapath))
def _create_v1_subvolume(self, subvol_name, subvol_group=None, has_snapshot=True, subvol_type='subvolume', state='complete'):
group = subvol_group if subvol_group is not None else '_nogroup'
basepath = os.path.join(".", "volumes", group, subvol_name)
uuid_str = str(uuid.uuid4())
createpath = os.path.join(basepath, uuid_str)
self.mount_a.run_shell(['mkdir', '-p', createpath])
# create a v1 snapshot, to prevent auto upgrades
if has_snapshot:
snappath = os.path.join(createpath, ".snap", "fake")
self.mount_a.run_shell(['mkdir', '-p', snappath])
# add required xattrs to subvolume
default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool")
self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool)
# create a v1 .meta file
meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, createpath, state)
if state == 'pending':
# add a fake clone source
meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n'
meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta")
sudo_write_file(self.mount_a.client_remote, meta_filepath1, meta_contents)
return createpath
def setUp(self):
super(TestVolumes, self).setUp()
self.volname = None
@@ -891,7 +954,7 @@ class TestVolumes(CephFSTestCase):
subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
"data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
"type", "uid", "features"]
"type", "uid", "features", "state"]
# create subvolume
subvolume = self._generate_random_subvolume_name()
@@ -899,17 +962,17 @@ class TestVolumes(CephFSTestCase):
# get subvolume metadata
subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
self.assertNotEqual(len(subvol_info), 0, "expected the 'fs subvolume info' command to list metadata of subvolume")
for md in subvol_md:
self.assertIn(md, subvol_info.keys(), "'{0}' key not present in metadata of subvolume".format(md))
self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set")
self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set")
self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty")
self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
self.assertEqual(len(subvol_info["features"]), 2,
msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
for feature in ['snapshot-clone', 'snapshot-autoprotect']:
self.assertEqual(len(subvol_info["features"]), 3,
msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
nsize = self.DEFAULT_FILE_SIZE*1024*1024
@@ -917,15 +980,17 @@ class TestVolumes(CephFSTestCase):
# get subvolume metadata after quota set
subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
self.assertNotEqual(len(subvol_info), 0, "expected the 'fs subvolume info' command to list metadata of subvolume")
for md in subvol_md:
self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set")
self.assertNotEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should not be set to infinite if quota is not set")
self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume")
self.assertEqual(subvol_info["state"], "complete", "expected state to be complete")
self.assertEqual(len(subvol_info["features"]), 2,
msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
for feature in ['snapshot-clone', 'snapshot-autoprotect']:
self.assertEqual(len(subvol_info["features"]), 3,
msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']:
self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
# remove subvolumes
@@ -1284,10 +1349,10 @@ class TestVolumes(CephFSTestCase):
tests the 'fs subvolume snapshot info' command
"""
snap_metadata = ["created_at", "data_pool", "has_pending_clones", "size"]
snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
snapshot, snap_missing = self._generate_random_snapshot_name(2)
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
@@ -1299,12 +1364,18 @@ class TestVolumes(CephFSTestCase):
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
self.assertNotEqual(len(snap_info), 0)
for md in snap_metadata:
if md not in snap_info:
raise RuntimeError("%s not present in the metadata of subvolume snapshot" % md)
for md in snap_md:
self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
self.assertEqual(snap_info["has_pending_clones"], "no")
# snapshot info for non-existent snapshot
try:
self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing)
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot")
else:
self.fail("expected snapshot info of non-existent snapshot to fail")
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
@@ -1581,11 +1652,12 @@ class TestVolumes(CephFSTestCase):
self.mgr_cluster.mgr_fail(mgr)
self.wait_until_evicted(sessions[0]['id'])
def test_subvolume_upgrade(self):
def test_subvolume_upgrade_legacy_to_v1(self):
"""
poor man's upgrade test -- rather than going through a full upgrade cycle,
emulate subvolumes by going through the wormhole and verify if they are
accessible.
further ensure that a legacy subvolume is not updated to v2.
"""
subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
group = self._generate_random_group_name()
@@ -1612,6 +1684,10 @@ class TestVolumes(CephFSTestCase):
self.assertEqual(createpath1[1:], subvolpath1)
self.assertEqual(createpath2[1:], subvolpath2)
# ensure metadata file is in legacy location, with required version v1
self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)
# remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
@@ -1622,6 +1698,200 @@ class TestVolumes(CephFSTestCase):
# remove group
self._fs_cmd("subvolumegroup", "rm", self.volname, group)
def test_subvolume_no_upgrade_v1_sanity(self):
"""
poor man's upgrade test -- theme continues...
This test ensures v1 subvolumes are retained as-is (due to a snapshot being present), and runs through
a series of operations on the v1 subvolume to ensure they work as expected.
"""
subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
"data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
"type", "uid", "features", "state"]
snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
clone1, clone2 = self._generate_random_clone_name(2)
# emulate a v1 subvolume -- in the default group
subvolume_path = self._create_v1_subvolume(subvolume)
# getpath
subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
self.assertEqual(subvolpath.rstrip(), subvolume_path)
# ls
subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
self.assertEqual(subvolumes[0]['name'], subvolume,
"subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
# info
subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
for md in subvol_md:
self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
self.assertEqual(subvol_info["state"], "complete",
msg="expected state to be 'complete', found '{0}".format(subvol_info["state"]))
self.assertEqual(len(subvol_info["features"]), 2,
msg="expected 1 feature, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
for feature in ['snapshot-clone', 'snapshot-autoprotect']:
self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
# resize
nsize = self.DEFAULT_FILE_SIZE*1024*1024
self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
for md in subvol_md:
self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
# create (idempotent)
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# TODO: do some IO (fails possibly due to permissions)
#self._do_subvolume_io(subvolume, number_of_files=64)
# snap-create
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
# clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1)
# check clone status
self._wait_for_clone_to_complete(clone1)
# ensure clone is v2
self._assert_meta_location_and_version(self.volname, clone1, version=2)
# clone (older snapshot)
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2)
# check clone status
self._wait_for_clone_to_complete(clone2)
# ensure clone is v2
self._assert_meta_location_and_version(self.volname, clone2, version=2)
# snap-info
snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
for md in snap_md:
self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
self.assertEqual(snap_info["has_pending_clones"], "no")
# snap-ls
subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
self.assertEqual(len(subvol_snapshots), 2, "snapshot ls count mismatch, expected '2', found {0}".format(len(subvol_snapshots)))
snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots]
for name in [snapshot, 'fake']:
self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name))
# snap-rm
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake")
# ensure the subvolume is still at version 1
self._assert_meta_location_and_version(self.volname, subvolume, version=1)
# rm
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
self._fs_cmd("subvolume", "rm", self.volname, clone1)
self._fs_cmd("subvolume", "rm", self.volname, clone2)
# verify trash dir is clean
self._wait_for_trash_empty()
def test_subvolume_no_upgrade_v1_to_v2(self):
"""
poor man's upgrade test -- theme continues...
ensure v1 to v2 upgrades are not done automatically due to various states of v1
"""
subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3)
group = self._generate_random_group_name()
# emulate a v1 subvolume -- in the default group
subvol1_path = self._create_v1_subvolume(subvolume1)
# emulate a v1 subvolume -- in a custom group
subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group)
# emulate a v1 subvolume -- in a clone pending state
self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending')
# this would attempt auto-upgrade on access, but fail to do so as snapshots exist
subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
self.assertEqual(subvolpath1.rstrip(), subvol1_path)
subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
self.assertEqual(subvolpath2.rstrip(), subvol2_path)
# this would attempt auto-upgrade on access, but fail to do so as volume is not complete
# use clone status, as only certain operations are allowed in pending state
status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3))
self.assertEqual(status["status"]["state"], "pending")
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake")
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group)
# ensure metadata file is in v1 location, with version retained as v1
self._assert_meta_location_and_version(self.volname, subvolume1, version=1)
self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1)
# remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
try:
self._fs_cmd("subvolume", "rm", self.volname, subvolume3)
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone")
else:
self.fail("expected rm of subvolume undergoing clone to fail")
# ensure metadata file is in v1 location, with version retained as v1
self._assert_meta_location_and_version(self.volname, subvolume3, version=1)
self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force")
# verify list subvolumes returns an empty list
subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
self.assertEqual(len(subvolumels), 0)
# verify trash dir is clean
self._wait_for_trash_empty()
def test_subvolume_upgrade_v1_to_v2(self):
"""
poor man's upgrade test -- theme continues...
ensure v1 to v2 upgrades work
"""
subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
group = self._generate_random_group_name()
# emulate a v1 subvolume -- in the default group
subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False)
# emulate a v1 subvolume -- in a custom group
subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False)
# this would attempt auto-upgrade on access
subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
self.assertEqual(subvolpath1.rstrip(), subvol1_path)
subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
self.assertEqual(subvolpath2.rstrip(), subvol2_path)
# ensure metadata file is in v2 location, with version retained as v2
self._assert_meta_location_and_version(self.volname, subvolume1, version=2)
self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2)
# remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
# verify trash dir is clean
self._wait_for_trash_empty()
def test_subvolume_rm_with_snapshots(self):
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
@@ -1650,6 +1920,405 @@ class TestVolumes(CephFSTestCase):
# verify trash dir is clean
self._wait_for_trash_empty()
def test_subvolume_retain_snapshot_without_snapshots(self):
"""
ensure snapshot retention based delete of a subvolume with no snapshots deletes the subvolume
"""
subvolume = self._generate_random_subvolume_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# remove with snapshot retention (should remove volume, no snapshots to retain)
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
# verify list subvolumes returns an empty list
subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
self.assertEqual(len(subvolumels), 0)
# verify trash dir is clean
self._wait_for_trash_empty()
def test_subvolume_retain_snapshot_with_snapshots(self):
"""
ensure snapshot retention based delete of a subvolume with snapshots retains the subvolume;
also test allowed and disallowed operations on a retained subvolume
"""
snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
# remove subvolume -- should fail with ENOTEMPTY since it has snapshots
try:
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of retained subvolume with snapshots")
else:
self.fail("expected rm of subvolume with retained snapshots to fail")
# remove with snapshot retention
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
# fetch info
subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
self.assertEqual(subvol_info["state"], "snapshot-retained",
msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
## test allowed ops in retained state
# ls
subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
self.assertEqual(subvolumes[0]['name'], subvolume,
"subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
# snapshot info
snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot))
for md in snap_md:
self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
self.assertEqual(snap_info["has_pending_clones"], "no")
# rm --force (allowed but should fail)
try:
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
else:
self.fail("expected rm of subvolume with retained snapshots to fail")
# rm (allowed but should fail)
try:
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots")
else:
self.fail("expected rm of subvolume with retained snapshots to fail")
## test disallowed ops
# getpath
try:
self._fs_cmd("subvolume", "getpath", self.volname, subvolume)
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots")
else:
self.fail("expected getpath of subvolume with retained snapshots to fail")
# resize
nsize = self.DEFAULT_FILE_SIZE*1024*1024
try:
self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on resize of subvolume with retained snapshots")
else:
self.fail("expected resize of subvolume with retained snapshots to fail")
# snap-create
try:
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, "fail")
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot create of subvolume with retained snapshots")
else:
self.fail("expected snapshot create of subvolume with retained snapshots to fail")
# remove snapshot (should remove volume)
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
# verify list subvolumes returns an empty list
subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
self.assertEqual(len(subvolumels), 0)
# verify trash dir is clean
self._wait_for_trash_empty()
def test_subvolume_retain_snapshot_recreate_subvolume(self):
"""
ensure a retained subvolume can be recreated and further snapshotted
"""
snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
subvolume = self._generate_random_subvolume_name()
snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
# remove with snapshot retention
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
# fetch info
subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
self.assertEqual(subvol_info["state"], "snapshot-retained",
msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
# recreate retained subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# fetch info
subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
self.assertEqual(subvol_info["state"], "complete",
msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"]))
# snapshot info (older snapshot)
snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
for md in snap_md:
self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
self.assertEqual(snap_info["has_pending_clones"], "no")
# snap-create (new snapshot)
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
# remove with retain snapshots
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
# list snapshots
subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
" created subvolume snapshots")
snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls]
for snap in [snapshot1, snapshot2]:
self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
# remove snapshots (should remove volume)
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
# verify list subvolumes returns an empty list
subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
self.assertEqual(len(subvolumels), 0)
# verify trash dir is clean
self._wait_for_trash_empty()
def test_subvolume_retain_snapshot_clone(self):
"""
clone a snapshot from a snapshot retained subvolume
"""
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
clone = self._generate_random_clone_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# store path for clone verification
subvol_path = self._get_subvolume_path(self.volname, subvolume)
# do some IO
self._do_subvolume_io(subvolume, number_of_files=16)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
# remove with snapshot retention
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
# clone retained subvolume snapshot
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
# check clone status
self._wait_for_clone_to_complete(clone)
# verify clone
self._verify_clone(subvolume, clone, snapshot=snapshot, subvol_path=subvol_path)
# remove snapshots (removes retained volume)
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
# remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, clone)
# verify list subvolumes returns an empty list
subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
self.assertEqual(len(subvolumels), 0)
# verify trash dir is clean
self._wait_for_trash_empty()
def test_subvolume_retain_snapshot_recreate(self):
"""
recreate a subvolume from one of its retained snapshots
"""
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# store path for clone verification
subvol_path = self._get_subvolume_path(self.volname, subvolume)
# do some IO
self._do_subvolume_io(subvolume, number_of_files=16)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
# remove with snapshot retention
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
# recreate retained subvolume using its own snapshot to clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume)
# check clone status
self._wait_for_clone_to_complete(subvolume)
# verify clone
self._verify_clone(subvolume, subvolume, snapshot=snapshot, subvol_path=subvol_path)
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
# remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
# verify list subvolumes returns an empty list
subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
self.assertEqual(len(subvolumels), 0)
# verify trash dir is clean
self._wait_for_trash_empty()
def test_subvolume_clone_retain_snapshot_with_snapshots(self):
"""
retain snapshots of a cloned subvolume and check disallowed operations
"""
subvolume = self._generate_random_subvolume_name()
snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
clone = self._generate_random_clone_name()
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# store path for clone verification
subvol1_path = self._get_subvolume_path(self.volname, subvolume)
# do some IO
self._do_subvolume_io(subvolume, number_of_files=16)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
# remove with snapshot retention
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
# clone retained subvolume snapshot
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone)
# check clone status
self._wait_for_clone_to_complete(clone)
# verify clone
self._verify_clone(subvolume, clone, snapshot=snapshot1, subvol_path=subvol1_path)
# create a snapshot on the clone
self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2)
# remove the clone, retaining its snapshot
self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
# list snapshots
clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone))
self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the"
" created subvolume snapshots")
snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls]
for snap in [snapshot2]:
self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap))
## check disallowed operations on retained clone
# clone-status
try:
self._fs_cmd("clone", "status", self.volname, clone)
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots")
else:
self.fail("expected clone status of clone with retained snapshots to fail")
# clone-cancel
try:
self._fs_cmd("clone", "cancel", self.volname, clone)
except CommandFailedError as ce:
self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots")
else:
self.fail("expected clone cancel of clone with retained snapshots to fail")
# remove snapshots (removes subvolumes as all are in retained state)
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2)
# verify list subvolumes returns an empty list
subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
self.assertEqual(len(subvolumels), 0)
# verify trash dir is clean
self._wait_for_trash_empty()
def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self):
"""
clone a subvolume from recreated subvolume's latest snapshot
"""
subvolume = self._generate_random_subvolume_name()
snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
clone = self._generate_random_clone_name(1)
# create subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# do some IO
self._do_subvolume_io(subvolume, number_of_files=16)
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
# remove with snapshot retention
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
# recreate subvolume
self._fs_cmd("subvolume", "create", self.volname, subvolume)
# get and store path for clone verification
subvol2_path = self._get_subvolume_path(self.volname, subvolume)
# do some IO
self._do_subvolume_io(subvolume, number_of_files=16)
# snapshot newer subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
# remove with snapshot retention
self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
# clone retained subvolume's newer snapshot
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone)
# check clone status
self._wait_for_clone_to_complete(clone)
# verify clone
self._verify_clone(subvolume, clone, snapshot=snapshot2, subvol_path=subvol2_path)
# remove snapshot
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1)
self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2)
# remove subvolume
self._fs_cmd("subvolume", "rm", self.volname, clone)
# verify list subvolumes returns an empty list
subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
self.assertEqual(len(subvolumels), 0)
# verify trash dir is clean
self._wait_for_trash_empty()
def test_subvolume_snapshot_protect_unprotect_sanity(self):
"""
Snapshot protect/unprotect commands are deprecated. This test exists to ensure that
@@ -1985,6 +2654,7 @@ class TestVolumes(CephFSTestCase):
yet another poor man's upgrade test -- rather than going through a full
upgrade cycle, emulate old types subvolumes by going through the wormhole
and verify clone operation.
further ensure that a legacy subvolume is not updated to v2, but the clone is.
"""
subvolume = self._generate_random_subvolume_name()
snapshot = self._generate_random_snapshot_name()
@@ -2000,6 +2670,9 @@ class TestVolumes(CephFSTestCase):
# snapshot subvolume
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot)
# ensure metadata file is in legacy location, with required version v1
self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True)
# schedule a clone
self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
@@ -2020,6 +2693,9 @@ class TestVolumes(CephFSTestCase):
# verify clone
self._verify_clone(subvolume, clone)
# ensure metadata file is in v2 location, with required version v2
self._assert_meta_location_and_version(self.volname, clone)
# remove subvolumes
self._fs_cmd("subvolume", "rm", self.volname, subvolume)
self._fs_cmd("subvolume", "rm", self.volname, clone)