Merge pull request #512 from ceph/wip-pool-perm-fixes

Fixes to test_pool_perm

Reviewed-by: Greg Farnum <gfarnum@redhat.com>
Gregory Farnum 2015-07-28 14:45:44 +01:00
commit 961701e3ed
2 changed files with 21 additions and 4 deletions
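The changes below make test_pool_perm clean up after itself: CephFSTestCase now resets every mounted client's auth caps during setup, and the test derives the client name and data pool name instead of hard-coding client.0 and pool=data, restoring read/write caps in a new tearDown. The raw_cluster_cmd_result('auth', 'caps', ...) calls used throughout correspond, roughly, to running the ceph CLI against the cluster; for example (client id and pool name are illustrative placeholders, not values from this diff):

    ceph auth caps client.0 mon 'allow r' osd 'allow rw pool=<data pool>'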

tasks/cephfs/cephfs_test_case.py

@@ -104,6 +104,12 @@ class CephFSTestCase(unittest.TestCase):
         for addr, blacklisted_at in blacklist.items():
             self.fs.mon_manager.raw_cluster_cmd("osd", "blacklist", "rm", addr)
 
+        # In case some test messed with auth caps, reset them
+        for mount in self.mounts:
+            self.fs.mon_manager.raw_cluster_cmd_result('auth', 'caps', "client.{0}".format(mount.client_id),
+                                                        'mon', 'allow r', 'osd',
+                                                        'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
+
         self.fs.mds_restart()
         self.fs.wait_for_daemons()
         if not self.mount_a.is_mounted():

tasks/cephfs/test_pool_perm.py

@@ -1,13 +1,13 @@
 from textwrap import dedent
 from tasks.cephfs.cephfs_test_case import CephFSTestCase
 import os
 
 
 class TestPoolPerm(CephFSTestCase):
     def test_pool_perm(self):
         self.mount_a.run_shell(["touch", "test_file"])
 
-        file_path = os.path.join(self.mount_a.mountpoint, "test_file")
+        file_path = os.path.join(self.mount_a.mountpoint, "test_file")
 
         remote_script = dedent("""
             import os
@@ -26,8 +26,11 @@ class TestPoolPerm(CephFSTestCase):
                 raise RuntimeError("client does not check permission of data pool")
         """)
 
+        client_name = "client.{0}".format(self.mount_a.client_id)
+
         # set data pool read only
-        self.fs.mon_manager.raw_cluster_cmd_result('auth', 'caps', 'client.0', 'mon', 'allow r', 'osd', 'allow r pool=data')
+        self.fs.mon_manager.raw_cluster_cmd_result('auth', 'caps', client_name, 'mon', 'allow r', 'osd',
+                                                    'allow r pool={0}'.format(self.fs.get_data_pool_name()))
         self.mount_a.umount_wait()
         self.mount_a.mount()
@@ -37,7 +40,8 @@ class TestPoolPerm(CephFSTestCase):
 
         self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(False)))
 
         # set data pool write only
-        self.fs.mon_manager.raw_cluster_cmd_result('auth', 'caps', 'client.0', 'mon', 'allow r', 'osd', 'allow w pool=data')
+        self.fs.mon_manager.raw_cluster_cmd_result('auth', 'caps', client_name, 'mon', 'allow r', 'osd',
+                                                    'allow w pool={0}'.format(self.fs.get_data_pool_name()))
         self.mount_a.umount_wait()
         self.mount_a.mount()
@@ -45,3 +49,10 @@ class TestPoolPerm(CephFSTestCase):
 
         # read should fail
         self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(True)))
+
+    def tearDown(self):
+        self.fs.mon_manager.raw_cluster_cmd_result('auth', 'caps', "client.{0}".format(self.mount_a.client_id),
+                                                    'mon', 'allow r', 'osd',
+                                                    'allow rw pool={0}'.format(self.fs.get_data_pool_name()))
+
+        super(CephFSTestCase, self).tearDown()
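The body of remote_script lies mostly outside these hunks; only its parameters (path, check_read) and its failure message are visible above. A minimal sketch of the kind of check it performs, with hypothetical values standing in for what the test passes via remote_script.format(...), might look like the following (an illustration, not the actual script):

    import os

    # Hypothetical stand-ins for remote_script.format(path=..., check_read=...)
    path = "/path/to/mountpoint/test_file"
    check_read = False

    if check_read:
        # Data pool caps are write-only at this point: reading should fail.
        fd = os.open(path, os.O_RDONLY)
        try:
            os.read(fd, 1024)
        except OSError:
            pass
        else:
            raise RuntimeError("client does not check permission of data pool")
        finally:
            os.close(fd)
    else:
        # Data pool caps are read-only at this point: writing should fail.
        fd = os.open(path, os.O_WRONLY)
        try:
            os.write(fd, b"some data")
            os.fsync(fd)
        except OSError:
            pass
        else:
            raise RuntimeError("client does not check permission of data pool")
        finally:
            os.close(fd)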