Mirror of https://github.com/ceph/ceph
Merge pull request #14575 from jcsp/wip-cephfs-suites

qa: Tidy up fs/ suite

Reviewed-by: Yan, Zheng <zyan@redhat.com>
commit 6fad8ccc24

qa/fs/.gitignore (vendored)
@@ -1 +0,0 @@
-/test_o_trunc
@@ -1,7 +0,0 @@
-overrides:
-  ceph:
-    fs: btrfs
-    conf:
-      osd:
-        osd sloppy crc: true
-        osd op thread timeout: 60
@@ -1,7 +0,0 @@
-overrides:
-  ceph:
-    fs: ext4
-    conf:
-      global:
-        osd max object name len: 460
-        osd max object namespace len: 64
@@ -1,6 +0,0 @@
-overrides:
-  ceph:
-    fs: xfs
-    conf:
-      osd:
-        osd sloppy crc: true
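The three deleted fragments above (btrfs, ext4, xfs) are teuthology `overrides` snippets: when a suite run is built, each fragment's `overrides` section is folded into the job's ceph configuration. As a rough illustration of that merge — the `deep_merge` helper and the `job` dict below are hypothetical stand-ins, not teuthology's actual API — the btrfs fragment would have contributed its `fs` and `conf` keys like this:

# Illustrative sketch only; requires PyYAML. Shows how an "overrides"
# fragment is deep-merged into a job's ceph configuration.
import yaml


def deep_merge(base, override):
    """Recursively merge `override` into `base`, merging dicts key by key."""
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            deep_merge(base[key], value)
        else:
            base[key] = value
    return base


fragment = yaml.safe_load("""
overrides:
  ceph:
    fs: btrfs
    conf:
      osd:
        osd sloppy crc: true
        osd op thread timeout: 60
""")

# Hypothetical starting job config; the fragment's keys are merged into it.
job = {"ceph": {"conf": {"osd": {"osd op complaint time": 180}}}}
deep_merge(job, fragment["overrides"])
print(yaml.safe_dump(job, default_flow_style=False))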
qa/objectstore_cephfs/bluestore.yaml (symbolic link)
@@ -0,0 +1 @@
+../objectstore/bluestore.yaml
qa/objectstore_cephfs/filestore-xfs.yaml (symbolic link)
@@ -0,0 +1 @@
+../objectstore/filestore-xfs.yaml
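The two new symlinks above create a qa/objectstore_cephfs facet directory that re-exports fragments from the generic qa/objectstore directory; the suites below are then retargeted at it, so fs runs only fan out across the backends listed there. A simplified sketch of that fan-out convention, assuming the paths from this diff (the `facet_fragments` helper is purely illustrative, not teuthology's implementation):

# Each *.yaml entry in a facet directory -- including symlinks such as
# bluestore.yaml -> ../objectstore/bluestore.yaml -- contributes one
# variant to the suite's job matrix.
import os


def facet_fragments(facet_dir):
    """Return (variant name, resolved fragment path) for each YAML entry."""
    fragments = []
    for name in sorted(os.listdir(facet_dir)):
        if not name.endswith(".yaml"):
            continue
        path = os.path.join(facet_dir, name)
        # realpath follows the symlink back to the shared fragment.
        fragments.append((name, os.path.realpath(path)))
    return fragments


if __name__ == "__main__":
    # Assumes a checkout containing the qa/objectstore_cephfs directory.
    for variant, path in facet_fragments("qa/objectstore_cephfs"):
        print(variant, "->", path)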
@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs
@@ -1 +0,0 @@
-../../../objectstore
qa/suites/fs/basic_functional/objectstore/bluestore.yaml (symbolic link)
@@ -0,0 +1 @@
+../../../../objectstore/bluestore.yaml
qa/suites/fs/basic_workload/objectstore (symbolic link)
@@ -0,0 +1 @@
+../../../objectstore_cephfs
@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs

@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs

@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs
@@ -1 +0,0 @@
-../../../objectstore
@@ -1,3 +0,0 @@
-overrides:
-  ceph-fuse:
-    disabled: true
@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs

@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs

@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs

@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs

@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs

@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs

@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs

@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs
@@ -1 +0,0 @@
-../../../fs/xfs.yaml
@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs
@@ -1 +1 @@
-../../fs/basic/begin.yaml
+../../fs/basic_workload/begin.yaml
@@ -1 +1 @@
-../../fs/basic/inline/
+../../fs/basic_workload/inline
@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs
@@ -1 +1 @@
-../../../fs/basic/overrides/
+../../../fs/basic_workload/overrides
@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs

@@ -1 +1 @@
-../../../objectstore
+../../../objectstore_cephfs
@@ -88,27 +88,16 @@ class CephFSTestCase(CephTestCase):
                 # here for hours waiting for the test to fill up a 1TB drive!
                 raise case.SkipTest("Require `memstore` OSD backend to simulate full drives")

-        # Unmount all surplus clients
-        for i in range(self.CLIENTS_REQUIRED, len(self.mounts)):
-            mount = self.mounts[i]
-            log.info("Unmounting unneeded client {0}".format(mount.client_id))
-            mount.umount_wait()
-
         # Create friendly mount_a, mount_b attrs
         for i in range(0, self.CLIENTS_REQUIRED):
             setattr(self, "mount_{0}".format(chr(ord('a') + i)), self.mounts[i])

         self.mds_cluster.clear_firewall()

-        # Unmount in order to start each test on a fresh mount, such
-        # that test_barrier can have a firm expectation of what OSD
-        # epoch the clients start with.
-        if self.mount_a.is_mounted():
-            self.mount_a.umount_wait()
-
-        if self.mount_b:
-            if self.mount_b.is_mounted():
-                self.mount_b.umount_wait()
+        # Unmount all clients, we are about to blow away the filesystem
+        for mount in self.mounts:
+            if mount.is_mounted():
+                mount.umount_wait(force=True)

         # To avoid any issues with e.g. unlink bugs, we destroy and recreate
         # the filesystem rather than just doing a rm -rf of files
@@ -161,14 +150,11 @@ class CephFSTestCase(CephTestCase):

             # wait for mds restart to complete...
             self.fs.wait_for_daemons()
-            if not self.mount_a.is_mounted():
-                self.mount_a.mount()
-                self.mount_a.wait_until_mounted()
-
-            if self.mount_b:
-                if not self.mount_b.is_mounted():
-                    self.mount_b.mount()
-                    self.mount_b.wait_until_mounted()
+
+        # Mount the requested number of clients
+        for i in range(0, self.CLIENTS_REQUIRED):
+            self.mounts[i].mount()
+            self.mounts[i].wait_until_mounted()

         # Load an config settings of interest
         for setting in self.LOAD_SETTINGS:
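Taken together, the two hunks above replace the hard-coded mount_a/mount_b handling in CephFSTestCase with loops over self.mounts and CLIENTS_REQUIRED: every client is force-unmounted up front, and only the requested number is mounted again. A self-contained sketch of that pattern, using a FakeMount stand-in rather than the real teuthology mount objects (only the control flow mirrors the patched test case):

# Minimal sketch of the unmount-all-then-mount-required setup pattern.
class FakeMount(object):
    def __init__(self, client_id):
        self.client_id = client_id
        self._mounted = False

    def is_mounted(self):
        return self._mounted

    def umount_wait(self, force=False):
        self._mounted = False

    def mount(self):
        self._mounted = True

    def wait_until_mounted(self):
        assert self._mounted


class FakeTestCase(object):
    CLIENTS_REQUIRED = 2

    def __init__(self, mounts):
        self.mounts = mounts

    def setup(self):
        # Unmount all clients, we are about to blow away the filesystem
        for mount in self.mounts:
            if mount.is_mounted():
                mount.umount_wait(force=True)

        # Create friendly mount_a, mount_b attrs
        for i in range(0, self.CLIENTS_REQUIRED):
            setattr(self, "mount_{0}".format(chr(ord('a') + i)), self.mounts[i])

        # Mount the requested number of clients
        for i in range(0, self.CLIENTS_REQUIRED):
            self.mounts[i].mount()
            self.mounts[i].wait_until_mounted()


case = FakeTestCase([FakeMount("client.{0}".format(i)) for i in range(3)])
case.setup()
print(case.mount_a.client_id, case.mount_b.client_id)  # client.0 client.1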