mirror of
https://github.com/ceph/ceph
synced 2025-01-03 01:22:53 +00:00
mds: fix the description for inotable testing only options
The description texts for mds_kill_skip_replaying_inotable and mds_inject_skip_replaying_inotable are mixed up. At the same time, rename "mds_kill_skip_replaying_inotable", which is a bit confusing, to "mds_kill_after_journal_logs_flushed". Fixes: https://tracker.ceph.com/issues/61660 Signed-off-by: Xiubo Li <xiubli@redhat.com>
This commit is contained in:
parent
4dee8b7928
commit
7df5714803
@ -236,6 +236,10 @@
|
||||
"mgr/prometheus/x/server_port" will be displayed instead of
|
||||
"mgr/prometheus/server_port". This matches the output of the non pretty-print
|
||||
formatted version of the command.
|
||||
* CEPHFS: MDS config option name "mds_kill_skip_replaying_inotable" is a bit
|
||||
confusing with "mds_inject_skip_replaying_inotable", therefore renaming it to
|
||||
"mds_kill_after_journal_logs_flushed"
|
||||
|
||||
|
||||
>=17.2.1
|
||||
|
||||
|
@ -56,7 +56,7 @@
|
||||
.. confval:: mds_kill_link_at
|
||||
.. confval:: mds_kill_rename_at
|
||||
.. confval:: mds_inject_skip_replaying_inotable
|
||||
.. confval:: mds_kill_skip_replaying_inotable
|
||||
.. confval:: mds_kill_after_journal_logs_flushed
|
||||
.. confval:: mds_wipe_sessions
|
||||
.. confval:: mds_wipe_ino_prealloc
|
||||
.. confval:: mds_skip_ino
|
||||
|
@ -632,7 +632,7 @@ class TestSkipReplayInoTable(CephFSTestCase):
|
||||
status = self.fs.status()
|
||||
rank0 = self.fs.get_rank(rank=0, status=status)
|
||||
|
||||
self.fs.mds_asok(['config', 'set', 'mds_kill_skip_replaying_inotable', "true"])
|
||||
self.fs.mds_asok(['config', 'set', 'mds_kill_after_journal_logs_flushed', "true"])
|
||||
# This will make the MDS crash, since we only have one MDS in the
|
||||
# cluster and without the "wait=False" it will stuck here forever.
|
||||
self.mount_a.run_shell(["mkdir", "test_alloc_ino/dir1"], wait=False)
|
||||
|
@ -1117,14 +1117,14 @@ options:
|
||||
default: false
|
||||
services:
|
||||
- mds
|
||||
- name: mds_kill_skip_replaying_inotable
|
||||
- name: mds_kill_after_journal_logs_flushed
|
||||
type: bool
|
||||
level: dev
|
||||
default: false
|
||||
services:
|
||||
- mds
|
||||
fmt_desc: Ceph will skip replaying the inotable when replaying the journal, and
|
||||
the premary MDS will crash, while the replacing MDS won't.
|
||||
fmt_desc: The primary MDS will crash just after the mknod/openc journal logs
|
||||
are flushed to the pool.
|
||||
(for testing only).
|
||||
with_legacy: true
|
||||
- name: mds_inject_skip_replaying_inotable
|
||||
@ -1133,8 +1133,7 @@ options:
|
||||
default: false
|
||||
services:
|
||||
- mds
|
||||
fmt_desc: Ceph will skip replaying the inotable when replaying the journal, and
|
||||
the premary MDS will crash, while the replacing MDS won't.
|
||||
fmt_desc: MDS will skip replaying the inotable when replaying the journal logs.
|
||||
(for testing only).
|
||||
with_legacy: true
|
||||
# percentage of MDS modify replies to skip sending the client a trace on [0-1]
|
||||
|
@ -4557,7 +4557,7 @@ public:
|
||||
ceph_assert(r == 0);
|
||||
|
||||
// crash current MDS and the replacing MDS will test the journal
|
||||
ceph_assert(!g_conf()->mds_kill_skip_replaying_inotable);
|
||||
ceph_assert(!g_conf()->mds_kill_after_journal_logs_flushed);
|
||||
|
||||
dn->pop_projected_linkage();
|
||||
|
||||
@ -6874,7 +6874,7 @@ public:
|
||||
ceph_assert(r == 0);
|
||||
|
||||
// crash current MDS and the replacing MDS will test the journal
|
||||
ceph_assert(!g_conf()->mds_kill_skip_replaying_inotable);
|
||||
ceph_assert(!g_conf()->mds_kill_after_journal_logs_flushed);
|
||||
|
||||
// link the inode
|
||||
dn->pop_projected_linkage();
|
||||
@ -7166,7 +7166,7 @@ void Server::handle_client_symlink(MDRequestRef& mdr)
|
||||
mds->balancer->maybe_fragment(dir, false);
|
||||
|
||||
// flush the journal as soon as possible
|
||||
if (g_conf()->mds_kill_skip_replaying_inotable) {
|
||||
if (g_conf()->mds_kill_after_journal_logs_flushed) {
|
||||
mdlog->flush();
|
||||
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user