From 67ca6cd229a595d54ccea18b5452f2574ede9657 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Tue, 12 Dec 2017 11:09:19 -0800
Subject: [PATCH] mds: obsolete MDSMap option configs

These configs were used for initialization, but it is more appropriate to
require setting these file system attributes via `ceph fs set`. This is
similar to what was already done with max_mds. New `fs set` variables have
been added where they were missing.

Signed-off-by: Patrick Donnelly
---
 PendingReleaseNotes                     |  5 ++++
 doc/cephfs/dirfrags.rst                 |  7 +++--
 doc/cephfs/eviction.rst                 |  6 ++---
 doc/cephfs/health-messages.rst          |  2 +-
 doc/cephfs/mds-config-ref.rst           | 34 -------------------------
 qa/tasks/cephfs/filesystem.py           | 13 +++++++---
 qa/tasks/cephfs/test_client_limits.py   |  6 ++---
 qa/tasks/cephfs/test_client_recovery.py | 24 +++++++++--------
 qa/tasks/cephfs/test_fragment.py        |  1 -
 qa/tasks/cephfs/test_misc.py            |  9 +++----
 qa/tasks/cephfs/test_strays.py          |  2 +-
 src/common/legacy_config_opts.h         |  6 +----
 src/common/options.cc                   | 16 ------------
 src/mds/FSMap.cc                        |  8 ------
 src/mds/Locker.cc                       | 10 ++++----
 src/mds/MDBalancer.cc                   |  2 +-
 src/mds/MDSMap.h                        | 14 +++++++---
 src/mds/Server.cc                       |  4 +--
 src/mon/FSCommands.cc                   | 30 ++++++++++++++++++++++
 src/mon/MonCommands.h                   |  4 +--
 20 files changed, 95 insertions(+), 108 deletions(-)

diff --git a/PendingReleaseNotes b/PendingReleaseNotes
index 64c23ff1b4e..e5e6c2cd63a 100644
--- a/PendingReleaseNotes
+++ b/PendingReleaseNotes
@@ -20,6 +20,11 @@
   - mds remove_data_pool -> fs rm_data_pool
   - mds rm_data_pool -> fs rm_data_pool
 
+* New CephFS file system attributes session_timeout and session_autoclose
+  are configurable via `ceph fs set`. The MDS config options
+  mds_session_timeout, mds_session_autoclose, and mds_max_file_size are now
+  obsolete.
+
 >= 12.2.2
 ---------
 
diff --git a/doc/cephfs/dirfrags.rst b/doc/cephfs/dirfrags.rst
index 717553fea9a..24b05edfc26 100644
--- a/doc/cephfs/dirfrags.rst
+++ b/doc/cephfs/dirfrags.rst
@@ -25,10 +25,9 @@ fragments may be *merged* to reduce the number of fragments in the directory.
 Splitting and merging
 =====================
 
-An MDS will only consider doing splits and merges if the ``mds_bal_frag``
-setting is true in the MDS's configuration file, and the allow_dirfrags
-setting is true in the filesystem map (set on the mons).  These settings
-are both true by default since the *Luminous* (12.2.x) release of Ceph.
+An MDS will only consider doing splits if the allow_dirfrags setting is true in
+the file system map (set on the mons). This setting is true by default since
+the *Luminous* release (12.2.X).
 
 When an MDS identifies a directory fragment to be split, it does not
 do the split immediately.  Because splitting interrupts metadata IO,
diff --git a/doc/cephfs/eviction.rst b/doc/cephfs/eviction.rst
index 3c130b0f596..f0a129d4596 100644
--- a/doc/cephfs/eviction.rst
+++ b/doc/cephfs/eviction.rst
@@ -23,9 +23,9 @@ Automatic client eviction
 
 There are two situations in which a client may be evicted automatically:
 
-On an active MDS daemon, if a client has not communicated with the MDS for
-over ``mds_session_autoclose`` seconds (300 seconds by default), then it
-will be evicted automatically.
+On an active MDS daemon, if a client has not communicated with the MDS for over
+``session_autoclose`` (a file system variable) seconds (300 seconds by
+default), then it will be evicted automatically.
 
 During MDS startup (including on failover), the MDS passes through a
 state called ``reconnect``.  During this state, it waits for all the
diff --git a/doc/cephfs/health-messages.rst b/doc/cephfs/health-messages.rst
index 057eea491ad..b2ebe4b1822 100644
--- a/doc/cephfs/health-messages.rst
+++ b/doc/cephfs/health-messages.rst
@@ -69,7 +69,7 @@ are like locks. Sometimes, for example when another client needs access, the
 MDS will request clients release their capabilities. If the client is
 unresponsive or buggy, it might fail to do so promptly or fail to do so at
 all. This message appears if a client has taken longer than
-``mds_session_timeout`` (default 60s) to comply.
+``session_timeout`` (default 60s) to comply.
 
 Message: "Client *name* failing to respond to cache pressure"
 Code: MDS_HEALTH_CLIENT_RECALL, MDS_HEALTH_CLIENT_RECALL_MANY
diff --git a/doc/cephfs/mds-config-ref.rst b/doc/cephfs/mds-config-ref.rst
index 3a95686d1e9..2f7eb3758a8 100644
--- a/doc/cephfs/mds-config-ref.rst
+++ b/doc/cephfs/mds-config-ref.rst
@@ -10,15 +10,6 @@
 :Type:  Boolean
 :Default: ``true``
 
-
-``mds max file size``
-
-:Description: The maximum allowed file size to set when creating a
-              new file system.
-
-:Type:  64-bit Integer Unsigned
-:Default:  ``1ULL << 40``
-
 ``mds cache memory limit``
 
 :Description: The memory limit the MDS should enforce for its cache.
@@ -107,24 +98,6 @@
 :Default: ``24.0*60.0``
 
 
-``mds session timeout``
-
-:Description: The interval (in seconds) of client inactivity before Ceph
-              times out capabilities and leases.
-
-:Type:  Float
-:Default: ``60``
-
-
-``mds session autoclose``
-
-:Description: The interval (in seconds) before Ceph closes
-              a laggy client's session.
-
-:Type:  Float
-:Default: ``300``
-
-
 ``mds reconnect timeout``
 
 :Description: The interval (in seconds) to wait for clients to reconnect
@@ -254,13 +227,6 @@
 :Default: ``0``
 
 
-``mds bal frag``
-
-:Description: Determines whether the MDS will fragment directories.
-
-:Type:  Boolean
-:Default: ``false``
-
 ``mds bal split size``
 
 :Description: The maximum directory size before the MDS will split a directory
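
The documentation changes above all describe the same operator-facing workflow:
these attributes are now stored in the file system map and set through the
monitors rather than in mds config. A minimal sketch of that workflow, assuming
a running cluster and a placeholder file system name ``cephfs_a`` (the 120s and
600s values are examples only, not recommendations)::

    #!/usr/bin/env python
    # Sketch only: drive the new file system attributes through the ceph CLI.
    import json
    import subprocess

    fs_name = "cephfs_a"   # placeholder file system name

    # Raise the client session timeout from the 60s default to 120s, and the
    # idle-session autoclose interval from the 300s default to 600s.
    subprocess.check_call(["ceph", "fs", "set", fs_name, "session_timeout", "120"])
    subprocess.check_call(["ceph", "fs", "set", fs_name, "session_autoclose", "600"])

    # Both values now live in the MDSMap, not in mds config; `ceph fs get`
    # reports the MDSMap as JSON.
    fs_map = json.loads(subprocess.check_output(
        ["ceph", "fs", "get", fs_name, "--format=json"]))
    print(fs_map["mdsmap"]["session_timeout"], fs_map["mdsmap"]["session_autoclose"])
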
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index fce7931696f..99d26450864 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -437,14 +437,18 @@ class Filesystem(MDSCluster):
             raise RuntimeError("cannot deactivate rank 0")
         self.mon_manager.raw_cluster_cmd("mds", "deactivate", "%d:%d" % (self.id, rank))
 
+    def set_var(self, var, *args):
+        a = map(str, args)
+        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, var, *a)
+
     def set_max_mds(self, max_mds):
-        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "max_mds", "%d" % max_mds)
+        self.set_var("max_mds", "%d" % max_mds)
 
     def set_allow_dirfrags(self, yes):
-        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_dirfrags", str(yes).lower(), '--yes-i-really-mean-it')
+        self.set_var("allow_dirfrags", str(yes).lower(), '--yes-i-really-mean-it')
 
     def set_allow_new_snaps(self, yes):
-        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_new_snaps", str(yes).lower(), '--yes-i-really-mean-it')
+        self.set_var("allow_new_snaps", str(yes).lower(), '--yes-i-really-mean-it')
 
     def get_pgs_per_fs_pool(self):
         """
@@ -559,6 +563,9 @@ class Filesystem(MDSCluster):
     def get_mds_map(self):
         return self.status().get_fsmap(self.id)['mdsmap']
 
+    def get_var(self, var):
+        return self.status().get_fsmap(self.id)['mdsmap'][var]
+
     def add_data_pool(self, name):
         self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.get_pgs_per_fs_pool().__str__())
         self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name)
diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py
index b06d5123d8f..1f1d5467079 100644
--- a/qa/tasks/cephfs/test_client_limits.py
+++ b/qa/tasks/cephfs/test_client_limits.py
@@ -134,10 +134,10 @@ class TestClientLimits(CephFSTestCase):
         # Client B tries to stat the file that client A created
         rproc = self.mount_b.write_background("file1")
 
-        # After mds_session_timeout, we should see a health warning (extra lag from
+        # After session_timeout, we should see a health warning (extra lag from
         # MDS beacon period)
-        mds_session_timeout = float(self.fs.get_config("mds_session_timeout"))
-        self.wait_for_health("MDS_CLIENT_LATE_RELEASE", mds_session_timeout + 10)
+        session_timeout = self.fs.get_var("session_timeout")
+        self.wait_for_health("MDS_CLIENT_LATE_RELEASE", session_timeout + 10)
 
         # Client B should still be stuck
         self.assertFalse(rproc.finished)
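
The new ``set_var``/``get_var`` helpers above give tests a single path for
writing an attribute through ``ceph fs set`` and reading it back out of the
MDSMap. A hypothetical test built on them might look like this; the class name,
test name, and the 120-second value are illustrative, not part of this patch::

    from tasks.cephfs.cephfs_test_case import CephFSTestCase

    class TestSessionTimeoutVar(CephFSTestCase):
        # Hypothetical test, for illustration only.
        CLIENTS_REQUIRED = 1

        def test_set_and_read_back(self):
            # Write the attribute through `ceph fs set` ...
            self.fs.set_var("session_timeout", "120")
            # ... and read it back out of the MDSMap.
            self.assertEqual(self.fs.get_var("session_timeout"), 120)
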
""" + session_timeout = self.fs.get_var("session_timeout") + # We only need one client self.mount_b.umount_wait() @@ -65,7 +66,7 @@ class TestClientNetworkRecovery(CephFSTestCase): # ...then it should block self.assertFalse(write_blocked.finished) self.assert_session_state(client_id, "open") - time.sleep(self.mds_session_timeout * 1.5) # Long enough for MDS to consider session stale + time.sleep(session_timeout * 1.5) # Long enough for MDS to consider session stale self.assertFalse(write_blocked.finished) self.assert_session_state(client_id, "stale") @@ -85,10 +86,9 @@ class TestClientRecovery(CephFSTestCase): REQUIRE_KCLIENT_REMOTE = True CLIENTS_REQUIRED = 2 - LOAD_SETTINGS = ["mds_session_timeout", "mds_reconnect_timeout", "ms_max_backoff"] + LOAD_SETTINGS = ["mds_reconnect_timeout", "ms_max_backoff"] # Environment references - mds_session_timeout = None mds_reconnect_timeout = None ms_max_backoff = None @@ -212,6 +212,8 @@ class TestClientRecovery(CephFSTestCase): self.mount_a.create_destroy() def test_stale_caps(self): + session_timeout = self.fs.get_var("session_timeout") + # Capability release from stale session # ===================================== cap_holder = self.mount_a.open_background() @@ -224,7 +226,7 @@ class TestClientRecovery(CephFSTestCase): self.mount_a.kill() try: - # Now, after mds_session_timeout seconds, the waiter should + # Now, after session_timeout seconds, the waiter should # complete their operation when the MDS marks the holder's # session stale. cap_waiter = self.mount_b.write_background() @@ -237,9 +239,9 @@ class TestClientRecovery(CephFSTestCase): cap_waited = b - a log.info("cap_waiter waited {0}s".format(cap_waited)) - self.assertTrue(self.mds_session_timeout / 2.0 <= cap_waited <= self.mds_session_timeout * 2.0, + self.assertTrue(session_timeout / 2.0 <= cap_waited <= session_timeout * 2.0, "Capability handover took {0}, expected approx {1}".format( - cap_waited, self.mds_session_timeout + cap_waited, session_timeout )) cap_holder.stdin.close() @@ -259,6 +261,8 @@ class TestClientRecovery(CephFSTestCase): # Eviction while holding a capability # =================================== + session_timeout = self.fs.get_var("session_timeout") + # Take out a write capability on a file on client A, # and then immediately kill it. cap_holder = self.mount_a.open_background() @@ -288,9 +292,9 @@ class TestClientRecovery(CephFSTestCase): log.info("cap_waiter waited {0}s".format(cap_waited)) # This is the check that it happened 'now' rather than waiting # for the session timeout - self.assertLess(cap_waited, self.mds_session_timeout / 2.0, + self.assertLess(cap_waited, session_timeout / 2.0, "Capability handover took {0}, expected less than {1}".format( - cap_waited, self.mds_session_timeout / 2.0 + cap_waited, session_timeout / 2.0 )) cap_holder.stdin.close() diff --git a/qa/tasks/cephfs/test_fragment.py b/qa/tasks/cephfs/test_fragment.py index a62ef743216..54a49cea2fc 100644 --- a/qa/tasks/cephfs/test_fragment.py +++ b/qa/tasks/cephfs/test_fragment.py @@ -33,7 +33,6 @@ class TestFragmentation(CephFSTestCase): Apply kwargs as MDS configuration settings, enable dirfrags and restart the MDSs. 
""" - kwargs['mds_bal_frag'] = "true" for k, v in kwargs.items(): self.ceph_cluster.set_ceph_conf("mds", k, v.__str__()) diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py index d857cfddf01..6757b009b73 100644 --- a/qa/tasks/cephfs/test_misc.py +++ b/qa/tasks/cephfs/test_misc.py @@ -11,9 +11,6 @@ import json class TestMisc(CephFSTestCase): CLIENTS_REQUIRED = 2 - LOAD_SETTINGS = ["mds_session_autoclose"] - mds_session_autoclose = None - def test_getattr_caps(self): """ Check if MDS recognizes the 'mask' parameter of open request. @@ -104,6 +101,8 @@ class TestMisc(CephFSTestCase): only session """ + session_autoclose = self.fs.get_var("session_autoclose") + self.mount_b.umount_wait() ls_data = self.fs.mds_asok(['session', 'ls']) self.assert_session_count(1, ls_data) @@ -111,7 +110,7 @@ class TestMisc(CephFSTestCase): self.mount_a.kill() self.mount_a.kill_cleanup() - time.sleep(self.mds_session_autoclose * 1.5) + time.sleep(session_autoclose * 1.5) ls_data = self.fs.mds_asok(['session', 'ls']) self.assert_session_count(1, ls_data) @@ -126,7 +125,7 @@ class TestMisc(CephFSTestCase): self.mount_a.kill() self.mount_a.kill_cleanup() - time.sleep(self.mds_session_autoclose * 1.5) + time.sleep(session_autoclose * 1.5) ls_data = self.fs.mds_asok(['session', 'ls']) self.assert_session_count(1, ls_data) diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py index 3c2a86993f2..919b059b871 100644 --- a/qa/tasks/cephfs/test_strays.py +++ b/qa/tasks/cephfs/test_strays.py @@ -962,7 +962,7 @@ class TestStrays(CephFSTestCase): max_purge_files = 2 - self.set_conf('mds', 'mds_bal_frag', 'false') + self.fs.set_allow_dirfrags(True) self.set_conf('mds', 'mds_max_purge_files', "%d" % max_purge_files) self.fs.mds_fail_restart() self.fs.wait_for_daemons() diff --git a/src/common/legacy_config_opts.h b/src/common/legacy_config_opts.h index c82649836f8..454273a0556 100644 --- a/src/common/legacy_config_opts.h +++ b/src/common/legacy_config_opts.h @@ -398,7 +398,6 @@ OPTION(filer_max_purge_ops, OPT_U32) OPTION(filer_max_truncate_ops, OPT_U32) OPTION(mds_data, OPT_STR) -OPTION(mds_max_file_size, OPT_U64) // Used when creating new CephFS. Change with 'ceph fs set max_file_size ' afterwards // max xattr kv pairs size for each dir/file OPTION(mds_max_xattr_pairs_size, OPT_U32) OPTION(mds_max_file_recover, OPT_U32) @@ -409,17 +408,15 @@ OPTION(mds_beacon_interval, OPT_FLOAT) OPTION(mds_beacon_grace, OPT_FLOAT) OPTION(mds_enforce_unique_name, OPT_BOOL) -OPTION(mds_session_timeout, OPT_FLOAT) // cap bits and leases time out if client unresponsive or not returning its caps OPTION(mds_session_blacklist_on_timeout, OPT_BOOL) // whether to blacklist clients whose sessions are dropped due to timeout OPTION(mds_session_blacklist_on_evict, OPT_BOOL) // whether to blacklist clients whose sessions are dropped via admin commands OPTION(mds_sessionmap_keys_per_op, OPT_U32) // how many sessions should I try to load/store in a single OMAP operation? 
diff --git a/src/common/legacy_config_opts.h b/src/common/legacy_config_opts.h
index c82649836f8..454273a0556 100644
--- a/src/common/legacy_config_opts.h
+++ b/src/common/legacy_config_opts.h
@@ -398,7 +398,6 @@ OPTION(filer_max_purge_ops, OPT_U32)
 OPTION(filer_max_truncate_ops, OPT_U32)
 
 OPTION(mds_data, OPT_STR)
-OPTION(mds_max_file_size, OPT_U64) // Used when creating new CephFS. Change with 'ceph fs set <fs_name> max_file_size <size>' afterwards
 // max xattr kv pairs size for each dir/file
 OPTION(mds_max_xattr_pairs_size, OPT_U32)
 OPTION(mds_max_file_recover, OPT_U32)
@@ -409,17 +408,15 @@ OPTION(mds_beacon_interval, OPT_FLOAT)
 OPTION(mds_beacon_grace, OPT_FLOAT)
 OPTION(mds_enforce_unique_name, OPT_BOOL)
 
-OPTION(mds_session_timeout, OPT_FLOAT)    // cap bits and leases time out if client unresponsive or not returning its caps
 OPTION(mds_session_blacklist_on_timeout, OPT_BOOL)    // whether to blacklist clients whose sessions are dropped due to timeout
 OPTION(mds_session_blacklist_on_evict, OPT_BOOL)  // whether to blacklist clients whose sessions are dropped via admin commands
 
 OPTION(mds_sessionmap_keys_per_op, OPT_U32)    // how many sessions should I try to load/store in a single OMAP operation?
 OPTION(mds_recall_state_timeout, OPT_FLOAT)    // detect clients which aren't trimming caps
 OPTION(mds_freeze_tree_timeout, OPT_FLOAT)    // detecting freeze tree deadlock
-OPTION(mds_session_autoclose, OPT_FLOAT)    // autoclose idle session
 OPTION(mds_health_summarize_threshold, OPT_INT) // collapse N-client health metrics to a single 'many'
 OPTION(mds_reconnect_timeout, OPT_FLOAT)  // seconds to wait for clients during mds restart
-              //  make it (mds_session_timeout - mds_beacon_grace)
+              //  make it (mdsmap.session_timeout - mds_beacon_grace)
 OPTION(mds_tick_interval, OPT_FLOAT)
 OPTION(mds_dirstat_min_interval, OPT_FLOAT)    // try to avoid propagating more often than this
 OPTION(mds_scatter_nudge_interval, OPT_FLOAT)     // how quickly dirstat changes propagate up the hierarchy
@@ -436,7 +433,6 @@ OPTION(mds_bal_export_pin, OPT_BOOL) // allow clients to pin directory trees to
 OPTION(mds_bal_sample_interval, OPT_DOUBLE)  // every 3 seconds
 OPTION(mds_bal_replicate_threshold, OPT_FLOAT)
 OPTION(mds_bal_unreplicate_threshold, OPT_FLOAT)
-OPTION(mds_bal_frag, OPT_BOOL)
 OPTION(mds_bal_split_size, OPT_INT)
 OPTION(mds_bal_split_rd, OPT_FLOAT)
 OPTION(mds_bal_split_wr, OPT_FLOAT)
diff --git a/src/common/options.cc b/src/common/options.cc
index 245240efed8..c569fa70765 100644
--- a/src/common/options.cc
+++ b/src/common/options.cc
@@ -6008,10 +6008,6 @@ std::vector