mirror of https://github.com/ceph/ceph (synced 2025-02-22 10:37:15 +00:00)
osd: osd_mon_report_interval_min -> osd_mon_report_interval, kill _max
The _max isn't used.  Drop the _min suffix.

Signed-off-by: Sage Weil <sage@redhat.com>
This commit is contained in:
parent 8f8f65594b
commit 577737d007
@@ -109,3 +109,12 @@ method. See http://docs.ceph.com/docs/luminous/mgr/restful for details.
   now reflects the source of each config option (e.g., default, config file,
   command line) as well as the final (active) value.
 
+13.0.3
+------
+
+* The ``osd_mon_report_interval_min`` option has been renamed to
+  ``osd_mon_report_interval``, and the ``osd_mon_report_interval_max``
+  (unused) has been eliminated.  If this value has been customized on
+  your cluster then your configuration should be adjusted in order to
+  avoid reverting to the default value.
+
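If a cluster's ceph.conf customized the old option, the value has to be carried over by hand; a minimal sketch (the value 10 is purely illustrative):

    [osd]
    # pre-13.0.3 name, no longer recognized after the rename:
    #osd mon report interval min = 10
    # post-rename name:
    osd mon report interval = 10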
@@ -154,7 +154,7 @@ consider the Ceph OSD Daemon ``down`` after the ``mon osd report timeout``
 elapses. A Ceph OSD Daemon sends a report to a Ceph Monitor when a reportable
 event such as a failure, a change in placement group stats, a change in
 ``up_thru`` or when it boots within 5 seconds. You can change the Ceph OSD
-Daemon minimum report interval by adding an ``osd mon report interval min``
+Daemon minimum report interval by adding an ``osd mon report interval``
 setting under the ``[osd]`` section of your Ceph configuration file, or by
 setting the value at runtime. A Ceph OSD Daemon sends a report to a Ceph
 Monitor every 120 seconds irrespective of whether any notable changes occur.
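For the runtime route, the same injectargs invocation this change uses in its test tooling works against a live cluster (5 matches the test setting; any interval in seconds is valid):

    ceph tell osd.* injectargs --osd-mon-report-interval 5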
@@ -379,24 +379,14 @@ OSD Settings
 :Default: ``30``
 
 
-``osd mon report interval max``
+``osd mon report interval``
 
-:Description: The maximum time in seconds that a Ceph OSD Daemon can wait before
-              it must report to a Ceph Monitor.
-
-:Type: 32-bit Integer
-:Default: ``120``
-
-
-``osd mon report interval min``
-
-:Description: The minimum number of seconds a Ceph OSD Daemon may wait
+:Description: The number of seconds a Ceph OSD Daemon may wait
               from startup or another reportable event before reporting
               to a Ceph Monitor.
 
 :Type: 32-bit Integer
 :Default: ``5``
-:Valid Range: Should be less than ``osd mon report interval max``
 
 
 ``osd mon ack timeout``
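To confirm which value a running OSD ended up with after the rename, one option (assuming admin-socket access; osd.0 is just an example daemon id) is:

    ceph daemon osd.0 config get osd_mon_report_interval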
@@ -18,7 +18,7 @@ overrides:
         mon osd backfillfull ratio: 0.6
         mon osd full ratio: 0.7
       osd:
-        osd mon report interval max: 5
+        osd mon report interval: 5
         osd objectstore: memstore
         osd failsafe full ratio: 1.0
         memstore device bytes: 200000000
@@ -18,7 +18,7 @@ overrides:
         mon osd backfillfull ratio: 0.6
         mon osd full ratio: 0.7
       osd:
-        osd mon report interval max: 5
+        osd mon report interval: 5
         osd objectstore: memstore
         osd failsafe full ratio: 1.0
         memstore device bytes: 200000000
@@ -1384,7 +1384,7 @@ class CephManager:
        assert False
 
    def wait_for_pg_stats(func):
-        # both osd_mon_report_interval_min and mgr_stats_period are 5 seconds
+        # both osd_mon_report_interval and mgr_stats_period are 5 seconds
        # by default, and take the faulty injection in ms into consideration,
        # 12 seconds are more than enough
        delays = [1, 1, 2, 3, 5, 8, 13]
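The decorator body itself is elided by the hunk; a minimal sketch of the retry pattern those delays drive (the wrapper shape and the AssertionError convention are assumptions for illustration, not the actual implementation):

    import functools
    import time

    def wait_for_pg_stats(func):
        # Fibonacci-style backoff: 1+1+2+3+5+8+13 = 33s total,
        # comfortably above the ~12s the comment reasons about
        delays = [1, 1, 2, 3, 5, 8, 13]

        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            exc = None
            for delay in delays:
                try:
                    # re-run the check; return as soon as stats have settled
                    return func(self, *args, **kwargs)
                except AssertionError as e:
                    exc = e
                    time.sleep(delay)
            raise exc

        return wrapper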
@@ -134,7 +134,7 @@ class FullnessTestCase(CephFSTestCase):
         the failed write.
         """
 
-        osd_mon_report_interval_max = int(self.fs.get_config("osd_mon_report_interval_max", service_type='osd'))
+        osd_mon_report_interval = int(self.fs.get_config("osd_mon_report_interval", service_type='osd'))
 
         log.info("Writing {0}MB should fill this cluster".format(self.fill_mb))
 
@@ -149,7 +149,7 @@ class FullnessTestCase(CephFSTestCase):
         else:
             log.info("Writing file B succeeded (full status will happen soon)")
         self.wait_until_true(lambda: self.is_full(),
-                             timeout=osd_mon_report_interval_max * 5)
+                             timeout=osd_mon_report_interval * 5)
 
         # Attempting to write more data should give me ENOSPC
         with self.assertRaises(CommandFailedError) as ar:
@@ -184,7 +184,7 @@ class FullnessTestCase(CephFSTestCase):
         # * The MDS to purge the stray folder and execute object deletions
         # * The OSDs to inform the mon that they are no longer full
         self.wait_until_true(lambda: not self.is_full(),
-                             timeout=osd_mon_report_interval_max * 5)
+                             timeout=osd_mon_report_interval * 5)
 
         # Wait for the MDS to see the latest OSD map so that it will reliably
         # be applying the free space policy
@@ -212,7 +212,7 @@ class FullnessTestCase(CephFSTestCase):
         file_path = os.path.join(self.mount_a.mountpoint, "full_test_file")
 
         # Enough to trip the full flag
-        osd_mon_report_interval_max = int(self.fs.get_config("osd_mon_report_interval_max", service_type='osd'))
+        osd_mon_report_interval = int(self.fs.get_config("osd_mon_report_interval", service_type='osd'))
         mon_tick_interval = int(self.fs.get_config("mon_tick_interval", service_type="mon"))
 
         # Sufficient data to cause RADOS cluster to go 'full'
@@ -222,13 +222,13 @@ class FullnessTestCase(CephFSTestCase):
         # (report_interval for mon to learn PG stats, tick interval for it to update OSD map,
         # factor of 1.5 for I/O + network latency in committing OSD map and distributing it
         # to the OSDs)
-        full_wait = (osd_mon_report_interval_max + mon_tick_interval) * 1.5
+        full_wait = (osd_mon_report_interval + mon_tick_interval) * 1.5
 
         # Configs for this test should bring this setting down in order to
         # run reasonably quickly
-        if osd_mon_report_interval_max > 10:
+        if osd_mon_report_interval > 10:
             log.warn("This test may run rather slowly unless you decrease"
-                     "osd_mon_report_interval_max (5 is a good setting)!")
+                     "osd_mon_report_interval (5 is a good setting)!")
 
         self.mount_a.run_python(template.format(
             fill_mb=self.fill_mb,
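Plugging in the defaults this change establishes (osd_mon_report_interval = 5; mon_tick_interval assumed to be its usual 5-second default here) shows the budget the comment is reasoning about:

    osd_mon_report_interval = 5  # default after this commit
    mon_tick_interval = 5        # assumed default, for illustration
    full_wait = (osd_mon_report_interval + mon_tick_interval) * 1.5
    print(full_wait)             # 15.0 seconds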
@@ -954,8 +954,8 @@ def exec_test():
 
     # For the benefit of polling tests like test_full -- in teuthology land we set this
     # in a .yaml, here it's just a hardcoded thing for the developer's pleasure.
-    remote.run(args=[os.path.join(BIN_PREFIX, "ceph"), "tell", "osd.*", "injectargs", "--osd-mon-report-interval-max", "5"])
-    ceph_cluster.set_ceph_conf("osd", "osd_mon_report_interval_max", "5")
+    remote.run(args=[os.path.join(BIN_PREFIX, "ceph"), "tell", "osd.*", "injectargs", "--osd-mon-report-interval", "5"])
+    ceph_cluster.set_ceph_conf("osd", "osd_mon_report_interval", "5")
 
     # Vstart defaults to two segments, which very easily gets a "behind on trimming" health warning
     # from normal IO latency. Increase it for running tests.
@@ -699,8 +699,7 @@ OPTION(osd_max_trimming_pgs, OPT_U64)
 OPTION(osd_heartbeat_min_healthy_ratio, OPT_FLOAT)
 
 OPTION(osd_mon_heartbeat_interval, OPT_INT)  // (seconds) how often to ping monitor if no peers
-OPTION(osd_mon_report_interval_max, OPT_INT)
-OPTION(osd_mon_report_interval_min, OPT_INT)  // pg stats, failures, up_thru, boot.
+OPTION(osd_mon_report_interval, OPT_INT)  // failures, up_thru, boot.
 OPTION(osd_mon_report_max_in_flight, OPT_INT)  // max updates in flight
 OPTION(osd_beacon_report_interval, OPT_INT)  // (second) how often to send beacon message to monitor
 OPTION(osd_pg_stat_report_interval_max, OPT_INT)  // report pg stats for any given pg at least this often
@@ -2912,13 +2912,9 @@ std::vector<Option> get_global_options() {
     .set_default(30)
     .set_description(""),
 
-    Option("osd_mon_report_interval_max", Option::TYPE_INT, Option::LEVEL_ADVANCED)
-    .set_default(600)
-    .set_description(""),
-
-    Option("osd_mon_report_interval_min", Option::TYPE_INT, Option::LEVEL_ADVANCED)
+    Option("osd_mon_report_interval", Option::TYPE_INT, Option::LEVEL_ADVANCED)
     .set_default(5)
-    .set_description(""),
+    .set_description("Frequency of OSD reports to mon for peer failures, fullness status changes"),
 
     Option("osd_mon_report_max_in_flight", Option::TYPE_INT, Option::LEVEL_ADVANCED)
     .set_default(2)
@@ -4963,7 +4963,7 @@ void OSD::tick_without_osd_lock()
   // mon report?
   utime_t now = ceph_clock_now();
   if (service.need_fullness_update() ||
-      now - last_mon_report > cct->_conf->osd_mon_report_interval_min) {
+      now - last_mon_report > cct->_conf->osd_mon_report_interval) {
     last_mon_report = now;
     send_full_update();
     send_failures();
@@ -223,8 +223,7 @@ all_osds_die_impl() {
 }
 
 all_osds_die() {
-  setup 3 'osd mon report interval max = 60
-           osd mon report interval min = 3
+  setup 3 'osd mon report interval = 3
            mon osd report timeout = 60'
 
   all_osds_die_impl