
mds: cap client recall to min caps per client

Fixes: http://tracker.ceph.com/issues/21575

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
Patrick Donnelly 2017-09-27 10:39:01 -07:00
parent bfc4902495
commit 538834171f
2 changed files with 31 additions and 20 deletions:
  qa/tasks/cephfs
  src/mds
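The gist of the fix: the MDS used to compute its recall target as MIN(caps * ratio, max_caps_per_client), which could ask a client to shrink below the configured floor; this patch clamps the target to mds_min_caps_per_client. A minimal Python sketch of the new arithmetic (illustrative only -- the names mirror the C++ hunk below, and ratio stands for whatever recall ratio the MDS has derived):

    def recall_target(num_caps, ratio, max_caps_per_client, min_caps_per_client):
        # Clamp the ratio-based target into [min, max] so a SESSION_RECALL
        # message can never ask a client to drop below the per-client minimum.
        return max(min(int(num_caps * ratio), max_caps_per_client),
                   min_caps_per_client)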

qa/tasks/cephfs

@@ -29,7 +29,7 @@ class TestClientLimits(CephFSTestCase):
     REQUIRE_KCLIENT_REMOTE = True
     CLIENTS_REQUIRED = 2
 
-    def _test_client_pin(self, use_subdir):
+    def _test_client_pin(self, use_subdir, open_files):
         """
         When a client pins an inode in its cache, for example because the file is held open,
         it should reject requests from the MDS to trim these caps. The MDS should complain
@@ -39,13 +39,16 @@ class TestClientLimits(CephFSTestCase):
         :param use_subdir: whether to put test files in a subdir or use root
         """
 
-        cache_size = 100
-        open_files = 200
+        cache_size = open_files/2
 
         self.set_conf('mds', 'mds cache size', cache_size)
         self.fs.mds_fail_restart()
         self.fs.wait_for_daemons()
 
+        mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client"))
+        self.assertTrue(open_files >= mds_min_caps_per_client)
+        mds_max_ratio_caps_per_client = float(self.fs.get_config("mds_max_ratio_caps_per_client"))
+
         mount_a_client_id = self.mount_a.get_global_id()
         path = "subdir/mount_a" if use_subdir else "mount_a"
         open_proc = self.mount_a.open_n_background(path, open_files)
@@ -62,8 +65,7 @@ class TestClientLimits(CephFSTestCase):
         # MDS should not be happy about that, as the client is failing to comply
         # with the SESSION_RECALL messages it is being sent
         mds_recall_state_timeout = float(self.fs.get_config("mds_recall_state_timeout"))
-        self.wait_for_health("MDS_CLIENT_RECALL",
-                             mds_recall_state_timeout + 10)
+        self.wait_for_health("MDS_CLIENT_RECALL", mds_recall_state_timeout+10)
 
         # We can also test that the MDS health warning for oversized
         # cache is functioning as intended.
@@ -82,19 +84,30 @@ class TestClientLimits(CephFSTestCase):
 
         # The remaining caps should comply with the numbers sent from MDS in SESSION_RECALL message,
         # which depend on the caps outstanding, cache size and overall ratio
-        self.wait_until_equal(
-            lambda: self.get_session(mount_a_client_id)['num_caps'],
-            int(open_files * 0.2),
-            timeout=30,
-            reject_fn=lambda x: x < int(open_files*0.2))
+        def expected_caps():
+            num_caps = self.get_session(mount_a_client_id)['num_caps']
+            if num_caps < mds_min_caps_per_client:
+                raise RuntimeError("client caps fell below min!")
+            elif num_caps == mds_min_caps_per_client:
+                return True
+            elif num_caps == int((1.0-mds_max_ratio_caps_per_client)*(open_files+2)):
+                return True
+            else:
+                return False
+
+        self.wait_until_true(expected_caps, timeout=60)
 
     @needs_trimming
     def test_client_pin_root(self):
-        self._test_client_pin(False)
+        self._test_client_pin(False, 400)
 
     @needs_trimming
     def test_client_pin(self):
-        self._test_client_pin(True)
+        self._test_client_pin(True, 800)
 
+    @needs_trimming
+    def test_client_pin_mincaps(self):
+        self._test_client_pin(True, 200)
+
     def test_client_release_bug(self):
         """

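The new expected_caps predicate accepts two steady states: the client settles either at mds_min_caps_per_client or at the ratio-derived target int((1.0 - mds_max_ratio_caps_per_client) * (open_files + 2)); the +2 presumably covers the extra inodes the mount pins besides the open files (an inference, not stated in the patch). Assuming defaults of 100 for the minimum and 0.8 for the ratio (an assumption about the config of that era), the three parameterizations work out like this:

    mds_min_caps_per_client = 100        # assumed default
    mds_max_ratio_caps_per_client = 0.8  # assumed default

    for open_files in (200, 400, 800):
        target = int((1.0 - mds_max_ratio_caps_per_client) * (open_files + 2))
        print(open_files, target, max(target, mds_min_caps_per_client))
    # 200: target  40 -> settles at 100 (test_client_pin_mincaps hits the floor)
    # 400: target  80 -> settles at 100 (test_client_pin_root)
    # 800: target 160 -> settles at 160 (test_client_pin, above the floor)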
src/mds

@@ -1113,14 +1113,12 @@ void Server::recall_client_state(void)
             << ", leases " << session->leases.size()
             << dendl;
 
-    if (session->caps.size() > min_caps_per_client) {
-      uint64_t newlim = MIN((session->caps.size() * ratio), max_caps_per_client);
-      if (session->caps.size() > newlim) {
-          MClientSession *m = new MClientSession(CEPH_SESSION_RECALL_STATE);
-          m->head.max_caps = newlim;
-          mds->send_message_client(m, session);
-          session->notify_recall_sent(newlim);
-      }
+    uint64_t newlim = MAX(MIN((session->caps.size() * ratio), max_caps_per_client), min_caps_per_client);
+    if (session->caps.size() > newlim) {
+      MClientSession *m = new MClientSession(CEPH_SESSION_RECALL_STATE);
+      m->head.max_caps = newlim;
+      mds->send_message_client(m, session);
+      session->notify_recall_sent(newlim);
     }
   }
 }
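For contrast, a sketch of why the old guard was not enough: it skipped clients already at or below the floor, but the target it computed for everyone else was unclamped and could land below min_caps_per_client (numbers are illustrative, not Ceph defaults):

    caps, ratio = 120, 0.8
    min_caps, max_caps = 100, 5000   # illustrative values

    # Old logic: outer guard on the floor, but the target itself is unclamped.
    old_target = None
    if caps > min_caps:                                # 120 > 100, so recall...
        old_target = min(int(caps * ratio), max_caps)  # ...down to 96: below the floor

    # New logic: the floor is folded into the target itself.
    new_target = max(min(int(caps * ratio), max_caps), min_caps)  # 100
    assert new_target >= min_caps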