mds: disable early reply for dir layout and quota related requests

These requests affect the whole subtree. Replaying them when the MDS
recovers may break the ordering of requests in a multi-MDS cluster.

Signed-off-by: "Yan, Zheng" <zyan@redhat.com>
Author: Yan, Zheng
Date:   2017-10-20 17:03:10 +08:00
Parent: dee3711bd7
Commit: edd76fc883

2 changed files with 15 additions and 7 deletions
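
For orientation, here is a minimal standalone sketch of the guard this
commit introduces (illustrative names; Request and early_reply() below are
stand-ins, not the real MDRequestImpl or Server::early_reply): requests
that mutate subtree-wide state mark themselves no_early_reply, and the
reply path refuses to acknowledge them before the update is journaled.

    // Minimal sketch of the no_early_reply guard, not Ceph code.
    #include <iostream>

    struct Request {
      bool no_early_reply = false;  // set by subtree-wide ops (dir layout, quota)
      bool did_early_reply = false;
    };

    void early_reply(Request &req) {
      if (req.no_early_reply) {
        std::cout << "early reply suppressed; wait for journal commit\n";
        return;  // client only hears back once the change is durable
      }
      req.did_early_reply = true;
      std::cout << "early (unjournaled) reply sent\n";
    }

    int main() {
      Request setattr;            // ordinary inode update: early reply is safe
      early_reply(setattr);

      Request setdirlayout;       // affects a whole subtree: defer the reply
      setdirlayout.no_early_reply = true;
      early_reply(setdirlayout);
    }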

src/mds/Mutation.h

@@ -192,11 +192,12 @@ struct MDRequestImpl : public MutationImpl {
   inodeno_t alloc_ino, used_prealloc_ino;
   interval_set<inodeno_t> prealloc_inos;
 
-  int snap_caps;
-  int getattr_caps;      ///< caps requested by getattr
-  bool did_early_reply;
-  bool o_trunc;          ///< request is an O_TRUNC mutation
-  bool has_completed;    ///< request has already completed
+  int snap_caps = 0;
+  int getattr_caps = 0;        ///< caps requested by getattr
+  bool no_early_reply = false;
+  bool did_early_reply = false;
+  bool o_trunc = false;        ///< request is an O_TRUNC mutation
+  bool has_completed = false;  ///< request has already completed
 
   bufferlist reply_extra_bl;
@@ -299,8 +300,6 @@ struct MDRequestImpl : public MutationImpl {
     session(NULL), item_session_request(this),
     client_request(params.client_req), straydn(NULL), snapid(CEPH_NOSNAP),
     tracei(NULL), tracedn(NULL), alloc_ino(0), used_prealloc_ino(0),
-    snap_caps(0), getattr_caps(0),
-    did_early_reply(false), o_trunc(false), has_completed(false),
     slave_request(NULL), internal_op(params.internal_op), internal_op_finish(NULL),
     internal_op_private(NULL),
     retry(0),
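
As a side effect, this first file moves the member defaults from the
constructor's initializer list to C++11 in-class initializers. A quick
illustration of why that helps when adding a flag like no_early_reply
(illustrative structs, not the real MDRequestImpl):

    // Before: every new member must also be remembered in the constructor.
    struct BeforeStyle {
      int snap_caps;
      bool did_early_reply;
      BeforeStyle() : snap_caps(0), did_early_reply(false) {}  // easy to miss one
    };

    // After: the default sits next to the declaration, so a newly added
    // member such as no_early_reply cannot be left uninitialized.
    struct AfterStyle {
      int snap_caps = 0;
      bool no_early_reply = false;
      bool did_early_reply = false;
    };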

src/mds/Server.cc

@@ -1296,6 +1296,11 @@ void Server::early_reply(MDRequestRef& mdr, CInode *tracei, CDentry *tracedn)
   if (!g_conf->mds_early_reply)
     return;
 
+  if (mdr->no_early_reply) {
+    dout(10) << "early_reply - flag no_early_reply is set, not allowed." << dendl;
+    return;
+  }
+
   if (mdr->has_more() && mdr->more()->has_journaled_slaves) {
     dout(10) << "early_reply - there are journaled slaves, not allowed." << dendl;
     return;
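
Once early reply is suppressed, the only acknowledgement the client gets
comes from the journal-commit completion passed to journal_and_reply()
(C_MDS_inode_update_finish in the hunks below). A rough sketch of that
deferred-reply shape, with invented names rather than Ceph's API:

    #include <functional>
    #include <iostream>
    #include <vector>

    // Toy journal: completions run only after the entry is durable.
    struct Journal {
      std::vector<std::function<void()>> completions;
      void submit(std::function<void()> fin) { completions.push_back(std::move(fin)); }
      void commit() {                 // e.g. after flushing the log to disk
        for (auto &fin : completions) fin();
        completions.clear();
      }
    };

    int main() {
      Journal journal;
      // journal_and_reply()-style flow: queue the reply behind the commit.
      journal.submit([] { std::cout << "reply sent after commit\n"; });
      std::cout << "request journaled, client still waiting\n";
      journal.commit();               // client hears back, in journal order
    }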
@@ -4361,6 +4366,7 @@ void Server::handle_client_setdirlayout(MDRequestRef& mdr)
   mdcache->predirty_journal_parents(mdr, &le->metablob, cur, 0, PREDIRTY_PRIMARY);
   mdcache->journal_dirty_inode(mdr.get(), &le->metablob, cur);
 
+  mdr->no_early_reply = true;
   journal_and_reply(mdr, cur, 0, le, new C_MDS_inode_update_finish(this, mdr, cur));
 }
@@ -4592,6 +4598,7 @@ void Server::handle_set_vxattr(MDRequestRef& mdr, CInode *cur,
     pi = cur->project_inode();
     pi->layout = layout;
+    mdr->no_early_reply = true;
   } else if (name.compare(0, 16, "ceph.file.layout") == 0) {
     if (!cur->is_file()) {
       respond_to_request(mdr, -EINVAL);
@@ -4637,6 +4644,7 @@ void Server::handle_set_vxattr(MDRequestRef& mdr, CInode *cur,
     pi = cur->project_inode();
     pi->quota = quota;
+    mdr->no_early_reply = true;
   } else if (name.find("ceph.dir.pin") == 0) {
     if (!cur->is_dir() || cur->is_root()) {
       respond_to_request(mdr, -EINVAL);
@@ -4726,6 +4734,7 @@ void Server::handle_remove_vxattr(MDRequestRef& mdr, CInode *cur,
     mdcache->predirty_journal_parents(mdr, &le->metablob, cur, 0, PREDIRTY_PRIMARY);
     mdcache->journal_dirty_inode(mdr.get(), &le->metablob, cur);
 
+    mdr->no_early_reply = true;
     journal_and_reply(mdr, cur, 0, le, new C_MDS_inode_update_finish(this, mdr, cur));
     return;
   } else if (name == "ceph.dir.layout.pool_namespace"