mds: fix owner check of file lock

flock and POSIX locks do not use the process ID as the owner
identifier. The process ID of the lock holder is reported only
through the F_GETLK fcntl(2) command. In the Linux kernel, a file
lock's owner identifier is the file pointer through which the lock
was requested.
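
This is easy to demonstrate from userspace. A minimal sketch (the
path /tmp/flock_demo is arbitrary): two descriptors obtained from
separate open(2) calls are two distinct lock owners, so the second
flock(2) conflicts with the first even though both belong to the
same process:

#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
	/* Two open(2) calls => two open file descriptions => two owners. */
	int fd1 = open("/tmp/flock_demo", O_CREAT | O_RDWR, 0600);
	int fd2 = open("/tmp/flock_demo", O_RDWR);
	if (fd1 < 0 || fd2 < 0)
		return 1;

	if (flock(fd1, LOCK_EX) == 0)
		printf("fd1: exclusive lock acquired\n");

	/* Same pid, but a different open file description: conflicts. */
	if (flock(fd2, LOCK_EX | LOCK_NB) < 0)
		printf("fd2: conflict (%s) despite identical pid\n",
		       strerror(errno));

	close(fd2);
	close(fd1);
	return 0;
}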

The fix is to stop taking the 'pid_namespace' into consideration
when checking for conflicting locks. Also rename the 'pid' fields of
struct ceph_mds_request_args and struct ceph_filelock to 'owner',
and rename the 'pid_namespace' fields to 'pid'.

The kclient counterpart of this patch modifies the flock code to
assign the file pointer to the 'owner' field of the lock message. It
also sets the most significant bit of the 'owner' field; we can use
that bit to distinguish between old and new clients.
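
For reference, a sketch of that encoding on the client side.
ceph_lock_owner() is a hypothetical helper name, and the real kernel
patch may derive the value differently (e.g. hashing the pointer
rather than sending it raw):

#include <stdint.h>

/* Hypothetical sketch of the new-client encoding: fold the file
 * pointer into 'owner' and tag it with the most significant bit so
 * the MDS can tell new clients from old ones, which leave that bit
 * clear and still send a meaningful 'pid'. */
static inline uint64_t ceph_lock_owner(const void *file)
{
	return (1ULL << 63) | (uint64_t)(uintptr_t)file;
}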

Signed-off-by: Yan, Zheng <zheng.z.yan@intel.com>
Author: Yan, Zheng <zheng.z.yan@intel.com>
Date:   2014-03-10 07:36:14 +08:00
commit d3e3df7ad7 (parent 83731a75d7)
6 changed files with 28 additions and 24 deletions

@@ -403,8 +403,8 @@ union ceph_mds_request_args {
   struct {
     __u8 rule; /* currently fcntl or flock */
     __u8 type; /* shared, exclusive, remove*/
+    __le64 owner; /* who requests/holds the lock */
     __le64 pid; /* process id requesting the lock */
-    __le64 pid_namespace;
     __le64 start; /* initial location to lock */
     __le64 length; /* num bytes to lock from start */
     __u8 wait; /* will caller wait for lock to become available? */
@@ -515,8 +515,8 @@ struct ceph_filelock {
   __le64 start;/* file offset to start lock at */
   __le64 length; /* num bytes to lock; 0 for all following start */
   __le64 client; /* which client holds the lock */
+  __le64 owner; /* who requests/holds the lock */
   __le64 pid; /* process id holding the lock on the client */
-  __le64 pid_namespace;
   __u8 type; /* shared lock, exclusive lock, or unlock */
 } __attribute__ ((packed));

@@ -3050,8 +3050,8 @@ void Server::handle_client_file_setlock(MDRequest *mdr)
   set_lock.start = req->head.args.filelock_change.start;
   set_lock.length = req->head.args.filelock_change.length;
   set_lock.client = req->get_orig_source().num();
+  set_lock.owner = req->head.args.filelock_change.owner;
   set_lock.pid = req->head.args.filelock_change.pid;
-  set_lock.pid_namespace = req->head.args.filelock_change.pid_namespace;
   set_lock.type = req->head.args.filelock_change.type;
   bool will_wait = req->head.args.filelock_change.wait;
@@ -3141,8 +3141,8 @@ void Server::handle_client_file_readlock(MDRequest *mdr)
   checking_lock.start = req->head.args.filelock_change.start;
   checking_lock.length = req->head.args.filelock_change.length;
   checking_lock.client = req->get_orig_source().num();
+  checking_lock.owner = req->head.args.filelock_change.owner;
   checking_lock.pid = req->head.args.filelock_change.pid;
-  checking_lock.pid_namespace = req->head.args.filelock_change.pid_namespace;
   checking_lock.type = req->head.args.filelock_change.type;
 
   // get the appropriate lock state

@@ -15,9 +15,7 @@ bool ceph_lock_state_t::is_waiting(ceph_filelock &fl)
     if (p->second.start > fl.start)
       return false;
     if (p->second.length == fl.length &&
-        p->second.client == fl.client &&
-        p->second.pid == fl.pid &&
-        p->second.pid_namespace == fl.pid_namespace)
+        ceph_filelock_owner_equal(p->second, fl))
       return true;
     ++p;
   }
@@ -31,9 +29,7 @@ void ceph_lock_state_t::remove_waiting(ceph_filelock& fl)
     if (p->second.start > fl.start)
       return;
     if (p->second.length == fl.length &&
-        p->second.client == fl.client &&
-        p->second.pid == fl.pid &&
-        p->second.pid_namespace == fl.pid_namespace) {
+        ceph_filelock_owner_equal(p->second, fl)) {
       waiting_locks.erase(p);
       --client_waiting_lock_counts[(client_t)fl.client];
       if (!client_waiting_lock_counts[(client_t)fl.client]) {
@@ -466,17 +462,15 @@ void ceph_lock_state_t::split_by_owner(ceph_filelock& owner,
   dout(15) << "owner lock: " << owner << dendl;
   while (iter != locks.end()) {
     dout(15) << "comparing to " << (*iter)->second << dendl;
-    if ((*iter)->second.client == owner.client &&
-        (*iter)->second.pid_namespace == owner.pid_namespace &&
-        (*iter)->second.pid == owner.pid) {
+    if (ceph_filelock_owner_equal((*iter)->second, owner)) {
       dout(15) << "success, pushing to owned_locks" << dendl;
       owned_locks.push_back(*iter);
       iter = locks.erase(iter);
     } else {
       dout(15) << "failure, something not equal in this group "
-               << (*iter)->second.client << ":" << owner.client << ","
-               << (*iter)->second.pid_namespace << ":" << owner.pid_namespace
-               << "," << (*iter)->second.pid << ":" << owner.pid << dendl;
+               << (*iter)->second.owner << ":" << owner.owner << ","
+               << (*iter)->second.pid << ":" << owner.pid << dendl;
       ++iter;
     }
   }

@@ -11,19 +11,29 @@
 inline ostream& operator<<(ostream& out, ceph_filelock& l) {
   out << "start: " << l.start << ", length: " << l.length
-      << ", client: " << l.client << ", pid: " << l.pid
-      << ", pid_ns: " << l.pid_namespace << ", type: " << (int)l.type
+      << ", client: " << l.client << ", owner: " << l.owner
+      << ", pid: " << l.pid << ", type: " << (int)l.type
       << std::endl;
   return out;
 }
 
+inline bool ceph_filelock_owner_equal(ceph_filelock& l, ceph_filelock& r)
+{
+  if (l.client != r.client || l.owner != r.owner)
+    return false;
+  // The file lock is from old client if the most significant bit of
+  // 'owner' is not set. Old clients use both 'owner' and 'pid' to
+  // identify the owner of lock.
+  if (l.owner & (1ULL << 63))
+    return true;
+  return l.pid == r.pid;
+}
+
 inline bool operator==(ceph_filelock& l, ceph_filelock& r) {
   return
     l.length == r.length &&
     l.client == r.client &&
-    l.pid == r.pid &&
-    l.pid_namespace == r.pid_namespace &&
-    l.type == r.type;
+    l.type == r.type &&
+    ceph_filelock_owner_equal(l, r);
 }
 
 class ceph_lock_state_t {
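
To make the old/new-client split in ceph_filelock_owner_equal()
concrete, here is a self-contained sketch using a pared-down
stand-in for ceph_filelock (plain uint64_t instead of __le64 so it
builds outside the tree): a new client's locks match on 'owner'
alone, while an old client's locks also need matching pids:

#include <cstdint>
#include <cassert>

// Pared-down stand-in for ceph_filelock: only the owner-identity fields.
struct filelock {
  uint64_t client;
  uint64_t owner;
  uint64_t pid;
};

static bool owner_equal(const filelock& l, const filelock& r)
{
  if (l.client != r.client || l.owner != r.owner)
    return false;
  // MSB set => new client: 'owner' alone identifies the lock owner.
  if (l.owner & (1ULL << 63))
    return true;
  // MSB clear => old client: 'pid' still takes part in the identity.
  return l.pid == r.pid;
}

int main()
{
  const uint64_t msb = 1ULL << 63;

  // New client: same owner, different pid -> same lock owner.
  filelock a{1, msb | 0x1234, 100};
  filelock b{1, msb | 0x1234, 200};
  assert(owner_equal(a, b));

  // Old client (MSB clear): differing pids keep the owners distinct.
  filelock c{1, 0x1234, 100};
  filelock d{1, 0x1234, 200};
  assert(!owner_equal(c, d));

  return 0;
}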

@@ -180,8 +180,8 @@ public:
       head.op == CEPH_MDS_OP_GETFILELOCK) {
     out << "rule " << (int)head.args.filelock_change.rule
         << ", type " << (int)head.args.filelock_change.type
+        << ", owner " << head.args.filelock_change.owner
         << ", pid " << head.args.filelock_change.pid
-        << ", pid_ns " << head.args.filelock_change.pid_namespace
         << ", start " << head.args.filelock_change.start
         << ", length " << head.args.filelock_change.length
         << ", wait " << (int)head.args.filelock_change.wait;

@@ -395,8 +395,8 @@ union ceph_mds_request_args {
   struct {
     __u8 rule; /* currently fcntl or flock */
     __u8 type; /* shared, exclusive, remove*/
+    __le64 owner; /* who requests/holds the lock */
     __le64 pid; /* process id requesting the lock */
-    __le64 pid_namespace;
     __le64 start; /* initial location to lock */
     __le64 length; /* num bytes to lock from start */
     __u8 wait; /* will caller wait for lock to become available? */
@@ -506,8 +506,8 @@ struct ceph_filelock {
   __le64 start;/* file offset to start lock at */
   __le64 length; /* num bytes to lock; 0 for all following start */
   __le64 client; /* which client holds the lock */
+  __le64 owner; /* who requests/holds the lock */
   __le64 pid; /* process id holding the lock on the client */
-  __le64 pid_namespace;
   __u8 type; /* shared lock, exclusive lock, or unlock */
 } __attribute__ ((packed));