mirror of
https://github.com/ceph/ceph
synced 2025-02-24 11:37:37 +00:00
client: use separate locks for SafeCond completions
We can't reuse client_lock for the C_SafeCond completion because it is already held when the completion Context is invoked; waiting on it at the call site would self-deadlock. Instead, each synchronous wait site gets its own short-lived local Mutex ("flock") paired with its Cond/done flag.
This commit is contained in:
parent
996c8bf0f8
commit
267679abc7
@ -3631,9 +3631,10 @@ int Client::_read(Fh *f, __s64 offset, __u64 size, bufferlist *bl)
|
||||
in->get_cap_ref(CEPH_CAP_RD);
|
||||
|
||||
int rvalue = 0;
|
||||
Mutex flock("Client::_read flock");
|
||||
Cond cond;
|
||||
bool done = false;
|
||||
Context *onfinish = new C_SafeCond(&client_lock, &cond, &done, &rvalue);
|
||||
Context *onfinish = new C_SafeCond(&flock, &cond, &done, &rvalue);
|
||||
|
||||
int r = 0;
|
||||
if (g_conf.client_oc) {
|
||||
@ -3866,9 +3867,10 @@ int Client::_write(Fh *f, __s64 offset, __u64 size, const char *buf)
|
||||
}
|
||||
} else {
|
||||
// simple, non-atomic sync write
|
||||
Mutex flock("Client::_write flock");
|
||||
Cond cond;
|
||||
bool done = false;
|
||||
Context *onfinish = new C_SafeCond(&client_lock, &cond, &done);
|
||||
Context *onfinish = new C_SafeCond(&flock, &cond, &done);
|
||||
Context *onsafe = new C_Client_SyncCommit(this, in);
|
||||
|
||||
unsafe_sync_write++;
|
||||
|
@ -981,7 +981,7 @@ int SyntheticClient::play_trace(Trace& t, string& prefix, bool metadata_only)
|
||||
int n = 0;
|
||||
|
||||
// for object traces
|
||||
Mutex &lock = client->client_lock;
|
||||
Mutex lock("synclient foo");
|
||||
Cond cond;
|
||||
bool ack;
|
||||
bool safe;
|
||||
@ -2134,7 +2134,7 @@ int SyntheticClient::create_objects(int nobj, int osize, int inflight)
|
||||
bufferlist bl;
|
||||
bl.push_back(bp);
|
||||
|
||||
Mutex lock("lock");
|
||||
Mutex lock("create_objects lock");
|
||||
Cond cond;
|
||||
|
||||
int unack = 0;
|
||||
@ -3300,17 +3300,17 @@ int SyntheticClient::chunk_file(string &filename)
|
||||
while (pos < size) {
|
||||
int get = MIN(size-pos, 1048576);
|
||||
|
||||
Mutex lock("lock");
|
||||
Mutex flock("synclient chunk_file lock");
|
||||
Cond cond;
|
||||
bool done;
|
||||
bufferlist bl;
|
||||
|
||||
lock.Lock();
|
||||
Context *onfinish = new C_SafeCond(&lock, &cond, &done);
|
||||
flock.Lock();
|
||||
Context *onfinish = new C_SafeCond(&flock, &cond, &done);
|
||||
filer->read(inode.ino, &inode.layout, CEPH_NOSNAP, pos, get, &bl, 0, onfinish);
|
||||
while (!done)
|
||||
cond.Wait(lock);
|
||||
lock.Unlock();
|
||||
cond.Wait(flock);
|
||||
flock.Unlock();
|
||||
|
||||
dout(0) << "got " << bl.length() << " bytes at " << pos << dendl;
|
||||
|
||||
|
@ -1030,13 +1030,13 @@ int ObjectCacher::atomic_sync_readx(OSDRead *rd, inodeno_t ino, Mutex& lock)
|
||||
if (rd->extents.size() == 1) {
|
||||
// single object.
|
||||
// just write synchronously.
|
||||
Mutex flock("ObjectCacher::atomic_sync_readx flock 1");
|
||||
Cond cond;
|
||||
bool done = false;
|
||||
//objecter->readx(rd, new C_SafeCond(&lock, &cond, &done));
|
||||
objecter->read(rd->extents[0].oid, rd->extents[0].layout,
|
||||
rd->extents[0].offset, rd->extents[0].length,
|
||||
rd->bl, 0,
|
||||
new C_SafeCond(&lock, &cond, &done));
|
||||
new C_SafeCond(&flock, &cond, &done));
|
||||
|
||||
// block
|
||||
while (!done) cond.Wait(lock);
|
||||
@ -1062,9 +1062,10 @@ int ObjectCacher::atomic_sync_readx(OSDRead *rd, inodeno_t ino, Mutex& lock)
|
||||
vector<ObjectExtent> extents = rd->extents;
|
||||
|
||||
// do the read, into our cache
|
||||
Mutex flock("ObjectCacher::atomic_sync_readx flock 2");
|
||||
Cond cond;
|
||||
bool done = false;
|
||||
readx(rd, ino, new C_SafeCond(&lock, &cond, &done));
|
||||
readx(rd, ino, new C_SafeCond(&flock, &cond, &done));
|
||||
|
||||
// block
|
||||
while (!done) cond.Wait(lock);
|
||||
@ -1106,10 +1107,11 @@ int ObjectCacher::atomic_sync_writex(OSDWrite *wr, inodeno_t ino, Mutex& lock)
|
||||
<< " doing sync write"
|
||||
<< dendl;
|
||||
|
||||
Mutex flock("ObjectCacher::atomic_sync_writex flock");
|
||||
Cond cond;
|
||||
bool done = false;
|
||||
objecter->sg_write(wr->extents, wr->snapc, wr->bl, 0,
|
||||
new C_SafeCond(&lock, &cond, &done), 0);
|
||||
new C_SafeCond(&flock, &cond, &done), 0);
|
||||
|
||||
// block
|
||||
while (!done) cond.Wait(lock);
|
||||
@ -1182,9 +1184,10 @@ void ObjectCacher::rdlock(Object *o)
|
||||
if (o->lock_state == Object::LOCK_RDLOCKING ||
|
||||
o->lock_state == Object::LOCK_WRLOCKING) {
|
||||
dout(10) << "rdlock waiting for rdlock|wrlock on " << *o << dendl;
|
||||
Mutex flock("ObjectCacher::rdlock flock");
|
||||
Cond cond;
|
||||
bool done = false;
|
||||
o->waitfor_rd.push_back(new C_SafeCond(&lock, &cond, &done));
|
||||
o->waitfor_rd.push_back(new C_SafeCond(&flock, &cond, &done));
|
||||
while (!done) cond.Wait(lock);
|
||||
}
|
||||
assert(o->lock_state == Object::LOCK_RDLOCK ||
|
||||
@ -1225,9 +1228,10 @@ void ObjectCacher::wrlock(Object *o)
|
||||
if (o->lock_state == Object::LOCK_WRLOCKING ||
|
||||
o->lock_state == Object::LOCK_UPGRADING) {
|
||||
dout(10) << "wrlock waiting for wrlock on " << *o << dendl;
|
||||
Mutex flock("ObjectCacher::wrlock flock");
|
||||
Cond cond;
|
||||
bool done = false;
|
||||
o->waitfor_wr.push_back(new C_SafeCond(&lock, &cond, &done));
|
||||
o->waitfor_wr.push_back(new C_SafeCond(&flock, &cond, &done));
|
||||
while (!done) cond.Wait(lock);
|
||||
}
|
||||
assert(o->lock_state == Object::LOCK_WRLOCK);
|
||||
|
Loading…
Reference in New Issue
Block a user