mds: use client_t throughout
parent 27f6aa37ed
commit a2c869c3ce

src/TODO: 7 lines changed
@@ -21,6 +21,11 @@ v0.13
/- mds: misc bugfixes for multiclient file access

v0.14
/- dir backpointer update on rename (fsck assist)
/- uclient bugs
/- msgr: unidirectional option (client->mon, client->mds, client->osd)
/- mon subscriptions to get osd, mds map updates. single mon connection.

- kclient: retry alloc on ENOMEM when reading from connection
- client authentication
- radosgw
@@ -35,9 +40,11 @@ v0.14
- kill maybe_dup_msg

bugs
- premature filejournal trimming?
- weird osd_lock contention during osd restart?
- clientid needs to be u64, and consistent throughout

later
- authentication
@@ -232,11 +232,43 @@ typedef __u64 tid_t; // transaction id
typedef __u64 version_t;
typedef __u32 epoch_t; // map epoch (32bits -> 13 epochs/second for 10 years)

#define O_LAZY 01000000

// --------------------------------------
// identify individual mount clients by 64bit value

struct client_t {
__s64 v;

client_t(__s64 _v = -2) : v(_v) {}

void encode(bufferlist& bl) const {
::encode(v, bl);
}
void decode(bufferlist::iterator& bl) {
::decode(v, bl);
}
};
WRITE_CLASS_ENCODER(client_t)

static inline bool operator==(const client_t& l, const client_t& r) { return l.v == r.v; }
static inline bool operator!=(const client_t& l, const client_t& r) { return l.v != r.v; }
static inline bool operator<(const client_t& l, const client_t& r) { return l.v < r.v; }
static inline bool operator<=(const client_t& l, const client_t& r) { return l.v <= r.v; }
static inline bool operator>(const client_t& l, const client_t& r) { return l.v > r.v; }
static inline bool operator>=(const client_t& l, const client_t& r) { return l.v >= r.v; }

static inline bool operator>=(const client_t& l, __s64 o) { return l.v >= o; }
static inline bool operator<(const client_t& l, __s64 o) { return l.v < o; }

inline ostream& operator<<(ostream& out, const client_t& c) {
return out << c.v;
}

// --------------------------------------
// ino
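The new client_t above is used throughout the MDS as a map key and in log output, which is why the hunk adds the comparison operators and an ostream operator<<, and why WRITE_CLASS_ENCODER hooks its encode/decode into the usual bufferlist machinery. A minimal standalone sketch of that usage follows (an illustrative stand-in built on int64_t, outside the Ceph tree, so the bufferlist encoding is omitted):

#include <cstdint>
#include <iostream>
#include <map>

// Illustrative stand-in for the client_t added in the hunk above.
struct client_t {
  int64_t v;
  client_t(int64_t _v = -2) : v(_v) {}   // -2 is the "unset" default
};

// operator< is what lets client_t serve as a std::map key.
static inline bool operator<(const client_t& l, const client_t& r) { return l.v < r.v; }
static inline std::ostream& operator<<(std::ostream& out, const client_t& c) { return out << c.v; }

int main() {
  std::map<client_t, int> caps;          // mirrors map<client_t, Capability*>
  caps[client_t(4100)] = 0x1;
  caps[client_t(4101)] = 0x2;
  for (std::map<client_t, int>::iterator it = caps.begin(); it != caps.end(); ++it)
    std::cout << "client" << it->first << " caps " << it->second << "\n";
  return 0;
}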
@@ -501,7 +501,7 @@ void CDentry::decode_lock_state(int type, bufferlist& bl)
}

ClientLease *CDentry::add_client_lease(int c, int mask)
ClientLease *CDentry::add_client_lease(client_t c, int mask)
{
ClientLease *l;
if (client_lease_map.count(c))
@@ -230,11 +230,11 @@ public:
return get_projected_linkage()->inode;
}

bool use_projected(int client, Mutation *mut) {
bool use_projected(client_t client, Mutation *mut) {
return lock.can_read_projected(client) ||
lock.get_xlocked_by() == mut;
}
linkage_t *get_linkage(int client, Mutation *mut) {
linkage_t *get_linkage(client_t client, Mutation *mut) {
return use_projected(client, mut) ? get_projected_linkage() : get_linkage();
}

@@ -352,17 +352,17 @@ public:
// ---------------------------------------------
// replicas (on clients)
public:
map<int,ClientLease*> client_lease_map;
map<client_t,ClientLease*> client_lease_map;

bool is_any_leases() {
return !client_lease_map.empty();
}
ClientLease *get_client_lease(int c) {
ClientLease *get_client_lease(client_t c) {
if (client_lease_map.count(c))
return client_lease_map[c];
return 0;
}
int get_client_lease_mask(int c) {
int get_client_lease_mask(client_t c) {
ClientLease *l = get_client_lease(c);
if (l)
return l->mask;
@@ -370,7 +370,7 @@ public:
return 0;
}

ClientLease *add_client_lease(int c, int mask);
ClientLease *add_client_lease(client_t c, int mask);
int remove_client_lease(ClientLease *r, int mask, class Locker *locker); // returns remaining mask (if any), and kicks locker eval_gathers
@@ -143,7 +143,7 @@ ostream& operator<<(ostream& out, CInode& in)

if (!in.get_client_caps().empty()) {
out << " caps={";
for (map<int,Capability*>::iterator it = in.get_client_caps().begin();
for (map<client_t,Capability*>::iterator it = in.get_client_caps().begin();
it != in.get_client_caps().end();
it++) {
if (it != in.get_client_caps().begin()) out << ",";
@@ -515,7 +515,7 @@ void CInode::name_stray_dentry(string& dname)
}

Capability *CInode::add_client_cap(int client, Session *session, SnapRealm *conrealm)
Capability *CInode::add_client_cap(client_t client, Session *session, SnapRealm *conrealm)
{
if (client_caps.empty()) {
get(PIN_CAPS);
@@ -542,7 +542,7 @@ Capability *CInode::add_client_cap(int client, Session *session, SnapRealm *conr
return cap;
}

void CInode::remove_client_cap(int client)
void CInode::remove_client_cap(client_t client)
{
assert(client_caps.count(client) == 1);
Capability *cap = client_caps[client];
@@ -1743,7 +1743,7 @@ bool CInode::encode_inodestat(bufferlist& bl, Session *session,

void CInode::encode_cap_message(MClientCaps *m, Capability *cap)
{
int client = cap->get_client();
client_t client = cap->get_client();

bool pfile = filelock.is_xlocked_by_client(client) ||
(cap && (cap->issued() & CEPH_CAP_FILE_EXCL));
@@ -262,7 +262,7 @@ public:
// -- distributed state --
protected:
// file capabilities
map<int, Capability*> client_caps; // client -> caps
map<client_t, Capability*> client_caps; // client -> caps
map<int, int> mds_caps_wanted; // [auth] mds -> caps wanted
int replica_caps_wanted; // [replica] what i've requested from auth
utime_t replica_caps_wanted_keep_until;
@@ -533,26 +533,26 @@ public:

// -- caps -- (new)
// client caps
int loner_cap, want_loner_cap;
client_t loner_cap, want_loner_cap;

int get_loner() { return loner_cap; }
int get_wanted_loner() { return want_loner_cap; }
client_t get_loner() { return loner_cap; }
client_t get_wanted_loner() { return want_loner_cap; }

// this is the loner state our locks should aim for
int get_target_loner() {
client_t get_target_loner() {
if (loner_cap == want_loner_cap)
return loner_cap;
else
return -1;
}

int calc_ideal_loner() {
client_t calc_ideal_loner() {
if (!mds_caps_wanted.empty())
return -1;

int n = 0;
int loner = -1;
for (map<int,Capability*>::iterator it = client_caps.begin();
client_t loner = -1;
for (map<client_t,Capability*>::iterator it = client_caps.begin();
it != client_caps.end();
it++)
if (!it->second->is_stale() &&
@@ -565,7 +565,7 @@ public:
}
return loner;
}
int choose_ideal_loner() {
client_t choose_ideal_loner() {
want_loner_cap = calc_ideal_loner();
return want_loner_cap;
}
@@ -629,7 +629,7 @@ public:

int count_nonstale_caps() {
int n = 0;
for (map<int,Capability*>::iterator it = client_caps.begin();
for (map<client_t,Capability*>::iterator it = client_caps.begin();
it != client_caps.end();
it++)
if (!it->second->is_stale())
@@ -638,7 +638,7 @@ public:
}
bool multiple_nonstale_caps() {
int n = 0;
for (map<int,Capability*>::iterator it = client_caps.begin();
for (map<client_t,Capability*>::iterator it = client_caps.begin();
it != client_caps.end();
it++)
if (!it->second->is_stale()) {
@@ -652,23 +652,23 @@ public:
bool is_any_caps() { return !client_caps.empty(); }
bool is_any_nonstale_caps() { return count_nonstale_caps(); }

map<int,Capability*>& get_client_caps() { return client_caps; }
Capability *get_client_cap(int client) {
map<client_t,Capability*>& get_client_caps() { return client_caps; }
Capability *get_client_cap(client_t client) {
if (client_caps.count(client))
return client_caps[client];
return 0;
}
int get_client_cap_pending(int client) {
int get_client_cap_pending(client_t client) {
Capability *c = get_client_cap(client);
if (c) return c->pending();
return 0;
}

Capability *add_client_cap(int client, Session *session, SnapRealm *conrealm=0);
void remove_client_cap(int client);
Capability *add_client_cap(client_t client, Session *session, SnapRealm *conrealm=0);
void remove_client_cap(client_t client);

void move_to_containing_realm(SnapRealm *realm) {
for (map<int,Capability*>::iterator q = client_caps.begin();
for (map<client_t,Capability*>::iterator q = client_caps.begin();
q != client_caps.end();
q++) {
containing_realm->remove_cap(q->first, q->second);
@@ -679,7 +679,7 @@ public:
containing_realm = realm;
}

Capability *reconnect_cap(int client, ceph_mds_cap_reconnect& icr, Session *session) {
Capability *reconnect_cap(client_t client, ceph_mds_cap_reconnect& icr, Session *session) {
Capability *cap = get_client_cap(client);
if (cap) {
// FIXME?
@@ -701,8 +701,8 @@ public:
while (!client_caps.empty())
remove_client_cap(client_caps.begin()->first);
}
void export_client_caps(map<int,Capability::Export>& cl) {
for (map<int,Capability*>::iterator it = client_caps.begin();
void export_client_caps(map<client_t,Capability::Export>& cl) {
for (map<client_t,Capability*>::iterator it = client_caps.begin();
it != client_caps.end();
it++) {
cl[it->first] = it->second->make_export();
@@ -739,14 +739,14 @@ public:
(xattrlock.gcaps_careful() << xattrlock.get_cap_shift()) |
(linklock.gcaps_careful() << linklock.get_cap_shift());
}
int get_xlocker_mask(int client) {
int get_xlocker_mask(client_t client) {
return
(filelock.gcaps_xlocker_mask(client) << filelock.get_cap_shift()) |
(authlock.gcaps_xlocker_mask(client) << authlock.get_cap_shift()) |
(xattrlock.gcaps_xlocker_mask(client) << xattrlock.get_cap_shift()) |
(linklock.gcaps_xlocker_mask(client) << linklock.get_cap_shift());
}
int get_caps_allowed_for_client(int client) {
int get_caps_allowed_for_client(client_t client) {
int allowed = get_caps_allowed_by_type(client == get_loner() ? CAP_LONER : CAP_ANY);
allowed |= get_caps_allowed_by_type(CAP_XLOCKER) & get_xlocker_mask(client);
return allowed;
@@ -759,7 +759,7 @@ public:
int loner = 0, other = 0, xlocker = 0;
if (!is_auth())
loner_cap = -1;
for (map<int,Capability*>::iterator it = client_caps.begin();
for (map<client_t,Capability*>::iterator it = client_caps.begin();
it != client_caps.end();
it++) {
int i = it->second->issued();
@@ -778,7 +778,7 @@ public:
int get_caps_wanted(int *ploner = 0, int *pother = 0, int shift = 0, int mask = 0xffff) {
int w = 0;
int loner = 0, other = 0;
for (map<int,Capability*>::iterator it = client_caps.begin();
for (map<client_t,Capability*>::iterator it = client_caps.begin();
it != client_caps.end();
it++) {
if (!it->second->is_stale()) {
@@ -98,7 +98,7 @@ public:

private:
CInode *inode;
int client;
client_t client;

__u64 cap_id;

@@ -221,7 +221,7 @@ public:

xlist<Capability*>::item snaprealm_caps_item;

Capability(CInode *i, __u64 id, int c) :
Capability(CInode *i, __u64 id, client_t c) :
inode(i), client(c),
cap_id(id),
_wanted(0),
@@ -260,7 +260,7 @@ public:
void set_stale(bool b) { stale = b; }

CInode *get_inode() { return inode; }
int get_client() { return client; }
client_t get_client() { return client; }

// caps this client wants to hold
int wanted() { return _wanted; }
@@ -153,7 +153,7 @@ bool Locker::acquire_locks(MDRequest *mdr,
}
dout(10) << "acquire_locks " << *mdr << dendl;

int client = mdr->get_client();
client_t client = mdr->get_client();

set<SimpleLock*, SimpleLock::ptr_lt> sorted; // sort everything we will lock
set<SimpleLock*> mustpin = xlocks; // items to authpin
@@ -684,7 +684,7 @@ bool Locker::_rdlock_kick(SimpleLock *lock)
return false;
}

bool Locker::rdlock_try(SimpleLock *lock, int client, Context *con)
bool Locker::rdlock_try(SimpleLock *lock, client_t client, Context *con)
{
dout(7) << "rdlock_try on " << *lock << " on " << *lock->get_parent() << dendl;

@@ -708,7 +708,7 @@ bool Locker::rdlock_start(SimpleLock *lock, MDRequest *mut)
dout(7) << "rdlock_start on " << *lock << " on " << *lock->get_parent() << dendl;

// client may be allowed to rdlock the same item it has xlocked.
int client = mut->get_client();
client_t client = mut->get_client();

if (!lock->get_parent()->is_auth() &&
lock->fw_rdlock_to_auth()) {
@@ -791,7 +791,7 @@ bool Locker::wrlock_start(SimpleLock *lock, MDRequest *mut, bool nowait)
((CInode*)lock->get_parent())->has_subtree_root_dirfrag();

CInode *in = (CInode *)lock->get_parent();
int client = mut->get_client();
client_t client = mut->get_client();
Capability *cap = 0;
if (client >= 0)
cap = in->get_client_cap(client);
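Checks like `if (client >= 0)` above keep compiling after the type change because the same commit adds comparison operators between client_t and a raw __s64; a minimal standalone sketch of that assumption, with int64_t standing in for __s64:

#include <cstdint>

struct client_t {
  int64_t v;
  client_t(int64_t _v = -2) : v(_v) {}
};

// Mirrors the mixed-type operator added alongside the struct; without it
// the comparison below would go through an implicit client_t(0) temporary
// and the client_t/client_t operator>= instead.
static inline bool operator>=(const client_t& l, int64_t o) { return l.v >= o; }

// -1 (and the -2 default) act as "no client" sentinels.
static bool is_real_client(client_t client) {
  return client >= 0;
}

int main() {
  return is_real_client(client_t(4100)) ? 0 : 1;
}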
@@ -868,7 +868,7 @@ bool Locker::xlock_start(SimpleLock *lock, MDRequest *mut)
return local_xlock_start((LocalLock*)lock, mut);

dout(7) << "xlock_start on " << *lock << " on " << *lock->get_parent() << dendl;
int client = mut->get_client();
client_t client = mut->get_client();

// auth?
if (lock->get_parent()->is_auth()) {
@@ -998,10 +998,10 @@ struct C_Locker_FileUpdate_finish : public Context {
CInode *in;
Mutation *mut;
bool share;
int client;
client_t client;
Capability *cap;
MClientCaps *ack;
C_Locker_FileUpdate_finish(Locker *l, CInode *i, Mutation *m, bool e=false, int c=-1,
C_Locker_FileUpdate_finish(Locker *l, CInode *i, Mutation *m, bool e=false, client_t c=-1,
Capability *cp = 0,
MClientCaps *ac = 0) :
locker(l), in(i), mut(m), share(e), client(c), cap(cp),
@@ -1013,7 +1013,7 @@ struct C_Locker_FileUpdate_finish : public Context {
}
};

void Locker::file_update_finish(CInode *in, Mutation *mut, bool share, int client,
void Locker::file_update_finish(CInode *in, Mutation *mut, bool share, client_t client,
Capability *cap, MClientCaps *ack)
{
dout(10) << "file_update_finish on " << *in << dendl;
@@ -1115,7 +1115,7 @@ bool Locker::issue_caps(CInode *in, Capability *only_cap)
int loner_allowed = in->get_caps_allowed_by_type(CAP_LONER);
int xlocker_allowed = in->get_caps_allowed_by_type(CAP_XLOCKER);

int loner = in->get_loner();
client_t loner = in->get_loner();
if (loner >= 0) {
dout(7) << "issue_caps loner client" << loner
<< " allowed=" << ccap_string(loner_allowed)
@@ -1132,7 +1132,7 @@ bool Locker::issue_caps(CInode *in, Capability *only_cap)
int nissued = 0;

// client caps
map<int, Capability*>::iterator it;
map<client_t, Capability*>::iterator it;
if (only_cap)
it = in->client_caps.find(only_cap->get_client());
else
@@ -1213,7 +1213,7 @@ void Locker::issue_truncate(CInode *in)
{
dout(7) << "issue_truncate on " << *in << dendl;

for (map<int, Capability*>::iterator it = in->client_caps.begin();
for (map<client_t, Capability*>::iterator it = in->client_caps.begin();
it != in->client_caps.end();
it++) {
Capability *cap = it->second;
@@ -1235,7 +1235,7 @@ void Locker::issue_truncate(CInode *in)
void Locker::revoke_stale_caps(Session *session)
{
dout(10) << "revoke_stale_caps for " << session->inst.name << dendl;
int client = session->get_client();
client_t client = session->get_client();

for (xlist<Capability*>::iterator p = session->caps.begin(); !p.end(); ++p) {
Capability *cap = *p;
@@ -1413,13 +1413,13 @@ public:
};

void Locker::calc_new_client_ranges(CInode *in, __u64 size, map<int,byte_range_t>& new_ranges)
void Locker::calc_new_client_ranges(CInode *in, __u64 size, map<client_t,byte_range_t>& new_ranges)
{
inode_t *latest = in->get_projected_inode();

// increase ranges as appropriate.
// shrink to 0 if no WR|BUFFER caps issued.
for (map<int,Capability*>::iterator p = in->client_caps.begin();
for (map<client_t,Capability*>::iterator p = in->client_caps.begin();
p != in->client_caps.end();
p++) {
if ((p->second->issued() | p->second->wanted()) & (CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER)) {
@@ -1440,7 +1440,7 @@ bool Locker::check_inode_max_size(CInode *in, bool force_wrlock,
assert(in->is_auth());

inode_t *latest = in->get_projected_inode();
map<int,byte_range_t> new_ranges;
map<client_t,byte_range_t> new_ranges;
__u64 size = latest->size;
if (update_size)
size = new_size;
@@ -1538,10 +1538,10 @@ void Locker::share_inode_max_size(CInode *in)
* the cap later.
*/
dout(10) << "share_inode_max_size on " << *in << dendl;
for (map<int,Capability*>::iterator it = in->client_caps.begin();
for (map<client_t,Capability*>::iterator it = in->client_caps.begin();
it != in->client_caps.end();
it++) {
const int client = it->first;
const client_t client = it->first;
Capability *cap = it->second;
if (cap->is_suppress())
continue;
@@ -1606,7 +1606,7 @@ void Locker::adjust_cap_wanted(Capability *cap, int wanted, int issue_seq)
*/
void Locker::handle_client_caps(MClientCaps *m)
{
int client = m->get_source().num();
client_t client = m->get_source().num();

snapid_t follows = m->get_snap_follows();
dout(7) << "handle_client_caps on " << m->get_ino()
@@ -1767,7 +1767,7 @@ void Locker::handle_client_caps(MClientCaps *m)
delete m;
}

void Locker::process_cap_update(MDRequest *mdr, int client,
void Locker::process_cap_update(MDRequest *mdr, client_t client,
inodeno_t ino, __u64 cap_id, int caps, int wanted,
int seq, int issue_seq, int mseq,
const nstring& dname)
@@ -1811,7 +1811,7 @@ void Locker::process_cap_update(MDRequest *mdr, int client,

void Locker::kick_cap_releases(MDRequest *mdr)
{
int client = mdr->get_client();
client_t client = mdr->get_client();
for (map<vinodeno_t,ceph_seq_t>::iterator p = mdr->cap_releases.begin();
p != mdr->cap_releases.end();
p++) {
@@ -1842,7 +1842,7 @@ bool Locker::_do_cap_update(CInode *in, Capability *cap,
<< " wanted " << ccap_string(wanted)
<< " on " << *in << dendl;
assert(in->is_auth());
int client = m->get_source().num();
client_t client = m->get_source().num();
inode_t *latest = in->get_projected_inode();

// increase or zero max_size?
@@ -2019,7 +2019,7 @@ bool Locker::_do_cap_update(CInode *in, Capability *cap,

void Locker::handle_client_cap_release(MClientCapRelease *m)
{
int client = m->get_source().num();
client_t client = m->get_source().num();
dout(10) << "handle_client_cap_release " << *m << dendl;

for (vector<ceph_mds_cap_item>::iterator p = m->caps.begin(); p != m->caps.end(); p++) {
@@ -2064,7 +2064,7 @@ void Locker::handle_client_lease(MClientLease *m)
dout(10) << "handle_client_lease " << *m << dendl;

assert(m->get_source().is_client());
int client = m->get_source().num();
client_t client = m->get_source().num();

CInode *in = mdcache->get_inode(m->get_ino(), m->get_last());
if (!in) {
@@ -2135,7 +2135,7 @@ void Locker::handle_client_lease(MClientLease *m)

void Locker::_issue_client_lease(CDentry *dn, int mask, int pool, int client,
void Locker::_issue_client_lease(CDentry *dn, int mask, int pool, client_t client,
bufferlist &bl, utime_t now, Session *session)
{
LeaseStat e;
@@ -2160,7 +2160,7 @@ void Locker::_issue_client_lease(CDentry *dn, int mask, int pool, int client,

/*
int Locker::issue_client_lease(CInode *in, int client,
int Locker::issue_client_lease(CInode *in, client_t client,
bufferlist &bl, utime_t now, Session *session)
{
int mask = CEPH_LOCK_INO;
@@ -2175,7 +2175,7 @@ int Locker::issue_client_lease(CInode *in, int client,
}
*/

int Locker::issue_client_lease(CDentry *dn, int client,
int Locker::issue_client_lease(CDentry *dn, client_t client,
bufferlist &bl, utime_t now, Session *session)
{
int pool = 1; // fixme.. do something smart!
@@ -2201,7 +2201,7 @@ void Locker::revoke_client_leases(SimpleLock *lock)
{
int n = 0;
CDentry *dn = (CDentry*)lock->get_parent();
for (map<int, ClientLease*>::iterator p = dn->client_lease_map.begin();
for (map<client_t, ClientLease*>::iterator p = dn->client_lease_map.begin();
p != dn->client_lease_map.end();
p++) {
ClientLease *l = p->second;
@@ -104,7 +104,7 @@ public:
void try_eval(CInode *in, int mask);

bool _rdlock_kick(SimpleLock *lock);
bool rdlock_try(SimpleLock *lock, int client, Context *c);
bool rdlock_try(SimpleLock *lock, client_t client, Context *c);
bool rdlock_start(SimpleLock *lock, MDRequest *mut);
void rdlock_finish(SimpleLock *lock, Mutation *mut);

@@ -168,7 +168,7 @@ public:

// caps
void process_cap_update(MDRequest *mdr, int client,
void process_cap_update(MDRequest *mdr, client_t client,
inodeno_t ino, __u64 cap_id, int caps, int wanted,
int seq, int issue_seq, int mseq,
const nstring& dname);
@@ -223,10 +223,10 @@ public:
protected:
void handle_inode_file_caps(class MInodeFileCaps *m);

void file_update_finish(CInode *in, Mutation *mut, bool share, int client, Capability *cap,
void file_update_finish(CInode *in, Mutation *mut, bool share, client_t client, Capability *cap,
MClientCaps *ack);
public:
void calc_new_client_ranges(CInode *in, __u64 size, map<int,byte_range_t>& new_ranges);
void calc_new_client_ranges(CInode *in, __u64 size, map<client_t,byte_range_t>& new_ranges);
bool check_inode_max_size(CInode *in, bool force_wrlock=false, bool update_size=false, __u64 newsize=0,
utime_t mtime=utime_t());
void share_inode_max_size(CInode *in);
@@ -241,9 +241,9 @@ private:
public:
void handle_client_lease(class MClientLease *m);

void _issue_client_lease(CDentry *dn, int mask, int pool, int client, bufferlist &bl, utime_t now, Session *session);
int issue_client_lease(CInode *in, int client, bufferlist &bl, utime_t now, Session *session);
int issue_client_lease(CDentry *dn, int client, bufferlist &bl, utime_t now, Session *session);
void _issue_client_lease(CDentry *dn, int mask, int pool, client_t client, bufferlist &bl, utime_t now, Session *session);
int issue_client_lease(CInode *in, client_t client, bufferlist &bl, utime_t now, Session *session);
int issue_client_lease(CDentry *dn, client_t client, bufferlist &bl, utime_t now, Session *session);
void revoke_client_leases(SimpleLock *lock);
};
@@ -1287,14 +1287,14 @@ CInode *MDCache::cow_inode(CInode *in, snapid_t last)
add_inode(oldin);

// clone caps?
for (map<int,Capability*>::iterator p = in->client_caps.begin();
for (map<client_t,Capability*>::iterator p = in->client_caps.begin();
p != in->client_caps.end();
p++) {
Capability *cap = p->second;
if ((cap->issued() & CEPH_CAP_ANY_WR) &&
cap->client_follows < last) {
// clone to oldin
int client = p->first;
client_t client = p->first;
Capability *newcap = oldin->add_client_cap(client, 0, in->containing_realm);
cap->session_caps_item.get_xlist()->push_back(&newcap->session_caps_item);
newcap->issue(cap->issued());
@@ -3837,7 +3837,7 @@ void MDCache::process_imported_caps()

// process cap imports
// ino -> client -> frommds -> capex
map<inodeno_t,map<int, map<int,ceph_mds_cap_reconnect> > >::iterator p = cap_imports.begin();
map<inodeno_t,map<client_t, map<int,ceph_mds_cap_reconnect> > >::iterator p = cap_imports.begin();
while (p != cap_imports.end()) {
CInode *in = get_inode(p->first);
if (!in) {
@@ -3847,7 +3847,7 @@ void MDCache::process_imported_caps()
p++;
continue;
}
for (map<int, map<int,ceph_mds_cap_reconnect> >::iterator q = p->second.begin();
for (map<client_t, map<int,ceph_mds_cap_reconnect> >::iterator q = p->second.begin();
q != p->second.end();
++q)
for (map<int,ceph_mds_cap_reconnect>::iterator r = q->second.begin();
@@ -3868,10 +3868,10 @@ void MDCache::process_reconnected_caps()
{
dout(10) << "process_reconnected_caps" << dendl;

map<int,MClientSnap*> splits;
map<client_t,MClientSnap*> splits;

// adjust lock states appropriately
map<CInode*,map<int,inodeno_t> >::iterator p = reconnected_caps.begin();
map<CInode*,map<client_t,inodeno_t> >::iterator p = reconnected_caps.begin();
while (p != reconnected_caps.end()) {
CInode *in = p->first;
p++;
@@ -3898,7 +3898,7 @@ void MDCache::process_reconnected_caps()
}

// also, make sure client's cap is in the correct snaprealm.
for (map<int,inodeno_t>::iterator q = p->second.begin();
for (map<client_t,inodeno_t>::iterator q = p->second.begin();
q != p->second.end();
q++) {
if (q->second == realm->inode->ino()) {
@@ -3921,8 +3921,8 @@ void MDCache::process_reconnected_caps()
send_snaps(splits);
}

void MDCache::prepare_realm_split(SnapRealm *realm, int client, inodeno_t ino,
map<int,MClientSnap*>& splits)
void MDCache::prepare_realm_split(SnapRealm *realm, client_t client, inodeno_t ino,
map<client_t,MClientSnap*>& splits)
{
MClientSnap *snap;
if (splits.count(client) == 0) {
@@ -3940,14 +3940,14 @@ void MDCache::prepare_realm_split(SnapRealm *realm, int client, inodeno_t ino,
snap->split_inos.push_back(ino);
}

void MDCache::send_snaps(map<int,MClientSnap*>& splits)
void MDCache::send_snaps(map<client_t,MClientSnap*>& splits)
{
dout(10) << "send_snaps" << dendl;

for (map<int,MClientSnap*>::iterator p = splits.begin();
for (map<client_t,MClientSnap*>::iterator p = splits.begin();
p != splits.end();
p++) {
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(p->first));
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(p->first.v));
if (session) {
dout(10) << " client" << p->first
<< " split " << p->second->head.split
@@ -3990,11 +3990,11 @@ void MDCache::clean_open_file_lists()

void MDCache::rejoin_import_cap(CInode *in, int client, ceph_mds_cap_reconnect& icr, int frommds)
void MDCache::rejoin_import_cap(CInode *in, client_t client, ceph_mds_cap_reconnect& icr, int frommds)
{
dout(10) << "rejoin_import_cap for client" << client << " from mds" << frommds
<< " on " << *in << dendl;
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(client));
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(client.v));
assert(session);

Capability *cap = in->reconnect_cap(client, icr, session);
@@ -4005,7 +4005,7 @@ void MDCache::rejoin_import_cap(CInode *in, int client, ceph_mds_cap_reconnect&

void MDCache::try_reconnect_cap(CInode *in, Session *session)
{
int client = session->get_client();
client_t client = session->get_client();
ceph_mds_cap_reconnect *rc = get_replay_cap_reconnect(in->ino(), client);
if (rc) {
in->reconnect_cap(client, *rc, session);
@@ -4027,7 +4027,7 @@ void MDCache::try_reconnect_cap(CInode *in, Session *session)

void MDCache::do_cap_import(Session *session, CInode *in, Capability *cap)
{
int client = session->inst.name.num();
client_t client = session->inst.name.num();
SnapRealm *realm = in->find_snaprealm();
if (realm->have_past_parents_open()) {
dout(10) << "do_cap_import " << session->inst.name << " mseq " << cap->get_mseq() << " on " << *in << dendl;
@@ -4056,17 +4056,17 @@ void MDCache::do_delayed_cap_imports()
{
dout(10) << "do_delayed_cap_imports" << dendl;

map<int,set<CInode*> > d;
map<client_t,set<CInode*> > d;
d.swap(delayed_imported_caps);

for (map<int,set<CInode*> >::iterator p = d.begin();
for (map<client_t,set<CInode*> >::iterator p = d.begin();
p != d.end();
p++) {
for (set<CInode*>::iterator q = p->second.begin();
q != p->second.end();
q++) {
CInode *in = *q;
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(p->first));
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(p->first.v));
if (session) {
Capability *cap = in->get_client_cap(p->first);
if (cap) {
@@ -4093,10 +4093,10 @@ void MDCache::open_snap_parents()
{
dout(10) << "open_snap_parents" << dendl;

map<int,MClientSnap*> splits;
map<client_t,MClientSnap*> splits;
C_Gather *gather = new C_Gather;

map<CInode*,map<int,set<inodeno_t> > >::iterator p = missing_snap_parents.begin();
map<CInode*,map<client_t,set<inodeno_t> > >::iterator p = missing_snap_parents.begin();
while (p != missing_snap_parents.end()) {
CInode *in = p->first;
assert(in->snaprealm);
@@ -4104,7 +4104,7 @@ void MDCache::open_snap_parents()
dout(10) << " past parents now open on " << *in << dendl;

// include in a (now safe) snap split?
for (map<int,set<inodeno_t> >::iterator q = p->second.begin();
for (map<client_t,set<inodeno_t> >::iterator q = p->second.begin();
q != p->second.end();
q++)
for (set<inodeno_t>::iterator r = q->second.begin();
@@ -4117,9 +4117,9 @@ void MDCache::open_snap_parents()
in->put(CInode::PIN_OPENINGSNAPPARENTS);

// finish off client snaprealm reconnects?
map<inodeno_t,map<int,snapid_t> >::iterator q = reconnected_snaprealms.find(in->ino());
map<inodeno_t,map<client_t,snapid_t> >::iterator q = reconnected_snaprealms.find(in->ino());
if (q != reconnected_snaprealms.end()) {
for (map<int,snapid_t>::iterator r = q->second.begin();
for (map<client_t,snapid_t>::iterator r = q->second.begin();
r != q->second.end();
r++)
finish_snaprealm_reconnect(r->first, in->snaprealm, r->second);
@@ -4144,14 +4144,14 @@ void MDCache::open_snap_parents()
}
}

void MDCache::finish_snaprealm_reconnect(int client, SnapRealm *realm, snapid_t seq)
void MDCache::finish_snaprealm_reconnect(client_t client, SnapRealm *realm, snapid_t seq)
{
if (seq < realm->get_newest_seq()) {
dout(10) << "finish_snaprealm_reconnect client" << client << " has old seq " << seq << " < "
<< realm->get_newest_seq()
<< " on " << *realm << dendl;
// send an update
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(client));
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(client.v));
if (session) {
MClientSnap *snap = new MClientSnap(CEPH_SNAP_OP_UPDATE);
realm->build_snap_trace(snap->bl);
@@ -4381,7 +4381,7 @@ void MDCache::identify_files_to_recover()
continue;

bool recover = false;
for (map<int,byte_range_t>::iterator p = in->inode.client_ranges.begin();
for (map<client_t,byte_range_t>::iterator p = in->inode.client_ranges.begin();
p != in->inode.client_ranges.end();
p++) {
Capability *cap = in->get_client_cap(p->first);
@@ -5249,7 +5249,7 @@ void MDCache::check_memory_usage()
}

void MDCache::remove_client_cap(CInode *in, int client)
void MDCache::remove_client_cap(CInode *in, client_t client)
{
in->remove_client_cap(client);

@@ -5645,7 +5645,7 @@ int MDCache::path_traverse(MDRequest *mdr, Message *req, // who
if (mdr)
mdr->snapid = snapid;

int client = (mdr && mdr->reqid.name.is_client()) ? mdr->reqid.name.num() : -1;
client_t client = (mdr && mdr->reqid.name.is_client()) ? mdr->reqid.name.num() : -1;

if (mds->logger) mds->logger->inc(l_mds_t);

@@ -6688,7 +6688,7 @@ void MDCache::do_realm_invalidate_and_update_notify(CInode *in, int snapop)
bufferlist snapbl;
in->snaprealm->build_snap_trace(snapbl);

map<int, MClientSnap*> updates;
map<client_t, MClientSnap*> updates;
list<SnapRealm*> q;
q.push_back(in->snaprealm);
while (!q.empty()) {
@@ -6698,7 +6698,7 @@ void MDCache::do_realm_invalidate_and_update_notify(CInode *in, int snapop)
dout(10) << " realm " << *realm << " on " << *realm->inode << dendl;
realm->invalidate_cached_snaps();

for (map<int, xlist<Capability*> >::iterator p = realm->client_caps.begin();
for (map<client_t, xlist<Capability*> >::iterator p = realm->client_caps.begin();
p != realm->client_caps.end();
p++) {
assert(!p->second.empty());
@@ -121,9 +121,9 @@ struct Mutation {
bool is_master() { return slave_to_mds < 0; }
bool is_slave() { return slave_to_mds >= 0; }

int get_client() {
client_t get_client() {
if (reqid.name.is_client())
return reqid.name.num();
return client_t(reqid.name.num());
return -1;
}

@@ -303,8 +303,8 @@ struct MDRequest : public Mutation {
CInode* destdn_was_remote_inode;
bool was_link_merge;

map<__u32,entity_inst_t> imported_client_map;
map<CInode*, map<__u32,Capability::Export> > cap_imports;
map<client_t,entity_inst_t> imported_client_map;
map<CInode*, map<client_t,Capability::Export> > cap_imports;

// for snaps
version_t stid;
@@ -466,7 +466,7 @@ public:
// -- client caps --
__u64 last_cap_id;

void remove_client_cap(CInode *in, int client);
void remove_client_cap(CInode *in, client_t client);

// -- discover --
@@ -684,10 +684,10 @@ protected:
set<int> rejoin_sent; // nodes i sent a rejoin to
set<int> rejoin_ack_gather; // nodes from whom i need a rejoin ack

map<inodeno_t,map<int,ceph_mds_cap_reconnect> > cap_exports; // ino -> client -> capex
map<inodeno_t,map<client_t,ceph_mds_cap_reconnect> > cap_exports; // ino -> client -> capex
map<inodeno_t,string> cap_export_paths;

map<inodeno_t,map<int,map<int,ceph_mds_cap_reconnect> > > cap_imports; // ino -> client -> frommds -> capex
map<inodeno_t,map<client_t,map<int,ceph_mds_cap_reconnect> > > cap_imports; // ino -> client -> frommds -> capex
map<inodeno_t,filepath> cap_import_paths;
set<inodeno_t> cap_imports_missing;

@@ -709,16 +709,16 @@ protected:
public:
void rejoin_gather_finish();
void rejoin_send_rejoins();
void rejoin_export_caps(inodeno_t ino, int client, cap_reconnect_t& icr) {
void rejoin_export_caps(inodeno_t ino, client_t client, cap_reconnect_t& icr) {
cap_exports[ino][client] = icr.capinfo;
cap_export_paths[ino] = icr.path;
}
void rejoin_recovered_caps(inodeno_t ino, int client, cap_reconnect_t& icr,
void rejoin_recovered_caps(inodeno_t ino, client_t client, cap_reconnect_t& icr,
int frommds=-1) {
cap_imports[ino][client][frommds] = icr.capinfo;
cap_import_paths[ino] = filepath(icr.path, (__u64)icr.capinfo.pathbase);
}
ceph_mds_cap_reconnect *get_replay_cap_reconnect(inodeno_t ino, int client) {
ceph_mds_cap_reconnect *get_replay_cap_reconnect(inodeno_t ino, client_t client) {
if (cap_imports.count(ino) &&
cap_imports[ino].count(client) &&
cap_imports[ino][client].count(-1)) {
@@ -726,36 +726,36 @@ public:
}
return NULL;
}
void remove_replay_cap_reconnect(inodeno_t ino, int client) {
void remove_replay_cap_reconnect(inodeno_t ino, client_t client) {
assert(cap_imports[ino].size() == 1);
assert(cap_imports[ino][client].size() == 1);
cap_imports.erase(ino);
}

// [reconnect/rejoin caps]
map<CInode*,map<int, inodeno_t> > reconnected_caps; // inode -> client -> realmino
map<inodeno_t,map<int, snapid_t> > reconnected_snaprealms; // realmino -> client -> realmseq
map<CInode*,map<client_t, inodeno_t> > reconnected_caps; // inode -> client -> realmino
map<inodeno_t,map<client_t, snapid_t> > reconnected_snaprealms; // realmino -> client -> realmseq

void add_reconnected_cap(CInode *in, int client, inodeno_t realm) {
void add_reconnected_cap(CInode *in, client_t client, inodeno_t realm) {
reconnected_caps[in][client] = realm;
}
void add_reconnected_snaprealm(int client, inodeno_t ino, snapid_t seq) {
void add_reconnected_snaprealm(client_t client, inodeno_t ino, snapid_t seq) {
reconnected_snaprealms[ino][client] = seq;
}
void process_imported_caps();
void process_reconnected_caps();
void prepare_realm_split(SnapRealm *realm, int client, inodeno_t ino,
map<int,MClientSnap*>& splits);
void prepare_realm_split(SnapRealm *realm, client_t client, inodeno_t ino,
map<client_t,MClientSnap*>& splits);
void do_realm_invalidate_and_update_notify(CInode *in, int snapop);
void send_snaps(map<int,MClientSnap*>& splits);
void rejoin_import_cap(CInode *in, int client, ceph_mds_cap_reconnect& icr, int frommds);
void finish_snaprealm_reconnect(int client, SnapRealm *realm, snapid_t seq);
void send_snaps(map<client_t,MClientSnap*>& splits);
void rejoin_import_cap(CInode *in, client_t client, ceph_mds_cap_reconnect& icr, int frommds);
void finish_snaprealm_reconnect(client_t client, SnapRealm *realm, snapid_t seq);
void try_reconnect_cap(CInode *in, Session *session);

// cap imports. delayed snap parent opens.
// realm inode -> client -> cap inodes needing to split to this realm
map<CInode*,map<int, set<inodeno_t> > > missing_snap_parents;
map<int,set<CInode*> > delayed_imported_caps;
map<CInode*,map<client_t, set<inodeno_t> > > missing_snap_parents;
map<client_t,set<CInode*> > delayed_imported_caps;

void do_cap_import(Session *session, CInode *in, Capability *cap);
void do_delayed_cap_imports();
@@ -351,12 +351,12 @@ void MDS::forward_message_mds(Message *m, int mds)

void MDS::send_message_client(Message *m, int client)
void MDS::send_message_client(Message *m, client_t client)
{
if (sessionmap.have_session(entity_name_t::CLIENT(client))) {
if (sessionmap.have_session(entity_name_t::CLIENT(client.v))) {
version_t seq = sessionmap.inc_push_seq(client);
dout(10) << "send_message_client client" << client << " seq " << seq << " " << *m << dendl;
messenger->send_message(m, sessionmap.get_session(entity_name_t::CLIENT(client))->inst);
messenger->send_message(m, sessionmap.get_session(entity_name_t::CLIENT(client.v))->inst);
} else {
dout(10) << "send_message_client no session for client" << client << " " << *m << dendl;
}
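Note the pattern above: entity_name_t::CLIENT() and the session map still take the raw numeric id, so call sites unwrap the wrapper explicitly with .v. A small standalone sketch of why that is needed (client_entity_name() below is a hypothetical stand-in, not a Ceph API):

#include <cstdint>
#include <iostream>
#include <string>

struct client_t {
  int64_t v;
  client_t(int64_t _v = -2) : v(_v) {}
};

// Hypothetical stand-in for an interface that still wants a raw integer,
// like entity_name_t::CLIENT() in the hunk above.
static std::string client_entity_name(int64_t num) {
  return "client." + std::to_string(num);   // assumes C++11 std::to_string
}

int main() {
  client_t client(4100);
  // client_t defines no implicit conversion back to an integer, so the
  // value is unwrapped explicitly with .v at boundaries like this one.
  std::cout << client_entity_name(client.v) << "\n";
  return 0;
}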
@@ -316,7 +316,7 @@ class MDS : public Dispatcher {
void send_message_mds(Message *m, int mds);
void forward_message_mds(Message *req, int mds);

void send_message_client(Message *m, int client);
void send_message_client(Message *m, client_t client);
void send_message_client(Message *m, entity_inst_t clientinst);
@@ -829,7 +829,7 @@ void Migrator::export_go_synced(CDir *dir)
// fill export message with cache data
MExportDir *req = new MExportDir(dir->dirfrag());
utime_t now = g_clock.now();
map<__u32,entity_inst_t> exported_client_map;
map<client_t,entity_inst_t> exported_client_map;
int num_exported_inodes = encode_export_dir(req->export_data,
dir, // recur start point
exported_client_map,
@@ -864,7 +864,7 @@ void Migrator::export_go_synced(CDir *dir)
* is pretty arbitrary and dumb.
*/
void Migrator::encode_export_inode(CInode *in, bufferlist& enc_state,
map<__u32,entity_inst_t>& exported_client_map)
map<client_t,entity_inst_t>& exported_client_map)
{
dout(7) << "encode_export_inode " << *in << dendl;
assert(!in->is_replica(mds->get_nodeid()));
@@ -878,20 +878,20 @@ void Migrator::encode_export_inode(CInode *in, bufferlist& enc_state,
}

void Migrator::encode_export_inode_caps(CInode *in, bufferlist& bl,
map<__u32,entity_inst_t>& exported_client_map)
map<client_t,entity_inst_t>& exported_client_map)
{
// encode caps
map<int,Capability::Export> cap_map;
map<client_t,Capability::Export> cap_map;
in->export_client_caps(cap_map);
::encode(cap_map, bl);

in->state_set(CInode::STATE_EXPORTINGCAPS);

// make note of clients named by exported capabilities
for (map<int, Capability*>::iterator it = in->client_caps.begin();
for (map<client_t, Capability*>::iterator it = in->client_caps.begin();
it != in->client_caps.end();
it++)
exported_client_map[it->first] = mds->sessionmap.get_inst(entity_name_t::CLIENT(it->first));
exported_client_map[it->first] = mds->sessionmap.get_inst(entity_name_t::CLIENT(it->first.v));
}

void Migrator::finish_export_inode_caps(CInode *in)
@@ -899,7 +899,7 @@ void Migrator::finish_export_inode_caps(CInode *in)
in->state_clear(CInode::STATE_EXPORTINGCAPS);

// tell (all) clients about migrating caps..
for (map<int, Capability*>::iterator it = in->client_caps.begin();
for (map<client_t, Capability*>::iterator it = in->client_caps.begin();
it != in->client_caps.end();
it++) {
Capability *cap = it->second;
@@ -961,7 +961,7 @@ void Migrator::finish_export_inode(CInode *in, utime_t now, list<Context*>& fini

int Migrator::encode_export_dir(bufferlist& exportbl,
CDir *dir,
map<__u32,entity_inst_t>& exported_client_map,
map<client_t,entity_inst_t>& exported_client_map,
utime_t now)
{
int num_exported = 0;
@@ -1629,7 +1629,7 @@ class C_MDS_ImportDirLoggedStart : public Context {
CDir *dir;
int from;
public:
map<__u32,entity_inst_t> imported_client_map;
map<client_t,entity_inst_t> imported_client_map;

C_MDS_ImportDirLoggedStart(Migrator *m, CDir *d, int f) :
migrator(m), dir(d), from(f) {
@@ -1807,7 +1807,7 @@ void Migrator::import_reverse(CDir *dir)
}

// reexport caps
for (map<CInode*, map<__u32,Capability::Export> >::iterator p = import_caps[dir].begin();
for (map<CInode*, map<client_t,Capability::Export> >::iterator p = import_caps[dir].begin();
p != import_caps[dir].end();
++p) {
CInode *in = p->first;
@@ -1815,7 +1815,7 @@ void Migrator::import_reverse(CDir *dir)
* bleh.. just export all caps for this inode. the auth mds
* will pick them up during recovery.
*/
map<int,Capability::Export> cap_map; // throw this away
map<client_t,Capability::Export> cap_map; // throw this away
in->export_client_caps(cap_map);
finish_export_inode_caps(in);
}
@@ -1885,7 +1885,7 @@ void Migrator::import_reverse_final(CDir *dir)

void Migrator::import_logged_start(CDir *dir, int from,
map<__u32,entity_inst_t>& imported_client_map)
map<client_t,entity_inst_t>& imported_client_map)
{
dout(7) << "import_logged " << *dir << dendl;

@@ -1895,7 +1895,7 @@ void Migrator::import_logged_start(CDir *dir, int from,
// force open client sessions and finish cap import
mds->server->finish_force_open_sessions(imported_client_map);

for (map<CInode*, map<__u32,Capability::Export> >::iterator p = import_caps[dir].begin();
for (map<CInode*, map<client_t,Capability::Export> >::iterator p = import_caps[dir].begin();
p != import_caps[dir].end();
++p) {
finish_import_inode_caps(p->first, from, p->second);
@@ -1977,7 +1977,7 @@ void Migrator::import_finish(CDir *dir)

void Migrator::decode_import_inode(CDentry *dn, bufferlist::iterator& blp, int oldauth,
LogSegment *ls,
map<CInode*, map<__u32,Capability::Export> >& cap_imports,
map<CInode*, map<client_t,Capability::Export> >& cap_imports,
list<ScatterLock*>& updated_scatterlocks)
{
dout(15) << "decode_import_inode on " << *dn << dendl;
@@ -2033,9 +2033,9 @@ void Migrator::decode_import_inode(CDentry *dn, bufferlist::iterator& blp, int o

void Migrator::decode_import_inode_caps(CInode *in,
bufferlist::iterator &blp,
map<CInode*, map<__u32,Capability::Export> >& cap_imports)
map<CInode*, map<client_t,Capability::Export> >& cap_imports)
{
map<__u32,Capability::Export> cap_map;
map<client_t,Capability::Export> cap_map;
::decode(cap_map, blp);
if (!cap_map.empty()) {
cap_imports[in].swap(cap_map);
@@ -2044,15 +2044,15 @@ void Migrator::decode_import_inode_caps(CInode *in,
}

void Migrator::finish_import_inode_caps(CInode *in, int from,
map<__u32,Capability::Export> &cap_map)
map<client_t,Capability::Export> &cap_map)
{
assert(!cap_map.empty());

for (map<__u32,Capability::Export>::iterator it = cap_map.begin();
for (map<client_t,Capability::Export>::iterator it = cap_map.begin();
it != cap_map.end();
it++) {
dout(0) << "finish_import_inode_caps for client" << it->first << " on " << *in << dendl;
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(it->first));
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(it->first.v));
assert(session);

Capability *cap = in->get_client_cap(it->first);
@@ -2072,7 +2072,7 @@ int Migrator::decode_import_dir(bufferlist::iterator& blp,
CDir *import_root,
EImportStart *le,
LogSegment *ls,
map<CInode*, map<__u32,Capability::Export> >& cap_imports,
map<CInode*, map<client_t,Capability::Export> >& cap_imports,
list<ScatterLock*>& updated_scatterlocks)
{
// set up dir
@@ -2277,7 +2277,7 @@ class C_M_LoggedImportCaps : public Context {
CInode *in;
int from;
public:
map<CInode*, map<__u32,Capability::Export> > cap_imports;
map<CInode*, map<client_t,Capability::Export> > cap_imports;

C_M_LoggedImportCaps(Migrator *m, CInode *i, int f) : migrator(m), in(i), from(f) {}
void finish(int r) {
@@ -2318,7 +2318,7 @@ void Migrator::handle_export_caps(MExportCaps *ex)

void Migrator::logged_import_caps(CInode *in,
int from,
map<CInode*, map<__u32,Capability::Export> >& cap_imports)
map<CInode*, map<client_t,Capability::Export> >& cap_imports)
{
dout(10) << "logged_import_caps on " << *in << dendl;
assert(cap_imports.count(in));
@@ -117,7 +117,7 @@ protected:
map<CDir*,set<int> > import_bystanders;
map<CDir*,list<dirfrag_t> > import_bound_ls;
map<CDir*,list<ScatterLock*> > import_updated_scatterlocks;
map<CDir*, map<CInode*, map<__u32,Capability::Export> > > import_caps;
map<CDir*, map<CInode*, map<client_t,Capability::Export> > > import_caps;

public:
@@ -183,15 +183,15 @@ public:
}

void encode_export_inode(CInode *in, bufferlist& bl,
map<__u32,entity_inst_t>& exported_client_map);
map<client_t,entity_inst_t>& exported_client_map);
void encode_export_inode_caps(CInode *in, bufferlist& bl,
map<__u32,entity_inst_t>& exported_client_map);
map<client_t,entity_inst_t>& exported_client_map);
void finish_export_inode(CInode *in, utime_t now, list<Context*>& finished);
void finish_export_inode_caps(CInode *in);

int encode_export_dir(bufferlist& exportbl,
CDir *dir,
map<__u32,entity_inst_t>& exported_client_map,
map<client_t,entity_inst_t>& exported_client_map,
utime_t now);
void finish_export_dir(CDir *dir, list<Context*>& finished, utime_t now);

@@ -230,18 +230,18 @@ public:
public:
void decode_import_inode(CDentry *dn, bufferlist::iterator& blp, int oldauth,
LogSegment *ls,
map<CInode*, map<__u32,Capability::Export> >& cap_imports,
map<CInode*, map<client_t,Capability::Export> >& cap_imports,
list<ScatterLock*>& updated_scatterlocks);
void decode_import_inode_caps(CInode *in,
bufferlist::iterator &blp,
map<CInode*, map<__u32,Capability::Export> >& cap_imports);
void finish_import_inode_caps(CInode *in, int from, map<__u32,Capability::Export> &cap_map);
map<CInode*, map<client_t,Capability::Export> >& cap_imports);
void finish_import_inode_caps(CInode *in, int from, map<client_t,Capability::Export> &cap_map);
int decode_import_dir(bufferlist::iterator& blp,
int oldauth,
CDir *import_root,
EImportStart *le,
LogSegment *ls,
map<CInode*, map<__u32,Capability::Export> >& cap_imports,
map<CInode*, map<client_t,Capability::Export> >& cap_imports,
list<ScatterLock*>& updated_scatterlocks);

public:
@@ -252,7 +252,7 @@ protected:
void import_reverse_final(CDir *dir);
void import_notify_abort(CDir *dir, set<CDir*>& bounds);
void import_logged_start(CDir *dir, int from,
map<__u32,entity_inst_t> &imported_client_map);
map<client_t,entity_inst_t> &imported_client_map);
void handle_export_finish(MExportDirFinish *m);
public:
void import_finish(CDir *dir);
@@ -261,7 +261,7 @@ protected:
void handle_export_caps(MExportCaps *m);
void logged_import_caps(CInode *in,
int from,
map<CInode*, map<__u32,Capability::Export> >& cap_imports);
map<CInode*, map<client_t,Capability::Export> >& cap_imports);

friend class C_MDS_ImportDirLoggedStart;
@@ -283,13 +283,13 @@ void Server::_session_logged(Session *session, bool open, version_t pv, interval
mds->sessionmap.version++; // noop
}

version_t Server::prepare_force_open_sessions(map<__u32,entity_inst_t>& cm)
version_t Server::prepare_force_open_sessions(map<client_t,entity_inst_t>& cm)
{
version_t pv = ++mds->sessionmap.projected;
dout(10) << "prepare_force_open_sessions " << pv
<< " on " << cm.size() << " clients"
<< dendl;
for (map<__u32,entity_inst_t>::iterator p = cm.begin(); p != cm.end(); ++p) {
for (map<client_t,entity_inst_t>::iterator p = cm.begin(); p != cm.end(); ++p) {
Session *session = mds->sessionmap.get_or_add_session(p->second);
if (session->is_undef() || session->is_closing())
mds->sessionmap.set_state(session, Session::STATE_OPENING);
@@ -298,7 +298,7 @@ version_t Server::prepare_force_open_sessions(map<__u32,entity_inst_t>& cm)
return pv;
}

void Server::finish_force_open_sessions(map<__u32,entity_inst_t>& cm)
void Server::finish_force_open_sessions(map<client_t,entity_inst_t>& cm)
{
/*
* FIXME: need to carefully consider the race conditions between a
@@ -307,7 +307,7 @@ void Server::finish_force_open_sessions(map<__u32,entity_inst_t>& cm)
*/
dout(10) << "finish_force_open_sessions on " << cm.size() << " clients,"
<< " v " << mds->sessionmap.version << " -> " << (mds->sessionmap.version+1) << dendl;
for (map<__u32,entity_inst_t>::iterator p = cm.begin(); p != cm.end(); ++p) {
for (map<client_t,entity_inst_t>::iterator p = cm.begin(); p != cm.end(); ++p) {
Session *session = mds->sessionmap.get_session(p->second.name);
assert(session);
if (session->is_opening()) {
@@ -550,10 +550,10 @@ void Server::reconnect_tick()
if (g_clock.now() >= reconnect_end &&
!client_reconnect_gather.empty()) {
dout(10) << "reconnect timed out" << dendl;
for (set<int>::iterator p = client_reconnect_gather.begin();
for (set<client_t>::iterator p = client_reconnect_gather.begin();
p != client_reconnect_gather.end();
p++) {
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(*p));
Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(p->v));
dout(1) << "reconnect gave up on " << session->inst << dendl;

/* no, we need to respect g_conf.mds_session_autoclose
@@ -818,7 +818,7 @@ void Server::set_trace_dist(Session *session, MClientReply *reply,
// inode, dentry, dir, ..., inode
bufferlist bl;
int whoami = mds->get_nodeid();
int client = session->get_client();
client_t client = session->get_client();
utime_t now = g_clock.now();

dout(20) << "set_trace_dist snapid " << snapid << dendl;
@@ -938,7 +938,7 @@ void Server::handle_client_request(MClientRequest *req)
// (only if NOT replay!)
if (req->get_source().is_client() &&
!req->is_replay()) {
int client = req->get_source().num();
client_t client = req->get_source().num();
for (vector<MClientRequest::Release>::iterator p = req->releases.begin();
p != req->releases.end();
p++)
@@ -1401,7 +1401,7 @@ CDentry* Server::prepare_null_dentry(MDRequest *mdr, CDir *dir, const string& dn
dout(10) << "prepare_null_dentry " << dname << " in " << *dir << dendl;
assert(dir->is_auth());

int client = mdr->get_client();
client_t client = mdr->get_client();

// does it already exist?
CDentry *dn = dir->lookup(dname);
@@ -1642,7 +1642,7 @@ CDentry* Server::rdlock_path_xlock_dentry(MDRequest *mdr, int n,

dout(10) << "rdlock_path_xlock_dentry " << *mdr << " " << refpath << dendl;

int client = mdr->get_client();
client_t client = mdr->get_client();

if (mdr->done_locking)
return mdr->dn[n].back();
@ -1778,7 +1778,7 @@ void Server::handle_client_stat(MDRequest *mdr)
|
||||
* handling this case here is easier than weakening rdlock
|
||||
* semantics... that would cause problems elsewhere.
|
||||
*/
|
||||
int client = mdr->get_client();
|
||||
client_t client = mdr->get_client();
|
||||
int issued = 0;
|
||||
Capability *cap = ref->get_client_cap(client);
|
||||
if (cap)
|
||||
@ -2050,7 +2050,7 @@ public:
|
||||
void Server::handle_client_openc(MDRequest *mdr)
|
||||
{
|
||||
MClientRequest *req = mdr->client_request;
|
||||
int client = mdr->get_client();
|
||||
client_t client = mdr->get_client();
|
||||
|
||||
dout(7) << "open w/ O_CREAT on " << req->get_filepath() << dendl;
|
||||
|
||||
@ -2130,7 +2130,7 @@ void Server::handle_client_openc(MDRequest *mdr)
|
||||
void Server::handle_client_readdir(MDRequest *mdr)
|
||||
{
|
||||
MClientRequest *req = mdr->client_request;
|
||||
int client = req->get_orig_source().num();
|
||||
client_t client = req->get_orig_source().num();
|
||||
set<SimpleLock*> rdlocks, wrlocks, xlocks;
|
||||
CInode *diri = rdlock_path_pin_ref(mdr, 0, rdlocks, false);
|
||||
if (!diri) return;
|
||||
@ -2414,7 +2414,7 @@ void Server::handle_client_setattr(MDRequest *mdr)
|
||||
pi->rstat.rbytes = pi->size;
|
||||
|
||||
// adjust client's max_size?
|
||||
map<int,byte_range_t> new_ranges;
|
||||
map<client_t,byte_range_t> new_ranges;
|
||||
mds->locker->calc_new_client_ranges(cur, pi->size, new_ranges);
|
||||
if (pi->client_ranges != new_ranges) {
|
||||
dout(10) << " client_ranges " << pi->client_ranges << " -> " << new_ranges << dendl;
|
||||
@ -2442,7 +2442,7 @@ void Server::handle_client_setattr(MDRequest *mdr)
|
||||
void Server::handle_client_opent(MDRequest *mdr, int cmode)
|
||||
{
|
||||
CInode *in = mdr->in[0];
|
||||
int client = mdr->get_client();
|
||||
client_t client = mdr->get_client();
|
||||
assert(in);
|
||||
|
||||
dout(10) << "handle_client_opent " << *in << dendl;
|
||||
@ -3401,7 +3401,7 @@ void Server::handle_slave_link_prep_ack(MDRequest *mdr, MMDSSlaveRequest *m)
|
||||
void Server::handle_client_unlink(MDRequest *mdr)
|
||||
{
|
||||
MClientRequest *req = mdr->client_request;
|
||||
int client = mdr->get_client();
|
||||
client_t client = mdr->get_client();
|
||||
|
||||
// rmdir or unlink?
|
||||
bool rmdir = false;
|
||||
@ -4697,7 +4697,7 @@ void Server::_logged_slave_rename(MDRequest *mdr,
|
||||
// export srci?
|
||||
if (srcdn->is_auth() && srcdnl->is_primary()) {
|
||||
list<Context*> finished;
|
||||
map<__u32,entity_inst_t> exported_client_map;
|
||||
map<client_t,entity_inst_t> exported_client_map;
|
||||
bufferlist inodebl;
|
||||
mdcache->migrator->encode_export_inode(srcdnl->get_inode(), inodebl,
|
||||
exported_client_map);
|
||||
|
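The hunks above switch the client-map and reconnect-gather containers to map<client_t,entity_inst_t> and set<client_t>, and rely on an implicit conversion where a raw number is assigned (e.g. client_t client = req->get_source().num();). A minimal, self-contained sketch of why that works once the id is a struct: the wrapper needs a converting constructor, a strict-weak-ordering operator< for use as an ordered container key, and a stream operator for dout-style logging. ClientId, client_map, and reconnect_gather below are hypothetical stand-ins, not the Ceph definitions.

```cpp
// Hypothetical sketch -- not the real Ceph types. Shows the three pieces an
// id wrapper needs once it replaces a plain int as a map key / set element:
// a converting constructor, operator<, and operator<< for log output.
#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <string>

struct ClientId {                                  // stand-in for the client id wrapper
  int64_t v;
  ClientId(int64_t _v = -2) : v(_v) {}             // non-explicit: allows id = raw number
};
inline bool operator<(const ClientId& a, const ClientId& b) { return a.v < b.v; }
inline std::ostream& operator<<(std::ostream& out, const ClientId& c) { return out << c.v; }

int main() {
  ClientId c = 4115;                               // implicit conversion from a raw number
  std::map<ClientId, std::string> client_map;      // ordered map needs operator< on the key
  client_map[c] = "client.4115 172.16.0.2:0/1";
  std::set<ClientId> reconnect_gather{c, ClientId(4120)};

  for (const auto& p : client_map)                 // same shape as the loops in the diff
    std::cout << "opening session for client." << p.first << " -> " << p.second << "\n";
  std::cout << "still waiting on " << reconnect_gather.size() << " clients\n";
  return 0;
}
```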
@@ -65,18 +65,18 @@ public:

// -- sessions and recovery --
utime_t reconnect_start;
-set<int> client_reconnect_gather; // clients i need a reconnect msg from.
+set<client_t> client_reconnect_gather; // clients i need a reconnect msg from.

void handle_client_session(class MClientSession *m);
void _session_logged(Session *session, bool open, version_t pv, interval_set<inodeno_t>& inos,version_t piv);
void _finish_session_purge(Session *);
-version_t prepare_force_open_sessions(map<__u32,entity_inst_t> &cm);
-void finish_force_open_sessions(map<__u32,entity_inst_t> &cm);
+version_t prepare_force_open_sessions(map<client_t,entity_inst_t> &cm);
+void finish_force_open_sessions(map<client_t,entity_inst_t> &cm);
void terminate_sessions();
void find_idle_sessions();
void reconnect_clients();
void handle_client_reconnect(class MClientReconnect *m);
-void process_reconnect_cap(CInode *in, int from, ceph_mds_cap_reconnect& capinfo);
+//void process_reconnect_cap(CInode *in, int from, ceph_mds_cap_reconnect& capinfo);
void reconnect_gather_finish();
void reconnect_tick();
@@ -81,7 +81,7 @@ public:
return prealloc_inos.size() + pending_prealloc_inos.size();
}

-int get_client() { return inst.name.num(); }
+client_t get_client() { return client_t(inst.name.num()); }

bool is_undef() { return state == STATE_UNDEF; }
bool is_opening() { return state == STATE_OPENING; }
@@ -229,7 +229,7 @@ public:
}
void dump();

-void get_client_set(set<int>& s) {
+void get_client_set(set<client_t>& s) {
for (hash_map<entity_name_t,Session*>::iterator p = session_map.begin();
p != session_map.end();
p++)
@@ -244,8 +244,8 @@ public:
s.insert(p->second);
}

-void open_sessions(map<__u32,entity_inst_t>& client_map) {
-for (map<__u32,entity_inst_t>::iterator p = client_map.begin();
+void open_sessions(map<client_t,entity_inst_t>& client_map) {
+for (map<client_t,entity_inst_t>::iterator p = client_map.begin();
p != client_map.end();
++p) {
Session *session = get_or_add_session(p->second);
@@ -260,11 +260,11 @@ public:
assert(session_map.count(w));
return session_map[w]->inst;
}
-version_t inc_push_seq(int client) {
-return get_session(entity_name_t::CLIENT(client))->inc_push_seq();
+version_t inc_push_seq(client_t client) {
+return get_session(entity_name_t::CLIENT(client.v))->inc_push_seq();
}
-version_t get_push_seq(int client) {
-return get_session(entity_name_t::CLIENT(client))->get_push_seq();
+version_t get_push_seq(client_t client) {
+return get_session(entity_name_t::CLIENT(client.v))->get_push_seq();
}
bool have_completed_request(metareqid_t rid) {
Session *session = get_session(rid.name);
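The session-map hunks show the wrap/unwrap boundary: the map is still keyed by entity_name_t, so callers unwrap the raw value when building a name (entity_name_t::CLIENT(client.v)) and wrap it when handing a typed id back (client_t(inst.name.num())). A hedged stand-alone sketch of that pattern; ClientId, EntityName, Session, session_map, inc_push_seq, and get_client_of are hypothetical stand-ins, not the Ceph definitions.

```cpp
// Hypothetical stand-ins -- the point is only the wrap/unwrap boundary:
// unwrap .v when building an entity name for a lookup, wrap the raw number
// when returning a typed client id.
#include <cstdint>
#include <iostream>
#include <map>

struct ClientId { int64_t v; ClientId(int64_t _v = -2) : v(_v) {} };
inline bool operator<(const ClientId& a, const ClientId& b) { return a.v < b.v; }

struct EntityName {                                // stand-in for entity_name_t
  int64_t num;
  static EntityName CLIENT(int64_t n) { return EntityName{n}; }
};
inline bool operator<(const EntityName& a, const EntityName& b) { return a.num < b.num; }

struct Session { uint64_t push_seq = 0; };

std::map<EntityName, Session> session_map;         // sessions keyed by entity name

uint64_t inc_push_seq(ClientId client) {
  // unwrap: the container is keyed by EntityName, so take the raw value
  return ++session_map[EntityName::CLIENT(client.v)].push_seq;
}

ClientId get_client_of(const EntityName& name) {
  return ClientId(name.num);                       // wrap: hand back the typed id
}

int main() {
  ClientId c = get_client_of(EntityName::CLIENT(4115));
  std::cout << "push_seq " << inc_push_seq(c) << "\n";
  return 0;
}
```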
@@ -136,9 +136,9 @@ protected:
// local state
int num_rdlock, num_wrlock, num_xlock;
Mutation *xlock_by;
-int xlock_by_client;
+client_t xlock_by_client;
public:
-int excl_client;
+client_t excl_client;


public:
@@ -278,33 +278,33 @@ public:


// can_*
-bool can_lease(int client) {
+bool can_lease(client_t client) {
return sm->states[state].can_lease == ANY ||
(sm->states[state].can_lease == AUTH && parent->is_auth()) ||
(sm->states[state].can_lease == XCL && client >= 0 && xlock_by_client == client);
}
-bool can_read(int client) {
+bool can_read(client_t client) {
return sm->states[state].can_read == ANY ||
(sm->states[state].can_read == AUTH && parent->is_auth()) ||
(sm->states[state].can_read == XCL && client >= 0 && xlock_by_client == client);
}
-bool can_read_projected(int client) {
+bool can_read_projected(client_t client) {
return sm->states[state].can_read_projected == ANY ||
(sm->states[state].can_read_projected == AUTH && parent->is_auth()) ||
(sm->states[state].can_read_projected == XCL && client >= 0 && xlock_by_client == client);
}
-bool can_rdlock(int client) {
+bool can_rdlock(client_t client) {
return sm->states[state].can_rdlock == ANY ||
(sm->states[state].can_rdlock == AUTH && parent->is_auth()) ||
(sm->states[state].can_rdlock == XCL && client >= 0 && xlock_by_client == client);
}
-bool can_wrlock(int client) {
+bool can_wrlock(client_t client) {
return sm->states[state].can_wrlock == ANY ||
(sm->states[state].can_wrlock == AUTH && parent->is_auth()) ||
(sm->states[state].can_wrlock == XCL && client >= 0 && (xlock_by_client == client ||
excl_client == client));
}
-bool can_xlock(int client) {
+bool can_xlock(client_t client) {
return sm->states[state].can_xlock == ANY ||
(sm->states[state].can_xlock == AUTH && parent->is_auth()) ||
(sm->states[state].can_xlock == XCL && client >= 0 && xlock_by_client == client);
@@ -338,7 +338,7 @@ public:
int get_num_wrlocks() { return num_wrlock; }

// xlock
-void get_xlock(Mutation *who, int client) {
+void get_xlock(Mutation *who, client_t client) {
assert(xlock_by == 0);
assert(state == LOCK_XLOCK);
parent->get(MDSCacheObject::PIN_LOCK);
@@ -363,7 +363,7 @@ public:
}
bool is_xlocked() { return num_xlock > 0; }
int get_num_xlocks() { return num_xlock; }
-bool is_xlocked_by_client(int c) {
+bool is_xlocked_by_client(client_t c) {
return xlock_by_client == c;
}
Mutation *get_xlocked_by() { return xlock_by; }
@@ -439,7 +439,7 @@ public:
}


-int gcaps_xlocker_mask(int client) {
+int gcaps_xlocker_mask(client_t client) {
if (client == xlock_by_client)
return type == CEPH_LOCK_IFILE ? 0xffff : (CEPH_CAP_GSHARED|CEPH_CAP_GEXCL);
return 0;
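The lock predicates above keep two comparisons intact after the type change: a sentinel test against a raw number (client >= 0, i.e. "is this a real client?") and an equality test against the current xlocker (xlock_by_client == client). For both to compile with a struct id, the type needs operator== between ids and a >= overload against the raw 64-bit value. A minimal hedged sketch of that sentinel-and-equality pattern; ClientId, Lock, and can_xlock are hypothetical names, and the -2 default below is this sketch's own "no client" sentinel, not a statement about the real defaults.

```cpp
// Hypothetical sketch, not the Ceph code: shows the comparisons the lock
// predicates rely on once the client id is a struct rather than an int.
#include <cstdint>
#include <iostream>

struct ClientId { int64_t v; ClientId(int64_t _v = -2) : v(_v) {} };
inline bool operator==(const ClientId& a, const ClientId& b) { return a.v == b.v; }
inline bool operator>=(const ClientId& a, int64_t o) { return a.v >= o; }   // sentinel test

struct Lock {
  ClientId xlock_by_client;          // default-constructed: no client holds the xlock

  bool can_xlock(ClientId client) const {
    // same shape as the XCL case: a real client (>= 0) that already holds
    // the exclusive lock may lock again
    return client >= 0 && xlock_by_client == client;
  }
};

int main() {
  Lock l;
  l.xlock_by_client = ClientId(4115);
  std::cout << l.can_xlock(ClientId(4115)) << " "   // 1: same client
            << l.can_xlock(ClientId(4200)) << " "   // 0: different client
            << l.can_xlock(ClientId(-1))   << "\n";  // 0: not a real client
  return 0;
}
```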
@@ -25,7 +25,7 @@ protected:
version_t cmapv; // client map version

public:
-map<__u32,entity_inst_t> client_map;
+map<client_t,entity_inst_t> client_map;

ESessions() : LogEvent(EVENT_SESSION) { }
ESessions(version_t v) :
@@ -1022,7 +1022,7 @@ void EImportStart::replay(MDS *mds)
} else {
dout(10) << "EImportStart.replay sessionmap " << mds->sessionmap.version
<< " < " << cmapv << dendl;
-map<__u32,entity_inst_t> cm;
+map<client_t,entity_inst_t> cm;
bufferlist::iterator blp = client_map.begin();
::decode(cm, blp);
mds->sessionmap.open_sessions(cm);
@@ -100,6 +100,7 @@ inline string ccap_string(int cap)
}


+
struct frag_info_t {
version_t version;

@@ -345,7 +346,7 @@ struct inode_t {
utime_t atime; // file data access time.
uint32_t time_warp_seq; // count of (potential) mtime/atime timewarps (i.e., utimes())

-map<int,byte_range_t> client_ranges; // client(s) can write to these ranges
+map<client_t,byte_range_t> client_ranges; // client(s) can write to these ranges

// dirfrag, recursive accountin
frag_info_t dirstat;
@@ -380,7 +381,7 @@ struct inode_t {

__u64 get_max_size() const {
__u64 max = 0;
-for (map<int,byte_range_t>::const_iterator p = client_ranges.begin();
+for (map<client_t,byte_range_t>::const_iterator p = client_ranges.begin();
p != client_ranges.end();
p++)
if (p->second.last > max)
@@ -391,7 +392,7 @@ struct inode_t {
if (new_max == 0) {
client_ranges.clear();
} else {
-for (map<int,byte_range_t>::iterator p = client_ranges.begin();
+for (map<client_t,byte_range_t>::iterator p = client_ranges.begin();
p != client_ranges.end();
p++)
p->second.last = new_max;
@@ -1018,7 +1019,7 @@ class MDSCacheObject;
* for metadata leases to clients
*/
struct ClientLease {
-int client;
+client_t client;
int mask; // CEPH_STAT_MASK_*
MDSCacheObject *parent;

@@ -1027,7 +1028,7 @@ struct ClientLease {
xlist<ClientLease*>::item session_lease_item; // per-session list
xlist<ClientLease*>::item lease_item; // global list

-ClientLease(int c, MDSCacheObject *p) :
+ClientLease(client_t c, MDSCacheObject *p) :
client(c), mask(0), parent(p), seq(0),
session_lease_item(this),
lease_item(this) { }
@@ -132,7 +132,7 @@ struct SnapRealm {
bufferlist cached_snap_trace;

xlist<CInode*> inodes_with_caps; // for efficient realm splits
-map<int, xlist<Capability*> > client_caps; // to identify clients who need snap notifications
+map<client_t, xlist<Capability*> > client_caps; // to identify clients who need snap notifications

SnapRealm(MDCache *c, CInode *in) :
seq(0), created(0),
@@ -209,10 +209,10 @@ struct SnapRealm {
void split_at(SnapRealm *child);
void join(SnapRealm *child);

-void add_cap(int client, Capability *cap) {
+void add_cap(client_t client, Capability *cap) {
client_caps[client].push_back(&cap->snaprealm_caps_item);
}
-void remove_cap(int client, Capability *cap) {
+void remove_cap(client_t client, Capability *cap) {
cap->snaprealm_caps_item.remove_myself();
if (client_caps[client].empty())
client_caps.erase(client);
@@ -23,7 +23,7 @@ class MExportCaps : public Message {
public:
inodeno_t ino;
bufferlist cap_bl;
-map<__u32,entity_inst_t> client_map;
+map<client_t,entity_inst_t> client_map;

MExportCaps() :
Message(MSG_MDS_EXPORTCAPS) {}