mirror of https://github.com/ceph/ceph
mds: move some MDCache member init to header
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
commit b365ffef34
parent b906a6f1e3
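
The hunks below (they appear to come from MDCache.cc and MDCache.h) follow one pattern: assignments that used to sit in the MDCache constructor body are deleted, and the members instead get C++11 in-class default initializers in the header. A minimal standalone sketch of that pattern, using hypothetical CacheBefore/CacheAfter classes rather than the real MDCache:

// Before/after sketch of moving member defaults from the constructor body to
// in-class default member initializers (NSDMI). All names here are illustrative.
#include <cstdint>

struct Inode {};                      // stand-in for CInode

class CacheBefore {
public:
  CacheBefore() {                     // old style: defaults assigned in the ctor body
    root = nullptr;
    readonly = false;
    last_cap_id = 0;
  }
private:
  Inode *root;
  bool readonly;
  uint64_t last_cap_id;
};

class CacheAfter {
public:
  CacheAfter() = default;             // nothing left for the constructor to do
private:
  Inode *root = nullptr;              // new style: the default lives with the declaration
  bool readonly = false;
  uint64_t last_cap_id = 0;
};

int main() {
  CacheBefore a;
  CacheAfter b;
  (void)a;
  (void)b;
  return 0;
}

Both classes end up with identical member values; the second form simply keeps each default next to the declaration it belongs to, which is what this commit does for MDCache.
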
@@ -136,45 +136,17 @@ public:
 MDCache::MDCache(MDSRank *m, PurgeQueue &purge_queue_) :
   mds(m),
   filer(m->objecter, m->finisher),
-  exceeded_size_limit(false),
   recovery_queue(m),
   stray_manager(m, purge_queue_),
   trim_counter(g_conf().get_val<double>("mds_cache_trim_decay_rate")),
   open_file_table(m)
 {
   migrator.reset(new Migrator(mds, this));
-  root = NULL;
-  myin = NULL;
-  readonly = false;
-
-  stray_index = 0;
-  for (int i = 0; i < NUM_STRAY; ++i) {
-    strays[i] = NULL;
-  }
-
-  num_shadow_inodes = 0;
-  num_inodes_with_caps = 0;
 
   max_dir_commit_size = g_conf()->mds_dir_max_commit_size ?
     (g_conf()->mds_dir_max_commit_size << 20) :
     (0.9 *(g_conf()->osd_max_write_size << 20));
 
-  discover_last_tid = 0;
-  open_ino_last_tid = 0;
-  find_ino_peer_last_tid = 0;
-
-  last_cap_id = 0;
-
-  client_lease_durations[0] = 5.0;
-  client_lease_durations[1] = 30.0;
-  client_lease_durations[2] = 300.0;
-
-  resolves_pending = false;
-  rejoins_pending = false;
-  cap_imports_num_opening = 0;
-
-  opening_root = open = false;
-
   cache_inode_limit = g_conf().get_val<int64_t>("mds_cache_size");
   cache_memory_limit = g_conf().get_val<Option::size_t>("mds_cache_memory_limit");
   cache_reservation = g_conf().get_val<double>("mds_cache_reservation");
@@ -185,10 +157,6 @@ MDCache::MDCache(MDSRank *m, PurgeQueue &purge_queue_) :
   bottom_lru.lru_set_midpoint(0);
 
   decayrate.set_halflife(g_conf()->mds_decay_halflife);
-
-  did_shutdown_log_cap = false;
-
-  global_snaprealm = NULL;
 }
 
 MDCache::~MDCache()
@@ -7542,19 +7510,21 @@ void MDCache::trim_client_leases()
 
   dout(10) << "trim_client_leases" << dendl;
 
-  for (int pool=0; pool<client_lease_pools; pool++) {
-    int before = client_leases[pool].size();
-    if (client_leases[pool].empty())
+  std::size_t pool = 0;
+  for (const auto& list : client_leases) {
+    pool += 1;
+    if (list.empty())
       continue;
 
-    while (!client_leases[pool].empty()) {
-      ClientLease *r = client_leases[pool].front();
+    auto before = list.size();
+    while (!list.empty()) {
+      ClientLease *r = list.front();
       if (r->ttl > now) break;
       CDentry *dn = static_cast<CDentry*>(r->parent);
       dout(10) << " expiring client." << r->client << " lease of " << *dn << dendl;
       dn->remove_client_lease(r, mds->locker);
     }
-    int after = client_leases[pool].size();
+    auto after = list.size();
     dout(10) << "trim_client_leases pool " << pool << " trimmed "
             << (before-after) << " leases, " << after << " left" << dendl;
   }
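
The trim_client_leases hunk above swaps the index-based loop over client_leases for a range-for with a separately maintained pool counter that is only used in the log line. A simplified, self-contained sketch of that loop shape (plain ints stand in for ClientLease/xlist, and a fixed cutoff replaces the ttl comparison):

// Standalone sketch of the loop restructuring: iterate the array of per-pool
// lists directly and keep a running pool index only for the log output.
#include <array>
#include <cstddef>
#include <iostream>
#include <list>

int main() {
  // Three lease "pools"; the ints stand in for lease expiry times, kept sorted.
  std::array<std::list<int>, 3> lease_pools{{{1, 2, 9}, {}, {3, 8}}};
  const int now = 5;                         // anything <= now is treated as expired

  std::size_t pool = 0;
  for (auto& leases : lease_pools) {         // range-for replaces leases[pool] indexing
    pool += 1;                               // counter kept only for the log line
    if (leases.empty())
      continue;

    auto before = leases.size();
    while (!leases.empty()) {
      if (leases.front() > now)              // stop at the first lease that has not expired
        break;
      leases.pop_front();                    // "expire" the lease at the front
    }
    auto after = leases.size();
    std::cout << "pool " << pool << " trimmed " << (before - after)
              << " leases, " << after << " left\n";
  }
  return 0;
}
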
@@ -135,14 +135,14 @@ class MDCache {
 protected:
   ceph::unordered_map<inodeno_t,CInode*> inode_map;  // map of head inodes by ino
   map<vinodeno_t, CInode*> snap_inode_map;  // map of snap inodes by ino
-  CInode *root;  // root inode
-  CInode *myin;  // .ceph/mds%d dir
+  CInode *root = nullptr;  // root inode
+  CInode *myin = nullptr;  // .ceph/mds%d dir
 
-  bool readonly;
+  bool readonly = false;
   void set_readonly() { readonly = true; }
 
-  CInode *strays[NUM_STRAY];  // my stray dir
-  int stray_index;
+  std::array<CInode *, NUM_STRAY> strays{};  // my stray dir
+  int stray_index = 0;
 
   CInode *get_stray() {
     return strays[stray_index];
@@ -154,7 +154,7 @@ class MDCache {
 
   Filer filer;
 
-  bool exceeded_size_limit;
+  bool exceeded_size_limit = false;
 
 private:
   uint64_t cache_inode_limit;
@@ -208,9 +208,9 @@ public:
 
   DecayRate decayrate;
 
-  int num_shadow_inodes;
+  int num_shadow_inodes = 0;
 
-  int num_inodes_with_caps;
+  int num_inodes_with_caps = 0;
 
   unsigned max_dir_commit_size;
 
@@ -224,10 +224,11 @@ public:
 
   // -- client leases --
 public:
-  static const int client_lease_pools = 3;
-  float client_lease_durations[client_lease_pools];
+  static constexpr std::size_t client_lease_pools = 3;
+  std::array<float, client_lease_pools> client_lease_durations{5.0, 30.0, 300.0};
+
 protected:
-  xlist<ClientLease*> client_leases[client_lease_pools];
+  std::array<xlist<ClientLease*>, client_lease_pools> client_leases{};
 public:
   void touch_client_lease(ClientLease *r, int pool, utime_t ttl) {
     client_leases[pool].push_back(&r->item_lease);
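
In the hunk above, the pool count becomes a constexpr size and the raw C arrays become std::array members initialized in the class definition. A small standalone sketch of those two changes, with hypothetical names (LeaseTable, durations) and std::list standing in for xlist:

// Sketch: constexpr member as array bound plus in-class std::array initialization.
#include <array>
#include <cstddef>
#include <list>

class LeaseTable {
public:
  static constexpr std::size_t pools = 3;                 // usable as a compile-time array bound
  std::array<float, pools> durations{5.0, 30.0, 300.0};   // aggregate init replaces ctor assignments
protected:
  std::array<std::list<int>, pools> leases{};             // every pool starts out empty
};

static_assert(LeaseTable::pools == 3, "constexpr member is a compile-time constant");

int main() {
  LeaseTable t;
  return t.durations[0] < t.durations[1] ? 0 : 1;         // 5.0 < 30.0, exits 0
}
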
@@ -250,9 +251,7 @@ public:
   }
 
   // -- client caps --
-  uint64_t last_cap_id;
-
-
+  uint64_t last_cap_id = 0;
 
   // -- discover --
   struct discover_info_t {
@@ -280,7 +279,7 @@ public:
   };
 
   map<ceph_tid_t, discover_info_t> discovers;
-  ceph_tid_t discover_last_tid;
+  ceph_tid_t discover_last_tid = 0;
 
   void _send_discover(discover_info_t& dis);
   discover_info_t& _create_discover(mds_rank_t mds) {
@@ -517,7 +516,7 @@ protected:
   friend class ESlaveUpdate;
   friend class ECommitted;
 
-  bool resolves_pending;
+  bool resolves_pending = false;
   set<mds_rank_t> resolve_gather;      // nodes i need resolves from
   set<mds_rank_t> resolve_ack_gather;  // nodes i need a resolve_ack from
   set<version_t> resolve_snapclient_commits;
@@ -594,7 +593,7 @@ public:
   bool dump_inode(Formatter *f, uint64_t number);
 protected:
   // [rejoin]
-  bool rejoins_pending;
+  bool rejoins_pending = false;
   set<mds_rank_t> rejoin_gather;      // nodes from whom i need a rejoin
   set<mds_rank_t> rejoin_sent;        // nodes i sent a rejoin to
   set<mds_rank_t> rejoin_ack_sent;    // nodes i sent a rejoin to
@@ -611,7 +610,7 @@ protected:
   map<inodeno_t,map<client_t,map<mds_rank_t,cap_reconnect_t> > > cap_imports;  // ino -> client -> frommds -> capex
   set<inodeno_t> cap_imports_missing;
   map<inodeno_t, MDSContext::vec > cap_reconnect_waiters;
-  int cap_imports_num_opening;
+  int cap_imports_num_opening = 0;
 
   set<CInode*> rejoin_undef_inodes;
   set<CInode*> rejoin_potential_updated_scatterlocks;
@@ -837,7 +836,7 @@ public:
     shutdown_export_strays();
   }
 
-  bool did_shutdown_log_cap;
+  bool did_shutdown_log_cap = false;
 
   // inode_map
   bool have_inode(vinodeno_t vino) {
@@ -952,7 +951,7 @@ protected:
 
 
 private:
-  bool opening_root, open;
+  bool opening_root = false, open = false;
   MDSContext::vec waiting_for_open;
 
 public:
@@ -1057,7 +1056,7 @@ protected:
       want_replica(false), want_xlocked(false), tid(0), pool(-1),
       last_err(0) {}
   };
-  ceph_tid_t open_ino_last_tid;
+  ceph_tid_t open_ino_last_tid = 0;
   map<inodeno_t,open_ino_info_t> opening_inodes;
 
   void _open_ino_backtrace_fetched(inodeno_t ino, bufferlist& bl, int err);
@@ -1094,7 +1093,7 @@ public:
   };
 
   map<ceph_tid_t, find_ino_peer_info_t> find_ino_peer;
-  ceph_tid_t find_ino_peer_last_tid;
+  ceph_tid_t find_ino_peer_last_tid = 0;
 
   void find_ino_peers(inodeno_t ino, MDSContext *c, mds_rank_t hint=MDS_RANK_NONE);
   void _do_find_ino_peer(find_ino_peer_info_t& fip);
@@ -1104,7 +1103,7 @@ public:
 
   // -- snaprealms --
 private:
-  SnapRealm *global_snaprealm;
+  SnapRealm *global_snaprealm = nullptr;
 public:
   SnapRealm *get_global_snaprealm() const { return global_snaprealm; }
   void create_global_snaprealm();