os/bluestore: move most cache types into mempools

Keep onodes separate so we can use onodes as a sentinel for
overall memory usage (this is what we trim in terms of anyway).

Signed-off-by: Sage Weil <sage@redhat.com>
This commit is contained in:
Sage Weil 2016-10-11 14:23:52 -04:00
parent 81295c61c4
commit 030bc063e4
5 changed files with 61 additions and 9 deletions

View File

@ -168,7 +168,9 @@ namespace mempool {
#define DEFINE_MEMORY_POOLS_HELPER(f) \
f(unittest_1) \
f(unittest_2)
f(unittest_2) \
f(bluestore_meta_onode) \
f(bluestore_meta_other)
// give them integer ids
#define P(x) x,

View File

@ -34,6 +34,38 @@
#define dout_subsys ceph_subsys_bluestore
// bluestore_meta_onode
MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Onode, bluestore_onode,
bluestore_meta_onode);
// bluestore_meta_other
MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Buffer, bluestore_buffer,
bluestore_meta_other);
MEMPOOL_DEFINE_MAP_FACTORY(uint64_t, std::unique_ptr<BlueStore::Buffer>,
bluestore_uint64_Buffer, bluestore_meta_other);
MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Extent, bluestore_extent,
bluestore_meta_other);
MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::Blob, bluestore_blob,
bluestore_meta_other);
MEMPOOL_DEFINE_MAP_FACTORY(int, BlueStore::BlobRef,
bluestore_int_BlobRef, bluestore_meta_other);
MEMPOOL_DEFINE_OBJECT_FACTORY(BlueStore::SharedBlob, bluestore_shared_blob,
bluestore_meta_other);
MEMPOOL_DEFINE_FACTORY(BlueStore::ExtentMap::Shard, bluestore_ExtentMap_Shard,
bluestore_meta_other);
MEMPOOL_DEFINE_UNORDERED_MAP_BASE_FACTORY(bluestore_meta_other);
MEMPOOL_DEFINE_UNORDERED_MAP_FACTORY(ghobject_t, BlueStore::OnodeRef, true,
bluestore_ghobject_OnodeRef,
bluestore_meta_other);
MEMPOOL_DEFINE_UNORDERED_MAP_FACTORY(coll_t, BlueStore::CollectionRef, true,
bluestore_coll_CollectionRef,
bluestore_meta_other);
MEMPOOL_DEFINE_UNORDERED_MAP_FACTORY(uint64_t, BlueStore::SharedBlob*, false,
bluestore_u64_sharedblob,
bluestore_meta_other);
// kv store prefixes
const string PREFIX_SUPER = "S"; // field -> value
const string PREFIX_STAT = "T"; // field -> value(int64 array)
const string PREFIX_COLL = "C"; // collection name -> cnode_t

View File

@ -32,6 +32,7 @@
#include "include/assert.h"
#include "include/unordered_map.h"
#include "include/memory.h"
#include "include/mempool.h"
#include "common/Finisher.h"
#include "compressor/Compressor.h"
#include "os/ObjectStore.h"
@ -120,6 +121,8 @@ public:
/// cached buffer
struct Buffer {
MEMPOOL_CLASS_HELPERS();
enum {
STATE_EMPTY, ///< empty buffer -- used for cache history
STATE_CLEAN, ///< clean data that is up to date
@ -207,7 +210,7 @@ public:
boost::intrusive::list_member_hook<>,
&Buffer::state_item> > state_list_t;
map<uint64_t,std::unique_ptr<Buffer>> buffer_map;
bluestore_meta_other::map<uint64_t, std::unique_ptr<Buffer>> buffer_map;
Cache *cache;
// we use a bare intrusive list here instead of std::map because
@ -320,6 +323,8 @@ public:
/// in-memory shared blob state (incl cached buffers)
struct SharedBlob {
MEMPOOL_CLASS_HELPERS();
std::atomic_int nref = {0}; ///< reference count
// these are defined/set if the shared_blob is 'loaded'
@ -362,7 +367,7 @@ public:
// we use a bare pointer because we don't want to affect the ref
// count
std::unordered_map<uint64_t,SharedBlob*> sb_map;
bluestore_meta_other::unordered_map<uint64_t,SharedBlob*> sb_map;
SharedBlobRef lookup(uint64_t sbid) {
std::lock_guard<std::mutex> l(lock);
@ -397,6 +402,8 @@ public:
/// in-memory blob metadata and associated cached buffers (if any)
struct Blob {
MEMPOOL_CLASS_HELPERS();
std::atomic_int nref = {0}; ///< reference count
int16_t id = -1; ///< id, for spanning blobs only, >= 0
int16_t last_encoded_id = -1; ///< (ephemeral) used during encoding only
@ -516,10 +523,12 @@ public:
#endif
};
typedef boost::intrusive_ptr<Blob> BlobRef;
typedef std::map<int,BlobRef> blob_map_t;
typedef bluestore_meta_other::map<int,BlobRef> blob_map_t;
/// a logical extent, pointing to (some portion of) a blob
struct Extent : public boost::intrusive::set_base_hook<boost::intrusive::optimize_size<true>> {
MEMPOOL_CLASS_HELPERS();
uint32_t logical_offset = 0; ///< logical offset
uint32_t blob_offset = 0; ///< blob offset
uint32_t length = 0; ///< length
@ -594,7 +603,7 @@ public:
bool loaded = false; ///< true if shard is loaded
bool dirty = false; ///< true if shard is dirty and needs reencoding
};
vector<Shard> shards; ///< shards
bluestore_meta_other::vector<Shard> shards; ///< shards
bufferlist inline_bl; ///< cached encoded map, if unsharded; empty=>dirty
@ -732,6 +741,8 @@ public:
/// an in-memory object
struct Onode {
MEMPOOL_CLASS_HELPERS();
std::atomic_int nref; ///< reference count
Collection *c;
@ -991,7 +1002,9 @@ public:
struct OnodeSpace {
Cache *cache;
ceph::unordered_map<ghobject_t,OnodeRef> onode_map; ///< forward lookups
/// forward lookups
bluestore_meta_other::unordered_map<ghobject_t,OnodeRef> onode_map;
OnodeSpace(Cache *c) : cache(c) {}
~OnodeSpace() {
@ -1429,7 +1442,7 @@ private:
bool mounted;
RWLock coll_lock; ///< rwlock to protect coll_map
ceph::unordered_map<coll_t, CollectionRef> coll_map;
bluestore_meta_other::unordered_map<coll_t, CollectionRef> coll_map;
vector<Cache*> cache_shards;

View File

@ -112,6 +112,9 @@ void bluestore_cnode_t::generate_test_instances(list<bluestore_cnode_t*>& o)
// bluestore_extent_ref_map_t
MEMPOOL_DEFINE_MAP_FACTORY(uint64_t, bluestore_extent_ref_map_t::record_t,
uint64_extent_ref_map_record, bluestore_meta_other)
void bluestore_extent_ref_map_t::_check() const
{
uint64_t pos = 0;

View File

@ -24,6 +24,7 @@
#include "common/hobject.h"
#include "compressor/Compressor.h"
#include "common/Checksummer.h"
#include "include/mempool.h"
namespace ceph {
class Formatter;
@ -187,10 +188,11 @@ struct bluestore_extent_ref_map_t {
}
};
map<uint64_t,record_t> ref_map;
typedef bluestore_meta_other::map<uint64_t,record_t> map_t;
map_t ref_map;
void _check() const;
void _maybe_merge_left(map<uint64_t,record_t>::iterator& p);
void _maybe_merge_left(map_t::iterator& p);
void clear() {
ref_map.clear();