osd: build_simple creates a single rule
The three rules created by build_simple are identical. They are replaced by a single rule named replicated_rule, which is used by the data, rbd and metadata pools. Instead of hardcoding the ruleset number to zero, it is read from osd_pool_default_crush_rule, which defaults to zero. The CEPH_DEFAULT_CRUSH_REPLICATED_RULESET enum is moved from osd_types.h to config.h because it may be needed when osd_types.h is not included.

Signed-off-by: Loic Dachary <loic@dachary.org>
commit a10fc025d7
parent 15b695937b
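The observable effect: on a freshly built map, all three default pools report the same ruleset. A minimal check, assuming a running test cluster with the default pools (only the rbd check is part of this commit's test; the data and metadata checks are analogous):

$ ceph osd pool get data crush_ruleset
crush_ruleset: 0
$ ceph osd pool get metadata crush_ruleset
crush_ruleset: 0
$ ceph osd pool get rbd crush_ruleset
crush_ruleset: 0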
@@ -359,7 +359,7 @@ ceph osd pool set rbd hit_set_period 123
 ceph osd pool set rbd hit_set_count 12
 ceph osd pool set rbd hit_set_fpp .01
 
-ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 2'
+ceph osd pool get rbd crush_ruleset | grep 'crush_ruleset: 0'
 
 ceph osd thrash 10
@@ -29,6 +29,10 @@ extern struct ceph_file_layout g_default_file_layout;
 #include "common/config_obs.h"
 #include "msg/msg_types.h"
 
+enum {
+  CEPH_DEFAULT_CRUSH_REPLICATED_RULESET,
+};
+
 #define OSD_REP_PRIMARY 0
 #define OSD_REP_SPLAY 1
 #define OSD_REP_CHAIN 2
@@ -395,7 +395,7 @@ OPTION(osd_pgp_bits, OPT_INT, 6) // bits per osd
 OPTION(osd_crush_chooseleaf_type, OPT_INT, 1) // 1 = host
 OPTION(osd_min_rep, OPT_INT, 1)
 OPTION(osd_max_rep, OPT_INT, 10)
-OPTION(osd_pool_default_crush_rule, OPT_INT, 0)
+OPTION(osd_pool_default_crush_rule, OPT_INT, CEPH_DEFAULT_CRUSH_REPLICATED_RULESET)
 OPTION(osd_pool_default_size, OPT_INT, 3)
 OPTION(osd_pool_default_min_size, OPT_INT, 0) // 0 means no specific default; ceph will use size-size/2
 OPTION(osd_pool_default_pg_num, OPT_INT, 8) // number of PGs for new pools. Configure in global or mon section of ceph.conf
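Because the ruleset now comes from osd_pool_default_crush_rule instead of being hardcoded, a deployment can still steer newly created pools to another ruleset. A sketch, assuming a custom ruleset 2 already exists in the CRUSH map and the usual /etc/ceph/ceph.conf path:

$ cat >> /etc/ceph/ceph.conf <<EOF
[global]
        osd pool default crush rule = 2
EOF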
@@ -1883,15 +1883,15 @@ int OSDMap::build_simple(CephContext *cct, epoch_t e, uuid_d &fsid,
   if (pgp_bits > pg_bits)
     pgp_bits = pg_bits;
 
   // crush map
-  map<int, const char*> rulesets;
-  rulesets[CEPH_DATA_RULE] = "data";
-  rulesets[CEPH_METADATA_RULE] = "metadata";
-  rulesets[CEPH_RBD_RULE] = "rbd";
+  vector<string> pool_names;
+  pool_names.push_back("data");
+  pool_names.push_back("metadata");
+  pool_names.push_back("rbd");
 
   int poolbase = get_max_osd() ? get_max_osd() : 1;
 
-  for (map<int,const char*>::iterator p = rulesets.begin(); p != rulesets.end(); ++p) {
+  for (vector<string>::iterator p = pool_names.begin();
+       p != pool_names.end(); ++p) {
     int64_t pool = ++pool_max;
     pools[pool].type = pg_pool_t::TYPE_REPLICATED;
     pools[pool].flags = cct->_conf->osd_pool_default_flags;
@@ -1899,21 +1899,21 @@ int OSDMap::build_simple(CephContext *cct, epoch_t e, uuid_d &fsid,
     pools[pool].flags |= pg_pool_t::FLAG_HASHPSPOOL;
     pools[pool].size = cct->_conf->osd_pool_default_size;
     pools[pool].min_size = cct->_conf->get_osd_pool_default_min_size();
-    pools[pool].crush_ruleset = p->first;
+    pools[pool].crush_ruleset = cct->_conf->osd_pool_default_crush_rule;
     pools[pool].object_hash = CEPH_STR_HASH_RJENKINS;
     pools[pool].set_pg_num(poolbase << pg_bits);
     pools[pool].set_pgp_num(poolbase << pgp_bits);
     pools[pool].last_change = epoch;
-    if (p->first == CEPH_DATA_RULE)
+    if (*p == "data")
       pools[pool].crash_replay_interval = cct->_conf->osd_default_data_pool_replay_window;
-    pool_name[pool] = p->second;
-    name_pool[p->second] = pool;
+    pool_name[pool] = *p;
+    name_pool[*p] = pool;
   }
 
   if (nosd >= 0)
-    build_simple_crush_map(cct, *crush, rulesets, nosd);
+    build_simple_crush_map(cct, *crush, nosd);
   else
-    build_simple_crush_map_from_conf(cct, *crush, rulesets);
+    build_simple_crush_map_from_conf(cct, *crush);
 
   for (int i=0; i<get_max_osd(); i++) {
     set_state(i, 0);
@@ -1940,7 +1940,7 @@ int OSDMap::_build_crush_types(CrushWrapper& crush)
 }
 
 void OSDMap::build_simple_crush_map(CephContext *cct, CrushWrapper& crush,
-                                    map<int, const char*>& rulesets, int nosd)
+                                    int nosd)
 {
   const md_config_t *conf = cct->_conf;
 
@@ -1969,9 +1969,10 @@ void OSDMap::build_simple_crush_map(CephContext *cct, CrushWrapper& crush,
   int minrep = conf->osd_min_rep;
   int maxrep = conf->osd_max_rep;
   assert(maxrep >= minrep);
-  for (map<int,const char*>::iterator p = rulesets.begin(); p != rulesets.end(); ++p) {
-    int ruleset = p->first;
-    crush_rule *rule = crush_make_rule(3, ruleset, pg_pool_t::TYPE_REPLICATED, minrep, maxrep);
+  {
+    crush_rule *rule = crush_make_rule(3, cct->_conf->osd_pool_default_crush_rule,
+                                       pg_pool_t::TYPE_REPLICATED,
+                                       minrep, maxrep);
     assert(rule);
     crush_rule_set_step(rule, 0, CRUSH_RULE_TAKE, rootid, 0);
     crush_rule_set_step(rule, 1,
@@ -1980,14 +1981,12 @@ void OSDMap::build_simple_crush_map(CephContext *cct, CrushWrapper& crush,
                         cct->_conf->osd_crush_chooseleaf_type);
     crush_rule_set_step(rule, 2, CRUSH_RULE_EMIT, 0, 0);
     int rno = crush_add_rule(crush.crush, rule, -1);
-    crush.set_rule_name(rno, p->second);
+    crush.set_rule_name(rno, "replicated_rule");
   }
 
   crush.finalize();
 }
 
-void OSDMap::build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& crush,
-                                              map<int, const char*>& rulesets)
+void OSDMap::build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& crush)
 {
   const md_config_t *conf = cct->_conf;
 
@@ -2053,9 +2052,10 @@ void OSDMap::build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& cr
   // rules
   int minrep = conf->osd_min_rep;
   int maxrep = conf->osd_max_rep;
-  for (map<int,const char*>::iterator p = rulesets.begin(); p != rulesets.end(); ++p) {
-    int ruleset = p->first;
-    crush_rule *rule = crush_make_rule(3, ruleset, pg_pool_t::TYPE_REPLICATED, minrep, maxrep);
+  {
+    crush_rule *rule = crush_make_rule(3, cct->_conf->osd_pool_default_crush_rule,
+                                       pg_pool_t::TYPE_REPLICATED,
+                                       minrep, maxrep);
     assert(rule);
     crush_rule_set_step(rule, 0, CRUSH_RULE_TAKE, rootid, 0);
 
@@ -2071,9 +2071,8 @@ void OSDMap::build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& cr
     }
     crush_rule_set_step(rule, 2, CRUSH_RULE_EMIT, 0, 0);
     int rno = crush_add_rule(crush.crush, rule, -1);
-    crush.set_rule_name(rno, p->second);
+    crush.set_rule_name(rno, "replicated_rule");
   }
 
   crush.finalize();
 }
 
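For reference, the single rule emitted by both builders should decompile to one rule block. A sketch of extracting and inspecting it, assuming the defaults above (rule number 0, osd_min_rep 1, osd_max_rep 10, chooseleaf type host) and the conventional root bucket named default; the tunables/devices/buckets sections of the decompiled output are omitted:

$ ceph osd getcrushmap -o crushmap.bin
$ crushtool -d crushmap.bin -o crushmap.txt
$ grep -A 8 'rule replicated_rule' crushmap.txt
rule replicated_rule {
        ruleset 0
        type replicated
        min_size 1
        max_size 10
        step take default
        step chooseleaf firstn 0 type host
        step emit
}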
@@ -617,9 +617,9 @@ public:
                           int num_osd, int pg_bits, int pgp_bits);
   static int _build_crush_types(CrushWrapper& crush);
   static void build_simple_crush_map(CephContext *cct, CrushWrapper& crush,
-                                     map<int, const char*>& poolsets, int num_osd);
-  static void build_simple_crush_map_from_conf(CephContext *cct, CrushWrapper& crush,
-                                               map<int, const char*>& rulesets);
+                                     int num_osd);
+  static void build_simple_crush_map_from_conf(CephContext *cct,
+                                               CrushWrapper& crush);
 
   bool crush_ruleset_in_use(int ruleset) const;
 
@@ -216,13 +216,6 @@ enum {
 #define CEPH_CAS_NS 3
 #define CEPH_OSDMETADATA_NS 0xff
 
-// poolsets
-enum {
-  CEPH_DATA_RULE,
-  CEPH_METADATA_RULE,
-  CEPH_RBD_RULE,
-};
-
 #define OSD_SUPERBLOCK_POBJECT hobject_t(sobject_t(object_t("osd_superblock"), 0))
 
 // placement seed (a hash value)
@@ -21,8 +21,8 @@
   flags
   
   pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool crash_replay_interval 45
-  pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 1 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool
-  pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 2 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool
+  pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool
+  pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool
   
   max_osd 3
   
@@ -44,8 +44,8 @@
   flags
   
   pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 owner 0 flags hashpspool crash_replay_interval 45
-  pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 1 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 owner 0 flags hashpspool
-  pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 2 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 owner 0 flags hashpspool
+  pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 owner 0 flags hashpspool
+  pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 0 owner 0 flags hashpspool
   
   max_osd 1
   
@@ -11,8 +11,8 @@
   flags
   
   pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool crash_replay_interval 45
-  pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 1 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool
-  pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 2 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool
+  pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool
+  pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 192 pgp_num 192 last_change 0 owner 0 flags hashpspool
   
   max_osd 3
   
@@ -1,6 +1,22 @@
-  $ osdmaptool --create-from-conf om -c $TESTDIR/ceph.conf.withracks > /dev/null
+  $ osdmaptool --create-from-conf om -c $TESTDIR/ceph.conf.withracks
+  osdmaptool: osdmap file 'om'
+  osdmaptool: writing epoch 1 to om
   $ osdmaptool --test-map-pg 0.0 om
   osdmaptool: osdmap file 'om'
   parsed '0.0' -> 0.0
   0.0 raw [] up [] acting []
+  $ osdmaptool --print om
+  osdmaptool: osdmap file 'om'
+  epoch 1
+  fsid [0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12} (re)
+  created \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re)
+  modified \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d+ (re)
+  flags
+  
+  pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 owner 0 flags hashpspool crash_replay_interval 45
+  pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 owner 0 flags hashpspool
+  pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 15296 pgp_num 15296 last_change 0 owner 0 flags hashpspool
+  
+  max_osd 239
+  
   $ rm -f om