Merge pull request #13974 from tchaikov/wip-vstart-start-mgr

vstart: do not start mgr if not start_all

Reviewed-by: Sage Weil <sage@redhat.com>
Authored by Kefu Chai on 2017-03-24 21:44:56 +08:00; committed by GitHub.
commit 21d8a97d19
9 changed files with 347 additions and 339 deletions
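The user-visible change: vstart.sh no longer takes the positional daemon selectors mon, osd, and mds. Which daemons start is now driven entirely by the daemon counts (CEPH_NUM_* or the MON/OSD/MDS/MGR shorthands), with a count of 0 suppressing that daemon, so the mgr is started only when its count is nonzero. A sketch of the migration (illustrative invocations, not lines from the patch):

    # before: positional arguments selected the daemons to start
    MON=1 OSD=4 ./vstart.sh -n -d mon osd

    # after: zero out the counts of the daemons you do not want
    MON=1 OSD=4 MDS=0 MGR=0 ./vstart.sh -n -d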

View File

@@ -12,7 +12,7 @@ It allows to deploy a fake local cluster on your machine for development purpose
To start your development cluster, type the following::
-    vstart.sh [OPTIONS]... [mon] [osd] [mds]
+    vstart.sh [OPTIONS]...
In order to stop the cluster, you can type::

View File

@@ -152,7 +152,7 @@ def cat_file(level, filename):
def vstart(new, opt=""):
print("vstarting....", end="")
NEW = new and "-n" or "-N"
call("MON=1 OSD=4 CEPH_PORT=7400 {path}/src/vstart.sh --short -l {new} -d mon osd {opt} > /dev/null 2>&1".format(new=NEW, opt=opt, path=CEPH_ROOT), shell=True)
call("MON=1 OSD=4 MDS=0 MGR=0 CEPH_PORT=7400 {path}/src/vstart.sh --short -l {new} -d {opt} > /dev/null 2>&1".format(new=NEW, opt=opt, path=CEPH_ROOT), shell=True)
print("DONE")

View File

@@ -18,7 +18,7 @@
source $(dirname $0)/detect-build-env-vars.sh
CEPH_CLI_TEST_DUP_COMMAND=1 \
-MDS=1 MON=1 OSD=3 CEPH_START='mon osd mds' CEPH_PORT=7200 $CEPH_ROOT/src/test/vstart_wrapper.sh \
+MDS=1 MON=1 OSD=3 MGR=0 CEPH_PORT=7200 $CEPH_ROOT/src/test/vstart_wrapper.sh \
$CEPH_ROOT/qa/workunits/cephtool/test.sh \
--test-mds \
--asok-does-not-need-root

View File

@@ -20,7 +20,7 @@ source $(dirname $0)/detect-build-env-vars.sh
CEPH_CLI_TEST_DUP_COMMAND=1 \
# uses CEPH_PORT going from 7202 7203 and 7204 because
# it starts at 7202 and runs 3 mons (see vstart.sh)
-MON=3 OSD=3 CEPH_START='mon osd' CEPH_PORT=7202 $CEPH_ROOT/src/test/vstart_wrapper.sh \
+MON=3 OSD=3 MDS=0 MGR=0 CEPH_PORT=7202 $CEPH_ROOT/src/test/vstart_wrapper.sh \
$CEPH_ROOT/qa/workunits/cephtool/test.sh \
--test-mon \
--asok-does-not-need-root

View File

@@ -18,7 +18,7 @@
source $(dirname $0)/detect-build-env-vars.sh
CEPH_CLI_TEST_DUP_COMMAND=1 \
-MON=1 OSD=3 CEPH_START='mon osd' CEPH_PORT=7201 $CEPH_ROOT/src/test/vstart_wrapper.sh \
+MON=1 OSD=3 MDS=0 MGR=0 CEPH_PORT=7201 $CEPH_ROOT/src/test/vstart_wrapper.sh \
$CEPH_ROOT/qa/workunits/cephtool/test.sh \
--test-osd \
--asok-does-not-need-root

View File

@@ -17,5 +17,5 @@
source $(dirname $0)/detect-build-env-vars.sh
CEPH_CLI_TEST_DUP_COMMAND=1 \
-MON=1 OSD=3 CEPH_START='mon osd' CEPH_PORT=7205 $CEPH_ROOT/src/test/vstart_wrapper.sh \
+MON=1 OSD=3 MDS=0 MGR=0 CEPH_PORT=7205 $CEPH_ROOT/src/test/vstart_wrapper.sh \
$CEPH_ROOT/src/test/test_rados_tool.sh

View File

@@ -57,7 +57,7 @@ make_fresh() {
exit 1
fi
-env CEPH_NUM_MON=3 CEPH_NUM_OSD=2 CEPH_NUM_MDS=1 \
+env MON=3 OSD=0 MDS=0 MGR=0 \
./vstart.sh -n -l -d mon
}

View File

@@ -33,7 +33,7 @@ function vstart_setup()
$CEPH_ROOT/src/vstart.sh \
--short \
-o 'paxos propose interval = 0.01' \
-    -n -l $CEPH_START || return 1
+    -n -l || return 1
export CEPH_CONF=$CEPH_DIR/ceph.conf
crit=$(expr 100 - $(ceph-conf --show-config-value mon_data_avail_crit))

View File

@@ -57,6 +57,17 @@ export DYLD_LIBRARY_PATH=$CEPH_LIB:$DYLD_LIBRARY_PATH
[ -z "$CEPH_NUM_FS" ] && CEPH_NUM_FS="$FS"
[ -z "$CEPH_NUM_RGW" ] && CEPH_NUM_RGW="$RGW"
+# if none of the CEPH_NUM_* number is specified, kill the existing
+# cluster.
+if [ -z "$CEPH_NUM_MON" -a \
+     -z "$CEPH_NUM_OSD" -a \
+     -z "$CEPH_NUM_MDS" -a \
+     -z "$CEPH_NUM_MGR" ]; then
+    kill_all=1
+else
+    kill_all=0
+fi
[ -z "$CEPH_NUM_MON" ] && CEPH_NUM_MON=3
[ -z "$CEPH_NUM_OSD" ] && CEPH_NUM_OSD=3
[ -z "$CEPH_NUM_MDS" ] && CEPH_NUM_MDS=3
@@ -81,10 +92,6 @@ extra_conf=""
new=0
standby=0
debug=0
-start_all=1
-start_mon=0
-start_mds=0
-start_osd=0
ip=""
nodaemon=0
smallmds=0
@@ -108,7 +115,7 @@ keyring_fn="$CEPH_CONF_PATH/keyring"
osdmap_fn="/tmp/ceph_osdmap.$$"
monmap_fn="/tmp/ceph_monmap.$$"
usage="usage: $0 [option]... [\"mon\"] [\"mds\"] [\"osd\"]\nex: $0 -n -d --mon_num 3 --osd_num 3 --mds_num 1 --rgw_num 1\n"
usage="usage: $0 [option]... \nex: $0 -n -d --mon_num 3 --osd_num 3 --mds_num 1 --rgw_num 1\n"
usage=$usage"options:\n"
usage=$usage"\t-d, --debug\n"
usage=$usage"\t-s, --standby_mds: Generate standby-replay MDS for each active\n"
@@ -126,9 +133,6 @@ usage=$usage"\t-X disable cephx\n"
usage=$usage"\t--hitset <pool> <hit_set_type>: enable hitset tracking\n"
usage=$usage"\t-e : create an erasure pool\n";
usage=$usage"\t-o config\t\t add extra config parameters to all sections\n"
usage=$usage"\tmon : start just ceph MONs\n"
usage=$usage"\tosd : start just ceph OSDs\n"
usage=$usage"\tmds : start just ceph MDSes\n"
usage=$usage"\t--mon_num specify ceph monitor count\n"
usage=$usage"\t--osd_num specify ceph osd count\n"
usage=$usage"\t--mds_num specify ceph mds count\n"
@@ -236,18 +240,6 @@ case $1 in
rgw_frontend=$2
shift
;;
-mon )
-    start_mon=1
-    start_all=0
-    ;;
-mds )
-    start_mds=1
-    start_all=0
-    ;;
-osd )
-    start_osd=1
-    start_all=0
-    ;;
-m )
[ -z "$2" ] && usage_exit
MON_ADDR=$2
@@ -303,7 +295,7 @@ esac
shift
done
if [ "$start_all" -eq 1 ]; then
if [ $kill_all -eq 1 ]; then
$SUDO $INIT_CEPH stop
fi
@@ -328,12 +320,6 @@ else
fi
fi
if [ "$start_all" -eq 1 ]; then
start_mon=1
start_mds=1
start_osd=1
fi
ARGS="-c $conf_fn"
prunb() {
@@ -370,9 +356,318 @@ wconf() {
fi
}
+prepare_conf() {
+    local DAEMONOPTS="
+        log file = $CEPH_OUT_DIR/\$name.log
+        admin socket = $CEPH_OUT_DIR/\$name.asok
+        chdir = \"\"
+        pid file = $CEPH_OUT_DIR/\$name.pid
+        heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
+"
+    wconf <<EOF
+; generated by vstart.sh on `date`
+[$VSTART_SEC]
+        num mon = $CEPH_NUM_MON
+        num osd = $CEPH_NUM_OSD
+        num mds = $CEPH_NUM_MDS
+        num mgr = $CEPH_NUM_MGR
+        num rgw = $CEPH_NUM_RGW
+[global]
+        fsid = $(uuidgen)
+        osd pg bits = 3
+        osd pgp bits = 5 ; (invalid, but ceph should cope!)
+        osd pool default size = $OSD_POOL_DEFAULT_SIZE
+        osd crush chooseleaf type = 0
+        osd pool default min size = 1
+        osd failsafe full ratio = .99
+        mon osd reporter subtree level = osd
+        mon osd full ratio = .99
+        mon data avail warn = 10
+        mon data avail crit = 1
+        erasure code dir = $EC_PATH
+        plugin dir = $CEPH_LIB
+        osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd
+        rgw frontends = $rgw_frontend port=$CEPH_RGW_PORT
+        filestore fd cache size = 32
+        run dir = $CEPH_OUT_DIR
+        enable experimental unrecoverable data corrupting features = *
+EOF
+    if [ "$lockdep" -eq 1 ] ; then
+        wconf <<EOF
+        lockdep = true
+EOF
+    fi
+    if [ "$cephx" -eq 1 ] ; then
+        wconf <<EOF
+        auth cluster required = cephx
+        auth service required = cephx
+        auth client required = cephx
+EOF
+    else
+        wconf <<EOF
+        auth cluster required = none
+        auth service required = none
+        auth client required = none
+EOF
+    fi
+    if [ "$short" -eq 1 ]; then
+        COSDSHORT="        osd max object name len = 460
+        osd max object namespace len = 64"
+    fi
+    wconf <<EOF
+[client]
+        keyring = $keyring_fn
+        log file = $CEPH_OUT_DIR/\$name.\$pid.log
+        admin socket = $CEPH_OUT_DIR/\$name.\$pid.asok
+[mds]
+$DAEMONOPTS
+$CMDSDEBUG
+        mds debug frag = true
+        mds debug auth pins = true
+        mds debug subtrees = true
+        mds data = $CEPH_DEV_DIR/mds.\$id
+        mds root ino uid = `id -u`
+        mds root ino gid = `id -g`
+$extra_conf
+[mgr]
+        mgr modules = rest fsstatus
+        mgr data = $CEPH_DEV_DIR/mgr.\$id
+        mgr module path = $MGR_PYTHON_PATH
+$DAEMONOPTS
+$CMGRDEBUG
+$extra_conf
+[osd]
+$DAEMONOPTS
+        osd_check_max_object_name_len_on_startup = false
+        osd data = $CEPH_DEV_DIR/osd\$id
+        osd journal = $CEPH_DEV_DIR/osd\$id/journal
+        osd journal size = 100
+        osd class tmp = out
+        osd class dir = $OBJCLASS_PATH
+        osd class load list = *
+        osd class default list = *
+        osd scrub load threshold = 2000.0
+        osd debug op order = true
+        osd debug misdirected ops = true
+        filestore wbthrottle xfs ios start flusher = 10
+        filestore wbthrottle xfs ios hard limit = 20
+        filestore wbthrottle xfs inodes hard limit = 30
+        filestore wbthrottle btrfs ios start flusher = 10
+        filestore wbthrottle btrfs ios hard limit = 20
+        filestore wbthrottle btrfs inodes hard limit = 30
+        osd copyfrom max chunk = 524288
+        bluestore fsck on mount = true
+        bluestore block create = true
+        bluestore block db size = 67108864
+        bluestore block db create = true
+        bluestore block wal size = 1048576000
+        bluestore block wal create = true
+$COSDDEBUG
+$COSDMEMSTORE
+$COSDSHORT
+$extra_conf
+[mon]
+        mon pg warn min per osd = 3
+        mon osd allow primary affinity = true
+        mon reweight min pgs per osd = 4
+        mon osd prime pg temp = true
+        crushtool = $CEPH_BIN/crushtool
+        mon allow pool delete = true
+$DAEMONOPTS
+$CMONDEBUG
+$extra_conf
+        mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
+[global]
+$extra_conf
+EOF
+}
+start_mon() {
+    local MONS=""
+    local count=0
+    for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
+    do
+        [ $count -eq $CEPH_NUM_MON ] && break;
+        count=$(($count + 1))
+        if [ -z "$MONS" ];
+        then
+            MONS="$f"
+        else
+            MONS="$MONS $f"
+        fi
+    done
+    if [ "$new" -eq 1 ]; then
+        if [ `echo $IP | grep '^127\\.'` ]
+        then
+            echo
+            echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
+            echo " connect. either adjust /etc/hosts, or edit this script to use your"
+            echo " machine's real IP."
+            echo
+        fi
+        prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
+        prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin --set-uid=0 \
+            --cap mon 'allow *' \
+            --cap osd 'allow *' \
+            --cap mds 'allow *' \
+            "$keyring_fn"
+        # build a fresh fs monmap, mon fs
+        local str=""
+        local count=0
+        for f in $MONS
+        do
+            str="$str --add $f $IP:$(($CEPH_PORT+$count))"
+            wconf <<EOF
+[mon.$f]
+host = $HOSTNAME
+mon data = $CEPH_DEV_DIR/mon.$f
+mon addr = $IP:$(($CEPH_PORT+$count))
+EOF
+            count=$(($count + 1))
+        done
+        prun "$CEPH_BIN/monmaptool" --create --clobber $str --print "$monmap_fn"
+        for f in $MONS
+        do
+            prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
+            prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
+            prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
+        done
+        prun rm -- "$monmap_fn"
+    fi
+    # start monitors
+    for f in $MONS
+    do
+        run 'mon' $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
+    done
+}
+start_osd() {
+    for osd in `seq 0 $((CEPH_NUM_OSD-1))`
+    do
+        if [ "$new" -eq 1 ]; then
+            wconf <<EOF
+[osd.$osd]
+host = $HOSTNAME
+EOF
+            rm -rf $CEPH_DEV_DIR/osd$osd || true
+            if command -v btrfs > /dev/null; then
+                for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
+            fi
+            mkdir -p $CEPH_DEV_DIR/osd$osd
+            local uuid=`uuidgen`
+            echo "add osd$osd $uuid"
+            ceph_adm osd create $uuid
+            ceph_adm osd crush add osd.$osd 1.0 host=$HOSTNAME root=default
+            $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS --mkfs --mkkey --osd-uuid $uuid
+            local key_fn=$CEPH_DEV_DIR/osd$osd/keyring
+            echo adding osd$osd key to auth repository
+            ceph_adm -i "$key_fn" auth add osd.$osd osd "allow *" mon "allow profile osd" mgr "allow"
+        fi
+        echo start osd$osd
+        run 'osd' $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS $COSD_ARGS
+    done
+}
+start_mgr() {
+    local mgr=0
+    for name in x y z a b c d e f g h i j k l m n o p
+    do
+        [ $mgr -eq $CEPH_NUM_MGR ] && break
+        mgr=$(($mgr + 1))
+        if [ "$new" -eq 1 ]; then
+            mkdir -p $CEPH_DEV_DIR/mgr.$name
+            key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
+            $SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
+            ceph_adm -i $key_fn auth add mgr.$name mon 'allow *'
+        fi
+        wconf <<EOF
+[mgr.$name]
+host = $HOSTNAME
+EOF
+        echo "Starting mgr.${name}"
+        run 'mgr' $CEPH_BIN/ceph-mgr -i $name
+    done
+}
+start_mds() {
+    if [ $new -eq 1 ]; then
+        if [ "$CEPH_NUM_FS" -gt "0" ] ; then
+            if [ "$CEPH_NUM_FS" -gt "1" ] ; then
+                ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
+            fi
+            local fs=0
+            for name in a b c d e f g h i j k l m n o p
+            do
+                ceph_adm osd pool create "cephfs_data_${name}" 8
+                ceph_adm osd pool create "cephfs_metadata_${name}" 8
+                ceph_adm fs new "cephfs_${name}" "cephfs_metadata_${name}" "cephfs_data_${name}"
+                fs=$(($fs + 1))
+                [ $fs -eq $CEPH_NUM_FS ] && break
+            done
+        fi
+    fi
+    local mds=0
+    for name in a b c d e f g h i j k l m n o p
+    do
+        [ $mds -eq $CEPH_NUM_MDS ] && break
+        mds=$(($mds + 1))
+        if [ "$new" -eq 1 ]; then
+            prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
+            key_fn=$CEPH_DEV_DIR/mds.$name/keyring
+            wconf <<EOF
+[mds.$name]
+host = $HOSTNAME
+EOF
+            if [ "$standby" -eq 1 ]; then
+                mkdir -p $CEPH_DEV_DIR/mds.${name}s
+                wconf <<EOF
+mds standby for rank = $mds
+[mds.${name}s]
+mds standby replay = true
+mds standby for name = ${name}
+EOF
+            fi
+            prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
+            ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow'
+            if [ "$standby" -eq 1 ]; then
+                prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
+                    "$CEPH_DEV_DIR/mds.${name}s/keyring"
+                ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
+                    mon 'allow *' osd 'allow *' mds 'allow' mgr 'allow'
+            fi
+        fi
+        run 'mds' $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
+        if [ "$standby" -eq 1 ]; then
+            run 'mds' $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
+        fi
+        #valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
+        #$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
+        #ceph_adm mds set max_mds 2
+    done
+}
if [ "$debug" -eq 0 ]; then
CMONDEBUG='
-debug mon = 10
+debug mon = 10
debug ms = 1'
COSDDEBUG='
debug ms = 1'
@@ -381,7 +676,7 @@ if [ "$debug" -eq 0 ]; then
else
echo "** going verbose **"
CMONDEBUG='
-debug mon = 20
+debug mon = 20
debug paxos = 20
debug auth = 20
debug ms = 1'
@@ -479,229 +774,17 @@ ceph_adm() {
fi
}
MONS=""
count=0
for f in a b c d e f g h i j k l m n o p q r s t u v w x y z
do
if [ -z "$MONS" ];
then
MONS="$f"
else
MONS="$MONS $f"
fi
count=$(($count + 1))
[ $count -eq $CEPH_NUM_MON ] && break;
done
DAEMONOPTS="
log file = $CEPH_OUT_DIR/\$name.log
admin socket = $CEPH_OUT_DIR/\$name.asok
chdir = \"\"
pid file = $CEPH_OUT_DIR/\$name.pid
heartbeat file = $CEPH_OUT_DIR/\$name.heartbeat
"
if [ "$start_mon" -eq 1 ]; then
if [ "$new" -eq 1 ]; then
wconf <<EOF
; generated by vstart.sh on `date`
[$VSTART_SEC]
num mon = $CEPH_NUM_MON
num osd = $CEPH_NUM_OSD
num mds = $CEPH_NUM_MDS
num mgr = $CEPH_NUM_MGR
num rgw = $CEPH_NUM_RGW
[global]
fsid = $(uuidgen)
osd pg bits = 3
osd pgp bits = 5 ; (invalid, but ceph should cope!)
osd pool default size = $OSD_POOL_DEFAULT_SIZE
osd crush chooseleaf type = 0
osd pool default min size = 1
osd failsafe full ratio = .99
mon osd reporter subtree level = osd
mon osd full ratio = .99
mon data avail warn = 10
mon data avail crit = 1
erasure code dir = $EC_PATH
plugin dir = $CEPH_LIB
osd pool default erasure code profile = plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd
rgw frontends = $rgw_frontend port=$CEPH_RGW_PORT
filestore fd cache size = 32
run dir = $CEPH_OUT_DIR
enable experimental unrecoverable data corrupting features = *
EOF
if [ "$lockdep" -eq 1 ] ; then
wconf <<EOF
lockdep = true
EOF
fi
if [ "$cephx" -eq 1 ] ; then
wconf <<EOF
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
EOF
else
wconf <<EOF
auth cluster required = none
auth service required = none
auth client required = none
EOF
fi
if [ "$short" -eq 1 ]; then
COSDSHORT=" osd max object name len = 460
osd max object namespace len = 64"
fi
-wconf <<EOF
-[client]
-keyring = $keyring_fn
-log file = $CEPH_OUT_DIR/\$name.\$pid.log
-admin socket = $CEPH_OUT_DIR/\$name.\$pid.asok
-[mds]
-$DAEMONOPTS
-$CMDSDEBUG
-mds debug frag = true
-mds debug auth pins = true
-mds debug subtrees = true
-mds data = $CEPH_DEV_DIR/mds.\$id
-mds root ino uid = `id -u`
-mds root ino gid = `id -g`
-$extra_conf
-[mgr]
-mgr modules = rest fsstatus
-mgr data = $CEPH_DEV_DIR/mgr.\$id
-mgr module path = $MGR_PYTHON_PATH
-$DAEMONOPTS
-$CMGRDEBUG
-$extra_conf
-[osd]
-$DAEMONOPTS
-osd_check_max_object_name_len_on_startup = false
-osd data = $CEPH_DEV_DIR/osd\$id
-osd journal = $CEPH_DEV_DIR/osd\$id/journal
-osd journal size = 100
-osd class tmp = out
-osd class dir = $OBJCLASS_PATH
-osd class load list = *
-osd class default list = *
-osd scrub load threshold = 2000.0
-osd debug op order = true
-osd debug misdirected ops = true
-filestore wbthrottle xfs ios start flusher = 10
-filestore wbthrottle xfs ios hard limit = 20
-filestore wbthrottle xfs inodes hard limit = 30
-filestore wbthrottle btrfs ios start flusher = 10
-filestore wbthrottle btrfs ios hard limit = 20
-filestore wbthrottle btrfs inodes hard limit = 30
-osd copyfrom max chunk = 524288
-bluestore fsck on mount = true
-bluestore block create = true
-bluestore block db size = 67108864
-bluestore block db create = true
-bluestore block wal size = 1048576000
-bluestore block wal create = true
-$COSDDEBUG
-$COSDMEMSTORE
-$COSDSHORT
-$extra_conf
-[mon]
-mon pg warn min per osd = 3
-mon osd allow primary affinity = true
-mon reweight min pgs per osd = 4
-mon osd prime pg temp = true
-crushtool = $CEPH_BIN/crushtool
-mon allow pool delete = true
-$DAEMONOPTS
-$CMONDEBUG
-$extra_conf
-mon cluster log file = $CEPH_OUT_DIR/cluster.mon.\$id.log
-[global]
-$extra_conf
-EOF
-if [ `echo $IP | grep '^127\\.'` ]
-then
-echo
-echo "NOTE: hostname resolves to loopback; remote hosts will not be able to"
-echo " connect. either adjust /etc/hosts, or edit this script to use your"
-echo " machine's real IP."
-echo
-fi
-prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name=mon. "$keyring_fn" --cap mon 'allow *'
-prun $SUDO "$CEPH_BIN/ceph-authtool" --gen-key --name=client.admin --set-uid=0 \
-    --cap mon 'allow *' \
-    --cap osd 'allow *' \
-    --cap mds 'allow *' \
-    "$keyring_fn"
-# build a fresh fs monmap, mon fs
-str=""
-count=0
-for f in $MONS
-do
-str="$str --add $f $IP:$(($CEPH_PORT+$count))"
-wconf <<EOF
-[mon.$f]
-host = $HOSTNAME
-mon data = $CEPH_DEV_DIR/mon.$f
-mon addr = $IP:$(($CEPH_PORT+$count))
-EOF
-count=$(($count + 1))
-done
-prun "$CEPH_BIN/monmaptool" --create --clobber $str --print "$monmap_fn"
-for f in $MONS
-do
-prun rm -rf -- "$CEPH_DEV_DIR/mon.$f"
-prun mkdir -p "$CEPH_DEV_DIR/mon.$f"
-prun "$CEPH_BIN/ceph-mon" --mkfs -c "$conf_fn" -i "$f" --monmap="$monmap_fn" --keyring="$keyring_fn"
-done
-prun rm -- "$monmap_fn"
-fi
-# start monitors
-if [ "$start_mon" -ne 0 ]; then
-for f in $MONS
-do
-run 'mon' $CEPH_BIN/ceph-mon -i $f $ARGS $CMON_ARGS
-done
-fi
if [ "$new" -eq 1 ]; then
prepare_conf
fi
#osd
if [ "$start_osd" -eq 1 ]; then
for osd in `seq 0 $((CEPH_NUM_OSD-1))`
do
if [ "$new" -eq 1 ]; then
wconf <<EOF
[osd.$osd]
host = $HOSTNAME
EOF
if [ $CEPH_NUM_MON -gt 0 ]; then
start_mon
fi
rm -rf $CEPH_DEV_DIR/osd$osd || true
if command -v btrfs > /dev/null; then
for f in $CEPH_DEV_DIR/osd$osd/*; do btrfs sub delete $f &> /dev/null || true; done
fi
mkdir -p $CEPH_DEV_DIR/osd$osd
uuid=`uuidgen`
echo "add osd$osd $uuid"
ceph_adm osd create $uuid
ceph_adm osd crush add osd.$osd 1.0 host=$HOSTNAME root=default
$SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS --mkfs --mkkey --osd-uuid $uuid
key_fn=$CEPH_DEV_DIR/osd$osd/keyring
echo adding osd$osd key to auth repository
ceph_adm -i "$key_fn" auth add osd.$osd osd "allow *" mon "allow profile osd" mgr "allow"
fi
echo start osd$osd
run 'osd' $SUDO $CEPH_BIN/ceph-osd -i $osd $ARGS $COSD_ARGS
done
# osd
if [ $CEPH_NUM_OSD -gt 0 ]; then
start_osd
fi
# mds
@@ -713,65 +796,8 @@ if [ "$smallmds" -eq 1 ]; then
EOF
fi
if [ "$start_mds" -eq 1 -a "$CEPH_NUM_MDS" -gt 0 ]; then
if [ "$CEPH_NUM_FS" -gt "0" ] ; then
if [ "$CEPH_NUM_FS" -gt "1" ] ; then
ceph_adm fs flag set enable_multiple true --yes-i-really-mean-it
fi
fs=0
for name in a b c d e f g h i j k l m n o p
do
ceph_adm osd pool create "cephfs_data_${name}" 8
ceph_adm osd pool create "cephfs_metadata_${name}" 8
ceph_adm fs new "cephfs_${name}" "cephfs_metadata_${name}" "cephfs_data_${name}"
fs=$(($fs + 1))
[ $fs -eq $CEPH_NUM_FS ] && break
done
fi
mds=0
for name in a b c d e f g h i j k l m n o p
do
if [ "$new" -eq 1 ]; then
prun mkdir -p "$CEPH_DEV_DIR/mds.$name"
key_fn=$CEPH_DEV_DIR/mds.$name/keyring
wconf <<EOF
[mds.$name]
host = $HOSTNAME
EOF
if [ "$standby" -eq 1 ]; then
mkdir -p $CEPH_DEV_DIR/mds.${name}s
wconf <<EOF
mds standby for rank = $mds
[mds.${name}s]
mds standby replay = true
mds standby for name = ${name}
EOF
fi
prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.$name" "$key_fn"
ceph_adm -i "$key_fn" auth add "mds.$name" mon 'allow profile mds' osd 'allow *' mds 'allow' mgr 'allow'
if [ "$standby" -eq 1 ]; then
prun $SUDO "$CEPH_BIN/ceph-authtool" --create-keyring --gen-key --name="mds.${name}s" \
"$CEPH_DEV_DIR/mds.${name}s/keyring"
ceph_adm -i "$CEPH_DEV_DIR/mds.${name}s/keyring" auth add "mds.${name}s" \
mon 'allow *' osd 'allow *' mds 'allow' mgr 'allow'
fi
fi
run 'mds' $CEPH_BIN/ceph-mds -i $name $ARGS $CMDS_ARGS
if [ "$standby" -eq 1 ]; then
run 'mds' $CEPH_BIN/ceph-mds -i ${name}s $ARGS $CMDS_ARGS
fi
mds=$(($mds + 1))
[ $mds -eq $CEPH_NUM_MDS ] && break
#valgrind --tool=massif $CEPH_BIN/ceph-mds $ARGS --mds_log_max_segments 2 --mds_thrash_fragments 0 --mds_thrash_exports 0 > m #--debug_ms 20
#$CEPH_BIN/ceph-mds -d $ARGS --mds_thrash_fragments 0 --mds_thrash_exports 0 #--debug_ms 20
#ceph_adm mds set max_mds 2
done
if [ $CEPH_NUM_MDS -gt 0 ]; then
start_mds
fi
# Don't set max_mds until all the daemons are started, otherwise
@@ -782,36 +808,18 @@ fi
fs=0
for name in a b c d e f g h i j k l m n o p
do
+[ $fs -eq $CEPH_NUM_FS ] && break
+fs=$(($fs + 1))
if [ "$CEPH_MAX_MDS" -gt 1 ]; then
ceph_adm fs set "cephfs_${name}" allow_multimds true --yes-i-really-mean-it
ceph_adm fs set "cephfs_${name}" max_mds "$CEPH_MAX_MDS"
fi
-fs=$(($fs + 1))
-[ $fs -eq $CEPH_NUM_FS ] && break
done
if [ "$CEPH_NUM_MGR" -gt 0 ]; then
mgr=0
for name in x y z a b c d e f g h i j k l m n o p
do
if [ "$new" -eq 1 ]; then
mkdir -p $CEPH_DEV_DIR/mgr.$name
key_fn=$CEPH_DEV_DIR/mgr.$name/keyring
$SUDO $CEPH_BIN/ceph-authtool --create-keyring --gen-key --name=mgr.$name $key_fn
ceph_adm -i $key_fn auth add mgr.$name mon 'allow *'
fi
# mgr
wconf <<EOF
[mgr.$name]
host = $HOSTNAME
EOF
echo "Starting mgr.${name}"
run 'mgr' $CEPH_BIN/ceph-mgr -i $name
mgr=$(($mgr + 1))
[ $mgr -eq $CEPH_NUM_MGR ] && break
done
if [ $CEPH_NUM_MGR -gt 0 ]; then
start_mgr
fi
if [ "$ec" -eq 1 ]; then