micro-osd.sh: break script up into discrete steps

The "CEPH_FEATURES" env var can now be used to exactly control what
steps will be run. If unset the steps will be derived from the
CEPH_VERSION env var.
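
For example (invocation sketched for illustration; any arguments the
script already takes are unchanged):

    # run only an explicit subset of steps
    CEPH_FEATURESET="mon osd selftest" ./micro-osd.sh

    # unset: the step list is derived from the ceph release
    CEPH_VERSION=octopus ./micro-osd.sh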

Best viewed by ignoring whitespace.

Signed-off-by: John Mulligan <jmulligan@redhat.com>
Author:    John Mulligan <jmulligan@redhat.com>, 2022-03-18 13:25:06 -04:00
Committer: mergify[bot]
parent 52bc6e9158
commit 8deb9cf778

micro-osd.sh
@@ -38,10 +38,14 @@ RGW_ID="r"
 S3_ACCESS_KEY=2262XNX11FZRR44XWIRD
 S3_SECRET_KEY=rmtuS1Uj1bIC08QFYGW18GfSHAbkPqdsuYynNudw
 
+FSID="$(uuidgen)"
+export CEPH_CONF=${DIR}/ceph.conf
+
+generate_ceph_conf() {
 # cluster wide parameters
-cat >> ${DIR}/ceph.conf <<EOF
+cat >> "${CEPH_CONF}" <<EOF
 [global]
-fsid = $(uuidgen)
+fsid = ${FSID}
 osd crush chooseleaf type = 0
 run dir = ${DIR}/run
 auth cluster required = none
@@ -85,21 +89,22 @@ rgw usage max user shards = 1
 log file = /var/log/ceph/client.rgw.${RGW_ID}.log
 rgw frontends = beast port=80
 EOF
+}
 
-export CEPH_CONF=${DIR}/ceph.conf
-
-# start an osd
+launch_mon() {
 ceph-mon --id ${MON_NAME} --mkfs --keyring /dev/null
 touch ${MON_DATA}/keyring
 ceph-mon --id ${MON_NAME}
+}
 
-# start an osd
+launch_osd() {
 OSD_ID=$(ceph osd create)
 ceph osd crush add osd.${OSD_ID} 1 root=default
 ceph-osd --id ${OSD_ID} --mkjournal --mkfs
 ceph-osd --id ${OSD_ID} || ceph-osd --id ${OSD_ID} || ceph-osd --id ${OSD_ID}
+}
 
-# start an mds for cephfs
+launch_mds() {
 ceph auth get-or-create mds.${MDS_NAME} mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > ${MDS_DATA}/keyring
 ceph osd pool create cephfs_data 8
 ceph osd pool create cephfs_metadata 8
@@ -108,23 +113,18 @@ ceph fs ls
 ceph-mds -i ${MDS_NAME}
 ceph status
 while [[ ! $(ceph mds stat | grep "up:active") ]]; do sleep 1; done
+}
 
-# start a manager
+launch_mgr() {
 ceph-mgr --id ${MGR_NAME}
+}
 
-# start rbd-mirror
+launch_rbd_mirror() {
 ceph auth get-or-create client.rbd-mirror.${MIRROR_ID} mon 'profile rbd-mirror' osd 'profile rbd'
 rbd-mirror --id ${MIRROR_ID} --log-file ${LOG_DIR}/rbd-mirror.log
+}
 
-# start cephfs-mirror
-# Skip on "nautilus" and "octopus" the supported ceph versions that
-# don't have it.
-case "${CEPH_VERSION}" in
-    nautilus|octopus)
-        echo "Skipping cephfs-mirror on ${CEPH_VERSION} ..."
-        ;;
-    *)
+launch_cephfs_mirror() {
 ceph auth get-or-create "client.cephfs-mirror.${MIRROR_ID}" \
     mon 'profile cephfs-mirror' \
     mds 'allow r' \
@@ -134,17 +134,16 @@ case "${CEPH_VERSION}" in
     --log-file "${LOG_DIR}/cephfs-mirror.log"
 ceph fs authorize cephfs client.cephfs-mirror-remote / rwps > "${DIR}/cephfs-mirror-remote.out"
 # the .out file above is not used by the scripts but can be used for debugging
-        ;;
-esac
+}
 
-# start an rgw
+launch_radosgw() {
 ceph auth get-or-create client.rgw."${RGW_ID}" osd 'allow rwx' mon 'allow rw' -o ${RGW_DATA}/keyring
 radosgw -n client.rgw."${RGW_ID}" -k ${RGW_DATA}/keyring
 timeout 60 sh -c 'until [ $(ceph -s | grep -c "rgw:") -eq 1 ]; do echo "waiting for rgw to show up" && sleep 1; done'
 radosgw-admin user create --uid admin --display-name "Admin User" --caps "buckets=*;users=*;usage=read;metadata=read" --access-key="$S3_ACCESS_KEY" --secret-key="$S3_SECRET_KEY"
 
-# test the setup
+selftest() {
 ceph --version
 ceph status
 test_pool=$(uuidgen)
@@ -155,6 +154,37 @@ rados --pool ${test_pool} get group ${temp_file}
 diff /etc/group ${temp_file}
 ceph osd pool delete ${test_pool} ${test_pool} --yes-i-really-really-mean-it
 rm ${temp_file}
+}
 
+FEATURESET="${CEPH_FEATURESET}"
+if [ -z "$FEATURESET" ] ; then
+    case "${CEPH_VERSION}" in
+        nautilus|octopus)
+            FEATURESET="mon osd mgr mds rbd-mirror rgw selftest"
+            ;;
+        *)
+            FEATURESET="mon osd mgr mds rbd-mirror cephfs-mirror rgw selftest"
+            ;;
+    esac
+fi
+
+generate_ceph_conf
+
+for fname in ${FEATURESET}; do
+    case "${fname}" in
+        mon) launch_mon ;;
+        osd) launch_osd ;;
+        mds) launch_mds ;;
+        mgr) launch_mgr ;;
+        rbd-mirror) launch_rbd_mirror ;;
+        cephfs-mirror) launch_cephfs_mirror ;;
+        rgw|radosgw) launch_radosgw ;;
+        selftest) selftest ;;
+        *)
+            echo "Invalid feature: ${fname}"
+            exit 2
+            ;;
+    esac
+done
 
 touch ${DIR}/.ready
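
As a quick sanity check of the new dispatch loop (hypothetical run; the
message and exit code come from the loop's own error branch):

    CEPH_FEATURESET="mon bogus" ./micro-osd.sh
    # runs the mon step, then prints "Invalid feature: bogus" and exits 2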