Mirror of https://github.com/ceph/go-ceph
micro-osd.sh: break script up into discrete steps
The "CEPH_FEATURES" env var can now be used to exactly control what steps will be run. If unset the steps will be derived from the CEPH_VERSION env var. Best viewed by ignoring whitespace. Signed-off-by: John Mulligan <jmulligan@redhat.com>
commit 8deb9cf778
parent 52bc6e9158
micro-osd.sh | 156 lines changed
@@ -38,10 +38,14 @@ RGW_ID="r"
 S3_ACCESS_KEY=2262XNX11FZRR44XWIRD
 S3_SECRET_KEY=rmtuS1Uj1bIC08QFYGW18GfSHAbkPqdsuYynNudw
 
-# cluster wide parameters
-cat >> ${DIR}/ceph.conf <<EOF
+FSID="$(uuidgen)"
+export CEPH_CONF=${DIR}/ceph.conf
+
+generate_ceph_conf() {
+    # cluster wide parameters
+    cat >> "${CEPH_CONF}" <<EOF
 [global]
-fsid = $(uuidgen)
+fsid = ${FSID}
 osd crush chooseleaf type = 0
 run dir = ${DIR}/run
 auth cluster required = none
@@ -85,76 +89,102 @@ rgw usage max user shards = 1
 log file = /var/log/ceph/client.rgw.${RGW_ID}.log
 rgw frontends = beast port=80
 EOF
+}
 
-export CEPH_CONF=${DIR}/ceph.conf
-
-# start an osd
-ceph-mon --id ${MON_NAME} --mkfs --keyring /dev/null
-touch ${MON_DATA}/keyring
-ceph-mon --id ${MON_NAME}
+launch_mon() {
+    ceph-mon --id ${MON_NAME} --mkfs --keyring /dev/null
+    touch ${MON_DATA}/keyring
+    ceph-mon --id ${MON_NAME}
+}
 
-# start an osd
-OSD_ID=$(ceph osd create)
-ceph osd crush add osd.${OSD_ID} 1 root=default
-ceph-osd --id ${OSD_ID} --mkjournal --mkfs
-ceph-osd --id ${OSD_ID} || ceph-osd --id ${OSD_ID} || ceph-osd --id ${OSD_ID}
+launch_osd() {
+    OSD_ID=$(ceph osd create)
+    ceph osd crush add osd.${OSD_ID} 1 root=default
+    ceph-osd --id ${OSD_ID} --mkjournal --mkfs
+    ceph-osd --id ${OSD_ID} || ceph-osd --id ${OSD_ID} || ceph-osd --id ${OSD_ID}
+}
 
-# start an mds for cephfs
-ceph auth get-or-create mds.${MDS_NAME} mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > ${MDS_DATA}/keyring
-ceph osd pool create cephfs_data 8
-ceph osd pool create cephfs_metadata 8
-ceph fs new cephfs cephfs_metadata cephfs_data
-ceph fs ls
-ceph-mds -i ${MDS_NAME}
-ceph status
-while [[ ! $(ceph mds stat | grep "up:active") ]]; do sleep 1; done
+launch_mds() {
+    ceph auth get-or-create mds.${MDS_NAME} mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > ${MDS_DATA}/keyring
+    ceph osd pool create cephfs_data 8
+    ceph osd pool create cephfs_metadata 8
+    ceph fs new cephfs cephfs_metadata cephfs_data
+    ceph fs ls
+    ceph-mds -i ${MDS_NAME}
+    ceph status
+    while [[ ! $(ceph mds stat | grep "up:active") ]]; do sleep 1; done
+}
 
-# start a manager
-ceph-mgr --id ${MGR_NAME}
+launch_mgr() {
+    ceph-mgr --id ${MGR_NAME}
+}
 
-# start rbd-mirror
-ceph auth get-or-create client.rbd-mirror.${MIRROR_ID} mon 'profile rbd-mirror' osd 'profile rbd'
-rbd-mirror --id ${MIRROR_ID} --log-file ${LOG_DIR}/rbd-mirror.log
+launch_rbd_mirror() {
+    ceph auth get-or-create client.rbd-mirror.${MIRROR_ID} mon 'profile rbd-mirror' osd 'profile rbd'
+    rbd-mirror --id ${MIRROR_ID} --log-file ${LOG_DIR}/rbd-mirror.log
+}
 
-# start cephfs-mirror
-# Skip on "nautilus" and "octopus" the supported ceph versions that
-# don't have it.
-case "${CEPH_VERSION}" in
-    nautilus|octopus)
-        echo "Skipping cephfs-mirror on ${CEPH_VERSION} ..."
-        ;;
-    *)
-        ceph auth get-or-create "client.cephfs-mirror.${MIRROR_ID}" \
-            mon 'profile cephfs-mirror' \
-            mds 'allow r' \
-            osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' \
-            mgr 'allow r'
-        cephfs-mirror --id "cephfs-mirror.${MIRROR_ID}" \
-            --log-file "${LOG_DIR}/cephfs-mirror.log"
-        ceph fs authorize cephfs client.cephfs-mirror-remote / rwps > "${DIR}/cephfs-mirror-remote.out"
-        # the .out file above is not used by the scripts but can be used for debugging
-        ;;
-esac
+launch_cephfs_mirror() {
+    ceph auth get-or-create "client.cephfs-mirror.${MIRROR_ID}" \
+        mon 'profile cephfs-mirror' \
+        mds 'allow r' \
+        osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' \
+        mgr 'allow r'
+    cephfs-mirror --id "cephfs-mirror.${MIRROR_ID}" \
+        --log-file "${LOG_DIR}/cephfs-mirror.log"
+    ceph fs authorize cephfs client.cephfs-mirror-remote / rwps > "${DIR}/cephfs-mirror-remote.out"
+    # the .out file above is not used by the scripts but can be used for debugging
+}
 
-# start an rgw
-ceph auth get-or-create client.rgw."${RGW_ID}" osd 'allow rwx' mon 'allow rw' -o ${RGW_DATA}/keyring
-radosgw -n client.rgw."${RGW_ID}" -k ${RGW_DATA}/keyring
-timeout 60 sh -c 'until [ $(ceph -s | grep -c "rgw:") -eq 1 ]; do echo "waiting for rgw to show up" && sleep 1; done'
-radosgw-admin user create --uid admin --display-name "Admin User" --caps "buckets=*;users=*;usage=read;metadata=read" --access-key="$S3_ACCESS_KEY" --secret-key="$S3_SECRET_KEY"
+launch_radosgw() {
+    ceph auth get-or-create client.rgw."${RGW_ID}" osd 'allow rwx' mon 'allow rw' -o ${RGW_DATA}/keyring
+    radosgw -n client.rgw."${RGW_ID}" -k ${RGW_DATA}/keyring
+    timeout 60 sh -c 'until [ $(ceph -s | grep -c "rgw:") -eq 1 ]; do echo "waiting for rgw to show up" && sleep 1; done'
+    radosgw-admin user create --uid admin --display-name "Admin User" --caps "buckets=*;users=*;usage=read;metadata=read" --access-key="$S3_ACCESS_KEY" --secret-key="$S3_SECRET_KEY"
+}
 
-# test the setup
-ceph --version
-ceph status
-test_pool=$(uuidgen)
-temp_file=$(mktemp)
-ceph osd pool create ${test_pool} 0
-rados --pool ${test_pool} put group /etc/group
-rados --pool ${test_pool} get group ${temp_file}
-diff /etc/group ${temp_file}
-ceph osd pool delete ${test_pool} ${test_pool} --yes-i-really-really-mean-it
-rm ${temp_file}
+selftest() {
+    ceph --version
+    ceph status
+    test_pool=$(uuidgen)
+    temp_file=$(mktemp)
+    ceph osd pool create ${test_pool} 0
+    rados --pool ${test_pool} put group /etc/group
+    rados --pool ${test_pool} get group ${temp_file}
+    diff /etc/group ${temp_file}
+    ceph osd pool delete ${test_pool} ${test_pool} --yes-i-really-really-mean-it
+    rm ${temp_file}
+}
+
+FEATURESET="${CEPH_FEATURESET}"
+if [ -z "$FEATURESET" ] ; then
+    case "${CEPH_VERSION}" in
+        nautilus|octopus)
+            FEATURESET="mon osd mgr mds rbd-mirror rgw selftest"
+            ;;
+        *)
+            FEATURESET="mon osd mgr mds rbd-mirror cephfs-mirror rgw selftest"
+            ;;
+    esac
+fi
+
+generate_ceph_conf
+for fname in ${FEATURESET}; do
+    case "${fname}" in
+        mon) launch_mon ;;
+        osd) launch_osd ;;
+        mds) launch_mds ;;
+        mgr) launch_mgr ;;
+        rbd-mirror) launch_rbd_mirror ;;
+        cephfs-mirror) launch_cephfs_mirror ;;
+        rgw|radosgw) launch_radosgw ;;
+        selftest) selftest ;;
+        *)
+            echo "Invalid feature: ${fname}"
+            exit 2
+            ;;
+    esac
done
 
 touch ${DIR}/.ready