diff --git a/micro-osd.sh b/micro-osd.sh
index c6009d0..d5fe209 100755
--- a/micro-osd.sh
+++ b/micro-osd.sh
@@ -38,10 +38,14 @@ RGW_ID="r"
 S3_ACCESS_KEY=2262XNX11FZRR44XWIRD
 S3_SECRET_KEY=rmtuS1Uj1bIC08QFYGW18GfSHAbkPqdsuYynNudw
 
-# cluster wide parameters
-cat >> ${DIR}/ceph.conf <<EOF
+CEPH_CONF=${DIR}/ceph.conf
+export CEPH_CONF
+
+generate_ceph_conf() {
+    # cluster wide parameters
+    cat >> "${CEPH_CONF}" <<EOF
 [global]
[...]
+launch_mds() {
+    ceph auth get-or-create mds.${MDS_NAME} mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > ${MDS_DATA}/keyring
+    ceph osd pool create cephfs_data 8
+    ceph osd pool create cephfs_metadata 8
+    ceph fs new cephfs cephfs_metadata cephfs_data
+    ceph fs ls
+    ceph-mds -i ${MDS_NAME}
+    ceph status
+    while [[ ! $(ceph mds stat | grep "up:active") ]]; do sleep 1; done
+}
 
-# start an mds for cephfs
-ceph auth get-or-create mds.${MDS_NAME} mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > ${MDS_DATA}/keyring
-ceph osd pool create cephfs_data 8
-ceph osd pool create cephfs_metadata 8
-ceph fs new cephfs cephfs_metadata cephfs_data
-ceph fs ls
-ceph-mds -i ${MDS_NAME}
-ceph status
-while [[ ! $(ceph mds stat | grep "up:active") ]]; do sleep 1; done
+launch_mgr() {
+    ceph-mgr --id ${MGR_NAME}
+}
+
+launch_rbd_mirror() {
+    ceph auth get-or-create client.rbd-mirror.${MIRROR_ID} mon 'profile rbd-mirror' osd 'profile rbd'
+    rbd-mirror --id ${MIRROR_ID} --log-file ${LOG_DIR}/rbd-mirror.log
+}
 
-# start a manager
-ceph-mgr --id ${MGR_NAME}
+launch_cephfs_mirror() {
+    ceph auth get-or-create "client.cephfs-mirror.${MIRROR_ID}" \
+        mon 'profile cephfs-mirror' \
+        mds 'allow r' \
+        osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' \
+        mgr 'allow r'
+    cephfs-mirror --id "cephfs-mirror.${MIRROR_ID}" \
+        --log-file "${LOG_DIR}/cephfs-mirror.log"
+    ceph fs authorize cephfs client.cephfs-mirror-remote / rwps > "${DIR}/cephfs-mirror-remote.out"
+    # the .out file above is not used by the scripts but can be used for debugging
+}
 
-# start rbd-mirror
-ceph auth get-or-create client.rbd-mirror.${MIRROR_ID} mon 'profile rbd-mirror' osd 'profile rbd'
-rbd-mirror --id ${MIRROR_ID} --log-file ${LOG_DIR}/rbd-mirror.log
+launch_radosgw() {
+    ceph auth get-or-create client.rgw."${RGW_ID}" osd 'allow rwx' mon 'allow rw' -o ${RGW_DATA}/keyring
+    radosgw -n client.rgw."${RGW_ID}" -k ${RGW_DATA}/keyring
+    timeout 60 sh -c 'until [ $(ceph -s | grep -c "rgw:") -eq 1 ]; do echo "waiting for rgw to show up" && sleep 1; done'
+    radosgw-admin user create --uid admin --display-name "Admin User" --caps "buckets=*;users=*;usage=read;metadata=read" --access-key="$S3_ACCESS_KEY" --secret-key="$S3_SECRET_KEY"
+}
 
-# start cephfs-mirror
-# Skip on "nautilus" and "octopus" the supported ceph versions that
-# don't have it.
-case "${CEPH_VERSION}" in
-    nautilus|octopus)
-        echo "Skipping cephfs-mirror on ${CEPH_VERSION} ..."
-        ;;
-    *)
-        ceph auth get-or-create "client.cephfs-mirror.${MIRROR_ID}" \
-            mon 'profile cephfs-mirror' \
-            mds 'allow r' \
-            osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' \
-            mgr 'allow r'
-        cephfs-mirror --id "cephfs-mirror.${MIRROR_ID}" \
-            --log-file "${LOG_DIR}/cephfs-mirror.log"
-        ceph fs authorize cephfs client.cephfs-mirror-remote / rwps > "${DIR}/cephfs-mirror-remote.out"
-        # the .out file above is not used by the scripts but can be used for debugging
-        ;;
-esac
+selftest() {
+    ceph --version
+    ceph status
+    test_pool=$(uuidgen)
+    temp_file=$(mktemp)
+    ceph osd pool create ${test_pool} 0
+    rados --pool ${test_pool} put group /etc/group
+    rados --pool ${test_pool} get group ${temp_file}
+    diff /etc/group ${temp_file}
+    ceph osd pool delete ${test_pool} ${test_pool} --yes-i-really-really-mean-it
+    rm ${temp_file}
+}
+
+FEATURESET="${CEPH_FEATURESET}"
+if [ -z "$FEATURESET" ] ; then
+    case "${CEPH_VERSION}" in
+        nautilus|octopus)
+            FEATURESET="mon osd mgr mds rbd-mirror rgw selftest"
+            ;;
+        *)
+            FEATURESET="mon osd mgr mds rbd-mirror cephfs-mirror rgw selftest"
+            ;;
+    esac
+fi
 
-# start an rgw
-ceph auth get-or-create client.rgw."${RGW_ID}" osd 'allow rwx' mon 'allow rw' -o ${RGW_DATA}/keyring
-radosgw -n client.rgw."${RGW_ID}" -k ${RGW_DATA}/keyring
-timeout 60 sh -c 'until [ $(ceph -s | grep -c "rgw:") -eq 1 ]; do echo "waiting for rgw to show up" && sleep 1; done'
-radosgw-admin user create --uid admin --display-name "Admin User" --caps "buckets=*;users=*;usage=read;metadata=read" --access-key="$S3_ACCESS_KEY" --secret-key="$S3_SECRET_KEY"
-
-# test the setup
-ceph --version
-ceph status
-test_pool=$(uuidgen)
-temp_file=$(mktemp)
-ceph osd pool create ${test_pool} 0
-rados --pool ${test_pool} put group /etc/group
-rados --pool ${test_pool} get group ${temp_file}
-diff /etc/group ${temp_file}
-ceph osd pool delete ${test_pool} ${test_pool} --yes-i-really-really-mean-it
-rm ${temp_file}
+generate_ceph_conf
+for fname in ${FEATURESET}; do
+    case "${fname}" in
+        mon) launch_mon ;;
+        osd) launch_osd ;;
+        mds) launch_mds ;;
+        mgr) launch_mgr ;;
+        rbd-mirror) launch_rbd_mirror ;;
+        cephfs-mirror) launch_cephfs_mirror ;;
+        rgw|radosgw) launch_radosgw ;;
+        selftest) selftest ;;
+        *)
+            echo "Invalid feature: ${fname}"
+            exit 2
+            ;;
+    esac
+done
 
 touch ${DIR}/.ready
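With this refactor the set of services the script brings up is selectable at run time through the CEPH_FEATURESET environment variable, falling back to a version-appropriate default when it is unset. A minimal usage sketch (the feature names come from the dispatch loop above; the data directory argument and path are assumptions for illustration):

    # default feature set, derived from CEPH_VERSION
    ./micro-osd.sh /tmp/ceph

    # override: bring up only the core daemons and run the smoke test,
    # skipping cephfs, the mirror daemons, and rgw
    CEPH_FEATURESET="mon osd mgr selftest" ./micro-osd.sh /tmp/ceph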