#
# Copyright (C) 2013,2014 Loic Dachary <loic@dachary.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

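# fail fast on any error (-e), trace every command (-x), and treat
# unset variables as errors (-u)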
set -e
set -x
set -u

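# the single required argument is a scratch directory that will hold all
# cluster state, logs, and data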
DIR=${1}

# reset
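# kill any leftover ceph daemons and wipe the state from a previous run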
pkill ceph || true
rm -rf ${DIR}/*
LOG_DIR=${DIR}/log
MON_DATA=${DIR}/mon
MDS_DATA=${DIR}/mds
MOUNTPT=${MDS_DATA}/mnt
OSD_DATA=${DIR}/osd
RGW_DATA=${DIR}/radosgw
mkdir ${LOG_DIR} ${MON_DATA} ${OSD_DATA} ${MDS_DATA} ${MOUNTPT} ${RGW_DATA}
MDS_NAME="Z"
MON_NAME="a"
MGR_NAME="x"
MIRROR_ID="m"
RGW_ID="r"
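# fixed, throwaway S3 credentials for the admin user created below;
# fine for a disposable test cluster, never for anything real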
S3_ACCESS_KEY=2262XNX11FZRR44XWIRD
S3_SECRET_KEY=rmtuS1Uj1bIC08QFYGW18GfSHAbkPqdsuYynNudw

# cluster wide parameters
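# single-node settings: authentication is disabled, pools keep a single
# replica, and crush places replicas across OSDs rather than hosts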
cat >> ${DIR}/ceph.conf <<EOF
[global]
fsid = $(uuidgen)
osd crush chooseleaf type = 0
run dir = ${DIR}/run
auth cluster required = none
auth service required = none
auth client required = none
osd pool default size = 1
mon host = ${HOSTNAME}

[mds.${MDS_NAME}]
host = ${HOSTNAME}

[mon.${MON_NAME}]
log file = ${LOG_DIR}/mon.log
chdir = ""
mon cluster log file = ${LOG_DIR}/mon-cluster.log
mon data = ${MON_DATA}
mon data avail crit = 0
mon addr = ${HOSTNAME}
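# required so the teardown at the end of this script can delete its test pool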
mon allow pool delete = true

[osd.0]
log file = ${LOG_DIR}/osd.log
chdir = ""
osd data = ${OSD_DATA}
osd journal = ${OSD_DATA}.journal
osd journal size = 100
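# memstore keeps all objects in RAM: no disks to prepare, nothing persists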
osd objectstore = memstore
osd class load list = *
osd class default list = *

[client.rgw.${RGW_ID}]
rgw dns name = ${HOSTNAME}
rgw enable usage log = true
rgw usage log tick interval = 1
rgw usage log flush threshold = 1
rgw usage max shards = 32
rgw usage max user shards = 1
log file = /var/log/ceph/client.rgw.${RGW_ID}.log
rgw frontends = beast port=80
EOF

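# every ceph command below picks up the config written above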
export CEPH_CONF=${DIR}/ceph.conf
# start a mon
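# with authentication disabled an empty keyring is sufficient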
ceph-mon --id ${MON_NAME} --mkfs --keyring /dev/null
touch ${MON_DATA}/keyring
ceph-mon --id ${MON_NAME}

# start an osd
OSD_ID=$(ceph osd create)
ceph osd crush add osd.${OSD_ID} 1 root=default
ceph-osd --id ${OSD_ID} --mkjournal --mkfs
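# the first start can fail transiently, so give it up to three attempts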
ceph-osd --id ${OSD_ID} || ceph-osd --id ${OSD_ID} || ceph-osd --id ${OSD_ID}

# start an mds for cephfs
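# create the data and metadata pools, declare the filesystem, then start
# the daemon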
ceph auth get-or-create mds.${MDS_NAME} mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' > ${MDS_DATA}/keyring
ceph osd pool create cephfs_data 8
ceph osd pool create cephfs_metadata 8
ceph fs new cephfs cephfs_metadata cephfs_data
ceph fs ls
ceph-mds -i ${MDS_NAME}
ceph status
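# block until the mds reports up:active, i.e. the filesystem is usable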
while [[ ! $(ceph mds stat | grep "up:active") ]]; do sleep 1; done

# start a manager
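# modern ceph requires a running mgr for a healthy cluster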
ceph-mgr --id ${MGR_NAME}

# start rbd-mirror
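# the daemon gets the recommended 'profile rbd-mirror' and 'profile rbd' caps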
ceph auth get-or-create client.rbd-mirror.${MIRROR_ID} mon 'profile rbd-mirror' osd 'profile rbd'
rbd-mirror --id ${MIRROR_ID} --log-file ${LOG_DIR}/rbd-mirror.log

# start cephfs-mirror
# Skip it on "nautilus" and "octopus", the supported ceph versions that
# do not include it. CEPH_VERSION is expected to be set in the environment.
case "${CEPH_VERSION}" in
    nautilus|octopus)
        echo "Skipping cephfs-mirror on ${CEPH_VERSION} ..."
        ;;
    *)
        ceph auth get-or-create "client.cephfs-mirror.${MIRROR_ID}" \
            mon 'profile cephfs-mirror' \
            mds 'allow r' \
            osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' \
            mgr 'allow r'
        cephfs-mirror --id "cephfs-mirror.${MIRROR_ID}" \
            --log-file "${LOG_DIR}/cephfs-mirror.log"
        ceph fs authorize cephfs client.cephfs-mirror-remote / rwps > "${DIR}/cephfs-mirror-remote.out"
        # the .out file above is not used by the scripts but can be used for debugging
        ;;
esac

# start an rgw
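# serve S3 on port 80, as configured by 'rgw frontends' above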
ceph auth get-or-create client.rgw."${RGW_ID}" osd 'allow rwx' mon 'allow rw' -o ${RGW_DATA}/keyring
radosgw -n client.rgw."${RGW_ID}" -k ${RGW_DATA}/keyring
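# give the rgw up to 60 seconds to register with the cluster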
timeout 60 sh -c 'until [ $(ceph -s | grep -c "rgw:") -eq 1 ]; do echo "waiting for rgw to show up" && sleep 1; done'
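# create an admin S3 user with the fixed keys defined at the top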
radosgw-admin user create --uid admin --display-name "Admin User" --caps "buckets=*;users=*;usage=read;metadata=read" --access-key="$S3_ACCESS_KEY" --secret-key="$S3_SECRET_KEY"

# test the setup
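# smoke test: round-trip a file through a throwaway pool and compare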
ceph --version
ceph status
test_pool=$(uuidgen)
temp_file=$(mktemp)
ceph osd pool create ${test_pool} 0
rados --pool ${test_pool} put group /etc/group
rados --pool ${test_pool} get group ${temp_file}
diff /etc/group ${temp_file}
ceph osd pool delete ${test_pool} ${test_pool} --yes-i-really-really-mean-it
rm ${temp_file}

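# marker file signalling that setup completed (for whatever launched this script)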
touch ${DIR}/.ready

# vim: set ts=4 sw=4 sts=4 et: