#!/bin/bash
#
# Copyright (C) 2013,2014 Loic Dachary <loic@dachary.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
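# Abort on the first failing command, trace execution, and treat unset
# variables as errors.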
set -e
set -x
set -u

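# All cluster state, logs and the generated ceph.conf live under the
# directory given as the first argument.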
DIR=${1}

# reset
pkill ceph || true
rm -rf "${DIR:?}"/*
LOG_DIR="${DIR}/log"
MON_DATA="${DIR}/mon"
MDS_DATA="${DIR}/mds"
MOUNTPT="${MDS_DATA}/mnt"
OSD_DATA="${DIR}/osd"
RGW_DATA="${DIR}/radosgw"
mkdir "${LOG_DIR}" "${MON_DATA}" "${OSD_DATA}" "${MDS_DATA}" "${MOUNTPT}" "${RGW_DATA}"
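# Short, fixed IDs for the daemons and filesystems created below.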
MDS_NAME="Z"
FS_NAME="cephfs"
ALT_MDS_NAME="Y"
ALT_FS_NAME="altfs"
MON_NAME="a"
MGR_NAME="x"
MIRROR_ID="m"
RGW_ID="r"

# The following are example S3 credentials taken from the official AWS docs:
# https://docs.aws.amazon.com/IAM/latest/UserGuide/security-creds.html#access-keys-and-secret-access-keys
# They do not represent real/valid AWS credentials in any form and are
# used exclusively for testing the S3-compatible API of Ceph RGW.
S3_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE
S3_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY

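# Resolve this host's IPv4 address for use as the monitor address.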
HOST_IP=$(getent ahostsv4 "${HOSTNAME}" | grep STREAM | head -n 1 | awk '{print $1}')
FSID="$(uuidgen)"
export CEPH_CONF=${DIR}/ceph.conf

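# Write a minimal single-node ceph.conf: authentication disabled, a single
# data replica, and a memory-backed (memstore) OSD.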
generate_ceph_conf() {
    # cluster wide parameters
    cat >> "${CEPH_CONF}" <<EOF
[global]
fsid = ${FSID}
osd crush chooseleaf type = 0
run dir = ${DIR}/run
auth cluster required = none
auth service required = none
auth client required = none
osd pool default size = 1
mon host = ${HOSTNAME}

[mds.${MDS_NAME}]
host = ${HOSTNAME}

[mon.${MON_NAME}]
log file = ${LOG_DIR}/mon.log
chdir = ""
mon cluster log file = ${LOG_DIR}/mon-cluster.log
mon data = ${MON_DATA}
mon data avail crit = 0
mon addr = ${HOST_IP}:6789
mon allow pool delete = true

[osd.0]
log file = ${LOG_DIR}/osd.log
chdir = ""
osd data = ${OSD_DATA}
osd journal = ${OSD_DATA}.journal
osd journal size = 100
osd objectstore = memstore
osd class load list = *
osd class default list = *

[mgr.${MGR_NAME}]
log_file = ${LOG_DIR}/mgr.log

[client.rgw.${RGW_ID}]
rgw dns name = ${HOSTNAME}
rgw enable usage log = true
rgw usage log tick interval = 1
rgw usage log flush threshold = 1
rgw usage max shards = 32
rgw usage max user shards = 1
log file = ${LOG_DIR}/client.rgw.${RGW_ID}.log
rgw frontends = beast port=80
ms mon client mode = crc
EOF
}

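# Initialize the monitor's data store with an empty keyring (auth is
# disabled above) and start it.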
launch_mon() {
    ceph-mon --id ${MON_NAME} --mkfs --keyring /dev/null
    touch "${MON_DATA}/keyring"
    ceph-mon --id ${MON_NAME}
}

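# Register a new OSD id, place it in the crush map, then initialize and
# start the daemon.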
launch_osd() {
    OSD_ID=$(ceph osd create)
    ceph osd crush add "osd.${OSD_ID}" 1 root=default
    ceph-osd --id "${OSD_ID}" --mkjournal --mkfs
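    # Starting the OSD may fail transiently right after mkfs, so give it a
    # few attempts before bailing out.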
    ceph-osd --id "${OSD_ID}" || ceph-osd --id "${OSD_ID}" || ceph-osd --id "${OSD_ID}"
}

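# Create the data/metadata pools and the named filesystem, start an MDS,
# and block until it reports up:active.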
launch_mds_server() {
    local mds="$1"
    local fs="$2"

    ceph auth get-or-create "mds.${mds}" mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' >> "${MDS_DATA}/keyring"
    ceph osd pool create "${fs}_data" 8
    ceph osd pool create "${fs}_metadata" 8
    ceph fs new "${fs}" "${fs}_metadata" "${fs}_data"
    ceph fs ls
    ceph-mds -i "${mds}"
    ceph status
    while ! ceph mds stat | grep -q "up:active"; do sleep 1; done
}

launch_mds() {
    launch_mds_server "${MDS_NAME}" "${FS_NAME}"
}

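# Bring up a second filesystem and record its name on disk for anything
# that needs to find it later.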
launch_mds2() {
    launch_mds_server "${ALT_MDS_NAME}" "${ALT_FS_NAME}"
    echo "${ALT_FS_NAME}" > "${DIR}/altfs.txt"
}

launch_mgr() {
    ceph-mgr --id ${MGR_NAME}
}

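# Create credentials for and start the rbd-mirror daemon.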
launch_rbd_mirror() {
    ceph auth get-or-create client.rbd-mirror.${MIRROR_ID} mon 'profile rbd-mirror' osd 'profile rbd'
    rbd-mirror --id ${MIRROR_ID} --log-file "${LOG_DIR}/rbd-mirror.log"
}

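# Create credentials for the cephfs-mirror daemon, start it, and authorize
# a client for the remote end of the mirror.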
launch_cephfs_mirror() {
    ceph auth get-or-create "client.cephfs-mirror.${MIRROR_ID}" \
        mon 'profile cephfs-mirror' \
        mds 'allow r' \
        osd 'allow rw tag cephfs metadata=*, allow r tag cephfs data=*' \
        mgr 'allow r'
    cephfs-mirror --id "cephfs-mirror.${MIRROR_ID}" \
        --log-file "${LOG_DIR}/cephfs-mirror.log"
    ceph fs authorize cephfs client.cephfs-mirror-remote / rwps > "${DIR}/cephfs-mirror-remote.out"
    # the .out file above is not used by the scripts but can be used for debugging
}

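# Start the rados gateway, wait for it to register with the cluster, and
# create an admin user with the example S3 credentials defined above.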
launch_radosgw() {
    ceph auth get-or-create client.rgw."${RGW_ID}" osd 'allow rwx' mon 'allow rw' -o "${RGW_DATA}/keyring"
    radosgw -n client.rgw."${RGW_ID}" -k "${RGW_DATA}/keyring"
    # not going to try to make shellcheck happy with this line at this time
    # shellcheck disable=SC2016
    timeout 60 sh -c 'until [ $(ceph -s | grep -c "rgw:") -eq 1 ]; do echo "waiting for rgw to show up" && sleep 1; done'
    radosgw-admin user create --uid admin --display-name "Admin User" --caps "buckets=*;users=*;usage=read;metadata=read" --access-key="$S3_ACCESS_KEY" --secret-key="$S3_SECRET_KEY"
}

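# Grant the admin user the extra info=read capability; kept separate
# because it is only enabled for newer Ceph releases (see FEATURESET below).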
launch_radosgw2() {
    radosgw-admin caps add --uid=admin --caps="info=read"
}

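# Sanity check: round-trip a file through a scratch pool and verify it
# comes back intact.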
selftest() {
    ceph --version
    ceph status
    test_pool=$(uuidgen)
    temp_file=$(mktemp)
    ceph osd pool create "${test_pool}" 0
    rados --pool "${test_pool}" put group /etc/group
    rados --pool "${test_pool}" get group "${temp_file}"
    diff /etc/group "${temp_file}"
    ceph osd pool delete "${test_pool}" "${test_pool}" --yes-i-really-really-mean-it
    rm "${temp_file}"
}

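# Honor an explicit CEPH_FEATURESET; otherwise pick the features supported
# by the Ceph release under test.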
FEATURESET="${CEPH_FEATURESET-}"
if [ -z "$FEATURESET" ] ; then
    case "${CEPH_VERSION-}" in
        nautilus|octopus)
            FEATURESET="mon osd mgr mds rbd-mirror rgw selftest"
            ;;
        pacific)
            FEATURESET="mon osd mgr mds mds2 rbd-mirror cephfs-mirror rgw selftest"
            ;;
        *)
            FEATURESET="mon osd mgr mds mds2 rbd-mirror cephfs-mirror rgw rgw2 selftest"
            ;;
    esac
fi

generate_ceph_conf
for fname in ${FEATURESET} ; do
    case "${fname}" in
        mon) launch_mon ;;
        osd) launch_osd ;;
        mds) launch_mds ;;
        mds2) launch_mds2 ;;
        mgr) launch_mgr ;;
        rbd-mirror) launch_rbd_mirror ;;
        cephfs-mirror) launch_cephfs_mirror ;;
        rgw|radosgw) launch_radosgw ;;
        rgw2|radosgw2) launch_radosgw2 ;;
        selftest) selftest ;;
        *)
            echo "Invalid feature: ${fname}"
            exit 2
            ;;
    esac
done

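# Marker file indicating the micro cluster is fully set up; consumers of
# this script can wait for it to appear.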
touch "${DIR}/.ready"

# vim: set ts=4 sw=4 sts=4 et: