ci: update to luminous and dockerized setup

Signed-off-by: Noah Watkins <nwatkins@redhat.com>
This commit is contained in:
Noah Watkins 2018-07-07 10:34:56 -07:00
parent e57e53d585
commit 19a37c4565
9 changed files with 138 additions and 297 deletions

1
.gitignore vendored
View File

@ -1 +1,2 @@
.build-docker
*.swp

View File

@ -1,27 +1,18 @@
dist: trusty
sudo: required
language: go
services:
- docker
branches:
except:
- gh-pages
matrix:
include:
- env: CEPH_RELEASE=jewel
- env: CEPH_RELEASE=kraken
#matrix:
# include:
# - env: CEPH_RELEASE=jewel
# - env: CEPH_RELEASE=kraken
before_install:
- sudo apt-get update
- ci/before_install.sh
- bash ci/micro-osd.sh /tmp/micro-ceph
- export CEPH_CONF=/tmp/micro-ceph/ceph.conf
- ceph status
- docker build -t ceph-golang-ci .
script:
- go get -t -v ./...
- go list ./...
- go test -v $(go list ./... | grep -v cephfs)
- go fmt ./...
- go vet ./...
- docker run --rm -it -v ${PWD}:/go/src/github.com/ceph/go-ceph:z ceph-golang-ci

View File

@ -1,26 +1,26 @@
FROM golang:1.7.1
MAINTAINER Abhishek Lekshmanan "abhishek.lekshmanan@gmail.com"
FROM ubuntu:xenial
ENV CEPH_VERSION jewel
RUN apt-get update && apt-get install -y \
apt-transport-https \
git \
golang-go \
software-properties-common \
uuid-runtime \
wget
RUN echo deb http://download.ceph.com/debian-$CEPH_VERSION/ jessie main | tee /etc/apt/sources.list.d/ceph-$CEPH_VERSION.list
RUN wget -q -O- 'https://download.ceph.com/keys/release.asc' | apt-key add -
RUN apt-add-repository 'deb https://download.ceph.com/debian-luminous/ xenial main'
# Running wget with no certificate checks, alternatively ssl-cert package should be installed
RUN wget --no-check-certificate -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' | apt-key add - \
&& apt-get update \
&& apt-get install -y --no-install-recommends \
ceph \
ceph-mds \
librados-dev \
librbd-dev \
libcephfs-dev \
uuid-runtime \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get clean
RUN apt-get update && apt-get install -y \
ceph \
libcephfs-dev \
librados-dev \
librbd-dev
ENV GOPATH /go
WORKDIR /go/src/github.com/ceph/go-ceph
VOLUME /go/src/github.com/ceph/go-ceph
COPY ./ci/entrypoint.sh /tmp/entrypoint.sh
ENTRYPOINT ["/tmp/entrypoint.sh", "/tmp/micro-ceph"]
COPY micro-osd.sh /
COPY entrypoint.sh /
ENTRYPOINT /entrypoint.sh

View File

@ -89,6 +89,16 @@ delete a pool with the given name. The following will delete the pool named
conn.DeletePool("new_pool")
```
# Development
```
docker run --rm -it --net=host \
-v ${PWD}:/go/src/github.com/ceph/go-ceph:z \
-v /home/nwatkins/src/ceph/build:/home/nwatkins/src/ceph/build:z \
-e CEPH_CONF=/home/nwatkins/src/ceph/build/ceph.conf \
ceph-golang
```
## Contributing
Contributions are welcome & greatly appreciated, every little bit helps. Make code changes via Github pull requests:

View File

@ -1,27 +0,0 @@
#!/bin/bash
set -e
set -x
sudo apt-get install -y python-virtualenv
# ceph-deploy and ceph
WORKDIR=$HOME/workdir
mkdir $WORKDIR
pushd $WORKDIR
ssh-keygen -f $HOME/.ssh/id_rsa -t rsa -N ''
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
git clone git://github.com/ceph/ceph-deploy
pushd ceph-deploy
./bootstrap
./ceph-deploy install --release ${CEPH_RELEASE} `hostname`
./ceph-deploy pkg --install librados-dev `hostname`
./ceph-deploy pkg --install librbd-dev `hostname`
./ceph-deploy pkg --install libcephfs-dev `hostname`
popd # ceph-deploy
popd # workdir

View File

@ -1,114 +0,0 @@
#!/bin/bash
#
# Copyright (C) 2013,2014 Loic Dachary <loic@dachary.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
set -e
set -u
DIR=$1
#if ! dpkg -l ceph ; then
# wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' | sudo apt-key add -
# echo deb http://ceph.com/debian-dumpling/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
# sudo apt-get update
# sudo apt-get --yes install ceph ceph-common
#fi
# get rid of process and directories leftovers
pkill ceph-mon || true
pkill ceph-osd || true
rm -fr $DIR
# cluster wide parameters
mkdir -p ${DIR}/log
cat >> $DIR/ceph.conf <<EOF
[global]
fsid = $(uuidgen)
osd crush chooseleaf type = 0
run dir = ${DIR}/run
auth cluster required = none
auth service required = none
auth client required = none
osd pool default size = 1
EOF
export CEPH_ARGS="--conf ${DIR}/ceph.conf"
# single monitor
MON_DATA=${DIR}/mon
mkdir -p $MON_DATA
cat >> $DIR/ceph.conf <<EOF
[mon.0]
log file = ${DIR}/log/mon.log
chdir = ""
mon cluster log file = ${DIR}/log/mon-cluster.log
mon data = ${MON_DATA}
mon addr = 127.0.0.1
EOF
ceph-mon --id 0 --mkfs --keyring /dev/null
touch ${MON_DATA}/keyring
ceph-mon --id 0
# single osd
OSD_DATA=${DIR}/osd
mkdir ${OSD_DATA}
cat >> $DIR/ceph.conf <<EOF
[osd.0]
log file = ${DIR}/log/osd.log
chdir = ""
osd data = ${OSD_DATA}
osd journal = ${OSD_DATA}.journal
osd journal size = 100
osd objectstore = memstore
EOF
OSD_ID=$(ceph osd create)
ceph osd crush add osd.${OSD_ID} 1 root=default host=localhost
ceph-osd --id ${OSD_ID} --mkjournal --mkfs
ceph-osd --id ${OSD_ID}
# single mds
MDS_DATA=${DIR}/mds.a
mkdir ${MDS_DATA}
cat >> $DIR/ceph.conf <<EOF
[mds.a]
mds data = ${MDS_DATA}
mds log max segments = 2
mds cache size = 10000
host = localhost
EOF
ceph-authtool --create-keyring --gen-key --name=mds.a ${MDS_DATA}/keyring
ceph -i ${MDS_DATA}/keyring auth add mds.a mon 'allow profile mds' osd 'allow *' mds 'allow'
ceph osd pool create cephfs_data 8
ceph osd pool create cephfs_metadata 8
ceph fs new cephfs cephfs_metadata cephfs_data
ceph-mds -i a
# check that it works
rados --pool rbd put group /etc/group
rados --pool rbd get group ${DIR}/group
diff /etc/group ${DIR}/group
ceph osd tree
export CEPH_CONF="${DIR}/ceph.conf"
go get github.com/stretchr/testify/assert
cd /go/src/github.com/ceph/go-ceph
exec go test -v ./...

View File

@ -1,119 +0,0 @@
#
# Copyright (C) 2013,2014 Loic Dachary <loic@dachary.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
set -e
set -u
DIR=$1
#if ! dpkg -l ceph ; then
# wget -q -O- 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' | sudo apt-key add -
# echo deb http://ceph.com/debian-dumpling/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
# sudo apt-get update
# sudo apt-get --yes install ceph ceph-common
#fi
# get rid of process and directories leftovers
pkill ceph-mon || true
pkill ceph-osd || true
rm -fr $DIR
# cluster wide parameters
mkdir -p ${DIR}/log
cat >> $DIR/ceph.conf <<EOF
[global]
fsid = $(uuidgen)
osd crush chooseleaf type = 0
run dir = ${DIR}/run
auth cluster required = none
auth service required = none
auth client required = none
osd pool default size = 1
mon allow pool delete = true
EOF
export CEPH_ARGS="--conf ${DIR}/ceph.conf"
# single monitor
MON_DATA=${DIR}/mon
mkdir -p $MON_DATA
cat >> $DIR/ceph.conf <<EOF
[mon.0]
log file = ${DIR}/log/mon.log
chdir = ""
mon cluster log file = ${DIR}/log/mon-cluster.log
mon data = ${MON_DATA}
mon addr = 127.0.0.1
EOF
ceph-mon --id 0 --mkfs --keyring /dev/null
touch ${MON_DATA}/keyring
ceph-mon --id 0
# single osd
OSD_DATA=${DIR}/osd
mkdir ${OSD_DATA}
cat >> $DIR/ceph.conf <<EOF
[osd.0]
log file = ${DIR}/log/osd.log
chdir = ""
osd data = ${OSD_DATA}
osd journal = ${OSD_DATA}.journal
osd journal size = 100
osd objectstore = memstore
EOF
OSD_ID=$(ceph osd create)
ceph osd crush add osd.${OSD_ID} 1 root=default host=localhost
ceph-osd --id ${OSD_ID} --mkjournal --mkfs
ceph-osd --id ${OSD_ID}
# single mds
MDS_DATA=${DIR}/mds.a
mkdir ${MDS_DATA}
cat >> $DIR/ceph.conf <<EOF
[mds.a]
mds data = ${MDS_DATA}
mds log max segments = 2
mds cache size = 10000
host = localhost
EOF
ceph-authtool --create-keyring --gen-key --name=mds.a ${MDS_DATA}/keyring
ceph -i ${MDS_DATA}/keyring auth add mds.a mon 'allow profile mds' osd 'allow *' mds 'allow'
ceph osd pool create cephfs_data 8
ceph osd pool create cephfs_metadata 8
ceph fs new cephfs cephfs_metadata cephfs_data
ceph-mds -i a
export CEPH_CONF="${DIR}/ceph.conf"
while true; do
if ceph status | tee /dev/tty | grep -q HEALTH_OK; then
if ! ceph status | grep -q creating &> /dev/null; then
break
fi
fi
sleep 1
done
# check that it works
rados --pool rbd put group /etc/group
rados --pool rbd get group ${DIR}/group
diff /etc/group ${DIR}/group
ceph osd tree

12
entrypoint.sh Executable file
View File

@ -0,0 +1,12 @@
#!/bin/bash
# CI entrypoint: bootstrap a single-node Ceph cluster inside the container,
# then build and test the go-ceph bindings against it.
set -e

# -p: don't fail if the directory already exists (e.g. re-running the
# entrypoint in a persistent container).
mkdir -p /tmp/ceph

# Stand up the throw-away mon/osd/mgr cluster and point librados at its conf.
/micro-osd.sh /tmp/ceph
export CEPH_CONF=/tmp/ceph/ceph.conf

# Fetch package + test dependencies first so vet/list/test all see a
# complete GOPATH.
go get -t -v ./...
go vet ./...
go list ./...
# cephfs packages are excluded from the test run at this stage.
go test -v $(go list ./... | grep -v cephfs)

87
micro-osd.sh Executable file
View File

@ -0,0 +1,87 @@
#!/bin/bash
#
# Copyright (C) 2013,2014 Loic Dachary <loic@dachary.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Bootstrap a throw-away single-node Ceph cluster (1 mon, 1 memstore osd,
# 1 mgr) rooted at the directory passed as $1, then smoke-test it by
# round-tripping an object through a scratch rados pool.
set -e
set -x
set -u

# Refuse to run with a missing OR empty target dir: we `rm -rf` inside it
# below, and an empty DIR would expand that to `rm -rf /*`.
DIR=${1:?usage: micro-osd.sh <data-dir>}

# reset: kill any leftover daemons and wipe previous cluster state
pkill ceph || true
rm -rf ${DIR}/*

LOG_DIR=${DIR}/log
MON_DATA=${DIR}/mon
OSD_DATA=${DIR}/osd
mkdir ${LOG_DIR} ${MON_DATA} ${OSD_DATA}

# cluster wide parameters
cat >> ${DIR}/ceph.conf <<EOF
[global]
fsid = $(uuidgen)
osd crush chooseleaf type = 0
run dir = ${DIR}/run
auth cluster required = none
auth service required = none
auth client required = none
osd pool default size = 1
[mon.0]
log file = ${LOG_DIR}/mon.log
chdir = ""
mon cluster log file = ${LOG_DIR}/mon-cluster.log
mon data = ${MON_DATA}
mon addr = 127.0.0.1
mon allow pool delete = true
[osd.0]
log file = ${LOG_DIR}/osd.log
chdir = ""
osd data = ${OSD_DATA}
osd journal = ${OSD_DATA}.journal
osd journal size = 100
osd objectstore = memstore
osd class load list = *
osd class default list = *
EOF
export CEPH_CONF=${DIR}/ceph.conf

# start a monitor
ceph-mon --id 0 --mkfs --keyring /dev/null
touch ${MON_DATA}/keyring
ceph-mon --id 0

# start an osd
OSD_ID=$(ceph osd create)
ceph osd crush add osd.${OSD_ID} 1 root=default host=localhost
ceph-osd --id ${OSD_ID} --mkjournal --mkfs
ceph-osd --id ${OSD_ID}

# start a manager
ceph-mgr --id x

# test the setup: write /etc/group into a scratch pool, read it back,
# and verify the bytes match; clean up the pool and temp file afterwards
ceph --version
ceph status
test_pool=$(uuidgen)
temp_file=$(mktemp)
rados mkpool ${test_pool}
rados --pool ${test_pool} put group /etc/group
rados --pool ${test_pool} get group ${temp_file}
diff /etc/group ${temp_file}
rados rmpool ${test_pool} ${test_pool} --yes-i-really-really-mean-it
rm ${temp_file}