ceph/qa/suites/upgrade/hammer-jewel-x/stress-split/1-hammer-install-and-upgrade-to-jewel/hammer-to-jewel.yaml
Sage Weil e7006d06fb qa/tasks/ceph: explicitly add osds to crush map for upgrades
Before kraken, ceph-osd didn't add itself to the crush map; ceph-osd-prestart.sh
did it, and ceph.py doesn't use that.

Signed-off-by: Sage Weil <sage@redhat.com>
2017-06-27 12:52:35 -04:00


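# hammer-to-jewel.yaml: install a hammer cluster, upgrade the packages to
# jewel, and restart the daemons while hammer workloads run in parallel.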
tasks:
- install:
    branch: hammer
    exclude_packages:
    - ceph-mgr
    - libcephfs2
    - libcephfs-devel
    - libcephfs-dev
- print: '**** done hammer'
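# Deploy the hammer cluster.  add_osds_to_crush has ceph.py add each OSD to
# the crush map itself: before kraken, ceph-osd relied on ceph-osd-prestart.sh
# for that, and ceph.py does not use the prestart script (see commit message).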
- ceph:
    fs: xfs
    skip_mgr_daemons: true
    add_osds_to_crush: true
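# Upgrade the packages on the hosts holding osd.0 and osd.3 to jewel; the
# running daemons keep executing hammer code until restarted below.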
- install.upgrade:
    exclude_packages:
    - ceph-mgr
    - libcephfs2
    - libcephfs-devel
    - libcephfs-dev
    osd.0:
      branch: jewel
    osd.3:
      branch: jewel
- print: '**** done install.upgrade osd.0 and osd.3 to jewel'
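# Run the hammer workloads and the restart sequence (both defined below)
# concurrently, exercising the cluster while it is mid-upgrade.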
- parallel:
  - workload-h-j
  - upgrade-sequence-h-j
- print: '**** done parallel'
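# With the daemons on jewel, upgrade the client.0 packages as well.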
- install.upgrade:
    client.0:
      branch: jewel
    exclude_packages:
    - ceph-mgr
    - libcephfs2
    - libcephfs-devel
    - libcephfs-dev
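# Final jewel flags: sortbitwise and require_jewel_osds can only be set once
# every OSD runs jewel, and use_gmt_hitset moves each pool's hit-set
# timestamps to GMT, a hammer-to-jewel migration step.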
- exec:
    osd.0:
    - ceph osd set sortbitwise
    - ceph osd set require_jewel_osds
    - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ;
      done
- print: '**** done install.upgrade client.0 to jewel'
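# Referenced by the parallel task above: restart every daemon on the jewel
# binaries, OSDs first, then the monitors.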
upgrade-sequence-h-j:
  sequential:
  - ceph.restart:
      daemons:
      - osd.0
      - osd.1
      - osd.2
      - osd.3
      - osd.4
      - osd.5
      wait-for-healthy: false
      wait-for-osds-up: true
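  # Health checks are skipped while daemons bounce; ceph.healthy below waits
  # for the cluster only after require_jewel_osds has been set.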
  - ceph.restart:
      daemons:
      - mon.a
      - mon.b
      - mon.c
      wait-for-healthy: false
      wait-for-osds-up: true
  - print: '**** done ceph.restart do not wait for healthy'
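  # Give the restarted daemons time to settle, then admit only jewel OSDs
  # before checking cluster health.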
  - exec:
      mon.a:
      - sleep 300
      - ceph osd set require_jewel_osds
  - ceph.healthy: null
  - print: '**** done ceph.healthy'
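# Also referenced by the parallel task: hammer-branch workunits that exercise
# cls and librbd while the daemons restart underneath them.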
workload-h-j:
  full_sequential:
  - workunit:
      branch: hammer
      clients:
        client.0:
        - cls
  - print: "**** done cls 2-workload"
  - workunit:
      branch: hammer
      clients:
        client.0:
        - rbd/test_librbd.sh
  - print: "**** done rbd/test_librbd.sh 2-workload"