ceph/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
Patrick Donnelly d6c66f3fa6
qa,pybind/mgr: allow disabling .mgr pool
This is mostly for testing: a lot of tests assume that there are no
existing pools. These tests relied on a config to turn off creation of the
"device_health_metrics" pool, which generally exists for any new Ceph
cluster. It would be better to make these tests tolerant of the new .mgr
pool, but clearly there are a lot of them. So just convert the config so
these tests keep working.

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
2021-06-11 19:35:17 -07:00
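
For reference, the converted opt-out is the pre-mgr-commands stanza in the
ceph task below; a minimal sketch of the same override for any other suite
that needs a pool-free cluster (surrounding task layout assumed) would be:

tasks:
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr_pool false --force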

openstack:
  - volumes: # attached to each instance
      count: 3
      size: 10 # GB
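# A single test node hosts the MON, MGR, MDS, all three OSDs, and both CephFS clients.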
roles:
- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
tasks:
- install:
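# Start the cluster with no pre-existing pools (mgr_pool false keeps the mgr
# from creating the .mgr / device_health_metrics pool) and verbose client/MDS logging.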
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr_pool false --force
    log-ignorelist:
      - overall HEALTH_
      - \(CACHE_POOL_NO_HIT_SET\)
    conf:
      global:
        osd max object name len: 460
        osd max object namespace len: 64
        debug client: 20
        debug mds: 20
        debug ms: 1
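# Create data_cache and configure it as a writeback cache tier overlaying the
# cephfs_data pool, with a bloom hit set and promotion on first read.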
- exec:
    client.0:
      - ceph osd pool create data_cache 4
      - ceph osd tier add cephfs_data data_cache
      - ceph osd tier cache-mode data_cache writeback
      - ceph osd tier set-overlay cephfs_data data_cache
      - ceph osd pool set data_cache hit_set_type bloom
      - ceph osd pool set data_cache hit_set_count 8
      - ceph osd pool set data_cache hit_set_period 3600
      - ceph osd pool set data_cache min_read_recency_for_promote 0
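# Mount CephFS via ceph-fuse ($TESTDIR/mnt.0 for client.0, $TESTDIR/mnt.1 for client.1).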
- ceph-fuse:
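# Through the client.0 mount: write a 5 MB file, truncate it to zero, rewrite it,
# keep a reference copy in /tmp, then list, flush, and evict the cache pool's objects.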
- exec:
    client.0:
      - sudo chmod 777 $TESTDIR/mnt.0/
      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
      - ls -al $TESTDIR/mnt.0/foo
      - truncate --size 0 $TESTDIR/mnt.0/foo
      - ls -al $TESTDIR/mnt.0/foo
      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
      - ls -al $TESTDIR/mnt.0/foo
      - cp $TESTDIR/mnt.0/foo /tmp/foo
      - sync
      - rados -p data_cache ls -
      - sleep 10
      - rados -p data_cache ls -
      - rados -p data_cache cache-flush-evict-all
      - rados -p data_cache ls -
      - sleep 1
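# From client.1's separate mount: confirm the file matches the reference copy,
# i.e. the truncate and rewrite are visible after the cache flush/evict.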
- exec:
    client.1:
      - hexdump -C /tmp/foo | head
      - hexdump -C $TESTDIR/mnt.1/foo | head
      - cmp $TESTDIR/mnt.1/foo /tmp/foo