Merge pull request #48162 from Matan-B/wip-matanb-c-rados-test

qa/suites/crimson-rados/thrash: Adding workloads / Simplifying thrashers

Reviewed-by: Samuel Just <sjust@redhat.com>
This commit is contained in:
Samuel Just 2022-10-06 16:35:10 -07:00 committed by GitHub
commit f567e63fd9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 678 additions and 0 deletions

View File

@ -20,6 +20,14 @@ overrides:
mon osdmap full prune txsize: 2
tasks:
- thrashosds:
timeout: 2400
dump_ops_enable: false
sighup_delay: 0
min_in: 3
noscrub_toggle_delay: 0
chance_down: 0
chance_thrash_pg_upmap: 0
reweight_osd: 0
thrash_primary_affinity: false
ceph_objectstore_tool: false
chance_inject_pause_short: 0

View File

@ -0,0 +1,13 @@
# Run radosbench, then query the client's in-flight objecter requests
# through its admin socket.
overrides:
  ceph:
    conf:
      client.0:
        admin socket: /var/run/ceph/ceph-$name.asok
tasks:
  - radosbench:
      clients: [client.0]
      time: 150
  - admin_socket:
      client.0:
        objecter_requests:
          test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"

View File

@ -0,0 +1,37 @@
# Cache-tiering agent workload: large object count over an EC-backed
# base pool with a writeback cache tier.
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
  - exec:
      client.0:
        - sudo ceph osd erasure-code-profile set myprofile crush-failure-domain=osd m=2 k=2
        - sudo ceph osd pool create base 4 4 erasure myprofile
        - sudo ceph osd pool application enable base rados
        - sudo ceph osd pool set base min_size 2
        - sudo ceph osd pool create cache 4
        - sudo ceph osd tier add base cache
        - sudo ceph osd tier cache-mode cache writeback
        - sudo ceph osd tier set-overlay base cache
        - sudo ceph osd pool set cache hit_set_type bloom
        - sudo ceph osd pool set cache hit_set_count 8
        - sudo ceph osd pool set cache hit_set_period 60
        - sudo ceph osd pool set cache target_max_objects 5000
  - rados:
      clients: [client.0]
      pools: [base]
      ops: 10000
      objects: 6600
      max_seconds: 1200
      size: 1024
      op_weights:
        read: 100
        write: 100
        delete: 50
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0

View File

@ -0,0 +1,35 @@
# Cache-tiering agent workload: small object count with recency-based
# promotion thresholds on the cache tier.
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create base 4
        - sudo ceph osd pool application enable base rados
        - sudo ceph osd pool create cache 4
        - sudo ceph osd tier add base cache
        - sudo ceph osd tier cache-mode cache writeback
        - sudo ceph osd tier set-overlay base cache
        - sudo ceph osd pool set cache hit_set_type bloom
        - sudo ceph osd pool set cache hit_set_count 8
        - sudo ceph osd pool set cache hit_set_period 60
        - sudo ceph osd pool set cache target_max_objects 250
        - sudo ceph osd pool set cache min_read_recency_for_promote 2
        - sudo ceph osd pool set cache min_write_recency_for_promote 2
  - rados:
      clients: [client.0]
      pools: [base]
      ops: 4000
      objects: 500
      op_weights:
        read: 100
        write: 100
        delete: 50
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0

View File

@ -0,0 +1,44 @@
# Pool-snapshot workload against a cache tier in readproxy mode.
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create base 4
        - sudo ceph osd pool application enable base rados
        - sudo ceph osd pool create cache 4
        - sudo ceph osd tier add base cache
        - sudo ceph osd tier cache-mode cache readproxy
        - sudo ceph osd tier set-overlay base cache
        - sudo ceph osd pool set cache hit_set_type bloom
        - sudo ceph osd pool set cache hit_set_count 8
        - sudo ceph osd pool set cache hit_set_period 3600
        - sudo ceph osd pool set cache target_max_objects 250
  - rados:
      clients: [client.0]
      pools: [base]
      ops: 4000
      objects: 500
      pool_snaps: true
      op_weights:
        read: 100
        write: 100
        delete: 50
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0
        # TODO: CEPH_OSD_OP_CACHE_FLUSH
        cache_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_TRY_FLUSH
        cache_try_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_EVICT
        cache_evict: 0
        snap_create: 50
        snap_remove: 0
        # TODO: CEPH_OSD_OP_ROLLBACK
        rollback: 0

View File

@ -0,0 +1,49 @@
# Pool-snapshot workload against a writeback cache tier with promotion
# recency thresholds disabled (promote on first access).
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create base 4
        - sudo ceph osd pool application enable base rados
        - sudo ceph osd pool create cache 4
        - sudo ceph osd tier add base cache
        - sudo ceph osd tier cache-mode cache writeback
        - sudo ceph osd tier set-overlay base cache
        - sudo ceph osd pool set cache hit_set_type bloom
        - sudo ceph osd pool set cache hit_set_count 8
        - sudo ceph osd pool set cache hit_set_period 3600
        - sudo ceph osd pool set cache target_max_objects 250
        - sudo ceph osd pool set cache min_read_recency_for_promote 0
        - sudo ceph osd pool set cache min_write_recency_for_promote 0
  - rados:
      clients: [client.0]
      pools: [base]
      ops: 4000
      objects: 500
      pool_snaps: true
      op_weights:
        read: 100
        write: 100
        delete: 50
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0
        # TODO: CEPH_OSD_OP_CACHE_FLUSH
        cache_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_TRY_FLUSH
        cache_try_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_EVICT
        cache_evict: 0
        snap_create: 50
        snap_remove: 0
        # TODO: CEPH_OSD_OP_ROLLBACK
        rollback: 0
openstack:
  - machine:
      ram: 15000 # MB

View File

@ -0,0 +1,46 @@
# Snapshot workload against a writeback cache tier, with balanced reads
# enabled in the rados task.
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create base 4
        - sudo ceph osd pool application enable base rados
        - sudo ceph osd pool create cache 4
        - sudo ceph osd tier add base cache
        - sudo ceph osd tier cache-mode cache writeback
        - sudo ceph osd tier set-overlay base cache
        - sudo ceph osd pool set cache hit_set_type bloom
        - sudo ceph osd pool set cache hit_set_count 8
        - sudo ceph osd pool set cache hit_set_period 3600
        - sudo ceph osd pool set cache target_max_objects 250
        - sudo ceph osd pool set cache min_read_recency_for_promote 2
  - rados:
      clients: [client.0]
      pools: [base]
      ops: 4000
      objects: 500
      balance_reads: true
      op_weights:
        read: 100
        write: 100
        delete: 50
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0
        # TODO: CEPH_OSD_OP_CACHE_FLUSH
        cache_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_TRY_FLUSH
        cache_try_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_EVICT
        cache_evict: 0
        snap_create: 50
        snap_remove: 0
        # TODO: CEPH_OSD_OP_ROLLBACK
        rollback: 0

View File

@ -0,0 +1,45 @@
# Snapshot workload against a writeback cache tier.
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create base 4
        - sudo ceph osd pool application enable base rados
        - sudo ceph osd pool create cache 4
        - sudo ceph osd tier add base cache
        - sudo ceph osd tier cache-mode cache writeback
        - sudo ceph osd tier set-overlay base cache
        - sudo ceph osd pool set cache hit_set_type bloom
        - sudo ceph osd pool set cache hit_set_count 8
        - sudo ceph osd pool set cache hit_set_period 3600
        - sudo ceph osd pool set cache target_max_objects 250
        - sudo ceph osd pool set cache min_read_recency_for_promote 2
  - rados:
      clients: [client.0]
      pools: [base]
      ops: 4000
      objects: 500
      op_weights:
        read: 100
        write: 100
        delete: 50
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0
        # TODO: CEPH_OSD_OP_CACHE_FLUSH
        cache_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_TRY_FLUSH
        cache_try_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_EVICT
        cache_evict: 0
        snap_create: 50
        snap_remove: 0
        # TODO: CEPH_OSD_OP_ROLLBACK
        rollback: 0

View File

@ -0,0 +1,41 @@
# Basic cache-tier workload (no target_max_objects; promotion recency
# thresholds disabled).
overrides:
  ceph:
    log-ignorelist:
      - must scrub before tier agent can activate
    conf:
      osd:
        # override short_pg_log_entries.yaml (which sets these under [global])
        osd_min_pg_log_entries: 3000
        osd_max_pg_log_entries: 3000
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create base 4
        - sudo ceph osd pool application enable base rados
        - sudo ceph osd pool create cache 4
        - sudo ceph osd tier add base cache
        - sudo ceph osd tier cache-mode cache writeback
        - sudo ceph osd tier set-overlay base cache
        - sudo ceph osd pool set cache hit_set_type bloom
        - sudo ceph osd pool set cache hit_set_count 8
        - sudo ceph osd pool set cache hit_set_period 3600
        - sudo ceph osd pool set cache min_read_recency_for_promote 0
        - sudo ceph osd pool set cache min_write_recency_for_promote 0
  - rados:
      clients: [client.0]
      pools: [base]
      ops: 4000
      objects: 500
      op_weights:
        read: 100
        write: 100
        delete: 50
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0
        # TODO: CEPH_OSD_OP_CACHE_FLUSH
        cache_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_TRY_FLUSH
        cache_try_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_EVICT
        cache_evict: 0

View File

@ -0,0 +1,24 @@
# Dedup I/O workload (fastcdc chunking) over a low tier pool, no snaps.
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create low_tier 4
  - rados:
      clients: [client.0]
      low_tier_pool: 'low_tier'
      ops: 1500
      objects: 50
      set_chunk: true
      enable_dedup: true
      dedup_chunk_size: '131072'
      dedup_chunk_algo: 'fastcdc'
      op_weights:
        read: 100
        write: 50
        # TODO: CEPH_OSD_OP_SET_CHUNK
        set_chunk: 0
        # TODO: CEPH_OSD_OP_TIER_PROMOTE
        tier_promote: 0
        # TODO: CEPH_OSD_OP_TIER_FLUSH
        tier_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_EVICT
        cache_evict: 0

View File

@ -0,0 +1,28 @@
# Dedup I/O workload (fastcdc chunking) over a low tier pool, with
# snapshot create/remove ops mixed in.
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create low_tier 4
  - rados:
      clients: [client.0]
      low_tier_pool: 'low_tier'
      ops: 1500
      objects: 50
      set_chunk: true
      enable_dedup: true
      dedup_chunk_size: '131072'
      dedup_chunk_algo: 'fastcdc'
      op_weights:
        read: 100
        write: 50
        # TODO: CEPH_OSD_OP_SET_CHUNK
        set_chunk: 0
        # TODO: CEPH_OSD_OP_TIER_PROMOTE
        tier_promote: 0
        # TODO: CEPH_OSD_OP_TIER_FLUSH
        tier_flush: 0
        # TODO: CEPH_OSD_OP_CACHE_EVICT
        cache_evict: 0
        snap_create: 10
        snap_remove: 10
        # TODO: CEPH_OSD_OP_ROLLBACK
        rollback: 0

View File

@ -0,0 +1,21 @@
# Pool-snapshot workload with a small object count.
# Fixed: top-level key was "override:" (typo) and the "ceph:" level was
# missing, so the osd conf override was silently ignored by teuthology.
overrides:
  ceph:
    conf:
      osd:
        osd deep scrub update digest min age: 0
tasks:
  - rados:
      clients: [client.0]
      ops: 4000
      objects: 50
      pool_snaps: true
      op_weights:
        read: 100
        write: 100
        delete: 50
        snap_create: 50
        snap_remove: 0
        # TODO: CEPH_OSD_OP_ROLLBACK
        rollback: 0
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0

View File

@ -0,0 +1,23 @@
# Run the librados API test suite (rados/test.sh workunit).
overrides:
  ceph:
    log-ignorelist:
      - reached quota
      - \(POOL_APP_NOT_ENABLED\)
      - \(PG_AVAILABILITY\)
    crush_tunables: jewel
    conf:
      client:
        debug ms: 1
        debug objecter: 20
        debug rados: 20
      mon:
        mon warn on pool no app: false
        debug mgrc: 20
      osd:
        osd class load list: "*"
        osd class default list: "*"
tasks:
  - workunit:
      clients:
        client.0:
          - rados/test.sh

View File

@ -0,0 +1,49 @@
# Eight sequential radosbench runs at high client concurrency.
overrides:
  ceph:
    conf:
      client.0:
        debug ms: 1
        debug objecter: 20
        debug rados: 20
tasks:
  - full_sequential:
      - radosbench:
          clients: [client.0]
          concurrency: 128
          size: 8192
          time: 90
      - radosbench:
          clients: [client.0]
          concurrency: 128
          size: 8192
          time: 90
      - radosbench:
          clients: [client.0]
          concurrency: 128
          size: 8192
          time: 90
      - radosbench:
          clients: [client.0]
          concurrency: 128
          size: 8192
          time: 90
      - radosbench:
          clients: [client.0]
          concurrency: 128
          size: 8192
          time: 90
      - radosbench:
          clients: [client.0]
          concurrency: 128
          size: 8192
          time: 90
      - radosbench:
          clients: [client.0]
          concurrency: 128
          size: 8192
          time: 90
      - radosbench:
          clients: [client.0]
          concurrency: 128
          size: 8192
          time: 90

View File

@ -0,0 +1,24 @@
# Five sequential radosbench runs with default concurrency.
overrides:
  ceph:
    conf:
      client.0:
        debug ms: 1
        debug objecter: 20
        debug rados: 20
tasks:
  - full_sequential:
      - radosbench:
          clients: [client.0]
          time: 90
      - radosbench:
          clients: [client.0]
          time: 90
      - radosbench:
          clients: [client.0]
          time: 90
      - radosbench:
          clients: [client.0]
          time: 90
      - radosbench:
          clients: [client.0]
          time: 90

View File

@ -0,0 +1,17 @@
# Mixed read/write/delete workload over a low tier pool; redirect ops
# are disabled pending crimson support.
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create low_tier 4
  - rados:
      clients: [client.0]
      low_tier_pool: 'low_tier'
      ops: 4000
      objects: 500
      # TODO: CEPH_OSD_OP_SET_REDIRECT
      set_redirect: false
      op_weights:
        read: 100
        write: 100
        delete: 50
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0

View File

@ -0,0 +1,17 @@
# Read-heavy workload over a low tier pool; redirect and tier-promote
# ops are disabled pending crimson support.
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create low_tier 4
  - rados:
      clients: [client.0]
      low_tier_pool: 'low_tier'
      ops: 4000
      objects: 500
      # TODO: CEPH_OSD_OP_SET_REDIRECT
      set_redirect: false
      op_weights:
        # TODO: CEPH_OSD_OP_SET_REDIRECT
        set_redirect: 0
        read: 50
        # TODO: CEPH_OSD_OP_TIER_PROMOTE
        tier_promote: 0

View File

@ -0,0 +1,16 @@
# Workload with set_redirect enabled at the task level; the redirect and
# tier-promote op weights themselves remain 0 pending crimson support.
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create low_tier 4
  - rados:
      clients: [client.0]
      low_tier_pool: 'low_tier'
      ops: 4000
      objects: 500
      set_redirect: true
      op_weights:
        # TODO: CEPH_OSD_OP_SET_REDIRECT
        set_redirect: 0
        read: 50
        # TODO: CEPH_OSD_OP_TIER_PROMOTE
        tier_promote: 0

View File

@ -0,0 +1,15 @@
# Workload with set_redirect enabled at the task level; redirect and
# copy_from op weights remain 0 pending crimson support.
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create low_tier 4
  - rados:
      clients: [client.0]
      low_tier_pool: 'low_tier'
      ops: 4000
      objects: 500
      set_redirect: true
      op_weights:
        # TODO: CEPH_OSD_OP_SET_REDIRECT
        set_redirect: 0
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0

View File

@ -0,0 +1,14 @@
# Chunked-object workload over a low tier pool; chunk_read and
# tier_promote weights are 0 pending crimson support.
tasks:
  - exec:
      client.0:
        - sudo ceph osd pool create low_tier 4
  - rados:
      clients: [client.0]
      low_tier_pool: 'low_tier'
      ops: 4000
      objects: 300
      set_chunk: true
      op_weights:
        chunk_read: 0
        # TODO: CEPH_OSD_OP_TIER_PROMOTE
        tier_promote: 0

View File

@ -0,0 +1,24 @@
# High-op-count small-object workload with balanced reads.
overrides:
  ceph:
    crush_tunables: jewel
tasks:
  - rados:
      clients: [client.0]
      ops: 400000
      max_seconds: 600
      max_in_flight: 64
      objects: 1024
      size: 16384
      balance_reads: true
      op_weights:
        read: 100
        write: 100
        delete: 50
        snap_create: 50
        snap_remove: 0
        # TODO: CEPH_OSD_OP_ROLLBACK
        rollback: 0
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0
        setattr: 25
        rmattr: 25

View File

@ -0,0 +1,24 @@
# High-op-count small-object workload with localized reads.
overrides:
  ceph:
    crush_tunables: jewel
tasks:
  - rados:
      clients: [client.0]
      ops: 400000
      max_seconds: 600
      max_in_flight: 64
      objects: 1024
      size: 16384
      localize_reads: true
      op_weights:
        read: 100
        write: 100
        delete: 50
        snap_create: 50
        snap_remove: 0
        # TODO: CEPH_OSD_OP_ROLLBACK
        rollback: 0
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0
        setattr: 25
        rmattr: 25

View File

@ -12,3 +12,12 @@ tasks:
op_weights:
read: 100
write: 100
delete: 50
snap_create: 0
snap_remove: 0
# TODO: CEPH_OSD_OP_ROLLBACK
rollback: 0
# TODO: CEPH_OSD_OP_COPY_FROM
copy_from: 0
setattr: 25
rmattr: 25

View File

@ -0,0 +1,16 @@
# Snapshot workload with few objects and balanced reads.
tasks:
  - rados:
      clients: [client.0]
      ops: 4000
      objects: 50
      balance_reads: true
      op_weights:
        read: 100
        write: 100
        delete: 50
        snap_create: 50
        snap_remove: 0
        # TODO: CEPH_OSD_OP_ROLLBACK
        rollback: 0
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0

View File

@ -0,0 +1,16 @@
# Snapshot workload with few objects and localized reads.
tasks:
  - rados:
      clients: [client.0]
      ops: 4000
      objects: 50
      localize_reads: true
      op_weights:
        read: 100
        write: 100
        delete: 50
        snap_create: 50
        snap_remove: 0
        # TODO: CEPH_OSD_OP_ROLLBACK
        rollback: 0
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0

View File

@ -0,0 +1,15 @@
# Snapshot workload with few objects.
tasks:
  - rados:
      clients: [client.0]
      ops: 4000
      objects: 50
      op_weights:
        read: 100
        write: 100
        delete: 50
        snap_create: 50
        snap_remove: 0
        # TODO: CEPH_OSD_OP_ROLLBACK
        rollback: 0
        # TODO: CEPH_OSD_OP_COPY_FROM
        copy_from: 0

View File

@ -0,0 +1,8 @@
# Write-only workload issuing writes with the fadvise DONTNEED hint.
tasks:
  - rados:
      clients: [client.0]
      ops: 4000
      objects: 500
      write_fadvise_dontneed: true
      op_weights:
        write: 100