ceph/qa/tasks/ceph.conf.template
Sridhar Seshasayee 33c647e811 osd/OSDMap: Show health warning if a pool is configured with size 1
Introduce a config option called 'mon_warn_on_pool_no_redundancy' that
shows a health warning if any pool in the Ceph cluster is configured
with a size of 1. The user can mute/unmute the warning using
'ceph health mute/unmute POOL_NO_REDUNDANCY'.

Add a standalone test to verify the warning when a pool is set to
size=1. Disable the new warning in ceph.conf.template under qa/tasks
so that existing tests do not break.

Fixes: https://tracker.ceph.com/issues/41666
Signed-off-by: Sridhar Seshasayee <sseshasa@redhat.com>
2019-11-11 10:36:35 +05:30
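
For reference, a minimal sketch of exercising the new warning on a test
cluster (the pool name 'size1pool' is hypothetical, and the flags that
setting size=1 requires vary by release):

    ceph osd pool create size1pool 8
    ceph osd pool set size1pool size 1       # raises POOL_NO_REDUNDANCY
    ceph health detail                       # lists the affected pool
    ceph health mute POOL_NO_REDUNDANCY      # silence the warning
    ceph health unmute POOL_NO_REDUNDANCY    # restore it
    # or suppress it via config, as this template does:
    ceph config set mon mon_warn_on_pool_no_redundancy false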

[global]
chdir = ""
pid file = /var/run/ceph/$cluster-$name.pid
auth supported = cephx
filestore xattr use omap = true
mon clock drift allowed = 1.000
osd crush chooseleaf type = 0
auth debug = true
ms die on old message = true
ms die on bug = true
mon max pg per osd = 10000 # >= luminous
mon pg warn max object skew = 0
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
osd pool default size = 2
mon osd allow primary affinity = true
mon osd allow pg remap = true
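# suppress health warnings that qa tests trigger intentionally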
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
mon_warn_on_pool_no_redundancy = false
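# k=2 data chunks + m=1 coding chunk: tolerates one OSD failure at 1.5x
# storage overhead; ruleset-failure-domain (pre-luminous) and
# crush-failure-domain (luminous and later) are both set for compatibility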
osd pool default erasure code profile = "plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd crush-failure-domain=osd"
osd default data pool replay window = 5
mon allow pool delete = true
mon cluster log file level = debug
debug asserts on shutdown = true
[osd]
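# small journal (100 MB) keeps test clusters lightweight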
osd journal size = 100
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd open classes on start = true
osd debug pg log writeout = true
osd deep scrub update digest min age = 30
osd map max advance = 10
journal zero on create = true
filestore ondisk finisher threads = 3
filestore apply finisher threads = 3
bdev debug aio = true
osd debug misdirected ops = true
[mgr]
debug ms = 1
debug mgr = 20
debug mon = 20
debug auth = 20
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
mgr/telemetry/nag = false
[mon]
debug ms = 1
debug mon = 20
debug paxos = 20
debug auth = 20
mon data avail warn = 5
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
[client]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
log file = /var/log/ceph/$cluster-$name.$pid.log
admin socket = /var/run/ceph/$cluster-$name.$pid.asok