ceph/qa/tasks/ceph.conf.template
Deepika Upadhyay 21508bd9dd mon/OSDMonitor: add flag --yes-i-really-mean-it for setting pool size 1
Adds option `mon_allow_pool_size_one` which will be disabled by default
to ensure pools are not configured without replicas.
If the user still wants to use pool size 1, they must set
`mon_allow_pool_size_one` to true and also pass the flag
`--yes-i-really-mean-it` to the CLI command:

Example:
`ceph osd pool set test size 1 --yes-i-really-mean-it`

Fixes: https://tracker.ceph.com/issues/44025
Signed-off-by: Deepika Upadhyay <dupadhya@redhat.com>
2020-03-09 23:27:36 +05:30

99 lines
2.4 KiB
Plaintext

# Defaults applied to every daemon and client in the test cluster.
# NOTE: Ceph's config parser treats spaces, underscores and dashes in
# option names as equivalent, so the mixed spellings below are intentional
# and harmless.
[global]
chdir = ""
pid file = /var/run/ceph/$cluster-$name.pid
auth supported = cephx
filestore xattr use omap = true
# tests tolerate more clock skew than a production cluster would
mon clock drift allowed = 1.000
# failure domain = osd, so tiny single-host test clusters satisfy CRUSH rules
osd crush chooseleaf type = 0
auth debug = true
# abort instead of silently dropping, so messenger bugs surface in QA runs
ms die on old message = true
ms die on bug = true
mon max pg per osd = 10000 # >= luminous
# 0 disables the object-skew warning; test pools are tiny and skew is expected
mon pg warn max object skew = 0
# disable pg_autoscaler by default for new pools
osd_pool_default_pg_autoscale_mode = off
osd pool default size = 2
mon osd allow primary affinity = true
mon osd allow pg remap = true
# suppress health warnings that legacy/odd test configurations would trigger
mon warn on legacy crush tunables = false
mon warn on crush straw calc version zero = false
mon warn on no sortbitwise = false
mon warn on osd down out interval zero = false
mon warn on too few osds = false
mon_warn_on_pool_pg_num_not_power_of_two = false
mon_warn_on_pool_no_redundancy = false
# permit size-1 pools in tests; the CLI still needs --yes-i-really-mean-it
mon_allow_pool_size_one = true
osd pool default erasure code profile = "plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd crush-failure-domain=osd"
osd default data pool replay window = 5
# tests create and delete pools freely
mon allow pool delete = true
mon cluster log file level = debug
debug asserts on shutdown = true
# OSD daemon settings: small journal/intervals and extra debug checks
# sized for short-lived QA clusters.
[osd]
# small journal (MB) keeps test OSDs lightweight
osd journal size = 100
osd scrub load threshold = 5.0
# short max scrub interval so scrubbing actually happens during test runs
osd scrub max interval = 600
osd recover clone overlap = true
osd recovery max chunk = 1048576
# extra self-checks that would be too expensive for production
osd debug shutdown = true
osd debug op order = true
osd debug verify stray on activate = true
osd open classes on start = true
osd debug pg log writeout = true
osd deep scrub update digest min age = 30
osd map max advance = 10
journal zero on create = true
filestore ondisk finisher threads = 3
filestore apply finisher threads = 3
# verify aio behaviour at the block-device layer
bdev debug aio = true
osd debug misdirected ops = true
# Manager daemon: verbose debug logging for test diagnostics.
[mgr]
debug ms = 1
debug mgr = 20
debug mon = 20
debug auth = 20
# low reweight thresholds so the tiny test cluster can exercise reweighting
mon reweight min pgs per osd = 4
mon reweight min bytes per osd = 10
# don't nag about telemetry opt-in during tests
mgr/telemetry/nag = false
# Monitor daemon: verbose logging plus thresholds relaxed for small clusters.
[mon]
debug ms = 1
debug mon = 20
debug paxos = 20
debug auth = 20
# warn only below 5% free mon store space (test nodes run close to full)
mon data avail warn = 5
# generous mkfs grace period (seconds) for slow test machines
mon mgr mkfs grace = 240
mon reweight min pgs per osd = 4
# report failures at the osd subtree level, matching the tiny CRUSH topology
mon osd reporter subtree level = osd
mon osd prime pg temp = true
mon reweight min bytes per osd = 10
# Client-side settings; the rgw options apply to radosgw test clients.
[client]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
# per-process log/socket paths; $pid keeps concurrent clients from colliding
log file = /var/log/ceph/$cluster-$name.$pid.log
admin socket = /var/run/ceph/$cluster-$name.$pid.asok