ceph/teuthology/ceph.conf.template
Loic Dachary 6acfa6d638 ceph_manager: update ec_pool creation parameters
As of https://github.com/ceph/ceph/pull/1477 the erasure code parameters
are controlled via the osd erasure-code-profile set command instead of
being inlined and duplicated in the crush ruleset creation and the pool
creation. There is no need to create the crush ruleset explicitly; it is
created implicitly when the pool is created.

Signed-off-by: Loic Dachary <loic@dachary.org>
2014-03-18 00:48:53 +01:00
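
In practice the new flow looks roughly like the sketch below. The profile
and pool names (testprofile, testpool) and the PG counts are made-up
illustrations, not taken from the PR; the parameters mirror the default
profile set in the template below.

    # define the erasure code parameters once, as a named profile
    ceph osd erasure-code-profile set testprofile \
        plugin=jerasure technique=reed_sol_van k=2 m=1 \
        ruleset-failure-domain=osd

    # create a pool from the profile; the crush ruleset is created
    # implicitly, so no separate crush ruleset step is needed
    ceph osd pool create testpool 12 12 erasure testprofile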

[global]
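# empty chdir keeps daemons in their start directory (cores land in the run dir)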
chdir = ""
pid file = $name.pid
auth supported = cephx
filestore xattr use omap = true
mon clock drift allowed = .250
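# place replicas at the osd level (type 0) so a single-host test cluster can satisfy the pool size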
osd crush chooseleaf type = 0
auth debug = true
ms die on old message = true
mon pg warn min per osd = 5
osd pool default size = 2
mon osd allow primary affinity = true
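# default profile for erasure pools created without an explicit profile; these are the parameters discussed in the commit message above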
osd pool default erasure code profile = "plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd"
[osd]
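# small journal and extra debug/sanity checks suitable for test clusters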
osd journal size = 100
osd scrub load threshold = 5.0
osd scrub max interval = 600
osd recover clone overlap = true
osd recovery max chunk = 1048576
osd debug op order = true
osd debug verify stray on activate = true
osd open classes on start = true
osd debug pg log writeout = true
[mon]
debug ms = 1
debug mon = 20
debug paxos = 20
mon data avail warn = 10
[mds]
lockdep = 1
mds debug scatterstat = true
mds verify scatter = true
mds debug frag = true
[client]
rgw cache enabled = true
rgw enable ops log = true
rgw enable usage log = true
log file = /var/log/ceph/ceph-$name.$pid.log
admin socket = /var/run/ceph/ceph-$name.$pid.asok