2013-03-01 19:35:47 +00:00
|
|
|
[global]
|
2013-02-25 23:28:07 +00:00
|
|
|
|
2020-12-15 06:02:31 +00:00
|
|
|
# By default, Ceph makes 3 replicas of RADOS objects. If you want to maintain four
|

2017-09-22 12:44:41 +00:00
|
|
|
# copies of an object--a primary copy and three replica
|

2020-12-15 06:02:31 +00:00
|
|
|
# copies--reset the default value as shown in 'osd_pool_default_size'.
|
2020-12-16 02:23:24 +00:00
|
|
|
# If you want to allow Ceph to accept an I/O operation to a degraded PG,
|
|
|
|
# set 'osd_pool_default_min_size' to a number less than the
|
2022-04-13 03:33:36 +00:00
|
|
|
# 'osd_pool_default_size' value.
|
2013-02-25 23:28:07 +00:00
|
|
|
|
2020-12-15 06:02:31 +00:00
|
|
|
osd_pool_default_size = 3 # Write an object 3 times.
|
2022-04-13 03:33:36 +00:00
|
|
|
osd_pool_default_min_size = 2 # Accept an I/O operation to a PG that has two copies of an object.
|
2014-05-22 12:08:54 +00:00
|
|
|
|
|
|
|
# Ensure you have a realistic number of placement groups. We recommend
|
2017-09-22 12:44:41 +00:00
|
|
|
# approximately 100 per OSD. E.g., total number of OSDs multiplied by 100
|
2013-02-25 23:28:07 +00:00
|
|
|
# divided by the number of replicas (i.e., 'osd_pool_default_size'). So for
|
2014-05-22 12:08:54 +00:00
|
|
|
# 10 OSDs and 'osd_pool_default_size' = 4, we'd recommend approximately
|
|
|
|
# (100 * 10) / 4 = 250.
|
2019-11-11 21:21:11 +00:00
|
|
|
# Always use the nearest power of 2.
|
2013-02-25 23:28:07 +00:00
|
|
|
|
2020-12-15 06:02:31 +00:00
|
|
|
osd_pool_default_pg_num = 256
|
|
|
|
osd_pool_default_pgp_num = 256
|