mirror of
https://github.com/ceph/ceph
synced 2025-03-08 01:10:10 +00:00
I've made the following changes to simplify initial cluster configuration:
* Removed deprecated option(s).
* Removed overrides for default paths that are "not recommended to change".
* Added comments with keys' descriptions, types and defaults.
* Added references to relevant on-line docs; sorted keys according to their respective docs sections (pages).
* Use different types of comments for annotations ('#') and for keys/values (';').
* Added many useful keys.
* Commented some lines to make the sample config "neutral". The idea is to let users un-comment what they need rather than remove sample sections that they surely won't be using as is.

Signed-off-by: Dmitry Smirnov <onlyjob@member.fsf.org>
365 lines
13 KiB
Plaintext
##
# Sample ceph ceph.conf file.
##
# This file defines cluster membership, the various locations
# that Ceph stores data, and any other runtime options.

# If a 'host' is defined for a daemon, the init.d start/stop script will
# verify that it matches the hostname (or else ignore it). If it is
# not defined, it is assumed that the daemon is intended to start on
# the current host (e.g., in a setup with a startup.conf on each
# node).

## Metavariables
# $cluster ; Expands to the Ceph Storage Cluster name. Useful
#          ; when running multiple Ceph Storage Clusters
#          ; on the same hardware.
#          ; Example: /etc/ceph/$cluster.keyring
#          ; (Default: ceph)
#
# $type    ; Expands to one of mds, osd, or mon, depending on
#          ; the type of the instant daemon.
#          ; Example: /var/lib/ceph/$type
#
# $id      ; Expands to the daemon identifier. For osd.0, this
#          ; would be 0; for mds.a, it would be a.
#          ; Example: /var/lib/ceph/$type/$cluster-$id
#
# $host    ; Expands to the host name of the instant daemon.
#
# $name    ; Expands to $type.$id.
#          ; Example: /var/run/ceph/$cluster-$name.asok
[global]
### http://ceph.com/docs/master/rados/configuration/general-config-ref/

;fsid = {UUID} # use `uuidgen` to generate your own UUID
;public network = 192.168.0.0/24
;cluster network = 192.168.0.0/24

# Each running Ceph daemon has a running process identifier (PID) file.
# The PID file is generated upon start-up.
# Type: String (optional)
# (Default: N/A). The default path is /var/run/$cluster/$name.pid.
pid file = /var/run/ceph/$name.pid

# If set, when the Ceph Storage Cluster starts, Ceph sets the max open fds
# at the OS level (i.e., the max # of file descriptors).
# It helps prevents Ceph OSD Daemons from running out of file descriptors.
# Type: 64-bit Integer (optional)
# (Default: 0)
;max open files = 131072


### http://ceph.com/docs/master/rados/operations/authentication
### http://ceph.com/docs/master/rados/configuration/auth-config-ref/

# If enabled, the Ceph Storage Cluster daemons (i.e., ceph-mon, ceph-osd,
# and ceph-mds) must authenticate with each other.
# Type: String (optional); Valid settings are "cephx" or "none".
# (Default: cephx)
auth cluster required = cephx

# If enabled, the Ceph Storage Cluster daemons require Ceph Clients to
# authenticate with the Ceph Storage Cluster in order to access Ceph
# services.
# Type: String (optional); Valid settings are "cephx" or "none".
# (Default: cephx)
auth service required = cephx

# If enabled, the Ceph Client requires the Ceph Storage Cluster to
# authenticate with the Ceph Client.
# Type: String (optional); Valid settings are "cephx" or "none".
# (Default: cephx)
auth client required = cephx

# If set to true, Ceph requires signatures on all message traffic between
# the Ceph Client and the Ceph Storage Cluster, and between daemons
# comprising the Ceph Storage Cluster.
# Type: Boolean (optional)
# (Default: false)
cephx require signatures = true ; everywhere possible

# The path to the keyring file.
# Type: String (optional)
# Default: /etc/ceph/$cluster.$name.keyring,/etc/ceph/$cluster.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin
;keyring = /etc/ceph/$cluster.$name.keyring


### http://ceph.com/docs/master/rados/configuration/pool-pg-config-ref/


## Replication level, number of data copies.
# Type: 32-bit Integer
# (Default: 2)
;osd pool default size = 2

## Replication level in degraded state, less than 'osd pool default size' value.
# Sets the minimum number of written replicas for objects in the
# pool in order to acknowledge a write operation to the client. If
# minimum is not met, Ceph will not acknowledge the write to the
# client. This setting ensures a minimum number of replicas when
# operating in degraded mode.
# Type: 32-bit Integer
# (Default: 0), which means no particular minimum. If 0, minimum is size - (size / 2).
;osd pool default min size = 1

## Ensure you have a realistic number of placement groups. We recommend
## approximately 100 per OSD. E.g., total number of OSDs multiplied by 100
## divided by the number of replicas (i.e., osd pool default size). So for
## 10 OSDs and osd pool default size = 3, we'd recommend approximately
## (100 * 10) / 3 = 333

# Description: The default number of placement groups for a pool. The
#              default value is the same as pg_num with mkpool.
# Type: 32-bit Integer
# (Default: 8)
;osd pool default pg num = 100

# Description: The default number of placement groups for placement for a
#              pool. The default value is the same as pgp_num with mkpool.
#              PG and PGP should be equal (for now).
# Type: 32-bit Integer
# (Default: 8)
;osd pool default pgp num = 100

# The default CRUSH ruleset to use when creating a pool
# Type: 32-bit Integer
# (Default: 0)
;osd pool default crush rule = 0

# The bucket type to use for chooseleaf in a CRUSH rule.
# Uses ordinal rank rather than name.
# Type: 32-bit Integer
# (Default: 1) Typically a host containing one or more Ceph OSD Daemons.
;osd crush chooseleaf type = 1


### http://ceph.com/docs/bobtail/rados/configuration/log-and-debug-ref/

# Default: /var/log/ceph/$cluster-$name.log
;log file = /var/log/ceph/$cluster-$name.log

;log to syslog = true


### http://ceph.com/docs/master/rados/configuration/ms-ref/

# Enable if you want your daemons to bind to IPv6 address instead of
# IPv4 ones. (Not required if you specify a daemon or cluster IP.)
# Type: Boolean
# (Default: false)
;ms bind ipv6 = true


### http://ceph.com/docs/master/rados/configuration/filestore-config-ref/

# The maximum interval in seconds for synchronizing the filestore.
# Type: Double (optional)
# (Default: 5)
;filestore max sync interval = 5

# Use object map for XATTRS. Set to true for ext4 file systems only.
# Type: Boolean (optional)
# (Default: false)
;filestore xattr use omap = true

### http://ceph.com/docs/master/rados/configuration/journal-ref/

##################
## Monitors
## You need at least one. You need at least three if you want to
## tolerate any node failures. Always create an odd number.
[mon]
### http://ceph.com/docs/argonaut/config-ref/mon-config/
### http://ceph.com/docs/master/rados/configuration/mon-config-ref/
### http://ceph.com/docs/dumpling/rados/configuration/mon-osd-interaction/

# The IDs of initial monitors in a cluster during startup.
# If specified, Ceph requires an odd number of monitors to form an
# initial quorum (e.g., 3).
# Type: String
# (Default: None)
;mon initial members = mycephhost

;mon host = cephhost01,cephhost02
;mon addr = 192.168.0.101,192.168.0.102

# The monitor's data location
# Default: /var/lib/ceph/mon/$cluster-$id
;mon data = /var/lib/ceph/mon/$name

# The clock drift in seconds allowed between monitors.
# Type: Float
# (Default: .050)
;mon clock drift allowed = .15

# Exponential backoff for clock drift warnings
# Type: Float
# (Default: 5)
;mon clock drift warn backoff = 30 ; Tell the monitor to backoff from this warning for 30 seconds

# The percentage of disk space used before an OSD is considered full.
# Type: Float
# (Default: .95)
;mon osd full ratio = .95

# The percentage of disk space used before an OSD is considered nearfull.
# Type: Float
# (Default: .85)
;mon osd nearfull ratio = .85


### http://ceph.com/docs/next/rados/troubleshooting/log-and-debug/

# logging, for debugging monitor crashes, in order of
# their likelihood of being helpful :)
;debug ms = 1
;debug mon = 20
;debug paxos = 20
;debug auth = 20


;[mon.alpha]
; host = alpha
; mon addr = 192.168.0.10:6789

;[mon.beta]
; host = beta
; mon addr = 192.168.0.11:6789

;[mon.gamma]
; host = gamma
; mon addr = 192.168.0.12:6789

##################
## Metadata servers
# You must deploy at least one metadata server to use CephFS. There is
# experimental support for running multiple metadata servers. Do not run
# multiple metadata servers in production.
[mds]
### http://ceph.com/docs/argonaut/config-ref/mds-config/
### http://ceph.com/docs/master/cephfs/mds-config-ref/

# where the mds keeps its secret encryption keys
;keyring = /var/lib/ceph/mds/$name/keyring

; mds logging to debug issues.
;debug ms = 1
;debug mds = 20


;[mds.alpha]
; host = alpha

;[mds.beta]
; host = beta

##################
## osd
# You need at least one. Two or more if you want data to be replicated.
# Define as many as you like.
[osd]
### http://ceph.com/docs/argonaut/config-ref/osd-config/
### http://ceph.com/docs/bobtail/rados/configuration/osd-config-ref/

# The path to the OSDs data.
# You must create the directory when deploying Ceph.
# You should mount a drive for OSD data at this mount point.
# We do not recommend changing the default.
# Type: String
# Default: /var/lib/ceph/osd/$cluster-$id
;osd data = /var/lib/ceph/osd/$name

## You can change the number of recovery operations to speed up recovery
## or slow it down if your machines can't handle it

# The number of active recovery requests per OSD at one time.
# More requests will accelerate recovery, but the requests
# places an increased load on the cluster.
# Type: 32-bit Integer
# (Default: 5)
;osd recovery max active = 3


# You may add settings for mkcephfs so that it will create and mount
# the file system for you. Remove the comment `#` character for
# the following settings and replace the values in parenthesis
# with appropriate values, or leave the following settings commented
# out to accept the default values. You must specify the --mkfs
# option with mkcephfs in order for the deployment script to
# utilize the following settings, and you must define the 'devs'
# option for each osd instance; see below.

#osd mkfs type = {fs-type}
#osd mkfs options {fs-type} = {mkfs options} # default for xfs is "-f"
#osd mount options {fs-type} = {mount options} # default mount option is "rw, noatime"
;osd mkfs type = btrfs
;osd mount options btrfs = noatime,nodiratime

## Ideally, make this a separate disk or partition. A few
## hundred MB should be enough; more if you have fast or many
## disks. You can use a file under the osd data dir if need be
## (e.g. /data/$name/journal), but it will be slower than a
## separate disk or partition.
# The path to the OSD's journal. This may be a path to a file or a block
# device (such as a partition of an SSD). If it is a file, you must
# create the directory to contain it.
# We recommend using a drive separate from the osd data drive.
# Type: String
# Default: /var/lib/ceph/osd/$cluster-$id/journal
;osd journal = /var/lib/ceph/osd/$name/journal

### http://ceph.com/docs/master/rados/configuration/journal-ref/

# The size of the journal in megabytes. If this is 0,
# and the journal is a block device, the entire block device is used.
# Since v0.54, this is ignored if the journal is a block device,
# and the entire block device is used.
# Type: 32-bit Integer
# (Default: 5120)
# Recommended: Begin with 1GB. Should be at least twice the product
# of the expected speed multiplied by "filestore max sync interval".
;osd journal size = 1000 ; journal size, in megabytes

## If you want to run the journal on a tmpfs, disable DirectIO
# Enables direct i/o to the journal.
# Requires journal block align set to true.
# Type: Boolean
# Required: Yes when using aio.
# (Default: true)
;journal dio = false

# Check log files for corruption. Can be computationally expensive.
# Type: Boolean
# (Default: false)
;osd check for log corruption = true

# osd logging to debug osd issues, in order of likelihood of being helpful
;debug ms = 1
;debug osd = 20
;debug filestore = 20
;debug journal = 20



;[osd.0]
; host = delta

# If 'devs' is not specified, you're responsible for
# setting up the 'osd data' dir (e.g. `mkdir /var/lib/ceph/osd/ceph-0`)
;devs = /dev/sdx

;[osd.1]
; host = epsilon
; devs = /dev/sdy

;[osd.2]
; host = zeta
; devs = /dev/sdx

;[osd.3]
; host = eta
; devs = /dev/sdy