Mirror of https://github.com/ceph/ceph (synced 2024-12-30 15:33:31 +00:00)
Commit d88c834ea4
Ceph AIO (all-in-one) installation, on a single node or multiple nodes, is not friendly to loopback mounts; in particular, it regularly deadlocks during a graceful system reboot. We already have `rbdmap.service`, which is graceful-reboot friendly, as below:

    [Unit]
    After=network-online.target
    Before=remote-fs-pre.target
    Wants=network-online.target remote-fs-pre.target

    [Service]
    ExecStart=/usr/bin/rbdmap map
    ExecReload=/usr/bin/rbdmap map
    ExecStop=/usr/bin/rbdmap unmap-all

This PR introduces:

- `ceph-mon.target`: ensure startup after `network-online.target` and before `remote-fs-pre.target`
- `ceph-*.target`: ensure startup after `ceph-mon.target` and before `remote-fs-pre.target`
- `rbdmap.service`: once all `_netdev` mounts have been unmounted by `remote-fs.target`, ensure all RBD devices are unmapped BEFORE any Ceph component under `ceph.target` is stopped during shutdown

The logic is proven as a concept by <https://github.com/alvistack/ansible-role-ceph_common/tree/develop>; it also works as expected with a Ceph + Kubernetes deployment by <https://github.com/alvistack/ansible-collection-kubernetes/tree/develop>. No more deadlocks occur during graceful system reboot, for both AIO single-node and multiple-node setups with loopback mounts.

Also see:

- <https://github.com/ceph/ceph/pull/36776>
- <https://github.com/etcd-io/etcd/pull/12259>
- <https://github.com/cri-o/cri-o/pull/4128>
- <https://github.com/kubernetes/release/pull/1504>

Fixes: https://tracker.ceph.com/issues/47528

Signed-off-by: Wong Hoi Sing Edison <hswong3i@gmail.com>
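A minimal sketch of the ordering described above, assuming the stock `ceph-mon.target` / `ceph-osd.target` names (an illustration of the idea, not the exact diff of the PR):

    # ceph-mon.target (sketch)
    [Unit]
    After=network-online.target
    Before=remote-fs-pre.target
    Wants=network-online.target remote-fs-pre.target

    # ceph-osd.target and the other ceph-*.target units (sketch)
    [Unit]
    After=ceph-mon.target
    Before=remote-fs-pre.target
    Wants=ceph-mon.target remote-fs-pre.target

Because systemd stops units in the reverse of their start ordering, `Before=remote-fs-pre.target` means that `_netdev` unmounts and `rbdmap unmap-all` have completed before these targets, and the daemons grouped under them, are stopped at shutdown.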
39 lines, 1.1 KiB, SYSTEMD
[Unit]
Description=Ceph object storage daemon osd.%i
PartOf=ceph-osd.target
After=network-online.target local-fs.target time-sync.target
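# Ordering note (see the commit message above): being ordered Before=remote-fs-pre.target
# means that, on shutdown, remote (_netdev) filesystems are unmounted and RBD devices
# unmapped before this OSD is stopped, which avoids the reboot deadlock described above.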
Before=remote-fs-pre.target ceph-osd.target
Wants=network-online.target local-fs.target time-sync.target remote-fs-pre.target ceph-osd.target

[Service]
Environment=CLUSTER=ceph
EnvironmentFile=-@SYSTEMD_ENV_FILE@
ExecReload=/bin/kill -HUP $MAINPID
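# %i is the systemd template instance name, i.e. the OSD id (ceph-osd@3.service runs with --id 3).
# The @SYSTEMD_ENV_FILE@ and @CMAKE_INSTALL_FULL_LIBEXECDIR@ tokens appear to be build-time
# placeholders substituted when the unit file is generated.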
ExecStart=/usr/bin/ceph-osd -f --cluster ${CLUSTER} --id %i --setuser ceph --setgroup ceph
ExecStartPre=@CMAKE_INSTALL_FULL_LIBEXECDIR@/ceph/ceph-osd-prestart.sh --cluster ${CLUSTER} --id %i
LimitNOFILE=1048576
LimitNPROC=1048576
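# LockPersonality, MemoryDenyWriteExecute, the Protect* options and RestrictSUIDSGID below
# are systemd sandboxing/hardening settings; see systemd.exec(5).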
LockPersonality=true
MemoryDenyWriteExecute=true
# Need NewPrivileges via `sudo smartctl`
NoNewPrivileges=false
PrivateTmp=true
ProtectClock=true
ProtectControlGroups=true
ProtectHome=true
ProtectHostname=true
ProtectKernelLogs=true
ProtectKernelModules=true
# flushing filestore requires access to /proc/sys/vm/drop_caches
ProtectKernelTunables=false
ProtectSystem=full
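# Restart policy: restart on failure after 10s; if the unit fails to start more than
# 3 times within 30 minutes, systemd stops trying (StartLimitBurst/StartLimitInterval).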
Restart=on-failure
RestartSec=10
RestrictSUIDSGID=true
StartLimitBurst=3
StartLimitInterval=30min
TasksMax=infinity

[Install]
WantedBy=ceph-osd.target
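For reference (standard systemd template usage, not part of the file itself), each OSD typically runs as one instance of this template, and all instances can be driven together through the target they are part of:

    systemctl enable --now ceph-osd@0.service   # start OSD id 0 and enable it at boot
    systemctl status ceph-osd@0.service
    systemctl stop ceph-osd.target              # PartOf= propagates the stop to every ceph-osd@*.service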