Mirror of https://github.com/ceph/ceph (synced 2025-04-07 18:17:22 +00:00)
ceph-volume: address review comments, mostly tidying, clarification
Signed-off-by: Jan Fajerski <jfajerski@suse.com>
This commit is contained in: parent fcacd0b96a, commit d0735ce1c9
@@ -20,12 +20,12 @@ are supported.
 
 Automatic sorting of disks
 --------------------------
-If ``batch`` receives only a single list of devices and the ``--no-auto`` option
-is not passed, ``ceph-volume`` will auto-sort disks by its rotational
+If ``batch`` receives only a single list of data devices and the ``--no-auto`` option
+is *not* passed (currently the default), ``ceph-volume`` will auto-sort disks by its rotational
 property and use non-rotating disks for ``block.db`` or ``journal`` depending
 on the objectstore used.
-This behavior is now DEPRECATED and will be removed in future releases. Instead
-an ``auto`` option will be introduced to retain this behavior.
+This default behavior is now DEPRECATED and will be removed in future releases. Instead
+an ``auto`` option is introduced to retain this behavior.
 It is recommended to make use of the explicit device lists for ``block.db``,
 ``block.wal`` and ``journal``.
 For example assuming :term:`bluestore` is used and ``--no-auto`` is not passed,
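For readers skimming the diff: the auto-sorting this documentation describes (and deprecates) amounts to splitting the passed devices on their rotational flag. A minimal illustrative sketch, assuming device objects expose a boolean rotational attribute as ceph-volume's Device objects do; this is not the shipped implementation, just the idea:

def sort_rotational(devices):
    # Rotating disks remain data devices; non-rotating disks are set aside
    # for block.db (bluestore) or journal (filestore) volumes.
    rotating = [d for d in devices if d.rotational]
    ssd = [d for d in devices if not d.rotational]
    return rotating, ssd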
@@ -30,13 +30,21 @@ def device_formatter(devices):
 
 def ensure_disjoint_device_lists(data, db=[], wal=[], journal=[]):
     # check that all device lists are disjoint with each other
-    if not(set(data).isdisjoint(set(db)) and
-           set(data).isdisjoint(set(wal)) and
-           set(data).isdisjoint(set(journal)) and
-           set(db).isdisjoint(set(wal))):
+    if not all([set(data).isdisjoint(set(db)),
+                set(data).isdisjoint(set(wal)),
+                set(data).isdisjoint(set(journal)),
+                set(db).isdisjoint(set(wal))]):
         raise Exception('Device lists are not disjoint')
 
 
+def separate_devices_from_lvs(devices):
+    phys = []
+    lvm = []
+    for d in devices:
+        phys.append(d) if d.is_device else lvm.append(d)
+    return phys, lvm
+
+
 def get_physical_osds(devices, args):
     '''
     Goes through passed physical devices and assigns OSDs
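The rewritten check reads the same as before but drops the trailing ``and`` operators, and the new separate_devices_from_lvs helper centralizes the physical-vs-LV split that used to be repeated further down. A rough usage sketch of both helpers, assuming the two functions from the hunk above are in scope; the namedtuple is a throwaway stand-in for ceph-volume's device objects, illustrative only:

from collections import namedtuple

# Stand-in for a ceph-volume device; only the attribute the helper reads
# (is_device) is modeled here.
FakeDev = namedtuple('FakeDev', ['path', 'is_device'])

devices = [FakeDev('/dev/sda', True), FakeDev('vg/lv0', False)]
phys, lvs = separate_devices_from_lvs(devices)
print([d.path for d in phys], [d.path for d in lvs])  # ['/dev/sda'] ['vg/lv0']

# Overlapping data and db lists trip the disjointness guard:
ensure_disjoint_device_lists(data=['/dev/sda'], db=['/dev/sda'])
# -> Exception: Device lists are not disjoint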
@@ -52,9 +60,6 @@ def get_physical_osds(devices, args):
         dev_size = dev.vg_size[0]
         abs_size = disk.Size(b=int(dev_size * rel_data_size))
         free_size = dev.vg_free[0]
-        if abs_size < 419430400:
-            mlogger.error('Data LV on {} would be too small (<400M)'.format(dev.path))
-            continue
         for _ in range(args.osds_per_device):
             if abs_size > free_size:
                 break
@@ -113,10 +118,6 @@ def get_physical_fast_allocs(devices, type_, fast_slots_per_device, new_osds, ar
         dev_size = dev.vg_size[0]
         abs_size = disk.Size(b=int(dev_size / requested_slots))
         free_size = dev.vg_free[0]
-        # if abs_size < 419430400:
-        #     mlogger.error('{} LV on {} would be too small (<400M)'.format(
-        #         type_, dev.path))
-        #     continue
         relative_size = int(abs_size) / dev_size
         if requested_size:
             if requested_size <= abs_size:
@@ -196,8 +197,16 @@ class Batch(object):
             help='Devices to provision OSDs journal volumes',
         )
         parser.add_argument(
-            '--no-auto',
+            '--auto',
             action='store_true',
+            help=('deploy multi-device OSDs if rotational and non-rotational drives '
+                  'are passed in DEVICES'),
+            default=True
+        )
+        parser.add_argument(
+            '--no-auto',
+            action='store_false',
+            dest='auto',
             help=('deploy standalone OSDs if rotational and non-rotational drives '
                   'are passed in DEVICES'),
         )
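The new pair of flags follows the common argparse pattern of two options writing to one destination: ``--auto`` is a store_true with default=True, ``--no-auto`` is a store_false on dest='auto', so downstream code only ever checks args.auto. A stripped-down, standalone sketch of just that pattern (not the actual Batch parser):

import argparse

parser = argparse.ArgumentParser()
# Both options write to the same 'auto' destination; the value stays True
# unless --no-auto flips it to False.
parser.add_argument('--auto', action='store_true', default=True)
parser.add_argument('--no-auto', action='store_false', dest='auto')

print(parser.parse_args([]).auto)             # True
print(parser.parse_args(['--no-auto']).auto)  # False
print(parser.parse_args(['--auto']).auto)     # True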
@@ -331,12 +340,6 @@ class Batch(object):
         if self.args.data_slots and self.args.osds_per_device:
             if self.args.data_slots < self.args.osds_per_device:
                 raise ValueError('data_slots is smaller then osds_per_device')
-        # TODO this needs more thought.
-        # for slot in ['block_db_slots', 'block_wal_slots', 'journal_slots']:
-        #     slot_value = getattr(self.args, slot, None)
-        #     if slot_value:
-        #         if slot_value < len(self.args.devices):
-        #             raise ValueError('{} is smaller then osds_per_device')
 
     def _sort_rotational_disks(self):
         '''
@@ -346,14 +349,11 @@ class Batch(object):
         '''
         mlogger.warning('DEPRECATION NOTICE')
         mlogger.warning('You are using the legacy automatic disk sorting behavior')
-        mlogger.warning('The Pacific release will change the default to --notauto')
+        mlogger.warning('The Pacific release will change the default to --no-auto')
         rotating = []
         ssd = []
         for d in self.args.devices:
-            if d.rotational:
-                rotating.append(d)
-            else:
-                ssd.append(d)
+            rotating.append(d) if d.rotational else ssd.append(d)
         self.args.devices = rotating
         if self.args.filestore:
             self.args.journal_devices = ssd
@@ -370,7 +370,7 @@ class Batch(object):
         if not self.args.bluestore and not self.args.filestore:
             self.args.bluestore = True
 
-        if (not self.args.no_auto and not self.args.db_devices and not
+        if (self.args.auto and not self.args.db_devices and not
                 self.args.wal_devices and not self.args.journal_devices):
             self._sort_rotational_disks()
 
@@ -428,14 +428,12 @@ class Batch(object):
                            very_fast_devices=[]):
         '''
         The methods here are mostly just organization, error reporting and
-        setting up of (default) args. The hravy lifting code for the deployment
-        layout can be found in the static get_*_osds and get_*:fast_allocs
+        setting up of (default) args. The heavy lifting code for the deployment
+        layout can be found in the static get_*_osds and get_*_fast_allocs
         functions.
         '''
         plan = []
-        phys_devs = [d for d in devices if d.is_device]
-        lvm_devs = [d.lvs[0] for d in list(set(devices) -
-                                           set(phys_devs))]
+        phys_devs, lvm_devs = separate_devices_from_lvs(devices)
         mlogger.debug(('passed data devices: {} physical,'
                        ' {} LVM').format(len(phys_devs), len(lvm_devs)))
 
@@ -487,9 +485,7 @@ class Batch(object):
         ret = []
         if not devices:
             return ret
-        phys_devs = [d for d in devices if d.is_device]
-        lvm_devs = [d.lvs[0] for d in list(set(devices) -
-                                           set(phys_devs))]
+        phys_devs, lvm_devs = separate_devices_from_lvs(devices)
         mlogger.debug(('passed {} devices: {} physical,'
                        ' {} LVM').format(type_, len(phys_devs), len(lvm_devs)))
 
@@ -297,8 +297,6 @@ class Prepare(object):
                 'ceph.osdspec_affinity': prepare_utils.get_osdspec_affinity()
             }
             if self.args.filestore:
-                #TODO: allow auto creation of journal on passed device, only works
-                # when physical device is passed, not LV
                 if not self.args.journal:
                     logger.info(('no journal was specifed, creating journal lv '
                                  'on {}').format(self.args.data))