mirror of
https://github.com/ceph/ceph
synced 2024-12-28 14:34:13 +00:00
Merge pull request #24060 from alfredodeza/wip-rm35913
ceph-volume batch: allow --osds-per-device, default it to 1 Reviewed-by: Andrew Schoen <aschoen@redhat.com>
This commit is contained in:
commit
4a46abd901
@ -206,6 +206,12 @@ class Batch(object):
|
||||
action='store_true',
|
||||
help='Skip creating and enabling systemd units and starting OSD services',
|
||||
)
|
||||
parser.add_argument(
|
||||
'--osds-per-device',
|
||||
type=int,
|
||||
default=1,
|
||||
help='Provision more than 1 (the default) OSD per device',
|
||||
)
|
||||
args = parser.parse_args(self.argv)
|
||||
|
||||
if not args.devices:
|
||||
|
@ -1,6 +1,5 @@
|
||||
from __future__ import print_function
|
||||
import json
|
||||
from uuid import uuid4
|
||||
from ceph_volume.util import disk, prepare
|
||||
from ceph_volume.api import lvm
|
||||
from . import validators
|
||||
@ -16,6 +15,7 @@ class SingleType(object):
|
||||
|
||||
def __init__(self, devices, args):
|
||||
self.args = args
|
||||
self.osds_per_device = args.osds_per_device
|
||||
self.devices = devices
|
||||
# TODO: add --fast-devices and --slow-devices so these can be customized
|
||||
self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
|
||||
@ -24,13 +24,20 @@ class SingleType(object):
|
||||
self.validate()
|
||||
self.compute()
|
||||
|
||||
@property
|
||||
def total_osds(self):
|
||||
if self.hdds:
|
||||
return len(self.hdds) * self.osds_per_device
|
||||
else:
|
||||
return len(self.ssds) * self.osds_per_device
|
||||
|
||||
def report_json(self):
|
||||
print(json.dumps(self.computed, indent=4, sort_keys=True))
|
||||
|
||||
def report_pretty(self):
|
||||
string = ""
|
||||
string += templates.total_osds.format(
|
||||
total_osds=len(self.hdds) or len(self.ssds) * 2
|
||||
total_osds=self.total_osds,
|
||||
)
|
||||
string += templates.osd_component_titles
|
||||
|
||||
@ -51,7 +58,9 @@ class SingleType(object):
|
||||
met, raise an error if the provided devices would not work
|
||||
"""
|
||||
# validate minimum size for all devices
|
||||
validators.minimum_device_size(self.devices)
|
||||
validators.minimum_device_size(
|
||||
self.devices, osds_per_device=self.osds_per_device
|
||||
)
|
||||
|
||||
# make sure that data devices do not have any LVs
|
||||
validators.no_lvm_membership(self.hdds)
|
||||
@ -62,27 +71,26 @@ class SingleType(object):
|
||||
a dictionary with the result
|
||||
"""
|
||||
osds = self.computed['osds']
|
||||
vgs = self.computed['vgs']
|
||||
for device in self.hdds:
|
||||
vgs.append({'devices': [device.abspath], 'parts': 1})
|
||||
osd = {'data': {}, 'block.db': {}}
|
||||
osd['data']['path'] = device.abspath
|
||||
osd['data']['size'] = device.sys_api['size']
|
||||
osd['data']['parts'] = 1
|
||||
osd['data']['percentage'] = 100
|
||||
osd['data']['human_readable_size'] = str(disk.Size(b=device.sys_api['size']))
|
||||
osds.append(osd)
|
||||
for hdd in range(self.osds_per_device):
|
||||
osd = {'data': {}, 'block.db': {}}
|
||||
osd['data']['path'] = device.abspath
|
||||
osd['data']['size'] = device.sys_api['size'] / self.osds_per_device
|
||||
osd['data']['parts'] = self.osds_per_device
|
||||
osd['data']['percentage'] = 100 / self.osds_per_device
|
||||
osd['data']['human_readable_size'] = str(
|
||||
disk.Size(b=device.sys_api['size']) / self.osds_per_device
|
||||
)
|
||||
osds.append(osd)
|
||||
|
||||
for device in self.ssds:
|
||||
# TODO: creates 2 OSDs per device, make this configurable (env var?)
|
||||
extents = lvm.sizing(device.sys_api['size'], parts=2)
|
||||
vgs.append({'devices': [device.abspath], 'parts': 2})
|
||||
for ssd in range(2):
|
||||
extents = lvm.sizing(device.sys_api['size'], parts=self.osds_per_device)
|
||||
for ssd in range(self.osds_per_device):
|
||||
osd = {'data': {}, 'block.db': {}}
|
||||
osd['data']['path'] = device.abspath
|
||||
osd['data']['size'] = extents['sizes']
|
||||
osd['data']['parts'] = extents['parts']
|
||||
osd['data']['percentage'] = 50
|
||||
osd['data']['percentage'] = 100 / self.osds_per_device
|
||||
osd['data']['human_readable_size'] = str(disk.Size(b=extents['sizes']))
|
||||
osds.append(osd)
|
||||
|
||||
@ -123,14 +131,14 @@ class MixedType(object):
|
||||
def __init__(self, devices, args):
|
||||
self.args = args
|
||||
self.devices = devices
|
||||
self.osds_per_device = args.osds_per_device
|
||||
# TODO: add --fast-devices and --slow-devices so these can be customized
|
||||
self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
|
||||
self.ssds = [device for device in devices if device.sys_api['rotational'] == '0']
|
||||
self.computed = {'osds': []}
|
||||
self.block_db_size = prepare.get_block_db_size(lv_format=False) or disk.Size(b=0)
|
||||
self.system_vgs = lvm.VolumeGroups()
|
||||
# For every HDD we get 1 block.db
|
||||
self.dbs_needed = len(self.hdds)
|
||||
self.dbs_needed = len(self.hdds) * self.osds_per_device
|
||||
self.validate()
|
||||
self.compute()
|
||||
|
||||
@ -143,13 +151,13 @@ class MixedType(object):
|
||||
|
||||
string = ""
|
||||
string += templates.total_osds.format(
|
||||
total_osds=len(self.hdds)
|
||||
total_osds=len(self.hdds) * self.osds_per_device
|
||||
)
|
||||
|
||||
string += templates.ssd_volume_group.format(
|
||||
target='block.db',
|
||||
total_lv_size=str(self.total_available_db_space),
|
||||
total_lvs=vg_extents['parts'],
|
||||
total_lvs=vg_extents['parts'] * self.osds_per_device,
|
||||
block_lv_size=db_size,
|
||||
block_db_devices=', '.join([ssd.abspath for ssd in self.ssds]),
|
||||
lv_size=self.block_db_size or str(disk.Size(b=(vg_extents['sizes']))),
|
||||
@ -193,21 +201,24 @@ class MixedType(object):
|
||||
'human_readable_sizes': str(self.block_db_size),
|
||||
'human_readable_size': str(self.total_available_db_space),
|
||||
}
|
||||
vg_name = 'lv/vg'
|
||||
vg_name = 'vg/lv'
|
||||
else:
|
||||
vg_name = self.common_vg.name
|
||||
|
||||
for device in self.hdds:
|
||||
osd = {'data': {}, 'block.db': {}}
|
||||
osd['data']['path'] = device.abspath
|
||||
osd['data']['size'] = device.sys_api['size']
|
||||
osd['data']['percentage'] = 100
|
||||
osd['data']['human_readable_size'] = str(disk.Size(b=(device.sys_api['size'])))
|
||||
osd['block.db']['path'] = 'vg: %s' % vg_name
|
||||
osd['block.db']['size'] = int(self.block_db_size.b)
|
||||
osd['block.db']['human_readable_size'] = str(self.block_db_size)
|
||||
osd['block.db']['percentage'] = self.vg_extents['percentages']
|
||||
osds.append(osd)
|
||||
for hdd in range(self.osds_per_device):
|
||||
osd = {'data': {}, 'block.db': {}}
|
||||
osd['data']['path'] = device.abspath
|
||||
osd['data']['size'] = device.sys_api['size'] / self.osds_per_device
|
||||
osd['data']['percentage'] = 100 / self.osds_per_device
|
||||
osd['data']['human_readable_size'] = str(
|
||||
disk.Size(b=(device.sys_api['size'])) / self.osds_per_device
|
||||
)
|
||||
osd['block.db']['path'] = 'vg: %s' % vg_name
|
||||
osd['block.db']['size'] = int(self.block_db_size.b)
|
||||
osd['block.db']['human_readable_size'] = str(self.block_db_size)
|
||||
osd['block.db']['percentage'] = self.vg_extents['percentages']
|
||||
osds.append(osd)
|
||||
|
||||
def execute(self):
|
||||
"""
|
||||
@ -216,10 +227,11 @@ class MixedType(object):
|
||||
``lvm create``
|
||||
"""
|
||||
blank_ssd_paths = [d.abspath for d in self.blank_ssds]
|
||||
data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])
|
||||
|
||||
# no common vg is found, create one with all the blank SSDs
|
||||
if not self.common_vg:
|
||||
db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-dbs')
|
||||
db_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-block-dbs')
|
||||
|
||||
# if a common vg exists then extend it with any blank ssds
|
||||
elif self.common_vg and blank_ssd_paths:
|
||||
@ -236,10 +248,25 @@ class MixedType(object):
|
||||
# function that looks up this value
|
||||
block_db_size = "%sG" % self.block_db_size.gb.as_int()
|
||||
|
||||
# create the data lvs, and create the OSD with the matching block.db lvs from before
|
||||
# create 1 vg per data device first, mapping them to the device path,
|
||||
# when the lv gets created later, it can create as many as needed (or
|
||||
# even just 1)
|
||||
for osd in self.computed['osds']:
|
||||
data_vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block-db')
|
||||
data_lv = lvm.create_lv('osd-data-%s' % str(uuid4()), data_vg.name)
|
||||
vg = data_vgs.get(osd['data']['path'])
|
||||
if not vg:
|
||||
vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-block')
|
||||
data_vgs[osd['data']['path']] = vg
|
||||
|
||||
# create the data lvs, and create the OSD with an lv from the common
|
||||
# block.db vg from before
|
||||
for osd in self.computed['osds']:
|
||||
data_path = osd['data']['path']
|
||||
data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
|
||||
data_vg = data_vgs[data_path]
|
||||
data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
|
||||
data_lv = lvm.create_lv(
|
||||
'osd-block', data_vg.name, extents=data_lv_extents, uuid_name=True
|
||||
)
|
||||
db_lv = lvm.create_lv(
|
||||
'osd-block-db', db_vg.name, size=block_db_size, uuid_name=True
|
||||
)
|
||||
@ -275,7 +302,7 @@ class MixedType(object):
|
||||
those LVs would be large enough to accommodate a block.db
|
||||
"""
|
||||
# validate minimum size for all devices
|
||||
validators.minimum_device_size(self.devices)
|
||||
validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)
|
||||
|
||||
# make sure that data devices do not have any LVs
|
||||
validators.no_lvm_membership(self.hdds)
|
||||
@ -306,11 +333,13 @@ class MixedType(object):
|
||||
if self.block_db_size.gb > 0:
|
||||
try:
|
||||
self.vg_extents = lvm.sizing(
|
||||
self.total_available_db_space.b, size=self.block_db_size.b
|
||||
self.total_available_db_space.b, size=self.block_db_size.b * self.osds_per_device
|
||||
)
|
||||
except SizeAllocationError:
|
||||
msg = "Not enough space in fast devices (%s) to create a %s block.db LV"
|
||||
raise RuntimeError(msg % (self.total_available_db_space, self.block_db_size))
|
||||
msg = "Not enough space in fast devices (%s) to create %s x %s block.db LV"
|
||||
raise RuntimeError(
|
||||
msg % (self.total_available_db_space, self.osds_per_device, self.block_db_size)
|
||||
)
|
||||
else:
|
||||
self.vg_extents = lvm.sizing(
|
||||
self.total_available_db_space.b, parts=self.dbs_needed
|
||||
@ -330,8 +359,8 @@ class MixedType(object):
|
||||
|
||||
total_dbs_possible = self.total_available_db_space / self.block_db_size
|
||||
|
||||
if len(self.hdds) > total_dbs_possible:
|
||||
msg = "%s is not enough to create %s x %s block.db LVs" % (
|
||||
self.block_db_size, len(self.hdds), self.block_db_size,
|
||||
if self.dbs_needed > total_dbs_possible:
|
||||
msg = "Not enough space (%s) to create %s x %s block.db LVs" % (
|
||||
self.total_available_db_space, self.dbs_needed, self.block_db_size,
|
||||
)
|
||||
raise RuntimeError(msg)
|
||||
|
@ -16,6 +16,7 @@ class SingleType(object):
|
||||
|
||||
def __init__(self, devices, args):
|
||||
self.args = args
|
||||
self.osds_per_device = args.osds_per_device
|
||||
self.devices = devices
|
||||
self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
|
||||
self.ssds = [device for device in devices if device.sys_api['rotational'] == '0']
|
||||
@ -23,13 +24,20 @@ class SingleType(object):
|
||||
self.validate()
|
||||
self.compute()
|
||||
|
||||
@property
|
||||
def total_osds(self):
|
||||
if self.hdds:
|
||||
return len(self.hdds) * self.osds_per_device
|
||||
else:
|
||||
return len(self.ssds) * self.osds_per_device
|
||||
|
||||
def report_json(self):
|
||||
print(json.dumps(self.computed, indent=4, sort_keys=True))
|
||||
|
||||
def report_pretty(self):
|
||||
string = ""
|
||||
string += templates.total_osds.format(
|
||||
total_osds=len(self.hdds) or len(self.ssds) * 2
|
||||
total_osds=self.total_osds
|
||||
)
|
||||
string += templates.osd_component_titles
|
||||
|
||||
@ -56,7 +64,21 @@ class SingleType(object):
|
||||
met, raise an error if the provided devices would not work
|
||||
"""
|
||||
# validate minimum size for all devices
|
||||
validators.minimum_device_size(self.devices)
|
||||
validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)
|
||||
|
||||
# validate collocation
|
||||
self.journal_size = prepare.get_journal_size(lv_format=False)
|
||||
if self.hdds:
|
||||
validators.minimum_device_collocated_size(
|
||||
self.hdds, self.journal_size, osds_per_device=self.osds_per_device
|
||||
)
|
||||
else:
|
||||
validators.minimum_device_collocated_size(
|
||||
self.ssds, self.journal_size, osds_per_device=self.osds_per_device
|
||||
)
|
||||
|
||||
# make sure that data devices do not have any LVs
|
||||
validators.no_lvm_membership(self.hdds)
|
||||
|
||||
def compute(self):
|
||||
"""
|
||||
@ -66,51 +88,58 @@ class SingleType(object):
|
||||
# chose whichever is the one group we have to compute against
|
||||
devices = self.hdds or self.ssds
|
||||
osds = self.computed['osds']
|
||||
vgs = self.computed['vgs']
|
||||
for device in devices:
|
||||
device_size = disk.Size(b=device.sys_api['size'])
|
||||
journal_size = prepare.get_journal_size(lv_format=False)
|
||||
data_size = device_size - journal_size
|
||||
data_percentage = data_size * 100 / device_size
|
||||
vgs.append({'devices': [device.abspath], 'parts': 2})
|
||||
osd = {'data': {}, 'journal': {}}
|
||||
osd['data']['path'] = device.abspath
|
||||
osd['data']['size'] = data_size.b
|
||||
osd['data']['percentage'] = int(data_percentage)
|
||||
osd['data']['human_readable_size'] = str(data_size)
|
||||
osd['journal']['path'] = device.abspath
|
||||
osd['journal']['size'] = journal_size.b
|
||||
osd['journal']['percentage'] = int(100 - data_percentage)
|
||||
osd['journal']['human_readable_size'] = str(journal_size)
|
||||
osds.append(osd)
|
||||
for osd in range(self.osds_per_device):
|
||||
device_size = disk.Size(b=device.sys_api['size'])
|
||||
osd_size = device_size / self.osds_per_device
|
||||
journal_size = prepare.get_journal_size(lv_format=False)
|
||||
data_size = osd_size - journal_size
|
||||
data_percentage = data_size * 100 / device_size
|
||||
osd = {'data': {}, 'journal': {}}
|
||||
osd['data']['path'] = device.abspath
|
||||
osd['data']['size'] = data_size.b
|
||||
osd['data']['parts'] = self.osds_per_device
|
||||
osd['data']['percentage'] = int(data_percentage)
|
||||
osd['data']['human_readable_size'] = str(data_size)
|
||||
osd['journal']['path'] = device.abspath
|
||||
osd['journal']['size'] = journal_size.b
|
||||
osd['journal']['percentage'] = int(100 - data_percentage)
|
||||
osd['journal']['human_readable_size'] = str(journal_size)
|
||||
osds.append(osd)
|
||||
|
||||
def execute(self):
|
||||
"""
|
||||
Create vgs/lvs from the incoming set of devices, assign their roles
|
||||
(data, journal) and offload the OSD creation to ``lvm create``
|
||||
"""
|
||||
osd_vgs = []
|
||||
device_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])
|
||||
|
||||
# create the vgs first, one per device (since this is colocating, it
|
||||
# picks the 'data' path)
|
||||
# create 1 vg per data device first, mapping them to the device path,
|
||||
# when the lvs get created later, it can create as many as needed,
|
||||
# including the journals since it is going to be collocated
|
||||
for osd in self.computed['osds']:
|
||||
vg = lvm.create_vg(osd['data']['path'])
|
||||
osd_vgs.append(vg)
|
||||
vg = device_vgs.get(osd['data']['path'])
|
||||
if not vg:
|
||||
vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-filestore')
|
||||
device_vgs[osd['data']['path']] = vg
|
||||
|
||||
journal_size = prepare.get_journal_size()
|
||||
|
||||
# create the lvs from the vgs captured in the beginning
|
||||
for vg in osd_vgs:
|
||||
# this is called again, getting us the LVM formatted string
|
||||
journal_lv = lvm.create_lv(
|
||||
'osd-journal', vg.name, size=journal_size, uuid_name=True
|
||||
# create the lvs from the per-device vg created in the beginning
|
||||
for osd in self.computed['osds']:
|
||||
data_path = osd['data']['path']
|
||||
data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
|
||||
device_vg = device_vgs[data_path]
|
||||
data_lv_extents = device_vg.sizing(size=data_lv_size)['extents']
|
||||
journal_lv_extents = device_vg.sizing(size=self.journal_size.gb.as_int())['extents']
|
||||
data_lv = lvm.create_lv(
|
||||
'osd-data', device_vg.name, extents=data_lv_extents, uuid_name=True
|
||||
)
|
||||
journal_lv = lvm.create_lv(
|
||||
'osd-journal', device_vg.name, extents=journal_lv_extents, uuid_name=True
|
||||
)
|
||||
# no extents or size means it will use 100%FREE
|
||||
data_lv = lvm.create_lv('osd-data', vg.name)
|
||||
|
||||
command = ['--filestore', '--data']
|
||||
command.append('%s/%s' % (vg.name, data_lv.name))
|
||||
command.extend(['--journal', '%s/%s' % (vg.name, journal_lv.name)])
|
||||
command.append('%s/%s' % (device_vg.name, data_lv.name))
|
||||
command.extend(['--journal', '%s/%s' % (device_vg.name, journal_lv.name)])
|
||||
if self.args.dmcrypt:
|
||||
command.append('--dmcrypt')
|
||||
if self.args.no_systemd:
|
||||
@ -132,12 +161,13 @@ class MixedType(object):
|
||||
|
||||
def __init__(self, devices, args):
|
||||
self.args = args
|
||||
self.osds_per_device = args.osds_per_device
|
||||
self.devices = devices
|
||||
self.hdds = [device for device in devices if device.sys_api['rotational'] == '1']
|
||||
self.ssds = [device for device in devices if device.sys_api['rotational'] == '0']
|
||||
self.computed = {'osds': [], 'vg': None}
|
||||
self.blank_ssds = []
|
||||
self.journals_needed = len(self.hdds)
|
||||
self.journals_needed = len(self.hdds) * self.osds_per_device
|
||||
self.journal_size = prepare.get_journal_size(lv_format=False)
|
||||
self.system_vgs = lvm.VolumeGroups()
|
||||
self.validate()
|
||||
@ -146,10 +176,17 @@ class MixedType(object):
|
||||
def report_json(self):
|
||||
print(json.dumps(self.computed, indent=4, sort_keys=True))
|
||||
|
||||
@property
|
||||
def total_osds(self):
|
||||
if self.hdds:
|
||||
return len(self.hdds) * self.osds_per_device
|
||||
else:
|
||||
return len(self.ssds) * self.osds_per_device
|
||||
|
||||
def report_pretty(self):
|
||||
string = ""
|
||||
string += templates.total_osds.format(
|
||||
total_osds=len(self.hdds) or len(self.ssds) * 2
|
||||
total_osds=self.total_osds
|
||||
)
|
||||
|
||||
string += templates.ssd_volume_group.format(
|
||||
@ -197,7 +234,7 @@ class MixedType(object):
|
||||
met, raise an error if the provided devices would not work
|
||||
"""
|
||||
# validate minimum size for all devices
|
||||
validators.minimum_device_size(self.devices)
|
||||
validators.minimum_device_size(self.devices, osds_per_device=self.osds_per_device)
|
||||
|
||||
# make sure that data devices do not have any LVs
|
||||
validators.no_lvm_membership(self.hdds)
|
||||
@ -225,17 +262,20 @@ class MixedType(object):
|
||||
|
||||
try:
|
||||
self.vg_extents = lvm.sizing(
|
||||
self.total_available_journal_space.b, size=self.journal_size.b
|
||||
self.total_available_journal_space.b, size=self.journal_size.b * self.osds_per_device
|
||||
)
|
||||
except SizeAllocationError:
|
||||
self.vg_extents = {'parts': 0, 'percentages': 0, 'sizes': 0}
|
||||
msg = "Not enough space in fast devices (%s) to create %s x %s journal LV"
|
||||
raise RuntimeError(
|
||||
msg % (self.total_available_journal_space, self.osds_per_device, self.journal_size)
|
||||
)
|
||||
|
||||
# validate that number of journals possible are enough for number of
|
||||
# OSDs proposed
|
||||
total_journals_possible = self.total_available_journal_space / self.journal_size
|
||||
if len(self.hdds) > total_journals_possible:
|
||||
msg = "Not enough %s journals (%s) can be created for %s OSDs" % (
|
||||
self.journal_size, total_journals_possible, len(self.hdds)
|
||||
if self.osds_per_device > total_journals_possible:
|
||||
msg = "Not enough space (%s) to create %s x %s journal LVs" % (
|
||||
self.total_available_journal_space, self.journals_needed, self.journal_size
|
||||
)
|
||||
raise RuntimeError(msg)
|
||||
|
||||
@ -264,42 +304,57 @@ class MixedType(object):
|
||||
vg_name = self.common_vg.name
|
||||
|
||||
for device in self.hdds:
|
||||
device_size = disk.Size(b=device.sys_api['size'])
|
||||
data_size = device_size - self.journal_size
|
||||
osd = {'data': {}, 'journal': {}}
|
||||
osd['data']['path'] = device.path
|
||||
osd['data']['size'] = data_size.b
|
||||
osd['data']['percentage'] = 100
|
||||
osd['data']['human_readable_size'] = str(device_size)
|
||||
osd['journal']['path'] = 'vg: %s' % vg_name
|
||||
osd['journal']['size'] = self.journal_size.b
|
||||
osd['journal']['percentage'] = int(self.journal_size.gb * 100 / vg_free)
|
||||
osd['journal']['human_readable_size'] = str(self.journal_size)
|
||||
osds.append(osd)
|
||||
for osd in range(self.osds_per_device):
|
||||
device_size = disk.Size(b=device.sys_api['size'])
|
||||
data_size = device_size / self.osds_per_device
|
||||
osd = {'data': {}, 'journal': {}}
|
||||
osd['data']['path'] = device.path
|
||||
osd['data']['size'] = data_size.b
|
||||
osd['data']['percentage'] = 100 / self.osds_per_device
|
||||
osd['data']['human_readable_size'] = str(data_size)
|
||||
osd['journal']['path'] = 'vg: %s' % vg_name
|
||||
osd['journal']['size'] = self.journal_size.b
|
||||
osd['journal']['percentage'] = int(self.journal_size.gb * 100 / vg_free)
|
||||
osd['journal']['human_readable_size'] = str(self.journal_size)
|
||||
osds.append(osd)
|
||||
|
||||
def execute(self):
|
||||
"""
|
||||
Create vgs/lvs from the incoming set of devices, assign their roles
|
||||
(data, journal) and offload the OSD creation to ``lvm create``
|
||||
"""
|
||||
ssd_paths = [d.abspath for d in self.blank_ssds]
|
||||
blank_ssd_paths = [d.abspath for d in self.blank_ssds]
|
||||
data_vgs = dict([(osd['data']['path'], None) for osd in self.computed['osds']])
|
||||
|
||||
# no common vg is found, create one with all the blank SSDs
|
||||
if not self.common_vg:
|
||||
journal_vg = lvm.create_vg(ssd_paths, name_prefix='ceph-journals')
|
||||
journal_vg = lvm.create_vg(blank_ssd_paths, name_prefix='ceph-journals')
|
||||
# a vg exists that can be extended
|
||||
elif self.common_vg and ssd_paths:
|
||||
journal_vg = lvm.extend_vg(self.common_vg, ssd_paths)
|
||||
elif self.common_vg and blank_ssd_paths:
|
||||
journal_vg = lvm.extend_vg(self.common_vg, blank_ssd_paths)
|
||||
# one common vg with nothing else to extend can be used directly
|
||||
else:
|
||||
journal_vg = self.common_vg
|
||||
|
||||
journal_size = prepare.get_journal_size(lv_format=True)
|
||||
|
||||
# create 1 vg per data device first, mapping them to the device path,
|
||||
# when the lv gets created later, it can create as many as needed (or
|
||||
# even just 1)
|
||||
for osd in self.computed['osds']:
|
||||
data_vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
|
||||
# no extents or size means it will use 100%FREE
|
||||
data_lv = lvm.create_lv('osd-data', data_vg.name)
|
||||
vg = data_vgs.get(osd['data']['path'])
|
||||
if not vg:
|
||||
vg = lvm.create_vg(osd['data']['path'], name_prefix='ceph-data')
|
||||
data_vgs[osd['data']['path']] = vg
|
||||
|
||||
for osd in self.computed['osds']:
|
||||
data_path = osd['data']['path']
|
||||
data_lv_size = disk.Size(b=osd['data']['size']).gb.as_int()
|
||||
data_vg = data_vgs[data_path]
|
||||
data_lv_extents = data_vg.sizing(size=data_lv_size)['extents']
|
||||
data_lv = lvm.create_lv(
|
||||
'osd-data', data_vg.name, extents=data_lv_extents, uuid_name=True
|
||||
)
|
||||
journal_lv = lvm.create_lv(
|
||||
'osd-journal', journal_vg.name, size=journal_size, uuid_name=True
|
||||
)
|
||||
|
@ -2,16 +2,30 @@ from ceph_volume.util import disk
|
||||
from ceph_volume.api import lvm
|
||||
|
||||
|
||||
def minimum_device_size(devices):
|
||||
def minimum_device_size(devices, osds_per_device=1):
|
||||
"""
|
||||
Ensure that the minimum requirements for this type of scenario is
|
||||
met, raise an error if the provided devices would not work
|
||||
"""
|
||||
msg = 'Unable to use device smaller than 5GB: %s (%s)'
|
||||
msg = 'Unable to use device %s %s, LVs would be smaller than 5GB'
|
||||
for device in devices:
|
||||
device_size = disk.Size(b=device.sys_api['size'])
|
||||
if device_size < disk.Size(gb=5):
|
||||
raise RuntimeError(msg % (device, device_size))
|
||||
lv_size = device_size / osds_per_device
|
||||
if lv_size < disk.Size(gb=5):
|
||||
raise RuntimeError(msg % (device_size, device.path))
|
||||
|
||||
|
||||
def minimum_device_collocated_size(devices, journal_size, osds_per_device=1):
|
||||
"""
|
||||
Similar to ``minimum_device_size``, but take into account that the size of
|
||||
the journal affects the size left of the device
|
||||
"""
|
||||
msg = 'Unable to use device %s %s, LVs would be smaller than 5GB'
|
||||
for device in devices:
|
||||
device_size = disk.Size(b=device.sys_api['size'])
|
||||
lv_size = (device_size / osds_per_device) - journal_size
|
||||
if lv_size < disk.Size(gb=5):
|
||||
raise RuntimeError(msg % (device_size, device.path))
|
||||
|
||||
|
||||
def no_lvm_membership(devices):
|
||||
|
@ -52,6 +52,24 @@ def fake_call(monkeypatch):
|
||||
return fake_call
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def fakedevice(factory):
|
||||
def apply(**kw):
|
||||
params = dict(
|
||||
path='/dev/sda',
|
||||
abspath='/dev/sda',
|
||||
lv_api=None,
|
||||
pvs_api=[],
|
||||
disk_api={},
|
||||
sys_api={},
|
||||
exists=True,
|
||||
is_lvm_member=True,
|
||||
)
|
||||
params.update(dict(kw))
|
||||
return factory(**params)
|
||||
return apply
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def stub_call(monkeypatch):
|
||||
"""
|
||||
@ -117,6 +135,13 @@ def volume_groups(monkeypatch):
|
||||
return vgs
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def stub_vgs(monkeypatch, volume_groups):
|
||||
def apply(vgs):
|
||||
monkeypatch.setattr(lvm_api, 'get_api_vgs', lambda: vgs)
|
||||
return apply
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def pvolumes(monkeypatch):
|
||||
monkeypatch.setattr('ceph_volume.process.call', lambda x: ('', '', 0))
|
||||
|
@ -0,0 +1,139 @@
|
||||
import pytest
|
||||
from ceph_volume.devices.lvm.strategies import bluestore
|
||||
|
||||
|
||||
class TestSingleType(object):
|
||||
|
||||
def test_hdd_device_is_large_enough(self, fakedevice, factory):
|
||||
args = factory(osds_per_device=1)
|
||||
devices = [
|
||||
fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
|
||||
]
|
||||
computed_osd = bluestore.SingleType(devices, args).computed['osds'][0]
|
||||
assert computed_osd['data']['percentage'] == 100
|
||||
assert computed_osd['data']['parts'] == 1
|
||||
assert computed_osd['data']['human_readable_size'] == '5.66 GB'
|
||||
assert computed_osd['data']['path'] == '/dev/sda'
|
||||
|
||||
def test_sdd_device_is_large_enough(self, fakedevice, factory):
|
||||
args = factory(osds_per_device=1)
|
||||
devices = [
|
||||
fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
|
||||
]
|
||||
computed_osd = bluestore.SingleType(devices, args).computed['osds'][0]
|
||||
assert computed_osd['data']['percentage'] == 100
|
||||
assert computed_osd['data']['parts'] == 1
|
||||
assert computed_osd['data']['human_readable_size'] == '5.66 GB'
|
||||
assert computed_osd['data']['path'] == '/dev/sda'
|
||||
|
||||
def test_device_cannot_have_many_osds_per_device(self, fakedevice, factory):
|
||||
args = factory(osds_per_device=3)
|
||||
devices = [
|
||||
fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
|
||||
]
|
||||
with pytest.raises(RuntimeError) as error:
|
||||
bluestore.SingleType(devices, args)
|
||||
assert 'Unable to use device 5.66 GB /dev/sda' in str(error)
|
||||
|
||||
def test_device_is_lvm_member_fails(self, fakedevice, factory):
|
||||
args = factory(osds_per_device=1)
|
||||
devices = [
|
||||
fakedevice(is_lvm_member=True, sys_api=dict(rotational='1', size=6073740000))
|
||||
]
|
||||
with pytest.raises(RuntimeError) as error:
|
||||
bluestore.SingleType(devices, args)
|
||||
assert 'Unable to use device, already a member of LVM' in str(error)
|
||||
|
||||
|
||||
class TestMixedTypeConfiguredSize(object):
|
||||
# uses a block.db size that has been configured via ceph.conf, instead of
|
||||
# defaulting to 'as large as possible'
|
||||
|
||||
def test_hdd_device_is_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
|
||||
# 3GB block.db in ceph.conf
|
||||
conf_ceph(get_safe=lambda *a: 3147483640)
|
||||
args = factory(osds_per_device=1)
|
||||
ssd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
|
||||
hdd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
|
||||
devices = [ssd, hdd]
|
||||
|
||||
osd = bluestore.MixedType(devices, args).computed['osds'][0]
|
||||
assert osd['data']['percentage'] == 100
|
||||
assert osd['data']['human_readable_size'] == '5.66 GB'
|
||||
assert osd['data']['path'] == '/dev/sda'
|
||||
# a new vg will be created
|
||||
assert osd['block.db']['path'] == 'vg: vg/lv'
|
||||
assert osd['block.db']['percentage'] == 100
|
||||
|
||||
def test_ssd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
|
||||
# 7GB block.db in ceph.conf
|
||||
conf_ceph(get_safe=lambda *a: 7747483640)
|
||||
args = factory(osds_per_device=1)
|
||||
ssd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
|
||||
hdd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
|
||||
devices = [ssd, hdd]
|
||||
|
||||
with pytest.raises(RuntimeError) as error:
|
||||
bluestore.MixedType(devices, args).computed['osds'][0]
|
||||
expected = 'Not enough space in fast devices (5.66 GB) to create 1 x 7.22 GB block.db LV'
|
||||
assert expected in str(error)
|
||||
|
||||
def test_multi_hdd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
|
||||
# 3GB block.db in ceph.conf
|
||||
conf_ceph(get_safe=lambda *a: 3147483640)
|
||||
args = factory(osds_per_device=2)
|
||||
ssd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=60737400000))
|
||||
hdd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
|
||||
devices = [ssd, hdd]
|
||||
|
||||
with pytest.raises(RuntimeError) as error:
|
||||
bluestore.MixedType(devices, args)
|
||||
expected = 'Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB'
|
||||
assert expected in str(error)
|
||||
|
||||
|
||||
class TestMixedTypeLargeAsPossible(object):
    """bluestore MixedType with no configured block.db size: the block.db LV
    is made as large as possible on the fast device."""

    def test_hdd_device_is_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
        # no block.db size configured in ceph.conf
        conf_ceph(get_safe=lambda *a: None)
        args = factory(osds_per_device=1)
        ssd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
        hdd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
        devices = [ssd, hdd]

        osd = bluestore.MixedType(devices, args).computed['osds'][0]
        assert osd['data']['percentage'] == 100
        assert osd['data']['human_readable_size'] == '5.66 GB'
        assert osd['data']['path'] == '/dev/sda'
        # a new vg will be created
        assert osd['block.db']['path'] == 'vg: vg/lv'
        # as large as possible
        assert osd['block.db']['percentage'] == 100

    def test_multi_hdd_device_is_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: None)
        args = factory(osds_per_device=2)
        ssd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=60073740000))
        hdd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=60073740000))
        devices = [ssd, hdd]

        osd = bluestore.MixedType(devices, args).computed['osds'][0]
        # two OSDs per device, so each OSD gets half of everything
        assert osd['data']['percentage'] == 50
        assert osd['data']['human_readable_size'] == '27.97 GB'
        assert osd['data']['path'] == '/dev/sda'
        # a new vg will be created
        assert osd['block.db']['path'] == 'vg: vg/lv'
        # as large as possible
        assert osd['block.db']['percentage'] == 50

    def test_multi_hdd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: None)
        args = factory(osds_per_device=2)
        ssd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=60737400000))
        hdd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
        devices = [ssd, hdd]

        with pytest.raises(RuntimeError) as error:
            bluestore.MixedType(devices, args)
        expected = 'Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB'
        # assert against the raised exception, not the ExceptionInfo repr
        assert expected in str(error.value)
|
@ -0,0 +1,210 @@
|
||||
import pytest
|
||||
from ceph_volume.devices.lvm.strategies import filestore
|
||||
from ceph_volume.api import lvm
|
||||
|
||||
|
||||
class TestSingleType(object):
    """filestore SingleType strategy: data LV and journal LV collocated on
    devices of a single rotational class."""

    def test_hdd_device_is_large_enough(self, fakedevice, factory, conf_ceph):
        # 5120 MB journal configured in ceph.conf
        conf_ceph(get_safe=lambda *a: '5120')
        args = factory(osds_per_device=1)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=12073740000))
        ]
        computed_osd = filestore.SingleType(devices, args).computed['osds'][0]
        assert computed_osd['data']['percentage'] == 55
        assert computed_osd['data']['parts'] == 1
        assert computed_osd['data']['human_readable_size'] == '6.24 GB'
        assert computed_osd['data']['path'] == '/dev/sda'

    def test_hdd_device_with_large_journal(self, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: '5120')
        args = factory(osds_per_device=1)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
        ]
        with pytest.raises(RuntimeError) as error:
            filestore.SingleType(devices, args)
        msg = "Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB"
        # assert against the raised exception, not the ExceptionInfo repr
        assert msg in str(error.value)

    def test_ssd_device_is_large_enough(self, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: '5120')
        args = factory(osds_per_device=1)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=12073740000))
        ]
        computed_osd = filestore.SingleType(devices, args).computed['osds'][0]
        assert computed_osd['data']['percentage'] == 55
        assert computed_osd['data']['parts'] == 1
        assert computed_osd['data']['human_readable_size'] == '6.24 GB'
        assert computed_osd['data']['path'] == '/dev/sda'

    def test_ssd_device_with_large_journal(self, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: '5120')
        args = factory(osds_per_device=1)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
        ]
        with pytest.raises(RuntimeError) as error:
            filestore.SingleType(devices, args)
        msg = "Unable to use device 5.66 GB /dev/sda, LVs would be smaller than 5GB"
        assert msg in str(error.value)

    def test_ssd_device_multi_osd(self, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: '5120')
        args = factory(osds_per_device=4)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=16073740000))
        ]
        with pytest.raises(RuntimeError) as error:
            filestore.SingleType(devices, args)
        msg = "Unable to use device 14.97 GB /dev/sda, LVs would be smaller than 5GB"
        assert msg in str(error.value)

    def test_hdd_device_multi_osd(self, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: '5120')
        args = factory(osds_per_device=4)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=16073740000))
        ]
        with pytest.raises(RuntimeError) as error:
            filestore.SingleType(devices, args)
        msg = "Unable to use device 14.97 GB /dev/sda, LVs would be smaller than 5GB"
        assert msg in str(error.value)

    def test_device_is_lvm_member_fails(self, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: '5120')
        args = factory(osds_per_device=1)
        devices = [
            fakedevice(is_lvm_member=True, sys_api=dict(rotational='1', size=12073740000))
        ]
        with pytest.raises(RuntimeError) as error:
            filestore.SingleType(devices, args)
        assert 'Unable to use device, already a member of LVM' in str(error.value)

    def test_hdd_device_with_small_configured_journal(self, fakedevice, factory, conf_ceph):
        # 120 MB journal is below the 2GB minimum
        conf_ceph(get_safe=lambda *a: '120')
        args = factory(osds_per_device=1)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
        ]
        with pytest.raises(RuntimeError) as error:
            filestore.SingleType(devices, args)
        msg = "journal sizes must be larger than 2GB, detected: 120.00 MB"
        assert msg in str(error.value)

    def test_ssd_device_with_small_configured_journal(self, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: '120')
        args = factory(osds_per_device=1)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000))
        ]
        with pytest.raises(RuntimeError) as error:
            filestore.SingleType(devices, args)
        msg = "journal sizes must be larger than 2GB, detected: 120.00 MB"
        assert msg in str(error.value)
|
||||
|
||||
|
||||
class TestMixedType(object):
    """filestore MixedType strategy: data LVs on slow devices, journal LVs on
    fast devices (possibly sharing an existing VG)."""

    def test_minimum_size_is_not_met(self, stub_vgs, fakedevice, factory, conf_ceph):
        # 120 MB journal is below the 2GB minimum
        conf_ceph(get_safe=lambda *a: '120')
        args = factory(osds_per_device=1)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)),
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
        ]
        with pytest.raises(RuntimeError) as error:
            filestore.MixedType(devices, args)
        msg = "journal sizes must be larger than 2GB, detected: 120.00 MB"
        # assert against the raised exception, not the ExceptionInfo repr
        assert msg in str(error.value)

    def test_ssd_device_is_not_large_enough(self, stub_vgs, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: '7120')
        args = factory(osds_per_device=1)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)),
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
        ]
        with pytest.raises(RuntimeError) as error:
            filestore.MixedType(devices, args)
        msg = "Not enough space in fast devices (5.66 GB) to create 1 x 6.95 GB journal LV"
        assert msg in str(error.value)

    def test_hdd_device_is_lvm_member_fails(self, stub_vgs, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: '5120')
        args = factory(osds_per_device=1)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=6073740000)),
            fakedevice(is_lvm_member=True, sys_api=dict(rotational='1', size=6073740000))
        ]
        with pytest.raises(RuntimeError) as error:
            filestore.MixedType(devices, args)
        assert 'Unable to use device, already a member of LVM' in str(error.value)

    def test_ssd_is_lvm_member_doesnt_fail(self, volumes, stub_vgs, fakedevice, factory, conf_ceph):
        # fast PV, because ssd is an LVM member
        CephPV = lvm.PVolume(vg_name='fast', pv_name='/dev/sda', pv_tags='')
        ssd = fakedevice(
            is_lvm_member=True, sys_api=dict(rotational='0', size=6073740000), pvs_api=[CephPV]
        )
        hdd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
        # when get_api_vgs() gets called, it will return this one VG
        stub_vgs([
            dict(
                vg_free='7g', vg_name='fast', lv_name='foo',
                lv_path='/dev/vg/foo', lv_tags="ceph.type=data"
            )
        ])

        conf_ceph(get_safe=lambda *a: '5120')
        args = factory(osds_per_device=1)
        devices = [ssd, hdd]
        result = filestore.MixedType(devices, args).computed['osds'][0]
        assert result['journal']['path'] == 'vg: fast'
        assert result['journal']['percentage'] == 71
        assert result['journal']['human_readable_size'] == '5.00 GB'

    def test_no_common_vg(self, volumes, stub_vgs, fakedevice, factory, conf_ceph):
        # fast PVs on two different VGs, because both ssds are LVM members
        CephPV1 = lvm.PVolume(vg_name='fast1', pv_name='/dev/sda', pv_tags='')
        CephPV2 = lvm.PVolume(vg_name='fast2', pv_name='/dev/sdb', pv_tags='')
        ssd1 = fakedevice(
            is_lvm_member=True, sys_api=dict(rotational='0', size=6073740000), pvs_api=[CephPV1]
        )
        ssd2 = fakedevice(
            is_lvm_member=True, sys_api=dict(rotational='0', size=6073740000), pvs_api=[CephPV2]
        )
        hdd = fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=6073740000))
        # when get_api_vgs() gets called, it will return these two VGs
        stub_vgs([
            dict(
                vg_free='7g', vg_name='fast1', lv_name='foo',
                lv_path='/dev/vg/fast1', lv_tags="ceph.type=data"
            ),
            dict(
                vg_free='7g', vg_name='fast2', lv_name='foo',
                lv_path='/dev/vg/fast2', lv_tags="ceph.type=data"
            )
        ])

        conf_ceph(get_safe=lambda *a: '5120')
        args = factory(osds_per_device=1)
        devices = [ssd1, ssd2, hdd]
        with pytest.raises(RuntimeError) as error:
            filestore.MixedType(devices, args)

        assert 'Could not find a common VG between devices' in str(error.value)

    def test_ssd_device_fails_multiple_osds(self, stub_vgs, fakedevice, factory, conf_ceph):
        conf_ceph(get_safe=lambda *a: '15120')
        args = factory(osds_per_device=2)
        devices = [
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='0', size=16073740000)),
            fakedevice(is_lvm_member=False, sys_api=dict(rotational='1', size=16073740000))
        ]
        with pytest.raises(RuntimeError) as error:
            filestore.MixedType(devices, args)
        msg = "Not enough space in fast devices (14.97 GB) to create 2 x 14.77 GB journal LV"
        assert msg in str(error.value)
|
@ -0,0 +1,52 @@
|
||||
import pytest
|
||||
from ceph_volume.util import disk
|
||||
from ceph_volume.devices.lvm.strategies import validators
|
||||
|
||||
|
||||
class TestMinimumDeviceSize(object):
    """validators.minimum_device_size: each resulting LV must be >= 5GB."""

    def test_size_is_larger_than_5gb(self, fakedevice):
        devices = [fakedevice(sys_api=dict(size=6073740000))]
        # validator returns None on success, raises on failure
        assert validators.minimum_device_size(devices) is None

    def test_size_is_smaller_than_5gb(self, fakedevice):
        devices = [fakedevice(sys_api=dict(size=1073740000))]
        with pytest.raises(RuntimeError) as error:
            validators.minimum_device_size(devices)
        msg = "LVs would be smaller than 5GB"
        # assert against the raised exception, not the ExceptionInfo repr
        assert msg in str(error.value)

    def test_large_device_multiple_osds_fails(self, fakedevice):
        # a 5.66 GB device split 4 ways falls below the per-LV minimum
        devices = [fakedevice(sys_api=dict(size=6073740000))]
        with pytest.raises(RuntimeError) as error:
            validators.minimum_device_size(
                devices, osds_per_device=4
            )
        msg = "LVs would be smaller than 5GB"
        assert msg in str(error.value)
|
||||
|
||||
|
||||
class TestMinimumCollocatedDeviceSize(object):
    """validators.minimum_device_collocated_size: the device must fit the
    data LV (>= 5GB) plus the collocated journal."""

    def setup_method(self, method):
        # pytest-native per-test setup (the nose-style `setup` hook is deprecated)
        self.journal_size = disk.Size(gb=5)

    def test_size_is_larger_than_5gb_large_journal(self, fakedevice):
        devices = [fakedevice(sys_api=dict(size=6073740000))]
        # validator returns None on success, raises on failure
        assert validators.minimum_device_collocated_size(devices, disk.Size(mb=1)) is None

    def test_size_is_larger_than_5gb_large_journal_fails(self, fakedevice):
        devices = [fakedevice(sys_api=dict(size=1073740000))]
        with pytest.raises(RuntimeError) as error:
            validators.minimum_device_collocated_size(devices, self.journal_size)
        msg = "LVs would be smaller than 5GB"
        # assert against the raised exception, not the ExceptionInfo repr
        assert msg in str(error.value)

    def test_large_device_multiple_osds_fails(self, fakedevice):
        # 14.97 GB split 3 ways minus a 5GB journal each is below the minimum
        devices = [fakedevice(sys_api=dict(size=16073740000))]
        with pytest.raises(RuntimeError) as error:
            validators.minimum_device_collocated_size(
                devices, self.journal_size, osds_per_device=3
            )
        msg = "LVs would be smaller than 5GB"
        assert msg in str(error.value)
|
Loading…
Reference in New Issue
Block a user