Mirror of https://github.com/ceph/ceph (synced 2025-02-24 11:37:37 +00:00)

Merge pull request #45349 from guits/check-check-device-is-available

ceph-volume: various fixes in arg_validators

Reviewed-by: Adam King <adking@redhat.com>

This commit is contained in commit f298cceff5.
@@ -372,10 +372,10 @@ To configure a Ceph cluster to run on a single host, use the ``--single-host-def

The ``--single-host-defaults`` flag sets the following configuration options::

    global/osd_crush_choose_leaf_type = 0
    global/osd_crush_chooseleaf_type = 0
    global/osd_pool_default_size = 2
    mgr/mgr_standby_modules = False

For more information on these options, see :ref:`one-node-cluster` and ``mgr_standby_modules`` in :ref:`mgr-administrator-guide`.

Deployment in an isolated environment
@@ -138,6 +138,12 @@ There are a few ways to create new OSDs:

    ceph orch daemon add osd host1:/dev/sdb

  Advanced OSD creation from specific devices on a specific host:

  .. prompt:: bash #

    ceph orch daemon add osd host1:data_devices=/dev/sda,/dev/sdb,db_devices=/dev/sdc,osds_per_device=2

* You can use :ref:`drivegroups` to categorize device(s) based on their
  properties. This might be useful in forming a clearer picture of which
  devices are available to consume. Properties include device type (SSD or
@@ -196,7 +196,7 @@ class Batch(object):
'devices',
metavar='DEVICES',
nargs='*',
type=arg_validators.ValidBatchDevice(),
type=arg_validators.ValidBatchDataDevice(),
default=[],
help='Devices to provision OSDs',
)
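For context on how this hunk is exercised: ``argparse`` treats a callable passed as ``type=`` as a converter that runs once per ``DEVICES`` token, so swapping ``ValidBatchDevice()`` for ``ValidBatchDataDevice()`` changes which checks run on each path before ``Batch`` ever sees it. A minimal, self-contained sketch of that mechanism, using a hypothetical stand-in validator rather than the real ceph-volume class:

import argparse

class FakeDeviceValidator(object):
    """Hypothetical stand-in for arg_validators.ValidBatchDataDevice:
    argparse calls the instance once per CLI token passed as DEVICES."""
    def __call__(self, dev_path):
        if not dev_path.startswith('/dev/'):
            # argparse turns this into a usage error for the offending token
            raise argparse.ArgumentTypeError('%s is not a block device path' % dev_path)
        return dev_path

parser = argparse.ArgumentParser(prog='batch-sketch')
parser.add_argument('devices', metavar='DEVICES', nargs='*',
                    type=FakeDeviceValidator(), default=[],
                    help='Devices to provision OSDs')

print(parser.parse_args(['/dev/sda', '/dev/sdb']).devices)  # ['/dev/sda', '/dev/sdb']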
@@ -4,7 +4,6 @@ from ceph_volume import terminal
from ceph_volume.devices.lvm.zap import Zap
import argparse


def rollback_osd(args, osd_id=None):
"""
When the process of creating or preparing fails, the OSD needs to be
@@ -40,7 +39,7 @@ common_args = {
'--data': {
'help': 'OSD data path. A physical device or logical volume',
'required': True,
'type': arg_validators.ValidDevice(as_string=True),
'type': arg_validators.ValidDataDevice(as_string=True),
#'default':,
#'type':,
},
@@ -168,6 +168,7 @@ class Zap(object):
"""
lv = api.get_single_lv(filters={'lv_name': device.lv_name, 'vg_name':
device.vg_name})
pv = api.get_single_pv(filters={'lv_uuid': lv.lv_uuid})
self.unmount_lv(lv)

wipefs(device.abspath)

@@ -182,6 +183,7 @@ class Zap(object):
mlogger.info('Only 1 LV left in VG, will proceed to destroy '
'volume group %s', device.vg_name)
api.remove_vg(device.vg_name)
api.remove_pv(pv.pv_name)
else:
mlogger.info('More than 1 LV left in VG, will proceed to '
'destroy LV only')
@@ -362,7 +364,7 @@ class Zap(object):
'devices',
metavar='DEVICES',
nargs='*',
type=arg_validators.ValidDevice(gpt_ok=True),
type=arg_validators.ValidZapDevice(gpt_ok=True),
default=[],
help='Path to one or many lv (as vg/lv), partition (as /dev/sda1) or device (as /dev/sda)'
)
@@ -14,7 +14,7 @@ def create_parser(prog, description):
parser.add_argument(
'--data',
required=True,
type=arg_validators.ValidDevice(as_string=True),
type=arg_validators.ValidRawDevice(as_string=True),
help='a raw device to use for the OSD',
)
parser.add_argument(

@@ -35,12 +35,14 @@ def create_parser(prog, description):
parser.add_argument(
'--block.db',
dest='block_db',
help='Path to bluestore block.db block device'
help='Path to bluestore block.db block device',
type=arg_validators.ValidRawDevice(as_string=True)
)
parser.add_argument(
'--block.wal',
dest='block_wal',
help='Path to bluestore block.wal block device'
help='Path to bluestore block.wal block device',
type=arg_validators.ValidRawDevice(as_string=True)
)
parser.add_argument(
'--dmcrypt',
@@ -32,6 +32,8 @@ class TestBatch(object):
def test_reject_partition(self, mocked_device):
mocked_device.return_value = MagicMock(
is_partition=True,
has_fs=False,
is_lvm_member=False,
has_gpt_headers=False,
has_partitions=False,
)
@@ -1,5 +1,6 @@
import pytest
from ceph_volume.devices import lvm
from mock import patch


class TestCreate(object):

@@ -17,7 +18,8 @@ class TestCreate(object):
assert 'Use the bluestore objectstore' in stdout
assert 'A physical device or logical' in stdout

def test_excludes_filestore_bluestore_flags(self, capsys, device_info):
@patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
def test_excludes_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
device_info()
with pytest.raises(SystemExit):
lvm.create.Create(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()

@@ -25,7 +27,8 @@ class TestCreate(object):
expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
assert expected in stderr

def test_excludes_other_filestore_bluestore_flags(self, capsys, device_info):
@patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
def test_excludes_other_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
device_info()
with pytest.raises(SystemExit):
lvm.create.Create(argv=[

@@ -36,7 +39,8 @@ class TestCreate(object):
expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
assert expected in stderr

def test_excludes_block_and_journal_flags(self, capsys, device_info):
@patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
def test_excludes_block_and_journal_flags(self, m_has_bs_label, fake_call, capsys, device_info):
device_info()
with pytest.raises(SystemExit):
lvm.create.Create(argv=[
@@ -62,7 +62,7 @@ class TestPrettyReport(object):

class TestList(object):

def test_empty_full_json_zero_exit_status(self, is_root,factory,capsys):
def test_empty_full_json_zero_exit_status(self, fake_call, is_root, factory, capsys):
args = factory(format='json', device=None)
lvm.listing.List([]).list(args)
stdout, stderr = capsys.readouterr()

@@ -74,7 +74,7 @@ class TestList(object):
stdout, stderr = capsys.readouterr()
assert stdout == '{}\n'

def test_empty_full_zero_exit_status(self, is_root, factory):
def test_empty_full_zero_exit_status(self, fake_call, is_root, factory):
args = factory(format='pretty', device=None)
with pytest.raises(SystemExit):
lvm.listing.List([]).list(args)
@@ -1,7 +1,7 @@
import pytest
from ceph_volume.devices import lvm
from ceph_volume.api import lvm as api
from mock.mock import patch, Mock
from mock.mock import patch, Mock, MagicMock


class TestLVM(object):
@@ -66,7 +66,9 @@ class TestPrepare(object):
assert 'Use the bluestore objectstore' in stdout
assert 'A physical device or logical' in stdout

def test_excludes_filestore_bluestore_flags(self, capsys, device_info):
@patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
def test_excludes_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
device_info()
with pytest.raises(SystemExit):
lvm.prepare.Prepare(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()

@@ -74,7 +76,9 @@ class TestPrepare(object):
expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
assert expected in stderr

def test_excludes_other_filestore_bluestore_flags(self, capsys, device_info):
@patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
def test_excludes_other_filestore_bluestore_flags(self, m_has_bs_label, fake_call, capsys, device_info):
device_info()
with pytest.raises(SystemExit):
lvm.prepare.Prepare(argv=[

@@ -85,7 +89,8 @@ class TestPrepare(object):
expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
assert expected in stderr

def test_excludes_block_and_journal_flags(self, capsys, device_info):
@patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
def test_excludes_block_and_journal_flags(self, m_has_bs_label, fake_call, capsys, device_info):
device_info()
with pytest.raises(SystemExit):
lvm.prepare.Prepare(argv=[

@@ -96,9 +101,15 @@ class TestPrepare(object):
expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
assert expected in stderr

def test_journal_is_required_with_filestore(self, is_root, monkeypatch, device_info):
@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.disk.has_bluestore_label', return_value=False)
def test_journal_is_required_with_filestore(self, m_has_bs_label, m_device, is_root, monkeypatch, device_info):
m_device.return_value = MagicMock(exists=True,
has_fs=False,
used_by_ceph=False,
has_partitions=False,
has_gpt_headers=False)
monkeypatch.setattr("os.path.exists", lambda path: True)
device_info()
with pytest.raises(SystemExit) as error:
lvm.prepare.Prepare(argv=['--filestore', '--data', '/dev/sdfoo']).main()
expected = '--journal is required when using --filestore'
@@ -105,7 +105,7 @@ class TestEnsureAssociatedLVs(object):
result = zap.ensure_associated_lvs(volumes)
assert result == []

def test_data_is_found(self):
def test_data_is_found(self, fake_call):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=data'
osd = api.Volume(
lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/data', lv_tags=tags)

@@ -114,7 +114,7 @@ class TestEnsureAssociatedLVs(object):
result = zap.ensure_associated_lvs(volumes)
assert result == ['/dev/VolGroup/data']

def test_block_is_found(self):
def test_block_is_found(self, fake_call):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
osd = api.Volume(
lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)

@@ -150,7 +150,7 @@ class TestEnsureAssociatedLVs(object):
assert '/dev/sdb1' in result
assert '/dev/VolGroup/block' in result

def test_journal_is_found(self):
def test_journal_is_found(self, fake_call):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
osd = api.Volume(
lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv', lv_tags=tags)
@@ -41,7 +41,7 @@ class TestPrepare(object):
assert 'Path to bluestore block.wal block device' in stdout
assert 'Enable device encryption via dm-crypt' in stdout

@patch('ceph_volume.util.arg_validators.ValidDevice.__call__')
@patch('ceph_volume.util.arg_validators.ValidRawDevice.__call__')
def test_prepare_dmcrypt_no_secret_passed(self, m_valid_device, capsys):
m_valid_device.return_value = '/dev/foo'
with pytest.raises(SystemExit):

@@ -87,7 +87,7 @@ class TestPrepare(object):

@patch('ceph_volume.devices.raw.prepare.rollback_osd')
@patch('ceph_volume.devices.raw.prepare.Prepare.prepare')
@patch('ceph_volume.util.arg_validators.ValidDevice.__call__')
@patch('ceph_volume.util.arg_validators.ValidRawDevice.__call__')
def test_safe_prepare_exception_raised(self, m_valid_device, m_prepare, m_rollback_osd):
m_valid_device.return_value = '/dev/foo'
m_prepare.side_effect=Exception('foo')
@@ -1,5 +1,6 @@
import pytest
from ceph_volume.devices import lvm
from mock.mock import patch, MagicMock


class TestZap(object):

@@ -19,9 +20,18 @@ class TestZap(object):
'/dev/mapper/foo',
'/dev/dm-0',
])
def test_can_not_zap_mapper_device(self, monkeypatch, device_info, capsys, is_root, device_name):
@patch('ceph_volume.util.arg_validators.Device')
def test_can_not_zap_mapper_device(self, mocked_device, monkeypatch, device_info, capsys, is_root, device_name):
monkeypatch.setattr('os.path.exists', lambda x: True)
device_info()
mocked_device.return_value = MagicMock(
is_mapper=True,
is_mpath=False,
used_by_ceph=True,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=False
)
with pytest.raises(SystemExit):
lvm.zap.Zap(argv=[device_name]).main()
stdout, stderr = capsys.readouterr()
@@ -10,8 +10,8 @@
- name: tell lvm to ignore loop devices
lineinfile:
path: /etc/lvm/lvm.conf
line: ' global_filter = [ "r|loop|", "a|.*|" ]'
insertafter: '^devices {'
line: "\tfilter = [ 'r|loop.*|' ]"
insertafter: 'devices {'
- name: lvm allow changes depsite duplicate PVIDs
lineinfile:
path: /etc/lvm/lvm.conf
@@ -111,6 +111,7 @@ def device_data(device_info):
class TestInventory(object):

expected_keys = [
'ceph_device',
'path',
'rejected_reasons',
'sys_api',
@@ -152,30 +153,30 @@ class TestInventory(object):
'errors',
]

def test_json_inventory_keys_unexpected(self, device_report_keys):
def test_json_inventory_keys_unexpected(self, fake_call, device_report_keys):
for k in device_report_keys:
assert k in self.expected_keys, "unexpected key {} in report".format(k)

def test_json_inventory_keys_missing(self, device_report_keys):
def test_json_inventory_keys_missing(self, fake_call, device_report_keys):
for k in self.expected_keys:
assert k in device_report_keys, "expected key {} in report".format(k)

def test_sys_api_keys_unexpected(self, device_sys_api_keys):
def test_sys_api_keys_unexpected(self, fake_call, device_sys_api_keys):
for k in device_sys_api_keys:
assert k in self.expected_sys_api_keys, "unexpected key {} in sys_api field".format(k)

def test_sys_api_keys_missing(self, device_sys_api_keys):
def test_sys_api_keys_missing(self, fake_call, device_sys_api_keys):
for k in self.expected_sys_api_keys:
assert k in device_sys_api_keys, "expected key {} in sys_api field".format(k)

def test_lsm_data_type_unexpected(self, device_data):
def test_lsm_data_type_unexpected(self, fake_call, device_data):
assert isinstance(device_data['lsm_data'], dict), "lsm_data field must be of type dict"

def test_lsm_data_keys_unexpected(self, device_data):
def test_lsm_data_keys_unexpected(self, fake_call, device_data):
for k in device_data['lsm_data'].keys():
assert k in self.expected_lsm_keys, "unexpected key {} in lsm_data field".format(k)

def test_lsm_data_keys_missing(self, device_data):
def test_lsm_data_keys_missing(self, fake_call, device_data):
lsm_keys = device_data['lsm_data'].keys()
assert lsm_keys
for k in self.expected_lsm_keys:

@@ -251,4 +252,4 @@ class TestLSM(object):
def test_lsmdisk_led_fault(self, lsm_info):
assert lsm_info.led_fault_state == 'Off'
def test_lsmdisk_report(self, lsm_info):
assert isinstance(lsm_info.json_report(), dict)
assert isinstance(lsm_info.json_report(), dict)
@@ -1,9 +1,9 @@
import argparse
import pytest
import os
from ceph_volume import exceptions
from ceph_volume import exceptions, process
from ceph_volume.util import arg_validators
from mock.mock import patch, PropertyMock
from mock.mock import patch, MagicMock


class TestOSDPath(object):
@@ -81,21 +81,252 @@ class TestValidDevice(object):
def setup(self):
self.validator = arg_validators.ValidDevice()

def test_path_is_valid(self, fake_call, patch_bluestore_label):
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
def test_path_is_valid(self, m_has_bs_label, fake_call, patch_bluestore_label):
result = self.validator('/')
assert result.abspath == '/'

def test_path_is_invalid(self, fake_call, patch_bluestore_label):
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
def test_path_is_invalid(self, m_has_bs_label, fake_call, patch_bluestore_label):
with pytest.raises(argparse.ArgumentError):
self.validator('/device/does/not/exist')

@patch('ceph_volume.util.arg_validators.Device.has_partitions', new_callable=PropertyMock, return_value=True)
@patch('ceph_volume.util.arg_validators.Device.exists', new_callable=PropertyMock, return_value=True)
@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_dev_has_partitions(self, m_get_single_lv, m_exists, m_has_partitions, fake_call):
def test_dev_has_partitions(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
mocked_device.return_value = MagicMock(
exists=True,
has_partitions=True,
)
with pytest.raises(RuntimeError):
self.validator('/dev/foo')

class TestValidZapDevice(object):
def setup(self):
self.validator = arg_validators.ValidZapDevice()

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_has_partition(self, m_get_single_lv, m_has_bs_label, mocked_device):
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=True,
has_gpt_headers=False,
has_fs=False
)
self.validator.zap = False
with pytest.raises(RuntimeError):
assert self.validator('/dev/foo')

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_has_no_partition(self, m_get_single_lv, m_has_bs_label, mocked_device):
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=False
)
self.validator.zap = False
assert self.validator('/dev/foo')

class TestValidDataDevice(object):
def setup(self):
self.validator = arg_validators.ValidDataDevice()

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_used_by_ceph(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
mocked_device.return_value = MagicMock(
used_by_ceph=True,
exists=True,
has_partitions=False,
has_gpt_headers=False
)
with pytest.raises(SystemExit):
self.validator.zap = False
self.validator('/dev/foo')

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_has_fs(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=True
)
with pytest.raises(RuntimeError):
self.validator.zap = False
self.validator('/dev/foo')

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=True)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_has_bs_signature(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=False
)
with pytest.raises(RuntimeError):
self.validator.zap = False
self.validator('/dev/foo')
class TestValidRawDevice(object):
def setup(self):
self.validator = arg_validators.ValidRawDevice()

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.util.arg_validators.disk.blkid')
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_dmcrypt_device_already_prepared(self, m_get_single_lv, m_blkid, m_has_bs_label, mocked_device, fake_call, monkeypatch):
def mock_call(cmd, **kw):
return ('', '', 1)
monkeypatch.setattr(process, 'call', mock_call)
m_blkid.return_value = {'UUID': '8fd92779-ad78-437c-a06f-275f7170fa74', 'TYPE': 'crypto_LUKS'}
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=False
)
with pytest.raises(SystemExit):
self.validator.zap = False
self.validator('/dev/foo')

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_already_prepared(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=False
)
with pytest.raises(SystemExit):
self.validator.zap = False
self.validator('/dev/foo')

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_not_prepared(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call, monkeypatch):
def mock_call(cmd, **kw):
return ('', '', 1)
monkeypatch.setattr(process, 'call', mock_call)
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=False
)
self.validator.zap = False
assert self.validator('/dev/foo')

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_has_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call, monkeypatch):
def mock_call(cmd, **kw):
return ('', '', 1)
monkeypatch.setattr(process, 'call', mock_call)
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=True,
has_gpt_headers=False,
has_fs=False
)
self.validator.zap = False
with pytest.raises(RuntimeError):
assert self.validator('/dev/foo')

class TestValidBatchDevice(object):
def setup(self):
self.validator = arg_validators.ValidBatchDevice()

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_is_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=False,
is_partition=True
)
with pytest.raises(argparse.ArgumentError):
self.validator.zap = False
self.validator('/dev/foo')

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_is_not_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=False,
is_partition=False
)
self.validator.zap = False
assert self.validator('/dev/foo')

class TestValidBatchDataDevice(object):
def setup(self):
self.validator = arg_validators.ValidBatchDataDevice()

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_is_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=False,
is_partition=True
)
with pytest.raises(argparse.ArgumentError):
self.validator.zap = False
assert self.validator('/dev/foo')

@patch('ceph_volume.util.arg_validators.Device')
@patch('ceph_volume.util.arg_validators.disk.has_bluestore_label', return_value=False)
@patch('ceph_volume.api.lvm.get_single_lv', return_value=None)
def test_device_is_not_partition(self, m_get_single_lv, m_has_bs_label, mocked_device, fake_call):
mocked_device.return_value = MagicMock(
used_by_ceph=False,
exists=True,
has_partitions=False,
has_gpt_headers=False,
has_fs=False,
is_partition=False
)
self.validator.zap = False
assert self.validator('/dev/foo')


class TestValidFraction(object):
@@ -37,7 +37,7 @@ class TestDevice(object):
disk = device.Device("/dev/sda")
assert disk.lvm_size.gb == 4

def test_lvm_size_rounds_down(self, device_info):
def test_lvm_size_rounds_down(self, fake_call, device_info):
# 5.5GB in size
data = {"/dev/sda": {"size": "5905580032"}}
lsblk = {"TYPE": "disk"}

@@ -45,14 +45,14 @@ class TestDevice(object):
disk = device.Device("/dev/sda")
assert disk.lvm_size.gb == 4

def test_is_lv(self, device_info):
def test_is_lv(self, fake_call, device_info):
data = {"lv_path": "vg/lv", "vg_name": "vg", "name": "lv"}
lsblk = {"TYPE": "lvm"}
device_info(lv=data,lsblk=lsblk)
disk = device.Device("vg/lv")
assert disk.is_lv

def test_vgs_is_empty(self, device_info, monkeypatch):
def test_vgs_is_empty(self, fake_call, device_info, monkeypatch):
BarPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
pv_tags={})
pvolumes = []

@@ -64,7 +64,7 @@ class TestDevice(object):
disk = device.Device("/dev/nvme0n1")
assert disk.vgs == []

def test_vgs_is_not_empty(self, device_info, monkeypatch):
def test_vgs_is_not_empty(self, fake_call, device_info, monkeypatch):
vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
vg_extent_size=1073741824)
monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])

@@ -73,42 +73,42 @@ class TestDevice(object):
disk = device.Device("/dev/nvme0n1")
assert len(disk.vgs) == 1

def test_device_is_device(self, device_info):
def test_device_is_device(self, fake_call, device_info):
data = {"/dev/sda": {"foo": "bar"}}
lsblk = {"TYPE": "device"}
device_info(devices=data, lsblk=lsblk)
disk = device.Device("/dev/sda")
assert disk.is_device is True

def test_device_is_rotational(self, device_info):
def test_device_is_rotational(self, fake_call, device_info):
data = {"/dev/sda": {"rotational": "1"}}
lsblk = {"TYPE": "device"}
device_info(devices=data, lsblk=lsblk)
disk = device.Device("/dev/sda")
assert disk.rotational

def test_device_is_not_rotational(self, device_info):
def test_device_is_not_rotational(self, fake_call, device_info):
data = {"/dev/sda": {"rotational": "0"}}
lsblk = {"TYPE": "device"}
device_info(devices=data, lsblk=lsblk)
disk = device.Device("/dev/sda")
assert not disk.rotational

def test_device_is_rotational_lsblk(self, device_info):
def test_device_is_rotational_lsblk(self, fake_call, device_info):
data = {"/dev/sda": {"foo": "bar"}}
lsblk = {"TYPE": "device", "ROTA": "1"}
device_info(devices=data, lsblk=lsblk)
disk = device.Device("/dev/sda")
assert disk.rotational

def test_device_is_not_rotational_lsblk(self, device_info):
def test_device_is_not_rotational_lsblk(self, fake_call, device_info):
data = {"/dev/sda": {"rotational": "0"}}
lsblk = {"TYPE": "device", "ROTA": "0"}
device_info(devices=data, lsblk=lsblk)
disk = device.Device("/dev/sda")
assert not disk.rotational

def test_device_is_rotational_defaults_true(self, device_info):
def test_device_is_rotational_defaults_true(self, fake_call, device_info):
# rotational will default true if no info from sys_api or lsblk is found
data = {"/dev/sda": {"foo": "bar"}}
lsblk = {"TYPE": "device", "foo": "bar"}

@@ -116,54 +116,54 @@ class TestDevice(object):
disk = device.Device("/dev/sda")
assert disk.rotational

def test_disk_is_device(self, device_info):
def test_disk_is_device(self, fake_call, device_info):
data = {"/dev/sda": {"foo": "bar"}}
lsblk = {"TYPE": "disk"}
device_info(devices=data, lsblk=lsblk)
disk = device.Device("/dev/sda")
assert disk.is_device is True

def test_is_partition(self, device_info):
def test_is_partition(self, fake_call, device_info):
data = {"/dev/sda1": {"foo": "bar"}}
lsblk = {"TYPE": "part", "PKNAME": "sda"}
device_info(devices=data, lsblk=lsblk)
disk = device.Device("/dev/sda1")
assert disk.is_partition

def test_mpath_device_is_device(self, device_info):
def test_mpath_device_is_device(self, fake_call, device_info):
data = {"/dev/foo": {"foo": "bar"}}
lsblk = {"TYPE": "mpath"}
device_info(devices=data, lsblk=lsblk)
disk = device.Device("/dev/foo")
assert disk.is_device is True

def test_is_not_lvm_member(self, device_info):
def test_is_not_lvm_member(self, fake_call, device_info):
data = {"/dev/sda1": {"foo": "bar"}}
lsblk = {"TYPE": "part", "PKNAME": "sda"}
device_info(devices=data, lsblk=lsblk)
disk = device.Device("/dev/sda1")
assert not disk.is_lvm_member

def test_is_lvm_member(self, device_info):
def test_is_lvm_member(self, fake_call, device_info):
data = {"/dev/sda1": {"foo": "bar"}}
lsblk = {"TYPE": "part", "PKNAME": "sda"}
device_info(devices=data, lsblk=lsblk)
disk = device.Device("/dev/sda1")
assert not disk.is_lvm_member

def test_is_mapper_device(self, device_info):
def test_is_mapper_device(self, fake_call, device_info):
lsblk = {"TYPE": "lvm"}
device_info(lsblk=lsblk)
disk = device.Device("/dev/mapper/foo")
assert disk.is_mapper

def test_dm_is_mapper_device(self, device_info):
def test_dm_is_mapper_device(self, fake_call, device_info):
lsblk = {"TYPE": "lvm"}
device_info(lsblk=lsblk)
disk = device.Device("/dev/dm-4")
assert disk.is_mapper

def test_is_not_mapper_device(self, device_info):
def test_is_not_mapper_device(self, fake_call, device_info):
lsblk = {"TYPE": "disk"}
device_info(lsblk=lsblk)
disk = device.Device("/dev/sda")
@@ -171,19 +171,19 @@ class TestDevice(object):

@pytest.mark.usefixtures("lsblk_ceph_disk_member",
"disable_kernel_queries")
def test_is_ceph_disk_lsblk(self, monkeypatch, patch_bluestore_label):
def test_is_ceph_disk_lsblk(self, fake_call, monkeypatch, patch_bluestore_label):
disk = device.Device("/dev/sda")
assert disk.is_ceph_disk_member

@pytest.mark.usefixtures("blkid_ceph_disk_member",
"disable_kernel_queries")
def test_is_ceph_disk_blkid(self, monkeypatch, patch_bluestore_label):
def test_is_ceph_disk_blkid(self, fake_call, monkeypatch, patch_bluestore_label):
disk = device.Device("/dev/sda")
assert disk.is_ceph_disk_member

@pytest.mark.usefixtures("lsblk_ceph_disk_member",
"disable_kernel_queries")
def test_is_ceph_disk_member_not_available_lsblk(self, monkeypatch, patch_bluestore_label):
def test_is_ceph_disk_member_not_available_lsblk(self, fake_call, monkeypatch, patch_bluestore_label):
disk = device.Device("/dev/sda")
assert disk.is_ceph_disk_member
assert not disk.available

@@ -191,20 +191,20 @@ class TestDevice(object):

@pytest.mark.usefixtures("blkid_ceph_disk_member",
"disable_kernel_queries")
def test_is_ceph_disk_member_not_available_blkid(self, monkeypatch, patch_bluestore_label):
def test_is_ceph_disk_member_not_available_blkid(self, fake_call, monkeypatch, patch_bluestore_label):
disk = device.Device("/dev/sda")
assert disk.is_ceph_disk_member
assert not disk.available
assert "Used by ceph-disk" in disk.rejected_reasons

def test_reject_removable_device(self, device_info):
def test_reject_removable_device(self, fake_call, device_info):
data = {"/dev/sdb": {"removable": 1}}
lsblk = {"TYPE": "disk"}
device_info(devices=data,lsblk=lsblk)
disk = device.Device("/dev/sdb")
assert not disk.available

def test_reject_device_with_gpt_headers(self, device_info):
def test_reject_device_with_gpt_headers(self, fake_call, device_info):
data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
lsblk = {"TYPE": "disk"}
blkid= {"PTTYPE": "gpt"}

@@ -216,42 +216,42 @@ class TestDevice(object):
disk = device.Device("/dev/sdb")
assert not disk.available

def test_accept_non_removable_device(self, device_info):
def test_accept_non_removable_device(self, fake_call, device_info):
data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
lsblk = {"TYPE": "disk"}
device_info(devices=data,lsblk=lsblk)
disk = device.Device("/dev/sdb")
assert disk.available

def test_reject_not_acceptable_device(self, device_info):
def test_reject_not_acceptable_device(self, fake_call, device_info):
data = {"/dev/dm-0": {"foo": "bar"}}
lsblk = {"TYPE": "mpath"}
device_info(devices=data, lsblk=lsblk)
disk = device.Device("/dev/dm-0")
assert not disk.available

def test_reject_readonly_device(self, device_info):
def test_reject_readonly_device(self, fake_call, device_info):
data = {"/dev/cdrom": {"ro": 1}}
lsblk = {"TYPE": "disk"}
device_info(devices=data,lsblk=lsblk)
disk = device.Device("/dev/cdrom")
assert not disk.available

def test_reject_smaller_than_5gb(self, device_info):
def test_reject_smaller_than_5gb(self, fake_call, device_info):
data = {"/dev/sda": {"size": 5368709119}}
lsblk = {"TYPE": "disk"}
device_info(devices=data,lsblk=lsblk)
disk = device.Device("/dev/sda")
assert not disk.available, 'too small device is available'

def test_accept_non_readonly_device(self, device_info):
def test_accept_non_readonly_device(self, fake_call, device_info):
data = {"/dev/sda": {"ro": 0, "size": 5368709120}}
lsblk = {"TYPE": "disk"}
device_info(devices=data,lsblk=lsblk)
disk = device.Device("/dev/sda")
assert disk.available

def test_reject_bluestore_device(self, monkeypatch, patch_bluestore_label, device_info):
def test_reject_bluestore_device(self, fake_call, monkeypatch, patch_bluestore_label, device_info):
patch_bluestore_label.return_value = True
lsblk = {"TYPE": "disk"}
device_info(lsblk=lsblk)

@@ -259,7 +259,7 @@ class TestDevice(object):
assert not disk.available
assert "Has BlueStore device label" in disk.rejected_reasons

def test_reject_device_with_oserror(self, monkeypatch, patch_bluestore_label, device_info):
def test_reject_device_with_oserror(self, fake_call, monkeypatch, patch_bluestore_label, device_info):
patch_bluestore_label.side_effect = OSError('test failure')
lsblk = {"TYPE": "disk"}
device_info(lsblk=lsblk)

@@ -269,11 +269,11 @@ class TestDevice(object):

@pytest.mark.usefixtures("device_info_not_ceph_disk_member",
"disable_kernel_queries")
def test_is_not_ceph_disk_member_lsblk(self, patch_bluestore_label):
def test_is_not_ceph_disk_member_lsblk(self, fake_call, patch_bluestore_label):
disk = device.Device("/dev/sda")
assert disk.is_ceph_disk_member is False

def test_existing_vg_available(self, monkeypatch, device_info):
def test_existing_vg_available(self, fake_call, monkeypatch, device_info):
vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1536,
vg_extent_size=4194304)
monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])

@@ -285,7 +285,7 @@ class TestDevice(object):
assert not disk.available
assert not disk.available_raw

def test_existing_vg_too_small(self, monkeypatch, device_info):
def test_existing_vg_too_small(self, fake_call, monkeypatch, device_info):
vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=4,
vg_extent_size=1073741824)
monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])

@@ -297,7 +297,7 @@ class TestDevice(object):
assert not disk.available
assert not disk.available_raw

def test_multiple_existing_vgs(self, monkeypatch, device_info):
def test_multiple_existing_vgs(self, fake_call, monkeypatch, device_info):
vg1 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1000,
vg_extent_size=4194304)
vg2 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=536,

@@ -312,7 +312,7 @@ class TestDevice(object):
assert not disk.available_raw

@pytest.mark.parametrize("ceph_type", ["data", "block"])
def test_used_by_ceph(self, device_info,
def test_used_by_ceph(self, fake_call, device_info,
monkeypatch, ceph_type):
data = {"/dev/sda": {"foo": "bar"}}
lsblk = {"TYPE": "part", "PKNAME": "sda"}

@@ -337,7 +337,7 @@ class TestDevice(object):
disk = device.Device("/dev/sda")
assert disk.used_by_ceph

def test_not_used_by_ceph(self, device_info, monkeypatch):
def test_not_used_by_ceph(self, fake_call, device_info, monkeypatch):
FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000", pv_tags={}, vg_name="vg")
pvolumes = []
pvolumes.append(FooPVolume)

@@ -350,7 +350,7 @@ class TestDevice(object):
disk = device.Device("/dev/sda")
assert not disk.used_by_ceph

def test_get_device_id(self, device_info):
def test_get_device_id(self, fake_call, device_info):
udev = {k:k for k in ['ID_VENDOR', 'ID_MODEL', 'ID_SCSI_SERIAL']}
lsblk = {"TYPE": "disk"}
device_info(udevadm=udev,lsblk=lsblk)
@@ -371,33 +371,33 @@ class TestDevice(object):

class TestDeviceEncryption(object):

def test_partition_is_not_encrypted_lsblk(self, device_info):
def test_partition_is_not_encrypted_lsblk(self, fake_call, device_info):
lsblk = {'TYPE': 'part', 'FSTYPE': 'xfs', 'PKNAME': 'sda'}
device_info(lsblk=lsblk)
disk = device.Device("/dev/sda")
assert disk.is_encrypted is False

def test_partition_is_encrypted_lsblk(self, device_info):
def test_partition_is_encrypted_lsblk(self, fake_call, device_info):
lsblk = {'TYPE': 'part', 'FSTYPE': 'crypto_LUKS', 'PKNAME': 'sda'}
device_info(lsblk=lsblk)
disk = device.Device("/dev/sda")
assert disk.is_encrypted is True

def test_partition_is_not_encrypted_blkid(self, device_info):
def test_partition_is_not_encrypted_blkid(self, fake_call, device_info):
lsblk = {'TYPE': 'part', 'PKNAME': 'sda'}
blkid = {'TYPE': 'ceph data'}
device_info(lsblk=lsblk, blkid=blkid)
disk = device.Device("/dev/sda")
assert disk.is_encrypted is False

def test_partition_is_encrypted_blkid(self, device_info):
def test_partition_is_encrypted_blkid(self, fake_call, device_info):
lsblk = {'TYPE': 'part', 'PKNAME': 'sda'}
blkid = {'TYPE': 'crypto_LUKS'}
device_info(lsblk=lsblk, blkid=blkid)
disk = device.Device("/dev/sda")
assert disk.is_encrypted is True

def test_mapper_is_encrypted_luks1(self, device_info, monkeypatch):
def test_mapper_is_encrypted_luks1(self, fake_call, device_info, monkeypatch):
status = {'type': 'LUKS1'}
monkeypatch.setattr(device, 'encryption_status', lambda x: status)
lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}

@@ -406,7 +406,7 @@ class TestDeviceEncryption(object):
disk = device.Device("/dev/mapper/uuid")
assert disk.is_encrypted is True

def test_mapper_is_encrypted_luks2(self, device_info, monkeypatch):
def test_mapper_is_encrypted_luks2(self, fake_call, device_info, monkeypatch):
status = {'type': 'LUKS2'}
monkeypatch.setattr(device, 'encryption_status', lambda x: status)
lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}

@@ -415,7 +415,7 @@ class TestDeviceEncryption(object):
disk = device.Device("/dev/mapper/uuid")
assert disk.is_encrypted is True

def test_mapper_is_encrypted_plain(self, device_info, monkeypatch):
def test_mapper_is_encrypted_plain(self, fake_call, device_info, monkeypatch):
status = {'type': 'PLAIN'}
monkeypatch.setattr(device, 'encryption_status', lambda x: status)
lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}

@@ -424,7 +424,7 @@ class TestDeviceEncryption(object):
disk = device.Device("/dev/mapper/uuid")
assert disk.is_encrypted is True

def test_mapper_is_not_encrypted_plain(self, device_info, monkeypatch):
def test_mapper_is_not_encrypted_plain(self, fake_call, device_info, monkeypatch):
monkeypatch.setattr(device, 'encryption_status', lambda x: {})
lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
blkid = {'TYPE': 'mapper'}

@@ -432,7 +432,7 @@ class TestDeviceEncryption(object):
disk = device.Device("/dev/mapper/uuid")
assert disk.is_encrypted is False

def test_lv_is_encrypted_blkid(self, device_info):
def test_lv_is_encrypted_blkid(self, fake_call, device_info):
lsblk = {'TYPE': 'lvm'}
blkid = {'TYPE': 'crypto_LUKS'}
device_info(lsblk=lsblk, blkid=blkid)

@@ -440,7 +440,7 @@ class TestDeviceEncryption(object):
disk.lv_api = {}
assert disk.is_encrypted is True

def test_lv_is_not_encrypted_blkid(self, factory, device_info):
def test_lv_is_not_encrypted_blkid(self, fake_call, factory, device_info):
lsblk = {'TYPE': 'lvm'}
blkid = {'TYPE': 'xfs'}
device_info(lsblk=lsblk, blkid=blkid)

@@ -448,7 +448,7 @@ class TestDeviceEncryption(object):
disk.lv_api = factory(encrypted=None)
assert disk.is_encrypted is False

def test_lv_is_encrypted_lsblk(self, device_info):
def test_lv_is_encrypted_lsblk(self, fake_call, device_info):
lsblk = {'FSTYPE': 'crypto_LUKS', 'TYPE': 'lvm'}
blkid = {'TYPE': 'mapper'}
device_info(lsblk=lsblk, blkid=blkid)

@@ -456,7 +456,7 @@ class TestDeviceEncryption(object):
disk.lv_api = {}
assert disk.is_encrypted is True

def test_lv_is_not_encrypted_lsblk(self, factory, device_info):
def test_lv_is_not_encrypted_lsblk(self, fake_call, factory, device_info):
lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
blkid = {'TYPE': 'mapper'}
device_info(lsblk=lsblk, blkid=blkid)

@@ -464,7 +464,7 @@ class TestDeviceEncryption(object):
disk.lv_api = factory(encrypted=None)
assert disk.is_encrypted is False

def test_lv_is_encrypted_lvm_api(self, factory, device_info):
def test_lv_is_encrypted_lvm_api(self, fake_call, factory, device_info):
lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
blkid = {'TYPE': 'mapper'}
device_info(lsblk=lsblk, blkid=blkid)

@@ -472,7 +472,7 @@ class TestDeviceEncryption(object):
disk.lv_api = factory(encrypted=True)
assert disk.is_encrypted is True

def test_lv_is_not_encrypted_lvm_api(self, factory, device_info):
def test_lv_is_not_encrypted_lvm_api(self, fake_call, factory, device_info):
lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
blkid = {'TYPE': 'mapper'}
device_info(lsblk=lsblk, blkid=blkid)

@@ -491,7 +491,7 @@ class TestDeviceOrdering(object):
"/dev/sdd": {"removable": 1}, # invalid
}

def test_valid_before_invalid(self, device_info):
def test_valid_before_invalid(self, fake_call, device_info):
lsblk = {"TYPE": "disk"}
device_info(devices=self.data,lsblk=lsblk)
sda = device.Device("/dev/sda")

@@ -500,7 +500,7 @@ class TestDeviceOrdering(object):
assert sda < sdb
assert sdb > sda

def test_valid_alphabetical_ordering(self, device_info):
def test_valid_alphabetical_ordering(self, fake_call, device_info):
lsblk = {"TYPE": "disk"}
device_info(devices=self.data,lsblk=lsblk)
sda = device.Device("/dev/sda")

@@ -509,7 +509,7 @@ class TestDeviceOrdering(object):
assert sda < sdc
assert sdc > sda

def test_invalid_alphabetical_ordering(self, device_info):
def test_invalid_alphabetical_ordering(self, fake_call, device_info):
lsblk = {"TYPE": "disk"}
device_info(devices=self.data,lsblk=lsblk)
sdb = device.Device("/dev/sdb")

@@ -521,14 +521,14 @@ class TestDeviceOrdering(object):

class TestCephDiskDevice(object):

def test_partlabel_lsblk(self, device_info):
def test_partlabel_lsblk(self, fake_call, device_info):
lsblk = {"TYPE": "disk", "PARTLABEL": ""}
device_info(lsblk=lsblk)
disk = device.CephDiskDevice(device.Device("/dev/sda"))

assert disk.partlabel == ''

def test_partlabel_blkid(self, device_info):
def test_partlabel_blkid(self, fake_call, device_info):
blkid = {"TYPE": "disk", "PARTLABEL": "ceph data"}
device_info(blkid=blkid)
disk = device.CephDiskDevice(device.Device("/dev/sda"))

@@ -537,21 +537,21 @@ class TestCephDiskDevice(object):

@pytest.mark.usefixtures("blkid_ceph_disk_member",
"disable_kernel_queries")
def test_is_member_blkid(self, monkeypatch, patch_bluestore_label):
def test_is_member_blkid(self, fake_call, monkeypatch, patch_bluestore_label):
disk = device.CephDiskDevice(device.Device("/dev/sda"))

assert disk.is_member is True

@pytest.mark.usefixtures("lsblk_ceph_disk_member",
"disable_kernel_queries")
def test_is_member_lsblk(self, patch_bluestore_label, device_info):
def test_is_member_lsblk(self, fake_call, patch_bluestore_label, device_info):
lsblk = {"TYPE": "disk", "PARTLABEL": "ceph"}
device_info(lsblk=lsblk)
disk = device.CephDiskDevice(device.Device("/dev/sda"))

assert disk.is_member is True

def test_unknown_type(self, device_info):
def test_unknown_type(self, fake_call, device_info):
lsblk = {"TYPE": "disk", "PARTLABEL": "gluster"}
device_info(lsblk=lsblk)
disk = device.CephDiskDevice(device.Device("/dev/sda"))

@@ -562,7 +562,7 @@ class TestCephDiskDevice(object):

@pytest.mark.usefixtures("blkid_ceph_disk_member",
"disable_kernel_queries")
def test_type_blkid(self, monkeypatch, device_info, ceph_partlabel):
def test_type_blkid(self, monkeypatch, fake_call, device_info, ceph_partlabel):
disk = device.CephDiskDevice(device.Device("/dev/sda"))

assert disk.type in self.ceph_types

@@ -570,7 +570,7 @@ class TestCephDiskDevice(object):
@pytest.mark.usefixtures("blkid_ceph_disk_member",
"lsblk_ceph_disk_member",
"disable_kernel_queries")
def test_type_lsblk(self, device_info, ceph_partlabel):
def test_type_lsblk(self, fake_call, device_info, ceph_partlabel):
disk = device.CephDiskDevice(device.Device("/dev/sda"))

assert disk.type in self.ceph_types
@@ -1,10 +1,9 @@
import argparse
import os
import math
from ceph_volume import terminal
from ceph_volume import decorators
from ceph_volume.util import disk
from ceph_volume import terminal, decorators, process
from ceph_volume.util.device import Device
from ceph_volume.util import disk


def valid_osd_id(val):
@@ -17,8 +16,13 @@ class ValidDevice(object):
self.gpt_ok = gpt_ok

def __call__(self, dev_path):
device = self._is_valid_device(dev_path)
return self._format_device(device)
self.get_device(dev_path)
self._validated_device = self._is_valid_device()
return self._format_device(self._validated_device)

def get_device(self, dev_path):
self._device = Device(dev_path)
self.dev_path = dev_path

def _format_device(self, device):
if self.as_string:

@@ -28,36 +32,101 @@ class ValidDevice(object):
return device.path
return device

def _is_valid_device(self, dev_path):
device = Device(dev_path)
def _is_valid_device(self):
error = None
if not device.exists:
error = "Unable to proceed with non-existing device: %s" % dev_path
if not self._device.exists:
error = "Unable to proceed with non-existing device: %s" % self.dev_path
# FIXME this is not a nice API, this validator was meant to catch any
# non-existing devices upfront, not check for gpt headers. Now this
# needs to optionally skip checking gpt headers which is beyond
# verifying if the device exists. The better solution would be to
# configure this with a list of checks that can be excluded/included on
# __init__
elif device.has_gpt_headers and not self.gpt_ok:
error = "GPT headers found, they must be removed on: %s" % dev_path
if device.has_partitions:
raise RuntimeError("Device {} has partitions.".format(dev_path))
elif self._device.has_gpt_headers and not self.gpt_ok:
error = "GPT headers found, they must be removed on: %s" % self.dev_path
if self._device.has_partitions:
raise RuntimeError("Device {} has partitions.".format(self.dev_path))
if error:
raise argparse.ArgumentError(None, error)
return device
return self._device


class ValidZapDevice(ValidDevice):
def __call__(self, dev_path):
super().get_device(dev_path)
return self._format_device(self._is_valid_device())

def _is_valid_device(self, raise_sys_exit=True):
super()._is_valid_device()
return self._device


class ValidDataDevice(ValidDevice):
def __call__(self, dev_path):
super().get_device(dev_path)
return self._format_device(self._is_valid_device())

def _is_valid_device(self, raise_sys_exit=True):
super()._is_valid_device()
if self._device.used_by_ceph:
terminal.info('Device {} is already prepared'.format(self.dev_path))
if raise_sys_exit:
raise SystemExit(0)
if self._device.has_fs and not self._device.used_by_ceph:
raise RuntimeError("Device {} has a filesystem.".format(self.dev_path))
if self.dev_path[0] == '/' and disk.has_bluestore_label(self.dev_path):
raise RuntimeError("Device {} has bluestore signature.".format(self.dev_path))
return self._device

class ValidRawDevice(ValidDevice):
def __call__(self, dev_path):
super().get_device(dev_path)
return self._format_device(self._is_valid_device())

def _is_valid_device(self, raise_sys_exit=True):
out, err, rc = process.call([
'ceph-bluestore-tool', 'show-label',
'--dev', self.dev_path], verbose_on_failure=False)
if not rc:
terminal.info("Raw device {} is already prepared.".format(self.dev_path))
raise SystemExit(0)
if disk.blkid(self.dev_path).get('TYPE') == 'crypto_LUKS':
terminal.info("Raw device {} might already be in use for a dmcrypt OSD, skipping.".format(self.dev_path))
raise SystemExit(0)
super()._is_valid_device()
return self._device

class ValidBatchDevice(ValidDevice):

def __call__(self, dev_path):
dev = self._is_valid_device(dev_path)
if dev.is_partition:
super().get_device(dev_path)
return self._format_device(self._is_valid_device())

def _is_valid_device(self, raise_sys_exit=False):
super()._is_valid_device()
if self._device.is_partition:
raise argparse.ArgumentError(
None,
'{} is a partition, please pass '
'LVs or raw block devices'.format(dev_path))
return self._format_device(dev)
'LVs or raw block devices'.format(self.dev_path))
return self._device


class ValidBatchDataDevice(ValidBatchDevice, ValidDataDevice):
def __call__(self, dev_path):
super().get_device(dev_path)
return self._format_device(self._is_valid_device())

def _is_valid_device(self):
# if device is already used by ceph,
# leave the validation to Batch.get_deployment_layout()
# This way the idempotency isn't broken (especially when using --osds-per-device)
for lv in self._device.lvs:
if lv.tags.get('ceph.type') in ['db', 'wal', 'journal']:
return self._device
if self._device.used_by_ceph:
return self._device
super()._is_valid_device(raise_sys_exit=False)
return self._device


class OSDPath(object):
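The refactor above splits validation into get_device() / _is_valid_device() / _format_device() so that subclasses can layer extra checks on a shared base. A rough sketch of that call flow, using a simplified stand-in device object (not the real ceph_volume.util.device.Device) to show how a ValidDataDevice-style check composes on top of the base existence/partition checks:

import argparse

class FakeDevice(object):
    """Simplified stand-in for ceph_volume.util.device.Device (assumed attributes)."""
    def __init__(self, path, exists=True, has_gpt_headers=False,
                 has_partitions=False, has_fs=False, used_by_ceph=False):
        self.abspath = path
        self.exists = exists
        self.has_gpt_headers = has_gpt_headers
        self.has_partitions = has_partitions
        self.has_fs = has_fs
        self.used_by_ceph = used_by_ceph

class BaseValidator(object):
    """Mirrors the ValidDevice flow: resolve the device, then run base checks."""
    def __call__(self, dev_path):
        self._device = FakeDevice(dev_path)   # the real code builds Device(dev_path)
        self.dev_path = dev_path
        return self._is_valid_device()

    def _is_valid_device(self):
        if not self._device.exists:
            raise argparse.ArgumentError(
                None, 'Unable to proceed with non-existing device: %s' % self.dev_path)
        if self._device.has_partitions:
            raise RuntimeError('Device {} has partitions.'.format(self.dev_path))
        return self._device

class DataValidator(BaseValidator):
    """Adds the data-device check on top of the shared base checks."""
    def _is_valid_device(self):
        super()._is_valid_device()
        if self._device.has_fs and not self._device.used_by_ceph:
            raise RuntimeError('Device {} has a filesystem.'.format(self.dev_path))
        return self._device

print(DataValidator()('/dev/foo').abspath)   # passes all checks -> '/dev/foo'

The design point of the real change is the same as in this sketch: each subclass reuses the parent's checks via super() instead of re-resolving the device path itself.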
@@ -3,7 +3,7 @@
import logging
import os
from functools import total_ordering
from ceph_volume import sys_info
from ceph_volume import sys_info, process
from ceph_volume.api import lvm
from ceph_volume.util import disk, system
from ceph_volume.util.lsmdisk import LSMDisk

@@ -66,6 +66,7 @@ class Device(object):
{attr:<25} {value}"""

report_fields = [
'ceph_device',
'rejected_reasons',
'available',
'path',

@@ -104,6 +105,7 @@ class Device(object):
self._is_lvm_member = None
self._parse()
self.lsm_data = self.fetch_lsm(with_lsm)
self.ceph_device = None

self.available_lvm, self.rejected_reasons_lvm = self._check_lvm_reject_reasons()
self.available_raw, self.rejected_reasons_raw = self._check_raw_reject_reasons()

@@ -173,6 +175,7 @@ class Device(object):
self.abspath = lv.lv_path
self.vg_name = lv.vg_name
self.lv_name = lv.name
self.ceph_device = lvm.is_ceph_device(lv)
else:
dev = disk.lsblk(self.path)
self.blkid_api = disk.blkid(self.path)

@@ -181,6 +184,11 @@ class Device(object):
# always check is this is an lvm member
if device_type in ['part', 'disk']:
self._set_lvm_membership()
out, err, rc = process.call([
'ceph-bluestore-tool', 'show-label',
'--dev', self.path], verbose_on_failure=False)
if rc:
self.ceph_device = True

self.ceph_disk = CephDiskDevice(self)

@@ -301,6 +309,10 @@ class Device(object):
def exists(self):
return os.path.exists(self.abspath)

@property
def has_fs(self):
return 'TYPE' in self.blkid_api

@property
def has_gpt_headers(self):
return self.blkid_api.get("PTTYPE") == "gpt"
@@ -5,9 +5,10 @@ skip_missing_interpreters = true
[testenv]
deps=
pytest
pytest-xdist
mock
install_command=./tox_install_command.sh {opts} {packages}
commands=py.test -v {posargs:ceph_volume/tests} --ignore=ceph_volume/tests/functional
commands=py.test --numprocesses=auto -vv {posargs:ceph_volume/tests} --ignore=ceph_volume/tests/functional

[testenv:py3-flake8]
deps=flake8
@@ -792,17 +792,42 @@ class OrchestratorCli(OrchestratorClientMixin, MgrModule,
usage = """
Usage:
ceph orch daemon add osd host:device1,device2,...
ceph orch daemon add osd host:data_devices=device1,device2,db_devices=device3,osds_per_device=2,...
"""
if not svc_arg:
return HandleCommandResult(-errno.EINVAL, stderr=usage)
try:
host_name, block_device = svc_arg.split(":")
block_devices = block_device.split(',')
devs = DeviceSelection(paths=block_devices)
host_name, raw = svc_arg.split(":")
drive_group_spec = {
'data_devices': []
} # type: Dict
drv_grp_spec_arg = None
values = raw.split(',')
while values:
v = values[0].split(',', 1)[0]
if '=' in v:
drv_grp_spec_arg, value = v.split('=')
if drv_grp_spec_arg in ['data_devices',
'db_devices',
'wal_devices',
'journal_devices']:
drive_group_spec[drv_grp_spec_arg] = []
drive_group_spec[drv_grp_spec_arg].append(value)
else:
drive_group_spec[drv_grp_spec_arg] = value
elif drv_grp_spec_arg is not None:
drive_group_spec[drv_grp_spec_arg].append(v)
else:
drive_group_spec['data_devices'].append(v)
values.remove(v)

for dev_type in ['data_devices', 'db_devices', 'wal_devices', 'journal_devices']:
drive_group_spec[dev_type] = DeviceSelection(paths=drive_group_spec[dev_type]) if drive_group_spec.get(dev_type) else None

drive_group = DriveGroupSpec(
placement=PlacementSpec(host_pattern=host_name),
data_devices=devs,
method=method,
**drive_group_spec,
)
except (TypeError, KeyError, ValueError) as e:
msg = f"Invalid 'host:device' spec: '{svc_arg}': {e}" + usage
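The parsing loop added above turns the raw "host:key=value,..." string into a drive-group dict, where a bare value is appended to the most recently named device list and unprefixed paths default to data_devices. A standalone, simplified sketch of the same idea, written against plain dicts rather than the real DriveGroupSpec/DeviceSelection classes, to show what the documented example spec would roughly produce:

def parse_osd_spec(svc_arg):
    """Sketch of the drive-group parsing added in this commit (plain dicts only)."""
    host_name, raw = svc_arg.split(':')
    spec = {'data_devices': []}
    current = None
    for token in raw.split(','):
        if '=' in token:
            key, value = token.split('=')
            if key in ('data_devices', 'db_devices', 'wal_devices', 'journal_devices'):
                spec.setdefault(key, []).append(value)
                current = key
            else:
                spec[key] = value          # e.g. osds_per_device=2
                current = None
        elif current is not None:
            spec[current].append(token)    # bare value continues the last device list
        else:
            spec['data_devices'].append(token)
    return host_name, spec

print(parse_osd_spec('host1:data_devices=/dev/sda,/dev/sdb,db_devices=/dev/sdc,osds_per_device=2'))
# ('host1', {'data_devices': ['/dev/sda', '/dev/sdb'], 'db_devices': ['/dev/sdc'], 'osds_per_device': '2'})

The real handler then wraps each non-empty device list in a DeviceSelection and feeds the whole dict into DriveGroupSpec, as the hunk above shows.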
@@ -121,6 +121,13 @@ class DriveSelection(object):
for disk in self.disks:
logger.debug("Processing disk {}".format(disk.path))

if not disk.available and not disk.ceph_device:
logger.debug(
("Ignoring disk {}. "
"Disk is unavailable due to {}".format(disk.path, disk.rejected_reasons))
)
continue

if not self._has_mandatory_idents(disk):
logger.debug(
"Ignoring disk {}. Missing mandatory idents".format(
@@ -43,6 +43,7 @@ class Devices(object):

class Device(object):
report_fields = [
'ceph_device',
'rejected_reasons',
'available',
'path',

@@ -62,7 +63,8 @@ class Device(object):
lvs=None,  # type: Optional[List[str]]
device_id=None,  # type: Optional[str]
lsm_data=None,  # type: Optional[Dict[str, Dict[str, str]]]
created=None  # type: Optional[datetime.datetime]
created=None,  # type: Optional[datetime.datetime]
ceph_device=None  # type: Optional[bool]
):
self.path = path
self.sys_api = sys_api if sys_api is not None else {}  # type: Dict[str, Any]

@@ -72,6 +74,7 @@ class Device(object):
self.device_id = device_id
self.lsm_data = lsm_data if lsm_data is not None else {}  # type: Dict[str, Dict[str, str]]
self.created = created if created is not None else datetime_now()
self.ceph_device = ceph_device

def __eq__(self, other):
# type: (Any) -> bool

@@ -120,6 +123,7 @@ class Device(object):
'path': self.path if self.path is not None else 'unknown',
'lvs': self.lvs if self.lvs else 'None',
'available': str(self.available),
'ceph_device': str(self.ceph_device)
}
if not self.available and self.rejected_reasons:
device_desc['rejection reasons'] = self.rejected_reasons