ceph-volume: fix device unittest, mock has_bluestore_label, lint

This also adds a test case for has_bluestore_label.

Fixes: https://tracker.ceph.com/issues/43611

Signed-off-by: Jan Fajerski <jfajerski@suse.com>
Jan Fajerski 2020-01-15 15:55:45 +01:00
parent 05adb6474a
commit 91c7f5189d
7 changed files with 59 additions and 82 deletions
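
The heart of the test fix is that Device.has_bluestore_label is a property, so the patch_bluestore_label fixture added in the conftest.py hunk below patches it with new_callable=PropertyMock rather than a plain MagicMock. A standalone sketch of that pattern, using a made-up Disk class instead of ceph-volume's real Device and the stdlib unittest.mock instead of the mock package the tests import, purely so it runs on its own (all names here are illustrative):

    # Illustrative only: Disk stands in for ceph_volume.util.device.Device, whose
    # has_bluestore_label property would otherwise try to read the block device.
    from unittest.mock import patch, PropertyMock

    class Disk(object):
        @property
        def has_bluestore_label(self):
            raise IOError("would need to read a real block device")

    def demo():
        # Patching the property with PropertyMock makes plain attribute access
        # return the canned value instead of invoking the real getter.
        with patch.object(Disk, 'has_bluestore_label',
                          new_callable=PropertyMock) as mocked:
            mocked.return_value = False
            assert Disk().has_bluestore_label is False
            mocked.return_value = True   # individual tests can flip it
            assert Disk().has_bluestore_label is True

    if __name__ == '__main__':
        demo()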


@@ -3,12 +3,9 @@ import argparse
import logging
import os
from textwrap import dedent
from ceph_volume import process, conf, decorators, terminal, __release__, configuration
from ceph_volume.util import system, disk
from ceph_volume import process, conf, decorators, terminal
from ceph_volume.util import system
from ceph_volume.util import prepare as prepare_utils
from ceph_volume.util import encryption as encryption_utils
from ceph_volume.systemd import systemctl
from ceph_volume.api import lvm as api
from .list import direct_report
@@ -17,7 +14,6 @@ logger = logging.getLogger(__name__)
def activate_bluestore(meta, tmpfs, systemd):
# find the osd
osd_id = meta['osd_id']
osd_fsid = meta['osd_uuid']
# mount on tmpfs the osd directory
osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)


@@ -2,7 +2,6 @@ from __future__ import print_function
from textwrap import dedent
import logging
from ceph_volume.util import system
from ceph_volume.util.arg_validators import exclude_group_options
from ceph_volume import decorators, terminal
from .prepare import Prepare
from .activate import Activate
@@ -76,7 +75,7 @@ class Create(object):
if not self.args.bluestore:
terminal.error('must specify --bluestore (currently the only supported backend)')
raise SystemExit(1)
if not args.no_systemd:
if not self.args.no_systemd:
terminal.error('systemd support not yet implemented')
raise SystemExit(1)
self.create(self.args)


@@ -1,15 +1,9 @@
from __future__ import print_function
import argparse
import base64
import json
import logging
import os
from textwrap import dedent
from ceph_volume import decorators, terminal, conf, process
from ceph_volume.api import lvm
from ceph_volume.systemd import systemctl
from ceph_volume.util import arg_validators, system, disk, encryption
from ceph_volume.util.device import Device
from ceph_volume import decorators, process
logger = logging.getLogger(__name__)


@@ -1,11 +1,9 @@
from __future__ import print_function
import argparse
import json
import logging
from textwrap import dedent
from ceph_volume.util import prepare as prepare_utils
from ceph_volume.util import encryption as encryption_utils
from ceph_volume.util import system, disk
from ceph_volume.util import system
from ceph_volume import conf, decorators, terminal
from ceph_volume.devices.lvm.common import rollback_osd
from .common import create_parser
@@ -79,7 +77,6 @@ class Prepare(object):
@decorators.needs_root
def prepare(self):
secrets = {'cephx_secret': prepare_utils.create_key()}
cluster_fsid = self.get_cluster_fsid()
osd_fsid = self.args.osd_fsid or system.generate_uuid()
crush_device_class = self.args.crush_device_class
if crush_device_class:


@@ -1,23 +0,0 @@
from __future__ import print_function
import argparse
import base64
import json
import logging
import os
from textwrap import dedent
from ceph_volume import decorators, terminal, conf
from ceph_volume.api import lvm
from ceph_volume.systemd import systemctl
from ceph_volume.util import arg_validators, system, disk, encryption
from ceph_volume.util.device import Device
logger = logging.getLogger(__name__)
class Scan(object):
help = 'Capture metadata from all running ceph-disk OSDs, OSD data partition or directory'
def __init__(self, argv):
self.argv = argv
self._etc_path = '/etc/ceph/osd/'


@@ -1,6 +1,6 @@
import os
import pytest
from mock.mock import patch
from mock.mock import patch, PropertyMock
from ceph_volume.util import disk
from ceph_volume.util.constants import ceph_disk_guids
from ceph_volume.api import lvm as lvm_api
@@ -270,8 +270,16 @@ def patched_get_block_devs_lsblk():
yield p
@pytest.fixture
def device_info(monkeypatch):
def apply(devices=None, lsblk=None, lv=None, blkid=None, udevadm=None):
def patch_bluestore_label():
with patch('ceph_volume.util.device.Device.has_bluestore_label',
new_callable=PropertyMock) as p:
p.return_value = False
yield p
@pytest.fixture
def device_info(monkeypatch, patch_bluestore_label):
def apply(devices=None, lsblk=None, lv=None, blkid=None, udevadm=None,
has_bluestore_label=False):
devices = devices if devices else {}
lsblk = lsblk if lsblk else {}
blkid = blkid if blkid else {}
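
With device_info now requesting patch_bluestore_label, every test that uses device_info gets the property patched automatically through pytest's fixture chaining. A minimal standalone sketch of that chaining behaviour, with made-up fixture names rather than the ceph-volume ones:

    # Illustrative fixture chaining: requesting `outer` implicitly activates
    # `inner`, much like device_info now pulls in patch_bluestore_label.
    import pytest

    @pytest.fixture
    def inner():
        # Stand-in for the PropertyMock patch; active for the whole test.
        yield {'label_patched': True}

    @pytest.fixture
    def outer(inner):
        # Builds on top of the already-active inner fixture.
        return {'devices': {}, 'label_patched': inner['label_patched']}

    def test_outer_brings_inner_along(outer):
        assert outer['label_patched'] is True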


@@ -139,14 +139,14 @@ class TestDevice(object):
@pytest.mark.usefixtures("lsblk_ceph_disk_member",
"disable_kernel_queries",
"disable_lvm_queries")
def test_is_ceph_disk_lsblk(self, monkeypatch):
def test_is_ceph_disk_lsblk(self, monkeypatch, patch_bluestore_label):
disk = device.Device("/dev/sda")
assert disk.is_ceph_disk_member
@pytest.mark.usefixtures("blkid_ceph_disk_member",
"disable_kernel_queries",
"disable_lvm_queries")
def test_is_ceph_disk_blkid(self, monkeypatch):
def test_is_ceph_disk_blkid(self, monkeypatch, patch_bluestore_label):
monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
lambda path: {'PARTLABEL': ""})
disk = device.Device("/dev/sda")
@@ -155,7 +155,7 @@ class TestDevice(object):
@pytest.mark.usefixtures("lsblk_ceph_disk_member",
"disable_kernel_queries",
"disable_lvm_queries")
def test_is_ceph_disk_member_not_available_lsblk(self, monkeypatch):
def test_is_ceph_disk_member_not_available_lsblk(self, monkeypatch, patch_bluestore_label):
disk = device.Device("/dev/sda")
assert disk.is_ceph_disk_member
assert not disk.available
@@ -164,7 +164,7 @@ class TestDevice(object):
@pytest.mark.usefixtures("blkid_ceph_disk_member",
"disable_kernel_queries",
"disable_lvm_queries")
def test_is_ceph_disk_member_not_available_blkid(self, monkeypatch):
def test_is_ceph_disk_member_not_available_blkid(self, monkeypatch, patch_bluestore_label):
monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
lambda path: {'PARTLABEL': ""})
disk = device.Device("/dev/sda")
@@ -172,10 +172,46 @@ class TestDevice(object):
assert not disk.available
assert "Used by ceph-disk" in disk.rejected_reasons
def test_reject_removable_device(self, device_info):
data = {"/dev/sdb": {"removable": 1}}
device_info(devices=data)
disk = device.Device("/dev/sdb")
assert not disk.available
def test_accept_non_removable_device(self, device_info):
data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
device_info(devices=data)
disk = device.Device("/dev/sdb")
assert disk.available
def test_reject_readonly_device(self, device_info):
data = {"/dev/cdrom": {"ro": 1}}
device_info(devices=data)
disk = device.Device("/dev/cdrom")
assert not disk.available
def test_reject_smaller_than_5gb(self, device_info):
data = {"/dev/sda": {"size": 5368709119}}
device_info(devices=data)
disk = device.Device("/dev/sda")
assert not disk.available, 'too small device is available'
def test_accept_non_readonly_device(self, device_info):
data = {"/dev/sda": {"ro": 0, "size": 5368709120}}
device_info(devices=data)
disk = device.Device("/dev/sda")
assert disk.available
def test_reject_bluestore_device(self, monkeypatch, patch_bluestore_label):
patch_bluestore_label.return_value = True
disk = device.Device("/dev/sda")
assert not disk.available
assert "Has BlueStore device label" in disk.rejected_reasons
@pytest.mark.usefixtures("device_info_not_ceph_disk_member",
"disable_lvm_queries",
"disable_kernel_queries")
def test_is_not_ceph_disk_member_lsblk(self):
def test_is_not_ceph_disk_member_lsblk(self, patch_bluestore_label):
disk = device.Device("/dev/sda")
assert disk.is_ceph_disk_member is False
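
The availability tests that move into TestDevice above all follow one pattern: construct a Device from canned data, then assert on available and rejected_reasons. A compressed standalone sketch of that pattern with a made-up FakeDevice (the real checks live in ceph_volume.util.device and differ in detail; only the 'Has BlueStore device label' string is taken from the tests above):

    # FakeDevice is illustrative only; reason strings other than the BlueStore
    # one are invented for the sketch, and 5368709120 is the size boundary the
    # tests above use (5368709119 rejected, 5368709120 accepted).
    class FakeDevice(object):
        def __init__(self, removable=0, ro=0, size=5368709120,
                     has_bluestore_label=False):
            self.rejected_reasons = []
            if removable:
                self.rejected_reasons.append('removable')
            if ro:
                self.rejected_reasons.append('read-only')
            if size < 5368709120:
                self.rejected_reasons.append('too small')
            if has_bluestore_label:
                self.rejected_reasons.append('Has BlueStore device label')
            self.available = not self.rejected_reasons

    assert not FakeDevice(has_bluestore_label=True).available
    assert 'Has BlueStore device label' in FakeDevice(has_bluestore_label=True).rejected_reasons
    assert FakeDevice().available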
@@ -387,47 +423,17 @@ class TestCephDiskDevice(object):
@pytest.mark.usefixtures("blkid_ceph_disk_member",
"disable_kernel_queries",
"disable_lvm_queries")
def test_is_member_blkid(self, monkeypatch):
def test_is_member_blkid(self, monkeypatch, patch_bluestore_label):
monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
lambda path: {'PARTLABEL': ""})
disk = device.CephDiskDevice(device.Device("/dev/sda"))
assert disk.is_member is True
def test_reject_removable_device(self, device_info):
data = {"/dev/sdb": {"removable": 1}}
device_info(devices=data)
disk = device.Device("/dev/sdb")
assert not disk.available
def test_accept_non_removable_device(self, device_info):
data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
device_info(devices=data)
disk = device.Device("/dev/sdb")
assert disk.available
def test_reject_readonly_device(self, device_info):
data = {"/dev/cdrom": {"ro": 1}}
device_info(devices=data)
disk = device.Device("/dev/cdrom")
assert not disk.available
def test_reject_smaller_than_5gb(self, device_info):
data = {"/dev/sda": {"size": 5368709119}}
device_info(devices=data)
disk = device.Device("/dev/sda")
assert not disk.available, 'too small device is available'
def test_accept_non_readonly_device(self, device_info):
data = {"/dev/sda": {"ro": 0, "size": 5368709120}}
device_info(devices=data)
disk = device.Device("/dev/sda")
assert disk.available
@pytest.mark.usefixtures("lsblk_ceph_disk_member",
"disable_kernel_queries",
"disable_lvm_queries")
def test_is_member_lsblk(self):
def test_is_member_lsblk(self, patch_bluestore_label):
disk = device.CephDiskDevice(device.Device("/dev/sda"))
assert disk.is_member is True