Merge pull request #17606 from ceph/wip-bz1478598

ceph-volume allow filtering by `uuid`, do not require osd id 

Reviewed-by: Andrew Schoen <aschoen@redhat.com>
commit e5e50f75ce
Alfredo Deza authored 2017-09-11 15:44:42 -04:00, committed by GitHub
8 changed files with 146 additions and 44 deletions
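
In short: `lvm activate` can now locate an OSD by `ceph.osd_fsid` alone rather than requiring both the osd id and the fsid; the `filter()` implementations in the lvm `api` are fixed so that a result must match every requested tag instead of any single one; the systemd trigger's uuid parsing no longer truncates uuids that happen to contain the osd id as a substring; and the test Vagrantfile stops hard-coding a `ceph-` segment into hostnames and Parallels VM names. Unit tests cover each behavior change.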


@@ -63,7 +63,10 @@ class Activate(object):
     def activate(self, args):
         lvs = api.Volumes()
         # filter them down for the OSD ID and FSID we need to activate
-        lvs.filter(lv_tags={'ceph.osd_id': args.osd_id, 'ceph.osd_fsid': args.osd_fsid})
+        if args.osd_id and args.osd_fsid:
+            lvs.filter(lv_tags={'ceph.osd_id': args.osd_id, 'ceph.osd_fsid': args.osd_fsid})
+        elif args.osd_fsid and not args.osd_id:
+            lvs.filter(lv_tags={'ceph.osd_fsid': args.osd_fsid})
         if not lvs:
             raise RuntimeError('could not find osd.%s with fsid %s' % (args.osd_id, args.osd_fsid))
         activate_filestore(lvs)
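
The new branch means that when only an `osd_fsid` is supplied, the `ceph.osd_id` key is left out of the tag query entirely, so a lone fsid is enough to locate the OSD. A minimal, self-contained sketch of that selection logic (the `find_osd` helper and plain-dict volumes are illustrative, not ceph-volume code):

```python
# Hypothetical sketch of the new selection behavior, using plain dicts
# in place of ceph-volume's Volume objects.
def find_osd(volumes, osd_id=None, osd_fsid=None):
    tags = {}
    if osd_id and osd_fsid:
        tags = {'ceph.osd_id': osd_id, 'ceph.osd_fsid': osd_fsid}
    elif osd_fsid and not osd_id:
        tags = {'ceph.osd_fsid': osd_fsid}
    # keep only volumes where every requested tag matches
    return [v for v in volumes
            if all(v.get(k) == str(val) for k, val in tags.items())]

volumes = [{'ceph.osd_id': '0', 'ceph.osd_fsid': 'aaaa'},
           {'ceph.osd_id': '1', 'ceph.osd_fsid': 'bbbb'}]
assert find_osd(volumes, osd_fsid='bbbb') == [volumes[1]]  # no osd id needed
assert find_osd(volumes, osd_id='0', osd_fsid='aaaa') == [volumes[0]]
```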


@@ -284,13 +284,10 @@ class VolumeGroups(list):
         # actual filtered list if any filters were applied
         if vg_tags:
             tag_filtered = []
-            for k, v in vg_tags.items():
-                for volume in filtered:
-                    if volume.tags.get(k) == str(v):
-                        if volume not in tag_filtered:
-                            tag_filtered.append(volume)
-            # return the tag_filtered volumes here, the `filtered` list is no
-            # longer useable
+            for volume in filtered:
+                matches = all(volume.tags.get(k) == str(v) for k, v in vg_tags.items())
+                if matches:
+                    tag_filtered.append(volume)
             return tag_filtered

         return filtered
@@ -390,13 +387,11 @@ class Volumes(list):
         # actual filtered list if any filters were applied
         if lv_tags:
             tag_filtered = []
-            for k, v in lv_tags.items():
-                for volume in filtered:
-                    if volume.tags.get(k) == str(v):
-                        if volume not in tag_filtered:
-                            tag_filtered.append(volume)
-            # return the tag_filtered volumes here, the `filtered` list is no
-            # longer useable
+            for volume in filtered:
+                # all the tags we got need to match on the volume
+                matches = all(volume.tags.get(k) == str(v) for k, v in lv_tags.items())
+                if matches:
+                    tag_filtered.append(volume)
             return tag_filtered

         return filtered
@@ -493,11 +488,10 @@ class PVolumes(list):
         # or is an actual filtered list if any filters were applied
         if pv_tags:
             tag_filtered = []
-            for k, v in pv_tags.items():
-                for pvolume in filtered:
-                    if pvolume.tags.get(k) == str(v):
-                        if pvolume not in tag_filtered:
-                            tag_filtered.append(pvolume)
+            for pvolume in filtered:
+                matches = all(pvolume.tags.get(k) == str(v) for k, v in pv_tags.items())
+                if matches:
+                    tag_filtered.append(pvolume)
             # return the tag_filtered pvolumes here, the `filtered` list is no
             # longer useable
             return tag_filtered
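
All three `filter()` implementations receive the same fix, and it is the crux of the bug: the old nested loop appended a volume as soon as any one tag matched, so a query for `{'ceph.osd_id': '0', 'ceph.osd_fsid': 'abc'}` could return a volume with the right id but the wrong fsid. The rewrite keeps a volume only when every tag matches. A runnable side-by-side sketch (function names and plain dicts are illustrative):

```python
# Old behavior: a volume is kept if ANY single tag matches (buggy).
def filter_any(volumes, tags):
    matched = []
    for k, v in tags.items():
        for vol in volumes:
            if vol.get(k) == str(v) and vol not in matched:
                matched.append(vol)
    return matched

# New behavior: a volume is kept only if ALL tags match.
def filter_all(volumes, tags):
    return [vol for vol in volumes
            if all(vol.get(k) == str(v) for k, v in tags.items())]

vols = [{'ceph.osd_id': '0', 'ceph.osd_fsid': 'wrong'}]
query = {'ceph.osd_id': '0', 'ceph.osd_fsid': 'abc'}
assert filter_any(vols, query) == vols  # false positive on the id alone
assert filter_all(vols, query) == []    # correctly rejected
```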


@@ -18,7 +18,7 @@ def parse_osd_id(string):
 def parse_osd_uuid(string):
     osd_id = '%s-' % parse_osd_id(string)
     # remove the id first
-    osd_uuid = string.split(osd_id)[-1]
+    osd_uuid = string.split(osd_id, 1)[-1]
     if not osd_uuid:
         raise SuffixParsingError('OSD uuid', string)
     return osd_uuid
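
The added `maxsplit=1` matters whenever the osd id shows up again inside the uuid itself: without it, `str.split` cuts at every occurrence and the trailing `[-1]` keeps only the last fragment. A quick demonstration with the same uuid the new test uses:

```python
string = '1-abc959fd-1ec9-4864-b141-3154f9b9f8ed'
osd_id = '1-'  # the '%s-' prefix built from the parsed osd id (here osd.1)

# '1-' also occurs inside 'b141-', so splitting on every occurrence
# truncates the uuid:
assert string.split(osd_id)[-1] == '3154f9b9f8ed'  # wrong
# with maxsplit=1 only the leading id is stripped:
assert string.split(osd_id, 1)[-1] == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed'
```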


@@ -1,4 +1,5 @@
 import pytest
+from ceph_volume.devices.lvm import api


 class Capture(object):
@@ -14,3 +15,28 @@ class Capture(object):
 @pytest.fixture
 def capture():
     return Capture()
+
+
+@pytest.fixture
+def volumes(monkeypatch):
+    monkeypatch.setattr('ceph_volume.process.call', lambda x: ('', '', 0))
+    volumes = api.Volumes()
+    volumes._purge()
+    return volumes
+
+
+@pytest.fixture
+def volume_groups(monkeypatch):
+    monkeypatch.setattr('ceph_volume.process.call', lambda x: ('', '', 0))
+    vgs = api.VolumeGroups()
+    vgs._purge()
+    return vgs
+
+
+@pytest.fixture
+def is_root(monkeypatch):
+    """
+    Patch ``os.getuid()`` so that ceph-volume's decorators that ensure a user
+    is root (or is sudoing to superuser) can continue as-is
+    """
+    monkeypatch.setattr('os.getuid', lambda: 0)
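
Stubbing `ceph_volume.process.call` with `lambda x: ('', '', 0)` keeps these fixtures from shelling out to the real LVM tools: the stub mimics a `(stdout, stderr, returncode)` result with no output and a clean exit, so `api.Volumes()` starts empty and tests populate it by hand. A hypothetical test using the new fixtures, mirroring the pattern of the tests added in this PR:

```python
def test_finds_data_volume(volumes):
    # the fixture already purged any real state; build the scenario by hand
    osd = api.Volume(lv_name='volume1', lv_path='/dev/vg/lv',
                     lv_tags='ceph.type=data,ceph.osd_id=0')
    volumes.append(osd)
    volumes.filter(lv_tags={'ceph.type': 'data', 'ceph.osd_id': '0'})
    assert volumes == [osd]
```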


@@ -0,0 +1,34 @@
+import pytest
+from ceph_volume.devices.lvm import activate, api
+
+
+class Args(object):
+
+    def __init__(self, **kw):
+        for k, v in kw.items():
+            setattr(self, k, v)
+
+
+class TestActivate(object):
+
+    # these tests are very functional, hence the heavy patching, it is hard to
+    # test the negative side effect with an actual functional run, so we must
+    # setup a perfect scenario for this test to check it can really work
+    # with/without osd_id
+    def test_no_osd_id_matches_fsid(self, is_root, volumes, monkeypatch, capture):
+        FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo', lv_tags="ceph.osd_fsid=1234")
+        volumes.append(FooVolume)
+        monkeypatch.setattr(api, 'Volumes', lambda: volumes)
+        monkeypatch.setattr(activate, 'activate_filestore', capture)
+        args = Args(osd_id=None, osd_fsid='1234')
+        activate.Activate([]).activate(args)
+        assert capture.calls[0]['args'][0] == [FooVolume]
+
+    def test_no_osd_id_no_matching_fsid(self, is_root, volumes, monkeypatch, capture):
+        FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo', lv_tags="ceph.osd_fsid=11234")
+        volumes.append(FooVolume)
+        monkeypatch.setattr(api, 'Volumes', lambda: volumes)
+        monkeypatch.setattr(activate, 'activate_filestore', capture)
+        args = Args(osd_id=None, osd_fsid='1234')
+        with pytest.raises(RuntimeError):
+            activate.Activate([]).activate(args)
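
`capture` stands in for `activate_filestore` and records its invocations. The `Capture` class body is not part of this diff, but `capture.calls[0]['args'][0]` suggests a callable that logs positional and keyword arguments, roughly like this sketch (an assumption, not the committed code):

```python
class Capture(object):

    def __init__(self, *a, **kw):
        self.calls = []

    def __call__(self, *args, **kwargs):
        # remember every call so tests can assert on what was passed
        self.calls.append({'args': args, 'kwargs': kwargs})
```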


@@ -131,13 +131,35 @@ class TestGetPV(object):
         monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes)
         assert api.get_pv(pv_uuid='0000') == FooPVolume

-    def test_single_pv_is_matched_by_uuid(self, volumes, monkeypatch):
-        FooVolume = api.Volume(
-            lv_name='foo', lv_path='/dev/vg/foo',
-            lv_uuid='1111', lv_tags="ceph.type=data")
-        volumes.append(FooVolume)
-        monkeypatch.setattr(api, 'Volumes', lambda: volumes)
-        assert api.get_lv(lv_uuid='1111') == FooVolume
+    def test_single_pv_is_matched_by_uuid(self, pvolumes, monkeypatch):
+        FooPVolume = api.PVolume(
+            pv_name='/dev/vg/foo',
+            pv_uuid='1111', pv_tags="ceph.type=data")
+        pvolumes.append(FooPVolume)
+        monkeypatch.setattr(api, 'PVolumes', lambda: pvolumes)
+        assert api.get_pv(pv_uuid='1111') == FooPVolume
+
+
+class TestPVolumes(object):
+
+    def test_filter_by_tag_does_not_match_one(self, pvolumes, monkeypatch):
+        pv_tags = "ceph.type=journal,ceph.osd_id=1,ceph.fsid=000-aaa"
+        FooPVolume = api.PVolume(
+            pv_name='/dev/vg/foo',
+            pv_uuid='1111', pv_tags=pv_tags)
+        pvolumes.append(FooPVolume)
+        pvolumes.filter(pv_tags={'ceph.type': 'journal', 'ceph.osd_id': '2'})
+        assert pvolumes == []
+
+    def test_filter_by_tags_matches(self, pvolumes, monkeypatch):
+        pv_tags = "ceph.type=journal,ceph.osd_id=1"
+        FooPVolume = api.PVolume(
+            pv_name='/dev/vg/foo',
+            pv_uuid='1111', pv_tags=pv_tags)
+        pvolumes.append(FooPVolume)
+        pvolumes.filter(pv_tags={'ceph.type': 'journal', 'ceph.osd_id': '1'})
+        assert pvolumes == [FooPVolume]
+
+
 class TestGetVG(object):
class TestGetVG(object):
@@ -184,6 +206,16 @@ class TestVolumes(object):
         assert len(volumes) == 1
         assert volumes[0].lv_name == 'volume1'

+    def test_filter_by_tag_does_not_match_one(self, volumes):
+        lv_tags = "ceph.type=data,ceph.fsid=000-aaa"
+        osd = api.Volume(lv_name='volume1', lv_path='/dev/vg/lv', lv_tags=lv_tags)
+        journal = api.Volume(lv_name='volume2', lv_path='/dev/vg/lv', lv_tags='ceph.osd_id=1,ceph.type=journal')
+        volumes.append(osd)
+        volumes.append(journal)
+        # note the different osd_id!
+        volumes.filter(lv_tags={'ceph.type': 'data', 'ceph.osd_id': '2'})
+        assert volumes == []
+
     def test_filter_by_vg_name(self, volumes):
         lv_tags = "ceph.type=data,ceph.fsid=000-aaa"
         osd = api.Volume(lv_name='volume1', vg_name='ceph_vg', lv_tags=lv_tags)
@@ -257,6 +289,13 @@ class TestVolumeGroups(object):
         assert len(volume_groups) == 1
         assert volume_groups[0].vg_name == 'volume1'

+    def test_filter_by_tag_does_not_match_one(self, volume_groups):
+        vg_tags = "ceph.group=dmcache,ceph.disk_type=ssd"
+        osd = api.VolumeGroup(vg_name='volume1', vg_path='/dev/vg/lv', vg_tags=vg_tags)
+        volume_groups.append(osd)
+        volume_groups.filter(vg_tags={'ceph.group': 'data', 'ceph.disk_type': 'ssd'})
+        assert volume_groups == []
+
     def test_filter_by_vg_name(self, volume_groups):
         vg_tags = "ceph.type=data,ceph.fsid=000-aaa"
         osd = api.VolumeGroup(vg_name='ceph_vg', vg_tags=vg_tags)
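
These tests hand the constructors tags in LVM's on-disk form, a comma-separated `key=value` string, while `filter()` takes a dict, so the `Volume`/`VolumeGroup`/`PVolume` constructors presumably parse the string into a `tags` mapping. A rough sketch of that parsing (an assumption about the helper, not the actual api code):

```python
def parse_tags(raw):
    # 'ceph.type=journal,ceph.osd_id=1' -> {'ceph.type': 'journal', 'ceph.osd_id': '1'}
    if not raw:
        return {}
    return dict(pair.split('=', 1) for pair in raw.split(','))

assert parse_tags('ceph.type=journal,ceph.osd_id=1') == {
    'ceph.type': 'journal', 'ceph.osd_id': '1'}
```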


@@ -36,4 +36,10 @@ class TestParseOSDUUID(object):
         with pytest.raises(exceptions.SuffixParsingError):
             trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo')
+
+    def test_robust_double_id_in_uuid(self):
+        # it is possible to have the id in the SHA1, this should
+        # be fine parsing that
+        result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed")
+        assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed'


@@ -65,7 +65,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   (0..CLIENTS - 1).each do |i|
     config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
       client.vm.box = CLIENT_BOX
-      client.vm.hostname = "#{LABEL_PREFIX}ceph-client#{i}"
+      client.vm.hostname = "#{LABEL_PREFIX}client#{i}"
       if ASSIGN_STATIC_IP
         client.vm.network :private_network,
           ip: "#{PUBLIC_SUBNET}.4#{i}"
@@ -88,7 +88,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

       # Parallels
       client.vm.provider "parallels" do |prl|
-        prl.name = "ceph-client#{i}"
+        prl.name = "client#{i}"
         prl.memory = "#{MEMORY}"
       end

@@ -102,7 +102,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw|
       rgw.vm.box = BOX
       rgw.vm.box_url = BOX_URL
-      rgw.vm.hostname = "#{LABEL_PREFIX}ceph-rgw#{i}"
+      rgw.vm.hostname = "#{LABEL_PREFIX}rgw#{i}"
       if ASSIGN_STATIC_IP
         rgw.vm.network :private_network,
           ip: "#{PUBLIC_SUBNET}.5#{i}"
@@ -126,7 +126,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

       # Parallels
       rgw.vm.provider "parallels" do |prl|
-        prl.name = "ceph-rgw#{i}"
+        prl.name = "rgw#{i}"
         prl.memory = "#{MEMORY}"
       end

@@ -140,7 +140,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     config.vm.define "nfs#{i}" do |nfs|
       nfs.vm.box = BOX
       nfs.vm.box_url = BOX_URL
-      nfs.vm.hostname = "ceph-nfs#{i}"
+      nfs.vm.hostname = "nfs#{i}"
       if ASSIGN_STATIC_IP
         nfs.vm.network :private_network,
           ip: "#{PUBLIC_SUBNET}.6#{i}"
@@ -164,7 +164,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

       # Parallels
       nfs.vm.provider "parallels" do |prl|
-        prl.name = "ceph-nfs#{i}"
+        prl.name = "nfs#{i}"
         prl.memory = "#{MEMORY}"
       end

@@ -178,7 +178,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
      mds.vm.box = BOX
       mds.vm.box_url = BOX_URL
-      mds.vm.hostname = "#{LABEL_PREFIX}ceph-mds#{i}"
+      mds.vm.hostname = "#{LABEL_PREFIX}mds#{i}"
       if ASSIGN_STATIC_IP
         mds.vm.network :private_network,
           ip: "#{PUBLIC_SUBNET}.7#{i}"
@@ -200,7 +200,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end

       # Parallels
       mds.vm.provider "parallels" do |prl|
-        prl.name = "ceph-mds#{i}"
+        prl.name = "mds#{i}"
         prl.memory = "#{MEMORY}"
       end

@@ -214,7 +214,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     config.vm.define "#{LABEL_PREFIX}rbd_mirror#{i}" do |rbd_mirror|
       rbd_mirror.vm.box = BOX
       rbd_mirror.vm.box_url = BOX_URL
-      rbd_mirror.vm.hostname = "#{LABEL_PREFIX}ceph-rbd-mirror#{i}"
+      rbd_mirror.vm.hostname = "#{LABEL_PREFIX}rbd-mirror#{i}"
       if ASSIGN_STATIC_IP
         rbd_mirror.vm.network :private_network,
           ip: "#{PUBLIC_SUBNET}.8#{i}"
@@ -236,7 +236,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end

       # Parallels
       rbd_mirror.vm.provider "parallels" do |prl|
-        prl.name = "ceph-rbd-mirror#{i}"
+        prl.name = "rbd-mirror#{i}"
         prl.memory = "#{MEMORY}"
       end

@@ -250,7 +250,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     config.vm.define "#{LABEL_PREFIX}iscsi_gw#{i}" do |iscsi_gw|
       iscsi_gw.vm.box = BOX
       iscsi_gw.vm.box_url = BOX_URL
-      iscsi_gw.vm.hostname = "#{LABEL_PREFIX}ceph-iscsi-gw#{i}"
+      iscsi_gw.vm.hostname = "#{LABEL_PREFIX}iscsi-gw#{i}"
       if ASSIGN_STATIC_IP
         iscsi_gw.vm.network :private_network,
           ip: "#{PUBLIC_SUBNET}.9#{i}"
@@ -272,7 +272,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
       end

       # Parallels
       iscsi_gw.vm.provider "parallels" do |prl|
-        prl.name = "ceph-iscsi-gw#{i}"
+        prl.name = "iscsi-gw#{i}"
         prl.memory = "#{MEMORY}"
       end

@@ -286,7 +286,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon|
       mon.vm.box = BOX
       mon.vm.box_url = BOX_URL
-      mon.vm.hostname = "#{LABEL_PREFIX}ceph-mon#{i}"
+      mon.vm.hostname = "#{LABEL_PREFIX}mon#{i}"
       if ASSIGN_STATIC_IP
         mon.vm.network :private_network,
           ip: "#{PUBLIC_SUBNET}.1#{i}"
@@ -309,7 +309,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

       # Parallels
       mon.vm.provider "parallels" do |prl|
-        prl.name = "ceph-mon#{i}"
+        prl.name = "mon#{i}"
         prl.memory = "#{MEMORY}"
       end

@@ -323,7 +323,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
       osd.vm.box = BOX
       osd.vm.box_url = BOX_URL
-      osd.vm.hostname = "#{LABEL_PREFIX}ceph-osd#{i}"
+      osd.vm.hostname = "#{LABEL_PREFIX}osd#{i}"
       if ASSIGN_STATIC_IP
         osd.vm.network :private_network,
           ip: "#{PUBLIC_SUBNET}.10#{i}"
@@ -378,7 +378,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

       # Parallels
       osd.vm.provider "parallels" do |prl|
-        prl.name = "ceph-osd#{i}"
+        prl.name = "osd#{i}"
         prl.memory = "#{MEMORY}"
         (0..1).each do |d|
           prl.customize ["set", :id,