"""
ceph_objectstore_tool - Simple test of ceph-objectstore-tool utility
"""
from io import BytesIO

import contextlib
import json
import logging
import os
import sys
import tempfile
import time

from tasks import ceph_manager
from tasks.util.rados import (rados, create_replicated_pool, create_ec_pool)
from teuthology import misc as teuthology
from teuthology.orchestra import run
from teuthology.exceptions import CommandFailedError

# from util.rados import (rados, create_ec_pool,
#                         create_replicated_pool,
#                         create_cache_pool)

log = logging.getLogger(__name__)

# Should get cluster name "ceph" from somewhere
# and normal path from osd_data and osd_journal in conf
FSPATH = "/var/lib/ceph/osd/ceph-{id}"
JPATH = "/var/lib/ceph/osd/ceph-{id}/journal"
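# ({id} in the two paths above is filled in per OSD when the
# ceph-objectstore-tool command line is assembled in test_objectstore().)

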
def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR,
                         BASE_NAME, DATALINECOUNT):
    objects = range(1, NUM_OBJECTS + 1)
    for i in objects:
        NAME = BASE_NAME + "{num}".format(num=i)
        LOCALNAME = os.path.join(DATADIR, NAME)

        dataline = range(DATALINECOUNT)
        fd = open(LOCALNAME, "w")
        data = "This is the data for " + NAME + "\n"
        for _ in dataline:
            fd.write(data)
        fd.close()


def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
                          BASE_NAME, DATALINECOUNT):
    objects = range(1, NUM_OBJECTS + 1)
    for i in objects:
        NAME = BASE_NAME + "{num}".format(num=i)
        DDNAME = os.path.join(DATADIR, NAME)

        remote.run(args=['rm', '-f', DDNAME])

        dataline = range(DATALINECOUNT)
        data = "This is the data for " + NAME + "\n"
        DATA = ""
        for _ in dataline:
            DATA += data
        remote.write_file(DDNAME, DATA)
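

# cod_setup() fills the pool through librados and records each object's
# expected xattrs and omap entries in db so that later steps can verify
# what ceph-objectstore-tool reports against it.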
def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR,
              BASE_NAME, DATALINECOUNT, POOL, db, ec):
    ERRORS = 0
    log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS))

    objects = range(1, NUM_OBJECTS + 1)
    for i in objects:
        NAME = BASE_NAME + "{num}".format(num=i)
        DDNAME = os.path.join(DATADIR, NAME)

        proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME],
                     wait=False)
        # proc = remote.run(args=['rados', '-p', POOL, 'put', NAME, DDNAME])
        ret = proc.wait()
        if ret != 0:
            log.critical("Rados put failed with status {ret}".
                         format(ret=proc.exitstatus))
            sys.exit(1)

        db[NAME] = {}

        # Object i gets i-1 xattrs and i-1 omap entries (key 0 is skipped)
        keys = range(i)
        db[NAME]["xattr"] = {}
        for k in keys:
            if k == 0:
                continue
            mykey = "key{i}-{k}".format(i=i, k=k)
            myval = "val{i}-{k}".format(i=i, k=k)
            proc = remote.run(args=['rados', '-p', POOL, 'setxattr',
                                    NAME, mykey, myval])
            ret = proc.wait()
            if ret != 0:
                log.error("setxattr failed with {ret}".format(ret=ret))
                ERRORS += 1
            db[NAME]["xattr"][mykey] = myval

        # Erasure coded pools don't support omap
        if ec:
            continue

        # Create omap header in all objects but REPobject1
        if i != 1:
            myhdr = "hdr{i}".format(i=i)
            proc = remote.run(args=['rados', '-p', POOL, 'setomapheader',
                                    NAME, myhdr])
            ret = proc.wait()
            if ret != 0:
                log.critical("setomapheader failed with {ret}".format(ret=ret))
                ERRORS += 1
            db[NAME]["omapheader"] = myhdr

        db[NAME]["omap"] = {}
        for k in keys:
            if k == 0:
                continue
            mykey = "okey{i}-{k}".format(i=i, k=k)
            myval = "oval{i}-{k}".format(i=i, k=k)
            proc = remote.run(args=['rados', '-p', POOL, 'setomapval',
                                    NAME, mykey, myval])
            ret = proc.wait()
            if ret != 0:
                log.critical("setomapval failed with {ret}".format(ret=ret))
                ERRORS += 1
            db[NAME]["omap"][mykey] = myval

    return ERRORS


def get_lines(filename):
    tmpfd = open(filename, "r")
    line = True
    lines = []
    while line:
        line = tmpfd.readline().rstrip('\n')
        if line:
            lines += [line]
    tmpfd.close()
    os.unlink(filename)
    return lines


@contextlib.contextmanager
def task(ctx, config):
    """
    Run ceph_objectstore_tool test

    The config should be as follows::

        ceph_objectstore_tool:
          objects: 20 # <number of objects>
          pgnum: 12
    """

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'ceph_objectstore_tool task only accepts a dict for configuration'

    log.info('Beginning ceph_objectstore_tool...')

    log.debug(config)
    log.debug(ctx)
    clients = ctx.cluster.only(teuthology.is_type('client'))
    assert len(clients.remotes) > 0, 'Must specify at least 1 client'
    (cli_remote, _) = clients.remotes.popitem()
    log.debug(cli_remote)

    # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys()))
    # client = clients.popitem()
    # log.info(client)
    osds = ctx.cluster.only(teuthology.is_type('osd'))
    log.info("OSDS")
    log.info(osds)
    log.info(osds.remotes)

    manager = ctx.managers['ceph']
    while (len(manager.get_osd_status()['up']) !=
           len(manager.get_osd_status()['raw'])):
        time.sleep(10)
    while (len(manager.get_osd_status()['in']) !=
           len(manager.get_osd_status()['up'])):
        time.sleep(10)
    manager.raw_cluster_cmd('osd', 'set', 'noout')
    manager.raw_cluster_cmd('osd', 'set', 'nodown')
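
    # With noout/nodown set, the OSDs killed below stay "up" and "in" as
    # far as the monitors are concerned, so no recovery or rebalancing is
    # triggered while ceph-objectstore-tool works on their offline stores.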

    PGNUM = config.get('pgnum', 12)
    log.info("pgnum: {num}".format(num=PGNUM))

    ERRORS = 0

    REP_POOL = "rep_pool"
    REP_NAME = "REPobject"
    create_replicated_pool(cli_remote, REP_POOL, PGNUM)
    ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME)

    EC_POOL = "ec_pool"
    EC_NAME = "ECobject"
    create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
    ERRORS += test_objectstore(ctx, config, cli_remote,
                               EC_POOL, EC_NAME, ec=True)

    if ERRORS == 0:
        log.info("TEST PASSED")
    else:
        log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS))

    assert ERRORS == 0

    try:
        yield
    finally:
        log.info('Ending ceph_objectstore_tool')


def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
    manager = ctx.managers['ceph']

    osds = ctx.cluster.only(teuthology.is_type('osd'))

    TEUTHDIR = teuthology.get_testdir(ctx)
    DATADIR = os.path.join(TEUTHDIR, "ceph.data")
    DATALINECOUNT = 10000
    ERRORS = 0
    NUM_OBJECTS = config.get('objects', 10)
    log.info("objects: {num}".format(num=NUM_OBJECTS))

    pool_dump = manager.get_pool_dump(REP_POOL)
    REPID = pool_dump['pool']

    log.debug("repid={num}".format(num=REPID))

    db = {}

    LOCALDIR = tempfile.mkdtemp("cod")

    cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR,
                         REP_NAME, DATALINECOUNT)
    allremote = []
    allremote.append(cli_remote)
    allremote += list(osds.remotes.keys())
    allremote = list(set(allremote))
    for remote in allremote:
        cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
                              REP_NAME, DATALINECOUNT)

    ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR,
                        REP_NAME, DATALINECOUNT, REP_POOL, db, ec)

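    # Map each OSD id to the pg ids it hosts for this pool; erasure coded
    # pgs get the "{pgid}s{shard}" form that ceph-objectstore-tool expects
    # for --pgid on EC shards.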
    pgs = {}
    for stats in manager.get_pg_stats():
        if stats["pgid"].find(str(REPID) + ".") != 0:
            continue
        if pool_dump["type"] == ceph_manager.PoolType.REPLICATED:
            for osd in stats["acting"]:
                pgs.setdefault(osd, []).append(stats["pgid"])
        elif pool_dump["type"] == ceph_manager.PoolType.ERASURE_CODED:
            shard = 0
            for osd in stats["acting"]:
                pgs.setdefault(osd, []).append("{pgid}s{shard}".
                                               format(pgid=stats["pgid"],
                                                      shard=shard))
                shard += 1
        else:
            raise Exception("{pool} has an unexpected type {type}".
                            format(pool=REP_POOL, type=pool_dump["type"]))

    log.info(pgs)
    log.info(db)

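    # ceph-objectstore-tool operates directly on an OSD's store, so every
    # OSD daemon must be stopped before running the tool against it.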
    for osd in manager.get_osd_status()['up']:
        manager.kill_osd(osd)
    time.sleep(5)

    pgswithobjects = set()
    objsinpg = {}

    # Test --op list and generate json for all objects
    log.info("Test --op list by generating json for all objects")
    prefix = ("sudo ceph-objectstore-tool "
              "--data-path {fpath} "
              "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH)
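    # Each line of "--op list" output is a JSON pair [pgid, object-info];
    # the object-info blob is recorded per object and passed back verbatim
    # to the get-*/set-* subcommands below.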
    for remote in osds.remotes.keys():
        log.debug(remote)
        log.debug(osds.remotes[remote])
        for role in osds.remotes[remote]:
            if not role.startswith("osd."):
                continue
            osdid = int(role.split('.')[1])
            log.info("process osd.{id} on {remote}".
                     format(id=osdid, remote=remote))
            cmd = (prefix + "--op list").format(id=osdid)
            try:
                lines = remote.sh(cmd, check_status=False).splitlines()
                for pgline in lines:
                    if not pgline:
                        continue
                    (pg, obj) = json.loads(pgline)
                    name = obj['oid']
                    if name in db:
                        pgswithobjects.add(pg)
                        objsinpg.setdefault(pg, []).append(name)
                        db[name].setdefault("pg2json",
                                            {})[pg] = json.dumps(obj)
            except CommandFailedError as e:
                log.error("Bad exit status {ret} from --op list request".
                          format(ret=e.exitstatus))
                ERRORS += 1

    log.info(db)
    log.info(pgswithobjects)
    log.info(objsinpg)

    if pool_dump["type"] == ceph_manager.PoolType.REPLICATED:
        # Test get-bytes
        log.info("Test get-bytes and set-bytes")
        for basename in db.keys():
            file = os.path.join(DATADIR, basename)
            GETNAME = os.path.join(DATADIR, "get")
            SETNAME = os.path.join(DATADIR, "set")

            for remote in osds.remotes.keys():
                for role in osds.remotes[remote]:
                    if not role.startswith("osd."):
                        continue
                    osdid = int(role.split('.')[1])
                    if osdid not in pgs:
                        continue

                    for pg, JSON in db[basename]["pg2json"].items():
                        if pg in pgs[osdid]:
                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += ("get-bytes {fname}".
                                    format(fname=GETNAME).split())
                            proc = remote.run(args=cmd, check_status=False)
                            if proc.exitstatus != 0:
                                remote.run(args="rm -f {getfile}".
                                           format(getfile=GETNAME).split())
                                log.error("Bad exit status {ret}".
                                          format(ret=proc.exitstatus))
                                ERRORS += 1
                                continue
                            cmd = ("diff -q {file} {getfile}".
                                   format(file=file, getfile=GETNAME))
                            proc = remote.run(args=cmd.split(),
                                              check_status=False)
                            if proc.exitstatus != 0:
                                log.error("Data from get-bytes differ")
                                # log.debug("Got:")
                                # cat_file(logging.DEBUG, GETNAME)
                                # log.debug("Expected:")
                                # cat_file(logging.DEBUG, file)
                                ERRORS += 1
                            remote.run(args="rm -f {getfile}".
                                       format(getfile=GETNAME).split())

                            data = ("put-bytes going into {file}\n".
                                    format(file=file))
                            remote.write_file(SETNAME, data)
                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += ("set-bytes {fname}".
                                    format(fname=SETNAME).split())
                            proc = remote.run(args=cmd, check_status=False)
                            proc.wait()
                            if proc.exitstatus != 0:
                                log.info("set-bytes failed for object {obj} "
                                         "in pg {pg} osd.{id} ret={ret}".
                                         format(obj=basename, pg=pg,
                                                id=osdid, ret=proc.exitstatus))
                                ERRORS += 1

                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += "get-bytes -".split()
                            try:
                                output = remote.sh(cmd, wait=True)
                                if data != output:
                                    log.error("Data inconsistent after "
                                              "set-bytes, got:")
                                    log.error(output)
                                    ERRORS += 1
                            except CommandFailedError as e:
                                log.error("get-bytes after "
                                          "set-bytes ret={ret}".
                                          format(ret=e.exitstatus))
                                ERRORS += 1

                            # Restore the object's original contents
                            cmd = ((prefix + "--pgid {pg}").
                                   format(id=osdid, pg=pg).split())
                            cmd.append(run.Raw("'{json}'".format(json=JSON)))
                            cmd += ("set-bytes {fname}".
                                    format(fname=file).split())
                            proc = remote.run(args=cmd, check_status=False)
                            proc.wait()
                            if proc.exitstatus != 0:
                                log.info("set-bytes failed for object {obj} "
                                         "in pg {pg} osd.{id} ret={ret}".
                                         format(obj=basename, pg=pg,
                                                id=osdid, ret=proc.exitstatus))
                                ERRORS += 1

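    # list-attrs should report every xattr stored above; the objectstore
    # prefixes user xattrs with "_", and "_", "snapset" and "hinfo_key"
    # are internal attributes that are skipped or tested separately.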
log.info("Test list-attrs get-attr")
|
|
|
|
for basename in db.keys():
|
|
|
|
file = os.path.join(DATADIR, basename)
|
|
|
|
GETNAME = os.path.join(DATADIR, "get")
|
|
|
|
SETNAME = os.path.join(DATADIR, "set")
|
|
|
|
|
2019-10-11 15:57:47 +00:00
|
|
|
for remote in osds.remotes.keys():
|
2014-07-21 22:08:08 +00:00
|
|
|
for role in osds.remotes[remote]:
|
2020-04-08 04:32:54 +00:00
|
|
|
if not role.startswith("osd."):
|
2014-07-21 22:08:08 +00:00
|
|
|
continue
|
|
|
|
osdid = int(role.split('.')[1])
|
2014-11-19 18:03:57 +00:00
|
|
|
if osdid not in pgs:
|
|
|
|
continue
|
2014-07-21 22:08:08 +00:00
|
|
|
|
2019-10-09 12:36:58 +00:00
|
|
|
for pg, JSON in db[basename]["pg2json"].items():
|
2014-11-17 16:31:13 +00:00
|
|
|
if pg in pgs[osdid]:
|
2014-12-18 22:50:12 +00:00
|
|
|
cmd = ((prefix + "--pgid {pg}").
|
|
|
|
format(id=osdid, pg=pg).split())
|
2014-07-21 22:08:08 +00:00
|
|
|
cmd.append(run.Raw("'{json}'".format(json=JSON)))
|
2014-11-17 16:31:13 +00:00
|
|
|
cmd += ["list-attrs"]
|
2020-02-21 21:46:31 +00:00
|
|
|
try:
|
|
|
|
keys = remote.sh(cmd, wait=True, stderr=BytesIO()).split()
|
|
|
|
except CommandFailedError as e:
|
2014-12-18 22:50:12 +00:00
|
|
|
log.error("Bad exit status {ret}".
|
2020-02-21 21:46:31 +00:00
|
|
|
format(ret=e.exitstatus))
|
2014-07-21 22:08:08 +00:00
|
|
|
ERRORS += 1
|
|
|
|
continue
|
2014-11-17 16:31:13 +00:00
|
|
|
values = dict(db[basename]["xattr"])
|
|
|
|
|
|
|
|
for key in keys:
|
2014-12-18 22:50:12 +00:00
|
|
|
if (key == "_" or
|
|
|
|
key == "snapset" or
|
|
|
|
key == "hinfo_key"):
|
2014-11-17 16:31:13 +00:00
|
|
|
continue
|
|
|
|
key = key.strip("_")
|
|
|
|
if key not in values:
|
2014-12-18 22:50:12 +00:00
|
|
|
log.error("The key {key} should be present".
|
|
|
|
format(key=key))
|
2014-11-17 16:31:13 +00:00
|
|
|
ERRORS += 1
|
|
|
|
continue
|
|
|
|
exp = values.pop(key)
|
2014-12-18 22:50:12 +00:00
|
|
|
cmd = ((prefix + "--pgid {pg}").
|
|
|
|
format(id=osdid, pg=pg).split())
|
2014-11-17 16:31:13 +00:00
|
|
|
cmd.append(run.Raw("'{json}'".format(json=JSON)))
|
2014-12-18 22:50:12 +00:00
|
|
|
cmd += ("get-attr {key}".
|
|
|
|
format(key="_" + key).split())
|
2020-02-21 21:46:31 +00:00
|
|
|
try:
|
|
|
|
val = remote.sh(cmd, wait=True)
|
|
|
|
except CommandFailedError as e:
|
2014-12-18 22:50:12 +00:00
|
|
|
log.error("get-attr failed with {ret}".
|
2020-02-21 21:46:31 +00:00
|
|
|
format(ret=e.exitstatus))
|
2014-11-17 16:31:13 +00:00
|
|
|
ERRORS += 1
|
|
|
|
continue
|
|
|
|
if exp != val:
|
2014-12-18 22:50:12 +00:00
|
|
|
log.error("For key {key} got value {got} "
|
|
|
|
"instead of {expected}".
|
|
|
|
format(key=key, got=val,
|
|
|
|
expected=exp))
|
2014-11-17 16:31:13 +00:00
|
|
|
ERRORS += 1
|
2014-11-17 16:33:56 +00:00
|
|
|
if "hinfo_key" in keys:
|
|
|
|
cmd_prefix = prefix.format(id=osdid)
|
|
|
|
cmd = """
|
2014-12-18 22:50:12 +00:00
|
|
|
expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64)
|
|
|
|
echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} -
|
|
|
|
test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder
|
|
|
|
echo $expected | base64 --decode | \
|
|
|
|
{prefix} --pgid {pg} '{json}' set-attr {key} -
|
|
|
|
test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected
|
|
|
|
""".format(prefix=cmd_prefix, pg=pg, json=JSON,
|
|
|
|
key="hinfo_key")
|
2014-11-17 16:33:56 +00:00
|
|
|
log.debug(cmd)
|
2014-12-18 22:50:12 +00:00
|
|
|
proc = remote.run(args=['bash', '-e', '-x',
|
|
|
|
'-c', cmd],
|
|
|
|
check_status=False,
|
2020-02-21 21:46:31 +00:00
|
|
|
stdout=BytesIO(),
|
|
|
|
stderr=BytesIO())
|
2014-11-17 16:33:56 +00:00
|
|
|
proc.wait()
|
|
|
|
if proc.exitstatus != 0:
|
2014-12-18 22:50:12 +00:00
|
|
|
log.error("failed with " +
|
|
|
|
str(proc.exitstatus))
|
2020-02-21 21:46:31 +00:00
|
|
|
log.error(" ".join([
|
2020-06-28 11:59:39 +00:00
|
|
|
proc.stdout.getvalue().decode(),
|
|
|
|
proc.stderr.getvalue().decode(),
|
2020-02-21 21:46:31 +00:00
|
|
|
]))
|
2014-11-17 16:33:56 +00:00
|
|
|
ERRORS += 1
|
2014-11-17 16:31:13 +00:00
|
|
|
|
|
|
|
if len(values) != 0:
|
|
|
|
log.error("Not all keys found, remaining keys:")
|
|
|
|
log.error(values)
|
2014-07-21 22:08:08 +00:00
|
|
|
|
|
|
|
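    # "--op info" dumps pg metadata; at minimum the output must mention
    # the pgid that was queried.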
log.info("Test pg info")
|
2019-10-11 15:57:47 +00:00
|
|
|
for remote in osds.remotes.keys():
|
2014-07-21 22:08:08 +00:00
|
|
|
for role in osds.remotes[remote]:
|
2020-04-08 04:32:54 +00:00
|
|
|
if not role.startswith("osd."):
|
2014-07-21 22:08:08 +00:00
|
|
|
continue
|
|
|
|
osdid = int(role.split('.')[1])
|
2014-11-19 18:03:57 +00:00
|
|
|
if osdid not in pgs:
|
|
|
|
continue
|
2014-07-21 22:08:08 +00:00
|
|
|
|
|
|
|
for pg in pgs[osdid]:
|
2014-12-18 22:50:12 +00:00
|
|
|
cmd = ((prefix + "--op info --pgid {pg}").
|
|
|
|
format(id=osdid, pg=pg).split())
|
2020-02-21 21:46:31 +00:00
|
|
|
try:
|
|
|
|
info = remote.sh(cmd, wait=True)
|
|
|
|
except CommandFailedError as e:
|
2020-06-19 11:26:10 +00:00
|
|
|
log.error("Failure of --op info command with %s",
|
|
|
|
e.exitstatus)
|
2014-07-21 22:08:08 +00:00
|
|
|
ERRORS += 1
|
|
|
|
continue
|
|
|
|
if not str(pg) in info:
|
2020-06-19 11:26:10 +00:00
|
|
|
log.error("Bad data from info: %s", info)
|
2014-07-21 22:08:08 +00:00
|
|
|
ERRORS += 1
|
|
|
|
|
|
|
|
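    # A pg that holds test objects must show "modify" entries in its log,
    # and an empty pg must not.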
log.info("Test pg logging")
|
2019-10-11 15:57:47 +00:00
|
|
|
for remote in osds.remotes.keys():
|
2014-07-21 22:08:08 +00:00
|
|
|
for role in osds.remotes[remote]:
|
2020-04-08 04:32:54 +00:00
|
|
|
if not role.startswith("osd."):
|
2014-07-21 22:08:08 +00:00
|
|
|
continue
|
|
|
|
osdid = int(role.split('.')[1])
|
2014-11-19 18:03:57 +00:00
|
|
|
if osdid not in pgs:
|
|
|
|
continue
|
2014-07-21 22:08:08 +00:00
|
|
|
|
|
|
|
for pg in pgs[osdid]:
|
2014-12-18 22:50:12 +00:00
|
|
|
cmd = ((prefix + "--op log --pgid {pg}").
|
|
|
|
format(id=osdid, pg=pg).split())
|
2020-02-21 21:46:31 +00:00
|
|
|
try:
|
|
|
|
output = remote.sh(cmd, wait=True)
|
|
|
|
except CommandFailedError as e:
|
2014-12-18 22:50:12 +00:00
|
|
|
log.error("Getting log failed for pg {pg} "
|
|
|
|
"from osd.{id} with {ret}".
|
2020-02-21 21:46:31 +00:00
|
|
|
format(pg=pg, id=osdid, ret=e.exitstatus))
|
2014-07-21 22:08:08 +00:00
|
|
|
ERRORS += 1
|
|
|
|
continue
|
|
|
|
HASOBJ = pg in pgswithobjects
|
2020-02-21 21:46:31 +00:00
|
|
|
MODOBJ = "modify" in output
|
2014-07-21 22:08:08 +00:00
|
|
|
if HASOBJ != MODOBJ:
|
2014-12-18 22:50:12 +00:00
|
|
|
log.error("Bad log for pg {pg} from osd.{id}".
|
|
|
|
format(pg=pg, id=osdid))
|
2014-07-21 22:08:08 +00:00
|
|
|
MSG = (HASOBJ and [""] or ["NOT "])[0]
|
2014-12-18 22:50:12 +00:00
|
|
|
log.error("Log should {msg}have a modify entry".
|
|
|
|
format(msg=MSG))
|
2014-07-21 22:08:08 +00:00
|
|
|
ERRORS += 1
|
|
|
|
|
|
|
|
log.info("Test pg export")
|
|
|
|
EXP_ERRORS = 0
|
2019-10-11 15:57:47 +00:00
|
|
|
for remote in osds.remotes.keys():
|
2014-07-21 22:08:08 +00:00
|
|
|
for role in osds.remotes[remote]:
|
2020-04-08 04:32:54 +00:00
|
|
|
if not role.startswith("osd."):
|
2014-07-21 22:08:08 +00:00
|
|
|
continue
|
|
|
|
osdid = int(role.split('.')[1])
|
2014-11-19 18:03:57 +00:00
|
|
|
if osdid not in pgs:
|
|
|
|
continue
|
2014-07-21 22:08:08 +00:00
|
|
|
|
|
|
|
for pg in pgs[osdid]:
|
2014-12-18 22:50:12 +00:00
|
|
|
fpath = os.path.join(DATADIR, "osd{id}.{pg}".
|
|
|
|
format(id=osdid, pg=pg))
|
2014-07-21 22:08:08 +00:00
|
|
|
|
2014-12-18 22:50:12 +00:00
|
|
|
cmd = ((prefix + "--op export --pgid {pg} --file {file}").
|
|
|
|
format(id=osdid, pg=pg, file=fpath))
|
2020-02-21 21:46:31 +00:00
|
|
|
try:
|
|
|
|
remote.sh(cmd, wait=True)
|
|
|
|
except CommandFailedError as e:
|
2014-12-18 22:50:12 +00:00
|
|
|
log.error("Exporting failed for pg {pg} "
|
|
|
|
"on osd.{id} with {ret}".
|
2020-02-21 21:46:31 +00:00
|
|
|
format(pg=pg, id=osdid, ret=e.exitstatus))
|
2014-07-21 22:08:08 +00:00
|
|
|
EXP_ERRORS += 1
|
|
|
|
|
|
|
|
ERRORS += EXP_ERRORS
|
|
|
|
|
|
|
|
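    # Remove the pgs that were just exported so the import below has to
    # recreate them from the export files.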
log.info("Test pg removal")
|
|
|
|
RM_ERRORS = 0
|
2019-10-11 15:57:47 +00:00
|
|
|
for remote in osds.remotes.keys():
|
2014-07-21 22:08:08 +00:00
|
|
|
for role in osds.remotes[remote]:
|
2020-04-08 04:32:54 +00:00
|
|
|
if not role.startswith("osd."):
|
2014-07-21 22:08:08 +00:00
|
|
|
continue
|
|
|
|
osdid = int(role.split('.')[1])
|
2014-11-19 18:03:57 +00:00
|
|
|
if osdid not in pgs:
|
|
|
|
continue
|
2014-07-21 22:08:08 +00:00
|
|
|
|
|
|
|
for pg in pgs[osdid]:
|
2017-09-09 00:53:07 +00:00
|
|
|
cmd = ((prefix + "--force --op remove --pgid {pg}").
|
2014-12-18 22:50:12 +00:00
|
|
|
format(pg=pg, id=osdid))
|
2020-02-21 21:46:31 +00:00
|
|
|
try:
|
|
|
|
remote.sh(cmd, wait=True)
|
|
|
|
except CommandFailedError as e:
|
2014-12-18 22:50:12 +00:00
|
|
|
log.error("Removing failed for pg {pg} "
|
|
|
|
"on osd.{id} with {ret}".
|
2020-02-21 21:46:31 +00:00
|
|
|
format(pg=pg, id=osdid, ret=e.exitstatus))
|
2014-07-21 22:08:08 +00:00
|
|
|
RM_ERRORS += 1
|
|
|
|
|
|
|
|
ERRORS += RM_ERRORS
|
|
|
|
|
|
|
|
    IMP_ERRORS = 0
    if EXP_ERRORS == 0 and RM_ERRORS == 0:
        log.info("Test pg import")

        for remote in osds.remotes.keys():
            for role in osds.remotes[remote]:
                if not role.startswith("osd."):
                    continue
                osdid = int(role.split('.')[1])
                if osdid not in pgs:
                    continue

                for pg in pgs[osdid]:
                    fpath = os.path.join(DATADIR, "osd{id}.{pg}".
                                         format(id=osdid, pg=pg))

                    cmd = ((prefix + "--op import --file {file}").
                           format(id=osdid, file=fpath))
                    try:
                        remote.sh(cmd, wait=True)
                    except CommandFailedError as e:
                        log.error("Import failed from {file} with {ret}".
                                  format(file=fpath, ret=e.exitstatus))
                        IMP_ERRORS += 1
    else:
        log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")

    ERRORS += IMP_ERRORS

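    # Only restart the OSDs and verify object data if export, removal and
    # import all succeeded; after a partial failure the stores could be
    # inconsistent.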
    if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0:
        log.info("Restarting OSDs....")
        # They still look to be up because of the nodown flag
        for osd in manager.get_osd_status()['up']:
            manager.revive_osd(osd)
        # Wait for health?
        time.sleep(5)
        # Let scrub after test runs verify consistency of all copies
        log.info("Verify replicated import data")
        objects = range(1, NUM_OBJECTS + 1)
        for i in objects:
            NAME = REP_NAME + "{num}".format(num=i)
            TESTNAME = os.path.join(DATADIR, "gettest")
            REFNAME = os.path.join(DATADIR, NAME)

            proc = rados(ctx, cli_remote,
                         ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False)

            ret = proc.wait()
            if ret != 0:
                log.error("After import, rados get failed with {ret}".
                          format(ret=proc.exitstatus))
                ERRORS += 1
                continue

            cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME,
                                                   ref=REFNAME)
            proc = cli_remote.run(args=cmd, check_status=False)
            proc.wait()
            if proc.exitstatus != 0:
                log.error("Data comparison failed for {obj}".format(obj=NAME))
                ERRORS += 1

    return ERRORS