diff --git a/tasks/ceph_manager.py b/tasks/ceph_manager.py
index 327dd08ba77..b1ffc80ec77 100644
--- a/tasks/ceph_manager.py
+++ b/tasks/ceph_manager.py
@@ -110,7 +110,7 @@ class Thrasher:
             self.revive_timeout += 120
         self.clean_wait = self.config.get('clean_wait', 0)
         self.minin = self.config.get("min_in", 3)
-        if cmd_exists("ceph_objectstore_tool"):
+        if cmd_exists("ceph-objectstore-tool"):
             self.ceph_objectstore_tool = self.config.get('ceph_objectstore_tool', False)
         else:
             self.ceph_objectstore_tool = False
@@ -156,7 +156,7 @@ class Thrasher:
         if mark_out and osd in self.in_osds:
             self.out_osd(osd)
         if self.ceph_objectstore_tool:
-            self.log("Testing ceph_objectstore_tool on down osd")
+            self.log("Testing ceph-objectstore-tool on down osd")
             (remote,) = self.ceph_manager.ctx.cluster.only('osd.{o}'.format(o=osd)).remotes.iterkeys()
             FSPATH = self.ceph_manager.get_filepath()
             JPATH = os.path.join(FSPATH, "journal")
@@ -167,13 +167,13 @@ class Thrasher:
                 exp_osd = random.choice(self.dead_osds[:-1])
                 (exp_remote,) = self.ceph_manager.ctx.cluster.only('osd.{o}'.format(o=exp_osd)).remotes.iterkeys()
             if 'keyvaluestore_backend' in self.ceph_manager.ctx.ceph.conf['osd']:
-                prefix = "sudo ceph_objectstore_tool --data-path {fpath} --journal-path {jpath} --type keyvaluestore-dev --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH)
+                prefix = "sudo ceph-objectstore-tool --data-path {fpath} --journal-path {jpath} --type keyvaluestore-dev --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH)
             else:
-                prefix = "sudo ceph_objectstore_tool --data-path {fpath} --journal-path {jpath} --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH)
+                prefix = "sudo ceph-objectstore-tool --data-path {fpath} --journal-path {jpath} --log-file=/var/log/ceph/objectstore_tool.\\$pid.log ".format(fpath=FSPATH, jpath=JPATH)
             cmd = (prefix + "--op list-pgs").format(id=exp_osd)
             proc = exp_remote.run(args=cmd, wait=True, check_status=True, stdout=StringIO())
             if proc.exitstatus:
-                raise Exception("ceph_objectstore_tool: exp list-pgs failure with status {ret}".format(ret=proc.exitstatus))
+                raise Exception("ceph-objectstore-tool: exp list-pgs failure with status {ret}".format(ret=proc.exitstatus))
             pgs = proc.stdout.getvalue().split('\n')[:-1]
             if len(pgs) == 0:
                 self.log("No PGs found for osd.{osd}".format(osd=exp_osd))
@@ -184,19 +184,19 @@ class Thrasher:
             cmd = (prefix + "--op export --pgid {pg} --file {file}").format(id=exp_osd, pg=pg, file=exp_path)
             proc = exp_remote.run(args=cmd)
             if proc.exitstatus:
-                raise Exception("ceph_objectstore_tool: export failure with status {ret}".format(ret=proc.exitstatus))
+                raise Exception("ceph-objectstore-tool: export failure with status {ret}".format(ret=proc.exitstatus))
             # remove
             cmd = (prefix + "--op remove --pgid {pg}").format(id=exp_osd, pg=pg)
             proc = exp_remote.run(args=cmd)
             if proc.exitstatus:
-                raise Exception("ceph_objectstore_tool: remove failure with status {ret}".format(ret=proc.exitstatus))
+                raise Exception("ceph-objectstore-tool: remove failure with status {ret}".format(ret=proc.exitstatus))
             # If there are at least 2 dead osds we might move the pg
             if exp_osd != imp_osd:
                 # If pg isn't already on this osd, then we will move it there
                 cmd = (prefix + "--op list-pgs").format(id=imp_osd)
                 proc = imp_remote.run(args=cmd, wait=True, check_status=True, stdout=StringIO())
                 if proc.exitstatus:
-                    raise Exception("ceph_objectstore_tool: imp list-pgs failure with status {ret}".format(ret=proc.exitstatus))
+                    raise Exception("ceph-objectstore-tool: imp list-pgs failure with status {ret}".format(ret=proc.exitstatus))
                 pgs = proc.stdout.getvalue().split('\n')[:-1]
                 if pg not in pgs:
                     self.log("Moving pg {pg} from osd.{fosd} to osd.{tosd}".format(pg=pg, fosd=exp_osd, tosd=imp_osd))
@@ -214,7 +214,7 @@ class Thrasher:
             cmd = (prefix + "--op import --file {file}").format(id=imp_osd, file=exp_path)
             imp_remote.run(args=cmd)
             if proc.exitstatus:
-                raise Exception("ceph_objectstore_tool: import failure with status {ret}".format(ret=proc.exitstatus))
+                raise Exception("ceph-objectstore-tool: import failure with status {ret}".format(ret=proc.exitstatus))
             cmd = "rm -f {file}".format(file=exp_path)
             exp_remote.run(args=cmd)
             if imp_remote != exp_remote:
diff --git a/tasks/ceph_objectstore_tool.py b/tasks/ceph_objectstore_tool.py
index 5d3e533817a..3b899de33b8 100644
--- a/tasks/ceph_objectstore_tool.py
+++ b/tasks/ceph_objectstore_tool.py
@@ -1,5 +1,5 @@
 """
-ceph_objectstore_tool - Simple test of ceph_objectstore_tool utility
+ceph_objectstore_tool - Simple test of ceph-objectstore-tool utility
 """
 from cStringIO import StringIO
 import contextlib
@@ -26,7 +26,8 @@ FSPATH = "/var/lib/ceph/osd/ceph-{id}"
 JPATH = "/var/lib/ceph/osd/ceph-{id}/journal"
 
 
-def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR, BASE_NAME, DATALINECOUNT):
+def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR,
+                         BASE_NAME, DATALINECOUNT):
     objects = range(1, NUM_OBJECTS + 1)
     for i in objects:
         NAME = BASE_NAME + "{num}".format(num=i)
@@ -40,14 +41,15 @@ def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR, BASE_NAME, DATALINECOUN
         fd.close()
 
 
-def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME, DATALINECOUNT):
+def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
+                          BASE_NAME, DATALINECOUNT):
     objects = range(1, NUM_OBJECTS + 1)
 
     for i in objects:
         NAME = BASE_NAME + "{num}".format(num=i)
         DDNAME = os.path.join(DATADIR, NAME)
 
-        remote.run(args=['rm', '-f', DDNAME ])
+        remote.run(args=['rm', '-f', DDNAME])
 
         dataline = range(DATALINECOUNT)
         data = "This is the data for " + NAME + "\n"
@@ -57,7 +59,8 @@ def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME, DAT
     teuthology.write_file(remote, DDNAME, DATA)
 
 
-def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME, DATALINECOUNT, POOL, db, ec):
+def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR,
+              BASE_NAME, DATALINECOUNT, POOL, db, ec):
     ERRORS = 0
     log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS))
 
@@ -66,11 +69,13 @@ def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME, DATALINECOUNT,
         NAME = BASE_NAME + "{num}".format(num=i)
         DDNAME = os.path.join(DATADIR, NAME)
 
-        proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME], wait=False)
+        proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME],
+                     wait=False)
         # proc = remote.run(args=['rados', '-p', POOL, 'put', NAME, DDNAME])
         ret = proc.wait()
        if ret != 0:
-            log.critical("Rados put failed with status {ret}".format(ret=proc.exitstatus))
+            log.critical("Rados put failed with status {ret}".
+                         format(ret=proc.exitstatus))
             sys.exit(1)
 
         db[NAME] = {}
@@ -82,7 +87,8 @@ def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME, DATALINECOUNT,
                 continue
             mykey = "key{i}-{k}".format(i=i, k=k)
             myval = "val{i}-{k}".format(i=i, k=k)
-            proc = remote.run(args=['rados', '-p', POOL, 'setxattr', NAME, mykey, myval])
+            proc = remote.run(args=['rados', '-p', POOL, 'setxattr',
+                                    NAME, mykey, myval])
             ret = proc.wait()
             if ret != 0:
                 log.error("setxattr failed with {ret}".format(ret=ret))
@@ -96,7 +102,8 @@ def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME, DATALINECOUNT,
         # Create omap header in all objects but REPobject1
         if i != 1:
             myhdr = "hdr{i}".format(i=i)
-            proc = remote.run(args=['rados', '-p', POOL, 'setomapheader', NAME, myhdr])
+            proc = remote.run(args=['rados', '-p', POOL, 'setomapheader',
+                                    NAME, myhdr])
             ret = proc.wait()
             if ret != 0:
                 log.critical("setomapheader failed with {ret}".format(ret=ret))
@@ -109,7 +116,8 @@ def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR, BASE_NAME, DATALINECOUNT,
                 continue
             mykey = "okey{i}-{k}".format(i=i, k=k)
             myval = "oval{i}-{k}".format(i=i, k=k)
-            proc = remote.run(args=['rados', '-p', POOL, 'setomapval', NAME, mykey, myval])
+            proc = remote.run(args=['rados', '-p', POOL, 'setomapval',
+                                    NAME, mykey, myval])
             ret = proc.wait()
             if ret != 0:
                 log.critical("setomapval failed with {ret}".format(ret=ret))
@@ -175,9 +183,11 @@ def task(ctx, config):
         )
     ctx.manager = manager
 
-    while len(manager.get_osd_status()['up']) != len(manager.get_osd_status()['raw']):
+    while (len(manager.get_osd_status()['up']) !=
+           len(manager.get_osd_status()['raw'])):
         time.sleep(10)
-    while len(manager.get_osd_status()['in']) != len(manager.get_osd_status()['up']):
+    while (len(manager.get_osd_status()['in']) !=
+           len(manager.get_osd_status()['up'])):
         time.sleep(10)
     manager.raw_cluster_cmd('osd', 'set', 'noout')
     manager.raw_cluster_cmd('osd', 'set', 'nodown')
@@ -195,7 +205,8 @@ def task(ctx, config):
     EC_POOL = "ec_pool"
     EC_NAME = "ECobject"
     create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM)
-    ERRORS += test_objectstore(ctx, config, cli_remote, EC_POOL, EC_NAME, ec=True)
+    ERRORS += test_objectstore(ctx, config, cli_remote,
+                               EC_POOL, EC_NAME, ec=True)
 
     if ERRORS == 0:
         log.info("TEST PASSED")
@@ -231,15 +242,18 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
 
     LOCALDIR = tempfile.mkdtemp("cod")
 
-    cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR, REP_NAME, DATALINECOUNT)
+    cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR,
+                         REP_NAME, DATALINECOUNT)
     allremote = []
     allremote.append(cli_remote)
     allremote += osds.remotes.keys()
     allremote = list(set(allremote))
     for remote in allremote:
-        cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, REP_NAME, DATALINECOUNT)
+        cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
+                              REP_NAME, DATALINECOUNT)
 
-    ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR, REP_NAME, DATALINECOUNT, REP_POOL, db, ec)
+    ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR,
+                        REP_NAME, DATALINECOUNT, REP_POOL, db, ec)
 
     pgs = {}
     for stats in manager.get_pg_stats():
@@ -251,10 +265,13 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
         elif pool_dump["type"] == ceph_manager.CephManager.ERASURE_CODED_POOL:
             shard = 0
             for osd in stats["acting"]:
-                pgs.setdefault(osd, []).append("{pgid}s{shard}".format(pgid=stats["pgid"], shard=shard))
+                pgs.setdefault(osd, []).append("{pgid}s{shard}".
+                                               format(pgid=stats["pgid"],
+                                                      shard=shard))
                 shard += 1
         else:
-            raise Exception("{pool} has an unexpected type {type}".format(pool=REP_POOL, type=pool_dump["type"]))
+            raise Exception("{pool} has an unexpected type {type}".
+                            format(pool=REP_POOL, type=pool_dump["type"]))
 
     log.info(pgs)
     log.info(db)
@@ -268,7 +285,9 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
 
     # Test --op list and generate json for all objects
     log.info("Test --op list by generating json for all objects")
-    prefix = "sudo ceph_objectstore_tool --data-path {fpath} --journal-path {jpath} ".format(fpath=FSPATH, jpath=JPATH)
+    prefix = ("sudo ceph-objectstore-tool "
+              "--data-path {fpath} "
+              "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH)
     for remote in osds.remotes.iterkeys():
         log.debug(remote)
         log.debug(osds.remotes[remote])
@@ -276,33 +295,26 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
             if string.find(role, "osd.") != 0:
                 continue
             osdid = int(role.split('.')[1])
-            if osdid not in pgs:
-                continue
-            log.info("process osd.{id} on {remote}".format(id=osdid, remote=remote))
-            for pg in pgs[osdid]:
-                cmd = (prefix + "--op list --pgid {pg}").format(id=osdid, pg=pg)
-                proc = remote.run(args=cmd.split(), check_status=False, stdout=StringIO())
-                # proc.wait()
-                if proc.exitstatus != 0:
-                    log.error("Bad exit status {ret} from --op list request".format(ret=proc.exitstatus))
-                    ERRORS += 1
-                else:
-                    data = proc.stdout.getvalue()
-                    if len(data):
-                        # This pg has some objects in it
+            log.info("process osd.{id} on {remote}".
+                     format(id=osdid, remote=remote))
+            cmd = (prefix + "--op list").format(id=osdid)
+            proc = remote.run(args=cmd.split(), check_status=False,
+                              stdout=StringIO())
+            if proc.exitstatus != 0:
+                log.error("Bad exit status {ret} from --op list request".
+                          format(ret=proc.exitstatus))
+                ERRORS += 1
+            else:
+                for pgline in proc.stdout.getvalue().splitlines():
+                    if not pgline:
+                        continue
+                    (pg, obj) = json.loads(pgline)
+                    name = obj['oid']
+                    if name in db:
                         pgswithobjects.add(pg)
-                        pglines = data.split('\n')
-                        # All copies of a pg are the same so we can overwrite
-                        objsinpg[pg] = []
-                        while(len(pglines)):
-                            # Drop any blank lines
-                            if (len(pglines[-1]) == 0):
-                                pglines.pop()
-                                continue
-                            objjson = pglines.pop()
-                            name = json.loads(objjson)['oid']
-                            objsinpg[pg].append(name)
-                            db[name].setdefault("pg2json", {})[pg] = objjson
+                        objsinpg.setdefault(pg, []).append(name)
+                        db[name].setdefault("pg2json",
+                                            {})[pg] = json.dumps(obj)
 
     log.info(db)
     log.info(pgswithobjects)
@@ -326,16 +338,21 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
 
                 for pg, JSON in db[basename]["pg2json"].iteritems():
                     if pg in pgs[osdid]:
-                        cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
+                        cmd = ((prefix + "--pgid {pg}").
+                               format(id=osdid, pg=pg).split())
                         cmd.append(run.Raw("'{json}'".format(json=JSON)))
-                        cmd += "get-bytes {fname}".format(fname=GETNAME).split()
+                        cmd += ("get-bytes {fname}".
+                                format(fname=GETNAME).split())
                         proc = remote.run(args=cmd, check_status=False)
                         if proc.exitstatus != 0:
-                            remote.run(args="rm -f {getfile}".format(getfile=GETNAME).split())
-                            log.error("Bad exit status {ret}".format(ret=proc.exitstatus))
+                            remote.run(args="rm -f {getfile}".
+                                       format(getfile=GETNAME).split())
+                            log.error("Bad exit status {ret}".
+                                      format(ret=proc.exitstatus))
                             ERRORS += 1
                             continue
-                        cmd = "diff -q {file} {getfile}".format(file=file, getfile=GETNAME)
+                        cmd = ("diff -q {file} {getfile}".
+                               format(file=file, getfile=GETNAME))
                         proc = remote.run(args=cmd.split())
                         if proc.exitstatus != 0:
                             log.error("Data from get-bytes differ")
@@ -344,40 +361,57 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
                             # log.debug("Expected:")
                             # cat_file(logging.DEBUG, file)
                             ERRORS += 1
-                        remote.run(args="rm -f {getfile}".format(getfile=GETNAME).split())
+                        remote.run(args="rm -f {getfile}".
+                                   format(getfile=GETNAME).split())
 
-                        data = "put-bytes going into {file}\n".format(file=file)
+                        data = ("put-bytes going into {file}\n".
+                                format(file=file))
                         teuthology.write_file(remote, SETNAME, data)
-                        cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
+                        cmd = ((prefix + "--pgid {pg}").
+                               format(id=osdid, pg=pg).split())
                         cmd.append(run.Raw("'{json}'".format(json=JSON)))
-                        cmd += "set-bytes {fname}".format(fname=SETNAME).split()
+                        cmd += ("set-bytes {fname}".
+                                format(fname=SETNAME).split())
                         proc = remote.run(args=cmd, check_status=False)
                         proc.wait()
                         if proc.exitstatus != 0:
-                            log.info("set-bytes failed for object {obj} in pg {pg} osd.{id} ret={ret}".format(obj=basename, pg=pg, id=osdid, ret=proc.exitstatus))
+                            log.info("set-bytes failed for object {obj} "
+                                     "in pg {pg} osd.{id} ret={ret}".
+                                     format(obj=basename, pg=pg,
+                                            id=osdid, ret=proc.exitstatus))
                             ERRORS += 1
 
-                        cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
+                        cmd = ((prefix + "--pgid {pg}").
+                               format(id=osdid, pg=pg).split())
                         cmd.append(run.Raw("'{json}'".format(json=JSON)))
                         cmd += "get-bytes -".split()
-                        proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
+                        proc = remote.run(args=cmd, check_status=False,
+                                          stdout=StringIO())
                         proc.wait()
                         if proc.exitstatus != 0:
-                            log.error("get-bytes after set-bytes ret={ret}".format(ret=proc.exitstatus))
+                            log.error("get-bytes after "
+                                      "set-bytes ret={ret}".
+                                      format(ret=proc.exitstatus))
                             ERRORS += 1
                         else:
                             if data != proc.stdout.getvalue():
-                                log.error("Data inconsistent after set-bytes, got:")
+                                log.error("Data inconsistent after "
+                                          "set-bytes, got:")
                                 log.error(proc.stdout.getvalue())
                                 ERRORS += 1
 
-                        cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
+                        cmd = ((prefix + "--pgid {pg}").
+                               format(id=osdid, pg=pg).split())
                         cmd.append(run.Raw("'{json}'".format(json=JSON)))
-                        cmd += "set-bytes {fname}".format(fname=file).split()
+                        cmd += ("set-bytes {fname}".
+                                format(fname=file).split())
                         proc = remote.run(args=cmd, check_status=False)
                         proc.wait()
                         if proc.exitstatus != 0:
-                            log.info("set-bytes failed for object {obj} in pg {pg} osd.{id} ret={ret}".format(obj=basename, pg=pg, id=osdid, ret=proc.exitstatus))
+                            log.info("set-bytes failed for object {obj} "
+                                     "in pg {pg} osd.{id} ret={ret}".
+                                     format(obj=basename, pg=pg,
+                                            id=osdid, ret=proc.exitstatus))
                             ERRORS += 1
 
     log.info("Test list-attrs get-attr")
@@ -396,55 +430,76 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
 
                 for pg, JSON in db[basename]["pg2json"].iteritems():
                     if pg in pgs[osdid]:
-                        cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
+                        cmd = ((prefix + "--pgid {pg}").
+                               format(id=osdid, pg=pg).split())
                         cmd.append(run.Raw("'{json}'".format(json=JSON)))
                         cmd += ["list-attrs"]
-                        proc = remote.run(args=cmd, check_status=False, stdout=StringIO(), stderr=StringIO())
+                        proc = remote.run(args=cmd, check_status=False,
+                                          stdout=StringIO(), stderr=StringIO())
                         proc.wait()
                         if proc.exitstatus != 0:
-                            log.error("Bad exit status {ret}".format(ret=proc.exitstatus))
+                            log.error("Bad exit status {ret}".
+                                      format(ret=proc.exitstatus))
                             ERRORS += 1
                             continue
                         keys = proc.stdout.getvalue().split()
                         values = dict(db[basename]["xattr"])
 
                         for key in keys:
-                            if key == "_" or key == "snapset" or key == "hinfo_key":
+                            if (key == "_" or
+                                    key == "snapset" or
+                                    key == "hinfo_key"):
                                 continue
                             key = key.strip("_")
                             if key not in values:
-                                log.error("The key {key} should be present".format(key=key))
+                                log.error("The key {key} should be present".
+                                          format(key=key))
                                 ERRORS += 1
                                 continue
                             exp = values.pop(key)
-                            cmd = (prefix + "--pgid {pg}").format(id=osdid, pg=pg).split()
+                            cmd = ((prefix + "--pgid {pg}").
+                                   format(id=osdid, pg=pg).split())
                             cmd.append(run.Raw("'{json}'".format(json=JSON)))
-                            cmd += "get-attr {key}".format(key="_" + key).split()
-                            proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
+                            cmd += ("get-attr {key}".
+                                    format(key="_" + key).split())
+                            proc = remote.run(args=cmd, check_status=False,
+                                              stdout=StringIO())
                             proc.wait()
                             if proc.exitstatus != 0:
-                                log.error("get-attr failed with {ret}".format(ret=proc.exitstatus))
+                                log.error("get-attr failed with {ret}".
+                                          format(ret=proc.exitstatus))
                                 ERRORS += 1
                                 continue
                             val = proc.stdout.getvalue()
                             if exp != val:
-                                log.error("For key {key} got value {got} instead of {expected}".format(key=key, got=val, expected=exp))
+                                log.error("For key {key} got value {got} "
+                                          "instead of {expected}".
+                                          format(key=key, got=val,
+                                                 expected=exp))
                                 ERRORS += 1
 
                         if "hinfo_key" in keys:
                             cmd_prefix = prefix.format(id=osdid)
                             cmd = """
-                      expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64)
-                      echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} -
-                      test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder
-                      echo $expected | base64 --decode | {prefix} --pgid {pg} '{json}' set-attr {key} -
-                      test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected
-                      """.format(prefix=cmd_prefix, pg=pg, json=JSON, key="hinfo_key")
+                                  expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64)
+                                  echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} -
+                                  test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder
+                                  echo $expected | base64 --decode | \
+                                     {prefix} --pgid {pg} '{json}' set-attr {key} -
+                                  test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected
+                                  """.format(prefix=cmd_prefix, pg=pg, json=JSON,
+                                             key="hinfo_key")
                             log.debug(cmd)
-                            proc = remote.run(args=['bash', '-e', '-x', '-c', cmd], check_status=False, stdout=StringIO(), stderr=StringIO())
+                            proc = remote.run(args=['bash', '-e', '-x',
+                                                    '-c', cmd],
+                                              check_status=False,
+                                              stdout=StringIO(),
+                                              stderr=StringIO())
                             proc.wait()
                             if proc.exitstatus != 0:
-                                log.error("failed with " + str(proc.exitstatus))
-                                log.error(proc.stdout.getvalue() + " " + proc.stderr.getvalue())
+                                log.error("failed with " +
+                                          str(proc.exitstatus))
+                                log.error(proc.stdout.getvalue() + " " +
+                                          proc.stderr.getvalue())
                                 ERRORS += 1
 
                         if len(values) != 0:
@@ -461,11 +516,14 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
                 continue
 
             for pg in pgs[osdid]:
-                cmd = (prefix + "--op info --pgid {pg}").format(id=osdid, pg=pg).split()
-                proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
+                cmd = ((prefix + "--op info --pgid {pg}").
+                       format(id=osdid, pg=pg).split())
+                proc = remote.run(args=cmd, check_status=False,
+                                  stdout=StringIO())
                 proc.wait()
                 if proc.exitstatus != 0:
-                    log.error("Failure of --op info command with {ret}".format(proc.exitstatus))
+                    log.error("Failure of --op info command with {ret}".
+                              format(ret=proc.exitstatus))
                     ERRORS += 1
                     continue
                 info = proc.stdout.getvalue()
@@ -483,19 +541,25 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
                 continue
 
             for pg in pgs[osdid]:
-                cmd = (prefix + "--op log --pgid {pg}").format(id=osdid, pg=pg).split()
-                proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
+                cmd = ((prefix + "--op log --pgid {pg}").
+                       format(id=osdid, pg=pg).split())
+                proc = remote.run(args=cmd, check_status=False,
+                                  stdout=StringIO())
                 proc.wait()
                 if proc.exitstatus != 0:
-                    log.error("Getting log failed for pg {pg} from osd.{id} with {ret}".format(pg=pg, id=osdid, ret=proc.exitstatus))
+                    log.error("Getting log failed for pg {pg} "
+                              "from osd.{id} with {ret}".
+                              format(pg=pg, id=osdid, ret=proc.exitstatus))
                     ERRORS += 1
                     continue
                 HASOBJ = pg in pgswithobjects
                 MODOBJ = "modify" in proc.stdout.getvalue()
                 if HASOBJ != MODOBJ:
-                    log.error("Bad log for pg {pg} from osd.{id}".format(pg=pg, id=osdid))
+                    log.error("Bad log for pg {pg} from osd.{id}".
+                              format(pg=pg, id=osdid))
                     MSG = (HASOBJ and [""] or ["NOT "])[0]
-                    log.error("Log should {msg}have a modify entry".format(msg=MSG))
+                    log.error("Log should {msg}have a modify entry".
+                              format(msg=MSG))
                     ERRORS += 1
 
     log.info("Test pg export")
@@ -509,13 +573,18 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
                 continue
 
             for pg in pgs[osdid]:
-                fpath = os.path.join(DATADIR, "osd{id}.{pg}".format(id=osdid, pg=pg))
+                fpath = os.path.join(DATADIR, "osd{id}.{pg}".
+                                     format(id=osdid, pg=pg))
 
-                cmd = (prefix + "--op export --pgid {pg} --file {file}").format(id=osdid, pg=pg, file=fpath)
-                proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
+                cmd = ((prefix + "--op export --pgid {pg} --file {file}").
+                       format(id=osdid, pg=pg, file=fpath))
+                proc = remote.run(args=cmd, check_status=False,
+                                  stdout=StringIO())
                 proc.wait()
                 if proc.exitstatus != 0:
-                    log.error("Exporting failed for pg {pg} on osd.{id} with {ret}".format(pg=pg, id=osdid, ret=proc.exitstatus))
+                    log.error("Exporting failed for pg {pg} "
+                              "on osd.{id} with {ret}".
+                              format(pg=pg, id=osdid, ret=proc.exitstatus))
                     EXP_ERRORS += 1
 
     ERRORS += EXP_ERRORS
@@ -531,11 +600,15 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
                 continue
 
            for pg in pgs[osdid]:
-                cmd = (prefix + "--op remove --pgid {pg}").format(pg=pg, id=osdid)
-                proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
+                cmd = ((prefix + "--op remove --pgid {pg}").
+                       format(pg=pg, id=osdid))
+                proc = remote.run(args=cmd, check_status=False,
+                                  stdout=StringIO())
                 proc.wait()
                 if proc.exitstatus != 0:
-                    log.error("Removing failed for pg {pg} on osd.{id} with {ret}".format(pg=pg, id=osdid, ret=proc.exitstatus))
+                    log.error("Removing failed for pg {pg} "
+                              "on osd.{id} with {ret}".
+                              format(pg=pg, id=osdid, ret=proc.exitstatus))
                     RM_ERRORS += 1
 
     ERRORS += RM_ERRORS
@@ -553,13 +626,17 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
                 continue
 
             for pg in pgs[osdid]:
-                fpath = os.path.join(DATADIR, "osd{id}.{pg}".format(id=osdid, pg=pg))
+                fpath = os.path.join(DATADIR, "osd{id}.{pg}".
+                                     format(id=osdid, pg=pg))
 
-                cmd = (prefix + "--op import --file {file}").format(id=osdid, file=fpath)
-                proc = remote.run(args=cmd, check_status=False, stdout=StringIO())
+                cmd = ((prefix + "--op import --file {file}").
+                       format(id=osdid, file=fpath))
+                proc = remote.run(args=cmd, check_status=False,
+                                  stdout=StringIO())
                 proc.wait()
                 if proc.exitstatus != 0:
-                    log.error("Import failed from {file} with {ret}".format(file=fpath, ret=proc.exitstatus))
+                    log.error("Import failed from {file} with {ret}".
+                              format(file=fpath, ret=proc.exitstatus))
                     IMP_ERRORS += 1
     else:
         log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES")
@@ -581,15 +658,18 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
             TESTNAME = os.path.join(DATADIR, "gettest")
             REFNAME = os.path.join(DATADIR, NAME)
 
-            proc = rados(ctx, cli_remote, ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False)
+            proc = rados(ctx, cli_remote,
+                         ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False)
 
             ret = proc.wait()
             if ret != 0:
-                log.error("After import, rados get failed with {ret}".format(ret=proc.exitstatus))
+                log.error("After import, rados get failed with {ret}".
+                          format(ret=proc.exitstatus))
                 ERRORS += 1
                 continue
 
-            cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME, ref=REFNAME)
+            cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME,
+                                                   ref=REFNAME)
             proc = cli_remote.run(args=cmd, check_status=False)
             proc.wait()
             if proc.exitstatus != 0: