qa/tasks: use next(iter(..)) for accessing first element in a view
in python2, dict.values() and dict.keys() return lists. but in python3, they return views, which cannot be indexed directly using an integer index. there are four use cases when we access these views in python3:

1. get the first element
2. get all the elements and then *might* want to access them by index
3. get the first element assuming there is only a single element in the view
4. iterate through the view

in the 1st case, we cannot assume the number of elements, so to be python3 compatible, we should use `next(iter(a_dict))` instead.

in the 2nd case, in this change, the view is materialized using `list(a_dict)`.

in the 3rd case, we can just continue using the shorthand of

```py
(first_element,) = a_dict.keys()
```

to unpack the view. this works in both python2 and python3.

in the 4th case, the existing code works in both python2 and python3, as both list and view can be iterated using `iter`, and `len` works as well.

Signed-off-by: Kefu Chai <kchai@redhat.com>
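as a quick illustration of the four cases, here is a minimal python3 sketch (the dict contents are invented for illustration, not taken from any teuthology config):

```py
a_dict = {'client.0': {'port': 8000}, 'client.1': {'port': 8001}}

# case 1: first element of a view -- views are not subscriptable in python3,
# so a_dict.keys()[0] raises TypeError; next(iter(...)) works in py2 and py3
first_key = next(iter(a_dict.keys()))

# case 2: materialize the view when index access may be needed later
keys = list(a_dict.keys())
assert keys[0] == first_key

# case 3: unpacking works in py2 and py3, and also asserts that the view
# holds exactly one element (raises ValueError otherwise)
(only_key,) = {'client.0': {'port': 8000}}.keys()

# case 4: plain iteration works unchanged on both lists and views
for client, conf in a_dict.items():
    print(client, conf['port'])
```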
parent 16e2ccaf14
commit d7258ea7fd
```diff
@@ -172,7 +172,7 @@ def configure_barbican(ctx, config):
     Configure barbican paste-api and barbican-api.
     """
     assert isinstance(config, dict)
-    (cclient, cconfig) = config.items()[0]
+    (cclient, cconfig) = next(iter(config.items()))
 
     keystone_role = cconfig.get('use-keystone-role', None)
     if keystone_role is None:
@@ -239,7 +239,7 @@ def create_secrets(ctx, config):
     Create a main and an alternate s3 user.
     """
     assert isinstance(config, dict)
-    (cclient, cconfig) = config.items()[0]
+    (cclient, cconfig) = next(iter(config.items()))
 
     rgw_user = cconfig['rgw_user']
 
```
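for instance, the old form in the hunks above fails outright on python3 with `TypeError: 'dict_items' object is not subscriptable`; a minimal reproduction (config contents invented for the example):

```py
config = {'client.0': {'use-keystone-role': 'client.0'}}
# (cclient, cconfig) = config.items()[0]   # TypeError on python3
(cclient, cconfig) = next(iter(config.items()))  # works on python2 and python3
```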
```diff
@@ -44,7 +44,7 @@ class CBT(Task):
         )
 
         benchmark_config = self.config.get('benchmarks')
-        benchmark_type = benchmark_config.keys()[0]
+        benchmark_type = next(iter(benchmark_config.keys()))
         if benchmark_type in ['librbdfio', 'fio']:
             testdir = misc.get_testdir(self.ctx)
             benchmark_config[benchmark_type]['cmd_path'] = os.path.join(testdir, 'fio/fio')
@@ -80,7 +80,7 @@ class CBT(Task):
             cbt_depends = ['python3-yaml', 'python3-lxml', 'librbd-dev', 'collectl']
         self.first_mon.run(args=install_cmd + cbt_depends)
 
-        benchmark_type = self.cbt_config.get('benchmarks').keys()[0]
+        benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys()))
         self.log.info('benchmark: %s', benchmark_type)
 
         if benchmark_type in ['librbdfio', 'fio']:
@@ -201,7 +201,7 @@ class CBT(Task):
 
     def setup(self):
         super(CBT, self).setup()
-        self.first_mon = self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys()[0]
+        self.first_mon = next(iter(self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys()))
         self.cbt_config = self.generate_cbt_config()
         self.log.info('cbt configuration is %s', self.cbt_config)
         self.cbt_dir = os.path.join(misc.get_archive_dir(self.ctx), 'cbt')
@@ -233,7 +233,7 @@ class CBT(Task):
                 '{tdir}/cbt'.format(tdir=testdir),
             ]
         )
-        benchmark_type = self.cbt_config.get('benchmarks').keys()[0]
+        benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys()))
         if benchmark_type in ['librbdfio', 'fio']:
             self.first_mon.run(
                 args=[
```
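a side note on the cbt hunks: iterating a dict yields its keys, so `next(iter(d.keys()))` and `next(iter(d))` are equivalent; the patch keeps the explicit `.keys()` form. a tiny sketch (the config is invented):

```py
# hypothetical cbt-style config: benchmark name -> settings
benchmarks = {'librbdfio': {'op_size': 4096}}
# iterating a dict yields its keys, so both forms return 'librbdfio'
assert next(iter(benchmarks.keys())) == next(iter(benchmarks)) == 'librbdfio'
```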
```diff
@@ -1239,8 +1239,8 @@ class ObjectStoreTool:
             self.pgid = self.manager.get_object_pg_with_shard(self.pool,
                                                               self.object_name,
                                                               self.osd)
-        self.remote = self.manager.ctx.\
-            cluster.only('osd.{o}'.format(o=self.osd)).remotes.keys()[0]
+        self.remote = next(iter(self.manager.ctx.\
+            cluster.only('osd.{o}'.format(o=self.osd)).remotes.keys()))
         path = self.manager.get_filepath().format(id=self.osd)
         self.paths = ("--data-path {path} --journal-path {path}/journal".
                       format(path=path))
```
```diff
@@ -242,7 +242,7 @@ def test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME, ec=False):
                           REP_NAME, DATALINECOUNT)
     allremote = []
     allremote.append(cli_remote)
-    allremote += osds.remotes.keys()
+    allremote += list(osds.remotes.keys())
     allremote = list(set(allremote))
     for remote in allremote:
         cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR,
```
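worth noting for the hunk above: `list.__iadd__` extends from any iterable, so `allremote += osds.remotes.keys()` would actually still run on python3; wrapping it in `list(...)` just makes the materialization explicit (case 2 from the commit message). a sketch with invented remotes:

```py
remotes = {'osd.0': 'smithi001', 'osd.1': 'smithi002'}
allremote = ['cli-remote']
allremote += list(remotes.keys())  # explicit materialization (case 2)
allremote = list(set(allremote))   # dedupe, as the task does
```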
```diff
@@ -691,7 +691,7 @@ class Filesystem(MDSCluster):
         if refresh or self.data_pools is None:
             self.get_pool_names(refresh = True)
         assert(len(self.data_pools) == 1)
-        return self.data_pools.values()[0]
+        return next(iter(self.data_pools.values()))
 
     def get_data_pool_id(self, refresh = False):
         """
@@ -701,12 +701,12 @@ class Filesystem(MDSCluster):
         if refresh or self.data_pools is None:
             self.get_pool_names(refresh = True)
         assert(len(self.data_pools) == 1)
-        return self.data_pools.keys()[0]
+        return next(iter(self.data_pools.keys()))
 
     def get_data_pool_names(self, refresh = False):
         if refresh or self.data_pools is None:
             self.get_pool_names(refresh = True)
-        return self.data_pools.values()
+        return list(self.data_pools.values())
 
     def get_metadata_pool_name(self):
         return self.metadata_pool_name
```
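since the getters above assert exactly one data pool, the case-3 unpacking shorthand from the commit message would be an equivalent py2/py3-safe alternative; a standalone sketch of what could be written (not what the patch does), with invented pool data:

```py
data_pools = {1: 'cephfs_data'}  # pool id -> pool name (invented)
# unpacking fetches the single element and raises ValueError otherwise,
# subsuming the explicit len() assert
(data_pool_name,) = data_pools.values()
assert data_pool_name == 'cephfs_data'
```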
```diff
@@ -45,7 +45,7 @@ class CheckCounter(Task):
         targets = self.config.get('counters', {})
 
         if cluster_name is None:
-            cluster_name = self.ctx.managers.keys()[0]
+            cluster_name = next(iter(self.ctx.managers.keys()))
 
         for daemon_type, counters in targets.items():
             # List of 'a', 'b', 'c'...
```
```diff
@@ -60,8 +60,8 @@ def install(ctx, config):
     if not isinstance(config, dict):
         raise TypeError("config must be a dict")
 
-    devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
-    an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
+    devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
+    an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))
 
     devstack_branch = config.get("branch", "master")
     install_devstack(devstack_node, devstack_branch)
@@ -301,7 +301,7 @@ def exercise(ctx, config):
     if not isinstance(config, dict):
         raise TypeError("config must be a dict")
 
-    devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
+    devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
 
     # TODO: save the log *and* preserve failures
     #devstack_archive_dir = create_devstack_archive(ctx, devstack_node)
@@ -328,8 +328,8 @@ def create_devstack_archive(ctx, devstack_node):
 def smoke(ctx, config):
     log.info("Running a basic smoketest...")
 
-    devstack_node = ctx.cluster.only(is_devstack_node).remotes.keys()[0]
-    an_osd_node = ctx.cluster.only(is_osd_node).remotes.keys()[0]
+    devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys()))
+    an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys()))
 
     try:
         create_volume(devstack_node, an_osd_node, 'smoke0', 1)
```
```diff
@@ -83,7 +83,7 @@ def setup_dnsmasq(remote, testdir, cnames):
     # restart dnsmasq
     remote.run(args=['sudo', 'systemctl', 'restart', 'dnsmasq'])
     # verify dns name is set
-    remote.run(args=['ping', '-c', '4', cnames.keys()[0]])
+    remote.run(args=['ping', '-c', '4', next(iter(cnames.keys()))])
 
     try:
         yield
```
```diff
@@ -31,7 +31,7 @@ def task(ctx, config):
     clients = config.keys()
 
     # just use the first client...
-    client = clients[0];
+    client = next(iter(clients))
     (remote,) = ctx.cluster.only(client).remotes.keys()
 
     testdir = teuthology.get_testdir(ctx)
```
```diff
@@ -73,7 +73,7 @@ class TestCrash(MgrTestCase):
             self.assertIn(crash['crash_id'], retstr)
 
     def test_rm(self):
-        crashid = self.crashes.keys()[0]
+        crashid = next(iter(self.crashes.keys()))
         self.assertEqual(
             0,
             self.mgr_cluster.mon_manager.raw_cluster_cmd_result(
```
```diff
@@ -272,7 +272,7 @@ def task(ctx, config):
     clients_from_config = config.keys()
 
     # choose first client as default
-    client = clients_from_config[0]
+    client = next(iter(clients_from_config))
 
     # once the client is chosen, pull the host name and assigned port out of
     # the role_endpoints that were assigned by the rgw task
```
```diff
@@ -130,7 +130,7 @@ def task(ctx, config):
     clients = config.keys()
 
     # just use the first client...
-    client = clients[0]
+    client = next(iter(clients))
 
     ##
     admin_user = 'ada'
```
```diff
@@ -128,7 +128,7 @@ def setup_vault(ctx, config):
     """
     Mount Transit or KV version 2 secrets engine
     """
-    (cclient, cconfig) = config.items()[0]
+    (cclient, cconfig) = next(iter(config.items()))
     engine = cconfig.get('engine')
 
     if engine == 'kv':
@@ -169,7 +169,7 @@ def send_req(ctx, cconfig, client, path, body, method='POST'):
 
 @contextlib.contextmanager
 def create_secrets(ctx, config):
-    (cclient, cconfig) = config.items()[0]
+    (cclient, cconfig) = next(iter(config.items()))
     engine = cconfig.get('engine')
     prefix = cconfig.get('prefix')
     secrets = cconfig.get('secrets')
```