from contextlib import contextmanager
import json
import logging
import datetime
import time
from textwrap import dedent
import os
from StringIO import StringIO

from teuthology.orchestra import run
from teuthology.orchestra.run import CommandFailedError, ConnectionLostError

from tasks.cephfs.filesystem import Filesystem

log = logging.getLogger(__name__)


class CephFSMount(object):
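    """
    Base class for a CephFS client mount used by the cephfs teuthology tests.
    Concrete subclasses are expected to implement the mount/umount primitives
    (mount, umount, umount_wait, kill, ...); this class provides common
    helpers for creating files, holding capabilities and file locks in
    background processes, and inspecting the mounted filesystem.
    """
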
    def __init__(self, ctx, test_dir, client_id, client_remote):
        """
        :param test_dir: Global teuthology test dir
        :param client_id: Client ID, the 'foo' in client.foo
        :param client_remote: Remote instance for the host where client will run
        """
        self.ctx = ctx
        self.test_dir = test_dir
        self.client_id = client_id
        self.client_remote = client_remote
        self.mountpoint_dir_name = 'mnt.{id}'.format(id=self.client_id)
        self.fs = None

        self.test_files = ['a', 'b', 'c']

        self.background_procs = []

    @property
    def mountpoint(self):
        return os.path.join(
            self.test_dir, '{dir_name}'.format(dir_name=self.mountpoint_dir_name))

    def is_mounted(self):
        raise NotImplementedError()

    def setupfs(self, name=None):
        if name is None and self.fs is not None:
            # Previous mount existed, reuse the old name
            name = self.fs.name
        self.fs = Filesystem(self.ctx, name=name)
        log.info('Wait for MDS to reach steady state...')
        self.fs.wait_for_daemons()
        log.info('Ready to start {}...'.format(type(self).__name__))

    def mount(self, mount_path=None, mount_fs_name=None):
        raise NotImplementedError()

    def umount(self):
        raise NotImplementedError()

    def umount_wait(self, force=False, require_clean=False):
        """
        :param force: Expect that the mount will not shutdown cleanly: kill
                      it hard.
        :param require_clean: Wait for the Ceph client associated with the
                              mount (e.g. ceph-fuse) to terminate, and
                              raise if it doesn't do so cleanly.
        :return:
        """
        raise NotImplementedError()

    def kill_cleanup(self):
        raise NotImplementedError()

    def kill(self):
        raise NotImplementedError()

    def cleanup(self):
        raise NotImplementedError()

    def wait_until_mounted(self):
        raise NotImplementedError()

    def get_keyring_path(self):
        return '/etc/ceph/ceph.client.{id}.keyring'.format(id=self.client_id)

    @property
    def config_path(self):
        """
        Path to ceph.conf: override this if you're not a normal systemwide ceph install
        :return: string
        """
        return "/etc/ceph/ceph.conf"

    @contextmanager
    def mounted(self):
        """
        A context manager: starting from an initially unmounted state, mount
        this client, yield, and then unmount and clean up.
        """
        self.mount()
        self.wait_until_mounted()
        try:
            yield
        finally:
            self.umount_wait()

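    # Illustrative usage of the mounted() context manager (hypothetical test
    # code, assuming `mount` is an instance of a concrete subclass):
    #
    #   with mount.mounted():
    #       mount.create_files()
    #       mount.check_files()
    #
    # The client is unmounted via umount_wait() when the block exits, even on
    # error.
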
    def is_blacklisted(self):
        addr = self.get_global_addr()
        blacklist = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "blacklist", "ls", "--format=json"))
        for b in blacklist:
            if addr == b["addr"]:
                return True
        return False

    def create_files(self):
        assert(self.is_mounted())

        for suffix in self.test_files:
            log.info("Creating file {0}".format(suffix))
            self.client_remote.run(args=[
                'sudo', 'touch', os.path.join(self.mountpoint, suffix)
            ])

    def check_files(self):
        assert(self.is_mounted())

        for suffix in self.test_files:
            log.info("Checking file {0}".format(suffix))
            r = self.client_remote.run(args=[
                'sudo', 'ls', os.path.join(self.mountpoint, suffix)
            ], check_status=False)
            if r.exitstatus != 0:
                raise RuntimeError("Expected file {0} not found".format(suffix))

    def create_destroy(self):
        assert(self.is_mounted())

        filename = "{0} {1}".format(datetime.datetime.now(), self.client_id)
        log.debug("Creating test file {0}".format(filename))
        self.client_remote.run(args=[
            'sudo', 'touch', os.path.join(self.mountpoint, filename)
        ])
        log.debug("Deleting test file {0}".format(filename))
        self.client_remote.run(args=[
            'sudo', 'rm', '-f', os.path.join(self.mountpoint, filename)
        ])

    def _run_python(self, pyscript):
        return self.client_remote.run(args=[
            'sudo', 'adjust-ulimits', 'daemon-helper', 'kill', 'python', '-c', pyscript
        ], wait=False, stdin=run.PIPE, stdout=StringIO())

    def run_python(self, pyscript):
        p = self._run_python(pyscript)
        p.wait()
        return p.stdout.getvalue().strip()

    def run_shell(self, args, wait=True):
        args = ["cd", self.mountpoint, run.Raw('&&'), "sudo"] + args
        return self.client_remote.run(args=args, stdout=StringIO(),
                                      stderr=StringIO(), wait=wait)

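    # Illustrative usage (hypothetical test code): run_shell() prefixes the
    # command with "cd <mountpoint> &&" and "sudo", so arguments can refer to
    # paths relative to the mount, e.g.
    #
    #   mount.run_shell(["mkdir", "subdir"])
    #   output = mount.run_shell(["ls", "subdir"]).stdout.getvalue()
    #
    # while run_python() runs a script on the remote node and returns its
    # stripped stdout.
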
    def open_no_data(self, basename):
        """
        A pure metadata operation
        """
        assert(self.is_mounted())

        path = os.path.join(self.mountpoint, basename)

        p = self._run_python(dedent(
            """
            f = open("{path}", 'w')
            """.format(path=path)
        ))
        p.wait()

    def open_background(self, basename="background_file"):
        """
        Open a file for writing, then block such that the client
        will hold a capability.

        Don't return until the remote process has got as far as opening
        the file, then return the RemoteProcess instance.
        """
        assert(self.is_mounted())

        path = os.path.join(self.mountpoint, basename)

        pyscript = dedent("""
            import time

            f = open("{path}", 'w')
            f.write('content')
            f.flush()
            f.write('content2')
            while True:
                time.sleep(1)
            """).format(path=path)

        rproc = self._run_python(pyscript)
        self.background_procs.append(rproc)

        # This wait would not be sufficient if the file had already
        # existed, but it's simple and in practice users of open_background
        # are not using it on existing files.
        self.wait_for_visible(basename)

        return rproc

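    # Illustrative pattern (hypothetical test code): hold a capability for a
    # while, then release it by killing the background writer:
    #
    #   p = mount.open_background("capfile")
    #   ...  # exercise MDS/client behaviour while the cap is held
    #   mount.kill_background(p)
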
    def wait_for_visible(self, basename="background_file", timeout=30):
        i = 0
        while i < timeout:
            r = self.client_remote.run(args=[
                'sudo', 'ls', os.path.join(self.mountpoint, basename)
            ], check_status=False)
            if r.exitstatus == 0:
                log.debug("File {0} became visible from {1} after {2}s".format(
                    basename, self.client_id, i))
                return
            else:
                time.sleep(1)
                i += 1

        raise RuntimeError("Timed out after {0}s waiting for {1} to become visible from {2}".format(
            i, basename, self.client_id))

    def lock_background(self, basename="background_file", do_flock=True):
        """
        Open and lock files for writing, holding the locks in a background process
        """
        assert(self.is_mounted())

        path = os.path.join(self.mountpoint, basename)

        script_builder = """
            import time
            import fcntl
            import struct"""
        if do_flock:
            script_builder += """
            f1 = open("{path}-1", 'w')
            fcntl.flock(f1, fcntl.LOCK_EX | fcntl.LOCK_NB)"""
        script_builder += """
            f2 = open("{path}-2", 'w')
            lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
            fcntl.fcntl(f2, fcntl.F_SETLK, lockdata)
            while True:
                time.sleep(1)
            """

        pyscript = dedent(script_builder).format(path=path)

        log.info("lock_background file {0}".format(basename))
        rproc = self._run_python(pyscript)
        self.background_procs.append(rproc)
        return rproc

    def lock_and_release(self, basename="background_file"):
        assert(self.is_mounted())

        path = os.path.join(self.mountpoint, basename)

        script = """
            import time
            import fcntl
            import struct
            f1 = open("{path}-1", 'w')
            fcntl.flock(f1, fcntl.LOCK_EX)
            f2 = open("{path}-2", 'w')
            lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
            fcntl.fcntl(f2, fcntl.F_SETLK, lockdata)
            """
        pyscript = dedent(script).format(path=path)

        log.info("lock_and_release file {0}".format(basename))
        return self._run_python(pyscript)

    def check_filelock(self, basename="background_file", do_flock=True):
        assert(self.is_mounted())

        path = os.path.join(self.mountpoint, basename)

        script_builder = """
            import fcntl
            import errno
            import struct"""
        if do_flock:
            script_builder += """
            f1 = open("{path}-1", 'r')
            try:
                fcntl.flock(f1, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError as e:
                if e.errno == errno.EAGAIN:
                    pass
            else:
                raise RuntimeError("flock on file {path}-1 not found")"""
        script_builder += """
            f2 = open("{path}-2", 'r')
            try:
                lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
                fcntl.fcntl(f2, fcntl.F_SETLK, lockdata)
            except IOError as e:
                if e.errno == errno.EAGAIN:
                    pass
            else:
                raise RuntimeError("posix lock on file {path}-2 not found")
            """
        pyscript = dedent(script_builder).format(path=path)

        log.info("check lock on file {0}".format(basename))
        self.client_remote.run(args=[
            'sudo', 'python', '-c', pyscript
        ])

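    # Illustrative two-client scenario (hypothetical names mount_a/mount_b):
    # one mount holds the locks in the background while another verifies that
    # they are indeed held:
    #
    #   p = mount_a.lock_background("lockfile")
    #   mount_b.check_filelock("lockfile")
    #   mount_a.kill_background(p)
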
    def write_background(self, basename="background_file", loop=False):
        """
        Open a file and write to it from a background process, completing
        as soon as possible unless `loop` is True.

        :param basename: file name, relative to the mountpoint
        :return: the RemoteProcess running the writer
        """
        assert(self.is_mounted())

        path = os.path.join(self.mountpoint, basename)

        pyscript = dedent("""
            import os
            import time

            fd = os.open("{path}", os.O_RDWR | os.O_CREAT, 0644)
            try:
                while True:
                    os.write(fd, 'content')
                    time.sleep(1)
                    if not {loop}:
                        break
            except IOError:
                pass
            os.close(fd)
            """).format(path=path, loop=str(loop))

        rproc = self._run_python(pyscript)
        self.background_procs.append(rproc)
        return rproc

    def write_n_mb(self, filename, n_mb, seek=0, wait=True):
        """
        Write the requested number of megabytes to a file
        """
        assert(self.is_mounted())

        return self.run_shell(["dd", "if=/dev/urandom", "of={0}".format(filename),
                               "bs=1M", "conv=fdatasync",
                               "count={0}".format(n_mb),
                               "seek={0}".format(seek)
                               ], wait=wait)

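    # For example, write_n_mb("datafile", 8) runs the following via
    # run_shell (i.e. under sudo, relative to the mountpoint):
    #
    #   dd if=/dev/urandom of=datafile bs=1M conv=fdatasync count=8 seek=0
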
    def write_test_pattern(self, filename, size):
        log.info("Writing {0} bytes to {1}".format(size, filename))
        return self.run_python(dedent("""
            import zlib
            path = "{path}"
            f = open(path, 'w')
            for i in range(0, {size}):
                val = zlib.crc32("%s" % i) & 7
                f.write(chr(val))
            f.close()
            """.format(
            path=os.path.join(self.mountpoint, filename),
            size=size
        )))

    def validate_test_pattern(self, filename, size):
        log.info("Validating {0} bytes from {1}".format(size, filename))
        return self.run_python(dedent("""
            import zlib
            path = "{path}"
            f = open(path, 'r')
            bytes = f.read()
            f.close()
            if len(bytes) != {size}:
                raise RuntimeError("Bad length {{0}} vs. expected {{1}}".format(
                    len(bytes), {size}
                ))
            for i, b in enumerate(bytes):
                val = zlib.crc32("%s" % i) & 7
                if b != chr(val):
                    raise RuntimeError("Bad data at offset {{0}}".format(i))
            """.format(
            path=os.path.join(self.mountpoint, filename),
            size=size
        )))

    def open_n_background(self, fs_path, count):
        """
        Open N files for writing, hold them open in a background process

        :param fs_path: Path relative to CephFS root, e.g. "foo/bar"
        :return: a RemoteProcess
        """
        assert(self.is_mounted())

        abs_path = os.path.join(self.mountpoint, fs_path)

        pyscript = dedent("""
            import sys
            import time
            import os

            n = {count}
            abs_path = "{abs_path}"

            if not os.path.exists(os.path.dirname(abs_path)):
                os.makedirs(os.path.dirname(abs_path))

            handles = []
            for i in range(0, n):
                fname = "{{0}}_{{1}}".format(abs_path, i)
                handles.append(open(fname, 'w'))

            while True:
                time.sleep(1)
            """).format(abs_path=abs_path, count=count)

        rproc = self._run_python(pyscript)
        self.background_procs.append(rproc)
        return rproc

    def create_n_files(self, fs_path, count, sync=False):
        assert(self.is_mounted())

        abs_path = os.path.join(self.mountpoint, fs_path)

        pyscript = dedent("""
            import sys
            import time
            import os

            n = {count}
            abs_path = "{abs_path}"

            if not os.path.exists(os.path.dirname(abs_path)):
                os.makedirs(os.path.dirname(abs_path))

            for i in range(0, n):
                fname = "{{0}}_{{1}}".format(abs_path, i)
                h = open(fname, 'w')
                h.write('content')
                if {sync}:
                    h.flush()
                    os.fsync(h.fileno())
                h.close()
            """).format(abs_path=abs_path, count=count, sync=str(sync))

        self.run_python(pyscript)

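    # Illustrative usage (hypothetical test code): populate a directory with
    # many small files, fsyncing each one before close:
    #
    #   mount.create_n_files("subdir/file", 1000, sync=True)
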
    def teardown(self):
        for p in self.background_procs:
            log.info("Terminating background process")
            self._kill_background(p)

        self.background_procs = []

    def _kill_background(self, p):
        if p.stdin:
            p.stdin.close()
            try:
                p.wait()
            except (CommandFailedError, ConnectionLostError):
                pass

    def kill_background(self, p):
        """
        For a process that was returned by one of the _background member functions,
        kill it hard.
        """
        self._kill_background(p)
        self.background_procs.remove(p)

    def get_global_id(self):
        raise NotImplementedError()

    def get_global_inst(self):
        raise NotImplementedError()

    def get_global_addr(self):
        raise NotImplementedError()

    def get_osd_epoch(self):
        raise NotImplementedError()

    def stat(self, fs_path, wait=True):
        """
        stat a file, and return the result as a dictionary like this:
        {
          "st_ctime": 1414161137.0,
          "st_mtime": 1414161137.0,
          "st_nlink": 33,
          "st_gid": 0,
          "st_dev": 16777218,
          "st_size": 1190,
          "st_ino": 2,
          "st_uid": 0,
          "st_mode": 16877,
          "st_atime": 1431520593.0
        }

        Raises exception on absent file.
        """
        abs_path = os.path.join(self.mountpoint, fs_path)

        pyscript = dedent("""
            import os
            import stat
            import json
            import sys

            try:
                s = os.stat("{path}")
            except OSError as e:
                sys.exit(e.errno)

            attrs = ["st_mode", "st_ino", "st_dev", "st_nlink", "st_uid", "st_gid", "st_size", "st_atime", "st_mtime", "st_ctime"]
            print json.dumps(
                dict([(a, getattr(s, a)) for a in attrs]),
                indent=2)
            """).format(path=abs_path)
        proc = self._run_python(pyscript)
        if wait:
            proc.wait()
            return json.loads(proc.stdout.getvalue().strip())
        else:
            return proc

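    # Illustrative usage (hypothetical test code): with wait=True (the
    # default) stat() returns a plain dict, so e.g.
    #
    #   size = mount.stat("subdir/file")["st_size"]
    #
    # With wait=False it returns the RemoteProcess for the caller to wait on.
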
    def touch(self, fs_path):
        """
        Create a dentry if it doesn't already exist. This python
        implementation exists because the usual command line tool doesn't
        pass through error codes like EIO.

        :param fs_path:
        :return:
        """
        abs_path = os.path.join(self.mountpoint, fs_path)
        pyscript = dedent("""
            import sys
            import errno

            try:
                f = open("{path}", "w")
                f.close()
            except IOError as e:
                sys.exit(errno.EIO)
            """).format(path=abs_path)
        proc = self._run_python(pyscript)
        proc.wait()

    def path_to_ino(self, fs_path, follow_symlinks=True):
        abs_path = os.path.join(self.mountpoint, fs_path)

        if follow_symlinks:
            pyscript = dedent("""
                import os
                import stat

                print os.stat("{path}").st_ino
                """).format(path=abs_path)
        else:
            pyscript = dedent("""
                import os
                import stat

                print os.lstat("{path}").st_ino
                """).format(path=abs_path)

        proc = self._run_python(pyscript)
        proc.wait()
        return int(proc.stdout.getvalue().strip())

    def path_to_nlink(self, fs_path):
        abs_path = os.path.join(self.mountpoint, fs_path)

        pyscript = dedent("""
            import os
            import stat

            print os.stat("{path}").st_nlink
            """).format(path=abs_path)

        proc = self._run_python(pyscript)
        proc.wait()
        return int(proc.stdout.getvalue().strip())

    def ls(self, path=None):
        """
        Wrap ls: return a list of strings
        """
        cmd = ["ls"]
        if path:
            cmd.append(path)

        ls_text = self.run_shell(cmd).stdout.getvalue().strip()

        if ls_text:
            return ls_text.split("\n")
        else:
            # Special case because otherwise split on empty string
            # gives you [''] instead of []
            return []

    def setfattr(self, path, key, val):
        """
        Wrap setfattr.

        :param path: relative to mount point
        :param key: xattr name
        :param val: xattr value
        :return: None
        """
        self.run_shell(["setfattr", "-n", key, "-v", val, path])

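    # Illustrative usage (hypothetical paths and pool name): CephFS exposes
    # virtual xattrs that these wrappers are commonly used with, e.g.
    #
    #   mount.setfattr("subdir", "ceph.dir.layout.pool", "cephfs_data")
    #   mount.getfattr("subdir", "ceph.dir.layout.pool")
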
    def getfattr(self, path, attr):
        """
        Wrap getfattr: return the value of a named xattr on one file, or
        None if the attribute is not found.

        :return: a string
        """
        p = self.run_shell(["getfattr", "--only-values", "-n", attr, path], wait=False)
        try:
            p.wait()
        except CommandFailedError as e:
            if e.exitstatus == 1 and "No such attribute" in p.stderr.getvalue():
                return None
            else:
                raise

        return p.stdout.getvalue()

    def df(self):
        """
        Wrap df: return a dict of usage fields in bytes
        """

        p = self.run_shell(["df", "-B1", "."])
        lines = p.stdout.getvalue().strip().split("\n")
        fs, total, used, avail = lines[1].split()[:4]
        log.warn(lines)

        return {
            "total": int(total),
            "used": int(used),
            "available": int(avail)
        }
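
    # Illustrative usage (hypothetical test code): df() parses `df -B1 .` run
    # on the mountpoint, so a test can check for free space, e.g.
    #
    #   assert mount.df()["available"] > 10 * 1024 * 1024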