qa: remove use of obsolete mds commands

Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
Patrick Donnelly, 2017-10-01 17:07:05 -07:00
parent cbbdd0da7d
commit 183646c919
27 changed files with 78 additions and 83 deletions
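
For reference, these are the command migrations exercised by the hunks
below, obsolete command on the left, replacement on the right (the new
"fs" commands take an explicit filesystem name, here cephfs):

    ceph mds set <var> <val>          ->  ceph fs set cephfs <var> <val>
    ceph mds set_max_mds <n>          ->  ceph fs set cephfs max_mds <n>
    ceph mds add_data_pool <pool>     ->  ceph fs add_data_pool cephfs <pool>
    ceph mds remove_data_pool <pool>  ->  ceph fs remove_data_pool cephfs <pool>
    ceph mds dump                     ->  ceph fs dump
    ceph mds getmap -o <file>         ->  ceph fs dump -o <file>
    ceph mds stop <role>              ->  ceph mds deactivate <role>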

View File

@@ -1,4 +1,4 @@
 tasks:
 - exec:
     client.0:
-      - sudo ceph mds set inline_data true --yes-i-really-mean-it
+      - sudo ceph fs set cephfs inline_data true --yes-i-really-mean-it

View File

@@ -3,4 +3,4 @@ tasks:
 - ceph:
 - exec:
     client.0:
-      - sudo ceph mds set inline_data true --yes-i-really-mean-it
+      - sudo ceph fs set cephfs inline_data true --yes-i-really-mean-it

View File

@@ -441,6 +441,9 @@ class Filesystem(MDSCluster):
     def set_allow_dirfrags(self, yes):
         self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_dirfrags", str(yes).lower(), '--yes-i-really-mean-it')
 
+    def set_allow_new_snaps(self, yes):
+        self.mon_manager.raw_cluster_cmd("fs", "set", self.name, "allow_new_snaps", str(yes).lower(), '--yes-i-really-mean-it')
+
     def get_pgs_per_fs_pool(self):
         """
         Calculate how many PGs to use when creating a pool, in order to avoid raising any

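For context, the new set_allow_new_snaps() helper follows the same pattern
as set_allow_dirfrags() above: it issues the equivalent of this CLI command
(shown for a filesystem named cephfs):

    ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
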
View File

@@ -277,7 +277,7 @@ class TestStandbyReplay(CephFSTestCase):
         # Shrink the cluster
         fs_a.set_max_mds(1)
-        fs_a.mon_manager.raw_cluster_cmd("mds", "stop", "{0}:1".format(fs_a.name))
+        fs_a.mon_manager.raw_cluster_cmd("mds", "deactivate", "{0}:1".format(fs_a.name))
         self.wait_until_equal(
             lambda: fs_a.get_active_names(), [mds_a],
             60

View File

@@ -638,8 +638,8 @@ class TestStrays(CephFSTestCase):
             mds_id=rank_1_id)
 
         # Shut down rank 1
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'set', "max_mds", "1")
-        self.fs.mon_manager.raw_cluster_cmd_result('mds', 'deactivate', "1")
+        self.fs.set_max_mds(1)
+        self.fs.deactivate(1)
 
         # Wait til we get to a single active MDS mdsmap state
         self.wait_until_true(lambda: self._is_stopped(1), timeout=120)
@@ -744,8 +744,7 @@ class TestStrays(CephFSTestCase):
         in purging on the stray for the file.
         """
         # Enable snapshots
-        self.fs.mon_manager.raw_cluster_cmd("mds", "set", "allow_new_snaps", "true",
-                                            "--yes-i-really-mean-it")
+        self.fs.set_allow_new_snaps(True)
 
         # Create a dir with a file in it
         size_mb = 8

View File

@@ -29,10 +29,9 @@ def task(ctx, config):
     )
 
     # Stop MDS
-    manager.raw_cluster_cmd('mds', 'set', "max_mds", "0")
-    mds = ctx.daemons.get_daemon('mds', mds_id)
-    mds.stop()
-    manager.raw_cluster_cmd('mds', 'fail', mds_id)
+    self.fs.set_max_mds(0)
+    self.fs.mds_stop(mds_id)
+    self.fs.mds_fail(mds_id)
 
     # Reset the filesystem so that next start will go into CREATING
     manager.raw_cluster_cmd('fs', 'rm', "default", "--yes-i-really-mean-it")

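A minimal sketch of the equivalent CLI sequence the new helper calls drive,
assuming the default filesystem is named cephfs, <mds_id> is the daemon
stopped above, and set_max_mds() wraps the "fs set" path like the helpers
shown earlier (stopping the daemon itself is done by the test harness, not
a monitor command):

    ceph fs set cephfs max_mds 0
    ceph mds fail <mds_id>
    ceph fs rm default --yes-i-really-mean-it
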
View File

@@ -614,7 +614,7 @@ function test_auth_profiles()
   ceph -n client.xx-profile-ro -k client.xx.keyring osd dump
   ceph -n client.xx-profile-ro -k client.xx.keyring pg dump
   ceph -n client.xx-profile-ro -k client.xx.keyring mon dump
-  ceph -n client.xx-profile-ro -k client.xx.keyring mds dump
+  ceph -n client.xx-profile-ro -k client.xx.keyring fs get cephfs
   # read-only gets access denied for rw commands or auth commands
   ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true
   check_response "EACCES: access denied"
@@ -628,7 +628,7 @@ function test_auth_profiles()
   ceph -n client.xx-profile-rw -k client.xx.keyring osd dump
   ceph -n client.xx-profile-rw -k client.xx.keyring pg dump
   ceph -n client.xx-profile-rw -k client.xx.keyring mon dump
-  ceph -n client.xx-profile-rw -k client.xx.keyring mds dump
+  ceph -n client.xx-profile-rw -k client.xx.keyring fs dump
   ceph -n client.xx-profile-rw -k client.xx.keyring log foo
   ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout
   ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout
@@ -650,7 +650,7 @@ function test_auth_profiles()
   # but read-write 'mon' commands are not
   ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true
   check_response "EACCES: access denied"
-  ceph -n client.xx-profile-rd -k client.xx.keyring mds dump >& $TMPFILE || true
+  ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true
   check_response "EACCES: access denied"
   ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true
   check_response "EACCES: access denied"
@@ -892,10 +892,6 @@ function test_mon_mds()
   ceph fs set $FS_NAME cluster_down true
   ceph fs set $FS_NAME cluster_down false
 
-  # Legacy commands, act on default fs
-  ceph mds cluster_down
-  ceph mds cluster_up
-
   ceph mds compat rm_incompat 4
   ceph mds compat rm_incompat 4
@@ -905,7 +901,6 @@ function test_mon_mds()
   ceph mds compat show
   expect_false ceph mds deactivate 2
-  ceph mds dump
   ceph fs dump
   ceph fs get $FS_NAME
   for mds_gid in $(get_mds_gids $FS_NAME) ; do
@@ -917,7 +912,7 @@ function test_mon_mds()
   # XXX mds fail, but how do you undo it?
   mdsmapfile=$TEMP_DIR/mdsmap.$$
-  current_epoch=$(ceph mds getmap -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
+  current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //')
   [ -s $mdsmapfile ]
   rm $mdsmapfile
@@ -925,52 +920,52 @@ function test_mon_mds()
   ceph osd pool create data3 10
   data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}')
   data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}')
-  ceph mds add_data_pool $data2_pool
-  ceph mds add_data_pool $data3_pool
-  ceph mds add_data_pool 100 >& $TMPFILE || true
+  ceph fs add_data_pool cephfs $data2_pool
+  ceph fs add_data_pool cephfs $data3_pool
+  ceph fs add_data_pool cephfs 100 >& $TMPFILE || true
   check_response "Error ENOENT"
-  ceph mds add_data_pool foobarbaz >& $TMPFILE || true
+  ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true
   check_response "Error ENOENT"
-  ceph mds remove_data_pool $data2_pool
-  ceph mds remove_data_pool $data3_pool
+  ceph fs remove_data_pool cephfs $data2_pool
+  ceph fs remove_data_pool cephfs $data3_pool
   ceph osd pool delete data2 data2 --yes-i-really-really-mean-it
   ceph osd pool delete data3 data3 --yes-i-really-really-mean-it
-  ceph mds set allow_multimds false
-  expect_false ceph mds set_max_mds 4
-  ceph mds set allow_multimds true
-  ceph mds set_max_mds 4
-  ceph mds set_max_mds 3
-  ceph mds set_max_mds 256
-  expect_false ceph mds set_max_mds 257
-  ceph mds set max_mds 4
-  ceph mds set max_mds 256
-  expect_false ceph mds set max_mds 257
-  expect_false ceph mds set max_mds asdf
-  expect_false ceph mds set inline_data true
-  ceph mds set inline_data true --yes-i-really-mean-it
-  ceph mds set inline_data yes --yes-i-really-mean-it
-  ceph mds set inline_data 1 --yes-i-really-mean-it
-  expect_false ceph mds set inline_data --yes-i-really-mean-it
-  ceph mds set inline_data false
-  ceph mds set inline_data no
-  ceph mds set inline_data 0
-  expect_false ceph mds set inline_data asdf
-  ceph mds set max_file_size 1048576
-  expect_false ceph mds set max_file_size 123asdf
+  ceph fs set cephfs allow_multimds false
+  expect_false ceph fs set cephfs max_mds 4
+  ceph fs set cephfs allow_multimds true
+  ceph fs set cephfs max_mds 4
+  ceph fs set cephfs max_mds 3
+  ceph fs set cephfs max_mds 256
+  expect_false ceph fs set cephfs max_mds 257
+  ceph fs set cephfs max_mds 4
+  ceph fs set cephfs max_mds 256
+  expect_false ceph fs set cephfs max_mds 257
+  expect_false ceph fs set cephfs max_mds asdf
+  expect_false ceph fs set cephfs inline_data true
+  ceph fs set cephfs inline_data true --yes-i-really-mean-it
+  ceph fs set cephfs inline_data yes --yes-i-really-mean-it
+  ceph fs set cephfs inline_data 1 --yes-i-really-mean-it
+  expect_false ceph fs set cephfs inline_data --yes-i-really-mean-it
+  ceph fs set cephfs inline_data false
+  ceph fs set cephfs inline_data no
+  ceph fs set cephfs inline_data 0
+  expect_false ceph fs set cephfs inline_data asdf
+  ceph fs set cephfs max_file_size 1048576
+  expect_false ceph fs set cephfs max_file_size 123asdf
-  expect_false ceph mds set allow_new_snaps
-  expect_false ceph mds set allow_new_snaps true
-  ceph mds set allow_new_snaps true --yes-i-really-mean-it
-  ceph mds set allow_new_snaps 0
-  ceph mds set allow_new_snaps false
-  ceph mds set allow_new_snaps no
-  expect_false ceph mds set allow_new_snaps taco
+  expect_false ceph fs set cephfs allow_new_snaps
+  expect_false ceph fs set cephfs allow_new_snaps true
+  ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
+  ceph fs set cephfs allow_new_snaps 0
+  ceph fs set cephfs allow_new_snaps false
+  ceph fs set cephfs allow_new_snaps no
+  expect_false ceph fs set cephfs allow_new_snaps taco
   # we should never be able to add EC pools as data or metadata pools
   # create an ec-pool...
   ceph osd pool create mds-ec-pool 10 10 erasure
   set +e
-  ceph mds add_data_pool mds-ec-pool 2>$TMPFILE
+  ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE
   check_response 'erasure-code' $? 22
   set -e
   ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}')
@@ -985,8 +980,8 @@ function test_mon_mds()
   ceph mds rmfailed 0 --yes-i-really-mean-it
   set -e
 
-  # Check that `newfs` is no longer permitted
-  expect_false ceph mds newfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
+  # Check that `fs new` is no longer permitted
+  expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE
 
   # Check that 'fs reset' runs
   ceph fs reset $FS_NAME --yes-i-really-mean-it
@@ -1119,7 +1114,6 @@ function test_mon_mds()
   # ceph mds rm
   # ceph mds rmfailed
   # ceph mds set_state
-  # ceph mds stop
 
   ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
   ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
@@ -1130,7 +1124,7 @@ function test_mon_mds_metadata()
   local nmons=$(ceph tell 'mon.*' version | grep -c 'version')
   test "$nmons" -gt 0
 
-  ceph mds dump |
+  ceph fs dump |
   sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" |
   while read gid id rank; do
     ceph mds metadata ${gid} | grep '"hostname":'

View File

@@ -4,12 +4,12 @@ set -e
 touch foo.$$
 rados mkpool foo.$$
-ceph mds add_data_pool foo.$$
+ceph fs add_data_pool cephfs foo.$$
 setfattr -n ceph.file.layout.pool -v foo.$$ foo.$$
 
 # cleanup
 rm foo.$$
-ceph mds remove_data_pool foo.$$
+ceph fs remove_data_pool cephfs foo.$$
 rados rmpool foo.$$ foo.$$ --yes-i-really-really-mean-it
 
 echo OK

View File

@@ -1,6 +1,6 @@
 #!/bin/sh -ex
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 wget -q http://download.ceph.com/qa/linux-2.6.33.tar.bz2
 mkdir foo
 cp linux* foo

View File

@@ -5,9 +5,9 @@ expect_failure() {
 }
 set -e
 
-ceph mds set allow_new_snaps false
+ceph fs set cephfs allow_new_snaps false
 expect_failure mkdir .snap/foo
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 echo asdf > foo
 mkdir .snap/foo
@@ -21,7 +21,7 @@ grep asdf .snap/bar/bar
 rmdir .snap/bar
 rm foo
 
-ceph mds set allow_new_snaps false
+ceph fs set cephfs allow_new_snaps false
 expect_failure mkdir .snap/baz
 echo OK

View File

@@ -2,7 +2,7 @@
 set -ex
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 echo 1 > file1
 echo 2 > file2

View File

@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 echo "Create dir 100 to 199 ..."
 for i in $(seq 100 199); do

View File

@@ -2,7 +2,7 @@
 set -e
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 touch foo
 chmod +x foo

View File

@@ -4,7 +4,7 @@ set -e
 mkdir foo
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 # make sure mds handles it when the client does not send flushsnap
 echo x > foo/x

View File

@@ -2,7 +2,7 @@
 set -e
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 #
 # make sure we keep an existing dn's seq

View File

@@ -2,7 +2,7 @@
 set -e
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 # multiple intervening snapshots with no modifications, and thus no
 # snapflush client_caps messages. make sure the mds can handle this.

View File

@@ -1,6 +1,6 @@
 #!/bin/sh -x
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 mkdir .snap/foo

View File

@@ -2,7 +2,7 @@
 set -e
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 git clone git://git.ceph.com/ceph.git
 cd ceph

View File

@@ -1,6 +1,6 @@
 #!/bin/sh -ex
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 # this tests fix for #1399
 mkdir foo

View File

@@ -2,7 +2,7 @@
 set -e
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 echo asdf > a
 mkdir .snap/1

View File

@@ -2,7 +2,7 @@
 set -e
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 echo "making directory tree and files"
 mkdir -p 1/a/b/c/

View File

@@ -5,7 +5,7 @@ expect_failure() {
 }
 set -e
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 mkdir -p d1/d2
 mkdir -p d1/d3

View File

@@ -2,7 +2,7 @@
 set -e
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 file=linux-2.6.33.tar.bz2
 wget -q http://download.ceph.com/qa/$file

View File

@@ -2,7 +2,7 @@
 set -e
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 #
 # verify that a snap update on a parent realm will induce

View File

@@ -2,7 +2,7 @@
 set -e
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 echo "testing simple xattr wb"
 touch x

View File

@@ -2,7 +2,7 @@
 set -e
 
-ceph mds set allow_new_snaps true --yes-i-really-mean-it
+ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it
 
 do_tarball() {
   wget http://download.ceph.com/qa/$1

View File

@@ -16,7 +16,7 @@ for repl in 2 3 7 8 9; do
     ceph osd pool set $name size $repl
     id=`ceph osd dump | sed -n "s/^pool \([0-9]*\) '$name'.*/\1/p"`
-    ceph mds add_data_pool $id
+    ceph fs add_data_pool cephfs $id
 done
 
 # create a file in each of the pools