Mirror of https://github.com/ceph/ceph (synced 2025-03-25 11:48:05 +00:00)

Commit c0685ec709: Merge remote-tracking branch 'gh/next'
@@ -8,7 +8,7 @@ AC_PREREQ(2.59)
 # VERSION define is not used by the code. It gets a version string
 # from 'git describe'; see src/ceph_ver.[ch]
 
-AC_INIT([ceph], [0.88], [ceph-devel@vger.kernel.org])
+AC_INIT([ceph], [0.89], [ceph-devel@vger.kernel.org])
 
 # Create release string. Used with VERSION for RPMs.
 RPM_RELEASE=0

debian/changelog (vendored, 6 changed lines):
@@ -1,3 +1,9 @@
+ceph (0.89-1) stable; urgency=low
+
+  * New upstream release
+
+ -- Alfredo Deza <adeza@redhat.com>  Wed, 03 Dec 2014 08:18:33 -0800
+
 ceph (0.88-1) stable; urgency=low
 
   * New upstream release
@@ -765,6 +765,21 @@ function test_mon_mds()
   fail_all_mds
   ceph fs rm cephfs --yes-i-really-mean-it
 
+  # Create a FS and check that we can subsequently add a cache tier to it
+  ceph fs new cephfs fs_metadata fs_data
+
+  # Adding overlay to FS pool should be permitted, RADOS clients handle this.
+  ceph osd tier add fs_metadata mds-tier
+  ceph osd tier cache-mode mds-tier writeback
+  ceph osd tier set-overlay fs_metadata mds-tier
+
+  # Clean up FS
+  fail_all_mds
+  ceph fs rm cephfs --yes-i-really-mean-it
+
+  # Clean up overlay/tier relationship
+  ceph osd tier remove-overlay fs_metadata
+  ceph osd tier remove fs_metadata mds-tier
+
   ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
   ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
@@ -1299,39 +1314,31 @@ function test_osd_bench()
 #
 
 set +x
-MON_TESTS=(
-  mon_injectargs
-  mon_injectargs_SI
-  tiering
-  auth
-  auth_profiles
-  mon_misc
-  mon_mon
-  mon_osd
-  mon_osd_pool
-  mon_osd_pool_quota
-  mon_pg
-  mon_osd_pool_set
-  mon_osd_tiered_pool_set
-  mon_osd_erasure_code
-  mon_osd_misc
-  mon_heap_profiler
-  )
+MON_TESTS+=" mon_injectargs"
+MON_TESTS+=" mon_injectargs_SI"
+MON_TESTS+=" tiering"
+MON_TESTS+=" auth"
+MON_TESTS+=" auth_profiles"
+MON_TESTS+=" mon_misc"
+MON_TESTS+=" mon_mon"
+MON_TESTS+=" mon_osd"
+MON_TESTS+=" mon_osd_pool"
+MON_TESTS+=" mon_osd_pool_quota"
+MON_TESTS+=" mon_pg"
+MON_TESTS+=" mon_osd_pool_set"
+MON_TESTS+=" mon_osd_tiered_pool_set"
+MON_TESTS+=" mon_osd_erasure_code"
+MON_TESTS+=" mon_osd_misc"
+MON_TESTS+=" mon_heap_profiler"
 
-OSD_TESTS=(
-  osd_bench
-  )
+OSD_TESTS+=" osd_bench"
 
-MDS_TESTS=(
-  mds_tell
-  mon_mds
-  )
+MDS_TESTS+=" mds_tell"
+MDS_TESTS+=" mon_mds"
 
-TESTS=(
-  $MON_TESTS
-  $OSD_TESTS
-  $MDS_TESTS
-  )
+TESTS+=$MON_TESTS
+TESTS+=$OSD_TESTS
+TESTS+=$MDS_TESTS
 
 #
 # "main" follows
@@ -1340,7 +1347,7 @@ TESTS=(
 function list_tests()
 {
   echo "AVAILABLE TESTS"
-  for i in ${TESTS[@]}; do
+  for i in $TESTS; do
     echo " $i"
   done
 }
@@ -1368,13 +1375,13 @@ while [[ $# -gt 0 ]]; do
       sanity_check=false
       ;;
     "--test-mon" )
-      tests_to_run=("${tests_to_run[@]}" $MON_TESTS)
+      tests_to_run+="$MON_TESTS"
       ;;
     "--test-osd" )
-      tests_to_run=("${tests_to_run[@]}" $OSD_TESTS)
+      tests_to_run+="$OSD_TESTS"
       ;;
     "--test-mds" )
-      tests_to_run=("${tests_to_run[@]}" $MDS_TESTS)
+      tests_to_run+="$MDS_TESTS"
       ;;
     "-t" )
      shift
@@ -1383,7 +1390,7 @@ while [[ $# -gt 0 ]]; do
        usage ;
        exit 1
      fi
-      tests_to_run=("${tests_to_run[@]}" "$1")
+      tests_to_run+=" $1"
      ;;
    "-h" )
      usage ;
@@ -1398,14 +1405,14 @@ if [[ $do_list -eq 1 ]]; then
   exit 0
 fi
 
-if [[ ${#tests_to_run[@]} -eq 0 ]]; then
-  tests_to_run=("${TESTS[@]}")
+if test -z "$tests_to_run" ; then
+  tests_to_run="$TESTS"
 fi
 
 if $sanity_check ; then
   wait_no_osd_down
 fi
-for i in ${tests_to_run[@]}; do
+for i in $tests_to_run; do
   if $sanity_check ; then
     check_no_osd_down
   fi
@@ -216,7 +216,7 @@ Client::Client(Messenger *m, MonClient *mc)
                           cct->_conf->client_oc_max_dirty_age,
                           true);
   objecter_finisher.start();
-  filer = new Filer(objecter);
+  filer = new Filer(objecter, &objecter_finisher);
 }
 
 
@@ -543,7 +543,7 @@ void Client::trim_cache_for_reconnect(MetaSession *s)
                  << " trimmed " << trimmed << " dentries" << dendl;
 
   if (s->caps.size() > 0)
-    _invalidate_kernel_dcache();
+    _invalidate_kernel_dcache(s);
 }
 
 void Client::trim_dentry(Dentry *dn)
@@ -3277,16 +3277,19 @@ void Client::remove_session_caps(MetaSession *s)
   sync_cond.Signal();
 }
 
-void Client::_invalidate_kernel_dcache()
+void Client::_invalidate_kernel_dcache(MetaSession *s)
 {
-  // notify kernel to invalidate top level directory entries. As a side effect,
-  // unused inodes underneath these entries get pruned.
-  if (dentry_invalidate_cb && root->dir) {
-    for (ceph::unordered_map<string, Dentry*>::iterator p = root->dir->dentries.begin();
-         p != root->dir->dentries.end();
-         ++p) {
-      if (p->second->inode)
-        _schedule_invalidate_dentry_callback(p->second, false);
+  if (!dentry_invalidate_cb)
+    return;
+
+  for (xlist<Cap*>::iterator p = s->caps.begin(); !p.end(); ++p) {
+    Inode *in = (*p)->inode;
+    if (in->dn_set.empty())
+      continue;
+    for (set<Dentry*>::iterator q = in->dn_set.begin();
+         q != in->dn_set.end();
+         ++q) {
+      _schedule_invalidate_dentry_callback(*q, false);
     }
   }
 }
@@ -3320,14 +3323,8 @@ void Client::trim_caps(MetaSession *s, int max)
       while (q != in->dn_set.end()) {
         Dentry *dn = *q++;
         if (dn->lru_is_expireable()) {
-          if (dn->dir->parent_inode->ino == MDS_INO_ROOT) {
-            // Only issue one of these per DN for inodes in root: handle
-            // others more efficiently by calling for root-child DNs at
-            // the end of this function.
-            _schedule_invalidate_dentry_callback(dn, true);
-          }
+          _schedule_invalidate_dentry_callback(dn, false);
           trim_dentry(dn);
-
         } else {
           ldout(cct, 20) << " not expirable: " << dn->name << dendl;
           all = false;
@@ -3348,9 +3345,6 @@ void Client::trim_caps(MetaSession *s, int max)
     }
   }
   s->s_cap_iterator = NULL;
-
-  if (s->caps.size() > max)
-    _invalidate_kernel_dcache();
 }
 
 void Client::mark_caps_dirty(Inode *in, int caps)
@@ -429,7 +429,7 @@ protected:
   void trim_cache_for_reconnect(MetaSession *s);
   void trim_dentry(Dentry *dn);
   void trim_caps(MetaSession *s, int max);
-  void _invalidate_kernel_dcache();
+  void _invalidate_kernel_dcache(MetaSession *s);
 
   void dump_inode(Formatter *f, Inode *in, set<Inode*>& did, bool disconnected);
   void dump_cache(Formatter *f); // debug
@@ -129,7 +129,7 @@ MDS::MDS(const std::string &n, Messenger *m, MonClient *mc) :
   objecter = new Objecter(m->cct, messenger, monc, 0, 0);
   objecter->unset_honor_osdmap_full();
 
-  filer = new Filer(objecter);
+  filer = new Filer(objecter, &finisher);
 
   mdcache = new MDCache(this);
   mdlog = new MDLog(this);
@@ -577,8 +577,8 @@ void MDSMonitor::get_health(list<pair<health_status_t, string> >& summary,
   mdsmap.get_health(summary, detail);
 
   // For each MDS GID...
-  for (std::map<mds_gid_t, MDSMap::mds_info_t>::const_iterator i = pending_mdsmap.mds_info.begin();
-      i != pending_mdsmap.mds_info.end(); ++i) {
+  for (std::map<mds_gid_t, MDSMap::mds_info_t>::const_iterator i = mdsmap.mds_info.begin();
+      i != mdsmap.mds_info.end(); ++i) {
     // Decode MDSHealth
     bufferlist bl;
     mon->store->get(MDS_HEALTH_PREFIX, stringify(i->first), bl);
@@ -5685,10 +5685,6 @@ done:
       goto reply;
     }
 
-    if (!_check_remove_tier(pool_id, p, &err, &ss)) {
-      goto reply;
-    }
-
     // go
     pg_pool_t *np = pending_inc.get_new_pool(pool_id, p);
     np->read_tier = overlaypool_id;
@@ -566,7 +566,6 @@ public:
    */
   bool is_readable(version_t ver = 0) {
     if (ver > get_last_committed() ||
-        is_proposing() ||
         !paxos->is_readable(0) ||
         get_last_committed() == 0)
       return false;
@@ -25,6 +25,7 @@
 
 #include "include/Context.h"
 
+#include "common/Finisher.h"
 #include "common/config.h"
 
 #define dout_subsys ceph_subsys_filer
@@ -134,8 +135,9 @@ void Filer::_probe(Probe *probe)
   for (std::vector<ObjectExtent>::iterator i = stat_extents.begin();
        i != stat_extents.end(); ++i) {
     C_Probe *c = new C_Probe(this, probe, i->oid);
-    objecter->stat(i->oid, i->oloc, probe->snapid, &c->size, &c->mtime,
-                   probe->flags | CEPH_OSD_FLAG_RWORDERED, c);
+    objecter->stat(i->oid, i->oloc, probe->snapid, &c->size, &c->mtime,
+                   probe->flags | CEPH_OSD_FLAG_RWORDERED,
+                   new C_OnFinisher(c, finisher));
   }
 }
@@ -339,8 +341,8 @@ void Filer::_do_purge_range(PurgeRange *pr, int fin)
     const OSDMap *osdmap = objecter->get_osdmap_read();
     const object_locator_t oloc = osdmap->file_to_object_locator(pr->layout);
     objecter->put_osdmap_read();
-    objecter->remove(oid, oloc, pr->snapc, pr->mtime, pr->flags,
-                     NULL, new C_PurgeRange(this, pr));
+    objecter->remove(oid, oloc, pr->snapc, pr->mtime, pr->flags, NULL,
+                     new C_OnFinisher(new C_PurgeRange(this, pr), finisher));
   }
 }
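Both Filer hunks above stop handing the raw completion Context straight to the Objecter and instead wrap it in C_OnFinisher, so the callback runs later on the Finisher's thread rather than inside the Objecter's dispatch path. As a rough illustration of the wrapping idea only (not code from this commit; the class name and member layout below are hypothetical, while the real adapter used above is C_OnFinisher), such an adapter can be as small as:

    // Sketch: defer another Context onto a Finisher thread.
    // Assumes Context::finish(int)/complete(int) and Finisher::queue(Context*, int)
    // as commonly used in Ceph; 'C_DeferToFinisher' is a made-up name.
    class C_DeferToFinisher : public Context {
      Context *onfinish;   // the real completion to run later
      Finisher *fin;       // thread that will eventually run it
    public:
      C_DeferToFinisher(Context *c, Finisher *f) : onfinish(c), fin(f) {}
      void finish(int r) {
        fin->queue(onfinish, r);   // Finisher later calls onfinish->complete(r)
      }
    };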
@@ -35,7 +35,7 @@
 class Context;
 class Messenger;
 class OSDMap;
 
-
+class Finisher;
 
 /**** Filer interface ***/
@@ -43,6 +43,7 @@ class OSDMap;
 class Filer {
   CephContext *cct;
   Objecter *objecter;
+  Finisher *finisher;
 
   // probes
   struct Probe {
@@ -88,7 +89,7 @@ class Filer {
   Filer(const Filer& other);
   const Filer operator=(const Filer& other);
 
-  Filer(Objecter *o) : cct(o->cct), objecter(o) {}
+  Filer(Objecter *o, Finisher *f) : cct(o->cct), objecter(o), finisher(f) {}
   ~Filer() {}
 
   bool is_active() {
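With the constructor now requiring a Finisher, every owner of a Filer has to supply one that outlives it, which is what the Client, MDS, Journaler and Dumper hunks in this commit do with their existing finishers. A usage sketch only (the example_setup function, include paths, and the CephContext/Objecter arguments are assumptions for illustration, not taken from this commit):

    // Sketch: wiring a Finisher into the two-argument Filer constructor.
    #include "common/Finisher.h"   // the header this commit adds to Filer.cc
    #include "osdc/Filer.h"

    void example_setup(CephContext *cct, Objecter *objecter)
    {
      Finisher finisher(cct);
      finisher.start();                    // spawn the completion thread

      Filer filer(objecter, &finisher);    // completions run on that thread
      // ... issue probe / purge-range operations through 'filer' ...

      finisher.stop();                     // drain queued completions and join
    }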
@@ -377,7 +377,7 @@ public:
     ino(ino_), pg_pool(pool), readonly(true),
     stream_format(-1), journal_stream(-1),
     magic(mag),
-    objecter(obj), filer(objecter), logger(l), logger_key_lat(lkey),
+    objecter(obj), filer(objecter, f), logger(l), logger_key_lat(lkey),
     timer(tim), delay_flush_event(0),
     state(STATE_UNDEF), error(0),
     prezeroing_pos(0), prezero_pos(0), write_pos(0), flush_pos(0), safe_pos(0),
@@ -29,7 +29,7 @@ function vstart_setup()
   export LC_ALL=C # some tests are vulnerable to i18n
   ./vstart.sh \
     -o 'paxos propose interval = 0.01' \
-    -n -l $VSTART_ARGS || return 1
+    -n -l $CEPH_START || return 1
   export PATH=.:$PATH
   export CEPH_CONF=$CEPH_DIR/ceph.conf
 
@@ -85,7 +85,7 @@ int Dumper::dump(const char *dump_file)
 
   cout << "journal is " << start << "~" << len << std::endl;
 
-  Filer filer(objecter);
+  Filer filer(objecter, &finisher);
   bufferlist bl;
 
   C_SaferCond cond;
@@ -238,7 +238,7 @@ int Dumper::undump(const char *dump_file)
     return r;
   }
 
-  Filer filer(objecter);
+  Filer filer(objecter, &finisher);
 
   /* Erase any objects at the end of the region to which we shall write
    * the new log data. This is to avoid leaving trailing junk after