mirror of https://github.com/ceph/ceph (synced 2024-12-18 01:16:55 +00:00)

Merge remote-tracking branch 'gh/hammer'
commit a08d3be053

@@ -648,13 +648,18 @@ if(${WITH_RBD})
     common/ContextCompletion.cc
     librbd/AioCompletion.cc
     librbd/AioRequest.cc
+    librbd/AsyncFlattenRequest.cc
+    librbd/AsyncObjectThrottle.cc
+    librbd/AsyncRequest.cc
+    librbd/AsyncResizeRequest.cc
+    librbd/AsyncTrimRequest.cc
     librbd/CopyupRequest.cc
     librbd/ImageCtx.cc
     librbd/ImageWatcher.cc
     librbd/internal.cc
     librbd/librbd.cc
-    librbd/LibrbdWriteback.cc)
+    librbd/LibrbdWriteback.cc
+    librbd/ObjectMap.cc)
   add_library(librbd ${CEPH_SHARED} ${librbd_srcs}
     $<TARGET_OBJECTS:osdc_rbd_objs>
     $<TARGET_OBJECTS:common_util_obj>)

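Review note: the librbd source list in the CMake build gains the asynchronous request state machines and ObjectMap.cc; the LibrbdWriteback.cc line only changes because it no longer terminates the list.
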
@@ -5471,6 +5471,10 @@ int Client::fsetattr(int fd, struct stat *attr, int mask)
   Fh *f = get_filehandle(fd);
   if (!f)
     return -EBADF;
+#if defined(__linux__) && defined(O_PATH)
+  if (f->flags & O_PATH)
+    return -EBADF;
+#endif
   return _setattr(f->inode, attr, mask);
 }

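Review note: on Linux, a descriptor opened with O_PATH refers to the inode only; the kernel rejects I/O and metadata operations through it with EBADF (see open(2)). The hunk above makes Client::fsetattr mirror that behaviour for libcephfs file handles. A minimal standalone illustration (plain POSIX, not Ceph code):

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main() {
    #if defined(__linux__) && defined(O_PATH)
      int fd = open("/tmp", O_PATH);   /* references the inode, no I/O rights */
      if (fd < 0) { perror("open"); return 1; }
      if (fchmod(fd, 0755) < 0)        /* fails with EBADF on O_PATH fds */
        std::printf("fchmod: %s (EBADF expected)\n", std::strerror(errno));
      close(fd);
    #endif
      return 0;
    }
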
@@ -61,6 +61,8 @@
 #define CEPH_FEATURE_MDS_QUOTA (1ULL<<47)
 #define CEPH_FEATURE_CRUSH_V4 (1ULL<<48)  /* straw2 buckets */
 #define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
+// duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
+#define CEPH_FEATURE_OSD_DEGRADED_WRITES (1ULL<<49)

 #define CEPH_FEATURE_RESERVED2 (1ULL<<61)  /* slow down, we are almost out... */
 #define CEPH_FEATURE_RESERVED (1ULL<<62)   /* DO NOT USE THIS ... last bit! */
@@ -146,6 +148,7 @@ static inline unsigned long long ceph_sanitize_features(unsigned long long f) {
      CEPH_FEATURE_MDS_QUOTA | \
      CEPH_FEATURE_CRUSH_V4 | \
      CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY | \
+     CEPH_FEATURE_OSD_DEGRADED_WRITES | \
      0ULL)

 #define CEPH_FEATURES_SUPPORTED_DEFAULT  CEPH_FEATURES_ALL

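Review note: OSD_DEGRADED_WRITES deliberately shares bit 49 with OSD_MIN_SIZE_RECOVERY; because the two features ship in the same release, a peer advertising the bit necessarily supports both, so no separate bit needs to be burned. A hedged sketch of how such a mask is consumed (the two macros are copied from the hunk, the rest is illustrative):

    #include <cstdint>
    #include <cstdio>

    #define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
    #define CEPH_FEATURE_OSD_DEGRADED_WRITES (1ULL<<49)  /* same bit, same release */

    static bool supports_degraded_writes(uint64_t peer_features) {
      return (peer_features & CEPH_FEATURE_OSD_DEGRADED_WRITES) != 0;
    }

    int main() {
      uint64_t pre_hammer_peer = 0;
      uint64_t hammer_peer = CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY;  /* implies DEGRADED_WRITES */
      std::printf("pre-hammer: %d, hammer: %d\n",
                  (int)supports_degraded_writes(pre_hammer_peer),
                  (int)supports_degraded_writes(hammer_peer));
      return 0;
    }
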
@@ -158,9 +158,13 @@ public:
   inline void release_xio_rsp(XioRsp* xrsp) {
     struct xio_msg *msg = xrsp->dequeue();
     struct xio_msg *next_msg = NULL;
+    int code;
     while (msg) {
       next_msg = static_cast<struct xio_msg *>(msg->user_context);
-      int code = xio_release_msg(msg);
+      if (unlikely(!xrsp->xcon->conn || !xrsp->xcon->is_connected()))
+        code = ENOTCONN;
+      else
+        code = xio_release_msg(msg);
       if (unlikely(code)) {
         /* very unlikely, so log it */
         xrsp->xcon->msg_release_fail(msg, code);
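Review note: the old loop handed every message back to Accelio unconditionally; if the connection had already been torn down, that release call was presumably unsafe or meaningless. The new code hoists the declaration of code out of the loop and substitutes ENOTCONN when the connection is gone, so the existing msg_release_fail() path logs the condition instead of calling xio_release_msg() on a dead connection.
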
@@ -344,30 +348,30 @@ private:

 public:
   XioPortals(Messenger *msgr, int _n) : p_vec(NULL)
   {
     /* portal0 */
     portals.push_back(new XioPortal(msgr));
     last_use = 0;

     /* enforce at least two portals if bind */
     if (_n < 2)
       _n = 2;
     n = _n;

     /* additional portals allocated on bind() */
   }

   vector<XioPortal*>& get() { return portals; }

   const char **get_vec()
   {
     return (const char **) p_vec;
   }

   int get_portals_len()
   {
     return n;
   }

   int get_last_use()
   {
@@ -378,78 +382,76 @@ public:
   }

   XioPortal* get_portal0()
   {
     return portals[0];
   }

   int bind(struct xio_session_ops *ops, const string& base_uri,
            uint16_t port, uint16_t *port0);

   int accept(struct xio_session *session,
              struct xio_new_session_req *req,
              void *cb_user_context)
   {
     const char **portals_vec = get_vec();
     int pix = get_last_use();

     return xio_accept(session,
                       (const char **)&(portals_vec[pix]),
                       1, NULL, 0);
   }

   void start()
   {
     XioPortal *portal;
     int p_ix, nportals = portals.size();

     /* portal_0 is the new-session handler, portal_1+ terminate
      * active sessions */

     p_vec = new char*[(nportals-1)];
     for (p_ix = 1; p_ix < nportals; ++p_ix) {
       portal = portals[p_ix];
       /* shift left */
       p_vec[(p_ix-1)] = (char*) /* portal->xio_uri.c_str() */
         portal->portal_id;
     }

     for (p_ix = 0; p_ix < nportals; ++p_ix) {
       portal = portals[p_ix];
       portal->create();
     }
   }

   void shutdown()
   {
     XioPortal *portal;
     int nportals = portals.size();
     for (int p_ix = 0; p_ix < nportals; ++p_ix) {
       portal = portals[p_ix];
       portal->shutdown();
     }
   }

   void join()
   {
     XioPortal *portal;
     int nportals = portals.size();
     for (int p_ix = 0; p_ix < nportals; ++p_ix) {
       portal = portals[p_ix];
       portal->join();
     }
   }

   ~XioPortals()
   {
     int nportals = portals.size();
-    for (int ix = 0; ix < nportals; ++ix) {
+    for (int ix = 0; ix < nportals; ++ix)
       delete(portals[ix]);
-    }
     portals.clear();
-    if (p_vec) {
+    if (p_vec)
       delete[] p_vec;
-    }
   }
 };

 #endif /* XIO_PORTAL_H */

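Review note: accept() above steers each incoming session to one entry of p_vec; portal 0 is reserved for new-session handling, so p_vec is built by shifting portals 1..n-1 left by one. The body of get_last_use() is outside this diff; assuming it rotates last_use, the selection amounts to a round-robin like this hypothetical sketch (names are illustrative, not the XioPortals code):

    #include <cstddef>

    struct PortalPicker {
      std::size_t last_use = 0;
      std::size_t n;                       /* number of accepting portals */
      explicit PortalPicker(std::size_t n_) : n(n_) {}
      std::size_t next() {                 /* assumed get_last_use() semantics */
        std::size_t ix = last_use;
        last_use = (last_use + 1) % n;
        return ix;
      }
    };
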
@@ -1450,8 +1450,13 @@ void ReplicatedPG::do_op(OpRequestRef& op)
    * We can enable degraded writes on ec pools by blocking such a write
    * to a peer until all previous writes have completed.  For now, we
    * will simply block them.
+   *
+   * We also block if our peers do not support DEGRADED_WRITES.
    */
-  if (pool.info.ec_pool() && write_ordered && is_degraded_object(head)) {
+  if ((pool.info.ec_pool() ||
+       !(get_min_peer_features() & CEPH_FEATURE_OSD_DEGRADED_WRITES)) &&
+      write_ordered &&
+      is_degraded_object(head)) {
     wait_for_degraded_object(head, op);
     return;
   }

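Review note: previously only erasure-coded pools blocked ordered writes to degraded objects; the new condition also blocks when any OSD in the acting set predates DEGRADED_WRITES, since get_min_peer_features() intersects the feature masks of all peers. Restated as a standalone predicate (a paraphrase under assumed types, not the ReplicatedPG code):

    #include <cstdint>

    const uint64_t CEPH_FEATURE_OSD_DEGRADED_WRITES = 1ULL << 49;

    /* Paraphrase of the post-merge gate in ReplicatedPG::do_op(). */
    bool must_wait_for_degraded(bool ec_pool, uint64_t min_peer_features,
                                bool write_ordered, bool degraded) {
      bool peers_lack_support =
          !(min_peer_features & CEPH_FEATURE_OSD_DEGRADED_WRITES);
      return (ec_pool || peers_lack_support) && write_ordered && degraded;
    }
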
@@ -7412,7 +7412,7 @@ int RGWRados::cls_obj_complete_cancel(BucketShard& bs, string& tag, rgw_obj& obj
 {
   RGWObjEnt ent;
   obj.get_index_key(&ent.key);
-  return cls_obj_complete_op(bs, CLS_RGW_OP_ADD, tag, -1 /* pool id */, 0, ent, RGW_OBJ_CATEGORY_NONE, NULL, bilog_flags);
+  return cls_obj_complete_op(bs, CLS_RGW_OP_CANCEL, tag, -1 /* pool id */, 0, ent, RGW_OBJ_CATEGORY_NONE, NULL, bilog_flags);
 }

 int RGWRados::cls_obj_set_bucket_tag_timeout(rgw_bucket& bucket, uint64_t timeout)
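Review note: a straightforward copy-paste fix: cls_obj_complete_cancel() was completing the bucket-index transaction with CLS_RGW_OP_ADD, turning a cancellation into an add; it now passes CLS_RGW_OP_CANCEL, matching the function's name.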