mirror of https://github.com/ceph/ceph (synced 2025-01-02 17:12:31 +00:00)
rgw_file: fix some zipper flow for RGWLibContinuedReq
Some bits of the standard Zipper conversions were missed for the RGWLibContinuedReq case, where the setup is encapsulated in the request, but execution is broken up into steps. This currently affects only RGWWriteRequest.

Fixes: https://tracker.ceph.com/issues/48136
Signed-off-by: Matt Benjamin <mbenjamin@redhat.com>
parent a86df246c6
commit a559cd4cc0
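Background for the diff below: RGWLibContinuedReq models a librgw request whose setup happens once, inside the request object, while execution is driven in separate steps (the pure-virtual exec_start() is visible in the last hunk). The following standalone C++ sketch is only a schematic of that split-phase shape, with simplified stand-in names, not the real rgw_file API:

#include <iostream>
#include <string>
#include <vector>

// Schematic stand-in for RGWLibContinuedReq: setup is encapsulated in the
// request object, but execution is driven in separate steps by the caller.
class ContinuedReq {
public:
  virtual ~ContinuedReq() = default;
  virtual int init() = 0;           // one-time setup (cf. req->init + op_init)
  virtual int exec_start() = 0;     // begin the operation
  virtual int exec_continue() = 0;  // feed the next unit of work
  virtual int exec_finish() = 0;    // complete and commit
};

// Toy analogue of RGWWriteRequest: a write broken into buffered steps.
class WriteReq : public ContinuedReq {
  std::string obj_name;
  std::vector<std::string> chunks;
public:
  explicit WriteReq(std::string name) : obj_name(std::move(name)) {}
  int init() override { return 0; }
  int exec_start() override {
    std::cout << "start write to " << obj_name << "\n";
    return 0;
  }
  int exec_continue() override {
    chunks.emplace_back("data");    // accept one more chunk
    return 0;
  }
  int exec_finish() override {
    std::cout << "commit " << chunks.size() << " chunks\n";
    return 0;
  }
};

int main() {
  WriteReq req("bucket/object");
  if (req.init() < 0) return 1;     // setup must succeed before any step
  req.exec_start();
  req.exec_continue();
  req.exec_continue();
  req.exec_finish();
}

The commit's point is that the one-time setup step was missing part of the Zipper-era context wiring for this request flavor.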
@@ -371,9 +371,21 @@ namespace rgw {
     }
 
+    struct req_state* s = req->get_state();
+    RGWLibIO& io_ctx = req->get_io();
+    RGWEnv& rgw_env = io_ctx.get_env();
+    RGWObjectCtx& rados_ctx = req->get_octx();
+
+    rgw_env.set("HTTP_HOST", "");
+
+    int ret = req->init(rgw_env, &rados_ctx, &io_ctx, s);
+    if (ret < 0) {
+      dout(10) << "failed to initialize request" << dendl;
+      abort_req(s, op, ret);
+      goto done;
+    }
+
     /* req is-a RGWOp, currently initialized separately */
-    int ret = req->op_init();
+    ret = req->op_init();
     if (ret < 0) {
       dout(10) << "failed to initialize RGWOp" << dendl;
       abort_req(s, op, ret);
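A side effect of the new setup block: `ret` is now declared by the earlier `int ret = req->init(...)`, so the later line must drop its `int`; redeclaring the name in the same scope would not compile. A tiny self-contained illustration (helper names are made up):

#include <iostream>

int init_request() { return 0; }   // stands in for req->init(...)
int op_init() { return 0; }        // stands in for req->op_init()

int process() {
  int ret = init_request();        // first declaration of 'ret'
  if (ret < 0) return ret;
  // int ret = op_init();          // would not compile: redefinition of 'ret'
  ret = op_init();                 // hence the hunk's change to plain assignment
  return ret;
}

int main() { std::cout << process() << "\n"; }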
@@ -460,6 +472,8 @@ namespace rgw {
             << " op status=" << op_ret
             << " ======" << dendl;
 
+    perfcounter->inc(l_rgw_req);
+
     return ret;
   }
 
@@ -1536,6 +1536,9 @@ namespace rgw {
   int RGWWriteRequest::exec_start() {
     struct req_state* state = get_state();
 
+    /* Object needs a bucket from this point */
+    state->object->set_bucket(state->bucket.get());
+
     auto compression_type =
       get_store()->svc()->zone->get_zone_params().get_compression_type(
           state->bucket->get_placement_rule());
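The added set_bucket() call reflects a Zipper invariant: an Object handle can be created before its Bucket is known, but bucket-derived lookups, such as the placement rule that selects the compression type here, need the link in place first. A toy model of that invariant (not the real rgw::sal interfaces):

#include <cassert>
#include <iostream>
#include <string>

// Toy model of the relationship the hunk relies on: an Object handle can
// exist before it knows its Bucket, but bucket-derived lookups need the
// link established first.
struct Bucket {
  std::string placement_rule;
};

struct Object {
  Bucket* bucket = nullptr;                      // unset until attached
  void set_bucket(Bucket* b) { bucket = b; }
  const std::string& placement() const {
    assert(bucket && "set_bucket() must run before placement lookups");
    return bucket->placement_rule;
  }
};

int main() {
  Bucket b{"default-placement"};
  Object o;
  o.set_bucket(&b);                              // cf. state->object->set_bucket(...)
  std::cout << o.placement() << "\n";
}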
@@ -2389,8 +2389,10 @@ public:
   size_t bytes_written;
   bool eio;
 
-  RGWWriteRequest(rgw::sal::RGWRadosStore* store, std::unique_ptr<rgw::sal::RGWUser> _user,
-                  RGWFileHandle* _fh, const std::string& _bname, const std::string& _oname)
+  RGWWriteRequest(rgw::sal::RGWRadosStore* store,
+                  std::unique_ptr<rgw::sal::RGWUser> _user,
+                  RGWFileHandle* _fh, const std::string& _bname,
+                  const std::string& _oname)
     : RGWLibContinuedReq(store->ctx(), std::move(_user)),
       bucket_name(_bname), obj_name(_oname),
       rgw_fh(_fh), filter(nullptr), timer_id(0), real_ofs(0),
@@ -192,7 +192,8 @@ namespace rgw {
     RGWObjectCtx rados_ctx;
   public:
 
-    RGWLibContinuedReq(CephContext* _cct, std::unique_ptr<rgw::sal::RGWUser> _user)
+    RGWLibContinuedReq(CephContext* _cct,
+                       std::unique_ptr<rgw::sal::RGWUser> _user)
       : RGWLibRequest(_cct, std::move(_user)), io_ctx(),
         rstate(_cct, &io_ctx.get_env(), id),
         rados_ctx(rgwlib.get_store(), &rstate)
@@ -204,6 +205,7 @@ namespace rgw {
 
       sysobj_ctx.emplace(store->svc()->sysobj);
 
       get_state()->cio = &io_ctx;
+      get_state()->obj_ctx = &rados_ctx;
       get_state()->sysobj_ctx = &(sysobj_ctx.get());
       get_state()->req_id = store->svc()->zone_utils->unique_id(id);
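This one-line addition is the heart of the fix: the RGWObjectCtx owned by the continued request is now published into the shared req_state, so generic op code reaches it through the state rather than through the request subclass. A simplified sketch of that ownership-and-wiring shape (stand-in names, not the real rgw types):

#include <iostream>

// Schematic of the wiring the added line performs: the continued request
// owns its contexts, and init() publishes raw pointers into the shared
// request state so that generic op code can reach them.
struct ObjectCtx { int cached_attrs = 0; };
struct IoCtx     { int bytes_moved = 0; };

struct ReqState {
  IoCtx* cio = nullptr;
  ObjectCtx* obj_ctx = nullptr;   // the pointer this hunk starts filling in
};

class ContinuedReq {
  IoCtx io_ctx;                   // owned by the request, like RGWLibContinuedReq
  ObjectCtx rados_ctx;
  ReqState state;
public:
  void init() {
    state.cio = &io_ctx;
    state.obj_ctx = &rados_ctx;   // cf. get_state()->obj_ctx = &rados_ctx;
  }
  ReqState* get_state() { return &state; }
};

int main() {
  ContinuedReq req;
  req.init();
  // Generic op code sees only the state, not the request subclass:
  std::cout << (req.get_state()->obj_ctx != nullptr) << "\n";
}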
@@ -214,6 +216,8 @@ namespace rgw {
     }
 
     inline rgw::sal::RGWRadosStore* get_store() { return store; }
+    inline RGWLibIO& get_io() { return io_ctx; }
+    inline RGWObjectCtx& get_octx() { return rados_ctx; }
 
     virtual int execute() final { ceph_abort(); }
     virtual int exec_start() = 0;
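The two accessors added here are what the first hunk consumes via req->get_io() and req->get_octx(). Returning references to request-owned members keeps a single instance of each context alive across the whole multi-step request; a minimal illustration of that accessor pattern (illustrative names only):

#include <iostream>
#include <string>

// Minimal sketch of the accessor pattern the last hunk adds: the request
// owns its context and hands out a reference, so the processing loop never
// copies or re-creates it.
class Req {
  std::string io_ctx{"io"};
public:
  std::string& get_io() { return io_ctx; }   // cf. inline RGWLibIO& get_io()
};

int main() {
  Req req;
  std::string& io = req.get_io();  // reference: mutations act on the request's member
  io += "-used";
  std::cout << req.get_io() << "\n";  // prints "io-used"
}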