rgw: svc: more zone work, add zone_utils, quota services

Signed-off-by: Yehuda Sadeh <yehuda@redhat.com>
Yehuda Sadeh 2018-08-15 18:38:23 -07:00
parent ca8a636d08
commit f46a94f8fc
35 changed files with 451 additions and 328 deletions
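
Nearly every hunk below follows one migration pattern: zone, zone-utils and quota state that RGWRados used to expose directly is now reached through per-subsystem service objects hanging off a new store->svc struct (see the rgw_rados.h hunks further down). A minimal before/after sketch, condensed from this diff (the RGWSI_* class names and the svc members are real; the trimmed layout is illustrative):

// Before: rgw_raw_obj obj(store->get_zone_params().user_uid_pool, oid);
// After:  rgw_raw_obj obj(store->svc.zone->get_zone_params().user_uid_pool, oid);

#include <memory>

class RGWSI_RADOS;      // services/svc_rados.h
class RGWSI_Zone;       // services/svc_zone.h: zone/zonegroup/realm/period state
class RGWSI_ZoneUtils;  // services/svc_zone_utils.h: host id / unique id helpers
class RGWSI_Quota;      // services/svc_quota.h: default bucket and user quotas

class RGWRados {        // trimmed to the members this commit adds
public:
  struct {
    std::shared_ptr<RGWSI_RADOS>     rados;
    std::shared_ptr<RGWSI_Zone>      zone;
    std::shared_ptr<RGWSI_ZoneUtils> zone_utils;
    std::shared_ptr<RGWSI_Quota>     quota;
  } svc;
};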

View File

@ -39,8 +39,10 @@ function(gperf_generate input output)
endfunction()
set(librgw_common_srcs
services/svc_quota.cc
services/svc_rados.cc
services/svc_zone.cc
services/svc_zone_utils.cc
rgw_service.cc
rgw_acl.cc
rgw_acl_s3.cc

View File

@ -13,6 +13,7 @@
#include "common/errno.h"
#include "common/ceph_json.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_acl.h"
#include "rgw_acl_s3.h"
@ -22,6 +23,8 @@
#include "rgw_string.h"
#include "rgw_multi.h"
#include "services/svc_zone.h"
#include "include/rados/librados.hpp"
// until everything is moved from rgw_common
#include "rgw_common.h"
@ -106,7 +109,7 @@ int rgw_read_user_buckets(RGWRados * store,
buckets.clear();
std::string buckets_obj_id;
rgw_get_buckets_obj(user_id, buckets_obj_id);
rgw_raw_obj obj(store->get_zone_params().user_uid_pool, buckets_obj_id);
rgw_raw_obj obj(store->svc.zone->get_zone_params().user_uid_pool, buckets_obj_id);
bool truncated = false;
string m = marker;
@ -154,7 +157,7 @@ int rgw_bucket_sync_user_stats(RGWRados *store, const rgw_user& user_id, const R
{
string buckets_obj_id;
rgw_get_buckets_obj(user_id, buckets_obj_id);
rgw_raw_obj obj(store->get_zone_params().user_uid_pool, buckets_obj_id);
rgw_raw_obj obj(store->svc.zone->get_zone_params().user_uid_pool, buckets_obj_id);
return store->cls_user_sync_bucket_stats(obj, bucket_info);
}
@ -214,7 +217,7 @@ int rgw_link_bucket(RGWRados* const store,
string buckets_obj_id;
rgw_get_buckets_obj(user_id, buckets_obj_id);
rgw_raw_obj obj(store->get_zone_params().user_uid_pool, buckets_obj_id);
rgw_raw_obj obj(store->svc.zone->get_zone_params().user_uid_pool, buckets_obj_id);
ret = store->cls_user_add_bucket(obj, new_bucket);
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: error adding bucket to directory: "
@ -251,7 +254,7 @@ int rgw_unlink_bucket(RGWRados *store, const rgw_user& user_id, const string& te
cls_user_bucket bucket;
bucket.name = bucket_name;
rgw_raw_obj obj(store->get_zone_params().user_uid_pool, buckets_obj_id);
rgw_raw_obj obj(store->svc.zone->get_zone_params().user_uid_pool, buckets_obj_id);
ret = store->cls_user_remove_bucket(obj, bucket);
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: error removing bucket from directory: "
@ -818,7 +821,7 @@ int RGWBucket::link(RGWBucketAdminOpState& op_state, std::string *err_msg)
std::string display_name = op_state.get_user_display_name();
rgw_bucket bucket = op_state.get_bucket();
const rgw_pool& root_pool = store->get_zone_params().domain_root;
const rgw_pool& root_pool = store->svc.zone->get_zone_params().domain_root;
std::string bucket_entry;
rgw_make_bucket_entry_name(tenant, bucket_name, bucket_entry);
rgw_raw_obj obj(root_pool, bucket_entry);
@ -1703,7 +1706,7 @@ int RGWDataChangesLog::choose_oid(const rgw_bucket_shard& bs) {
int RGWDataChangesLog::renew_entries()
{
if (!store->need_to_log_data())
if (!store->svc.zone->need_to_log_data())
return 0;
/* we can't keep the bucket name as part of the cls_log_entry, and we need
@ -1797,7 +1800,7 @@ int RGWDataChangesLog::get_log_shard_id(rgw_bucket& bucket, int shard_id) {
}
int RGWDataChangesLog::add_entry(rgw_bucket& bucket, int shard_id) {
if (!store->need_to_log_data())
if (!store->svc.zone->need_to_log_data())
return 0;
if (observer) {
@ -2007,6 +2010,14 @@ int RGWDataChangesLog::trim_entries(const real_time& start_time, const real_time
return 0;
}
int RGWDataChangesLog::lock_exclusive(int shard_id, timespan duration, string& zone_id, string& owner_id) {
return store->lock_exclusive(store->svc.zone->get_zone_params().log_pool, oids[shard_id], duration, zone_id, owner_id);
}
int RGWDataChangesLog::unlock(int shard_id, string& zone_id, string& owner_id) {
return store->unlock(store->svc.zone->get_zone_params().log_pool, oids[shard_id], zone_id, owner_id);
}
bool RGWDataChangesLog::going_down()
{
return down_flag;
@ -2187,7 +2198,7 @@ public:
void get_pool_and_oid(RGWRados *store, const string& key, rgw_pool& pool, string& oid) override {
oid = key;
pool = store->get_zone_params().domain_root;
pool = store->svc.zone->get_zone_params().domain_root;
}
int list_keys_init(RGWRados *store, const string& marker, void **phandle) override {
@ -2195,7 +2206,7 @@ public:
info->store = store;
int ret = store->list_raw_objects_init(store->get_zone_params().domain_root, marker,
int ret = store->list_raw_objects_init(store->svc.zone->get_zone_params().domain_root, marker,
&info->ctx);
if (ret < 0) {
return ret;
@ -2303,7 +2314,7 @@ public:
bci.info.bucket.name = bucket_name;
bci.info.bucket.bucket_id = bucket_instance;
bci.info.bucket.tenant = tenant_name;
ret = store->select_bucket_location_by_rule(bci.info.placement_rule, &rule_info);
ret = store->svc.zone->select_bucket_location_by_rule(bci.info.placement_rule, &rule_info);
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: select_bucket_placement() returned " << ret << dendl;
return ret;
@ -2386,7 +2397,7 @@ public:
void get_pool_and_oid(RGWRados *store, const string& key, rgw_pool& pool, string& oid) override {
oid = RGW_BUCKET_INSTANCE_MD_PREFIX + key;
rgw_bucket_instance_key_to_oid(oid);
pool = store->get_zone_params().domain_root;
pool = store->svc.zone->get_zone_params().domain_root;
}
int list_keys_init(RGWRados *store, const string& marker, void **phandle) override {
@ -2394,7 +2405,7 @@ public:
info->store = store;
int ret = store->list_raw_objects_init(store->get_zone_params().domain_root, marker,
int ret = store->list_raw_objects_init(store->svc.zone->get_zone_params().domain_root, marker,
&info->ctx);
if (ret < 0) {
return ret;

View File

@ -508,12 +508,8 @@ public:
int trim_entries(const real_time& start_time, const real_time& end_time,
const string& start_marker, const string& end_marker);
int get_info(int shard_id, RGWDataChangesLogInfo *info);
int lock_exclusive(int shard_id, timespan duration, string& zone_id, string& owner_id) {
return store->lock_exclusive(store->get_zone_params().log_pool, oids[shard_id], duration, zone_id, owner_id);
}
int unlock(int shard_id, string& zone_id, string& owner_id) {
return store->unlock(store->get_zone_params().log_pool, oids[shard_id], zone_id, owner_id);
}
int lock_exclusive(int shard_id, timespan duration, string& zone_id, string& owner_id);
int unlock(int shard_id, string& zone_id, string& owner_id);
struct LogMarker {
int shard;
string marker;

View File

@ -1,7 +1,11 @@
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_coroutine.h"
#include "rgw_cr_rados.h"
#include "services/svc_zone.h"
#include "services/svc_zone_utils.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/rgw/cls_rgw_client.h"

View File

@ -12,6 +12,7 @@
#include "rgw_common.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_sync.h"
#include "rgw_data_sync.h"
#include "rgw_rest_conn.h"
@ -25,6 +26,8 @@
#include "cls/lock/cls_lock_client.h"
#include "services/svc_zone.h"
#include "include/random.h"
#include <boost/asio/yield.hpp>
@ -83,7 +86,7 @@ bool RGWReadDataSyncStatusMarkersCR::spawn_next()
}
using CR = RGWSimpleRadosReadCR<rgw_data_sync_marker>;
spawn(new CR(env->async_rados, env->store,
rgw_raw_obj(env->store->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(env->source_zone, shard_id)),
rgw_raw_obj(env->store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(env->source_zone, shard_id)),
&markers[shard_id]),
false);
shard_id++;
@ -119,7 +122,7 @@ bool RGWReadDataSyncRecoveringShardsCR::spawn_next()
string error_oid = RGWDataSyncStatusManager::shard_obj_name(env->source_zone, shard_id) + ".retry";
auto& shard_keys = omapkeys[shard_id];
shard_keys = std::make_shared<RGWRadosGetOmapKeysCR::Result>();
spawn(new RGWRadosGetOmapKeysCR(env->store, rgw_raw_obj(env->store->get_zone_params().log_pool, error_oid),
spawn(new RGWRadosGetOmapKeysCR(env->store, rgw_raw_obj(env->store->svc.zone->get_zone_params().log_pool, error_oid),
marker, max_entries, shard_keys), false);
++shard_id;
@ -146,7 +149,7 @@ int RGWReadDataSyncStatusCoroutine::operate()
yield {
bool empty_on_enoent = false; // fail on ENOENT
call(new ReadInfoCR(sync_env->async_rados, sync_env->store,
rgw_raw_obj(sync_env->store->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
&sync_status->sync_info, empty_on_enoent));
}
if (retcode < 0) {
@ -454,7 +457,7 @@ public:
RGWSyncTraceNodeRef& _tn_parent,
rgw_data_sync_status *status)
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env), store(sync_env->store),
pool(store->get_zone_params().log_pool),
pool(store->svc.zone->get_zone_params().log_pool),
num_shards(num_shards), status(status),
tn(sync_env->sync_tracer->add_node(_tn_parent, "init_data_sync_status")) {
lock_name = "sync_lock";
@ -504,7 +507,7 @@ public:
/* fetch current position in logs */
yield {
RGWRESTConn *conn = store->get_zone_conn_by_id(sync_env->source_zone);
RGWRESTConn *conn = store->svc.zone->get_zone_conn_by_id(sync_env->source_zone);
if (!conn) {
tn->log(0, SSTR("ERROR: connection to zone " << sync_env->source_zone << " does not exist!"));
return set_cr_error(-EIO);
@ -758,7 +761,7 @@ public:
return set_cr_error(retcode);
}
entries_index = new RGWShardedOmapCRManager(sync_env->async_rados, store, this, num_shards,
store->get_zone_params().log_pool,
store->svc.zone->get_zone_params().log_pool,
oid_prefix);
yield; // yield so OmapAppendCRs can start
for (iter = result.begin(); iter != result.end(); ++iter) {
@ -796,7 +799,7 @@ public:
rgw_data_sync_marker& marker = iter->second;
marker.total_entries = entries_index->get_total_entries(shard_id);
spawn(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
marker), true);
}
} else {
@ -862,7 +865,7 @@ public:
RGWRados *store = sync_env->store;
return new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, marker_oid),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
sync_marker);
}
@ -1197,7 +1200,7 @@ public:
}
RGWRados *store = sync_env->store;
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, status_oid),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
lock_name, lock_duration, this));
lease_stack.reset(spawn(lease_cr.get(), false));
}
@ -1278,7 +1281,7 @@ public:
sync_marker.next_step_marker.clear();
RGWRados *store = sync_env->store;
call(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, status_oid),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
sync_marker));
}
if (retcode < 0) {
@ -1484,7 +1487,7 @@ public:
RGWCoroutine *alloc_finisher_cr() override {
RGWRados *store = sync_env->store;
return new RGWSimpleRadosReadCR<rgw_data_sync_marker>(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sync_env->source_zone, shard_id)),
&sync_marker);
}
@ -1595,7 +1598,7 @@ public:
tn->log(10, SSTR("spawning " << num_shards << " shards sync"));
for (map<uint32_t, rgw_data_sync_marker>::iterator iter = sync_status.sync_markers.begin();
iter != sync_status.sync_markers.end(); ++iter) {
RGWDataSyncShardControlCR *cr = new RGWDataSyncShardControlCR(sync_env, sync_env->store->get_zone_params().log_pool,
RGWDataSyncShardControlCR *cr = new RGWDataSyncShardControlCR(sync_env, sync_env->store->svc.zone->get_zone_params().log_pool,
iter->first, iter->second, tn);
cr->get();
shard_crs_lock.Lock();
@ -1614,7 +1617,7 @@ public:
RGWCoroutine *set_sync_info_cr() {
RGWRados *store = sync_env->store;
return new RGWSimpleRadosWriteCR<rgw_data_sync_info>(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, RGWDataSyncStatusManager::sync_status_oid(sync_env->source_zone)),
sync_status.sync_info);
}
@ -1749,25 +1752,24 @@ int RGWRemoteDataLog::run_sync(int num_shards)
int RGWDataSyncStatusManager::init()
{
auto zone_def_iter = store->zone_by_id.find(source_zone);
if (zone_def_iter == store->zone_by_id.end()) {
RGWZone *zone_def;
if (!store->svc.zone->find_zone_by_id(source_zone, &zone_def)) {
ldpp_dout(this, 0) << "ERROR: failed to find zone config info for zone=" << source_zone << dendl;
return -EIO;
}
auto& zone_def = zone_def_iter->second;
if (!store->get_sync_modules_manager()->supports_data_export(zone_def.tier_type)) {
if (!store->get_sync_modules_manager()->supports_data_export(zone_def->tier_type)) {
return -ENOTSUP;
}
RGWZoneParams& zone_params = store->get_zone_params();
RGWZoneParams& zone_params = store->svc.zone->get_zone_params();
if (sync_module == nullptr) {
sync_module = store->get_sync_module();
}
conn = store->get_zone_conn_by_id(source_zone);
conn = store->svc.zone->get_zone_conn_by_id(source_zone);
if (!conn) {
ldpp_dout(this, 0) << "connection object to zone " << source_zone << " does not exist" << dendl;
return -EINVAL;
@ -1910,7 +1912,7 @@ public:
}
yield {
auto store = sync_env->store;
rgw_raw_obj obj(store->get_zone_params().log_pool, sync_status_oid);
rgw_raw_obj obj(store->svc.zone->get_zone_params().log_pool, sync_status_oid);
if (info.syncstopped) {
call(new RGWRadosRemoveCR(store, obj));
@ -2008,7 +2010,7 @@ int RGWReadBucketSyncStatusCoroutine::operate()
{
reenter(this) {
yield call(new RGWSimpleRadosReadAttrsCR(sync_env->async_rados, sync_env->store,
rgw_raw_obj(sync_env->store->get_zone_params().log_pool, oid),
rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, oid),
&attrs));
if (retcode == -ENOENT) {
*status = rgw_bucket_shard_sync_info();
@ -2061,7 +2063,7 @@ int RGWReadRecoveringBucketShardsCoroutine::operate()
count = 0;
do {
omapkeys = std::make_shared<RGWRadosGetOmapKeysCR::Result>();
yield call(new RGWRadosGetOmapKeysCR(store, rgw_raw_obj(store->get_zone_params().log_pool, error_oid),
yield call(new RGWRadosGetOmapKeysCR(store, rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, error_oid),
marker, max_omap_entries, omapkeys));
if (retcode == -ENOENT) {
@ -2128,7 +2130,7 @@ int RGWReadPendingBucketShardsCoroutine::operate()
//read sync status marker
using CR = RGWSimpleRadosReadCR<rgw_data_sync_marker>;
yield call(new CR(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, status_oid),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
sync_marker));
if (retcode < 0) {
ldout(sync_env->cct,0) << "failed to read sync status marker with "
@ -2379,7 +2381,7 @@ public:
tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker));
return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, marker_oid),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
attrs);
}
@ -2442,7 +2444,7 @@ public:
tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker));
return new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados,
store,
rgw_raw_obj(store->get_zone_params().log_pool, marker_oid),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
attrs);
}
@ -2545,7 +2547,7 @@ public:
data_sync_module = sync_env->sync_module->get_data_handler();
zones_trace = _zones_trace;
zones_trace.insert(sync_env->store->get_zone().id);
zones_trace.insert(sync_env->store->svc.zone->get_zone().id);
}
int operate() override {
@ -2754,7 +2756,7 @@ int RGWBucketShardFullSyncCR::operate()
sync_info.encode_state_attr(attrs);
RGWRados *store = sync_env->store;
call(new RGWSimpleRadosWriteAttrsCR(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, status_oid),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
attrs));
}
} else {
@ -2810,7 +2812,7 @@ public:
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env), bs(bs),
bucket_info(_bucket_info), lease_cr(lease_cr), sync_info(sync_info),
marker_tracker(sync_env, status_oid, sync_info.inc_marker),
status_oid(status_oid), zone_id(_sync_env->store->get_zone().id),
status_oid(status_oid), zone_id(_sync_env->store->svc.zone->get_zone().id),
tn(sync_env->sync_tracer->add_node(_tn_parent, "inc_sync",
SSTR(bucket_shard_str{bs})))
{
@ -3057,7 +3059,7 @@ int RGWRunBucketSyncCoroutine::operate()
set_status("acquiring sync lock");
auto store = sync_env->store;
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, status_oid),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, status_oid),
"sync_lock",
cct->_conf->rgw_sync_lease_period,
this));
@ -3091,7 +3093,7 @@ int RGWRunBucketSyncCoroutine::operate()
tn->log(10, SSTR("no local info for bucket:" << ": fetching metadata"));
string raw_key = string("bucket.instance:") + bs.bucket.get_key();
meta_sync_env.init(sync_env->dpp, cct, sync_env->store, sync_env->store->rest_master_conn, sync_env->async_rados,
meta_sync_env.init(sync_env->dpp, cct, sync_env->store, sync_env->store->svc.zone->get_master_conn(), sync_env->async_rados,
sync_env->http_manager, sync_env->error_logger, sync_env->sync_tracer);
call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key,
@ -3174,7 +3176,7 @@ RGWCoroutine *RGWRemoteBucketLog::run_sync_cr()
int RGWBucketSyncStatusManager::init()
{
conn = store->get_zone_conn_by_id(source_zone);
conn = store->svc.zone->get_zone_conn_by_id(source_zone);
if (!conn) {
ldpp_dout(this, 0) << "connection object to zone " << source_zone << " does not exist" << dendl;
return -EINVAL;
@ -3422,8 +3424,8 @@ class DataLogTrimCR : public RGWCoroutine {
int num_shards, std::vector<std::string>& last_trim)
: RGWCoroutine(store->ctx()), store(store), http(http),
num_shards(num_shards),
zone_id(store->get_zone().id),
peer_status(store->zone_conn_map.size()),
zone_id(store->svc.zone->get_zone().id),
peer_status(store->svc.zone->get_zone_conn_map().size()),
min_shard_markers(num_shards),
last_trim(last_trim)
{}
@ -3446,7 +3448,7 @@ int DataLogTrimCR::operate()
};
auto p = peer_status.begin();
for (auto& c : store->zone_conn_map) {
for (auto& c : store->svc.zone->get_zone_conn_map()) {
ldout(cct, 20) << "query sync status from " << c.first << dendl;
using StatusCR = RGWReadRESTResourceCR<rgw_data_sync_status>;
spawn(new StatusCR(cct, c.second, http, "/admin/log/", params, &*p),
@ -3528,7 +3530,7 @@ int DataLogTrimPollCR::operate()
// prevent other gateways from attempting to trim for the duration
set_status("acquiring trim lock");
yield call(new RGWSimpleRadosLockCR(store->get_async_rados(), store,
rgw_raw_obj(store->get_zone_params().log_pool, lock_oid),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, lock_oid),
"data_trim", lock_cookie,
interval.sec()));
if (retcode < 0) {

View File

@ -228,6 +228,7 @@ struct rgw_bucket_entry_owner {
};
class RGWSyncErrorLogger;
class RGWRESTConn;
struct RGWDataSyncEnv {
const DoutPrefixProvider *dpp{nullptr};

View File

@ -3,6 +3,7 @@
#include "rgw_common.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_log.h"
#include "rgw_acl.h"
#include "rgw_acl_s3.h"

View File

@ -3,6 +3,7 @@
#include "rgw_common.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_log.h"
#include "rgw_acl.h"
#include "rgw_acl_s3.h"

View File

@ -9,11 +9,15 @@
#include "cls/version/cls_version_types.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_tools.h"
#include "rgw_cr_rados.h"
#include "services/svc_zone.h"
#include "include/ceph_assert.h"
#include <boost/asio/yield.hpp>
#define dout_subsys ceph_subsys_rgw
@ -94,7 +98,7 @@ void RGWMetadataLogData::decode_json(JSONObj *obj) {
int RGWMetadataLog::add_entry(RGWMetadataHandler *handler, const string& section, const string& key, bufferlist& bl) {
if (!store->need_to_log_metadata())
if (!store->svc.zone->need_to_log_metadata())
return 0;
string oid;
@ -236,14 +240,14 @@ int RGWMetadataLog::lock_exclusive(int shard_id, timespan duration, string& zone
string oid;
get_shard_oid(shard_id, oid);
return store->lock_exclusive(store->get_zone_params().log_pool, oid, duration, zone_id, owner_id);
return store->lock_exclusive(store->svc.zone->get_zone_params().log_pool, oid, duration, zone_id, owner_id);
}
int RGWMetadataLog::unlock(int shard_id, string& zone_id, string& owner_id) {
string oid;
get_shard_oid(shard_id, oid);
return store->unlock(store->get_zone_params().log_pool, oid, zone_id, owner_id);
return store->unlock(store->svc.zone->get_zone_params().log_pool, oid, zone_id, owner_id);
}
void RGWMetadataLog::mark_modified(int shard_id)
@ -357,7 +361,7 @@ int read_history(RGWRados *store, RGWMetadataLogHistory *state,
RGWObjVersionTracker *objv_tracker)
{
RGWObjectCtx ctx{store};
auto& pool = store->get_zone_params().log_pool;
auto& pool = store->svc.zone->get_zone_params().log_pool;
const auto& oid = RGWMetadataLogHistory::oid;
bufferlist bl;
int ret = rgw_get_system_obj(store, ctx, pool, oid, bl, objv_tracker, nullptr);
@ -391,7 +395,7 @@ int write_history(RGWRados *store, const RGWMetadataLogHistory& state,
bufferlist bl;
state.encode(bl);
auto& pool = store->get_zone_params().log_pool;
auto& pool = store->svc.zone->get_zone_params().log_pool;
const auto& oid = RGWMetadataLogHistory::oid;
return rgw_put_system_obj(store, pool, oid, bl,
exclusive, objv_tracker, real_time{});
@ -415,7 +419,7 @@ class ReadHistoryCR : public RGWCoroutine {
int operate() {
reenter(this) {
yield {
rgw_raw_obj obj{store->get_zone_params().log_pool,
rgw_raw_obj obj{store->svc.zone->get_zone_params().log_pool,
RGWMetadataLogHistory::oid};
constexpr bool empty_on_enoent = false;
@ -460,7 +464,7 @@ class WriteHistoryCR : public RGWCoroutine {
state.oldest_realm_epoch = cursor.get_epoch();
yield {
rgw_raw_obj obj{store->get_zone_params().log_pool,
rgw_raw_obj obj{store->svc.zone->get_zone_params().log_pool,
RGWMetadataLogHistory::oid};
using WriteCR = RGWSimpleRadosWriteCR<RGWMetadataLogHistory>;
@ -1038,7 +1042,7 @@ int RGWMetadataManager::store_in_heap(RGWMetadataHandler *handler, const string&
return -EINVAL;
}
rgw_pool heap_pool(store->get_zone_params().metadata_heap);
rgw_pool heap_pool(store->svc.zone->get_zone_params().metadata_heap);
if (heap_pool.empty()) {
return 0;
@ -1063,7 +1067,7 @@ int RGWMetadataManager::remove_from_heap(RGWMetadataHandler *handler, const stri
return -EINVAL;
}
rgw_pool heap_pool(store->get_zone_params().metadata_heap);
rgw_pool heap_pool(store->svc.zone->get_zone_params().metadata_heap);
if (heap_pool.empty()) {
return 0;

View File

@ -24,6 +24,7 @@
#include "common/static_ptr.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_op.h"
#include "rgw_rest.h"
#include "rgw_acl.h"
@ -45,6 +46,10 @@
#include "rgw_tag_s3.h"
#include "rgw_putobj_processor.h"
#include "rgw_putobj_throttle.h"
#include "services/svc_zone.h"
#include "services/svc_quota.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/rgw/cls_rgw_client.h"
@ -454,7 +459,7 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
}
if (ret == 0) {
string& zonegroup = source_info.zonegroup;
s->local_source = store->get_zonegroup().equals(zonegroup);
s->local_source = store->svc.zone->get_zonegroup().equals(zonegroup);
}
}
@ -504,7 +509,7 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
s->bucket_owner = s->bucket_acl->get_owner();
RGWZoneGroup zonegroup;
int r = store->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
int r = store->svc.zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
if (!r) {
if (!zonegroup.endpoints.empty()) {
s->zonegroup_endpoint = zonegroup.endpoints.front();
@ -521,14 +526,14 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
ret = r;
}
if (s->bucket_exists && !store->get_zonegroup().equals(s->bucket_info.zonegroup)) {
if (s->bucket_exists && !store->svc.zone->get_zonegroup().equals(s->bucket_info.zonegroup)) {
ldpp_dout(s, 0) << "NOTICE: request for data in a different zonegroup ("
<< s->bucket_info.zonegroup << " != "
<< store->get_zonegroup().get_id() << ")" << dendl;
<< store->svc.zone->get_zonegroup().get_id() << ")" << dendl;
/* we now need to make sure that the operation actually requires copy source, that is
* it's a copy operation
*/
if (store->get_zonegroup().is_master_zonegroup() && s->system_request) {
if (store->svc.zone->get_zonegroup().is_master_zonegroup() && s->system_request) {
/*If this is the master, don't redirect*/
} else if (s->op_type == RGW_OP_GET_BUCKET_LOCATION ) {
/* If op is get bucket location, don't redirect */
@ -592,7 +597,7 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s)
ret = -EACCES;
}
bool success = store->get_redirect_zone_endpoint(&s->redirect_zone_endpoint);
bool success = store->svc.zone->get_redirect_zone_endpoint(&s->redirect_zone_endpoint);
if (success) {
ldpp_dout(s, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl;
}
@ -830,7 +835,7 @@ int RGWOp::verify_op_mask()
return -EPERM;
}
if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone_is_writeable()) {
if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->svc.zone->zone_is_writeable()) {
ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a "
"non-system user, permission denied" << dendl;
return -EPERM;
@ -1027,13 +1032,13 @@ int RGWOp::init_quota()
} else if (uinfo->bucket_quota.enabled) {
bucket_quota = uinfo->bucket_quota;
} else {
bucket_quota = store->get_bucket_quota();
bucket_quota = store->svc.quota->get_bucket_quota();
}
if (uinfo->user_quota.enabled) {
user_quota = uinfo->user_quota;
} else {
user_quota = store->get_user_quota();
user_quota = store->svc.quota->get_user_quota();
}
return 0;
@ -2046,7 +2051,7 @@ void RGWListBuckets::execute()
/* We need to have stats for all our policies - even if a given policy
* isn't actually used in a given account. In such situation its usage
* stats would be simply full of zeros. */
for (const auto& policy : store->get_zonegroup().placement_targets) {
for (const auto& policy : store->svc.zone->get_zonegroup().placement_targets) {
policies_stats.emplace(policy.second.name,
decltype(policies_stats)::mapped_type());
}
@ -2188,7 +2193,7 @@ void RGWStatAccount::execute()
/* We need to have stats for all our policies - even if a given policy
* isn't actually used in a given account. In such situation its usage
* stats would be simply full of zeros. */
for (const auto& policy : store->get_zonegroup().placement_targets) {
for (const auto& policy : store->svc.zone->get_zonegroup().placement_targets) {
policies_stats.emplace(policy.second.name,
decltype(policies_stats)::mapped_type());
}
@ -2258,7 +2263,7 @@ void RGWSetBucketVersioning::execute()
return;
}
if (!store->is_meta_master()) {
if (!store->svc.zone->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@ -2336,7 +2341,7 @@ void RGWSetBucketWebsite::execute()
if (op_ret < 0)
return;
if (!store->is_meta_master()) {
if (!store->svc.zone->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
@ -2575,7 +2580,7 @@ static int forward_request_to_master(struct req_state *s, obj_version *objv,
RGWRados *store, bufferlist& in_data,
JSONParser *jp, req_info *forward_info)
{
if (!store->rest_master_conn) {
if (!store->svc.zone->get_master_conn()) {
ldpp_dout(s, 0) << "rest connection is invalid" << dendl;
return -EINVAL;
}
@ -2583,8 +2588,8 @@ static int forward_request_to_master(struct req_state *s, obj_version *objv,
bufferlist response;
string uid_str = s->user->user_id.to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
int ret = store->rest_master_conn->forward(uid_str, (forward_info ? *forward_info : s->info),
objv, MAX_REST_RESPONSE, &in_data, &response);
int ret = store->svc.zone->get_master_conn()->forward(uid_str, (forward_info ? *forward_info : s->info),
objv, MAX_REST_RESPONSE, &in_data, &response);
if (ret < 0)
return ret;
@ -2776,7 +2781,7 @@ void RGWCreateBucket::execute()
bool existed;
string bucket_name;
rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_name);
rgw_raw_obj obj(store->get_zone_params().domain_root, bucket_name);
rgw_raw_obj obj(store->svc.zone->get_zone_params().domain_root, bucket_name);
obj_version objv, *pobjv = NULL;
op_ret = get_params();
@ -2785,7 +2790,7 @@ void RGWCreateBucket::execute()
if (!relaxed_region_enforcement &&
!location_constraint.empty() &&
!store->has_zonegroup_api(location_constraint)) {
!store->svc.zone->has_zonegroup_api(location_constraint)) {
ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
<< " can't be found." << dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
@ -2793,22 +2798,22 @@ void RGWCreateBucket::execute()
return;
}
if (!relaxed_region_enforcement && !store->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
store->get_zonegroup().api_name != location_constraint) {
if (!relaxed_region_enforcement && !store->svc.zone->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
store->svc.zone->get_zonegroup().api_name != location_constraint) {
ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
<< " doesn't match zonegroup" << " (" << store->get_zonegroup().api_name << ")"
<< " doesn't match zonegroup" << " (" << store->svc.zone->get_zonegroup().api_name << ")"
<< dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
s->err.message = "The specified location-constraint is not valid";
return;
}
const auto& zonegroup = store->get_zonegroup();
const auto& zonegroup = store->svc.zone->get_zonegroup();
if (!placement_rule.empty() &&
!zonegroup.placement_targets.count(placement_rule)) {
ldpp_dout(this, 0) << "placement target (" << placement_rule << ")"
<< " doesn't exist in the placement targets of zonegroup"
<< " (" << store->get_zonegroup().api_name << ")" << dendl;
<< " (" << store->svc.zone->get_zonegroup().api_name << ")" << dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
s->err.message = "The specified placement target does not exist";
return;
@ -2841,7 +2846,7 @@ void RGWCreateBucket::execute()
uint32_t *pmaster_num_shards;
real_time creation_time;
if (!store->is_meta_master()) {
if (!store->svc.zone->is_meta_master()) {
JSONParser jp;
op_ret = forward_request_to_master(s, NULL, store, in_data, &jp);
if (op_ret < 0) {
@ -2867,10 +2872,10 @@ void RGWCreateBucket::execute()
if (s->system_request) {
zonegroup_id = s->info.args.get(RGW_SYS_PARAM_PREFIX "zonegroup");
if (zonegroup_id.empty()) {
zonegroup_id = store->get_zonegroup().get_id();
zonegroup_id = store->svc.zone->get_zonegroup().get_id();
}
} else {
zonegroup_id = store->get_zonegroup().get_id();
zonegroup_id = store->svc.zone->get_zonegroup().get_id();
}
if (s->bucket_exists) {
@ -2878,7 +2883,7 @@ void RGWCreateBucket::execute()
rgw_bucket bucket;
bucket.tenant = s->bucket_tenant;
bucket.name = s->bucket_name;
op_ret = store->select_bucket_placement(*(s->user), zonegroup_id,
op_ret = store->svc.zone->select_bucket_placement(*(s->user), zonegroup_id,
placement_rule,
&selected_placement_rule, nullptr);
if (selected_placement_rule != s->bucket_info.placement_rule) {
@ -3088,7 +3093,7 @@ void RGWDeleteBucket::execute()
return;
}
if (!store->is_meta_master()) {
if (!store->svc.zone->is_meta_master()) {
bufferlist in_data;
op_ret = forward_request_to_master(s, &ot.read_version, store, in_data,
NULL);
@ -3520,7 +3525,7 @@ void RGWPutObj::execute()
// no filters by default
DataProcessor *filter = processor.get();
const auto& compression_type = store->get_zone_params().get_compression_type(
const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(
s->bucket_info.placement_rule);
CompressorRef plugin;
boost::optional<RGWPutObj_Compress> compressor;
@ -3824,7 +3829,7 @@ void RGWPostObj::execute()
if (encrypt != nullptr) {
filter = encrypt.get();
} else {
const auto& compression_type = store->get_zone_params().get_compression_type(
const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(
s->bucket_info.placement_rule);
if (compression_type != "none") {
plugin = Compressor::create(s->cct, compression_type);
@ -4883,7 +4888,7 @@ void RGWPutACLs::execute()
}
// forward bucket acl requests to meta master zone
if (s->object.empty() && !store->is_meta_master()) {
if (s->object.empty() && !store->svc.zone->is_meta_master()) {
bufferlist in_data;
// include acl data unless it was generated from a canned_acl
if (s->canned_acl.empty()) {
@ -5132,7 +5137,7 @@ void RGWPutCORS::execute()
if (op_ret < 0)
return;
if (!store->is_meta_master()) {
if (!store->svc.zone->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@ -6108,7 +6113,7 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path)
goto delop_fail;
}
if (!store->is_meta_master()) {
if (!store->svc.zone->is_meta_master()) {
bufferlist in_data;
ret = forward_request_to_master(s, &ot.read_version, store, in_data,
nullptr);
@ -6336,7 +6341,7 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
rgw_obj_key object_junk;
std::tie(bucket_name, object_junk) = *parse_path(path);
rgw_raw_obj obj(store->get_zone_params().domain_root,
rgw_raw_obj obj(store->svc.zone->get_zone_params().domain_root,
rgw_make_bucket_entry_name(s->bucket_tenant, bucket_name));
/* we need to make sure we read bucket info, it's not read before for this
@ -6368,7 +6373,7 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
real_time creation_time;
obj_version objv, ep_objv, *pobjv = nullptr;
if (! store->is_meta_master()) {
if (! store->svc.zone->is_meta_master()) {
JSONParser jp;
ceph::bufferlist in_data;
req_info info = s->info;
@ -6401,8 +6406,8 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
rgw_bucket bucket;
bucket.tenant = s->bucket_tenant;
bucket.name = s->bucket_name;
op_ret = store->select_bucket_placement(*(s->user),
store->get_zonegroup().get_id(),
op_ret = store->svc.zone->select_bucket_placement(*(s->user),
store->svc.zone->get_zonegroup().get_id(),
placement_rule,
&selected_placement_rule,
nullptr);
@ -6432,7 +6437,7 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
RGWBucketInfo out_info;
op_ret = store->create_bucket(*(s->user),
bucket,
store->get_zonegroup().get_id(),
store->svc.zone->get_zonegroup().get_id(),
placement_rule, binfo.swift_ver_location,
pquota_info, attrs,
out_info, pobjv, &ep_objv, creation_time,
@ -6584,7 +6589,7 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path,
/* No filters by default. */
DataProcessor *filter = &processor;
const auto& compression_type = store->get_zone_params().get_compression_type(
const auto& compression_type = store->svc.zone->get_zone_params().get_compression_type(
binfo.placement_rule);
CompressorRef plugin;
boost::optional<RGWPutObj_Compress> compressor;
@ -7046,7 +7051,7 @@ void RGWPutBucketPolicy::execute()
return;
}
if (!store->is_meta_master()) {
if (!store->svc.zone->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, data, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
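
A recurring shape in the rgw_op.cc hunks above, condensed here for reference: ops that mutate metadata first check whether the local zone is the metadata master, and forward the request to it otherwise (all names taken from the diff; per-op error handling varies):

if (!store->svc.zone->is_meta_master()) {
  op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
  if (op_ret < 0) {
    ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
    return;  // some call sites fall through instead
  }
}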

View File

@ -11,12 +11,15 @@
#include "common/Formatter.h"
#include "common/ceph_json.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "include/types.h"
#include "rgw_common.h"
#include "rgw_tools.h"
#include "services/svc_zone.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
@ -85,7 +88,7 @@ public:
void get_pool_and_oid(RGWRados *store, const string& key, rgw_pool& pool, string& oid) override {
oid = key;
pool = store->get_zone_params().otp_pool;
pool = store->svc.zone->get_zone_params().otp_pool;
}
struct list_keys_info {
@ -99,7 +102,7 @@ public:
info->store = store;
int ret = store->list_raw_objects_init(store->get_zone_params().otp_pool, marker,
int ret = store->list_raw_objects_init(store->svc.zone->get_zone_params().otp_pool, marker,
&info->ctx);
if (ret < 0) {
return ret;

View File

@ -3,6 +3,7 @@
#include "rgw_period_history.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "include/ceph_assert.h"

View File

@ -2,10 +2,13 @@
// vim: ts=8 sw=2 smarttab
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_rest_conn.h"
#include "common/ceph_json.h"
#include "common/errno.h"
#include "services/svc_zone.h"
#define dout_subsys ceph_subsys_rgw
#undef dout_prefix
@ -60,7 +63,7 @@ int RGWPeriodPuller::pull(const std::string& period_id, RGWPeriod& period)
period.set_epoch(0);
int r = period.init(store->ctx(), store);
if (r < 0) {
if (store->is_meta_master()) {
if (store->svc.zone->is_meta_master()) {
// can't pull if we're the master
ldout(store->ctx(), 1) << "metadata master failed to read period "
<< period_id << " from local storage: " << cpp_strerror(r) << dendl;
@ -69,8 +72,8 @@ int RGWPeriodPuller::pull(const std::string& period_id, RGWPeriod& period)
ldout(store->ctx(), 14) << "pulling period " << period_id
<< " from master" << dendl;
// request the period from the master zone
r = pull_period(store->rest_master_conn, period_id,
store->realm.get_id(), period);
r = pull_period(store->svc.zone->get_master_conn(), period_id,
store->svc.zone->get_realm().get_id(), period);
if (r < 0) {
lderr(store->ctx()) << "failed to pull period " << period_id << dendl;
return r;
@ -95,7 +98,7 @@ int RGWPeriodPuller::pull(const std::string& period_id, RGWPeriod& period)
return r;
}
// reflect period objects if this is the latest version
if (store->realm.get_current_period() == period_id) {
if (store->svc.zone->get_realm().get_current_period() == period_id) {
r = period.reflect();
if (r < 0) {
return r;

View File

@ -21,6 +21,7 @@
#include "common/Finisher.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_cache.h"
#include "rgw_acl.h"
#include "rgw_acl_s3.h" /* for dumping s3policy in debug log */
@ -527,9 +528,9 @@ int RGWObjManifest::append(RGWObjManifest& m, RGWZoneGroup& zonegroup, RGWZonePa
return 0;
}
int RGWObjManifest::append(RGWObjManifest& m, RGWRados *store)
int RGWObjManifest::append(RGWObjManifest& m, RGWSI_Zone *zone_svc)
{
return append(m, store->get_zonegroup(), store->get_zone_params());
return append(m, zone_svc->get_zonegroup(), zone_svc->get_zone_params());
}
void RGWObjManifest::append_rules(RGWObjManifest& m, map<uint64_t, RGWObjManifestRule>::iterator& miter,
@ -2291,8 +2292,6 @@ int RGWRados::init_complete()
writeable_zone = (zone_public_config.tier_type.empty() || zone_public_config.tier_type == "rgw");
init_unique_trans_id_deps();
finisher = new Finisher(cct);
finisher->start();
@ -2400,7 +2399,7 @@ int RGWRados::init_complete()
return ret;
}
if (zone_svc->is_meta_master()) {
if (svc.zone->is_meta_master()) {
auto md_log = meta_mgr->get_log(current_period.get_id());
meta_notifier = new RGWMetaNotifier(this, md_log);
meta_notifier->start();
@ -2528,12 +2527,24 @@ int RGWRados::initialize()
svc_registry->register_all();
JSONFormattable zone_svc_conf;
ret = svc_registry->get_instance("zone", zone_svc_conf, &zone_svc);
ret = svc_registry->get_instance("zone", zone_svc_conf, &svc.zone);
if (ret < 0) {
return ret;
}
host_id = zone_svc->gen_host_id();
JSONFormattable zone_utils_svc_conf;
ret = svc_registry->get_instance("zone_utils", zone_utils_svc_conf, &svc.zone_utils);
if (ret < 0) {
return ret;
}
JSONFormattable quota_svc_conf;
ret = svc_registry->get_instance("quota", quota_svc_conf, &svc.quota);
if (ret < 0) {
return ret;
}
host_id = svc.zone_utils->gen_host_id();
ret = init_rados();
if (ret < 0)
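
For context, the calls above do a name -> instance lookup against the registry populated by register_all(). A rough sketch of the shape these call sites imply (hypothetical, simplified interface; whether the real get_instance is a template or a set of per-service overloads is not visible in this diff):

#include <memory>
#include <string>

class JSONFormattable;       // per-service config blob, as passed above

class RGWServiceRegistry {
public:
  void register_all();       // registers "zone", "zone_utils", "quota", ...

  // Sketch only: fills *svc with the named service instance,
  // returning 0 on success or a negative error code.
  template <typename SvcT>
  int get_instance(const std::string& svc_name,
                   const JSONFormattable& conf,
                   std::shared_ptr<SvcT> *svc);
};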

View File

@ -27,7 +27,6 @@
#include "rgw_sync_module.h"
#include "rgw_sync_log_trim.h"
#include "services/svc_zone.h"
#include "services/svc_rados.h"
class RGWWatcher;
@ -42,13 +41,14 @@ class RGWMetaSyncProcessorThread;
class RGWDataSyncProcessorThread;
class RGWSyncLogTrimThread;
class RGWSyncTraceManager;
class RGWRESTConn;
struct RGWZoneGroup;
struct RGWZoneParams;
class RGWReshard;
class RGWReshardWait;
struct RGWZone;
struct RGWPeriod;
class RGWSI_Zone;
class RGWSI_ZoneUtils;
class RGWSI_Quota;
/* flags for put_obj_meta() */
#define PUT_OBJ_CREATE 0x01
@ -583,7 +583,7 @@ public:
static void generate_test_instances(list<RGWObjManifest*>& o);
int append(RGWObjManifest& m, RGWZoneGroup& zonegroup, RGWZoneParams& zone_params);
int append(RGWObjManifest& m, RGWRados *store);
int append(RGWObjManifest& m, RGWSI_Zone *zone_svc);
bool get_rule(uint64_t ofs, RGWObjManifestRule *rule);
@ -1304,8 +1304,6 @@ protected:
bool pools_initialized;
string trans_id_suffix;
RGWQuotaHandler *quota_handler;
Finisher *finisher;
@ -1317,8 +1315,6 @@ protected:
bool writeable_zone{false};
RGWServiceRegistryRef svc_registry;
std::shared_ptr<RGWSI_Zone> zone_svc;
RGWIndexCompletionManager *index_completion_manager{nullptr};
public:
RGWRados() : lock("rados_timer_lock"), watchers_lock("watchers_lock"), timer(NULL),
@ -1338,7 +1334,6 @@ public:
quota_handler(NULL),
finisher(NULL),
cr_registry(NULL),
rest_master_conn(NULL),
meta_mgr(NULL), data_log(NULL), reshard(NULL) {}
uint64_t get_new_req_id() {
@ -1352,56 +1347,19 @@ public:
cct = _cct;
}
struct {
std::shared_ptr<RGWSI_RADOS> rados;
std::shared_ptr<RGWSI_Zone> zone;
std::shared_ptr<RGWSI_ZoneUtils> zone_utils;
std::shared_ptr<RGWSI_Quota> quota;
} svc;
/**
* AmazonS3 errors contain a HostId string, but it is an opaque base64 blob; we
* try to be more transparent. This has a wrapper so we can update it when zonegroup/zone are changed.
*/
string host_id;
RGWRESTConn *rest_master_conn;
map<string, RGWRESTConn *> zone_conn_map;
map<string, RGWRESTConn *> zone_data_sync_from_map;
map<string, RGWRESTConn *> zone_data_notify_to_map;
map<string, RGWRESTConn *> zonegroup_conn_map;
map<string, string> zone_id_by_name;
map<string, RGWZone> zone_by_id;
RGWRESTConn *get_zone_conn_by_id(const string& id) {
auto citer = zone_conn_map.find(id);
if (citer == zone_conn_map.end()) {
return NULL;
}
return citer->second;
}
RGWRESTConn *get_zone_conn_by_name(const string& name) {
auto i = zone_id_by_name.find(name);
if (i == zone_id_by_name.end()) {
return NULL;
}
return get_zone_conn_by_id(i->second);
}
bool find_zone_id_by_name(const string& name, string *id) {
auto i = zone_id_by_name.find(name);
if (i == zone_id_by_name.end()) {
return false;
}
*id = i->second;
return true;
}
const RGWQuotaInfo& get_bucket_quota() {
return zone_svc->get_current_period().get_config().bucket_quota;
}
const RGWQuotaInfo& get_user_quota() {
return zone_svc->current_period.get_config().user_quota;
}
// pulls missing periods for period_history
std::unique_ptr<RGWPeriodPuller> period_puller;
// maintains a connected history of periods
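
The two getters deleted a few lines above show what the new quota service must return; a sketch, assuming RGWSI_Quota forwards to the current period's config through the zone service (the member wiring is presumed; the expressions are the ones removed here):

class RGWSI_Quota {
  std::shared_ptr<RGWSI_Zone> zone_svc;  // presumed dependency
public:
  const RGWQuotaInfo& get_bucket_quota() {
    return zone_svc->get_current_period().get_config().bucket_quota;
  }
  const RGWQuotaInfo& get_user_quota() {
    return zone_svc->get_current_period().get_config().user_quota;
  }
};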
@ -2554,39 +2512,6 @@ public:
uint64_t instance_id();
string unique_id(uint64_t unique_num) {
return zone_svc->unique_id(unique_num);
}
void init_unique_trans_id_deps() {
char buf[16 + 2 + 1]; /* uint64_t needs 16, 2 hyphens add further 2 */
snprintf(buf, sizeof(buf), "-%llx-", (unsigned long long)instance_id());
url_encode(string(buf) + get_zone_params().get_name(), trans_id_suffix);
}
/* In order to preserve compatibility with Swift API, transaction ID
* should contain at least 32 characters satisfying following spec:
* - first 21 chars must be in range [0-9a-f]. Swift uses this
* space for storing fragment of UUID obtained through a call to
* uuid4() function of Python's uuid module;
* - char no. 22 must be a hyphen;
* - at least 10 next characters constitute hex-formatted timestamp
* padded with zeroes if necessary. All bytes must be in [0-9a-f]
* range;
* - last, optional part of transaction ID is any url-encoded string
* without restriction on length. */
string unique_trans_id(const uint64_t unique_num) {
char buf[41]; /* 2 + 21 + 1 + 16 (timestamp can consume up to 16) + 1 */
time_t timestamp = time(NULL);
snprintf(buf, sizeof(buf), "tx%021llx-%010llx",
(unsigned long long)unique_num,
(unsigned long long)timestamp);
return string(buf) + trans_id_suffix;
}
librados::Rados* get_rados_handle();
int delete_raw_obj_aio(const rgw_raw_obj& obj, list<librados::AioCompletion *>& handles);
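
As a worked example of the transaction-ID format documented above (all inputs hypothetical): with unique_num = 42, timestamp = 0x5b74c8a0, instance_id = 0x4513 and zone name "default", init_unique_trans_id_deps() sets trans_id_suffix to "-4513-default" and unique_trans_id() returns

tx00000000000000000002a-005b74c8a0-4513-default

i.e. "tx", 21 zero-padded hex digits of unique_num, a hyphen, 10 zero-padded hex digits of the timestamp, then the url-encoded "-<instance_id>-<zone name>" suffix.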

View File

@ -4,6 +4,7 @@
#include <limits>
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_bucket.h"
#include "rgw_reshard.h"
#include "cls/rgw/cls_rgw_client.h"
@ -13,6 +14,8 @@
#include "common/dout.h"
#include "services/svc_zone.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
@ -733,7 +736,7 @@ void RGWReshard::get_bucket_logshard_oid(const string& tenant, const string& buc
int RGWReshard::add(cls_rgw_reshard_entry& entry)
{
if (!store->can_reshard()) {
if (!store->svc.zone->can_reshard()) {
ldout(store->ctx(), 20) << __func__ << " Resharding is disabled" << dendl;
return 0;
}
@ -792,7 +795,7 @@ int RGWReshard::list(int logshard_num, string& marker, uint32_t max, std::list<c
}
lderr(store->ctx()) << "ERROR: failed to list reshard log entries, oid=" << logshard_oid << dendl;
if (ret == -EACCES) {
lderr(store->ctx()) << "access denied to pool " << store->get_zone_params().reshard_pool
lderr(store->ctx()) << "access denied to pool " << store->svc.zone->get_zone_params().reshard_pool
<< ". Fix the pool access permissions of your client" << dendl;
}
}
@ -1035,7 +1038,7 @@ void RGWReshard::get_logshard_oid(int shard_num, string *logshard)
int RGWReshard::process_all_logshards()
{
if (!store->can_reshard()) {
if (!store->svc.zone->can_reshard()) {
ldout(store->ctx(), 20) << __func__ << " Resharding is disabled" << dendl;
return 0;
}

View File

@ -12,6 +12,7 @@
#include "include/str_list.h"
#include "rgw_common.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_formats.h"
#include "rgw_op.h"
#include "rgw_rest.h"

View File

@ -15,6 +15,8 @@
#ifndef RGW_REST_CONFIG_H
#define RGW_REST_CONFIG_H
#include "rgw_zone.h"
class RGWOp_ZoneGroupMap_Get : public RGWRESTOp {
RGWZoneGroupMap zonegroup_map;
bool old_format;

View File

@ -2,11 +2,14 @@
// vim: ts=8 sw=2 smarttab
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_rest_conn.h"
#include "services/svc_zone.h"
#define dout_subsys ceph_subsys_rgw
RGWRESTConn::RGWRESTConn(CephContext *_cct, RGWRados *store,
RGWRESTConn::RGWRESTConn(CephContext *_cct, RGWSI_Zone *zone_svc,
const string& _remote_id,
const list<string>& remote_endpoints,
HostStyle _host_style)
@ -14,13 +17,13 @@ RGWRESTConn::RGWRESTConn(CephContext *_cct, RGWRados *store,
endpoints(remote_endpoints.begin(), remote_endpoints.end()),
remote_id(_remote_id), host_style(_host_style)
{
if (store) {
key = store->get_zone_params().system_key;
self_zone_group = store->get_zonegroup().get_id();
if (zone_svc) {
key = zone_svc->get_zone_params().system_key;
self_zone_group = zone_svc->get_zonegroup().get_id();
}
}
RGWRESTConn::RGWRESTConn(CephContext *_cct, RGWRados *store,
RGWRESTConn::RGWRESTConn(CephContext *_cct, RGWSI_Zone *zone_svc,
const string& _remote_id,
const list<string>& remote_endpoints,
RGWAccessKey _cred,
@ -30,8 +33,8 @@ RGWRESTConn::RGWRESTConn(CephContext *_cct, RGWRados *store,
key(std::move(_cred)),
remote_id(_remote_id), host_style(_host_style)
{
if (store) {
self_zone_group = store->get_zonegroup().get_id();
if (zone_svc) {
self_zone_group = zone_svc->get_zonegroup().get_id();
}
}
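
Callers now hand in the zone service rather than the whole store; an illustrative construction under the new signature (endpoint, remote id and the surrounding variables are hypothetical):

#include <list>
#include <string>

// cct is a CephContext*; store is an RGWRados* carrying the new svc struct.
std::list<std::string> endpoints = { "http://gw1.example.com:8000" };
RGWRESTConn *conn = new RGWRESTConn(cct, store->svc.zone.get(),
                                    "us-east-1", endpoints);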

View File

@ -12,7 +12,7 @@
#include <atomic>
class CephContext;
class RGWRados;
class RGWSI_Zone;
template <class T>
static int parse_decode_json(CephContext *cct, T& t, bufferlist& bl)
@ -79,8 +79,8 @@ class RGWRESTConn
public:
RGWRESTConn(CephContext *_cct, RGWRados *store, const string& _remote_id, const list<string>& endpoints, HostStyle _host_style = PathStyle);
RGWRESTConn(CephContext *_cct, RGWRados *store, const string& _remote_id, const list<string>& endpoints, RGWAccessKey _cred, HostStyle _host_style = PathStyle);
RGWRESTConn(CephContext *_cct, RGWSI_Zone *zone_svc, const string& _remote_id, const list<string>& endpoints, HostStyle _host_style = PathStyle);
RGWRESTConn(CephContext *_cct, RGWSI_Zone *zone_svc, const string& _remote_id, const list<string>& endpoints, RGWAccessKey _cred, HostStyle _host_style = PathStyle);
// custom move needed for atomic
RGWRESTConn(RGWRESTConn&& other);
@ -193,11 +193,11 @@ class S3RESTConn : public RGWRESTConn {
public:
S3RESTConn(CephContext *_cct, RGWRados *store, const string& _remote_id, const list<string>& endpoints, HostStyle _host_style = PathStyle) :
RGWRESTConn(_cct, store, _remote_id, endpoints, _host_style) {}
S3RESTConn(CephContext *_cct, RGWSI_Zone *svc_zone, const string& _remote_id, const list<string>& endpoints, HostStyle _host_style = PathStyle) :
RGWRESTConn(_cct, svc_zone, _remote_id, endpoints, _host_style) {}
S3RESTConn(CephContext *_cct, RGWRados *store, const string& _remote_id, const list<string>& endpoints, RGWAccessKey _cred, HostStyle _host_style = PathStyle):
RGWRESTConn(_cct, store, _remote_id, endpoints, _cred, _host_style) {}
S3RESTConn(CephContext *_cct, RGWSI_Zone *svc_zone, const string& _remote_id, const list<string>& endpoints, RGWAccessKey _cred, HostStyle _host_style = PathStyle):
RGWRESTConn(_cct, svc_zone, _remote_id, endpoints, _cred, _host_style) {}
~S3RESTConn() override = default;
void populate_params(param_vec_t& params, const rgw_user *uid, const string& zonegroup) override {

View File

@ -21,6 +21,10 @@
#include "rgw_sync.h"
#include "rgw_data_sync.h"
#include "rgw_common.h"
#include "rgw_zone.h"
#include "services/svc_zone.h"
#include "common/errno.h"
#include "include/ceph_assert.h"
@ -86,7 +90,7 @@ void RGWOp_MDLog_List::execute() {
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
period = store->get_current_period_id();
period = store->svc.zone->get_current_period_id();
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id" << dendl;
http_ret = -EINVAL;
@ -164,7 +168,7 @@ void RGWOp_MDLog_ShardInfo::execute() {
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
period = store->get_current_period_id();
period = store->svc.zone->get_current_period_id();
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id" << dendl;
@ -223,7 +227,7 @@ void RGWOp_MDLog_Delete::execute() {
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
period = store->get_current_period_id();
period = store->svc.zone->get_current_period_id();
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id" << dendl;
@ -250,7 +254,7 @@ void RGWOp_MDLog_Lock::execute() {
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
period = store->get_current_period_id();
period = store->svc.zone->get_current_period_id();
}
if (period.empty() ||
@ -298,7 +302,7 @@ void RGWOp_MDLog_Unlock::execute() {
if (period.empty()) {
ldout(s->cct, 5) << "Missing period id trying to use current" << dendl;
period = store->get_current_period_id();
period = store->svc.zone->get_current_period_id();
}
if (period.empty() ||

View File

@ -5,6 +5,9 @@
#include "rgw_rest_realm.h"
#include "rgw_rest_s3.h"
#include "rgw_rest_config.h"
#include "rgw_zone.h"
#include "services/svc_zone.h"
#include "include/ceph_assert.h"
@ -91,9 +94,9 @@ void RGWOp_Period_Post::execute()
}
// require period.realm_id to match our realm
if (period.get_realm() != store->realm.get_id()) {
if (period.get_realm() != store->svc.zone->get_realm().get_id()) {
error_stream << "period with realm id " << period.get_realm()
<< " doesn't match current realm " << store->realm.get_id() << std::endl;
<< " doesn't match current realm " << store->svc.zone->get_realm().get_id() << std::endl;
http_ret = -EINVAL;
return;
}
@ -127,7 +130,7 @@ void RGWOp_Period_Post::execute()
}
// if it's not period commit, nobody is allowed to push to the master zone
if (period.get_master_zone() == store->get_zone_params().get_id()) {
if (period.get_master_zone() == store->svc.zone->get_zone_params().get_id()) {
ldout(cct, 10) << "master zone rejecting period id="
<< period.get_id() << " epoch=" << period.get_epoch() << dendl;
http_ret = -EINVAL; // XXX: error code

View File

@ -45,6 +45,10 @@
#include "rgw_crypt.h"
#include "rgw_crypt_sanitize.h"
#include "rgw_rest_user_policy.h"
#include "rgw_zone.h"
#include "services/svc_zone.h"
#include "include/ceph_assert.h"
#include "rgw_role.h"
#include "rgw_rest_sts.h"
@ -868,7 +872,7 @@ void RGWGetBucketLocation_ObjStore_S3::send_response()
RGWZoneGroup zonegroup;
string api_name;
int ret = store->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
int ret = store->svc.zone->get_zonegroup(s->bucket_info.zonegroup, zonegroup);
if (ret >= 0) {
api_name = zonegroup.api_name;
} else {
@ -967,7 +971,7 @@ int RGWSetBucketVersioning_ObjStore_S3::get_params()
return -EINVAL;
}
if (!store->is_meta_master()) {
if (!store->svc.zone->is_meta_master()) {
/* only need to keep this data around if we're not meta master */
in_data.append(data);
}
@ -2413,7 +2417,7 @@ int RGWPutCORS_ObjStore_S3::get_params()
}
// forward bucket cors requests to meta master zone
if (!store->is_meta_master()) {
if (!store->svc.zone->is_meta_master()) {
/* only need to keep this data around if we're not meta master */
in_data.append(data);
}

View File

@ -7,6 +7,7 @@
#include "common/ceph_json.h"
#include "common/ceph_time.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "include/types.h"
#include "rgw_string.h"
@ -15,6 +16,8 @@
#include "rgw_tools.h"
#include "rgw_role.h"
#include "services/svc_zone.h"
#define dout_subsys ceph_subsys_rgw
@ -30,7 +33,7 @@ int RGWRole::store_info(bool exclusive)
bufferlist bl;
encode(*this, bl);
return rgw_put_system_obj(store, store->get_zone_params().roles_pool, oid,
return rgw_put_system_obj(store, store->svc.zone->get_zone_params().roles_pool, oid,
bl, exclusive, NULL, real_time(), NULL);
}
@ -44,7 +47,7 @@ int RGWRole::store_name(bool exclusive)
bufferlist bl;
using ceph::encode;
encode(nameToId, bl);
return rgw_put_system_obj(store, store->get_zone_params().roles_pool, oid,
return rgw_put_system_obj(store, store->svc.zone->get_zone_params().roles_pool, oid,
bl, exclusive, NULL, real_time(), NULL);
}
@ -53,7 +56,7 @@ int RGWRole::store_path(bool exclusive)
string oid = tenant + get_path_oid_prefix() + path + get_info_oid_prefix() + id;
bufferlist bl;
return rgw_put_system_obj(store, store->get_zone_params().roles_pool, oid,
return rgw_put_system_obj(store, store->svc.zone->get_zone_params().roles_pool, oid,
bl, exclusive, NULL, real_time(), NULL);
}
@ -100,7 +103,7 @@ int RGWRole::create(bool exclusive)
sprintf(buf + strlen(buf),".%dZ",(int)tv.tv_usec/1000);
creation_date.assign(buf, strlen(buf));
auto& pool = store->get_zone_params().roles_pool;
auto& pool = store->svc.zone->get_zone_params().roles_pool;
ret = store_info(exclusive);
if (ret < 0) {
ldout(cct, 0) << "ERROR: storing role info in pool: " << pool.name << ": "
@ -148,7 +151,7 @@ int RGWRole::create(bool exclusive)
int RGWRole::delete_obj()
{
auto& pool = store->get_zone_params().roles_pool;
auto& pool = store->svc.zone->get_zone_params().roles_pool;
int ret = read_name();
if (ret < 0) {
@ -217,7 +220,7 @@ int RGWRole::get_by_id()
int RGWRole::update()
{
auto& pool = store->get_zone_params().roles_pool;
auto& pool = store->svc.zone->get_zone_params().roles_pool;
int ret = store_info(false);
if (ret < 0) {
@ -293,7 +296,7 @@ void RGWRole::decode_json(JSONObj *obj)
int RGWRole::read_id(const string& role_name, const string& tenant, string& role_id)
{
auto& pool = store->get_zone_params().roles_pool;
auto& pool = store->svc.zone->get_zone_params().roles_pool;
string oid = tenant + get_names_oid_prefix() + role_name;
bufferlist bl;
RGWObjectCtx obj_ctx(store);
@ -319,7 +322,7 @@ int RGWRole::read_id(const string& role_name, const string& tenant, string& role
int RGWRole::read_info()
{
auto& pool = store->get_zone_params().roles_pool;
auto& pool = store->svc.zone->get_zone_params().roles_pool;
string oid = get_info_oid_prefix() + id;
bufferlist bl;
RGWObjectCtx obj_ctx(store);
@ -346,7 +349,7 @@ int RGWRole::read_info()
int RGWRole::read_name()
{
auto& pool = store->get_zone_params().roles_pool;
auto& pool = store->svc.zone->get_zone_params().roles_pool;
string oid = tenant + get_names_oid_prefix() + name;
bufferlist bl;
RGWObjectCtx obj_ctx(store);
@ -424,7 +427,7 @@ int RGWRole::get_roles_by_path_prefix(RGWRados *store,
const string& tenant,
vector<RGWRole>& roles)
{
auto pool = store->get_zone_params().roles_pool;
auto pool = store->svc.zone->get_zone_params().roles_pool;
string prefix;
// List all roles if path prefix is empty


@ -13,6 +13,7 @@
#include "rgw_common.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_sync.h"
#include "rgw_metadata.h"
#include "rgw_rest_conn.h"
@ -24,6 +25,8 @@
#include "cls/lock/cls_lock_client.h"
#include "services/svc_zone.h"
#include <boost/asio/yield.hpp>
#define dout_subsys ceph_subsys_rgw
@ -251,7 +254,7 @@ int RGWRemoteMetaLog::read_log_info(rgw_mdlog_info *log_info)
int RGWRemoteMetaLog::read_master_log_shards_info(const string &master_period, map<int, RGWMetadataLogInfo> *shards_info)
{
if (store->is_meta_master()) {
if (store->svc.zone->is_meta_master()) {
return 0;
}
@ -266,7 +269,7 @@ int RGWRemoteMetaLog::read_master_log_shards_info(const string &master_period, map<int, RGWMetadataLogInfo> *shards_info)
int RGWRemoteMetaLog::read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result)
{
if (store->is_meta_master()) {
if (store->svc.zone->is_meta_master()) {
return 0;
}
@ -275,7 +278,7 @@ int RGWRemoteMetaLog::read_master_log_shards_next(const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result)
int RGWRemoteMetaLog::init()
{
conn = store->rest_master_conn;
conn = store->svc.zone->get_master_conn();
int ret = http_manager.start();
if (ret < 0) {
@ -302,18 +305,18 @@ void RGWRemoteMetaLog::finish()
int RGWMetaSyncStatusManager::init()
{
if (store->is_meta_master()) {
if (store->svc.zone->is_meta_master()) {
return 0;
}
if (!store->rest_master_conn) {
if (!store->svc.zone->get_master_conn()) {
lderr(store->ctx()) << "no REST connection to master zone" << dendl;
return -EIO;
}
int r = rgw_init_ioctx(store->get_rados_handle(), store->get_zone_params().log_pool, ioctx, true);
int r = rgw_init_ioctx(store->get_rados_handle(), store->svc.zone->get_zone_params().log_pool, ioctx, true);
if (r < 0) {
lderr(store->ctx()) << "ERROR: failed to open log pool (" << store->get_zone_params().log_pool << " ret=" << r << dendl;
lderr(store->ctx()) << "ERROR: failed to open log pool (" << store->svc.zone->get_zone_params().log_pool << " ret=" << r << dendl;
return r;
}
@ -335,7 +338,7 @@ int RGWMetaSyncStatusManager::init()
int num_shards = sync_status.sync_info.num_shards;
for (int i = 0; i < num_shards; i++) {
shard_objs[i] = rgw_raw_obj(store->get_zone_params().log_pool, sync_env.shard_obj_name(i));
shard_objs[i] = rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env.shard_obj_name(i));
}
RWLock::WLocker wl(ts_to_shard_lock);
@ -480,7 +483,7 @@ public:
int operate() override {
auto store = env->store;
RGWRESTConn *conn = store->rest_master_conn;
RGWRESTConn *conn = store->svc.zone->get_master_conn();
reenter(this) {
yield {
char buf[16];
@ -631,7 +634,7 @@ public:
string lock_name = "sync_lock";
RGWRados *store = sync_env->store;
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, sync_env->status_oid()),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
lock_name, lock_duration, this));
lease_stack.reset(spawn(lease_cr.get(), false));
}
@ -648,7 +651,7 @@ public:
set_status("writing sync status");
RGWRados *store = sync_env->store;
call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, sync_env->status_oid()),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
status));
}
@ -679,7 +682,7 @@ public:
RGWRados *store = sync_env->store;
spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados,
store,
rgw_raw_obj(store->get_zone_params().log_pool, sync_env->shard_obj_name(i)),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->shard_obj_name(i)),
marker), true);
}
}
@ -688,7 +691,7 @@ public:
status.state = rgw_meta_sync_info::StateBuildingFullSyncMaps;
RGWRados *store = sync_env->store;
call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(sync_env->async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, sync_env->status_oid()),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
status));
}
set_status("drop lock lease");
@ -729,7 +732,7 @@ bool RGWReadSyncStatusMarkersCR::spawn_next()
return false;
}
using CR = RGWSimpleRadosReadCR<rgw_meta_sync_marker>;
rgw_raw_obj obj{env->store->get_zone_params().log_pool,
rgw_raw_obj obj{env->store->svc.zone->get_zone_params().log_pool,
env->shard_obj_name(shard_id)};
spawn(new CR(env->async_rados, env->store, obj, &markers[shard_id]), false);
shard_id++;
@ -755,7 +758,7 @@ int RGWReadSyncStatusCoroutine::operate()
using ReadInfoCR = RGWSimpleRadosReadCR<rgw_meta_sync_info>;
yield {
bool empty_on_enoent = false; // fail on ENOENT
rgw_raw_obj obj{sync_env->store->get_zone_params().log_pool,
rgw_raw_obj obj{sync_env->store->svc.zone->get_zone_params().log_pool,
sync_env->status_oid()};
call(new ReadInfoCR(sync_env->async_rados, sync_env->store, obj,
&sync_status->sync_info, empty_on_enoent));
@ -866,7 +869,7 @@ public:
string lock_name = "sync_lock";
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados,
sync_env->store,
rgw_raw_obj(sync_env->store->get_zone_params().log_pool, sync_env->status_oid()),
rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, sync_env->status_oid()),
lock_name, lock_duration, this));
lease_stack.reset(spawn(lease_cr.get(), false));
}
@ -880,7 +883,7 @@ public:
yield;
}
entries_index.reset(new RGWShardedOmapCRManager(sync_env->async_rados, sync_env->store, this, num_shards,
sync_env->store->get_zone_params().log_pool,
sync_env->store->svc.zone->get_zone_params().log_pool,
mdlog_sync_full_sync_index_prefix));
yield {
call(new RGWReadRESTResourceCR<list<string> >(cct, conn, sync_env->http_manager,
@ -954,7 +957,7 @@ public:
rgw_meta_sync_marker& marker = iter->second;
marker.total_entries = entries_index->get_total_entries(shard_id);
spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados, sync_env->store,
rgw_raw_obj(sync_env->store->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)),
rgw_raw_obj(sync_env->store->svc.zone->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)),
marker), true);
}
}
@ -1211,7 +1214,7 @@ public:
RGWRados *store = sync_env->store;
return new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->async_rados,
store,
rgw_raw_obj(store->get_zone_params().log_pool, marker_oid),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, marker_oid),
sync_marker);
}
@ -1895,7 +1898,7 @@ public:
RGWMetaSyncCR(RGWMetaSyncEnv *_sync_env, const RGWPeriodHistory::Cursor &cursor,
const rgw_meta_sync_status& _sync_status, RGWSyncTraceNodeRef& _tn)
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
pool(sync_env->store->get_zone_params().log_pool),
pool(sync_env->store->svc.zone->get_zone_params().log_pool),
cursor(cursor), sync_status(_sync_status), tn(_tn) {}
~RGWMetaSyncCR() {
@ -2011,7 +2014,7 @@ void RGWRemoteMetaLog::init_sync_env(RGWMetaSyncEnv *env) {
int RGWRemoteMetaLog::read_sync_status(rgw_meta_sync_status *sync_status)
{
if (store->is_meta_master()) {
if (store->svc.zone->is_meta_master()) {
return 0;
}
// cannot run concurrently with run_sync(), so run in a separate manager
@ -2032,7 +2035,7 @@ int RGWRemoteMetaLog::read_sync_status(rgw_meta_sync_status *sync_status)
int RGWRemoteMetaLog::init_sync_status()
{
if (store->is_meta_master()) {
if (store->svc.zone->is_meta_master()) {
return 0;
}
@ -2058,7 +2061,7 @@ int RGWRemoteMetaLog::store_sync_info(const rgw_meta_sync_info& sync_info)
{
tn->log(20, "store sync info");
return run(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(async_rados, store,
rgw_raw_obj(store->get_zone_params().log_pool, sync_env.status_oid()),
rgw_raw_obj(store->svc.zone->get_zone_params().log_pool, sync_env.status_oid()),
sync_info));
}
@ -2105,7 +2108,7 @@ static RGWPeriodHistory::Cursor get_period_at(RGWRados* store,
int RGWRemoteMetaLog::run_sync()
{
if (store->is_meta_master()) {
if (store->svc.zone->is_meta_master()) {
return 0;
}
@ -2511,7 +2514,7 @@ int PurgePeriodLogsCR::operate()
<< " period=" << cursor.get_period().get_id() << dendl;
yield {
const auto mdlog = metadata->get_log(cursor.get_period().get_id());
const auto& pool = store->get_zone_params().log_pool;
const auto& pool = store->svc.zone->get_zone_params().log_pool;
auto num_shards = cct->_conf->rgw_md_log_max_shards;
call(new PurgeLogShardsCR(store, mdlog, pool, num_shards));
}
@ -2563,7 +2566,7 @@ connection_map make_peer_connections(RGWRados *store,
for (auto& g : zonegroups) {
for (auto& z : g.second.zones) {
std::unique_ptr<RGWRESTConn> conn{
new RGWRESTConn(store->ctx(), store, z.first, z.second.endpoints)};
new RGWRESTConn(store->ctx(), store->svc.zone.get(), z.first, z.second.endpoints)};
connections.emplace(z.first, std::move(conn));
}
}
@ -2631,7 +2634,7 @@ struct TrimEnv {
TrimEnv(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http, int num_shards)
: dpp(dpp), store(store), http(http), num_shards(num_shards),
zone(store->get_zone_params().get_id()),
zone(store->svc.zone->get_zone_params().get_id()),
current(store->period_history->get_current())
{}
};
@ -2946,7 +2949,7 @@ class MetaPeerTrimShardCollectCR : public RGWShardCollectCR {
: RGWShardCollectCR(env.store->ctx(), MAX_CONCURRENT_SHARDS),
env(env), mdlog(mdlog), period_id(env.current.get_period().get_id())
{
meta_env.init(env.dpp, cct, env.store, env.store->rest_master_conn,
meta_env.init(env.dpp, cct, env.store, env.store->svc.zone->get_master_conn(),
env.store->get_async_rados(), env.http, nullptr,
env.store->get_sync_tracer());
}
@ -2988,7 +2991,7 @@ int MetaPeerTrimCR::operate()
};
using LogInfoCR = RGWReadRESTResourceCR<rgw_mdlog_info>;
call(new LogInfoCR(cct, env.store->rest_master_conn, env.http,
call(new LogInfoCR(cct, env.store->svc.zone->get_master_conn(), env.http,
"/admin/log/", params, &mdlog_info));
}
if (retcode < 0) {
@ -3035,7 +3038,7 @@ class MetaTrimPollCR : public RGWCoroutine {
public:
MetaTrimPollCR(RGWRados *store, utime_t interval)
: RGWCoroutine(store->ctx()), store(store), interval(interval),
obj(store->get_zone_params().log_pool, RGWMetadataLogHistory::oid),
obj(store->svc.zone->get_zone_params().log_pool, RGWMetadataLogHistory::oid),
cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct))
{}
@ -3101,7 +3104,7 @@ class MetaPeerTrimPollCR : public MetaTrimPollCR {
RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store, RGWHTTPManager *http,
int num_shards, utime_t interval)
{
if (store->is_meta_master()) {
if (store->svc.zone->is_meta_master()) {
return new MetaMasterTrimPollCR(dpp, store, http, num_shards, interval);
}
return new MetaPeerTrimPollCR(dpp, store, http, num_shards, interval);
@ -3126,7 +3129,7 @@ RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, RGWRados *store,
RGWHTTPManager *http,
int num_shards)
{
if (store->is_meta_master()) {
if (store->svc.zone->is_meta_master()) {
return new MetaMasterAdminTrimCR(dpp, store, http, num_shards);
}
return new MetaPeerAdminTrimCR(dpp, store, http, num_shards);


@ -25,8 +25,11 @@
#include "rgw_data_sync.h"
#include "rgw_metadata.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_sync.h"
#include "services/svc_zone.h"
#include <boost/asio/yield.hpp>
#include "include/ceph_assert.h"
@ -434,8 +437,8 @@ class BucketTrimInstanceCR : public RGWCoroutine {
: RGWCoroutine(store->ctx()), store(store),
http(http), observer(observer),
bucket_instance(bucket_instance),
zone_id(store->get_zone().id),
peer_status(store->zone_conn_map.size())
zone_id(store->svc.zone->get_zone().id),
peer_status(store->svc.zone->get_zone_conn_map().size())
{}
int operate() override;
@ -459,7 +462,7 @@ int BucketTrimInstanceCR::operate()
};
auto p = peer_status.begin();
for (auto& c : store->zone_conn_map) {
for (auto& c : store->svc.zone->get_zone_conn_map()) {
using StatusCR = RGWReadRESTResourceCR<StatusShards>;
spawn(new StatusCR(cct, c.second, http, "/admin/log/", params, &*p),
false);
@ -1016,7 +1019,7 @@ class BucketTrimManager::Impl : public TrimCounters::Server,
Impl(RGWRados *store, const BucketTrimConfig& config)
: store(store), config(config),
status_obj(store->get_zone_params().log_pool, BucketTrimStatus::oid),
status_obj(store->svc.zone->get_zone_params().log_pool, BucketTrimStatus::oid),
counter(config.counter_size),
trimmed(config.recent_size, config.recent_duration),
watcher(store, status_obj, this)


@ -9,6 +9,9 @@
#include "rgw_rest_conn.h"
#include "rgw_cr_rest.h"
#include "rgw_acl.h"
#include "rgw_zone.h"
#include "services/svc_zone.h"
#include <boost/asio/yield.hpp>
@ -557,11 +560,11 @@ struct AWSSyncConfig {
void expand_target(RGWDataSyncEnv *sync_env, const string& sid, const string& path, string *dest) {
apply_meta_param(path, "sid", sid, dest);
RGWZoneGroup& zg = sync_env->store->get_zonegroup();
RGWZoneGroup& zg = sync_env->store->svc.zone->get_zonegroup();
apply_meta_param(path, "zonegroup", zg.get_name(), dest);
apply_meta_param(path, "zonegroup_id", zg.get_id(), dest);
RGWZone& zone = sync_env->store->get_zone();
RGWZone& zone = sync_env->store->svc.zone->get_zone();
apply_meta_param(path, "zone", zone.name, dest);
apply_meta_param(path, "zone_id", zone.id, dest);
}
@ -636,7 +639,7 @@ struct AWSSyncConfig {
auto& root_conf = root_profile->conn_conf;
root_profile->conn.reset(new S3RESTConn(sync_env->cct,
sync_env->store,
sync_env->store->svc.zone.get(),
id,
{ root_conf->endpoint },
root_conf->key,
@ -646,7 +649,7 @@ struct AWSSyncConfig {
auto& c = i.second;
c->conn.reset(new S3RESTConn(sync_env->cct,
sync_env->store,
sync_env->store->svc.zone.get(),
id,
{ c->conn_conf->endpoint },
c->conn_conf->key,
@ -1420,7 +1423,7 @@ public:
obj_size(_obj_size),
src_properties(_src_properties),
rest_obj(_rest_obj),
status_obj(sync_env->store->get_zone_params().log_pool,
status_obj(sync_env->store->svc.zone->get_zone_params().log_pool,
RGWBucketSyncStatusManager::obj_status_oid(sync_env->source_zone, src_obj)) {
}
@ -1599,7 +1602,7 @@ public:
<< " attrs=" << attrs
<< dendl;
source_conn = sync_env->store->get_zone_conn_by_id(sync_env->source_zone);
source_conn = sync_env->store->svc.zone->get_zone_conn_by_id(sync_env->source_zone);
if (!source_conn) {
ldout(sync_env->cct, 0) << "ERROR: cannot find http connection to zone " << sync_env->source_zone << dendl;
return set_cr_error(-EINVAL);


@ -8,6 +8,9 @@
#include "rgw_cr_rest.h"
#include "rgw_op.h"
#include "rgw_es_query.h"
#include "rgw_zone.h"
#include "services/svc_zone.h"
#include "include/str_list.h"
@ -546,7 +549,7 @@ public:
~RGWElasticDataSyncModule() override {}
void init(RGWDataSyncEnv *sync_env, uint64_t instance_id) override {
conf->init_instance(sync_env->store->get_realm(), instance_id);
conf->init_instance(sync_env->store->svc.zone->get_realm(), instance_id);
}
RGWCoroutine *init_sync(RGWDataSyncEnv *sync_env) override {


@ -12,6 +12,7 @@
#include "common/ceph_json.h"
#include "common/RWLock.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_acl.h"
#include "include/types.h"
@ -24,6 +25,8 @@
#include "rgw_bucket.h"
#include "rgw_quota.h"
#include "services/svc_zone.h"
#define dout_subsys ceph_subsys_rgw
@ -210,7 +213,7 @@ int rgw_store_user_info(RGWRados *store,
if (!info.user_email.empty()) {
if (!old_info ||
old_info->user_email.compare(info.user_email) != 0) { /* only if new index changed */
ret = rgw_put_system_obj(store, store->get_zone_params().user_email_pool, info.user_email,
ret = rgw_put_system_obj(store, store->svc.zone->get_zone_params().user_email_pool, info.user_email,
link_bl, exclusive, NULL, real_time());
if (ret < 0)
return ret;
@ -224,7 +227,7 @@ int rgw_store_user_info(RGWRados *store,
if (old_info && old_info->access_keys.count(iter->first) != 0)
continue;
ret = rgw_put_system_obj(store, store->get_zone_params().user_keys_pool, k.id,
ret = rgw_put_system_obj(store, store->svc.zone->get_zone_params().user_keys_pool, k.id,
link_bl, exclusive, NULL, real_time());
if (ret < 0)
return ret;
@ -237,7 +240,7 @@ int rgw_store_user_info(RGWRados *store,
if (old_info && old_info->swift_keys.count(siter->first) != 0)
continue;
ret = rgw_put_system_obj(store, store->get_zone_params().user_swift_pool, k.id,
ret = rgw_put_system_obj(store, store->svc.zone->get_zone_params().user_swift_pool, k.id,
link_bl, exclusive, NULL, real_time());
if (ret < 0)
return ret;
@ -321,7 +324,7 @@ int rgw_get_user_info_by_uid(RGWRados *store,
RGWObjectCtx obj_ctx(store);
string oid = uid.to_str();
int ret = rgw_get_system_obj(store, obj_ctx, store->get_zone_params().user_uid_pool, oid, bl, objv_tracker, pmtime, pattrs, cache_info);
int ret = rgw_get_system_obj(store, obj_ctx, store->svc.zone->get_zone_params().user_uid_pool, oid, bl, objv_tracker, pmtime, pattrs, cache_info);
if (ret < 0) {
return ret;
}
@ -351,7 +354,7 @@ int rgw_get_user_info_by_uid(RGWRados *store,
int rgw_get_user_info_by_email(RGWRados *store, string& email, RGWUserInfo& info,
RGWObjVersionTracker *objv_tracker, real_time *pmtime)
{
return rgw_get_user_info_from_index(store, email, store->get_zone_params().user_email_pool, info, objv_tracker, pmtime);
return rgw_get_user_info_from_index(store, email, store->svc.zone->get_zone_params().user_email_pool, info, objv_tracker, pmtime);
}
/**
@ -365,7 +368,7 @@ extern int rgw_get_user_info_by_swift(RGWRados * const store,
real_time * const pmtime)
{
return rgw_get_user_info_from_index(store, swift_name,
store->get_zone_params().user_swift_pool,
store->svc.zone->get_zone_params().user_swift_pool,
info, objv_tracker, pmtime);
}
@ -380,7 +383,7 @@ extern int rgw_get_user_info_by_access_key(RGWRados* store,
real_time *pmtime)
{
return rgw_get_user_info_from_index(store, access_key,
store->get_zone_params().user_keys_pool,
store->svc.zone->get_zone_params().user_keys_pool,
info, objv_tracker, pmtime);
}
@ -390,7 +393,7 @@ int rgw_get_user_attrs_by_uid(RGWRados *store,
RGWObjVersionTracker *objv_tracker)
{
RGWObjectCtx obj_ctx(store);
rgw_raw_obj obj(store->get_zone_params().user_uid_pool, user_id.to_str());
rgw_raw_obj obj(store->svc.zone->get_zone_params().user_uid_pool, user_id.to_str());
RGWRados::SystemObject src(store, obj_ctx, obj);
RGWRados::SystemObject::Read rop(&src);
@ -400,7 +403,7 @@ int rgw_get_user_attrs_by_uid(RGWRados *store,
int rgw_remove_key_index(RGWRados *store, RGWAccessKey& access_key)
{
rgw_raw_obj obj(store->get_zone_params().user_keys_pool, access_key.id);
rgw_raw_obj obj(store->svc.zone->get_zone_params().user_keys_pool, access_key.id);
int ret = store->delete_system_obj(obj);
return ret;
}
@ -426,13 +429,13 @@ int rgw_remove_email_index(RGWRados *store, string& email)
if (email.empty()) {
return 0;
}
rgw_raw_obj obj(store->get_zone_params().user_email_pool, email);
rgw_raw_obj obj(store->svc.zone->get_zone_params().user_email_pool, email);
return store->delete_system_obj(obj);
}
int rgw_remove_swift_name_index(RGWRados *store, string& swift_name)
{
rgw_raw_obj obj(store->get_zone_params().user_swift_pool, swift_name);
rgw_raw_obj obj(store->svc.zone->get_zone_params().user_swift_pool, swift_name);
int ret = store->delete_system_obj(obj);
return ret;
}
@ -478,7 +481,7 @@ int rgw_delete_user(RGWRados *store, RGWUserInfo& info, RGWObjVersionTracker& objv_tracker)
string buckets_obj_id;
rgw_get_buckets_obj(info.user_id, buckets_obj_id);
rgw_raw_obj uid_bucks(store->get_zone_params().user_uid_pool, buckets_obj_id);
rgw_raw_obj uid_bucks(store->svc.zone->get_zone_params().user_uid_pool, buckets_obj_id);
ldout(store->ctx(), 10) << "removing user buckets index" << dendl;
ret = store->delete_system_obj(uid_bucks);
if (ret < 0 && ret != -ENOENT) {
@ -489,7 +492,7 @@ int rgw_delete_user(RGWRados *store, RGWUserInfo& info, RGWObjVersionTracker& objv_tracker)
string key;
info.user_id.to_str(key);
rgw_raw_obj uid_obj(store->get_zone_params().user_uid_pool, key);
rgw_raw_obj uid_obj(store->svc.zone->get_zone_params().user_uid_pool, key);
ldout(store->ctx(), 10) << "removing user index: " << info.user_id << dendl;
ret = store->meta_mgr->remove_entry(user_meta_handler, key, &objv_tracker);
if (ret < 0 && ret != -ENOENT && ret != -ECANCELED) {
@ -2746,7 +2749,7 @@ public:
void get_pool_and_oid(RGWRados *store, const string& key, rgw_pool& pool, string& oid) override {
oid = key;
pool = store->get_zone_params().user_uid_pool;
pool = store->svc.zone->get_zone_params().user_uid_pool;
}
int list_keys_init(RGWRados *store, const string& marker, void **phandle) override
@ -2755,7 +2758,7 @@ public:
info->store = store;
int ret = store->list_raw_objects_init(store->get_zone_params().user_uid_pool, marker,
int ret = store->list_raw_objects_init(store->svc.zone->get_zone_params().user_uid_pool, marker,
&info->ctx);
if (ret < 0) {
return ret;
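
The user hunks above touch four separate index pools, each now resolved through the zone service rather than off RGWRados directly. A condensed sketch of the index layout, assembled from the calls above (a minimal illustration, assuming an RGWRados *store; the key parameters stand for whatever identifier each index is keyed on):

#include <string>
#include "rgw_rados.h"
#include "services/svc_zone.h"

// Sketch only: mirrors the pool/key pairs used in the hunks above.
void user_index_objs(RGWRados *store, const std::string& uid,
                     const std::string& email, const std::string& access_key,
                     const std::string& swift_name)
{
  auto& zp = store->svc.zone->get_zone_params();
  rgw_raw_obj by_uid(zp.user_uid_pool, uid);              // user record + buckets list
  rgw_raw_obj by_email(zp.user_email_pool, email);        // email -> uid link
  rgw_raw_obj by_key(zp.user_keys_pool, access_key);      // access key -> uid link
  rgw_raw_obj by_swift(zp.user_swift_pool, swift_name);   // swift name -> uid link
}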


@ -0,0 +1,37 @@
#include "svc_quota.h"
#include "svc_zone.h"
#include "rgw/rgw_zone.h"
int RGWS_Quota::create_instance(const string& conf, RGWServiceInstanceRef *instance)
{
instance->reset(new RGWSI_Quota(this, cct));
return 0;
}
std::map<string, RGWServiceInstance::dependency> RGWSI_Quota::get_deps()
{
RGWServiceInstance::dependency dep = { .name = "zone",
.conf = "{}" };
map<string, RGWServiceInstance::dependency> deps;
deps["zone_dep"] = dep;
return deps;
}
int RGWSI_Quota::init(const string& conf, std::map<std::string, RGWServiceInstanceRef>& dep_refs)
{
zone_svc = static_pointer_cast<RGWSI_Zone>(dep_refs["zone_dep"]);
assert(zone_svc);
return 0;
}
const RGWQuotaInfo& RGWSI_Quota::get_bucket_quota() const
{
return zone_svc->get_current_period().get_config().bucket_quota;
}
const RGWQuotaInfo& RGWSI_Quota::get_user_quota() const
{
return zone_svc->get_current_period().get_config().user_quota;
}


@ -0,0 +1,32 @@
#ifndef CEPH_RGW_SERVICES_QUOTA_H
#define CEPH_RGW_SERVICES_QUOTA_H
#include "rgw/rgw_service.h"
class RGWSI_Zone;
class RGWS_Quota : public RGWService
{
public:
RGWS_Quota(CephContext *cct) : RGWService(cct, "quota") {}
int create_instance(const std::string& conf, RGWServiceInstanceRef *instance);
};
class RGWSI_Quota : public RGWServiceInstance
{
std::shared_ptr<RGWSI_Zone> zone_svc;
std::map<std::string, RGWServiceInstance::dependency> get_deps();
int init(const std::string& conf, std::map<std::string, RGWServiceInstanceRef>& dep_refs);
public:
RGWSI_Quota(RGWService *svc, CephContext *cct): RGWServiceInstance(svc, cct) {}
const RGWQuotaInfo& get_bucket_quota() const;
const RGWQuotaInfo& get_user_quota() const;
};
#endif
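
The quota service itself is deliberately thin: create_instance() builds it, get_deps() declares the zone dependency, init() resolves that dependency into zone_svc, and the two getters then read quota defaults out of the current period's config. A minimal consumer sketch, assuming an already-initialized instance (the handle and function names here are illustrative, not part of this commit):

#include "rgw/rgw_quota.h"
#include "services/svc_quota.h"

// Sketch only: quota_svc is assumed to come out of the service framework
// fully initialized, i.e. with its zone dependency already resolved.
void apply_default_quotas(RGWSI_Quota *quota_svc,
                          RGWQuotaInfo *bucket_quota, RGWQuotaInfo *user_quota)
{
  // Both defaults are period-level config, reached through the zone service.
  *bucket_quota = quota_svc->get_bucket_quota();
  *user_quota = quota_svc->get_user_quota();
}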


@ -8,7 +8,7 @@
int RGWS_RADOS::create_instance(const string& conf, RGWServiceInstanceRef *instance)
{
*instance = std::make_shared<RGWServiceInstance>();
instance->reset(new RGWSI_RADOS(this, cct));
return 0;
}


@ -24,8 +24,6 @@ struct rgw_rados_ref {
class RGWSI_RADOS : public RGWServiceInstance
{
RGWServiceInstanceRef svc_rados;
std::vector<librados::Rados> rados;
uint32_t next_rados_handle{0};
RWLock handle_lock;


@ -3,6 +3,12 @@
#include "rgw/rgw_zone.h"
int RGWS_Zone::create_instance(const string& conf, RGWServiceInstanceRef *instance)
{
instance->reset(new RGWSI_Zone(this, cct));
return 0;
}
std::map<string, RGWServiceInstance::dependency> RGWSI_Zone::get_deps()
{
RGWServiceInstance::dependency dep = { .name = "rados",
@ -72,23 +78,6 @@ bool RGWSI_Zone::has_zonegroup_api(const std::string& api) const
return false;
}
string RGWSI_Zone::gen_host_id() {
/* uint64_t needs 16, two '-' separators and a trailing null */
const string& zone_name = zone_public_config->name;
const string& zonegroup_name = zonegroup->get_name();
char charbuf[16 + zone_name.size() + zonegroup_name.size() + 2 + 1];
snprintf(charbuf, sizeof(charbuf), "%llx-%s-%s", (unsigned long long)rados_svc->instance_id(), zone_name.c_str(), zonegroup_name.c_str());
return string(charbuf);
}
string RGWSI_Zone::unique_id(uint64_t unique_num)
{
char buf[32];
snprintf(buf, sizeof(buf), ".%llu.%llu", (unsigned long long)rados_svc->instance_id(), (unsigned long long)unique_num);
string s = zone_params->get_id() + buf;
return s;
}
bool RGWSI_Zone::zone_is_writeable()
{
return writeable_zone && !get_zone().is_read_only();
@ -108,6 +97,43 @@ const string& RGWSI_Zone::zone_id()
return get_zone_params().get_id();
}
bool RGWSI_Zone::find_zone_by_id(const string& id, RGWZone **zone)
{
auto iter = zone_by_id.find(id);
if (iter == zone_by_id.end()) {
return false;
}
*zone = &(iter->second);
return true;
}
RGWRESTConn *RGWSI_Zone::get_zone_conn_by_id(const string& id) {
auto citer = zone_conn_map.find(id);
if (citer == zone_conn_map.end()) {
return NULL;
}
return citer->second;
}
RGWRESTConn *RGWSI_Zone::get_zone_conn_by_name(const string& name) {
auto i = zone_id_by_name.find(name);
if (i == zone_id_by_name.end()) {
return NULL;
}
return get_zone_conn_by_id(i->second);
}
bool RGWSI_Zone::find_zone_id_by_name(const string& name, string *id) {
auto i = zone_id_by_name.find(name);
if (i == zone_id_by_name.end()) {
return false;
}
*id = i->second;
return true;
}
bool RGWSI_Zone::need_to_log_data() const
{
return zone_public_config->log_data;
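
The lookup helpers added above are layered: find_zone_id_by_name() consults zone_id_by_name, and get_zone_conn_by_name() composes it with get_zone_conn_by_id(), so a null return can mean either an unknown name or a zone with no established connection. A hedged usage sketch (the zone name and the wrapper function are illustrative):

#include <string>
#include "services/svc_zone.h"

// Sketch only: zone_svc is an initialized RGWSI_Zone*; "us-east" is a
// placeholder zone name.
RGWRESTConn *peer_conn_for(RGWSI_Zone *zone_svc)
{
  std::string id;
  if (!zone_svc->find_zone_id_by_name("us-east", &id)) {
    return nullptr;  // name not present in zone_id_by_name
  }
  // Same as get_zone_conn_by_name("us-east"): may still be null if no
  // REST connection was built for that zone id.
  return zone_svc->get_zone_conn_by_id(id);
}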


@ -5,6 +5,8 @@
#include "rgw/rgw_service.h"
class RGWSI_RADOS;
struct RGWZoneGroup;
struct RGWZone;
struct RGWZoneParams;
@ -12,7 +14,7 @@ struct RGWPeriod;
struct RGWRealm;
struct RGWZonePlacementInfo;
class RGWSI_RADOS;
class RGWRESTConn;
class RGWS_Zone : public RGWService
{
@ -26,17 +28,26 @@ class RGWSI_Zone : public RGWServiceInstance
{
std::shared_ptr<RGWSI_RADOS> rados_svc;
std::unique_ptr<RGWRealm> realm;
std::unique_ptr<RGWZoneGroup> zonegroup;
std::unique_ptr<RGWZone> zone_public_config; /* external zone params, e.g., entrypoints, log flags, etc. */
std::unique_ptr<RGWZoneParams> zone_params; /* internal zone params, e.g., rados pools */
std::unique_ptr<RGWPeriod> current_period;
std::shared_ptr<RGWRealm> realm;
std::shared_ptr<RGWZoneGroup> zonegroup;
std::shared_ptr<RGWZone> zone_public_config; /* external zone params, e.g., entrypoints, log flags, etc. */
std::shared_ptr<RGWZoneParams> zone_params; /* internal zone params, e.g., rados pools */
std::shared_ptr<RGWPeriod> current_period;
uint32_t zone_short_id{0};
bool writeable_zone{false};
std::map<std::string, RGWServiceInstance::dependency> get_deps();
int init(const std::string& conf, std::map<std::string, RGWServiceInstanceRef>& dep_refs);
RGWRESTConn *rest_master_conn{nullptr};
map<string, RGWRESTConn *> zone_conn_map;
map<string, RGWRESTConn *> zone_data_sync_from_map;
map<string, RGWRESTConn *> zone_data_notify_to_map;
map<string, RGWRESTConn *> zonegroup_conn_map;
map<string, string> zone_id_by_name;
map<string, RGWZone> zone_by_id;
public:
RGWSI_Zone(RGWService *svc, CephContext *cct): RGWServiceInstance(svc, cct) {}
@ -54,13 +65,24 @@ public:
const string& get_current_period_id();
bool has_zonegroup_api(const std::string& api) const;
string gen_host_id();
string unique_id(uint64_t unique_num);
bool zone_is_writeable();
bool zone_syncs_from(RGWZone& target_zone, RGWZone& source_zone);
bool get_redirect_zone_endpoint(string *endpoint);
RGWRESTConn *get_master_conn() {
return rest_master_conn;
}
map<string, RGWRESTConn *>& get_zone_conn_map() {
return zone_conn_map;
}
bool find_zone_by_id(const string& id, RGWZone **zone);
RGWRESTConn *get_zone_conn_by_id(const string& id);
RGWRESTConn *get_zone_conn_by_name(const string& name);
bool find_zone_id_by_name(const string& name, string *id);
int select_bucket_placement(RGWUserInfo& user_info, const string& zonegroup_id, const string& rule,
string *pselected_rule_name, RGWZonePlacementInfo *rule_info);
int select_legacy_bucket_placement(RGWZonePlacementInfo *rule_info);
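
Stepping back, every call-site change in this commit follows the same mechanical rule: zone, zonegroup, period, and connection state formerly read straight off RGWRados now goes through store->svc.zone, and RGWRESTConn construction takes the zone service instead of the store. A before/after sketch of the accessor pattern (a minimal illustration; the wrapper function is hypothetical):

#include "rgw_rados.h"
#include "services/svc_zone.h"

// Sketch of the migration applied throughout the hunks above.
rgw_pool log_pool_of(RGWRados *store)
{
  // before: store->get_zone_params().log_pool
  // after:  the same data, now owned by the zone service
  return store->svc.zone->get_zone_params().log_pool;
}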