mirror of
https://github.com/ceph/ceph
synced 2025-01-01 08:32:24 +00:00
crimson/osd: settle snap_reserver within ShardServices
This bit will be used by the upcoming `SnapTrimRequest`.

Signed-off-by: Radoslaw Zarzynski <rzarzyns@redhat.com>
This commit is contained in:
parent
c9bdfdc1a4
commit
ba60d84e7b
@@ -135,7 +135,11 @@ OSDSingletonState::OSDSingletonState(
     &cct,
     &finisher,
     crimson::common::local_conf()->osd_max_backfills,
-    crimson::common::local_conf()->osd_min_recovery_priority)
+    crimson::common::local_conf()->osd_min_recovery_priority),
+  snap_reserver(
+    &cct,
+    &finisher,
+    crimson::common::local_conf()->osd_max_trimming_pgs)
 {
   crimson::common::local_conf().add_observer(this);
   osdmaps[0] = boost::make_local_shared<OSDMap>();
@@ -317,6 +321,7 @@ const char** OSDSingletonState::get_tracked_conf_keys() const
   static const char* KEYS[] = {
     "osd_max_backfills",
     "osd_min_recovery_priority",
+    "osd_max_trimming_pgs",
     nullptr
   };
   return KEYS;
@@ -334,6 +339,9 @@ void OSDSingletonState::handle_conf_change(
     local_reserver.set_min_priority(conf->osd_min_recovery_priority);
     remote_reserver.set_min_priority(conf->osd_min_recovery_priority);
   }
+  if (changed.count("osd_max_trimming_pgs")) {
+    snap_reserver.set_max(conf->osd_max_trimming_pgs);
+  }
 }

 seastar::future<OSDSingletonState::local_cached_map_t>
@@ -269,6 +269,7 @@ private:
   } finisher;
   AsyncReserver<spg_t, DirectFinisher> local_reserver;
   AsyncReserver<spg_t, DirectFinisher> remote_reserver;
+  AsyncReserver<spg_t, DirectFinisher> snap_reserver;

   epoch_t up_thru_wanted = 0;
   seastar::future<> send_alive(epoch_t want);
@@ -478,6 +479,12 @@ public:
   FORWARD_TO_OSD_SINGLETON_TARGET(
     remote_dump_reservations,
     remote_reserver.dump)
+  FORWARD_TO_OSD_SINGLETON_TARGET(
+    snap_cancel_reservation,
+    snap_reserver.cancel_reservation)
+  FORWARD_TO_OSD_SINGLETON_TARGET(
+    snap_dump_reservations,
+    snap_reserver.dump)

   Context *invoke_context_on_core(core_id_t core, Context *c) {
     if (!c) return nullptr;
@@ -523,6 +530,23 @@ public:
       invoke_context_on_core(seastar::this_shard_id(), on_reserved),
       invoke_context_on_core(seastar::this_shard_id(), on_preempt));
   }
+  seastar::future<> snap_request_reservation(
+    spg_t item,
+    Context *on_reserved,
+    unsigned prio,
+    Context *on_preempt) {
+    return with_singleton(
+      [item, prio](OSDSingletonState &singleton,
+                   Context *wrapped_on_reserved, Context *wrapped_on_preempt) {
+        return singleton.snap_reserver.request_reservation(
+          item,
+          wrapped_on_reserved,
+          prio,
+          wrapped_on_preempt);
+      },
+      invoke_context_on_core(seastar::this_shard_id(), on_reserved),
+      invoke_context_on_core(seastar::this_shard_id(), on_preempt));
+  }

 #undef FORWARD_CONST
 #undef FORWARD
|
Loading…
Reference in New Issue
Block a user