mon: OSDMonitor: allow adding tiers to FS pools
This was an overly-strict check.  In fact it is perfectly fine to set an
overlay on a pool that is already in use as a filesystem data or metadata
pool.

Fixes: #10135
Signed-off-by: John Spray <john.spray@redhat.com>
commit 17b5fc9a40
parent aa4d147864
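The first hunk below extends the shell test function test_mon_mds(). It uses
the pools fs_metadata, fs_data, mds-tier, and mds-ec-pool, which are
presumably created earlier in the same function; a rough, hypothetical sketch
of that kind of setup (not part of this commit, placement-group counts chosen
arbitrarily):

    # Base pools for the filesystem plus a pool to use as the cache tier.
    ceph osd pool create fs_data 16
    ceph osd pool create fs_metadata 16
    ceph osd pool create mds-tier 16
    # An erasure-coded pool; only its deletion appears in the hunk below.
    ceph osd pool create mds-ec-pool 16 16 erasure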
@@ -765,6 +765,16 @@ function test_mon_mds()

  fail_all_mds
  ceph fs rm cephfs --yes-i-really-mean-it

  # Create a FS and check that we can subsequently add a cache tier to it
  ceph fs new cephfs fs_metadata fs_data

  # Adding overlay to FS pool should be permitted, RADOS clients handle this.
  ceph osd tier add fs_metadata mds-tier
  ceph osd tier cache-mode mds-tier writeback
  ceph osd tier set-overlay fs_metadata mds-tier

  fail_all_mds
  ceph fs rm cephfs --yes-i-really-mean-it

  ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it
  ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it
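For reference, the tiering commands added above have inverse operations; a
hypothetical teardown sketch (not part of this commit) that detaches the
cache tier instead of deleting the pools outright:

    # Sketch only: unset the overlay, then remove the tier relationship.
    ceph osd tier remove-overlay fs_metadata
    ceph osd tier remove fs_metadata mds-tier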
@@ -5685,10 +5685,6 @@ done:

      goto reply;
    }

    if (!_check_remove_tier(pool_id, p, &err, &ss)) {
      goto reply;
    }

    // go
    pg_pool_t *np = pending_inc.get_new_pool(pool_id, p);
    np->read_tier = overlaypool_id;
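In this hunk, read_tier on the base pool is what points client reads at the
cache pool once an overlay is set. A minimal sketch, assuming the pools from
the test above, of how one might confirm the overlay took effect (exact
output format varies by release):

    # The base pool's entry in the OSD map should now reference the tier pool.
    ceph osd dump | grep "'fs_metadata'"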