mirror of https://github.com/ceph/ceph
mgr/restful: remove deprecated module

Detailed changes:

* Remove `restful` mgr module dir
* Remove Python dependencies (`pecan`, `werkzeug`) from ceph.spec and debian control
* Remove docs
* Remove associated QA tests
* Update vstart

Fixes: https://tracker.ceph.com/issues/47066
Signed-off-by: Ernesto Puerta <epuertat@redhat.com>
commit 96ec7badb8 (parent 20007ea4a7)
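With this change merged, `restful` should no longer show up in the manager module list. A minimal verification sketch, not part of the commit, assuming the `ceph` CLI can reach a live cluster (newer releases report disabled modules as objects with a `name` field, so both shapes are handled)::

    import json
    import subprocess

    # Ask the mgr for its module list as JSON.
    out = subprocess.check_output(["ceph", "mgr", "module", "ls", "--format=json"])
    modules = json.loads(out)

    disabled = [m["name"] if isinstance(m, dict) else m
                for m in modules.get("disabled_modules", [])]

    # After this commit the module should be absent from both lists.
    assert "restful" not in modules.get("enabled_modules", [])
    assert "restful" not in disabled
    print("restful module is gone")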
@@ -336,7 +336,6 @@ BuildRequires: hostname
 BuildRequires: jq
 BuildRequires: libuuid-devel
 BuildRequires: python%{python3_pkgversion}-bcrypt
-BuildRequires: python%{python3_pkgversion}-pecan
 BuildRequires: python%{python3_pkgversion}-requests
 BuildRequires: python%{python3_pkgversion}-dateutil
 BuildRequires: python%{python3_pkgversion}-coverage

@@ -469,7 +468,6 @@ BuildRequires: xmlsec1-openssl-devel
 BuildRequires: python%{python3_pkgversion}-cherrypy
 BuildRequires: python%{python3_pkgversion}-routes
 BuildRequires: python%{python3_pkgversion}-scipy
-BuildRequires: python%{python3_pkgversion}-werkzeug
 BuildRequires: python%{python3_pkgversion}-pyOpenSSL
 %endif
 BuildRequires: jsonnet

@@ -480,7 +478,6 @@ BuildRequires: libxmlsec1-nss1
 BuildRequires: libxmlsec1-openssl1
 BuildRequires: python%{python3_pkgversion}-CherryPy
 BuildRequires: python%{python3_pkgversion}-Routes
-BuildRequires: python%{python3_pkgversion}-Werkzeug
 BuildRequires: python%{python3_pkgversion}-numpy-devel
 BuildRequires: xmlsec1-devel
 BuildRequires: xmlsec1-openssl-devel

@@ -705,7 +702,6 @@ Recommends: python%{python3_pkgversion}-grpcio-tools
 %if 0%{?suse_version}
 Requires: python%{python3_pkgversion}-CherryPy
 Requires: python%{python3_pkgversion}-Routes
-Requires: python%{python3_pkgversion}-Werkzeug
 Recommends: python%{python3_pkgversion}-python3-saml
 %endif
 %description mgr-dashboard

@@ -738,7 +734,6 @@ Group: System/Filesystems
 %endif
 Requires: python%{python3_pkgversion}-bcrypt
 Requires: python%{python3_pkgversion}-packaging
-Requires: python%{python3_pkgversion}-pecan
 Requires: python%{python3_pkgversion}-pyOpenSSL
 Requires: python%{python3_pkgversion}-requests
 Requires: python%{python3_pkgversion}-dateutil

@@ -746,12 +741,10 @@ Requires: python%{python3_pkgversion}-setuptools
 %if 0%{?fedora} || 0%{?rhel} >= 8 || 0%{?openEuler}
 Requires: python%{python3_pkgversion}-cherrypy
 Requires: python%{python3_pkgversion}-pyyaml
-Requires: python%{python3_pkgversion}-werkzeug
 %endif
 %if 0%{?suse_version}
 Requires: python%{python3_pkgversion}-CherryPy
 Requires: python%{python3_pkgversion}-PyYAML
-Requires: python%{python3_pkgversion}-Werkzeug
 %endif
 # RHEL8 has python 3.6 and that lacks dataclasses in the stdlib, so pull in the
 # backport dataclasses module instead.

@@ -1996,7 +1989,6 @@ fi
 %{_datadir}/ceph/mgr/progress
 %{_datadir}/ceph/mgr/prometheus
 %{_datadir}/ceph/mgr/rbd_support
-%{_datadir}/ceph/mgr/restful
 %{_datadir}/ceph/mgr/rgw
 %{_datadir}/ceph/mgr/selftest
 %{_datadir}/ceph/mgr/smb
@@ -15,8 +15,6 @@ usr/share/ceph/mgr/pg_autoscaler
 usr/share/ceph/mgr/progress
 usr/share/ceph/mgr/prometheus
 usr/share/ceph/mgr/rbd_support
 usr/share/ceph/mgr/rgw
-usr/share/ceph/mgr/restful
 usr/share/ceph/mgr/selftest
 usr/share/ceph/mgr/snap_schedule
 usr/share/ceph/mgr/stats
@@ -1,7 +1,5 @@
 natsort
 CherryPy
 packaging
-pecan
-werkzeug
 requests
 python-dateutil
@@ -94,7 +94,6 @@ Build-Depends: automake,
 python3-all-dev,
 python3-cherrypy3,
 python3-natsort,
-python3-pecan <pkg.ceph.check>,
 python3-bcrypt <pkg.ceph.check>,
 tox <pkg.ceph.check>,
 python3-coverage <pkg.ceph.check>,

@@ -107,7 +106,6 @@ Build-Depends: automake,
 python3-setuptools,
 python3-sphinx,
 python3-venv,
-python3-werkzeug <pkg.ceph.check>,
 python3-yaml,
 ragel <pkg.ceph.crimson>,
 socat <pkg.ceph.check>,
@@ -285,12 +285,6 @@ class CephMgrCommands(Directive):
         # make diskprediction_local happy
         mock_imports += ['numpy',
                          'scipy']
-        # make restful happy
-        mock_imports += ['pecan',
-                         'pecan.rest',
-                         'pecan.hooks',
-                         'werkzeug',
-                         'werkzeug.serving']

         for m in mock_imports:
             args = {}

@@ -289,12 +289,6 @@ class CephOption(ObjectDescription):
         # make diskprediction_local happy
         mock_imports += ['numpy',
                          'scipy']
-        # make restful happy
-        mock_imports += ['pecan',
-                         'pecan.rest',
-                         'pecan.hooks',
-                         'werkzeug',
-                         'werkzeug.serving']

        for m in mock_imports:
            args = {}
@@ -161,8 +161,6 @@ dashboard and its URLs::

   dashboard urls: https://192.168.178.84:41259, https://192.168.178.84:43259, https://192.168.178.84:45259
     w/ user/pass: admin / admin
-  restful urls: https://192.168.178.84:42259, https://192.168.178.84:44259, https://192.168.178.84:46259
-    w/ user/pass: admin / 598da51f-8cd1-4161-a970-b2944d5ad200

 During development (especially in backend development), you also want to
 check on occasions if the dashboard manager module is still running. To do so

@@ -175,8 +173,7 @@ should look similar to the following output:

   $ ./bin/ceph mgr services
   {
-      "dashboard": "https://home:41931/",
-      "restful": "https://home:42931/"
+      "dashboard": "https://home:41931/"
   }

 By default, this environment uses a randomly chosen port for Ceph Dashboard
@@ -100,7 +100,6 @@ Here is an example of enabling the :term:`Dashboard` module:

   $ ceph mgr module ls
   {
       "enabled_modules": [
-           "restful",
            "status"
       ],
       "disabled_modules": [

@@ -112,7 +111,6 @@ Here is an example of enabling the :term:`Dashboard` module:

   $ ceph mgr module ls
   {
       "enabled_modules": [
-           "restful",
            "status",
            "dashboard"
       ],

@@ -122,8 +120,7 @@ Here is an example of enabling the :term:`Dashboard` module:

   $ ceph mgr services
   {
-      "dashboard": "http://myserver.com:7789/",
-      "restful": "https://myserver.com:8789/"
+      "dashboard": "http://myserver.com:7789/"
   }

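The ``ceph mgr services`` map shown above is the standard way to discover where a manager module is listening. A small sketch, not part of the commit, that resolves the dashboard URL from that map on a live cluster::

    import json
    import subprocess

    # "ceph mgr services" prints a JSON map of module name -> URL,
    # e.g. {"dashboard": "http://myserver.com:7789/"}.
    services = json.loads(subprocess.check_output(["ceph", "mgr", "services"]))

    url = services.get("dashboard")
    if url is None:
        raise SystemExit("dashboard module is not serving")
    print("dashboard at", url)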
@@ -33,7 +33,6 @@ sensible.
     Alerts module <alerts>
     DiskPrediction module <diskprediction>
     Local pool module <localpool>
-    RESTful module <restful>
     Zabbix module <zabbix>
     Prometheus module <prometheus>
     Influx module <influx>
@@ -1,202 +0,0 @@
-Restful Module
-==============
-
-RESTful module offers the REST API access to the status of the cluster
-over an SSL-secured connection.
-
-Enabling
---------
-
-The *restful* module is enabled with::
-
-  ceph mgr module enable restful
-
-You will also need to configure an SSL certificate below before the
-API endpoint is available. By default the module will accept HTTPS
-requests on port ``8003`` on all IPv4 and IPv6 addresses on the host.
-
-Securing
---------
-
-All connections to *restful* are secured with SSL. You can generate a
-self-signed certificate with the command::
-
-  ceph restful create-self-signed-cert
-
-Note that with a self-signed certificate most clients will need a flag
-to allow a connection and/or suppress warning messages. For example,
-if the ``ceph-mgr`` daemon is on the same host,::
-
-  curl -k https://localhost:8003/
-
-To properly secure a deployment, a certificate that is signed by the
-organization's certificate authority should be used. For example, a key pair
-can be generated with a command similar to::
-
-  openssl req -new -nodes -x509 \
-    -subj "/O=IT/CN=ceph-mgr-restful" \
-    -days 3650 -keyout restful.key -out restful.crt -extensions v3_ca
-
-The ``restful.crt`` should then be signed by your organization's CA
-(certificate authority). Once that is done, you can set it with::
-
-  ceph config-key set mgr/restful/$name/crt -i restful.crt
-  ceph config-key set mgr/restful/$name/key -i restful.key
-
-where ``$name`` is the name of the ``ceph-mgr`` instance (usually the
-hostname). If all manager instances are to share the same certificate,
-you can leave off the ``$name`` portion::
-
-  ceph config-key set mgr/restful/crt -i restful.crt
-  ceph config-key set mgr/restful/key -i restful.key
-
-
-Configuring IP and port
------------------------
-
-Like any other RESTful API endpoint, *restful* binds to an IP and
-port. By default, the currently active ``ceph-mgr`` daemon will bind
-to port 8003 and any available IPv4 or IPv6 address on the host.
-
-Since each ``ceph-mgr`` hosts its own instance of *restful*, it may
-also be necessary to configure them separately. The IP and port
-can be changed via the configuration key facility::
-
-  ceph config set mgr mgr/restful/$name/server_addr $IP
-  ceph config set mgr mgr/restful/$name/server_port $PORT
-
-where ``$name`` is the ID of the ceph-mgr daemon (usually the hostname).
-
-These settings can also be configured cluster-wide and not manager
-specific. For example,::
-
-  ceph config set mgr mgr/restful/server_addr $IP
-  ceph config set mgr mgr/restful/server_port $PORT
-
-If the port is not configured, *restful* will bind to port ``8003``.
-If the address it not configured, the *restful* will bind to ``::``,
-which corresponds to all available IPv4 and IPv6 addresses.
-
-Configuring max_request
----------------------------
-
-The maximum request size can be configured via a central configuration
-option::
-
-  ceph config set mgr mgr/restful/$name/max_requests $NUM
-
-where ``$name`` is the ID of the ceph-mgr daemon (usually the hostname).
-
-.. mgr_module:: restful
-.. confval:: max_requests
-
-.. _creating-an-api-user:
-
-Creating an API User
------------------------
-
-To create an API user, please run the following command::
-
-  ceph restful create-key <username>
-
-Replace ``<username>`` with the desired name of the user. For example, to create a user named
-``api``::
-
-  $ ceph restful create-key api
-  52dffd92-a103-4a10-bfce-5b60f48f764e
-
-The UUID generated from ``ceph restful create-key api`` acts as the key for the user.
-
-To list all of your API keys, please run the following command::
-
-  ceph restful list-keys
-
-The ``ceph restful list-keys`` command will output in JSON::
-
-  {
-      "api": "52dffd92-a103-4a10-bfce-5b60f48f764e"
-  }
-
-You can use ``curl`` in order to test your user with the API. Here is an example::
-
-  curl -k https://api:52dffd92-a103-4a10-bfce-5b60f48f764e@<ceph-mgr>:<port>/server
-
-In the case above, we are using ``GET`` to fetch information from the ``server`` endpoint.
-
-Load balancer
--------------
-
-Please note that *restful* will *only* start on the manager which
-is active at that moment. Query the Ceph cluster status to see which
-manager is active (e.g., ``ceph mgr dump``). In order to make the
-API available via a consistent URL regardless of which manager
-daemon is currently active, you may want to set up a load balancer
-front-end to direct traffic to whichever manager endpoint is
-available.
-
-Available methods
------------------
-
-You can navigate to the ``/doc`` endpoint for full list of available
-endpoints and HTTP methods implemented for each endpoint.
-
-For example, if you want to use the PATCH method of the ``/osd/<id>``
-endpoint to set the state ``up`` of the OSD id ``1``, you can use the
-following curl command::
-
-  echo -En '{"up": true}' | curl --request PATCH --data @- --silent --insecure --user <user> 'https://<ceph-mgr>:<port>/osd/1'
-
-or you can use python to do so::
-
-  $ python
-  >> import requests
-  >> result = requests.patch(
-         'https://<ceph-mgr>:<port>/osd/1',
-         json={"up": True},
-         auth=("<user>", "<password>")
-     )
-  >> print result.json()
-
-Some of the other endpoints implemented in the *restful* module include
-
-* ``/config/cluster``: **GET**
-* ``/config/osd``: **GET**, **PATCH**
-* ``/crush/rule``: **GET**
-* ``/mon``: **GET**
-* ``/osd``: **GET**
-* ``/pool``: **GET**, **POST**
-* ``/pool/<arg>``: **DELETE**, **GET**, **PATCH**
-* ``/request``: **DELETE**, **GET**, **POST**
-* ``/request/<arg>``: **DELETE**, **GET**
-* ``/server``: **GET**
-
-The ``/request`` endpoint
--------------------------
-
-You can use the ``/request`` endpoint to poll the state of a request
-you scheduled with any **DELETE**, **POST** or **PATCH** method. These
-methods are by default asynchronous since it may take longer for them
-to finish execution. You can modify this behaviour by appending
-``?wait=1`` to the request url. The returned request will then always
-be completed.
-
-The **POST** method of the ``/request`` method provides a passthrough
-for the ceph mon commands as defined in ``src/mon/MonCommands.h``.
-Let's consider the following command::
-
-  COMMAND("osd ls " \
-          "name=epoch,type=CephInt,range=0,req=false", \
-          "show all OSD ids", "osd", "r", "cli,rest")
-
-The **prefix** is **osd ls**. The optional argument's name is **epoch**
-and it is of type ``CephInt``, i.e. ``integer``. This means that you
-need to do the following **POST** request to schedule the command::
-
-  $ python
-  >> import requests
-  >> result = requests.post(
-         'https://<ceph-mgr>:<port>/request',
-         json={'prefix': 'osd ls', 'epoch': 0},
-         auth=("<user>", "<password>")
-     )
-  >> print result.json()
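The deleted page's interactive snippets above use Python 2 syntax (``>> print result.json()``). For historical reference, a Python 3 sketch of the same PATCH call, keeping the placeholders (``<ceph-mgr>``, ``<port>``, ``<user>``, ``<password>``) from the removed docs::

    import requests

    # Python 3 version of the removed doc's PATCH example. Note that the
    # module's own handler rejected {"up": true} ("It is not valid to set
    # a down OSD to be up"); {"up": false} marks the OSD down instead.
    result = requests.patch(
        "https://<ceph-mgr>:<port>/osd/1",
        json={"up": False},
        auth=("<user>", "<password>"),
        verify=False,  # self-signed certificate, like "curl --insecure"
    )
    print(result.json())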
@@ -374,10 +374,6 @@ if [ x$(uname)x = xFreeBSDx ]; then
        security/oath-toolkit \
        sysutils/flock \
        sysutils/fusefs-libs \
-
-    # Now use pip to install some extra python modules
-    pip install pecan
-
     exit
 else
     [ $WITH_SEASTAR ] && with_seastar=true || with_seastar=false
@@ -21,7 +21,6 @@ overrides:
       ceph_repository: dev
       ceph_mgr_modules:
         - status
-        - restful
       cephfs_pools:
         - name: "cephfs_data"
          pg_num: "64"
@@ -1,15 +0,0 @@
-tasks:
-- exec:
-    mgr.x:
-      - systemctl stop ceph-mgr.target
-      - sleep 5
-      - ceph -s
-- exec:
-    mon.a:
-      - ceph restful create-key admin
-      - ceph restful create-self-signed-cert
-      - ceph restful restart
-- workunit:
-    clients:
-      client.0:
-        - rest/test-restful.sh
@@ -20,7 +20,6 @@ overrides:
       ceph_repository: dev
       ceph_mgr_modules:
         - status
-        - restful
      cephfs_pools:
        - name: "cephfs_data"
          pg_num: "64"
@@ -1 +0,0 @@
-../.qa/
@@ -1,31 +0,0 @@
-openstack:
-  - volumes: # attached to each instance
-      count: 3
-      size: 10 # GB
-roles:
-- [mon.a, mgr.x, osd.0, osd.1, osd.2, mds.a, client.a]
-tasks:
-- install:
-- ceph:
-    log-ignorelist:
-      - overall HEALTH_
-      - \(MGR_DOWN\)
-      - \(PG_
-      - \(OSD_
-      - \(OBJECT_
-      - \(OSDMAP_FLAGS\)
-      - \(POOL_APP_NOT_ENABLED\)
-- exec:
-    mon.a:
-      - ceph restful create-key admin
-      - ceph restful create-self-signed-cert
-      - ceph restful restart
-- workunit:
-    clients:
-      client.a:
-        - rest/test-restful.sh
-- exec:
-    mon.a:
-      - ceph restful delete-key admin
-      - ceph restful list-keys | jq ".admin" | grep null
@@ -1 +0,0 @@
-../basic/supported-random-distro$
@@ -108,7 +108,7 @@ class MgrTestCase(CephTestCase):
         # Unload all non-default plugins
         loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd(
                    "mgr", "module", "ls", "--format=json-pretty"))['enabled_modules']
-        unload_modules = set(loaded) - {"cephadm", "restful"}
+        unload_modules = set(loaded) - {"cephadm"}

         for m in unload_modules:
             cls.mgr_cluster.mon_manager.raw_cluster_cmd(

@@ -137,7 +137,7 @@ class MgrTestCase(CephTestCase):
             raise SkipTest(
                 "Only have {0} manager daemons, {1} are required".format(
                     len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED))
-
+
         # We expect laggy OSDs in this testing environment so turn off this warning.
         # See https://tracker.ceph.com/issues/61907
         cls.mgr_cluster.mon_manager.raw_cluster_cmd('config', 'set', 'mds',
@@ -63,7 +63,7 @@ function retry_eagain()
   for count in $(seq 1 $max) ; do
       status=0
      "$@" > $tmpfile 2>&1 || status=$?
-      if test $status = 0 ||
+      if test $status = 0 ||
          ! grep --quiet EAGAIN $tmpfile ; then
          break
      fi

@@ -108,7 +108,7 @@ function check_response()
      exit 1
  fi

-  if ! grep --quiet -- "$expected_string" $TMPFILE ; then
+  if ! grep --quiet -- "$expected_string" $TMPFILE ; then
      echo "Didn't find $expected_string in output" >&2
      cat $TMPFILE >&2
      exit 1

@@ -696,7 +696,7 @@ function test_auth_profiles()

   ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro
   ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw
-
+
   # add a new role-definer with the existing role-definer
   ceph -n client.xx-profile-rd -k client.xx.keyring \
       auth add client.xx-profile-rd2 mon 'allow profile role-definer'

@@ -730,7 +730,7 @@ function test_mon_caps()
   ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring
   ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring
   rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true
-  check_response "Permission denied"
+  check_response "Permission denied"
 }

 function test_mon_misc()

@@ -780,7 +780,6 @@ function test_mon_misc()
   ceph mgr dump
   ceph mgr dump | jq -e '.active_clients[0].name'
   ceph mgr module ls
-  ceph mgr module enable restful
   expect_false ceph mgr module enable foodne
   ceph mgr module enable foodne --force
   ceph mgr module disable foodne

@@ -1650,7 +1649,7 @@ function test_mon_osd()
   dump_json=$(ceph osd dump --format=json | \
       jq -cM '.osds[] | select(.osd == 0)')
   [[ "${info_json}" == "${dump_json}" ]]
-
+
   info_plain="$(ceph osd info)"
   dump_plain="$(ceph osd dump | grep '^osd')"
   [[ "${info_plain}" == "${dump_plain}" ]]

@@ -2244,7 +2243,7 @@ function test_mon_pg()
   # tell osd version
   #
   ceph tell osd.0 version
-  expect_false ceph tell osd.9999 version
+  expect_false ceph tell osd.9999 version
   expect_false ceph tell osd.foo version

   # back to pg stuff

@@ -2336,7 +2335,7 @@ function test_mon_osd_pool_set()
   ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.'

   ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.'
-  ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
+  ceph osd pool set $TEST_POOL_GETSET recovery_priority 5
   ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5'
   ceph osd pool set $TEST_POOL_GETSET recovery_priority -5
   ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5'

@@ -2346,13 +2345,13 @@ function test_mon_osd_pool_set()
   expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11

   ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'
-  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
+  ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5
   ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5'
   ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0
   ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.'

   ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'
-  ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
+  ceph osd pool set $TEST_POOL_GETSET scrub_priority 5
   ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5'
   ceph osd pool set $TEST_POOL_GETSET scrub_priority 0
   ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.'

@@ -2386,10 +2385,10 @@ function test_mon_osd_pool_set()
   ceph osd pool set $TEST_POOL_GETSET size 2
   wait_for_clean
   ceph osd pool set $TEST_POOL_GETSET min_size 2
-
+
   expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0
   ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it
-
+
   expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1
   ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it

@@ -2587,7 +2586,7 @@ function test_mon_osd_misc()
   ceph osd map 2>$TMPFILE; check_response 'pool' $? 22

   # expect error about unused argument foo
-  ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22
+  ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22

   # expect "not in range" for invalid overload percentage
   ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22
@@ -1,10 +0,0 @@
-#!/bin/sh -ex
-
-mydir=`dirname $0`
-
-secret=`ceph config-key get mgr/restful/keys/admin`
-url=$(ceph mgr dump|jq -r .services.restful|sed -e 's/\/$//')
-echo "url $url secret $secret"
-$mydir/test_mgr_rest_api.py $url $secret
-
-echo $0 OK
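The deleted workunit above discovers the module's URL from ``ceph mgr dump``. A Python 3 sketch of the same discovery, not part of the commit, and applicable to any mgr-served endpoint::

    import json
    import subprocess

    # "ceph mgr dump" includes a "services" map, e.g.
    # {"restful": "https://host:8003/"} while the module still existed.
    dump = json.loads(subprocess.check_output(["ceph", "mgr", "dump"]))

    url = dump.get("services", {}).get("restful", "").rstrip("/")
    secret = subprocess.check_output(
        ["ceph", "config-key", "get", "mgr/restful/keys/admin"]).decode().strip()
    print("url", url, "secret", secret)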
@@ -152,7 +152,7 @@ options:
     first started after installation, to populate the list of enabled manager modules. Subsequent
     updates are done using the 'mgr module [enable|disable]' commands. List may be
     comma or space separated.
-  default: restful iostat nfs
+  default: iostat nfs
   services:
   - mon
   - common
@@ -42,7 +42,6 @@ set(mgr_modules
   progress
   prometheus
   rbd_support
-  restful
   rgw
   # rook (optional)
   selftest
@@ -1 +0,0 @@
-from .module import Module
@@ -1,39 +0,0 @@
-from pecan import expose
-from pecan.rest import RestController
-
-from .config import Config
-from .crush import Crush
-from .doc import Doc
-from .mon import Mon
-from .osd import Osd
-from .pool import Pool
-from .perf import Perf
-from .request import Request
-from .server import Server
-
-
-class Root(RestController):
-    config = Config()
-    crush = Crush()
-    doc = Doc()
-    mon = Mon()
-    osd = Osd()
-    perf = Perf()
-    pool = Pool()
-    request = Request()
-    server = Server()
-
-    @expose(template='json')
-    def get(self, **kwargs):
-        """
-        Show the basic information for the REST API
-        This includes values like api version or auth method
-        """
-        return {
-            'api_version': 1,
-            'auth':
-                'Use "ceph restful create-key <key>" to create a key pair, '
-                'pass it as HTTP Basic auth to authenticate',
-            'doc': 'See /doc endpoint',
-            'info': "Ceph Manager RESTful API server",
-        }
@@ -1,86 +0,0 @@
-from pecan import expose, request
-from pecan.rest import RestController
-
-from restful import common, context
-from restful.decorators import auth
-
-
-class ConfigOsd(RestController):
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show OSD configuration options
-        """
-        flags = context.instance.get("osd_map")['flags']
-
-        # pause is a valid osd config command that sets pauserd,pausewr
-        flags = flags.replace('pauserd,pausewr', 'pause')
-
-        return flags.split(',')
-
-
-    @expose(template='json')
-    @auth
-    def patch(self, **kwargs):
-        """
-        Modify OSD configuration options
-        """
-        args = request.json
-
-        commands = []
-
-        valid_flags = set(args.keys()) & set(common.OSD_FLAGS)
-        invalid_flags = list(set(args.keys()) - valid_flags)
-        if invalid_flags:
-            context.instance.log.warning("%s not valid to set/unset", invalid_flags)
-
-        for flag in list(valid_flags):
-            if args[flag]:
-                mode = 'set'
-            else:
-                mode = 'unset'
-
-            commands.append({
-                'prefix': 'osd ' + mode,
-                'key': flag,
-            })
-
-        return context.instance.submit_request([commands], **kwargs)
-
-
-
-class ConfigClusterKey(RestController):
-    def __init__(self, key):
-        self.key = key
-
-
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show specific configuration option
-        """
-        return context.instance.get("config").get(self.key, None)
-
-
-
-class ConfigCluster(RestController):
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show all cluster configuration options
-        """
-        return context.instance.get("config")
-
-
-    @expose()
-    def _lookup(self, key, *remainder):
-        return ConfigClusterKey(key), remainder
-
-
-
-class Config(RestController):
-    cluster = ConfigCluster()
-    osd = ConfigOsd()
@@ -1,25 +0,0 @@
-from pecan import expose
-from pecan.rest import RestController
-
-from restful import common, context
-
-from restful.decorators import auth
-
-
-class CrushRule(RestController):
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show crush rules
-        """
-        crush = context.instance.get('osd_map_crush')
-        rules = crush['rules']
-
-        for rule in rules:
-            rule['osd_count'] = len(common.crush_rule_osds(crush['buckets'], rule))
-
-        return rules
-
-class Crush(RestController):
-    rule = CrushRule()
@@ -1,15 +0,0 @@
-from pecan import expose
-from pecan.rest import RestController
-
-from restful import context
-
-import restful
-
-
-class Doc(RestController):
-    @expose(template='json')
-    def get(self, **kwargs):
-        """
-        Show documentation information
-        """
-        return context.instance.get_doc_api(restful.api.Root)
@@ -1,40 +0,0 @@
-from pecan import expose, response
-from pecan.rest import RestController
-
-from restful import context
-from restful.decorators import auth
-
-
-class MonName(RestController):
-    def __init__(self, name):
-        self.name = name
-
-
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show the information for the monitor name
-        """
-        mon = [x for x in context.instance.get_mons()
-               if x['name'] == self.name]
-        if len(mon) != 1:
-            response.status = 500
-            return {'message': 'Failed to identify the monitor node "{}"'.format(self.name)}
-        return mon[0]
-
-
-
-class Mon(RestController):
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show the information for all the monitors
-        """
-        return context.instance.get_mons()
-
-
-    @expose()
-    def _lookup(self, name, *remainder):
-        return MonName(name), remainder
@@ -1,135 +0,0 @@
-from pecan import expose, request, response
-from pecan.rest import RestController
-
-from restful import common, context
-from restful.decorators import auth
-
-
-class OsdIdCommand(RestController):
-    def __init__(self, osd_id):
-        self.osd_id = osd_id
-
-
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show implemented commands for the OSD id
-        """
-        osd = context.instance.get_osd_by_id(self.osd_id)
-
-        if not osd:
-            response.status = 500
-            return {'message': 'Failed to identify the OSD id "{}"'.format(self.osd_id)}
-
-        if osd['up']:
-            return common.OSD_IMPLEMENTED_COMMANDS
-        else:
-            return []
-
-
-    @expose(template='json')
-    @auth
-    def post(self, **kwargs):
-        """
-        Run the implemented command for the OSD id
-        """
-        command = request.json.get('command', None)
-
-        osd = context.instance.get_osd_by_id(self.osd_id)
-
-        if not osd:
-            response.status = 500
-            return {'message': 'Failed to identify the OSD id "{}"'.format(self.osd_id)}
-
-        if not osd['up'] or command not in common.OSD_IMPLEMENTED_COMMANDS:
-            response.status = 500
-            return {'message': 'Command "{}" not available'.format(command)}
-
-        return context.instance.submit_request([[{
-            'prefix': 'osd ' + command,
-            'who': str(self.osd_id)
-        }]], **kwargs)
-
-
-
-class OsdId(RestController):
-    def __init__(self, osd_id):
-        self.osd_id = osd_id
-        self.command = OsdIdCommand(osd_id)
-
-
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show the information for the OSD id
-        """
-        osd = context.instance.get_osds(ids=[str(self.osd_id)])
-        if len(osd) != 1:
-            response.status = 500
-            return {'message': 'Failed to identify the OSD id "{}"'.format(self.osd_id)}
-
-        return osd[0]
-
-
-    @expose(template='json')
-    @auth
-    def patch(self, **kwargs):
-        """
-        Modify the state (up, in) of the OSD id or reweight it
-        """
-        args = request.json
-
-        commands = []
-
-        if 'in' in args:
-            if args['in']:
-                commands.append({
-                    'prefix': 'osd in',
-                    'ids': [str(self.osd_id)]
-                })
-            else:
-                commands.append({
-                    'prefix': 'osd out',
-                    'ids': [str(self.osd_id)]
-                })
-
-        if 'up' in args:
-            if args['up']:
-                response.status = 500
-                return {'message': "It is not valid to set a down OSD to be up"}
-            else:
-                commands.append({
-                    'prefix': 'osd down',
-                    'ids': [str(self.osd_id)]
-                })
-
-        if 'reweight' in args:
-            commands.append({
-                'prefix': 'osd reweight',
-                'id': self.osd_id,
-                'weight': args['reweight']
-            })
-
-        return context.instance.submit_request([commands], **kwargs)
-
-
-
-class Osd(RestController):
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show the information for all the OSDs
-        """
-        # Parse request args
-        # TODO Filter by ids
-        pool_id = kwargs.get('pool', None)
-
-        return context.instance.get_osds(pool_id)
-
-
-    @expose()
-    def _lookup(self, osd_id, *remainder):
-        return OsdId(int(osd_id)), remainder
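The ``OsdId.patch`` handler above maps a small JSON body onto ``osd in``/``osd out``/``osd down``/``osd reweight`` mon commands. A sketch of the kind of request it accepted, not part of the commit, with the same placeholders as the removed docs::

    import requests

    # Take OSD 1 out and reweight it in one PATCH; both commands end up in
    # a single parallel batch via submit_request([commands]).
    result = requests.patch(
        "https://<ceph-mgr>:<port>/osd/1",
        json={"in": False, "reweight": 0.8},
        auth=("<user>", "<password>"),
        verify=False,  # self-signed certificate
    )
    print(result.json())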
@@ -1,27 +0,0 @@
-from pecan import expose, request, response
-from pecan.rest import RestController
-
-from restful import context
-from restful.decorators import auth, lock, paginate
-
-import re
-
-class Perf(RestController):
-    @expose(template='json')
-    @paginate
-    @auth
-    def get(self, **kwargs):
-        """
-        List all the available performance counters
-
-        Options:
-          - 'daemon' -- filter by daemon, accepts Python regexp
-        """
-
-        counters = context.instance.get_unlabeled_perf_counters()
-
-        if 'daemon' in kwargs:
-            _re = re.compile(kwargs['daemon'])
-            counters = {k: v for k, v in counters.items() if _re.match(k)}
-
-        return counters
@@ -1,140 +0,0 @@
-from pecan import expose, request, response
-from pecan.rest import RestController
-
-from restful import common, context
-from restful.decorators import auth
-
-
-class PoolId(RestController):
-    def __init__(self, pool_id):
-        self.pool_id = pool_id
-
-
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show the information for the pool id
-        """
-        pool = context.instance.get_pool_by_id(self.pool_id)
-
-        if not pool:
-            response.status = 500
-            return {'message': 'Failed to identify the pool id "{}"'.format(self.pool_id)}
-
-        # pgp_num is called pg_placement_num, deal with that
-        if 'pg_placement_num' in pool:
-            pool['pgp_num'] = pool.pop('pg_placement_num')
-        return pool
-
-
-    @expose(template='json')
-    @auth
-    def patch(self, **kwargs):
-        """
-        Modify the information for the pool id
-        """
-        try:
-            args = request.json
-        except ValueError:
-            response.status = 400
-            return {'message': 'Bad request: malformed JSON or wrong Content-Type'}
-
-        # Get the pool info for its name
-        pool = context.instance.get_pool_by_id(self.pool_id)
-        if not pool:
-            response.status = 500
-            return {'message': 'Failed to identify the pool id "{}"'.format(self.pool_id)}
-
-        # Check for invalid pool args
-        invalid = common.invalid_pool_args(args)
-        if invalid:
-            response.status = 500
-            return {'message': 'Invalid arguments found: "{}"'.format(invalid)}
-
-        # Schedule the update request
-        return context.instance.submit_request(common.pool_update_commands(pool['pool_name'], args), **kwargs)
-
-
-    @expose(template='json')
-    @auth
-    def delete(self, **kwargs):
-        """
-        Remove the pool data for the pool id
-        """
-        pool = context.instance.get_pool_by_id(self.pool_id)
-
-        if not pool:
-            response.status = 500
-            return {'message': 'Failed to identify the pool id "{}"'.format(self.pool_id)}
-
-        return context.instance.submit_request([[{
-            'prefix': 'osd pool delete',
-            'pool': pool['pool_name'],
-            'pool2': pool['pool_name'],
-            'yes_i_really_really_mean_it': True
-        }]], **kwargs)
-
-
-
-class Pool(RestController):
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show the information for all the pools
-        """
-        pools = context.instance.get('osd_map')['pools']
-
-        # pgp_num is called pg_placement_num, deal with that
-        for pool in pools:
-            if 'pg_placement_num' in pool:
-                pool['pgp_num'] = pool.pop('pg_placement_num')
-
-        return pools
-
-
-    @expose(template='json')
-    @auth
-    def post(self, **kwargs):
-        """
-        Create a new pool
-        Requires name and pg_num dict arguments
-        """
-        args = request.json
-
-        # Check for the required arguments
-        pool_name = args.pop('name', None)
-        if pool_name is None:
-            response.status = 500
-            return {'message': 'You need to specify the pool "name" argument'}
-
-        pg_num = args.pop('pg_num', None)
-        if pg_num is None:
-            response.status = 500
-            return {'message': 'You need to specify the "pg_num" argument'}
-
-        # Run the pool create command first
-        create_command = {
-            'prefix': 'osd pool create',
-            'pool': pool_name,
-            'pg_num': pg_num
-        }
-
-        # Check for invalid pool args
-        invalid = common.invalid_pool_args(args)
-        if invalid:
-            response.status = 500
-            return {'message': 'Invalid arguments found: "{}"'.format(invalid)}
-
-        # Schedule the creation and update requests
-        return context.instance.submit_request(
-            [[create_command]] +
-            common.pool_update_commands(pool_name, args),
-            **kwargs
-        )
-
-
-    @expose()
-    def _lookup(self, pool_id, *remainder):
-        return PoolId(int(pool_id)), remainder
@@ -1,93 +0,0 @@
-from pecan import expose, request, response
-from pecan.rest import RestController
-
-from restful import context
-from restful.decorators import auth, lock, paginate
-
-
-class RequestId(RestController):
-    def __init__(self, request_id):
-        self.request_id = request_id
-
-
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show the information for the request id
-        """
-        request = [x for x in context.instance.requests
-                   if x.id == self.request_id]
-        if len(request) != 1:
-            response.status = 500
-            return {'message': 'Unknown request id "{}"'.format(self.request_id)}
-        return request[0]
-
-
-    @expose(template='json')
-    @auth
-    @lock
-    def delete(self, **kwargs):
-        """
-        Remove the request id from the database
-        """
-        for index in range(len(context.instance.requests)):
-            if context.instance.requests[index].id == self.request_id:
-                return context.instance.requests.pop(index)
-
-        # Failed to find the job to cancel
-        response.status = 500
-        return {'message': 'No such request id'}
-
-
-
-class Request(RestController):
-    @expose(template='json')
-    @paginate
-    @auth
-    def get(self, **kwargs):
-        """
-        List all the available requests
-        """
-        return context.instance.requests
-
-
-    @expose(template='json')
-    @auth
-    @lock
-    def delete(self, **kwargs):
-        """
-        Remove all the finished requests
-        """
-        num_requests = len(context.instance.requests)
-
-        context.instance.requests = [x for x in context.instance.requests
-                                     if not x.is_finished()]
-        remaining = len(context.instance.requests)
-        # Return the job statistics
-        return {
-            'cleaned': num_requests - remaining,
-            'remaining': remaining,
-        }
-
-
-    @expose(template='json')
-    @auth
-    def post(self, **kwargs):
-        """
-        Pass through method to create any request
-        """
-        if isinstance(request.json, list):
-            if all(isinstance(element, list) for element in request.json):
-                return context.instance.submit_request(request.json, **kwargs)
-
-            # The request.json has wrong format
-            response.status = 500
-            return {'message': 'The request format should be [[{c1},{c2}]]'}
-
-        return context.instance.submit_request([[request.json]], **kwargs)
-
-
-    @expose()
-    def _lookup(self, request_id, *remainder):
-        return RequestId(request_id), remainder
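``Request.post`` above is the passthrough used by the deleted docs' ``osd ls`` example; since scheduled requests are asynchronous, callers either poll the returned id or append ``?wait=1``. A sketch combining both, not part of the commit, reusing the docs' placeholders::

    import requests

    auth = ("<user>", "<password>")   # placeholders, as in the removed docs
    base = "https://<ceph-mgr>:<port>"

    # Schedule "osd ls" through the mon-command passthrough and block on it.
    result = requests.post(base + "/request?wait=1",
                           json={"prefix": "osd ls", "epoch": 0},
                           auth=auth, verify=False)
    state = result.json()

    # CommandsRequest.__json__ exposes these fields.
    print(state["state"], state["finished"])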
@@ -1,35 +0,0 @@
-from pecan import expose
-from pecan.rest import RestController
-
-from restful import context
-from restful.decorators import auth
-
-
-class ServerFqdn(RestController):
-    def __init__(self, fqdn):
-        self.fqdn = fqdn
-
-
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show the information for the server fqdn
-        """
-        return context.instance.get_server(self.fqdn)
-
-
-
-class Server(RestController):
-    @expose(template='json')
-    @auth
-    def get(self, **kwargs):
-        """
-        Show the information for all the servers
-        """
-        return context.instance.list_servers()
-
-
-    @expose()
-    def _lookup(self, fqdn, *remainder):
-        return ServerFqdn(fqdn), remainder
@@ -1,156 +0,0 @@
-# List of valid osd flags
-OSD_FLAGS = [
-    'pause', 'noup', 'nodown', 'noout', 'noin', 'nobackfill',
-    'norecover', 'noscrub', 'nodeep-scrub',
-]
-
-# Implemented osd commands
-OSD_IMPLEMENTED_COMMANDS = [
-    'scrub', 'deep-scrub', 'repair'
-]
-
-# Valid values for the 'var' argument to 'ceph osd pool set'
-POOL_PROPERTIES_1 = [
-    'size', 'min_size', 'pg_num',
-    'crush_rule', 'hashpspool',
-]
-
-POOL_PROPERTIES_2 = [
-    'pgp_num'
-]
-
-POOL_PROPERTIES = POOL_PROPERTIES_1 + POOL_PROPERTIES_2
-
-# Valid values for the 'ceph osd pool set-quota' command
-POOL_QUOTA_PROPERTIES = [
-    ('quota_max_bytes', 'max_bytes'),
-    ('quota_max_objects', 'max_objects'),
-]
-
-POOL_ARGS = POOL_PROPERTIES + [x for x,_ in POOL_QUOTA_PROPERTIES]
-
-
-# Transform command to a human readable form
-def humanify_command(command):
-    out = [command['prefix']]
-
-    for arg, val in command.items():
-        if arg != 'prefix':
-            out.append("%s=%s" % (str(arg), str(val)))
-
-    return " ".join(out)
-
-
-def invalid_pool_args(args):
-    invalid = []
-    for arg in args:
-        if arg not in POOL_ARGS:
-            invalid.append(arg)
-
-    return invalid
-
-
-def pool_update_commands(pool_name, args):
-    commands = [[], []]
-
-    # We should increase pgp_num when we are re-setting pg_num
-    if 'pg_num' in args and 'pgp_num' not in args:
-        args['pgp_num'] = args['pg_num']
-
-    # Run the first pool set and quota properties in parallel
-    for var in POOL_PROPERTIES_1:
-        if var in args:
-            commands[0].append({
-                'prefix': 'osd pool set',
-                'pool': pool_name,
-                'var': var,
-                'val': args[var],
-            })
-
-    for (var, field) in POOL_QUOTA_PROPERTIES:
-        if var in args:
-            commands[0].append({
-                'prefix': 'osd pool set-quota',
-                'pool': pool_name,
-                'field': field,
-                'val': str(args[var]),
-            })
-
-    # The second pool set properties need to be run after the first wave
-    for var in POOL_PROPERTIES_2:
-        if var in args:
-            commands[1].append({
-                'prefix': 'osd pool set',
-                'pool': pool_name,
-                'var': var,
-                'val': args[var],
-            })
-
-    return commands
-
-def crush_rule_osds(node_buckets, rule):
-    nodes_by_id = dict((b['id'], b) for b in node_buckets)
-
-    def _gather_leaf_ids(node_id):
-        if node_id >= 0:
-            return set([node_id])
-
-        result = set()
-        for item in nodes_by_id[node_id]['items']:
-            result |= _gather_leaf_ids(item['id'])
-
-        return result
-
-    def _gather_descendent_ids(node, typ):
-        result = set()
-        for item in node['items']:
-            if item['id'] >= 0:
-                if typ == "osd":
-                    result.add(item['id'])
-            else:
-                child_node = nodes_by_id[item['id']]
-                if child_node['type_name'] == typ:
-                    result.add(child_node['id'])
-                elif 'items' in child_node:
-                    result |= _gather_descendent_ids(child_node, typ)
-
-        return result
-
-    def _gather_osds(root, steps):
-        if root['id'] >= 0:
-            return set([root['id']])
-
-        osds = set()
-        step = steps[0]
-        if step['op'] == 'choose_firstn':
-            # Choose all descendents of the current node of type 'type'
-            descendent_ids = _gather_descendent_ids(root, step['type'])
-            for node_id in descendent_ids:
-                if node_id >= 0:
-                    osds.add(node_id)
-                else:
-                    osds |= _gather_osds(nodes_by_id[node_id], steps[1:])
-        elif step['op'] == 'chooseleaf_firstn':
-            # Choose all descendents of the current node of type 'type',
-            # and select all leaves beneath those
-            descendent_ids = _gather_descendent_ids(root, step['type'])
-            for node_id in descendent_ids:
-                if node_id >= 0:
-                    osds.add(node_id)
-                else:
-                    for desc_node in nodes_by_id[node_id]['items']:
-                        # Short circuit another iteration to find the emit
-                        # and assume anything we've done a chooseleaf on
-                        # is going to be part of the selected set of osds
-                        osds |= _gather_leaf_ids(desc_node['id'])
-        elif step['op'] == 'emit':
-            if root['id'] >= 0:
-                osds |= root['id']
-
-        return osds
-
-    osds = set()
-    for i, step in enumerate(rule['steps']):
-        if step['op'] == 'take':
-            osds |= _gather_osds(nodes_by_id[step['item']], rule['steps'][i + 1:])
-    return osds
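The ``pool_update_commands`` helper above batches ``osd pool set`` calls into two sequential waves so that ``pgp_num`` only changes after ``pg_num``. A condensed, standalone restatement, not part of the commit, that shows the resulting command batches (quota handling omitted for brevity)::

    # Condensed restatement of the removed helper's wave logic.
    FIRST_WAVE = ['size', 'min_size', 'pg_num', 'crush_rule', 'hashpspool']
    SECOND_WAVE = ['pgp_num']  # must follow pg_num, hence the second wave

    def pool_update_commands(pool_name, args):
        commands = [[], []]
        if 'pg_num' in args and 'pgp_num' not in args:
            args['pgp_num'] = args['pg_num']  # keep placement groups in sync
        for wave, props in ((0, FIRST_WAVE), (1, SECOND_WAVE)):
            for var in props:
                if var in args:
                    commands[wave].append({'prefix': 'osd pool set',
                                           'pool': pool_name,
                                           'var': var, 'val': args[var]})
        return commands

    # Two waves: "osd pool set ... pg_num" first, then "... pgp_num".
    print(pool_update_commands('rbd', {'pg_num': 128}))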
@@ -1,2 +0,0 @@
-# Global instance to share
-instance = None
@@ -1,81 +0,0 @@
-
-from pecan import request, response
-from base64 import b64decode
-from functools import wraps
-
-import traceback
-
-from . import context
-
-
-# Handle authorization
-def auth(f):
-    @wraps(f)
-    def decorated(*args, **kwargs):
-        if not context.instance.enable_auth:
-            return f(*args, **kwargs)
-
-        if not request.authorization:
-            response.status = 401
-            response.headers['WWW-Authenticate'] = 'Basic realm="Login Required"'
-            return {'message': 'auth: No HTTP username/password'}
-
-        username, password = b64decode(request.authorization[1]).decode('utf-8').split(':')
-
-        # Check that the username exists
-        if username not in context.instance.keys:
-            response.status = 401
-            response.headers['WWW-Authenticate'] = 'Basic realm="Login Required"'
-            return {'message': 'auth: No such user'}
-
-        # Check the password
-        if context.instance.keys[username] != password:
-            response.status = 401
-            response.headers['WWW-Authenticate'] = 'Basic realm="Login Required"'
-            return {'message': 'auth: Incorrect password'}
-
-        return f(*args, **kwargs)
-    return decorated
-
-
-# Helper function to lock the function
-def lock(f):
-    @wraps(f)
-    def decorated(*args, **kwargs):
-        with context.instance.requests_lock:
-            return f(*args, **kwargs)
-    return decorated
-
-
-# Support ?page=N argument
-def paginate(f):
-    @wraps(f)
-    def decorated(*args, **kwargs):
-        _out = f(*args, **kwargs)
-
-        # Do not modify anything without a specific request
-        if not 'page' in kwargs:
-            return _out
-
-        # A pass-through for errors, etc
-        if not isinstance(_out, list):
-            return _out
-
-        # Parse the page argument
-        _page = kwargs['page']
-        try:
-            _page = int(_page)
-        except ValueError:
-            response.status = 500
-            return {'message': 'The requested page is not an integer'}
-
-        # Raise _page so that 0 is the first page and -1 is the last
-        _page += 1
-
-        if _page > 0:
-            _page *= 100
-        else:
-            _page = len(_out) - (_page*100)
-
-        return _out[_page - 100: _page]
-    return decorated
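The removed ``paginate`` decorator hard-codes 100 items per page and lets negative pages count from the end. A standalone sketch of the same arithmetic, not part of the commit::

    # Same arithmetic as the removed decorator, pulled out for illustration.
    def page_slice(items, page):
        page += 1                     # raise page so 0 is first, -1 is last
        if page > 0:
            page *= 100               # page 0 -> items[0:100], page 1 -> [100:200]
        else:
            page = len(items) - (page * 100)  # page -1 -> the last 100 items
        return items[page - 100:page]

    data = list(range(250))
    assert page_slice(data, 0) == data[:100]      # first page
    assert page_slice(data, 1) == data[100:200]   # second page
    assert page_slice(data, -1) == data[150:]     # last 100 items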
@@ -1,10 +0,0 @@
-
-from pecan.hooks import PecanHook
-
-import traceback
-
-from . import context
-
-class ErrorHook(PecanHook):
-    def on_error(self, stat, exc):
-        context.instance.log.error(str(traceback.format_exc()))
@ -1,633 +0,0 @@
|
|||
"""
|
||||
A RESTful API for Ceph
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import time
|
||||
import errno
|
||||
import inspect
|
||||
import tempfile
|
||||
import threading
|
||||
import traceback
|
||||
import socket
|
||||
import fcntl
|
||||
from typing import cast
|
||||
|
||||
from . import common
|
||||
from . import context
|
||||
|
||||
from uuid import uuid4
|
||||
from pecan import jsonify, make_app
|
||||
from OpenSSL import crypto
|
||||
from pecan.rest import RestController
|
||||
from werkzeug.serving import make_server, make_ssl_devcert
|
||||
|
||||
from .hooks import ErrorHook
|
||||
from mgr_module import MgrModule, CommandResult, NotifyType, Option
|
||||
from mgr_util import build_url
|
||||
|
||||
|
||||
class CannotServe(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class CommandsRequest(object):
|
||||
"""
|
||||
This class handles parallel as well as sequential execution of
|
||||
commands. The class accept a list of iterables that should be
|
||||
executed sequentially. Each iterable can contain several commands
|
||||
that can be executed in parallel.
|
||||
|
||||
Example:
|
||||
[[c1,c2],[c3,c4]]
|
||||
- run c1 and c2 in parallel
|
||||
- wait for them to finish
|
||||
- run c3 and c4 in parallel
|
||||
- wait for them to finish
|
||||
"""
|
||||
|
||||
|
||||
def __init__(self, commands_arrays):
|
||||
self.id = str(id(self))
|
||||
|
||||
# Filter out empty sub-requests
|
||||
commands_arrays = [x for x in commands_arrays
|
||||
if len(x) != 0]
|
||||
|
||||
self.running = []
|
||||
self.waiting = commands_arrays[1:]
|
||||
self.finished = []
|
||||
self.failed = []
|
||||
|
||||
self.lock = threading.RLock()
|
||||
if not len(commands_arrays):
|
||||
# Nothing to run
|
||||
return
|
||||
|
||||
# Process first iteration of commands_arrays in parallel
|
||||
results = self.run(commands_arrays[0])
|
||||
|
||||
self.running.extend(results)
|
||||
|
||||
|
||||
def run(self, commands):
|
||||
"""
|
||||
A static method that will execute the given list of commands in
|
||||
parallel and will return the list of command results.
|
||||
"""
|
||||
|
||||
# Gather the results (in parallel)
|
||||
results = []
|
||||
for index, command in enumerate(commands):
|
||||
tag = '%s:%s:%d' % (__name__, self.id, index)
|
||||
|
||||
# Store the result
|
||||
result = CommandResult(tag)
|
||||
result.command = common.humanify_command(command)
|
||||
results.append(result)
|
||||
|
||||
# Run the command
|
||||
context.instance.send_command(result, 'mon', '', json.dumps(command), tag)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def next(self):
|
||||
with self.lock:
|
||||
if not self.waiting:
|
||||
# Nothing to run
|
||||
return
|
||||
|
||||
# Run a next iteration of commands
|
||||
commands = self.waiting[0]
|
||||
self.waiting = self.waiting[1:]
|
||||
|
||||
self.running.extend(self.run(commands))
|
||||
|
||||
|
||||
def finish(self, tag):
|
||||
with self.lock:
|
||||
for index in range(len(self.running)):
|
||||
if self.running[index].tag == tag:
|
||||
if self.running[index].r == 0:
|
||||
self.finished.append(self.running.pop(index))
|
||||
else:
|
||||
self.failed.append(self.running.pop(index))
|
||||
return True
|
||||
|
||||
# No such tag found
|
||||
return False
|
||||
|
||||
|
||||
def is_running(self, tag):
|
||||
for result in self.running:
|
||||
if result.tag == tag:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def is_ready(self):
|
||||
with self.lock:
|
||||
return not self.running and self.waiting
|
||||
|
||||
|
||||
def is_waiting(self):
|
||||
return bool(self.waiting)
|
||||
|
||||
|
||||
def is_finished(self):
|
||||
with self.lock:
|
||||
return not self.running and not self.waiting
|
||||
|
||||
|
||||
def has_failed(self):
|
||||
return bool(self.failed)
|
||||
|
||||
|
||||
def get_state(self):
|
||||
with self.lock:
|
||||
if not self.is_finished():
|
||||
return "pending"
|
||||
|
||||
if self.has_failed():
|
||||
return "failed"
|
||||
|
||||
return "success"
|
||||
|
||||
|
||||
def __json__(self):
|
||||
return {
|
||||
'id': self.id,
|
||||
'running': [
|
||||
{
|
||||
'command': x.command,
|
||||
'outs': x.outs,
|
||||
'outb': x.outb,
|
||||
} for x in self.running
|
||||
],
|
||||
'finished': [
|
||||
{
|
||||
'command': x.command,
|
||||
'outs': x.outs,
|
||||
'outb': x.outb,
|
||||
} for x in self.finished
|
||||
],
|
||||
'waiting': [
|
||||
[common.humanify_command(y) for y in x]
|
||||
for x in self.waiting
|
||||
],
|
||||
'failed': [
|
||||
{
|
||||
'command': x.command,
|
||||
'outs': x.outs,
|
||||
'outb': x.outb,
|
||||
} for x in self.failed
|
||||
],
|
||||
'is_waiting': self.is_waiting(),
|
||||
'is_finished': self.is_finished(),
|
||||
'has_failed': self.has_failed(),
|
||||
'state': self.get_state(),
|
||||
}
|
||||
|
||||
|
||||
|
||||
class Module(MgrModule):
|
||||
MODULE_OPTIONS = [
|
||||
Option(name='server_addr'),
|
||||
Option(name='server_port'),
|
||||
Option(name='key_file'),
|
||||
Option(name='enable_auth',
|
||||
type='bool',
|
||||
default=True),
|
||||
Option(name='max_requests',
|
||||
type='int',
|
||||
default=500,
|
||||
desc='Maximum number of requests to keep in memory. '
|
||||
' When new request comes in, the oldest request will be removed if the number of requests exceeds the max request number.'
|
||||
'if un-finished request is removed, error message will be logged in the ceph-mgr log.'),
|
||||
]
|
||||
|
||||
COMMANDS = [
|
||||
{
|
||||
"cmd": "restful create-key name=key_name,type=CephString",
|
||||
"desc": "Create an API key with this name",
|
||||
"perm": "rw"
|
||||
},
|
||||
{
|
||||
"cmd": "restful delete-key name=key_name,type=CephString",
|
||||
"desc": "Delete an API key with this name",
|
||||
"perm": "rw"
|
||||
},
|
||||
{
|
||||
"cmd": "restful list-keys",
|
||||
"desc": "List all API keys",
|
||||
"perm": "r"
|
||||
},
|
||||
{
|
||||
"cmd": "restful create-self-signed-cert",
|
||||
"desc": "Create localized self signed certificate",
|
||||
"perm": "rw"
|
||||
},
|
||||
{
|
||||
"cmd": "restful restart",
|
||||
"desc": "Restart API server",
|
||||
"perm": "rw"
|
||||
},
|
||||
]
|
||||
|
||||
NOTIFY_TYPES = [NotifyType.command]
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(Module, self).__init__(*args, **kwargs)
|
||||
context.instance = self
|
||||
|
||||
self.requests = []
|
||||
self.requests_lock = threading.RLock()
|
||||
|
||||
self.keys = {}
|
||||
self.enable_auth = True
|
||||
|
||||
self.server = None
|
||||
|
||||
self.stop_server = False
|
||||
self.serve_event = threading.Event()
|
||||
self.max_requests = cast(int, self.get_localized_module_option('max_requests', 500))
|
||||
|
||||
|
||||
def serve(self):
|
||||
self.log.debug('serve enter')
|
||||
while not self.stop_server:
|
||||
try:
|
||||
self._serve()
|
||||
self.server.socket.close()
|
||||
except CannotServe as cs:
|
||||
self.log.warning("server not running: %s", cs)
|
||||
except:
|
||||
self.log.error(str(traceback.format_exc()))
|
||||
|
||||
# Wait and clear the threading event
|
||||
self.serve_event.wait()
|
||||
self.serve_event.clear()
|
||||
self.log.debug('serve exit')
|
||||
|
||||
def refresh_keys(self):
|
||||
self.keys = {}
|
||||
rawkeys = self.get_store_prefix('keys/') or {}
|
||||
for k, v in rawkeys.items():
|
||||
self.keys[k[5:]] = v # strip of keys/ prefix
|
||||
|
||||
    def _serve(self):
        # Load stored authentication keys
        self.refresh_keys()

        jsonify._instance = jsonify.GenericJSON(
            sort_keys=True,
            indent=4,
            separators=(',', ': '),
        )

        server_addr = self.get_localized_module_option('server_addr', '::')
        if server_addr is None:
            raise CannotServe('no server_addr configured; try "ceph config-key set mgr/restful/server_addr <ip>"')

        server_port = int(self.get_localized_module_option('server_port', '8003'))
        self.log.info('server_addr: %s server_port: %d',
                      server_addr, server_port)

        cert = self.get_localized_store("crt")
        if cert is not None:
            cert_tmp = tempfile.NamedTemporaryFile()
            cert_tmp.write(cert.encode('utf-8'))
            cert_tmp.flush()
            cert_fname = cert_tmp.name
        else:
            cert_fname = self.get_localized_store('crt_file')

        pkey = self.get_localized_store("key")
        if pkey is not None:
            pkey_tmp = tempfile.NamedTemporaryFile()
            pkey_tmp.write(pkey.encode('utf-8'))
            pkey_tmp.flush()
            pkey_fname = pkey_tmp.name
        else:
            pkey_fname = self.get_localized_module_option('key_file')

        self.enable_auth = self.get_localized_module_option('enable_auth', True)

        if not cert_fname or not pkey_fname:
            raise CannotServe('no certificate configured')
        if not os.path.isfile(cert_fname):
            raise CannotServe('certificate %s does not exist' % cert_fname)
        if not os.path.isfile(pkey_fname):
            raise CannotServe('private key %s does not exist' % pkey_fname)

        # Publish the URI that others may use to access the service we're
        # about to start serving
        addr = self.get_mgr_ip() if server_addr == "::" else server_addr
        self.set_uri(build_url(scheme='https', host=addr, port=server_port, path='/'))

        # Create the HTTPS werkzeug server serving the pecan app
        self.server = make_server(
            host=server_addr,
            port=server_port,
            app=make_app(
                root='restful.api.Root',
                hooks=[ErrorHook()],  # use a callable if pecan >= 0.3.2
            ),
            ssl_context=(cert_fname, pkey_fname),
        )
        sock_fd_flag = fcntl.fcntl(self.server.socket.fileno(), fcntl.F_GETFD)
        if not (sock_fd_flag & fcntl.FD_CLOEXEC):
            self.log.debug("set server socket close-on-exec")
            fcntl.fcntl(self.server.socket.fileno(), fcntl.F_SETFD, sock_fd_flag | fcntl.FD_CLOEXEC)
        if self.stop_server:
            self.log.debug('made server, but stop flag set')
        else:
            self.log.debug('made server, serving forever')
            self.server.serve_forever()

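werkzeug loads the PEM pair when `make_server()` builds its SSL context, so a mismatched certificate and key surfaces as an exception at startup rather than at handshake time. A hypothetical pre-flight check, not part of the original module, that produces the same failure with a clearer origin:

    import ssl

    def check_tls_pair(cert_fname, pkey_fname):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        # raises ssl.SSLError if the key does not match the certificate
        ctx.load_cert_chain(certfile=cert_fname, keyfile=pkey_fname)
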
    def shutdown(self):
        self.log.debug('shutdown enter')
        try:
            self.stop_server = True
            if self.server:
                self.log.debug('calling server.shutdown')
                self.server.shutdown()
                self.log.debug('called server.shutdown')
            self.serve_event.set()
        except Exception:
            self.log.error(str(traceback.format_exc()))
            raise
        self.log.debug('shutdown exit')

    def restart(self):
        try:
            if self.server:
                self.server.shutdown()
            self.serve_event.set()
        except Exception:
            self.log.error(str(traceback.format_exc()))

    def notify(self, notify_type: NotifyType, tag: str):
        try:
            self._notify(notify_type, tag)
        except Exception:
            self.log.error(str(traceback.format_exc()))

    def _notify(self, notify_type: NotifyType, tag):
        if notify_type != NotifyType.command:
            self.log.debug("Unhandled notification type '%s'", notify_type)
            return
        # we can safely skip all the sequential commands
        if tag == 'seq':
            return
        try:
            with self.requests_lock:
                request = next(x for x in self.requests if x.is_running(tag))
            request.finish(tag)
            if request.is_ready():
                request.next()
        except StopIteration:
            # the command was not issued by me
            pass

    def config_notify(self):
        self.enable_auth = self.get_localized_module_option('enable_auth', True)

    def create_self_signed_cert(self):
        # create a key pair
        pkey = crypto.PKey()
        pkey.generate_key(crypto.TYPE_RSA, 2048)

        # create a self-signed cert
        cert = crypto.X509()
        cert.get_subject().O = "IT"
        cert.get_subject().CN = "ceph-restful"
        cert.set_serial_number(int(uuid4()))
        cert.gmtime_adj_notBefore(0)
        cert.gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60)  # ten years
        cert.set_issuer(cert.get_subject())
        cert.set_pubkey(pkey)
        cert.sign(pkey, 'sha512')

        return (
            crypto.dump_certificate(crypto.FILETYPE_PEM, cert),
            crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)
        )

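A quick way to sanity-check the PEM pair this returns, sketched with the same pyOpenSSL API the method itself uses (`module` stands in for a live `Module` instance):

    from OpenSSL import crypto

    cert_pem, pkey_pem = module.create_self_signed_cert()
    x509 = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)
    assert x509.get_subject().CN == 'ceph-restful'
    assert not x509.has_expired()
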
    def handle_command(self, inbuf, command):
        self.log.warning("Handling command: '%s'" % str(command))
        if command['prefix'] == "restful create-key":
            if command['key_name'] not in self.keys:
                key = str(uuid4())
                self.keys[command['key_name']] = key
                self.set_store('keys/' + command['key_name'], key)

            return (
                0,
                self.keys[command['key_name']],
                "",
            )

        elif command['prefix'] == "restful delete-key":
            if command['key_name'] in self.keys:
                del self.keys[command['key_name']]
                self.set_store('keys/' + command['key_name'], None)

            return (
                0,
                "",
                "",
            )

        elif command['prefix'] == "restful list-keys":
            self.refresh_keys()
            return (
                0,
                json.dumps(self.keys, indent=4, sort_keys=True),
                "",
            )

        elif command['prefix'] == "restful create-self-signed-cert":
            cert, pkey = self.create_self_signed_cert()
            self.set_store(self.get_mgr_id() + '/crt', cert.decode('utf-8'))
            self.set_store(self.get_mgr_id() + '/key', pkey.decode('utf-8'))

            self.restart()
            return (
                0,
                "Restarting RESTful API server...",
                ""
            )

        elif command['prefix'] == 'restful restart':
            self.restart()
            return (
                0,
                "Restarting RESTful API server...",
                ""
            )

        else:
            return (
                -errno.EINVAL,
                "",
                "Command not found '{0}'".format(command['prefix'])
            )

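The prefix chain above could equally be written as a dispatch table; a sketch of that design (the `_cmd_*` helper names are hypothetical, not part of the module):

    handlers = {
        'restful create-key': self._cmd_create_key,
        'restful delete-key': self._cmd_delete_key,
        'restful list-keys': self._cmd_list_keys,
        'restful create-self-signed-cert': self._cmd_create_cert,
        'restful restart': self._cmd_restart,
    }
    handler = handlers.get(command['prefix'])
    if handler is None:
        return -errno.EINVAL, "", "Command not found '{0}'".format(command['prefix'])
    return handler(command)
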
    def get_doc_api(self, root, prefix=''):
        doc = {}
        for _obj in dir(root):
            obj = getattr(root, _obj)

            if isinstance(obj, RestController):
                doc.update(self.get_doc_api(obj, prefix + '/' + _obj))

        if getattr(root, '_lookup', None) and isinstance(root._lookup('0')[0], RestController):
            doc.update(self.get_doc_api(root._lookup('0')[0], prefix + '/<arg>'))

        prefix = prefix or '/'

        doc[prefix] = {}
        for method in 'get', 'post', 'patch', 'delete':
            if getattr(root, method, None):
                doc[prefix][method.upper()] = inspect.getdoc(getattr(root, method)).split('\n')

        if len(doc[prefix]) == 0:
            del doc[prefix]

        return doc

    def get_mons(self):
        mon_map_mons = self.get('mon_map')['mons']
        mon_status = json.loads(self.get('mon_status')['json'])

        # Add more information
        for mon in mon_map_mons:
            mon['in_quorum'] = mon['rank'] in mon_status['quorum']
            mon['server'] = self.get_metadata("mon", mon['name'])['hostname']
            mon['leader'] = mon['rank'] == mon_status['quorum'][0]

        return mon_map_mons

    def get_osd_pools(self):
        osds = dict(map(lambda x: (x['osd'], []), self.get('osd_map')['osds']))
        pools = dict(map(lambda x: (x['pool'], x), self.get('osd_map')['pools']))
        crush = self.get('osd_map_crush')
        crush_rules = crush['rules']

        osds_by_pool = {}
        for pool_id, pool in pools.items():
            pool_osds = None
            for rule in [r for r in crush_rules if r['rule_id'] == pool['crush_rule']]:
                pool_osds = common.crush_rule_osds(crush['buckets'], rule)

            osds_by_pool[pool_id] = pool_osds

        for pool_id in pools.keys():
            for in_pool_id in osds_by_pool[pool_id]:
                osds[in_pool_id].append(pool_id)

        return osds

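Despite its name, `get_osd_pools()` returns the inverted mapping: OSD id to the list of pools it serves. Reduced to its core, with made-up ids:

    osds_by_pool = {1: [0, 2], 2: [0, 1]}   # pool id -> OSD ids (resolved via CRUSH)
    pools_by_osd = {0: [], 1: [], 2: []}    # seeded from the OSD map
    for pool_id, osd_ids in osds_by_pool.items():
        for osd_id in osd_ids:
            pools_by_osd[osd_id].append(pool_id)
    # pools_by_osd == {0: [1, 2], 1: [2], 2: [1]}
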
    def get_osds(self, pool_id=None, ids=None):
        # Get data
        osd_map = self.get('osd_map')
        osd_metadata = self.get('osd_metadata')

        # Update the data with the additional info from the osd map
        osds = osd_map['osds']

        # Filter by osd ids
        if ids is not None:
            osds = [x for x in osds if str(x['osd']) in ids]

        # Get list of pools per osd node
        pools_map = self.get_osd_pools()

        # map osd IDs to reweight
        reweight_map = dict([
            (x.get('id'), x.get('reweight', None))
            for x in self.get('osd_map_tree')['nodes']
        ])

        # Build OSD data objects
        for osd in osds:
            osd['pools'] = pools_map[osd['osd']]
            osd['server'] = osd_metadata.get(str(osd['osd']), {}).get('hostname', None)

            osd['reweight'] = reweight_map.get(osd['osd'], 0.0)

            if osd['up']:
                osd['valid_commands'] = common.OSD_IMPLEMENTED_COMMANDS
            else:
                osd['valid_commands'] = []

        # Filter by pool
        if pool_id:
            pool_id = int(pool_id)
            osds = [x for x in osds if pool_id in x['pools']]

        return osds

    def get_osd_by_id(self, osd_id):
        osd = [x for x in self.get('osd_map')['osds']
               if x['osd'] == osd_id]

        if len(osd) != 1:
            return None

        return osd[0]

    def get_pool_by_id(self, pool_id):
        pool = [x for x in self.get('osd_map')['pools']
                if x['pool'] == pool_id]

        if len(pool) != 1:
            return None

        return pool[0]

    def submit_request(self, _request, **kwargs):
        with self.requests_lock:
            request = CommandsRequest(_request)
            self.requests.append(request)
            if len(self.requests) > self.max_requests:
                # Prefer to evict a finished request; fall back to the
                # oldest (index 0) unfinished one.
                req_to_trim = 0
                for i, req in enumerate(self.requests):
                    if req.is_finished():
                        self.log.error("Trimmed a finished request: the maximum number of requests was exceeded")
                        req_to_trim = i
                        break
                else:
                    self.log.error("Trimmed the oldest unfinished request: the maximum number of requests was exceeded")
                self.requests.pop(req_to_trim)
        if kwargs.get('wait', 0):
            while not request.is_finished():
                time.sleep(0.001)
        return request

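The eviction policy in isolation, as a sketch: drop the first finished request if one exists, otherwise the oldest entry.

    def trim(requests, max_requests):
        if len(requests) > max_requests:
            idx = next((i for i, r in enumerate(requests) if r.is_finished()), 0)
            requests.pop(idx)   # a finished request if any, else the oldest
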
    def run_command(self, command):
        # tag with 'seq' so that we can ignore these in the notify function
        result = CommandResult('seq')

        self.send_command(result, 'mon', '', json.dumps(command), 'seq')
        return result.wait()
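The tag is what ties asynchronous mon commands back to their requests: `_notify()` matches incoming completions against `request.is_running(tag)`, and skips the `'seq'` tag because `run_command()` already blocks on `result.wait()`. A sketch of issuing a tagged command the way `CommandsRequest` would (the tag value is arbitrary):

    result = CommandResult('my-tag')
    self.send_command(result, 'mon', '', json.dumps({'prefix': 'osd stat', 'format': 'json'}), 'my-tag')
    # ceph-mgr later calls notify(NotifyType.command, 'my-tag') when the command completes
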
@@ -192,7 +192,6 @@ if [[ "$(get_cmake_variable WITH_MGR_DASHBOARD_FRONTEND)" != "ON" ]] ||
    debug echo "ceph-mgr dashboard not built - disabling."
    with_mgr_dashboard=false
fi
with_mgr_restful=false

kstore_path=
declare -a block_devs

@@ -205,7 +204,6 @@ VSTART_SEC="client.vstart.sh"

MON_ADDR=""
DASH_URLS=""
RESTFUL_URLS=""

conf_fn="$CEPH_CONF_PATH/ceph.conf"
keyring_fn="$CEPH_CONF_PATH/keyring"

@@ -558,9 +556,6 @@ case $1 in
    --without-dashboard)
        with_mgr_dashboard=false
        ;;
    --with-restful)
        with_mgr_restful=true
        ;;
    --seastore-device-size)
        seastore_size="$2"
        shift

@@ -782,9 +777,6 @@ prepare_conf() {
    if $with_mgr_dashboard; then
        mgr_modules+=" dashboard"
    fi
    if $with_mgr_restful; then
        mgr_modules+=" restful"
    fi

    local msgr_conf=''
    if [ $msgr -eq 21 ]; then

@@ -1010,7 +1002,7 @@ EOF
; see src/vstart.sh for more info
public bind addr =
EOF
        fi
    fi
}

write_logrotate_conf() {
@@ -1254,22 +1246,6 @@ EOF
    fi
}

create_mgr_restful_secret() {
    while ! ceph_adm -h | grep -c -q ^restful ; do
        debug echo 'waiting for mgr restful module to start'
        sleep 1
    done
    local secret_file
    if ceph_adm restful create-self-signed-cert > /dev/null; then
        secret_file=`mktemp`
        ceph_adm restful create-key admin -o $secret_file
        RESTFUL_SECRET=`cat $secret_file`
        rm $secret_file
    else
        debug echo MGR Restful is not working, perhaps the package is not installed?
    fi
}

start_mgr() {
    local mgr=0
    local ssl=${DASHBOARD_SSL:-1}
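Once vstart has run `create_mgr_restful_secret`, the API can be exercised with the `admin` key it captured in `RESTFUL_SECRET`. A hypothetical client call (host, port, endpoint, and secret are placeholders; `verify=False` is tolerable only because a dev cluster uses the self-signed certificate created above):

    import requests

    resp = requests.get('https://localhost:8003/mon',
                        auth=('admin', '<RESTFUL_SECRET>'),  # key name / key value
                        verify=False)                        # dev-only: self-signed cert
    resp.raise_for_status()
    print(resp.json())
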
@@ -1309,15 +1285,7 @@ EOF
            MGR_PORT=$(($MGR_PORT + 1000))
            ceph_adm config set mgr mgr/prometheus/$name/server_port $PROMETHEUS_PORT --force
            PROMETHEUS_PORT=$(($PROMETHEUS_PORT + 1000))

            ceph_adm config set mgr mgr/restful/$name/server_port $MGR_PORT --force
            if [ $mgr -eq 1 ]; then
                RESTFUL_URLS="https://$IP:$MGR_PORT"
            else
                RESTFUL_URLS+=", https://$IP:$MGR_PORT"
            fi
            MGR_PORT=$(($MGR_PORT + 1000))
        fi
    fi

    debug echo "Starting mgr.${name}"
    run 'mgr' $name $CEPH_BIN/ceph-mgr -i $name $ARGS

@@ -1327,7 +1295,7 @@ EOF
        debug echo 'waiting for mgr to become available'
        sleep 1
    done

    if [ "$new" -eq 1 ]; then
        # setting login credentials for dashboard
        if $with_mgr_dashboard; then

@@ -1353,9 +1321,6 @@ EOF
            ceph_adm dashboard nvmeof-gateway-add -i <(echo "${NVMEOF_GW}") "${NVMEOF_GW/:/_}"
        fi
    fi
    if $with_mgr_restful; then
        create_mgr_restful_secret
    fi
fi

if [ "$cephadm" -eq 1 ]; then

@@ -2044,12 +2009,6 @@ if [ "$new" -eq 1 ]; then
    cat <<EOF
dashboard urls: $DASH_URLS
  w/ user/pass: admin / admin
EOF
    fi
    if $with_mgr_restful; then
        cat <<EOF
restful urls: $RESTFUL_URLS
  w/ user/pass: admin / $RESTFUL_SECRET
EOF
    fi
fi