mirror of https://github.com/dynup/kpatch
commit 1717e17290

README.md
@@ -1,12 +1,12 @@
 kpatch: dynamic kernel patching
 ===============================

-kpatch is a Linux dynamic kernel patching tool which allows you to patch a
-running kernel without rebooting or restarting any processes. It enables
-sysadmins to apply critical security patches to the kernel immediately, without
-having to wait for long-running tasks to complete, users to log off, or
-for scheduled reboot windows. It gives more control over uptime without
-sacrificing security or stability.
+kpatch is a Linux dynamic kernel patching infrastructure which allows you to
+patch a running kernel without rebooting or restarting any processes. It
+enables sysadmins to apply critical security patches to the kernel immediately,
+without having to wait for long-running tasks to complete, for users to log
+off, or for scheduled reboot windows. It gives more control over uptime
+without sacrificing security or stability.

 kpatch is currently in active development. For now, it should _not_ be used
 in production environments.
@@ -205,11 +205,18 @@ ability to arbitrarily modify the kernel, with or without kpatch.

 **Q. How can I detect if somebody has patched the kernel?**

-We hope to create a new kernel TAINT flag which will get set whenever a patch
-module is loaded. We are currently using the `TAINT_USER` flag.
+When a patch module is loaded, the `TAINT_USER` flag is set. To test for it,
+`cat /proc/sys/kernel/tainted` and check to see if the value of 64 has been
+OR'ed in.

-Also, many distros ship with cryptographically signed kernel modules, and will
-taint the kernel anyway if you load an unsigned module.
+Eventually we hope to have a dedicated `TAINT_KPATCH` flag instead.
+
+Note that the `TAINT_OOT_MODULE` flag (4096) will also be set, since the patch
+module is built outside the Linux kernel source tree.
+
+If your patch module is unsigned, the `TAINT_FORCED_MODULE` flag (2) will also
+be set. Starting with Linux 3.15, this will be changed to the more specific
+`TAINT_UNSIGNED_MODULE` (8192).

 **Q. Will it destabilize my system?**
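As a concrete illustration of the check described in the new FAQ text, here is a minimal userspace sketch that reads /proc/sys/kernel/tainted and tests the bit values quoted above. It is not part of kpatch; it only demonstrates the bit test the FAQ describes.

#include <stdio.h>

int main(void)
{
        unsigned long tainted;
        FILE *f = fopen("/proc/sys/kernel/tainted", "r");

        if (!f || fscanf(f, "%lu", &tainted) != 1) {
                perror("/proc/sys/kernel/tainted");
                return 1;
        }
        fclose(f);

        /* bit values are the ones quoted in the FAQ above */
        printf("TAINT_USER (64): %s\n", tainted & 64 ? "set" : "clear");
        printf("TAINT_OOT_MODULE (4096): %s\n", tainted & 4096 ? "set" : "clear");
        printf("TAINT_FORCED_MODULE (2): %s\n", tainted & 2 ? "set" : "clear");
        printf("TAINT_UNSIGNED_MODULE (8192): %s\n", tainted & 8192 ? "set" : "clear");
        return 0;
}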
@@ -13,24 +13,22 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA,
- * 02110-1301, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */

-/* Contains the code for the core kpatch module. Each patch module registers
- * with this module to redirect old functions to new functions.
+/*
+ * kpatch core module
  *
- * Each patch module can contain one or more new functions. This information
- * is contained in the .patches section of the patch module. For each function
- * patched by the module we must:
+ * Patch modules register with this module to redirect old functions to new
+ * functions.
+ *
+ * For each function patched by the module we must:
  * - Call stop_machine
- * - Ensure that no execution thread is currently in the old function (or has
- *   it in the call stack)
- * - Add the new function address to the kpatch_funcs table
+ * - Ensure that no task has the old function in its call stack
+ * - Add the new function address to kpatch_func_hash
  *
  * After that, each call to the old function calls into kpatch_ftrace_handler()
- * which finds the new function in the kpatch_funcs table and updates the
+ * which finds the new function in kpatch_func_hash table and updates the
  * return instruction pointer so that ftrace will return to the new function.
  */
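To make the mechanism described in the rewritten header comment concrete: on x86, an ftrace handler registered with FTRACE_OPS_FL_SAVE_REGS can redirect execution by rewriting the saved instruction pointer. The sketch below shows only that technique; it is not the real kpatch_ftrace_handler(), and old_function_ip/new_function_ip are placeholders for addresses that kpatch actually looks up in kpatch_func_hash.

#include <linux/ftrace.h>

/* placeholder addresses; kpatch resolves these per patched function */
static unsigned long old_function_ip;
static unsigned long new_function_ip;

static void notrace redirect_handler(unsigned long ip, unsigned long parent_ip,
                                     struct ftrace_ops *fops,
                                     struct pt_regs *regs)
{
        /*
         * ftrace calls this on entry to the traced function. Pointing the
         * saved instruction pointer at the replacement makes the trampoline
         * "return" into the new function instead of the old one.
         */
        if (ip == old_function_ip)
                regs->ip = new_function_ip;
}

static struct ftrace_ops redirect_ops = {
        .func   = redirect_handler,
        .flags  = FTRACE_OPS_FL_SAVE_REGS,      /* pt_regs is only passed with SAVE_REGS */
};

Registering it would pair ftrace_set_filter_ip() on the old function's address with register_ftrace_function(&redirect_ops), which is roughly what the kpatch_register() hunks further down do.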
@@ -47,9 +45,9 @@
 #include "kpatch.h"

 #define KPATCH_HASH_BITS 8
-DEFINE_HASHTABLE(kpatch_func_hash, KPATCH_HASH_BITS);
+static DEFINE_HASHTABLE(kpatch_func_hash, KPATCH_HASH_BITS);

-DEFINE_SEMAPHORE(kpatch_mutex);
+static DEFINE_SEMAPHORE(kpatch_mutex);

 static int kpatch_num_registered;
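kpatch_func_hash above is a fixed-size <linux/hashtable.h> hash table keyed by the old function's address. As a hedged sketch of that pattern (the entry type and helper names here are illustrative, not kpatch's actual struct kpatch_func layout):

#include <linux/hashtable.h>

#define EXAMPLE_HASH_BITS 8
static DEFINE_HASHTABLE(example_func_hash, EXAMPLE_HASH_BITS);

/* illustrative entry; the real struct kpatch_func carries more state */
struct example_func {
        unsigned long old_addr;         /* address of the original function */
        unsigned long new_addr;         /* address of the replacement */
        struct hlist_node node;
};

static void example_add_func(struct example_func *func)
{
        /* bucket is chosen by hashing the old function's address */
        hash_add(example_func_hash, &func->node, func->old_addr);
}

static struct example_func *example_find_func(unsigned long ip)
{
        struct example_func *f;

        /* walk only the bucket that ip hashes to */
        hash_for_each_possible(example_func_hash, f, node, ip)
                if (f->old_addr == ip)
                        return f;
        return NULL;
}

With the ftrace handler running on every call to a patched function, a cheap per-ip bucket lookup like this is what keeps the redirection fast.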
@@ -143,8 +141,8 @@ static struct kpatch_func *kpatch_get_prev_func(struct kpatch_func *f,
        return NULL;
 }

-void kpatch_backtrace_address_verify(void *data, unsigned long address,
-                                     int reliable)
+static void kpatch_backtrace_address_verify(void *data, unsigned long address,
+                                            int reliable)
 {
        struct kpatch_backtrace_args *args = data;
        struct kpatch_module *kpmod = args->kpmod;
@@ -170,8 +168,8 @@ void kpatch_backtrace_address_verify(void *data, unsigned long address,
        }

        if (address >= func_addr && address < func_addr + func_size) {
-               pr_err("activeness safety check failed for function "
-                      "at address 0x%lx\n", func_addr);
+               pr_err("activeness safety check failed for function at address 0x%lx\n",
+                      func_addr);
                args->ret = -EBUSY;
                return;
        }
@@ -183,10 +181,10 @@ static int kpatch_backtrace_stack(void *data, char *name)
        return 0;
 }

-struct stacktrace_ops kpatch_backtrace_ops = {
+static const struct stacktrace_ops kpatch_backtrace_ops = {
        .address        = kpatch_backtrace_address_verify,
        .stack          = kpatch_backtrace_stack,
-       .walk_stack     = print_context_stack_bp,
+       .walk_stack     = print_context_stack_bp,
 };

 /*
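kpatch_backtrace_ops above supplies the callbacks for the activeness safety check named in the header comment: every task's stack is walked, and any return address inside a function about to be patched aborts the operation with -EBUSY. A hedged sketch of that pattern on the pre-4.9 x86 dump_trace() API follows; the structure is simplified and the names are illustrative, not the literal kpatch code.

#include <linux/sched.h>
#include <asm/stacktrace.h>

struct backtrace_args {
        unsigned long func_addr;        /* old function being replaced */
        unsigned long func_size;
        int ret;
};

/* called for every address found on a task's stack */
static void verify_address(void *data, unsigned long address, int reliable)
{
        struct backtrace_args *args = data;

        if (address >= args->func_addr &&
            address < args->func_addr + args->func_size)
                args->ret = -EBUSY;     /* task is inside the old function */
}

static int verify_stack(void *data, char *name)
{
        return 0;
}

static const struct stacktrace_ops verify_ops = {
        .address        = verify_address,
        .stack          = verify_stack,
        .walk_stack     = print_context_stack_bp,
};

/* returns 0 if no task has the old function in its call stack */
int verify_activeness_safety(unsigned long func_addr, unsigned long func_size)
{
        struct backtrace_args args = {
                .func_addr = func_addr,
                .func_size = func_size,
        };
        struct task_struct *g, *t;

        /* assumes the task list cannot change under us; kpatch gets that
         * guarantee by running the check inside stop_machine() */
        do_each_thread(g, t) {
                dump_trace(t, NULL, NULL, 0, &verify_ops, &args);
                if (args.ret)
                        break;
        } while_each_thread(g, t);

        return args.ret;
}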
@@ -292,9 +290,9 @@ static int kpatch_remove_patch(void *data)
  * function, the last one to register wins, as it'll be first in the hash
  * bucket.
  */
-void notrace kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-                                   struct ftrace_ops *fops,
-                                   struct pt_regs *regs)
+static void notrace
+kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+                      struct ftrace_ops *fops, struct pt_regs *regs)
 {
        struct kpatch_func *func;
        int state;
@@ -409,7 +407,7 @@ int kpatch_register(struct kpatch_module *kpmod)
                }
        }

-       /* Register the ftrace trampoline if it hasn't been done already. */
+       /* Register the ftrace handler if it hasn't been done already. */
        if (!kpatch_num_registered) {
                ret = register_ftrace_function(&kpatch_ftrace_ops);
                if (ret) {
@@ -426,7 +424,7 @@ int kpatch_register(struct kpatch_module *kpmod)

        /*
         * Idle the CPUs, verify activeness safety, and atomically make the new
-        * functions visible to the trampoline.
+        * functions visible to the ftrace handler.
         */
        ret = stop_machine(kpatch_apply_patch, kpmod, NULL);
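The stop_machine() call above is what makes the switch atomic: all other CPUs are parked in a known-safe state, the activeness safety check runs, and only then do the new functions become visible to the ftrace handler. A hedged sketch of that pattern (simplified; the real kpatch_apply_patch() also walks the hash table and handles the module lifecycle):

#include <linux/types.h>
#include <linux/stop_machine.h>

/* from the activeness-safety sketch earlier in this diff */
int verify_activeness_safety(unsigned long func_addr, unsigned long func_size);

/* illustrative state shared with the stop_machine callback */
struct apply_ctx {
        unsigned long old_addr;
        unsigned long old_size;
        bool applied;
};

/* runs while every other CPU spins with interrupts disabled */
static int apply_patch(void *data)
{
        struct apply_ctx *ctx = data;
        int ret;

        /* no task may be running inside the function we are replacing */
        ret = verify_activeness_safety(ctx->old_addr, ctx->old_size);
        if (ret)
                return ret;

        /* flip the switch; from here on the ftrace handler redirects */
        ctx->applied = true;
        return 0;
}

static int do_apply(struct apply_ctx *ctx)
{
        /* NULL cpumask: run the callback on any one CPU, stop the rest */
        return stop_machine(apply_patch, ctx, NULL);
}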
@@ -15,9 +15,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA,
- * 02110-1301, USA.
+ * along with this program; if not, see <https://www.gnu.org/licenses/>.
  *
  * Contains the API for the core kpatch module used by the patch modules
  */