/*
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2013-2014 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA,
 * 02110-1301, USA.
 */
|
|
|
|
|
/* Contains the code for the core kpatch module. Each patch module registers
 * with this module to redirect old functions to new functions.
 *
 * Each patch module can contain one or more new functions. This information
 * is contained in the .patches section of the patch module. For each function
 * patched by the module we must:
 * - Call stop_machine
 * - Ensure that no execution thread is currently in the old function (or has
 *   it in the call stack)
 * - Add the new function address to the kpatch_funcs table
 *
 * After that, each call to the old function calls into kpatch_ftrace_handler()
 * which finds the new function in the kpatch_funcs table and updates the
 * return instruction pointer so that ftrace will return to the new function.
 */
|
|
|
|
|
2014-02-11 16:25:48 +00:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2013-01-15 03:46:38 +00:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/stop_machine.h>
|
2014-02-11 16:25:48 +00:00
|
|
|
#include <linux/ftrace.h>
|
2014-03-14 21:41:00 +00:00
|
|
|
#include <linux/hashtable.h>
|
2013-01-15 03:46:38 +00:00
|
|
|
#include <asm/stacktrace.h>
|
|
|
|
#include <asm/cacheflush.h>
|
|
|
|
#include "kpatch.h"
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
#define KPATCH_HASH_BITS 8

/*
 * Global hash of all currently-patched functions, keyed by old function
 * address.  Read lock-free by kpatch_ftrace_handler(); modified only from
 * stop_machine() context (kpatch_apply_patch/kpatch_remove_patch).
 */
DEFINE_HASHTABLE(kpatch_func_hash, KPATCH_HASH_BITS);

/* Serializes kpatch_register() and kpatch_unregister(). */
DEFINE_SEMAPHORE(kpatch_mutex);

/* Count of registrations currently sharing the ftrace trampoline. */
static int kpatch_num_registered;

/*
 * Arguments threaded through dump_trace() to the backtrace callbacks
 * during the activeness safety check.
 */
struct kpatch_backtrace_args {
	struct kpatch_func *funcs;	/* functions being (un)patched */
	int num_funcs, ret;		/* ret: 0, or -EBUSY on conflict */
};
|
|
|
|
|
|
|
|
void kpatch_backtrace_address_verify(void *data, unsigned long address,
|
|
|
|
int reliable)
|
|
|
|
{
|
2014-03-14 21:41:00 +00:00
|
|
|
struct kpatch_backtrace_args *args = data;
|
|
|
|
struct kpatch_func *funcs = args->funcs;
|
|
|
|
int i, num_funcs = args->num_funcs;
|
2013-01-15 03:46:38 +00:00
|
|
|
|
|
|
|
if (args->ret)
|
|
|
|
return;
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
for (i = 0; i < num_funcs; i++) {
|
|
|
|
struct kpatch_func *func = &funcs[i];
|
2013-01-15 03:46:38 +00:00
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
if (address >= func->old_addr &&
|
|
|
|
address < func->old_addr + func->old_size) {
|
|
|
|
printk("kpatch: activeness safety check failed for "
|
|
|
|
"function at address " "'%lx()'\n",
|
|
|
|
func->old_addr);
|
|
|
|
args->ret = -EBUSY;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2013-01-15 03:46:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Stack-boundary callback for dump_trace().  Always returns 0 so the walk
 * continues across stack transitions.
 * NOTE(review): 0 == "keep walking" per x86 stacktrace_ops — confirm
 * against the kernel's dump_trace() implementation.
 */
static int kpatch_backtrace_stack(void *data, char *name)
{
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Callbacks handed to dump_trace() when walking each task's stack during
 * the activeness safety check.
 */
struct stacktrace_ops kpatch_backtrace_ops = {
	.address = kpatch_backtrace_address_verify,
	.stack = kpatch_backtrace_stack,
	.walk_stack = print_context_stack_bp,
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Verify activeness safety, i.e. that none of the to-be-patched functions are
|
|
|
|
* on the stack of any task.
|
|
|
|
*
|
|
|
|
* This function is called from stop_machine() context.
|
|
|
|
*/
|
2014-03-14 21:41:00 +00:00
|
|
|
static int kpatch_verify_activeness_safety(struct kpatch_func *funcs,
|
|
|
|
int num_funcs)
|
2013-01-15 03:46:38 +00:00
|
|
|
{
|
|
|
|
struct task_struct *g, *t;
|
|
|
|
int ret = 0;
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
struct kpatch_backtrace_args args = {
|
2013-01-15 03:46:38 +00:00
|
|
|
.funcs = funcs,
|
2014-03-14 21:41:00 +00:00
|
|
|
.num_funcs = num_funcs,
|
2013-01-15 03:46:38 +00:00
|
|
|
.ret = 0
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Check the stacks of all tasks. */
|
|
|
|
do_each_thread(g, t) {
|
|
|
|
dump_trace(t, NULL, NULL, 0, &kpatch_backtrace_ops, &args);
|
|
|
|
if (args.ret) {
|
|
|
|
ret = args.ret;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
} while_each_thread(g, t);
|
|
|
|
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
/* Argument bundle passed through stop_machine() to the patch callbacks. */
struct kpatch_stop_machine_args {
	struct kpatch_func *funcs;	/* functions to apply or remove */
	int num_funcs;			/* number of entries in funcs */
};
|
|
|
|
|
2013-01-15 03:46:38 +00:00
|
|
|
/* Called from stop_machine */
|
|
|
|
static int kpatch_apply_patch(void *data)
|
|
|
|
{
|
2014-03-14 21:41:00 +00:00
|
|
|
struct kpatch_stop_machine_args *args = data;
|
|
|
|
struct kpatch_func *funcs = args->funcs;
|
|
|
|
int num_funcs = args->num_funcs;
|
|
|
|
int i, ret;
|
2013-01-15 03:46:38 +00:00
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
ret = kpatch_verify_activeness_safety(funcs, num_funcs);
|
2013-01-15 03:46:38 +00:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
for (i = 0; i < num_funcs; i++) {
|
|
|
|
struct kpatch_func *func = &funcs[i];
|
|
|
|
|
|
|
|
/* update the global list and go live */
|
|
|
|
hash_add(kpatch_func_hash, &func->node, func->old_addr);
|
2013-01-15 03:46:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-03-14 18:08:12 +00:00
|
|
|
/* Called from stop_machine */
|
|
|
|
static int kpatch_remove_patch(void *data)
|
|
|
|
{
|
2014-03-14 21:41:00 +00:00
|
|
|
struct kpatch_stop_machine_args *args = data;
|
|
|
|
struct kpatch_func *funcs = args->funcs;
|
|
|
|
int num_funcs = args->num_funcs;
|
|
|
|
int ret, i;
|
2014-03-14 18:08:12 +00:00
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
ret = kpatch_verify_activeness_safety(funcs, num_funcs);
|
2014-03-14 18:08:12 +00:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
for (i = 0; i < num_funcs; i++)
|
|
|
|
hlist_del(&funcs[i].node);
|
2014-03-14 18:08:12 +00:00
|
|
|
|
|
|
|
out:
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
|
2014-03-20 00:16:12 +00:00
|
|
|
/*
 * Ftrace handler invoked at each patched function's entry.  Looks up @ip in
 * kpatch_func_hash and, on a match, rewrites regs->ip to the replacement
 * function.  Marked notrace so the handler itself is never traced.
 */
void notrace kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs)
{
	struct kpatch_func *f;

	/*
	 * This is where the magic happens. Update regs->ip to tell ftrace to
	 * return to the new function.
	 *
	 * If there are multiple patch modules that have registered to patch
	 * the same function, the last one to register wins, as it'll be first
	 * in the hash bucket.
	 */
	preempt_disable_notrace();
	hash_for_each_possible(kpatch_func_hash, f, node, ip) {
		if (f->old_addr == ip) {
			regs->ip = f->new_addr;
			break;
		}
	}
	preempt_enable_notrace();
}
|
|
|
|
|
2013-01-15 03:46:38 +00:00
|
|
|
/*
 * Single shared ftrace_ops for all patched functions.  SAVE_REGS is needed
 * so the handler receives pt_regs and can modify regs->ip.
 */
static struct ftrace_ops kpatch_ftrace_ops __read_mostly = {
	.func = kpatch_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
int kpatch_register(struct module *mod, struct kpatch_func *funcs,
|
|
|
|
int num_funcs)
|
2013-01-15 03:46:38 +00:00
|
|
|
{
|
2014-03-14 21:41:00 +00:00
|
|
|
int ret, ret2, i;
|
|
|
|
struct kpatch_stop_machine_args args = {
|
|
|
|
.funcs = funcs,
|
|
|
|
.num_funcs = num_funcs,
|
|
|
|
};
|
2014-03-13 19:16:06 +00:00
|
|
|
|
2014-03-19 15:01:29 +00:00
|
|
|
down(&kpatch_mutex);
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
for (i = 0; i < num_funcs; i++) {
|
2014-03-17 15:36:11 +00:00
|
|
|
struct kpatch_func *f, *func = &funcs[i];
|
|
|
|
bool found = false;
|
2013-01-19 07:29:35 +00:00
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
func->mod = mod;
|
2013-01-18 18:31:14 +00:00
|
|
|
|
2014-03-17 15:36:11 +00:00
|
|
|
/*
|
|
|
|
* If any other modules have also patched this function, it
|
|
|
|
* already has an ftrace handler.
|
|
|
|
*/
|
|
|
|
hash_for_each_possible(kpatch_func_hash, f, node,
|
|
|
|
func->old_addr) {
|
|
|
|
if (f->old_addr == func->old_addr) {
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (found)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Add an ftrace handler for this function. */
|
2014-03-14 21:41:00 +00:00
|
|
|
ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, func->old_addr,
|
|
|
|
0, 0);
|
2013-01-15 03:46:38 +00:00
|
|
|
if (ret) {
|
2014-03-13 20:08:08 +00:00
|
|
|
printk("kpatch: can't set ftrace filter at address "
|
|
|
|
"0x%lx (%d)\n",
|
2014-03-14 21:41:00 +00:00
|
|
|
func->old_addr, ret);
|
2013-01-15 03:46:38 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Register the ftrace trampoline if it hasn't been done already. */
|
2014-03-19 15:01:29 +00:00
|
|
|
if (!kpatch_num_registered++) {
|
2013-01-15 03:46:38 +00:00
|
|
|
ret = register_ftrace_function(&kpatch_ftrace_ops);
|
|
|
|
if (ret) {
|
|
|
|
printk("kpatch: can't register ftrace function \n");
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Idle the CPUs, verify activeness safety, and atomically make the new
|
|
|
|
* functions visible to the trampoline.
|
|
|
|
*/
|
2014-03-14 21:41:00 +00:00
|
|
|
ret = stop_machine(kpatch_apply_patch, &args, NULL);
|
2013-01-15 03:46:38 +00:00
|
|
|
if (ret) {
|
|
|
|
if (!--kpatch_num_registered) {
|
|
|
|
ret2 = unregister_ftrace_function(&kpatch_ftrace_ops);
|
|
|
|
if (ret2)
|
|
|
|
printk("kpatch: unregister failed (%d)\n",
|
|
|
|
ret2);
|
|
|
|
}
|
|
|
|
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2014-03-14 16:20:10 +00:00
|
|
|
pr_notice("loaded patch module \"%s\"\n", mod->name);
|
|
|
|
|
2013-01-15 03:46:38 +00:00
|
|
|
out:
|
2014-03-19 15:01:29 +00:00
|
|
|
up(&kpatch_mutex);
|
2013-01-15 03:46:38 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(kpatch_register);
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
|
|
|
|
int num_funcs)
|
2013-01-15 03:46:38 +00:00
|
|
|
{
|
2014-03-14 21:41:00 +00:00
|
|
|
int i, ret;
|
|
|
|
struct kpatch_stop_machine_args args = {
|
|
|
|
.funcs = funcs,
|
|
|
|
.num_funcs = num_funcs,
|
|
|
|
};
|
2013-01-15 03:46:38 +00:00
|
|
|
|
2014-03-19 15:01:29 +00:00
|
|
|
down(&kpatch_mutex);
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
ret = stop_machine(kpatch_remove_patch, &args, NULL);
|
2013-01-15 03:46:38 +00:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (!--kpatch_num_registered) {
|
|
|
|
ret = unregister_ftrace_function(&kpatch_ftrace_ops);
|
|
|
|
if (ret) {
|
|
|
|
printk("kpatch: can't unregister ftrace function\n");
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-03-14 21:41:00 +00:00
|
|
|
for (i = 0; i < num_funcs; i++) {
|
2014-03-17 15:36:11 +00:00
|
|
|
struct kpatch_func *f, *func = &funcs[i];
|
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If any other modules have also patched this function, don't
|
|
|
|
* remove its ftrace handler.
|
|
|
|
*/
|
|
|
|
hash_for_each_possible(kpatch_func_hash, f, node,
|
|
|
|
func->old_addr) {
|
|
|
|
if (f->old_addr == func->old_addr) {
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (found)
|
|
|
|
continue;
|
2014-03-14 21:41:00 +00:00
|
|
|
|
2014-03-17 15:36:11 +00:00
|
|
|
/* Remove the ftrace handler for this function. */
|
2014-03-14 18:22:59 +00:00
|
|
|
ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, func->old_addr,
|
2013-01-15 03:46:38 +00:00
|
|
|
1, 0);
|
|
|
|
if (ret) {
|
2014-03-13 20:08:08 +00:00
|
|
|
printk("kpatch: can't remove ftrace filter at address "
|
|
|
|
"0x%lx (%d)\n",
|
2014-03-14 18:22:59 +00:00
|
|
|
func->old_addr, ret);
|
2013-01-15 03:46:38 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-03-14 16:20:10 +00:00
|
|
|
pr_notice("unloaded patch module \"%s\"\n", mod->name);
|
|
|
|
|
2013-01-15 03:46:38 +00:00
|
|
|
out:
|
2014-03-19 15:01:29 +00:00
|
|
|
up(&kpatch_mutex);
|
2013-01-15 03:46:38 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(kpatch_unregister);
|
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|