Merge pull request #137 from jpoimboe/unpatch-safety-fix

fix activeness safety check when unpatching
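The activeness safety check walks every task's stack and refuses to proceed if
any to-be-patched function is found there. Before this change it always
compared stack addresses against old_addr/old_size, which is only right when
patching an unpatched function: once a function has been patched, the code a
task can actually be running is the replacement at new_addr. Unpatching (or
repatching) could therefore pass the check while a task was still executing
inside the new function. The check now asks the global hash table which
version is live and verifies against that range. In outline (simplified from
the new kpatch_backtrace_address_verify() below):

	active_func = kpatch_get_func(func->old_addr);
	if (!active_func) {
		/* patching an unpatched function: original code is live */
		func_addr = func->old_addr;
		func_size = func->old_size;
	} else {
		/* repatching or unpatching: the replacement code is live */
		func_addr = active_func->new_addr;
		func_size = active_func->new_size;
	}
	/* -EBUSY if any stack address lands in [func_addr, func_addr + func_size) */

To give the check access to this state, the register/unregister API now takes
a single struct kpatch_module handle instead of loose funcs/num_funcs
arguments.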
Committed by Seth Jennings on 2014-04-24 17:06:40 -05:00 (commit 32d6609c19)
5 changed files with 235 additions and 204 deletions

View File

@@ -53,8 +53,8 @@ DEFINE_SEMAPHORE(kpatch_mutex);
static int kpatch_num_registered;
struct kpatch_backtrace_args {
struct kpatch_func *funcs;
int num_funcs, ret;
struct kpatch_module *kpmod;
int ret;
};
enum {
@@ -84,140 +84,6 @@ enum {
};
static atomic_t kpatch_operation;
void kpatch_backtrace_address_verify(void *data, unsigned long address,
int reliable)
{
struct kpatch_backtrace_args *args = data;
struct kpatch_func *funcs = args->funcs;
int i, num_funcs = args->num_funcs;
if (args->ret)
return;
for (i = 0; i < num_funcs; i++) {
struct kpatch_func *func = &funcs[i];
if (address >= func->old_addr &&
address < func->old_addr + func->old_size) {
pr_err("activeness safety check failed for function "
"at address 0x%lx\n", func->old_addr);
args->ret = -EBUSY;
return;
}
}
}
static int kpatch_backtrace_stack(void *data, char *name)
{
return 0;
}
struct stacktrace_ops kpatch_backtrace_ops = {
.address = kpatch_backtrace_address_verify,
.stack = kpatch_backtrace_stack,
.walk_stack = print_context_stack_bp,
};
/*
* Verify activeness safety, i.e. that none of the to-be-patched functions are
* on the stack of any task.
*
* This function is called from stop_machine() context.
*/
static int kpatch_verify_activeness_safety(struct kpatch_func *funcs,
int num_funcs)
{
struct task_struct *g, *t;
int ret = 0;
struct kpatch_backtrace_args args = {
.funcs = funcs,
.num_funcs = num_funcs,
.ret = 0
};
/* Check the stacks of all tasks. */
do_each_thread(g, t) {
dump_trace(t, NULL, NULL, 0, &kpatch_backtrace_ops, &args);
if (args.ret) {
ret = args.ret;
goto out;
}
} while_each_thread(g, t);
out:
return ret;
}
struct kpatch_stop_machine_args {
struct kpatch_func *funcs;
int num_funcs;
};
/* Called from stop_machine */
static int kpatch_apply_patch(void *data)
{
struct kpatch_stop_machine_args *args = data;
struct kpatch_func *funcs = args->funcs;
int num_funcs = args->num_funcs;
int i, ret;
ret = kpatch_verify_activeness_safety(funcs, num_funcs);
if (ret)
goto out;
for (i = 0; i < num_funcs; i++) {
struct kpatch_func *func = &funcs[i];
/* update the global list and go live */
hash_add_rcu(kpatch_func_hash, &func->node, func->old_addr);
}
/* Check if any inconsistent NMI has happened while updating */
ret = kpatch_finish_status(KPATCH_STATUS_SUCCESS);
if (ret == KPATCH_STATUS_FAILURE) {
/* Failed, we have to roll back the patching process */
for (i = 0; i < num_funcs; i++)
hash_del_rcu(&funcs[i].node);
ret = -EBUSY;
} else {
/* Succeeded, clear updating flags */
for (i = 0; i < num_funcs; i++)
funcs[i].updating = false;
ret = 0;
}
out:
return ret;
}
/* Called from stop_machine */
static int kpatch_remove_patch(void *data)
{
struct kpatch_stop_machine_args *args = data;
struct kpatch_func *funcs = args->funcs;
int num_funcs = args->num_funcs;
int ret, i;
ret = kpatch_verify_activeness_safety(funcs, num_funcs);
if (ret)
goto out;
/* Check if any inconsistent NMI has happened while updating */
ret = kpatch_finish_status(KPATCH_STATUS_SUCCESS);
if (ret == KPATCH_STATUS_FAILURE) {
/* Failed, we must keep funcs on hash table */
for (i = 0; i < num_funcs; i++)
funcs[i].updating = false;
ret = -EBUSY;
} else {
/* Succeeded, remove all updating funcs from hash table */
for (i = 0; i < num_funcs; i++)
hash_del_rcu(&funcs[i].node);
ret = 0;
}
out:
return ret;
}
static struct kpatch_func *kpatch_get_func(unsigned long ip)
{
struct kpatch_func *f;
@@ -241,6 +107,152 @@ static struct kpatch_func *kpatch_get_committed_func(struct kpatch_func *f,
return NULL;
}
void kpatch_backtrace_address_verify(void *data, unsigned long address,
int reliable)
{
struct kpatch_backtrace_args *args = data;
struct kpatch_module *kpmod = args->kpmod;
int i;
if (args->ret)
return;
for (i = 0; i < kpmod->num_funcs; i++) {
unsigned long func_addr, func_size;
struct kpatch_func *func, *active_func;
func = &kpmod->funcs[i];
active_func = kpatch_get_func(func->old_addr);
if (!active_func) {
/* patching an unpatched func */
func_addr = func->old_addr;
func_size = func->old_size;
} else {
/* repatching or unpatching */
func_addr = active_func->new_addr;
func_size = active_func->new_size;
}
if (address >= func_addr && address < func_addr + func_size) {
pr_err("activeness safety check failed for function "
"at address 0x%lx\n", func_addr);
args->ret = -EBUSY;
return;
}
}
}
static int kpatch_backtrace_stack(void *data, char *name)
{
return 0;
}
struct stacktrace_ops kpatch_backtrace_ops = {
.address = kpatch_backtrace_address_verify,
.stack = kpatch_backtrace_stack,
.walk_stack = print_context_stack_bp,
};
/*
* Verify activeness safety, i.e. that none of the to-be-patched functions are
* on the stack of any task.
*
* This function is called from stop_machine() context.
*/
static int kpatch_verify_activeness_safety(struct kpatch_module *kpmod)
{
struct task_struct *g, *t;
int ret = 0;
struct kpatch_backtrace_args args = {
.kpmod = kpmod,
.ret = 0
};
/* Check the stacks of all tasks. */
do_each_thread(g, t) {
dump_trace(t, NULL, NULL, 0, &kpatch_backtrace_ops, &args);
if (args.ret) {
ret = args.ret;
goto out;
}
} while_each_thread(g, t);
out:
return ret;
}
/* Called from stop_machine */
static int kpatch_apply_patch(void *data)
{
struct kpatch_module *kpmod = data;
struct kpatch_func *funcs = kpmod->funcs;
int num_funcs = kpmod->num_funcs;
int i, ret;
ret = kpatch_verify_activeness_safety(kpmod);
if (ret)
return ret;
for (i = 0; i < num_funcs; i++) {
struct kpatch_func *func = &funcs[i];
/* update the global list and go live */
hash_add_rcu(kpatch_func_hash, &func->node, func->old_addr);
}
/* Check if any inconsistent NMI has happened while updating */
ret = kpatch_finish_status(KPATCH_STATUS_SUCCESS);
if (ret == KPATCH_STATUS_FAILURE) {
/* Failed, we have to roll back the patching process */
for (i = 0; i < num_funcs; i++)
hash_del_rcu(&funcs[i].node);
return -EBUSY;
}
/* Succeeded, clear updating flags */
for (i = 0; i < num_funcs; i++)
funcs[i].updating = false;
return 0;
}
/* Called from stop_machine */
static int kpatch_remove_patch(void *data)
{
struct kpatch_module *kpmod = data;
struct kpatch_func *funcs = kpmod->funcs;
int num_funcs = kpmod->num_funcs;
int ret, i;
ret = kpatch_verify_activeness_safety(kpmod);
if (ret)
return ret;
/* Check if any inconsistent NMI has happened while updating */
ret = kpatch_finish_status(KPATCH_STATUS_SUCCESS);
if (ret == KPATCH_STATUS_FAILURE) {
/* Failed, we must keep funcs on hash table */
for (i = 0; i < num_funcs; i++)
funcs[i].updating = false;
return -EBUSY;
}
/* Succeeded, remove all updating funcs from hash table */
for (i = 0; i < num_funcs; i++)
hash_del_rcu(&funcs[i].node);
return 0;
}
/*
* This is where the magic happens. Update regs->ip to tell ftrace to return
* to the new function.
*
* If there are multiple patch modules that have registered to patch the same
* function, the last one to register wins, as it'll be first in the hash
* bucket.
*/
void notrace kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *fops,
struct pt_regs *regs)
@@ -248,14 +260,6 @@ void notrace kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kpatch_func *func;
int ret, op;
/*
* This is where the magic happens. Update regs->ip to tell ftrace to
* return to the new function.
*
* If there are multiple patch modules that have registered to patch
* the same function, the last one to register wins, as it'll be first
* in the hash bucket.
*/
preempt_disable_notrace();
retry:
func = kpatch_get_func(ip);
@@ -340,21 +344,20 @@ static int kpatch_remove_funcs_from_filter(struct kpatch_func *funcs,
return ret;
}
int kpatch_register(struct module *mod, struct kpatch_func *funcs,
int num_funcs)
int kpatch_register(struct kpatch_module *kpmod)
{
int ret, ret2, i;
struct kpatch_stop_machine_args args = {
.funcs = funcs,
.num_funcs = num_funcs,
};
int ret, i;
struct kpatch_func *funcs = kpmod->funcs;
int num_funcs = kpmod->num_funcs;
if (!kpmod->mod || !funcs || !num_funcs)
return -EINVAL;
down(&kpatch_mutex);
for (i = 0; i < num_funcs; i++) {
struct kpatch_func *func = &funcs[i];
func->mod = mod;
func->updating = true;
/*
@@ -371,20 +374,19 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
pr_err("can't set ftrace filter at address 0x%lx\n",
func->old_addr);
num_funcs = i;
goto out;
goto err_rollback;
}
}
/* Register the ftrace trampoline if it hasn't been done already. */
if (!kpatch_num_registered++) {
if (!kpatch_num_registered) {
ret = register_ftrace_function(&kpatch_ftrace_ops);
if (ret) {
pr_err("can't register ftrace handler\n");
/* For the next time, the counter should be unrolled */
--kpatch_num_registered;
goto out;
goto err_rollback;
}
}
kpatch_num_registered++;
kpatch_start_status();
/*
@@ -397,48 +399,49 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
* Idle the CPUs, verify activeness safety, and atomically make the new
* functions visible to the trampoline.
*/
ret = stop_machine(kpatch_apply_patch, &args, NULL);
ret = stop_machine(kpatch_apply_patch, kpmod, NULL);
if (ret) {
if (!--kpatch_num_registered) {
ret2 = unregister_ftrace_function(&kpatch_ftrace_ops);
if (ret2)
pr_err("ftrace unregister failed (%d)\n", ret2);
}
/*
* This synchronize_rcu is to ensure any other kpatch_get_func
* user exits the rcu-locked (preempt-disabled) critical section
* and hash_del_rcu() is correctly finished.
*/
synchronize_rcu();
goto err_unregister;
}
out:
/* Rollback the filter if we get any error */
if (ret)
kpatch_remove_funcs_from_filter(funcs, num_funcs);
else {
/* TODO: need TAINT_KPATCH */
pr_notice_once("tainting kernel with TAINT_USER\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
/* TODO: need TAINT_KPATCH */
pr_notice_once("tainting kernel with TAINT_USER\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
pr_notice("loaded patch module \"%s\"\n", mod->name);
}
pr_notice("loaded patch module \"%s\"\n", kpmod->mod->name);
atomic_set(&kpatch_operation, KPATCH_OP_NONE);
up(&kpatch_mutex);
return ret;
return 0;
err_unregister:
atomic_set(&kpatch_operation, KPATCH_OP_NONE);
if (kpatch_num_registered == 1) {
int ret2 = unregister_ftrace_function(&kpatch_ftrace_ops);
if (ret2) {
pr_err("ftrace unregister failed (%d)\n", ret2);
goto err_rollback;
}
}
kpatch_num_registered--;
err_rollback:
kpatch_remove_funcs_from_filter(funcs, num_funcs);
up(&kpatch_mutex);
return ret;
}
EXPORT_SYMBOL(kpatch_register);
int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
int num_funcs)
int kpatch_unregister(struct kpatch_module *kpmod)
{
struct kpatch_func *funcs = kpmod->funcs;
int num_funcs = kpmod->num_funcs;
int i, ret;
struct kpatch_stop_machine_args args = {
.funcs = funcs,
.num_funcs = num_funcs,
};
down(&kpatch_mutex);
@@ -453,17 +456,19 @@ int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
for (i = 0; i < num_funcs; i++)
funcs[i].updating = true;
ret = stop_machine(kpatch_remove_patch, &args, NULL);
ret = stop_machine(kpatch_remove_patch, kpmod, NULL);
if (ret)
goto out;
if (!--kpatch_num_registered) {
if (kpatch_num_registered == 1) {
ret = unregister_ftrace_function(&kpatch_ftrace_ops);
if (ret) {
pr_err("can't unregister ftrace handler\n");
goto out;
}
}
kpatch_num_registered--;
/*
* This synchronize_rcu is to ensure any other kpatch_get_func
* user exits the rcu-locked (preempt-disabled) critical section
@@ -472,8 +477,10 @@ int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
synchronize_rcu();
ret = kpatch_remove_funcs_from_filter(funcs, num_funcs);
if (ret == 0)
pr_notice("unloaded patch module \"%s\"\n", mod->name);
if (ret)
goto out;
pr_notice("unloaded patch module \"%s\"\n", kpmod->mod->name);
out:
atomic_set(&kpatch_operation, KPATCH_OP_NONE);
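
An aside on the mechanism, since the hunk above drops the handler's comment
but truncates its body: the redirection itself is unchanged by this PR. The
kpatch ftrace ops are registered with FTRACE_OPS_FL_SAVE_REGS (not shown in
this diff), so the handler receives pt_regs and can rewrite regs->ip, which is
where ftrace resumes execution. A minimal sketch of that mechanism only; the
real handler also coordinates with in-flight patch/unpatch operations:

	void notrace kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
					   struct ftrace_ops *fops,
					   struct pt_regs *regs)
	{
		struct kpatch_func *func;

		preempt_disable_notrace();
		/* newest registration is first in the hash bucket, so it wins */
		func = kpatch_get_func(ip);
		if (func)
			/* make ftrace "return" into the replacement function */
			regs->ip = func->new_addr;
		preempt_enable_notrace();
	}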

View File

@@ -29,16 +29,20 @@
struct kpatch_func {
unsigned long new_addr;
unsigned long new_size;
unsigned long old_addr;
unsigned long old_size;
struct module *mod;
struct hlist_node node;
bool updating;
};
extern int kpatch_register(struct module *mod, struct kpatch_func *funcs,
int num_funcs);
extern int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
int num_funcs);
struct kpatch_module {
struct module *mod;
struct kpatch_func *funcs;
int num_funcs;
};
extern int kpatch_register(struct kpatch_module *kpmod);
extern int kpatch_unregister(struct kpatch_module *kpmod);
#endif /* _KPATCH_H_ */
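
Callers now hold one long-lived handle per patch module. The generated patch
module in the next file uses it essentially like this (a hedged sketch of the
call sequence, not new API surface):

	static struct kpatch_module kpmod;

	kpmod.mod = THIS_MODULE;
	kpmod.funcs = funcs;		/* array of struct kpatch_func */
	kpmod.num_funcs = nr;
	ret = kpatch_register(&kpmod);
	...
	ret = kpatch_unregister(&kpmod);	/* same handle at unload */

The handle and its funcs array must stay allocated for the patch's lifetime:
the core module links the kpatch_func entries themselves into its hash table.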

View File

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2013 Josh Poimboeuf <jpoimboe@redhat.com>
* Copyright (C) 2013-2014 Josh Poimboeuf <jpoimboe@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -27,35 +27,46 @@
extern char __kpatch_patches, __kpatch_patches_end;
static struct kpatch_func *funcs;
static int num_funcs;
static struct kpatch_module kpmod;
static int __init patch_init(void)
{
struct kpatch_patch *patches;
int i;
int i, ret;
patches = (struct kpatch_patch *)&__kpatch_patches;
num_funcs = (&__kpatch_patches_end - &__kpatch_patches) /
sizeof(*patches);
funcs = kmalloc(num_funcs * sizeof(*funcs), GFP_KERNEL);
if (!funcs)
kpmod.mod = THIS_MODULE;
kpmod.num_funcs = (&__kpatch_patches_end - &__kpatch_patches) /
sizeof(*patches);
kpmod.funcs = kmalloc(kpmod.num_funcs * sizeof(struct kpatch_func),
GFP_KERNEL);
if (!kpmod.funcs)
return -ENOMEM;
for (i = 0; i < num_funcs; i++) {
funcs[i].old_addr = patches[i].old_addr;
funcs[i].old_size = patches[i].old_size;
funcs[i].new_addr = patches[i].new_addr;
for (i = 0; i < kpmod.num_funcs; i++) {
kpmod.funcs[i].old_addr = patches[i].old_addr;
kpmod.funcs[i].old_size = patches[i].old_size;
kpmod.funcs[i].new_addr = patches[i].new_addr;
kpmod.funcs[i].new_size = patches[i].new_size;
}
return kpatch_register(THIS_MODULE, funcs, num_funcs);
ret = kpatch_register(&kpmod);
if (ret)
goto err_free;
return 0;
err_free:
kfree(kpmod.funcs);
return ret;
}
static void __exit patch_exit(void)
{
int ret;
ret = kpatch_unregister(THIS_MODULE, funcs, num_funcs);
ret = kpatch_unregister(&kpmod);
if (ret) {
/*
* TODO: If this happens, we're screwed. We need a way to
@@ -69,7 +80,7 @@ static void __exit patch_exit(void)
panic("kpatch_unregister failed: %d", ret);
}
kfree(funcs);
kfree(kpmod.funcs);
}
module_init(patch_init);

View File

@@ -26,6 +26,7 @@
struct kpatch_patch {
unsigned long new_addr;
unsigned long new_size;
unsigned long old_addr;
unsigned long old_size;
};

View File

@@ -315,10 +315,18 @@ int main(int argc, char **argv)
for_each_sym(&symlist, cur) {
if (cur->action != PATCH)
continue;
patches_data[i].old_addr = cur->vm_addr;
patches_data[i].old_size = cur->vm_len;
patches_data[i].new_size = cur->sym.st_size;
/*
* Use a relocation to set patches_data[i].new_addr at module
* loading time.
*/
relas_data[i].r_offset = i * sizeof(struct kpatch_patch);
relas_data[i].r_info = GELF_R_INFO(cur->index, R_X86_64_64);
i++;
}
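
For readers unfamiliar with the relocation trick: R_X86_64_64 is the plain
"symbol value plus addend" relocation, and new_addr is the first field of
struct kpatch_patch, so r_offset = i * sizeof(struct kpatch_patch) points
exactly at patches_data[i].new_addr. At module load time the kernel's
relocation code (apply_relocate_add() on x86-64) effectively does, for each
entry (patches_base standing in for the loaded section address):

	/* illustrative only: S + A, with the addend presumably zero here */
	u64 *loc = (u64 *)(patches_base + rela->r_offset);
	*loc = sym->st_value + rela->r_addend;

so every new_addr is filled in with the final load address of its replacement
function before patch_init() runs.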