Merge pull request #69 from jpoimboe/core-cumulative-performance

kmod/core: improve performance for cumulative patching
Seth Jennings 2014-03-19 10:45:10 -05:00
commit e941fd446e
1 changed file with 51 additions and 14 deletions
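The optimization rests on an ordering guarantee: hash_add() inserts at the head of the bucket (it wraps hlist_add_head()), so the most recently registered kpatch_func for a given old_addr is always the first match when walking its bucket. A minimal lookup relying on that ordering might look like this (a sketch, assuming the kpatch_func definition from kmod/core.c; kpatch_find_newest is a hypothetical name, not part of this commit):

    static struct kpatch_func *kpatch_find_newest(unsigned long old_addr)
    {
            struct kpatch_func *f;

            /* first match wins: hash_add() prepends, so the newest entry comes first */
            hash_for_each_possible(kpatch_func_hash, f, node, old_addr) {
                    if (f->old_addr == old_addr)
                            return f;
            }
            return NULL;
    }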


@@ -48,6 +48,8 @@
 #define KPATCH_HASH_BITS 8
 DEFINE_HASHTABLE(kpatch_func_hash, KPATCH_HASH_BITS);
+DEFINE_SEMAPHORE(kpatch_mutex);
+
 static int kpatch_num_registered;

 struct kpatch_backtrace_args {
@@ -139,19 +141,8 @@ static int kpatch_apply_patch(void *data)
 		goto out;

 	for (i = 0; i < num_funcs; i++) {
-		struct kpatch_func *f;
 		struct kpatch_func *func = &funcs[i];

-		/* do any needed incremental patching */
-		/* TODO: performance */
-		hash_for_each_possible(kpatch_func_hash, f, node,
-				       func->old_addr) {
-			if (f->old_addr == func->old_addr) {
-				func->old_addr = f->new_addr;
-				ref_module(func->mod, f->mod);
-			}
-		}
-
 		/* update the global list and go live */
 		hash_add(kpatch_func_hash, &func->node, func->old_addr);
 	}
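The deleted loop was the performance problem named in the commit title: for every function in an incoming patch, kpatch_apply_patch() walked the hash bucket inside stop_machine() and, on a match, re-keyed the new entry onto the previous patch's replacement (func->old_addr = f->new_addr) and pinned the older patch module with ref_module(). With bucket ordering now doing the arbitration in the ftrace handler, entries for the same old_addr simply coexist, apply time drops to one hash_add() per function, and the ref_module() cross-dependency between patch modules goes away with it.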
@@ -185,6 +176,14 @@ void kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 {
 	struct kpatch_func *f;

+	/*
+	 * This is where the magic happens. Update regs->ip to tell ftrace to
+	 * return to the new function.
+	 *
+	 * If there are multiple patch modules that have registered to patch
+	 * the same function, the last one to register wins, as it'll be first
+	 * in the hash bucket.
+	 */
 	preempt_disable_notrace();
 	hash_for_each_possible(kpatch_func_hash, f, node, ip) {
 		if (f->old_addr == ip) {
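The hunk cuts off inside the lookup. Pieced together from the visible lines and the comment above, the whole handler plausibly reads as follows (a sketch; the tail with regs->ip and preempt_enable_notrace(), and the new_addr field used elsewhere in this file, are inferred rather than shown in this diff):

    void kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                               struct ftrace_ops *op, struct pt_regs *regs)
    {
            struct kpatch_func *f;

            preempt_disable_notrace();
            hash_for_each_possible(kpatch_func_hash, f, node, ip) {
                    if (f->old_addr == ip) {
                            /* first match is the newest registration; redirect and stop */
                            regs->ip = f->new_addr;
                            break;
                    }
            }
            preempt_enable_notrace();
    }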
@@ -209,11 +208,29 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 		.num_funcs = num_funcs,
 	};

+	down(&kpatch_mutex);
+
 	for (i = 0; i < num_funcs; i++) {
-		struct kpatch_func *func = &funcs[i];
+		struct kpatch_func *f, *func = &funcs[i];
+		bool found = false;

 		func->mod = mod;

+		/*
+		 * If any other modules have also patched this function, it
+		 * already has an ftrace handler.
+		 */
+		hash_for_each_possible(kpatch_func_hash, f, node,
+				       func->old_addr) {
+			if (f->old_addr == func->old_addr) {
+				found = true;
+				break;
+			}
+		}
+		if (found)
+			continue;
+
+		/* Add an ftrace handler for this function. */
 		ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, func->old_addr,
 					   0, 0);
 		if (ret) {
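This lookup-and-skip pattern repeats almost verbatim in kpatch_unregister() below. Both sites could share a small predicate; a hypothetical helper (not part of this commit) would factor it like so:

    /* Hypothetical: true if some registered patch already covers old_addr. */
    static bool kpatch_func_is_patched(unsigned long old_addr)
    {
            struct kpatch_func *f;

            hash_for_each_possible(kpatch_func_hash, f, node, old_addr) {
                    if (f->old_addr == old_addr)
                            return true;
            }
            return false;
    }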
@@ -225,7 +242,7 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 	}

 	/* Register the ftrace trampoline if it hasn't been done already. */
-	if (!kpatch_num_registered++) { /* TODO atomic */
+	if (!kpatch_num_registered++) {
 		ret = register_ftrace_function(&kpatch_ftrace_ops);
 		if (ret) {
 			printk("kpatch: can't register ftrace function \n");
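The dropped /* TODO atomic */ is resolved by the new kpatch_mutex rather than by atomics: down()/up() now serialize kpatch_register() and kpatch_unregister() end to end, so the plain post-increment cannot race. For contrast, the atomic variant the TODO hinted at would have looked roughly like this (a sketch of the road not taken, with a matching atomic_dec_return() presumed on the unregister side):

    static atomic_t kpatch_num_registered = ATOMIC_INIT(0);

    /* true only for the first registration */
    if (atomic_inc_return(&kpatch_num_registered) == 1)
            ret = register_ftrace_function(&kpatch_ftrace_ops);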
@@ -252,6 +269,7 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 	pr_notice("loaded patch module \"%s\"\n", mod->name);
 out:
+	up(&kpatch_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(kpatch_register);
@@ -265,6 +283,8 @@ int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
 		.num_funcs = num_funcs,
 	};

+	down(&kpatch_mutex);
+
 	ret = stop_machine(kpatch_remove_patch, &args, NULL);
 	if (ret)
 		goto out;
@@ -278,8 +298,24 @@ int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
 	}

 	for (i = 0; i < num_funcs; i++) {
-		struct kpatch_func *func = &funcs[i];
+		struct kpatch_func *f, *func = &funcs[i];
+		bool found = false;
+
+		/*
+		 * If any other modules have also patched this function, don't
+		 * remove its ftrace handler.
+		 */
+		hash_for_each_possible(kpatch_func_hash, f, node,
+				       func->old_addr) {
+			if (f->old_addr == func->old_addr) {
+				found = true;
+				break;
+			}
+		}
+		if (found)
+			continue;
+
+		/* Remove the ftrace handler for this function. */
 		ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, func->old_addr,
 					   1, 0);
 		if (ret) {
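Note the third argument to ftrace_set_filter_ip() flipping between the two paths: the kernel's signature is ftrace_set_filter_ip(ops, ip, remove, reset), where remove=0 adds ip to the ops filter and remove=1 deletes it, while reset=0 leaves the rest of the filter untouched. Side by side:

    ftrace_set_filter_ip(&kpatch_ftrace_ops, func->old_addr, 0, 0); /* register: add ip */
    ftrace_set_filter_ip(&kpatch_ftrace_ops, func->old_addr, 1, 0); /* unregister: remove ip */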
@@ -293,6 +329,7 @@ int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
 	pr_notice("unloaded patch module \"%s\"\n", mod->name);
 out:
+	up(&kpatch_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(kpatch_unregister);
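For orientation, a hypothetical patch module driving this API (function and field names as used above; a real module's kpatch_func table comes from the kpatch build tooling, the zero addresses are placeholders, and the "kpatch.h" header name is an assumption):

    #include <linux/module.h>
    #include "kpatch.h" /* assumed header declaring kpatch_register() and struct kpatch_func */

    static struct kpatch_func funcs[] = {
            {
                    .old_addr = 0, /* placeholder: address of the function to replace */
                    .new_addr = 0, /* placeholder: address of its replacement */
            },
    };

    static int __init patch_init(void)
    {
            /* the core module fills in func->mod; the caller just supplies the table */
            return kpatch_register(THIS_MODULE, funcs, ARRAY_SIZE(funcs));
    }

    static void __exit patch_exit(void)
    {
            WARN_ON(kpatch_unregister(THIS_MODULE, funcs, ARRAY_SIZE(funcs)));
    }

    module_init(patch_init);
    module_exit(patch_exit);
    MODULE_LICENSE("GPL");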