Merge pull request #63 from jpoimboe/core-api-hash

kmod/core: new API and kpatch_funcs_hash
Seth Jennings 2014-03-18 16:50:56 -05:00
commit fd2c137acf
8 changed files with 171 additions and 162 deletions
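The new API in one place: a patch module allocates an array of struct kpatch_func, fills in old_addr/old_size/new_addr from its __kpatch_patches section, and hands the array plus its length to kpatch_register(); the core no longer copies entries into a fixed-size global table and instead hashes them by old_addr. A minimal caller sketch, condensed from the kmod/patch/kpatch.c changes below (error handling trimmed; every name here comes from this diff):

#include <linux/module.h>
#include <linux/slab.h>
#include "kpatch.h"        /* struct kpatch_func, kpatch_register() */
#include "kpatch-patch.h"  /* struct kpatch_patch (new header in this PR) */

/* bounds of the __kpatch_patches section emitted by the patch tooling */
extern char __kpatch_patches, __kpatch_patches_end;

static struct kpatch_func *funcs;
static int num_funcs;

static int __init patch_init(void)
{
    struct kpatch_patch *patches = (struct kpatch_patch *)&__kpatch_patches;
    int i;

    num_funcs = (&__kpatch_patches_end - &__kpatch_patches) / sizeof(*patches);

    /* one kpatch_func per patched function, owned by the patch module */
    funcs = kzalloc(num_funcs * sizeof(*funcs), GFP_KERNEL);
    if (!funcs)
        return -ENOMEM;

    for (i = 0; i < num_funcs; i++) {
        funcs[i].old_addr = patches[i].old_addr;
        funcs[i].old_size = patches[i].old_size;
        funcs[i].new_addr = patches[i].new_addr;
    }

    /* new API: pass the array and its length instead of raw section bounds */
    return kpatch_register(THIS_MODULE, funcs, num_funcs);
}

kpatch_unregister() takes the same array and count, so the core can unhash exactly the entries this module registered.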

View File

@@ -40,51 +40,43 @@
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
 #include <linux/ftrace.h>
+#include <linux/hashtable.h>
 #include <asm/stacktrace.h>
 #include <asm/cacheflush.h>
 #include "kpatch.h"
 
-/* TODO: this array is horrible */
-#define KPATCH_MAX_FUNCS 256
-struct kpatch_func kpatch_funcs[KPATCH_MAX_FUNCS+1];
+#define KPATCH_HASH_BITS 8
+DEFINE_HASHTABLE(kpatch_func_hash, KPATCH_HASH_BITS);
 
 static int kpatch_num_registered;
 
-static int kpatch_num_funcs(struct kpatch_func *funcs)
-{
-    int i;
-
-    for (i = 0; funcs[i].old_addr; i++)
-        ;
-
-    return i;
-}
-
-struct ktrace_backtrace_args {
+struct kpatch_backtrace_args {
     struct kpatch_func *funcs;
-    int ret;
+    int num_funcs, ret;
 };
 
 void kpatch_backtrace_address_verify(void *data, unsigned long address,
                                      int reliable)
 {
-    struct kpatch_func *func;
-    struct ktrace_backtrace_args *args = data;
+    struct kpatch_backtrace_args *args = data;
+    struct kpatch_func *funcs = args->funcs;
+    int i, num_funcs = args->num_funcs;
 
     if (args->ret)
         return;
 
-    for (func = args->funcs; func->old_addr; func++)
-        if (address >= func->old_addr &&
-            address < func->old_addr + func->old_size)
-            goto unsafe;
-
-    return;
-
-unsafe:
-    printk("kpatch: activeness safety check failed for function at address "
-           "'%lx()'\n", func->old_addr);
-    args->ret = -EBUSY;
+    for (i = 0; i < num_funcs; i++) {
+        struct kpatch_func *func = &funcs[i];
+
+        if (address >= func->old_addr &&
+            address < func->old_addr + func->old_size) {
+            printk("kpatch: activeness safety check failed for "
+                   "function at address " "'%lx()'\n",
+                   func->old_addr);
+            args->ret = -EBUSY;
+            return;
+        }
+    }
 }
 
 static int kpatch_backtrace_stack(void *data, char *name)
@@ -98,20 +90,21 @@ struct stacktrace_ops kpatch_backtrace_ops = {
     .walk_stack = print_context_stack_bp,
 };
 
 /*
  * Verify activeness safety, i.e. that none of the to-be-patched functions are
  * on the stack of any task.
  *
  * This function is called from stop_machine() context.
  */
-static int kpatch_verify_activeness_safety(struct kpatch_func *funcs)
+static int kpatch_verify_activeness_safety(struct kpatch_func *funcs,
+                                           int num_funcs)
 {
     struct task_struct *g, *t;
     int ret = 0;
 
-    struct ktrace_backtrace_args args = {
+    struct kpatch_backtrace_args args = {
         .funcs = funcs,
+        .num_funcs = num_funcs,
         .ret = 0
     };
@@ -128,29 +121,41 @@ out:
     return ret;
 }
 
+struct kpatch_stop_machine_args {
+    struct kpatch_func *funcs;
+    int num_funcs;
+};
+
 /* Called from stop_machine */
 static int kpatch_apply_patch(void *data)
 {
-    int ret, num_global_funcs, num_new_funcs;
-    struct kpatch_func *funcs = data;
+    struct kpatch_stop_machine_args *args = data;
+    struct kpatch_func *funcs = args->funcs;
+    int num_funcs = args->num_funcs;
+    int i, ret;
 
-    ret = kpatch_verify_activeness_safety(funcs);
+    ret = kpatch_verify_activeness_safety(funcs, num_funcs);
     if (ret)
         goto out;
 
-    num_global_funcs = kpatch_num_funcs(kpatch_funcs);
-    num_new_funcs = kpatch_num_funcs(funcs);
-
-    if (num_global_funcs + num_new_funcs > KPATCH_MAX_FUNCS) {
-        printk("kpatch: exceeded maximum # of patched functions (%d)\n",
-               KPATCH_MAX_FUNCS);
-        ret = -E2BIG;
-        goto out;
-    }
+    for (i = 0; i < num_funcs; i++) {
+        struct kpatch_func *f;
+        struct kpatch_func *func = &funcs[i];
 
-    memcpy(&kpatch_funcs[num_global_funcs], funcs,
-           num_new_funcs * sizeof(struct kpatch_func));
+        /* do any needed incremental patching */
+        /* TODO: performance */
+        hash_for_each_possible(kpatch_func_hash, f, node,
+                               func->old_addr) {
+            if (f->old_addr == func->old_addr) {
+                func->old_addr = f->new_addr;
+                ref_module(func->mod, f->mod);
+            }
+        }
+
+        /* update the global list and go live */
+        hash_add(kpatch_func_hash, &func->node, func->old_addr);
+    }
 
 out:
     return ret;
 }
@@ -158,62 +163,36 @@ out:
 /* Called from stop_machine */
 static int kpatch_remove_patch(void *data)
 {
-    int num_remove_funcs, i, ret = 0;
-    struct kpatch_func *funcs = data;
+    struct kpatch_stop_machine_args *args = data;
+    struct kpatch_func *funcs = args->funcs;
+    int num_funcs = args->num_funcs;
+    int ret, i;
 
-    ret = kpatch_verify_activeness_safety(funcs);
+    ret = kpatch_verify_activeness_safety(funcs, num_funcs);
     if (ret)
         goto out;
 
-    for (i = 0; i < KPATCH_MAX_FUNCS && kpatch_funcs[i].old_addr; i++)
-        if (kpatch_funcs[i].old_addr == funcs->old_addr)
-            break;
-
-    if (i == KPATCH_MAX_FUNCS) {
-        ret = -EINVAL;
-        goto out;
-    }
-
-    num_remove_funcs = kpatch_num_funcs(funcs);
-
-    memset(&kpatch_funcs[i], 0,
-           num_remove_funcs * sizeof(struct kpatch_func));
-
-    for ( ; kpatch_funcs[i + num_remove_funcs].old_addr; i++)
-        memcpy(&kpatch_funcs[i], &kpatch_funcs[i + num_remove_funcs],
-               sizeof(struct kpatch_func));
+    for (i = 0; i < num_funcs; i++)
+        hlist_del(&funcs[i].node);
 
 out:
     return ret;
 }
 
 void kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                            struct ftrace_ops *op, struct pt_regs *regs)
 {
-    int i;
-    struct kpatch_func *func = NULL;
+    struct kpatch_func *f;
 
-    for (i = 0; i < KPATCH_MAX_FUNCS &&
-         kpatch_funcs[i].old_addr; i++) {
-        if (kpatch_funcs[i].old_addr == ip) {
-            func = &kpatch_funcs[i];
+    preempt_disable_notrace();
+    hash_for_each_possible(kpatch_func_hash, f, node, ip) {
+        if (f->old_addr == ip) {
+            regs->ip = f->new_addr;
             break;
         }
     }
-
-    /*
-     * Check for the rare case where we don't have a new function to call.
-     * This can happen in the small window of time during patch module
-     * insmod after it has called register_ftrace_function() but before it
-     * has called stop_machine() to do the activeness safety check and the
-     * array update.  In this case we just return and let the old function
-     * run.
-     */
-    if (!func)
-        return;
-
-    regs->ip = func->new_addr;
-
-    return;
+    preempt_enable_notrace();
 }
 
 static struct ftrace_ops kpatch_ftrace_ops __read_mostly = {
@@ -221,54 +200,32 @@ static struct ftrace_ops kpatch_ftrace_ops __read_mostly = {
     .flags = FTRACE_OPS_FL_SAVE_REGS,
 };
 
-int kpatch_register(struct module *mod, void *kpatch_patches,
-                    void *kpatch_patches_end)
+int kpatch_register(struct module *mod, struct kpatch_func *funcs,
+                    int num_funcs)
 {
-    int ret = 0;
-    int ret2;
-    int i;
-    int num_patches;
-    struct kpatch_patch *patches;
-    struct kpatch_func *funcs, *func;
-
-    num_patches = (kpatch_patches_end - kpatch_patches) / sizeof(*patches);
-    patches = kpatch_patches;
-
-    funcs = kmalloc((num_patches + 1) * sizeof(*funcs), GFP_KERNEL);
-    if (!funcs) {
-        ret = -ENOMEM;
-        goto out;
-    }
-
-    for (i = 0; i < num_patches; i++) {
-
-        funcs[i].old_addr = patches[i].old_addr;
-        funcs[i].old_size = patches[i].old_size;
-        funcs[i].new_addr = patches[i].new_addr;
-        funcs[i].mod = mod;
-
-        /* Do any needed incremental patching. */
-        for (func = kpatch_funcs; func->old_addr; func++) {
-            if (funcs[i].old_addr == func->old_addr) {
-                funcs[i].old_addr = func->new_addr;
-                ref_module(funcs[i].mod, func->mod);
-            }
-        }
+    int ret, ret2, i;
+    struct kpatch_stop_machine_args args = {
+        .funcs = funcs,
+        .num_funcs = num_funcs,
+    };
 
-        ret = ftrace_set_filter_ip(&kpatch_ftrace_ops,
-                                   patches[i].old_addr, 0, 0);
+    for (i = 0; i < num_funcs; i++) {
+        struct kpatch_func *func = &funcs[i];
+
+        func->mod = mod;
+
+        ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, func->old_addr,
+                                   0, 0);
         if (ret) {
             printk("kpatch: can't set ftrace filter at address "
                    "0x%lx (%d)\n",
-                   funcs[i].old_addr, ret);
+                   func->old_addr, ret);
             goto out;
         }
     }
 
-    memset(&funcs[num_patches], 0, sizeof(*funcs));
-
     /* Register the ftrace trampoline if it hasn't been done already. */
-    if (!kpatch_num_registered++) {
+    if (!kpatch_num_registered++) { /* TODO atomic */
         ret = register_ftrace_function(&kpatch_ftrace_ops);
         if (ret) {
             printk("kpatch: can't register ftrace function \n");
@@ -280,7 +237,7 @@ int kpatch_register(struct module *mod, void *kpatch_patches,
      * Idle the CPUs, verify activeness safety, and atomically make the new
      * functions visible to the trampoline.
      */
-    ret = stop_machine(kpatch_apply_patch, funcs, NULL);
+    ret = stop_machine(kpatch_apply_patch, &args, NULL);
     if (ret) {
         if (!--kpatch_num_registered) {
             ret2 = unregister_ftrace_function(&kpatch_ftrace_ops);
@@ -295,32 +252,20 @@ int kpatch_register(struct module *mod, void *kpatch_patches,
     pr_notice("loaded patch module \"%s\"\n", mod->name);
 
 out:
-    if (funcs)
-        kfree(funcs);
     return ret;
 }
 EXPORT_SYMBOL(kpatch_register);
 
-int kpatch_unregister(struct module *mod)
+int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
+                      int num_funcs)
 {
-    int ret = 0;
-    struct kpatch_func *funcs, *func;
-    int num_funcs, i;
-
-    num_funcs = kpatch_num_funcs(kpatch_funcs);
-
-    funcs = kmalloc((num_funcs + 1) * sizeof(*funcs), GFP_KERNEL);
-    if (!funcs) {
-        ret = -ENOMEM;
-        goto out;
-    }
-
-    for (func = kpatch_funcs, i = 0; func->old_addr; func++)
-        if (func->mod == mod)
-            memcpy(&funcs[i++], func, sizeof(*funcs));
-    memset(&funcs[i], 0, sizeof(*funcs));
-
-    ret = stop_machine(kpatch_remove_patch, funcs, NULL);
+    int i, ret;
+    struct kpatch_stop_machine_args args = {
+        .funcs = funcs,
+        .num_funcs = num_funcs,
+    };
+
+    ret = stop_machine(kpatch_remove_patch, &args, NULL);
     if (ret)
         goto out;
@@ -332,7 +277,9 @@ int kpatch_unregister(struct module *mod)
         }
     }
 
-    for (func = funcs; func->old_addr; func++) {
+    for (i = 0; i < num_funcs; i++) {
+        struct kpatch_func *func = &funcs[i];
+
         ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, func->old_addr,
                                    1, 0);
         if (ret) {
@@ -346,8 +293,6 @@ int kpatch_unregister(struct module *mod)
     pr_notice("unloaded patch module \"%s\"\n", mod->name);
 
 out:
-    if (funcs)
-        kfree(funcs);
     return ret;
 }
 EXPORT_SYMBOL(kpatch_unregister);
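With the fixed-size kpatch_funcs[] array gone, every lookup in core.c becomes a walk over the bucket of kpatch_func_hash that the original address hashes to: kpatch_apply_patch() uses it for incremental patching, kpatch_remove_patch() unhashes the entries, and kpatch_ftrace_handler() resolves ip to new_addr the same way. A standalone sketch of that <linux/hashtable.h> idiom; the table name, key, and struct match the diff, but the two helper functions are hypothetical illustrations, not part of the patch:

#include <linux/hashtable.h>
#include "kpatch.h"            /* struct kpatch_func with its hlist_node */

#define KPATCH_HASH_BITS 8     /* 2^8 buckets, as in core.c */
DEFINE_HASHTABLE(kpatch_func_hash, KPATCH_HASH_BITS);

/* hypothetical: look up the replacement registered for old_addr, if any */
static struct kpatch_func *kpatch_find_func(unsigned long old_addr)
{
    struct kpatch_func *f;

    /* iterates only the bucket old_addr hashes to, not the whole table */
    hash_for_each_possible(kpatch_func_hash, f, node, old_addr) {
        if (f->old_addr == old_addr)
            return f;
    }
    return NULL;
}

/* hypothetical: what kpatch_apply_patch() does for each new entry */
static void kpatch_go_live(struct kpatch_func *func)
{
    hash_add(kpatch_func_hash, &func->node, func->old_addr);
}

Removal is the mirror image: kpatch_remove_patch() calls hlist_del(&funcs[i].node), which drops the entry from whichever bucket it lives in.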

View File

@@ -25,21 +25,19 @@
 #ifndef _KPATCH_H_
 #define _KPATCH_H_
 
+#include <linux/types.h>
+
 struct kpatch_func {
     unsigned long new_addr;
     unsigned long old_addr;
     unsigned long old_size;
     struct module *mod;
+    struct hlist_node node;
 };
 
-struct kpatch_patch {
-    unsigned long new_addr;
-    unsigned long old_addr;
-    unsigned long old_size;
-};
-
-int kpatch_register(struct module *mod, void *kpatch_patches,
-                    void *kpatch_patches_end);
-int kpatch_unregister(struct module *mod);
+extern int kpatch_register(struct module *mod, struct kpatch_func *funcs,
+                           int num_funcs);
+extern int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
+                             int num_funcs);
 
 #endif /* _KPATCH_H_ */

View File

@@ -21,19 +21,55 @@
 #include <linux/module.h>
 #include <linux/printk.h>
+#include <linux/slab.h>
 #include "kpatch.h"
+#include "kpatch-patch.h"
 
 extern char __kpatch_patches, __kpatch_patches_end;
 
+static struct kpatch_func *funcs;
+static int num_funcs;
+
 static int __init patch_init(void)
 {
-    return kpatch_register(THIS_MODULE, &__kpatch_patches,
-                           &__kpatch_patches_end);
+    struct kpatch_patch *patches;
+    int i;
+
+    patches = (struct kpatch_patch *)&__kpatch_patches;
+    num_funcs = (&__kpatch_patches_end - &__kpatch_patches) /
+                sizeof(*patches);
+
+    funcs = kzalloc(num_funcs * sizeof(*funcs), GFP_KERNEL);
+    if (!funcs)
+        return -ENOMEM;
+
+    for (i = 0; i < num_funcs; i++) {
+        funcs[i].old_addr = patches[i].old_addr;
+        funcs[i].old_size = patches[i].old_size;
+        funcs[i].new_addr = patches[i].new_addr;
+    }
+
+    return kpatch_register(THIS_MODULE, funcs, num_funcs);
 }
 
 static void __exit patch_exit(void)
 {
-    kpatch_unregister(THIS_MODULE);
+    int ret;
+
+    ret = kpatch_unregister(THIS_MODULE, funcs, num_funcs);
+    if (ret) {
+        /*
+         * TODO: If this happens, we're screwed.  We need a way to
+         * prevent the module from unloading if the activeness safety
+         * check fails.
+         *
+         * Or alternatively we could keep trying the activeness safety
+         * check in a loop, until it works or we timeout.  Then we
+         * could panic.
+         */
+        panic("kpatch_unregister failed: %d", ret);
+    }
+
+    kfree(funcs);
 }
 
 module_init(patch_init);
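The TODO in patch_exit() leaves the hard case open: if the activeness safety check keeps failing, the module is unloaded anyway, so the code panics. One alternative the comment floats is retrying the check until it passes or a timeout expires. A rough sketch of that idea only, not part of this patch; the retry budget, the msleep() interval, and the function name are made up:

#include <linux/delay.h>   /* msleep() */

/* hypothetical variant of patch_exit(): retry before giving up */
static void __exit patch_exit_retry(void)
{
    int ret, tries = 10;                /* arbitrary retry budget */

    do {
        ret = kpatch_unregister(THIS_MODULE, funcs, num_funcs);
        if (!ret)
            break;
        msleep(1000);                   /* give busy functions time to return */
    } while (--tries);

    if (ret)
        panic("kpatch_unregister failed: %d", ret);

    kfree(funcs);
}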

kmod/patch/kpatch-patch.h (new file, 33 lines)
View File

@@ -0,0 +1,33 @@
/*
* kpatch-patch.h
*
* Copyright (C) 2014 Josh Poimboeuf <jpoimboe@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA,
* 02110-1301, USA.
*
* Contains definitions needed for creating patch modules
*/
#ifndef _KPATCH_PATCH_H_
#define _KPATCH_PATCH_H_

struct kpatch_patch {
    unsigned long new_addr;
    unsigned long old_addr;
    unsigned long old_size;
};

#endif /* _KPATCH_PATCH_H_ */

View File

@@ -1,6 +1,6 @@
 include ../Makefile.inc
 
-CFLAGS += -I../kmod/core -Wall -g
+CFLAGS += -I../kmod/patch -Wall -g
 LDFLAGS = -lelf
 
 TARGETS = create-diff-object add-patches-section link-vmlinux-syms

View File

@@ -43,7 +43,7 @@
 #include <gelf.h>
 #include <unistd.h>
 
-#include "kpatch.h"
+#include "kpatch-patch.h"
 
 #define ERROR(format, ...) \
     error(1, 0, "%s: %d: " format, __FUNCTION__, __LINE__, ##__VA_ARGS__)

View File

@@ -44,8 +44,6 @@
 #include <gelf.h>
 #include <argp.h>
 
-#include "kpatch.h"
-
 #define ERROR(format, ...) \
     error(1, 0, "%s: %d: " format, __FUNCTION__, __LINE__, ##__VA_ARGS__)

View File

@@ -40,8 +40,6 @@
 #include <gelf.h>
 #include <unistd.h>
 
-#include "kpatch.h"
-
 #define ERROR(format, ...) \
     error(1, 0, "%s: %d: " format, __FUNCTION__, __LINE__, ##__VA_ARGS__)

@@ -247,7 +245,8 @@ int main(int argc, char **argv)
         vsym = find_symbol_by_name(&symlistv, cur->name);
         if (!vsym)
-            ERROR("couldn't find global function in vmlinux");
+            ERROR("couldn't find global function %s in vmlinux",
+                  cur->name);
 
         cur->vm_addr = vsym->sym.st_value;
         cur->vm_len = vsym->sym.st_size;