Merge pull request #62 from jpoimboe/core-cleanup

kmod/core: long overdue cleanup
This commit is contained in:
Seth Jennings 2014-03-14 20:39:09 -05:00
commit c09ed67945
6 changed files with 134 additions and 198 deletions

View File

@ -8,7 +8,7 @@ endif
KPATCH_MAKE = $(MAKE) -C $(KPATCH_BUILD) M=$(THISDIR)
kpatch.ko: core.c trampoline.S
kpatch.ko: core.c
$(KPATCH_MAKE) kpatch.ko
all: kpatch.ko
@ -20,4 +20,4 @@ clean:
# kbuild rules
obj-m := kpatch.o
kpatch-y := core.o trampoline.o
kpatch-y := core.o

View File

@ -1,6 +1,6 @@
/*
* Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
* Copyright (C) 2013 Josh Poimboeuf <jpoimboe@redhat.com>
* Copyright (C) 2013-2014 Josh Poimboeuf <jpoimboe@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -18,23 +18,20 @@
* 02110-1301, USA.
*/
/* Contains the code for the core kpatch module. This module reads
* information from the patch modules, finds newly patched functions,
* and registers those functions with the ftrace handler that redirects
* the old function calls to the new function code.
/* Contains the code for the core kpatch module. Each patch module registers
* with this module to redirect old functions to new functions.
*
* Each patch module can contain one or more patched functions. This
* information is contained in the .patches section of the patch module. For
* each function patched by the module we must:
* Each patch module can contain one or more new functions. This information
* is contained in the .patches section of the patch module. For each function
* patched by the module we must:
* - Call stop_machine
* - Ensure that no execution thread is currently in the function to be
* patched (or has the function in the call stack)
* - Ensure that no execution thread is currently in the old function (or has
* it in the call stack)
* - Add the new function address to the kpatch_funcs table
*
* After that, each call to the old function calls into kpatch_trampoline()
* which searches for a patched version of the function in the kpatch_funcs
* table. If a patched version is found, the return instruction pointer is
* overwritten to return to the new function.
* After that, each call to the old function calls into kpatch_ftrace_handler()
* which finds the new function in the kpatch_funcs table and updates the
* return instruction pointer so that ftrace will return to the new function.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@ -47,32 +44,17 @@
#include <asm/cacheflush.h>
#include "kpatch.h"
/* TODO: this array is horrible */
#define KPATCH_MAX_FUNCS 256
struct kpatch_func kpatch_funcs[KPATCH_MAX_FUNCS+1];
static int kpatch_num_registered;
/* from trampoline.S */
extern void kpatch_trampoline(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs);
/*
* Deal with some of the peculiarities caused by the trampoline being called
* from __ftrace_ops_list_func instead of directly from ftrace_regs_caller.
*/
void kpatch_ftrace_hacks(void)
{
#define TRACE_INTERNAL_BIT (1<<11)
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0)
trace_recursion_clear(TRACE_INTERNAL_BIT);
preempt_enable_notrace();
}
static int kpatch_num_funcs(struct kpatch_func *f)
static int kpatch_num_funcs(struct kpatch_func *funcs)
{
int i;
for (i = 0; f[i].old_func_name; i++)
for (i = 0; funcs[i].old_addr; i++)
;
return i;
@ -86,22 +68,22 @@ struct ktrace_backtrace_args {
void kpatch_backtrace_address_verify(void *data, unsigned long address,
int reliable)
{
struct kpatch_func *f;
struct kpatch_func *func;
struct ktrace_backtrace_args *args = data;
if (args->ret)
return;
for (f = args->funcs; f->old_func_name; f++)
if (address >= f->old_func_addr &&
address < f->old_func_addr_end)
for (func = args->funcs; func->old_addr; func++)
if (address >= func->old_addr &&
address < func->old_addr + func->old_size)
goto unsafe;
return;
unsafe:
printk("kpatch: activeness safety check failed for '%s()'\n",
f->old_func_name);
printk("kpatch: activeness safety check failed for function at address "
"'%lx()'\n", func->old_addr);
args->ret = -EBUSY;
}
@ -142,9 +124,6 @@ static int kpatch_verify_activeness_safety(struct kpatch_func *funcs)
}
} while_each_thread(g, t);
/* TODO: for preemptible support we would need to ensure that functions
* on top of the stack are actually seen on the stack.
*/
out:
return ret;
}
@ -172,15 +151,73 @@ static int kpatch_apply_patch(void *data)
memcpy(&kpatch_funcs[num_global_funcs], funcs,
num_new_funcs * sizeof(struct kpatch_func));
/* TODO: sync_core? */
out:
return ret;
}
/*
 * Called from stop_machine, with every other CPU quiesced.
 *
 * Removes a patch module's entries from the global kpatch_funcs table and
 * compacts the table so it stays contiguous and terminated by an entry
 * whose old_addr is 0.
 *
 * @data: array of struct kpatch_func (terminated by old_addr == 0)
 *        belonging to the module being unloaded.
 *
 * Returns 0 on success, -EBUSY if one of the functions is still in use
 * (activeness safety check failed), or -EINVAL if the entries cannot be
 * found in the table.
 */
static int kpatch_remove_patch(void *data)
{
	int num_remove_funcs, i, ret = 0;
	struct kpatch_func *funcs = data;

	/* ensure no task is inside (or has on its stack) a removed function */
	ret = kpatch_verify_activeness_safety(funcs);
	if (ret)
		goto out;

	/*
	 * Find the start of this module's run in the global table.
	 * NOTE(review): assumes the module's entries are contiguous, as
	 * written by kpatch_apply_patch -- confirm.
	 */
	for (i = 0; i < KPATCH_MAX_FUNCS && kpatch_funcs[i].old_addr; i++)
		if (kpatch_funcs[i].old_addr == funcs->old_addr)
			break;

	if (i == KPATCH_MAX_FUNCS) {
		ret = -EINVAL;
		goto out;
	}

	num_remove_funcs = kpatch_num_funcs(funcs);

	/* shift any following entries down over the removed run */
	for ( ; kpatch_funcs[i + num_remove_funcs].old_addr; i++)
		memcpy(&kpatch_funcs[i], &kpatch_funcs[i + num_remove_funcs],
		       sizeof(struct kpatch_func));

	/*
	 * Clear the vacated tail slots.  The previous code cleared the
	 * removed run *before* shifting, which left stale duplicates of the
	 * last num_remove_funcs entries past the end of the compacted table
	 * whenever other entries followed the removed run; clearing after
	 * the shift handles both the middle-of-table and end-of-table cases.
	 */
	memset(&kpatch_funcs[i], 0,
	       num_remove_funcs * sizeof(struct kpatch_func));
out:
	return ret;
}
void kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs)
{
int i;
struct kpatch_func *func = NULL;
for (i = 0; i < KPATCH_MAX_FUNCS &&
kpatch_funcs[i].old_addr; i++) {
if (kpatch_funcs[i].old_addr == ip) {
func = &kpatch_funcs[i];
break;
}
}
/*
* Check for the rare case where we don't have a new function to call.
* This can happen in the small window of time during patch module
* insmod after it has called register_ftrace_function() but before it
* has called stop_machine() to do the activeness safety check and the
* array update. In this case we just return and let the old function
* run.
*/
if (!func)
return;
regs->ip = func->new_addr;
return;
}
static struct ftrace_ops kpatch_ftrace_ops __read_mostly = {
.func = kpatch_trampoline,
.func = kpatch_ftrace_handler,
.flags = FTRACE_OPS_FL_SAVE_REGS,
};
@ -193,36 +230,38 @@ int kpatch_register(struct module *mod, void *kpatch_patches,
int i;
int num_patches;
struct kpatch_patch *patches;
struct kpatch_func *funcs, *f;
pr_err("loading patch module \"%s\"", mod->name);
struct kpatch_func *funcs, *func;
num_patches = (kpatch_patches_end - kpatch_patches) / sizeof(*patches);
patches = kpatch_patches;
funcs = kmalloc((num_patches + 1) * sizeof(*funcs), GFP_KERNEL); /*TODO: error handling, free, etc */
funcs = kmalloc((num_patches + 1) * sizeof(*funcs), GFP_KERNEL);
if (!funcs) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < num_patches; i++) {
funcs[i].old_func_addr = patches[i].orig;
funcs[i].old_func_addr_end = patches[i].orig_end;
funcs[i].new_func_addr = patches[i].new;
funcs[i].old_addr = patches[i].old_addr;
funcs[i].old_size = patches[i].old_size;
funcs[i].new_addr = patches[i].new_addr;
funcs[i].mod = mod;
funcs[i].old_func_name = "TODO";
/* Do any needed incremental patching. */
for (f = kpatch_funcs; f->old_func_name; f++) {
if (funcs[i].old_func_addr == f->old_func_addr) {
funcs[i].old_func_addr = f->new_func_addr;
ref_module(funcs[i].mod, f->mod);
for (func = kpatch_funcs; func->old_addr; func++) {
if (funcs[i].old_addr == func->old_addr) {
funcs[i].old_addr = func->new_addr;
ref_module(funcs[i].mod, func->mod);
}
}
ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, patches[i].orig,
0, 0);
ret = ftrace_set_filter_ip(&kpatch_ftrace_ops,
patches[i].old_addr, 0, 0);
if (ret) {
printk("kpatch: can't set ftrace filter at "
"%lx '%s' (%d)\n",
funcs[i].old_func_addr, funcs[i].old_func_name, ret);
printk("kpatch: can't set ftrace filter at address "
"0x%lx (%d)\n",
funcs[i].old_addr, ret);
goto out;
}
}
@ -253,56 +292,32 @@ int kpatch_register(struct module *mod, void *kpatch_patches,
goto out;
}
pr_notice("loaded patch module \"%s\"\n", mod->name);
out:
if (funcs)
kfree(funcs);
return ret;
}
EXPORT_SYMBOL(kpatch_register);
/* Called from stop_machine */
/*
 * Remove a patch module's functions from the global kpatch_funcs table
 * and compact the table.  @data is an array of the module's kpatch_func
 * entries, terminated by a NULL old_func_name.  Returns 0 on success,
 * -EBUSY if a function is still in use, or -EINVAL if not found.
 */
static int kpatch_remove_patch(void *data)
{
int num_remove_funcs, i, ret = 0;
struct kpatch_func *funcs = data;
/* bail if any task is inside (or has on its stack) a removed function */
ret = kpatch_verify_activeness_safety(funcs);
if (ret)
goto out;
/* find the first of this module's entries in the global table */
for (i = 0; i < KPATCH_MAX_FUNCS; i++) /* TODO iterate by pointer */
if (kpatch_funcs[i].old_func_addr == funcs->old_func_addr)
break;
if (i == KPATCH_MAX_FUNCS) {
ret = -EINVAL;
goto out;
}
num_remove_funcs = kpatch_num_funcs(funcs);
/* Clear the removed run, then shift later entries down over the gap.
 * NOTE(review): the shifted-from tail slots are never cleared, so when
 * other entries follow the removed run, stale duplicates remain past
 * the end of the compacted table -- confirm and fix. */
memset(&kpatch_funcs[i], 0,
num_remove_funcs * sizeof(struct kpatch_func));
for ( ;kpatch_funcs[i + num_remove_funcs].old_func_name; i++)
memcpy(&kpatch_funcs[i], &kpatch_funcs[i + num_remove_funcs],
sizeof(struct kpatch_func));
out:
return ret;
}
int kpatch_unregister(struct module *mod)
{
int ret = 0;
struct kpatch_func *funcs, *f;
struct kpatch_func *funcs, *func;
int num_funcs, i;
num_funcs = kpatch_num_funcs(kpatch_funcs);
funcs = kmalloc((num_funcs + 1) * sizeof(*funcs), GFP_KERNEL);
if (!funcs) {
ret = -ENOMEM;
goto out;
}
for (f = kpatch_funcs, i = 0; f->old_func_name; f++)
if (f->mod == mod)
memcpy(&funcs[i++], f, sizeof(*funcs));
for (func = kpatch_funcs, i = 0; func->old_addr; func++)
if (func->mod == mod)
memcpy(&funcs[i++], func, sizeof(*funcs));
memset(&funcs[i], 0, sizeof(*funcs));
ret = stop_machine(kpatch_remove_patch, funcs, NULL);
@ -317,19 +332,22 @@ int kpatch_unregister(struct module *mod)
}
}
for (f = funcs; f->old_func_name; f++) {
ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, f->old_func_addr,
for (func = funcs; func->old_addr; func++) {
ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, func->old_addr,
1, 0);
if (ret) {
printk("kpatch: can't remove ftrace filter at "
"%lx '%s' (%d)\n",
f->old_func_addr, f->old_func_name, ret);
printk("kpatch: can't remove ftrace filter at address "
"0x%lx (%d)\n",
func->old_addr, ret);
goto out;
}
}
pr_notice("unloaded patch module \"%s\"\n", mod->name);
out:
kfree(funcs);
if (funcs)
kfree(funcs);
return ret;
}
EXPORT_SYMBOL(kpatch_unregister);

View File

@ -2,7 +2,7 @@
* kpatch.h
*
* Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
* Copyright (C) 2013 Josh Poimboeuf <jpoimboe@redhat.com>
* Copyright (C) 2013-2014 Josh Poimboeuf <jpoimboe@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@ -26,23 +26,16 @@
#define _KPATCH_H_
struct kpatch_func {
unsigned long old_func_addr;
unsigned long new_func_addr;
char *old_func_name;
unsigned long old_func_addr_end;
unsigned long new_addr;
unsigned long old_addr;
unsigned long old_size;
struct module *mod;
};
struct kpatch_rela {
unsigned long dest;
unsigned long src;
unsigned long type;
};
struct kpatch_patch {
unsigned long new;
unsigned long orig;
unsigned long orig_end;
unsigned long new_addr;
unsigned long old_addr;
unsigned long old_size;
};
int kpatch_register(struct module *mod, void *kpatch_patches,

View File

@ -1,73 +0,0 @@
/*
* kpatch trampoline
*
* Copyright (C) 2013 Josh Poimboeuf <jpoimboe@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA,
* 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <asm/calling.h>
/*
 * Ftrace callback (registered via struct ftrace_ops in core.c).
 * NOTE(review): assumes %rdi holds the old function's ip and %rcx the
 * struct pt_regs pointer at entry -- confirm against this kernel
 * version's ftrace regs-caller ABI.
 */
GLOBAL(kpatch_trampoline)
/* save %rcx (pt_regs ptr) and %rdi (old ip) across the C call below */
pushq %rcx
pushq %rdi
callq kpatch_ftrace_hacks
/* %r10 = &kpatch_funcs[0] */
leaq kpatch_funcs, %r10
/*
 * TODO: if preemption is possible then we'll need to think about how
 * to ensure atomic access to the array and how to ensure activeness
 * safety here. if preemption is enabled then we need to make sure the
 * IP isn't inside kpatch_trampoline for any task.
 */
/* TODO: save/restore flags? */
/* find new function in func_list */
popq %rdi
loop:
/* %r9 = entry's old_func_addr (first field of struct kpatch_func) */
movq (%r10), %r9
cmpq %r9, %rdi
je found
/*
 * Check for the rare case where we don't have a new function to call.
 * This can happen in the small window of time during patch module
 * insmod after it has called register_ftrace_function() but before it
 * has called stop_machine() to do the activeness safety check and the
 * array update. In this case we just return and let the old function
 * run.
 */
/* a zero old_func_addr terminates the table */
cmpq $0, %r9
je bail
/* next entry; 40 = sizeof(struct kpatch_func) (5 fields x 8 bytes) */
addq $40, %r10 /* FIXME http://docs.blackfin.uclinux.org/doku.php?id=toolchain:gas:structs */
jmp loop
found:
/* get new function address */
movq 8(%r10), %r10
/* tell ftrace to return to new function */
/* %rax = pt_regs pointer saved (as %rcx) at entry */
popq %rax
movq %r10, RIP(%rax)
/* NOTE(review): when reached directly via "je bail", the %rcx pushed at
 * entry is still on the stack, so retq would pop it as the return
 * address -- confirm whether this stack imbalance is intentional. */
bail:
retq

View File

@ -27,14 +27,12 @@ extern char __kpatch_patches, __kpatch_patches_end;
/* Module init: hand this patch module's .patches section (bounded by the
 * __kpatch_patches linker symbols) to the core kpatch module. */
static int __init patch_init(void)
{
printk("patch loading\n");
return kpatch_register(THIS_MODULE, &__kpatch_patches,
&__kpatch_patches_end);
}
/* Module exit: ask the core module to unhook this module's functions. */
static void __exit patch_exit(void)
{
printk("patch unloading\n");
kpatch_unregister(THIS_MODULE);
}

View File

@ -28,9 +28,9 @@
* functions are overridden by the patch module.
*
* For each struct kpatch_patch entry in the .patches section, the core
* module will register the new function as an ftrace handler for the
* old function. The new function will return to the caller of the old
* function, not the old function itself, bypassing the old function.
* module will register as an ftrace handler for the old function. The new
* function will return to the caller of the old function, not the old function
* itself, bypassing the old function.
*/
#include <sys/types.h>
@ -309,8 +309,8 @@ int main(int argc, char **argv)
for_each_sym(&symlist, cur) {
if (cur->action != PATCH)
continue;
patches_data[i].orig = cur->vm_addr;
patches_data[i].orig_end = cur->vm_addr + cur->vm_len;
patches_data[i].old_addr = cur->vm_addr;
patches_data[i].old_size = cur->vm_len;
relas_data[i].r_offset = i * sizeof(struct kpatch_patch);
relas_data[i].r_info = GELF_R_INFO(cur->index, R_X86_64_64);
i++;