It's alive...

The end-to-end patching works, from object analysis to generation to
runtime patching.  It's still missing the scripting piece that will
take only a patch and a kernel source directory as input.
commit 4feb144e98
Author: Josh Poimboeuf
Date:   2013-01-14 21:46:38 -06:00

11 changed files with 2163 additions and 0 deletions

.gitignore (new file, 9 lines)

@@ -0,0 +1,9 @@
elf-diff-copy/elf-diff-copy
*.o
*.o.cmd
*.ko
*.ko.cmd
*.mod.c
kmod/.tmp_versions/
kmod/Module.symvers
kmod/modules.order

Makefile (new file, 13 lines)

@@ -0,0 +1,13 @@
# Hardcoded developer paths; the scripting piece mentioned in the commit
# message will eventually replace these.
KDIR ?= /home/jpoimboe/git/linux
KPATCH_GENERATED ?= kpatch-generated.o
KMOD_DIR ?= /home/jpoimboe/kpatch/kmod
OBJ_ORIG = /home/jpoimboe/kpatch-test/meminfo.o
OBJ_PATCHED = /home/jpoimboe/kpatch-test/meminfo.o.patched
VMLINUX_ORIG = /home/jpoimboe/kpatch-test/vmlinux

all:
# Build the object-diffing tool, then diff the original and patched
# objects into a generated patch object.
	$(MAKE) -C elf-diff-copy
	elf-diff-copy/elf-diff-copy $(OBJ_ORIG) $(OBJ_PATCHED) -v $(VMLINUX_ORIG) -o $(KMOD_DIR)/$(KPATCH_GENERATED)
# Build the patch-module stub, relocatable-link it with the generated
# object and the linker script, then build the final kernel modules.
	$(MAKE) -C $(KDIR) M=$(KMOD_DIR) kpatch-module.o
	ld -m elf_x86_64 -r -o $(KMOD_DIR)/kpatch-combined.o $(KMOD_DIR)/kpatch-module.o $(KMOD_DIR)/$(KPATCH_GENERATED) $(KMOD_DIR)/kpatch.lds
	$(MAKE) -C $(KDIR) M=$(KMOD_DIR)

elf-diff-copy/Makefile (new file, 10 lines)

@@ -0,0 +1,10 @@
CC = gcc
CFLAGS = -g -Wall

.PHONY: all
all: elf-diff-copy

# Requires the libelf and udis86 (x86 disassembler) development libraries.
elf-diff-copy: elf-diff-copy.c
	$(CC) $(CFLAGS) -o $@ $^ -lelf -ludis86

(File diff suppressed because it is too large.)

find_changed_objects (new executable file, 26 lines)

@@ -0,0 +1,26 @@
#!/bin/bash
# List the kernel objects rebuilt since the timestamp file was touched,
# skipping generated and build-infrastructure objects.
KSRC=/home/jpoimboe/git/linux/
for i in $(find $KSRC -newer /tmp/kpatch_timestamp -name "*.o" -not -name "*.mod.o" -not -name "built-in.o" -not -name "vmlinux.o"); do
	case ${i##$KSRC} in
	.tmp_kallsyms1.o|.tmp_kallsyms2.o|init/version.o|arch/x86/boot/version.o|arch/x86/boot/compressed/eboot.o|arch/x86/boot/header.o|arch/x86/boot/compressed/efi_stub_64.o|arch/x86/boot/compressed/piggy.o)
		continue
		;;
	esac
	# A compiled C file has exactly one FILE symbol; objects with more
	# than one are linked objects and get skipped, and objects with
	# none come from assembly, which isn't supported.
	num=$(readelf -s $i | awk '{print $4}' | grep FILE | wc -l)
	[[ $num -gt 1 ]] && continue
	[[ $num -eq 0 ]] && {
		echo "ERROR: unsupported assembly file ${i##$KSRC} changed"
		exit 1
	}
	[[ $num -eq 1 ]] && echo ${i##$KSRC}
done

kmod/Kbuild (new file, 7 lines)

@@ -0,0 +1,7 @@
# kpatch.ko: the core module (register/unregister API plus the ftrace
# trampoline).  kpatch-patch.ko: the generated patch module, built from
# kpatch-combined.o which the top-level Makefile produces.  dummy.ko: a
# standalone build of the patch-module stub.
obj-m += dummy.o kpatch-patch.o kpatch.o
kpatch-objs += base.o trampoline.o
kpatch-patch-objs += kpatch-combined.o
dummy-objs += kpatch-module.o

kmod/base.c (new file, 360 lines)

@@ -0,0 +1,360 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <asm/stacktrace.h>
#include <asm/cacheflush.h>
#include "kpatch.h"

struct kpatch_func kpatch_funcs[KPATCH_MAX_FUNCS+1];

static int kpatch_num_registered;

/*
 * Deal with some of the peculiarities caused by the trampoline being called
 * from __ftrace_ops_list_func instead of directly from ftrace_regs_caller.
 */
void kpatch_ftrace_hacks(void)
{
#define TRACE_INTERNAL_BIT (1<<11)
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(bit); } while (0)
	trace_recursion_clear(TRACE_INTERNAL_BIT);
	preempt_enable_notrace();
}

static int kpatch_num_funcs(struct kpatch_func *f)
{
	int i;

	for (i = 0; f[i].old_func_name; i++)
		;

	return i;
}
struct kpatch_backtrace_args {
	struct kpatch_func *funcs;
	int ret;
};

void kpatch_backtrace_address_verify(void *data, unsigned long address,
				     int reliable)
{
	struct kpatch_func *f;
	struct kpatch_backtrace_args *args = data;

	if (args->ret)
		return;

	for (f = args->funcs; f->old_func_name; f++)
		if (address >= f->old_func_addr &&
		    address < f->old_func_addr_end)
			goto unsafe;

	return;

unsafe:
	printk("kpatch: activeness safety check failed for '%s()'\n",
	       f->old_func_name);
	args->ret = -EBUSY;
}

static int kpatch_backtrace_stack(void *data, char *name)
{
	return 0;
}

struct stacktrace_ops kpatch_backtrace_ops = {
	.address	= kpatch_backtrace_address_verify,
	.stack		= kpatch_backtrace_stack,
	.walk_stack	= print_context_stack_bp,
};
/*
 * Verify activeness safety, i.e. that none of the to-be-patched functions are
 * on the stack of any task.
 *
 * This function is called from stop_machine() context.
 */
static int kpatch_verify_activeness_safety(struct kpatch_func *funcs)
{
	struct task_struct *g, *t;
	int ret = 0;

	struct kpatch_backtrace_args args = {
		.funcs = funcs,
		.ret = 0
	};

	/* Check the stacks of all tasks. */
	do_each_thread(g, t) {
		dump_trace(t, NULL, NULL, 0, &kpatch_backtrace_ops, &args);
		if (args.ret) {
			ret = args.ret;
			goto out;
		}
	} while_each_thread(g, t);

	/* TODO: for preemptible support we would need to ensure that functions
	 * on top of the stack are actually seen on the stack.
	 */
out:
	return ret;
}
/* Called from stop_machine */
static int kpatch_apply_patch(void *data)
{
	int ret, num_global_funcs, num_new_funcs;
	struct kpatch_func *funcs = data;

	ret = kpatch_verify_activeness_safety(funcs);
	if (ret)
		goto out;

	num_global_funcs = kpatch_num_funcs(kpatch_funcs);
	num_new_funcs = kpatch_num_funcs(funcs);

	if (num_global_funcs + num_new_funcs > KPATCH_MAX_FUNCS) {
		printk("kpatch: exceeded maximum # of patched functions (%d)\n",
		       KPATCH_MAX_FUNCS);
		ret = -E2BIG;
		goto out;
	}

	memcpy(&kpatch_funcs[num_global_funcs], funcs,
	       num_new_funcs * sizeof(struct kpatch_func));

	/* TODO: sync_core? */

out:
	return ret;
}

static struct ftrace_ops kpatch_ftrace_ops __read_mostly = {
	.func = kpatch_trampoline,
	.flags = FTRACE_OPS_FL_NORETURN | FTRACE_OPS_FL_SAVE_REGS,
};
int kpatch_register(struct module *mod, void *kpatch_relas,
		    void *kpatch_relas_end, void *kpatch_patches,
		    void *kpatch_patches_end)
{
	int ret = 0;
	int ret2;
	struct kpatch_func *f, *g;
	int num_relas;
	struct kpatch_rela *relas;
	int i;
	u64 val;
	void *loc;
	int size;
	int num_patches;
	struct kpatch_patch *patches;
	struct kpatch_func *funcs;

	num_relas = (kpatch_relas_end - kpatch_relas) / sizeof(*relas);
	relas = kpatch_relas;

	num_patches = (kpatch_patches_end - kpatch_patches) / sizeof(*patches);
	patches = kpatch_patches;

	/* FIXME: consider changing dest/src to loc/val */
	/* TODO: ensure dest value is all zeros before touching it, and that
	 * it's within the module bounds */
	for (i = 0; i < num_relas; i++) {
		switch (relas[i].type) {
		case R_X86_64_PC32:
			loc = (void *)relas[i].dest;
			val = (u32)(relas[i].src - relas[i].dest);
			size = 4;
			break;
		case R_X86_64_32S:
			loc = (void *)relas[i].dest;
			val = (s32)relas[i].src;
			size = 4;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}

		printk("%p <- %lx\n", loc, val);
		//printk("%lx\n", (unsigned long)__va(__pa((unsigned long)loc)));
		//loc = __va(__pa((unsigned long)loc));
		set_memory_rw((unsigned long)loc & PAGE_MASK, 1);
		ret = probe_kernel_write(loc, &val, size);
		set_memory_ro((unsigned long)loc & PAGE_MASK, 1);
		if (ret)
			goto out;
		/* TODO: sync_core? */
		/* TODO: understand identity mapping vs text mapping */
	}

	/* TODO: mutex here? */

	/* TODO: verify num_patches is within acceptable bounds */
	funcs = kmalloc((num_patches + 1) * sizeof(*funcs),
			GFP_KERNEL); /* TODO: error handling, free, etc. */
	for (i = 0; i < num_patches; i++) {
		funcs[i].old_func_addr = patches[i].orig;
		funcs[i].new_func_addr = patches[i].new;
		funcs[i].old_func_name = "FIXME";
		ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, patches[i].orig,
					   0, 0);
		if (ret) {
			printk("kpatch: can't set ftrace filter at "
			       "%lx '%s' (%d)\n",
			       funcs[i].old_func_addr, funcs[i].old_func_name,
			       ret);
			goto out;
		}
	}
	memset(&funcs[num_patches], 0, sizeof(*funcs));

#if 0
	/* Find the functions to be replaced. */
	for (f = funcs; f->old_func_name; f++) {
		/* TODO: verify it's a function and look for duplicate symbol
		 * names */
		/* TODO: use pre-generated func address? if using exact kernel
		 * is a requirement? */
		f->old_func_addr = kallsyms_lookup_name(f->old_func_name);
		if (!f->old_func_addr) {
			printk("kpatch: can't find function '%s'\n",
			       f->old_func_name);
			ret = -ENXIO;
			goto out;
		}

		/* Do any needed incremental patching. */
		for (g = kpatch_funcs; g->old_func_name; g++)
			if (f->old_func_addr == g->old_func_addr) {
				f->old_func_addr = g->new_func_addr;
				ref_module(f->owner, g->owner);
			}

		if (!kallsyms_lookup_size_offset(f->old_func_addr, &size,
						 &offset)) {
			printk("kpatch: no size for function '%s'\n",
			       f->old_func_name);
			ret = -ENXIO;
			goto out;
		}
		/* TODO: check ret, size, offset */
		f->old_func_addr_end = f->old_func_addr + size;
		ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, f->old_func_addr,
					   0, 0);
		if (ret) {
			printk("kpatch: can't set ftrace filter at "
			       "%lx '%s' (%d)\n",
			       f->old_func_addr, f->old_func_name, ret);
			goto out;
		}
	}
	/* TODO: global variable/array locking */
#endif

	/* Register the ftrace trampoline if it hasn't been done already. */
	if (!kpatch_num_registered++) {
		ret = register_ftrace_function(&kpatch_ftrace_ops);
		if (ret) {
			printk("kpatch: can't register ftrace function\n");
			goto out;
		}
	}

	/*
	 * Idle the CPUs, verify activeness safety, and atomically make the new
	 * functions visible to the trampoline.
	 */
	ret = stop_machine(kpatch_apply_patch, funcs, NULL);
	if (ret) {
		if (!--kpatch_num_registered) {
			ret2 = unregister_ftrace_function(&kpatch_ftrace_ops);
			if (ret2)
				printk("kpatch: unregister failed (%d)\n",
				       ret2);
		}
		goto out;
	}

out:
	return ret;
}
EXPORT_SYMBOL(kpatch_register);
/* Called from stop_machine */
static int kpatch_remove_patch(void *data)
{
	int num_remove_funcs, i, ret = 0;
	struct kpatch_func *funcs = data;

	ret = kpatch_verify_activeness_safety(funcs);
	if (ret)
		goto out;

	for (i = 0; i < KPATCH_MAX_FUNCS; i++)
		if (kpatch_funcs[i].old_func_addr == funcs->old_func_addr)
			break;

	if (i == KPATCH_MAX_FUNCS) {
		ret = -EINVAL;
		goto out;
	}

	num_remove_funcs = kpatch_num_funcs(funcs);
	memset(&kpatch_funcs[i], 0,
	       num_remove_funcs * sizeof(struct kpatch_func));

	for (; kpatch_funcs[i + num_remove_funcs].old_func_name; i++)
		memcpy(&kpatch_funcs[i], &kpatch_funcs[i + num_remove_funcs],
		       sizeof(struct kpatch_func));

out:
	return ret;
}
int kpatch_unregister(struct module *mod)
{
	int ret = 0;
	struct kpatch_func *f;

#if 0
	ret = stop_machine(kpatch_remove_patch, funcs, NULL);
	if (ret)
		goto out;

	if (!--kpatch_num_registered) {
		ret = unregister_ftrace_function(&kpatch_ftrace_ops);
		if (ret) {
			printk("kpatch: can't unregister ftrace function\n");
			goto out;
		}
	}

	for (f = funcs; f->old_func_name; f++) {
		ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, f->old_func_addr,
					   1, 0);
		if (ret) {
			printk("kpatch: can't remove ftrace filter at "
			       "%lx '%s' (%d)\n",
			       f->old_func_addr, f->old_func_name, ret);
			goto out;
		}
	}
#endif

out:
	return ret;
}
EXPORT_SYMBOL(kpatch_unregister);

MODULE_LICENSE("GPL");
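
For illustration, here is the relocation arithmetic from the loop in
kpatch_register() worked through as a standalone user-space sketch.  This is
not part of the commit, and the two addresses are hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical addresses: dest is the location being patched,
	 * src is the symbol it should end up referring to. */
	uint64_t dest = 0xffffffffa0001010ULL; /* operand inside the new function */
	uint64_t src  = 0xffffffff81234560ULL; /* a symbol in vmlinux */

	/* R_X86_64_PC32: 32-bit PC-relative displacement, src - dest
	 * (ignoring the addend, as the commit's loop does). */
	uint32_t pc32 = (uint32_t)(src - dest);

	/* R_X86_64_32S: 32-bit sign-extended absolute address; valid
	 * because kernel addresses live in the sign-extended top half. */
	int32_t abs32s = (int32_t)src;

	printf("PC32 value: 0x%x\n", pc32);
	printf("32S value:  0x%x\n", (uint32_t)abs32s);
	return 0;
}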

kmod/kpatch-module.c (new file, 32 lines)

@@ -0,0 +1,32 @@
#include <linux/module.h>
#include "kpatch.h"

#include <linux/seq_file.h>
#include <linux/kernel_stat.h>

/* Bracketing symbols defined by kpatch.lds around the generated arrays. */
extern char __kpatch_relas, __kpatch_relas_end,
	    __kpatch_patches, __kpatch_patches_end;

static int __init patch_init(void)
{
	return kpatch_register(THIS_MODULE, &__kpatch_relas,
			       &__kpatch_relas_end, &__kpatch_patches,
			       &__kpatch_patches_end);
}

static void __exit patch_exit(void)
{
	kpatch_unregister(THIS_MODULE);
}

module_init(patch_init);
module_exit(patch_exit);

MODULE_LICENSE("GPL");

kmod/kpatch.h (new file, 30 lines)

@@ -0,0 +1,30 @@
#include <linux/ftrace.h>

#define KPATCH_MAX_FUNCS 256

struct kpatch_func {
	unsigned long old_func_addr;
	unsigned long new_func_addr;
	char *old_func_name;
	unsigned long old_func_addr_end;
	struct module *owner;
};

struct kpatch_rela {
	unsigned long dest; /* TODO: share struct header file with elfdiff */
	unsigned long src;
	unsigned long type;
};

struct kpatch_patch {
	unsigned long new;
	unsigned long orig; /* TODO: eventually add name of symbol so we can
			     * verify it with kallsyms */
};

void kpatch_trampoline(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct pt_regs *regs);
int kpatch_register(struct module *mod, void *kpatch_relas,
		    void *kpatch_relas_end, void *kpatch_patches,
		    void *kpatch_patches_end);
int kpatch_unregister(struct module *mod);
void kpatch_ftrace_hacks(void);
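
The API revolves around NULL-terminated arrays of struct kpatch_func: a
zeroed sentinel entry (old_func_name == NULL) ends the list, which is what
kpatch_num_funcs() counts on.  A tiny sketch of building one by hand; the
addresses and the symbol name are hypothetical, not from the commit:

#include "kpatch.h"

/* Hypothetical example: one patched function plus the sentinel. */
static struct kpatch_func example_funcs[] = {
	{
		.old_func_addr = 0xffffffff81234560UL, /* made up */
		.new_func_addr = 0xffffffffa0001000UL, /* made up */
		.old_func_name = "meminfo_proc_show", /* made up */
	},
	{ } /* sentinel: old_func_name == NULL terminates the array */
};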

kmod/kpatch.lds (new file, 4 lines)

@@ -0,0 +1,4 @@
__kpatch_relas = ADDR(__kpatch_relas);
__kpatch_relas_end = ADDR(__kpatch_relas) + SIZEOF(__kpatch_relas);
__kpatch_patches = ADDR(__kpatch_patches);
__kpatch_patches_end = ADDR(__kpatch_patches) + SIZEOF(__kpatch_patches);
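
Note that ADDR() and SIZEOF() take section names, so this script assumes the
generated object emits sections whose names match the symbols; each symbol
pair then brackets its section.  A sketch (mine, not from the commit,
assuming kpatch.h and linux/kernel.h are in scope) of how the bracketing
symbols become a counted array, mirroring what kpatch_register() does:

#include <linux/kernel.h>
#include "kpatch.h"

extern char __kpatch_relas, __kpatch_relas_end;

static void walk_relas(void)
{
	struct kpatch_rela *relas = (struct kpatch_rela *)&__kpatch_relas;
	/* char pointer subtraction gives a byte count */
	int num = (&__kpatch_relas_end - &__kpatch_relas) / sizeof(*relas);
	int i;

	for (i = 0; i < num; i++)
		printk("rela %d: type %lu, %lx <- %lx\n",
		       i, relas[i].type, relas[i].dest, relas[i].src);
}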

kmod/trampoline.S (new file, 67 lines)

@@ -0,0 +1,67 @@
#include <linux/linkage.h>
#include <asm/calling.h>

GLOBAL(kpatch_trampoline)
	pushq	%rcx
	pushq	%rdi
	callq	kpatch_ftrace_hacks

	leaq	kpatch_funcs, %r10

	/*
	 * TODO: if preemption is possible then we'll need to think about how
	 * to ensure atomic access to the array and how to ensure activeness
	 * safety here.  if preemption is enabled then we need to make sure
	 * the IP isn't inside kpatch_trampoline for any task.
	 */

	/* TODO: save/restore flags? */

	/* find new function in func_list */
	popq	%rdi
loop:
	movq	(%r10), %r9
	cmpq	%r9, %rdi
	je	found

	/*
	 * Check for the rare case where we don't have a new function to call.
	 * This can happen in the small window of time during patch module
	 * insmod after it has called register_ftrace_function() but before it
	 * has called stop_machine() to do the activeness safety check and the
	 * array update.  In this case we just return and let the old function
	 * run.
	 */
	cmpq	$0, %r9
	je	bail

	/* FIXME: hardcoded sizeof(struct kpatch_func);
	 * http://docs.blackfin.uclinux.org/doku.php?id=toolchain:gas:structs */
	addq	$40, %r10
	jmp	loop

found:
	/* get new function address */
	movq	8(%r10), %r10

	/* restore regs owned by original calling function */
	popq	%rax
	movq	RBP(%rax), %rbp
	movq	RBX(%rax), %rbx
	movq	R12(%rax), %r12
	movq	R13(%rax), %r13
	movq	R14(%rax), %r14
	movq	R15(%rax), %r15

	/* restore arg registers and stack for new function */
	movq	RDI(%rax), %rdi
	movq	RSI(%rax), %rsi
	movq	RDX(%rax), %rdx
	movq	RCX(%rax), %rcx
	movq	RSP(%rax), %rsp

	/* jump to new function */
	jmpq	*%r10

bail:
	ret
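
The addq $40 above hardcodes sizeof(struct kpatch_func) (five 8-byte
fields), which is what the FIXME link is about.  One way to keep the
assembly and the C struct from drifting apart is a compile-time size check
on the C side.  A minimal sketch, my suggestion rather than part of the
commit (the kernel's BUILD_BUG_ON() inside a called function such as
kpatch_register() would do the same job):

#include "kpatch.h"

/* The array size goes negative, failing the build, if struct kpatch_func
 * ever stops matching the trampoline's hardcoded 40-byte stride. */
static char kpatch_stride_check[(sizeof(struct kpatch_func) == 40) ? 1 : -1]
	__attribute__((unused));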