From 79ca5dbfa7c0fa6ade707a96c8d2c8d27eb6a9d6 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 23 Apr 2014 10:58:45 +0900
Subject: [PATCH 1/2] kmod/core: Handle registering error and unroll it

Handle a registration error by unrolling the ftrace filter. This also
introduces kpatch_get_func() and kpatch_remove_funcs_from_filter() to
factor out redundant loops.

Changes from v2:
- Rebased on the latest kpatch.

Changes from v1:
- Rename get_kpatch_func to kpatch_get_func.
- Fix a function definition style issue.
- Do not jump to a label inside an "if" block.
- Roll back the ftrace user counter if we hit an error.

Signed-off-by: Masami Hiramatsu
---
 kmod/core/core.c | 106 ++++++++++++++++++++++++++---------------------
 1 file changed, 59 insertions(+), 47 deletions(-)
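The diff below hinges on a clamp-and-unroll idiom: when installing a filter
fails partway through, num_funcs is clamped to the number of filters already
installed, and the shared cleanup helper then removes exactly those and no
more. A minimal userspace sketch of the same idiom; set_hook(), remove_hook(),
and register_hooks() are illustrative stand-ins, not kpatch code:

	#include <stdio.h>

	/* Hypothetical per-function hook setup/teardown, standing in for
	 * ftrace_set_filter_ip(); returns 0 on success, nonzero on error. */
	static int set_hook(int idx)     { return idx == 3 ? -1 : 0; }
	static void remove_hook(int idx) { printf("removed hook %d\n", idx); }

	/* Install hooks 0..n-1; on error, unroll only the hooks already set. */
	static int register_hooks(int n)
	{
		int i, ret = 0;

		for (i = 0; i < n; i++) {
			ret = set_hook(i);
			if (ret) {
				n = i;	/* clamp so cleanup covers only 0..i-1 */
				break;
			}
		}
		if (ret)
			for (i = 0; i < n; i++)
				remove_hook(i);
		return ret;
	}

	int main(void)
	{
		return register_hooks(5) ? 1 : 0;
	}

The clamp ("n = i") mirrors the "num_funcs = i" added in kpatch_register()
below, and the cleanup loop mirrors kpatch_remove_funcs_from_filter().
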
diff --git a/kmod/core/core.c b/kmod/core/core.c
index 22a7257..deb4bbf 100644
--- a/kmod/core/core.c
+++ b/kmod/core/core.c
@@ -169,6 +169,15 @@ out:
 	return ret;
 }
 
+static struct kpatch_func *kpatch_get_func(unsigned long ip)
+{
+	struct kpatch_func *f;
+
+	hash_for_each_possible(kpatch_func_hash, f, node, ip)
+		if (f->old_addr == ip)
+			return f;
+	return NULL;
+}
 
 void notrace kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 				   struct ftrace_ops *op, struct pt_regs *regs)
@@ -198,6 +207,35 @@ static struct ftrace_ops kpatch_ftrace_ops __read_mostly = {
 	.flags = FTRACE_OPS_FL_SAVE_REGS,
 };
 
+/* Remove kpatch_funcs from the ftrace filter */
+static int kpatch_remove_funcs_from_filter(struct kpatch_func *funcs,
+					   int num_funcs)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < num_funcs; i++) {
+		struct kpatch_func *func = &funcs[i];
+
+		/*
+		 * If any other modules have also patched this function, don't
+		 * remove its ftrace handler.
+		 */
+		if (kpatch_get_func(func->old_addr))
+			continue;
+
+		/* Remove the ftrace handler for this function. */
+		ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, func->old_addr,
+					   1, 0);
+		if (ret) {
+			pr_err("can't remove ftrace filter at address 0x%lx\n",
+			       func->old_addr);
+			break;
+		}
+	}
+
+	return ret;
+}
+
 int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 		    int num_funcs)
 {
@@ -210,8 +248,7 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 	down(&kpatch_mutex);
 
 	for (i = 0; i < num_funcs; i++) {
-		struct kpatch_func *f, *func = &funcs[i];
-		bool found = false;
+		struct kpatch_func *func = &funcs[i];
 
 		func->mod = mod;
 
@@ -219,14 +256,7 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 		 * If any other modules have also patched this function, it
 		 * already has an ftrace handler.
 		 */
-		hash_for_each_possible(kpatch_func_hash, f, node,
-				       func->old_addr) {
-			if (f->old_addr == func->old_addr) {
-				found = true;
-				break;
-			}
-		}
-		if (found)
+		if (kpatch_get_func(func->old_addr))
 			continue;
 
 		/* Add an ftrace handler for this function. */
@@ -235,6 +265,7 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 		if (ret) {
 			pr_err("can't set ftrace filter at address 0x%lx\n",
 			       func->old_addr);
+			num_funcs = i;
 			goto out;
 		}
 	}
@@ -244,6 +275,8 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 		ret = register_ftrace_function(&kpatch_ftrace_ops);
 		if (ret) {
 			pr_err("can't register ftrace handler\n");
+			/* Roll back the counter so the next attempt starts clean */
+			--kpatch_num_registered;
 			goto out;
 		}
 	}
@@ -259,25 +292,30 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 		if (ret2)
 			pr_err("ftrace unregister failed (%d)\n", ret2);
 	}
-
-		goto out;
 	}
 
-	/* TODO: need TAINT_KPATCH */
-	pr_notice_once("tainting kernel with TAINT_USER\n");
-	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
-
-	pr_notice("loaded patch module \"%s\"\n", mod->name);
 out:
+	/* Roll back the filter if we got any error */
+	if (ret)
+		kpatch_remove_funcs_from_filter(funcs, num_funcs);
+	else {
+		/* TODO: need TAINT_KPATCH */
+		pr_notice_once("tainting kernel with TAINT_USER\n");
+		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+
+		pr_notice("loaded patch module \"%s\"\n", mod->name);
+	}
+
 	up(&kpatch_mutex);
 	return ret;
+
 }
 EXPORT_SYMBOL(kpatch_register);
 
 int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
 		      int num_funcs)
 {
-	int i, ret;
+	int ret;
 	struct kpatch_stop_machine_args args = {
 		.funcs = funcs,
 		.num_funcs = num_funcs,
@@ -297,35 +335,9 @@ int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
 		}
 	}
 
-	for (i = 0; i < num_funcs; i++) {
-		struct kpatch_func *f, *func = &funcs[i];
-		bool found = false;
-
-		/*
-		 * If any other modules have also patched this function, don't
-		 * remove its ftrace handler.
-		 */
-		hash_for_each_possible(kpatch_func_hash, f, node,
-				       func->old_addr) {
-			if (f->old_addr == func->old_addr) {
-				found = true;
-				break;
-			}
-		}
-		if (found)
-			continue;
-
-		/* Remove the ftrace handler for this function. */
-		ret = ftrace_set_filter_ip(&kpatch_ftrace_ops, func->old_addr,
-					   1, 0);
-		if (ret) {
-			pr_err("can't remove ftrace filter at address 0x%lx\n",
-			       func->old_addr);
-			goto out;
-		}
-	}
-
-	pr_notice("unloaded patch module \"%s\"\n", mod->name);
+	ret = kpatch_remove_funcs_from_filter(funcs, num_funcs);
+	if (ret == 0)
+		pr_notice("unloaded patch module \"%s\"\n", mod->name);
 
 out:
 	up(&kpatch_mutex);

From 42e0779c0cb2bbf7f3fdc8860b7182f496a515f2 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 23 Apr 2014 10:58:45 +0900
Subject: [PATCH 2/2] kmod/core: Support live patching on NMI handlers

Support live patching on NMI handlers. This adds checks for a possible
inconsistency when live patching NMI handlers. The inconsistency problem
means any concurrent execution of an old function and its new function,
which can lead to unexpected results.

kpatch currently checks for this possible inconsistency with
stop_machine, which can cover only threads and normal interrupts.
However, because NMIs cannot be stopped that way, stop_machine is not
enough for live patching NMI handlers or sub-functions which are invoked
in NMI context.

To check for a possible inconsistency when live patching those
functions, add an atomic flag which is set by patching-target functions
invoked in NMI context while the kpatch hash table is being updated. If
the flag is set by a target function in an NMI, we cannot ensure there
is no concurrent execution of it.

This fixes issue #65.
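In other words, the updater and any NMI that fires mid-update race to
resolve a three-state status word with a single compare-and-swap, and
whichever side finishes first wins. A minimal userspace analogue of that
handshake using C11 atomics (the enum values and finish_status() below are
illustrative, not kpatch's kernel code):

	#include <stdatomic.h>
	#include <stdio.h>

	enum { STATUS_START, STATUS_SUCCESS, STATUS_FAILURE };
	static atomic_int status;

	/* Try to move START -> 'want'; return whichever finish state won. */
	static int finish_status(int want)
	{
		int expected = STATUS_START;

		if (atomic_compare_exchange_strong(&status, &expected, want))
			return want;	/* we set the finish state first */
		return expected;	/* someone else already finished */
	}

	int main(void)
	{
		atomic_store(&status, STATUS_START);
		/* An "NMI" racing with the updater flags failure... */
		finish_status(STATUS_FAILURE);
		/* ...so the updater's later attempt to commit sees that failure. */
		printf("updater sees %d\n", finish_status(STATUS_SUCCESS));
		return 0;
	}

Here the NMI's early finish_status(STATUS_FAILURE) makes the updater's
commit report failure, which is exactly the signal kpatch_apply_patch()
below turns into -EBUSY and a rollback.
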
Changes from v5:
- Fix to add a NULL check in kpatch_get_committed_func().

Changes from v4:
- Change kpatch_operation to atomic_t.
- Use smp_rmb/wmb barriers between kpatch_operation and kpatch_status.
- Check in_nmi() first and, if true, access kpatch_operation.

Changes from v3:
- Fix kpatch_apply/remove_patch to return 0 on success.

Changes from v2:
- Clean up kpatch_get_committed_func in the same style as kpatch_get_func.
- Rename opr to op in kpatch_ftrace_handler.
- Consolidate the in_nmi() and kpatch_operation checks into one condition.
- Fix an UNPATCH/PATCH typo in kpatch_register.

Changes from v1:
- Rename inconsistent_flag to kpatch_status.
- Introduce new enums and helper functions for kpatch_status.
- Use hash_del_rcu instead of hlist_del_rcu.
- Rename get_committed_func to kpatch_get_committed_func.
- Use ACCESS_ONCE for kpatch_operation to prevent compiler optimization.
- Remove the (!func || func->updating) condition from the NMI check.
- Add more precise comments.
- Fix the setting order of kpatch_status and kpatch_operation.

Signed-off-by: Masami Hiramatsu
---
 kmod/core/core.c   | 165 ++++++++++++++++++++++++++++++++++++++++++----
 kmod/core/kpatch.h |   1 +
 2 files changed, 152 insertions(+), 14 deletions(-)
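One subtle point in the diff that follows is the barrier pairing: the
updater must publish kpatch_status before kpatch_operation (smp_wmb()),
and the NMI-side reader must order its reads the opposite way (smp_rmb()),
or the NMI could observe the operation flag but a stale status word. A
userspace analogue of that pairing with C11 fences, assuming relaxed
atomics stand in for the kernel's plain atomic_t accesses (all names below
are illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	enum { OP_NONE, OP_PATCH, OP_UNPATCH };
	enum { ST_START, ST_SUCCESS, ST_FAILURE };
	static atomic_int operation, status;

	/* Updater side: publish the status word before the operation flag. */
	static void start_update(int op)
	{
		atomic_store_explicit(&status, ST_START, memory_order_relaxed);
		atomic_thread_fence(memory_order_release);	/* like smp_wmb() */
		atomic_store_explicit(&operation, op, memory_order_relaxed);
	}

	/* NMI side: check the operation flag first, then touch the status. */
	static int nmi_check(void)
	{
		int expected = ST_START;

		if (atomic_load_explicit(&operation, memory_order_relaxed) == OP_NONE)
			return 0;	/* fast path: no update in flight */
		atomic_thread_fence(memory_order_acquire);	/* like smp_rmb() */
		atomic_compare_exchange_strong(&status, &expected, ST_FAILURE);
		return 1;	/* the updater will now see ST_FAILURE */
	}

	int main(void)
	{
		start_update(OP_PATCH);
		if (nmi_check())
			printf("NMI raced with the update; roll back\n");
		return 0;
	}
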
diff --git a/kmod/core/core.c b/kmod/core/core.c
index deb4bbf..488ad90 100644
--- a/kmod/core/core.c
+++ b/kmod/core/core.c
@@ -57,6 +57,33 @@ struct kpatch_backtrace_args {
 	int num_funcs, ret;
 };
 
+enum {
+	KPATCH_STATUS_START,
+	KPATCH_STATUS_SUCCESS,
+	KPATCH_STATUS_FAILURE,
+};
+static atomic_t kpatch_status;
+
+static inline void kpatch_start_status(void)
+{
+	atomic_set(&kpatch_status, KPATCH_STATUS_START);
+}
+
+/* Try to set a finish status, and return the resulting status */
+static inline int kpatch_finish_status(int status)
+{
+	int result;
+	result = atomic_cmpxchg(&kpatch_status, KPATCH_STATUS_START, status);
+	return result == KPATCH_STATUS_START ? status : result;
+}
+
+enum {
+	KPATCH_OP_NONE,
+	KPATCH_OP_PATCH,
+	KPATCH_OP_UNPATCH,
+};
+static atomic_t kpatch_operation;
+
 void kpatch_backtrace_address_verify(void *data, unsigned long address,
 				     int reliable)
 {
@@ -143,9 +170,21 @@ static int kpatch_apply_patch(void *data)
 		struct kpatch_func *func = &funcs[i];
 
 		/* update the global list and go live */
-		hash_add(kpatch_func_hash, &func->node, func->old_addr);
+		hash_add_rcu(kpatch_func_hash, &func->node, func->old_addr);
+	}
+	/* Check if any inconsistent NMI happened while updating */
+	ret = kpatch_finish_status(KPATCH_STATUS_SUCCESS);
+	if (ret == KPATCH_STATUS_FAILURE) {
+		/* Failed, we have to roll back the patching process */
+		for (i = 0; i < num_funcs; i++)
+			hash_del_rcu(&funcs[i].node);
+		ret = -EBUSY;
+	} else {
+		/* Succeeded, clear the updating flags */
+		for (i = 0; i < num_funcs; i++)
+			funcs[i].updating = false;
+		ret = 0;
 	}
-
 out:
 	return ret;
 }
@@ -162,9 +201,19 @@ static int kpatch_remove_patch(void *data)
 	if (ret)
 		goto out;
 
-	for (i = 0; i < num_funcs; i++)
-		hlist_del(&funcs[i].node);
-
+	/* Check if any inconsistent NMI happened while updating */
+	ret = kpatch_finish_status(KPATCH_STATUS_SUCCESS);
+	if (ret == KPATCH_STATUS_FAILURE) {
+		/* Failed, we must keep the funcs on the hash table */
+		for (i = 0; i < num_funcs; i++)
+			funcs[i].updating = false;
+		ret = -EBUSY;
+	} else {
+		/* Succeeded, remove all updating funcs from the hash table */
+		for (i = 0; i < num_funcs; i++)
+			hash_del_rcu(&funcs[i].node);
+		ret = 0;
+	}
 out:
 	return ret;
 }
@@ -173,16 +222,31 @@ static struct kpatch_func *kpatch_get_func(unsigned long ip)
 {
 	struct kpatch_func *f;
 
-	hash_for_each_possible(kpatch_func_hash, f, node, ip)
+	/* Here, we have to use an RCU-safe hlist because of NMI concurrency */
+	hash_for_each_possible_rcu(kpatch_func_hash, f, node, ip)
 		if (f->old_addr == ip)
 			return f;
 	return NULL;
 }
 
-void notrace kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-				   struct ftrace_ops *op, struct pt_regs *regs)
+static struct kpatch_func *kpatch_get_committed_func(struct kpatch_func *f,
+						     unsigned long ip)
 {
-	struct kpatch_func *f;
+	/* Continue on the same hlist to find a committed (!updating) func */
+	if (f) {
+		hlist_for_each_entry_continue_rcu(f, node)
+			if (f->old_addr == ip && !f->updating)
+				return f;
+	}
+	return NULL;
+}
+
+void notrace kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+				   struct ftrace_ops *fops,
+				   struct pt_regs *regs)
+{
+	struct kpatch_func *func;
+	int ret, op;
 
 	/*
 	 * This is where the magic happens. Update regs->ip to tell ftrace to
@@ -193,12 +257,52 @@ void notrace kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 	 * in the hash bucket.
 	 */
 	preempt_disable_notrace();
-	hash_for_each_possible(kpatch_func_hash, f, node, ip) {
-		if (f->old_addr == ip) {
-			regs->ip = f->new_addr;
-			break;
+retry:
+	func = kpatch_get_func(ip);
+	if (unlikely(in_nmi())) {
+		op = atomic_read(&kpatch_operation);
+		if (likely(op == KPATCH_OP_NONE))
+			goto done;
+		/*
+		 * Make sure no memory reordering happens between
+		 * kpatch_operation and kpatch_status.
+		 */
+		smp_rmb();
+		/*
+		 * Check for NMI inconsistency.
+		 * If we can set KPATCH_STATUS_FAILURE here, it means an
+		 * NMI occurred during the updating process. In that case,
+		 * we should roll back the process.
+		 */
+		ret = kpatch_finish_status(KPATCH_STATUS_FAILURE);
+		if (ret == KPATCH_STATUS_FAILURE) {
+			/*
+			 * An inconsistency happened here; newly added funcs
+			 * have to be ignored.
+			 */
+			if (op == KPATCH_OP_PATCH)
+				func = kpatch_get_committed_func(func, ip);
+		} else {
+			/*
+			 * Here, the updating process has finished
+			 * successfully. Unpatched funcs have to be ignored.
+			 */
+			if (op == KPATCH_OP_UNPATCH)
+				func = kpatch_get_committed_func(func, ip);
+			/*
+			 * This is a very rare case but possible if the func
+			 * is added to the hash table right after calling
+			 * kpatch_get_func(ip) and before calling
+			 * kpatch_finish_status(KPATCH_STATUS_FAILURE).
+			 */
+			else if (!func)
+				goto retry;
 		}
 	}
+done:
+	if (func)
+		regs->ip = func->new_addr;
 
 	preempt_enable_notrace();
 }
@@ -251,6 +355,7 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 		struct kpatch_func *func = &funcs[i];
 
 		func->mod = mod;
+		func->updating = true;
 
 		/*
 		 * If any other modules have also patched this function, it
@@ -281,6 +386,13 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 		}
 	}
 
+	kpatch_start_status();
+	/*
+	 * Make sure no memory reordering happens between kpatch_operation
+	 * and kpatch_status. kpatch_ftrace_handler() has the corresponding smp_rmb().
+	 */
+	smp_wmb();
+	atomic_set(&kpatch_operation, KPATCH_OP_PATCH);
 	/*
 	 * Idle the CPUs, verify activeness safety, and atomically make the new
 	 * functions visible to the trampoline.
@@ -292,6 +404,12 @@ int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 		if (ret2)
 			pr_err("ftrace unregister failed (%d)\n", ret2);
 	}
+	/*
+	 * This synchronize_rcu() ensures that any other kpatch_get_func()
+	 * user exits the RCU-locked (preempt-disabled) critical section
+	 * and that hash_del_rcu() is completely finished.
+	 */
+	synchronize_rcu();
 }
 
 out:
@@ -306,6 +424,7 @@ out:
 		pr_notice("loaded patch module \"%s\"\n", mod->name);
 	}
 
+	atomic_set(&kpatch_operation, KPATCH_OP_NONE);
 	up(&kpatch_mutex);
 	return ret;
 
@@ -315,7 +434,7 @@ EXPORT_SYMBOL(kpatch_register);
 int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
 		      int num_funcs)
 {
-	int ret;
+	int i, ret;
 	struct kpatch_stop_machine_args args = {
 		.funcs = funcs,
 		.num_funcs = num_funcs,
@@ -323,6 +442,17 @@ int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
 
 	down(&kpatch_mutex);
 
+	/* Start the unpatching operation */
+	kpatch_start_status();
+	/*
+	 * Make sure no memory reordering happens between kpatch_operation
+	 * and kpatch_status. kpatch_ftrace_handler() has the corresponding smp_rmb().
+	 */
+	smp_wmb();
+	atomic_set(&kpatch_operation, KPATCH_OP_UNPATCH);
+	for (i = 0; i < num_funcs; i++)
+		funcs[i].updating = true;
+
 	ret = stop_machine(kpatch_remove_patch, &args, NULL);
 	if (ret)
 		goto out;
@@ -334,12 +464,19 @@ int kpatch_unregister(struct module *mod, struct kpatch_func *funcs,
 			goto out;
 		}
 	}
+	/*
+	 * This synchronize_rcu() ensures that any other kpatch_get_func()
+	 * user exits the RCU-locked (preempt-disabled) critical section
+	 * and that hash_del_rcu() is completely finished.
+	 */
+	synchronize_rcu();
 
 	ret = kpatch_remove_funcs_from_filter(funcs, num_funcs);
 	if (ret == 0)
 		pr_notice("unloaded patch module \"%s\"\n", mod->name);
 
 out:
+	atomic_set(&kpatch_operation, KPATCH_OP_NONE);
 	up(&kpatch_mutex);
 	return ret;
 }
diff --git a/kmod/core/kpatch.h b/kmod/core/kpatch.h
index f649302..d9893bd 100644
--- a/kmod/core/kpatch.h
+++ b/kmod/core/kpatch.h
@@ -33,6 +33,7 @@ struct kpatch_func {
 	unsigned long old_size;
 	struct module *mod;
 	struct hlist_node node;
+	bool updating;
 };
 
 extern int kpatch_register(struct module *mod, struct kpatch_func *funcs,
 			   int num_funcs);
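
A final note on kpatch_get_committed_func(): when an NMI must ignore
in-flight entries, it keeps walking the same hash chain past the first
match until it finds a committed (!updating) entry, and the NULL check
added in v6 makes a miss safe. A userspace sketch of that chain walk, with
a plain singly linked list standing in for the RCU hlist (struct func and
both helpers are illustrative, not kpatch code):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Userspace stand-in for one hash-bucket chain of kpatch_func entries. */
	struct func {
		unsigned long old_addr;
		bool updating;
		struct func *next;
	};

	/* First match in the chain, analogous to kpatch_get_func(). */
	static struct func *get_func(struct func *head, unsigned long ip)
	{
		for (; head; head = head->next)
			if (head->old_addr == ip)
				return head;
		return NULL;
	}

	/* Continue past 'f' to a committed (!updating) entry, analogous to
	 * kpatch_get_committed_func(); note the NULL check. */
	static struct func *get_committed_func(struct func *f, unsigned long ip)
	{
		if (f)
			for (f = f->next; f; f = f->next)
				if (f->old_addr == ip && !f->updating)
					return f;
		return NULL;
	}

	int main(void)
	{
		struct func committed = { 0x1000, false, NULL };
		struct func updating  = { 0x1000, true, &committed };

		/* An NMI that must ignore in-flight entries falls back to
		 * the committed one further down the same chain. */
		struct func *f = get_func(&updating, 0x1000);
		if (f && f->updating)
			f = get_committed_func(f, 0x1000);
		printf("using %s entry\n",
		       f && !f->updating ? "committed" : "no");
		return 0;
	}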