Merge pull request #162 from jpoimboe/lkml

a few style fixes
This commit is contained in:
Seth Jennings 2014-05-01 16:14:01 -05:00
commit 1717e17290
3 changed files with 41 additions and 38 deletions

View File

@@ -1,12 +1,12 @@
 kpatch: dynamic kernel patching
 ===============================
 
-kpatch is a Linux dynamic kernel patching tool which allows you to patch a
-running kernel without rebooting or restarting any processes. It enables
-sysadmins to apply critical security patches to the kernel immediately, without
-having to wait for long-running tasks to complete, users to log off, or
-for scheduled reboot windows. It gives more control over uptime without
-sacrificing security or stability.
+kpatch is a Linux dynamic kernel patching infrastructure which allows you to
+patch a running kernel without rebooting or restarting any processes. It
+enables sysadmins to apply critical security patches to the kernel immediately,
+without having to wait for long-running tasks to complete, for users to log
+off, or for scheduled reboot windows. It gives more control over uptime
+without sacrificing security or stability.
 
 kpatch is currently in active development. For now, it should _not_ be used
 in production environments.
@@ -205,11 +205,18 @@ ability to arbitrarily modify the kernel, with or without kpatch.
 
 **Q. How can I detect if somebody has patched the kernel?**
 
-We hope to create a new kernel TAINT flag which will get set whenever a patch
-module is loaded. We are currently using the `TAINT_USER` flag.
+When a patch module is loaded, the `TAINT_USER` flag is set. To test for it,
+`cat /proc/sys/kernel/tainted` and check to see if the value of 64 has been
+OR'ed in.
 
-Also, many distros ship with cryptographically signed kernel modules, and will
-taint the kernel anyway if you load an unsigned module.
+Eventually we hope to have a dedicated `TAINT_KPATCH` flag instead.
+
+Note that the `TAINT_OOT_MODULE` flag (4096) will also be set, since the patch
+module is built outside the Linux kernel source tree.
+
+If your patch module is unsigned, the `TAINT_FORCED_MODULE` flag (2) will also
+be set. Starting with Linux 3.15, this will be changed to the more specific
+`TAINT_UNSIGNED_MODULE` (8192).
 
 **Q. Will it destabilize my system?**

View File

@@ -13,24 +13,22 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA,
- * 02110-1301, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-/* Contains the code for the core kpatch module. Each patch module registers
- * with this module to redirect old functions to new functions.
+/*
+ * kpatch core module
  *
- * Each patch module can contain one or more new functions. This information
- * is contained in the .patches section of the patch module. For each function
- * patched by the module we must:
+ * Patch modules register with this module to redirect old functions to new
+ * functions.
+ *
+ * For each function patched by the module we must:
  * - Call stop_machine
- * - Ensure that no execution thread is currently in the old function (or has
- *   it in the call stack)
- * - Add the new function address to the kpatch_funcs table
+ * - Ensure that no task has the old function in its call stack
+ * - Add the new function address to kpatch_func_hash
  *
  * After that, each call to the old function calls into kpatch_ftrace_handler()
- * which finds the new function in the kpatch_funcs table and updates the
+ * which finds the new function in kpatch_func_hash table and updates the
  * return instruction pointer so that ftrace will return to the new function.
  */
@@ -47,9 +45,9 @@
 #include "kpatch.h"
 
 #define KPATCH_HASH_BITS 8
-DEFINE_HASHTABLE(kpatch_func_hash, KPATCH_HASH_BITS);
+static DEFINE_HASHTABLE(kpatch_func_hash, KPATCH_HASH_BITS);
 
-DEFINE_SEMAPHORE(kpatch_mutex);
+static DEFINE_SEMAPHORE(kpatch_mutex);
 
 static int kpatch_num_registered;
@@ -143,8 +141,8 @@ static struct kpatch_func *kpatch_get_prev_func(struct kpatch_func *f,
 	return NULL;
 }
 
-void kpatch_backtrace_address_verify(void *data, unsigned long address,
-				     int reliable)
+static void kpatch_backtrace_address_verify(void *data, unsigned long address,
+					    int reliable)
 {
 	struct kpatch_backtrace_args *args = data;
 	struct kpatch_module *kpmod = args->kpmod;
@@ -170,8 +168,8 @@ void kpatch_backtrace_address_verify(void *data, unsigned long address,
 	}
 
 	if (address >= func_addr && address < func_addr + func_size) {
-		pr_err("activeness safety check failed for function "
-		       "at address 0x%lx\n", func_addr);
+		pr_err("activeness safety check failed for function at address 0x%lx\n",
+		       func_addr);
 		args->ret = -EBUSY;
 		return;
 	}
@@ -183,10 +181,10 @@ static int kpatch_backtrace_stack(void *data, char *name)
 	return 0;
 }
 
-struct stacktrace_ops kpatch_backtrace_ops = {
+static const struct stacktrace_ops kpatch_backtrace_ops = {
 	.address	= kpatch_backtrace_address_verify,
 	.stack		= kpatch_backtrace_stack,
 	.walk_stack	= print_context_stack_bp,
 };
@@ -292,9 +290,9 @@ static int kpatch_remove_patch(void *data)
 /*
  * function, the last one to register wins, as it'll be first in the hash
  * bucket.
  */
-void notrace kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-				   struct ftrace_ops *fops,
-				   struct pt_regs *regs)
+static void notrace
+kpatch_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+		      struct ftrace_ops *fops, struct pt_regs *regs)
 {
 	struct kpatch_func *func;
 	int state;
@@ -409,7 +407,7 @@ int kpatch_register(struct kpatch_module *kpmod)
 		}
 	}
 
-	/* Register the ftrace trampoline if it hasn't been done already. */
+	/* Register the ftrace handler if it hasn't been done already. */
 	if (!kpatch_num_registered) {
 		ret = register_ftrace_function(&kpatch_ftrace_ops);
 		if (ret) {
@@ -426,7 +424,7 @@ int kpatch_register(struct kpatch_module *kpmod)
 
 	/*
 	 * Idle the CPUs, verify activeness safety, and atomically make the new
-	 * functions visible to the trampoline.
+	 * functions visible to the ftrace handler.
 	 */
 	ret = stop_machine(kpatch_apply_patch, kpmod, NULL);

View File

@@ -15,9 +15,7 @@
  * GNU General Public License for more details.
  *
  * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA,
- * 02110-1301, USA.
+ * along with this program; if not, see <https://www.gnu.org/licenses/>.
  *
  * Contains the API for the core kpatch module used by the patch modules
  */