diff -Nupr src.orig/fs/proc/array.c src/fs/proc/array.c
--- src.orig/fs/proc/array.c	2021-04-20 11:04:26.717100594 -0400
+++ src/fs/proc/array.c	2021-04-20 11:05:18.430230343 -0400
@@ -370,12 +370,19 @@ static inline void task_seccomp(struct s
 	seq_putc(m, '\n');
 }
 
+#include <linux/livepatch.h>
 static inline void task_context_switch_counts(struct seq_file *m,
 						struct task_struct *p)
 {
+	int *newpid;
+
 	seq_put_decimal_ull(m, "voluntary_ctxt_switches:\t", p->nvcsw);
 	seq_put_decimal_ull(m, "\nnonvoluntary_ctxt_switches:\t", p->nivcsw);
 	seq_putc(m, '\n');
+
+	newpid = klp_shadow_get(p, 0);
+	if (newpid)
+		seq_printf(m, "newpid:\t%d\n", *newpid);
 }
 
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
diff -Nupr src.orig/kernel/exit.c src/kernel/exit.c
--- src.orig/kernel/exit.c	2021-04-20 11:04:27.314102092 -0400
+++ src/kernel/exit.c	2021-04-20 11:05:18.430230343 -0400
@@ -701,6 +701,7 @@ static void check_stack_usage(void)
 static inline void check_stack_usage(void) {}
 #endif
 
+#include <linux/livepatch.h>
 void __noreturn do_exit(long code)
 {
 	struct task_struct *tsk = current;
@@ -794,6 +795,8 @@ void __noreturn do_exit(long code)
 	exit_task_work(tsk);
 	exit_thread(tsk);
 
+	klp_shadow_free(tsk, 0, NULL);
+
 	/*
 	 * Flush inherited counters to the parent - before the parent
 	 * gets woken up by child-exit notifications.
diff -Nupr src.orig/kernel/fork.c src/kernel/fork.c
--- src.orig/kernel/fork.c	2021-04-20 11:04:27.315102095 -0400
+++ src/kernel/fork.c	2021-04-20 11:05:18.431230346 -0400
@@ -2222,6 +2222,7 @@ struct mm_struct *copy_init_mm(void)
  * It copies the process, and if successful kick-starts
  * it and waits for it to finish using the VM if required.
  */
+#include <linux/livepatch.h>
 long _do_fork(unsigned long clone_flags,
 	      unsigned long stack_start,
 	      unsigned long stack_size,
@@ -2234,6 +2235,8 @@ long _do_fork(unsigned long clone_flags,
 	struct task_struct *p;
 	int trace = 0;
 	long nr;
+	int *newpid;
+	static int ctr = 0;
 
 	/*
 	 * Determine whether and which event to report to ptracer. When
@@ -2260,6 +2263,11 @@ long _do_fork(unsigned long clone_flags,
 	if (IS_ERR(p))
 		return PTR_ERR(p);
 
+	newpid = klp_shadow_get_or_alloc(p, 0, sizeof(*newpid), GFP_KERNEL,
+					 NULL, NULL);
+	if (newpid)
+		*newpid = ctr++;
+
 	/*
 	 * Do this prior waking up the new thread - the thread pointer
 	 * might get invalid after that point, if the thread exits quickly.
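
One cleanup detail worth noting: the patched do_exit() frees each task's shadow variable as that task dies, but shadow variables attached to tasks that are still alive when the livepatch module itself goes away are not reclaimed by the diff above. A minimal sketch of how the module's exit path could reap them with klp_shadow_free_all() follows; the module and function names are illustrative, and the patch-registration side of the module is omitted.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/livepatch.h>

/* Illustrative exit hook for the companion module, not part of the patch above. */
static void __exit livepatch_shadow_newpid_exit(void)
{
	/*
	 * Free every shadow variable registered under id 0 (the id used by
	 * _do_fork()/do_exit() above), whatever task_struct it is attached
	 * to.  A plain int needs no destructor, so pass NULL.
	 */
	klp_shadow_free_all(0, NULL);
}

module_exit(livepatch_shadow_newpid_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");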