create-diff-object: Add support for arm64 DYNAMIC_FTRACE_WITH_CALL_OPS
For arm64 this option uses -fpatchable-function-entry=M,2, so 2 NOPs are
placed before the function entry point (in order to store a pointer to
ftrace_ops). When calculating function padding, check for the presence of
the two NOPs and adjust the padding size by 8 if they are found.

This was merged in the upstream kernel in v6.8 with:
baaf553d3bc3 ("arm64: Implement HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS")

Taking this into account, the entry of a function can look like one of:

1. Without DYNAMIC_FTRACE_WITH_CALL_OPS and without CONFIG_ARM64_BTI_KERNEL
----------------------------------------------------------------------------

Disassembly of section .text.cmdline_proc_show:

0000000000000008 <cmdline_proc_show>:
   8:   d503201f        nop
   c:   d503201f        nop

2. Without DYNAMIC_FTRACE_WITH_CALL_OPS and with CONFIG_ARM64_BTI_KERNEL
-------------------------------------------------------------------------

Disassembly of section .text.cmdline_proc_show:

0000000000000008 <cmdline_proc_show>:
   0:   d503245f        bti     c
   4:   d503201f        nop
   8:   d503201f        nop

3. With DYNAMIC_FTRACE_WITH_CALL_OPS and without CONFIG_ARM64_BTI_KERNEL
-------------------------------------------------------------------------

Disassembly of section .text.cmdline_proc_show:

0000000000000000 <cmdline_proc_show-0x8>:
   0:   d503201f        nop
   4:   d503201f        nop

0000000000000008 <cmdline_proc_show>:
   8:   d503201f        nop
   c:   d503201f        nop

4. With DYNAMIC_FTRACE_WITH_CALL_OPS and with CONFIG_ARM64_BTI_KERNEL
----------------------------------------------------------------------

Disassembly of section .text.cmdline_proc_show:

0000000000000000 <cmdline_proc_show-0x8>:
   0:   d503201f        nop
   4:   d503201f        nop

0000000000000008 <cmdline_proc_show>:
   8:   d503245f        bti     c
   c:   d503201f        nop
  10:   d503201f        nop

Make create-diff-object aware of DYNAMIC_FTRACE_WITH_CALL_OPS and its
quirks.

Signed-off-by: Dylan Hatch <dylanbhatch@google.com>
Signed-off-by: Puranjay Mohan <pjy@amazon.com>
parent 0db9999069
commit c1ee849bed
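The padding detection added by the diff below boils down to the following. This is a minimal, self-contained sketch (call_ops_padding is an illustrative name, not kpatch API), assuming, like the patch itself, that the section bytes are in memory and that instruction words can be compared as host-endian uint32_t values:

#include <stdint.h>
#include <stddef.h>

#define AARCH64_NOP 0xd503201fu	/* encoding of the arm64 "nop" instruction */

/*
 * Illustrative helper: scan the instruction words that precede the
 * symbol's entry point (sym.st_value bytes into its section) and report
 * 8 bytes of pre-entry padding when exactly two NOPs are found, as
 * emitted by -fpatchable-function-entry=M,2.
 */
unsigned int call_ops_padding(const void *sec_data, size_t sym_offset)
{
	const uint32_t *insn = sec_data;
	const void *entry = (const char *)sec_data + sym_offset;
	unsigned int nops = 0;

	/* Count leading NOPs up to the function entry point. */
	while ((const void *)insn < entry && *insn == AARCH64_NOP) {
		nops++;
		insn++;
	}

	/* Exactly two NOPs => 8 bytes of CALL_OPS padding. */
	return nops == 2 ? 8 : 0;
}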
@@ -255,6 +255,37 @@ static bool kpatch_is_mapping_symbol(struct kpatch_elf *kelf, struct symbol *sym)
 	return false;
 }
 
+static unsigned int function_padding_size(struct kpatch_elf *kelf, struct symbol *sym)
+{
+	unsigned int size = 0;
+
+	switch (kelf->arch) {
+	case AARCH64:
+	{
+		uint32_t *insn = sym->sec->data->d_buf;
+		unsigned int i;
+		void *insn_end = sym->sec->data->d_buf + sym->sym.st_value;
+
+		/*
+		 * If the arm64 kernel is compiled with CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
+		 * then there are two NOPs before the function and a `BTI C` + 2 NOPs at the
+		 * start of the function. Verify the presence of the two NOPs before the
+		 * function entry.
+		 */
+		for (i = 0; (void *)insn < insn_end && *insn == 0xd503201f; i++, insn++)
+			;
+
+		if (i == 2)
+			size = 8;
+		break;
+	}
+	default:
+		break;
+	}
+
+	return size;
+}
+
 /*
  * When compiling with -ffunction-sections and -fdata-sections, almost every
  * symbol gets its own dedicated section. We call such symbols "bundled"
@@ -271,6 +302,8 @@ static void kpatch_bundle_symbols(struct kpatch_elf *kelf)
 				expected_offset = sym->pfx->sym.st_size;
 			else if (is_gcc6_localentry_bundled_sym(kelf, sym))
 				expected_offset = 8;
+			else if (sym->type == STT_FUNC)
+				expected_offset = function_padding_size(kelf, sym);
 			else
 				expected_offset = 0;
 
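In other words, with CALL_OPS enabled a function symbol's entry point sits 8 bytes (two 4-byte NOPs) into its own section, as in cases 3 and 4 of the disassembly above where cmdline_proc_show starts at offset 0x8 while its section starts at 0x0; the bundling check therefore has to accept that offset for STT_FUNC symbols rather than insisting on 0.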
@@ -3806,8 +3839,15 @@ static void kpatch_create_ftrace_callsite_sections(struct kpatch_elf *kelf, bool
 		switch(kelf->arch) {
 		case AARCH64: {
 			unsigned char *insn = sym->sec->data->d_buf;
+			int padding;
 			int i;
 
+			/*
+			 * Skip the padding NOPs added by CALL_OPS.
+			 */
+			padding = function_padding_size(kelf, sym);
+			insn += padding;
+
 			/*
 			 * If BTI (Branch Target Identification) is enabled then there
 			 * might be an additional 'BTI C' instruction before the two
@@ -3817,7 +3857,8 @@ static void kpatch_create_ftrace_callsite_sections(struct kpatch_elf *kelf, bool
 			if (insn[0] == 0x5f) {
 				if (insn[1] != 0x24 || insn[2] != 0x03 || insn[3] != 0xd5)
 					ERROR("%s: unexpected instruction in patch section of function\n", sym->name);
-				insn_offset += 4;
+				if (!padding)
+					insn_offset += 4;
 				insn += 4;
 			}
 			for (i = 0; i < 8; i += 4) {
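For reference, the byte-wise comparisons in this hunk match the little-endian storage of the relevant AArch64 instruction: `bti c` encodes as 0xd503245f, which appears in the raw section bytes as 5f 24 03 d5 (and `nop` as 0xd503201f, i.e. 1f 20 03 d5). A small sketch of the same test, with an illustrative helper name not taken from kpatch:

/*
 * "bti c" is 0xd503245f; AArch64 instructions are stored little-endian,
 * so in the raw section bytes it appears as 5f 24 03 d5 -- hence the
 * insn[0] == 0x5f test above. (is_bti_c is an illustrative name.)
 */
static int is_bti_c(const unsigned char *insn)
{
	return insn[0] == 0x5f && insn[1] == 0x24 &&
	       insn[2] == 0x03 && insn[3] == 0xd5;
}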