kpatch/kpatch-build/create-diff-object.c
Julien Thierry e49e3a59c2 create-diff-object: Rename elements getting correlated
Change 935f199875 ('create-diff-object: simplify mangled function
correlation') simplified the way symbols are correlated and got rid of
symbol/section renaming.

As a result, a symbol/section can now have a CHANGED status while being
correlated to an element that doesn't have exactly the same name. This
causes lookups into the original object to fail when creating the new
patch object.

So let's bring back the symbol/section renaming, but only apply it once
the elements have actually been correlated.

Fixes: 935f199875 ('create-diff-object: simplify mangled function
correlation')
Signed-off-by: Julien Thierry <jthierry@redhat.com>
2019-10-29 15:27:51 +00:00

/*
* create-diff-object.c
*
* Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
* Copyright (C) 2013-2014 Josh Poimboeuf <jpoimboe@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA,
* 02110-1301, USA.
*/
/*
* This file contains the heart of the ELF object differencing engine.
*
* The tool takes two ELF objects from two versions of the same source
* file; a "base" object and a "patched" object. These object need to have
* been compiled with the -ffunction-sections and -fdata-sections GCC options.
*
* The tool compares the objects at a section level to determine what
* sections have changed. Once a list of changed sections has been generated,
* various rules are applied to determine any object-local sections that
* are dependencies of the changed sections and also need to be included in
* the output object.
*/
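/*
 * Purely illustrative (not part of the original source): a command line of
 * the form
 *
 *   gcc -O2 -ffunction-sections -fdata-sections -c foo.c -o foo.o
 *
 * produces the kind of object this tool expects, with each function in its
 * own .text.<name> section and each datum in its own .data.<name> /
 * .rodata.<name> / .bss.<name> section.
 */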
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <error.h>
#include <gelf.h>
#include <argp.h>
#include <libgen.h>
#include <unistd.h>
#include "list.h"
#include "lookup.h"
#include "asm/insn.h"
#include "kpatch-patch.h"
#include "kpatch-elf.h"
#include "kpatch-intermediate.h"
#include "kpatch.h"
#define DIFF_FATAL(format, ...) \
({ \
fprintf(stderr, "ERROR: %s: " format "\n", childobj, ##__VA_ARGS__); \
error(EXIT_STATUS_DIFF_FATAL, 0, "unreconcilable difference"); \
})
#ifdef __powerpc64__
#define ABSOLUTE_RELA_TYPE R_PPC64_ADDR64
#else
#define ABSOLUTE_RELA_TYPE R_X86_64_64
#endif
char *childobj;
enum subsection {
SUBSECTION_NORMAL,
SUBSECTION_HOT,
SUBSECTION_UNLIKELY
};
enum loglevel loglevel = NORMAL;
/*******************
* Data structures
* ****************/
struct special_section {
char *name;
int (*group_size)(struct kpatch_elf *kelf, int offset);
};
/*************
* Functions
* **********/
static int is_bundleable(struct symbol *sym)
{
if (sym->type == STT_FUNC &&
!strncmp(sym->sec->name, ".text.",6) &&
!strcmp(sym->sec->name + 6, sym->name))
return 1;
if (sym->type == STT_FUNC &&
!strncmp(sym->sec->name, ".text.unlikely.",15) &&
(!strcmp(sym->sec->name + 15, sym->name) ||
(strstr(sym->name, ".cold.") &&
!strncmp(sym->sec->name + 15, sym->name, strlen(sym->sec->name) - 15))))
return 1;
if (sym->type == STT_FUNC &&
!strncmp(sym->sec->name, ".text.hot.",10) &&
!strcmp(sym->sec->name + 10, sym->name))
return 1;
if (sym->type == STT_OBJECT &&
!strncmp(sym->sec->name, ".data.",6) &&
!strcmp(sym->sec->name + 6, sym->name))
return 1;
if (sym->type == STT_OBJECT &&
!strncmp(sym->sec->name, ".data.rel.", 10) &&
!strcmp(sym->sec->name + 10, sym->name))
return 1;
if (sym->type == STT_OBJECT &&
!strncmp(sym->sec->name, ".data.rel.ro.", 13) &&
!strcmp(sym->sec->name + 13, sym->name))
return 1;
if (sym->type == STT_OBJECT &&
!strncmp(sym->sec->name, ".rodata.",8) &&
!strcmp(sym->sec->name + 8, sym->name))
return 1;
if (sym->type == STT_OBJECT &&
!strncmp(sym->sec->name, ".bss.",5) &&
!strcmp(sym->sec->name + 5, sym->name))
return 1;
return 0;
}
#ifdef __powerpc64__
/* Symbol st_other value for powerpc */
#define STO_PPC64_LOCAL_BIT 5
#define STO_PPC64_LOCAL_MASK (7 << STO_PPC64_LOCAL_BIT)
#define PPC64_LOCAL_ENTRY_OFFSET(other) \
(((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2)
/*
* On ppc64le, the function prologue generated by GCC 6+ has the sequence:
*
* .globl my_func
* .type my_func, @function
* .quad .TOC.-my_func
* my_func:
* .reloc ., R_PPC64_ENTRY ; optional
* ld r2,-8(r12)
* add r2,r2,r12
* .localentry my_func, .-my_func
*
* my_func is the global entry point, which, when called, sets up the TOC.
* .localentry is the local entry point, for calls to the function from within
* the object file. The local entry point is 8 bytes after the global entry
* point.
*/
static int is_gcc6_localentry_bundled_sym(struct symbol *sym)
{
return ((PPC64_LOCAL_ENTRY_OFFSET(sym->sym.st_other) != 0) &&
sym->sym.st_value == 8);
}
#else
static int is_gcc6_localentry_bundled_sym(struct symbol *sym)
{
return 0;
}
#endif
static struct rela *toc_rela(const struct rela *rela)
{
if (rela->type != R_PPC64_TOC16_HA &&
rela->type != R_PPC64_TOC16_LO_DS)
return (struct rela *)rela;
/* Will return NULL for .toc constant entries */
return find_rela_by_offset(rela->sym->sec->rela, rela->addend);
}
/*
* When compiling with -ffunction-sections and -fdata-sections, almost every
* symbol gets its own dedicated section. We call such symbols "bundled"
* symbols. They're indicated by "sym->sec->sym == sym".
*/
static void kpatch_bundle_symbols(struct kpatch_elf *kelf)
{
struct symbol *sym;
list_for_each_entry(sym, &kelf->symbols, list) {
if (is_bundleable(sym)) {
if (sym->sym.st_value != 0 &&
!is_gcc6_localentry_bundled_sym(sym)) {
ERROR("symbol %s at offset %lu within section %s, expected 0",
sym->name, sym->sym.st_value,
sym->sec->name);
}
sym->sec->sym = sym;
}
}
}
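/*
 * Illustrative example (not from the original source): with
 * -ffunction-sections, a function foo() is emitted into its own section
 * .text.foo; is_bundleable() matches the symbol name against the section
 * name suffix, and kpatch_bundle_symbols() records the pairing via
 * sym->sec->sym = sym, which is exactly the "sym->sec->sym == sym" test
 * used elsewhere to identify bundled symbols.
 */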
/*
* During optimization gcc may move unlikely execution branches into *.cold
* subfunctions. kpatch_detect_child_functions detects such subfunctions and
* cross-references them with their parent functions through parent/child
* pointers.
*/
static void kpatch_detect_child_functions(struct kpatch_elf *kelf)
{
struct symbol *sym;
list_for_each_entry(sym, &kelf->symbols, list) {
char *coldstr;
coldstr = strstr(sym->name, ".cold.");
if (coldstr != NULL) {
char *pname;
pname = strndup(sym->name, coldstr - sym->name);
if (!pname)
ERROR("strndup");
sym->parent = find_symbol_by_name(&kelf->symbols, pname);
free(pname);
if (!sym->parent)
ERROR("failed to find parent function for %s", sym->name);
sym->parent->child = sym;
}
}
}
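/*
 * Illustrative example (not from the original source): if the object
 * contains the symbols "foo" and "foo.cold.15", the loop above truncates
 * the child's name at ".cold." to recover "foo", then links
 * foo->child = foo.cold.15 and foo.cold.15->parent = foo.
 */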
static bool is_dynamic_debug_symbol(struct symbol *sym)
{
if (sym->type == STT_OBJECT && !strcmp(sym->sec->name, "__verbose"))
return true;
if (sym->type == STT_SECTION && !strcmp(sym->name, "__verbose"))
return true;
return false;
}
/*
* This function detects whether the given symbol is a "special" static local
* variable (for lack of a better term).
*
* Special static local variables should never be correlated and should always
* be included if they are referenced by an included function.
*/
static int is_special_static(struct symbol *sym)
{
static char *prefixes[] = {
"__key.",
"__warned.",
"__func__.",
"__FUNCTION__.",
"_rs.",
"CSWTCH.",
NULL,
};
char **prefix;
if (!sym)
return 0;
/* pr_debug() uses static local variables in the __verbose section */
if (is_dynamic_debug_symbol(sym))
return 1;
if (sym->type == STT_SECTION) {
/* make sure section is bundled */
if (!sym->sec->sym)
return 0;
/* use bundled object/function symbol for matching */
sym = sym->sec->sym;
}
if (sym->type != STT_OBJECT || sym->bind != STB_LOCAL)
return 0;
for (prefix = prefixes; *prefix; prefix++)
if (!strncmp(sym->name, *prefix, strlen(*prefix)))
return 1;
return 0;
}
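/*
 * Illustrative example (not from the original source): gcc-mangled statics
 * such as "__warned.12345" (the WARN_ONCE-style "already warned" flag) or
 * "__func__.54321" match the prefix list above. Their numeric suffixes are
 * not stable across builds, so they are never correlated; they are simply
 * included whenever an included function references them.
 */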
/*
* This is like strcmp, but for gcc-mangled symbols. It skips the comparison
* of any substring which consists of '.' followed by any number of digits.
*/
static int kpatch_mangled_strcmp(char *s1, char *s2)
{
/*
* ELF string sections aren't mangled, though they look that way. Just
* compare them normally.
*/
if (strstr(s1, ".str1."))
return strcmp(s1, s2);
while (*s1 == *s2) {
if (!*s1)
return 0;
if (*s1 == '.' && isdigit(s1[1])) {
if (!isdigit(s2[1]))
return 1;
while (isdigit(*++s1))
;
while (isdigit(*++s2))
;
} else {
s1++;
s2++;
}
}
return 1;
}
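/*
 * Illustrative behaviour (not from the original source):
 *
 *   kpatch_mangled_strcmp("__foo.1234", "__foo.31452")  returns 0 (equal)
 *   kpatch_mangled_strcmp("__foo.1234", "__bar.1234")   returns nonzero
 *   kpatch_mangled_strcmp("a.str1.1", "a.str1.8")       returns nonzero,
 *     since string sections are compared verbatim with strcmp()
 */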
static int rela_equal(struct rela *rela1, struct rela *rela2)
{
struct rela *rela_toc1, *rela_toc2;
unsigned long toc_data1 = 0, toc_data2 = 0; /* = 0 to prevent gcc warning */
if (rela1->type != rela2->type ||
rela1->offset != rela2->offset)
return 0;
/*
* With -mcmodel=large on ppc64le, GCC might generate entries in the .toc
* section for relocation symbol references. The .toc offsets may change
* between the original and patched .o, so comparing ".toc + offset" isn't
* right. Compare the .toc-based symbols by reading the corresponding relas
* from the .toc section.
*/
rela_toc1 = toc_rela(rela1);
if (!rela_toc1) {
/*
* .toc section entries are mostly placeholders for relocation entries, which
* are specified in the .rela.toc section. Sometimes the .toc section may also
* contain constants as entries. These constants are not references to any
* symbols; they are raw values, mostly resulting from arithmetic in the
* functions that refer to them.
*
* Functions refer to them like normal .toc entries, but these entries cannot
* be resolved to any symbol.
*
* Disassembly of section .toc:
*
* 0000000000000000 <.toc>:
* ...
* 148: R_PPC64_ADDR64 .data.capacity_margin
* 150: 0b d7 a3 70 andi. r3,r5,55051
* 154: 3d 0a d7 a3 lhz r30,2621(r23)
* 158: R_PPC64_ADDR64 sched_max_numa_distance
*
* Relocation section '.rela.toc' at offset 0xadac0 contains 160 entries:
* Offset Info Type Symbol's Value Symbol's Name + Addend
* ...
* 0000000000000148 0000009100000026 R_PPC64_ADDR64 0000000000000000 .data.capacity_margin + 0
* 0000000000000158 000001a500000026 R_PPC64_ADDR64 0000000000000000 sched_max_numa_distance + 0
*
* Relocation section '.rela.text.select_task_rq_fair' at offset 0x90e98 contains 37 entries:
* Offset Info Type Symbol's Value Symbol's Name + Addend
* ...
* 00000000000004a0 0000008800000032 R_PPC64_TOC16_HA 0000000000000000 .toc + 148
* 00000000000004ac 0000008800000040 R_PPC64_TOC16_LO_DS 0000000000000000 .toc + 148
* 0000000000000514 0000008800000032 R_PPC64_TOC16_HA 0000000000000000 .toc + 150
* 000000000000051c 0000008800000040 R_PPC64_TOC16_LO_DS 0000000000000000 .toc + 150
*/
memcpy(&toc_data1, rela1->sym->sec->data->d_buf + rela1->addend, sizeof(toc_data1));
if (!toc_data1)
ERROR(".toc entry not found %s + %x", rela1->sym->name, rela1->addend);
}
rela_toc2 = toc_rela(rela2);
if (!rela_toc2) {
memcpy(&toc_data2, rela2->sym->sec->data->d_buf + rela2->addend, sizeof(toc_data2));
if (!toc_data2)
ERROR(".toc entry not found %s + %x", rela2->sym->name, rela2->addend);
}
if (!rela_toc1 && !rela_toc2)
return toc_data1 == toc_data2;
if (!rela_toc1 || !rela_toc2)
return 0;
if (rela_toc1->string)
return rela_toc2->string && !strcmp(rela_toc1->string, rela_toc2->string);
if (rela_toc1->addend != rela_toc2->addend)
return 0;
return !kpatch_mangled_strcmp(rela_toc1->sym->name, rela_toc2->sym->name);
}
static void kpatch_compare_correlated_rela_section(struct section *sec)
{
struct rela *rela1, *rela2 = NULL;
/*
* On ppc64le, don't compare the .rela.toc section. The .toc and
* .rela.toc sections are included as standard elements.
*/
if (!strcmp(sec->name, ".rela.toc")) {
sec->status = SAME;
return;
}
rela2 = list_entry(sec->twin->relas.next, struct rela, list);
list_for_each_entry(rela1, &sec->relas, list) {
if (rela_equal(rela1, rela2)) {
rela2 = list_entry(rela2->list.next, struct rela, list);
continue;
}
sec->status = CHANGED;
return;
}
sec->status = SAME;
}
static void kpatch_compare_correlated_nonrela_section(struct section *sec)
{
struct section *sec1 = sec, *sec2 = sec->twin;
if (sec1->sh.sh_type != SHT_NOBITS &&
memcmp(sec1->data->d_buf, sec2->data->d_buf, sec1->data->d_size))
sec->status = CHANGED;
else
sec->status = SAME;
}
static void kpatch_compare_correlated_section(struct section *sec)
{
struct section *sec1 = sec, *sec2 = sec->twin;
/* Compare section headers (must match or fatal) */
if (sec1->sh.sh_type != sec2->sh.sh_type ||
sec1->sh.sh_flags != sec2->sh.sh_flags ||
sec1->sh.sh_entsize != sec2->sh.sh_entsize ||
(sec1->sh.sh_addralign != sec2->sh.sh_addralign &&
!is_text_section(sec1)))
DIFF_FATAL("%s section header details differ from %s", sec1->name, sec2->name);
/* Short-circuit for mcount sections; we rebuild them regardless */
if (!strcmp(sec->name, ".rela__mcount_loc") ||
!strcmp(sec->name, "__mcount_loc")) {
sec->status = SAME;
goto out;
}
if (sec1->sh.sh_size != sec2->sh.sh_size ||
sec1->data->d_size != sec2->data->d_size) {
sec->status = CHANGED;
goto out;
}
if (is_rela_section(sec))
kpatch_compare_correlated_rela_section(sec);
else
kpatch_compare_correlated_nonrela_section(sec);
out:
if (sec->status == CHANGED)
log_debug("section %s has changed\n", sec->name);
}
#ifdef __x86_64__
/*
* Determine if a section has changed only due to a WARN* or might_sleep
* macro call's embedding of the line number into an instruction operand.
*
* Warning: Hackery lies herein. It's hopefully justified by the fact that
* this issue is very common.
*
* Example WARN*:
*
* 938: be 70 00 00 00 mov $0x70,%esi
* 93d: 48 c7 c7 00 00 00 00 mov $0x0,%rdi
* 940: R_X86_64_32S .rodata.tcp_conn_request.str1.8+0x88
* 944: c6 05 00 00 00 00 01 movb $0x1,0x0(%rip) # 94b <tcp_conn_request+0x94b>
* 946: R_X86_64_PC32 .data.unlikely-0x1
* 94b: e8 00 00 00 00 callq 950 <tcp_conn_request+0x950>
* 94c: R_X86_64_PC32 warn_slowpath_null-0x4
*
* Example might_sleep:
*
* 50f: be f7 01 00 00 mov $0x1f7,%esi
* 514: 48 c7 c7 00 00 00 00 mov $0x0,%rdi
* 517: R_X86_64_32S .rodata.do_select.str1.8+0x98
* 51b: e8 00 00 00 00 callq 520 <do_select+0x520>
* 51c: R_X86_64_PC32 ___might_sleep-0x4
*
* The pattern which applies to all cases:
* 1) immediate move of the line number to %esi
* 2) (optional) string section rela
* 3) (optional) __warned.xxxxx static local rela
* 4) warn_slowpath_* or __might_sleep or some other similar rela
*/
static int kpatch_line_macro_change_only(struct section *sec)
{
struct insn insn1, insn2;
unsigned long start1, start2, size, offset, length;
struct rela *rela;
int lineonly = 0, found;
if (sec->status != CHANGED ||
is_rela_section(sec) ||
!is_text_section(sec) ||
sec->sh.sh_size != sec->twin->sh.sh_size ||
!sec->rela ||
sec->rela->status != SAME)
return 0;
start1 = (unsigned long)sec->twin->data->d_buf;
start2 = (unsigned long)sec->data->d_buf;
size = sec->sh.sh_size;
for (offset = 0; offset < size; offset += length) {
insn_init(&insn1, (void *)(start1 + offset), 1);
insn_init(&insn2, (void *)(start2 + offset), 1);
insn_get_length(&insn1);
insn_get_length(&insn2);
length = insn1.length;
if (!insn1.length || !insn2.length)
ERROR("can't decode instruction in section %s at offset 0x%lx",
sec->name, offset);
if (insn1.length != insn2.length)
return 0;
if (!memcmp((void *)start1 + offset, (void *)start2 + offset,
length))
continue;
/* verify it's a mov immediate to %edx or %esi */
insn_get_opcode(&insn1);
insn_get_opcode(&insn2);
if (!(insn1.opcode.value == 0xba && insn2.opcode.value == 0xba) &&
!(insn1.opcode.value == 0xbe && insn2.opcode.value == 0xbe))
return 0;
/*
* Verify zero or more string relas followed by a
* warn_slowpath_* or another similar rela.
*/
found = 0;
list_for_each_entry(rela, &sec->rela->relas, list) {
if (rela->offset < offset + length)
continue;
if (rela->string)
continue;
if (!strncmp(rela->sym->name, "__warned.", 9))
continue;
if (!strncmp(rela->sym->name, "warn_slowpath_", 14) ||
(!strcmp(rela->sym->name, "__warn_printk")) ||
(!strcmp(rela->sym->name, "__might_sleep")) ||
(!strcmp(rela->sym->name, "___might_sleep")) ||
(!strcmp(rela->sym->name, "__might_fault")) ||
(!strcmp(rela->sym->name, "printk")) ||
(!strcmp(rela->sym->name, "lockdep_rcu_suspicious"))) {
found = 1;
break;
}
return 0;
}
if (!found)
return 0;
lineonly = 1;
}
if (!lineonly)
ERROR("no instruction changes detected for changed section %s",
sec->name);
return 1;
}
#elif __powerpc64__
#define PPC_INSTR_LEN 4
#define PPC_RA_OFFSET 16
static int kpatch_line_macro_change_only(struct section *sec)
{
unsigned long start1, start2, size, offset;
unsigned int instr1, instr2;
struct rela *rela;
int lineonly = 0, found;
if (sec->status != CHANGED ||
is_rela_section(sec) ||
!is_text_section(sec) ||
sec->sh.sh_size != sec->twin->sh.sh_size ||
!sec->rela ||
sec->rela->status != SAME)
return 0;
start1 = (unsigned long)sec->twin->data->d_buf;
start2 = (unsigned long)sec->data->d_buf;
size = sec->sh.sh_size;
for (offset = 0; offset < size; offset += PPC_INSTR_LEN) {
if (!memcmp((void *)start1 + offset, (void *)start2 + offset,
PPC_INSTR_LEN))
continue;
instr1 = *(unsigned int *)(start1 + offset) >> PPC_RA_OFFSET;
instr2 = *(unsigned int *)(start2 + offset) >> PPC_RA_OFFSET;
/* verify it's a load immediate to r5 */
if (!(instr1 == 0x38a0 && instr2 == 0x38a0))
return 0;
found = 0;
list_for_each_entry(rela, &sec->rela->relas, list) {
if (rela->offset < offset + PPC_INSTR_LEN)
continue;
if (toc_rela(rela) && toc_rela(rela)->string)
continue;
if (!strncmp(rela->sym->name, "__warned.", 9))
continue;
if (!strncmp(rela->sym->name, "warn_slowpath_", 14) ||
(!strcmp(rela->sym->name, "__warn_printk")) ||
(!strcmp(rela->sym->name, "__might_sleep")) ||
(!strcmp(rela->sym->name, "___might_sleep")) ||
(!strcmp(rela->sym->name, "__might_fault")) ||
(!strcmp(rela->sym->name, "printk")) ||
(!strcmp(rela->sym->name, "lockdep_rcu_suspicious"))) {
found = 1;
break;
}
return 0;
}
if (!found)
return 0;
lineonly = 1;
}
if (!lineonly)
ERROR("no instruction changes detected for changed section %s",
sec->name);
return 1;
}
#else
static int kpatch_line_macro_change_only(struct section *sec)
{
return 0;
}
#endif
static void kpatch_compare_sections(struct list_head *seclist)
{
struct section *sec;
/* compare all sections */
list_for_each_entry(sec, seclist, list) {
if (sec->twin)
kpatch_compare_correlated_section(sec);
else
sec->status = NEW;
}
/* exclude WARN-only, might_sleep changes */
list_for_each_entry(sec, seclist, list) {
if (kpatch_line_macro_change_only(sec)) {
log_debug("reverting macro / line number section %s status to SAME\n",
sec->name);
sec->status = SAME;
}
}
/* sync symbol status */
list_for_each_entry(sec, seclist, list) {
if (is_rela_section(sec)) {
if (sec->base->sym && sec->base->sym->status != CHANGED)
sec->base->sym->status = sec->status;
} else {
struct symbol *sym = sec->sym;
if (sym && sym->status != CHANGED)
sym->status = sec->status;
if (sym && sym->child && sym->status == SAME &&
sym->child->sec->status == CHANGED)
sym->status = CHANGED;
}
}
}
static enum subsection kpatch_subsection_type(struct section *sec)
{
if (!strncmp(sec->name, ".text.unlikely.", 15))
return SUBSECTION_UNLIKELY;
if (!strncmp(sec->name, ".text.hot.", 10))
return SUBSECTION_HOT;
return SUBSECTION_NORMAL;
}
static int kpatch_subsection_changed(struct section *sec1, struct section *sec2)
{
return kpatch_subsection_type(sec1) != kpatch_subsection_type(sec2);
}
static void kpatch_compare_correlated_symbol(struct symbol *sym)
{
struct symbol *sym1 = sym, *sym2 = sym->twin;
if (sym1->sym.st_info != sym2->sym.st_info ||
(sym1->sec && !sym2->sec) ||
(sym2->sec && !sym1->sec))
DIFF_FATAL("symbol info mismatch: %s", sym1->name);
/*
* If two symbols are correlated but their sections are not, then the
* symbol has changed sections. This is only allowed if the symbol is
* moving out of an ignored section, or moving between normal/hot/unlikely
* subsections.
*/
if (sym1->sec && sym2->sec && sym1->sec->twin != sym2->sec) {
if ((sym2->sec->twin && sym2->sec->twin->ignore) ||
kpatch_subsection_changed(sym1->sec, sym2->sec))
sym->status = CHANGED;
else
DIFF_FATAL("symbol changed sections: %s", sym1->name);
}
if (sym1->type == STT_OBJECT &&
sym1->sym.st_size != sym2->sym.st_size)
DIFF_FATAL("object size mismatch: %s", sym1->name);
if (sym1->sym.st_shndx == SHN_UNDEF ||
sym1->sym.st_shndx == SHN_ABS)
sym1->status = SAME;
/*
* The status of LOCAL symbols is dependent on the status of their
* matching section and is set during section comparison.
*/
}
static void kpatch_compare_symbols(struct list_head *symlist)
{
struct symbol *sym;
list_for_each_entry(sym, symlist, list) {
if (sym->twin)
kpatch_compare_correlated_symbol(sym);
else
sym->status = NEW;
log_debug("symbol %s is %s\n", sym->name, status_str(sym->status));
}
}
#define CORRELATE_ELEMENT(_e1_, _e2_, kindstr) \
do { \
typeof(_e1_) e1 = (_e1_); \
typeof(_e2_) e2 = (_e2_); \
e1->twin = e2; \
e2->twin = e1; \
/* set initial status, might change */ \
e1->status = e2->status = SAME; \
if (strcmp(e1->name, e2->name)) { \
/* Rename mangled element */ \
log_debug("renaming %s %s to %s\n", \
kindstr, e2->name, e1->name); \
e2->name = strdup(e1->name); \
} \
} while (0)
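/*
 * Illustrative example (not from the original source): if the base object
 * has section ".text.unlikely.foo.cold.15" and the patched object has
 * ".text.unlikely.foo.cold.17", kpatch_mangled_strcmp() treats the names
 * as equal, the two sections get correlated, and CORRELATE_ELEMENT()
 * renames the patched one back to the base name so that later by-name
 * lookups against the original object still succeed.
 */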
static void __kpatch_correlate_section(struct section *sec1, struct section *sec2)
{
CORRELATE_ELEMENT(sec1, sec2, "section");
}
static void kpatch_correlate_symbol(struct symbol *sym1, struct symbol *sym2)
{
CORRELATE_ELEMENT(sym1, sym2, "symbol");
}
static void kpatch_correlate_section(struct section *sec1, struct section *sec2)
{
__kpatch_correlate_section(sec1, sec2);
if (is_rela_section(sec1)) {
__kpatch_correlate_section(sec1->base, sec2->base);
sec1 = sec1->base;
sec2 = sec2->base;
} else if (sec1->rela) {
__kpatch_correlate_section(sec1->rela, sec2->rela);
}
if (sec1->secsym)
kpatch_correlate_symbol(sec1->secsym, sec2->secsym);
if (sec1->sym)
kpatch_correlate_symbol(sec1->sym, sec2->sym);
}
static void kpatch_correlate_sections(struct list_head *seclist1, struct list_head *seclist2)
{
struct section *sec1, *sec2;
list_for_each_entry(sec1, seclist1, list) {
if (sec1->twin)
continue;
list_for_each_entry(sec2, seclist2, list) {
if (kpatch_mangled_strcmp(sec1->name, sec2->name) ||
sec2->twin)
continue;
if (is_special_static(is_rela_section(sec1) ?
sec1->base->secsym :
sec1->secsym))
continue;
/*
* Group sections must match exactly to be correlated.
* Changed group sections are currently not supported.
*/
if (sec1->sh.sh_type == SHT_GROUP) {
if (sec1->data->d_size != sec2->data->d_size)
continue;
if (memcmp(sec1->data->d_buf, sec2->data->d_buf,
sec1->data->d_size))
continue;
}
kpatch_correlate_section(sec1, sec2);
break;
}
}
}
static void kpatch_correlate_symbols(struct list_head *symlist1, struct list_head *symlist2)
{
struct symbol *sym1, *sym2;
list_for_each_entry(sym1, symlist1, list) {
if (sym1->twin)
continue;
list_for_each_entry(sym2, symlist2, list) {
if (kpatch_mangled_strcmp(sym1->name, sym2->name) ||
sym1->type != sym2->type || sym2->twin)
continue;
if (is_special_static(sym1))
continue;
/*
* The .LCx symbols point to strings, usually used for
* the bug table. Don't correlate and compare the
* symbols themselves, because the suffix number might
* change.
*
* If the symbol is used by the bug table (usual case),
* it may get pulled in by
* kpatch_regenerate_special_section().
*
* If the symbol is used outside of the bug table (not
* sure if this actually happens anywhere), any string
* changes will be detected elsewhere in rela_equal().
*/
if (sym1->type == STT_NOTYPE &&
!strncmp(sym1->name, ".LC", 3))
continue;
/* group section symbols must have correlated sections */
if (sym1->sec &&
sym1->sec->sh.sh_type == SHT_GROUP &&
sym1->sec->twin != sym2->sec)
continue;
kpatch_correlate_symbol(sym1, sym2);
break;
}
}
}
static void kpatch_compare_elf_headers(Elf *elf1, Elf *elf2)
{
GElf_Ehdr eh1, eh2;
if (!gelf_getehdr(elf1, &eh1))
ERROR("gelf_getehdr");
if (!gelf_getehdr(elf2, &eh2))
ERROR("gelf_getehdr");
if (memcmp(eh1.e_ident, eh2.e_ident, EI_NIDENT) ||
eh1.e_type != eh2.e_type ||
eh1.e_machine != eh2.e_machine ||
eh1.e_version != eh2.e_version ||
eh1.e_entry != eh2.e_entry ||
eh1.e_phoff != eh2.e_phoff ||
eh1.e_flags != eh2.e_flags ||
eh1.e_ehsize != eh2.e_ehsize ||
eh1.e_phentsize != eh2.e_phentsize ||
eh1.e_shentsize != eh2.e_shentsize)
DIFF_FATAL("ELF headers differ");
}
static void kpatch_check_program_headers(Elf *elf)
{
size_t ph_nr;
if (elf_getphdrnum(elf, &ph_nr))
ERROR("elf_getphdrnum");
if (ph_nr != 0)
DIFF_FATAL("ELF contains program header");
}
static void kpatch_mark_grouped_sections(struct kpatch_elf *kelf)
{
struct section *groupsec, *sec;
unsigned int *data, *end;
list_for_each_entry(groupsec, &kelf->sections, list) {
if (groupsec->sh.sh_type != SHT_GROUP)
continue;
data = groupsec->data->d_buf;
end = groupsec->data->d_buf + groupsec->data->d_size;
data++; /* skip first flag word (e.g. GRP_COMDAT) */
while (data < end) {
sec = find_section_by_index(&kelf->sections, *data);
if (!sec)
ERROR("group section not found");
sec->grouped = 1;
log_debug("marking section %s (%d) as grouped\n",
sec->name, sec->index);
data++;
}
}
}
static char *kpatch_section_function_name(struct section *sec)
{
if (is_rela_section(sec))
sec = sec->base;
return sec->sym ? sec->sym->name : sec->name;
}
/*
* Given a static local variable symbol and a rela section which references it
* in the base object, find a corresponding usage of a similarly named symbol
* in the patched object.
*/
static struct symbol *kpatch_find_static_twin(struct section *sec,
struct symbol *sym)
{
struct rela *rela, *rela_toc;
if (!sec->twin)
return NULL;
/* find the patched object's corresponding variable */
list_for_each_entry(rela, &sec->twin->relas, list) {
rela_toc = toc_rela(rela);
if (!rela_toc)
continue; /* skip toc constants */
if (rela_toc->sym->twin)
continue;
if (kpatch_mangled_strcmp(rela_toc->sym->name, sym->name))
continue;
return rela_toc->sym;
}
return NULL;
}
static int kpatch_is_normal_static_local(struct symbol *sym)
{
if (sym->type != STT_OBJECT || sym->bind != STB_LOCAL)
return 0;
if (!strchr(sym->name, '.'))
return 0;
if (is_special_static(sym))
return 0;
return 1;
}
/*
* gcc renames static local variables by appending a period and a number. For
* example, __foo could be renamed to __foo.31452. Unfortunately this number
* can arbitrarily change. Correlate them by comparing which functions
* reference them, and rename the patched symbols to match the base symbol
* names.
*
* Some surprising facts about static local variable symbols:
*
* - It's possible for multiple functions to use the same
* static local variable if the variable is defined in an
* inlined function.
*
* - It's also possible for multiple static local variables
* with the same name to be used in the same function if they
* have different scopes. (We have to assume that in such
* cases, the order in which they're referenced remains the
* same between the base and patched objects, as there's no
* other way to distinguish them.)
*
* - Static locals are usually referenced by functions, but
* they can occasionally be referenced by data sections as
* well.
*/
static void kpatch_correlate_static_local_variables(struct kpatch_elf *base,
struct kpatch_elf *patched)
{
struct symbol *sym, *patched_sym;
struct section *sec;
struct rela *rela, *rela2;
int bundled, patched_bundled, found;
/*
* First undo the correlations for all static locals. Two static
* locals can have the same numbered suffix in the base and patched
* objects by coincidence.
*/
list_for_each_entry(sym, &base->symbols, list) {
if (!kpatch_is_normal_static_local(sym))
continue;
if (sym->twin) {
sym->twin->twin = NULL;
sym->twin = NULL;
}
bundled = sym == sym->sec->sym;
if (bundled && sym->sec->twin) {
sym->sec->twin->twin = NULL;
sym->sec->twin = NULL;
sym->sec->secsym->twin->twin = NULL;
sym->sec->secsym->twin = NULL;
if (sym->sec->rela) {
sym->sec->rela->twin->twin = NULL;
sym->sec->rela->twin = NULL;
}
}
}
/*
* Do the correlations: for each section reference to a static local,
* look for a corresponding reference in the section's twin.
*/
list_for_each_entry(sec, &base->sections, list) {
if (!is_rela_section(sec) ||
is_debug_section(sec) ||
!strcmp(sec->name, ".rela.toc"))
continue;
list_for_each_entry(rela, &sec->relas, list) {
if (!toc_rela(rela))
continue; /* skip toc constants */
sym = toc_rela(rela)->sym;
if (!kpatch_is_normal_static_local(sym))
continue;
if (sym->twin)
continue;
bundled = sym == sym->sec->sym;
if (bundled && sym->sec == sec->base) {
/*
* A rare case where a static local data
* structure references itself. There's no
* reliable way to correlate this. Hopefully
* there's another reference to the symbol
* somewhere that can be used.
*/
log_debug("can't correlate static local %s's reference to itself\n",
sym->name);
continue;
}
patched_sym = kpatch_find_static_twin(sec, sym);
if (!patched_sym)
DIFF_FATAL("reference to static local variable %s in %s was removed",
sym->name,
kpatch_section_function_name(sec));
patched_bundled = patched_sym == patched_sym->sec->sym;
if (bundled != patched_bundled)
ERROR("bundle mismatch for symbol %s", sym->name);
if (!bundled && sym->sec->twin != patched_sym->sec)
ERROR("sections %s and %s aren't correlated for symbol %s",
sym->sec->name, patched_sym->sec->name, sym->name);
log_debug("renaming and correlating static local %s to %s\n",
patched_sym->name, sym->name);
patched_sym->name = strdup(sym->name);
sym->twin = patched_sym;
patched_sym->twin = sym;
/* set initial status, might change */
sym->status = patched_sym->status = SAME;
if (bundled) {
sym->sec->twin = patched_sym->sec;
patched_sym->sec->twin = sym->sec;
sym->sec->secsym->twin = patched_sym->sec->secsym;
patched_sym->sec->secsym->twin = sym->sec->secsym;
if (sym->sec->rela && patched_sym->sec->rela) {
sym->sec->rela->twin = patched_sym->sec->rela;
patched_sym->sec->rela->twin = sym->sec->rela;
}
}
}
}
/*
* Make sure that:
*
* 1. all the base object's referenced static locals have been
* correlated; and
*
* 2. each reference to a static local in the base object has a
* corresponding reference in the patched object (because a static
* local can be referenced by more than one section).
*/
list_for_each_entry(sec, &base->sections, list) {
if (!is_rela_section(sec) ||
is_debug_section(sec))
continue;
list_for_each_entry(rela, &sec->relas, list) {
sym = rela->sym;
if (!kpatch_is_normal_static_local(sym))
continue;
if (!sym->twin || !sec->twin)
DIFF_FATAL("reference to static local variable %s in %s was removed",
sym->name,
kpatch_section_function_name(sec));
found = 0;
list_for_each_entry(rela2, &sec->twin->relas, list) {
if (rela2->sym == sym->twin) {
found = 1;
break;
}
}
if (!found)
DIFF_FATAL("static local %s has been correlated with %s, but patched %s is missing a reference to it",
sym->name, sym->twin->name,
kpatch_section_function_name(sec->twin));
}
}
/*
* Now go through the patched object and look for any uncorrelated
* static locals to see if we need to print any warnings about new
* variables.
*/
list_for_each_entry(sec, &patched->sections, list) {
if (!is_rela_section(sec) ||
is_debug_section(sec))
continue;
list_for_each_entry(rela, &sec->relas, list) {
sym = rela->sym;
if (!kpatch_is_normal_static_local(sym))
continue;
if (sym->twin)
continue;
log_normal("WARNING: unable to correlate static local variable %s used by %s, assuming variable is new\n",
sym->name,
kpatch_section_function_name(sec));
return;
}
}
}
static void kpatch_correlate_elfs(struct kpatch_elf *kelf1, struct kpatch_elf *kelf2)
{
kpatch_correlate_sections(&kelf1->sections, &kelf2->sections);
kpatch_correlate_symbols(&kelf1->symbols, &kelf2->symbols);
}
static void kpatch_compare_correlated_elements(struct kpatch_elf *kelf)
{
/* lists are already correlated at this point */
kpatch_compare_sections(&kelf->sections);
kpatch_compare_symbols(&kelf->symbols);
}
#ifdef __x86_64__
static void rela_insn(const struct section *sec, const struct rela *rela,
struct insn *insn)
{
unsigned long insn_addr, start, end, rela_addr;
start = (unsigned long)sec->base->data->d_buf;
end = start + sec->base->sh.sh_size;
if (end <= start)
ERROR("bad section size");
rela_addr = start + rela->offset;
for (insn_addr = start; insn_addr < end; insn_addr += insn->length) {
insn_init(insn, (void *)insn_addr, 1);
insn_get_length(insn);
if (!insn->length)
ERROR("can't decode instruction in section %s at offset 0x%lx",
sec->base->name, insn_addr);
if (rela_addr >= insn_addr &&
rela_addr < insn_addr + insn->length)
return;
}
}
#endif
static bool is_callback_section(struct section *sec) {
static char *callback_sections[] = {
".kpatch.callbacks.pre_patch",
".kpatch.callbacks.post_patch",
".kpatch.callbacks.pre_unpatch",
".kpatch.callbacks.post_unpatch",
".rela.kpatch.callbacks.pre_patch",
".rela.kpatch.callbacks.post_patch",
".rela.kpatch.callbacks.pre_unpatch",
".rela.kpatch.callbacks.post_unpatch",
NULL,
};
char **callback_sec;
for (callback_sec = callback_sections; *callback_sec; callback_sec++)
if (!strcmp(sec->name, *callback_sec))
return true;
return false;
}
/*
* Mangle the relas a little. The compiler will sometimes use section symbols
* to reference local objects and functions rather than the object or function
* symbols themselves. We substitute the object/function symbols for the
* section symbol in this case so that the relas can be properly correlated and
* so that the existing object/function in vmlinux can be linked to.
*/
static void kpatch_replace_sections_syms(struct kpatch_elf *kelf)
{
struct section *sec;
struct rela *rela;
struct symbol *sym;
int add_off;
log_debug("\n");
list_for_each_entry(sec, &kelf->sections, list) {
if (!is_rela_section(sec) ||
is_debug_section(sec))
continue;
list_for_each_entry(rela, &sec->relas, list) {
if (rela->sym->type != STT_SECTION || !rela->sym->sec)
continue;
/*
* Replace references to bundled sections with their
* symbols.
*/
if (rela->sym->sec->sym) {
rela->sym = rela->sym->sec->sym;
/*
* On ppc64le with GCC6+, even with
* -ffunction-sections, the function symbol
* starts 8 bytes past the beginning of the
* section, because the .TOC pointer is at the
* beginning, right before the code. So even
* though the symbol is bundled, we can't
* assume it's at offset 0 in the section.
*/
rela->addend -= rela->sym->sym.st_value;
continue;
}
#ifdef __powerpc64__
add_off = 0;
#else
if (rela->type == R_X86_64_PC32 ||
rela->type == R_X86_64_PLT32) {
struct insn insn;
rela_insn(sec, rela, &insn);
add_off = (long)insn.next_byte -
(long)sec->base->data->d_buf -
rela->offset;
} else if (rela->type == R_X86_64_64 ||
rela->type == R_X86_64_32S)
add_off = 0;
else
continue;
#endif
/*
* Attempt to replace references to unbundled sections
* with their symbols.
*/
list_for_each_entry(sym, &kelf->symbols, list) {
int start, end;
if (sym->type == STT_SECTION ||
sym->sec != rela->sym->sec)
continue;
start = sym->sym.st_value;
end = sym->sym.st_value + sym->sym.st_size;
if (!is_text_section(sym->sec) &&
rela->type == R_X86_64_32S &&
rela->addend == (int)sym->sec->sh.sh_size &&
end == (int)sym->sec->sh.sh_size) {
/*
* A special case where gcc needs a
* pointer to the address at the end of
* a data section.
*
* This is usually used with a compare
* instruction to determine when to end
* a loop. The code doesn't actually
* dereference the pointer so this is
* "normal" and we just replace the
* section reference with a reference
* to the last symbol in the section.
*
* Note that this only catches the
* issue when it happens at the end of
* a section. It can also happen in
* the middle of a section. In that
* case, the wrong symbol will be
* associated with the reference. But
* that's ok because:
*
* 1) This situation only occurs when
* gcc is trying to get the address
* of the symbol, not the contents
* of its data; and
*
* 2) Because kpatch doesn't allow data
* sections to change,
* &(var1+sizeof(var1)) will always
* be the same as &var2.
*/
} else if (rela->addend + add_off < start ||
rela->addend + add_off >= end)
continue;
log_debug("%s: replacing %s+%d reference with %s+%d\n",
sec->name,
rela->sym->name, rela->addend,
sym->name, rela->addend - start);
rela->sym = sym;
rela->addend -= start;
break;
}
}
}
log_debug("\n");
}
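/*
 * Illustrative example (not from the original source): a rela the compiler
 * emitted as "STT_SECTION .text.foo + 0" is rewritten above to reference
 * the function symbol "foo" instead (adjusting the addend by the symbol's
 * st_value, which matters on ppc64le), so the rela can be correlated and
 * later resolved against the existing foo in the running kernel.
 */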
static void kpatch_check_func_profiling_calls(struct kpatch_elf *kelf)
{
struct symbol *sym;
int errs = 0;
list_for_each_entry(sym, &kelf->symbols, list) {
if (sym->type != STT_FUNC || sym->status != CHANGED || sym->parent)
continue;
if (!sym->twin->has_func_profiling) {
log_normal("function %s has no fentry/mcount call, unable to patch\n",
sym->name);
errs++;
}
}
if (errs)
DIFF_FATAL("%d function(s) can not be patched", errs);
}
static void kpatch_verify_patchability(struct kpatch_elf *kelf)
{
struct section *sec;
int errs = 0;
list_for_each_entry(sec, &kelf->sections, list) {
if (sec->status == CHANGED && !sec->include) {
log_normal("changed section %s not selected for inclusion\n",
sec->name);
errs++;
}
if (sec->status != SAME && sec->grouped) {
log_normal("changed section %s is part of a section group\n",
sec->name);
errs++;
}
if (sec->sh.sh_type == SHT_GROUP && sec->status == NEW) {
log_normal("new/changed group sections are not supported\n");
errs++;
}
/*
* ensure we aren't including .data.* or .bss.*
* (.data.unlikely and .data.once are ok b/c they only have __warned vars)
*/
if (sec->include && sec->status != NEW &&
(!strncmp(sec->name, ".data", 5) || !strncmp(sec->name, ".bss", 4)) &&
(strcmp(sec->name, ".data.unlikely") && strcmp(sec->name, ".data.once"))) {
log_normal("data section %s selected for inclusion\n",
sec->name);
errs++;
}
}
if (errs)
DIFF_FATAL("%d unsupported section change(s)", errs);
}
static void kpatch_include_symbol(struct symbol *sym);
static void kpatch_include_section(struct section *sec)
{
struct rela *rela;
/* Include the section and its section symbol */
if (sec->include)
return;
sec->include = 1;
if (sec->secsym)
sec->secsym->include = 1;
/*
* Include the section's rela section and then recursively include the
* symbols needed by its relas.
*/
if (!sec->rela)
return;
sec->rela->include = 1;
list_for_each_entry(rela, &sec->rela->relas, list)
kpatch_include_symbol(rela->sym);
}
static void kpatch_include_symbol(struct symbol *sym)
{
/*
* This function is called recursively from kpatch_include_section().
* Make sure we don't get into an endless loop.
*/
if (sym->include)
return;
/*
* The symbol gets included even if its section isn't needed, as it
* might be needed: either permanently for a rela, or temporarily for
* the later creation of a dynrela.
*/
sym->include = 1;
/*
* For a function/object symbol, if it has a section, we only need to
* include the section if it has changed. Otherwise the symbol will be
* used by relas/dynrelas to link to the real symbol externally.
*
* For section symbols, we always include the section because
* references to them can't otherwise be resolved externally.
*/
if (sym->sec && (sym->type == STT_SECTION || sym->status != SAME))
kpatch_include_section(sym->sec);
}
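/*
 * Illustrative flow (not from the original source): including a changed
 * function foo pulls in .text.foo, then .rela.text.foo, then every symbol
 * referenced by those relas. An unchanged callee bar is included only as
 * a symbol (so a rela/dynrela can still point at it), while its unchanged
 * section .text.bar stays excluded.
 */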
static void kpatch_include_standard_elements(struct kpatch_elf *kelf)
{
struct section *sec;
list_for_each_entry(sec, &kelf->sections, list) {
/*
* Include the following sections even if they haven't changed.
*
* Notes about some of the more interesting sections:
*
* - With -fdata-sections, .rodata is only used for:
*
* switch jump tables;
* KASAN data (with KASAN enabled, which is rare); and
* an ugly hack in vmx_vcpu_run().
*
* Those data are all local to the functions which use them.
* So it's safe to include .rodata.
*
* - On ppc64le, the .toc section is used for all data
* accesses.
*
* Note that if any of these sections have rela sections, they
* will also be included in their entirety. That may result in
* some extra (unused) dynrelas getting created, which should
* be harmless.
*/
if (!strcmp(sec->name, ".shstrtab") ||
!strcmp(sec->name, ".strtab") ||
!strcmp(sec->name, ".symtab") ||
!strcmp(sec->name, ".toc") ||
!strcmp(sec->name, ".rodata") ||
(!strncmp(sec->name, ".rodata.", 8) &&
strstr(sec->name, ".str1."))) {
kpatch_include_section(sec);
}
}
/* include the NULL symbol */
list_entry(kelf->symbols.next, struct symbol, list)->include = 1;
}
static int kpatch_include_callback_elements(struct kpatch_elf *kelf)
{
struct section *sec;
struct symbol *sym;
struct rela *rela;
int found = 0;
/* include load/unload sections */
list_for_each_entry(sec, &kelf->sections, list) {
if (!is_callback_section(sec))
continue;
sec->include = 1;
found = 1;
if (is_rela_section(sec)) {
/* include callback dependencies */
rela = list_entry(sec->relas.next, struct rela, list);
sym = rela->sym;
log_normal("found callback: %s\n",sym->name);
kpatch_include_symbol(sym);
} else {
sec->secsym->include = 1;
}
}
/* Strip temporary global structures used by the callback macros. */
list_for_each_entry(sym, &kelf->symbols, list) {
if (sym->sec && is_callback_section(sym->sec))
sym->include = 0;
}
return found;
}
static void kpatch_include_force_elements(struct kpatch_elf *kelf)
{
struct section *sec;
struct symbol *sym;
struct rela *rela;
/* include force sections */
list_for_each_entry(sec, &kelf->sections, list) {
if (!strcmp(sec->name, ".kpatch.force") ||
!strcmp(sec->name, ".rela.kpatch.force")) {
sec->include = 1;
if (!is_rela_section(sec)) {
/* .kpatch.force */
sec->secsym->include = 1;
continue;
}
/* .rela.kpatch.force */
list_for_each_entry(rela, &sec->relas, list)
log_normal("function '%s' marked with KPATCH_FORCE_UNSAFE!\n",
rela->sym->name);
}
}
/* strip temporary global kpatch_force_func_* symbols */
list_for_each_entry(sym, &kelf->symbols, list)
if (!strncmp(sym->name, "__kpatch_force_func_",
strlen("__kpatch_force_func_")))
sym->include = 0;
}
static int kpatch_include_new_globals(struct kpatch_elf *kelf)
{
struct symbol *sym;
int nr = 0;
list_for_each_entry(sym, &kelf->symbols, list) {
if (sym->bind == STB_GLOBAL && sym->sec &&
sym->status == NEW) {
kpatch_include_symbol(sym);
nr++;
}
}
return nr;
}
static int kpatch_include_changed_functions(struct kpatch_elf *kelf)
{
struct symbol *sym;
int changed_nr = 0;
list_for_each_entry(sym, &kelf->symbols, list) {
if (sym->status == CHANGED &&
sym->type == STT_FUNC) {
changed_nr++;
kpatch_include_symbol(sym);
}
if (sym->type == STT_FILE)
sym->include = 1;
}
return changed_nr;
}
static void kpatch_print_changes(struct kpatch_elf *kelf)
{
struct symbol *sym;
list_for_each_entry(sym, &kelf->symbols, list) {
if (!sym->include || !sym->sec || sym->type != STT_FUNC || sym->parent)
continue;
if (sym->status == NEW)
log_normal("new function: %s\n", sym->name);
else if (sym->status == CHANGED)
log_normal("changed function: %s\n", sym->name);
}
}
static void kpatch_migrate_symbols(struct list_head *src,
struct list_head *dst,
int (*select)(struct symbol *))
{
struct symbol *sym, *safe;
list_for_each_entry_safe(sym, safe, src, list) {
if (select && !select(sym))
continue;
list_del(&sym->list);
list_add_tail(&sym->list, dst);
}
}
static void kpatch_migrate_included_elements(struct kpatch_elf *kelf, struct kpatch_elf **kelfout)
{
struct section *sec, *safesec;
struct symbol *sym, *safesym;
struct kpatch_elf *out;
/* allocate output kelf */
out = malloc(sizeof(*out));
if (!out)
ERROR("malloc");
memset(out, 0, sizeof(*out));
INIT_LIST_HEAD(&out->sections);
INIT_LIST_HEAD(&out->symbols);
INIT_LIST_HEAD(&out->strings);
/* migrate included sections from kelf to out */
list_for_each_entry_safe(sec, safesec, &kelf->sections, list) {
if (!sec->include)
continue;
list_del(&sec->list);
list_add_tail(&sec->list, &out->sections);
sec->index = 0;
if (!is_rela_section(sec) && sec->secsym && !sec->secsym->include)
/* break link to non-included section symbol */
sec->secsym = NULL;
}
/* migrate included symbols from kelf to out */
list_for_each_entry_safe(sym, safesym, &kelf->symbols, list) {
if (!sym->include)
continue;
list_del(&sym->list);
list_add_tail(&sym->list, &out->symbols);
sym->index = 0;
sym->strip = 0;
if (sym->sec && !sym->sec->include)
/* break link to non-included section */
sym->sec = NULL;
}
*kelfout = out;
}
static void kpatch_reorder_symbols(struct kpatch_elf *kelf)
{
LIST_HEAD(symbols);
/* migrate NULL sym */
kpatch_migrate_symbols(&kelf->symbols, &symbols, is_null_sym);
/* migrate LOCAL FILE sym */
kpatch_migrate_symbols(&kelf->symbols, &symbols, is_file_sym);
/* migrate LOCAL FUNC syms */
kpatch_migrate_symbols(&kelf->symbols, &symbols, is_local_func_sym);
/* migrate all other LOCAL syms */
kpatch_migrate_symbols(&kelf->symbols, &symbols, is_local_sym);
/* migrate all other (GLOBAL) syms */
kpatch_migrate_symbols(&kelf->symbols, &symbols, NULL);
list_replace(&symbols, &kelf->symbols);
}
static int bug_table_group_size(struct kpatch_elf *kelf, int offset)
{
static int size = 0;
char *str;
if (!size) {
str = getenv("BUG_STRUCT_SIZE");
if (!str)
ERROR("BUG_STRUCT_SIZE not set");
size = atoi(str);
}
return size;
}
static int ex_table_group_size(struct kpatch_elf *kelf, int offset)
{
static int size = 0;
char *str;
if (!size) {
str = getenv("EX_STRUCT_SIZE");
if (!str)
ERROR("EX_STRUCT_SIZE not set");
size = atoi(str);
}
return size;
}
static int jump_table_group_size(struct kpatch_elf *kelf, int offset)
{
static int size = 0;
char *str;
if (!size) {
str = getenv("JUMP_STRUCT_SIZE");
if (!str)
ERROR("JUMP_STRUCT_SIZE not set");
size = atoi(str);
}
return size;
}
#ifdef __x86_64__
static int parainstructions_group_size(struct kpatch_elf *kelf, int offset)
{
static int size = 0;
char *str;
if (!size) {
str = getenv("PARA_STRUCT_SIZE");
if (!str)
ERROR("PARA_STRUCT_SIZE not set");
size = atoi(str);
}
return size;
}
static int altinstructions_group_size(struct kpatch_elf *kelf, int offset)
{
static int size = 0;
char *str;
if (!size) {
str = getenv("ALT_STRUCT_SIZE");
if (!str)
ERROR("ALT_STRUCT_SIZE not set");
size = atoi(str);
}
return size;
}
static int smp_locks_group_size(struct kpatch_elf *kelf, int offset)
{
return 4;
}
#endif
#ifdef __powerpc64__
static int fixup_entry_group_size(struct kpatch_elf *kelf, int offset)
{
static int size = 0;
char *str;
if (!size) {
str = getenv("FIXUP_STRUCT_SIZE");
if (!str)
ERROR("FIXUP_STRUCT_SIZE not set");
size = atoi(str);
}
return size;
}
static int fixup_lwsync_group_size(struct kpatch_elf *kelf, int offset)
{
return 8;
}
static int fixup_barrier_nospec_group_size(struct kpatch_elf *kelf, int offset)
{
return 8;
}
#endif
/*
* The rela groups in the .fixup section vary in size. The beginning of each
* .fixup rela group is referenced by the __ex_table section. To find the size
* of a .fixup rela group, we have to traverse the __ex_table relas.
*/
static int fixup_group_size(struct kpatch_elf *kelf, int offset)
{
struct section *sec;
struct rela *rela;
int found;
sec = find_section_by_name(&kelf->sections, ".rela__ex_table");
if (!sec)
ERROR("missing .rela__ex_table section");
/* find beginning of this group */
found = 0;
list_for_each_entry(rela, &sec->relas, list) {
if (!strcmp(rela->sym->name, ".fixup") &&
rela->addend == offset) {
found = 1;
break;
}
}
if (!found)
ERROR("can't find .fixup rela group at offset %d\n", offset);
/* find beginning of next group */
found = 0;
list_for_each_entry_continue(rela, &sec->relas, list) {
if (!strcmp(rela->sym->name, ".fixup") &&
rela->addend > offset) {
found = 1;
break;
}
}
if (!found) {
/* last group */
struct section *fixupsec;
fixupsec = find_section_by_name(&kelf->sections, ".fixup");
if (!fixupsec)
ERROR("missing .fixup section");
return fixupsec->sh.sh_size - offset;
}
return rela->addend - offset;
}
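/*
 * Illustrative example (not from the original source): if .rela__ex_table
 * references ".fixup + 0x10" and the next-higher .fixup addend it
 * references is 0x24, then the .fixup rela group starting at offset 0x10
 * is 0x14 bytes long; the group referenced last extends to the end of the
 * .fixup section.
 */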
static struct special_section special_sections[] = {
{
.name = "__bug_table",
.group_size = bug_table_group_size,
},
{
.name = ".fixup",
.group_size = fixup_group_size,
},
{
.name = "__ex_table", /* must come after .fixup */
.group_size = ex_table_group_size,
},
{
.name = "__jump_table",
.group_size = jump_table_group_size,
},
#ifdef __x86_64__
{
.name = ".smp_locks",
.group_size = smp_locks_group_size,
},
{
.name = ".parainstructions",
.group_size = parainstructions_group_size,
},
{
.name = ".altinstructions",
.group_size = altinstructions_group_size,
},
#endif
#ifdef __powerpc64__
{
.name = "__ftr_fixup",
.group_size = fixup_entry_group_size,
},
{
.name = "__mmu_ftr_fixup",
.group_size = fixup_entry_group_size,
},
{
.name = "__fw_ftr_fixup",
.group_size = fixup_entry_group_size,
},
{
.name = "__lwsync_fixup",
.group_size = fixup_lwsync_group_size,
},
{
.name = "__barrier_nospec_fixup",
.group_size = fixup_barrier_nospec_group_size,
},
#endif
{},
};
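/*
 * Illustrative example (not from the original source): for "__bug_table",
 * bug_table_group_size() reads the BUG_STRUCT_SIZE environment variable
 * (assumed here to be exported by the kpatch-build script for the target
 * kernel, presumably the size of one struct bug_entry), so
 * kpatch_regenerate_special_section() walks the section one group of that
 * size at a time and keeps only the groups whose relas reference an
 * included function.
 */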
static int should_keep_rela_group(struct section *sec, unsigned int start,
unsigned int size)
{
struct rela *rela;
int found = 0;
/* check if any relas in the group reference any changed functions */
list_for_each_entry(rela, &sec->relas, list) {
if (rela->offset >= start &&
rela->offset < start + size &&
rela->sym->type == STT_FUNC &&
rela->sym->sec->include) {
found = 1;
log_debug("new/changed symbol %s found in special section %s\n",
rela->sym->name, sec->name);
}
}
return found;
}
/*
* When updating .fixup, the corresponding addends in .ex_table need to be
* updated too. Stash the result in rela.r_addend so that the calculation in
* fixup_group_size() is not affected.
*/
static void kpatch_update_ex_table_addend(struct kpatch_elf *kelf,
struct special_section *special,
int src_offset, int dest_offset,
int group_size)
{
struct rela *rela;
struct section *sec;
sec = find_section_by_name(&kelf->sections, ".rela__ex_table");
if (!sec)
ERROR("missing .rela__ex_table section");
list_for_each_entry(rela, &sec->relas, list) {
if (!strcmp(rela->sym->name, ".fixup") &&
rela->addend >= src_offset &&
rela->addend < src_offset + group_size)
rela->rela.r_addend = rela->addend - (src_offset - dest_offset);
}
}
static void kpatch_regenerate_special_section(struct kpatch_elf *kelf,
struct special_section *special,
struct section *sec)
{
struct rela *rela, *safe;
char *src, *dest;
unsigned int group_size, src_offset, dest_offset, include;
int jump_table = !strcmp(special->name, "__jump_table");
LIST_HEAD(newrelas);
src = sec->base->data->d_buf;
/* alloc buffer for new base section */
dest = malloc(sec->base->sh.sh_size);
if (!dest)
ERROR("malloc");
/* Restore the stashed r_addend from kpatch_update_ex_table_addend. */
if (!strcmp(special->name, "__ex_table")) {
list_for_each_entry(rela, &sec->relas, list) {
if (!strcmp(rela->sym->name, ".fixup"))
rela->addend = rela->rela.r_addend;
}
}
src_offset = 0;
dest_offset = 0;
for ( ; src_offset < sec->base->sh.sh_size; src_offset += group_size) {
group_size = special->group_size(kelf, src_offset);
/*
* In some cases the struct has padding at the end to ensure
* that all structs after it are properly aligned. But the
* last struct in the section may not be padded. In that case,
* shrink the group_size such that it still (hopefully)
* contains the data but doesn't go past the end of the
* section.
*/
if (src_offset + group_size > sec->base->sh.sh_size)
group_size = sec->base->sh.sh_size - src_offset;
include = should_keep_rela_group(sec, src_offset, group_size);
if (!include)
continue;
/*
* Jump labels (aka static keys or static branches) aren't
* actually supported for the time being. Warn on all
* non-tracepoint jump labels when they occur in a replacement
* function. An inert tracepoint is harmless enough, but a
* broken static key can cause unexpected behavior.
*
* Here we hard-code knowledge about the contents of the
* jump_label struct. It has three fields: code, target, and
* key.
*/
if (jump_table) {
struct rela *code, *key;
int i = 0;
list_for_each_entry(rela, &sec->relas, list) {
if (rela->offset >= src_offset &&
rela->offset < src_offset + group_size) {
if (i == 0)
code = rela;
else if (i == 2)
key = rela;
i++;
}
}
if (i != 3)
ERROR("BUG: __jump_table has an unexpected format");
/* inert tracepoints are harmless */
if (!strncmp(key->sym->name, "__tracepoint_", 13))
continue;
/* inert dynamic debug printks are harmless */
if (is_dynamic_debug_symbol(key->sym))
continue;
ERROR("Found a jump label at %s()+0x%x, using key %s. Jump labels aren't currently supported. Use static_key_enabled() instead.",
code->sym->name, code->addend, key->sym->name);
continue;
}
/*
* Copy all relas in the group. It's possible that the relas
* aren't sorted (e.g. .rela.fixup), so go through the entire
* rela list each time.
*/
list_for_each_entry_safe(rela, safe, &sec->relas, list) {
if (rela->offset >= src_offset &&
rela->offset < src_offset + group_size) {
/* copy rela entry */
list_del(&rela->list);
list_add_tail(&rela->list, &newrelas);
rela->offset -= src_offset - dest_offset;
rela->rela.r_offset = rela->offset;
rela->sym->include = 1;
if (!strcmp(special->name, ".fixup"))
kpatch_update_ex_table_addend(kelf, special,
src_offset,
dest_offset,
group_size);
}
}
/* copy base section group */
memcpy(dest + dest_offset, src + src_offset, group_size);
dest_offset += group_size;
}
if (!dest_offset) {
/* no changed or global functions referenced */
sec->status = sec->base->status = SAME;
sec->include = sec->base->include = 0;
free(dest);
return;
}
/* overwrite with new relas list */
list_replace(&newrelas, &sec->relas);
/* include both rela and base sections */
sec->include = 1;
sec->base->include = 1;
/* include secsym so .kpatch.arch relas can point to section symbols */
sec->base->secsym->include = 1;
/*
* Update text section data buf and size.
*
* The rela section's data buf and size will be regenerated in
* kpatch_rebuild_rela_section_data().
*/
sec->base->data->d_buf = dest;
sec->base->data->d_size = dest_offset;
}
#define ORC_IP_PTR_SIZE 4
/*
* This function is similar to kpatch_regenerate_special_section(), but
* customized for the ORC-related sections. ORC is more special than the other
* special sections because each ORC entry is split into .orc_unwind (struct
* orc_entry) and .orc_unwind_ip.
*/
static void kpatch_regenerate_orc_sections(struct kpatch_elf *kelf)
{
struct rela *rela, *safe;
char *src, *dest, *str;
unsigned int src_idx = 0, dest_idx = 0, orc_entry_size;
struct section *orc_sec, *ip_sec;
str = getenv("ORC_STRUCT_SIZE");
if (!str)
return;
orc_entry_size = atoi(str);
if (!orc_entry_size)
ERROR("bad ORC_STRUCT_SIZE");
LIST_HEAD(newrelas);
orc_sec = find_section_by_name(&kelf->sections, ".orc_unwind");
ip_sec = find_section_by_name(&kelf->sections, ".orc_unwind_ip");
if (!orc_sec || !ip_sec)
return;
if (orc_sec->sh.sh_size % orc_entry_size != 0)
ERROR("bad .orc_unwind size");
if (ip_sec->sh.sh_size !=
(orc_sec->sh.sh_size / orc_entry_size) * ORC_IP_PTR_SIZE)
ERROR(".orc_unwind/.orc_unwind_ip size mismatch");
src = orc_sec->data->d_buf;
dest = malloc(orc_sec->sh.sh_size);
if (!dest)
ERROR("malloc");
list_for_each_entry_safe(rela, safe, &ip_sec->rela->relas, list) {
if (rela->sym->type != STT_FUNC || !rela->sym->sec->include)
goto next;
/* copy orc entry */
memcpy(dest + (dest_idx * orc_entry_size),
src + (src_idx * orc_entry_size),
orc_entry_size);
/* move ip rela */
list_del(&rela->list);
list_add_tail(&rela->list, &newrelas);
rela->offset = dest_idx * ORC_IP_PTR_SIZE;
rela->sym->include = 1;
dest_idx++;
next:
src_idx++;
}
if (!dest_idx) {
/* no changed or global functions referenced */
orc_sec->status = ip_sec->status = ip_sec->rela->status = SAME;
orc_sec->include = ip_sec->include = ip_sec->rela->include = 0;
free(dest);
return;
}
/* overwrite with new relas list */
list_replace(&newrelas, &ip_sec->rela->relas);
/* include the sections */
orc_sec->include = ip_sec->include = ip_sec->rela->include = 1;
/*
* Update data buf/size.
*
* The ip section can keep its old (zeroed) data, though its size has
* possibly decreased. The ip rela section's data buf and size will be
* regenerated in kpatch_rebuild_rela_section_data().
*/
orc_sec->data->d_buf = dest;
orc_sec->data->d_size = dest_idx * orc_entry_size;
ip_sec->data->d_size = dest_idx * ORC_IP_PTR_SIZE;
}
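/*
 * Illustrative layout (not from the original source): ORC entry i occupies
 * bytes [i * orc_entry_size, (i + 1) * orc_entry_size) of .orc_unwind and
 * has a 4-byte IP pointer at offset i * ORC_IP_PTR_SIZE of .orc_unwind_ip,
 * which is why the loop above advances src_idx/dest_idx through the two
 * sections in lockstep.
 */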
static void kpatch_check_relocations(struct kpatch_elf *kelf)
{
struct rela *rela;
struct section *sec;
Elf_Data *sdata;
list_for_each_entry(sec, &kelf->sections, list) {
if (!is_rela_section(sec))
continue;
list_for_each_entry(rela, &sec->relas, list) {
if (rela->sym->sec) {
sdata = rela->sym->sec->data;
if (rela->addend > (int)sdata->d_size) {
ERROR("out-of-range relocation %s+%x in %s", rela->sym->sec->name,
rela->addend, sec->name);
}
}
}
}
}
static void kpatch_include_debug_sections(struct kpatch_elf *kelf)
{
struct section *sec;
struct rela *rela, *saferela;
/* include all .debug_* sections */
list_for_each_entry(sec, &kelf->sections, list) {
if (is_debug_section(sec)) {
sec->include = 1;
if (!is_rela_section(sec))
sec->secsym->include = 1;
}
}
/*
* Go through the .rela.debug_ sections and strip entries
* referencing unchanged symbols
*/
list_for_each_entry(sec, &kelf->sections, list) {
if (!is_rela_section(sec) || !is_debug_section(sec))
continue;
list_for_each_entry_safe(rela, saferela, &sec->relas, list)
if (!rela->sym->sec->include)
list_del(&rela->list);
}
}
static void kpatch_mark_ignored_sections(struct kpatch_elf *kelf)
{
struct section *sec, *strsec, *ignoresec;
struct rela *rela;
char *name;
/* Ignore any discarded sections */
list_for_each_entry(sec, &kelf->sections, list) {
if (!strncmp(sec->name, ".discard", 8) ||
!strncmp(sec->name, ".rela.discard", 13))
sec->ignore = 1;
}
sec = find_section_by_name(&kelf->sections, ".kpatch.ignore.sections");
if (!sec)
return;
list_for_each_entry(rela, &sec->rela->relas, list) {
strsec = rela->sym->sec;
strsec->status = CHANGED;
/*
* Include the string section here. This is because the
* KPATCH_IGNORE_SECTION() macro is passed a literal string
* by the patch author, resulting in a change to the string
* section. If we don't include it, then we will potentially
* get a "changed section not included" error in
* kpatch_verify_patchability() if no other function-based change
* also changes the string section. We could try to exclude each
* literal string added to the section by KPATCH_IGNORE_SECTION()
* from the section data comparison, but this is a simpler way.
*/
strsec->include = 1;
strsec->secsym->include = 1;
name = strsec->data->d_buf + rela->addend;
ignoresec = find_section_by_name(&kelf->sections, name);
if (!ignoresec)
ERROR("KPATCH_IGNORE_SECTION: can't find %s", name);
log_normal("ignoring section: %s\n", name);
if (is_rela_section(ignoresec))
ignoresec = ignoresec->base;
ignoresec->ignore = 1;
if (ignoresec->twin)
ignoresec->twin->ignore = 1;
}
}
static void kpatch_mark_ignored_sections_same(struct kpatch_elf *kelf)
{
struct section *sec;
struct symbol *sym;
list_for_each_entry(sec, &kelf->sections, list) {
if (!sec->ignore)
continue;
sec->status = SAME;
if (!is_rela_section(sec)) {
if (sec->secsym)
sec->secsym->status = SAME;
if (sec->rela)
sec->rela->status = SAME;
}
list_for_each_entry(sym, &kelf->symbols, list) {
if (sym->sec != sec)
continue;
sym->status = SAME;
}
}
/* strip temporary global __UNIQUE_ID_kpatch_ignore_section_* symbols */
list_for_each_entry(sym, &kelf->symbols, list)
if (!strncmp(sym->name, "__UNIQUE_ID_kpatch_ignore_section_",
strlen("__UNIQUE_ID_kpatch_ignore_section_")))
sym->status = SAME;
}
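/*
 * .kpatch.ignore.functions is populated by the KPATCH_IGNORE_FUNCTION() macro
 * (e.g., hypothetically, KPATCH_IGNORE_FUNCTION(foo) in the patch source);
 * each rela in its rela section points at the function symbol that should be
 * treated as unchanged.
 */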
static void kpatch_mark_ignored_functions_same(struct kpatch_elf *kelf)
{
struct section *sec;
struct symbol *sym;
struct rela *rela;
sec = find_section_by_name(&kelf->sections, ".kpatch.ignore.functions");
if (!sec)
return;
list_for_each_entry(rela, &sec->rela->relas, list) {
if (!rela->sym->sec)
ERROR("expected bundled symbol");
if (rela->sym->type != STT_FUNC)
ERROR("expected function symbol");
log_normal("ignoring function: %s\n", rela->sym->name);
if (rela->sym->status != CHANGED)
log_normal("NOTICE: no change detected in function %s, unnecessary KPATCH_IGNORE_FUNCTION()?\n", rela->sym->name);
rela->sym->status = SAME;
rela->sym->sec->status = SAME;
if (rela->sym->child)
rela->sym->child->status = SAME;
if (rela->sym->sec->secsym)
rela->sym->sec->secsym->status = SAME;
if (rela->sym->sec->rela)
rela->sym->sec->rela->status = SAME;
}
/* strip temporary global __kpatch_ignore_func_* symbols */
list_for_each_entry(sym, &kelf->symbols, list)
if (!strncmp(sym->name, "__kpatch_ignore_func_",
strlen("__kpatch_ignore_func_")))
sym->status = SAME;
}
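/*
 * Emit one .kpatch.arch entry per arch-specific special section that made it
 * into the output (.parainstructions / .altinstructions), presumably so the
 * patch module can apply them for the right object at load time. Each struct
 * kpatch_arch entry gets two relas: one filling its 'sec' field with the
 * section's address and one filling 'objname' with a pointer into
 * .kpatch.strings.
 */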
static void kpatch_create_kpatch_arch_section(struct kpatch_elf *kelf, char *objname)
{
struct special_section *special;
struct symbol *strsym;
struct section *sec, *karch_sec;
struct rela *rela;
int nr, index = 0;
nr = sizeof(special_sections) / sizeof(special_sections[0]);
karch_sec = create_section_pair(kelf, ".kpatch.arch", sizeof(struct kpatch_arch), nr);
/* lookup strings symbol */
strsym = find_symbol_by_name(&kelf->symbols, ".kpatch.strings");
if (!strsym)
ERROR("can't find .kpatch.strings symbol");
for (special = special_sections; special->name; special++) {
if (strcmp(special->name, ".parainstructions") &&
strcmp(special->name, ".altinstructions"))
continue;
sec = find_section_by_name(&kelf->sections, special->name);
if (!sec)
continue;
/* entries[index].sec */
ALLOC_LINK(rela, &karch_sec->rela->relas);
rela->sym = sec->secsym;
rela->type = ABSOLUTE_RELA_TYPE;
rela->addend = 0;
rela->offset = index * sizeof(struct kpatch_arch) + \
offsetof(struct kpatch_arch, sec);
/* entries[index].objname */
ALLOC_LINK(rela, &karch_sec->rela->relas);
rela->sym = strsym;
rela->type = ABSOLUTE_RELA_TYPE;
rela->addend = offset_of_string(&kelf->strings, objname);
rela->offset = index * sizeof(struct kpatch_arch) + \
offsetof(struct kpatch_arch, objname);
index++;
}
karch_sec->data->d_size = index * sizeof(struct kpatch_arch);
karch_sec->sh.sh_size = karch_sec->data->d_size;
}
static void kpatch_process_special_sections(struct kpatch_elf *kelf)
{
struct special_section *special;
struct section *sec;
struct symbol *sym;
struct rela *rela;
int altinstr = 0;
for (special = special_sections; special->name; special++) {
sec = find_section_by_name(&kelf->sections, special->name);
if (!sec)
continue;
sec = sec->rela;
if (!sec)
continue;
kpatch_regenerate_special_section(kelf, special, sec);
if (!strcmp(special->name, ".altinstructions") && sec->base->include)
altinstr = 1;
}
/*
* The following special sections don't have relas which reference
* non-included symbols, so their entire rela section can be included.
*/
list_for_each_entry(sec, &kelf->sections, list) {
if (strcmp(sec->name, ".altinstr_replacement"))
continue;
/*
* Only include .altinstr_replacement if .altinstructions
* is also included.
*/
if (!altinstr)
break;
/* include base section */
sec->include = 1;
/* include all symbols in the section */
list_for_each_entry(sym, &kelf->symbols, list)
if (sym->sec == sec)
sym->include = 1;
/* include rela section */
if (sec->rela) {
sec->rela->include = 1;
/* include all symbols referenced by relas */
list_for_each_entry(rela, &sec->rela->relas, list)
kpatch_include_symbol(rela->sym);
}
}
/*
* The following special sections aren't supported, so make sure we
* don't ever try to include them. Otherwise the kernel will see the
* jump table during module loading and get confused. Generally it
* should be safe to exclude them; it just means that you can't modify
* jump labels and enable tracepoints in a patched function.
*/
list_for_each_entry(sec, &kelf->sections, list) {
if (strcmp(sec->name, "__jump_table") &&
strcmp(sec->name, "__tracepoints") &&
strcmp(sec->name, "__tracepoints_ptrs") &&
strcmp(sec->name, "__tracepoints_strings"))
continue;
sec->status = SAME;
sec->include = 0;
if (sec->rela) {
sec->rela->status = SAME;
sec->rela->include = 0;
}
}
kpatch_regenerate_orc_sections(kelf);
}
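/*
 * Snapshot the base object's local FUNC/OBJECT symbols into a NULL-terminated
 * array. This is handed to lookup_open() later so it can pick the correct
 * group of file-local symbols for this object out of the parent symtab.
 */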
static struct sym_compare_type *kpatch_elf_locals(struct kpatch_elf *kelf)
{
struct symbol *sym;
int i = 0, sym_num = 0;
struct sym_compare_type *sym_array;
list_for_each_entry(sym, &kelf->symbols, list) {
if (sym->bind != STB_LOCAL)
continue;
if (sym->type != STT_FUNC && sym->type != STT_OBJECT)
continue;
sym_num++;
}
if (!sym_num)
return NULL;
sym_array = malloc((sym_num + 1) * sizeof(struct sym_compare_type));
if (!sym_array)
ERROR("malloc");
list_for_each_entry(sym, &kelf->symbols, list) {
if (sym->bind != STB_LOCAL)
continue;
if (sym->type != STT_FUNC && sym->type != STT_OBJECT)
continue;
sym_array[i].type = sym->type;
sym_array[i++].name = strdup(sym->name);
}
sym_array[i].type = 0;
sym_array[i].name = NULL;
return sym_array;
}
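/*
 * Build .kpatch.funcs: one struct kpatch_patch_func per changed top-level
 * function. old_addr/old_size/sympos come from looking the function up in the
 * symbol table of the object being patched; new_addr, name, and objname are
 * filled in at module load time via the relas added below.
 */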
static void kpatch_create_patches_sections(struct kpatch_elf *kelf,
struct lookup_table *table,
char *objname)
{
int nr, index, objname_offset;
struct section *sec, *relasec;
struct symbol *sym, *strsym;
struct rela *rela;
struct lookup_result result;
struct kpatch_patch_func *funcs;
/* count patched functions */
nr = 0;
list_for_each_entry(sym, &kelf->symbols, list)
if (sym->type == STT_FUNC && sym->status == CHANGED && !sym->parent)
nr++;
/* create text/rela section pair */
sec = create_section_pair(kelf, ".kpatch.funcs", sizeof(*funcs), nr);
relasec = sec->rela;
funcs = sec->data->d_buf;
/* lookup strings symbol */
strsym = find_symbol_by_name(&kelf->symbols, ".kpatch.strings");
if (!strsym)
ERROR("can't find .kpatch.strings symbol");
/* add objname to strings */
objname_offset = offset_of_string(&kelf->strings, objname);
/* populate sections */
index = 0;
list_for_each_entry(sym, &kelf->symbols, list) {
if (sym->type == STT_FUNC && sym->status == CHANGED && !sym->parent) {
if (sym->bind == STB_LOCAL) {
if (lookup_local_symbol(table, sym->name,
&result))
ERROR("lookup_local_symbol %s",
sym->name);
} else {
if (lookup_global_symbol(table, sym->name,
&result))
ERROR("lookup_global_symbol %s",
sym->name);
}
log_debug("lookup for %s @ 0x%016lx len %lu\n",
sym->name, result.value, result.size);
/*
* Convert global symbols to local so other objects in
* the patch module (like the patch callback object's init
* code) won't link to this function and call it before
* its relocations have been applied.
*/
sym->bind = STB_LOCAL;
sym->sym.st_info = GELF_ST_INFO(sym->bind, sym->type);
/* add entry in text section */
funcs[index].old_addr = result.value;
funcs[index].old_size = result.size;
funcs[index].new_size = sym->sym.st_size;
funcs[index].sympos = result.pos;
/*
* Add a relocation that will populate
* the funcs[index].new_addr field at
* module load time.
*/
ALLOC_LINK(rela, &relasec->relas);
rela->sym = sym;
rela->type = ABSOLUTE_RELA_TYPE;
rela->addend = 0;
rela->offset = index * sizeof(*funcs);
/*
* Add a relocation that will populate
* the funcs[index].name field.
*/
ALLOC_LINK(rela, &relasec->relas);
rela->sym = strsym;
rela->type = ABSOLUTE_RELA_TYPE;
rela->addend = offset_of_string(&kelf->strings, sym->name);
rela->offset = index * sizeof(*funcs) +
offsetof(struct kpatch_patch_func, name);
/*
* Add a relocation that will populate
* the funcs[index].objname field.
*/
ALLOC_LINK(rela, &relasec->relas);
rela->sym = strsym;
rela->type = ABSOLUTE_RELA_TYPE;
rela->addend = objname_offset;
rela->offset = index * sizeof(*funcs) +
offsetof(struct kpatch_patch_func,objname);
index++;
}
}
/* sanity check, index should equal nr */
if (index != nr)
ERROR("size mismatch in funcs sections");
}
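/*
 * Symbols exported by the kpatch core module itself (the shadow variable
 * API). References to them are left as normal relas further down, since they
 * are resolved by the regular module loader.
 */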
static int kpatch_is_core_module_symbol(char *name)
{
return (!strcmp(name, "kpatch_shadow_alloc") ||
!strcmp(name, "kpatch_shadow_free") ||
!strcmp(name, "kpatch_shadow_get"));
}
/*
* If the patched code refers to a symbol, for example, calls a function
* or stores a pointer to a function somewhere, the address of that symbol
* must be resolved somehow before the patch is applied. The symbol may be
* present in the original code too, so the patch may refer either to that
* version of the symbol (dynrela is used for that) or to its patched
* version directly (with a normal relocation).
*
* Dynrelas may be needed for the symbols not present in this object file
* (rela->sym->sec is NULL), because it is unknown if the patched versions
* of these symbols exist and where they are.
*
* The patched code can usually refer to a symbol from this object file
* directly. If it is a function, this may also improve performance because
* there is no need to call the original function first, find the patched
* one, and then use ftrace to pass control to it.
*
* There is an exception though, at least on x86. It is safer to use
* a dynrela if the patched code stores a pointer to a function somewhere
* (relocation of type R_X86_64_32S). The function could be used as
* a callback and some kinds of callbacks are called asynchronously. If
* the patch module sets such callback and is unloaded shortly after,
* the kernel could try to call the function via an invalid pointer and
* would crash. With dynrela, the kernel would call the original function
* in that case.
*/
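/*
 * In short: may_need_dynrela() below flags a rela for dynrela conversion when
 * its symbol is not defined in this object file, or when it stores a function
 * pointer to a pre-existing, non-nested function (see function_ptr_rela()).
 * Whether the dynrela is actually emitted is decided later, in
 * kpatch_create_intermediate_sections().
 */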
static int function_ptr_rela(const struct rela *rela)
{
const struct rela *rela_toc = toc_rela(rela);
return (rela_toc && rela_toc->sym->type == STT_FUNC &&
!rela_toc->sym->parent &&
/* skip switch table on PowerPC */
rela_toc->addend == (int)rela_toc->sym->sym.st_value &&
(rela->type == R_X86_64_32S ||
rela->type == R_PPC64_TOC16_HA ||
rela->type == R_PPC64_TOC16_LO_DS));
}
static int may_need_dynrela(const struct rela *rela)
{
/*
* References to .TOC. are treated specially by the module loader and
* should never be converted to dynrelas.
*/
if (rela->type == R_PPC64_REL16_HA || rela->type == R_PPC64_REL16_LO ||
rela->type == R_PPC64_REL64)
return 0;
if (!rela->sym->sec)
return 1;
/*
* Nested functions used as callbacks are a special case.
* They are not supposed to be visible outside of the
* function that defines them. Their names may differ in
* the original and the patched kernels which makes it
* difficult to use dynrelas. Fortunately, nested functions
* are rare and are unlikely to be used as asynchronous
* callbacks, so the patched code can refer to them directly.
* It seems one can only distinguish such functions by their
* names containing a dot. Other kinds of functions with
* such names (e.g. optimized copies of functions) are
* unlikely to be used as callbacks.
*/
return (function_ptr_rela(rela) &&
toc_rela(rela)->sym->status != NEW &&
!strchr(toc_rela(rela)->sym->name, '.'));
}
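/*
 * Convert the flagged relas into .kpatch.relocations / .kpatch.symbols pairs.
 * For each converted rela, krelas[i] records the destination (a rela into the
 * patched section), the original addend/type, and a pointer to ksyms[i],
 * which in turn names the target symbol and the object it lives in. The
 * original rela is then removed from its section.
 */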
static void kpatch_create_intermediate_sections(struct kpatch_elf *kelf,
struct lookup_table *table,
char *objname,
char *pmod_name)
{
int nr, index;
struct section *sec, *ksym_sec, *krela_sec;
struct rela *rela, *rela2, *safe;
struct symbol *strsym, *ksym_sec_sym;
struct kpatch_symbol *ksyms;
struct kpatch_relocation *krelas;
struct lookup_result result;
char *sym_objname;
int ret, vmlinux, external;
vmlinux = !strcmp(objname, "vmlinux");
/* count rela entries that need to be dynamic */
nr = 0;
list_for_each_entry(sec, &kelf->sections, list) {
if (!is_rela_section(sec))
continue;
if (!strcmp(sec->name, ".rela.kpatch.funcs"))
continue;
list_for_each_entry(rela, &sec->relas, list) {
nr++; /* upper bound on number of kpatch relas and symbols */
/*
* Relocation section '.rela.toc' at offset 0xcc6b0 contains 46 entries:
* ...
* 0000000000000138 0000002a00000026 R_PPC64_ADDR64 0000000000000000 .text.deferred_put_nlk_sk + 8
*
* Relocation section '.rela.text.netlink_release' at offset 0xcadf0 contains 44 entries:
* ...
* 0000000000000398 0000007300000032 R_PPC64_TOC16_HA 0000000000000000 .toc + 138
* 00000000000003a0 0000007300000040 R_PPC64_TOC16_LO_DS 0000000000000000 .toc + 138
*
* On PowerPC, a rela like the ones in .rela.text.netlink_release above refers
* to a ".toc + offset" entry rather than to the symbol itself, so
* may_need_dynrela() must follow that reference into .rela.toc. The checks are
* then split: the original rela is checked for its rela->type, while the
* corresponding .rela.toc entry is checked for a function pointer/switch
* label. If a dynrela is needed, need_dynrela is set on the referenced
* .rela.toc entry, since that is the rela which represents the function sym.
*/
if (may_need_dynrela(rela))
toc_rela(rela)->need_dynrela = 1;
}
}
/* create .kpatch.relocations text/rela section pair */
krela_sec = create_section_pair(kelf, ".kpatch.relocations", sizeof(*krelas), nr);
krelas = krela_sec->data->d_buf;
/* create .kpatch.symbols text/rela section pair */
ksym_sec = create_section_pair(kelf, ".kpatch.symbols", sizeof(*ksyms), nr);
ksyms = ksym_sec->data->d_buf;
/* create .kpatch.symbols section symbol (to set rela->sym later) */
ALLOC_LINK(ksym_sec_sym, &kelf->symbols);
ksym_sec_sym->sec = ksym_sec;
ksym_sec_sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION);
ksym_sec_sym->type = STT_SECTION;
ksym_sec_sym->bind = STB_LOCAL;
ksym_sec_sym->name = ".kpatch.symbols";
/* lookup strings symbol */
strsym = find_symbol_by_name(&kelf->symbols, ".kpatch.strings");
if (!strsym)
ERROR("can't find .kpatch.strings symbol");
/* populate sections */
index = 0;
list_for_each_entry(sec, &kelf->sections, list) {
if (!is_rela_section(sec))
continue;
if (!strcmp(sec->name, ".rela.kpatch.funcs") ||
!strcmp(sec->name, ".rela.kpatch.relocations") ||
!strcmp(sec->name, ".rela.kpatch.symbols"))
continue;
list_for_each_entry_safe(rela, safe, &sec->relas, list) {
if (!rela->need_dynrela)
continue;
/*
* Allow references to core module symbols to remain as
* normal relas, since the core module may not be
* compiled into the kernel, and they should be
* exported anyway.
*/
if (kpatch_is_core_module_symbol(rela->sym->name))
continue;
external = 0;
/*
* sym_objname is the name of the object to which
* rela->sym belongs. We'll need this to build
* ".klp.sym." symbol names later on.
*
* By default sym_objname is the name of the
* component being patched (vmlinux or module).
* If it's an external symbol, sym_objname
* will get reassigned appropriately.
*/
sym_objname = objname;
/*
* On ppc64le, the function prologue generated by GCC 6
* has the sequence:
*
* .globl my_func
* .type my_func, @function
* .quad .TOC.-my_func
* my_func:
* .reloc ., R_PPC64_ENTRY ; optional
* ld r2,-8(r12)
* add r2,r2,r12
* .localentry my_func, .-my_func
*
* The R_PPC64_ENTRY is optional and its symbol might
* have an empty name. Leave it as a normal rela.
*/
if (rela->type == R_PPC64_ENTRY)
continue;
if (rela->sym->bind == STB_LOCAL) {
/* An unchanged local symbol */
ret = lookup_local_symbol(table,
rela->sym->name, &result);
if (ret)
ERROR("lookup_local_symbol %s needed for %s",
rela->sym->name, sec->base->name);
}
else if (vmlinux) {
/*
* We have a patch to vmlinux which references
* a global symbol. Use a normal rela for
* exported symbols and a dynrela otherwise.
*/
#ifdef __powerpc64__
/*
* An exported symbol might be local to an
* object file and any access to the function
* might be through localentry (toc+offset)
* instead of global offset.
*
* fs/proc/proc_sysctl::sysctl_head_grab:
* 166: 0000000000000000 256 FUNC GLOBAL DEFAULT [<localentry>: 8] 42 unregister_sysctl_table
* 167: 0000000000000000 0 NOTYPE GLOBAL DEFAULT UND .TOC.
*
* Such symbols have a type of
* STT_FUNC. Treat them like local symbols.
* They will be handled by the livepatch
* relocation code.
*/
if (lookup_is_exported_symbol(table, rela->sym->name)) {
if (rela->sym->type != STT_FUNC)
continue;
}
#else
if (lookup_is_exported_symbol(table, rela->sym->name))
continue;
#endif
/*
* If lookup_global_symbol() fails, assume the
* symbol is defined in another object in the
* patch module.
*/
if (lookup_global_symbol(table, rela->sym->name,
&result))
continue;
} else {
/*
* We have a patch to a module which references
* a global symbol. Try to find the symbol in
* the module being patched.
*/
if (lookup_global_symbol(table, rela->sym->name,
&result)) {
/*
* Not there, see if the symbol is
* exported, and set sym_objname to the
* object the exported symbol belongs
* to. If it's not exported, assume sym
* is provided by another .o in the
* patch module.
*/
sym_objname = lookup_exported_symbol_objname(table, rela->sym->name);
if (!sym_objname)
sym_objname = pmod_name;
/*
* For a symbol exported by vmlinux, use
* the original rela.
*
* For a symbol exported by a module,
* convert to a dynrela because the
* module might not be loaded yet.
*/
if (!strcmp(sym_objname, "vmlinux"))
continue;
external = 1;
}
}
log_debug("lookup for %s @ 0x%016lx len %lu\n",
rela->sym->name, result.value, result.size);
/* Fill in ksyms[index] */
if (vmlinux)
ksyms[index].src = result.value;
else
/* for modules, src is discovered at runtime */
ksyms[index].src = 0;
ksyms[index].pos = result.pos;
ksyms[index].type = rela->sym->type;
ksyms[index].bind = rela->sym->bind;
/* add rela to fill in ksyms[index].name field */
ALLOC_LINK(rela2, &ksym_sec->rela->relas);
rela2->sym = strsym;
rela2->type = ABSOLUTE_RELA_TYPE;
rela2->addend = offset_of_string(&kelf->strings, rela->sym->name);
rela2->offset = index * sizeof(*ksyms) + \
offsetof(struct kpatch_symbol, name);
/* add rela to fill in ksyms[index].objname field */
ALLOC_LINK(rela2, &ksym_sec->rela->relas);
rela2->sym = strsym;
rela2->type = ABSOLUTE_RELA_TYPE;
rela2->addend = offset_of_string(&kelf->strings, sym_objname);
rela2->offset = index * sizeof(*ksyms) + \
offsetof(struct kpatch_symbol, objname);
/* Fill in krelas[index] */
if (is_gcc6_localentry_bundled_sym(rela->sym) &&
rela->addend == (int)rela->sym->sym.st_value)
rela->addend -= rela->sym->sym.st_value;
krelas[index].addend = rela->addend;
krelas[index].type = rela->type;
krelas[index].external = external;
/* add rela to fill in krelas[index].dest field */
ALLOC_LINK(rela2, &krela_sec->rela->relas);
if (sec->base->secsym)
rela2->sym = sec->base->secsym;
else
ERROR("can't create dynrela for section %s (symbol %s): no bundled or section symbol",
sec->name, rela->sym->name);
rela2->type = ABSOLUTE_RELA_TYPE;
rela2->addend = rela->offset;
rela2->offset = index * sizeof(*krelas) + \
offsetof(struct kpatch_relocation, dest);
/* add rela to fill in krelas[index].objname field */
ALLOC_LINK(rela2, &krela_sec->rela->relas);
rela2->sym = strsym;
rela2->type = ABSOLUTE_RELA_TYPE;
rela2->addend = offset_of_string(&kelf->strings, objname);
rela2->offset = index * sizeof(*krelas) + \
offsetof(struct kpatch_relocation, objname);
/* add rela to fill in krelas[index].ksym field */
ALLOC_LINK(rela2, &krela_sec->rela->relas);
rela2->sym = ksym_sec_sym;
rela2->type = ABSOLUTE_RELA_TYPE;
rela2->addend = index * sizeof(*ksyms);
rela2->offset = index * sizeof(*krelas) + \
offsetof(struct kpatch_relocation, ksym);
/*
* Mark the referred to symbol for removal but
* only if it is not from this object file.
* The symbols from this object file may be needed
* later (for example, they may have relocations
* of their own which should be processed).
*/
if (!rela->sym->sec)
rela->sym->strip = 1;
list_del(&rela->list);
free(rela);
index++;
}
}
/* set size to actual number of ksyms/krelas */
ksym_sec->data->d_size = index * sizeof(struct kpatch_symbol);
ksym_sec->sh.sh_size = ksym_sec->data->d_size;
krela_sec->data->d_size = index * sizeof(struct kpatch_relocation);
krela_sec->sh.sh_size = krela_sec->data->d_size;
}
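/*
 * Each patch-author callback (registered via the KPATCH_*_CALLBACK macros)
 * lives in its own .kpatch.callbacks.* section containing a single callback
 * struct with an objname field; add a rela so that field points at this
 * object's name in .kpatch.strings.
 */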
static void kpatch_create_callbacks_objname_rela(struct kpatch_elf *kelf, char *objname)
{
struct section *sec;
struct rela *rela;
struct symbol *strsym;
int objname_offset;
struct callback { char *name; int offset; };
static struct callback callbacks[] = {
{ .name = ".rela.kpatch.callbacks.pre_patch",
.offset = offsetof(struct kpatch_pre_patch_callback, objname) },
{ .name = ".rela.kpatch.callbacks.post_patch",
.offset = offsetof(struct kpatch_post_patch_callback, objname) },
{ .name = ".rela.kpatch.callbacks.pre_unpatch",
.offset = offsetof(struct kpatch_pre_unpatch_callback, objname) },
{ .name = ".rela.kpatch.callbacks.post_unpatch",
.offset = offsetof(struct kpatch_post_unpatch_callback, objname) },
{ .name = NULL, .offset = 0 },
};
struct callback *callbackp;
/* lookup strings symbol */
strsym = find_symbol_by_name(&kelf->symbols, ".kpatch.strings");
if (!strsym)
ERROR("can't find .kpatch.strings symbol");
/* add objname to strings */
objname_offset = offset_of_string(&kelf->strings, objname);
list_for_each_entry(sec, &kelf->sections, list) {
for (callbackp = callbacks; callbackp->name; callbackp++) {
if (!strcmp(callbackp->name, sec->name)) {
ALLOC_LINK(rela, &sec->relas);
rela->sym = strsym;
rela->type = ABSOLUTE_RELA_TYPE;
rela->addend = objname_offset;
rela->offset = callbackp->offset;
break;
}
}
}
}
#ifdef __powerpc64__
static void kpatch_create_mcount_sections(struct kpatch_elf *kelf) { }
#else
/*
* This function basically reimplements the functionality of the Linux
* recordmcount script, so that patched functions can be recognized by ftrace.
*
* TODO: Eventually we can modify recordmcount so that it recognizes our bundled
* sections as valid and does this work for us.
*/
static void kpatch_create_mcount_sections(struct kpatch_elf *kelf)
{
int nr, index;
struct section *sec, *relasec;
struct symbol *sym;
struct rela *rela;
void *newdata;
unsigned char *insn;
nr = 0;
list_for_each_entry(sym, &kelf->symbols, list)
if (sym->type == STT_FUNC && sym->status != SAME &&
sym->has_func_profiling)
nr++;
/* create text/rela section pair */
sec = create_section_pair(kelf, "__mcount_loc", sizeof(void*), nr);
relasec = sec->rela;
/* populate sections */
index = 0;
list_for_each_entry(sym, &kelf->symbols, list) {
if (sym->type != STT_FUNC || sym->status == SAME)
continue;
if (!sym->has_func_profiling) {
log_debug("function %s has no fentry/mcount call, no mcount record is needed\n",
sym->name);
continue;
}
/* add rela in .rela__mcount_loc to fill in function pointer */
ALLOC_LINK(rela, &relasec->relas);
rela->sym = sym;
rela->type = R_X86_64_64;
rela->addend = 0;
rela->offset = index * sizeof(void*);
/*
* Modify the first instruction of the function to "callq
* __fentry__" so that ftrace will be happy.
*/
newdata = malloc(sym->sec->data->d_size);
if (!newdata)
ERROR("malloc");
memcpy(newdata, sym->sec->data->d_buf, sym->sec->data->d_size);
sym->sec->data->d_buf = newdata;
insn = newdata;
rela = list_first_entry(&sym->sec->rela->relas, struct rela,
list);
/*
* R_X86_64_NONE is only generated by older versions of kernel/gcc
* which use the mcount script.
*/
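/*
 * In that case recordmcount has presumably already replaced the profiling
 * call with a 5-byte nop (first byte 0x0f) and neutralized its rela; restore
 * a callq opcode (0xe8) with a zero rel32 and convert the rela back to
 * R_X86_64_PC32 so the call target is resolved normally at load time.
 */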
if (rela->type == R_X86_64_NONE) {
if (insn[0] != 0xf)
ERROR("%s: unexpected instruction at the start of the function",
sym->name);
insn[0] = 0xe8;
insn[1] = 0;
insn[2] = 0;
insn[3] = 0;
insn[4] = 0;
rela->type = R_X86_64_PC32;
}
index++;
}
/* sanity check, index should equal nr */
if (index != nr)
ERROR("size mismatch in funcs sections");
}
#endif
/*
* This function strips out symbols that were referenced by changed rela
* sections, but the rela entries that referenced them were converted to
* dynrelas and are no longer needed.
*/
static void kpatch_strip_unneeded_syms(struct kpatch_elf *kelf,
struct lookup_table *table)
{
struct symbol *sym, *safe;
list_for_each_entry_safe(sym, safe, &kelf->symbols, list) {
if (sym->strip) {
list_del(&sym->list);
free(sym);
}
}
}
static void kpatch_create_strings_elements(struct kpatch_elf *kelf)
{
struct section *sec;
struct symbol *sym;
/* create .kpatch.strings */
/* allocate section resources */
ALLOC_LINK(sec, &kelf->sections);
sec->name = ".kpatch.strings";
/* set data */
sec->data = malloc(sizeof(*sec->data));
if (!sec->data)
ERROR("malloc");
sec->data->d_type = ELF_T_BYTE;
/* set section header */
sec->sh.sh_type = SHT_PROGBITS;
sec->sh.sh_entsize = 1;
sec->sh.sh_addralign = 1;
sec->sh.sh_flags = SHF_ALLOC;
/* create .kpatch.strings section symbol (reuse sym variable) */
ALLOC_LINK(sym, &kelf->symbols);
sym->sec = sec;
sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION);
sym->type = STT_SECTION;
sym->bind = STB_LOCAL;
sym->name = ".kpatch.strings";
}
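/*
 * .kpatch.strings is a flat blob of NUL-terminated strings. The addends
 * generated earlier via offset_of_string() are byte offsets into this blob,
 * which is why the strings are emitted here in kelf->strings list order.
 */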
static void kpatch_build_strings_section_data(struct kpatch_elf *kelf)
{
struct string *string;
struct section *sec;
int size;
char *strtab;
sec = find_section_by_name(&kelf->sections, ".kpatch.strings");
if (!sec)
ERROR("can't find .kpatch.strings");
/* determine size */
size = 0;
list_for_each_entry(string, &kelf->strings, list)
size += strlen(string->name) + 1;
/* allocate section resources */
strtab = malloc(size);
if (!strtab)
ERROR("malloc");
sec->data->d_buf = strtab;
sec->data->d_size = size;
/* populate strings section data */
list_for_each_entry(string, &kelf->strings, list) {
strcpy(strtab, string->name);
strtab += strlen(string->name) + 1;
}
}
/*
* Don't allow sibling calls from patched functions on ppc64le. Before doing a
* sibling call, the patched function restores the stack to its caller's stack.
* The kernel-generated stub then writes the patch module's r2 (toc) value to
* the caller's stack, corrupting it, eventually causing a panic after it
* returns to the caller and the caller tries to use the livepatch module's toc
* value.
*
* In theory we could instead a) generate a custom stub, or b) modify the
* kernel livepatch_handler code to save/restore the stack r2 value, but this
* is easier for now.
*/
static void kpatch_no_sibling_calls_ppc64le(struct kpatch_elf *kelf)
{
#ifdef __powerpc64__
struct symbol *sym;
unsigned int insn;
unsigned long offset;
list_for_each_entry(sym, &kelf->symbols, list) {
if (sym->type != STT_FUNC || sym->status != CHANGED)
continue;
for (offset = 0; offset < sym->sec->data->d_size; offset += 4) {
insn = *(unsigned int *)(sym->sec->data->d_buf + offset);
/*
* The instruction 0x48000000 can be assumed to be a
* sibling call:
*
* Bits 0-5 (opcode) == 0x12: unconditional branch
* Bit 30 (absolute) == 0: relative address
* Bit 31 (link) == 0: doesn't set LR (not a call)
*
* Bits 6-29 (branch address) == zero, which means
* it's either a branch to self (infinite loop), or
* there's a REL24 relocation for the address which
* will be written by the linker or the kernel.
*/
if (insn != 0x48000000)
continue;
/* Make sure it's not a branch-to-self: */
if (!find_rela_by_offset(sym->sec->rela, offset))
continue;
ERROR("Found an unsupported sibling call at %s()+0x%lx. Add __attribute__((optimize(\"-fno-optimize-sibling-calls\"))) to %s() definition.",
sym->name, sym->sym.st_value + offset, sym->name);
}
}
#endif
}
struct arguments {
char *args[7];
int debug;
};
static char args_doc[] = "original.o patched.o parent-name parent-symtab Module.symvers patch-module-name output.o";
static struct argp_option options[] = {
{"debug", 'd', NULL, 0, "Show debug output" },
{ NULL }
};
static error_t parse_opt (int key, char *arg, struct argp_state *state)
{
/* Get the input argument from argp_parse, which we
know is a pointer to our arguments structure. */
struct arguments *arguments = state->input;
switch (key)
{
case 'd':
arguments->debug = 1;
break;
case ARGP_KEY_ARG:
if (state->arg_num >= 7)
/* Too many arguments. */
argp_usage (state);
arguments->args[state->arg_num] = arg;
break;
case ARGP_KEY_END:
if (state->arg_num < 7)
/* Not enough arguments. */
argp_usage (state);
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
static struct argp argp = { options, parse_opt, args_doc, NULL };
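/*
 * Example invocation (hypothetical file names), following args_doc above:
 *
 * create-diff-object orig/foo.o patched/foo.o vmlinux path/to/parent-symtab \
 *         Module.symvers kpatch_foo output/foo.o
 */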
int main(int argc, char *argv[])
{
struct kpatch_elf *kelf_base, *kelf_patched, *kelf_out;
struct arguments arguments;
int num_changed, callbacks_exist, new_globals_exist;
struct lookup_table *lookup;
struct section *sec, *symtab;
struct symbol *sym;
char *hint = NULL, *orig_obj, *patched_obj, *parent_name;
char *parent_symtab, *mod_symvers, *patch_name, *output_obj;
struct sym_compare_type *base_locals, *sym_comp;
arguments.debug = 0;
argp_parse (&argp, argc, argv, 0, NULL, &arguments);
if (arguments.debug)
loglevel = DEBUG;
elf_version(EV_CURRENT);
orig_obj = arguments.args[0];
patched_obj = arguments.args[1];
parent_name = arguments.args[2];
parent_symtab = arguments.args[3];
mod_symvers = arguments.args[4];
patch_name = arguments.args[5];
output_obj = arguments.args[6];
childobj = basename(orig_obj);
kelf_base = kpatch_elf_open(orig_obj);
kelf_patched = kpatch_elf_open(patched_obj);
kpatch_compare_elf_headers(kelf_base->elf, kelf_patched->elf);
kpatch_check_program_headers(kelf_base->elf);
kpatch_check_program_headers(kelf_patched->elf);
kpatch_bundle_symbols(kelf_base);
kpatch_bundle_symbols(kelf_patched);
kpatch_detect_child_functions(kelf_base);
kpatch_detect_child_functions(kelf_patched);
list_for_each_entry(sym, &kelf_base->symbols, list) {
if (sym->type == STT_FILE) {
hint = strdup(sym->name);
break;
}
}
if (!hint) {
log_normal("WARNING: FILE symbol not found in base. Stripped object file or assembly source?\n");
return EXIT_STATUS_NO_CHANGE;
}
base_locals = kpatch_elf_locals(kelf_base);
kpatch_mark_grouped_sections(kelf_patched);
kpatch_replace_sections_syms(kelf_base);
kpatch_replace_sections_syms(kelf_patched);
kpatch_correlate_elfs(kelf_base, kelf_patched);
kpatch_correlate_static_local_variables(kelf_base, kelf_patched);
/*
* After this point, we don't care about kelf_base anymore.
* We access its sections via the twin pointers in the
* section, symbol, and rela lists of kelf_patched.
*/
kpatch_mark_ignored_sections(kelf_patched);
kpatch_compare_correlated_elements(kelf_patched);
kpatch_check_func_profiling_calls(kelf_patched);
kpatch_elf_teardown(kelf_base);
kpatch_elf_free(kelf_base);
kpatch_mark_ignored_functions_same(kelf_patched);
kpatch_mark_ignored_sections_same(kelf_patched);
kpatch_include_standard_elements(kelf_patched);
num_changed = kpatch_include_changed_functions(kelf_patched);
kpatch_include_debug_sections(kelf_patched);
callbacks_exist = kpatch_include_callback_elements(kelf_patched);
kpatch_include_force_elements(kelf_patched);
new_globals_exist = kpatch_include_new_globals(kelf_patched);
kpatch_process_special_sections(kelf_patched);
kpatch_print_changes(kelf_patched);
kpatch_dump_kelf(kelf_patched);
kpatch_verify_patchability(kelf_patched);
if (!num_changed && !new_globals_exist) {
if (callbacks_exist)
log_debug("no changed functions were found, but callbacks exist\n");
else {
log_debug("no changed functions were found\n");
free(hint);
return EXIT_STATUS_NO_CHANGE;
}
}
/* this is destructive to kelf_patched */
kpatch_migrate_included_elements(kelf_patched, &kelf_out);
/*
* Teardown kelf_patched since we shouldn't access sections or symbols
* through it anymore. Don't free however, since our section and symbol
* name fields still point to strings in the Elf object owned by
* kelf_patched.
*/
kpatch_elf_teardown(kelf_patched);
/* create symbol lookup table */
lookup = lookup_open(parent_symtab, mod_symvers, hint, base_locals);
for (sym_comp = base_locals; sym_comp && sym_comp->name; sym_comp++) {
free(sym_comp->name);
}
free(base_locals);
free(hint);
kpatch_no_sibling_calls_ppc64le(kelf_out);
/* create strings, patches, and dynrelas sections */
kpatch_create_strings_elements(kelf_out);
kpatch_create_patches_sections(kelf_out, lookup, parent_name);
kpatch_create_intermediate_sections(kelf_out, lookup, parent_name, patch_name);
kpatch_create_kpatch_arch_section(kelf_out, parent_name);
kpatch_create_callbacks_objname_rela(kelf_out, parent_name);
kpatch_build_strings_section_data(kelf_out);
kpatch_create_mcount_sections(kelf_out);
/*
* At this point, the set of output sections and symbols is
* finalized. Reorder the symbols into linker-compliant
* order and index all the symbols and sections. After the
* indexes have been established, update index data
* throughout the structure.
*/
kpatch_reorder_symbols(kelf_out);
kpatch_strip_unneeded_syms(kelf_out, lookup);
kpatch_reindex_elements(kelf_out);
/*
* Update rela section headers and rebuild the rela section data
* buffers from the relas lists.
*/
symtab = find_section_by_name(&kelf_out->sections, ".symtab");
if (!symtab)
ERROR("missing .symtab section");
list_for_each_entry(sec, &kelf_out->sections, list) {
if (!is_rela_section(sec))
continue;
sec->sh.sh_link = symtab->index;
sec->sh.sh_info = sec->base->index;
kpatch_rebuild_rela_section_data(sec);
}
kpatch_check_relocations(kelf_out);
kpatch_create_shstrtab(kelf_out);
kpatch_create_strtab(kelf_out);
kpatch_create_symtab(kelf_out);
kpatch_dump_kelf(kelf_out);
kpatch_write_output_elf(kelf_out, kelf_patched->elf, output_obj);
lookup_close(lookup);
kpatch_elf_free(kelf_patched);
kpatch_elf_teardown(kelf_out);
kpatch_elf_free(kelf_out);
return EXIT_STATUS_SUCCESS;
}