precalculate gnu hash rather than doing it lazily in find_sym inner loop

this change was suggested based on testing done by Timo Teräs almost
two years ago; the branch (and probably call prep overhead) in the
inner loop was found to contribute noticeably to total symbol lookup
time. this change will make lookup slightly slower if libraries were
built with only the traditional "sysv" ELF hash table, but based on
how much slower lookup tends to be without the gnu hash table, it
seems reasonable to assume that (1) users building without gnu hash
don't care about dynamic linking performance, and (2) the extra time
spent computing the gnu hash is likely to be dominated by the slowness
of the sysv hash table lookup anyway.
This commit is contained in:
Rich Felker 2017-03-15 16:50:19 -04:00
parent 8cba1dc46c
commit a393d5cc8d
1 changed file with 2 additions and 8 deletions

View File

@ -258,18 +258,12 @@ static Sym *gnu_lookup_filtered(uint32_t h1, uint32_t *hashtab, struct dso *dso,
static struct symdef find_sym(struct dso *dso, const char *s, int need_def) static struct symdef find_sym(struct dso *dso, const char *s, int need_def)
{ {
uint32_t h = 0, gh, gho, *ght; uint32_t h = 0, gh = gnu_hash(s), gho = gh / (8*sizeof(size_t)), *ght;
size_t ghm = 0; size_t ghm = 1ul << gh % (8*sizeof(size_t));
struct symdef def = {0}; struct symdef def = {0};
for (; dso; dso=dso->syms_next) { for (; dso; dso=dso->syms_next) {
Sym *sym; Sym *sym;
if ((ght = dso->ghashtab)) { if ((ght = dso->ghashtab)) {
if (!ghm) {
gh = gnu_hash(s);
int maskbits = 8 * sizeof ghm;
gho = gh / maskbits;
ghm = 1ul << gh % maskbits;
}
sym = gnu_lookup_filtered(gh, ght, dso, s, gho, ghm); sym = gnu_lookup_filtered(gh, ght, dso, s, gho, ghm);
} else { } else {
if (!h) h = sysv_hash(s); if (!h) h = sysv_hash(s);