mirror of
git://git.musl-libc.org/musl
synced 2025-01-28 17:42:46 +00:00
a4a3e4dbc0
this will allow the compiler to cache and reuse the result, meaning we
no longer have to take care not to load it more than once for the sake
of archs where the load may be expensive.
depends on commit 1c84c99913 for correctness, since otherwise the
compiler could hoist loads during stage 3 of dynamic linking before
the initial thread-pointer setup.
19 lines · 460 B · C
/* or1k use variant I, but with the twist that tp points to the end of TCB */
|
|
static inline struct pthread *__pthread_self()
|
|
{
|
|
#ifdef __clang__
|
|
char *tp;
|
|
__asm__ ("l.ori %0, r10, 0" : "=r" (tp) );
|
|
#else
|
|
register char *tp __asm__("r10");
|
|
__asm__ ("" : "=r" (tp) );
|
|
#endif
|
|
return (struct pthread *) (tp - sizeof(struct pthread));
|
|
}
|
|
|
|
/* TLS layout: variant I, with the TLS area located above the thread
 * pointer and no gap between the thread pointer and that area. */
#define TLS_ABOVE_TP
#define GAP_ABOVE_TP 0

/* Compute the thread-pointer value (r10) for a given pthread structure:
 * the inverse of __pthread_self, i.e. the address one past the TCB. */
#define TP_ADJ(p) ((char *)(p) + sizeof(struct pthread))

/* Member path of the program counter within the machine context
 * (mcontext_t) for this architecture. */
#define MC_PC regs.pc
|