mirror of git://git.musl-libc.org/musl
improve pthread_exit synchronization with functions targeting tid
If the last thread exited via pthread_exit, the logic that marked it dead did not account for the possibility of it targeting itself via atexit handlers. For example, an atexit handler calling pthread_kill(pthread_self(), SIGKILL) would return success (previously, ESRCH) rather than causing termination via the signal. Move the release of killlock until after the determination is made whether the exiting thread is the last thread; in the case where it's not, move the release all the way to the end of the function. This way we can clear the tid rather than spending storage on a dedicated dead flag. Clearing the tid is also preferable in that it hardens against inadvertent use of the value after the thread has terminated but before it is joined.
This commit is contained in:
parent
4df4216351
commit
526e64f54d
|
@@ -34,7 +34,6 @@ struct pthread {
 	void *result;
 	struct __ptcb *cancelbuf;
 	void **tsd;
-	volatile int dead;
 	struct {
 		volatile void *volatile head;
 		long off;
@@ -39,9 +39,11 @@ _Noreturn void __pthread_exit(void *result)
 
 	LOCK(self->exitlock);
 
-	/* Mark this thread dead before decrementing count */
+	/* Access to target the exiting thread with syscalls that use
+	 * its kernel tid is controlled by killlock. For detached threads,
+	 * any use past this point would have undefined behavior, but for
+	 * joinable threads it's a valid usage that must be handled. */
 	LOCK(self->killlock);
-	self->dead = 1;
 
 	/* Block all signals before decrementing the live thread count.
 	 * This is important to ensure that dynamically allocated TLS
@@ -49,20 +51,14 @@ _Noreturn void __pthread_exit(void *result)
 	 * reasons as well. */
 	__block_all_sigs(&set);
 
-	/* Wait to unlock the kill lock, which governs functions like
-	 * pthread_kill which target a thread id, until signals have
-	 * been blocked. This precludes observation of the thread id
-	 * as a live thread (with application code running in it) after
-	 * the thread was reported dead by ESRCH being returned. */
-	UNLOCK(self->killlock);
-
 	/* It's impossible to determine whether this is "the last thread"
 	 * until performing the atomic decrement, since multiple threads
 	 * could exit at the same time. For the last thread, revert the
-	 * decrement and unblock signals to give the atexit handlers and
-	 * stdio cleanup code a consistent state. */
+	 * decrement, restore the tid, and unblock signals to give the
+	 * atexit handlers and stdio cleanup code a consistent state. */
 	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
 		libc.threads_minus_1 = 0;
+		UNLOCK(self->killlock);
 		__restore_sigs(&set);
 		exit(0);
 	}
@@ -113,6 +109,12 @@ _Noreturn void __pthread_exit(void *result)
 		__unmapself(self->map_base, self->map_size);
 	}
 
+	/* After the kernel thread exits, its tid may be reused. Clear it
+	 * to prevent inadvertent use and inform functions that would use
+	 * it that it's no longer available. */
+	self->tid = 0;
+	UNLOCK(self->killlock);
+
 	for (;;) __syscall(SYS_exit, 0);
 }
 
@@ -4,7 +4,7 @@ int pthread_getschedparam(pthread_t t, int *restrict policy, struct sched_param
 {
 	int r;
 	LOCK(t->killlock);
-	if (t->dead) {
+	if (!t->tid) {
 		r = ESRCH;
 	} else {
 		r = -__syscall(SYS_sched_getparam, t->tid, param);
@@ -4,8 +4,8 @@ int pthread_kill(pthread_t t, int sig)
 {
 	int r;
 	LOCK(t->killlock);
-	r = t->dead ? (sig+0U >= _NSIG ? EINVAL : 0)
-		: -__syscall(SYS_tkill, t->tid, sig);
+	r = t->tid ? -__syscall(SYS_tkill, t->tid, sig)
+		: (sig+0U >= _NSIG ? EINVAL : 0);
 	UNLOCK(t->killlock);
 	return r;
 }
@@ -4,7 +4,7 @@ int pthread_setschedparam(pthread_t t, int policy, const struct sched_param *par
 {
 	int r;
 	LOCK(t->killlock);
-	r = t->dead ? ESRCH : -__syscall(SYS_sched_setscheduler, t->tid, policy, param);
+	r = !t->tid ? ESRCH : -__syscall(SYS_sched_setscheduler, t->tid, policy, param);
 	UNLOCK(t->killlock);
 	return r;
 }
@@ -4,7 +4,7 @@ int pthread_setschedprio(pthread_t t, int prio)
 {
 	int r;
 	LOCK(t->killlock);
-	r = t->dead ? ESRCH : -__syscall(SYS_sched_setparam, t->tid, &prio);
+	r = !t->tid ? ESRCH : -__syscall(SYS_sched_setparam, t->tid, &prio);
 	UNLOCK(t->killlock);
 	return r;
 }
Loading…
Reference in New Issue