MEDIUM: fd/threads: only grab the fd's lock if the FD has more than one thread

The vast majority of FDs are only seen by one thread. Currently the lock
on FDs costs a lot because it's touched often, though there should be very
little contention. This patch ensures that the lock is only grabbed if the
FD is shared by more than one thread, since otherwise the situation is safe.
Doing so resulted in a 15% performance boost on a 12-thread test.
This commit is contained in:
Willy Tarreau 2018-10-15 09:44:46 +02:00
parent 9504dd64c6
commit 87d54a9a6d
2 changed files with 59 additions and 29 deletions

View File

@@ -289,9 +289,11 @@ static inline void fd_stop_recv(int fd)
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Disable processing send events on fd <fd> */
@@ -310,9 +312,11 @@ static inline void fd_stop_send(int fd)
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Disable processing of events on fd <fd> for both directions. */
@@ -331,9 +335,11 @@ static inline void fd_stop_both(int fd)
if ((old ^ new) & FD_EV_POLLED_RW)
updt_fd_polling(fd);
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> cannot receive anymore without polling (EAGAIN detected). */
@@ -353,9 +359,11 @@ static inline void fd_cant_recv(const int fd)
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> can receive anymore without polling. */
@@ -364,9 +372,11 @@ static inline void fd_may_recv(const int fd)
/* marking ready never changes polled status */
HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_R);
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Disable readiness when polled. This is useful to interrupt reading when it
@@ -390,9 +400,11 @@ static inline void fd_done_recv(const int fd)
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> cannot send anymore without polling (EAGAIN detected). */
@@ -412,9 +424,11 @@ static inline void fd_cant_send(const int fd)
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Report that FD <fd> can send anymore without polling (EAGAIN detected). */
@@ -423,9 +437,11 @@ static inline void fd_may_send(const int fd)
/* marking ready never changes polled status */
HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_W);
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Prepare FD <fd> to try to receive */
@@ -445,9 +461,11 @@ static inline void fd_want_recv(int fd)
if ((old ^ new) & FD_EV_POLLED_R)
updt_fd_polling(fd);
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Prepare FD <fd> to try to send */
@@ -467,19 +485,23 @@ static inline void fd_want_send(int fd)
if ((old ^ new) & FD_EV_POLLED_W)
updt_fd_polling(fd);
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fd_update_cache(fd); /* need an update entry to change the state */
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Update events seen for FD <fd> and its state if needed. This should be called
* by the poller to set FD_POLL_* flags. */
static inline void fd_update_events(int fd, int evts)
{
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].ev &= FD_POLL_STICKY;
fdtab[fd].ev |= evts;
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
fd_may_recv(fd);
@@ -491,7 +513,8 @@ static inline void fd_update_events(int fd, int evts)
/* Prepares <fd> for being polled */
static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), unsigned long thread_mask)
{
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(thread_mask))
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].owner = owner;
fdtab[fd].iocb = iocb;
fdtab[fd].ev = 0;
@@ -501,7 +524,8 @@ static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), unsigned
/* note: do not reset polled_mask here as it indicates which poller
* still knows this FD from a possible previous round.
*/
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* These are replacements for FD_SET, FD_CLR, FD_ISSET, working on uints */

View File

@@ -359,7 +359,10 @@ done:
*/
static void fd_dodelete(int fd, int do_close)
{
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
unsigned long locked = atleast2(fdtab[fd].thread_mask);
if (locked)
HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
if (fdtab[fd].linger_risk) {
/* this is generally set when connecting to servers */
setsockopt(fd, SOL_SOCKET, SO_LINGER,
@@ -379,7 +382,8 @@ static void fd_dodelete(int fd, int do_close)
polled_mask[fd] = 0;
close(fd);
}
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (locked)
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
/* Deletes an FD from the fdsets.
@@ -417,7 +421,7 @@ static inline void fdlist_process_cached_events(volatile struct fdlist *fdlist)
continue;
HA_ATOMIC_OR(&fd_cache_mask, tid_bit);
if (HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
if (atleast2(fdtab[fd].thread_mask) && HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
activity[tid].fd_lock++;
continue;
}
@@ -432,12 +436,14 @@ static inline void fdlist_process_cached_events(volatile struct fdlist *fdlist)
fdtab[fd].ev |= FD_POLL_OUT;
if (fdtab[fd].iocb && fdtab[fd].owner && fdtab[fd].ev) {
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
fdtab[fd].iocb(fd);
}
else {
fd_release_cache_entry(fd);
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
if (atleast2(fdtab[fd].thread_mask))
HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}
}
}