MINOR: poller: centralize poll return handling

When returning from the polling syscall, all pollers have a certain
dance to follow, made of wall clock updates, thread harmless updates,
idle time management and sleeping mask updates. Let's have a centralized
function to deal with all of this boring stuff: fd_leaving_poll(), and
make all the pollers use it.
This commit is contained in:
Willy Tarreau 2022-06-22 15:21:34 +02:00
parent bdcd32598f
commit 058b2c1015
7 changed files with 21 additions and 34 deletions

View File

@ -79,6 +79,7 @@ ssize_t fd_write_frag_line(int fd, size_t maxlen, const struct ist pfx[], size_t
void my_closefrom(int start); void my_closefrom(int start);
int compute_poll_timeout(int next); int compute_poll_timeout(int next);
void fd_leaving_poll(int wait_time, int status);
/* disable the specified poller */ /* disable the specified poller */
void disable_poller(const char *poller_name); void disable_poller(const char *poller_name);

View File

@ -208,13 +208,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
break; break;
} while (1); } while (1);
clock_leaving_poll(wait_time, status); fd_leaving_poll(wait_time, status);
thread_harmless_end();
thread_idle_end();
if (sleeping_thread_mask & tid_bit)
_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
/* process polled events */ /* process polled events */

View File

@ -202,13 +202,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
break; break;
} while(1); } while(1);
clock_leaving_poll(wait_time, nevlist); fd_leaving_poll(wait_time, nevlist);
thread_harmless_end();
thread_idle_end();
if (sleeping_thread_mask & tid_bit)
_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
if (nevlist > 0) if (nevlist > 0)
activity[tid].poll_io++; activity[tid].poll_io++;

View File

@ -174,13 +174,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
break; break;
} while (1); } while (1);
clock_leaving_poll(wait_time, status); fd_leaving_poll(wait_time, status);
thread_harmless_end();
thread_idle_end();
if (sleeping_thread_mask & tid_bit)
_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
for (count = 0; count < status; count++) { for (count = 0; count < status; count++) {
unsigned int n = 0; unsigned int n = 0;

View File

@ -205,13 +205,8 @@ static void _do_poll(struct poller *p, int exp, int wake)
clock_entering_poll(); clock_entering_poll();
status = poll(poll_events, nbfd, wait_time); status = poll(poll_events, nbfd, wait_time);
clock_update_date(wait_time, status); clock_update_date(wait_time, status);
clock_leaving_poll(wait_time, status);
thread_harmless_end(); fd_leaving_poll(wait_time, status);
thread_idle_end();
if (sleeping_thread_mask & tid_bit)
_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
if (status > 0) if (status > 0)
activity[tid].poll_io++; activity[tid].poll_io++;

View File

@ -180,13 +180,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
NULL, NULL,
&delta); &delta);
clock_update_date(delta_ms, status); clock_update_date(delta_ms, status);
clock_leaving_poll(delta_ms, status); fd_leaving_poll(delta_ms, status);
thread_harmless_end();
thread_idle_end();
if (sleeping_thread_mask & tid_bit)
_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
if (status <= 0) if (status <= 0)
return; return;

View File

@ -759,6 +759,21 @@ int compute_poll_timeout(int next)
return wait_time; return wait_time;
} }
/* Handle the return from the polling syscall. All pollers must perform the
 * same fixed sequence right after their polling syscall returns: account the
 * time spent polling, mark the thread harmful again (i.e. end the harmless
 * and idle states entered before polling), and clear this thread's bit from
 * the sleeping threads mask. This function centralizes that boring dance so
 * each poller only has to call it once.
 *
 * <wait_time> is the timeout that was passed to the polling syscall and
 * <status> is the poller's return value (number of reported events); both
 * are forwarded to clock_leaving_poll() for idle-time accounting.
 */
void fd_leaving_poll(int wait_time, int status)
{
	/* update idle/busy time accounting with the poll duration and result */
	clock_leaving_poll(wait_time, status);
	/* leave the "harmless" then "idle" states entered before polling;
	 * from now on this thread may impact other threads again
	 */
	thread_harmless_end();
	thread_idle_end();
	/* drop our bit from the sleeping threads mask now that we are awake.
	 * NOTE(review): the non-atomic test before the atomic AND avoids an
	 * atomic op when the bit is already clear; presumably only this
	 * thread clears its own bit here so the check-then-act race is
	 * benign — confirm against the code that sets the bit.
	 */
	if (sleeping_thread_mask & tid_bit)
		_HA_ATOMIC_AND(&sleeping_thread_mask, ~tid_bit);
}
/* disable the specified poller */ /* disable the specified poller */
void disable_poller(const char *poller_name) void disable_poller(const char *poller_name)
{ {