REORG: fd: move the fd state management from ev_sepoll

ev_sepoll already provides everything needed to manage FD events
by only manipulating the speculative I/O list. Nothing there is
sepoll-specific so move all this to fd.
Willy Tarreau 2012-11-11 16:05:19 +01:00
parent 7be79a41e1
commit 6ea20b1acb
3 changed files with 96 additions and 113 deletions
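Note on the new fd_ev_* helpers added below: they all work by flipping per-direction status bits in fdtab[fd].spec_e and queuing the fd on the update list through updt_fd(). The following standalone sketch models the bit layout those shifts imply; the constant values and names (FD_EV_ACTIVE = 1, FD_EV_POLLED = 4, the CURR/PREV nibbles) are assumptions inferred from the diff, not copied from HAProxy's headers.

#include <stdio.h>

/* Assumed layout: one ACTIVE bit and one POLLED bit per direction, with
 * DIR_RD = 0 and DIR_WR = 1 so that "<< dir" selects the read or write
 * slot; the upper nibble keeps the previous state for update processing. */
enum { DIR_RD = 0, DIR_WR = 1 };
enum {
    FD_EV_ACTIVE    = 1,
    FD_EV_POLLED    = 4,
    FD_EV_STATUS    = FD_EV_ACTIVE | FD_EV_POLLED,
    FD_EV_CURR_MASK = 0x0F,
    FD_EV_PREV_MASK = 0xF0,
};

int main(void)
{
    unsigned int spec_e = 0;

    /* what fd_ev_set(fd, DIR_RD) does: mark the read side active */
    spec_e |= FD_EV_ACTIVE << DIR_RD;

    /* what fd_ev_wai(fd, DIR_WR) does: switch the write side to "polled" */
    unsigned int i = spec_e & (FD_EV_STATUS << DIR_WR);
    spec_e ^= i ^ (FD_EV_POLLED << DIR_WR);

    printf("spec_e = 0x%02x\n", spec_e); /* 0x09: read active, write polled */
    return 0;
}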


@@ -120,40 +120,115 @@ static inline void release_spec_entry(int fd)
}
}
/*
* Returns non-zero if <fd> is already monitored for events in direction <dir>.
*/
static inline int fd_ev_is_set(const int fd, int dir)
{
if (cur_poller.is_set)
return cur_poller.is_set(fd, dir);
return ((unsigned)fdtab[fd].spec_e >> dir) & FD_EV_STATUS;
}
/* Disable processing of events on fd <fd> for direction <dir>. Note: this
* function was optimized to be used with a constant for <dir>.
*/
static inline void fd_ev_clr(const int fd, int dir)
{
unsigned int i = ((unsigned int)fdtab[fd].spec_e) & (FD_EV_STATUS << dir);
if (i == 0)
return; /* already disabled */
fdtab[fd].spec_e ^= i;
updt_fd(fd); /* need an update entry to change the state */
}
/* Enable polling for events on fd <fd> for direction <dir>. Note: this
* function was optimized to be used with a constant for <dir>.
*/
static inline void fd_ev_wai(const int fd, int dir)
{
unsigned int i = ((unsigned int)fdtab[fd].spec_e) & (FD_EV_STATUS << dir);
if (i == (FD_EV_POLLED << dir))
return; /* already in desired state */
fdtab[fd].spec_e ^= i ^ (FD_EV_POLLED << dir);
updt_fd(fd); /* need an update entry to change the state */
}
/* Enable processing of events on fd <fd> for direction <dir>. Note: this
* function was optimized to be used with a constant for <dir>.
*/
static inline void fd_ev_set(int fd, int dir)
{
unsigned int i = ((unsigned int)fdtab[fd].spec_e) & (FD_EV_STATUS << dir);
/* note that we don't care about disabling the polled state when
* enabling the active state, since it brings no benefit but costs
* some syscalls.
*/
if (i & (FD_EV_ACTIVE << dir))
return; /* already in desired state */
fdtab[fd].spec_e |= (FD_EV_ACTIVE << dir);
updt_fd(fd); /* need an update entry to change the state */
}
/* Disable processing of events on fd <fd> for both directions. */
static inline void fd_ev_rem(const int fd)
{
unsigned int i = ((unsigned int)fdtab[fd].spec_e) & FD_EV_CURR_MASK;
if (i == 0)
return; /* already disabled */
fdtab[fd].spec_e ^= i;
updt_fd(fd); /* need an update entry to change the state */
}
/* event manipulation primitives for use by I/O callbacks */
static inline void fd_want_recv(int fd)
{
-cur_poller.set(fd, DIR_RD);
+if (cur_poller.set)
+return cur_poller.set(fd, DIR_RD);
+return fd_ev_set(fd, DIR_RD);
}
static inline void fd_stop_recv(int fd)
{
-cur_poller.clr(fd, DIR_RD);
+if (cur_poller.clr)
+return cur_poller.clr(fd, DIR_RD);
+return fd_ev_clr(fd, DIR_RD);
}
static inline void fd_poll_recv(int fd)
{
-cur_poller.wai(fd, DIR_RD);
+if (cur_poller.wai)
+return cur_poller.wai(fd, DIR_RD);
+return fd_ev_wai(fd, DIR_RD);
}
static inline void fd_want_send(int fd)
{
-cur_poller.set(fd, DIR_WR);
+if (cur_poller.set)
+return cur_poller.set(fd, DIR_WR);
+return fd_ev_set(fd, DIR_WR);
}
static inline void fd_stop_send(int fd)
{
-cur_poller.clr(fd, DIR_WR);
+if (cur_poller.clr)
+return cur_poller.clr(fd, DIR_WR);
+return fd_ev_clr(fd, DIR_WR);
}
static inline void fd_poll_send(int fd)
{
-cur_poller.wai(fd, DIR_WR);
+if (cur_poller.wai)
+return cur_poller.wai(fd, DIR_WR);
+return fd_ev_wai(fd, DIR_WR);
}
static inline void fd_stop_both(int fd)
{
-cur_poller.rem(fd);
+if (cur_poller.rem)
+return cur_poller.rem(fd);
+return fd_ev_rem(fd);
}
/* Prepares <fd> for being polled */

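The primitives above all follow the same dispatch-with-fallback pattern: if the registered poller still provides its own hook the call is forwarded to it, otherwise the new generic fd_ev_* state management takes over. A self-contained illustration of that pattern (names such as generic_set and fd_want are illustrative, not the real HAProxy definitions):

#include <stdio.h>

struct poller {
    void (*set)(int fd, int dir);     /* optional poller-specific hook */
};

static struct poller cur_poller;      /* .set left NULL: generic path used */

static void generic_set(int fd, int dir)
{
    /* stands in for fd_ev_set(): update spec_e and queue an update entry */
    printf("generic: enable dir %d on fd %d\n", dir, fd);
}

static void fd_want(int fd, int dir)
{
    if (cur_poller.set) {
        cur_poller.set(fd, dir);      /* poller overrides the behaviour */
        return;
    }
    generic_set(fd, dir);             /* shared fallback for all pollers */
}

int main(void)
{
    fd_want(7, 0);                    /* no hook registered: generic path */
    return 0;
}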

@@ -30,7 +30,6 @@
static int absmaxevents = 0; // absolute maximum amounts of polled events
static int in_poll_loop = 0; // non-null if polled events are being processed
/* private data */
static struct epoll_event *epoll_events;
@@ -41,99 +40,6 @@ static int epoll_fd;
*/
static struct epoll_event ev;
/*
* Returns non-zero if <fd> is already monitored for events in direction <dir>.
*/
REGPRM2 static int __fd_is_set(const int fd, int dir)
{
#if DEBUG_DEV
if (!fdtab[fd].owner) {
fprintf(stderr, "sepoll.fd_isset called on closed fd #%d.\n", fd);
ABORT_NOW();
}
#endif
return ((unsigned)fdtab[fd].spec_e >> dir) & FD_EV_STATUS;
}
/*
* Don't worry about the strange constructs in __fd_set/__fd_clr, they are
* designed like this in order to reduce the number of jumps (verified).
*/
REGPRM2 static void __fd_wai(const int fd, int dir)
{
unsigned int i;
#if DEBUG_DEV
if (!fdtab[fd].owner) {
fprintf(stderr, "sepoll.fd_wai called on closed fd #%d.\n", fd);
ABORT_NOW();
}
#endif
i = ((unsigned)fdtab[fd].spec_e >> dir) & FD_EV_STATUS;
if (i == FD_EV_POLLED)
return; /* already in desired state */
updt_fd(fd); /* need an update entry to change the state */
fdtab[fd].spec_e ^= (i ^ (unsigned int)FD_EV_POLLED) << dir;
}
REGPRM2 static void __fd_set(const int fd, int dir)
{
unsigned int i;
#if DEBUG_DEV
if (!fdtab[fd].owner) {
fprintf(stderr, "sepoll.fd_set called on closed fd #%d.\n", fd);
ABORT_NOW();
}
#endif
i = ((unsigned)fdtab[fd].spec_e >> dir) & FD_EV_STATUS;
/* note that we don't care about disabling the polled state when
* enabling the active state, since it brings no benefit but costs
* some syscalls.
*/
if (i & FD_EV_ACTIVE)
return; /* already in desired state */
updt_fd(fd); /* need an update entry to change the state */
fdtab[fd].spec_e |= ((unsigned int)FD_EV_ACTIVE) << dir;
}
REGPRM2 static void __fd_clr(const int fd, int dir)
{
unsigned int i;
#if DEBUG_DEV
if (!fdtab[fd].owner) {
fprintf(stderr, "sepoll.fd_clr called on closed fd #%d.\n", fd);
ABORT_NOW();
}
#endif
i = ((unsigned)fdtab[fd].spec_e >> dir) & FD_EV_STATUS;
if (i == 0)
return /* already disabled */;
updt_fd(fd); /* need an update entry to change the state */
fdtab[fd].spec_e ^= i << dir;
}
/* normally unused */
REGPRM1 static void __fd_rem(int fd)
{
__fd_clr(fd, DIR_RD);
__fd_clr(fd, DIR_WR);
}
/*
* On valid epoll() implementations, a call to close() automatically removes
* the fds. This means that the FD will appear as previously unset.
*/
REGPRM1 static void __fd_clo(int fd)
{
release_spec_entry(fd);
fdtab[fd].spec_e &= ~(FD_EV_CURR_MASK | FD_EV_PREV_MASK);
}
/*
* speculative epoll() poller
*/
@@ -229,8 +135,6 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
tv_update_date(wait_time, status);
measure_idle();
in_poll_loop = 1;
/* process polled events */
for (count = 0; count < status; count++) {
@@ -259,10 +163,10 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
* to poll again.
*/
if (fdtab[fd].ev & (FD_POLL_IN|FD_POLL_HUP|FD_POLL_ERR))
-__fd_set(fd, DIR_RD);
+fd_ev_set(fd, DIR_RD);
if (fdtab[fd].ev & (FD_POLL_OUT|FD_POLL_ERR))
-__fd_set(fd, DIR_WR);
+fd_ev_set(fd, DIR_WR);
fdtab[fd].iocb(fd);
@@ -338,7 +242,6 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
spec_idx++;
}
in_poll_loop = 0;
/* in the end, we have processed status + spec_processed FDs */
}
@@ -448,12 +351,12 @@ static void _do_register(void)
p->poll = _do_poll;
p->fork = _do_fork;
-p->is_set = __fd_is_set;
-p->set = __fd_set;
-p->wai = __fd_wai;
-p->clr = __fd_clr;
-p->rem = __fd_rem;
-p->clo = __fd_clo;
+p->is_set = NULL;
+p->set = NULL;
+p->wai = NULL;
+p->clr = NULL;
+p->rem = NULL;
+p->clo = NULL;
}

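With ev_sepoll's private state handlers gone, what the generic helpers rely on is the update list that updt_fd() feeds ("need an update entry to change the state") and that the poller drains before each wait. The sketch below models that mechanism with simplified, illustrative names; the actual fd_updt/fd_nbupdt handling lives in fd.c/fd.h and may differ in detail.

#define MAXFD 1024

struct fd_state {
    unsigned char spec_e;             /* current + previous event state    */
    unsigned char updated;            /* already queued on the update list */
};

static struct fd_state tab[MAXFD];
static int updt[MAXFD];               /* fds with a pending state change   */
static int nbupdt;

/* model of updt_fd(): queue an fd at most once per polling loop */
static void queue_update(int fd)
{
    if (tab[fd].updated)
        return;
    tab[fd].updated = 1;
    updt[nbupdt++] = fd;
}

/* the poller drains the list before polling again */
static void apply_updates(void)
{
    for (int i = 0; i < nbupdt; i++) {
        int fd = updt[i];
        tab[fd].updated = 0;
        /* compare the CURR and PREV nibbles of spec_e, issue the matching
         * EPOLL_CTL_ADD/MOD/DEL, then copy CURR over PREV */
    }
    nbupdt = 0;
}

int main(void)
{
    queue_update(7);
    queue_update(7);                   /* second call is a no-op */
    apply_updates();
    return 0;
}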

@@ -116,7 +116,12 @@ unsigned int *fd_updt = NULL; // FD updates list
*/
void fd_delete(int fd)
{
-cur_poller.clo(fd);
+if (cur_poller.clo)
+cur_poller.clo(fd);
+release_spec_entry(fd);
+fdtab[fd].spec_e &= ~(FD_EV_CURR_MASK | FD_EV_PREV_MASK);
port_range_release_port(fdinfo[fd].port_range, fdinfo[fd].local_port);
fdinfo[fd].port_range = NULL;
close(fd);
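Since cur_poller.clo may now be NULL, fd_delete() unlinks the fd from the speculative I/O list itself via release_spec_entry() (declared in the first hunk). That helper relies on the classic swap-with-last removal from a dense array; the standalone model below shows the technique with illustrative names, not the actual fd.h fields.

#define MAXFD 1024

static int spec_list[MAXFD];   /* dense array of fds with speculative events */
static int spec_count;
static int spec_pos[MAXFD];    /* 1-based position in spec_list, 0 = absent  */

static void spec_remove(int fd)
{
    int pos = spec_pos[fd];

    if (!pos)
        return;                /* fd is not on the list */
    spec_pos[fd] = 0;
    pos--;                     /* back to a 0-based index */

    spec_count--;
    if (pos != spec_count) {
        /* move the last entry into the hole left by <fd>: O(1) removal */
        int last = spec_list[spec_count];
        spec_list[pos] = last;
        spec_pos[last] = pos + 1;
    }
}

int main(void)
{
    /* add two fds, then remove the first one */
    spec_list[spec_count] = 4; spec_pos[4] = ++spec_count;
    spec_list[spec_count] = 9; spec_pos[9] = ++spec_count;
    spec_remove(4);            /* fd 9 now occupies slot 0 */
    return 0;
}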