Mirror of http://git.haproxy.org/git/haproxy.git/ (synced 2024-12-17 00:44:33 +00:00)
MEDIUM: task: extend the state field to 32 bits
It's been too short for quite a while now and is now full. It is time to extend it to 32 bits, since we have room for this without wasting any space, so we now gain 16 new bits for future flags.

The values were not reassigned, just in case a few hidden u16 or short fields somewhere still carry these flags (as used to be the case with stream->pending_events).

The patch is tagged MEDIUM because it required updating the task's process() prototype to use an int instead of a short, which touches quite a number of places.
commit 144f84a09d
parent e0d5942ddd
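A note on the second paragraph of the message above: any leftover 16-bit storage truncates every flag above bit 15, which is exactly the bug class the commit message warns about. The following standalone sketch illustrates it (this is not part of the patch; TASK_F_EXAMPLE is a hypothetical future flag, the two other values are copied from the new task-t.h below):

#include <stdio.h>

/* Flag values copied from the new task-t.h; TASK_F_EXAMPLE is a
 * hypothetical future flag using one of the 16 newly gained bits. */
#define TASK_WOKEN_TIMER 0x00000200
#define TASK_F_TASKLET   0x00008000  /* highest bit that fits in a u16 */
#define TASK_F_EXAMPLE   0x00010000  /* hypothetical: requires the 32-bit field */

int main(void)
{
	unsigned int state = TASK_WOKEN_TIMER | TASK_F_EXAMPLE;

	/* a forgotten u16 field (like the old stream->pending_events)
	 * silently drops everything above bit 15 */
	unsigned short leaked = (unsigned short)state;

	printf("32-bit state: 0x%08x\n", state);             /* 0x00010200 */
	printf("through u16:  0x%08x\n", (unsigned)leaked);  /* 0x00000200 */
	return 0;
}

This is why widening the field alone is not enough, and why every prototype in the diff below moves from "unsigned short state" to "unsigned int state".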
@@ -33,7 +33,7 @@
 extern unsigned int nb_applets;
 extern struct pool_head *pool_head_appctx;
 
-struct task *task_run_applet(struct task *t, void *context, unsigned short state);
+struct task *task_run_applet(struct task *t, void *context, unsigned int state);
 int appctx_buf_available(void *arg);

@@ -39,7 +39,7 @@ void chk_report_conn_err(struct check *check, int errno_bck, int expired);
 void check_notify_failure(struct check *check);
 void check_notify_stopping(struct check *check);
 void check_notify_success(struct check *check);
-struct task *process_chk(struct task *t, void *context, unsigned short state);
+struct task *process_chk(struct task *t, void *context, unsigned int state);
 
 int check_buf_available(void *target);
 struct buffer *check_get_buf(struct check *check, struct buffer *bptr);

@@ -706,7 +706,7 @@ static inline void cs_attach(struct conn_stream *cs, void *data, const struct da
 	cs->data = data;
 }
 
-static inline struct wait_event *wl_set_waitcb(struct wait_event *wl, struct task *(*cb)(struct task *, void *, unsigned short), void *ctx)
+static inline struct wait_event *wl_set_waitcb(struct wait_event *wl, struct task *(*cb)(struct task *, void *, unsigned int), void *ctx)
 {
 	if (!wl->tasklet->process) {
 		wl->tasklet->process = cb;

@@ -27,7 +27,7 @@
 #include <haproxy/proxy-t.h>
 #include <haproxy/task-t.h>
 
-struct task *process_chk_proc(struct task *t, void *context, unsigned short state);
+struct task *process_chk_proc(struct task *t, void *context, unsigned int state);
 int prepare_external_check(struct check *check);
 int init_pid_list(void);

@@ -48,7 +48,7 @@ void hlua_init();
 int hlua_post_init();
 void hlua_applet_tcp_fct(struct appctx *ctx);
 void hlua_applet_http_fct(struct appctx *ctx);
-struct task *hlua_process_task(struct task *task, void *context, unsigned short state);
+struct task *hlua_process_task(struct task *task, void *context, unsigned int state);
 
 #else /* USE_LUA */

@@ -213,8 +213,8 @@ static inline const char *listener_state_str(const struct listener *l)
 	return states[st];
 }
 
-struct task *accept_queue_process(struct task *t, void *context, unsigned short state);
-struct task *manage_global_listener_queue(struct task *t, void *context, unsigned short state);
+struct task *accept_queue_process(struct task *t, void *context, unsigned int state);
+struct task *manage_global_listener_queue(struct task *t, void *context, unsigned int state);
 
 extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] __attribute__((aligned(64)));

@@ -39,7 +39,7 @@ extern struct eb_root proxy_by_name; /* tree of proxies sorted by name */
 extern const struct cfg_opt cfg_opts[];
 extern const struct cfg_opt cfg_opts2[];
 
-struct task *manage_proxy(struct task *t, void *context, unsigned short state);
+struct task *manage_proxy(struct task *t, void *context, unsigned int state);
 void proxy_cond_disable(struct proxy *p);
 void soft_stop(void);
 int pause_proxy(struct proxy *p);

@@ -68,8 +68,8 @@ int snr_resolution_cb(struct resolv_requester *requester, struct dns_counters *c
 int srvrq_resolution_error_cb(struct resolv_requester *requester, int error_code);
 int snr_resolution_error_cb(struct resolv_requester *requester, int error_code);
 struct server *snr_check_ip_callback(struct server *srv, void *ip, unsigned char *ip_family);
-struct task *srv_cleanup_idle_conns(struct task *task, void *ctx, unsigned short state);
-struct task *srv_cleanup_toremove_conns(struct task *task, void *context, unsigned short state);
+struct task *srv_cleanup_idle_conns(struct task *task, void *ctx, unsigned int state);
+struct task *srv_cleanup_toremove_conns(struct task *task, void *context, unsigned int state);
 
 /*
  * Registers the server keyword list <kwl> as a list of valid keywords for next

@@ -37,7 +37,7 @@ struct session *session_new(struct proxy *fe, struct listener *li, enum obj_type
 void session_free(struct session *sess);
 int session_accept_fd(struct connection *cli_conn);
 int conn_complete_session(struct connection *conn);
-struct task *session_expire_embryonic(struct task *t, void *context, unsigned short state);
+struct task *session_expire_embryonic(struct task *t, void *context, unsigned int state);
 
 /* Remove the refcount from the session to the tracked counters, and clear the
  * pointer to ensure this is only performed once. The caller is responsible for

@@ -134,7 +134,7 @@ struct stream {
 	struct http_txn *txn; /* current HTTP transaction being processed. Should become a list. */
 
 	struct task *task; /* the task associated with this stream */
-	unsigned short pending_events; /* the pending events not yet processed by the stream.
+	unsigned int pending_events; /* the pending events not yet processed by the stream.
 	                                * This is a bit field of TASK_WOKEN_* */
 	int16_t priority_class; /* priority class of the stream for the pending queue */
 	int32_t priority_offset; /* priority offset of the stream for the pending queue */

@@ -70,7 +70,7 @@ struct ist stream_generate_unique_id(struct stream *strm, struct list *format);
 
 void stream_process_counters(struct stream *s);
 void sess_change_server(struct stream *sess, struct server *newsrv);
-struct task *process_stream(struct task *t, void *context, unsigned short state);
+struct task *process_stream(struct task *t, void *context, unsigned int state);
 void default_srv_error(struct stream *s, struct stream_interface *si);
 int parse_track_counters(char **args, int *arg,
                          int section_type, struct proxy *curpx,

@@ -45,7 +45,7 @@ void si_applet_wake_cb(struct stream_interface *si);
 void si_update_rx(struct stream_interface *si);
 void si_update_tx(struct stream_interface *si);
 int si_cs_recv(struct conn_stream *cs);
-struct task *si_cs_io_cb(struct task *t, void *ctx, unsigned short state);
+struct task *si_cs_io_cb(struct task *t, void *ctx, unsigned int state);
 void si_update_both(struct stream_interface *si_f, struct stream_interface *si_b);
 void si_sync_send(struct stream_interface *si);
@@ -30,33 +30,35 @@
 #include <haproxy/api-t.h>
 #include <haproxy/thread-t.h>
 
-/* values for task->state */
-#define TASK_SLEEPING     0x0000  /* task sleeping */
-#define TASK_RUNNING      0x0001  /* the task is currently running */
-#define TASK_GLOBAL       0x0002  /* The task is currently in the global runqueue */
-#define TASK_QUEUED       0x0004  /* The task has been (re-)added to the run queue */
-#define TASK_SHARED_WQ    0x0008  /* The task's expiration may be updated by other
-                                   * threads, must be set before first queue/wakeup */
-#define TASK_SELF_WAKING  0x0010  /* task/tasklet found waking itself */
-#define TASK_KILLED       0x0020  /* task/tasklet killed, may now be freed */
-#define TASK_IN_LIST      0x0040  /* tasklet is in a tasklet list */
-#define TASK_HEAVY        0x0080  /* this task/tasklet is extremely heavy */
+/* values for task->state (32 bits) */
+#define TASK_SLEEPING     0x00000000  /* task sleeping */
+#define TASK_RUNNING      0x00000001  /* the task is currently running */
+#define TASK_GLOBAL       0x00000002  /* The task is currently in the global runqueue */
+#define TASK_QUEUED       0x00000004  /* The task has been (re-)added to the run queue */
+#define TASK_SHARED_WQ    0x00000008  /* The task's expiration may be updated by other
+                                       * threads, must be set before first queue/wakeup */
+#define TASK_SELF_WAKING  0x00000010  /* task/tasklet found waking itself */
+#define TASK_KILLED       0x00000020  /* task/tasklet killed, may now be freed */
+#define TASK_IN_LIST      0x00000040  /* tasklet is in a tasklet list */
+#define TASK_HEAVY        0x00000080  /* this task/tasklet is extremely heavy */
 
-#define TASK_WOKEN_INIT   0x0100  /* woken up for initialisation purposes */
-#define TASK_WOKEN_TIMER  0x0200  /* woken up because of expired timer */
-#define TASK_WOKEN_IO     0x0400  /* woken up because of completed I/O */
-#define TASK_WOKEN_SIGNAL 0x0800  /* woken up by a system signal */
-#define TASK_WOKEN_MSG    0x1000  /* woken up by another task's message */
-#define TASK_WOKEN_RES    0x2000  /* woken up because of available resource */
-#define TASK_WOKEN_OTHER  0x4000  /* woken up for an unspecified reason */
-
-#define TASK_F_TASKLET    0x8000  /* nature of this task: 0=task 1=tasklet */
+#define TASK_WOKEN_INIT   0x00000100  /* woken up for initialisation purposes */
+#define TASK_WOKEN_TIMER  0x00000200  /* woken up because of expired timer */
+#define TASK_WOKEN_IO     0x00000400  /* woken up because of completed I/O */
+#define TASK_WOKEN_SIGNAL 0x00000800  /* woken up by a system signal */
+#define TASK_WOKEN_MSG    0x00001000  /* woken up by another task's message */
+#define TASK_WOKEN_RES    0x00002000  /* woken up because of available resource */
+#define TASK_WOKEN_OTHER  0x00004000  /* woken up for an unspecified reason */
 
 /* use this to check a task state or to clean it up before queueing */
 #define TASK_WOKEN_ANY    (TASK_WOKEN_OTHER|TASK_WOKEN_INIT|TASK_WOKEN_TIMER| \
                            TASK_WOKEN_IO|TASK_WOKEN_SIGNAL|TASK_WOKEN_MSG| \
                            TASK_WOKEN_RES)
 
+#define TASK_F_TASKLET    0x00008000  /* nature of this task: 0=task 1=tasklet */
+/* unused: 0x10000..0x80000000 */
+
+
 enum {
 	TL_URGENT = 0,   /* urgent tasklets (I/O callbacks) */
 	TL_NORMAL = 1,   /* normal tasks */
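For context on how these flag bits are consumed: a task's process() callback receives the state word and tests the TASK_WOKEN_* bits to learn why it was woken. A minimal illustrative handler under the definitions above (the name and body are examples, not part of the patch):

#include <haproxy/task.h>

/* Illustrative only: a process() callback inspecting its wake-up
 * reasons through the now 32-bit state word. */
struct task *example_handler(struct task *t, void *context, unsigned int state)
{
	if (state & TASK_WOKEN_TIMER) {
		/* the expiration timer fired */
	}
	if (state & TASK_WOKEN_MSG) {
		/* another task sent us a notification */
	}
	return t; /* returning NULL instead tells the scheduler the task was freed */
}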
@@ -114,10 +116,10 @@ struct task_per_thread {
  */
 #define TASK_COMMON							\
 	struct {							\
-		unsigned short state; /* task state : bitfield of TASK_ */ \
+		unsigned int state; /* task state : bitfield of TASK_ */ \
 		/* 16-bit hole here */ \
 		unsigned int calls; /* number of times process was called */ \
-		struct task *(*process)(struct task *t, void *ctx, unsigned short state); /* the function which processes the task */ \
+		struct task *(*process)(struct task *t, void *ctx, unsigned int state); /* the function which processes the task */ \
 		void *context; /* the task's context */ \
 		TASK_DEBUG_STORAGE; \
 	}

@@ -111,7 +111,7 @@ void __task_wakeup(struct task *t);
 void __task_queue(struct task *task, struct eb_root *wq);
 
 struct work_list *work_list_create(int nbthread,
-                                   struct task *(*fct)(struct task *, void *, unsigned short),
+                                   struct task *(*fct)(struct task *, void *, unsigned int),
                                    void *arg);
 void work_list_destroy(struct work_list *work, int nbthread);
 unsigned int run_tasks_from_lists(unsigned int budgets[]);

@@ -205,7 +205,7 @@ static inline int thread_has_tasks(void)
 #define task_wakeup(t, f) _task_wakeup(t, f, __FILE__, __LINE__)
 static inline void _task_wakeup(struct task *t, unsigned int f, const char *file, int line)
 {
-	unsigned short state;
+	unsigned int state;
 
 	state = _HA_ATOMIC_OR(&t->state, f);
 	while (!(state & (TASK_RUNNING | TASK_QUEUED))) {

@@ -353,7 +353,7 @@ static inline struct task *task_unlink_rq(struct task *t)
 #define tasklet_wakeup_on(tl, thr) _tasklet_wakeup_on(tl, thr, __FILE__, __LINE__)
 static inline void _tasklet_wakeup_on(struct tasklet *tl, int thr, const char *file, int line)
 {
-	unsigned short state = tl->state;
+	unsigned int state = tl->state;
 
 	do {
 		/* do nothing if someone else already added it */
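The two wakeup helpers above share a pattern worth spelling out: the wake-up flags are ORed into the state word atomically, and only a caller that observes the task as neither RUNNING nor QUEUED actually inserts it into the run queue, so concurrent wakers merge their reason bits instead of queueing the task twice. A rough model in portable C11 atomics (illustrative only; the real code uses HAProxy's _HA_ATOMIC_* macros on t->state):

#include <stdatomic.h>
#include <stdbool.h>

#define T_RUNNING 0x00000001u
#define T_QUEUED  0x00000004u

static _Atomic unsigned int task_state;

/* Returns true if the caller won the right to queue the task. */
static bool wake(unsigned int flags)
{
	/* merge the wake-up reasons; fetch_or returns the previous value,
	 * so OR the flags back in to get the new state */
	unsigned int state = atomic_fetch_or(&task_state, flags) | flags;

	while (!(state & (T_RUNNING | T_QUEUED))) {
		/* on CAS failure, state is reloaded and the loop re-checks */
		if (atomic_compare_exchange_weak(&task_state, &state, state | T_QUEUED))
			return true; /* we queued it */
	}
	return false; /* flags merged; the task is already running or queued */
}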
@@ -58,7 +58,7 @@ int appctx_buf_available(void *arg)
 }
 
 /* Default applet handler */
-struct task *task_run_applet(struct task *t, void *context, unsigned short state)
+struct task *task_run_applet(struct task *t, void *context, unsigned int state)
 {
 	struct appctx *app = context;
 	struct stream_interface *si = app->owner;

@@ -834,7 +834,7 @@ static int wake_srv_chk(struct conn_stream *cs)
 }
 
 /* This function checks if any I/O is wanted, and if so, attempts to do so */
-struct task *event_srv_chk_io(struct task *t, void *ctx, unsigned short state)
+struct task *event_srv_chk_io(struct task *t, void *ctx, unsigned int state)
 {
 	struct check *check = ctx;
 	struct conn_stream *cs = check->cs;

@@ -849,7 +849,7 @@ struct task *event_srv_chk_io(struct task *t, void *ctx, unsigned short state)
  * Please do NOT place any return statement in this function and only leave
  * via the out_unlock label.
  */
-struct task *process_chk_conn(struct task *t, void *context, unsigned short state)
+struct task *process_chk_conn(struct task *t, void *context, unsigned int state)
 {
 	struct check *check = context;
 	struct proxy *proxy = check->proxy;

@@ -1071,7 +1071,7 @@ void free_check(struct check *check)
 /* manages a server health-check. Returns the time the task accepts to wait, or
  * TIME_ETERNITY for infinity.
  */
-struct task *process_chk(struct task *t, void *context, unsigned short state)
+struct task *process_chk(struct task *t, void *context, unsigned int state)
 {
 	struct check *check = context;
 

@@ -1120,7 +1120,7 @@ static int start_check_task(struct check *check, int mininter,
 * reached, the task automatically stops. Note that any server status change
 * must have updated s->last_change accordingly.
 */
-struct task *server_warmup(struct task *t, void *context, unsigned short state)
+struct task *server_warmup(struct task *t, void *context, unsigned int state)
 {
 	struct server *s = context;
 

@@ -729,7 +729,7 @@ static int debug_parse_cli_stream(char **args, char *payload, struct appctx *app
 	return 1;
 }
 
-static struct task *debug_task_handler(struct task *t, void *ctx, unsigned short state)
+static struct task *debug_task_handler(struct task *t, void *ctx, unsigned int state)
 {
 	unsigned long *tctx = ctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) }
 	unsigned long inter = tctx[1];

@@ -752,7 +752,7 @@ static struct task *debug_task_handler(struct task *t, void *ctx, unsigned short
 	return t;
 }
 
-static struct task *debug_tasklet_handler(struct task *t, void *ctx, unsigned short state)
+static struct task *debug_tasklet_handler(struct task *t, void *ctx, unsigned int state)
 {
 	unsigned long *tctx = ctx; // [0] = #tasks, [1] = inter, [2+] = { tl | (tsk+1) }
 	unsigned long rnd;

@@ -925,7 +925,7 @@ static struct appctx *dns_session_create(struct dns_session *ds)
 /* Task processing expiration of unresponded queries, this one is supposed
  * to be stuck on the same thread than the appctx handler
  */
-static struct task *dns_process_query_exp(struct task *t, void *context, unsigned short state)
+static struct task *dns_process_query_exp(struct task *t, void *context, unsigned int state)
 {
 	struct dns_session *ds = (struct dns_session *)context;
 	struct dns_query *query, *queryb;

@@ -949,7 +949,7 @@ static struct task *dns_process_query_exp(struct task *t, void *context, unsigne
 }
 
 /* Task processing expiration of idle sessions */
-static struct task *dns_process_idle_exp(struct task *t, void *context, unsigned short state)
+static struct task *dns_process_idle_exp(struct task *t, void *context, unsigned int state)
 {
 	struct dns_stream_server *dss = (struct dns_stream_server *)context;
 	struct dns_session *ds, *dsb;

@@ -1059,7 +1059,7 @@ error:
 * and forward them to dns_session ring.
 * Note: If no slot found a new dns_session is allocated
 */
-static struct task *dns_process_req(struct task *t, void *context, unsigned short state)
+static struct task *dns_process_req(struct task *t, void *context, unsigned int state)
 {
 	struct dns_nameserver *ns = (struct dns_nameserver *)context;
 	struct dns_stream_server *dss = ns->stream;

@@ -1193,7 +1193,7 @@ static struct task *dns_process_req(struct task *t, void *context, unsigned shor
 * Task used to consume response
 * Note: upper layer callback is called
 */
-static struct task *dns_process_rsp(struct task *t, void *context, unsigned short state)
+static struct task *dns_process_rsp(struct task *t, void *context, unsigned int state)
 {
 	struct dns_nameserver *ns = (struct dns_nameserver *)context;
 

@@ -475,7 +475,7 @@ out:
  * Please do NOT place any return statement in this function and only leave
  * via the out_unlock label.
  */
-struct task *process_chk_proc(struct task *t, void *context, unsigned short state)
+struct task *process_chk_proc(struct task *t, void *context, unsigned int state)
 {
 	struct check *check = context;
 	struct server *s = check->server;

@@ -1196,7 +1196,7 @@ spoe_wakeup_appctx(struct appctx *appctx)
 /* Callback function that catches applet timeouts. If a timeout occurred, we set
  * <appctx->st1> flag and the SPOE applet is woken up. */
 static struct task *
-spoe_process_appctx(struct task * task, void *context, unsigned short state)
+spoe_process_appctx(struct task * task, void *context, unsigned int state)
 {
 	struct appctx *appctx = context;
 

@@ -6298,7 +6298,7 @@ __LJMP static int hlua_set_nice(lua_State *L)
 * Task wrapper are longjmp safe because the only one Lua code
 * executed is the safe hlua_ctx_resume();
 */
-struct task *hlua_process_task(struct task *task, void *context, unsigned short state)
+struct task *hlua_process_task(struct task *task, void *context, unsigned int state)
 {
 	struct hlua *hlua = context;
 	enum hlua_exec status;

@@ -7042,7 +7042,7 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px,
 	return act_ret;
 }
 
-struct task *hlua_applet_wakeup(struct task *t, void *context, unsigned short state)
+struct task *hlua_applet_wakeup(struct task *t, void *context, unsigned int state)
 {
 	struct appctx *ctx = context;
 

@@ -120,7 +120,7 @@ int accept_queue_push_mp(struct accept_queue_ring *ring, struct connection *conn
 /* proceed with accepting new connections. Don't mark it static so that it appears
  * in task dumps.
  */
-struct task *accept_queue_process(struct task *t, void *context, unsigned short state)
+struct task *accept_queue_process(struct task *t, void *context, unsigned int state)
 {
 	struct accept_queue_ring *ring = context;
 	struct connection *conn;

@@ -1170,7 +1170,7 @@ REGISTER_POST_DEINIT(listener_queue_deinit);
 * a while. It is designed to be called as a task. It's exported so that it's easy
 * to spot in "show tasks" or "show profiling".
 */
-struct task *manage_global_listener_queue(struct task *t, void *context, unsigned short state)
+struct task *manage_global_listener_queue(struct task *t, void *context, unsigned int state)
 {
 	/* If there are still too many concurrent connections, let's wait for
 	 * some of them to go away. We don't need to re-arm the timer because

@@ -55,7 +55,7 @@ void email_alert_free(struct email_alert *alert)
 	pool_free(pool_head_email_alert, alert);
 }
 
-static struct task *process_email_alert(struct task *t, void *context, unsigned short state)
+static struct task *process_email_alert(struct task *t, void *context, unsigned int state)
 {
 	struct check *check = context;
 	struct email_alertq *q;

@@ -353,12 +353,12 @@ INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
 DECLARE_STATIC_POOL(pool_head_fcgi_conn, "fcgi_conn", sizeof(struct fcgi_conn));
 DECLARE_STATIC_POOL(pool_head_fcgi_strm, "fcgi_strm", sizeof(struct fcgi_strm));
 
-struct task *fcgi_timeout_task(struct task *t, void *context, unsigned short state);
+struct task *fcgi_timeout_task(struct task *t, void *context, unsigned int state);
 static int fcgi_process(struct fcgi_conn *fconn);
 /* fcgi_io_cb is exported to see it resolved in "show fd" */
-struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short state);
+struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int state);
 static inline struct fcgi_strm *fcgi_conn_st_by_id(struct fcgi_conn *fconn, int id);
-struct task *fcgi_deferred_shut(struct task *t, void *ctx, unsigned short state);
+struct task *fcgi_deferred_shut(struct task *t, void *ctx, unsigned int state);
 static struct fcgi_strm *fcgi_conn_stream_new(struct fcgi_conn *fconn, struct conn_stream *cs, struct session *sess);
 static void fcgi_strm_notify_recv(struct fcgi_strm *fstrm);
 static void fcgi_strm_notify_send(struct fcgi_strm *fstrm);

@@ -2975,7 +2975,7 @@ schedule:
 }
 
 /* this is the tasklet referenced in fconn->wait_event.tasklet */
-struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned short status)
+struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int status)
 {
 	struct connection *conn;
 	struct fcgi_conn *fconn;

@@ -3147,7 +3147,7 @@ static int fcgi_ctl(struct connection *conn, enum mux_ctl_type mux_ctl, void *ou
 * immediately killed. If it's allocatable and empty, we attempt to send a
 * ABORT records.
 */
-struct task *fcgi_timeout_task(struct task *t, void *context, unsigned short state)
+struct task *fcgi_timeout_task(struct task *t, void *context, unsigned int state)
 {
 	struct fcgi_conn *fconn = context;
 	int expired = tick_is_expired(t->expire, now_ms);

@@ -3758,7 +3758,7 @@ static void fcgi_do_shutw(struct fcgi_strm *fstrm)
 * deferred shutdowns when the fcgi_detach() was done but the mux buffer was full
 * and prevented the last record from being emitted.
 */
-struct task *fcgi_deferred_shut(struct task *t, void *ctx, unsigned short state)
+struct task *fcgi_deferred_shut(struct task *t, void *ctx, unsigned int state)
 {
 	struct fcgi_strm *fstrm = ctx;
 	struct fcgi_conn *fconn = fstrm->fconn;

@@ -266,8 +266,8 @@ static int h1_recv(struct h1c *h1c);
 static int h1_send(struct h1c *h1c);
 static int h1_process(struct h1c *h1c);
 /* h1_io_cb is exported to see it resolved in "show fd" */
-struct task *h1_io_cb(struct task *t, void *ctx, unsigned short state);
-struct task *h1_timeout_task(struct task *t, void *context, unsigned short state);
+struct task *h1_io_cb(struct task *t, void *ctx, unsigned int state);
+struct task *h1_timeout_task(struct task *t, void *context, unsigned int state);
 static void h1_shutw_conn(struct connection *conn, enum cs_shw_mode mode);
 static void h1_wake_stream_for_recv(struct h1s *h1s);
 static void h1_wake_stream_for_send(struct h1s *h1s);

@@ -2799,7 +2799,7 @@ static int h1_process(struct h1c * h1c)
 	return -1;
 }
 
-struct task *h1_io_cb(struct task *t, void *ctx, unsigned short status)
+struct task *h1_io_cb(struct task *t, void *ctx, unsigned int status)
 {
 	struct connection *conn;
 	struct tasklet *tl = (struct tasklet *)t;

@@ -2882,7 +2882,7 @@ static int h1_wake(struct connection *conn)
 /* Connection timeout management. The principle is that if there's no receipt
 * nor sending for a certain amount of time, the connection is closed.
 */
-struct task *h1_timeout_task(struct task *t, void *context, unsigned short state)
+struct task *h1_timeout_task(struct task *t, void *context, unsigned int state)
 {
 	struct h1c *h1c = context;
 	int expired = tick_is_expired(t->expire, now_ms);
src/mux_h2.c (12 changed lines)
@@ -551,16 +551,16 @@ static const struct h2s *h2_idle_stream = &(const struct h2s){
 	.id = 0,
 };
 
-struct task *h2_timeout_task(struct task *t, void *context, unsigned short state);
+struct task *h2_timeout_task(struct task *t, void *context, unsigned int state);
 static int h2_send(struct h2c *h2c);
 static int h2_recv(struct h2c *h2c);
 static int h2_process(struct h2c *h2c);
 /* h2_io_cb is exported to see it resolved in "show fd" */
-struct task *h2_io_cb(struct task *t, void *ctx, unsigned short state);
+struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state);
 static inline struct h2s *h2c_st_by_id(struct h2c *h2c, int id);
 static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *flags, unsigned long long *body_len, char *upgrade_protocol);
 static int h2_frt_transfer_data(struct h2s *h2s);
-struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned short state);
+struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state);
 static struct h2s *h2c_bck_stream_new(struct h2c *h2c, struct conn_stream *cs, struct session *sess);
 static void h2s_alert(struct h2s *h2s);
 

@@ -3773,7 +3773,7 @@ schedule:
 }
 
 /* this is the tasklet referenced in h2c->wait_event.tasklet */
-struct task *h2_io_cb(struct task *t, void *ctx, unsigned short status)
+struct task *h2_io_cb(struct task *t, void *ctx, unsigned int status)
 {
 	struct connection *conn;
 	struct tasklet *tl = (struct tasklet *)t;

@@ -3963,7 +3963,7 @@ static int h2_wake(struct connection *conn)
 * immediately killed. If it's allocatable and empty, we attempt to send a
 * GOAWAY frame.
 */
-struct task *h2_timeout_task(struct task *t, void *context, unsigned short state)
+struct task *h2_timeout_task(struct task *t, void *context, unsigned int state)
 {
 	struct h2c *h2c = context;
 	int expired = tick_is_expired(t->expire, now_ms);

@@ -4431,7 +4431,7 @@ static void h2_do_shutw(struct h2s *h2s)
 * deferred shutdowns when the h2_detach() was done but the mux buffer was full
 * and prevented the last frame from being emitted.
 */
-struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned short state)
+struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state)
 {
 	struct h2s *h2s = ctx;
 	struct h2c *h2c = h2s->h2c;

@@ -56,7 +56,7 @@ static void mux_pt_destroy(struct mux_pt_ctx *ctx)
 /* Callback, used when we get I/Os while in idle mode. This one is exported so
  * that "show fd" can resolve it.
  */
-struct task *mux_pt_io_cb(struct task *t, void *tctx, unsigned short status)
+struct task *mux_pt_io_cb(struct task *t, void *tctx, unsigned int status)
 {
 	struct mux_pt_ctx *ctx = tctx;
 

@@ -2873,7 +2873,7 @@ static struct appctx *peer_session_create(struct peers *peers, struct peer *peer
 * tasks wakeup on local update and heartbeat. Let's keep it exported so that it
 * resolves in stack traces and "show tasks".
 */
-struct task *process_peer_sync(struct task * task, void *context, unsigned short state)
+struct task *process_peer_sync(struct task * task, void *context, unsigned int state)
 {
 	struct peers *peers = context;
 	struct peer *ps;

@@ -1506,7 +1506,7 @@ void proxy_cond_disable(struct proxy *p)
 * called as a task which is woken up upon stopping or when rate limiting must
 * be enforced.
 */
-struct task *manage_proxy(struct task *t, void *context, unsigned short state)
+struct task *manage_proxy(struct task *t, void *context, unsigned int state)
 {
 	struct proxy *p = context;
 	int next = TICK_ETERNITY;

@@ -1604,7 +1604,7 @@ static int proxy_parse_hard_stop_after(char **args, int section_type, struct pro
 	return 0;
 }
 
-struct task *hard_stop(struct task *t, void *context, unsigned short state)
+struct task *hard_stop(struct task *t, void *context, unsigned int state)
 {
 	struct proxy *p;
 	struct stream *s;

@@ -2005,7 +2005,7 @@ static int resolv_process_responses(struct dns_nameserver *ns)
 * resolutions and retry them if possible. Else a timeout is reported. Then, it
 * checks the wait list to trigger new resolutions.
 */
-static struct task *process_resolvers(struct task *t, void *context, unsigned short state)
+static struct task *process_resolvers(struct task *t, void *context, unsigned int state)
 {
 	struct resolvers *resolvers = context;
 	struct resolv_resolution *res, *resback;

@@ -4561,7 +4561,7 @@ static void srv_update_status(struct server *s)
 		*s->adm_st_chg_cause = 0;
 }
 
-struct task *srv_cleanup_toremove_conns(struct task *task, void *context, unsigned short state)
+struct task *srv_cleanup_toremove_conns(struct task *task, void *context, unsigned int state)
 {
 	struct connection *conn;
 

@@ -4629,7 +4629,7 @@ static void srv_cleanup_connections(struct server *srv)
 	}
 }
 
-struct task *srv_cleanup_idle_conns(struct task *task, void *context, unsigned short state)
+struct task *srv_cleanup_idle_conns(struct task *task, void *context, unsigned int state)
 {
 	struct server *srv;
 	struct eb32_node *eb;

@@ -324,7 +324,7 @@ static void session_prepare_log_prefix(struct session *sess)
 * disabled and finally kills the file descriptor. This function requires that
 * sess->origin points to the incoming connection.
 */
-static void session_kill_embryonic(struct session *sess, unsigned short state)
+static void session_kill_embryonic(struct session *sess, unsigned int state)
 {
 	int level = LOG_INFO;
 	struct connection *conn = __objt_conn(sess->origin);

@@ -378,7 +378,7 @@ static void session_kill_embryonic(struct session *sess, unsigned short state)
 * strikes and performs the required cleanup. It's only exported to make it
 * resolve in "show tasks".
 */
-struct task *session_expire_embryonic(struct task *t, void *context, unsigned short state)
+struct task *session_expire_embryonic(struct task *t, void *context, unsigned int state)
 {
 	struct session *sess = context;
 

@@ -693,7 +693,7 @@ static struct appctx *sink_forward_session_create(struct sink *sink, struct sink
 /*
  * Task to handle connctions to forward servers
  */
-static struct task *process_sink_forward(struct task * task, void *context, unsigned short state)
+static struct task *process_sink_forward(struct task * task, void *context, unsigned int state)
 {
 	struct sink *sink = (struct sink *)context;
 	struct sink_forward_target *sft = sink->sft;

@@ -183,7 +183,7 @@ static struct stats_module ssl_stats_module = {
 INITCALL1(STG_REGISTER, stats_register_module, &ssl_stats_module);
 
 /* ssl_sock_io_cb is exported to see it resolved in "show fd" */
-struct task *ssl_sock_io_cb(struct task *, void *, unsigned short);
+struct task *ssl_sock_io_cb(struct task *, void *, unsigned int);
 static int ssl_sock_handshake(struct connection *conn, unsigned int flag);
 
 /* Methods to implement OpenSSL BIO */

@@ -5797,7 +5797,7 @@ static int ssl_remove_xprt(struct connection *conn, void *xprt_ctx, void *toremo
 	return (ctx->xprt->remove_xprt(conn, ctx->xprt_ctx, toremove_ctx, newops, newctx));
 }
 
-struct task *ssl_sock_io_cb(struct task *t, void *context, unsigned short state)
+struct task *ssl_sock_io_cb(struct task *t, void *context, unsigned int state)
 {
 	struct tasklet *tl = (struct tasklet *)t;
 	struct ssl_sock_ctx *ctx = context;

@@ -626,7 +626,7 @@ out_unlock:
 * Task processing function to trash expired sticky sessions. A pointer to the
 * task itself is returned since it never dies.
 */
-struct task *process_table_expire(struct task *task, void *context, unsigned short state)
+struct task *process_table_expire(struct task *task, void *context, unsigned int state)
 {
 	struct stktable *t = context;
 

@@ -1525,7 +1525,7 @@ static int process_store_rules(struct stream *s, struct channel *rep, int an_bit
 * and each function is called only if at least another function has changed at
 * least one flag it is interested in.
 */
-struct task *process_stream(struct task *t, void *context, unsigned short state)
+struct task *process_stream(struct task *t, void *context, unsigned int state)
 {
 	struct server *srv;
 	struct stream *s = context;

@@ -767,7 +767,7 @@ int si_cs_send(struct conn_stream *cs)
 * stream interface. Thus it is always safe to perform a tasklet_wakeup() on a
 * stream interface, as the presence of the CS is checked there.
 */
-struct task *si_cs_io_cb(struct task *t, void *ctx, unsigned short state)
+struct task *si_cs_io_cb(struct task *t, void *ctx, unsigned int state)
 {
 	struct stream_interface *si = ctx;
 	struct conn_stream *cs = objt_cs(si->end);

@@ -64,7 +64,7 @@ struct task_per_thread task_per_thread[MAX_THREADS];
 */
 void task_kill(struct task *t)
 {
-	unsigned short state = t->state;
+	unsigned int state = t->state;
 	unsigned int thr;
 
 	BUG_ON(state & TASK_KILLED);

@@ -433,14 +433,14 @@ int next_timer_expiry()
 */
 unsigned int run_tasks_from_lists(unsigned int budgets[])
 {
-	struct task *(*process)(struct task *t, void *ctx, unsigned short state);
+	struct task *(*process)(struct task *t, void *ctx, unsigned int state);
 	struct list *tl_queues = sched->tasklets;
 	struct task *t;
 	uint8_t budget_mask = (1 << TL_CLASSES) - 1;
 	struct sched_activity *profile_entry = NULL;
 	unsigned int done = 0;
 	unsigned int queue;
-	unsigned short state;
+	unsigned int state;
 	void *ctx;
 
 	for (queue = 0; queue < TL_CLASSES;) {

@@ -806,7 +806,7 @@ void process_runnable_tasks()
 * is returned on success, otherwise NULL on failure.
 */
 struct work_list *work_list_create(int nbthread,
-                                   struct task *(*fct)(struct task *, void *, unsigned short),
+                                   struct task *(*fct)(struct task *, void *, unsigned int),
                                    void *arg)
 {
 	struct work_list *wl;

@@ -37,7 +37,7 @@ static size_t xprt_handshake_to_buf(struct connection *conn, void *xprt_ctx, str
 }
 
 /* xprt_handshake_io_cb is exported to see it resolved in "show fd" */
-struct task *xprt_handshake_io_cb(struct task *t, void *bctx, unsigned short state)
+struct task *xprt_handshake_io_cb(struct task *t, void *bctx, unsigned int state)
 {
 	struct xprt_handshake_ctx *ctx = bctx;
 	struct connection *conn = ctx->conn;

@@ -2462,7 +2462,7 @@ static void quic_conn_free(struct quic_conn *conn)
 }
 
 /* Callback called upon loss detection and PTO timer expirations. */
-static struct task *process_timer(struct task *task, void *ctx, unsigned short state)
+static struct task *process_timer(struct task *task, void *ctx, unsigned int state)
 {
 	struct quic_conn_ctx *conn_ctx;
 	struct quic_conn *qc;

@@ -3848,7 +3848,7 @@ int qc_prep_phdshk_pkts(struct quic_conn *qc)
 }
 
 /* QUIC connection packet handler task. */
-struct task *quic_conn_io_cb(struct task *t, void *context, unsigned short state)
+struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
 {
 	struct quic_conn_ctx *ctx = context;
 