mirror of http://git.haproxy.org/git/haproxy.git/
MEDIUM: freq_ctr: replace the per-second counters with the generic ones
It remains cumbersome to preserve two versions of the freq counters and two different internal clocks just for this. In addition, the savings from using two different mechanisms are not that important, as the only saving is a divide that is replaced by a multiply; and now, thanks to the freq_ctr_total() unification, the code could also be simplified to optimize the case of constant periods.

This patch turns all non-period freq_ctr functions into static inlines which call the period-based ones with a period of 1 second. A direct benefit is that a single internal clock is now needed for any counter, and that they now all rely on ticks.

These 1-second counters are essentially used to report request rates and to enforce a connection rate limitation in listeners. It was verified that these continue to work like before.
This commit is contained in:
parent fa1258f02c
commit fc6323ad82
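To make the change concrete, here is a hedged usage sketch of one of the roles named in the commit message: enforcing a per-second connection rate limit the way a listener might. This is illustrative code, not part of the patch; conn_per_sec, rate_limit and accept_allowed() are hypothetical names, while the freq_ctr calls are the ones this commit turns into 1-second inline wrappers.

#include <haproxy/freq_ctr.h>

static struct freq_ctr conn_per_sec;   /* hypothetical per-listener counter */

/* Returns 0 if a new connection may be accepted now, otherwise a suggested
 * wait time in ms before retrying (hypothetical helper for illustration).
 */
static unsigned int accept_allowed(unsigned int rate_limit)
{
	if (freq_ctr_remain(&conn_per_sec, rate_limit, 0)) {
		/* still below <rate_limit> events in the current second */
		update_freq_ctr(&conn_per_sec, 1);
		return 0;
	}
	/* limit reached: ask how long to wait for the next free slot */
	return next_event_delay(&conn_per_sec, rate_limit, 0);
}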
include/haproxy/freq_ctr.h
@@ -25,58 +25,12 @@
 #include <haproxy/api.h>
 #include <haproxy/freq_ctr-t.h>
 #include <haproxy/intops.h>
 #include <haproxy/ticks.h>
 #include <haproxy/time.h>
 
 /* exported functions from freq_ctr.c */
 ullong freq_ctr_total(struct freq_ctr *ctr, uint period, int pend);
 
-/* Update a frequency counter by <inc> incremental units. It is automatically
- * rotated if the period is over. It is important that it correctly initializes
- * a null area.
- */
-static inline unsigned int update_freq_ctr(struct freq_ctr *ctr, unsigned int inc)
-{
-	int elapsed;
-	unsigned int curr_sec;
-	uint32_t now_tmp;
-
-	/* we manipulate curr_ctr using atomic ops out of the lock, since
-	 * it's the most frequent access. However if we detect that a change
-	 * is needed, it's done under the date lock. We don't care whether
-	 * the value we're adding is considered as part of the current or
-	 * new period if another thread starts to rotate the period while
-	 * we operate, since timing variations would have resulted in the
-	 * same uncertainty as well.
-	 */
-	curr_sec = ctr->curr_tick;
-	do {
-		now_tmp = global_now >> 32;
-		if (curr_sec == (now_tmp & 0x7fffffff))
-			return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
-
-		/* remove the bit, used for the lock */
-		curr_sec &= 0x7fffffff;
-	} while (!_HA_ATOMIC_CAS(&ctr->curr_tick, &curr_sec, curr_sec | 0x80000000));
-	__ha_barrier_atomic_store();
-
-	elapsed = (now_tmp & 0x7fffffff) - curr_sec;
-	if (unlikely(elapsed > 0)) {
-		ctr->prev_ctr = ctr->curr_ctr;
-		_HA_ATOMIC_SUB(&ctr->curr_ctr, ctr->prev_ctr);
-		if (likely(elapsed != 1)) {
-			/* we missed more than one second */
-			ctr->prev_ctr = 0;
-		}
-		curr_sec = now_tmp;
-	}
-
-	/* release the lock and update the time in case of rotate. */
-	_HA_ATOMIC_STORE(&ctr->curr_tick, curr_sec & 0x7fffffff);
-
-	return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
-}
-
 /* Update a frequency counter by <inc> incremental units. It is automatically
  * rotated if the period is over. It is important that it correctly initializes
  * a null area. This one works on frequency counters which have a period
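For readers unfamiliar with the scheme being removed above: bit 31 of curr_tick doubles as a one-bit lock, so the rotation happens under a tiny spinlock while the hot path stays a single atomic add. Below is a minimal standalone model of that pattern using C11 atomics in place of HAProxy's _HA_ATOMIC_* macros; struct toy_ctr, toy_update() and the caller-supplied now_sec are invented for the sketch.

#include <stdatomic.h>
#include <stdint.h>

struct toy_ctr {
	_Atomic uint32_t curr_tick; /* start of current second; bit 31 = lock */
	_Atomic uint32_t curr_ctr;  /* events seen in the current second */
	uint32_t prev_ctr;          /* events seen in the previous second */
};

static uint32_t toy_update(struct toy_ctr *ctr, uint32_t now_sec, uint32_t inc)
{
	uint32_t curr_sec = atomic_load(&ctr->curr_tick);
	int32_t elapsed;

	do {
		/* fast path: still within the same second, plain atomic add */
		if (curr_sec == (now_sec & 0x7fffffff))
			return atomic_fetch_add(&ctr->curr_ctr, inc) + inc;
		curr_sec &= 0x7fffffff; /* drop the lock bit before the CAS */
	} while (!atomic_compare_exchange_weak(&ctr->curr_tick, &curr_sec,
	                                       curr_sec | 0x80000000));

	/* we hold the lock: rotate the current count into the previous one */
	elapsed = (int32_t)((now_sec & 0x7fffffff) - curr_sec);
	if (elapsed > 0) {
		ctr->prev_ctr = atomic_load(&ctr->curr_ctr);
		atomic_fetch_sub(&ctr->curr_ctr, ctr->prev_ctr);
		if (elapsed != 1)
			ctr->prev_ctr = 0; /* missed more than one second */
		curr_sec = now_sec;
	}
	/* storing the tick without bit 31 releases the lock */
	atomic_store(&ctr->curr_tick, curr_sec & 0x7fffffff);
	return atomic_fetch_add(&ctr->curr_ctr, inc) + inc;
}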
@@ -117,10 +71,14 @@ static inline unsigned int update_freq_ctr_period(struct freq_ctr *ctr,
 	return _HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
 }
 
-/* Read a frequency counter taking history into account for missing time in
- * current period.
+/* Update a 1-sec frequency counter by <inc> incremental units. It is automatically
+ * rotated if the period is over. It is important that it correctly initializes
+ * a null area.
  */
-unsigned int read_freq_ctr(struct freq_ctr *ctr);
+static inline unsigned int update_freq_ctr(struct freq_ctr *ctr, unsigned int inc)
+{
+	return update_freq_ctr_period(ctr, MS_TO_TICKS(1000), inc);
+}
 
 /* Reads a frequency counter taking history into account for missing time in
  * current period. The period has to be passed in number of ticks and must
@@ -142,11 +100,13 @@ static inline uint read_freq_ctr_period(struct freq_ctr *ctr, uint period)
 	return div64_32(total, period);
 }
 
-/* returns the number of remaining events that can occur on this freq counter
- * while respecting <freq> and taking into account that <pend> events are
- * already known to be pending. Returns 0 if limit was reached.
+/* Read a 1-sec frequency counter taking history into account for missing time
+ * in current period.
  */
-unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned int pend);
+static inline unsigned int read_freq_ctr(struct freq_ctr *ctr)
+{
+	return read_freq_ctr_period(ctr, MS_TO_TICKS(1000));
+}
 
 /* Returns the number of remaining events that can occur on this freq counter
  * while respecting <freq> events per period, and taking into account that
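The read_freq_ctr_period() context above shows how the rate is derived: the part of the period not yet elapsed is filled in proportionally from the previous period's count, which the real code does with fixed-point helpers (mul32hi() and a scaled remaining-time value). As a hedged, standalone illustration with plain integer math and invented numbers, assuming HAProxy's one-tick-per-millisecond convention so that MS_TO_TICKS(1000) denotes a 1-second period:

#include <stdint.h>
#include <stdio.h>

/* approx_rate() is an invented helper: <curr> events so far this period,
 * <past> events in the previous period, <elapsed_ms> into the current
 * period. The missing part of the period is read from <past> pro rata.
 */
static uint32_t approx_rate(uint32_t curr, uint32_t past,
                            uint32_t elapsed_ms, uint32_t period_ms)
{
	uint32_t remaining_ms = period_ms - elapsed_ms;

	return curr + (uint32_t)((uint64_t)past * remaining_ms / period_ms);
}

int main(void)
{
	/* 400ms into a 1s period, 40 events so far, 120 in the last period:
	 * estimate = 40 + 120 * 600/1000 = 112 events/s
	 */
	printf("%u events/s\n", approx_rate(40, 120, 400, 1000));
	return 0;
}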
@@ -162,13 +122,14 @@ static inline uint freq_ctr_remain_period(struct freq_ctr *ctr, uint period, uin
 	return freq - avg;
 }
 
-/* return the expected wait time in ms before the next event may occur,
- * respecting frequency <freq>, and assuming there may already be some pending
- * events. It returns zero if we can proceed immediately, otherwise the wait
- * time, which will be rounded down 1ms for better accuracy, with a minimum
- * of one ms.
+/* returns the number of remaining events that can occur on this freq counter
+ * while respecting <freq> and taking into account that <pend> events are
+ * already known to be pending. Returns 0 if limit was reached.
  */
-unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned int pend);
+static inline unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
+{
+	return freq_ctr_remain_period(ctr, MS_TO_TICKS(1000), freq, pend);
+}
 
 /* return the expected wait time in ms before the next event may occur,
  * respecting frequency <freq>, and assuming there may already be some pending
@@ -195,8 +156,16 @@ static inline uint next_event_delay_period(struct freq_ctr *ctr, uint period, ui
 	return MAX(wait, 1);
 }
 
-/* process freq counters over configurable periods */
-unsigned int read_freq_ctr_period(struct freq_ctr *ctr, unsigned int period);
+/* Returns the expected wait time in ms before the next event may occur,
+ * respecting frequency <freq> over 1 second, and assuming there may already be
+ * some pending events. It returns zero if we can proceed immediately, otherwise
+ * the wait time, which will be rounded down 1ms for better accuracy, with a
+ * minimum of one ms.
+ */
+static inline unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
+{
+	return next_event_delay_period(ctr, MS_TO_TICKS(1000), freq, pend);
+}
 
 /* While the functions above report average event counts per period, we are
  * also interested in average values per event. For this we use a different
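The other use case named in the commit message, reporting request rates, reduces after this patch to the inline wrappers above. A hedged sketch follows; sess_per_sec and the two helpers are hypothetical names, and only the freq_ctr calls come from the API shown in the diff:

#include <haproxy/freq_ctr.h>

static struct freq_ctr sess_per_sec;   /* hypothetical frontend counter */

static void count_request(void)
{
	/* one more event in the current 1-second window */
	update_freq_ctr(&sess_per_sec, 1);
}

static unsigned int current_req_rate(void)
{
	/* smoothed events/s; now just read_freq_ctr_period() over 1 second */
	return read_freq_ctr(&sess_per_sec);
}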
src/freq_ctr.c: 151 changed lines
@@ -15,157 +15,6 @@
 #include <haproxy/time.h>
 #include <haproxy/tools.h>
 
-/* Read a frequency counter taking history into account for missing time in
- * current period. Current second is sub-divided in 1000 chunks of one ms,
- * and the missing ones are read proportionally from previous value. The
- * return value has the same precision as one input data sample, so low rates
- * will be inaccurate still appropriate for max checking. One trick we use for
- * low values is to specially handle the case where the rate is between 0 and 1
- * in order to avoid flapping while waiting for the next event.
- *
- * For immediate limit checking, it's recommended to use freq_ctr_remain() and
- * next_event_delay() instead which do not have the flapping correction, so
- * that even frequencies as low as one event/period are properly handled.
- */
-unsigned int read_freq_ctr(struct freq_ctr *ctr)
-{
-	unsigned int curr, past, _curr, _past;
-	unsigned int age, curr_sec, _curr_sec;
-
-	while (1) {
-		_curr = ctr->curr_ctr;
-		__ha_compiler_barrier();
-		_past = ctr->prev_ctr;
-		__ha_compiler_barrier();
-		_curr_sec = ctr->curr_tick;
-		__ha_compiler_barrier();
-		if (_curr_sec & 0x80000000)
-			continue;
-		curr = ctr->curr_ctr;
-		__ha_compiler_barrier();
-		past = ctr->prev_ctr;
-		__ha_compiler_barrier();
-		curr_sec = ctr->curr_tick;
-		__ha_compiler_barrier();
-		if (_curr == curr && _past == past && _curr_sec == curr_sec)
-			break;
-	}
-
-	age = (global_now >> 32) - curr_sec;
-	if (unlikely(age > 1))
-		return 0;
-
-	if (unlikely(age)) {
-		past = curr;
-		curr = 0;
-	}
-
-	if (past <= 1 && !curr)
-		return past; /* very low rate, avoid flapping */
-
-	return curr + mul32hi(past, ms_left_scaled);
-}
-
-/* returns the number of remaining events that can occur on this freq counter
- * while respecting <freq> and taking into account that <pend> events are
- * already known to be pending. Returns 0 if limit was reached.
- */
-unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
-{
-	unsigned int curr, past, _curr, _past;
-	unsigned int age, curr_sec, _curr_sec;
-
-	while (1) {
-		_curr = ctr->curr_ctr;
-		__ha_compiler_barrier();
-		_past = ctr->prev_ctr;
-		__ha_compiler_barrier();
-		_curr_sec = ctr->curr_tick;
-		__ha_compiler_barrier();
-		if (_curr_sec & 0x80000000)
-			continue;
-		curr = ctr->curr_ctr;
-		__ha_compiler_barrier();
-		past = ctr->prev_ctr;
-		__ha_compiler_barrier();
-		curr_sec = ctr->curr_tick;
-		__ha_compiler_barrier();
-		if (_curr == curr && _past == past && _curr_sec == curr_sec)
-			break;
-	}
-
-	age = (global_now >> 32) - curr_sec;
-	if (unlikely(age > 1))
-		curr = 0;
-	else {
-		if (unlikely(age == 1)) {
-			past = curr;
-			curr = 0;
-		}
-		curr += mul32hi(past, ms_left_scaled);
-	}
-	curr += pend;
-
-	if (curr >= freq)
-		return 0;
-	return freq - curr;
-}
-
-/* return the expected wait time in ms before the next event may occur,
- * respecting frequency <freq>, and assuming there may already be some pending
- * events. It returns zero if we can proceed immediately, otherwise the wait
- * time, which will be rounded down 1ms for better accuracy, with a minimum
- * of one ms.
- */
-unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
-{
-	unsigned int curr, past, _curr, _past;
-	unsigned int wait, age, curr_sec, _curr_sec;
-
-	while (1) {
-		_curr = ctr->curr_ctr;
-		__ha_compiler_barrier();
-		_past = ctr->prev_ctr;
-		__ha_compiler_barrier();
-		_curr_sec = ctr->curr_tick;
-		__ha_compiler_barrier();
-		if (_curr_sec & 0x80000000)
-			continue;
-		curr = ctr->curr_ctr;
-		__ha_compiler_barrier();
-		past = ctr->prev_ctr;
-		__ha_compiler_barrier();
-		curr_sec = ctr->curr_tick;
-		__ha_compiler_barrier();
-		if (_curr == curr && _past == past && _curr_sec == curr_sec)
-			break;
-	}
-
-	age = (global_now >> 32) - curr_sec;
-	if (unlikely(age > 1))
-		curr = 0;
-	else {
-		if (unlikely(age == 1)) {
-			past = curr;
-			curr = 0;
-		}
-		curr += mul32hi(past, ms_left_scaled);
-	}
-	curr += pend;
-
-	if (curr < freq)
-		return 0;
-
-	/* too many events already, let's count how long to wait before they're
-	 * processed. For this we'll subtract from the number of pending events
-	 * the ones programmed for the current period, to know how long to wait
-	 * for the next period. Each event takes 1/freq sec, thus 1000/freq ms.
-	 */
-	curr -= freq;
-	wait = curr * 1000 / (freq ? freq : 1);
-	return MAX(wait, 1);
-}
-
 /* Returns the total number of events over the current + last period, including
  * a number of already pending events <pend>. The average frequency will be
  * obtained by dividing the output by <period>. This is essentially made to
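To close the loop on the removed next_event_delay() above, here is a worked example of its final wait computation, with invented numbers: each excess event costs 1000/freq ms, floored at 1 ms.

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int freq = 100; /* allowed events per second */
	unsigned int curr = 130; /* interpolated count plus pending events */
	unsigned int wait;

	curr -= freq;                           /* 30 events over the limit */
	wait = curr * 1000 / (freq ? freq : 1); /* 30 * 10ms = 300ms */
	printf("wait %u ms\n", MAX(wait, 1));   /* prints: wait 300 ms */
	return 0;
}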