2009-03-05 17:43:00 +00:00
|
|
|
/*
|
|
|
|
* Event rate calculation functions.
|
|
|
|
*
|
2010-06-20 05:15:43 +00:00
|
|
|
* Copyright 2000-2010 Willy Tarreau <w@1wt.eu>
|
2009-03-05 17:43:00 +00:00
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2020-05-27 10:58:42 +00:00
|
|
|
#include <haproxy/api.h>
|
2020-06-01 10:18:08 +00:00
|
|
|
#include <haproxy/freq_ctr.h>
|
2020-06-09 07:07:15 +00:00
|
|
|
#include <haproxy/time.h>
|
|
|
|
#include <haproxy/tools.h>
|
2009-03-05 17:43:00 +00:00
|
|
|
|
|
|
|
/* Read a frequency counter taking history into account for missing time in
 * current period. Current second is sub-divided in 1000 chunks of one ms,
 * and the missing ones are read proportionally from previous value. The
 * return value has the same precision as one input data sample, so low rates
 * will be inaccurate still appropriate for max checking. One trick we use for
 * low values is to specially handle the case where the rate is between 0 and 1
 * in order to avoid flapping while waiting for the next event.
 *
 * For immediate limit checking, it's recommended to use freq_ctr_remain() and
 * next_event_delay() instead which do not have the flapping correction, so
 * that even frequencies as low as one event/period are properly handled.
 *
 * <ctr> is the counter to read; the function returns the estimated events
 * per second, or 0 if the counter is more than one second old.
 */
unsigned int read_freq_ctr(struct freq_ctr *ctr)
{
	unsigned int curr, past, _curr, _past;
	unsigned int age, curr_sec, _curr_sec;

	/* Lockless consistent snapshot of (curr_ctr, prev_ctr, curr_sec):
	 * read the three fields twice with compiler barriers in between and
	 * retry until both reads agree. The high bit of curr_sec serves as
	 * an update-in-progress flag (presumably set by the writer while
	 * rotating the period), so a set bit also forces a retry. The exact
	 * read order and the barriers are essential; do not reorder.
	 */
	while (1) {
		_curr = ctr->curr_ctr;
		__ha_compiler_barrier();
		_past = ctr->prev_ctr;
		__ha_compiler_barrier();
		_curr_sec = ctr->curr_sec;
		__ha_compiler_barrier();
		if (_curr_sec & 0x80000000)
			continue;
		curr = ctr->curr_ctr;
		__ha_compiler_barrier();
		past = ctr->prev_ctr;
		__ha_compiler_barrier();
		curr_sec = ctr->curr_sec;
		__ha_compiler_barrier();
		if (_curr == curr && _past == past && _curr_sec == curr_sec)
			break;
	}

	/* how many whole seconds elapsed since the counter's current period */
	age = now.tv_sec - curr_sec;
	if (unlikely(age > 1))
		return 0;	/* counter is stale: no event for >1s */

	if (unlikely(age)) {
		/* the current period is already over: what was being
		 * accumulated becomes the "past" sample and the current
		 * period is empty.
		 */
		past = curr;
		curr = 0;
	}

	if (past <= 1 && !curr)
		return past; /* very low rate, avoid flapping */

	/* blend in the part of the previous second proportional to the
	 * milliseconds left in the current one (ms_left_scaled is that
	 * fraction pre-scaled to 32 bits by tv_update_date()).
	 */
	return curr + mul32hi(past, ms_left_scaled);
}
|
|
|
|
|
2009-03-06 08:18:27 +00:00
|
|
|
/* returns the number of remaining events that can occur on this freq counter
 * while respecting <freq> and taking into account that <pend> events are
 * already known to be pending. Returns 0 if limit was reached.
 */
unsigned int freq_ctr_remain(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
{
	unsigned int curr, past, _curr, _past;
	unsigned int age, curr_sec, _curr_sec;

	/* Lockless consistent snapshot of (curr_ctr, prev_ctr, curr_sec):
	 * double-read with compiler barriers, retry until stable; the high
	 * bit of curr_sec flags a rotation in progress. Statement order and
	 * barriers are essential; do not reorder.
	 */
	while (1) {
		_curr = ctr->curr_ctr;
		__ha_compiler_barrier();
		_past = ctr->prev_ctr;
		__ha_compiler_barrier();
		_curr_sec = ctr->curr_sec;
		__ha_compiler_barrier();
		if (_curr_sec & 0x80000000)
			continue;
		curr = ctr->curr_ctr;
		__ha_compiler_barrier();
		past = ctr->prev_ctr;
		__ha_compiler_barrier();
		curr_sec = ctr->curr_sec;
		__ha_compiler_barrier();
		if (_curr == curr && _past == past && _curr_sec == curr_sec)
			break;
	}

	/* age in whole seconds since the counter's current period started */
	age = now.tv_sec - curr_sec;
	if (unlikely(age > 1))
		curr = 0;	/* stale counter: nothing consumed */
	else {
		if (unlikely(age == 1)) {
			/* period just rolled over: current becomes past */
			past = curr;
			curr = 0;
		}
		/* add the share of the previous second proportional to the
		 * milliseconds left in the current one.
		 */
		curr += mul32hi(past, ms_left_scaled);
	}
	/* pending events count against the budget as well */
	curr += pend;

	if (curr >= freq)
		return 0;
	return freq - curr;
}
|
|
|
|
|
|
|
|
/* return the expected wait time in ms before the next event may occur,
|
|
|
|
* respecting frequency <freq>, and assuming there may already be some pending
|
|
|
|
* events. It returns zero if we can proceed immediately, otherwise the wait
|
|
|
|
* time, which will be rounded down 1ms for better accuracy, with a minimum
|
|
|
|
* of one ms.
|
|
|
|
*/
|
|
|
|
unsigned int next_event_delay(struct freq_ctr *ctr, unsigned int freq, unsigned int pend)
|
|
|
|
{
|
2017-10-30 17:04:28 +00:00
|
|
|
unsigned int curr, past, _curr, _past;
|
|
|
|
unsigned int wait, age, curr_sec, _curr_sec;
|
|
|
|
|
2017-10-31 16:54:15 +00:00
|
|
|
while (1) {
|
|
|
|
_curr = ctr->curr_ctr;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-31 16:54:15 +00:00
|
|
|
_past = ctr->prev_ctr;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-31 16:54:15 +00:00
|
|
|
_curr_sec = ctr->curr_sec;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-30 17:04:28 +00:00
|
|
|
if (_curr_sec & 0x80000000)
|
|
|
|
continue;
|
2017-10-31 16:54:15 +00:00
|
|
|
curr = ctr->curr_ctr;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-31 16:54:15 +00:00
|
|
|
past = ctr->prev_ctr;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-31 16:54:15 +00:00
|
|
|
curr_sec = ctr->curr_sec;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-30 17:04:28 +00:00
|
|
|
if (_curr == curr && _past == past && _curr_sec == curr_sec)
|
|
|
|
break;
|
|
|
|
}
|
2009-03-06 08:18:27 +00:00
|
|
|
|
2017-10-12 07:49:09 +00:00
|
|
|
age = now.tv_sec - curr_sec;
|
|
|
|
if (unlikely(age > 1))
|
|
|
|
curr = 0;
|
|
|
|
else {
|
|
|
|
if (unlikely(age == 1)) {
|
|
|
|
past = curr;
|
|
|
|
curr = 0;
|
2009-03-06 13:29:25 +00:00
|
|
|
}
|
BUG/MINOR: time: frequency counters are not totally accurate
When a frontend is rate-limited to 1000 connections per second, the
effective rate measured from the client is 999/s, and connections
experience an average response time of 99.5 ms with a standard
deviation of 2 ms.
The reason for this inaccuracy is that when computing frequency
counters, we use one part of the previous value proportional to the
number of milliseconds remaining in the current second. But even the
last millisecond still uses a part of the past value, which is wrong :
since we have a 1ms resolution, the last millisecond must be dedicated
only to filling the current second.
So we slightly adjust the algorithm to use 999/1000 of the past value
during the first millisecond, and 0/1000 of the past value during the
last millisecond. We also slightly improve the computation by computing
the remaining time instead of the current time in tv_update_date(), so
that we don't have to negate the value in each frequency counter.
Now with the fix, the connection rate measured by both the client and
haproxy is a steady 1000/s, the average response time measured is 99.2ms
and more importantly, the standard deviation has been divided by 3 to
0.6 millisecond.
This fix should also be backported to 1.4 which has the same issue.
2012-12-29 20:50:07 +00:00
|
|
|
curr += mul32hi(past, ms_left_scaled);
|
2009-03-06 13:29:25 +00:00
|
|
|
}
|
|
|
|
curr += pend;
|
2009-03-06 08:18:27 +00:00
|
|
|
|
2009-03-06 13:29:25 +00:00
|
|
|
if (curr < freq)
|
2009-03-06 08:18:27 +00:00
|
|
|
return 0;
|
|
|
|
|
2009-03-06 13:29:25 +00:00
|
|
|
wait = 999 / curr;
|
2009-03-06 08:18:27 +00:00
|
|
|
return MAX(wait, 1);
|
|
|
|
}
|
|
|
|
|
2010-06-20 05:15:43 +00:00
|
|
|
/* Reads a frequency counter taking history into account for missing time in
 * current period. The period has to be passed in number of ticks and must
 * match the one used to feed the counter. The counter value is reported for
 * current date (now_ms). The return value has the same precision as one input
 * data sample, so low rates over the period will be inaccurate but still
 * appropriate for max checking. One trick we use for low values is to specially
 * handle the case where the rate is between 0 and 1 in order to avoid flapping
 * while waiting for the next event.
 *
 * For immediate limit checking, it's recommended to use freq_ctr_period_remain()
 * instead which does not have the flapping correction, so that even frequencies
 * as low as one event/period are properly handled.
 *
 * For measures over a 1-second period, it's better to use the implicit functions
 * above.
 */
unsigned int read_freq_ctr_period(struct freq_ctr_period *ctr, unsigned int period)
{
	unsigned int _curr, _past, curr, past;
	unsigned int remain, _curr_tick, curr_tick;

	/* Lockless consistent snapshot of (curr_ctr, prev_ctr, curr_tick):
	 * double-read with compiler barriers, retry until stable. Here the
	 * low bit of curr_tick flags a rotation in progress (ticks are even
	 * when stable). Statement order and barriers are essential; do not
	 * reorder.
	 */
	while (1) {
		_curr = ctr->curr_ctr;
		__ha_compiler_barrier();
		_past = ctr->prev_ctr;
		__ha_compiler_barrier();
		_curr_tick = ctr->curr_tick;
		__ha_compiler_barrier();
		if (_curr_tick & 0x1)
			continue;
		curr = ctr->curr_ctr;
		__ha_compiler_barrier();
		past = ctr->prev_ctr;
		__ha_compiler_barrier();
		curr_tick = ctr->curr_tick;
		__ha_compiler_barrier();
		if (_curr == curr && _past == past && _curr_tick == curr_tick)
			break;
	};

	/* ticks remaining before the current period expires; negative
	 * (as signed) means the period is already over.
	 */
	remain = curr_tick + period - now_ms;
	if (unlikely((int)remain < 0)) {
		/* We're past the first period, check if we can still report a
		 * part of last period or if we're too far away.
		 */
		remain += period;
		if ((int)remain < 0)
			return 0;	/* more than one full period old */
		past = curr;
		curr = 0;
	}
	if (past <= 1 && !curr)
		return past; /* very low rate, avoid flapping */

	/* blend in the part of the previous period proportional to the
	 * ticks remaining in the current one.
	 */
	curr += div64_32((unsigned long long)past * remain, period);
	return curr;
}
|
|
|
|
|
|
|
|
/* Returns the number of remaining events that can occur on this freq counter
|
|
|
|
* while respecting <freq> events per period, and taking into account that
|
|
|
|
* <pend> events are already known to be pending. Returns 0 if limit was reached.
|
|
|
|
*/
|
|
|
|
unsigned int freq_ctr_remain_period(struct freq_ctr_period *ctr, unsigned int period,
|
|
|
|
unsigned int freq, unsigned int pend)
|
|
|
|
{
|
2017-10-30 17:04:28 +00:00
|
|
|
unsigned int _curr, _past, curr, past;
|
|
|
|
unsigned int remain, _curr_tick, curr_tick;
|
|
|
|
|
2017-10-31 16:54:15 +00:00
|
|
|
while (1) {
|
2017-10-30 17:04:28 +00:00
|
|
|
_curr = ctr->curr_ctr;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-30 17:04:28 +00:00
|
|
|
_past = ctr->prev_ctr;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-30 17:04:28 +00:00
|
|
|
_curr_tick = ctr->curr_tick;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-30 17:04:28 +00:00
|
|
|
if (_curr_tick & 0x1)
|
|
|
|
continue;
|
2017-10-12 07:49:09 +00:00
|
|
|
curr = ctr->curr_ctr;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-12 07:49:09 +00:00
|
|
|
past = ctr->prev_ctr;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-12 07:49:09 +00:00
|
|
|
curr_tick = ctr->curr_tick;
|
2020-05-28 13:29:33 +00:00
|
|
|
__ha_compiler_barrier();
|
2017-10-30 17:04:28 +00:00
|
|
|
if (_curr == curr && _past == past && _curr_tick == curr_tick)
|
|
|
|
break;
|
|
|
|
};
|
2010-06-20 05:15:43 +00:00
|
|
|
|
2017-10-12 07:49:09 +00:00
|
|
|
remain = curr_tick + period - now_ms;
|
2010-06-20 05:15:43 +00:00
|
|
|
if (likely((int)remain < 0)) {
|
|
|
|
/* We're past the first period, check if we can still report a
|
|
|
|
* part of last period or if we're too far away.
|
|
|
|
*/
|
|
|
|
past = curr;
|
|
|
|
curr = 0;
|
|
|
|
remain += period;
|
|
|
|
if ((int)remain < 0)
|
|
|
|
past = 0;
|
|
|
|
}
|
|
|
|
if (likely(past))
|
|
|
|
curr += div64_32((unsigned long long)past * remain, period);
|
|
|
|
|
|
|
|
curr += pend;
|
|
|
|
freq -= curr;
|
|
|
|
if ((int)freq < 0)
|
|
|
|
freq = 0;
|
|
|
|
return freq;
|
|
|
|
}
|
|
|
|
|
2009-03-05 17:43:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Local variables:
|
|
|
|
* c-indent-level: 8
|
|
|
|
* c-basic-offset: 8
|
|
|
|
* End:
|
|
|
|
*/
|