REORG: activity: uninline sched_activity_entry()

This one is expensive in code size because it comes with xxhash.h at a
low level of dependency that's inherited at plenty of places, and for
a function that doesn't benefit from inlining and could possibly even
benefit from not being inline given that it's large and called from the
scheduler.

Moving it to activity.c reduces the LoC by 1.2% and the binary size by
~1kB.
This commit is contained in:
Willy Tarreau 2021-10-06 16:26:33 +02:00
parent e0650224b8
commit a26be37e20
2 changed files with 28 additions and 28 deletions

View File

@ -24,8 +24,6 @@
#include <haproxy/activity-t.h>
#include <haproxy/api.h>
#include <haproxy/freq_ctr.h>
#include <haproxy/xxhash.h>
extern unsigned int profiling;
extern unsigned long task_profiling_mask;
@ -34,32 +32,7 @@ extern struct sched_activity sched_activity[256];
void report_stolen_time(uint64_t stolen);
void activity_count_runtime();
/* Computes the index of function pointer <func> for use with sched_activity[]
 * or any other similar array passed in <array>, and returns a pointer to the
 * entry after having atomically assigned it to this function pointer. Note
 * that in case of collision, the first entry is returned instead ("other").
 */
static inline struct sched_activity *sched_activity_entry(struct sched_activity *array, const void *func)
{
	uint64_t h = XXH64_avalanche(XXH64_mergeRound((size_t)func, (size_t)func));
	const void *expected = NULL;
	struct sched_activity *slot;

	/* fold the 64-bit hash down to an 8-bit index */
	h ^= (h >> 32);
	h ^= (h >> 16);
	h ^= (h >> 8);
	slot = array + (h & 0xff);

	/* fast path: this slot is already owned by <func> */
	if (likely(slot->func == func))
		return slot;

	/* slot looked free: try to claim it atomically */
	if (HA_ATOMIC_CAS(&slot->func, &expected, func))
		return slot;

	/* collision with another function: fall back to entry 0 ("other") */
	return array;
}
struct sched_activity *sched_activity_entry(struct sched_activity *array, const void *func);
#endif /* _HAPROXY_ACTIVITY_H */

View File

@ -19,6 +19,7 @@
#include <haproxy/stream_interface.h>
#include <haproxy/time.h>
#include <haproxy/tools.h>
#include <haproxy/xxhash.h>
#if defined(DEBUG_MEM_STATS)
/* these ones are macros in bug.h when DEBUG_MEM_STATS is set, and will
@ -558,6 +559,32 @@ static int cmp_memprof_addr(const void *a, const void *b)
}
#endif // USE_MEMORY_PROFILING
/* Computes the index of function pointer <func> for use with sched_activity[]
 * or any other similar array passed in <array>, and returns a pointer to the
 * entry after having atomically assigned it to this function pointer. Note
 * that in case of collision, the first entry is returned instead ("other").
 */
struct sched_activity *sched_activity_entry(struct sched_activity *array, const void *func)
{
	uint64_t h = XXH64_avalanche(XXH64_mergeRound((size_t)func, (size_t)func));
	const void *expected = NULL;
	struct sched_activity *slot;

	/* fold the 64-bit hash down to an 8-bit index */
	h ^= (h >> 32);
	h ^= (h >> 16);
	h ^= (h >> 8);
	slot = array + (h & 0xff);

	/* fast path: this slot is already owned by <func> */
	if (likely(slot->func == func))
		return slot;

	/* slot looked free: try to claim it atomically */
	if (HA_ATOMIC_CAS(&slot->func, &expected, func))
		return slot;

	/* collision with another function: fall back to entry 0 ("other") */
	return array;
}
/* This function dumps all profiling settings. It returns 0 if the output
* buffer is full and it needs to be called again, otherwise non-zero.
* It dumps some parts depending on the following states: