MINOR: thread/debug: replace nsec_now() with now_mono_time()
The two functions do exactly the same thing, except that the second one is already provided by time.h and is still defined even when a monotonic clock is not available.
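For context, a minimal sketch of what both helpers boil down to. This is an illustration only, with an assumed name (sketch_now_mono_time) and guard; the real now_mono_time() is the one provided by haproxy/time.h. Both read CLOCK_MONOTONIC and return the time in nanoseconds, which is why the local nsec_now() copy can be dropped.

    #include <stdint.h>
    #include <time.h>

    /* Illustrative sketch, not the haproxy/time.h implementation: return the
     * monotonic clock in nanoseconds, or 0 when no monotonic clock exists.
     */
    static inline uint64_t sketch_now_mono_time(void)
    {
            struct timespec ts = { 0, 0 };

    #ifdef CLOCK_MONOTONIC
            clock_gettime(CLOCK_MONOTONIC, &ts);
    #endif
            return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }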
parent 407ef893e7
commit dced3ebb4a
src/thread.c | 54
diff --git a/src/thread.c b/src/thread.c
--- a/src/thread.c
+++ b/src/thread.c
@@ -28,6 +28,7 @@
 #include <haproxy/global.h>
 #include <haproxy/log.h>
 #include <haproxy/thread.h>
+#include <haproxy/time.h>
 #include <haproxy/tools.h>
 
 struct thread_info ha_thread_info[MAX_THREADS] = { };
@@ -358,15 +359,6 @@ void show_lock_stats()
 	}
 }
 
-static uint64_t nsec_now(void)
-{
-	struct timespec ts;
-
-	clock_gettime(CLOCK_MONOTONIC, &ts);
-	return ((uint64_t) ts.tv_sec * 1000000000ULL +
-		(uint64_t) ts.tv_nsec);
-}
-
 void __ha_rwlock_init(struct ha_rwlock *l)
 {
 	memset(l, 0, sizeof(struct ha_rwlock));
@@ -390,9 +382,9 @@ void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_WRLOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
 
@@ -416,9 +408,9 @@ int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
 	/* We set waiting writer because trywrlock could wait for readers to quit */
 	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	r = __RWLOCK_TRYWRLOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
 	if (unlikely(r)) {
 		HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
 		return r;
@@ -462,9 +454,9 @@ void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)
 
 	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_RDLOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
 	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
 
 	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
@@ -517,9 +509,9 @@ void __ha_rwlock_wrtord(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_WRTORD(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
 
@@ -545,9 +537,9 @@ void __ha_rwlock_wrtosk(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_WRTOSK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
 
@@ -570,9 +562,9 @@ void __ha_rwlock_sklock(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_SKLOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
 
@@ -597,9 +589,9 @@ void __ha_rwlock_sktowr(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_SKTOWR(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
 
@@ -625,9 +617,9 @@ void __ha_rwlock_sktord(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_SKTORD(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
 
@@ -667,9 +659,9 @@ int __ha_rwlock_trysklock(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	r = __RWLOCK_TRYSKLOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
 
 	if (likely(!r)) {
 		/* got the lock ! */
@@ -698,9 +690,9 @@ int __ha_rwlock_tryrdtosk(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	r = __RWLOCK_TRYRDTOSK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
 
 	if (likely(!r)) {
 		/* got the lock ! */
@@ -740,9 +732,9 @@ void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
 
 	HA_ATOMIC_OR(&l->info.waiters, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__SPIN_LOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
 