From dced3ebb4a686026c21c54c21ee5dbe27d7ccad4 Mon Sep 17 00:00:00 2001
From: Willy Tarreau
Date: Tue, 5 Oct 2021 18:48:23 +0200
Subject: [PATCH] MINOR: thread/debug: replace nsec_now() with now_mono_time()

The two functions do exactly the same thing, except that the second one
is already provided by time.h and remains defined even when no monotonic
clock is available.
---
 src/thread.c | 54 +++++++++++++++++++++++-------------------------------
 1 file changed, 23 insertions(+), 31 deletions(-)

diff --git a/src/thread.c b/src/thread.c
index b2bfd4d45..a6e76c39f 100644
--- a/src/thread.c
+++ b/src/thread.c
@@ -28,6 +28,7 @@
 #include
 #include
 #include
+#include <haproxy/time.h>
 #include
 
 struct thread_info ha_thread_info[MAX_THREADS] = { };
@@ -358,15 +359,6 @@ void show_lock_stats()
 	}
 }
 
-static uint64_t nsec_now(void)
-{
-	struct timespec ts;
-
-	clock_gettime(CLOCK_MONOTONIC, &ts);
-	return ((uint64_t) ts.tv_sec * 1000000000ULL +
-		(uint64_t) ts.tv_nsec);
-}
-
 void __ha_rwlock_init(struct ha_rwlock *l)
 {
 	memset(l, 0, sizeof(struct ha_rwlock));
@@ -390,9 +382,9 @@ void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_WRLOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
 
@@ -416,9 +408,9 @@ int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
 	/* We set waiting writer because trywrlock could wait for readers to quit */
 	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	r = __RWLOCK_TRYWRLOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
 	if (unlikely(r)) {
 		HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
 		return r;
@@ -462,9 +454,9 @@ void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)
 
 	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_RDLOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
 	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
 
 	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
@@ -517,9 +509,9 @@ void __ha_rwlock_wrtord(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_WRTORD(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
 
@@ -545,9 +537,9 @@ void __ha_rwlock_wrtosk(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_WRTOSK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
 
@@ -570,9 +562,9 @@ void __ha_rwlock_sklock(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_SKLOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
 
@@ -597,9 +589,9 @@ void __ha_rwlock_sktowr(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_SKTOWR(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
 
@@ -625,9 +617,9 @@ void __ha_rwlock_sktord(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__RWLOCK_SKTORD(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);
 
@@ -667,9 +659,9 @@ int __ha_rwlock_trysklock(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	r = __RWLOCK_TRYSKLOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
 
 	if (likely(!r)) {
 		/* got the lock ! */
@@ -698,9 +690,9 @@ int __ha_rwlock_tryrdtosk(enum lock_label lbl, struct ha_rwlock *l,
 
 	HA_ATOMIC_OR(&l->info.wait_seekers, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	r = __RWLOCK_TRYRDTOSK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (now_mono_time() - start_time));
 
 	if (likely(!r)) {
 		/* got the lock ! */
@@ -740,9 +732,9 @@ void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
 
 	HA_ATOMIC_OR(&l->info.waiters, tid_bit);
 
-	start_time = nsec_now();
+	start_time = now_mono_time();
 	__SPIN_LOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (now_mono_time() - start_time));
 
 	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);
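
Note (not part of the patch): the sketch below is a minimal, standalone
illustration of the wait-time accounting pattern these hunks touch: take a
monotonic timestamp, block on the lock, then add the elapsed nanoseconds to a
per-lock counter. The mono_time_ns() helper simply mirrors the removed
nsec_now(); HAProxy's real now_mono_time() from time.h also stays defined when
no monotonic clock is available, which this sketch does not handle. The
pthread mutex and the plain counters are stand-ins for the __RWLOCK_* /
__SPIN_LOCK primitives and the lock_stats[] atomics, not HAProxy code.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* stand-in for now_mono_time(): nanoseconds from the monotonic clock */
static uint64_t mono_time_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t nsec_wait_for_write; /* like lock_stats[lbl].nsec_wait_for_write */
static uint64_t num_write_locked;    /* like lock_stats[lbl].num_write_locked */

/* same shape as the patched code: timestamp, block, account the delta */
static void profiled_lock(void)
{
	uint64_t start_time = mono_time_ns();

	pthread_mutex_lock(&lock);
	nsec_wait_for_write += mono_time_ns() - start_time;
	num_write_locked++;
}

int main(void)
{
	profiled_lock();
	pthread_mutex_unlock(&lock);
	printf("waited %llu ns across %llu acquisitions\n",
	       (unsigned long long)nsec_wait_for_write,
	       (unsigned long long)num_write_locked);
	return 0;
}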