| author | Mateusz Guzik <mjg@FreeBSD.org> | 2020-11-24 03:49:37 +0000 |
|---|---|---|
| committer | Mateusz Guzik <mjg@FreeBSD.org> | 2020-11-24 03:49:37 +0000 |
| commit | f90d57b808449f1bf8c6856a818174253d7e5e46 (patch) | |
| tree | 9e3fd43bc70acf05e454ff91ca7f5a5d9c8ac358 | |
| parent | 094c148b7a98d1b59ab2cf266cb841f71c590513 (diff) | |
| -rw-r--r-- | sys/kern/kern_lock.c | 2 |
| -rw-r--r-- | sys/kern/kern_mutex.c | 16 |
| -rw-r--r-- | sys/kern/kern_rwlock.c | 11 |
| -rw-r--r-- | sys/kern/kern_sx.c | 12 |

4 files changed, 21 insertions, 20 deletions
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 1490e0595714..98c6cafde702 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -603,10 +603,10 @@ lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
 	if (LK_CAN_WITNESS(flags))
 		WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER, file, line,
 		    flags & LK_INTERLOCK ? ilk : NULL);
+	x = lockmgr_read_value(lk);
 	lock_delay_arg_init(&lda, &lockmgr_delay);
 	if (!lk_adaptive)
 		flags &= ~LK_ADAPTIVE;
-	x = lockmgr_read_value(lk);
 	/*
 	 * The lock may already be locked exclusive by curthread,
 	 * avoid deadlock.
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 0530a99ce40d..0c384281f711 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -535,12 +535,6 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
 	if (SCHEDULER_STOPPED_TD(td))
 		return;
 
-#if defined(ADAPTIVE_MUTEXES)
-	lock_delay_arg_init(&lda, &mtx_delay);
-#elif defined(KDTRACE_HOOKS)
-	lock_delay_arg_init_noadapt(&lda);
-#endif
-
 	if (__predict_false(v == MTX_UNOWNED))
 		v = MTX_READ_VALUE(m);
 
@@ -562,6 +556,12 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v)
 		opts &= ~MTX_RECURSE;
 #endif
 
+#if defined(ADAPTIVE_MUTEXES)
+	lock_delay_arg_init(&lda, &mtx_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init_noadapt(&lda);
+#endif
+
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -746,13 +746,13 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v)
 	if (SCHEDULER_STOPPED())
 		return;
 
-	lock_delay_arg_init(&lda, &mtx_spin_delay);
-
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 	KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
 	    "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
 
+	lock_delay_arg_init(&lda, &mtx_spin_delay);
+
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 2489d029cbb3..cf1af0ee7af9 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -948,11 +948,6 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 	if (SCHEDULER_STOPPED())
 		return;
 
-#if defined(ADAPTIVE_RWLOCKS)
-	lock_delay_arg_init(&lda, &rw_delay);
-#elif defined(KDTRACE_HOOKS)
-	lock_delay_arg_init_noadapt(&lda);
-#endif
 	if (__predict_false(v == RW_UNLOCKED))
 		v = RW_READ_VALUE(rw);
 
@@ -971,6 +966,12 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
 
+#if defined(ADAPTIVE_RWLOCKS)
+	lock_delay_arg_init(&lda, &rw_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init_noadapt(&lda);
+#endif
+
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 805cdf316807..0d914375ec87 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -620,12 +620,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 	if (SCHEDULER_STOPPED())
 		return (0);
 
-#if defined(ADAPTIVE_SX)
-	lock_delay_arg_init(&lda, &sx_delay);
-#elif defined(KDTRACE_HOOKS)
-	lock_delay_arg_init_noadapt(&lda);
-#endif
-
 	if (__predict_false(x == SX_LOCK_UNLOCKED))
 		x = SX_READ_VALUE(sx);
 
@@ -645,6 +639,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
 
+#if defined(ADAPTIVE_SX)
+	lock_delay_arg_init(&lda, &sx_delay);
+#elif defined(KDTRACE_HOOKS)
+	lock_delay_arg_init_noadapt(&lda);
+#endif
+
 #ifdef HWPMC_HOOKS
 	PMC_SOFT_CALL( , , lock, failed);
 #endif
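The common thread in the kern_mutex.c, kern_rwlock.c, and kern_sx.c hunks is that lock_delay_arg_init() (or lock_delay_arg_init_noadapt()) now runs only after the early-return and recursion checks, right before the contested path, so acquisitions that bail out earlier no longer pay for the setup; the kern_lock.c hunk additionally reads the lock word before the delay setup. Below is a minimal sketch of the same reordering on a toy spinlock — the lock type, helpers, and names are hypothetical and stand in for the FreeBSD primitives, they are not the kernel API:

```c
/*
 * Toy illustration of deferring per-acquisition delay-state setup until
 * the slow path is certain to spin. All names here are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct delay_arg { unsigned spins; };
struct toy_lock { uintptr_t owner; bool recursed; };

static void delay_arg_init(struct delay_arg *da) { da->spins = 0; }
static void delay_backoff(struct delay_arg *da) { da->spins++; }

static void
toy_lock_hard(struct toy_lock *lk, uintptr_t self)
{
	struct delay_arg da;

	/* Early exit (e.g. recursion): returns before any delay setup. */
	if (lk->owner == self) {
		lk->recursed = true;
		return;
	}

	/* Only initialized once we know we may actually spin. */
	delay_arg_init(&da);
	while (__sync_val_compare_and_swap(&lk->owner, (uintptr_t)0, self) != 0)
		delay_backoff(&da);
	printf("acquired after %u spins\n", da.spins);
}

int
main(void)
{
	struct toy_lock lk = { 0, false };

	toy_lock_hard(&lk, 1);	/* uncontended acquisition */
	toy_lock_hard(&lk, 1);	/* recursive path: skips delay setup */
	return (0);
}
```

The reordering only changes where the initialization happens; once execution does reach the spin loop, the delay state is set up exactly as before.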