Diffstat (limited to 'sys/kern/kern_mutex.c')
 sys/kern/kern_mutex.c | 43 +++++++++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 20 deletions(-)
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 35bbd5f78c3f..83977bb6425e 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -265,6 +265,7 @@ void
__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
{
struct mtx *m;
+ uintptr_t tid, v;
if (SCHEDULER_STOPPED())
return;
@@ -282,7 +283,13 @@ __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
- __mtx_lock(m, curthread, opts, file, line);
+ tid = (uintptr_t)curthread;
+ v = MTX_UNOWNED;
+ if (!_mtx_obtain_lock_fetch(m, &v, tid))
+ _mtx_lock_sleep(m, v, tid, opts, file, line);
+ else
+ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire,
+ m, 0, 0, file, line);
LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
line);
WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
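The rewritten fast path replaces the old __mtx_lock() macro with a single fetch-style compare-and-swap: _mtx_obtain_lock_fetch() tries to swing mtx_lock from MTX_UNOWNED to the thread pointer and, when it fails, stores the value it actually observed into v, which is handed straight to _mtx_lock_sleep(). A minimal userspace sketch of the pattern, assuming C11 <stdatomic.h>; obtain_lock_fetch(), lock_sketch(), and the MTX_UNOWNED value below are illustrative stand-ins, not the kernel definitions (lock_sleep_sketch() is filled in after the retry-loop hunk further down):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define MTX_UNOWNED	((uintptr_t)0)	/* sketch value; the kernel uses a flag bit */

struct mtx { _Atomic uintptr_t mtx_lock; };

static void lock_sleep_sketch(struct mtx *, uintptr_t, uintptr_t);

/*
 * Try to swing mtx_lock from *vp to tid.  On failure, *vp is updated
 * with the value actually observed, so the caller never needs a
 * separate re-read of the lock word.
 */
static bool
obtain_lock_fetch(struct mtx *m, uintptr_t *vp, uintptr_t tid)
{
	return (atomic_compare_exchange_strong(&m->mtx_lock, vp, tid));
}

static void
lock_sketch(struct mtx *m, uintptr_t tid)
{
	uintptr_t v = MTX_UNOWNED;

	if (!obtain_lock_fetch(m, &v, tid))
		/* v already holds the observed owner. */
		lock_sleep_sketch(m, v, tid);
}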
@@ -310,7 +317,7 @@ __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
line);
mtx_assert(m, MA_OWNED);
- __mtx_unlock(m, curthread, opts, file, line);
+ __mtx_unlock_sleep(c, opts, file, line);
TD_LOCKS_DEC(curthread);
}
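Unlock loses its inline fast path here as well: __mtx_unlock_flags() now always calls __mtx_unlock_sleep(), whose reworked body (last hunk below) attempts the uncontended release first, so the common case remains a single atomic operation; see the sketch after the final hunk.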
@@ -455,12 +462,11 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
* sleep waiting for it), or if we need to recurse on it.
*/
void
-__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
+__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
const char *file, int line)
{
struct mtx *m;
struct turnstile *ts;
- uintptr_t v;
#ifdef ADAPTIVE_MUTEXES
volatile struct thread *owner;
#endif
@@ -489,7 +495,6 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
lock_delay_arg_init(&lda, NULL);
#endif
m = mtxlock2mtx(c);
- v = MTX_READ_VALUE(m);
if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
@@ -520,9 +525,8 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
for (;;) {
if (v == MTX_UNOWNED) {
- if (_mtx_obtain_lock(m, tid))
+ if (_mtx_obtain_lock_fetch(m, &v, tid))
break;
- v = MTX_READ_VALUE(m);
continue;
}
#ifdef KDTRACE_HOOKS
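Because the caller now passes in the lock value it observed, the retry loop drops its MTX_READ_VALUE() re-reads: a failed _mtx_obtain_lock_fetch() refreshes v as a side effect. Continuing the sketch from above (adaptive spinning on a live owner and the turnstile hand-off are elided; this stand-in merely re-reads and retries):

static void
lock_sleep_sketch(struct mtx *m, uintptr_t v, uintptr_t tid)
{
	for (;;) {
		if (v == MTX_UNOWNED) {
			if (obtain_lock_fetch(m, &v, tid))
				break;		/* acquired */
			continue;		/* v was refreshed by the CAS */
		}
		/*
		 * Owned by another thread.  The kernel would spin on
		 * the owner or block on a turnstile; this sketch just
		 * re-reads the lock word and retries.
		 */
		v = atomic_load_explicit(&m->mtx_lock,
		    memory_order_relaxed);
	}
}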
@@ -674,12 +678,11 @@ _mtx_lock_spin_failed(struct mtx *m)
* is handled inline.
*/
void
-_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
- const char *file, int line)
+_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
+ int opts, const char *file, int line)
{
struct mtx *m;
struct lock_delay_arg lda;
- uintptr_t v;
#ifdef LOCK_PROFILING
int contested = 0;
uint64_t waittime = 0;
@@ -706,12 +709,10 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
#ifdef KDTRACE_HOOKS
spin_time -= lockstat_nsecs(&m->lock_object);
#endif
- v = MTX_READ_VALUE(m);
for (;;) {
if (v == MTX_UNOWNED) {
- if (_mtx_obtain_lock(m, tid))
+ if (_mtx_obtain_lock_fetch(m, &v, tid))
break;
- v = MTX_READ_VALUE(m);
continue;
}
/* Give interrupts a chance while we spin. */
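The spin-mutex slow path receives the same conversion: the caller's fcmpset failure seeds v, and the explicit re-reads drop out of the loop. The difference from the sleep path is the contention behavior visible in the surrounding context: rather than blocking, the loop gives interrupts a chance and busy-waits until the lock word reads as unowned before retrying the compare-and-swap.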
@@ -783,6 +784,7 @@ thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
#endif
for (;;) {
retry:
+ v = MTX_UNOWNED;
spinlock_enter();
m = td->td_lock;
KASSERT(m->mtx_lock != MTX_DESTROYED,
@@ -796,14 +798,11 @@ retry:
m->lock_object.lo_name, file, line));
WITNESS_CHECKORDER(&m->lock_object,
opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
- v = MTX_READ_VALUE(m);
for (;;) {
- if (v == MTX_UNOWNED) {
- if (_mtx_obtain_lock(m, tid))
- break;
- v = MTX_READ_VALUE(m);
+ if (_mtx_obtain_lock_fetch(m, &v, tid))
+ break;
+ if (v == MTX_UNOWNED)
continue;
- }
if (v == tid) {
m->mtx_recurse++;
break;
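thread_lock_flags_() is reshaped so the compare-and-swap comes first on every pass: v is reset to MTX_UNOWNED at the retry label, making the uncontended case a single fcmpset, and only on failure does the loop inspect the fetched value, either for a recursive acquisition (v == tid) or for contention.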
@@ -902,7 +901,11 @@ __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
m = mtxlock2mtx(c);
- if (mtx_recursed(m)) {
+ if (!mtx_recursed(m)) {
+ LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
+ if (_mtx_release_lock(m, (uintptr_t)curthread))
+ return;
+ } else {
if (--(m->mtx_recurse) == 0)
atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
if (LOCK_LOG_TEST(&m->lock_object, opts))
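Finally, __mtx_unlock_sleep() inverts the old recursion test so the uncontended case comes first: _mtx_release_lock(), a release-ordered compare-and-swap of the owner pointer back to MTX_UNOWNED, lets the function return early when no waiter or recursion bits are set. Closing out the userspace sketch (unlock_hard_sketch() is a hypothetical stand-in for the recursion/turnstile handling):

static void
unlock_hard_sketch(struct mtx *m, uintptr_t tid)
{
	/*
	 * Waiters present (or recursion): the kernel would clear the
	 * recursion count or wake a queued thread here.
	 */
	(void)m; (void)tid;
}

static void
unlock_sketch(struct mtx *m, uintptr_t tid)
{
	uintptr_t v = tid;

	/*
	 * Uncontended release: owner -> MTX_UNOWNED with release
	 * ordering.  Failure means waiter or recursion bits are set.
	 */
	if (atomic_compare_exchange_strong_explicit(&m->mtx_lock, &v,
	    MTX_UNOWNED, memory_order_release, memory_order_relaxed))
		return;
	unlock_hard_sketch(m, tid);
}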