author    Mateusz Guzik <mjg@FreeBSD.org>	2017-03-16 01:32:56 +0000
committer Mateusz Guzik <mjg@FreeBSD.org>	2017-03-16 01:32:56 +0000
commit    53f33c99425a2aff5fa74b3724e5fb148b714518 (patch)
tree      c70c8493ffb9eaf58e9abd598acfdce0bf2d21d0
parent    a6acb428e2f22d4740b7bbfca4bad1256fa2d4ac (diff)
-rw-r--r--	sys/kern/kern_mutex.c	107
-rw-r--r--	sys/kern/kern_rwlock.c	 64
-rw-r--r--	sys/kern/kern_sx.c	 68
-rw-r--r--	sys/sys/mutex.h	  8
-rw-r--r--	sys/sys/rwlock.h	  2
-rw-r--r--	sys/sys/sx.h	  5
6 files changed, 164 insertions(+), 90 deletions(-)
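
The hunks below apply one pattern across the mutex, rwlock and sx slow paths: each contended-acquisition loop now takes a single snapshot of the lock word up front (through the new MTX_READ_VALUE/RW_READ_VALUE/SX_READ_VALUE macros), re-reads it only at explicit retry points, and decodes the owner from that snapshot with the new lv_mtx_owner/lv_rw_wowner/lv_sx_owner macros instead of dereferencing the volatile lock field again in every test. A minimal, self-contained sketch of the resulting loop shape, written with C11 atomics and hypothetical names (lock_word, read_value, try_acquire) rather than the kernel's atomic(9) primitives:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define UNOWNED	((uintptr_t)0)

	/* Hypothetical stand-ins for the kernel's lock-word primitives. */
	static inline uintptr_t
	read_value(_Atomic uintptr_t *lock_word)
	{
		/* Plain load; no ordering is needed just to inspect the word. */
		return (atomic_load_explicit(lock_word, memory_order_relaxed));
	}

	static inline bool
	try_acquire(_Atomic uintptr_t *lock_word, uintptr_t tid)
	{
		uintptr_t expected = UNOWNED;

		/* The CAS that takes ownership supplies the acquire barrier. */
		return (atomic_compare_exchange_strong_explicit(lock_word,
		    &expected, tid, memory_order_acquire, memory_order_relaxed));
	}

	static void
	lock_hard(_Atomic uintptr_t *lock_word, uintptr_t tid)
	{
		uintptr_t v;

		v = read_value(lock_word);		/* one snapshot per attempt */
		for (;;) {
			if (v == UNOWNED) {
				if (try_acquire(lock_word, tid))
					break;
				v = read_value(lock_word);	/* CAS lost a race: refresh */
				continue;
			}
			/* Owned: back off, then refresh the snapshot and retry. */
			v = read_value(lock_word);
		}
	}

Ownership transfer still goes through the same cmpset-with-acquire operations as before; the refactoring only reduces how often a contended cache line is re-read per loop iteration.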
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index b4b41b866ee5..87dfd3f003ea 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -94,8 +94,6 @@ PMC_SOFT_DEFINE( , , lock, failed);
#define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
-#define mtx_owner(m) ((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
-
static void assert_mtx(const struct lock_object *lock, int what);
#ifdef DDB
static void db_show_mtx(const struct lock_object *lock);
@@ -452,8 +450,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
lock_delay_arg_init(&lda, NULL);
#endif
m = mtxlock2mtx(c);
+ v = MTX_READ_VALUE(m);
- if (mtx_owned(m)) {
+ if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
(opts & MTX_RECURSE) != 0,
("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
@@ -481,8 +480,12 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
#endif
for (;;) {
- if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
- break;
+ if (v == MTX_UNOWNED) {
+ if (_mtx_obtain_lock(m, tid))
+ break;
+ v = MTX_READ_VALUE(m);
+ continue;
+ }
#ifdef KDTRACE_HOOKS
lda.spin_cnt++;
#endif
@@ -491,31 +494,30 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
* If the owner is running on another CPU, spin until the
* owner stops running or the state of the lock changes.
*/
- v = m->mtx_lock;
- if (v != MTX_UNOWNED) {
- owner = (struct thread *)(v & ~MTX_FLAGMASK);
- if (TD_IS_RUNNING(owner)) {
- if (LOCK_LOG_TEST(&m->lock_object, 0))
- CTR3(KTR_LOCK,
- "%s: spinning on %p held by %p",
- __func__, m, owner);
- KTR_STATE1(KTR_SCHED, "thread",
- sched_tdname((struct thread *)tid),
- "spinning", "lockname:\"%s\"",
- m->lock_object.lo_name);
- while (mtx_owner(m) == owner &&
- TD_IS_RUNNING(owner))
- lock_delay(&lda);
- KTR_STATE0(KTR_SCHED, "thread",
- sched_tdname((struct thread *)tid),
- "running");
- continue;
- }
+ owner = lv_mtx_owner(v);
+ if (TD_IS_RUNNING(owner)) {
+ if (LOCK_LOG_TEST(&m->lock_object, 0))
+ CTR3(KTR_LOCK,
+ "%s: spinning on %p held by %p",
+ __func__, m, owner);
+ KTR_STATE1(KTR_SCHED, "thread",
+ sched_tdname((struct thread *)tid),
+ "spinning", "lockname:\"%s\"",
+ m->lock_object.lo_name);
+ do {
+ lock_delay(&lda);
+ v = MTX_READ_VALUE(m);
+ owner = lv_mtx_owner(v);
+ } while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
+ KTR_STATE0(KTR_SCHED, "thread",
+ sched_tdname((struct thread *)tid),
+ "running");
+ continue;
}
#endif
ts = turnstile_trywait(&m->lock_object);
- v = m->mtx_lock;
+ v = MTX_READ_VALUE(m);
/*
* Check if the lock has been released while spinning for
@@ -534,7 +536,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
* chain lock. If so, drop the turnstile lock and try
* again.
*/
- owner = (struct thread *)(v & ~MTX_FLAGMASK);
+ owner = lv_mtx_owner(v);
if (TD_IS_RUNNING(owner)) {
turnstile_cancel(ts);
continue;
@@ -549,6 +551,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
if ((v & MTX_CONTESTED) == 0 &&
!atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
turnstile_cancel(ts);
+ v = MTX_READ_VALUE(m);
continue;
}
@@ -579,6 +582,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
sleep_time += lockstat_nsecs(&m->lock_object);
sleep_cnt++;
#endif
+ v = MTX_READ_VALUE(m);
}
#ifdef KDTRACE_HOOKS
all_time += lockstat_nsecs(&m->lock_object);
@@ -636,6 +640,7 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
{
struct mtx *m;
struct lock_delay_arg lda;
+ uintptr_t v;
#ifdef LOCK_PROFILING
int contested = 0;
uint64_t waittime = 0;
@@ -662,24 +667,30 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
#ifdef KDTRACE_HOOKS
spin_time -= lockstat_nsecs(&m->lock_object);
#endif
+ v = MTX_READ_VALUE(m);
for (;;) {
- if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
- break;
+ if (v == MTX_UNOWNED) {
+ if (_mtx_obtain_lock(m, tid))
+ break;
+ v = MTX_READ_VALUE(m);
+ continue;
+ }
/* Give interrupts a chance while we spin. */
spinlock_exit();
- while (m->mtx_lock != MTX_UNOWNED) {
+ do {
if (lda.spin_cnt < 10000000) {
lock_delay(&lda);
- continue;
+ } else {
+ lda.spin_cnt++;
+ if (lda.spin_cnt < 60000000 || kdb_active ||
+ panicstr != NULL)
+ DELAY(1);
+ else
+ _mtx_lock_spin_failed(m);
+ cpu_spinwait();
}
- lda.spin_cnt++;
- if (lda.spin_cnt < 60000000 || kdb_active ||
- panicstr != NULL)
- DELAY(1);
- else
- _mtx_lock_spin_failed(m);
- cpu_spinwait();
- }
+ v = MTX_READ_VALUE(m);
+ } while (v != MTX_UNOWNED);
spinlock_enter();
}
#ifdef KDTRACE_HOOKS
@@ -704,7 +715,7 @@ void
thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
{
struct mtx *m;
- uintptr_t tid;
+ uintptr_t tid, v;
struct lock_delay_arg lda;
#ifdef LOCK_PROFILING
int contested = 0;
@@ -746,10 +757,15 @@ retry:
m->lock_object.lo_name, file, line));
WITNESS_CHECKORDER(&m->lock_object,
opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
+ v = MTX_READ_VALUE(m);
for (;;) {
- if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
- break;
- if (m->mtx_lock == tid) {
+ if (v == MTX_UNOWNED) {
+ if (_mtx_obtain_lock(m, tid))
+ break;
+ v = MTX_READ_VALUE(m);
+ continue;
+ }
+ if (v == tid) {
m->mtx_recurse++;
break;
}
@@ -760,7 +776,7 @@ retry:
&contested, &waittime);
/* Give interrupts a chance while we spin. */
spinlock_exit();
- while (m->mtx_lock != MTX_UNOWNED) {
+ do {
if (lda.spin_cnt < 10000000) {
lock_delay(&lda);
} else {
@@ -774,7 +790,8 @@ retry:
}
if (m != td->td_lock)
goto retry;
- }
+ v = MTX_READ_VALUE(m);
+ } while (v != MTX_UNOWNED);
spinlock_enter();
}
if (m == td->td_lock)
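
In the adaptive-spin hunks above, the owner is tracked from the same snapshot: the spin loop re-reads the lock word once per delay step and keeps spinning only while the word is still owned and that owner is on CPU, where the old loop re-read mtx_lock through mtx_owner(m) on every comparison. Condensed from the __mtx_lock_sleep hunk above (kernel identifiers, surrounding context elided):

	owner = lv_mtx_owner(v);
	if (TD_IS_RUNNING(owner)) {
		do {
			lock_delay(&lda);	/* back off */
			v = MTX_READ_VALUE(m);	/* one fresh read per step */
			owner = lv_mtx_owner(v);
		} while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
		continue;	/* lock state changed: retry the acquire */
	}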
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index ca522aa146e1..983b4e5a0339 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -114,9 +114,12 @@ LOCK_DELAY_SYSINIT_DEFAULT(rw_delay);
* Return a pointer to the owning thread if the lock is write-locked or
* NULL if the lock is unlocked or read-locked.
*/
-#define rw_wowner(rw) \
- ((rw)->rw_lock & RW_LOCK_READ ? NULL : \
- (struct thread *)RW_OWNER((rw)->rw_lock))
+
+#define lv_rw_wowner(v) \
+ ((v) & RW_LOCK_READ ? NULL : \
+ (struct thread *)RW_OWNER((v)))
+
+#define rw_wowner(rw) lv_rw_wowner(RW_READ_VALUE(rw))
/*
* Returns if a write owner is recursed. Write ownership is not assured
@@ -397,7 +400,10 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
#ifdef KDTRACE_HOOKS
all_time -= lockstat_nsecs(&rw->lock_object);
- state = rw->rw_lock;
+#endif
+ v = RW_READ_VALUE(rw);
+#ifdef KDTRACE_HOOKS
+ state = v;
#endif
for (;;) {
/*
@@ -410,7 +416,6 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
* completely unlocked rwlock since such a lock is encoded
* as a read lock with no waiters.
*/
- v = rw->rw_lock;
if (RW_CAN_READ(v)) {
/*
* The RW_LOCK_READ_WAITERS flag should only be set
@@ -426,6 +431,7 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
(void *)(v + RW_ONE_READER));
break;
}
+ v = RW_READ_VALUE(rw);
continue;
}
#ifdef KDTRACE_HOOKS
@@ -453,9 +459,11 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
KTR_STATE1(KTR_SCHED, "thread",
sched_tdname(curthread), "spinning",
"lockname:\"%s\"", rw->lock_object.lo_name);
- while ((struct thread*)RW_OWNER(rw->rw_lock) ==
- owner && TD_IS_RUNNING(owner))
+ do {
lock_delay(&lda);
+ v = RW_READ_VALUE(rw);
+ owner = lv_rw_wowner(v);
+ } while (owner != NULL && TD_IS_RUNNING(owner));
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
continue;
@@ -466,11 +474,12 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
"spinning", "lockname:\"%s\"",
rw->lock_object.lo_name);
for (i = 0; i < rowner_loops; i++) {
- v = rw->rw_lock;
+ v = RW_READ_VALUE(rw);
if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
break;
cpu_spinwait();
}
+ v = RW_READ_VALUE(rw);
#ifdef KDTRACE_HOOKS
lda.spin_cnt += rowner_loops - i;
#endif
@@ -493,7 +502,7 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
* The lock might have been released while we spun, so
* recheck its state and restart the loop if needed.
*/
- v = rw->rw_lock;
+ v = RW_READ_VALUE(rw);
if (RW_CAN_READ(v)) {
turnstile_cancel(ts);
continue;
@@ -531,6 +540,7 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
if (!atomic_cmpset_ptr(&rw->rw_lock, v,
v | RW_LOCK_READ_WAITERS)) {
turnstile_cancel(ts);
+ v = RW_READ_VALUE(rw);
continue;
}
if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -556,6 +566,7 @@ __rw_rlock(volatile uintptr_t *c, const char *file, int line)
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
__func__, rw);
+ v = RW_READ_VALUE(rw);
}
#ifdef KDTRACE_HOOKS
all_time += lockstat_nsecs(&rw->lock_object);
@@ -639,13 +650,12 @@ _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
/* TODO: drop "owner of record" here. */
-
+ x = RW_READ_VALUE(rw);
for (;;) {
/*
* See if there is more than one read lock held. If so,
* just drop one and return.
*/
- x = rw->rw_lock;
if (RW_READERS(x) > 1) {
if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
x - RW_ONE_READER)) {
@@ -656,6 +666,7 @@ _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
(void *)(x - RW_ONE_READER));
break;
}
+ x = RW_READ_VALUE(rw);
continue;
}
/*
@@ -672,6 +683,7 @@ _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
__func__, rw);
break;
}
+ x = RW_READ_VALUE(rw);
continue;
}
/*
@@ -707,6 +719,7 @@ _rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
x)) {
turnstile_chain_unlock(&rw->lock_object);
+ x = RW_READ_VALUE(rw);
continue;
}
if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -772,8 +785,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
lock_delay_arg_init(&lda, NULL);
#endif
rw = rwlock2rw(c);
+ v = RW_READ_VALUE(rw);
- if (rw_wlocked(rw)) {
+ if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
("%s: recursing but non-recursive rw %s @ %s:%d\n",
__func__, rw->lock_object.lo_name, file, line));
@@ -789,11 +803,15 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
#ifdef KDTRACE_HOOKS
all_time -= lockstat_nsecs(&rw->lock_object);
- state = rw->rw_lock;
+ state = v;
#endif
for (;;) {
- if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
- break;
+ if (v == RW_UNLOCKED) {
+ if (_rw_write_lock(rw, tid))
+ break;
+ v = RW_READ_VALUE(rw);
+ continue;
+ }
#ifdef KDTRACE_HOOKS
lda.spin_cnt++;
#endif
@@ -808,8 +826,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
* running on another CPU, spin until the owner stops
* running or the state of the lock changes.
*/
- v = rw->rw_lock;
- owner = (struct thread *)RW_OWNER(v);
+ owner = lv_rw_wowner(v);
if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
@@ -817,9 +834,11 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
"spinning", "lockname:\"%s\"",
rw->lock_object.lo_name);
- while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
- TD_IS_RUNNING(owner))
+ do {
lock_delay(&lda);
+ v = RW_READ_VALUE(rw);
+ owner = lv_rw_wowner(v);
+ } while (owner != NULL && TD_IS_RUNNING(owner));
KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
"running");
continue;
@@ -829,6 +848,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
if (!(v & RW_LOCK_WRITE_SPINNER)) {
if (!atomic_cmpset_ptr(&rw->rw_lock, v,
v | RW_LOCK_WRITE_SPINNER)) {
+ v = RW_READ_VALUE(rw);
continue;
}
}
@@ -843,6 +863,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
}
KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
"running");
+ v = RW_READ_VALUE(rw);
#ifdef KDTRACE_HOOKS
lda.spin_cnt += rowner_loops - i;
#endif
@@ -851,7 +872,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
}
#endif
ts = turnstile_trywait(&rw->lock_object);
- v = rw->rw_lock;
+ v = RW_READ_VALUE(rw);
#ifdef ADAPTIVE_RWLOCKS
/*
@@ -887,6 +908,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
break;
}
turnstile_cancel(ts);
+ v = RW_READ_VALUE(rw);
continue;
}
/*
@@ -898,6 +920,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
if (!atomic_cmpset_ptr(&rw->rw_lock, v,
v | RW_LOCK_WRITE_WAITERS)) {
turnstile_cancel(ts);
+ v = RW_READ_VALUE(rw);
continue;
}
if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -925,6 +948,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
#ifdef ADAPTIVE_RWLOCKS
spintries = 0;
#endif
+ v = RW_READ_VALUE(rw);
}
#ifdef KDTRACE_HOOKS
all_time += lockstat_nsecs(&rw->lock_object);
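
The read-lock path drives the reader-count bump from the snapshot as well: RW_CAN_READ() is evaluated on v and the count is incremented with a cmpset against that same value, so v is refreshed only when the cmpset loses a race. A condensed sketch of the __rw_rlock loop shape above (kernel identifiers; the spinning and turnstile handling is reduced to a comment):

	v = RW_READ_VALUE(rw);
	for (;;) {
		if (RW_CAN_READ(v)) {
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER))
				break;			/* reader count bumped */
			v = RW_READ_VALUE(rw);		/* lost the race: refresh */
			continue;
		}
		/* write-locked or waiters pending: spin on the owner or
		 * block on the turnstile, then refresh v for the next pass */
		v = RW_READ_VALUE(rw);
	}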
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index c1830059bd53..d370d287fce5 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -543,8 +543,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
lock_delay_arg_init(&lda, NULL);
#endif
+ x = SX_READ_VALUE(sx);
+
/* If we already hold an exclusive lock, then recurse. */
- if (sx_xlocked(sx)) {
+ if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
sx->lock_object.lo_name, file, line));
@@ -561,12 +563,15 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
#ifdef KDTRACE_HOOKS
all_time -= lockstat_nsecs(&sx->lock_object);
- state = sx->sx_lock;
+ state = x;
#endif
for (;;) {
- if (sx->sx_lock == SX_LOCK_UNLOCKED &&
- atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
- break;
+ if (x == SX_LOCK_UNLOCKED) {
+ if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, tid))
+ break;
+ x = SX_READ_VALUE(sx);
+ continue;
+ }
#ifdef KDTRACE_HOOKS
lda.spin_cnt++;
#endif
@@ -581,11 +586,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
* running on another CPU, spin until the owner stops
* running or the state of the lock changes.
*/
- x = sx->sx_lock;
if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
if ((x & SX_LOCK_SHARED) == 0) {
- x = SX_OWNER(x);
- owner = (struct thread *)x;
+ owner = lv_sx_owner(x);
if (TD_IS_RUNNING(owner)) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR3(KTR_LOCK,
@@ -596,9 +599,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
"lockname:\"%s\"",
sx->lock_object.lo_name);
GIANT_SAVE();
- while (SX_OWNER(sx->sx_lock) == x &&
- TD_IS_RUNNING(owner))
+ do {
lock_delay(&lda);
+ x = SX_READ_VALUE(sx);
+ owner = lv_sx_owner(x);
+ } while (owner != NULL &&
+ TD_IS_RUNNING(owner));
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
continue;
@@ -625,6 +631,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
}
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
+ x = SX_READ_VALUE(sx);
if (i != asx_loops)
continue;
}
@@ -632,7 +639,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
#endif
sleepq_lock(&sx->lock_object);
- x = sx->sx_lock;
+ x = SX_READ_VALUE(sx);
/*
* If the lock was released while spinning on the
@@ -681,6 +688,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
break;
}
sleepq_release(&sx->lock_object);
+ x = SX_READ_VALUE(sx);
continue;
}
@@ -692,6 +700,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
if (!atomic_cmpset_ptr(&sx->sx_lock, x,
x | SX_LOCK_EXCLUSIVE_WAITERS)) {
sleepq_release(&sx->lock_object);
+ x = SX_READ_VALUE(sx);
continue;
}
if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -733,6 +742,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
__func__, sx);
+ x = SX_READ_VALUE(sx);
}
#ifdef KDTRACE_HOOKS
all_time += lockstat_nsecs(&sx->lock_object);
@@ -852,20 +862,18 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
lock_delay_arg_init(&lda, NULL);
#endif
#ifdef KDTRACE_HOOKS
- state = sx->sx_lock;
all_time -= lockstat_nsecs(&sx->lock_object);
#endif
+ x = SX_READ_VALUE(sx);
+#ifdef KDTRACE_HOOKS
+ state = x;
+#endif
/*
* As with rwlocks, we don't make any attempt to try to block
* shared locks once there is an exclusive waiter.
*/
for (;;) {
-#ifdef KDTRACE_HOOKS
- lda.spin_cnt++;
-#endif
- x = sx->sx_lock;
-
/*
* If no other thread has an exclusive lock then try to bump up
* the count of sharers. Since we have to preserve the state
@@ -883,8 +891,13 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
(void *)(x + SX_ONE_SHARER));
break;
}
+ x = SX_READ_VALUE(sx);
continue;
}
+#ifdef KDTRACE_HOOKS
+ lda.spin_cnt++;
+#endif
+
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -898,8 +911,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
* changes.
*/
if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
- x = SX_OWNER(x);
- owner = (struct thread *)x;
+ owner = lv_sx_owner(x);
if (TD_IS_RUNNING(owner)) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR3(KTR_LOCK,
@@ -909,9 +921,11 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
sched_tdname(curthread), "spinning",
"lockname:\"%s\"", sx->lock_object.lo_name);
GIANT_SAVE();
- while (SX_OWNER(sx->sx_lock) == x &&
- TD_IS_RUNNING(owner))
+ do {
lock_delay(&lda);
+ x = SX_READ_VALUE(sx);
+ owner = lv_sx_owner(x);
+ } while (owner != NULL && TD_IS_RUNNING(owner));
KTR_STATE0(KTR_SCHED, "thread",
sched_tdname(curthread), "running");
continue;
@@ -924,7 +938,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
* start the process of blocking.
*/
sleepq_lock(&sx->lock_object);
- x = sx->sx_lock;
+ x = SX_READ_VALUE(sx);
/*
* The lock could have been released while we spun.
@@ -946,6 +960,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
owner = (struct thread *)SX_OWNER(x);
if (TD_IS_RUNNING(owner)) {
sleepq_release(&sx->lock_object);
+ x = SX_READ_VALUE(sx);
continue;
}
}
@@ -960,6 +975,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
if (!atomic_cmpset_ptr(&sx->sx_lock, x,
x | SX_LOCK_SHARED_WAITERS)) {
sleepq_release(&sx->lock_object);
+ x = SX_READ_VALUE(sx);
continue;
}
if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -1000,6 +1016,7 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
__func__, sx);
+ x = SX_READ_VALUE(sx);
}
#ifdef KDTRACE_HOOKS
all_time += lockstat_nsecs(&sx->lock_object);
@@ -1034,9 +1051,8 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
if (SCHEDULER_STOPPED())
return;
+ x = SX_READ_VALUE(sx);
for (;;) {
- x = sx->sx_lock;
-
/*
* We should never have sharers while at least one thread
* holds a shared lock.
@@ -1058,6 +1074,8 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
(void *)(x - SX_ONE_SHARER));
break;
}
+
+ x = SX_READ_VALUE(sx);
continue;
}
@@ -1074,6 +1092,7 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
__func__, sx);
break;
}
+ x = SX_READ_VALUE(sx);
continue;
}
@@ -1095,6 +1114,7 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
SX_LOCK_UNLOCKED)) {
sleepq_release(&sx->lock_object);
+ x = SX_READ_VALUE(sx);
continue;
}
if (LOCK_LOG_TEST(&sx->lock_object, 0))
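
_sx_sunlock_hard() applies the same discipline on release: the sharer count is decremented against the snapshot with a release cmpset, and x is refreshed only after a cmpset fails. Condensed from the hunks above (kernel identifiers; the last-sharer and waiter hand-off cases are reduced to a comment):

	x = SX_READ_VALUE(sx);
	for (;;) {
		if (SX_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
			    x - SX_ONE_SHARER))
				break;			/* dropped one sharer */
			x = SX_READ_VALUE(sx);		/* raced: refresh and retry */
			continue;
		}
		/* last sharer: clear the lock or wake exclusive waiters
		 * (elided here), refreshing x whenever a cmpset fails */
		break;
	}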
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 374aaabf59d4..11ed9d766d95 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -420,9 +420,15 @@ extern struct mtx_pool *mtxpool_sleep;
_sleep((chan), &(mtx)->lock_object, (pri), (wmesg), \
tick_sbt * (timo), 0, C_HARDCLOCK)
+#define MTX_READ_VALUE(m) ((m)->mtx_lock)
+
#define mtx_initialized(m) lock_initialized(&(m)->lock_object)
-#define mtx_owned(m) (((m)->mtx_lock & ~MTX_FLAGMASK) == (uintptr_t)curthread)
+#define lv_mtx_owner(v) ((struct thread *)((v) & ~MTX_FLAGMASK))
+
+#define mtx_owner(m) lv_mtx_owner(MTX_READ_VALUE(m))
+
+#define mtx_owned(m) (mtx_owner(m) == curthread)
#define mtx_recursed(m) ((m)->mtx_recurse != 0)
diff --git a/sys/sys/rwlock.h b/sys/sys/rwlock.h
index 6b4f505e0d01..e5b1166cedc6 100644
--- a/sys/sys/rwlock.h
+++ b/sys/sys/rwlock.h
@@ -76,6 +76,8 @@
#define rw_recurse lock_object.lo_data
+#define RW_READ_VALUE(x) ((x)->rw_lock)
+
/* Very simple operations on rw_lock. */
/* Try to obtain a write lock once. */
diff --git a/sys/sys/sx.h b/sys/sys/sx.h
index 57a31d9aace8..e8cffaa2593d 100644
--- a/sys/sys/sx.h
+++ b/sys/sys/sx.h
@@ -88,6 +88,11 @@
#define sx_recurse lock_object.lo_data
+#define SX_READ_VALUE(sx) ((sx)->sx_lock)
+
+#define lv_sx_owner(v) \
+ ((v & SX_LOCK_SHARED) ? NULL : (struct thread *)SX_OWNER(v))
+
/*
* Function prototipes. Routines that start with an underscore are not part
* of the public interface and are wrappered with a macro.
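
Taken together, the header changes give each primitive a value-reading macro plus a value-decoding owner macro, and the existing predicates are recomposed from them. A small usage sketch (the local variables are illustrative; the macros are the ones added above, and the rwlock/sx variants, lv_rw_wowner() and lv_sx_owner(), return NULL when the snapshot shows a read- or shared-locked word):

	uintptr_t v = MTX_READ_VALUE(m);	/* one read of m->mtx_lock */
	struct thread *owner = lv_mtx_owner(v);	/* decode: strip MTX_FLAGMASK */

	if (owner == curthread) {
		/* equivalent to mtx_owned(m), but reuses the snapshot v
		 * instead of reading the lock word a second time */
	}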