author    Mateusz Guzik <mjg@FreeBSD.org>    2017-11-25 20:10:33 +0000
committer Mateusz Guzik <mjg@FreeBSD.org>    2017-11-25 20:10:33 +0000
commit    93118b62f9b7a180f15b054b5bf0889f2ef854b0 (patch)
tree      e52cad03455cbb9edadedf60786cacb49315e5c5 /sys
parent    2e106e0427f1a2b32c90a2bfdf7ebbb880f65d70 (diff)
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/kern_rwlock.c  39  ++++++++++++++++-----------------------
-rw-r--r--  sys/kern/kern_sx.c      34  +++++++++++++---------------------
2 files changed, 29 insertions(+), 44 deletions(-)
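
Every hunk below applies the same transformation: an atomic_cmpset(9)-based
retry loop becomes an atomic_fcmpset(9)-based one. atomic_cmpset_ptr() takes
the expected value by value and only reports success or failure, so a failed
attempt had to drop the turnstile or sleepqueue lock, re-read the lock word,
and restart the outer loop. atomic_fcmpset_ptr() takes the expected value by
pointer and, on failure, writes the lock word it observed back through that
pointer, so the caller can jump straight to a retry label with a fresh value
in hand and the queue lock still held. Below is a minimal userland sketch of
the pattern using C11 atomics, whose atomic_compare_exchange_strong() has the
same failure semantics as fcmpset; the lock word and waiters bit are made up
for illustration and are not the kern_rwlock.c definitions:

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uintptr_t lock_word;	/* stand-in for rw->rw_lock */
	#define	WAITERS_BIT	((uintptr_t)0x04)	/* hypothetical flag */

	/* Old style: cmpset discards the observed value, forcing a reload. */
	static void
	set_waiters_cmpset(void)
	{
		uintptr_t v, expected;

		for (;;) {
			v = atomic_load(&lock_word);
			if (v & WAITERS_BIT)
				return;
			expected = v;
			if (atomic_compare_exchange_strong(&lock_word,
			    &expected, v | WAITERS_BIT))
				return;
			/* Failed: loop back and reload the word from scratch. */
		}
	}

	/* New style: a failed CAS already refreshed v, so just jump back. */
	static void
	set_waiters_fcmpset(void)
	{
		uintptr_t v;

		v = atomic_load(&lock_word);
	retry:
		if (v & WAITERS_BIT)
			return;
		if (!atomic_compare_exchange_strong(&lock_word, &v,
		    v | WAITERS_BIT))
			goto retry;	/* v now holds the observed value */
	}
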
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index c0bc0c7510b27..3c2cab87b7060 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -526,6 +526,7 @@ __rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
* recheck its state and restart the loop if needed.
*/
v = RW_READ_VALUE(rw);
+retry_ts:
if (__rw_can_read(td, v, false)) {
turnstile_cancel(ts);
continue;
@@ -561,12 +562,9 @@ __rw_rlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
* lock and restart the loop.
*/
if (!(v & RW_LOCK_READ_WAITERS)) {
- if (!atomic_cmpset_ptr(&rw->rw_lock, v,
- v | RW_LOCK_READ_WAITERS)) {
- turnstile_cancel(ts);
- v = RW_READ_VALUE(rw);
- continue;
- }
+ if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
+ v | RW_LOCK_READ_WAITERS))
+ goto retry_ts;
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p set read waiters flag",
__func__, rw);
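
Condensed from the two hunks above, the shared-waiters path now reads as
follows; after a failed fcmpset, v already holds the current lock word, and
control jumps back to retry_ts with the turnstile from turnstile_trywait()
still held so that __rw_can_read() is re-checked, instead of cancelling the
turnstile and restarting the outer loop as the cmpset version did:

	v = RW_READ_VALUE(rw);
retry_ts:
	if (__rw_can_read(td, v, false)) {
		turnstile_cancel(ts);
		continue;
	}
	...
	if (!(v & RW_LOCK_READ_WAITERS)) {
		if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
		    v | RW_LOCK_READ_WAITERS))
			goto retry_ts;
	...
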
@@ -757,7 +755,9 @@ __rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
* last reader, so grab the turnstile lock.
*/
turnstile_chain_lock(&rw->lock_object);
- v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
+ v = RW_READ_VALUE(rw);
+retry_ts:
+ v &= (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
MPASS(v & RW_LOCK_WAITERS);
/*
@@ -782,12 +782,9 @@ __rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
x |= (v & RW_LOCK_READ_WAITERS);
} else
queue = TS_SHARED_QUEUE;
- if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
- x)) {
- turnstile_chain_unlock(&rw->lock_object);
- v = RW_READ_VALUE(rw);
- continue;
- }
+ v |= RW_READERS_LOCK(1);
+ if (!atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, x))
+ goto retry_ts;
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
__func__, rw);
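
Note the expected-value handling in __rw_runlock_hard(): retry_ts re-enters
after the raw lock word has been left in v (by RW_READ_VALUE() on the first
pass, or by the failed fcmpset on a retry), masks it down to the waiter bits
used to pick the wakeup queue, and the code then re-ors RW_READERS_LOCK(1)
back in before the release attempt, rebuilding the expected value on every
pass rather than reloading it.
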
@@ -983,6 +980,7 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
#endif
ts = turnstile_trywait(&rw->lock_object);
v = RW_READ_VALUE(rw);
+retry_ts:
owner = lv_rw_wowner(v);
#ifdef ADAPTIVE_RWLOCKS
@@ -1010,16 +1008,14 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
if ((v & ~x) == RW_UNLOCKED) {
x &= ~RW_LOCK_WRITE_SPINNER;
- if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
+ if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid | x)) {
if (x)
turnstile_claim(ts);
else
turnstile_cancel(ts);
break;
}
- turnstile_cancel(ts);
- v = RW_READ_VALUE(rw);
- continue;
+ goto retry_ts;
}
/*
* If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
@@ -1027,12 +1023,9 @@ __rw_wlock_hard(volatile uintptr_t *c, uintptr_t v LOCK_FILE_LINE_ARG_DEF)
* again.
*/
if (!(v & RW_LOCK_WRITE_WAITERS)) {
- if (!atomic_cmpset_ptr(&rw->rw_lock, v,
- v | RW_LOCK_WRITE_WAITERS)) {
- turnstile_cancel(ts);
- v = RW_READ_VALUE(rw);
- continue;
- }
+ if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
+ v | RW_LOCK_WRITE_WAITERS))
+ goto retry_ts;
if (LOCK_LOG_TEST(&rw->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p set write waiters flag",
__func__, rw);
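
For reference, atomic_fcmpset_ptr(9) behaves as if implemented like the
hedged sketch below. The real machine-dependent versions do not perform the
second read; they hand back the value observed by the hardware CAS itself
(the old value cmpxchg leaves in a register on x86, or the ll/sc-loaded
value elsewhere), which is where the saved reload on the failure path comes
from:

	#include <sys/types.h>
	#include <machine/atomic.h>

	/* Illustrative emulation only; not FreeBSD's implementation. */
	static int
	fcmpset_ptr_emulated(volatile uintptr_t *dst, uintptr_t *old,
	    uintptr_t new)
	{
		if (atomic_cmpset_ptr(dst, *old, new))
			return (1);
		*old = *dst;	/* pass the current value back to the caller */
		return (0);
	}
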
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index d5acd0ad89f9c..44d0bd796d831 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -665,6 +665,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
sleepq_lock(&sx->lock_object);
x = SX_READ_VALUE(sx);
+retry_sleepq:
/*
* If the lock was released while spinning on the
@@ -704,17 +705,13 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
* fail, restart the loop.
*/
if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
- if (atomic_cmpset_acq_ptr(&sx->sx_lock,
- SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
- tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
- sleepq_release(&sx->lock_object);
- CTR2(KTR_LOCK, "%s: %p claimed by new writer",
- __func__, sx);
- break;
- }
+ if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
+ tid | SX_LOCK_EXCLUSIVE_WAITERS))
+ goto retry_sleepq;
sleepq_release(&sx->lock_object);
- x = SX_READ_VALUE(sx);
- continue;
+ CTR2(KTR_LOCK, "%s: %p claimed by new writer",
+ __func__, sx);
+ break;
}
/*
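
The claim path above is also restructured: the success branch
(sleepq_release(), the KTR trace, break) moves out of the atomic operation's
if-block, and only the failure case branches to retry_sleepq, which, like the
rwlock paths, retries with the refreshed x while the sleepqueue chain lock
taken by sleepq_lock() stays held.
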
@@ -722,11 +719,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
* than loop back and retry.
*/
if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
- if (!atomic_cmpset_ptr(&sx->sx_lock, x,
+ if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
x | SX_LOCK_EXCLUSIVE_WAITERS)) {
- sleepq_release(&sx->lock_object);
- x = SX_READ_VALUE(sx);
- continue;
+ goto retry_sleepq;
}
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
@@ -986,7 +981,7 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
*/
sleepq_lock(&sx->lock_object);
x = SX_READ_VALUE(sx);
-
+retry_sleepq:
/*
* The lock could have been released while we spun.
* In this case loop back and retry.
@@ -1019,12 +1014,9 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
* back.
*/
if (!(x & SX_LOCK_SHARED_WAITERS)) {
- if (!atomic_cmpset_ptr(&sx->sx_lock, x,
- x | SX_LOCK_SHARED_WAITERS)) {
- sleepq_release(&sx->lock_object);
- x = SX_READ_VALUE(sx);
- continue;
- }
+ if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
+ x | SX_LOCK_SHARED_WAITERS))
+ goto retry_sleepq;
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
__func__, sx);
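
The shared-lock path mirrors __rw_rlock_hard(): a failed attempt to set
SX_LOCK_SHARED_WAITERS jumps back to retry_sleepq with x already refreshed,
re-running the earlier lock-state checks without releasing and re-taking the
sleepqueue lock in between.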