about summary refs log tree commit diff
path: root/sys/kern/kern_sx.c
diff options
context:
space:
mode:
author	Mateusz Guzik <mjg@FreeBSD.org>	2018-12-05 16:43:03 +0000
committer	Mateusz Guzik <mjg@FreeBSD.org>	2018-12-05 16:43:03 +0000
commit	f26db6948d5a49459358114afc35e07b4f846594 (patch)
tree	8d471c303b737a8d7711ea3918f2237c8325f72a /sys/kern/kern_sx.c
parent	52da58896195a574a0a7361148b642859a5e5026 (diff)
Notes
Diffstat (limited to 'sys/kern/kern_sx.c')
-rw-r--r--	sys/kern/kern_sx.c	43
1 file changed, 11 insertions, 32 deletions
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 600a3d03558a..d0ada0956155 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -71,8 +71,6 @@ __FBSDID("$FreeBSD$");
#define ADAPTIVE_SX
#endif
-CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
-
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
@@ -233,7 +231,7 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
int flags;
MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
- SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
+ SX_NOPROFILE | SX_NEW)) == 0);
ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
("%s: sx_lock not aligned for %s: %p", __func__, description,
&sx->sx_lock));
@@ -252,7 +250,6 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
if (opts & SX_NEW)
flags |= LO_NEW;
- flags |= opts & SX_NOADAPTIVE;
lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
sx->sx_lock = SX_LOCK_UNLOCKED;
sx->sx_recurse = 0;
@@ -572,7 +569,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
volatile struct thread *owner;
u_int i, n, spintries = 0;
enum { READERS, WRITER } sleep_reason = READERS;
- bool adaptive;
bool in_critical = false;
#endif
#ifdef LOCK_PROFILING
@@ -642,10 +638,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
-#ifdef ADAPTIVE_SX
- adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
-#endif
-
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -669,8 +661,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
- if (__predict_false(!adaptive))
- goto sleepq;
/*
* If the lock is write locked and the owner is
* running on another CPU, spin until the owner stops
@@ -762,20 +752,18 @@ retry_sleepq:
* chain lock. If so, drop the sleep queue lock and try
* again.
*/
- if (adaptive) {
- if (!(x & SX_LOCK_SHARED)) {
- owner = (struct thread *)SX_OWNER(x);
- if (TD_IS_RUNNING(owner)) {
- sleepq_release(&sx->lock_object);
- sx_drop_critical(x, &in_critical,
- &extra_work);
- continue;
- }
- } else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
+ if (!(x & SX_LOCK_SHARED)) {
+ owner = (struct thread *)SX_OWNER(x);
+ if (TD_IS_RUNNING(owner)) {
sleepq_release(&sx->lock_object);
- sx_drop_critical(x, &in_critical, &extra_work);
+ sx_drop_critical(x, &in_critical,
+ &extra_work);
continue;
}
+ } else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
+ sleepq_release(&sx->lock_object);
+ sx_drop_critical(x, &in_critical, &extra_work);
+ continue;
}
#endif
@@ -1021,7 +1009,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
#ifdef ADAPTIVE_SX
volatile struct thread *owner;
u_int i, n, spintries = 0;
- bool adaptive;
#endif
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
@@ -1066,10 +1053,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
lock_delay_arg_init(&lda, NULL);
#endif
-#ifdef ADAPTIVE_SX
- adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
-#endif
-
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -1095,9 +1078,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
#endif
#ifdef ADAPTIVE_SX
- if (__predict_false(!adaptive))
- goto sleepq;
-
/*
* If the owner is running on another CPU, spin until
* the owner stops running or the state of the lock
@@ -1154,7 +1134,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
continue;
}
}
-sleepq:
#endif
/*
@@ -1176,7 +1155,7 @@ retry_sleepq:
* the owner stops running or the state of the lock
* changes.
*/
- if (!(x & SX_LOCK_SHARED) && adaptive) {
+ if (!(x & SX_LOCK_SHARED)) {
owner = (struct thread *)SX_OWNER(x);
if (TD_IS_RUNNING(owner)) {
sleepq_release(&sx->lock_object);