path: root/sys/kern/kern_sx.c
author    Mark Johnston <markj@FreeBSD.org>  2024-07-29 01:42:19 +0000
committer Mark Johnston <markj@FreeBSD.org>  2024-07-29 01:43:59 +0000
commit    01518f5eede79cf65319d455eb50e78c9efa2b51 (patch)
tree      274dafdc5fcc4a8edf1a4bac74fb766daeafaf60 /sys/kern/kern_sx.c
parent    d4c4ca856b267669b9da18a8ea676f5e556ee5c8 (diff)
Diffstat (limited to 'sys/kern/kern_sx.c')
-rw-r--r--  sys/kern/kern_sx.c | 22
1 file changed, 5 insertions(+), 17 deletions(-)
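All three hunks below apply the same mechanical simplification: sleepq_broadcast() no longer reports whether the swapper must be woken, so callers stop tracking wakeup_swapper and drop the follow-up kick_proc0() call. A minimal sketch of the pattern, condensed from the hunks below rather than quoted verbatim:

	/* Before: record whether a swapped-out thread was made runnable. */
	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();		/* nudge proc0, the swapper */

	/* After: broadcast and release; no swapper bookkeeping remains. */
	sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
	sleepq_release(&sx->lock_object);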
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 18e6ba232c4a..4cc8ac8ce106 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -474,7 +474,6 @@ void
 sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
 {
 	uintptr_t x;
-	int wakeup_swapper;
 
 	if (SCHEDULER_STOPPED())
 		return;
@@ -516,18 +515,14 @@ sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
 	/*
 	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
 	 * shared lock.  If there are any shared waiters, wake them up.
 	 */
-	wakeup_swapper = 0;
 	x = sx->sx_lock;
 	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
 	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
 	if (x & SX_LOCK_SHARED_WAITERS)
-		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
-		    0, SQ_SHARED_QUEUE);
+		sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
+		    SQ_SHARED_QUEUE);
 	sleepq_release(&sx->lock_object);
-	if (wakeup_swapper)
-		kick_proc0();
-
 out:
 	curthread->td_sx_slocks++;
 	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
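Reassembled from the hunk above, the shared-waiter wakeup in sx_downgrade_int() now reads:

	x = sx->sx_lock;
	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
	if (x & SX_LOCK_SHARED_WAITERS)
		sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
		    SQ_SHARED_QUEUE);
	sleepq_release(&sx->lock_object);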
@@ -920,7 +915,7 @@ void
 _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 {
 	uintptr_t tid, setx;
-	int queue, wakeup_swapper;
+	int queue;
 
 	if (SCHEDULER_STOPPED())
 		return;
@@ -977,11 +972,8 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 	    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
 	    "exclusive");
 
-	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
-	    queue);
+	sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
 	sleepq_release(&sx->lock_object);
-	if (wakeup_swapper)
-		kick_proc0();
 }
 
 static __always_inline bool
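These callers only build if sleepq_broadcast() stops returning the swapper hint. The matching prototype change in sys/sys/sleepqueue.h is not part of this diff; presumably it reads along these lines (the "before" signature is the one documented in sleepqueue(9), the "after" is an assumption):

/* Before: a non-zero return meant the caller had to kick the swapper. */
int	sleepq_broadcast(const void *wchan, int flags, int pri, int queue);

/* After (assumed): no return value left to consult. */
void	sleepq_broadcast(const void *wchan, int flags, int pri, int queue);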
@@ -1333,7 +1325,6 @@ static void __noinline
 _sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
     LOCK_FILE_LINE_ARG_DEF)
 {
-	int wakeup_swapper = 0;
 	uintptr_t setx, queue;
 
 	if (SCHEDULER_STOPPED())
@@ -1366,14 +1357,11 @@ _sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
 			    "exclusive queue", __func__, sx);
-		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
-		    0, queue);
+		sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
 		td->td_sx_slocks--;
 		break;
 	}
 	sleepq_release(&sx->lock_object);
-	if (wakeup_swapper)
-		kick_proc0();
 out_lockstat:
 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
 }
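Taken together, the three hunks remove the last per-caller swapper bookkeeping from kern_sx.c. kick_proc0() existed to wake proc0 (the swapper) so it could swap a just-awakened thread's kernel stack back into memory; once sleeping threads can no longer have their stacks swapped out, a wakeup needs no such follow-up.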