author     Attilio Rao <attilio@FreeBSD.org>  2009-09-09 09:17:31 +0000
committer  Attilio Rao <attilio@FreeBSD.org>  2009-09-09 09:17:31 +0000
commit     db0c92ce82f76f9aa389bc2b0e76deee6a5009ab (patch)
tree       f53b14eeeece3012fa6fba1d77e35a78198d9b39 /sys/kern/kern_lock.c
parent     6a89c3ede1df004863468a09fc885baa80e82937 (diff)
Diffstat (limited to 'sys/kern/kern_lock.c')
-rw-r--r--  sys/kern/kern_lock.c | 13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 29ae4accfa71..e6f2f5362494 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -467,7 +467,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
/*
* If the owner is running on another CPU, spin until
* the owner stops running or the state of the lock
- * changes.
+ * changes. We need a double-state handle here
+ * because for a failed acquisition the lock can be
+ * either held in exclusive mode or shared mode
+ * (for the writer starvation avoidance technique).
*/
if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
LK_HOLDER(x) != LK_KERNPROC) {
@@ -491,8 +494,10 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
while (LK_HOLDER(lk->lk_lock) ==
(uintptr_t)owner && TD_IS_RUNNING(owner))
cpu_spinwait();
+ GIANT_RESTORE();
+ continue;
} else if (LK_CAN_ADAPT(lk, flags) &&
- (x & LK_SHARE) !=0 && LK_SHARERS(x) &&
+ (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
spintries < alk_retries) {
if (flags & LK_INTERLOCK) {
class->lc_unlock(ilk);
@@ -511,6 +516,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
break;
cpu_spinwait();
}
+ GIANT_RESTORE();
if (i != alk_loops)
continue;
}
@@ -704,6 +710,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
while (LK_HOLDER(lk->lk_lock) ==
(uintptr_t)owner && TD_IS_RUNNING(owner))
cpu_spinwait();
+ GIANT_RESTORE();
+ continue;
} else if (LK_CAN_ADAPT(lk, flags) &&
(x & LK_SHARE) != 0 && LK_SHARERS(x) &&
spintries < alk_retries) {
@@ -727,6 +735,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
break;
cpu_spinwait();
}
+ GIANT_RESTORE();
if (i != alk_loops)
continue;
}
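
For readers unfamiliar with the pattern these hunks touch, below is a rough user-space C sketch of the adaptive-spin-then-retry control flow, written with C11 atomics. It is an illustration under stated assumptions, not the lockmgr implementation: fake_lock, owner_running, giant_restore() and fake_lock_acquire() are hypothetical stand-ins for the lk_lock word, TD_IS_RUNNING(), GIANT_RESTORE() and the hard path of __lockmgr_args(), and the real code also handles the shared-owner (LK_SHARE) case described in the expanded comment.

	/*
	 * Hypothetical user-space sketch only; not FreeBSD kernel code.
	 */
	#include <sched.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct fake_lock {
		_Atomic uintptr_t word;		/* 0 == free, else owner id (stand-in for lk_lock) */
		_Atomic bool owner_running;	/* stand-in for TD_IS_RUNNING(owner) */
	};

	/* Placeholder for GIANT_RESTORE(): reacquire Giant if it had been dropped. */
	static void
	giant_restore(void)
	{
	}

	static void
	fake_lock_acquire(struct fake_lock *lk, uintptr_t self)
	{
		uintptr_t x;

		for (;;) {
			x = 0;
			/* Fast path: grab the lock if it is free. */
			if (atomic_compare_exchange_strong(&lk->word, &x, self))
				return;
			/* On failure, x holds the owner observed by the CAS. */
			if (x == 0)
				continue;	/* lost a race; simply retry */

			/*
			 * Adaptive spin: while the owner is still running on
			 * another CPU, busy-wait instead of sleeping.  The hunks
			 * above make lockmgr restore Giant and retry the whole
			 * acquisition right after this loop ("GIANT_RESTORE();
			 * continue;") instead of falling through further.
			 */
			while (atomic_load(&lk->word) == x &&
			    atomic_load(&lk->owner_running))
				sched_yield();	/* user-space analogue of cpu_spinwait() */

			giant_restore();
		}
	}

	int
	main(void)
	{
		struct fake_lock lk = { .word = 0, .owner_running = false };

		fake_lock_acquire(&lk, (uintptr_t)1);
		printf("lock word after acquire: %#lx\n",
		    (unsigned long)atomic_load(&lk.word));
		return (0);
	}

What can be read directly from the diff is the control-flow change: after spinning on a running exclusive owner, and after the bounded spin on shared holders, the code now calls GIANT_RESTORE() (and, in the exclusive-owner case, retries via continue) rather than proceeding with Giant still dropped.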