-rw-r--r--   sys/kern/kern_lock.c   276
-rw-r--r--   sys/sys/lock.h          15
-rw-r--r--   sys/sys/lockmgr.h       15
-rw-r--r--   sys/vm/vm_map.c         21
-rw-r--r--   sys/vm/vm_map.h         62
5 files changed, 232 insertions, 157 deletions
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 9fbbb7484af3..32c30c5fc9a2 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -2,6 +2,9 @@
* Copyright (c) 1995
* The Regents of the University of California. All rights reserved.
*
+ * Copyright (C) 1997
+ * John S. Dyson. All rights reserved.
+ *
* This code contains ideas from software contributed to Berkeley by
* Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
* System project at Carnegie-Mellon University.
@@ -35,7 +38,7 @@
* SUCH DAMAGE.
*
* @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
- * $Id: kern_lock.c,v 1.1 1997/08/04 17:46:51 smp Exp smp $
+ * $Id: kern_lock.c,v 1.7 1997/08/04 19:11:12 fsmp Exp $
*/
#include <sys/param.h>
@@ -54,58 +57,123 @@
#define COUNT(p, x)
#endif
-#if NCPUS > 1
+#define LOCK_WAIT_TIME 100
+#define LOCK_SAMPLE_WAIT 7
-/*
- * For multiprocessor system, try spin lock first.
- *
- * This should be inline expanded below, but we cannot have #if
- * inside a multiline define.
- */
-int lock_wait_time = 100;
-#define PAUSE(lkp, wanted) \
- if (lock_wait_time > 0) { \
- int i; \
- \
- simple_unlock(&lkp->lk_interlock); \
- for (i = lock_wait_time; i > 0; i--) \
- if (!(wanted)) \
- break; \
- simple_lock(&lkp->lk_interlock); \
- } \
- if (!(wanted)) \
- break;
+#if defined(DIAGNOSTIC)
+#define LOCK_INLINE
+#else
+#define LOCK_INLINE inline
+#endif
-#else /* NCPUS == 1 */
+static int acquire(struct lock *lkp, int extflags, int wanted);
-/*
- * It is an error to spin on a uniprocessor as nothing will ever cause
- * the simple lock to clear while we are executing.
- */
-#define PAUSE(lkp, wanted)
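+/*
+ * Bookkeeping helpers for the shared-lock count: adjust lk_sharecount
+ * and keep the LK_SHARE_NONZERO flag in sync so waiters can test a
+ * single flag bit instead of the counter.
+ */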
+static LOCK_INLINE void
+sharelock(struct lock *lkp, int incr) {
+ lkp->lk_flags |= LK_SHARE_NONZERO;
+ lkp->lk_sharecount += incr;
+}
+
+static LOCK_INLINE void
+shareunlock(struct lock *lkp, int decr) {
+#if defined(DIAGNOSTIC)
+ if (lkp->lk_sharecount < decr)
+#if defined(DDB)
+ Debugger("shareunlock: count < decr");
+#else
+ panic("shareunlock: count < decr");
+#endif
+#endif
-#endif /* NCPUS == 1 */
+ lkp->lk_sharecount -= decr;
+ if (lkp->lk_sharecount == 0)
+ lkp->lk_flags &= ~LK_SHARE_NONZERO;
+}
-/*
- * Acquire a resource.
- */
-#define ACQUIRE(lkp, error, extflags, wanted) \
- PAUSE(lkp, wanted); \
- for (error = 0; wanted; ) { \
- (lkp)->lk_waitcount++; \
- simple_unlock(&(lkp)->lk_interlock); \
- error = tsleep((void *)lkp, (lkp)->lk_prio, \
- (lkp)->lk_wmesg, (lkp)->lk_timo); \
- simple_lock(&(lkp)->lk_interlock); \
- (lkp)->lk_waitcount--; \
- if (error) \
- break; \
- if ((extflags) & LK_SLEEPFAIL) { \
- error = ENOLCK; \
- break; \
- } \
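+/*
+ * Adaptive spin: poll briefly for the wanted flag bits to clear before
+ * the caller falls back to sleeping; returns 0 once they are observed
+ * clear, 1 if the caller should block.
+ */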
+static int
+apause(struct lock *lkp, int flags) {
+ int lock_wait;
+ lock_wait = LOCK_WAIT_TIME;
+ for (; lock_wait > 0; lock_wait--) {
+ int i;
+ if ((lkp->lk_flags & flags) == 0)
+ return 0;
+ simple_unlock(&lkp->lk_interlock);
+ for (i = LOCK_SAMPLE_WAIT; i > 0; i--) {
+ if ((lkp->lk_flags & flags) == 0) {
+ simple_lock(&lkp->lk_interlock);
+ if ((lkp->lk_flags & flags) == 0)
+ return 0;
+ break;
+ }
+ }
+ }
+ return 1;
+}
+
+
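+/*
+ * Wait for all of the flag bits in "wanted" to clear, spinning first
+ * via apause() and then sleeping on the lock, honoring LK_NOWAIT and
+ * LK_SLEEPFAIL from the caller's external flags.
+ */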
+static int
+acquire(struct lock *lkp, int extflags, int wanted) {
+ int error;
+ int lock_wait;
+
+ if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) {
+ return EBUSY;
}
+ error = apause(lkp, wanted);
+ if (error == 0)
+ return 0;
+
+ while ((lkp->lk_flags & wanted) != 0) {
+ lkp->lk_flags |= LK_WAIT_NONZERO;
+ lkp->lk_waitcount++;
+ simple_unlock(&lkp->lk_interlock);
+ error = tsleep(lkp, lkp->lk_prio, lkp->lk_wmesg, lkp->lk_timo);
+ simple_lock(&lkp->lk_interlock);
+ lkp->lk_waitcount--;
+ if (lkp->lk_waitcount == 0)
+ lkp->lk_flags &= ~LK_WAIT_NONZERO;
+ if (error)
+ return error;
+ if (extflags & LK_SLEEPFAIL) {
+ return ENOLCK;
+ }
+ }
+ return 0;
+}
+
+#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
+ LK_SHARE_NONZERO | LK_WAIT_NONZERO)
+
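+/*
+ * As acquire(), but wait for the lock to become completely idle (all
+ * LK_ALL bits clear); used by LK_DRAIN. Drain waiters sleep on
+ * &lk_flags so their wakeups are distinct from normal lock wakeups.
+ */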
+static int
+acquiredrain(struct lock *lkp, int extflags) {
+ int error;
+ int lock_wait;
+
+ if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) {
+ return EBUSY;
+ }
+
+ error = apause(lkp, LK_ALL);
+ if (error == 0)
+ return 0;
+
+ while (lkp->lk_flags & LK_ALL) {
+ lkp->lk_flags |= LK_WAITDRAIN;
+ simple_unlock(&lkp->lk_interlock);
+ error = tsleep(&lkp->lk_flags, lkp->lk_prio,
+ lkp->lk_wmesg, lkp->lk_timo);
+ simple_lock(&lkp->lk_interlock);
+ if (error)
+ return error;
+ if (extflags & LK_SLEEPFAIL) {
+ return ENOLCK;
+ }
+ }
+ return 0;
+}
+
/*
* Initialize a lock; required before use.
*/
@@ -119,7 +187,7 @@ lockinit(lkp, prio, wmesg, timo, flags)
{
simple_lock_init(&lkp->lk_interlock);
- lkp->lk_flags = flags & LK_EXTFLG_MASK;
+ lkp->lk_flags = (flags & LK_EXTFLG_MASK);
lkp->lk_sharecount = 0;
lkp->lk_waitcount = 0;
lkp->lk_exclusivecount = 0;
@@ -166,59 +234,25 @@ lockmgr(lkp, flags, interlkp, p)
int extflags;
error = 0;
- if (p)
- pid = p->p_pid;
- else
- pid = LK_KERNPROC;
+ if (p == NULL)
+ panic("lockmgr: called with null process");
+ pid = p->p_pid;
+
simple_lock(&lkp->lk_interlock);
if (flags & LK_INTERLOCK)
simple_unlock(interlkp);
+
extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
-#ifdef DIAGNOSTIC
- /*
- * Once a lock has drained, the LK_DRAINING flag is set and an
- * exclusive lock is returned. The only valid operation thereafter
- * is a single release of that exclusive lock. This final release
- * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
- * further requests of any sort will result in a panic. The bits
- * selected for these two flags are chosen so that they will be set
- * in memory that is freed (freed memory is filled with 0xdeadbeef).
- * The final release is permitted to give a new lease on life to
- * the lock by specifying LK_REENABLE.
- */
- if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
- if (lkp->lk_flags & LK_DRAINED)
- panic("lockmgr: using decommissioned lock");
- if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
- lkp->lk_lockholder != pid)
- panic("lockmgr: non-release on draining lock: %d\n",
- flags & LK_TYPE_MASK);
- lkp->lk_flags &= ~LK_DRAINING;
- if ((flags & LK_REENABLE) == 0)
- lkp->lk_flags |= LK_DRAINED;
- }
-#endif DIAGNOSTIC
switch (flags & LK_TYPE_MASK) {
case LK_SHARED:
if (lkp->lk_lockholder != pid) {
- /*
- * If just polling, check to see if we will block.
- */
- if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
- error = EBUSY;
- break;
- }
- /*
- * Wait for exclusive locks and upgrades to clear.
- */
- ACQUIRE(lkp, error, extflags, lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
+ error = acquire(lkp, extflags,
+ LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE);
if (error)
break;
- lkp->lk_sharecount++;
+ sharelock(lkp, 1);
COUNT(p, 1);
break;
}
@@ -226,14 +260,14 @@ lockmgr(lkp, flags, interlkp, p)
* We hold an exclusive lock, so downgrade it to shared.
* An alternative would be to fail with EDEADLK.
*/
- lkp->lk_sharecount++;
+ sharelock(lkp, 1);
COUNT(p, 1);
/* fall into downgrade */
case LK_DOWNGRADE:
if (lkp->lk_lockholder != pid || lkp->lk_exclusivecount == 0)
panic("lockmgr: not holding exclusive lock");
- lkp->lk_sharecount += lkp->lk_exclusivecount;
+ sharelock(lkp, lkp->lk_exclusivecount);
lkp->lk_exclusivecount = 0;
lkp->lk_flags &= ~LK_HAVE_EXCL;
lkp->lk_lockholder = LK_NOPROC;
@@ -248,7 +282,7 @@ lockmgr(lkp, flags, interlkp, p)
* exclusive access.
*/
if (lkp->lk_flags & LK_WANT_UPGRADE) {
- lkp->lk_sharecount--;
+ shareunlock(lkp, 1);
COUNT(p, -1);
error = EBUSY;
break;
@@ -264,9 +298,9 @@ lockmgr(lkp, flags, interlkp, p)
* after the upgrade). If we return an error, the file
* will always be unlocked.
*/
- if (lkp->lk_lockholder == pid || lkp->lk_sharecount <= 0)
+ if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
panic("lockmgr: upgrade exclusive lock");
- lkp->lk_sharecount--;
+ shareunlock(lkp, 1);
COUNT(p, -1);
/*
* If we are just polling, check to see if we will block.
@@ -284,7 +318,7 @@ lockmgr(lkp, flags, interlkp, p)
* drop to zero, then take exclusive lock.
*/
lkp->lk_flags |= LK_WANT_UPGRADE;
- ACQUIRE(lkp, error, extflags, lkp->lk_sharecount);
+ error = acquire(lkp, extflags, LK_SHARE_NONZERO);
lkp->lk_flags &= ~LK_WANT_UPGRADE;
if (error)
break;
@@ -301,7 +335,8 @@ lockmgr(lkp, flags, interlkp, p)
* lock, awaken upgrade requestor if we are the last shared
* lock, then request an exclusive lock.
*/
- if (lkp->lk_sharecount == 0 && lkp->lk_waitcount)
+ if ((lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) ==
+ LK_WAIT_NONZERO)
wakeup((void *)lkp);
/* fall into exclusive request */
@@ -319,25 +354,22 @@ lockmgr(lkp, flags, interlkp, p)
/*
* If we are just polling, check to see if we will sleep.
*/
- if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
- lkp->lk_sharecount != 0)) {
+ if ((extflags & LK_NOWAIT) &&
+ (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) {
error = EBUSY;
break;
}
/*
* Try to acquire the want_exclusive flag.
*/
- ACQUIRE(lkp, error, extflags, lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL));
+ error = acquire(lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL));
if (error)
break;
lkp->lk_flags |= LK_WANT_EXCL;
/*
* Wait for shared locks and upgrades to finish.
*/
- ACQUIRE(lkp, error, extflags, lkp->lk_sharecount != 0 ||
- (lkp->lk_flags & LK_WANT_UPGRADE));
+ error = acquire(lkp, extflags, LK_WANT_UPGRADE | LK_SHARE_NONZERO);
lkp->lk_flags &= ~LK_WANT_EXCL;
if (error)
break;
@@ -361,11 +393,11 @@ lockmgr(lkp, flags, interlkp, p)
lkp->lk_flags &= ~LK_HAVE_EXCL;
lkp->lk_lockholder = LK_NOPROC;
}
- } else if (lkp->lk_sharecount != 0) {
- lkp->lk_sharecount--;
+ } else if (lkp->lk_flags & LK_SHARE_NONZERO) {
+ shareunlock(lkp, 1);
COUNT(p, -1);
}
- if (lkp->lk_waitcount)
+ if (lkp->lk_flags & LK_WAIT_NONZERO)
wakeup((void *)lkp);
break;
@@ -378,30 +410,10 @@ lockmgr(lkp, flags, interlkp, p)
*/
if (lkp->lk_lockholder == pid)
panic("lockmgr: draining against myself");
- /*
- * If we are just polling, check to see if we will sleep.
- */
- if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
- lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
- error = EBUSY;
+
+ error = acquiredrain(lkp, extflags);
+ if (error)
break;
- }
- PAUSE(lkp, ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
- lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0));
- for (error = 0; ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
- lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0); ) {
- lkp->lk_flags |= LK_WAITDRAIN;
- simple_unlock(&lkp->lk_interlock);
- if (error = tsleep((void *)&lkp->lk_flags, lkp->lk_prio,
- lkp->lk_wmesg, lkp->lk_timo))
- return (error);
- if ((extflags) & LK_SLEEPFAIL)
- return (ENOLCK);
- simple_lock(&lkp->lk_interlock);
- }
lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
lkp->lk_lockholder = pid;
lkp->lk_exclusivecount = 1;
@@ -414,9 +426,9 @@ lockmgr(lkp, flags, interlkp, p)
flags & LK_TYPE_MASK);
/* NOTREACHED */
}
- if ((lkp->lk_flags & LK_WAITDRAIN) && ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
- lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
+ if ((lkp->lk_flags & LK_WAITDRAIN) &&
+ (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
+ LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) {
lkp->lk_flags &= ~LK_WAITDRAIN;
wakeup((void *)&lkp->lk_flags);
}
diff --git a/sys/sys/lock.h b/sys/sys/lock.h
index 07841baf203d..8807bb0f648b 100644
--- a/sys/sys/lock.h
+++ b/sys/sys/lock.h
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* @(#)lock.h 8.12 (Berkeley) 5/19/95
- * $Id: lock.h,v 1.6 1997/07/24 18:01:34 fsmp Exp $
+ * $Id: lock.h,v 1.7 1997/08/04 19:11:26 fsmp Exp $
*/
#ifndef _LOCK_H_
@@ -126,9 +126,15 @@ struct lock {
*
* Non-persistent external flags.
*/
-#define LK_INTERLOCK 0x00010000 /* unlock passed simple lock after
- getting lk_interlock */
-#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
+#define LK_INTERLOCK 0x00010000 /* unlock passed simple lock after
+ getting lk_interlock */
+#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
+
+/*
+ * Internal state flags corresponding to lk_sharecount and lk_waitcount
+ */
+#define LK_SHARE_NONZERO 0x00100000
+#define LK_WAIT_NONZERO 0x00200000
/*
* Lock return status.
@@ -153,6 +159,7 @@ struct lock {
#define LK_KERNPROC ((pid_t) -2)
#define LK_NOPROC ((pid_t) -1)
+void dumplockinfo(struct lock *lkp);
struct proc;
void lockinit __P((struct lock *, int prio, char *wmesg, int timo,
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index 07841baf203d..8807bb0f648b 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* @(#)lock.h 8.12 (Berkeley) 5/19/95
- * $Id: lock.h,v 1.6 1997/07/24 18:01:34 fsmp Exp $
+ * $Id: lock.h,v 1.7 1997/08/04 19:11:26 fsmp Exp $
*/
#ifndef _LOCK_H_
@@ -126,9 +126,15 @@ struct lock {
*
* Non-persistent external flags.
*/
-#define LK_INTERLOCK 0x00010000 /* unlock passed simple lock after
- getting lk_interlock */
-#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
+#define LK_INTERLOCK 0x00010000 /* unlock passed simple lock after
+ getting lk_interlock */
+#define LK_RETRY 0x00020000 /* vn_lock: retry until locked */
+
+/*
+ * Internal state flags corresponding to lk_sharecount and lk_waitcount
+ */
+#define LK_SHARE_NONZERO 0x00100000
+#define LK_WAIT_NONZERO 0x00200000
/*
* Lock return status.
@@ -153,6 +159,7 @@ struct lock {
#define LK_KERNPROC ((pid_t) -2)
#define LK_NOPROC ((pid_t) -1)
+void dumplockinfo(struct lock *lkp);
struct proc;
void lockinit __P((struct lock *, int prio, char *wmesg, int timo,
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 4019599faf80..640c2aaeddf9 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.c,v 1.84 1997/08/05 23:03:23 dyson Exp $
+ * $Id: vm_map.c,v 1.85 1997/08/06 04:58:03 dyson Exp $
*/
/*
@@ -1378,7 +1378,7 @@ vm_map_user_pageable(map, start, end, new_pageable)
/* First we need to allow map modifications */
vm_map_set_recursive(map);
- lockmgr(&map->lock, LK_DOWNGRADE,(void *)0, curproc);
+ vm_map_lock_downgrade(map);
rv = vm_fault_user_wire(map, entry->start, entry->end);
if (rv) {
@@ -1394,7 +1394,7 @@ vm_map_user_pageable(map, start, end, new_pageable)
}
vm_map_clear_recursive(map);
- lockmgr(&map->lock, LK_UPGRADE, (void *)0, curproc);
+ vm_map_lock_upgrade(map);
goto rescan;
}
@@ -1594,7 +1594,7 @@ vm_map_pageable(map, start, end, new_pageable)
vm_map_unlock(map); /* trust me ... */
} else {
vm_map_set_recursive(map);
- lockmgr(&map->lock, LK_DOWNGRADE, (void*)0, curproc);
+ vm_map_lock_downgrade(map);
}
rv = 0;
@@ -2374,9 +2374,7 @@ RetryLookup:;
* object.
*/
- if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
- (void *)0, curproc)) {
-
+ if (vm_map_lock_upgrade(share_map)) {
if (share_map != map)
vm_map_unlock_read(map);
@@ -2388,9 +2386,7 @@ RetryLookup:;
OFF_TO_IDX(entry->end - entry->start));
entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
-
- lockmgr(&share_map->lock, LK_DOWNGRADE,
- (void *)0, curproc);
+ vm_map_lock_downgrade(share_map);
} else {
/*
* We're attempting to read a copy-on-write page --
@@ -2405,8 +2401,7 @@ RetryLookup:;
*/
if (entry->object.vm_object == NULL) {
- if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
- (void *)0, curproc)) {
+ if (vm_map_lock_upgrade(share_map)) {
if (share_map != map)
vm_map_unlock_read(map);
goto RetryLookup;
@@ -2414,7 +2409,7 @@ RetryLookup:;
entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(entry->end - entry->start));
entry->offset = 0;
- lockmgr(&share_map->lock, LK_DOWNGRADE, (void *)0, curproc);
+ vm_map_lock_downgrade(share_map);
}
if (entry->object.vm_object != NULL)
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 7b87856ad347..aa8ddb249c1b 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
- * $Id: vm_map.h,v 1.26 1997/04/07 07:16:06 peter Exp $
+ * $Id: vm_map.h,v 1.27 1997/08/05 00:01:58 dyson Exp $
*/
/*
@@ -193,25 +193,78 @@ typedef struct {
&(map)->ref_lock, curproc); \
(map)->timestamp++; \
}
+
#ifdef DIAGNOSTIC
+/* #define MAP_LOCK_DIAGNOSTIC 1 */
+#ifdef MAP_LOCK_DIAGNOSTIC
+#define vm_map_lock(map) { \
+ printf ("locking map LK_EXCLUSIVE: 0x%x\n", map); \
+ if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc) != 0) { \
+ panic("vm_map_lock: failed to get lock"); \
+ } \
+ (map)->timestamp++; \
+}
+#else
#define vm_map_lock(map) { \
if (lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc) != 0) { \
panic("vm_map_lock: failed to get lock"); \
} \
(map)->timestamp++; \
}
+#endif
#else
#define vm_map_lock(map) { \
lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc); \
(map)->timestamp++; \
}
#endif /* DIAGNOSTIC */
+
+#if defined(MAP_LOCK_DIAGNOSTIC)
+#define vm_map_unlock(map) \
+ do { \
+ printf ("locking map LK_RELEASE: 0x%x\n", map); \
+ lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc); \
+ } while (0);
+#define vm_map_lock_read(map) \
+ do { \
+ printf ("locking map LK_SHARED: 0x%x\n", map); \
+ lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc); \
+ } while (0);
+#define vm_map_unlock_read(map) \
+ do { \
+ printf ("locking map LK_RELEASE: 0x%x\n", map); \
+ lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc); \
+ } while (0);
+#else
#define vm_map_unlock(map) \
- lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
+ lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc);
#define vm_map_lock_read(map) \
- lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc)
+ lockmgr(&(map)->lock, LK_SHARED, (void *)0, curproc);
#define vm_map_unlock_read(map) \
- lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)
+ lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc);
+#endif
+
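+/*
+ * Try to upgrade a shared map lock to exclusive via LK_EXCLUPGRADE;
+ * a non-zero return means the upgrade was not granted and the caller
+ * must re-acquire the lock and retry the lookup.
+ */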
+static __inline__ int
+_vm_map_lock_upgrade(vm_map_t map, struct proc *p) {
+#if defined(MAP_LOCK_DIAGNOSTIC)
+ printf("locking map LK_EXCLUPGRADE: 0x%x\n", map);
+#endif
+ return lockmgr(&(map)->lock, LK_EXCLUPGRADE, (void *)0, p);
+}
+
+#define vm_map_lock_upgrade(map) _vm_map_lock_upgrade(map, curproc)
+
+#if defined(MAP_LOCK_DIAGNOSTIC)
+#define vm_map_lock_downgrade(map) \
+ do { \
+ printf ("locking map LK_DOWNGRADE: 0x%x\n", map); \
+ lockmgr(&(map)->lock, LK_DOWNGRADE, (void *)0, curproc); \
+ } while (0);
+#else
+#define vm_map_lock_downgrade(map) \
+ lockmgr(&(map)->lock, LK_DOWNGRADE, (void *)0, curproc);
+#endif
+
#define vm_map_set_recursive(map) { \
simple_lock(&(map)->lock.lk_interlock); \
(map)->lock.lk_flags |= LK_CANRECURSE; \
@@ -222,6 +275,7 @@ typedef struct {
(map)->lock.lk_flags &= ~LK_CANRECURSE; \
simple_unlock(&(map)->lock.lk_interlock); \
}
+
/*
* Functions implemented as macros
*/