author    John Baldwin <jhb@FreeBSD.org>  2000-10-20 07:26:37 +0000
committer John Baldwin <jhb@FreeBSD.org>  2000-10-20 07:26:37 +0000
commit    36412d79b401fbd3fccb8827480407cc7602e210 (patch)
tree      98d3dd4fbc6a5a7fc80fe85bbbeaed1bf5a50528
parent    ccbdd9ee59ad76612869a4a1fd41a0e85029ef4c (diff)
-rw-r--r--  sys/alpha/alpha/synch_machdep.c | 550
-rw-r--r--  sys/alpha/include/mutex.h       | 480
-rw-r--r--  sys/amd64/include/mutex.h       | 535
-rw-r--r--  sys/conf/files.alpha            |   1
-rw-r--r--  sys/conf/files.i386             |   1
-rw-r--r--  sys/conf/files.ia64             |   1
-rw-r--r--  sys/conf/files.pc98             |   1
-rw-r--r--  sys/conf/options                |   2
-rw-r--r--  sys/i386/i386/synch_machdep.c   | 550
-rw-r--r--  sys/i386/include/mutex.h        | 535
-rw-r--r--  sys/ia64/ia64/synch_machdep.c   | 549
-rw-r--r--  sys/ia64/include/mutex.h        | 497
-rw-r--r--  sys/kern/kern_mutex.c           | 605
-rw-r--r--  sys/kern/subr_turnstile.c       | 605
-rw-r--r--  sys/kern/subr_witness.c         | 605
-rw-r--r--  sys/powerpc/include/mutex.h     | 480
-rw-r--r--  sys/sys/mutex.h                 | 606
17 files changed, 2476 insertions, 4127 deletions
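
Taken together, the hunks below delete the per-architecture synch_machdep.c implementations (alpha, i386, ia64, pc98), consolidate the machine-independent mutex code under sys/kern (kern_mutex.c, subr_turnstile.c, subr_witness.c), rename the SMP_DEBUG kernel option to MUTEX_DEBUG, and replace the saved-flags fields (mtx_savefl/mtx_saveipl) with mtx_saveintr. As background for the flag definitions removed below, here is a minimal user-space sketch of the lock-word encoding; it is illustrative only, and mtx_owner_of() is a hypothetical helper, not kernel code.

/*
 * Sketch, assuming the values from the removed headers: the owner's
 * address fills the lock word, with the low bits reserved for flags.
 */
#include <stddef.h>
#include <stdint.h>

#define MTX_RECURSE	0x01	/* (non-spin) lock held recursively */
#define MTX_CONTESTED	0x02	/* (non-spin) lock contested */
#define MTX_FLAGMASK	(~(uintptr_t)(MTX_RECURSE | MTX_CONTESTED))
#define MTX_UNOWNED	0x8	/* cookie for a free mutex */

struct proc;

static inline struct proc *
mtx_owner_of(uintptr_t lock)
{
	return (lock == MTX_UNOWNED ? NULL :
	    (struct proc *)(lock & MTX_FLAGMASK));
}
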
diff --git a/sys/alpha/alpha/synch_machdep.c b/sys/alpha/alpha/synch_machdep.c
deleted file mode 100644
index 184da83a9713..000000000000
--- a/sys/alpha/alpha/synch_machdep.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/*-
- * Copyright (c) 1997, 1998 Berkeley Software Design, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Berkeley Software Design Inc's name may not be used to endorse or
- * promote products derived from this software without specific prior
- * written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
- * $FreeBSD$
- */
-
-#define MTX_STRS /* define common strings */
-
-#include <sys/param.h>
-#include <sys/proc.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/ktr.h>
-#include <vm/vm.h>
-#include <vm/vm_extern.h>
-#include <ddb/ddb.h>
-#include <machine/atomic.h>
-#include <machine/clock.h>
-#include <machine/cpu.h>
-#include <machine/mutex.h>
-
-/* All mutexes in system (used for debug/panic) */
-struct mtx all_mtx = { MTX_UNOWNED, 0, 0, "All mutexes queue head",
- TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
- { NULL, NULL }, &all_mtx, &all_mtx
-#ifdef SMP_DEBUG
- , NULL, { NULL, NULL }, NULL, 0
-#endif
-};
-
-int mtx_cur_cnt;
-int mtx_max_cnt;
-
-extern void _mtx_enter_giant_def(void);
-extern void _mtx_exit_giant_def(void);
-
-static void propagate_priority(struct proc *) __unused;
-
-#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#define mtx_owner(m) (mtx_unowned(m) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-
-#define RETIP(x) *(((u_int64_t *)(&x)) - 1)
-#define SET_PRIO(p, pri) (p)->p_priority = (pri)
-
-/*
- * XXX Temporary, for use from assembly language
- */
-
-void
-_mtx_enter_giant_def(void)
-{
-
- mtx_enter(&Giant, MTX_DEF);
-}
-
-void
-_mtx_exit_giant_def(void)
-{
-
- mtx_exit(&Giant, MTX_DEF);
-}
-
-static void
-propagate_priority(struct proc *p)
-{
- int pri = p->p_priority;
- struct mtx *m = p->p_blocked;
-
- for (;;) {
- struct proc *p1;
-
- p = mtx_owner(m);
-
- if (p == NULL) {
- /*
- * This really isn't quite right. Really
- * ought to bump priority of process that
- * next acquires the mutex.
- */
- MPASS(m->mtx_lock == MTX_CONTESTED);
- return;
- }
- MPASS(p->p_magic == P_MAGIC);
- if (p->p_priority <= pri)
- return;
- /*
- * If lock holder is actually running, just bump priority.
- */
- if (TAILQ_NEXT(p, p_procq) == NULL) {
- MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
- SET_PRIO(p, pri);
- return;
- }
- /*
- * If on run queue move to new run queue, and
- * quit.
- */
- if (p->p_stat == SRUN) {
- MPASS(p->p_blocked == NULL);
- remrunqueue(p);
- SET_PRIO(p, pri);
- setrunqueue(p);
- return;
- }
-
- /*
- * If we aren't blocked on a mutex, give up and quit.
- */
- if (p->p_stat != SMTX) {
- return;
- }
-
- /*
- * Pick up the mutex that p is blocked on.
- */
- m = p->p_blocked;
- MPASS(m != NULL);
-
- /*
- * Check if the proc needs to be moved up on
- * the blocked chain
- */
- if ((p1 = TAILQ_PREV(p, rq, p_procq)) == NULL ||
- p1->p_priority <= pri)
- continue;
-
- /*
- * Remove proc from blocked chain
- */
- TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
- MPASS(p1->p_magic == P_MAGIC);
- if (p1->p_priority > pri)
- break;
- }
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- CTR4(KTR_LOCK,
- "propagate priority: p 0x%p moved before 0x%p on [0x%p] %s",
- p, p1, m, m->mtx_description);
- }
-}
-
-void
-mtx_enter_hard(struct mtx *m, int type, int ipl)
-{
- struct proc *p = CURPROC;
-
- KASSERT(p != NULL, ("curproc is NULL in mutex"));
-
- switch (type) {
- case MTX_DEF:
- if ((m->mtx_lock & MTX_FLAGMASK) == (u_int64_t)p) {
- m->mtx_recurse++;
- atomic_set_64(&m->mtx_lock, MTX_RECURSE);
- CTR1(KTR_LOCK, "mtx_enter: 0x%p recurse", m);
- return;
- }
- CTR3(KTR_LOCK, "mtx_enter: 0x%p contested (lock=%lx) [0x%lx]",
- m, m->mtx_lock, RETIP(m));
- while (!atomic_cmpset_64(&m->mtx_lock, MTX_UNOWNED,
- (u_int64_t)p)) {
- int v;
- struct proc *p1;
-
- mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
- /*
- * check if the lock has been released while
- * waiting for the schedlock.
- */
- if ((v = m->mtx_lock) == MTX_UNOWNED) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
- /*
- * The mutex was marked contested on release. This
- * means that there are processes blocked on it.
- */
- if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- KASSERT(p1 != NULL, ("contested mutex has no contesters"));
- KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
- m->mtx_lock = (u_int64_t)p | MTX_CONTESTED;
- if (p1->p_priority < p->p_priority) {
- SET_PRIO(p, p1->p_priority);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- }
- /*
- * If the mutex isn't already contested and
- * a failure occurs setting the contested bit, the
- * mutex was either released or the
- * state of the RECURSION bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_cmpset_64(&m->mtx_lock, v,
- v | MTX_CONTESTED)) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
-
- /* We definitely have to sleep for this lock */
- mtx_assert(m, MA_NOTOWNED);
-
-#ifdef notyet
- /*
- * If we're borrowing an interrupted thread's VM
- * context, we must clean up before going to sleep.
- */
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- CTR2(KTR_LOCK,
- "mtx_enter: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
- }
-#endif
-
- /* Put us on the list of procs blocked on this mutex */
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock &
- MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m,
- mtx_contested);
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- } else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_priority > p->p_priority)
- break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
- p_procq);
- }
-
- p->p_blocked = m; /* Who we're blocked on */
- p->p_stat = SMTX;
-#if 0
- propagate_priority(p);
-#endif
- CTR3(KTR_LOCK, "mtx_enter: p 0x%p blocked on [0x%p] %s",
- p, m, m->mtx_description);
- mi_switch();
- CTR3(KTR_LOCK,
- "mtx_enter: p 0x%p free from blocked on [0x%p] %s",
- p, m, m->mtx_description);
- mtx_exit(&sched_lock, MTX_SPIN);
- }
- alpha_mb();
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- case MTX_SPIN | MTX_TOPHALF:
- {
- int i = 0;
-
- if (m->mtx_lock == (u_int64_t)p) {
- m->mtx_recurse++;
- return;
- }
- CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
- for (;;) {
- if (atomic_cmpset_64(&m->mtx_lock, MTX_UNOWNED,
- (u_int64_t)p)) {
- alpha_mb();
- break;
- }
- while (m->mtx_lock != MTX_UNOWNED) {
- if (i++ < 1000000)
- continue;
- if (i++ < 6000000)
- DELAY (1);
-#ifdef DDB
- else if (!db_active)
-#else
- else
-#endif
- panic(
- "spin lock %s held by 0x%lx for > 5 seconds",
- m->mtx_description, m->mtx_lock);
- }
- }
-
-#ifdef SMP_DEBUG
- if (type != MTX_SPIN)
- m->mtx_saveipl = 0xbeefface;
- else
-#endif
- m->mtx_saveipl = ipl;
- CTR1(KTR_LOCK, "mtx_enter: 0x%p spin done", m);
- return;
- }
- }
-}
-
-void
-mtx_exit_hard(struct mtx *m, int type)
-{
- struct proc *p, *p1;
- struct mtx *m1;
- int pri;
-
- switch (type) {
- case MTX_DEF:
- case MTX_DEF | MTX_NOSWITCH:
- if (m->mtx_recurse != 0) {
- if (--(m->mtx_recurse) == 0)
- atomic_clear_64(&m->mtx_lock, MTX_RECURSE);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p unrecurse", m);
- return;
- }
- mtx_enter(&sched_lock, MTX_SPIN);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p contested", m);
- p = CURPROC;
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- LIST_REMOVE(m, mtx_contested);
- atomic_cmpset_64(&m->mtx_lock, m->mtx_lock,
- MTX_UNOWNED);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p not held", m);
- } else
- m->mtx_lock = MTX_CONTESTED;
- pri = MAXPRI;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
- if (cp < pri)
- pri = cp;
- }
- if (pri > p->p_nativepri)
- pri = p->p_nativepri;
- SET_PRIO(p, pri);
- CTR2(KTR_LOCK, "mtx_exit: 0x%p contested setrunqueue 0x%p",
- m, p1);
- p1->p_blocked = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
- if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
-#ifdef notyet
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- CTR2(KTR_LOCK,
- "mtx_exit: 0x%x interruped 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
- }
-#endif
- setrunqueue(p);
- CTR2(KTR_LOCK, "mtx_exit: 0x%p switching out lock=0x%lx",
- m, m->mtx_lock);
- mi_switch();
- CTR2(KTR_LOCK, "mtx_exit: 0x%p resuming lock=0x%lx",
- m, m->mtx_lock);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- if (m->mtx_recurse != 0) {
- m->mtx_recurse--;
- return;
- }
- alpha_mb();
- if (atomic_cmpset_64(&m->mtx_lock, CURTHD, MTX_UNOWNED)) {
- if (type & MTX_FIRST)
- enable_intr(); /* XXX is this kosher? */
- else {
- MPASS(m->mtx_saveipl != 0xbeefface);
- alpha_pal_swpipl(m->mtx_saveipl);
- }
- return;
- }
- panic("unsucuessful release of spin lock");
- case MTX_SPIN | MTX_TOPHALF:
- if (m->mtx_recurse != 0) {
- m->mtx_recurse--;
- return;
- }
- alpha_mb();
- if (atomic_cmpset_64(&m->mtx_lock, CURTHD, MTX_UNOWNED))
- return;
- panic("unsucuessful release of spin lock");
- default:
- panic("mtx_exit_hard: unsupported type 0x%x\n", type);
- }
-}
-
-#define MV_DESTROY 0 /* validate before destroy */
-#define MV_INIT 1 /* validate before init */
-
-#ifdef SMP_DEBUG
-
-#define ISK0SEG(va) \
- ((va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END))
-
-int mtx_validate __P((struct mtx *, int));
-
-int
-mtx_validate(struct mtx *m, int when)
-{
- struct mtx *mp;
- int i;
- int retval = 0;
-
- if (m == &all_mtx || cold)
- return 0;
-
- mtx_enter(&all_mtx, MTX_DEF);
- ASS(ISK0SEG((vm_offset_t)all_mtx.mtx_next) ||
- kernacc((caddr_t)all_mtx.mtx_next, 4, 1) == 1);
- ASS(all_mtx.mtx_next->mtx_prev == &all_mtx);
- for (i = 0, mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
- if (!ISK0SEG((vm_offset_t)all_mtx.mtx_next) &&
- kernacc((caddr_t)mp->mtx_next, 4, 1) != 1) {
- panic("mtx_validate: mp=%p mp->mtx_next=%p",
- mp, mp->mtx_next);
- }
- i++;
- if (i > mtx_cur_cnt) {
- panic("mtx_validate: too many in chain, known=%d\n",
- mtx_cur_cnt);
- }
- }
- ASS(i == mtx_cur_cnt);
- switch (when) {
- case MV_DESTROY:
- for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
- if (mp == m)
- break;
- ASS(mp == m);
- break;
- case MV_INIT:
- for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
- if (mp == m) {
- /*
- * Not good. This mutex already exists.
- */
- retval = 1;
-#if 1
- printf("re-initing existing mutex %s\n",
- m->mtx_description);
- ASS(m->mtx_lock == MTX_UNOWNED);
- retval = 1;
-#else
- panic("re-initing existing mutex %s",
- m->mtx_description);
-#endif
- }
- }
- mtx_exit(&all_mtx, MTX_DEF);
- return (retval);
-}
-#endif
-
-void
-mtx_init(struct mtx *m, char *t, int flag)
-{
-
- CTR2(KTR_LOCK, "mtx_init 0x%p (%s)", m, t);
-#ifdef SMP_DEBUG
- if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
- return;
-#endif
- bzero((void *)m, sizeof *m);
- TAILQ_INIT(&m->mtx_blocked);
- m->mtx_description = t;
- m->mtx_lock = MTX_UNOWNED;
- /* Put on all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
- m->mtx_next = &all_mtx;
- m->mtx_prev = all_mtx.mtx_prev;
- m->mtx_prev->mtx_next = m;
- all_mtx.mtx_prev = m;
- if (++mtx_cur_cnt > mtx_max_cnt)
- mtx_max_cnt = mtx_cur_cnt;
- mtx_exit(&all_mtx, MTX_DEF);
- witness_init(m, flag);
-}
-
-void
-mtx_destroy(struct mtx *m)
-{
-
- CTR2(KTR_LOCK, "mtx_destroy 0x%p (%s)", m, m->mtx_description);
-#ifdef SMP_DEBUG
- if (m->mtx_next == NULL)
- panic("mtx_destroy: %p (%s) already destroyed",
- m, m->mtx_description);
-
- if (!mtx_owned(m)) {
- ASS(m->mtx_lock == MTX_UNOWNED);
- } else {
- ASS((m->mtx_lock & (MTX_RECURSE|MTX_CONTESTED)) == 0);
- }
- mtx_validate(m, MV_DESTROY); /* diagnostic */
-#endif
-
-#ifdef WITNESS
- if (m->mtx_witness)
- witness_destroy(m);
-#endif /* WITNESS */
-
- /* Remove from the all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
- m->mtx_next->mtx_prev = m->mtx_prev;
- m->mtx_prev->mtx_next = m->mtx_next;
-#ifdef SMP_DEBUG
- m->mtx_next = m->mtx_prev = NULL;
-#endif
- mtx_cur_cnt--;
- mtx_exit(&all_mtx, MTX_DEF);
-}
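
The core of the deleted mtx_enter_hard() MTX_SPIN case above is a compare-and-swap acquire with a read-only backoff spin. A minimal user-space sketch of the same pattern, using C11 <stdatomic.h> in place of the kernel's atomic_cmpset_64(); spin_acquire() is a hypothetical name, and the DELAY/panic timeout logic is omitted.

#include <stdatomic.h>
#include <stdint.h>

#define MTX_UNOWNED	0x8	/* cookie for a free mutex */

static void
spin_acquire(_Atomic uintptr_t *lock, uintptr_t tid)
{
	for (;;) {
		uintptr_t expect = MTX_UNOWNED;

		/* Try to swing the word from free to our thread id. */
		if (atomic_compare_exchange_strong(lock, &expect, tid))
			return;
		/* Wait read-only, as the inner while loop above does,
		 * so CAS traffic doesn't saturate the bus. */
		while (atomic_load_explicit(lock, memory_order_relaxed) !=
		    MTX_UNOWNED)
			;
	}
}
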
diff --git a/sys/alpha/include/mutex.h b/sys/alpha/include/mutex.h
index 12b12a8011c5..5ccc28d78753 100644
--- a/sys/alpha/include/mutex.h
+++ b/sys/alpha/include/mutex.h
@@ -33,293 +33,39 @@
#define _MACHINE_MUTEX_H_
#ifndef LOCORE
-#include <sys/queue.h>
#ifdef _KERNEL
-#include <sys/ktr.h>
-#include <sys/proc.h> /* Needed for curproc. */
-#include <machine/atomic.h>
-#include <machine/cpufunc.h>
-#include <machine/globaldata.h>
-#include <machine/globals.h>
-
-/*
- * If kern_mutex.c is being built, compile non-inlined versions of various
- * functions so that kernel modules can use them.
- */
-#ifndef _KERN_MUTEX_C_
-#define _MTX_INLINE static __inline
-#else
-#define _MTX_INLINE
-#endif
-
-/*
- * Mutex flags
- *
- * Types
- */
-#define MTX_DEF 0x1 /* Default (spin/sleep) */
-#define MTX_SPIN 0x2 /* Spin only lock */
-
-/* Options */
-#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
-#define MTX_NORECURSE 0x8 /* No recursion possible */
-#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
-#define MTX_NOSWITCH 0x20 /* Do not switch on release */
-#define MTX_FIRST 0x40 /* First spin lock holder */
-#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
-
-/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
-#define MTX_HARDOPTS (MTX_DEF | MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
-
-/* Flags/value used in mtx_lock */
-#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
-#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
-#define MTX_FLAGMASK ~(MTX_RECURSE | MTX_CONTESTED)
-#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
-
-#endif /* _KERNEL */
-
-/*
- * Sleep/spin mutex
- */
-struct mtx {
- volatile u_int64_t mtx_lock; /* lock owner/gate/flags */
- volatile u_int32_t mtx_recurse; /* number of recursive holds */
- u_int32_t mtx_saveipl; /* saved ipl (for spin locks) */
- char *mtx_description;
- TAILQ_HEAD(, proc) mtx_blocked;
- LIST_ENTRY(mtx) mtx_contested;
- struct mtx *mtx_next; /* all locks in system */
- struct mtx *mtx_prev;
-#ifdef SMP_DEBUG
- /* If you add anything here, adjust the mtxf_t definition below */
- struct witness *mtx_witness;
- LIST_ENTRY(mtx) mtx_held;
- const char *mtx_file;
- int mtx_line;
-#endif /* SMP_DEBUG */
-};
-
-/*
- * Filler for structs which need to remain the same size
- * whether or not SMP_DEBUG is turned on.
- */
-typedef struct mtxf {
-#ifdef SMP_DEBUG
- char mtxf_data[0];
-#else
- char mtxf_data[4*sizeof(void *) + sizeof(int)];
-#endif
-} mtxf_t;
-
-#define mp_fixme(string)
-
-#ifdef _KERNEL
-/* Misc */
-#define CURTHD ((u_int64_t)CURPROC) /* Current thread ID */
-
-/* Prototypes */
-void mtx_init(struct mtx *m, char *description, int flag);
-void mtx_enter_hard(struct mtx *, int type, int ipl);
-void mtx_exit_hard(struct mtx *, int type);
-void mtx_destroy(struct mtx *m);
-
-/*
- * Wrap the following functions with cpp macros so that filenames and line
- * numbers are embedded in the code correctly.
- */
-#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
-void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
-int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
-void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
-#endif
-
-#define mtx_enter(mtxp, type) \
- _mtx_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_try_enter(mtxp, type) \
- _mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_exit(mtxp, type) \
- _mtx_exit((mtxp), (type), __FILE__, __LINE__)
-
-/* Global locks */
-extern struct mtx sched_lock;
-extern struct mtx Giant;
-
-/*
- * Used to replace return with an exit Giant and return.
- */
-
-#define EGAR(a) \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return (a); \
-} while (0)
-
-#define VEGAR \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return; \
-} while (0)
-
-#define DROP_GIANT() \
-do { \
- int _giantcnt; \
- WITNESS_SAVE_DECL(Giant); \
- \
- WITNESS_SAVE(&Giant, Giant); \
- for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
- mtx_exit(&Giant, MTX_DEF)
-
-#define PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant); \
-} while (0)
-
-#define PARTIAL_PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant)
-
/*
* Debugging
*/
-#ifndef SMP_DEBUG
-#define mtx_assert(m, what)
-#else /* SMP_DEBUG */
+#ifdef MUTEX_DEBUG
-#define MA_OWNED 1
-#define MA_NOTOWNED 2
-#define mtx_assert(m, what) { \
- switch ((what)) { \
- case MA_OWNED: \
- ASS(mtx_owned((m))); \
- break; \
- case MA_NOTOWNED: \
- ASS(!mtx_owned((m))); \
- break; \
- default: \
- panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
- } \
-}
-
-#ifdef INVARIANTS
-#define ASS(ex) MPASS(ex)
-#define MPASS(ex) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- #ex, __FILE__, __LINE__)
-#define MPASS2(ex, what) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- what, __FILE__, __LINE__)
-
-#ifdef MTX_STRS
-char STR_IEN[] = "fl & 0x200";
-char STR_IDIS[] = "!(fl & 0x200)";
-#else /* MTX_STRS */
+#ifdef _KERN_MUTEX_C_
+char STR_IEN[] = "ps & IPL != IPL_HIGH";
+char STR_IDIS[] = "ps & IPL == IPL_HIGH";
+char STR_SIEN[] = "mpp->mtx_saveintr != IPL_HIGH";
+#else /* _KERN_MUTEX_C_ */
extern char STR_IEN[];
extern char STR_IDIS[];
-#endif /* MTX_STRS */
+extern char STR_SIEN[];
+#endif /* _KERN_MUTEX_C_ */
+
+#endif /* MUTEX_DEBUG */
+
#define ASS_IEN MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
== ALPHA_PSL_IPL_HIGH, STR_IEN)
#define ASS_IDIS MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
!= ALPHA_PSL_IPL_HIGH, STR_IDIS)
-#endif /* INVARIANTS */
-
-#endif /* SMP_DEBUG */
-
-#if !defined(SMP_DEBUG) || !defined(INVARIANTS)
-#define ASS(ex)
-#define MPASS(ex)
-#define MPASS2(ex, where)
-#define ASS_IEN
-#define ASS_IDIS
-#endif /* !defined(SMP_DEBUG) || !defined(INVARIANTS) */
-
-#ifdef WITNESS
-#ifndef SMP_DEBUG
-#error WITNESS requires SMP_DEBUG
-#endif /* SMP_DEBUG */
-#define WITNESS_ENTER(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_enter((m), (t), (f), (l))
-#define WITNESS_EXIT(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_exit((m), (t), (f), (l))
-
-#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
-#define WITNESS_SAVE_DECL(n) \
- const char * __CONCAT(n, __wf); \
- int __CONCAT(n, __wl)
-
-#define WITNESS_SAVE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
-} while (0)
-
-#define WITNESS_RESTORE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
-} while (0)
-
-void witness_init(struct mtx *, int flag);
-void witness_destroy(struct mtx *);
-void witness_enter(struct mtx *, int, const char *, int);
-void witness_try_enter(struct mtx *, int, const char *, int);
-void witness_exit(struct mtx *, int, const char *, int);
-void witness_display(void(*)(const char *fmt, ...));
-void witness_list(struct proc *);
-int witness_sleep(int, struct mtx *, const char *, int);
-void witness_save(struct mtx *, const char **, int *);
-void witness_restore(struct mtx *, const char *, int);
-#else /* WITNESS */
-#define WITNESS_ENTER(m, t, f, l)
-#define WITNESS_EXIT(m, t, f, l)
-#define WITNESS_SLEEP(check, m)
-#define WITNESS_SAVE_DECL(n)
-#define WITNESS_SAVE(m, n)
-#define WITNESS_RESTORE(m, n)
-
-/*
- * flag++ is a sleazoid way of shutting up the unused parameter warning
- * in mtx_init()
- */
-#define witness_init(m, flag) flag++
-#define witness_destroy(m)
-#define witness_enter(m, t, f, l)
-#define witness_try_enter(m, t, f, l)
-#define witness_exit(m, t, f, l)
-#endif /* WITNESS */
+#define ASS_SIEN(mpp) MPASS2((mpp)->saveintr != ALPHA_PSL_IPL_HIGH, STR_SIEN)
/*
* Assembly macros (for internal use only)
*--------------------------------------------------------------------------
*/
-/*
- * Get a sleep lock, deal with recursion inline
- */
-
#define _V(x) __STRING(x)
-#define _getlock_sleep(mp, tid, type) do { \
- if (atomic_cmpset_64(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != (tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_64(&(mp)->mtx_lock, MTX_RECURSE); \
- (mp)->mtx_recurse++; \
- } \
- } else { \
- alpha_mb(); \
- } \
-} while (0)
-
/*
* Get a spin lock, handle recursion inline (as the less common case)
*/
@@ -334,208 +80,6 @@ void witness_restore(struct mtx *, const char *, int);
} \
} while (0)
-/*
- * Get a lock without any recursion handling. Calls the hard enter
- * function if we can't get it inline.
- */
-
-#define _getlock_norecurse(mp, tid, type) do { \
- if (atomic_cmpset_64(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
- else \
- alpha_mb(); \
-} while (0)
-
-/*
- * Release a sleep lock assuming we haven't recursed on it; recursion is
- * handled in the hard function.
- */
-
-#define _exitlock_norecurse(mp, tid, type) do { \
- alpha_mb(); \
- if (atomic_cmpset_64(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-
-/*
- * Release a sleep lock when it's likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-
-#define _exitlock(mp, tid, type) do { \
- alpha_mb(); \
- if (atomic_cmpset_64(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) {\
- if (((mp)->mtx_lock & MTX_RECURSE) && \
- (--(mp)->mtx_recurse == 0)) \
- atomic_clear_64(&(mp)->mtx_lock, MTX_RECURSE); \
- else \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
-} while (0)
-
-/*
- * Release a spin lock (with possible recursion)
- */
-
-#define _exitlock_spin(mp) do { \
- alpha_mb(); \
- if ((mp)->mtx_recurse == 0) { \
- int _ipl = (mp)->mtx_saveipl; \
- atomic_cmpset_64(&(mp)->mtx_lock, (mp)->mtx_lock, \
- MTX_UNOWNED); \
- alpha_pal_swpipl(_ipl); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-
-/*
- * Externally visible mutex functions
- *------------------------------------------------------------------------
- */
-
-/*
- * Return non-zero if a mutex is already owned by the current thread
- */
-#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == CURTHD)
-
-/* Common strings */
-#ifdef MTX_STRS
-char STR_mtx_enter_fmt[] = "GOT %s [%p] at %s:%d r=%d";
-char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
-char STR_mtx_exit_fmt[] = "REL %s [%p] at %s:%d r=%d";
-char STR_mtx_owned[] = "mtx_owned(mpp)";
-char STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
-char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] at %s:%d result=%d";
-#else /* MTX_STRS */
-extern char STR_mtx_enter_fmt[];
-extern char STR_mtx_bad_type[];
-extern char STR_mtx_exit_fmt[];
-extern char STR_mtx_owned[];
-extern char STR_mtx_recurse[];
-extern char STR_mtx_try_enter_fmt[];
-#endif /* MTX_STRS */
-
-#ifndef KLD_MODULE
-/*
- * Get lock 'm'; the macro handles the easy (and most common) cases and
- * leaves the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant, much of this code is optimized out
- */
-_MTX_INLINE void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *mpp = mtxp;
-
- /* bits only valid on mtx_exit() */
- MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type);
-
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this lock we
- * just bump the recursion count.
- */
- if (mpp->mtx_lock == CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
- * If an interrupt thread uses this we must block
- * interrupts here.
- */
- _getlock_spin_block(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
- done:
- WITNESS_ENTER(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
-}
-
-/*
- * Attempt to get MTX_DEF lock, return non-zero if lock acquired
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-_MTX_INLINE int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
-
- rval = atomic_cmpset_64(&mpp->mtx_lock, MTX_UNOWNED, CURTHD);
-#ifdef SMP_DEBUG
- if (rval && mpp->mtx_witness != NULL) {
- ASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
- }
-#endif
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, file, line, rval);
-
- return rval;
-}
-
-/*
- * Release lock m
- */
-_MTX_INLINE void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
-
- MPASS2(mtx_owned(mpp), STR_mtx_owned);
- WITNESS_EXIT(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
- atomic_cmpset_64(&mpp->mtx_lock, mpp->mtx_lock,
- MTX_UNOWNED);
- if (((type) & MTX_TOPHALF) == 0)
- alpha_pal_swpipl(mpp->mtx_saveipl);
- } else
- if ((type) & MTX_TOPHALF) {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else
- _exitlock_spin(mpp);
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- }
-}
-
-#endif /* KLD_MODULE */
#endif /* _KERNEL */
#else /* !LOCORE */
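
Although the diff only shows their removal from this header, the DROP_GIANT()/PICKUP_GIANT() macros are presumably consolidated into the machine-independent sys/sys/mutex.h that this commit touches. Because DROP_GIANT() opens a do { block (declaring _giantcnt and releasing Giant once per recursive hold) and PICKUP_GIANT() re-enters Giant that many times and closes the block with } while (0), the two must appear in the same lexical scope. A hypothetical usage sketch, with example_sleep_path() an illustrative name:

void
example_sleep_path(void)
{
	DROP_GIANT();		/* release Giant, remembering the depth */
	/* ... sleep or block here without holding Giant ... */
	PICKUP_GIANT();		/* re-acquire Giant to the saved depth */
}
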
diff --git a/sys/amd64/include/mutex.h b/sys/amd64/include/mutex.h
index 881cbfae3c72..1a8a7b65e7f6 100644
--- a/sys/amd64/include/mutex.h
+++ b/sys/amd64/include/mutex.h
@@ -33,266 +33,32 @@
#define _MACHINE_MUTEX_H_
#ifndef LOCORE
-#include <sys/queue.h>
#ifdef _KERNEL
-#include <sys/ktr.h>
-#include <sys/proc.h> /* Needed for curproc. */
-#include <machine/atomic.h>
-#include <machine/cpufunc.h>
-#include <machine/globals.h>
-
-/*
- * If kern_mutex.c is being built, compile non-inlined versions of various
- * functions so that kernel modules can use them.
- */
-#ifndef _KERN_MUTEX_C_
-#define _MTX_INLINE static __inline
-#else
-#define _MTX_INLINE
-#endif
-
-/*
- * Mutex flags
- *
- * Types
- */
-#define MTX_DEF 0x0 /* Default (spin/sleep) */
-#define MTX_SPIN 0x1 /* Spin only lock */
-
-/* Options */
-#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
-#define MTX_NORECURSE 0x8 /* No recursion possible */
-#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
-#define MTX_NOSWITCH 0x20 /* Do not switch on release */
-#define MTX_FIRST 0x40 /* First spin lock holder */
-#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
-
-/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
-#define MTX_HARDOPTS (MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
-
-/* Flags/value used in mtx_lock */
-#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
-#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
-#define MTX_FLAGMASK ~(MTX_RECURSE | MTX_CONTESTED)
-#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
-
-#endif /* _KERNEL */
-
-/*
- * Sleep/spin mutex
- */
-struct mtx {
- volatile u_int mtx_lock; /* lock owner/gate/flags */
- volatile u_int mtx_recurse; /* number of recursive holds */
- u_int mtx_savefl; /* saved flags (for spin locks) */
- char *mtx_description;
- TAILQ_HEAD(, proc) mtx_blocked;
- LIST_ENTRY(mtx) mtx_contested;
- struct mtx *mtx_next; /* all locks in system */
- struct mtx *mtx_prev;
-#ifdef SMP_DEBUG
- /* If you add anything here, adjust the mtxf_t definition below */
- struct witness *mtx_witness;
- LIST_ENTRY(mtx) mtx_held;
- const char *mtx_file;
- int mtx_line;
-#endif /* SMP_DEBUG */
-};
-
-/*
- * Filler for structs which need to remain the same size
- * whether or not SMP_DEBUG is turned on.
- */
-typedef struct mtxf {
-#ifdef SMP_DEBUG
- char mtxf_data[0];
-#else
- char mtxf_data[4*sizeof(void *) + sizeof(int)];
-#endif
-} mtxf_t;
-
-#define mp_fixme(string)
-
-#ifdef _KERNEL
-/* Misc */
-#define CURTHD ((u_int)CURPROC) /* Current thread ID */
-
-/* Prototypes */
-void mtx_init(struct mtx *m, char *description, int flag);
-void mtx_enter_hard(struct mtx *, int type, int flags);
-void mtx_exit_hard(struct mtx *, int type);
-void mtx_destroy(struct mtx *m);
-
-/*
- * Wrap the following functions with cpp macros so that filenames and line
- * numbers are embedded in the code correctly.
- */
-#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
-void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
-int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
-void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
-#endif
-
-#define mtx_enter(mtxp, type) \
- _mtx_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_try_enter(mtxp, type) \
- _mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_exit(mtxp, type) \
- _mtx_exit((mtxp), (type), __FILE__, __LINE__)
+#include <machine/psl.h>
/* Global locks */
-extern struct mtx sched_lock;
-extern struct mtx Giant;
extern struct mtx clock_lock;
/*
- * Used to replace return with an exit Giant and return.
- */
-
-#define EGAR(a) \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return (a); \
-} while (0)
-
-#define VEGAR \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return; \
-} while (0)
-
-#define DROP_GIANT() \
-do { \
- int _giantcnt; \
- WITNESS_SAVE_DECL(Giant); \
- \
- WITNESS_SAVE(&Giant, Giant); \
- for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
- mtx_exit(&Giant, MTX_DEF)
-
-#define PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant); \
-} while (0)
-
-#define PARTIAL_PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant)
-
-
-/*
* Debugging
*/
-#ifndef SMP_DEBUG
-#define mtx_assert(m, what)
-#else /* SMP_DEBUG */
-
-#define MA_OWNED 1
-#define MA_NOTOWNED 2
-#define mtx_assert(m, what) { \
- switch ((what)) { \
- case MA_OWNED: \
- ASS(mtx_owned((m))); \
- break; \
- case MA_NOTOWNED: \
- ASS(!mtx_owned((m))); \
- break; \
- default: \
- panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
- } \
-}
+#ifdef MUTEX_DEBUG
-#ifdef INVARIANTS
-#define ASS(ex) MPASS(ex)
-#define MPASS(ex) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- #ex, __FILE__, __LINE__)
-#define MPASS2(ex, what) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- what, __FILE__, __LINE__)
-
-#ifdef MTX_STRS
-char STR_IEN[] = "fl & 0x200";
-char STR_IDIS[] = "!(fl & 0x200)";
-#else /* MTX_STRS */
+#ifdef _KERN_MUTEX_C_
+char STR_IEN[] = "fl & PSL_I";
+char STR_IDIS[] = "!(fl & PSL_I)";
+char STR_SIEN[] = "mpp->mtx_saveintr & PSL_I";
+#else /* _KERN_MUTEX_C_ */
extern char STR_IEN[];
extern char STR_IDIS[];
-#endif /* MTX_STRS */
-#define ASS_IEN MPASS2(read_eflags() & 0x200, STR_IEN)
-#define ASS_IDIS MPASS2((read_eflags() & 0x200) == 0, STR_IDIS)
-#endif /* INVARIANTS */
-
-#endif /* SMP_DEBUG */
-
-#if !defined(SMP_DEBUG) || !defined(INVARIANTS)
-#define ASS(ex)
-#define MPASS(ex)
-#define MPASS2(ex, where)
-#define ASS_IEN
-#define ASS_IDIS
-#endif /* !defined(SMP_DEBUG) || !defined(INVARIANTS) */
-
-#ifdef WITNESS
-#ifndef SMP_DEBUG
-#error WITNESS requires SMP_DEBUG
-#endif /* SMP_DEBUG */
-#define WITNESS_ENTER(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_enter((m), (t), (f), (l))
-#define WITNESS_EXIT(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_exit((m), (t), (f), (l))
-
-#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
-#define WITNESS_SAVE_DECL(n) \
- const char * __CONCAT(n, __wf); \
- int __CONCAT(n, __wl)
-
-#define WITNESS_SAVE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
-} while (0)
-
-#define WITNESS_RESTORE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
-} while (0)
+extern char STR_SIEN[];
+#endif /* _KERN_MUTEX_C_ */
+#endif /* MUTEX_DEBUG */
-void witness_init(struct mtx *, int flag);
-void witness_destroy(struct mtx *);
-void witness_enter(struct mtx *, int, const char *, int);
-void witness_try_enter(struct mtx *, int, const char *, int);
-void witness_exit(struct mtx *, int, const char *, int);
-void witness_display(void(*)(const char *fmt, ...));
-void witness_list(struct proc *);
-int witness_sleep(int, struct mtx *, const char *, int);
-void witness_save(struct mtx *, const char **, int *);
-void witness_restore(struct mtx *, const char *, int);
-#else /* WITNESS */
-#define WITNESS_ENTER(m, t, f, l)
-#define WITNESS_EXIT(m, t, f, l)
-#define WITNESS_SLEEP(check, m)
-#define WITNESS_SAVE_DECL(n)
-#define WITNESS_SAVE(m, n)
-#define WITNESS_RESTORE(m, n)
-
-/*
- * flag++ is a sleazoid way of shutting up the unused parameter warning
- * in mtx_init()
- */
-#define witness_init(m, flag) flag++
-#define witness_destroy(m)
-#define witness_enter(m, t, f, l)
-#define witness_try_enter(m, t, f, l)
-#define witness_exit(m, t, f, l)
-#endif /* WITNESS */
+#define ASS_IEN MPASS2(read_eflags() & PSL_I, STR_IEN)
+#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, STR_IDIS)
+#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, STR_SIEN)
/*
* Assembly macros (for internal use only)
@@ -358,7 +124,7 @@ void witness_restore(struct mtx *, const char *, int);
"# getlock_spin_block" \
: "=&a" (_res), /* 0 (dummy output) */ \
"+m" (mtxp->mtx_lock), /* 1 */ \
- "=m" (mtxp->mtx_savefl) /* 2 */ \
+ "=m" (mtxp->mtx_saveintr) /* 2 */ \
: "r" (tid), /* 3 (input) */ \
"gi" (type), /* 4 */ \
"g" (mtxp) /* 5 */ \
@@ -456,7 +222,7 @@ void witness_restore(struct mtx *, const char *, int);
* We use cmpxchgl to clear lock (instead of simple store) to flush posting
* buffers and make the change visible to other CPU's.
*/
-#define _exitlock_spin(mtxp, inten1, inten2) ({ \
+#define _exitlock_spin(mtxp) ({ \
int _res; \
\
__asm __volatile ( \
@@ -467,276 +233,21 @@ void witness_restore(struct mtx *, const char *, int);
" jmp 2f;" \
"1: movl %0,%%eax;" \
" movl $ " _V(MTX_UNOWNED) ",%%ecx;" \
-" " inten1 ";" \
+" pushl %3;" \
" " MPLOCKED "" \
" cmpxchgl %%ecx,%0;" \
-" " inten2 ";" \
+" popfl;" \
"2:" \
"# exitlock_spin" \
: "+m" (mtxp->mtx_lock), /* 0 */ \
"+m" (mtxp->mtx_recurse), /* 1 */ \
"=&a" (_res) /* 2 */ \
- : "g" (mtxp->mtx_savefl) /* 3 (used in 'inten') */ \
+ : "g" (mtxp->mtx_saveintr) /* 3 */ \
: "memory", "ecx" /* used */ ); \
})
-#else /* I386_CPU */
-
-/*
- * For 386 processors only.
- */
-
-/* Get a sleep lock, deal with recursion inline. */
-#define _getlock_sleep(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != (tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_int(&(mp)->mtx_lock, MTX_RECURSE); \
- (mp)->mtx_recurse++; \
- } \
- } \
-} while (0)
-
-/* Get a spin lock, handle recursion inline (as the less common case) */
-#define _getlock_spin_block(mp, tid, type) do { \
- u_int _mtx_fl = read_eflags(); \
- disable_intr(); \
- if (atomic_cmpset_int(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_fl); \
- else \
- (mp)->mtx_savefl = _mtx_fl; \
-} while (0)
-
-/*
- * Get a lock without any recursion handling. Calls the hard enter function if
- * we can't get it inline.
- */
-#define _getlock_norecurse(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
-} while (0)
-
-/*
- * Release a sleep lock assuming we haven't recursed on it; recursion is handled
- * in the hard function.
- */
-#define _exitlock_norecurse(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-
-/*
- * Release a sleep lock when it's likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-#define _exitlock(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) { \
- if ((mp)->mtx_lock & MTX_RECURSE) { \
- if (--((mp)->mtx_recurse) == 0) \
- atomic_clear_int(&(mp)->mtx_lock, \
- MTX_RECURSE); \
- } else { \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
- } \
-} while (0)
-
-/* Release a spin lock (with possible recursion). */
-#define _exitlock_spin(mp, inten1, inten2) do { \
- if ((mp)->mtx_recurse == 0) { \
- atomic_cmpset_int(&(mp)->mtx_lock, (mp)->mtx_lock, \
- MTX_UNOWNED); \
- write_eflags((mp)->mtx_savefl); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-
#endif /* I386_CPU */
-/*
- * Externally visible mutex functions.
- *------------------------------------------------------------------------------
- */
-
-/*
- * Return non-zero if a mutex is already owned by the current thread.
- */
-#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == CURTHD)
-
-/* Common strings */
-#ifdef MTX_STRS
-#ifdef KTR_EXTEND
-
-/*
- * KTR_EXTEND saves file name and line for all entries, so we don't need them
- * here. Theoretically we should also change the entries which refer to them
- * (from CTR5 to CTR3), but since they're just passed to snprintf as the last
- * parameters, it doesn't do any harm to leave them.
- */
-char STR_mtx_enter_fmt[] = "GOT %s [%x] r=%d";
-char STR_mtx_exit_fmt[] = "REL %s [%x] r=%d";
-char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] result=%d";
-#else
-char STR_mtx_enter_fmt[] = "GOT %s [%x] at %s:%d r=%d";
-char STR_mtx_exit_fmt[] = "REL %s [%x] at %s:%d r=%d";
-char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] at %s:%d result=%d";
-#endif
-char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
-char STR_mtx_owned[] = "mtx_owned(mpp)";
-char STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
-#else /* MTX_STRS */
-extern char STR_mtx_enter_fmt[];
-extern char STR_mtx_bad_type[];
-extern char STR_mtx_exit_fmt[];
-extern char STR_mtx_owned[];
-extern char STR_mtx_recurse[];
-extern char STR_mtx_try_enter_fmt[];
-#endif /* MTX_STRS */
-
-#ifndef KLD_MODULE
-/*
- * Get lock 'm'; the macro handles the easy (and most common) cases and leaves
- * the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant, much of this code is optimized out.
- */
-_MTX_INLINE void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *mpp = mtxp;
-
- /* bits only valid on mtx_exit() */
- MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type);
-
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this
- * lock we just bump the recursion count.
- */
- if (mpp->mtx_lock == CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
- * If an interrupt thread uses this we must block
- * interrupts here.
- */
- if ((type) & MTX_FIRST) {
- ASS_IEN;
- disable_intr();
- _getlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else {
- _getlock_spin_block(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
- done:
- WITNESS_ENTER(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
-}
-
-/*
- * Attempt to get MTX_DEF lock, return non-zero if lock acquired.
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-_MTX_INLINE int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
-
- rval = atomic_cmpset_int(&mpp->mtx_lock, MTX_UNOWNED, CURTHD);
-#ifdef SMP_DEBUG
- if (rval && mpp->mtx_witness != NULL) {
- ASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
- }
-#endif
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, file, line, rval);
-
- return rval;
-}
-
-#define mtx_legal2block() (read_eflags() & 0x200)
-
-/*
- * Release lock m.
- */
-_MTX_INLINE void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
-
- MPASS2(mtx_owned(mpp), STR_mtx_owned);
- WITNESS_EXIT(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
- atomic_cmpset_int(&mpp->mtx_lock, mpp->mtx_lock,
- MTX_UNOWNED);
- if (((type) & MTX_TOPHALF) == 0) {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- enable_intr();
- } else
- write_eflags(mpp->mtx_savefl);
- }
- } else {
- if ((type) & MTX_TOPHALF)
- _exitlock_spin(mpp,,);
- else {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- _exitlock_spin(mpp,, "sti");
- } else {
- _exitlock_spin(mpp,
- "pushl %3", "popfl");
- }
- }
- }
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- }
-}
-
-#endif /* KLD_MODULE */
#endif /* _KERNEL */
#else /* !LOCORE */
@@ -748,7 +259,7 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
#if defined(I386_CPU)
#define MTX_EXIT(lck, reg) \
- pushl lck+MTX_SAVEFL; \
+ pushl lck+MTX_SAVEINTR; \
movl $ MTX_UNOWNED,lck+MTX_LOCK; \
popf
@@ -761,11 +272,11 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
MPLOCKED \
cmpxchgl reg,lck+MTX_LOCK; \
jnz 9b; \
- popl lck+MTX_SAVEFL;
+ popl lck+MTX_SAVEINTR;
/* Must use locked bus op (cmpxchg) when setting to unowned (barrier) */
#define MTX_EXIT(lck,reg) \
- pushl lck+MTX_SAVEFL; \
+ pushl lck+MTX_SAVEINTR; \
movl lck+MTX_LOCK,%eax; \
movl $ MTX_UNOWNED,reg; \
MPLOCKED \
@@ -784,7 +295,7 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
MPLOCKED \
cmpxchgl reg,lck+MTX_LOCK; \
jnz 9b; \
- popl lck+MTX_SAVEFL; \
+ popl lck+MTX_SAVEINTR; \
jmp 10f; \
8: add $4,%esp; \
10:
@@ -795,7 +306,7 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
js 9f; \
movl %eax,lck+MTX_RECURSE; \
jmp 8f; \
- pushl lck+MTX_SAVEFL; \
+ pushl lck+MTX_SAVEINTR; \
9: movl lck+MTX_LOCK,%eax; \
movl $ MTX_UNOWNED,reg; \
MPLOCKED \
diff --git a/sys/conf/files.alpha b/sys/conf/files.alpha
index b13639533306..f0cbc066920c 100644
--- a/sys/conf/files.alpha
+++ b/sys/conf/files.alpha
@@ -76,7 +76,6 @@ alpha/alpha/simplelock.s optional smp
alpha/alpha/support.s standard
alpha/alpha/swtch.s standard
alpha/alpha/sys_machdep.c standard
-alpha/alpha/synch_machdep.c standard
alpha/alpha/trap.c standard
alpha/alpha/userconfig.c optional userconfig
alpha/alpha/vm_machdep.c standard
diff --git a/sys/conf/files.i386 b/sys/conf/files.i386
index 56246840503b..1ac09b96e135 100644
--- a/sys/conf/files.i386
+++ b/sys/conf/files.i386
@@ -186,7 +186,6 @@ i386/i386/simplelock.s optional smp
i386/i386/support.s standard
i386/i386/swtch.s standard
i386/i386/sys_machdep.c standard
-i386/i386/synch_machdep.c standard
i386/i386/trap.c standard
i386/i386/userconfig.c optional userconfig
i386/i386/vm86.c standard
diff --git a/sys/conf/files.ia64 b/sys/conf/files.ia64
index 726ab2f2bced..406e0c1b426e 100644
--- a/sys/conf/files.ia64
+++ b/sys/conf/files.ia64
@@ -53,7 +53,6 @@ ia64/ia64/sscclock.c standard
ia64/ia64/sscdisk.c standard
ia64/ia64/swtch.s standard
ia64/ia64/sys_machdep.c standard
-ia64/ia64/synch_machdep.c standard
ia64/ia64/trap.c standard
ia64/ia64/userconfig.c optional userconfig
ia64/ia64/vm_machdep.c standard
diff --git a/sys/conf/files.pc98 b/sys/conf/files.pc98
index 4c34759e02e5..648a820f21df 100644
--- a/sys/conf/files.pc98
+++ b/sys/conf/files.pc98
@@ -178,7 +178,6 @@ i386/i386/simplelock.s optional smp
i386/i386/support.s standard
i386/i386/swtch.s standard
i386/i386/sys_machdep.c standard
-i386/i386/synch_machdep.c standard
i386/i386/trap.c standard
i386/i386/vm86.c standard
i386/i386/vm_machdep.c standard
diff --git a/sys/conf/options b/sys/conf/options
index 1a36439286c8..9d21ee6b0bd9 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -464,7 +464,7 @@ KTR_CPUMASK opt_global.h
KTR_COMPILE opt_global.h
KTR_ENTRIES opt_global.h
KTR_EXTEND opt_global.h
-SMP_DEBUG opt_global.h
+MUTEX_DEBUG opt_global.h
WITNESS opt_global.h
# options for ACPI support
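
With this rename, kernel configurations that previously set SMP_DEBUG must now set MUTEX_DEBUG instead. A hypothetical config fragment; WITNESS is shown alongside because the removed headers made it depend on the debug option, and INVARIANTS gated the MPASS() checks:

options 	MUTEX_DEBUG
options 	WITNESS
options 	INVARIANTS
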
diff --git a/sys/i386/i386/synch_machdep.c b/sys/i386/i386/synch_machdep.c
deleted file mode 100644
index 3dac9766ffd1..000000000000
--- a/sys/i386/i386/synch_machdep.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/*-
- * Copyright (c) 1997, 1998 Berkeley Software Design, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Berkeley Software Design Inc's name may not be used to endorse or
- * promote products derived from this software without specific prior
- * written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
- * $FreeBSD$
- */
-
-#define MTX_STRS /* define common strings */
-
-#include <sys/param.h>
-#include <sys/proc.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/ktr.h>
-#include <vm/vm.h>
-#include <vm/vm_extern.h>
-#include <ddb/ddb.h>
-#include <machine/atomic.h>
-#include <machine/cpu.h>
-#include <machine/mutex.h>
-
-/* All mutexes in system (used for debug/panic) */
-struct mtx all_mtx = { MTX_UNOWNED, 0, 0, "All mutexes queue head",
- TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
- { NULL, NULL }, &all_mtx, &all_mtx
-#ifdef SMP_DEBUG
- , NULL, { NULL, NULL }, NULL, 0
-#endif
-};
-
-int mtx_cur_cnt;
-int mtx_max_cnt;
-
-extern void _mtx_enter_giant_def(void);
-extern void _mtx_exit_giant_def(void);
-
-static void propagate_priority(struct proc *) __unused;
-
-#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#define mtx_owner(m) (mtx_unowned(m) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-
-#define RETIP(x) *(((int *)(&x)) - 1)
-#define SET_PRIO(p, pri) (p)->p_priority = (pri)
-
-/*
- * XXX Temporary, for use from assembly language
- */
-
-void
-_mtx_enter_giant_def(void)
-{
-
- mtx_enter(&Giant, MTX_DEF);
-}
-
-void
-_mtx_exit_giant_def(void)
-{
-
- mtx_exit(&Giant, MTX_DEF);
-}
-
-static void
-propagate_priority(struct proc *p)
-{
- int pri = p->p_priority;
- struct mtx *m = p->p_blocked;
-
- for (;;) {
- struct proc *p1;
-
- p = mtx_owner(m);
-
- if (p == NULL) {
- /*
- * This really isn't quite right. Really
- * ought to bump priority of process that
- * next acquires the mutex.
- */
- MPASS(m->mtx_lock == MTX_CONTESTED);
- return;
- }
- MPASS(p->p_magic == P_MAGIC);
- if (p->p_priority <= pri)
- return;
- /*
- * If lock holder is actually running, just bump priority.
- */
- if (TAILQ_NEXT(p, p_procq) == NULL) {
- MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
- SET_PRIO(p, pri);
- return;
- }
- /*
- * If on run queue move to new run queue, and
- * quit.
- */
- if (p->p_stat == SRUN) {
- MPASS(p->p_blocked == NULL);
- remrunqueue(p);
- SET_PRIO(p, pri);
- setrunqueue(p);
- return;
- }
-
- /*
- * If we aren't blocked on a mutex, give up and quit.
- */
- if (p->p_stat != SMTX) {
- printf(
- "XXX: process %d(%s):%d holds %s but isn't blocked on a mutex\n",
- p->p_pid, p->p_comm, p->p_stat, m->mtx_description);
- return;
- }
-
- /*
- * Pick up the mutex that p is blocked on.
- */
- m = p->p_blocked;
- MPASS(m != NULL);
-
- printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
- p->p_comm, m->mtx_description);
- /*
- * Check if the proc needs to be moved up on
- * the blocked chain
- */
- if ((p1 = TAILQ_PREV(p, rq, p_procq)) == NULL ||
- p1->p_priority <= pri) {
- if (p1)
- printf(
- "XXX: previous process %d(%s) has higher priority\n",
- p1->p_pid, p1->p_comm);
- else
- printf("XXX: process at head of run queue\n");
- continue;
- }
-
- /*
- * Remove proc from blocked chain
- */
- TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
- MPASS(p1->p_magic == P_MAGIC);
- if (p1->p_priority > pri)
- break;
- }
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- CTR4(KTR_LOCK,
- "propagate priority: p 0x%p moved before 0x%p on [0x%p] %s",
- p, p1, m, m->mtx_description);
- }
-}
-
-void
-mtx_enter_hard(struct mtx *m, int type, int flags)
-{
- struct proc *p = CURPROC;
-
- KASSERT(p != NULL, ("curproc is NULL in mutex"));
-
- switch (type) {
- case MTX_DEF:
- if ((m->mtx_lock & MTX_FLAGMASK) == (u_int)p) {
- m->mtx_recurse++;
- atomic_set_int(&m->mtx_lock, MTX_RECURSE);
- CTR1(KTR_LOCK, "mtx_enter: 0x%p recurse", m);
- return;
- }
- CTR3(KTR_LOCK, "mtx_enter: 0x%p contested (lock=%x) [0x%x]",
- m, m->mtx_lock, RETIP(m));
- while (!atomic_cmpset_int(&m->mtx_lock, MTX_UNOWNED, (int)p)) {
- int v;
- struct proc *p1;
-
- mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
- /*
- * check if the lock has been released while
- * waiting for the schedlock.
- */
- if ((v = m->mtx_lock) == MTX_UNOWNED) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
- /*
- * The mutex was marked contested on release. This
- * means that there are processes blocked on it.
- */
- if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- KASSERT(p1 != NULL, ("contested mutex has no contesters"));
- KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
- m->mtx_lock = (int)p | MTX_CONTESTED;
- if (p1->p_priority < p->p_priority) {
- SET_PRIO(p, p1->p_priority);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- }
- /*
- * If the mutex isn't already contested and
- * a failure occurs setting the contested bit, the
- * mutex was either released or the
- * state of the RECURSION bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_cmpset_int(&m->mtx_lock, v,
- v | MTX_CONTESTED)) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
-
- /* We definitely have to sleep for this lock */
- mtx_assert(m, MA_NOTOWNED);
-
-#ifdef notyet
- /*
- * If we're borrowing an interrupted thread's VM
- * context, we must clean up before going to sleep.
- */
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- CTR2(KTR_LOCK,
- "mtx_enter: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
- }
-#endif
-
- /* Put us on the list of procs blocked on this mutex */
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock &
- MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m,
- mtx_contested);
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- } else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_priority > p->p_priority)
- break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
- p_procq);
- }
-
- p->p_blocked = m; /* Who we're blocked on */
- p->p_stat = SMTX;
-#if 0
- propagate_priority(p);
-#endif
- CTR3(KTR_LOCK, "mtx_enter: p 0x%p blocked on [0x%p] %s",
- p, m, m->mtx_description);
- mi_switch();
- CTR3(KTR_LOCK,
- "mtx_enter: p 0x%p free from blocked on [0x%p] %s",
- p, m, m->mtx_description);
- mtx_exit(&sched_lock, MTX_SPIN);
- }
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- case MTX_SPIN | MTX_TOPHALF:
- {
- int i = 0;
-
- if (m->mtx_lock == (u_int)p) {
- m->mtx_recurse++;
- return;
- }
- CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
- for (;;) {
- if (atomic_cmpset_int(&m->mtx_lock, MTX_UNOWNED,
- (u_int)p))
- break;
- while (m->mtx_lock != MTX_UNOWNED) {
- if (i++ < 1000000)
- continue;
- if (i++ < 6000000)
- DELAY (1);
-#ifdef DDB
- else if (!db_active)
-#else
- else
-#endif
- panic(
- "spin lock %s held by 0x%x for > 5 seconds",
- m->mtx_description, m->mtx_lock);
- }
- }
-
-#ifdef SMP_DEBUG
- if (type != MTX_SPIN)
- m->mtx_savefl = 0xdeadbeef;
- else
-#endif
- m->mtx_savefl = flags;
- CTR1(KTR_LOCK, "mtx_enter: 0x%p spin done", m);
- return;
- }
- }
-}
-
-void
-mtx_exit_hard(struct mtx *m, int type)
-{
- struct proc *p, *p1;
- struct mtx *m1;
- int pri;
-
- switch (type) {
- case MTX_DEF:
- case MTX_DEF | MTX_NOSWITCH:
- if (m->mtx_recurse != 0) {
- if (--(m->mtx_recurse) == 0)
- atomic_clear_int(&m->mtx_lock, MTX_RECURSE);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p unrecurse", m);
- return;
- }
- mtx_enter(&sched_lock, MTX_SPIN);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p contested", m);
- p = CURPROC;
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- LIST_REMOVE(m, mtx_contested);
- atomic_cmpset_int(&m->mtx_lock, m->mtx_lock,
- MTX_UNOWNED);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p not held", m);
- } else
- m->mtx_lock = MTX_CONTESTED;
- pri = MAXPRI;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
- if (cp < pri)
- pri = cp;
- }
- if (pri > p->p_nativepri)
- pri = p->p_nativepri;
- SET_PRIO(p, pri);
- CTR2(KTR_LOCK, "mtx_exit: 0x%p contested setrunqueue 0x%p",
- m, p1);
- p1->p_blocked = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
- if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
-#ifdef notyet
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- CTR2(KTR_LOCK,
-				    "mtx_exit: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
- }
-#endif
- setrunqueue(p);
- CTR2(KTR_LOCK, "mtx_exit: 0x%p switching out lock=0x%x",
- m, m->mtx_lock);
- mi_switch();
- CTR2(KTR_LOCK, "mtx_exit: 0x%p resuming lock=0x%x",
- m, m->mtx_lock);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- if (m->mtx_recurse != 0) {
- m->mtx_recurse--;
- return;
- }
- if (atomic_cmpset_int(&m->mtx_lock, CURTHD, MTX_UNOWNED)) {
- if (type & MTX_FIRST) {
- enable_intr(); /* XXX is this kosher? */
- } else {
- MPASS(m->mtx_savefl != 0xdeadbeef);
- write_eflags(m->mtx_savefl);
- }
- return;
- }
-		panic("unsuccessful release of spin lock");
- case MTX_SPIN | MTX_TOPHALF:
- if (m->mtx_recurse != 0) {
- m->mtx_recurse--;
- return;
- }
- if (atomic_cmpset_int(&m->mtx_lock, CURTHD, MTX_UNOWNED))
- return;
-		panic("unsuccessful release of spin lock");
- default:
- panic("mtx_exit_hard: unsupported type 0x%x\n", type);
- }
-}
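The contested-release case above recomputes the releasing process's priority
from every mutex it still holds contested. Isolated as a helper, the
computation is the following sketch (recompute_priority is not a function in
this change, it merely restates the loop above):

static int
recompute_priority(struct proc *p)
{
        struct mtx *m1;
        int pri = MAXPRI;

        /* Best (lowest-valued) priority among waiters we still block. */
        LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
                int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
                if (cp < pri)
                        pri = cp;
        }
        /* Never drop below our own native priority. */
        if (pri > p->p_nativepri)
                pri = p->p_nativepri;
        return (pri);
}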
-
-#define MV_DESTROY	0	/* validate before destroy */
-#define MV_INIT 1 /* validate before init */
-
-#ifdef SMP_DEBUG
-
-int mtx_validate __P((struct mtx *, int));
-
-int
-mtx_validate(struct mtx *m, int when)
-{
- struct mtx *mp;
- int i;
- int retval = 0;
-
- if (m == &all_mtx || cold)
- return 0;
-
- mtx_enter(&all_mtx, MTX_DEF);
- ASS(kernacc((caddr_t)all_mtx.mtx_next, 4, 1) == 1);
- ASS(all_mtx.mtx_next->mtx_prev == &all_mtx);
- for (i = 0, mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
- if (kernacc((caddr_t)mp->mtx_next, 4, 1) != 1) {
- panic("mtx_validate: mp=%p mp->mtx_next=%p",
- mp, mp->mtx_next);
- }
- i++;
- if (i > mtx_cur_cnt) {
- panic("mtx_validate: too many in chain, known=%d\n",
- mtx_cur_cnt);
- }
- }
- ASS(i == mtx_cur_cnt);
- switch (when) {
- case MV_DESTROY:
- for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
- if (mp == m)
- break;
- ASS(mp == m);
- break;
- case MV_INIT:
- for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
- if (mp == m) {
- /*
- * Not good. This mutex already exists.
- */
- retval = 1;
-#if 1
- printf("re-initing existing mutex %s\n",
- m->mtx_description);
- ASS(m->mtx_lock == MTX_UNOWNED);
- retval = 1;
-#else
- panic("re-initing existing mutex %s",
- m->mtx_description);
-#endif
- }
- }
- mtx_exit(&all_mtx, MTX_DEF);
- return (retval);
-}
-#endif
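mtx_validate() above walks the circular doubly-linked all_mtx list, with
all_mtx itself as the sentinel head. The membership test it performs for the
MV_DESTROY and MV_INIT cases reduces to this sketch (on_all_mtx_list is a
hypothetical helper, not in this change):

static int
on_all_mtx_list(struct mtx *m)
{
        struct mtx *mp;

        /* all_mtx is the sentinel head of a circular list. */
        for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
                if (mp == m)
                        return (1);
        return (0);
}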
-
-void
-mtx_init(struct mtx *m, char *t, int flag)
-{
-
- CTR2(KTR_LOCK, "mtx_init 0x%p (%s)", m, t);
-#ifdef SMP_DEBUG
- if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
- return;
-#endif
- bzero((void *)m, sizeof *m);
- TAILQ_INIT(&m->mtx_blocked);
- m->mtx_description = t;
- m->mtx_lock = MTX_UNOWNED;
- /* Put on all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
- m->mtx_next = &all_mtx;
- m->mtx_prev = all_mtx.mtx_prev;
- m->mtx_prev->mtx_next = m;
- all_mtx.mtx_prev = m;
- if (++mtx_cur_cnt > mtx_max_cnt)
- mtx_max_cnt = mtx_cur_cnt;
- mtx_exit(&all_mtx, MTX_DEF);
- witness_init(m, flag);
-}
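A typical lifecycle for this pair, as implied by mtx_init() above and
mtx_destroy() below (the foo_* names are illustrative; note that only the
pointer to the description string is stored, so it must outlive the mutex):

static struct mtx foo_mtx;              /* hypothetical lock */

static void
foo_attach(void)
{
        mtx_init(&foo_mtx, "foo lock", MTX_DEF);
}

static void
foo_detach(void)
{
        mtx_destroy(&foo_mtx);          /* must not be held recursively */
}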
-
-void
-mtx_destroy(struct mtx *m)
-{
-
- CTR2(KTR_LOCK, "mtx_destroy 0x%p (%s)", m, m->mtx_description);
-#ifdef SMP_DEBUG
- if (m->mtx_next == NULL)
- panic("mtx_destroy: %p (%s) already destroyed",
- m, m->mtx_description);
-
- if (!mtx_owned(m)) {
- ASS(m->mtx_lock == MTX_UNOWNED);
- } else {
- ASS((m->mtx_lock & (MTX_RECURSE|MTX_CONTESTED)) == 0);
- }
- mtx_validate(m, MV_DESTROY); /* diagnostic */
-#endif
-
-#ifdef WITNESS
- if (m->mtx_witness)
- witness_destroy(m);
-#endif /* WITNESS */
-
- /* Remove from the all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
- m->mtx_next->mtx_prev = m->mtx_prev;
- m->mtx_prev->mtx_next = m->mtx_next;
-#ifdef SMP_DEBUG
- m->mtx_next = m->mtx_prev = NULL;
-#endif
- mtx_cur_cnt--;
- mtx_exit(&all_mtx, MTX_DEF);
-}
diff --git a/sys/i386/include/mutex.h b/sys/i386/include/mutex.h
index 881cbfae3c72..1a8a7b65e7f6 100644
--- a/sys/i386/include/mutex.h
+++ b/sys/i386/include/mutex.h
@@ -33,266 +33,32 @@
#define _MACHINE_MUTEX_H_
#ifndef LOCORE
-#include <sys/queue.h>
#ifdef _KERNEL
-#include <sys/ktr.h>
-#include <sys/proc.h> /* Needed for curproc. */
-#include <machine/atomic.h>
-#include <machine/cpufunc.h>
-#include <machine/globals.h>
-
-/*
- * If kern_mutex.c is being built, compile non-inlined versions of various
- * functions so that kernel modules can use them.
- */
-#ifndef _KERN_MUTEX_C_
-#define _MTX_INLINE static __inline
-#else
-#define _MTX_INLINE
-#endif
-
-/*
- * Mutex flags
- *
- * Types
- */
-#define MTX_DEF 0x0 /* Default (spin/sleep) */
-#define MTX_SPIN 0x1 /* Spin only lock */
-
-/* Options */
-#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
-#define MTX_NORECURSE 0x8 /* No recursion possible */
-#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
-#define MTX_NOSWITCH 0x20 /* Do not switch on release */
-#define MTX_FIRST 0x40 /* First spin lock holder */
-#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
-
-/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
-#define MTX_HARDOPTS (MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
-
-/* Flags/value used in mtx_lock */
-#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
-#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
-#define MTX_FLAGMASK ~(MTX_RECURSE | MTX_CONTESTED)
-#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
-
-#endif /* _KERNEL */
-
-/*
- * Sleep/spin mutex
- */
-struct mtx {
- volatile u_int mtx_lock; /* lock owner/gate/flags */
- volatile u_int mtx_recurse; /* number of recursive holds */
- u_int mtx_savefl; /* saved flags (for spin locks) */
- char *mtx_description;
- TAILQ_HEAD(, proc) mtx_blocked;
- LIST_ENTRY(mtx) mtx_contested;
- struct mtx *mtx_next; /* all locks in system */
- struct mtx *mtx_prev;
-#ifdef SMP_DEBUG
- /* If you add anything here, adjust the mtxf_t definition below */
- struct witness *mtx_witness;
- LIST_ENTRY(mtx) mtx_held;
- const char *mtx_file;
- int mtx_line;
-#endif /* SMP_DEBUG */
-};
-
-/*
- * Filler for structs which need to remain the same size
- * whether or not SMP_DEBUG is turned on.
- */
-typedef struct mtxf {
-#ifdef SMP_DEBUG
- char mtxf_data[0];
-#else
- char mtxf_data[4*sizeof(void *) + sizeof(int)];
-#endif
-} mtxf_t;
-
-#define mp_fixme(string)
-
-#ifdef _KERNEL
-/* Misc */
-#define CURTHD ((u_int)CURPROC) /* Current thread ID */
-
-/* Prototypes */
-void mtx_init(struct mtx *m, char *description, int flag);
-void mtx_enter_hard(struct mtx *, int type, int flags);
-void mtx_exit_hard(struct mtx *, int type);
-void mtx_destroy(struct mtx *m);
-
-/*
- * Wrap the following functions with cpp macros so that filenames and line
- * numbers are embedded in the code correctly.
- */
-#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
-void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
-int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
-void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
-#endif
-
-#define mtx_enter(mtxp, type) \
- _mtx_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_try_enter(mtxp, type) \
- _mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_exit(mtxp, type) \
- _mtx_exit((mtxp), (type), __FILE__, __LINE__)
+#include <machine/psl.h>
/* Global locks */
-extern struct mtx sched_lock;
-extern struct mtx Giant;
extern struct mtx clock_lock;
/*
- * Used to replace a return statement with one that exits Giant first.
- */
-
-#define EGAR(a) \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return (a); \
-} while (0)
-
-#define VEGAR \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return; \
-} while (0)
-
-#define DROP_GIANT() \
-do { \
- int _giantcnt; \
- WITNESS_SAVE_DECL(Giant); \
- \
- WITNESS_SAVE(&Giant, Giant); \
- for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
- mtx_exit(&Giant, MTX_DEF)
-
-#define PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant); \
-} while (0)
-
-#define PARTIAL_PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant)
-
-
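Usage sketch for the Giant macros being deleted above: DROP_GIANT() opens a
brace scope (note the unbalanced do {) that PICKUP_GIANT() closes, so the two
must bracket the same block. Assuming a sleeping operation in between
(foo_wait is a hypothetical function):

static void
foo_wait(void *chan)
{
        DROP_GIANT();                   /* release Giant, however deep */
        tsleep(chan, PWAIT, "foowt", hz);
        PICKUP_GIANT();                 /* reacquire to the saved depth */
}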
-/*
* Debugging
*/
-#ifndef SMP_DEBUG
-#define mtx_assert(m, what)
-#else /* SMP_DEBUG */
-
-#define MA_OWNED 1
-#define MA_NOTOWNED 2
-#define mtx_assert(m, what) { \
- switch ((what)) { \
- case MA_OWNED: \
- ASS(mtx_owned((m))); \
- break; \
- case MA_NOTOWNED: \
- ASS(!mtx_owned((m))); \
- break; \
- default: \
- panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
- } \
-}
+#ifdef MUTEX_DEBUG
-#ifdef INVARIANTS
-#define ASS(ex) MPASS(ex)
-#define MPASS(ex) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- #ex, __FILE__, __LINE__)
-#define MPASS2(ex, what) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- what, __FILE__, __LINE__)
-
-#ifdef MTX_STRS
-char STR_IEN[] = "fl & 0x200";
-char STR_IDIS[] = "!(fl & 0x200)";
-#else /* MTX_STRS */
+#ifdef _KERN_MUTEX_C_
+char STR_IEN[] = "fl & PSL_I";
+char STR_IDIS[] = "!(fl & PSL_I)";
+char STR_SIEN[] = "mpp->mtx_saveintr & PSL_I";
+#else /* _KERN_MUTEX_C_ */
extern char STR_IEN[];
extern char STR_IDIS[];
-#endif /* MTX_STRS */
-#define ASS_IEN MPASS2(read_eflags() & 0x200, STR_IEN)
-#define ASS_IDIS MPASS2((read_eflags() & 0x200) == 0, STR_IDIS)
-#endif /* INVARIANTS */
-
-#endif /* SMP_DEBUG */
-
-#if !defined(SMP_DEBUG) || !defined(INVARIANTS)
-#define ASS(ex)
-#define MPASS(ex)
-#define MPASS2(ex, where)
-#define ASS_IEN
-#define ASS_IDIS
-#endif /* !defined(SMP_DEBUG) || !defined(INVARIANTS) */
-
-#ifdef WITNESS
-#ifndef SMP_DEBUG
-#error WITNESS requires SMP_DEBUG
-#endif /* SMP_DEBUG */
-#define WITNESS_ENTER(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_enter((m), (t), (f), (l))
-#define WITNESS_EXIT(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_exit((m), (t), (f), (l))
-
-#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
-#define WITNESS_SAVE_DECL(n) \
- const char * __CONCAT(n, __wf); \
- int __CONCAT(n, __wl)
-
-#define WITNESS_SAVE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
-} while (0)
-
-#define WITNESS_RESTORE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
-} while (0)
+extern char STR_SIEN[];
+#endif /* _KERN_MUTEX_C_ */
+#endif /* MUTEX_DEBUG */
-void witness_init(struct mtx *, int flag);
-void witness_destroy(struct mtx *);
-void witness_enter(struct mtx *, int, const char *, int);
-void witness_try_enter(struct mtx *, int, const char *, int);
-void witness_exit(struct mtx *, int, const char *, int);
-void witness_display(void(*)(const char *fmt, ...));
-void witness_list(struct proc *);
-int witness_sleep(int, struct mtx *, const char *, int);
-void witness_save(struct mtx *, const char **, int *);
-void witness_restore(struct mtx *, const char *, int);
-#else /* WITNESS */
-#define WITNESS_ENTER(m, t, f, l)
-#define WITNESS_EXIT(m, t, f, l)
-#define WITNESS_SLEEP(check, m)
-#define WITNESS_SAVE_DECL(n)
-#define WITNESS_SAVE(m, n)
-#define WITNESS_RESTORE(m, n)
-
-/*
- * flag++ is a sleazoid way of shutting up the unused parameter warning
- * in mtx_init()
- */
-#define witness_init(m, flag) flag++
-#define witness_destroy(m)
-#define witness_enter(m, t, f, l)
-#define witness_try_enter(m, t, f, l)
-#define witness_exit(m, t, f, l)
-#endif /* WITNESS */
+#define ASS_IEN MPASS2(read_eflags() & PSL_I, STR_IEN)
+#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, STR_IDIS)
+#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, STR_SIEN)
/*
* Assembly macros (for internal use only)
@@ -358,7 +124,7 @@ void witness_restore(struct mtx *, const char *, int);
"# getlock_spin_block" \
: "=&a" (_res), /* 0 (dummy output) */ \
"+m" (mtxp->mtx_lock), /* 1 */ \
- "=m" (mtxp->mtx_savefl) /* 2 */ \
+ "=m" (mtxp->mtx_saveintr) /* 2 */ \
: "r" (tid), /* 3 (input) */ \
"gi" (type), /* 4 */ \
"g" (mtxp) /* 5 */ \
@@ -456,7 +222,7 @@ void witness_restore(struct mtx *, const char *, int);
* We use cmpxchgl to clear lock (instead of simple store) to flush posting
 * buffers and make the change visible to other CPUs.
*/
-#define _exitlock_spin(mtxp, inten1, inten2) ({ \
+#define _exitlock_spin(mtxp) ({ \
int _res; \
\
__asm __volatile ( \
@@ -467,276 +233,21 @@ void witness_restore(struct mtx *, const char *, int);
" jmp 2f;" \
"1: movl %0,%%eax;" \
" movl $ " _V(MTX_UNOWNED) ",%%ecx;" \
-" " inten1 ";" \
+" pushl %3;" \
" " MPLOCKED "" \
" cmpxchgl %%ecx,%0;" \
-" " inten2 ";" \
+" popfl;" \
"2:" \
"# exitlock_spin" \
: "+m" (mtxp->mtx_lock), /* 0 */ \
"+m" (mtxp->mtx_recurse), /* 1 */ \
"=&a" (_res) /* 2 */ \
- : "g" (mtxp->mtx_savefl) /* 3 (used in 'inten') */ \
+ : "g" (mtxp->mtx_saveintr) /* 3 */ \
: "memory", "ecx" /* used */ ); \
})
-#else /* I386_CPU */
-
-/*
- * For 386 processors only.
- */
-
-/* Get a sleep lock, deal with recursion inline. */
-#define _getlock_sleep(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != (tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_int(&(mp)->mtx_lock, MTX_RECURSE); \
- (mp)->mtx_recurse++; \
- } \
- } \
-} while (0)
-
-/* Get a spin lock, handle recursion inline (as the less common case) */
-#define _getlock_spin_block(mp, tid, type) do { \
- u_int _mtx_fl = read_eflags(); \
- disable_intr(); \
- if (atomic_cmpset_int(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_fl); \
- else \
- (mp)->mtx_savefl = _mtx_fl; \
-} while (0)
-
-/*
- * Get a lock without any recursion handling. Calls the hard enter function if
- * we can't get it inline.
- */
-#define _getlock_norecurse(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
-} while (0)
-
-/*
- * Release a sleep lock assuming we haven't recursed on it; recursion is handled
- * in the hard function.
- */
-#define _exitlock_norecurse(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-
-/*
- * Release a sleep lock when it's likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-#define _exitlock(mp, tid, type) do { \
- if (atomic_cmpset_int(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) { \
- if ((mp)->mtx_lock & MTX_RECURSE) { \
- if (--((mp)->mtx_recurse) == 0) \
- atomic_clear_int(&(mp)->mtx_lock, \
- MTX_RECURSE); \
- } else { \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
- } \
-} while (0)
-
-/* Release a spin lock (with possible recursion). */
-#define _exitlock_spin(mp, inten1, inten2) do { \
- if ((mp)->mtx_recurse == 0) { \
- atomic_cmpset_int(&(mp)->mtx_lock, (mp)->mtx_lock, \
- MTX_UNOWNED); \
- write_eflags((mp)->mtx_savefl); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-
#endif /* I386_CPU */
-/*
- * Externally visible mutex functions.
- *------------------------------------------------------------------------------
- */
-
-/*
- * Return non-zero if a mutex is already owned by the current thread.
- */
-#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == CURTHD)
-
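mtx_owned() is the building block for ownership assertions; a typical
(hypothetical) use, reusing the softc sketch from earlier:

        KASSERT(mtx_owned(&sc->sc_mtx), ("foo_bump: sc_mtx not held"));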
-/* Common strings */
-#ifdef MTX_STRS
-#ifdef KTR_EXTEND
-
-/*
- * KTR_EXTEND saves file name and line for all entries, so we don't need them
- * here. Theoretically we should also change the entries which refer to them
- * (from CTR5 to CTR3), but since they're just passed to snprintf as the last
- * parameters, it doesn't do any harm to leave them.
- */
-char STR_mtx_enter_fmt[] = "GOT %s [%x] r=%d";
-char STR_mtx_exit_fmt[] = "REL %s [%x] r=%d";
-char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] result=%d";
-#else
-char STR_mtx_enter_fmt[] = "GOT %s [%x] at %s:%d r=%d";
-char STR_mtx_exit_fmt[] = "REL %s [%x] at %s:%d r=%d";
-char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] at %s:%d result=%d";
-#endif
-char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
-char STR_mtx_owned[] = "mtx_owned(mpp)";
-char STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
-#else /* MTX_STRS */
-extern char STR_mtx_enter_fmt[];
-extern char STR_mtx_bad_type[];
-extern char STR_mtx_exit_fmt[];
-extern char STR_mtx_owned[];
-extern char STR_mtx_recurse[];
-extern char STR_mtx_try_enter_fmt[];
-#endif /* MTX_STRS */
-
-#ifndef KLD_MODULE
-/*
- * Get lock 'm'; the macro handles the easy (and most common) cases and leaves
- * the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant, much of this code is optimized out.
- */
-_MTX_INLINE void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *mpp = mtxp;
-
- /* bits only valid on mtx_exit() */
- MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type);
-
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this
- * lock we just bump the recursion count.
- */
- if (mpp->mtx_lock == CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
-			 * If an interrupt thread uses this, we must block
- * interrupts here.
- */
- if ((type) & MTX_FIRST) {
- ASS_IEN;
- disable_intr();
- _getlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else {
- _getlock_spin_block(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
- done:
- WITNESS_ENTER(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
-}
-
-/*
- * Attempt to get a MTX_DEF lock; return non-zero if the lock was acquired.
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-_MTX_INLINE int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
-
- rval = atomic_cmpset_int(&mpp->mtx_lock, MTX_UNOWNED, CURTHD);
-#ifdef SMP_DEBUG
- if (rval && mpp->mtx_witness != NULL) {
- ASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
- }
-#endif
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, file, line, rval);
-
- return rval;
-}
-
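The non-blocking variant is typically used where sleeping is unacceptable and
the work can be deferred instead; a sketch (the foo_* helpers and foo_mtx are
hypothetical):

static void
foo_poll(void)
{
        if (mtx_try_enter(&foo_mtx, MTX_DEF)) {
                foo_process_queue();    /* got the lock without blocking */
                mtx_exit(&foo_mtx, MTX_DEF);
        } else
                foo_defer_work();       /* busy; retry from a timeout */
}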
-#define mtx_legal2block() (read_eflags() & 0x200)
-
-/*
- * Release lock m.
- */
-_MTX_INLINE void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
-
- MPASS2(mtx_owned(mpp), STR_mtx_owned);
- WITNESS_EXIT(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
- atomic_cmpset_int(&mpp->mtx_lock, mpp->mtx_lock,
- MTX_UNOWNED);
- if (((type) & MTX_TOPHALF) == 0) {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- enable_intr();
- } else
- write_eflags(mpp->mtx_savefl);
- }
- } else {
- if ((type) & MTX_TOPHALF)
- _exitlock_spin(mpp,,);
- else {
- if ((type) & MTX_FIRST) {
- ASS_IDIS;
- _exitlock_spin(mpp,, "sti");
- } else {
- _exitlock_spin(mpp,
- "pushl %3", "popfl");
- }
- }
- }
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- }
-}
-
-#endif /* KLD_MODULE */
#endif /* _KERNEL */
#else /* !LOCORE */
@@ -748,7 +259,7 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
#if defined(I386_CPU)
#define MTX_EXIT(lck, reg) \
- pushl lck+MTX_SAVEFL; \
+ pushl lck+MTX_SAVEINTR; \
movl $ MTX_UNOWNED,lck+MTX_LOCK; \
popf
@@ -761,11 +272,11 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
MPLOCKED \
cmpxchgl reg,lck+MTX_LOCK; \
jnz 9b; \
- popl lck+MTX_SAVEFL;
+ popl lck+MTX_SAVEINTR;
/* Must use locked bus op (cmpxchg) when setting to unowned (barrier) */
#define MTX_EXIT(lck,reg) \
- pushl lck+MTX_SAVEFL; \
+ pushl lck+MTX_SAVEINTR; \
movl lck+MTX_LOCK,%eax; \
movl $ MTX_UNOWNED,reg; \
MPLOCKED \
@@ -784,7 +295,7 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
MPLOCKED \
cmpxchgl reg,lck+MTX_LOCK; \
jnz 9b; \
- popl lck+MTX_SAVEFL; \
+ popl lck+MTX_SAVEINTR; \
jmp 10f; \
8: add $4,%esp; \
10:
@@ -795,7 +306,7 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
js 9f; \
movl %eax,lck+MTX_RECURSE; \
jmp 8f; \
- pushl lck+MTX_SAVEFL; \
+ pushl lck+MTX_SAVEINTR; \
9: movl lck+MTX_LOCK,%eax; \
movl $ MTX_UNOWNED,reg; \
MPLOCKED \
diff --git a/sys/ia64/ia64/synch_machdep.c b/sys/ia64/ia64/synch_machdep.c
deleted file mode 100644
index 33a586a08cd7..000000000000
--- a/sys/ia64/ia64/synch_machdep.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*-
- * Copyright (c) 1997, 1998 Berkeley Software Design, Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Berkeley Software Design Inc's name may not be used to endorse or
- * promote products derived from this software without specific prior
- * written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * from BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
- * $FreeBSD$
- */
-
-#define MTX_STRS /* define common strings */
-
-#include <sys/param.h>
-#include <sys/proc.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/ktr.h>
-#include <sys/lock.h>
-#include <vm/vm.h>
-#include <vm/pmap.h>
-#include <vm/vm_extern.h>
-#include <vm/vm_map.h>
-#include <sys/user.h>
-#include <ddb/ddb.h>
-#include <machine/atomic.h>
-#include <machine/clock.h>
-#include <machine/cpu.h>
-#include <machine/mutex.h>
-
-/* All mutexes in system (used for debug/panic) */
-struct mtx all_mtx = { MTX_UNOWNED, 0, 0, "All mutexes queue head",
- TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
- { NULL, NULL }, &all_mtx, &all_mtx
-#ifdef SMP_DEBUG
- , NULL, { NULL, NULL }, NULL, 0
-#endif
-};
-
-int mtx_cur_cnt;
-int mtx_max_cnt;
-
-extern void _mtx_enter_giant_def(void);
-extern void _mtx_exit_giant_def(void);
-
-static void propagate_priority(struct proc *) __unused;
-
-#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
-#define mtx_owner(m) (mtx_unowned(m) ? NULL \
- : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
-
-#define RETIP(x) *(((u_int64_t *)(&x)) - 1)
-#define SET_PRIO(p, pri) (p)->p_priority = (pri)
-
-/*
- * XXX Temporary, for use from assembly language
- */
-
-void
-_mtx_enter_giant_def(void)
-{
-
- mtx_enter(&Giant, MTX_DEF);
-}
-
-void
-_mtx_exit_giant_def(void)
-{
-
- mtx_exit(&Giant, MTX_DEF);
-}
-
-static void
-propagate_priority(struct proc *p)
-{
- int pri = p->p_priority;
- struct mtx *m = p->p_blocked;
-
- for (;;) {
- struct proc *p1;
-
- p = mtx_owner(m);
-
- if (p == NULL) {
- /*
- * This really isn't quite right. Really
- * ought to bump priority of process that
- * next acquires the mutex.
- */
- MPASS(m->mtx_lock == MTX_CONTESTED);
- return;
- }
- MPASS(p->p_magic == P_MAGIC);
- if (p->p_priority <= pri)
- return;
- /*
- * If lock holder is actually running, just bump priority.
- */
- if (TAILQ_NEXT(p, p_procq) == NULL) {
- MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
- SET_PRIO(p, pri);
- return;
- }
- /*
-		 * If on run queue, move to the new run queue and
- * quit.
- */
- if (p->p_stat == SRUN) {
- MPASS(p->p_blocked == NULL);
- remrunqueue(p);
- SET_PRIO(p, pri);
- setrunqueue(p);
- return;
- }
-
- /*
- * If we aren't blocked on a mutex, give up and quit.
- */
- if (p->p_stat != SMTX) {
- return;
- }
-
- /*
- * Pick up the mutex that p is blocked on.
- */
- m = p->p_blocked;
- MPASS(m != NULL);
-
- /*
- * Check if the proc needs to be moved up on
- * the blocked chain
- */
- if ((p1 = TAILQ_PREV(p, rq, p_procq)) == NULL ||
- p1->p_priority <= pri)
- continue;
-
- /*
- * Remove proc from blocked chain
- */
- TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
- MPASS(p1->p_magic == P_MAGIC);
- if (p1->p_priority > pri)
- break;
- }
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- CTR4(KTR_LOCK,
- "propagate priority: p 0x%p moved before 0x%p on [0x%p] %s",
- p, p1, m, m->mtx_description);
- }
-}
-
-void
-mtx_enter_hard(struct mtx *m, int type, int psr)
-{
- struct proc *p = CURPROC;
-
- KASSERT(p != NULL, ("curproc is NULL in mutex"));
-
- switch (type) {
- case MTX_DEF:
- if ((m->mtx_lock & MTX_FLAGMASK) == (u_int64_t)p) {
- m->mtx_recurse++;
- atomic_set_64(&m->mtx_lock, MTX_RECURSE);
- CTR1(KTR_LOCK, "mtx_enter: 0x%p recurse", m);
- return;
- }
- CTR3(KTR_LOCK, "mtx_enter: 0x%p contested (lock=%lx) [0x%lx]",
- m, m->mtx_lock, RETIP(m));
- while (!atomic_cmpset_64(&m->mtx_lock, MTX_UNOWNED,
- (u_int64_t)p)) {
- int v;
- struct proc *p1;
-
- mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
- /*
- * check if the lock has been released while
- * waiting for the schedlock.
- */
- if ((v = m->mtx_lock) == MTX_UNOWNED) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
- /*
- * The mutex was marked contested on release. This
- * means that there are processes blocked on it.
- */
- if (v == MTX_CONTESTED) {
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- KASSERT(p1 != NULL, ("contested mutex has no contesters"));
- KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
- m->mtx_lock = (u_int64_t)p | MTX_CONTESTED;
- if (p1->p_priority < p->p_priority) {
- SET_PRIO(p, p1->p_priority);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- }
- /*
- * If the mutex isn't already contested and
-			 * a failure occurs setting the contested bit, the
-			 * mutex was either released or the
-			 * state of the RECURSION bit changed.
- */
- if ((v & MTX_CONTESTED) == 0 &&
- !atomic_cmpset_64(&m->mtx_lock, v,
- v | MTX_CONTESTED)) {
- mtx_exit(&sched_lock, MTX_SPIN);
- continue;
- }
-
- /* We definitely have to sleep for this lock */
- mtx_assert(m, MA_NOTOWNED);
-
-#ifdef notyet
- /*
- * If we're borrowing an interrupted thread's VM
-			 * context, we must clean up before going to sleep.
- */
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- CTR2(KTR_LOCK,
- "mtx_enter: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
- }
-#endif
-
- /* Put us on the list of procs blocked on this mutex */
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- p1 = (struct proc *)(m->mtx_lock &
- MTX_FLAGMASK);
- LIST_INSERT_HEAD(&p1->p_contested, m,
- mtx_contested);
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
- } else {
- TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
- if (p1->p_priority > p->p_priority)
- break;
- if (p1)
- TAILQ_INSERT_BEFORE(p1, p, p_procq);
- else
- TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
- p_procq);
- }
-
- p->p_blocked = m; /* Who we're blocked on */
- p->p_stat = SMTX;
-#if 0
- propagate_priority(p);
-#endif
- CTR3(KTR_LOCK, "mtx_enter: p 0x%p blocked on [0x%p] %s",
- p, m, m->mtx_description);
- mi_switch();
- CTR3(KTR_LOCK,
- "mtx_enter: p 0x%p free from blocked on [0x%p] %s",
- p, m, m->mtx_description);
- mtx_exit(&sched_lock, MTX_SPIN);
- }
- ia64_mf();
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- case MTX_SPIN | MTX_TOPHALF:
- {
- int i = 0;
-
- if (m->mtx_lock == (u_int64_t)p) {
- m->mtx_recurse++;
- return;
- }
- CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
- for (;;) {
- if (atomic_cmpset_64(&m->mtx_lock, MTX_UNOWNED,
- (u_int64_t)p)) {
- ia64_mf();
- break;
- }
- while (m->mtx_lock != MTX_UNOWNED) {
- if (i++ < 1000000)
- continue;
- if (i++ < 6000000)
-				DELAY(1);
-#ifdef DDB
- else if (!db_active)
-#else
- else
-#endif
- panic(
- "spin lock %s held by 0x%lx for > 5 seconds",
- m->mtx_description, m->mtx_lock);
- }
- }
-
-#ifdef SMP_DEBUG
- if (type != MTX_SPIN)
- m->mtx_saveipl = 0xbeefface;
- else
-#endif
- m->mtx_savepsr = psr;
- CTR1(KTR_LOCK, "mtx_enter: 0x%p spin done", m);
- return;
- }
- }
-}
-
-void
-mtx_exit_hard(struct mtx *m, int type)
-{
- struct proc *p, *p1;
- struct mtx *m1;
- int pri;
-
- switch (type) {
- case MTX_DEF:
- case MTX_DEF | MTX_NOSWITCH:
- if (m->mtx_recurse != 0) {
- if (--(m->mtx_recurse) == 0)
- atomic_clear_64(&m->mtx_lock, MTX_RECURSE);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p unrecurse", m);
- return;
- }
- mtx_enter(&sched_lock, MTX_SPIN);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p contested", m);
- p = CURPROC;
- p1 = TAILQ_FIRST(&m->mtx_blocked);
- MPASS(p->p_magic == P_MAGIC);
- MPASS(p1->p_magic == P_MAGIC);
- TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
- if (TAILQ_EMPTY(&m->mtx_blocked)) {
- LIST_REMOVE(m, mtx_contested);
- atomic_cmpset_64(&m->mtx_lock, m->mtx_lock,
- MTX_UNOWNED);
- CTR1(KTR_LOCK, "mtx_exit: 0x%p not held", m);
- } else
- m->mtx_lock = MTX_CONTESTED;
- pri = MAXPRI;
- LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
- int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
- if (cp < pri)
- pri = cp;
- }
- if (pri > p->p_nativepri)
- pri = p->p_nativepri;
- SET_PRIO(p, pri);
- CTR2(KTR_LOCK, "mtx_exit: 0x%p contested setrunqueue 0x%p",
- m, p1);
- p1->p_blocked = NULL;
- p1->p_stat = SRUN;
- setrunqueue(p1);
- if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
-#ifdef notyet
- if (p->p_flag & (P_ITHD | P_SITHD)) {
- ithd_t *it = (ithd_t *)p;
-
- if (it->it_interrupted) {
- CTR2(KTR_LOCK,
-				    "mtx_exit: 0x%x interrupted 0x%x",
- it, it->it_interrupted);
- intr_thd_fixup(it);
- }
- }
-#endif
- setrunqueue(p);
- CTR2(KTR_LOCK, "mtx_exit: 0x%p switching out lock=0x%lx",
- m, m->mtx_lock);
- mi_switch();
- CTR2(KTR_LOCK, "mtx_exit: 0x%p resuming lock=0x%lx",
- m, m->mtx_lock);
- }
- mtx_exit(&sched_lock, MTX_SPIN);
- return;
- case MTX_SPIN:
- case MTX_SPIN | MTX_FIRST:
- if (m->mtx_recurse != 0) {
- m->mtx_recurse--;
- return;
- }
- ia64_mf();
- if (atomic_cmpset_64(&m->mtx_lock, CURTHD, MTX_UNOWNED)) {
- if (type & MTX_FIRST)
- enable_intr(); /* XXX is this kosher? */
- else {
- MPASS(m->mtx_saveipl != 0xbeefface);
- restore_intr(m->mtx_savepsr);
- }
- return;
- }
-		panic("unsuccessful release of spin lock");
- case MTX_SPIN | MTX_TOPHALF:
- if (m->mtx_recurse != 0) {
- m->mtx_recurse--;
- return;
- }
- ia64_mf();
- if (atomic_cmpset_64(&m->mtx_lock, CURTHD, MTX_UNOWNED))
- return;
-		panic("unsuccessful release of spin lock");
- default:
- panic("mtx_exit_hard: unsupported type 0x%x\n", type);
- }
-}
-
-#define MV_DESTROY	0	/* validate before destroy */
-#define MV_INIT 1 /* validate before init */
-
-#ifdef SMP_DEBUG
-
-int mtx_validate __P((struct mtx *, int));
-
-int
-mtx_validate(struct mtx *m, int when)
-{
- struct mtx *mp;
- int i;
- int retval = 0;
-
- if (m == &all_mtx || cold)
- return 0;
-
- mtx_enter(&all_mtx, MTX_DEF);
- ASS(kernacc((caddr_t)all_mtx.mtx_next, 4, 1) == 1);
- ASS(all_mtx.mtx_next->mtx_prev == &all_mtx);
- for (i = 0, mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
- if (kernacc((caddr_t)mp->mtx_next, 4, 1) != 1) {
- panic("mtx_validate: mp=%p mp->mtx_next=%p",
- mp, mp->mtx_next);
- }
- i++;
- if (i > mtx_cur_cnt) {
- panic("mtx_validate: too many in chain, known=%d\n",
- mtx_cur_cnt);
- }
- }
- ASS(i == mtx_cur_cnt);
- switch (when) {
- case MV_DESTROY:
- for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
- if (mp == m)
- break;
- ASS(mp == m);
- break;
- case MV_INIT:
- for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
- if (mp == m) {
- /*
-				 * Not good. This mutex already exists.
- */
- retval = 1;
-#if 1
- printf("re-initing existing mutex %s\n",
- m->mtx_description);
- ASS(m->mtx_lock == MTX_UNOWNED);
- retval = 1;
-#else
- panic("re-initing existing mutex %s",
- m->mtx_description);
-#endif
- }
- }
- mtx_exit(&all_mtx, MTX_DEF);
- return (retval);
-}
-#endif
-
-void
-mtx_init(struct mtx *m, char *t, int flag)
-{
-
- CTR2(KTR_LOCK, "mtx_init 0x%p (%s)", m, t);
-#ifdef SMP_DEBUG
- if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
- return;
-#endif
- bzero((void *)m, sizeof *m);
- TAILQ_INIT(&m->mtx_blocked);
- m->mtx_description = t;
- m->mtx_lock = MTX_UNOWNED;
- /* Put on all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
- m->mtx_next = &all_mtx;
- m->mtx_prev = all_mtx.mtx_prev;
- m->mtx_prev->mtx_next = m;
- all_mtx.mtx_prev = m;
- if (++mtx_cur_cnt > mtx_max_cnt)
- mtx_max_cnt = mtx_cur_cnt;
- mtx_exit(&all_mtx, MTX_DEF);
- witness_init(m, flag);
-}
-
-void
-mtx_destroy(struct mtx *m)
-{
-
- CTR2(KTR_LOCK, "mtx_destroy 0x%p (%s)", m, m->mtx_description);
-#ifdef SMP_DEBUG
- if (m->mtx_next == NULL)
- panic("mtx_destroy: %p (%s) already destroyed",
- m, m->mtx_description);
-
- if (!mtx_owned(m)) {
- ASS(m->mtx_lock == MTX_UNOWNED);
- } else {
- ASS((m->mtx_lock & (MTX_RECURSE|MTX_CONTESTED)) == 0);
- }
- mtx_validate(m, MV_DESTROY); /* diagnostic */
-#endif
-
-#ifdef WITNESS
- if (m->mtx_witness)
- witness_destroy(m);
-#endif /* WITNESS */
-
- /* Remove from the all mutex queue */
- mtx_enter(&all_mtx, MTX_DEF);
- m->mtx_next->mtx_prev = m->mtx_prev;
- m->mtx_prev->mtx_next = m->mtx_next;
-#ifdef SMP_DEBUG
- m->mtx_next = m->mtx_prev = NULL;
-#endif
- mtx_cur_cnt--;
- mtx_exit(&all_mtx, MTX_DEF);
-}
diff --git a/sys/ia64/include/mutex.h b/sys/ia64/include/mutex.h
index cd8df648f81b..fe9c646ba112 100644
--- a/sys/ia64/include/mutex.h
+++ b/sys/ia64/include/mutex.h
@@ -32,502 +32,33 @@
#ifndef _MACHINE_MUTEX_H_
#define _MACHINE_MUTEX_H_
-#ifndef LOCORE
-#include <sys/queue.h>
-
-#ifdef _KERNEL
-#include <sys/ktr.h>
-#include <sys/proc.h> /* Needed for curproc. */
-#include <machine/atomic.h>
-#include <machine/cpufunc.h>
-#include <machine/globaldata.h>
-#include <machine/globals.h>
-
-/*
- * If kern_mutex.c is being built, compile non-inlined versions of various
- * functions so that kernel modules can use them.
- */
-#ifndef _KERN_MUTEX_C_
-#define _MTX_INLINE static __inline
-#else
-#define _MTX_INLINE
-#endif
-
-/*
- * Mutex flags
- *
- * Types
- */
-#define MTX_DEF 0x1 /* Default (spin/sleep) */
-#define MTX_SPIN 0x2 /* Spin only lock */
-
-/* Options */
-#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
-#define MTX_NORECURSE 0x8 /* No recursion possible */
-#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
-#define MTX_NOSWITCH 0x20 /* Do not switch on release */
-#define MTX_FIRST 0x40 /* First spin lock holder */
-#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
-
-/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
-#define MTX_HARDOPTS (MTX_DEF | MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
-
-/* Flags/value used in mtx_lock */
-#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
-#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
-#define MTX_FLAGMASK ~(MTX_RECURSE | MTX_CONTESTED)
-#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
+#include <machine/ia64_cpu.h>
-#endif /* _KERNEL */
-
-/*
- * Sleep/spin mutex
- */
-struct mtx {
- volatile u_int64_t mtx_lock; /* lock owner/gate/flags */
- volatile u_int32_t mtx_recurse; /* number of recursive holds */
- u_int32_t mtx_savepsr; /* saved psr (for spin locks) */
- char *mtx_description;
- TAILQ_HEAD(, proc) mtx_blocked;
- LIST_ENTRY(mtx) mtx_contested;
- struct mtx *mtx_next; /* all locks in system */
- struct mtx *mtx_prev;
-#ifdef SMP_DEBUG
- /* If you add anything here, adjust the mtxf_t definition below */
- struct witness *mtx_witness;
- LIST_ENTRY(mtx) mtx_held;
- const char *mtx_file;
- int mtx_line;
-#endif /* SMP_DEBUG */
-};
-
-/*
- * Filler for structs which need to remain the same size
- * whether or not SMP_DEBUG is turned on.
- */
-typedef struct mtxf {
-#ifdef SMP_DEBUG
- char mtxf_data[0];
-#else
- char mtxf_data[4*sizeof(void *) + sizeof(int)];
-#endif
-} mtxf_t;
-
-#define mp_fixme(string)
+#ifndef LOCORE
#ifdef _KERNEL
-/* Misc */
-#define CURTHD ((u_int64_t)CURPROC) /* Current thread ID */
-
-/* Prototypes */
-void mtx_init(struct mtx *m, char *description, int flag);
-void mtx_enter_hard(struct mtx *, int type, int psr);
-void mtx_exit_hard(struct mtx *, int type);
-void mtx_destroy(struct mtx *m);
-
-/*
- * Wrap the following functions with cpp macros so that filenames and line
- * numbers are embedded in the code correctly.
- */
-#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
-void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
-int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
-void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
-#endif
-
-#define mtx_enter(mtxp, type) \
- _mtx_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_try_enter(mtxp, type) \
- _mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_exit(mtxp, type) \
- _mtx_exit((mtxp), (type), __FILE__, __LINE__)
-
-/* Global locks */
-extern struct mtx sched_lock;
-extern struct mtx Giant;
-
-/*
- * Used to replace a return statement with one that exits Giant first.
- */
-
-#define EGAR(a) \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return (a); \
-} while (0)
-
-#define VEGAR \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return; \
-} while (0)
-
-#define DROP_GIANT() \
-do { \
- int _giantcnt; \
- WITNESS_SAVE_DECL(Giant); \
- \
- WITNESS_SAVE(&Giant, Giant); \
- for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
- mtx_exit(&Giant, MTX_DEF)
-
-#define PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant); \
-} while (0)
-
-#define PARTIAL_PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant)
/*
* Debugging
*/
-#ifndef SMP_DEBUG
-#define mtx_assert(m, what)
-#else /* SMP_DEBUG */
+#ifdef MUTEX_DEBUG
-#define MA_OWNED 1
-#define MA_NOTOWNED 2
-#define mtx_assert(m, what) { \
- switch ((what)) { \
- case MA_OWNED: \
- ASS(mtx_owned((m))); \
- break; \
- case MA_NOTOWNED: \
- ASS(!mtx_owned((m))); \
- break; \
- default: \
- panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
- } \
-}
-
-#ifdef INVARIANTS
-#define ASS(ex) MPASS(ex)
-#define MPASS(ex) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- #ex, __FILE__, __LINE__)
-#define MPASS2(ex, what) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- what, __FILE__, __LINE__)
-
-#ifdef MTX_STRS
+#ifdef _KERN_MUTEX_C_
char STR_IEN[] = "psr.i";
char STR_IDIS[] = "!psr.i";
-#else /* MTX_STRS */
+char STR_SIEN[] = "mpp->mtx_saveintr & PSR_I";
+#else /* _KERN_MUTEX_C_ */
extern char STR_IEN[];
extern char STR_IDIS[];
-#endif /* MTX_STRS */
-#define ASS_IEN MPASS2((save_intr() & (1 << 14)), STR_IEN)
-#define ASS_IDIS MPASS2(!(save_intr() & (1 << 14)), STR_IDIS)
-#endif /* INVARIANTS */
-
-#endif /* SMP_DEBUG */
-
-#if !defined(SMP_DEBUG) || !defined(INVARIANTS)
-#define ASS(ex)
-#define MPASS(ex)
-#define MPASS2(ex, where)
-#define ASS_IEN
-#define ASS_IDIS
-#endif /* !defined(SMP_DEBUG) || !defined(INVARIANTS) */
-
-#ifdef WITNESS
-#ifndef SMP_DEBUG
-#error WITNESS requires SMP_DEBUG
-#endif /* SMP_DEBUG */
-#define WITNESS_ENTER(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_enter((m), (t), (f), (l))
-#define WITNESS_EXIT(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_exit((m), (t), (f), (l))
-
-#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
-#define WITNESS_SAVE_DECL(n) \
- const char * __CONCAT(n, __wf); \
- int __CONCAT(n, __wl)
-
-#define WITNESS_SAVE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
-} while (0)
-
-#define WITNESS_RESTORE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
-} while (0)
-
-void witness_init(struct mtx *, int flag);
-void witness_destroy(struct mtx *);
-void witness_enter(struct mtx *, int, const char *, int);
-void witness_try_enter(struct mtx *, int, const char *, int);
-void witness_exit(struct mtx *, int, const char *, int);
-void witness_display(void(*)(const char *fmt, ...));
-void witness_list(struct proc *);
-int witness_sleep(int, struct mtx *, const char *, int);
-void witness_save(struct mtx *, const char **, int *);
-void witness_restore(struct mtx *, const char *, int);
-#else /* WITNESS */
-#define WITNESS_ENTER(m, t, f, l)
-#define WITNESS_EXIT(m, t, f, l)
-#define WITNESS_SLEEP(check, m)
-#define WITNESS_SAVE_DECL(n)
-#define WITNESS_SAVE(m, n)
-#define WITNESS_RESTORE(m, n)
-
-/*
- * flag++ is a sleazoid way of shutting up the unused parameter warning
- * in mtx_init()
- */
-#define witness_init(m, flag) flag++
-#define witness_destroy(m)
-#define witness_enter(m, t, f, l)
-#define witness_try_enter(m, t, f, l)
-#define witness_exit(m, t, f, l)
-#endif /* WITNESS */
+extern char STR_SIEN[];
+#endif /* _KERN_MUTEX_C_ */
-/*
- * Assembly macros (for internal use only)
- *--------------------------------------------------------------------------
- */
-
-/*
- * Get a sleep lock, deal with recursion inline
- */
-
-#define _V(x) __STRING(x)
-
-#define _getlock_sleep(mp, tid, type) do { \
- if (ia64_cmpxchg_acq_64(&(mp)->mtx_lock, \
- MTX_UNOWNED, (tid)) != MTX_UNOWNED) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != (tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_64(&(mp)->mtx_lock, MTX_RECURSE); \
- (mp)->mtx_recurse++; \
- } \
- } \
-} while (0)
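The ia64 macros pair acquire and release memory semantics explicitly:
cmpxchg with acquire on the way in, a release store on the way out, so
ordinary memory operations cannot migrate out of the critical section.
Distilled into a sketch (the foo_* names are illustrative; both primitives
appear in the macros above and below):

static __inline void
foo_acquire(struct mtx *m, u_int64_t tid)
{
        if (ia64_cmpxchg_acq_64(&m->mtx_lock, MTX_UNOWNED, tid) !=
            MTX_UNOWNED)
                mtx_enter_hard(m, MTX_DEF & MTX_HARDOPTS, 0);  /* slow path */
}

static __inline void
foo_release(struct mtx *m)
{
        ia64_st_rel_64(&m->mtx_lock, MTX_UNOWNED);      /* release store */
}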
-
-/*
- * Get a spin lock, handle recursion inline (as the less common case)
- */
-
-#define _getlock_spin_block(mp, tid, type) do { \
- u_int _psr = save_intr(); \
- disable_intr(); \
- if (ia64_cmpxchg_acq_64(&(mp)->mtx_lock, \
- MTX_UNOWNED, (tid)) != MTX_UNOWNED) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _psr); \
- else \
- (mp)->mtx_savepsr = _psr; \
-} while (0)
-
-/*
- * Get a lock without any recursion handling. Calls the hard enter
- * function if we can't get it inline.
- */
-
-#define _getlock_norecurse(mp, tid, type) do { \
- if (ia64_cmpxchg_acq_64(&(mp)->mtx_lock, \
- MTX_UNOWNED, (tid)) != MTX_UNOWNED) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
-} while (0)
-
-/*
- * Release a sleep lock assuming we haven't recursed on it; recursion is
- * handled in the hard function.
- */
-
-#define _exitlock_norecurse(mp, tid, type) do { \
- if (ia64_cmpxchg_rel_64(&(mp)->mtx_lock, \
- (tid), MTX_UNOWNED) != (tid)) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-
-/*
- * Release a sleep lock when it's likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-
-#define _exitlock(mp, tid, type) do { \
- if (ia64_cmpxchg_rel_64(&(mp)->mtx_lock, \
- (tid), MTX_UNOWNED) != (tid)) { \
- if (((mp)->mtx_lock & MTX_RECURSE) && \
- (--(mp)->mtx_recurse == 0)) \
- atomic_clear_64(&(mp)->mtx_lock, MTX_RECURSE); \
- else \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
-} while (0)
-
-/*
- * Release a spin lock (with possible recursion)
- */
-
-#define _exitlock_spin(mp) do { \
- if ((mp)->mtx_recurse == 0) { \
- int _psr = (mp)->mtx_savepsr; \
- ia64_st_rel_64(&(mp)->mtx_lock, MTX_UNOWNED); \
- restore_intr(_psr); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-
-/*
- * Externally visible mutex functions
- *------------------------------------------------------------------------
- */
-
-/*
- * Return non-zero if a mutex is already owned by the current thread
- */
-#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == CURTHD)
-
-/* Common strings */
-#ifdef MTX_STRS
-char STR_mtx_enter_fmt[] = "GOT %s [%p] at %s:%d r=%d";
-char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
-char STR_mtx_exit_fmt[] = "REL %s [%p] at %s:%d r=%d";
-char STR_mtx_owned[] = "mtx_owned(mpp)";
-char STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
-char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] at %s:%d result=%d";
-#else /* MTX_STRS */
-extern char STR_mtx_enter_fmt[];
-extern char STR_mtx_bad_type[];
-extern char STR_mtx_exit_fmt[];
-extern char STR_mtx_owned[];
-extern char STR_mtx_recurse[];
-extern char STR_mtx_try_enter_fmt[];
-#endif /* MTX_STRS */
-
-#ifndef KLD_MODULE
-/*
- * Get lock 'm'; the macro handles the easy (and most common) cases and
- * leaves the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant, much of this code is optimized out.
- */
-_MTX_INLINE void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *mpp = mtxp;
-
- /* bits only valid on mtx_exit() */
- MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type);
-
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this lock we
- * just bump the recursion count.
- */
- if (mpp->mtx_lock == CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
-		 * If an interrupt thread uses this, we must block
- * interrupts here.
- */
- _getlock_spin_block(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
- done:
- WITNESS_ENTER(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
-}
-
-/*
- * Attempt to get a MTX_DEF lock; return non-zero if the lock was acquired.
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-_MTX_INLINE int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
-
- rval = atomic_cmpset_64(&mpp->mtx_lock, MTX_UNOWNED, CURTHD);
-#ifdef SMP_DEBUG
- if (rval && mpp->mtx_witness != NULL) {
- ASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
- }
-#endif
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, file, line, rval);
-
- return rval;
-}
-
-/*
- * Release lock m
- */
-_MTX_INLINE void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
+#endif /* MUTEX_DEBUG */
- MPASS2(mtx_owned(mpp), STR_mtx_owned);
- WITNESS_EXIT(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
- ia64_st_rel_64(&mpp->mtx_lock, MTX_UNOWNED);
- if (((type) & MTX_TOPHALF) == 0)
- restore_intr(mpp->mtx_savepsr);
- } else
- if ((type) & MTX_TOPHALF) {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else
- _exitlock_spin(mpp);
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY) {
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- }
-}
+#define ASS_IEN MPASS2((save_intr() & IA64_PSR_I), STR_IEN)
+#define ASS_IDIS MPASS2(!(save_intr() & IA64_PSR_I), STR_IDIS)
+#define ASS_SIEN(mpp)	MPASS2(((mpp)->mtx_saveintr & IA64_PSR_I), STR_SIEN)
-#endif /* KLD_MODULE */
#endif /* _KERNEL */
#else /* !LOCORE */
@@ -550,7 +81,7 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
(p1) br.cond.spnt.few 1b ;; \
addl rLCK=@ltoff(lck),gp ;; \
ld8 rLCK=[rLCK] ;; \
- add rLCK=MTX_SAVEPSR,rLCK ;; \
+ add rLCK=MTX_SAVEINTR,rLCK ;; \
st4 [rLCK]=rPSR
#define MTX_EXIT(lck, rTMP, rLCK) \
@@ -558,7 +89,7 @@ _mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
addl rLCK=@ltoff(lck),gp;; \
ld8 rLCK=[rLCK];; \
add rLCK=MTX_LOCK,rLCK;; \
- st8.rel [rLCK]=rTMP,MTX_SAVEPSR-MTX_LOCK ;; \
+ st8.rel [rLCK]=rTMP,MTX_SAVEINTR-MTX_LOCK ;; \
ld4 rTMP=[rLCK] ;; \
mov psr.l=rTMP ;; \
srlz.d
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 96bbdf9d8dd1..4141dbf5a473 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -26,6 +26,7 @@
* SUCH DAMAGE.
*
* from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
+ * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
* $FreeBSD$
*/
@@ -50,20 +51,604 @@
*/
#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
+#include <sys/vmmeter.h>
#include <sys/ktr.h>
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/clock.h>
#include <machine/cpu.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+
#define _KERN_MUTEX_C_ /* Cause non-inlined mtx_*() to be compiled. */
-#include <machine/mutex.h>
+#include <sys/mutex.h>
+
+/*
+ * Machine independent bits of the mutex implementation
+ */
+/* All mutexes in system (used for debug/panic) */
+#ifdef MUTEX_DEBUG
+static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0,
+ "All mutexes queue head" };
+static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, &all_mtx_debug,
+ TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
+ { NULL, NULL }, &all_mtx, &all_mtx };
+#else /* MUTEX_DEBUG */
+static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, "All mutexes queue head",
+ TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
+ { NULL, NULL }, &all_mtx, &all_mtx };
+#endif /* MUTEX_DEBUG */
+
+static int mtx_cur_cnt;
+static int mtx_max_cnt;
+
+void _mtx_enter_giant_def(void);
+void _mtx_exit_giant_def(void);
+static void propagate_priority(struct proc *) __unused;
+
+#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
+#define mtx_owner(m) (mtx_unowned(m) ? NULL \
+ : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
+
+#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
+#define SET_PRIO(p, pri) (p)->p_priority = (pri)
+
+/*
+ * XXX Temporary, for use from assembly language
+ */
+
+void
+_mtx_enter_giant_def(void)
+{
+
+ mtx_enter(&Giant, MTX_DEF);
+}
+
+void
+_mtx_exit_giant_def(void)
+{
+
+ mtx_exit(&Giant, MTX_DEF);
+}
+
+static void
+propagate_priority(struct proc *p)
+{
+ int pri = p->p_priority;
+ struct mtx *m = p->p_blocked;
+
+ for (;;) {
+ struct proc *p1;
+
+ p = mtx_owner(m);
+
+ if (p == NULL) {
+ /*
+ * This really isn't quite right. Really
+ * ought to bump priority of process that
+ * next acquires the mutex.
+ */
+ MPASS(m->mtx_lock == MTX_CONTESTED);
+ return;
+ }
+ MPASS(p->p_magic == P_MAGIC);
+ if (p->p_priority <= pri)
+ return;
+ /*
+ * If lock holder is actually running, just bump priority.
+ */
+ if (TAILQ_NEXT(p, p_procq) == NULL) {
+ MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
+ SET_PRIO(p, pri);
+ return;
+ }
+ /*
+		 * If on run queue, move to the new run queue and
+ * quit.
+ */
+ if (p->p_stat == SRUN) {
+ MPASS(p->p_blocked == NULL);
+ remrunqueue(p);
+ SET_PRIO(p, pri);
+ setrunqueue(p);
+ return;
+ }
+
+ /*
+ * If we aren't blocked on a mutex, give up and quit.
+ */
+ if (p->p_stat != SMTX) {
+ printf(
+ "XXX: process %d(%s):%d holds %s but isn't blocked on a mutex\n",
+ p->p_pid, p->p_comm, p->p_stat, m->mtx_description);
+ return;
+ }
+
+ /*
+ * Pick up the mutex that p is blocked on.
+ */
+ m = p->p_blocked;
+ MPASS(m != NULL);
+
+ printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
+ p->p_comm, m->mtx_description);
+ /*
+ * Check if the proc needs to be moved up on
+ * the blocked chain
+ */
+ if ((p1 = TAILQ_PREV(p, rq, p_procq)) == NULL ||
+ p1->p_priority <= pri) {
+ if (p1)
+ printf(
+ "XXX: previous process %d(%s) has higher priority\n",
+				    p1->p_pid, p1->p_comm);
+ else
+ printf("XXX: process at head of run queue\n");
+ continue;
+ }
+
+ /*
+ * Remove proc from blocked chain
+ */
+ TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
+ MPASS(p1->p_magic == P_MAGIC);
+ if (p1->p_priority > pri)
+ break;
+ }
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ CTR4(KTR_LOCK,
+ "propagate priority: p 0x%p moved before 0x%p on [0x%p] %s",
+ p, p1, m, m->mtx_description);
+ }
+}
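propagate_priority() implements priority inheritance: when a high-priority
process blocks on a mutex, the owner temporarily runs at the blocker's
priority so it cannot be starved by medium-priority work. Stripped of the
queue surgery above, the essential move is just priority lending (a sketch;
lend_priority is not a function in this change):

static void
lend_priority(struct proc *owner, struct proc *blocker)
{
        /* A lower numeric value means a higher priority. */
        if (blocker->p_priority < owner->p_priority)
                SET_PRIO(owner, blocker->p_priority);   /* inherit */
        /* p_nativepri is restored in mtx_exit_hard() on release. */
}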
+
+void
+mtx_enter_hard(struct mtx *m, int type, int saveintr)
+{
+ struct proc *p = CURPROC;
+ struct timeval new_switchtime;
+
+ KASSERT(p != NULL, ("curproc is NULL in mutex"));
+
+ switch (type) {
+ case MTX_DEF:
+ if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
+ m->mtx_recurse++;
+ atomic_set_ptr(&m->mtx_lock, MTX_RECURSE);
+ CTR1(KTR_LOCK, "mtx_enter: 0x%p recurse", m);
+ return;
+ }
+ CTR3(KTR_LOCK, "mtx_enter: 0x%p contested (lock=%p) [0x%p]",
+ m, m->mtx_lock, RETIP(m));
+ while (!_obtain_lock(m, p)) {
+ int v;
+ struct proc *p1;
+
+ mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
+ /*
+ * check if the lock has been released while
+ * waiting for the schedlock.
+ */
+ if ((v = m->mtx_lock) == MTX_UNOWNED) {
+ mtx_exit(&sched_lock, MTX_SPIN);
+ continue;
+ }
+ /*
+ * The mutex was marked contested on release. This
+ * means that there are processes blocked on it.
+ */
+ if (v == MTX_CONTESTED) {
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ KASSERT(p1 != NULL, ("contested mutex has no contesters"));
+ KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
+ m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
+ if (p1->p_priority < p->p_priority) {
+ SET_PRIO(p, p1->p_priority);
+ }
+ mtx_exit(&sched_lock, MTX_SPIN);
+ return;
+ }
+ /*
+ * If the mutex isn't already contested and
+			 * a failure occurs setting the contested bit, the
+			 * mutex was either released or the
+			 * state of the RECURSION bit changed.
+ */
+ if ((v & MTX_CONTESTED) == 0 &&
+ !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
+ (void *)(v | MTX_CONTESTED))) {
+ mtx_exit(&sched_lock, MTX_SPIN);
+ continue;
+ }
+
+ /* We definitely have to sleep for this lock */
+ mtx_assert(m, MA_NOTOWNED);
+
+#ifdef notyet
+ /*
+ * If we're borrowing an interrupted thread's VM
+			 * context, we must clean up before going to sleep.
+ */
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ CTR2(KTR_LOCK,
+ "mtx_enter: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
+ }
+ }
+#endif
+
+ /* Put us on the list of procs blocked on this mutex */
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ p1 = (struct proc *)(m->mtx_lock &
+ MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&p1->p_contested, m,
+ mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ } else {
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
+ if (p1->p_priority > p->p_priority)
+ break;
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
+ p_procq);
+ }
+
+ p->p_blocked = m; /* Who we're blocked on */
+ p->p_stat = SMTX;
+#if 0
+ propagate_priority(p);
+#endif
+ CTR3(KTR_LOCK, "mtx_enter: p 0x%p blocked on [0x%p] %s",
+ p, m, m->mtx_description);
+ /*
+ * Blatantly copied from mi_switch nearly verbatim.
+ * When Giant goes away and we stop dinking with it
+ * in mi_switch, we can go back to calling mi_switch
+ * directly here.
+ */
+
+ /*
+ * Compute the amount of time during which the current
+ * process was running, and add that to its total so
+ * far.
+ */
+ microuptime(&new_switchtime);
+ if (timevalcmp(&new_switchtime, &switchtime, <)) {
+ printf(
+ "microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
+ switchtime.tv_sec, switchtime.tv_usec,
+ new_switchtime.tv_sec,
+ new_switchtime.tv_usec);
+ new_switchtime = switchtime;
+ } else {
+ p->p_runtime += (new_switchtime.tv_usec -
+ switchtime.tv_usec) +
+ (new_switchtime.tv_sec - switchtime.tv_sec) *
+ (int64_t)1000000;
+ }
+
+ /*
+ * Pick a new current process and record its start time.
+ */
+ cnt.v_swtch++;
+ switchtime = new_switchtime;
+ cpu_switch();
+ if (switchtime.tv_sec == 0)
+ microuptime(&switchtime);
+ switchticks = ticks;
+ CTR3(KTR_LOCK,
+ "mtx_enter: p 0x%p free from blocked on [0x%p] %s",
+ p, m, m->mtx_description);
+ mtx_exit(&sched_lock, MTX_SPIN);
+ }
+ return;
+ case MTX_SPIN:
+ case MTX_SPIN | MTX_FIRST:
+ case MTX_SPIN | MTX_TOPHALF:
+ {
+ int i = 0;
+
+ if (m->mtx_lock == (uintptr_t)p) {
+ m->mtx_recurse++;
+ return;
+ }
+ CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
+ for (;;) {
+ if (_obtain_lock(m, p))
+ break;
+ while (m->mtx_lock != MTX_UNOWNED) {
+ if (i++ < 1000000)
+ continue;
+ if (i++ < 6000000)
+ DELAY (1);
+#ifdef DDB
+ else if (!db_active)
+#else
+ else
+#endif
+ panic(
+ "spin lock %s held by 0x%p for > 5 seconds",
+ m->mtx_description,
+ (void *)m->mtx_lock);
+ }
+ }
+
+#ifdef MUTEX_DEBUG
+ if (type != MTX_SPIN)
+ m->mtx_saveintr = 0xbeefface;
+ else
+#endif
+ m->mtx_saveintr = saveintr;
+ CTR1(KTR_LOCK, "mtx_enter: 0x%p spin done", m);
+ return;
+ }
+ }
+}
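
The MTX_SPIN case above spins in three stages: a bounded busy-wait, a DELAY(1) backoff, and finally a panic after roughly five seconds. What follows is a hedged C11 userland sketch of the same shape, not the kernel code itself: it returns an error instead of panicking, all names are illustrative, and the counter deliberately advances in both tests, as it does in the loop above.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static atomic_uintptr_t lock_word;	/* 0 plays the role of MTX_UNOWNED */

static int
spin_grab(uintptr_t self)
{
	uintptr_t free_val;
	int i = 0;

	for (;;) {
		free_val = 0;
		if (atomic_compare_exchange_weak(&lock_word, &free_val,
		    self))
			return (0);		/* got it */
		while (atomic_load(&lock_word) != 0) {
			if (i++ < 1000000)
				continue;	/* stage 1: pure spin */
			if (i++ < 6000000)
				usleep(1);	/* stage 2: back off */
			else
				return (-1);	/* stage 3: give up */
		}
	}
}

int
main(void)
{
	if (spin_grab((uintptr_t)1) == 0)
		printf("acquired\n");
	return (0);
}
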
+
+void
+mtx_exit_hard(struct mtx *m, int type)
+{
+ struct proc *p, *p1;
+ struct mtx *m1;
+ int pri;
+
+ p = CURPROC;
+ switch (type) {
+ case MTX_DEF:
+ case MTX_DEF | MTX_NOSWITCH:
+ if (m->mtx_recurse != 0) {
+ if (--(m->mtx_recurse) == 0)
+ atomic_clear_ptr(&m->mtx_lock, MTX_RECURSE);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p unrecurse", m);
+ return;
+ }
+ mtx_enter(&sched_lock, MTX_SPIN);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p contested", m);
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(p->p_magic == P_MAGIC);
+ MPASS(p1->p_magic == P_MAGIC);
+ TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ LIST_REMOVE(m, mtx_contested);
+ _release_lock_quick(m);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p not held", m);
+ } else
+ m->mtx_lock = MTX_CONTESTED;
+ pri = MAXPRI;
+ LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
+ if (cp < pri)
+ pri = cp;
+ }
+ if (pri > p->p_nativepri)
+ pri = p->p_nativepri;
+ SET_PRIO(p, pri);
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p contested setrunqueue 0x%p",
+ m, p1);
+ p1->p_blocked = NULL;
+ p1->p_stat = SRUN;
+ setrunqueue(p1);
+ if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
+#ifdef notyet
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ CTR2(KTR_LOCK,
+ "mtx_exit: 0x%x interruped 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
+ }
+ }
+#endif
+ setrunqueue(p);
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p switching out lock=0x%p",
+ m, m->mtx_lock);
+ mi_switch();
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p resuming lock=0x%p",
+ m, m->mtx_lock);
+ }
+ mtx_exit(&sched_lock, MTX_SPIN);
+ break;
+ case MTX_SPIN:
+ case MTX_SPIN | MTX_FIRST:
+ if (m->mtx_recurse != 0) {
+ m->mtx_recurse--;
+ return;
+ }
+ MPASS(mtx_owned(m));
+ _release_lock_quick(m);
+ if (type & MTX_FIRST)
+ enable_intr(); /* XXX is this kosher? */
+ else {
+ MPASS(m->mtx_saveintr != 0xbeefface);
+ restore_intr(m->mtx_saveintr);
+ }
+ break;
+ case MTX_SPIN | MTX_TOPHALF:
+ if (m->mtx_recurse != 0) {
+ m->mtx_recurse--;
+ return;
+ }
+ MPASS(mtx_owned(m));
+ _release_lock_quick(m);
+ break;
+ default:
+ panic("mtx_exit_hard: unsupported type 0x%x\n", type);
+ }
+}
+
+#define MV_DESTROY	0	/* validate before destroy */
+#define MV_INIT 1 /* validate before init */
+
+#ifdef MUTEX_DEBUG
+
+int mtx_validate __P((struct mtx *, int));
+
+int
+mtx_validate(struct mtx *m, int when)
+{
+ struct mtx *mp;
+ int i;
+ int retval = 0;
+
+ if (m == &all_mtx || cold)
+ return 0;
+
+ mtx_enter(&all_mtx, MTX_DEF);
+/*
+ * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory
+ * properly, we can re-enable the kernacc() checks.
+ */
+#ifndef __alpha__
+ MPASS(kernacc((caddr_t)all_mtx.mtx_next, sizeof(uintptr_t),
+ VM_PROT_READ) == 1);
+#endif
+ MPASS(all_mtx.mtx_next->mtx_prev == &all_mtx);
+ for (i = 0, mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
+#ifndef __alpha__
+ if (kernacc((caddr_t)mp->mtx_next, sizeof(uintptr_t),
+ VM_PROT_READ) != 1) {
+ panic("mtx_validate: mp=%p mp->mtx_next=%p",
+ mp, mp->mtx_next);
+ }
+#endif
+ i++;
+ if (i > mtx_cur_cnt) {
+ panic("mtx_validate: too many in chain, known=%d\n",
+ mtx_cur_cnt);
+ }
+ }
+ MPASS(i == mtx_cur_cnt);
+ switch (when) {
+ case MV_DESTROY:
+ for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
+ if (mp == m)
+ break;
+ MPASS(mp == m);
+ break;
+ case MV_INIT:
+ for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
+ if (mp == m) {
+ /*
+ * Not good. This mutex already exists.
+ */
+ printf("re-initing existing mutex %s\n",
+ m->mtx_description);
+ MPASS(m->mtx_lock == MTX_UNOWNED);
+ retval = 1;
+ }
+ }
+ mtx_exit(&all_mtx, MTX_DEF);
+ return (retval);
+}
+#endif
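
mtx_validate() walks all_mtx, a circular doubly-linked list whose statically initialized head is its own sentinel; mtx_init() and mtx_destroy() below splice entries in and out by hand. Here is a self-contained sketch of that list shape, with invented `node` names:

#include <stdio.h>

struct node {
	struct node	*next;
	struct node	*prev;
	const char	*name;
};

/* The head is a sentinel that links to itself when the list is empty. */
static struct node head = { &head, &head, "head" };

static void
node_link(struct node *n)	/* the same splice mtx_init() performs */
{
	n->next = &head;
	n->prev = head.prev;
	n->prev->next = n;
	head.prev = n;
}

int
main(void)
{
	struct node a = { NULL, NULL, "a" };
	struct node b = { NULL, NULL, "b" };
	struct node *p;

	node_link(&a);
	node_link(&b);
	for (p = head.next; p != &head; p = p->next)
		printf("%s\n", p->name);	/* a, then b */
	return (0);
}
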
+
+void
+mtx_init(struct mtx *m, const char *t, int flag)
+{
+#ifdef MUTEX_DEBUG
+ struct mtx_debug *debug;
+#endif
+
+ CTR2(KTR_LOCK, "mtx_init 0x%p (%s)", m, t);
+#ifdef MUTEX_DEBUG
+ if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
+ return;
+ if (flag & MTX_COLD)
+ debug = m->mtx_debug;
+ else
+ debug = NULL;
+ if (debug == NULL) {
+#ifdef DIAGNOSTIC
+		if (cold && bootverbose)
+ printf("malloc'ing mtx_debug while cold for %s\n", t);
+#endif
+
+ /* XXX - should not use DEVBUF */
+ debug = malloc(sizeof(struct mtx_debug), M_DEVBUF, M_NOWAIT);
+ MPASS(debug != NULL);
+ bzero(debug, sizeof(struct mtx_debug));
+ }
+#endif
+ bzero((void *)m, sizeof *m);
+ TAILQ_INIT(&m->mtx_blocked);
+#ifdef MUTEX_DEBUG
+ m->mtx_debug = debug;
+#endif
+ m->mtx_description = t;
+ m->mtx_lock = MTX_UNOWNED;
+ /* Put on all mutex queue */
+ mtx_enter(&all_mtx, MTX_DEF);
+ m->mtx_next = &all_mtx;
+ m->mtx_prev = all_mtx.mtx_prev;
+ m->mtx_prev->mtx_next = m;
+ all_mtx.mtx_prev = m;
+ if (++mtx_cur_cnt > mtx_max_cnt)
+ mtx_max_cnt = mtx_cur_cnt;
+ mtx_exit(&all_mtx, MTX_DEF);
+ witness_init(m, flag);
+}
+
+void
+mtx_destroy(struct mtx *m)
+{
+
+ CTR2(KTR_LOCK, "mtx_destroy 0x%p (%s)", m, m->mtx_description);
+#ifdef MUTEX_DEBUG
+ if (m->mtx_next == NULL)
+ panic("mtx_destroy: %p (%s) already destroyed",
+ m, m->mtx_description);
+
+ if (!mtx_owned(m)) {
+ MPASS(m->mtx_lock == MTX_UNOWNED);
+ } else {
+ MPASS((m->mtx_lock & (MTX_RECURSE|MTX_CONTESTED)) == 0);
+ }
+ mtx_validate(m, MV_DESTROY); /* diagnostic */
+#endif
+
+#ifdef WITNESS
+ if (m->mtx_witness)
+ witness_destroy(m);
+#endif /* WITNESS */
+
+ /* Remove from the all mutex queue */
+ mtx_enter(&all_mtx, MTX_DEF);
+ m->mtx_next->mtx_prev = m->mtx_prev;
+ m->mtx_prev->mtx_next = m->mtx_next;
+#ifdef MUTEX_DEBUG
+ m->mtx_next = m->mtx_prev = NULL;
+ free(m->mtx_debug, M_DEVBUF);
+ m->mtx_debug = NULL;
+#endif
+ mtx_cur_cnt--;
+ mtx_exit(&all_mtx, MTX_DEF);
+}
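
Taken together, mtx_init(), mtx_enter()/mtx_exit(), and mtx_destroy() form the lock lifecycle. A sketch of typical client code against the API this commit installs follows; the `foo` names are placeholders, not identifiers from the tree.

static struct mtx foo_mtx;
static int foo_count;

static void
foo_init(void)
{

	mtx_init(&foo_mtx, "foo counter", MTX_DEF);
}

static void
foo_bump(void)
{

	mtx_enter(&foo_mtx, MTX_DEF);	/* slow path is mtx_enter_hard() */
	foo_count++;
	mtx_exit(&foo_mtx, MTX_DEF);	/* may hand off to a blocked waiter */
}

static void
foo_fini(void)
{

	mtx_destroy(&foo_mtx);		/* must be unowned or simply owned */
}
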
/*
* The non-inlined versions of the mtx_*() functions are always built (above),
- * but the witness code depends on the SMP_DEBUG and WITNESS kernel options
+ * but the witness code depends on the MUTEX_DEBUG and WITNESS kernel options
* being specified.
*/
-#if (defined(SMP_DEBUG) && defined(WITNESS))
+#if (defined(MUTEX_DEBUG) && defined(WITNESS))
#define WITNESS_COUNT 200
#define WITNESS_NCHILDREN 2
@@ -306,7 +891,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
}
for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {
- ASS(i < 200);
+ MPASS(i < 200);
w1 = m1->mtx_witness;
if (isitmydescendant(w, w1)) {
mtx_exit(&w_mtx, MTX_SPIN);
@@ -355,7 +940,7 @@ out:
* is acquired in hardclock. Put it in the ignore list. It is
* likely not the mutex this assert fails on.
*/
- ASS(m->mtx_held.le_prev == NULL);
+ MPASS(m->mtx_held.le_prev == NULL);
LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
}
@@ -422,7 +1007,7 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
m->mtx_line = line;
m->mtx_file = file;
p = CURPROC;
- ASS(m->mtx_held.le_prev == NULL);
+ MPASS(m->mtx_held.le_prev == NULL);
LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
}
@@ -564,7 +1149,7 @@ itismychild(struct witness *parent, struct witness *child)
return (1);
parent = parent->w_morechildren;
}
- ASS(child != NULL);
+ MPASS(child != NULL);
parent->w_children[parent->w_childcnt++] = child;
/*
* now prune whole tree
@@ -603,7 +1188,7 @@ found:
for (w1 = w; w1->w_morechildren != NULL; w1 = w1->w_morechildren)
continue;
w->w_children[i] = w1->w_children[--w1->w_childcnt];
- ASS(w->w_children[i] != NULL);
+ MPASS(w->w_children[i] != NULL);
if (w1->w_childcnt != 0)
return;
@@ -639,7 +1224,7 @@ isitmydescendant(struct witness *parent, struct witness *child)
int j;
for (j = 0, w = parent; w != NULL; w = w->w_morechildren, j++) {
- ASS(j < 1000);
+ MPASS(j < 1000);
for (i = 0; i < w->w_childcnt; i++) {
if (w->w_children[i] == child)
return (1);
@@ -795,4 +1380,4 @@ witness_restore(struct mtx *m, const char *file, int line)
m->mtx_witness->w_line = line;
}
-#endif /* (defined(SMP_DEBUG) && defined(WITNESS)) */
+#endif /* (defined(MUTEX_DEBUG) && defined(WITNESS)) */
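
The hunks above replace ASS() with MPASS(), the panicking assertion defined in the mutex headers (its kernel form appears in the header diff further down). For reference, a userland equivalent built on the same #ex/__FILE__/__LINE__ expansion:

#include <stdio.h>
#include <stdlib.h>

#define MPASS(ex) do {							\
	if (!(ex)) {							\
		fprintf(stderr, "Assertion %s failed at %s:%d\n",	\
		    #ex, __FILE__, __LINE__);				\
		abort();						\
	}								\
} while (0)

int
main(void)
{
	int owned = 1;

	MPASS(owned == 1);	/* passes silently */
	MPASS(owned == 0);	/* aborts, printing the expression and line */
	return (0);
}
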
diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c
index 96bbdf9d8dd1..4141dbf5a473 100644
--- a/sys/kern/subr_turnstile.c
+++ b/sys/kern/subr_turnstile.c
@@ -26,6 +26,7 @@
* SUCH DAMAGE.
*
* from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
+ * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
* $FreeBSD$
*/
@@ -50,20 +51,604 @@
*/
#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
+#include <sys/vmmeter.h>
#include <sys/ktr.h>
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/clock.h>
#include <machine/cpu.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+
#define _KERN_MUTEX_C_ /* Cause non-inlined mtx_*() to be compiled. */
-#include <machine/mutex.h>
+#include <sys/mutex.h>
+
+/*
+ * Machine independent bits of the mutex implementation
+ */
+/* All mutexes in system (used for debug/panic) */
+#ifdef MUTEX_DEBUG
+static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0,
+ "All mutexes queue head" };
+static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, &all_mtx_debug,
+ TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
+ { NULL, NULL }, &all_mtx, &all_mtx };
+#else /* MUTEX_DEBUG */
+static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, "All mutexes queue head",
+ TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
+ { NULL, NULL }, &all_mtx, &all_mtx };
+#endif /* MUTEX_DEBUG */
+
+static int mtx_cur_cnt;
+static int mtx_max_cnt;
+
+void _mtx_enter_giant_def(void);
+void _mtx_exit_giant_def(void);
+static void propagate_priority(struct proc *) __unused;
+
+#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
+#define mtx_owner(m) (mtx_unowned(m) ? NULL \
+ : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
+
+#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
+#define SET_PRIO(p, pri) (p)->p_priority = (pri)
+
+/*
+ * XXX Temporary, for use from assembly language
+ */
+
+void
+_mtx_enter_giant_def(void)
+{
+
+ mtx_enter(&Giant, MTX_DEF);
+}
+
+void
+_mtx_exit_giant_def(void)
+{
+
+ mtx_exit(&Giant, MTX_DEF);
+}
+
+static void
+propagate_priority(struct proc *p)
+{
+ int pri = p->p_priority;
+ struct mtx *m = p->p_blocked;
+
+ for (;;) {
+ struct proc *p1;
+
+ p = mtx_owner(m);
+
+ if (p == NULL) {
+ /*
+			 * This really isn't quite right.  We really
+			 * ought to bump the priority of the process
+			 * that next acquires the mutex.
+ */
+ MPASS(m->mtx_lock == MTX_CONTESTED);
+ return;
+ }
+ MPASS(p->p_magic == P_MAGIC);
+ if (p->p_priority <= pri)
+ return;
+ /*
+ * If lock holder is actually running, just bump priority.
+ */
+ if (TAILQ_NEXT(p, p_procq) == NULL) {
+ MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
+ SET_PRIO(p, pri);
+ return;
+ }
+ /*
+		 * If on a run queue, move to the new run queue and quit.
+ */
+ if (p->p_stat == SRUN) {
+ MPASS(p->p_blocked == NULL);
+ remrunqueue(p);
+ SET_PRIO(p, pri);
+ setrunqueue(p);
+ return;
+ }
+
+ /*
+ * If we aren't blocked on a mutex, give up and quit.
+ */
+ if (p->p_stat != SMTX) {
+ printf(
+ "XXX: process %d(%s):%d holds %s but isn't blocked on a mutex\n",
+ p->p_pid, p->p_comm, p->p_stat, m->mtx_description);
+ return;
+ }
+
+ /*
+ * Pick up the mutex that p is blocked on.
+ */
+ m = p->p_blocked;
+ MPASS(m != NULL);
+
+ printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
+ p->p_comm, m->mtx_description);
+ /*
+ * Check if the proc needs to be moved up on
+ * the blocked chain
+ */
+ if ((p1 = TAILQ_PREV(p, rq, p_procq)) == NULL ||
+ p1->p_priority <= pri) {
+ if (p1)
+ printf(
+ "XXX: previous process %d(%s) has higher priority\n",
+				    p1->p_pid, p1->p_comm);
+ else
+ printf("XXX: process at head of run queue\n");
+ continue;
+ }
+
+ /*
+ * Remove proc from blocked chain
+ */
+ TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
+ MPASS(p1->p_magic == P_MAGIC);
+ if (p1->p_priority > pri)
+ break;
+ }
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ CTR4(KTR_LOCK,
+ "propagate priority: p 0x%p moved before 0x%p on [0x%p] %s",
+ p, p1, m, m->mtx_description);
+ }
+}
+
+void
+mtx_enter_hard(struct mtx *m, int type, int saveintr)
+{
+ struct proc *p = CURPROC;
+ struct timeval new_switchtime;
+
+ KASSERT(p != NULL, ("curproc is NULL in mutex"));
+
+ switch (type) {
+ case MTX_DEF:
+ if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
+ m->mtx_recurse++;
+ atomic_set_ptr(&m->mtx_lock, MTX_RECURSE);
+ CTR1(KTR_LOCK, "mtx_enter: 0x%p recurse", m);
+ return;
+ }
+ CTR3(KTR_LOCK, "mtx_enter: 0x%p contested (lock=%p) [0x%p]",
+ m, m->mtx_lock, RETIP(m));
+ while (!_obtain_lock(m, p)) {
+ int v;
+ struct proc *p1;
+
+ mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
+ /*
+ * check if the lock has been released while
+ * waiting for the schedlock.
+ */
+ if ((v = m->mtx_lock) == MTX_UNOWNED) {
+ mtx_exit(&sched_lock, MTX_SPIN);
+ continue;
+ }
+ /*
+ * The mutex was marked contested on release. This
+ * means that there are processes blocked on it.
+ */
+ if (v == MTX_CONTESTED) {
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ KASSERT(p1 != NULL, ("contested mutex has no contesters"));
+ KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
+ m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
+ if (p1->p_priority < p->p_priority) {
+ SET_PRIO(p, p1->p_priority);
+ }
+ mtx_exit(&sched_lock, MTX_SPIN);
+ return;
+ }
+ /*
+ * If the mutex isn't already contested and
+			 * a failure occurs setting the contested bit, the
+			 * mutex was either released or the
+			 * state of the RECURSION bit changed.
+ */
+ if ((v & MTX_CONTESTED) == 0 &&
+ !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
+ (void *)(v | MTX_CONTESTED))) {
+ mtx_exit(&sched_lock, MTX_SPIN);
+ continue;
+ }
+
+ /* We definitely have to sleep for this lock */
+ mtx_assert(m, MA_NOTOWNED);
+
+#ifdef notyet
+ /*
+ * If we're borrowing an interrupted thread's VM
+			 * context, we must clean up before going to sleep.
+ */
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ CTR2(KTR_LOCK,
+ "mtx_enter: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
+ }
+ }
+#endif
+
+ /* Put us on the list of procs blocked on this mutex */
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ p1 = (struct proc *)(m->mtx_lock &
+ MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&p1->p_contested, m,
+ mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ } else {
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
+ if (p1->p_priority > p->p_priority)
+ break;
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
+ p_procq);
+ }
+
+ p->p_blocked = m; /* Who we're blocked on */
+ p->p_stat = SMTX;
+#if 0
+ propagate_priority(p);
+#endif
+ CTR3(KTR_LOCK, "mtx_enter: p 0x%p blocked on [0x%p] %s",
+ p, m, m->mtx_description);
+ /*
+ * Blatantly copied from mi_switch nearly verbatim.
+ * When Giant goes away and we stop dinking with it
+ * in mi_switch, we can go back to calling mi_switch
+ * directly here.
+ */
+
+ /*
+ * Compute the amount of time during which the current
+ * process was running, and add that to its total so
+ * far.
+ */
+ microuptime(&new_switchtime);
+ if (timevalcmp(&new_switchtime, &switchtime, <)) {
+ printf(
+ "microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
+ switchtime.tv_sec, switchtime.tv_usec,
+ new_switchtime.tv_sec,
+ new_switchtime.tv_usec);
+ new_switchtime = switchtime;
+ } else {
+ p->p_runtime += (new_switchtime.tv_usec -
+ switchtime.tv_usec) +
+ (new_switchtime.tv_sec - switchtime.tv_sec) *
+ (int64_t)1000000;
+ }
+
+ /*
+ * Pick a new current process and record its start time.
+ */
+ cnt.v_swtch++;
+ switchtime = new_switchtime;
+ cpu_switch();
+ if (switchtime.tv_sec == 0)
+ microuptime(&switchtime);
+ switchticks = ticks;
+ CTR3(KTR_LOCK,
+ "mtx_enter: p 0x%p free from blocked on [0x%p] %s",
+ p, m, m->mtx_description);
+ mtx_exit(&sched_lock, MTX_SPIN);
+ }
+ return;
+ case MTX_SPIN:
+ case MTX_SPIN | MTX_FIRST:
+ case MTX_SPIN | MTX_TOPHALF:
+ {
+ int i = 0;
+
+ if (m->mtx_lock == (uintptr_t)p) {
+ m->mtx_recurse++;
+ return;
+ }
+ CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
+ for (;;) {
+ if (_obtain_lock(m, p))
+ break;
+ while (m->mtx_lock != MTX_UNOWNED) {
+ if (i++ < 1000000)
+ continue;
+ if (i++ < 6000000)
+ DELAY (1);
+#ifdef DDB
+ else if (!db_active)
+#else
+ else
+#endif
+ panic(
+ "spin lock %s held by 0x%p for > 5 seconds",
+ m->mtx_description,
+ (void *)m->mtx_lock);
+ }
+ }
+
+#ifdef MUTEX_DEBUG
+ if (type != MTX_SPIN)
+ m->mtx_saveintr = 0xbeefface;
+ else
+#endif
+ m->mtx_saveintr = saveintr;
+ CTR1(KTR_LOCK, "mtx_enter: 0x%p spin done", m);
+ return;
+ }
+ }
+}
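
The switch-time bookkeeping above (copied from mi_switch()) charges the blocking process for its elapsed run time before cpu_switch(). The arithmetic is a 64-bit microsecond delta between two timevals; here is a standalone check with hypothetical values:

#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

/* The same microsecond delta the switch-time accounting computes. */
static int64_t
tv_delta_usec(const struct timeval *old, const struct timeval *now)
{
	return ((now->tv_usec - old->tv_usec) +
	    (now->tv_sec - old->tv_sec) * (int64_t)1000000);
}

int
main(void)
{
	struct timeval a = { 10, 900000 };
	struct timeval b = { 12, 100000 };

	printf("%lld usec\n", (long long)tv_delta_usec(&a, &b)); /* 1200000 */
	return (0);
}
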
+
+void
+mtx_exit_hard(struct mtx *m, int type)
+{
+ struct proc *p, *p1;
+ struct mtx *m1;
+ int pri;
+
+ p = CURPROC;
+ switch (type) {
+ case MTX_DEF:
+ case MTX_DEF | MTX_NOSWITCH:
+ if (m->mtx_recurse != 0) {
+ if (--(m->mtx_recurse) == 0)
+ atomic_clear_ptr(&m->mtx_lock, MTX_RECURSE);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p unrecurse", m);
+ return;
+ }
+ mtx_enter(&sched_lock, MTX_SPIN);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p contested", m);
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(p->p_magic == P_MAGIC);
+ MPASS(p1->p_magic == P_MAGIC);
+ TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ LIST_REMOVE(m, mtx_contested);
+ _release_lock_quick(m);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p not held", m);
+ } else
+ m->mtx_lock = MTX_CONTESTED;
+ pri = MAXPRI;
+ LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
+ if (cp < pri)
+ pri = cp;
+ }
+ if (pri > p->p_nativepri)
+ pri = p->p_nativepri;
+ SET_PRIO(p, pri);
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p contested setrunqueue 0x%p",
+ m, p1);
+ p1->p_blocked = NULL;
+ p1->p_stat = SRUN;
+ setrunqueue(p1);
+ if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
+#ifdef notyet
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ CTR2(KTR_LOCK,
+ "mtx_exit: 0x%x interruped 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
+ }
+ }
+#endif
+ setrunqueue(p);
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p switching out lock=0x%p",
+ m, m->mtx_lock);
+ mi_switch();
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p resuming lock=0x%p",
+ m, m->mtx_lock);
+ }
+ mtx_exit(&sched_lock, MTX_SPIN);
+ break;
+ case MTX_SPIN:
+ case MTX_SPIN | MTX_FIRST:
+ if (m->mtx_recurse != 0) {
+ m->mtx_recurse--;
+ return;
+ }
+ MPASS(mtx_owned(m));
+ _release_lock_quick(m);
+ if (type & MTX_FIRST)
+ enable_intr(); /* XXX is this kosher? */
+ else {
+ MPASS(m->mtx_saveintr != 0xbeefface);
+ restore_intr(m->mtx_saveintr);
+ }
+ break;
+ case MTX_SPIN | MTX_TOPHALF:
+ if (m->mtx_recurse != 0) {
+ m->mtx_recurse--;
+ return;
+ }
+ MPASS(mtx_owned(m));
+ _release_lock_quick(m);
+ break;
+ default:
+ panic("mtx_exit_hard: unsupported type 0x%x\n", type);
+ }
+}
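
On release, mtx_exit_hard() recomputes the former owner's priority: the minimum (numerically lowest, i.e. best) priority over the head waiters of every mutex it still holds contested, clamped at its native priority. The same computation in isolation, with hypothetical priority values:

#include <stdio.h>

#define MAXPRI	255

static int
recompute_pri(const int *head_pris, int n, int nativepri)
{
	int pri = MAXPRI;
	int i;

	for (i = 0; i < n; i++)
		if (head_pris[i] < pri)
			pri = head_pris[i];
	if (pri > nativepri)		/* never end up worse than native */
		pri = nativepri;
	return (pri);
}

int
main(void)
{
	int heads[] = { 120, 80, 140 };

	printf("%d\n", recompute_pri(heads, 3, 100));	/* 80 */
	return (0);
}
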
+
+#define MV_DESTROY	0	/* validate before destroy */
+#define MV_INIT 1 /* validate before init */
+
+#ifdef MUTEX_DEBUG
+
+int mtx_validate __P((struct mtx *, int));
+
+int
+mtx_validate(struct mtx *m, int when)
+{
+ struct mtx *mp;
+ int i;
+ int retval = 0;
+
+ if (m == &all_mtx || cold)
+ return 0;
+
+ mtx_enter(&all_mtx, MTX_DEF);
+/*
+ * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory
+ * properly, we can re-enable the kernacc() checks.
+ */
+#ifndef __alpha__
+ MPASS(kernacc((caddr_t)all_mtx.mtx_next, sizeof(uintptr_t),
+ VM_PROT_READ) == 1);
+#endif
+ MPASS(all_mtx.mtx_next->mtx_prev == &all_mtx);
+ for (i = 0, mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
+#ifndef __alpha__
+ if (kernacc((caddr_t)mp->mtx_next, sizeof(uintptr_t),
+ VM_PROT_READ) != 1) {
+ panic("mtx_validate: mp=%p mp->mtx_next=%p",
+ mp, mp->mtx_next);
+ }
+#endif
+ i++;
+ if (i > mtx_cur_cnt) {
+ panic("mtx_validate: too many in chain, known=%d\n",
+ mtx_cur_cnt);
+ }
+ }
+ MPASS(i == mtx_cur_cnt);
+ switch (when) {
+ case MV_DESTROY:
+ for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
+ if (mp == m)
+ break;
+ MPASS(mp == m);
+ break;
+ case MV_INIT:
+ for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
+ if (mp == m) {
+ /*
+ * Not good. This mutex already exists.
+ */
+ printf("re-initing existing mutex %s\n",
+ m->mtx_description);
+ MPASS(m->mtx_lock == MTX_UNOWNED);
+ retval = 1;
+ }
+ }
+ mtx_exit(&all_mtx, MTX_DEF);
+ return (retval);
+}
+#endif
+
+void
+mtx_init(struct mtx *m, const char *t, int flag)
+{
+#ifdef MUTEX_DEBUG
+ struct mtx_debug *debug;
+#endif
+
+ CTR2(KTR_LOCK, "mtx_init 0x%p (%s)", m, t);
+#ifdef MUTEX_DEBUG
+ if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
+ return;
+ if (flag & MTX_COLD)
+ debug = m->mtx_debug;
+ else
+ debug = NULL;
+ if (debug == NULL) {
+#ifdef DIAGNOSTIC
+		if (cold && bootverbose)
+ printf("malloc'ing mtx_debug while cold for %s\n", t);
+#endif
+
+ /* XXX - should not use DEVBUF */
+ debug = malloc(sizeof(struct mtx_debug), M_DEVBUF, M_NOWAIT);
+ MPASS(debug != NULL);
+ bzero(debug, sizeof(struct mtx_debug));
+ }
+#endif
+ bzero((void *)m, sizeof *m);
+ TAILQ_INIT(&m->mtx_blocked);
+#ifdef MUTEX_DEBUG
+ m->mtx_debug = debug;
+#endif
+ m->mtx_description = t;
+ m->mtx_lock = MTX_UNOWNED;
+ /* Put on all mutex queue */
+ mtx_enter(&all_mtx, MTX_DEF);
+ m->mtx_next = &all_mtx;
+ m->mtx_prev = all_mtx.mtx_prev;
+ m->mtx_prev->mtx_next = m;
+ all_mtx.mtx_prev = m;
+ if (++mtx_cur_cnt > mtx_max_cnt)
+ mtx_max_cnt = mtx_cur_cnt;
+ mtx_exit(&all_mtx, MTX_DEF);
+ witness_init(m, flag);
+}
+
+void
+mtx_destroy(struct mtx *m)
+{
+
+ CTR2(KTR_LOCK, "mtx_destroy 0x%p (%s)", m, m->mtx_description);
+#ifdef MUTEX_DEBUG
+ if (m->mtx_next == NULL)
+ panic("mtx_destroy: %p (%s) already destroyed",
+ m, m->mtx_description);
+
+ if (!mtx_owned(m)) {
+ MPASS(m->mtx_lock == MTX_UNOWNED);
+ } else {
+ MPASS((m->mtx_lock & (MTX_RECURSE|MTX_CONTESTED)) == 0);
+ }
+ mtx_validate(m, MV_DESTROY); /* diagnostic */
+#endif
+
+#ifdef WITNESS
+ if (m->mtx_witness)
+ witness_destroy(m);
+#endif /* WITNESS */
+
+ /* Remove from the all mutex queue */
+ mtx_enter(&all_mtx, MTX_DEF);
+ m->mtx_next->mtx_prev = m->mtx_prev;
+ m->mtx_prev->mtx_next = m->mtx_next;
+#ifdef MUTEX_DEBUG
+ m->mtx_next = m->mtx_prev = NULL;
+ free(m->mtx_debug, M_DEVBUF);
+ m->mtx_debug = NULL;
+#endif
+ mtx_cur_cnt--;
+ mtx_exit(&all_mtx, MTX_DEF);
+}
/*
* The non-inlined versions of the mtx_*() functions are always built (above),
- * but the witness code depends on the SMP_DEBUG and WITNESS kernel options
+ * but the witness code depends on the MUTEX_DEBUG and WITNESS kernel options
* being specified.
*/
-#if (defined(SMP_DEBUG) && defined(WITNESS))
+#if (defined(MUTEX_DEBUG) && defined(WITNESS))
#define WITNESS_COUNT 200
#define WITNESS_NCHILDREN 2
@@ -306,7 +891,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
}
for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {
- ASS(i < 200);
+ MPASS(i < 200);
w1 = m1->mtx_witness;
if (isitmydescendant(w, w1)) {
mtx_exit(&w_mtx, MTX_SPIN);
@@ -355,7 +940,7 @@ out:
* is acquired in hardclock. Put it in the ignore list. It is
* likely not the mutex this assert fails on.
*/
- ASS(m->mtx_held.le_prev == NULL);
+ MPASS(m->mtx_held.le_prev == NULL);
LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
}
@@ -422,7 +1007,7 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
m->mtx_line = line;
m->mtx_file = file;
p = CURPROC;
- ASS(m->mtx_held.le_prev == NULL);
+ MPASS(m->mtx_held.le_prev == NULL);
LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
}
@@ -564,7 +1149,7 @@ itismychild(struct witness *parent, struct witness *child)
return (1);
parent = parent->w_morechildren;
}
- ASS(child != NULL);
+ MPASS(child != NULL);
parent->w_children[parent->w_childcnt++] = child;
/*
* now prune whole tree
@@ -603,7 +1188,7 @@ found:
for (w1 = w; w1->w_morechildren != NULL; w1 = w1->w_morechildren)
continue;
w->w_children[i] = w1->w_children[--w1->w_childcnt];
- ASS(w->w_children[i] != NULL);
+ MPASS(w->w_children[i] != NULL);
if (w1->w_childcnt != 0)
return;
@@ -639,7 +1224,7 @@ isitmydescendant(struct witness *parent, struct witness *child)
int j;
for (j = 0, w = parent; w != NULL; w = w->w_morechildren, j++) {
- ASS(j < 1000);
+ MPASS(j < 1000);
for (i = 0; i < w->w_childcnt; i++) {
if (w->w_children[i] == child)
return (1);
@@ -795,4 +1380,4 @@ witness_restore(struct mtx *m, const char *file, int line)
m->mtx_witness->w_line = line;
}
-#endif /* (defined(SMP_DEBUG) && defined(WITNESS)) */
+#endif /* (defined(MUTEX_DEBUG) && defined(WITNESS)) */
diff --git a/sys/kern/subr_witness.c b/sys/kern/subr_witness.c
index 96bbdf9d8dd1..4141dbf5a473 100644
--- a/sys/kern/subr_witness.c
+++ b/sys/kern/subr_witness.c
@@ -26,6 +26,7 @@
* SUCH DAMAGE.
*
* from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
+ * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
* $FreeBSD$
*/
@@ -50,20 +51,604 @@
*/
#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
+#include <sys/vmmeter.h>
#include <sys/ktr.h>
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/clock.h>
#include <machine/cpu.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+
#define _KERN_MUTEX_C_ /* Cause non-inlined mtx_*() to be compiled. */
-#include <machine/mutex.h>
+#include <sys/mutex.h>
+
+/*
+ * Machine independent bits of the mutex implementation
+ */
+/* All mutexes in system (used for debug/panic) */
+#ifdef MUTEX_DEBUG
+static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0,
+ "All mutexes queue head" };
+static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, &all_mtx_debug,
+ TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
+ { NULL, NULL }, &all_mtx, &all_mtx };
+#else /* MUTEX_DEBUG */
+static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, "All mutexes queue head",
+ TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked),
+ { NULL, NULL }, &all_mtx, &all_mtx };
+#endif /* MUTEX_DEBUG */
+
+static int mtx_cur_cnt;
+static int mtx_max_cnt;
+
+void _mtx_enter_giant_def(void);
+void _mtx_exit_giant_def(void);
+static void propagate_priority(struct proc *) __unused;
+
+#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
+#define mtx_owner(m) (mtx_unowned(m) ? NULL \
+ : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
+
+#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
+#define SET_PRIO(p, pri) (p)->p_priority = (pri)
+
+/*
+ * XXX Temporary, for use from assembly language
+ */
+
+void
+_mtx_enter_giant_def(void)
+{
+
+ mtx_enter(&Giant, MTX_DEF);
+}
+
+void
+_mtx_exit_giant_def(void)
+{
+
+ mtx_exit(&Giant, MTX_DEF);
+}
+
+static void
+propagate_priority(struct proc *p)
+{
+ int pri = p->p_priority;
+ struct mtx *m = p->p_blocked;
+
+ for (;;) {
+ struct proc *p1;
+
+ p = mtx_owner(m);
+
+ if (p == NULL) {
+ /*
+			 * This really isn't quite right.  We really
+			 * ought to bump the priority of the process
+			 * that next acquires the mutex.
+ */
+ MPASS(m->mtx_lock == MTX_CONTESTED);
+ return;
+ }
+ MPASS(p->p_magic == P_MAGIC);
+ if (p->p_priority <= pri)
+ return;
+ /*
+ * If lock holder is actually running, just bump priority.
+ */
+ if (TAILQ_NEXT(p, p_procq) == NULL) {
+ MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
+ SET_PRIO(p, pri);
+ return;
+ }
+ /*
+		 * If on a run queue, move to the new run queue and quit.
+ */
+ if (p->p_stat == SRUN) {
+ MPASS(p->p_blocked == NULL);
+ remrunqueue(p);
+ SET_PRIO(p, pri);
+ setrunqueue(p);
+ return;
+ }
+
+ /*
+ * If we aren't blocked on a mutex, give up and quit.
+ */
+ if (p->p_stat != SMTX) {
+ printf(
+ "XXX: process %d(%s):%d holds %s but isn't blocked on a mutex\n",
+ p->p_pid, p->p_comm, p->p_stat, m->mtx_description);
+ return;
+ }
+
+ /*
+ * Pick up the mutex that p is blocked on.
+ */
+ m = p->p_blocked;
+ MPASS(m != NULL);
+
+ printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
+ p->p_comm, m->mtx_description);
+ /*
+ * Check if the proc needs to be moved up on
+ * the blocked chain
+ */
+ if ((p1 = TAILQ_PREV(p, rq, p_procq)) == NULL ||
+ p1->p_priority <= pri) {
+ if (p1)
+ printf(
+ "XXX: previous process %d(%s) has higher priority\n",
+				    p1->p_pid, p1->p_comm);
+ else
+ printf("XXX: process at head of run queue\n");
+ continue;
+ }
+
+ /*
+ * Remove proc from blocked chain
+ */
+ TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
+ MPASS(p1->p_magic == P_MAGIC);
+ if (p1->p_priority > pri)
+ break;
+ }
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ CTR4(KTR_LOCK,
+ "propagate priority: p 0x%p moved before 0x%p on [0x%p] %s",
+ p, p1, m, m->mtx_description);
+ }
+}
+
+void
+mtx_enter_hard(struct mtx *m, int type, int saveintr)
+{
+ struct proc *p = CURPROC;
+ struct timeval new_switchtime;
+
+ KASSERT(p != NULL, ("curproc is NULL in mutex"));
+
+ switch (type) {
+ case MTX_DEF:
+ if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) {
+ m->mtx_recurse++;
+ atomic_set_ptr(&m->mtx_lock, MTX_RECURSE);
+ CTR1(KTR_LOCK, "mtx_enter: 0x%p recurse", m);
+ return;
+ }
+ CTR3(KTR_LOCK, "mtx_enter: 0x%p contested (lock=%p) [0x%p]",
+ m, m->mtx_lock, RETIP(m));
+ while (!_obtain_lock(m, p)) {
+ int v;
+ struct proc *p1;
+
+ mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY);
+ /*
+ * check if the lock has been released while
+ * waiting for the schedlock.
+ */
+ if ((v = m->mtx_lock) == MTX_UNOWNED) {
+ mtx_exit(&sched_lock, MTX_SPIN);
+ continue;
+ }
+ /*
+ * The mutex was marked contested on release. This
+ * means that there are processes blocked on it.
+ */
+ if (v == MTX_CONTESTED) {
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ KASSERT(p1 != NULL, ("contested mutex has no contesters"));
+ KASSERT(p != NULL, ("curproc is NULL for contested mutex"));
+ m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
+ if (p1->p_priority < p->p_priority) {
+ SET_PRIO(p, p1->p_priority);
+ }
+ mtx_exit(&sched_lock, MTX_SPIN);
+ return;
+ }
+ /*
+ * If the mutex isn't already contested and
+			 * a failure occurs setting the contested bit, the
+			 * mutex was either released or the
+			 * state of the RECURSION bit changed.
+ */
+ if ((v & MTX_CONTESTED) == 0 &&
+ !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
+ (void *)(v | MTX_CONTESTED))) {
+ mtx_exit(&sched_lock, MTX_SPIN);
+ continue;
+ }
+
+ /* We definitely have to sleep for this lock */
+ mtx_assert(m, MA_NOTOWNED);
+
+#ifdef notyet
+ /*
+ * If we're borrowing an interrupted thread's VM
+			 * context, we must clean up before going to sleep.
+ */
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ CTR2(KTR_LOCK,
+ "mtx_enter: 0x%x interrupted 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
+ }
+ }
+#endif
+
+ /* Put us on the list of procs blocked on this mutex */
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ p1 = (struct proc *)(m->mtx_lock &
+ MTX_FLAGMASK);
+ LIST_INSERT_HEAD(&p1->p_contested, m,
+ mtx_contested);
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
+ } else {
+ TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
+ if (p1->p_priority > p->p_priority)
+ break;
+ if (p1)
+ TAILQ_INSERT_BEFORE(p1, p, p_procq);
+ else
+ TAILQ_INSERT_TAIL(&m->mtx_blocked, p,
+ p_procq);
+ }
+
+ p->p_blocked = m; /* Who we're blocked on */
+ p->p_stat = SMTX;
+#if 0
+ propagate_priority(p);
+#endif
+ CTR3(KTR_LOCK, "mtx_enter: p 0x%p blocked on [0x%p] %s",
+ p, m, m->mtx_description);
+ /*
+ * Blatantly copied from mi_switch nearly verbatim.
+ * When Giant goes away and we stop dinking with it
+ * in mi_switch, we can go back to calling mi_switch
+ * directly here.
+ */
+
+ /*
+ * Compute the amount of time during which the current
+ * process was running, and add that to its total so
+ * far.
+ */
+ microuptime(&new_switchtime);
+ if (timevalcmp(&new_switchtime, &switchtime, <)) {
+ printf(
+ "microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
+ switchtime.tv_sec, switchtime.tv_usec,
+ new_switchtime.tv_sec,
+ new_switchtime.tv_usec);
+ new_switchtime = switchtime;
+ } else {
+ p->p_runtime += (new_switchtime.tv_usec -
+ switchtime.tv_usec) +
+ (new_switchtime.tv_sec - switchtime.tv_sec) *
+ (int64_t)1000000;
+ }
+
+ /*
+ * Pick a new current process and record its start time.
+ */
+ cnt.v_swtch++;
+ switchtime = new_switchtime;
+ cpu_switch();
+ if (switchtime.tv_sec == 0)
+ microuptime(&switchtime);
+ switchticks = ticks;
+ CTR3(KTR_LOCK,
+ "mtx_enter: p 0x%p free from blocked on [0x%p] %s",
+ p, m, m->mtx_description);
+ mtx_exit(&sched_lock, MTX_SPIN);
+ }
+ return;
+ case MTX_SPIN:
+ case MTX_SPIN | MTX_FIRST:
+ case MTX_SPIN | MTX_TOPHALF:
+ {
+ int i = 0;
+
+ if (m->mtx_lock == (uintptr_t)p) {
+ m->mtx_recurse++;
+ return;
+ }
+ CTR1(KTR_LOCK, "mtx_enter: %p spinning", m);
+ for (;;) {
+ if (_obtain_lock(m, p))
+ break;
+ while (m->mtx_lock != MTX_UNOWNED) {
+ if (i++ < 1000000)
+ continue;
+ if (i++ < 6000000)
+ DELAY (1);
+#ifdef DDB
+ else if (!db_active)
+#else
+ else
+#endif
+ panic(
+ "spin lock %s held by 0x%p for > 5 seconds",
+ m->mtx_description,
+ (void *)m->mtx_lock);
+ }
+ }
+
+#ifdef MUTEX_DEBUG
+ if (type != MTX_SPIN)
+ m->mtx_saveintr = 0xbeefface;
+ else
+#endif
+ m->mtx_saveintr = saveintr;
+ CTR1(KTR_LOCK, "mtx_enter: 0x%p spin done", m);
+ return;
+ }
+ }
+}
+
+void
+mtx_exit_hard(struct mtx *m, int type)
+{
+ struct proc *p, *p1;
+ struct mtx *m1;
+ int pri;
+
+ p = CURPROC;
+ switch (type) {
+ case MTX_DEF:
+ case MTX_DEF | MTX_NOSWITCH:
+ if (m->mtx_recurse != 0) {
+ if (--(m->mtx_recurse) == 0)
+ atomic_clear_ptr(&m->mtx_lock, MTX_RECURSE);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p unrecurse", m);
+ return;
+ }
+ mtx_enter(&sched_lock, MTX_SPIN);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p contested", m);
+ p1 = TAILQ_FIRST(&m->mtx_blocked);
+ MPASS(p->p_magic == P_MAGIC);
+ MPASS(p1->p_magic == P_MAGIC);
+ TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq);
+ if (TAILQ_EMPTY(&m->mtx_blocked)) {
+ LIST_REMOVE(m, mtx_contested);
+ _release_lock_quick(m);
+ CTR1(KTR_LOCK, "mtx_exit: 0x%p not held", m);
+ } else
+ m->mtx_lock = MTX_CONTESTED;
+ pri = MAXPRI;
+ LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
+ int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
+ if (cp < pri)
+ pri = cp;
+ }
+ if (pri > p->p_nativepri)
+ pri = p->p_nativepri;
+ SET_PRIO(p, pri);
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p contested setrunqueue 0x%p",
+ m, p1);
+ p1->p_blocked = NULL;
+ p1->p_stat = SRUN;
+ setrunqueue(p1);
+ if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
+#ifdef notyet
+ if (p->p_flag & (P_ITHD | P_SITHD)) {
+ ithd_t *it = (ithd_t *)p;
+
+ if (it->it_interrupted) {
+ CTR2(KTR_LOCK,
+ "mtx_exit: 0x%x interruped 0x%x",
+ it, it->it_interrupted);
+ intr_thd_fixup(it);
+ }
+ }
+#endif
+ setrunqueue(p);
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p switching out lock=0x%p",
+ m, m->mtx_lock);
+ mi_switch();
+ CTR2(KTR_LOCK, "mtx_exit: 0x%p resuming lock=0x%p",
+ m, m->mtx_lock);
+ }
+ mtx_exit(&sched_lock, MTX_SPIN);
+ break;
+ case MTX_SPIN:
+ case MTX_SPIN | MTX_FIRST:
+ if (m->mtx_recurse != 0) {
+ m->mtx_recurse--;
+ return;
+ }
+ MPASS(mtx_owned(m));
+ _release_lock_quick(m);
+ if (type & MTX_FIRST)
+ enable_intr(); /* XXX is this kosher? */
+ else {
+ MPASS(m->mtx_saveintr != 0xbeefface);
+ restore_intr(m->mtx_saveintr);
+ }
+ break;
+ case MTX_SPIN | MTX_TOPHALF:
+ if (m->mtx_recurse != 0) {
+ m->mtx_recurse--;
+ return;
+ }
+ MPASS(mtx_owned(m));
+ _release_lock_quick(m);
+ break;
+ default:
+ panic("mtx_exit_hard: unsupported type 0x%x\n", type);
+ }
+}
+
+#define MV_DESTROY	0	/* validate before destroy */
+#define MV_INIT 1 /* validate before init */
+
+#ifdef MUTEX_DEBUG
+
+int mtx_validate __P((struct mtx *, int));
+
+int
+mtx_validate(struct mtx *m, int when)
+{
+ struct mtx *mp;
+ int i;
+ int retval = 0;
+
+ if (m == &all_mtx || cold)
+ return 0;
+
+ mtx_enter(&all_mtx, MTX_DEF);
+/*
+ * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory
+ * properly, we can re-enable the kernacc() checks.
+ */
+#ifndef __alpha__
+ MPASS(kernacc((caddr_t)all_mtx.mtx_next, sizeof(uintptr_t),
+ VM_PROT_READ) == 1);
+#endif
+ MPASS(all_mtx.mtx_next->mtx_prev == &all_mtx);
+ for (i = 0, mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) {
+#ifndef __alpha__
+ if (kernacc((caddr_t)mp->mtx_next, sizeof(uintptr_t),
+ VM_PROT_READ) != 1) {
+ panic("mtx_validate: mp=%p mp->mtx_next=%p",
+ mp, mp->mtx_next);
+ }
+#endif
+ i++;
+ if (i > mtx_cur_cnt) {
+ panic("mtx_validate: too many in chain, known=%d\n",
+ mtx_cur_cnt);
+ }
+ }
+ MPASS(i == mtx_cur_cnt);
+ switch (when) {
+ case MV_DESTROY:
+ for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
+ if (mp == m)
+ break;
+ MPASS(mp == m);
+ break;
+ case MV_INIT:
+ for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next)
+ if (mp == m) {
+ /*
+ * Not good. This mutex already exists.
+ */
+ printf("re-initing existing mutex %s\n",
+ m->mtx_description);
+ MPASS(m->mtx_lock == MTX_UNOWNED);
+ retval = 1;
+ }
+ }
+ mtx_exit(&all_mtx, MTX_DEF);
+ return (retval);
+}
+#endif
+
+void
+mtx_init(struct mtx *m, const char *t, int flag)
+{
+#ifdef MUTEX_DEBUG
+ struct mtx_debug *debug;
+#endif
+
+ CTR2(KTR_LOCK, "mtx_init 0x%p (%s)", m, t);
+#ifdef MUTEX_DEBUG
+ if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */
+ return;
+ if (flag & MTX_COLD)
+ debug = m->mtx_debug;
+ else
+ debug = NULL;
+ if (debug == NULL) {
+#ifdef DIAGNOSTIC
+		if (cold && bootverbose)
+ printf("malloc'ing mtx_debug while cold for %s\n", t);
+#endif
+
+ /* XXX - should not use DEVBUF */
+ debug = malloc(sizeof(struct mtx_debug), M_DEVBUF, M_NOWAIT);
+ MPASS(debug != NULL);
+ bzero(debug, sizeof(struct mtx_debug));
+ }
+#endif
+ bzero((void *)m, sizeof *m);
+ TAILQ_INIT(&m->mtx_blocked);
+#ifdef MUTEX_DEBUG
+ m->mtx_debug = debug;
+#endif
+ m->mtx_description = t;
+ m->mtx_lock = MTX_UNOWNED;
+ /* Put on all mutex queue */
+ mtx_enter(&all_mtx, MTX_DEF);
+ m->mtx_next = &all_mtx;
+ m->mtx_prev = all_mtx.mtx_prev;
+ m->mtx_prev->mtx_next = m;
+ all_mtx.mtx_prev = m;
+ if (++mtx_cur_cnt > mtx_max_cnt)
+ mtx_max_cnt = mtx_cur_cnt;
+ mtx_exit(&all_mtx, MTX_DEF);
+ witness_init(m, flag);
+}
+
+void
+mtx_destroy(struct mtx *m)
+{
+
+ CTR2(KTR_LOCK, "mtx_destroy 0x%p (%s)", m, m->mtx_description);
+#ifdef MUTEX_DEBUG
+ if (m->mtx_next == NULL)
+ panic("mtx_destroy: %p (%s) already destroyed",
+ m, m->mtx_description);
+
+ if (!mtx_owned(m)) {
+ MPASS(m->mtx_lock == MTX_UNOWNED);
+ } else {
+ MPASS((m->mtx_lock & (MTX_RECURSE|MTX_CONTESTED)) == 0);
+ }
+ mtx_validate(m, MV_DESTROY); /* diagnostic */
+#endif
+
+#ifdef WITNESS
+ if (m->mtx_witness)
+ witness_destroy(m);
+#endif /* WITNESS */
+
+ /* Remove from the all mutex queue */
+ mtx_enter(&all_mtx, MTX_DEF);
+ m->mtx_next->mtx_prev = m->mtx_prev;
+ m->mtx_prev->mtx_next = m->mtx_next;
+#ifdef MUTEX_DEBUG
+ m->mtx_next = m->mtx_prev = NULL;
+ free(m->mtx_debug, M_DEVBUF);
+ m->mtx_debug = NULL;
+#endif
+ mtx_cur_cnt--;
+ mtx_exit(&all_mtx, MTX_DEF);
+}
/*
* The non-inlined versions of the mtx_*() functions are always built (above),
- * but the witness code depends on the SMP_DEBUG and WITNESS kernel options
+ * but the witness code depends on the MUTEX_DEBUG and WITNESS kernel options
* being specified.
*/
-#if (defined(SMP_DEBUG) && defined(WITNESS))
+#if (defined(MUTEX_DEBUG) && defined(WITNESS))
#define WITNESS_COUNT 200
#define WITNESS_NCHILDREN 2
@@ -306,7 +891,7 @@ witness_enter(struct mtx *m, int flags, const char *file, int line)
}
for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) {
- ASS(i < 200);
+ MPASS(i < 200);
w1 = m1->mtx_witness;
if (isitmydescendant(w, w1)) {
mtx_exit(&w_mtx, MTX_SPIN);
@@ -355,7 +940,7 @@ out:
* is acquired in hardclock. Put it in the ignore list. It is
* likely not the mutex this assert fails on.
*/
- ASS(m->mtx_held.le_prev == NULL);
+ MPASS(m->mtx_held.le_prev == NULL);
LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
}
@@ -422,7 +1007,7 @@ witness_try_enter(struct mtx *m, int flags, const char *file, int line)
m->mtx_line = line;
m->mtx_file = file;
p = CURPROC;
- ASS(m->mtx_held.le_prev == NULL);
+ MPASS(m->mtx_held.le_prev == NULL);
LIST_INSERT_HEAD(&p->p_heldmtx, (struct mtx*)m, mtx_held);
}
@@ -564,7 +1149,7 @@ itismychild(struct witness *parent, struct witness *child)
return (1);
parent = parent->w_morechildren;
}
- ASS(child != NULL);
+ MPASS(child != NULL);
parent->w_children[parent->w_childcnt++] = child;
/*
* now prune whole tree
@@ -603,7 +1188,7 @@ found:
for (w1 = w; w1->w_morechildren != NULL; w1 = w1->w_morechildren)
continue;
w->w_children[i] = w1->w_children[--w1->w_childcnt];
- ASS(w->w_children[i] != NULL);
+ MPASS(w->w_children[i] != NULL);
if (w1->w_childcnt != 0)
return;
@@ -639,7 +1224,7 @@ isitmydescendant(struct witness *parent, struct witness *child)
int j;
for (j = 0, w = parent; w != NULL; w = w->w_morechildren, j++) {
- ASS(j < 1000);
+ MPASS(j < 1000);
for (i = 0; i < w->w_childcnt; i++) {
if (w->w_children[i] == child)
return (1);
@@ -795,4 +1380,4 @@ witness_restore(struct mtx *m, const char *file, int line)
m->mtx_witness->w_line = line;
}
-#endif /* (defined(SMP_DEBUG) && defined(WITNESS)) */
+#endif /* (defined(MUTEX_DEBUG) && defined(WITNESS)) */
diff --git a/sys/powerpc/include/mutex.h b/sys/powerpc/include/mutex.h
index 12b12a8011c5..5ccc28d78753 100644
--- a/sys/powerpc/include/mutex.h
+++ b/sys/powerpc/include/mutex.h
@@ -33,293 +33,39 @@
#define _MACHINE_MUTEX_H_
#ifndef LOCORE
-#include <sys/queue.h>
#ifdef _KERNEL
-#include <sys/ktr.h>
-#include <sys/proc.h> /* Needed for curproc. */
-#include <machine/atomic.h>
-#include <machine/cpufunc.h>
-#include <machine/globaldata.h>
-#include <machine/globals.h>
-
-/*
- * If kern_mutex.c is being built, compile non-inlined versions of various
- * functions so that kernel modules can use them.
- */
-#ifndef _KERN_MUTEX_C_
-#define _MTX_INLINE static __inline
-#else
-#define _MTX_INLINE
-#endif
-
-/*
- * Mutex flags
- *
- * Types
- */
-#define MTX_DEF 0x1 /* Default (spin/sleep) */
-#define MTX_SPIN 0x2 /* Spin only lock */
-
-/* Options */
-#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
-#define MTX_NORECURSE 0x8 /* No recursion possible */
-#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
-#define MTX_NOSWITCH 0x20 /* Do not switch on release */
-#define MTX_FIRST 0x40 /* First spin lock holder */
-#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
-
-/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
-#define MTX_HARDOPTS (MTX_DEF | MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
-
-/* Flags/value used in mtx_lock */
-#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
-#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
-#define MTX_FLAGMASK ~(MTX_RECURSE | MTX_CONTESTED)
-#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
-
-#endif /* _KERNEL */
-
-/*
- * Sleep/spin mutex
- */
-struct mtx {
- volatile u_int64_t mtx_lock; /* lock owner/gate/flags */
- volatile u_int32_t mtx_recurse; /* number of recursive holds */
- u_int32_t mtx_saveipl; /* saved ipl (for spin locks) */
- char *mtx_description;
- TAILQ_HEAD(, proc) mtx_blocked;
- LIST_ENTRY(mtx) mtx_contested;
- struct mtx *mtx_next; /* all locks in system */
- struct mtx *mtx_prev;
-#ifdef SMP_DEBUG
- /* If you add anything here, adjust the mtxf_t definition below */
- struct witness *mtx_witness;
- LIST_ENTRY(mtx) mtx_held;
- const char *mtx_file;
- int mtx_line;
-#endif /* SMP_DEBUG */
-};
-
-/*
- * Filler for structs which need to remain the same size
- * whether or not SMP_DEBUG is turned on.
- */
-typedef struct mtxf {
-#ifdef SMP_DEBUG
- char mtxf_data[0];
-#else
- char mtxf_data[4*sizeof(void *) + sizeof(int)];
-#endif
-} mtxf_t;
-
-#define mp_fixme(string)
-
-#ifdef _KERNEL
-/* Misc */
-#define CURTHD ((u_int64_t)CURPROC) /* Current thread ID */
-
-/* Prototypes */
-void mtx_init(struct mtx *m, char *description, int flag);
-void mtx_enter_hard(struct mtx *, int type, int ipl);
-void mtx_exit_hard(struct mtx *, int type);
-void mtx_destroy(struct mtx *m);
-
-/*
- * Wrap the following functions with cpp macros so that filenames and line
- * numbers are embedded in the code correctly.
- */
-#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
-void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
-int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
-void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
-#endif
-
-#define mtx_enter(mtxp, type) \
- _mtx_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_try_enter(mtxp, type) \
- _mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
-
-#define mtx_exit(mtxp, type) \
- _mtx_exit((mtxp), (type), __FILE__, __LINE__)
-
-/* Global locks */
-extern struct mtx sched_lock;
-extern struct mtx Giant;
-
-/*
- * Used to replace return with an exit Giant and return.
- */
-
-#define EGAR(a) \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return (a); \
-} while (0)
-
-#define VEGAR \
-do { \
- mtx_exit(&Giant, MTX_DEF); \
- return; \
-} while (0)
-
-#define DROP_GIANT() \
-do { \
- int _giantcnt; \
- WITNESS_SAVE_DECL(Giant); \
- \
- WITNESS_SAVE(&Giant, Giant); \
- for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
- mtx_exit(&Giant, MTX_DEF)
-
-#define PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant); \
-} while (0)
-
-#define PARTIAL_PICKUP_GIANT() \
- mtx_assert(&Giant, MA_NOTOWNED); \
- while (_giantcnt--) \
- mtx_enter(&Giant, MTX_DEF); \
- WITNESS_RESTORE(&Giant, Giant)
-
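
The DROP_GIANT()/PICKUP_GIANT() macros being removed from this header rely on an open/close idiom: the opening macro starts a do-block and declares locals, the closing macro ends it, so the saved recursion count (and witness state) lives on the caller's stack in between. A minimal sketch of the idiom, with all names invented:

#include <stdio.h>

static int depth = 3;			/* stand-in recursion count */

#define DROP()								\
do {									\
	int _depth;							\
	for (_depth = 0; depth > 0; _depth++)				\
		depth--			/* fully release */

#define PICKUP()							\
	while (_depth--)						\
		depth++;		/* re-acquire to saved count */	\
} while (0)

int
main(void)
{
	DROP();
	printf("dropped, depth=%d\n", depth);	/* 0 */
	PICKUP();
	printf("restored, depth=%d\n", depth);	/* 3 */
	return (0);
}
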
/*
* Debugging
*/
-#ifndef SMP_DEBUG
-#define mtx_assert(m, what)
-#else /* SMP_DEBUG */
+#ifdef MUTEX_DEBUG
-#define MA_OWNED 1
-#define MA_NOTOWNED 2
-#define mtx_assert(m, what) { \
- switch ((what)) { \
- case MA_OWNED: \
- ASS(mtx_owned((m))); \
- break; \
- case MA_NOTOWNED: \
- ASS(!mtx_owned((m))); \
- break; \
- default: \
- panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
- } \
-}
-
-#ifdef INVARIANTS
-#define ASS(ex) MPASS(ex)
-#define MPASS(ex) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- #ex, __FILE__, __LINE__)
-#define MPASS2(ex, what) if (!(ex)) panic("Assertion %s failed at %s:%d", \
- what, __FILE__, __LINE__)
-
-#ifdef MTX_STRS
-char STR_IEN[] = "fl & 0x200";
-char STR_IDIS[] = "!(fl & 0x200)";
-#else /* MTX_STRS */
+#ifdef _KERN_MUTEX_C_
+char STR_IEN[] = "ps & IPL != IPL_HIGH";
+char STR_IDIS[] = "ps & IPL == IPL_HIGH";
+char STR_SIEN[] = "mpp->mtx_saveintr != IPL_HIGH";
+#else /* _KERN_MUTEX_C_ */
extern char STR_IEN[];
extern char STR_IDIS[];
-#endif /* MTX_STRS */
+extern char STR_SIEN[];
+#endif /* _KERN_MUTEX_C_ */
+
+#endif /* MUTEX_DEBUG */
+
#define ASS_IEN MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
== ALPHA_PSL_IPL_HIGH, STR_IEN)
#define ASS_IDIS MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \
!= ALPHA_PSL_IPL_HIGH, STR_IDIS)
-#endif /* INVARIANTS */
-
-#endif /* SMP_DEBUG */
-
-#if !defined(SMP_DEBUG) || !defined(INVARIANTS)
-#define ASS(ex)
-#define MPASS(ex)
-#define MPASS2(ex, where)
-#define ASS_IEN
-#define ASS_IDIS
-#endif /* !defined(SMP_DEBUG) || !defined(INVARIANTS) */
-
-#ifdef WITNESS
-#ifndef SMP_DEBUG
-#error WITNESS requires SMP_DEBUG
-#endif /* SMP_DEBUG */
-#define WITNESS_ENTER(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_enter((m), (t), (f), (l))
-#define WITNESS_EXIT(m, t, f, l) \
- if ((m)->mtx_witness != NULL) \
- witness_exit((m), (t), (f), (l))
-
-#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
-#define WITNESS_SAVE_DECL(n) \
- const char * __CONCAT(n, __wf); \
- int __CONCAT(n, __wl)
-
-#define WITNESS_SAVE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
-} while (0)
-
-#define WITNESS_RESTORE(m, n) \
-do { \
- if ((m)->mtx_witness != NULL) \
- witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
-} while (0)
-
-void witness_init(struct mtx *, int flag);
-void witness_destroy(struct mtx *);
-void witness_enter(struct mtx *, int, const char *, int);
-void witness_try_enter(struct mtx *, int, const char *, int);
-void witness_exit(struct mtx *, int, const char *, int);
-void witness_display(void(*)(const char *fmt, ...));
-void witness_list(struct proc *);
-int witness_sleep(int, struct mtx *, const char *, int);
-void witness_save(struct mtx *, const char **, int *);
-void witness_restore(struct mtx *, const char *, int);
-#else /* WITNESS */
-#define WITNESS_ENTER(m, t, f, l)
-#define WITNESS_EXIT(m, t, f, l)
-#define WITNESS_SLEEP(check, m)
-#define WITNESS_SAVE_DECL(n)
-#define WITNESS_SAVE(m, n)
-#define WITNESS_RESTORE(m, n)
-
-/*
- * flag++ is slezoid way of shutting up unused parameter warning
- * in mtx_init()
- */
-#define witness_init(m, flag) flag++
-#define witness_destroy(m)
-#define witness_enter(m, t, f, l)
-#define witness_try_enter(m, t, f, l)
-#define witness_exit(m, t, f, l)
-#endif /* WITNESS */
+#define	ASS_SIEN(mpp)	MPASS2((mpp)->mtx_saveintr != ALPHA_PSL_IPL_HIGH, STR_SIEN)
/*
* Assembly macros (for internal use only)
*--------------------------------------------------------------------------
*/
-/*
- * Get a sleep lock, deal with recursion inline
- */
-
#define _V(x) __STRING(x)
-#define _getlock_sleep(mp, tid, type) do { \
- if (atomic_cmpset_64(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) { \
- if (((mp)->mtx_lock & MTX_FLAGMASK) != (tid)) \
- mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
- else { \
- atomic_set_64(&(mp)->mtx_lock, MTX_RECURSE); \
- (mp)->mtx_recurse++; \
- } \
- } else { \
- alpha_mb(); \
- } \
-} while (0)
-
/*
 * Get a spin lock, handle recursion inline (as the less common case)
*/
@@ -334,208 +80,6 @@ void witness_restore(struct mtx *, const char *, int);
} \
} while (0)
-/*
- * Get a lock without any recursion handling. Calls the hard enter
- * function if we can't get it inline.
- */
-
-#define _getlock_norecurse(mp, tid, type) do { \
- if (atomic_cmpset_64(&(mp)->mtx_lock, MTX_UNOWNED, (tid)) == 0) \
- mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
- else \
- alpha_mb(); \
-} while (0)
-
-/*
- * Release a sleep lock assuming we haven't recursed on it, recursion is
- * handled in the hard function.
- */
-
-#define _exitlock_norecurse(mp, tid, type) do { \
- alpha_mb(); \
- if (atomic_cmpset_64(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
-} while (0)
-
-/*
- * Release a sleep lock when its likely we recursed (the code to
- * deal with simple recursion is inline).
- */
-
-#define _exitlock(mp, tid, type) do { \
- alpha_mb(); \
- if (atomic_cmpset_64(&(mp)->mtx_lock, (tid), MTX_UNOWNED) == 0) {\
- if (((mp)->mtx_lock & MTX_RECURSE) && \
- (--(mp)->mtx_recurse == 0)) \
- atomic_clear_64(&(mp)->mtx_lock, MTX_RECURSE); \
- else \
- mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
- } \
-} while (0)
-
-/*
- * Release a spin lock (with possible recursion)
- */
-
-#define _exitlock_spin(mp) do { \
- alpha_mb(); \
- if ((mp)->mtx_recurse == 0) { \
- int _ipl = (mp)->mtx_saveipl; \
- atomic_cmpset_64(&(mp)->mtx_lock, (mp)->mtx_lock, \
- MTX_UNOWNED); \
- alpha_pal_swpipl(_ipl); \
- } else { \
- (mp)->mtx_recurse--; \
- } \
-} while (0)
-
-/*
- * Externally visible mutex functions
- *------------------------------------------------------------------------
- */
-
-/*
- * Return non-zero if a mutex is already owned by the current thread
- */
-#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == CURTHD)
-
-/* Common strings */
-#ifdef MTX_STRS
-char STR_mtx_enter_fmt[] = "GOT %s [%p] at %s:%d r=%d";
-char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
-char STR_mtx_exit_fmt[] = "REL %s [%p] at %s:%d r=%d";
-char STR_mtx_owned[] = "mtx_owned(mpp)";
-char STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
-char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] at %s:%d result=%d";
-#else /* MTX_STRS */
-extern char STR_mtx_enter_fmt[];
-extern char STR_mtx_bad_type[];
-extern char STR_mtx_exit_fmt[];
-extern char STR_mtx_owned[];
-extern char STR_mtx_recurse[];
-extern char STR_mtx_try_enter_fmt[];
-#endif /* MTX_STRS */
-
-#ifndef KLD_MODULE
-/*
- * Get lock 'm', the macro handles the easy (and most common cases) and
- * leaves the slow stuff to the mtx_enter_hard() function.
- *
- * Note: since type is usually a constant much of this code is optimized out
- */
-_MTX_INLINE void
-_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *mpp = mtxp;
-
- /* bits only valid on mtx_exit() */
- MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
- STR_mtx_bad_type);
-
- if ((type) & MTX_SPIN) {
- /*
- * Easy cases of spin locks:
- *
- * 1) We already own the lock and will simply recurse on it (if
- * RLIKELY)
- *
- * 2) The lock is free, we just get it
- */
- if ((type) & MTX_RLIKELY) {
- /*
- * Check for recursion, if we already have this lock we
- * just bump the recursion count.
- */
- if (mpp->mtx_lock == CURTHD) {
- mpp->mtx_recurse++;
- goto done;
- }
- }
-
- if (((type) & MTX_TOPHALF) == 0) {
- /*
- * If an interrupt thread uses this we must block
- * interrupts here.
- */
- _getlock_spin_block(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- } else {
- /* Sleep locks */
- if ((type) & MTX_RLIKELY)
- _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else
- _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
- }
- done:
- WITNESS_ENTER(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_enter_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
-}
-
-/*
- * Attempt to get MTX_DEF lock, return non-zero if lock acquired
- *
- * XXX DOES NOT HANDLE RECURSION
- */
-_MTX_INLINE int
-_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
- int rval;
-
- rval = atomic_cmpset_64(&mpp->mtx_lock, MTX_UNOWNED, CURTHD);
-#ifdef SMP_DEBUG
- if (rval && mpp->mtx_witness != NULL) {
- ASS(mpp->mtx_recurse == 0);
- witness_try_enter(mpp, type, file, line);
- }
-#endif
- CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
- mpp->mtx_description, mpp, file, line, rval);
-
- return rval;
-}
-
-/*
- * Release lock m
- */
-_MTX_INLINE void
-_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
-{
- struct mtx *const mpp = mtxp;
-
- MPASS2(mtx_owned(mpp), STR_mtx_owned);
- WITNESS_EXIT(mpp, type, file, line);
- CTR5(KTR_LOCK, STR_mtx_exit_fmt,
- mpp->mtx_description, mpp, file, line,
- mpp->mtx_recurse);
- if ((type) & MTX_SPIN) {
- if ((type) & MTX_NORECURSE) {
- MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
- atomic_cmpset_64(&mpp->mtx_lock, mpp->mtx_lock,
- MTX_UNOWNED);
- if (((type) & MTX_TOPHALF) == 0)
- alpha_pal_swpipl(mpp->mtx_saveipl);
- } else
- if ((type) & MTX_TOPHALF) {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- } else
- _exitlock_spin(mpp);
- } else {
- /* Handle sleep locks */
- if ((type) & MTX_RLIKELY)
- _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
- else {
- _exitlock_norecurse(mpp, CURTHD,
- (type) & MTX_HARDOPTS);
- }
- }
-}
-
-#endif /* KLD_MODULE */
#endif /* _KERNEL */
#else /* !LOCORE */
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
new file mode 100644
index 000000000000..ddc9e08b88d6
--- /dev/null
+++ b/sys/sys/mutex.h
@@ -0,0 +1,606 @@
+/*-
+ * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Berkeley Software Design Inc's name may not be used to endorse or
+ * promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_MUTEX_H_
+#define _SYS_MUTEX_H_
+
+#ifndef LOCORE
+#include <sys/queue.h>
+
+#ifdef _KERNEL
+#include <sys/ktr.h>
+#include <sys/proc.h> /* Needed for curproc. */
+#include <machine/atomic.h>
+#include <machine/bus.h>
+#include <machine/cpufunc.h>
+#include <machine/globals.h>
+#endif /* _KERNEL */
+#endif /* !LOCORE */
+
+#include <machine/mutex.h>
+
+#ifndef LOCORE
+#ifdef _KERNEL
+
+/*
+ * If kern_mutex.c is being built, compile non-inlined versions of various
+ * functions so that kernel modules can use them.
+ */
+#ifndef _KERN_MUTEX_C_
+#define _MTX_INLINE static __inline
+#else
+#define _MTX_INLINE
+#endif
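
Editor's note: the _MTX_INLINE dance is the usual header trick for giving most
of the kernel an inlined fast path while still emitting one real, linkable copy
for modules: kern_mutex.c defines _KERN_MUTEX_C_ before including this header,
so the bodies below compile there as ordinary external functions. A stand-alone
sketch of the same idiom, using hypothetical "foo" names:

    /* foo.h */
    #ifndef _FOO_C_
    #define _FOO_INLINE static __inline  /* inline copy in each includer */
    #else
    #define _FOO_INLINE                  /* one external copy in foo.c */
    #endif

    _FOO_INLINE int
    foo_add(int a, int b)
    {
            return (a + b);
    }

    /* foo.c simply does: #define _FOO_C_ followed by #include "foo.h",
     * emitting the one definition that modules link against. */
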
+
+/*
+ * Mutex flags
+ *
+ * Types
+ */
+#define MTX_DEF 0x0 /* Default (spin/sleep) */
+#define MTX_SPIN 0x1 /* Spin only lock */
+
+/* Options */
+#define MTX_RLIKELY 0x4 /* (opt) Recursion likely */
+#define MTX_NORECURSE 0x8 /* No recursion possible */
+#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */
+#define MTX_NOSWITCH 0x20 /* Do not switch on release */
+#define MTX_FIRST 0x40 /* First spin lock holder */
+#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */
+#define MTX_COLD 0x100 /* Mutex init'd before malloc works */
+
+/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */
+#define MTX_HARDOPTS (MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH)
+
+/* Flags/value used in mtx_lock */
+#define MTX_RECURSE 0x01 /* (non-spin) lock held recursively */
+#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */
+#define MTX_FLAGMASK ~(MTX_RECURSE | MTX_CONTESTED)
+#define MTX_UNOWNED 0x8 /* Cookie for free mutex */
+
+#endif /* _KERNEL */
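
Editor's note: mtx_lock does triple duty. It holds MTX_UNOWNED (0x8) when the
mutex is free, and otherwise the owning thread pointer with the two low bits
(free because struct proc pointers are at least 4-byte aligned) carrying the
recursion/contention state. A small decoding sketch, assuming the struct mtx
defined below:

    uintptr_t v = m->mtx_lock;

    struct proc *owner = (struct proc *)(v & MTX_FLAGMASK);
    int free      = (v == MTX_UNOWNED);
    int recursed  = (v & MTX_RECURSE) != 0;    /* non-spin locks only */
    int contested = (v & MTX_CONTESTED) != 0;  /* waiters are queued */
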
+
+#ifdef MUTEX_DEBUG
+struct mtx_debug {
+ /* If you add anything here, adjust the mtxf_t definition below */
+ struct witness *mtxd_witness;
+ LIST_ENTRY(mtx) mtxd_held;
+ const char *mtxd_file;
+ int mtxd_line;
+ const char *mtxd_description;
+};
+
+#define mtx_description mtx_debug->mtxd_description
+#define mtx_held mtx_debug->mtxd_held
+#define mtx_line mtx_debug->mtxd_line
+#define mtx_file mtx_debug->mtxd_file
+#define mtx_witness mtx_debug->mtxd_witness
+#endif
+
+/*
+ * Sleep/spin mutex
+ */
+struct mtx {
+ volatile uintptr_t mtx_lock; /* lock owner/gate/flags */
+ volatile u_int mtx_recurse; /* number of recursive holds */
+ u_int mtx_saveintr; /* saved flags (for spin locks) */
+#ifdef MUTEX_DEBUG
+ struct mtx_debug *mtx_debug;
+#else
+ const char *mtx_description;
+#endif
+ TAILQ_HEAD(, proc) mtx_blocked;
+ LIST_ENTRY(mtx) mtx_contested;
+ struct mtx *mtx_next; /* all locks in system */
+ struct mtx *mtx_prev;
+};
+
+#ifdef MUTEX_DEBUG
+#define MUTEX_DECLARE(modifiers, name) \
+ static struct mtx_debug __mtx_debug_##name; \
+ modifiers struct mtx name = { 0, 0, 0, &__mtx_debug_##name }
+#else
+#define MUTEX_DECLARE(modifiers, name) modifiers struct mtx name
+#endif
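
Editor's note: with MUTEX_DEBUG the bookkeeping lives in a separately declared
struct mtx_debug, so MUTEX_DECLARE() gives that storage statically to mutexes
that must work before malloc() does, paired with the MTX_COLD init flag above.
A hypothetical use (foo_mtx and its description are made up for illustration):

    MUTEX_DECLARE(static, foo_mtx);

    static void
    foo_init_early(void)
    {
            mtx_init(&foo_mtx, "foo early lock", MTX_DEF | MTX_COLD);
    }
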
+
+#define mp_fixme(string)
+
+#ifdef _KERNEL
+/* Misc */
+#define CURTHD CURPROC /* Current thread ID */
+
+/* Prototypes */
+void mtx_init(struct mtx *m, const char *description, int flag);
+void mtx_enter_hard(struct mtx *, int type, int saveintr);
+void mtx_exit_hard(struct mtx *, int type);
+void mtx_destroy(struct mtx *m);
+
+/*
+ * Wrap the following functions with cpp macros so that filenames and line
+ * numbers are embedded in the code correctly.
+ */
+#if (defined(KLD_MODULE) || defined(_KERN_MUTEX_C_))
+void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
+int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
+void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
+#endif
+
+#define mtx_enter(mtxp, type) \
+ _mtx_enter((mtxp), (type), __FILE__, __LINE__)
+
+#define mtx_try_enter(mtxp, type) \
+ _mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
+
+#define mtx_exit(mtxp, type) \
+ _mtx_exit((mtxp), (type), __FILE__, __LINE__)
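
Editor's note: these wrappers are why callers never invoke _mtx_enter() and
friends directly; the macro layer stamps every operation with __FILE__ and
__LINE__ for the KTR traces and witness checks. A minimal hypothetical caller;
note that the type flags passed to mtx_exit() must match those given to
mtx_enter():

    static struct mtx foo_mtx;      /* assume mtx_init()'d elsewhere */
    static int foo_count;

    static void
    foo_bump(void)
    {
            mtx_enter(&foo_mtx, MTX_DEF);   /* may block if contested */
            foo_count++;
            mtx_exit(&foo_mtx, MTX_DEF);
    }
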
+
+/* Global locks */
+extern struct mtx sched_lock;
+extern struct mtx Giant;
+
+/*
+ * Used to replace a return statement with an exit from Giant and a return.
+ */
+
+#define EGAR(a) \
+do { \
+ mtx_exit(&Giant, MTX_DEF); \
+ return (a); \
+} while (0)
+
+#define VEGAR \
+do { \
+ mtx_exit(&Giant, MTX_DEF); \
+ return; \
+} while (0)
+
+#define DROP_GIANT() \
+do { \
+ int _giantcnt; \
+ WITNESS_SAVE_DECL(Giant); \
+ \
+ if (mtx_owned(&Giant)) \
+ WITNESS_SAVE(&Giant, Giant); \
+ for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
+ mtx_exit(&Giant, MTX_DEF)
+
+#define PICKUP_GIANT() \
+ mtx_assert(&Giant, MA_NOTOWNED); \
+ while (_giantcnt--) \
+ mtx_enter(&Giant, MTX_DEF); \
+ if (mtx_owned(&Giant)) \
+ WITNESS_RESTORE(&Giant, Giant); \
+} while (0)
+
+#define PARTIAL_PICKUP_GIANT() \
+ mtx_assert(&Giant, MA_NOTOWNED); \
+ while (_giantcnt--) \
+ mtx_enter(&Giant, MTX_DEF); \
+ if (mtx_owned(&Giant)) \
+ WITNESS_RESTORE(&Giant, Giant)
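
Editor's note: DROP_GIANT() opens a "do {" block that PICKUP_GIANT() closes,
so the two must appear as a brace-matched pair in the same function; the hidden
_giantcnt variable lets them release and later reacquire Giant to whatever
recursion depth the caller held it at. A hypothetical sketch around a sleep
(foo_wchan is made up):

    static int foo_wchan;

    static void
    foo_wait(void)
    {
            DROP_GIANT();
            /* Sleep up to one second without holding Giant. */
            tsleep(&foo_wchan, PWAIT, "foowt", hz);
            PICKUP_GIANT();
    }
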
+
+
+/*
+ * Debugging
+ */
+#ifdef INVARIANTS
+#define MA_OWNED 1
+#define MA_NOTOWNED 2
+#define mtx_assert(m, what) do { \
+ switch ((what)) { \
+ case MA_OWNED: \
+ if (!mtx_owned((m))) \
+ panic("mutex %s not owned at %s:%d", \
+ (m)->mtx_description, __FILE__, __LINE__); \
+ break; \
+ case MA_NOTOWNED: \
+ if (mtx_owned((m))) \
+ panic("mutex %s owned at %s:%d", \
+ (m)->mtx_description, __FILE__, __LINE__); \
+ break; \
+ default: \
+ panic("unknown mtx_assert at %s:%d", __FILE__, __LINE__); \
+ } \
+} while (0)
+#else /* INVARIANTS */
+#define mtx_assert(m, what)
+#endif /* INVARIANTS */
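
Editor's note: mtx_assert() compiles away entirely unless INVARIANTS is on,
which makes it cheap documentation of a function's locking contract. A
hypothetical example:

    static void
    foo_locked_op(struct mtx *m)
    {
            mtx_assert(m, MA_OWNED);        /* caller must hold m */
            /* ... touch state protected by m ... */
    }
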
+
+#ifdef MUTEX_DEBUG
+#define MPASS(ex) do { if (!(ex)) panic("Assertion %s failed at %s:%d", \
+	#ex, __FILE__, __LINE__); } while (0)
+#define MPASS2(ex, what) do { if (!(ex)) panic("Assertion %s failed at %s:%d", \
+	what, __FILE__, __LINE__); } while (0)
+
+#else /* MUTEX_DEBUG */
+#define MPASS(ex)
+#define MPASS2(ex, what)
+#endif /* MUTEX_DEBUG */
+
+#ifdef WITNESS
+#ifndef MUTEX_DEBUG
+#error WITNESS requires MUTEX_DEBUG
+#endif /* MUTEX_DEBUG */
+#define WITNESS_ENTER(m, t, f, l) \
+ if ((m)->mtx_witness != NULL) \
+ witness_enter((m), (t), (f), (l))
+#define WITNESS_EXIT(m, t, f, l) \
+ if ((m)->mtx_witness != NULL) \
+ witness_exit((m), (t), (f), (l))
+
+#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__)
+#define WITNESS_SAVE_DECL(n) \
+ const char * __CONCAT(n, __wf); \
+ int __CONCAT(n, __wl)
+
+#define WITNESS_SAVE(m, n) \
+do { \
+ if ((m)->mtx_witness != NULL) \
+ witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)); \
+} while (0)
+
+#define WITNESS_RESTORE(m, n) \
+do { \
+ if ((m)->mtx_witness != NULL) \
+ witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)); \
+} while (0)
+
+void witness_init(struct mtx *, int flag);
+void witness_destroy(struct mtx *);
+void witness_enter(struct mtx *, int, const char *, int);
+void witness_try_enter(struct mtx *, int, const char *, int);
+void witness_exit(struct mtx *, int, const char *, int);
+void witness_display(void(*)(const char *fmt, ...));
+void witness_list(struct proc *);
+int witness_sleep(int, struct mtx *, const char *, int);
+void witness_save(struct mtx *, const char **, int *);
+void witness_restore(struct mtx *, const char *, int);
+#else /* WITNESS */
+#define WITNESS_ENTER(m, t, f, l)
+#define WITNESS_EXIT(m, t, f, l)
+#define WITNESS_SLEEP(check, m)
+#define WITNESS_SAVE_DECL(n)
+#define WITNESS_SAVE(m, n)
+#define WITNESS_RESTORE(m, n)
+
+/*
+ * flag++ is a sleazy way of shutting up the unused-parameter warning
+ * in mtx_init().
+ */
+#define witness_init(m, flag) flag++
+#define witness_destroy(m)
+#define witness_enter(m, t, f, l)
+#define witness_try_enter(m, t, f, l)
+#define witness_exit(m, t, f, l)
+#endif /* WITNESS */
+
+/*
+ * Assembly macros (for internal use only)
+ *------------------------------------------------------------------------------
+ */
+
+#define _V(x) __STRING(x)
+
+/*
+ * Default, unoptimized mutex micro-operations
+ */
+
+#ifndef _obtain_lock
+/* Actually obtain mtx_lock */
+#define _obtain_lock(mp, tid) \
+ atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
+#endif
+
+#ifndef _release_lock
+/* Actually release mtx_lock */
+#define _release_lock(mp, tid) \
+ atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
+#endif
+
+#ifndef _release_lock_quick
+/* Actually release mtx_lock quickly assuming that we own it */
+#define _release_lock_quick(mp) \
+ atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
+#endif
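
Editor's note: all three micro-ops reduce to a single atomic compare-and-swap
(or store) on mtx_lock, with acquire semantics on the way in and release
semantics on the way out; _release_lock deliberately fails when the RECURSE or
CONTESTED bits appeared while the lock was held, diverting the caller into
mtx_exit_hard(). A stand-alone C11 sketch of the same fast path (not kernel
code; the kernel uses the machine/atomic.h primitives above):

    #include <stdatomic.h>
    #include <stdint.h>

    #define UNOWNED ((uintptr_t)0x8)

    static int
    obtain_lock(_Atomic uintptr_t *lock, uintptr_t tid)
    {
            uintptr_t old = UNOWNED;

            /* Succeeds only if the lock word is exactly UNOWNED. */
            return (atomic_compare_exchange_strong_explicit(lock, &old,
                tid, memory_order_acquire, memory_order_relaxed));
    }

    static int
    release_lock(_Atomic uintptr_t *lock, uintptr_t tid)
    {
            uintptr_t old = tid;

            /* Fails if any flag bits were set while we held the lock. */
            return (atomic_compare_exchange_strong_explicit(lock, &old,
                UNOWNED, memory_order_release, memory_order_relaxed));
    }
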
+
+#ifndef _getlock_sleep
+/* Get a sleep lock, deal with recursion inline. */
+#define _getlock_sleep(mp, tid, type) do { \
+ if (!_obtain_lock(mp, tid)) { \
+ if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\
+ mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \
+ else { \
+ atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSE); \
+ (mp)->mtx_recurse++; \
+ } \
+ } \
+} while (0)
+#endif
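
Editor's note: spelled out, the sleep-lock fast path resolves three lock-word
states without entering the slow path. A hypothetical trace, with T being the
current thread pointer and S some other thread:

    /*
     * mtx_lock == MTX_UNOWNED:  CAS wins, mtx_lock = T       (fast acquire)
     * mtx_lock == T:            set MTX_RECURSE, mtx_recurse++ (inline
     *                           recursion)
     * mtx_lock == S | flags:    foreign owner or contested ->
     *                           mtx_enter_hard()             (slow path)
     */
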
+
+#ifndef _getlock_spin_block
+/* Get a spin lock, handle recursion inline (as the less common case) */
+#define _getlock_spin_block(mp, tid, type) do { \
+ u_int _mtx_intr = save_intr(); \
+ disable_intr(); \
+ if (!_obtain_lock(mp, tid)) \
+ mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr); \
+ else \
+ (mp)->mtx_saveintr = _mtx_intr; \
+} while (0)
+#endif
+
+#ifndef _getlock_norecurse
+/*
+ * Get a lock without any recursion handling. Calls the hard enter function if
+ * we can't get it inline.
+ */
+#define _getlock_norecurse(mp, tid, type) do { \
+ if (!_obtain_lock(mp, tid)) \
+ mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \
+} while (0)
+#endif
+
+#ifndef _exitlock_norecurse
+/*
+ * Release a sleep lock, assuming we haven't recursed on it; recursion is
+ * handled in the hard function.
+ */
+#define _exitlock_norecurse(mp, tid, type) do { \
+ if (!_release_lock(mp, tid)) \
+ mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
+} while (0)
+#endif
+
+#ifndef _exitlock
+/*
+ * Release a sleep lock when it's likely we have recursed (the code to
+ * deal with simple recursion is inline).
+ */
+#define _exitlock(mp, tid, type) do { \
+ if (!_release_lock(mp, tid)) { \
+ if ((mp)->mtx_lock & MTX_RECURSE) { \
+ if (--((mp)->mtx_recurse) == 0) \
+ atomic_clear_ptr(&(mp)->mtx_lock, \
+ MTX_RECURSE); \
+ } else { \
+ mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \
+ } \
+ } \
+} while (0)
+#endif
+
+#ifndef _exitlock_spin
+/* Release a spin lock (with possible recursion). */
+#define _exitlock_spin(mp) do { \
+ if ((mp)->mtx_recurse == 0) { \
+ int _mtx_intr = (mp)->mtx_saveintr; \
+ \
+ _release_lock_quick(mp); \
+ restore_intr(_mtx_intr); \
+ } else { \
+ (mp)->mtx_recurse--; \
+ } \
+} while (0)
+#endif
+
+/*
+ * Externally visible mutex functions.
+ *------------------------------------------------------------------------------
+ */
+
+/*
+ * Return non-zero if a mutex is already owned by the current thread.
+ */
+#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD)
+
+/* Common strings */
+#ifdef _KERN_MUTEX_C_
+#ifdef KTR_EXTEND
+
+/*
+ * KTR_EXTEND saves the file name and line for all entries, so we don't need
+ * them here.  Theoretically we should also change the entries that refer to
+ * them (from CTR5 to CTR3), but since they're just passed to snprintf as the
+ * last parameters, it does no harm to leave them.
+ */
+char STR_mtx_enter_fmt[] = "GOT %s [%x] r=%d";
+char STR_mtx_exit_fmt[] = "REL %s [%x] r=%d";
+char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] result=%d";
+#else
+char STR_mtx_enter_fmt[] = "GOT %s [%x] at %s:%d r=%d";
+char STR_mtx_exit_fmt[] = "REL %s [%x] at %s:%d r=%d";
+char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%x] at %s:%d result=%d";
+#endif
+char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0";
+char STR_mtx_owned[] = "mtx_owned(mpp)";
+char STR_mtx_recurse[] = "mpp->mtx_recurse == 0";
+#else /* _KERN_MUTEX_C_ */
+extern char STR_mtx_enter_fmt[];
+extern char STR_mtx_bad_type[];
+extern char STR_mtx_exit_fmt[];
+extern char STR_mtx_owned[];
+extern char STR_mtx_recurse[];
+extern char STR_mtx_try_enter_fmt[];
+#endif /* _KERN_MUTEX_C_ */
+
+#ifndef KLD_MODULE
+/*
+ * Get lock 'm'; the macro handles the easy (and most common) cases and leaves
+ * the slow stuff to the mtx_enter_hard() function.
+ *
+ * Note: since type is usually a constant, much of this code is optimized out.
+ */
+_MTX_INLINE void
+_mtx_enter(struct mtx *mtxp, int type, const char *file, int line)
+{
+ struct mtx *mpp = mtxp;
+
+ /* bits only valid on mtx_exit() */
+ MPASS2(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0,
+ STR_mtx_bad_type);
+
+ if ((type) & MTX_SPIN) {
+ /*
+ * Easy cases of spin locks:
+ *
+ * 1) We already own the lock and will simply recurse on it (if
+ * RLIKELY)
+ *
+ * 2) The lock is free, we just get it
+ */
+ if ((type) & MTX_RLIKELY) {
+ /*
+ * Check for recursion; if we already own this
+ * lock, just bump the recursion count.
+ */
+ if (mpp->mtx_lock == (uintptr_t)CURTHD) {
+ mpp->mtx_recurse++;
+ goto done;
+ }
+ }
+
+ if (((type) & MTX_TOPHALF) == 0) {
+ /*
+ * If an interrupt thread uses this, we must block
+ * interrupts here.
+ */
+ if ((type) & MTX_FIRST) {
+ ASS_IEN;
+ disable_intr();
+ _getlock_norecurse(mpp, CURTHD,
+ (type) & MTX_HARDOPTS);
+ } else {
+ _getlock_spin_block(mpp, CURTHD,
+ (type) & MTX_HARDOPTS);
+ }
+ } else
+ _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
+ } else {
+ /* Sleep locks */
+ if ((type) & MTX_RLIKELY)
+ _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS);
+ else
+ _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS);
+ }
+ done:
+ WITNESS_ENTER(mpp, type, file, line);
+ CTR5(KTR_LOCK, STR_mtx_enter_fmt,
+ mpp->mtx_description, mpp, file, line,
+ mpp->mtx_recurse);
+}
+
+/*
+ * Attempt to get an MTX_DEF lock; return non-zero if the lock was acquired.
+ *
+ * XXX DOES NOT HANDLE RECURSION
+ */
+_MTX_INLINE int
+_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line)
+{
+ struct mtx *const mpp = mtxp;
+ int rval;
+
+ rval = _obtain_lock(mpp, CURTHD);
+#ifdef MUTEX_DEBUG
+ if (rval && mpp->mtx_witness != NULL) {
+ MPASS(mpp->mtx_recurse == 0);
+ witness_try_enter(mpp, type, file, line);
+ }
+#endif
+ CTR5(KTR_LOCK, STR_mtx_try_enter_fmt,
+ mpp->mtx_description, mpp, file, line, rval);
+
+ return rval;
+}
+
+/*
+ * Release lock 'm'.
+ */
+_MTX_INLINE void
+_mtx_exit(struct mtx *mtxp, int type, const char *file, int line)
+{
+ struct mtx *const mpp = mtxp;
+
+ MPASS2(mtx_owned(mpp), STR_mtx_owned);
+ WITNESS_EXIT(mpp, type, file, line);
+ CTR5(KTR_LOCK, STR_mtx_exit_fmt,
+ mpp->mtx_description, mpp, file, line,
+ mpp->mtx_recurse);
+ if ((type) & MTX_SPIN) {
+ if ((type) & MTX_NORECURSE) {
+ int mtx_intr = mpp->mtx_saveintr;
+
+ MPASS2(mpp->mtx_recurse == 0, STR_mtx_recurse);
+ _release_lock_quick(mpp);
+ if (((type) & MTX_TOPHALF) == 0) {
+ if ((type) & MTX_FIRST) {
+ ASS_IDIS;
+ enable_intr();
+ } else
+ restore_intr(mtx_intr);
+ }
+ } else {
+ if (((type & MTX_TOPHALF) == 0) &&
+ (type & MTX_FIRST)) {
+ ASS_IDIS;
+ ASS_SIEN(mpp);
+ }
+ _exitlock_spin(mpp);
+ }
+ } else {
+ /* Handle sleep locks */
+ if ((type) & MTX_RLIKELY)
+ _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS);
+ else {
+ _exitlock_norecurse(mpp, CURTHD,
+ (type) & MTX_HARDOPTS);
+ }
+ }
+}
+
+#endif /* KLD_MODULE */
+
+/* Avoid namespace pollution */
+#ifndef _KERN_MUTEX_C_
+#undef _obtain_lock
+#undef _release_lock
+#undef _release_lock_quick
+#undef _getlock_sleep
+#undef _getlock_spin_block
+#undef _getlock_norecurse
+#undef _exitlock_norecurse
+#undef _exitlock
+#undef _exitlock_spin
+#endif /* !_KERN_MUTEX_C_ */
+
+#endif /* _KERNEL */
+#endif /* !LOCORE */
+#endif /* _SYS_MUTEX_H_ */