summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--	sys/amd64/amd64/cpu_switch.S	76
-rw-r--r--	sys/amd64/amd64/swtch.s	76
-rw-r--r--	sys/i386/i386/mpapic.c	4
-rw-r--r--	sys/i386/i386/mplock.s	78
-rw-r--r--	sys/i386/i386/swtch.s	76
5 files changed, 173 insertions, 137 deletions
diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
index 55932a56b7d9..9b0df296f7c3 100644
--- a/sys/amd64/amd64/cpu_switch.S
+++ b/sys/amd64/amd64/cpu_switch.S
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.53 1997/06/22 16:03:35 peter Exp $
+ * $Id: swtch.s,v 1.2 1997/07/15 00:12:55 smp Exp smp $
*/
#include "npx.h"
@@ -45,10 +45,10 @@
#include <machine/ipl.h>
#include <machine/smptests.h> /** TEST_LOPRIO */
-#if defined(SMP)
+#ifdef SMP
#include <machine/pmap.h>
#include <machine/apic.h>
-#endif
+#endif /* SMP */
#include "assym.s"
@@ -67,23 +67,25 @@
* queues.
*/
.data
+
#ifndef SMP
.globl _curpcb
-_curpcb: .long 0 /* pointer to curproc's PCB area */
-#endif
+_curpcb: .long 0 /* pointer to curproc's PCB area */
+#endif /* !SMP */
+
.globl _whichqs, _whichrtqs, _whichidqs
-_whichqs: .long 0 /* which run queues have data */
-_whichrtqs: .long 0 /* which realtime run queues have data */
-_whichidqs: .long 0 /* which idletime run queues have data */
- .globl _hlt_vector
-_hlt_vector: .long _default_halt /* pointer to halt routine */
+_whichqs: .long 0 /* which run queues have data */
+_whichrtqs: .long 0 /* which realtime run qs have data */
+_whichidqs: .long 0 /* which idletime run qs have data */
+ .globl _hlt_vector
+_hlt_vector: .long _default_halt /* pointer to halt routine */
.globl _qs,_cnt,_panic
.globl _want_resched
-_want_resched: .long 0 /* we need to re-run the scheduler */
+_want_resched: .long 0 /* we need to re-run the scheduler */
.text
/*
@@ -246,11 +248,10 @@ rem3id: .asciz "remrq.id"
*/
ALIGN_TEXT
_idle:
-#ifdef SMP
- movl _smp_active, %eax
- cmpl $0, %eax
+#if defined(SMP) && defined(DIAGNOSTIC)
+ cmpl $0, _smp_active
jnz badsw3
-#endif /* SMP */
+#endif /* SMP && DIAGNOSTIC */
xorl %ebp,%ebp
movl $HIDENAME(tmpstk),%esp
movl _IdlePTD,%ecx
@@ -314,7 +315,7 @@ ENTRY(cpu_switch)
movb P_ONCPU(%ecx), %al /* save "last" cpu */
movb %al, P_LASTCPU(%ecx)
movb $0xff, P_ONCPU(%ecx) /* "leave" the cpu */
-#endif
+#endif /* SMP */
movl P_ADDR(%ecx),%ecx
@@ -330,10 +331,12 @@ ENTRY(cpu_switch)
#ifdef SMP
movl _mp_lock, %eax
- cmpl $0xffffffff, %eax /* is it free? */
+#ifdef DIAGNOSTIC
+ cmpl $FREE_LOCK, %eax /* is it free? */
je badsw4 /* yes, bad medicine! */
- andl $0x00ffffff, %eax /* clear CPU portion */
- movl %eax,PCB_MPNEST(%ecx) /* store it */
+#endif /* DIAGNOSTIC */
+ andl $COUNT_FIELD, %eax /* clear CPU portion */
+ movl %eax, PCB_MPNEST(%ecx) /* store it */
#endif /* SMP */
#if NNPX > 0
@@ -446,16 +449,16 @@ swtch_com:
movl P_ADDR(%ecx),%edx
movl PCB_CR3(%edx),%ebx
-#if defined(SMP)
+#ifdef SMP
/* Grab the private PT pointer from the outgoing process's PTD */
- movl $_PTD,%esi
+ movl $_PTD, %esi
movl 4*MPPTDI(%esi), %eax /* fetch cpu's prv pt */
-#endif
+#endif /* SMP */
/* switch address space */
movl %ebx,%cr3
-#if defined(SMP)
+#ifdef SMP
/* Copy the private PT to the new process's PTD */
/* XXX yuck, the _PTD changes when we switch, so we have to
* reload %cr3 after changing the address space.
@@ -466,8 +469,8 @@ swtch_com:
movl %eax, 4*MPPTDI(%esi) /* restore cpu's prv page */
/* XXX: we have just changed the page tables.. reload.. */
- movl %ebx,%cr3
-#endif
+ movl %ebx, %cr3
+#endif /* SMP */
#ifdef HOW_TO_SWITCH_TSS /* example only */
/* Fix up tss pointer to floating pcb/stack structure */
@@ -509,18 +512,19 @@ swtch_com:
#ifdef SMP
movl _cpuid,%eax
movb %al, P_ONCPU(%ecx)
-#endif
- movl %edx,_curpcb
- movl %ecx,_curproc /* into next process */
+#endif /* SMP */
+ movl %edx, _curpcb
+ movl %ecx, _curproc /* into next process */
#ifdef SMP
-#if defined(TEST_LOPRIO)
- /* Set us to prefer to get irq's from the apic since we have the lock */
- movl lapic_tpr, %eax /* get TPR register contents */
- andl $0xffffff00, %eax /* clear the prio field */
- movl %eax, lapic_tpr /* now hold loprio for INTs */
+#ifdef TEST_LOPRIO /* hold LOPRIO for INTs */
+#ifdef CHEAP_TPR
+ movl $0, lapic_tpr
+#else
+ andl $~APIC_TPR_PRIO, lapic_tpr
+#endif /* CHEAP_TPR */
#endif /* TEST_LOPRIO */
- movl _cpu_lockid,%eax
+ movl _cpu_lockid, %eax
orl PCB_MPNEST(%edx), %eax /* add next count from PROC */
movl %eax, _mp_lock /* load the mp_lock */
#endif /* SMP */
@@ -569,7 +573,7 @@ badsw2:
sw0_2: .asciz "cpu_switch: not SRUN"
#endif
-#ifdef SMP
+#if defined(SMP) && defined(DIAGNOSTIC)
badsw3:
pushl $sw0_3
call _panic
@@ -581,7 +585,7 @@ badsw4:
call _panic
sw0_4: .asciz "cpu_switch: do not have lock"
-#endif
+#endif /* SMP && DIAGNOSTIC */
/*
* savectx(pcb)
diff --git a/sys/amd64/amd64/swtch.s b/sys/amd64/amd64/swtch.s
index 55932a56b7d9..9b0df296f7c3 100644
--- a/sys/amd64/amd64/swtch.s
+++ b/sys/amd64/amd64/swtch.s
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.53 1997/06/22 16:03:35 peter Exp $
+ * $Id: swtch.s,v 1.2 1997/07/15 00:12:55 smp Exp smp $
*/
#include "npx.h"
@@ -45,10 +45,10 @@
#include <machine/ipl.h>
#include <machine/smptests.h> /** TEST_LOPRIO */
-#if defined(SMP)
+#ifdef SMP
#include <machine/pmap.h>
#include <machine/apic.h>
-#endif
+#endif /* SMP */
#include "assym.s"
@@ -67,23 +67,25 @@
* queues.
*/
.data
+
#ifndef SMP
.globl _curpcb
-_curpcb: .long 0 /* pointer to curproc's PCB area */
-#endif
+_curpcb: .long 0 /* pointer to curproc's PCB area */
+#endif /* !SMP */
+
.globl _whichqs, _whichrtqs, _whichidqs
-_whichqs: .long 0 /* which run queues have data */
-_whichrtqs: .long 0 /* which realtime run queues have data */
-_whichidqs: .long 0 /* which idletime run queues have data */
- .globl _hlt_vector
-_hlt_vector: .long _default_halt /* pointer to halt routine */
+_whichqs: .long 0 /* which run queues have data */
+_whichrtqs: .long 0 /* which realtime run qs have data */
+_whichidqs: .long 0 /* which idletime run qs have data */
+ .globl _hlt_vector
+_hlt_vector: .long _default_halt /* pointer to halt routine */
.globl _qs,_cnt,_panic
.globl _want_resched
-_want_resched: .long 0 /* we need to re-run the scheduler */
+_want_resched: .long 0 /* we need to re-run the scheduler */
.text
/*
@@ -246,11 +248,10 @@ rem3id: .asciz "remrq.id"
*/
ALIGN_TEXT
_idle:
-#ifdef SMP
- movl _smp_active, %eax
- cmpl $0, %eax
+#if defined(SMP) && defined(DIAGNOSTIC)
+ cmpl $0, _smp_active
jnz badsw3
-#endif /* SMP */
+#endif /* SMP && DIAGNOSTIC */
xorl %ebp,%ebp
movl $HIDENAME(tmpstk),%esp
movl _IdlePTD,%ecx
@@ -314,7 +315,7 @@ ENTRY(cpu_switch)
movb P_ONCPU(%ecx), %al /* save "last" cpu */
movb %al, P_LASTCPU(%ecx)
movb $0xff, P_ONCPU(%ecx) /* "leave" the cpu */
-#endif
+#endif /* SMP */
movl P_ADDR(%ecx),%ecx
@@ -330,10 +331,12 @@ ENTRY(cpu_switch)
#ifdef SMP
movl _mp_lock, %eax
- cmpl $0xffffffff, %eax /* is it free? */
+#ifdef DIAGNOSTIC
+ cmpl $FREE_LOCK, %eax /* is it free? */
je badsw4 /* yes, bad medicine! */
- andl $0x00ffffff, %eax /* clear CPU portion */
- movl %eax,PCB_MPNEST(%ecx) /* store it */
+#endif /* DIAGNOSTIC */
+ andl $COUNT_FIELD, %eax /* clear CPU portion */
+ movl %eax, PCB_MPNEST(%ecx) /* store it */
#endif /* SMP */
#if NNPX > 0
@@ -446,16 +449,16 @@ swtch_com:
movl P_ADDR(%ecx),%edx
movl PCB_CR3(%edx),%ebx
-#if defined(SMP)
+#ifdef SMP
/* Grab the private PT pointer from the outgoing process's PTD */
- movl $_PTD,%esi
+ movl $_PTD, %esi
movl 4*MPPTDI(%esi), %eax /* fetch cpu's prv pt */
-#endif
+#endif /* SMP */
/* switch address space */
movl %ebx,%cr3
-#if defined(SMP)
+#ifdef SMP
/* Copy the private PT to the new process's PTD */
/* XXX yuck, the _PTD changes when we switch, so we have to
* reload %cr3 after changing the address space.
@@ -466,8 +469,8 @@ swtch_com:
movl %eax, 4*MPPTDI(%esi) /* restore cpu's prv page */
/* XXX: we have just changed the page tables.. reload.. */
- movl %ebx,%cr3
-#endif
+ movl %ebx, %cr3
+#endif /* SMP */
#ifdef HOW_TO_SWITCH_TSS /* example only */
/* Fix up tss pointer to floating pcb/stack structure */
@@ -509,18 +512,19 @@ swtch_com:
#ifdef SMP
movl _cpuid,%eax
movb %al, P_ONCPU(%ecx)
-#endif
- movl %edx,_curpcb
- movl %ecx,_curproc /* into next process */
+#endif /* SMP */
+ movl %edx, _curpcb
+ movl %ecx, _curproc /* into next process */
#ifdef SMP
-#if defined(TEST_LOPRIO)
- /* Set us to prefer to get irq's from the apic since we have the lock */
- movl lapic_tpr, %eax /* get TPR register contents */
- andl $0xffffff00, %eax /* clear the prio field */
- movl %eax, lapic_tpr /* now hold loprio for INTs */
+#ifdef TEST_LOPRIO /* hold LOPRIO for INTs */
+#ifdef CHEAP_TPR
+ movl $0, lapic_tpr
+#else
+ andl $~APIC_TPR_PRIO, lapic_tpr
+#endif /* CHEAP_TPR */
#endif /* TEST_LOPRIO */
- movl _cpu_lockid,%eax
+ movl _cpu_lockid, %eax
orl PCB_MPNEST(%edx), %eax /* add next count from PROC */
movl %eax, _mp_lock /* load the mp_lock */
#endif /* SMP */
@@ -569,7 +573,7 @@ badsw2:
sw0_2: .asciz "cpu_switch: not SRUN"
#endif
-#ifdef SMP
+#if defined(SMP) && defined(DIAGNOSTIC)
badsw3:
pushl $sw0_3
call _panic
@@ -581,7 +585,7 @@ badsw4:
call _panic
sw0_4: .asciz "cpu_switch: do not have lock"
-#endif
+#endif /* SMP && DIAGNOSTIC */
/*
* savectx(pcb)
diff --git a/sys/i386/i386/mpapic.c b/sys/i386/i386/mpapic.c
index 34027fb5f1c0..4197ef753ca1 100644
--- a/sys/i386/i386/mpapic.c
+++ b/sys/i386/i386/mpapic.c
@@ -22,7 +22,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: mpapic.c,v 1.10 1997/07/13 00:42:14 smp Exp smp $
+ * $Id: mpapic.c,v 1.11 1997/07/15 00:09:53 smp Exp smp $
*/
#include "opt_smp.h"
@@ -84,7 +84,7 @@ apic_initialize(void)
#if defined(TEST_LOPRIO)
#if 1
/* The new order of startup since private pages makes this possible. */
- temp |= 0x10; /* allow INT arbitration */
+ temp |= LOPRIO_LEVEL; /* allow INT arbitration */
#else
if (cpuid == 0)
temp |= 0x10; /* allow INT arbitration */
diff --git a/sys/i386/i386/mplock.s b/sys/i386/i386/mplock.s
index d897f95dfea4..3762fdb86838 100644
--- a/sys/i386/i386/mplock.s
+++ b/sys/i386/i386/mplock.s
@@ -6,7 +6,7 @@
* this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
* ----------------------------------------------------------------------------
*
- * $Id: mplock.s,v 1.7 1997/07/13 00:19:24 smp Exp smp $
+ * $Id: mplock.s,v 1.8 1997/07/15 00:09:53 smp Exp smp $
*
* Functions for locking between CPUs in a SMP system.
*
@@ -32,6 +32,30 @@
#endif
#include <machine/apic.h>
+/*
+ * claim LOW PRIO, ie. accept ALL INTerrupts
+ */
+#ifdef TEST_LOPRIO
+
+#ifdef TEST_CPUSTOP
+#define TPR_TARGET 20(%esp)
+#else
+#define TPR_TARGET lapic_tpr
+#endif /** TEST_CPUSTOP */
+
+#ifdef CHEAP_TPR
+#define ACCEPT_INTS \
+ movl $0, TPR_TARGET /* clear TPR */
+#else
+#define ACCEPT_INTS \
+ andl $~APIC_TPR_PRIO, TPR_TARGET /* clear TPR */
+#endif /** CHEAP_TPR */
+
+#else
+
+#define ACCEPT_INTS
+
+#endif /** TEST_LOPRIO */
.text
/***********************************************************************
@@ -43,7 +67,7 @@
NON_GPROF_ENTRY(MPgetlock)
1: movl 4(%esp), %edx /* Get the address of the lock */
movl (%edx), %eax /* Try to see if we have it already */
- andl $0x00ffffff, %eax /* - get count */
+ andl $COUNT_FIELD, %eax /* - get count */
movl _cpu_lockid, %ecx /* - get pre-shifted logical cpu id */
orl %ecx, %eax /* - combine them */
movl %eax, %ecx
@@ -52,27 +76,15 @@ NON_GPROF_ENTRY(MPgetlock)
cmpxchg %ecx, (%edx) /* - try it atomically */
jne 2f /* - miss */
ret
-2: movl $0xffffffff, %eax /* Assume it's free */
+2: movl $FREE_LOCK, %eax /* Assume it's free */
movl _cpu_lockid, %ecx /* - get pre-shifted logical cpu id */
incl %ecx /* - new count is one */
lock
cmpxchg %ecx, (%edx) /* - try it atomically */
jne 3f /* ...do not collect $200 */
-#if defined(TEST_LOPRIO)
- /* 1st acquire, claim LOW PRIO (ie, ALL INTerrupts) */
-#ifdef TEST_CPUSTOP
-#define TPR_STACKOFFSET 20(%esp)
- movl TPR_STACKOFFSET, %eax /* saved copy */
- andl $0xffffff00, %eax /* clear task priority field */
- movl %eax, TPR_STACKOFFSET /* 're-save' it */
-#else
- movl lapic_tpr, %eax /* Task Priority Register */
- andl $0xffffff00, %eax /* clear task priority field */
- movl %eax, lapic_tpr /* set it */
-#endif /** TEST_CPUSTOP */
-#endif /** TEST_LOPRIO */
+ ACCEPT_INTS /* 1st acquire, accept INTs */
ret
-3: cmpl $0xffffffff, (%edx) /* Wait for it to become free */
+3: cmpl $FREE_LOCK, (%edx) /* Wait for it to become free */
jne 3b
jmp 2b /* XXX 1b ? */
@@ -86,7 +98,7 @@ NON_GPROF_ENTRY(MPgetlock)
NON_GPROF_ENTRY(MPtrylock)
1: movl 4(%esp), %edx /* Get the address of the lock */
movl (%edx), %eax /* Try to see if we have it already */
- andl $0x00ffffff, %eax /* - get count */
+ andl $COUNT_FIELD, %eax /* - get count */
movl _cpu_lockid, %ecx /* - get pre-shifted logical cpu id */
orl %ecx, %eax /* - combine them */
movl %eax, %ecx
@@ -96,12 +108,13 @@ NON_GPROF_ENTRY(MPtrylock)
jne 2f /* - miss */
movl $1, %eax
ret
-2: movl $0xffffffff, %eax /* Assume it's free */
+2: movl $FREE_LOCK, %eax /* Assume it's free */
movl _cpu_lockid, %ecx /* - get pre-shifted logical cpu id */
incl %ecx /* - new count is one */
lock
cmpxchg %ecx, (%edx) /* - try it atomically */
jne 3f /* ...do not collect $200 */
+ ACCEPT_INTS /* 1st acquire, accept INTs */
movl $1, %eax
ret
3: movl $0, %eax
@@ -118,17 +131,21 @@ NON_GPROF_ENTRY(MPrellock)
movl (%edx), %eax /* - get the value */
movl %eax,%ecx
decl %ecx /* - new count is one less */
- testl $0x00ffffff, %ecx /* - Unless it's zero... */
+ testl $COUNT_FIELD, %ecx /* - Unless it's zero... */
jnz 2f
#if defined(TEST_LOPRIO)
/* last release, give up LOW PRIO (ie, arbitrate INTerrupts) */
+#ifdef CHEAP_TPR
+ movl $LOPRIO_LEVEL, lapic_tpr /* task prio to 'arbitrate' */
+#else
movl lapic_tpr, %eax /* Task Priority Register */
- andl $0xffffff00, %eax /* clear task priority field */
- orl $0x00000010, %eax /* set task priority to 'arbitrate' */
+ andl $~APIC_TPR_PRIO, %eax /* clear task priority field */
+ orl $LOPRIO_LEVEL, %eax /* set task priority to 'arbitrate' */
movl %eax, lapic_tpr /* set it */
movl (%edx), %eax /* - get the value AGAIN */
+#endif /* CHEAP_TPR */
#endif /** TEST_LOPRIO */
- movl $0xffffffff, %ecx /* - In which case we release it */
+ movl $FREE_LOCK, %ecx /* - In which case we release it */
2: lock
cmpxchg %ecx, (%edx) /* - try it atomically */
jne 1b /* ...do not collect $200 */
@@ -153,14 +170,22 @@ NON_GPROF_ENTRY(get_mplock)
pushl %eax
#ifdef TEST_CPUSTOP
+#ifdef CHEAP_TPR
+ pushl lapic_tpr /* save current TPR */
+ pushfl /* save current EFLAGS */
+ btl $9, (%esp) /* test EI bit */
+ jc 1f /* INTs currently enabled */
+ movl $TPR_BLOCK_HWI, lapic_tpr /* set it */
+#else
movl lapic_tpr, %eax /* get current TPR */
pushl %eax /* save current TPR */
pushfl /* save current EFLAGS */
btl $9, (%esp) /* test EI bit */
jc 1f /* INTs currently enabled */
- andl $0xffffff00, %eax /* clear task priority field */
+ andl $~APIC_TPR_PRIO, %eax /* clear task priority field */
orl $TPR_BLOCK_HWI, %eax /* only allow IPIs */
movl %eax, lapic_tpr /* set it */
+#endif /* CHEAP_TPR */
sti /* allow IPI (and only IPI) INTS */
1:
#endif /* TEST_CPUSTOP */
@@ -175,11 +200,10 @@ NON_GPROF_ENTRY(get_mplock)
#ifdef TEST_CPUSTOP
popfl /* restore original EFLAGS */
- popl %eax /* get original/modified TPR value */
- movl %eax, lapic_tpr /* restore TPR */
+ popl lapic_tpr /* restore TPR */
#endif /* TEST_CPUSTOP */
- popl %eax
+ popl %eax /* restore scratch */
ret
/***********************************************************************
diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s
index 55932a56b7d9..9b0df296f7c3 100644
--- a/sys/i386/i386/swtch.s
+++ b/sys/i386/i386/swtch.s
@@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $Id: swtch.s,v 1.53 1997/06/22 16:03:35 peter Exp $
+ * $Id: swtch.s,v 1.2 1997/07/15 00:12:55 smp Exp smp $
*/
#include "npx.h"
@@ -45,10 +45,10 @@
#include <machine/ipl.h>
#include <machine/smptests.h> /** TEST_LOPRIO */
-#if defined(SMP)
+#ifdef SMP
#include <machine/pmap.h>
#include <machine/apic.h>
-#endif
+#endif /* SMP */
#include "assym.s"
@@ -67,23 +67,25 @@
* queues.
*/
.data
+
#ifndef SMP
.globl _curpcb
-_curpcb: .long 0 /* pointer to curproc's PCB area */
-#endif
+_curpcb: .long 0 /* pointer to curproc's PCB area */
+#endif /* !SMP */
+
.globl _whichqs, _whichrtqs, _whichidqs
-_whichqs: .long 0 /* which run queues have data */
-_whichrtqs: .long 0 /* which realtime run queues have data */
-_whichidqs: .long 0 /* which idletime run queues have data */
- .globl _hlt_vector
-_hlt_vector: .long _default_halt /* pointer to halt routine */
+_whichqs: .long 0 /* which run queues have data */
+_whichrtqs: .long 0 /* which realtime run qs have data */
+_whichidqs: .long 0 /* which idletime run qs have data */
+ .globl _hlt_vector
+_hlt_vector: .long _default_halt /* pointer to halt routine */
.globl _qs,_cnt,_panic
.globl _want_resched
-_want_resched: .long 0 /* we need to re-run the scheduler */
+_want_resched: .long 0 /* we need to re-run the scheduler */
.text
/*
@@ -246,11 +248,10 @@ rem3id: .asciz "remrq.id"
*/
ALIGN_TEXT
_idle:
-#ifdef SMP
- movl _smp_active, %eax
- cmpl $0, %eax
+#if defined(SMP) && defined(DIAGNOSTIC)
+ cmpl $0, _smp_active
jnz badsw3
-#endif /* SMP */
+#endif /* SMP && DIAGNOSTIC */
xorl %ebp,%ebp
movl $HIDENAME(tmpstk),%esp
movl _IdlePTD,%ecx
@@ -314,7 +315,7 @@ ENTRY(cpu_switch)
movb P_ONCPU(%ecx), %al /* save "last" cpu */
movb %al, P_LASTCPU(%ecx)
movb $0xff, P_ONCPU(%ecx) /* "leave" the cpu */
-#endif
+#endif /* SMP */
movl P_ADDR(%ecx),%ecx
@@ -330,10 +331,12 @@ ENTRY(cpu_switch)
#ifdef SMP
movl _mp_lock, %eax
- cmpl $0xffffffff, %eax /* is it free? */
+#ifdef DIAGNOSTIC
+ cmpl $FREE_LOCK, %eax /* is it free? */
je badsw4 /* yes, bad medicine! */
- andl $0x00ffffff, %eax /* clear CPU portion */
- movl %eax,PCB_MPNEST(%ecx) /* store it */
+#endif /* DIAGNOSTIC */
+ andl $COUNT_FIELD, %eax /* clear CPU portion */
+ movl %eax, PCB_MPNEST(%ecx) /* store it */
#endif /* SMP */
#if NNPX > 0
@@ -446,16 +449,16 @@ swtch_com:
movl P_ADDR(%ecx),%edx
movl PCB_CR3(%edx),%ebx
-#if defined(SMP)
+#ifdef SMP
/* Grab the private PT pointer from the outgoing process's PTD */
- movl $_PTD,%esi
+ movl $_PTD, %esi
movl 4*MPPTDI(%esi), %eax /* fetch cpu's prv pt */
-#endif
+#endif /* SMP */
/* switch address space */
movl %ebx,%cr3
-#if defined(SMP)
+#ifdef SMP
/* Copy the private PT to the new process's PTD */
/* XXX yuck, the _PTD changes when we switch, so we have to
* reload %cr3 after changing the address space.
@@ -466,8 +469,8 @@ swtch_com:
movl %eax, 4*MPPTDI(%esi) /* restore cpu's prv page */
/* XXX: we have just changed the page tables.. reload.. */
- movl %ebx,%cr3
-#endif
+ movl %ebx, %cr3
+#endif /* SMP */
#ifdef HOW_TO_SWITCH_TSS /* example only */
/* Fix up tss pointer to floating pcb/stack structure */
@@ -509,18 +512,19 @@ swtch_com:
#ifdef SMP
movl _cpuid,%eax
movb %al, P_ONCPU(%ecx)
-#endif
- movl %edx,_curpcb
- movl %ecx,_curproc /* into next process */
+#endif /* SMP */
+ movl %edx, _curpcb
+ movl %ecx, _curproc /* into next process */
#ifdef SMP
-#if defined(TEST_LOPRIO)
- /* Set us to prefer to get irq's from the apic since we have the lock */
- movl lapic_tpr, %eax /* get TPR register contents */
- andl $0xffffff00, %eax /* clear the prio field */
- movl %eax, lapic_tpr /* now hold loprio for INTs */
+#ifdef TEST_LOPRIO /* hold LOPRIO for INTs */
+#ifdef CHEAP_TPR
+ movl $0, lapic_tpr
+#else
+ andl $~APIC_TPR_PRIO, lapic_tpr
+#endif /* CHEAP_TPR */
#endif /* TEST_LOPRIO */
- movl _cpu_lockid,%eax
+ movl _cpu_lockid, %eax
orl PCB_MPNEST(%edx), %eax /* add next count from PROC */
movl %eax, _mp_lock /* load the mp_lock */
#endif /* SMP */
@@ -569,7 +573,7 @@ badsw2:
sw0_2: .asciz "cpu_switch: not SRUN"
#endif
-#ifdef SMP
+#if defined(SMP) && defined(DIAGNOSTIC)
badsw3:
pushl $sw0_3
call _panic
@@ -581,7 +585,7 @@ badsw4:
call _panic
sw0_4: .asciz "cpu_switch: do not have lock"
-#endif
+#endif /* SMP && DIAGNOSTIC */
/*
* savectx(pcb)