-rw-r--r--  sys/sparc64/include/tlb.h        |  15
-rw-r--r--  sys/sparc64/include/tte.h        |  42
-rw-r--r--  sys/sparc64/sparc64/exception.S  | 784
-rw-r--r--  sys/sparc64/sparc64/genassym.c   |  13
-rw-r--r--  sys/sparc64/sparc64/pmap.c       |  38
-rw-r--r--  sys/sparc64/sparc64/pv.c         |   6
-rw-r--r--  sys/sparc64/sparc64/tsb.c        |   6
7 files changed, 617 insertions, 287 deletions
diff --git a/sys/sparc64/include/tlb.h b/sys/sparc64/include/tlb.h
index 14c6635eae6b..6f72bca2b462 100644
--- a/sys/sparc64/include/tlb.h
+++ b/sys/sparc64/include/tlb.h
@@ -31,16 +31,17 @@
#define TLB_SLOT_COUNT 64
-#define TLB_SLOT_TSB_KERNEL_MIN 60 /* XXX */
-#define TLB_SLOT_TSB_USER_PRIMARY 61
-#define TLB_SLOT_TSB_USER_SECONDARY 62
+#define TLB_SLOT_TSB_KERNEL_MIN 62 /* XXX */
#define TLB_SLOT_KERNEL 63
#define TLB_DAR_SLOT_SHIFT (3)
#define TLB_DAR_SLOT(slot) ((slot) << TLB_DAR_SLOT_SHIFT)
-#define TLB_TAR_VA(va) ((va) & ~PAGE_MASK)
-#define TLB_TAR_CTX(ctx) ((ctx) & PAGE_MASK)
+#define TAR_VPN_SHIFT (13)
+#define TAR_CTX_MASK ((1 << TAR_VPN_SHIFT) - 1)
+
+#define TLB_TAR_VA(va) ((va) & ~TAR_CTX_MASK)
+#define TLB_TAR_CTX(ctx) ((ctx) & TAR_CTX_MASK)
#define TLB_DEMAP_ID_SHIFT (4)
#define TLB_DEMAP_ID_PRIMARY (0)
@@ -222,9 +223,9 @@ tlb_range_demap(u_int ctx, vm_offset_t start, vm_offset_t end)
}
static __inline void
-tlb_tte_demap(struct tte tte, vm_offset_t va)
+tlb_tte_demap(struct tte tte, u_int ctx)
{
- tlb_page_demap(TD_GET_TLB(tte.tte_data), TT_GET_CTX(tte.tte_tag), va);
+ tlb_page_demap(TD_GET_TLB(tte.tte_data), ctx, TV_GET_VA(tte.tte_vpn));
}
static __inline void
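For reference, a minimal userland sketch of the tag access register encoding that the new TLB_TAR_VA()/TLB_TAR_CTX() macros above assume: VA[63:13] in the upper bits, the 13-bit context in the low bits. The constants mirror the header; the example address and context values are arbitrary.

#include <stdio.h>

#define TAR_VPN_SHIFT   13
#define TAR_CTX_MASK    ((1UL << TAR_VPN_SHIFT) - 1)

#define TLB_TAR_VA(va)          ((va) & ~TAR_CTX_MASK)
#define TLB_TAR_CTX(ctx)        ((ctx) & TAR_CTX_MASK)

int
main(void)
{
        unsigned long va = 0x40012000UL;        /* hypothetical user va */
        unsigned long ctx = 5;                  /* hypothetical context */
        unsigned long tar = TLB_TAR_VA(va) | TLB_TAR_CTX(ctx);

        printf("tar=%#lx vpn=%#lx ctx=%#lx\n",
            tar, tar >> TAR_VPN_SHIFT, tar & TAR_CTX_MASK);
        return (0);
}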
diff --git a/sys/sparc64/include/tte.h b/sys/sparc64/include/tte.h
index 934a7ac15550..449ec880f644 100644
--- a/sys/sparc64/include/tte.h
+++ b/sys/sparc64/include/tte.h
@@ -34,20 +34,6 @@
#define TTE_SHIFT (4)
-#define TT_CTX_SHIFT (48)
-#define TT_VA_SHIFT (22)
-#define TT_VPN_SHIFT (9)
-
-#define TT_CTX_SIZE (13)
-#define TT_VA_SIZE (42)
-
-#define TT_CTX_MASK ((1UL << TT_CTX_SIZE) - 1)
-#define TT_VA_MASK ((1UL << TT_VA_SIZE) - 1)
-
-#define TT_G (1UL << 63)
-#define TT_CTX(ctx) (((u_long)(ctx) & TT_CTX_MASK) << TT_CTX_SHIFT)
-#define TT_VA(va) ((u_long)(va) >> TT_VA_SHIFT)
-
#define TD_SIZE_SHIFT (61)
#define TD_SOFT2_SHIFT (50)
#define TD_DIAG_SHIFT (41)
@@ -66,9 +52,6 @@
#define TD_PA_MASK (((1UL << TD_PA_SIZE) - 1) << TD_PA_SHIFT)
#define TD_SOFT_MASK (((1UL << TD_SOFT_SIZE) - 1) << TD_SOFT_SHIFT)
-#define TD_VA_LOW_SHIFT TD_SOFT2_SHIFT
-#define TD_VA_LOW_MASK TD_SOFT2_MASK
-
#define TS_EXEC (1UL << 4)
#define TS_REF (1UL << 3)
#define TS_PV (1UL << 2)
@@ -82,8 +65,6 @@
#define TD_4M (3UL << TD_SIZE_SHIFT)
#define TD_NFO (1UL << 60)
#define TD_IE (1UL << 59)
-#define TD_VPN_LOW(vpn) ((vpn << TD_SOFT2_SHIFT) & TD_SOFT2_MASK)
-#define TD_VA_LOW(va) (TD_VPN_LOW((va) >> PAGE_SHIFT))
#define TD_PA(pa) ((pa) & TD_PA_MASK)
#define TD_EXEC (TS_EXEC << TD_SOFT_SHIFT)
#define TD_REF (TS_REF << TD_SOFT_SHIFT)
@@ -98,35 +79,28 @@
#define TD_W (1UL << 1)
#define TD_G (1UL << 0)
-#define TT_GET_CTX(tag) (((tag) >> TT_CTX_SHIFT) & TT_CTX_MASK)
+#define TV_VPN(va) ((va) >> PAGE_SHIFT)
+
#define TD_GET_SIZE(d) (((d) >> TD_SIZE_SHIFT) & 3)
#define TD_GET_PA(d) ((d) & TD_PA_MASK)
#define TD_GET_TLB(d) (((d) & TD_EXEC) ? (TLB_DTLB | TLB_ITLB) : TLB_DTLB)
+#define TV_GET_VA(vpn) ((vpn) << PAGE_SHIFT)
struct tte {
- u_long tte_tag;
+ u_long tte_vpn;
u_long tte_data;
};
-static __inline vm_offset_t
-tte_get_vpn(struct tte tte)
-{
- return (((tte.tte_tag & TT_VA_MASK) << TT_VPN_SHIFT) |
- ((tte.tte_data & TD_VA_LOW_MASK) >> TD_VA_LOW_SHIFT));
-}
-
-static __inline vm_offset_t
-tte_get_va(struct tte tte)
+static __inline int
+tte_match_vpn(struct tte tte, vm_offset_t vpn)
{
- return (tte_get_vpn(tte) << PAGE_SHIFT);
+ return ((tte.tte_data & TD_V) != 0 && tte.tte_vpn == vpn);
}
static __inline int
tte_match(struct tte tte, vm_offset_t va)
{
- return ((tte.tte_data & TD_V) != 0 &&
- ((tte.tte_tag ^ TT_VA(va)) & TT_VA_MASK) == 0 &&
- ((tte.tte_data ^ TD_VA_LOW(va)) & TD_VA_LOW_MASK) == 0);
+ return (tte_match_vpn(tte, va >> PAGE_SHIFT));
}
#endif /* !_MACHINE_TTE_H_ */
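A small standalone sketch of the new tte layout: the tag word is replaced by the full virtual page number, so matching reduces to an integer compare. TV_VPN(), TV_GET_VA() and tte_match_vpn() are copied from the header above; TD_V at bit 63 and the 8 KB PAGE_SHIFT follow the sparc64 headers but are restated here so the example compiles on its own.

#include <assert.h>

#define PAGE_SHIFT      13
#define PAGE_MASK       ((1UL << PAGE_SHIFT) - 1)
#define TD_V            (1UL << 63)

#define TV_VPN(va)      ((va) >> PAGE_SHIFT)
#define TV_GET_VA(vpn)  ((vpn) << PAGE_SHIFT)

struct tte {
        unsigned long tte_vpn;
        unsigned long tte_data;
};

static int
tte_match_vpn(struct tte tte, unsigned long vpn)
{
        return ((tte.tte_data & TD_V) != 0 && tte.tte_vpn == vpn);
}

int
main(void)
{
        unsigned long va = 0x40012000UL;        /* arbitrary test address */
        struct tte tte = { TV_VPN(va), TD_V };

        assert(tte_match_vpn(tte, va >> PAGE_SHIFT));
        assert(TV_GET_VA(tte.tte_vpn) == (va & ~PAGE_MASK));
        return (0);
}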
diff --git a/sys/sparc64/sparc64/exception.S b/sys/sparc64/sparc64/exception.S
index 1694250f0dc6..e3ba426bc6df 100644
--- a/sys/sparc64/sparc64/exception.S
+++ b/sys/sparc64/sparc64/exception.S
@@ -546,67 +546,97 @@ END(tl0_sfsr_trap)
.align 32
.endm
+#if KTR_COMPILE & KTR_TRAP
+ .macro tl0_immu_miss
+ b,a %xcc, tl0_immu_miss_traced
+ nop
+ .align 128
+ .endm
+
+ENTRY(tl0_immu_miss_traced)
+#else
.macro tl0_immu_miss
+#endif
/*
* Force kernel store order.
*/
wrpr %g0, PSTATE_MMU, %pstate
/*
- * Extract the 8KB pointer.
- */
- ldxa [%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g6
- srax %g6, TTE_SHIFT, %g6
-
- /*
- * Compute the tte address in the primary user tsb.
+ * Load the virtual page number and context from the tag access
+ * register. We ignore the context.
*/
- and %g6, (1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
- sllx %g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
- add %g1, TSB_REG, %g1
+ wr %g0, ASI_IMMU, %asi
+ ldxa [%g0 + AA_IMMU_TAR] %asi, %g2
/*
- * Compute low bits of faulting va to check inside bucket loop.
+ * Extract the virtual page number from the contents of the tag
+ * access register.
*/
- and %g6, TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g2
- sllx %g2, TD_VA_LOW_SHIFT, %g2
- or %g2, TD_EXEC, %g2
+ srlx %g2, TAR_VPN_SHIFT, %g3
/*
- * Load the tte tag target.
+ * Compute the tte bucket address.
*/
- ldxa [%g0] ASI_IMMU_TAG_TARGET_REG, %g6
+ set (1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
+ and %g1, %g3, %g1
+ sllx %g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
+ add %g1, TSB_REG, %g1
- /*
- * Load mask for tte data check.
- */
- mov TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g3
- sllx %g3, TD_VA_LOW_SHIFT, %g3
- or %g3, TD_EXEC, %g3
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP,
+ "tl0_immu_miss: tl=%#lx tpc=%#lx %#lx tar=%#lx vpn=%#lx tp=%#lx"
+ , %g4, %g5, %g6, 7, 8, 9)
+ rdpr %tl, %g5
+ stx %g5, [%g4 + KTR_PARM1]
+ rdpr %tpc, %g5
+ stx %g5, [%g4 + KTR_PARM2]
+ rdpr %tnpc, %g5
+ stx %g5, [%g4 + KTR_PARM3]
+ stx %g2, [%g4 + KTR_PARM4]
+ stx %g3, [%g4 + KTR_PARM5]
+ stx %g1, [%g4 + KTR_PARM6]
+9:
+#endif
/*
* Loop over the ttes in this bucket
*/
/*
- * Load the tte.
+ * Load the tte. Note that this instruction may fault, clobbering
+ * the contents of the tag access register (at least), and the contents
+ * of %g3, %g4, %g5, and %g6. Luckily we can recover %g3, and we do
+ * not use %g4 or %g5 until this instruction completes successfully.
*/
1: ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_immu_miss: vpn=%#lx data=%#lx"
+ , %g6, %g4, %g5, 7, 8, 9)
+ ldx [%g1 + TTE_VPN], %g4
+ stx %g4, [%g6 + KTR_PARM1]
+ ldx [%g1 + TTE_DATA], %g5
+ stx %g5, [%g6 + KTR_PARM2]
+9:
+ ldx [%g1 + TTE_VPN], %g4
+ ldx [%g1 + TTE_DATA], %g5
+#endif
+
/*
- * Compare the tag.
+ * Recover the virtual page number, which may have been clobbered.
*/
- cmp %g4, %g6
- bne,pn %xcc, 2f
- EMPTY
+ srlx %g2, TAR_VPN_SHIFT, %g3
/*
- * Compare the data.
+ * Check that it's valid and executable and that the virtual page
+ * numbers match.
*/
- xor %g2, %g5, %g4
brgez,pn %g5, 2f
- andcc %g3, %g4, %g0
- bnz,pn %xcc, 2f
+ andcc %g5, TD_EXEC, %g0
+ bz,pn %xcc, 2f
+ cmp %g3, %g4
+ bne,pn %xcc, 2f
EMPTY
/*
@@ -620,9 +650,18 @@ END(tl0_sfsr_trap)
bz,a,pn %xcc, tl0_immu_miss_set_ref
nop
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_immu_miss: match tar=%#lx data=%#lx"
+ , %g1, %g3, %g4, 7, 8, 9)
+ stx %g2, [%g1 + KTR_PARM1]
+ stx %g5, [%g1 + KTR_PARM2]
+9:
+#endif
+
/*
- * Load the tte data into the tlb and retry the instruction.
+ * Load the tte tag and data into the tlb and retry the instruction.
*/
+ stxa %g2, [%g0 + AA_IMMU_TAR] %asi
stxa %g5, [%g0] ASI_ITLB_DATA_IN_REG
retry
@@ -633,27 +672,57 @@ END(tl0_sfsr_trap)
andcc %g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
bnz,a,pt %xcc, 1b
nop
+
+ /*
+ * Put back the contents of the tag access register, in case we
+ * faulted.
+ */
+ stxa %g2, [%g0 + AA_IMMU_TAR] %asi
+ membar #Sync
+
b,a %xcc, tl0_immu_miss_trap
+ nop
+#if KTR_COMPILE & KTR_TRAP
+END(tl0_immu_miss_traced)
+#else
.align 128
.endm
+#endif
ENTRY(tl0_immu_miss_set_ref)
/*
* Set the reference bit.
*/
- TTE_SET_REF(%g1, %g2, %g3)
+ TTE_SET_REF(%g1, %g4, %g5)
+
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_immu_miss_set_ref: tp=%#lx data=%#lx"
+ , %g3, %g5, %g6, 7, 8, 9)
+ stx %g1, [%g3 + KTR_PARM1]
+ stx %g4, [%g3 + KTR_PARM2]
+9:
+#endif
/*
* May have become invalid, in which case start over.
*/
- brgez,pn %g2, 2f
- or %g2, TD_REF, %g2
+ brgez,pn %g4, 1f
+ or %g4, TD_REF, %g4
+
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_immu_miss_set_ref: return tar=%#lx data=%#lx"
+ , %g3, %g5, %g6, 7, 8, 9)
+ stx %g2, [%g3 + KTR_PARM1]
+ stx %g4, [%g3 + KTR_PARM2]
+9:
+#endif
/*
- * Load the tte data into the tlb and retry the instruction.
+ * Load the tte tag and data into the tlb and retry the instruction.
*/
- stxa %g2, [%g0] ASI_ITLB_DATA_IN_REG
-2: retry
+ stxa %g2, [%g0 + AA_IMMU_TAR] %asi
+ stxa %g4, [%g0] ASI_ITLB_DATA_IN_REG
+1: retry
END(tl0_immu_miss_set_ref)
ENTRY(tl0_immu_miss_trap)
@@ -663,74 +732,99 @@ ENTRY(tl0_immu_miss_trap)
wrpr %g0, PSTATE_ALT, %pstate
/*
- * Load the tar, sfar and sfsr aren't valid.
+ * Reload the tag access register.
*/
- wr %g0, ASI_IMMU, %asi
- ldxa [%g0 + AA_DMMU_TAR] %asi, %g3
+ ldxa [%g0 + AA_IMMU_TAR] %asi, %g2
+
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_immu_miss_trap: tar=%#lx"
+ , %g1, %g3, %g4, 7, 8, 9)
+ stx %g2, [%g1 + KTR_PARM1]
+9:
+#endif
/*
- * Save the mmu registers on the stack, and call common trap code.
+ * Save the tag access register, and call common trap code.
*/
tl0_split
- mov %g3, %o3
+ mov %g2, %o3
b %xcc, tl0_trap
mov T_INSTRUCTION_MISS, %o0
END(tl0_immu_miss_trap)
.macro dmmu_miss_user
/*
- * Extract the 8KB pointer and convert to an index.
- */
- ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g6
- srax %g6, TTE_SHIFT, %g6
-
- /*
- * Compute the tte bucket address.
+ * Load the virtual page number and context from the tag access
+ * register. We ignore the context.
*/
- and %g6, (1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
- sllx %g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
- add %g1, TSB_REG, %g1
+ wr %g0, ASI_DMMU, %asi
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
/*
- * Compute low bits of faulting va to check inside bucket loop.
+ * Extract the virtual page number from the contents of the tag
+ * access register.
*/
- and %g6, TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g2
- sllx %g2, TD_VA_LOW_SHIFT, %g2
+ srlx %g2, TAR_VPN_SHIFT, %g3
/*
- * Preload the tte tag target.
+ * Compute the tte bucket address.
*/
- ldxa [%g0] ASI_DMMU_TAG_TARGET_REG, %g6
+ set (1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
+ and %g1, %g3, %g1
+ sllx %g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
+ add %g1, TSB_REG, %g1
- /*
- * Load mask for tte data check.
- */
- mov TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g3
- sllx %g3, TD_VA_LOW_SHIFT, %g3
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP,
+ "tl0_dmmu_miss: tl=%#lx tpc=%#lx %#lx tar=%#lx vpn=%#lx tp=%#lx"
+ , %g4, %g5, %g6, 7, 8, 9)
+ rdpr %tl, %g5
+ stx %g5, [%g4 + KTR_PARM1]
+ rdpr %tpc, %g5
+ stx %g5, [%g4 + KTR_PARM2]
+ rdpr %tnpc, %g5
+ stx %g5, [%g4 + KTR_PARM3]
+ stx %g2, [%g4 + KTR_PARM4]
+ stx %g3, [%g4 + KTR_PARM5]
+ stx %g1, [%g4 + KTR_PARM6]
+9:
+#endif
/*
* Loop over the ttes in this bucket
*/
/*
- * Load the tte.
+ * Load the tte. Note that this instruction may fault, clobbering
+ * the contents of the tag access register (at least), and the contents
+ * of %g3, %g4, %g5, and %g6. Luckily we can recover %g3, and we do
+ * not use %g4 or %g5 until this instruction completes successfully.
*/
1: ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_dmmu_miss: vpn=%#lx data=%#lx"
+ , %g6, %g4, %g5, 7, 8, 9)
+ ldx [%g1 + TTE_VPN], %g4
+ stx %g4, [%g6 + KTR_PARM1]
+ ldx [%g1 + TTE_DATA], %g5
+ stx %g5, [%g6 + KTR_PARM2]
+9:
+ ldx [%g1 + TTE_VPN], %g4
+ ldx [%g1 + TTE_DATA], %g5
+#endif
+
/*
- * Compare the tag.
+ * Recover the virtual page number, which may have been clobbered.
*/
- cmp %g4, %g6
- bne,pn %xcc, 2f
- EMPTY
+ srlx %g2, TAR_VPN_SHIFT, %g3
/*
- * Compare the data.
+ * Check that it's valid and that the virtual page numbers match.
*/
- xor %g2, %g5, %g4
brgez,pn %g5, 2f
- andcc %g3, %g4, %g0
- bnz,pn %xcc, 2f
+ cmp %g3, %g4
+ bne,pn %xcc, 2f
EMPTY
/*
@@ -744,9 +838,18 @@ END(tl0_immu_miss_trap)
bz,a,pn %xcc, dmmu_miss_user_set_ref
nop
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_dmmu_miss: match tar=%#lx data=%#lx"
+ , %g1, %g3, %g4, 7, 8, 9)
+ stx %g2, [%g1 + KTR_PARM1]
+ stx %g5, [%g1 + KTR_PARM2]
+9:
+#endif
+
/*
- * Load the tte data into the tlb and retry the instruction.
+ * Load the tte tag and data into the tlb and retry the instruction.
*/
+ stxa %g2, [%g0 + AA_DMMU_TAR] %asi
stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
retry
@@ -757,28 +860,62 @@ END(tl0_immu_miss_trap)
andcc %g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
bnz,a,pt %xcc, 1b
nop
+
+ /*
+ * Put back the contents of the tag access register, in case we
+ * faulted.
+ */
+ stxa %g2, [%g0 + AA_DMMU_TAR] %asi
+ membar #Sync
.endm
ENTRY(dmmu_miss_user_set_ref)
/*
* Set the reference bit.
*/
- TTE_SET_REF(%g1, %g2, %g3)
+ TTE_SET_REF(%g1, %g4, %g5)
+
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_dmmu_miss_set_ref: tp=%#lx data=%#lx"
+ , %g3, %g5, %g6, 7, 8, 9)
+ stx %g1, [%g3 + KTR_PARM1]
+ stx %g4, [%g3 + KTR_PARM2]
+9:
+#endif
/*
* May have become invalid, in which case start over.
*/
- brgez,pn %g2, 2f
- or %g2, TD_REF, %g2
+ brgez,pn %g4, 1f
+ or %g4, TD_REF, %g4
+
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_dmmu_miss_set_ref: return tar=%#lx data=%#lx"
+ , %g3, %g5, %g6, 7, 8, 9)
+ stx %g2, [%g3 + KTR_PARM1]
+ stx %g4, [%g3 + KTR_PARM2]
+9:
+#endif
/*
- * Load the tte data into the tlb and retry the instruction.
+ * Load the tte tag and data into the tlb and retry the instruction.
*/
- stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG
-2: retry
+ stxa %g2, [%g0 + AA_DMMU_TAR] %asi
+ stxa %g4, [%g0] ASI_DTLB_DATA_IN_REG
+1: retry
END(dmmu_miss_user_set_ref)
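What the TTE_SET_REF step above (and, analogously, TTE_SET_W in the protection-fault path) amounts to, sketched with C11 atomics rather than the casxa-based macro the kernel uses: set the soft bit atomically, then make sure the entry did not become invalid before loading it into the TLB. TD_REF's bit position below is a placeholder.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TD_V    (1UL << 63)
#define TD_REF  (1UL << 5)      /* placeholder bit position */

static bool
tte_set_ref(_Atomic unsigned long *datap, unsigned long *newp)
{
        /* Atomically set the reference bit and keep the resulting data. */
        *newp = atomic_fetch_or(datap, TD_REF) | TD_REF;

        /* May have become invalid, in which case the caller starts over. */
        return ((*newp & TD_V) != 0);
}

int
main(void)
{
        _Atomic unsigned long data = TD_V;      /* valid, not yet referenced */
        unsigned long new;

        printf("load into tlb: %d, data=%#lx\n", tte_set_ref(&data, &new), new);
        return (0);
}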
+#if KTR_COMPILE & KTR_TRAP
+ .macro tl0_dmmu_miss
+ b,a %xcc, tl0_dmmu_miss_traced
+ nop
+ .align 128
+ .endm
+
+ENTRY(tl0_dmmu_miss_traced)
+#else
.macro tl0_dmmu_miss
+#endif
/*
* Force kernel store order.
*/
@@ -790,11 +927,16 @@ END(dmmu_miss_user_set_ref)
dmmu_miss_user
/*
- * Not in primary tsb, call c code. Nothing else fits inline.
+ * Not in primary tsb, call c code. Not much else fits inline.
*/
- b,a tl0_dmmu_miss_trap
+ b,a %xcc, tl0_dmmu_miss_trap
+ nop
+#if KTR_COMPILE & KTR_TRAP
+END(tl0_dmmu_miss_traced)
+#else
.align 128
.endm
+#endif
ENTRY(tl0_dmmu_miss_trap)
/*
@@ -803,78 +945,107 @@ ENTRY(tl0_dmmu_miss_trap)
wrpr %g0, PSTATE_ALT, %pstate
/*
- * Load the tar, sfar and sfsr aren't valid.
+ * Reload the tag access register.
*/
- wr %g0, ASI_DMMU, %asi
- ldxa [%g0 + AA_DMMU_TAR] %asi, %g3
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
+
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_dmmu_miss_trap: tar=%#lx"
+ , %g1, %g3, %g4, 7, 8, 9)
+ stx %g2, [%g1 + KTR_PARM1]
+9:
+#endif
/*
- * Save the mmu registers on the stack and call common trap code.
+ * Save the tag access register and call common trap code.
*/
tl0_split
- mov %g3, %o3
+ mov %g2, %o3
b %xcc, tl0_trap
mov T_DATA_MISS, %o0
END(tl0_dmmu_miss_trap)
.macro dmmu_prot_user
/*
- * Extract the 8KB pointer and convert to an index.
- */
- ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g6
- srax %g6, TTE_SHIFT, %g6
-
- /*
- * Compute the tte bucket address.
+ * Load the virtual page number and context from the tag access
+ * register. We ignore the context.
*/
- and %g6, (1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
- sllx %g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
- add %g1, TSB_REG, %g1
+ wr %g0, ASI_DMMU, %asi
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
/*
- * Compute low bits of faulting va to check inside bucket loop.
+ * Extract the virtual page number from the contents of the tag
+ * access register.
*/
- and %g6, TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g2
- sllx %g2, TD_VA_LOW_SHIFT, %g2
- or %g2, TD_SW, %g2
+ srlx %g2, TAR_VPN_SHIFT, %g3
/*
- * Preload the tte tag target.
+ * Compute the tte bucket address.
*/
- ldxa [%g0] ASI_DMMU_TAG_TARGET_REG, %g6
+ set (1 << TSB_BUCKET_ADDRESS_BITS) - 1, %g1
+ and %g1, %g3, %g1
+ sllx %g1, TSB_BUCKET_SHIFT + TTE_SHIFT, %g1
+ add %g1, TSB_REG, %g1
- /*
- * Load mask for tte data check.
- */
- mov TD_VA_LOW_MASK >> TD_VA_LOW_SHIFT, %g3
- sllx %g3, TD_VA_LOW_SHIFT, %g3
- or %g3, TD_SW, %g3
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP,
+ "tl0_dmmu_prot: tl=%#lx tpc=%#lx %#lx tar=%#lx vpn=%#lx tp=%#lx"
+ , %g4, %g5, %g6, 7, 8, 9)
+ rdpr %tl, %g5
+ stx %g5, [%g4 + KTR_PARM1]
+ rdpr %tpc, %g5
+ stx %g5, [%g4 + KTR_PARM2]
+ rdpr %tnpc, %g5
+ stx %g5, [%g4 + KTR_PARM3]
+ stx %g2, [%g4 + KTR_PARM4]
+ stx %g3, [%g4 + KTR_PARM5]
+ stx %g1, [%g4 + KTR_PARM6]
+9:
+#endif
/*
* Loop over the ttes in this bucket
*/
/*
- * Load the tte.
+ * Load the tte. Note that this instruction may fault, clobbering
+ * the contents of the tag access register (at least), and the contents
+ * of %g3, %g4, %g5, and %g6. Luckily we can recover %g3, and we do
+ * not use %g4 or %g5 until this instruction completes successfully.
*/
1: ldda [%g1] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_dmmu_prot: vpn=%#lx data=%#lx"
+ , %g6, %g4, %g5, 7, 8, 9)
+ ldx [%g1 + TTE_VPN], %g4
+ stx %g4, [%g6 + KTR_PARM1]
+ ldx [%g1 + TTE_DATA], %g5
+ stx %g5, [%g6 + KTR_PARM2]
+9:
+ ldx [%g1 + TTE_VPN], %g4
+ ldx [%g1 + TTE_DATA], %g5
+#endif
+
/*
- * Compare the tag.
+ * Recover the virtual page number, which may have been clobbered.
*/
- cmp %g4, %g6
- bne,pn %xcc, 2f
- EMPTY
+ srlx %g2, TAR_VPN_SHIFT, %g3
/*
- * Compare the data.
+ * Check that it's valid and writable and that the virtual page
+ * numbers match.
*/
- xor %g2, %g5, %g4
brgez,pn %g5, 2f
- andcc %g3, %g4, %g0
- bnz,a,pn %xcc, 2f
+ andcc %g5, TD_SW, %g0
+ bz,pn %xcc, 2f
+ cmp %g3, %g4
+ bne,pn %xcc, 2f
nop
+ /*
+ * Set the hardware write bit.
+ */
b,a %xcc, dmmu_prot_set_w
nop
@@ -883,11 +1054,28 @@ END(tl0_dmmu_miss_trap)
*/
2: add %g1, 1 << TTE_SHIFT, %g1
andcc %g1, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
- bnz,a,pn %xcc, 1b
+ bnz,a,pt %xcc, 1b
+ nop
+
+ /*
+ * Put back the contents of the tag access register, in case we
+ * faulted.
+ */
+ stxa %g2, [%g0 + AA_DMMU_TAR] %asi
+ membar #Sync
+ .endm
+
+#if KTR_COMPILE & KTR_TRAP
+ .macro tl0_dmmu_prot
+ b,a %xcc, tl0_dmmu_prot_traced
nop
+ .align 128
.endm
+ENTRY(tl0_dmmu_prot_traced)
+#else
.macro tl0_dmmu_prot
+#endif
/*
* Force kernel store order.
*/
@@ -903,32 +1091,54 @@ END(tl0_dmmu_miss_trap)
*/
b,a %xcc, tl0_dmmu_prot_trap
nop
+#if KTR_COMPILE & KTR_TRAP
+END(tl0_dmmu_prot_traced)
+#else
.align 128
.endm
+#endif
ENTRY(dmmu_prot_set_w)
/*
* Set the hardware write bit in the tte.
*/
- TTE_SET_W(%g1, %g2, %g3)
+ TTE_SET_W(%g1, %g4, %g5)
/*
- * Delete the old TLB entry.
+ * Delete the old TLB entry and clear the sfsr.
*/
- wr %g0, ASI_DMMU, %asi
- ldxa [%g0 + AA_DMMU_TAR] %asi, %g1
- srlx %g1, PAGE_SHIFT, %g1
- sllx %g1, PAGE_SHIFT, %g1
- stxa %g0, [%g1] ASI_DMMU_DEMAP
+ sllx %g3, TAR_VPN_SHIFT, %g3
+ stxa %g0, [%g3] ASI_DMMU_DEMAP
stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
+ membar #Sync
+
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_dmmu_prot_set_w: tp=%#lx data=%#lx"
+ , %g3, %g5, %g6, 7, 8, 9)
+ stx %g1, [%g3 + KTR_PARM1]
+ stx %g4, [%g3 + KTR_PARM2]
+9:
+#endif
- brgez,pn %g2, 1f
- or %g2, TD_W, %g2
+ /*
+ * May have become invalid, in which case start over.
+ */
+ brgez,pn %g4, 1f
+ or %g4, TD_W, %g4
+
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_dmmu_prot_set_w: return tar=%#lx data=%#lx"
+ , %g3, %g5, %g6, 7, 8, 9)
+ stx %g2, [%g3 + KTR_PARM1]
+ stx %g4, [%g3 + KTR_PARM2]
+9:
+#endif
/*
* Load the tte data into the tlb and retry the instruction.
*/
- stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG
+ stxa %g2, [%g0 + AA_DMMU_TAR] %asi
+ stxa %g4, [%g0] ASI_DTLB_DATA_IN_REG
1: retry
END(dmmu_prot_set_w)
@@ -941,15 +1151,22 @@ ENTRY(tl0_dmmu_prot_trap)
/*
* Load the tar, sfar and sfsr.
*/
- wr %g0, ASI_DMMU, %asi
ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
+
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl0_dmmu_prot_trap: tar=%#lx"
+ , %g1, %g3, %g4, 7, 8, 9)
+ stx %g2, [%g1 + KTR_PARM1]
+9:
+#endif
+
ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
membar #Sync
/*
- * Save the mmu registers on the stack and call common trap code.
+ * Save the mmu registers and call common trap code.
*/
tl0_split
mov %g2, %o3
@@ -1295,121 +1512,224 @@ ENTRY(intr_enqueue)
END(intr_enqueue)
.macro tl1_immu_miss
- ldxa [%g0] ASI_IMMU_TAG_TARGET_REG, %g1
- sllx %g1, TT_VA_SHIFT - (PAGE_SHIFT - TTE_SHIFT), %g2
+ /*
+ * Load the context and the virtual page number from the tag access
+ * register. We ignore the context.
+ */
+ wr %g0, ASI_IMMU, %asi
+ ldxa [%g0 + AA_IMMU_TAR] %asi, %g6
- set TSB_KERNEL_VA_MASK, %g3
- and %g2, %g3, %g2
+ /*
+ * Extract the virtual page number from the contents of the tag access
+ * register.
+ */
+ srlx %g6, TAR_VPN_SHIFT, %g6
- ldxa [%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g4
- add %g2, %g4, %g2
+ /*
+ * Find the index into the kernel tsb.
+ */
+ set TSB_KERNEL_MASK, %g4
+ and %g6, %g4, %g3
/*
- * Load the tte, check that it's valid and that the tags match.
+ * Compute the tte address.
*/
- ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
- brgez,pn %g5, 2f
- cmp %g4, %g1
- bne,pn %xcc, 2f
+ ldxa [%g0 + AA_IMMU_TSB] %asi, %g4
+ sllx %g3, TTE_SHIFT, %g3
+ add %g3, %g4, %g3
+
+ /*
+ * Load the tte.
+ */
+ ldda [%g3] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
+
+ /*
+ * Check that it's valid and executable and that the virtual page
+ * numbers match.
+ */
+ brgez,pn %g5, tl1_immu_miss_trap
andcc %g5, TD_EXEC, %g0
- bz,pn %xcc, 2f
+ bz,pn %xcc, tl1_immu_miss_trap
+ cmp %g4, %g6
+ bne,pn %xcc, tl1_immu_miss_trap
EMPTY
/*
- * Set the refence bit, if its currently clear.
+ * Set the reference bit if it's currently clear.
*/
andcc %g5, TD_REF, %g0
- bnz,pt %xcc, 1f
- EMPTY
+ bnz,a,pt %xcc, 1f
+ nop
+ TTE_SET_REF(%g3, %g5, %g4)
- TTE_SET_REF(%g2, %g3, %g4)
+ /*
+ * May have become invalid, in which case start over.
+ */
+ brgez,pn %g5, 2f
+ or %g5, TD_REF, %g5
/*
* Load the tte data into the TLB and retry the instruction.
*/
1: stxa %g5, [%g0] ASI_ITLB_DATA_IN_REG
- retry
+2: retry
+ .align 128
+ .endm
+ENTRY(tl1_immu_miss_trap)
/*
* Switch to alternate globals.
*/
-2: wrpr %g0, PSTATE_ALT, %pstate
+ wrpr %g0, PSTATE_ALT, %pstate
- wr %g0, ASI_IMMU, %asi
- ldxa [%g0 + AA_IMMU_TAR] %asi, %g3
+ ldxa [%g0 + AA_IMMU_TAR] %asi, %g2
tl1_kstack
rdpr %pil, %o1
- mov %g3, %o3
+ mov %g2, %o3
b %xcc, tl1_trap
mov T_INSTRUCTION_MISS | T_KERNEL, %o0
+END(tl1_immu_miss_trap)
+
+#if KTR_COMPILE & KTR_TRAP
+ .macro tl1_dmmu_miss
+ b,a %xcc, tl1_dmmu_miss_traced
+ nop
.align 128
.endm
+ENTRY(tl1_dmmu_miss_traced)
+#else
.macro tl1_dmmu_miss
- ldxa [%g0] ASI_DMMU_TAG_TARGET_REG, %g1
- srlx %g1, TT_CTX_SHIFT, %g2
- brnz,pn %g2, tl1_dmmu_miss_user
- sllx %g1, TT_VA_SHIFT - (PAGE_SHIFT - TTE_SHIFT), %g2
-
- set TSB_KERNEL_VA_MASK, %g3
- and %g2, %g3, %g2
+#endif
+ /*
+ * Load the context and the virtual page number from the tag access
+ * register.
+ */
+ wr %g0, ASI_DMMU, %asi
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g6
- ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g4
- add %g2, %g4, %g2
+ /*
+ * Extract the context from the contents of the tag access register.
+ * If its non-zero this is a fault on a user address, otherwise get
+ * If it's non-zero, this is a fault on a user address; otherwise get
+ */
+ sllx %g6, 64 - TAR_VPN_SHIFT, %g5
+ brnz,pn %g5, tl1_dmmu_miss_user
+ srlx %g6, TAR_VPN_SHIFT, %g6
/*
- * Load the tte, check that it's valid and that the tags match.
+ * Find the index into the kernel tsb.
*/
- ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
- brgez,pn %g5, 2f
- cmp %g4, %g1
- bne,pn %xcc, 2f
- EMPTY
+ set TSB_KERNEL_MASK, %g4
+ and %g6, %g4, %g3
/*
- * Set the refence bit, if its currently clear.
+ * Compute the tte address.
*/
- andcc %g5, TD_REF, %g0
- bnz,pt %xcc, 1f
- EMPTY
+ ldxa [%g0 + AA_DMMU_TSB] %asi, %g4
+ sllx %g3, TTE_SHIFT, %g3
+ add %g3, %g4, %g3
- TTE_SET_REF(%g2, %g3, %g4)
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP,
+ "tl1_dmmu_miss: tl=%#lx tpc=%#lx %#lx tar=%#lx vpn=%#lx tp=%#lx"
+ , %g4, %g5, %g6, 7, 8, 9)
+ rdpr %tl, %g5
+ stx %g5, [%g4 + KTR_PARM1]
+ rdpr %tpc, %g5
+ stx %g5, [%g4 + KTR_PARM2]
+ rdpr %tnpc, %g5
+ stx %g5, [%g4 + KTR_PARM3]
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g6
+ stx %g6, [%g4 + KTR_PARM4]
+ srlx %g6, TAR_VPN_SHIFT, %g6
+ stx %g6, [%g4 + KTR_PARM5]
+ stx %g3, [%g4 + KTR_PARM6]
+9:
+#endif
/*
- * Load the tte data into the TLB and retry the instruction.
+ * Load the tte.
*/
-1: stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
- retry
+ ldda [%g3] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
+
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl1_dmmu_miss: vpn=%#lx data=%#lx"
+ , %g6, %g4, %g5, 7, 8, 9)
+ ldx [%g3 + TTE_VPN], %g4
+ stx %g4, [%g6 + KTR_PARM1]
+ ldx [%g3 + TTE_DATA], %g5
+ stx %g5, [%g6 + KTR_PARM2]
+9:
+ ldx [%g3 + TTE_VPN], %g4
+ ldx [%g3 + TTE_DATA], %g5
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g6
+ srlx %g6, TAR_VPN_SHIFT, %g6
+#endif
/*
- * Switch to alternate globals.
+ * Check that it's valid and that the virtual page numbers match.
*/
-2: wrpr %g0, PSTATE_ALT, %pstate
+ brgez,pn %g5, tl1_dmmu_miss_trap
+ cmp %g4, %g6
+ bne,pn %xcc, tl1_dmmu_miss_trap
+ EMPTY
- b,a %xcc, tl1_dmmu_miss_trap
+ /*
+ * Set the reference bit if it's currently clear.
+ */
+ andcc %g5, TD_REF, %g0
+ bnz,a,pt %xcc, 1f
nop
+ TTE_SET_REF(%g3, %g5, %g4)
+
+ /*
+ * May have become invalid, in which case start over.
+ */
+ brgez,pn %g5, 2f
+ or %g5, TD_REF, %g5
+
+ /*
+ * Load the tte data into the TLB and retry the instruction.
+ */
+1:
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl1_dmmu_miss: match data=%#lx"
+ , %g3, %g4, %g6, 7, 8, 9)
+ stx %g5, [%g3 + KTR_PARM1]
+9:
+#endif
+ stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
+2: retry
+#if KTR_COMPILE & KTR_TRAP
+END(tl1_dmmu_miss_traced)
+#else
.align 128
.endm
+#endif
ENTRY(tl1_dmmu_miss_trap)
+ /*
+ * Switch to alternate globals.
+ */
+ wrpr %g0, PSTATE_ALT, %pstate
+
#if KTR_COMPILE & KTR_TRAP
CATR(KTR_TRAP, "tl1_dmmu_miss_trap: tar=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
- mov AA_DMMU_TAR, %g2
- ldxa [%g2] ASI_DMMU, %g2
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
stx %g2, [%g1 + KTR_PARM1]
9:
#endif
KSTACK_CHECK
- wr %g0, ASI_DMMU, %asi
- ldxa [%g0 + AA_DMMU_TAR] %asi, %g3
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
tl1_kstack
rdpr %pil, %o1
- mov %g3, %o3
+ mov %g2, %o3
b %xcc, tl1_trap
mov T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_trap)
@@ -1425,61 +1745,97 @@ ENTRY(tl1_dmmu_miss_user)
*/
wrpr %g0, PSTATE_ALT, %pstate
- /* Handle faults during window spill/fill. */
+ /*
+ * Handle faults during window spill/fill.
+ */
RESUME_SPILLFILL_MMU
- wr %g0, ASI_DMMU, %asi
- ldxa [%g0 + AA_DMMU_TAR] %asi, %g3
+#if KTR_COMPILE & KTR_TRAP
+ CATR(KTR_TRAP, "tl1_dmmu_miss_user: trap tar=%#lx"
+ , %g1, %g2, %g3, 7, 8, 9)
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
+ stx %g2, [%g1 + KTR_PARM1]
+9:
+#endif
+
+ /*
+ * Reload the tag access register.
+ */
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
tl1_kstack
rdpr %pil, %o1
- mov %g3, %o3
+ mov %g2, %o3
b %xcc, tl1_trap
mov T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_user)
.macro tl1_dmmu_prot
- ldxa [%g0] ASI_DMMU_TAG_TARGET_REG, %g1
- srlx %g1, TT_CTX_SHIFT, %g2
- brnz,pn %g2, tl1_dmmu_prot_user
- sllx %g1, TT_VA_SHIFT - (PAGE_SHIFT - TTE_SHIFT), %g2
+ /*
+ * Load the context and the virtual page number from the tag access
+ * register.
+ */
+ wr %g0, ASI_DMMU, %asi
+ ldxa [%g0 + AA_DMMU_TAR] %asi, %g6
- set TSB_KERNEL_VA_MASK, %g3
- and %g2, %g3, %g2
+ /*
+ * Extract the context from the contents of the tag access register.
+ * If it's non-zero, this is a fault on a user address; otherwise get
+ * the virtual page number.
+ */
+ sllx %g6, 64 - TAR_VPN_SHIFT, %g5
+ brnz,pn %g5, tl1_dmmu_prot_user
+ srlx %g6, TAR_VPN_SHIFT, %g6
- ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g4
- add %g2, %g4, %g2
+ /*
+ * Find the index into the kernel tsb.
+ */
+ set TSB_KERNEL_MASK, %g4
+ and %g6, %g4, %g5
/*
- * Load the tte, check that it's valid and that the tags match.
+ * Compute the tte address.
*/
- ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
- brgez,pn %g5, 1f
- cmp %g4, %g1
- bne,pn %xcc, 1f
+ ldxa [%g0 + AA_DMMU_TSB] %asi, %g4
+ sllx %g5, TTE_SHIFT, %g5
+ add %g4, %g5, %g3
+
+ /*
+ * Load the tte.
+ */
+ ldda [%g3] ASI_NUCLEUS_QUAD_LDD, %g4 /*, %g5 */
+
+ /*
+ * Check that it's valid and writable and that the virtual page
+ * numbers match.
+ */
+ brgez,pn %g5, tl1_dmmu_prot_trap
andcc %g5, TD_SW, %g0
- bz,pn %xcc, 1f
+ bz,pn %xcc, tl1_dmmu_prot_trap
+ cmp %g4, %g6
+ bne,pn %xcc, tl1_dmmu_prot_trap
EMPTY
- TTE_SET_W(%g2, %g3, %g4)
-
/*
- * Delete the old TLB entry.
+ * Delete the old TLB entry and clear the sfsr.
*/
- wr %g0, ASI_DMMU, %asi
- ldxa [%g0 + AA_DMMU_TAR] %asi, %g1
- stxa %g0, [%g1] ASI_DMMU_DEMAP
+ sllx %g6, TAR_VPN_SHIFT, %g6
+ or %g6, TLB_DEMAP_NUCLEUS, %g6
+ stxa %g0, [%g6] ASI_DMMU_DEMAP
stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
+ membar #Sync
+
+ /*
+ * Set the hardware write bit.
+ */
+ TTE_SET_W(%g3, %g5, %g6)
/*
* Load the tte data into the TLB and retry the instruction.
*/
- or %g3, TD_W, %g3
- stxa %g3, [%g0] ASI_DTLB_DATA_IN_REG
+ or %g5, TD_W, %g5
+ stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
retry
-
-1: b %xcc, tl1_dmmu_prot_trap
- wrpr %g0, PSTATE_ALT, %pstate
.align 128
.endm
@@ -1503,9 +1859,13 @@ END(tl1_dmmu_prot_user)
ENTRY(tl1_dmmu_prot_trap)
/*
+ * Switch to alternate globals.
+ */
+ wrpr %g0, PSTATE_ALT, %pstate
+
+ /*
* Load the sfar, sfsr and tar. Clear the sfsr.
*/
- wr %g0, ASI_DMMU, %asi
ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
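The TL1 handlers above index the kernel tsb directly rather than hashing into buckets. A C sketch of that lookup, with TSB_KERNEL_MASK as a placeholder for the configured kernel tsb size and tsb_kernel standing in for the base address read from the AA_{D,I}MMU_TSB register.

#include <stdint.h>

#define TTE_SHIFT               4                       /* log2(sizeof(struct tte)) */
#define TSB_KERNEL_MASK         ((1UL << 13) - 1)       /* placeholder */

struct tte {
        uint64_t tte_vpn;
        uint64_t tte_data;
};

/*
 * Array indexing is equivalent to the assembly's
 * tsb_base + ((vpn & TSB_KERNEL_MASK) << TTE_SHIFT), since a tte is 16 bytes.
 */
struct tte *
tsb_kvtotte_sketch(struct tte *tsb_kernel, unsigned long vpn)
{
        return (&tsb_kernel[vpn & TSB_KERNEL_MASK]);
}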
diff --git a/sys/sparc64/sparc64/genassym.c b/sys/sparc64/sparc64/genassym.c
index b38077095ff2..6044d6ad9a8b 100644
--- a/sys/sparc64/sparc64/genassym.c
+++ b/sys/sparc64/sparc64/genassym.c
@@ -83,7 +83,8 @@ ASSYM(FPRS_FEF, FPRS_FEF);
ASSYM(LSU_VW, LSU_VW);
-ASSYM(TLB_DAR_TSB_USER_PRIMARY, TLB_DAR_SLOT(TLB_SLOT_TSB_USER_PRIMARY));
+ASSYM(TAR_VPN_SHIFT, TAR_VPN_SHIFT);
+
ASSYM(TLB_DEMAP_NUCLEUS, TLB_DEMAP_NUCLEUS);
ASSYM(TLB_DEMAP_SECONDARY, TLB_DEMAP_SECONDARY);
ASSYM(TLB_DEMAP_CONTEXT, TLB_DEMAP_CONTEXT);
@@ -94,7 +95,7 @@ ASSYM(TLB_ITLB, TLB_ITLB);
ASSYM(TSB_BUCKET_ADDRESS_BITS, TSB_BUCKET_ADDRESS_BITS);
ASSYM(TSB_BUCKET_SHIFT, TSB_BUCKET_SHIFT);
-ASSYM(TSB_KERNEL_VA_MASK, TSB_KERNEL_VA_MASK);
+ASSYM(TSB_KERNEL_MASK, TSB_KERNEL_MASK);
ASSYM(PAGE_SHIFT, PAGE_SHIFT);
ASSYM(PAGE_MASK, PAGE_MASK);
@@ -127,22 +128,16 @@ ASSYM(KTR_PARM4, offsetof(struct ktr_entry, ktr_parm4));
ASSYM(KTR_PARM5, offsetof(struct ktr_entry, ktr_parm5));
ASSYM(KTR_PARM6, offsetof(struct ktr_entry, ktr_parm6));
+ASSYM(TTE_VPN, offsetof(struct tte, tte_vpn));
ASSYM(TTE_DATA, offsetof(struct tte, tte_data));
-ASSYM(TTE_TAG, offsetof(struct tte, tte_tag));
ASSYM(TTE_SHIFT, TTE_SHIFT);
-ASSYM(TD_VA_LOW_MASK, TD_VA_LOW_MASK);
-ASSYM(TD_VA_LOW_SHIFT, TD_VA_LOW_SHIFT);
ASSYM(TD_EXEC, TD_EXEC);
ASSYM(TD_REF, TD_REF);
ASSYM(TD_SW, TD_SW);
ASSYM(TD_L, TD_L);
ASSYM(TD_W, TD_W);
-ASSYM(TT_VA_MASK, TT_VA_MASK);
-ASSYM(TT_VA_SHIFT, TT_VA_SHIFT);
-ASSYM(TT_CTX_SHIFT, TT_CTX_SHIFT);
-
ASSYM(V_INTR, offsetof(struct vmmeter, v_intr));
ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
diff --git a/sys/sparc64/sparc64/pmap.c b/sys/sparc64/sparc64/pmap.c
index e59304ba6c64..d2802dcd7ec6 100644
--- a/sys/sparc64/sparc64/pmap.c
+++ b/sys/sparc64/sparc64/pmap.c
@@ -384,7 +384,7 @@ pmap_bootstrap(vm_offset_t ekva)
off += PAGE_SIZE) {
va = translations[i].om_start + off;
tte.tte_data = translations[i].om_tte + off;
- tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
+ tte.tte_vpn = TV_VPN(va);
tp = tsb_kvtotte(va);
CTR4(KTR_PMAP,
"mapping: va=%#lx tp=%p tte=%#lx pa=%#lx",
@@ -432,9 +432,9 @@ pmap_map_tsb(void)
for (i = 0; i < KVA_PAGES; i++) {
va = (vm_offset_t)tsb_kernel + i * PAGE_SIZE_4M;
pa = tsb_kernel_phys + i * PAGE_SIZE_4M;
- tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
- tte.tte_data = TD_V | TD_4M | TD_VA_LOW(va) | TD_PA(pa) |
- TD_L | TD_CP | TD_CV | TD_P | TD_W;
+ tte.tte_vpn = TV_VPN(va);
+ tte.tte_data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP |
+ TD_CV | TD_P | TD_W;
tlb_store_slot(TLB_DTLB, va, TLB_CTX_KERNEL, tte,
TLB_SLOT_TSB_KERNEL_MIN + i);
}
@@ -633,9 +633,9 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
struct tte tte;
struct tte *tp;
- tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
- tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
- TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W;
+ tte.tte_vpn = TV_VPN(va);
+ tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP |
+ TD_CV | TD_P | TD_W;
tp = tsb_kvtotte(va);
CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
va, pa, tp, tp->tte_data);
@@ -657,9 +657,8 @@ pmap_kenter_flags(vm_offset_t va, vm_offset_t pa, u_long flags)
struct tte tte;
struct tte *tp;
- tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
- tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
- TD_REF | TD_P | flags;
+ tte.tte_vpn = TV_VPN(va);
+ tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags;
tp = tsb_kvtotte(va);
CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx",
va, pa, tp, tp->tte_data);
@@ -691,7 +690,7 @@ pmap_kremove(vm_offset_t va)
CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
tp->tte_data);
atomic_clear_long(&tp->tte_data, TD_V);
- tp->tte_tag = 0;
+ tp->tte_vpn = 0;
tp->tte_data = 0;
tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va);
}
@@ -1224,7 +1223,7 @@ pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, vm_offset_t v
pmap_cache_remove(m, va);
}
atomic_clear_long(&tp->tte_data, TD_V);
- tp->tte_tag = 0;
+ tp->tte_vpn = 0;
tp->tte_data = 0;
if (PMAP_REMOVE_DONE(pm))
return (0);
@@ -1339,8 +1338,8 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
"pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
pm->pm_context, m, va, pa, prot, wired);
- tte.tte_tag = TT_CTX(pm->pm_context) | TT_VA(va);
- tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) | TD_CP;
+ tte.tte_vpn = TV_VPN(va);
+ tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP;
/*
* If there is an existing mapping, and the physical address has not
@@ -1385,7 +1384,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (pmap_track_modified(pm, va))
vm_page_dirty(m);
}
- tlb_tte_demap(otte, va);
+ tlb_tte_demap(otte, pm->pm_context);
}
} else {
CTR0(KTR_PMAP, "pmap_enter: replace");
@@ -1416,7 +1415,7 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (pmap_cache_enter(m, va) != 0)
tte.tte_data |= TD_CV;
}
- tlb_tte_demap(otte, va);
+ tlb_tte_demap(otte, pm->pm_context);
}
} else {
CTR0(KTR_PMAP, "pmap_enter: new");
@@ -1511,8 +1510,9 @@ pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, vm_offset_t va)
vm_page_t m;
if (tsb_tte_lookup(dst_pmap, va) == NULL) {
- tte.tte_data = tp->tte_data & ~(TD_PV | TD_REF | TD_CV | TD_W);
- tte.tte_tag = TT_CTX(dst_pmap->pm_context) | TT_VA(va);
+ tte.tte_data = tp->tte_data &
+ ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
+ tte.tte_vpn = TV_VPN(va);
m = PHYS_TO_VM_PAGE(TD_GET_PA(tp->tte_data));
if ((tp->tte_data & TD_PV) != 0) {
KASSERT((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0,
@@ -1634,7 +1634,7 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
continue;
atomic_clear_long(&tp->tte_data, TD_V);
- tp->tte_tag = 0;
+ tp->tte_vpn = 0;
tp->tte_data = 0;
m = pv->pv_m;
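The pmap.c changes above switch tlb_tte_demap() to take the context rather than a separately tracked va, recovering the va from the tte itself. A hedged userland sketch of the new call pattern follows; tlb_page_demap() is a stub, the TLB_DTLB value is a placeholder, and the real helper also derives the target tlb with TD_GET_TLB() rather than hard-coding it.

#include <stdio.h>

#define PAGE_SHIFT      13
#define TV_GET_VA(vpn)  ((vpn) << PAGE_SHIFT)
#define TLB_DTLB        (1 << 0)        /* placeholder value */

struct tte {
        unsigned long tte_vpn;
        unsigned long tte_data;
};

/* Stub standing in for the real demap primitive. */
static void
tlb_page_demap(unsigned int tlb, unsigned int ctx, unsigned long va)
{
        printf("demap tlb=%#x ctx=%u va=%#lx\n", tlb, ctx, va);
}

/* New-style helper: the va comes from the tte, the context from the pmap. */
static void
tlb_tte_demap(struct tte tte, unsigned int ctx)
{
        tlb_page_demap(TLB_DTLB, ctx, TV_GET_VA(tte.tte_vpn));
}

int
main(void)
{
        struct tte tte = { 0x40012000UL >> PAGE_SHIFT, 0 };

        tlb_tte_demap(tte, 5);  /* 5 plays the role of pm->pm_context */
        return (0);
}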
diff --git a/sys/sparc64/sparc64/pv.c b/sys/sparc64/sparc64/pv.c
index 4548b88cc30b..5dd92c99da6d 100644
--- a/sys/sparc64/sparc64/pv.c
+++ b/sys/sparc64/sparc64/pv.c
@@ -157,7 +157,7 @@ pv_bit_clear(vm_page_t m, u_long bits)
vm_page_dirty(m);
}
atomic_clear_long(&tp->tte_data, bits);
- tlb_tte_demap(*tp, pv->pv_va);
+ tlb_tte_demap(*tp, pv->pv_pmap->pm_context);
}
}
}
@@ -250,8 +250,8 @@ pv_remove_all(vm_page_t m)
vm_page_dirty(m);
}
atomic_clear_long(&tp->tte_data, TD_V);
- tlb_tte_demap(*tp, pv->pv_va);
- tp->tte_tag = 0;
+ tlb_tte_demap(*tp, pv->pv_pmap->pm_context);
+ tp->tte_vpn = 0;
tp->tte_data = 0;
pv->pv_pmap->pm_stats.resident_count--;
m->md.pv_list_count--;
diff --git a/sys/sparc64/sparc64/tsb.c b/sys/sparc64/sparc64/tsb.c
index 4504b10afa3d..745861b02316 100644
--- a/sys/sparc64/sparc64/tsb.c
+++ b/sys/sparc64/sparc64/tsb.c
@@ -178,7 +178,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, struct tte tte)
tp = rtp;
if ((tp->tte_data & TD_V) != 0) {
TSB_STATS_INC(tsb_nrepl);
- ova = tte_get_va(*tp);
+ ova = TV_GET_VA(tp->tte_vpn);
if ((tp->tte_data & TD_PV) != 0) {
om = PHYS_TO_VM_PAGE(TD_GET_PA(tp->tte_data));
if ((tp->tte_data & TD_W) != 0 &&
@@ -189,7 +189,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, struct tte tte)
pmap_cache_remove(om, ova);
pv_remove(pm, om, ova);
}
- tlb_tte_demap(*tp, ova);
+ tlb_tte_demap(*tp, pm->pm_context);
}
*tp = tte;
@@ -218,7 +218,7 @@ tsb_foreach(pmap_t pm1, pmap_t pm2, vm_offset_t start, vm_offset_t end,
for (i = 0; i < TSB_SIZE; i++) {
tp = &pm1->pm_tsb[i];
if ((tp->tte_data & TD_V) != 0) {
- va = tte_get_va(*tp);
+ va = TV_GET_VA(tp->tte_vpn);
if (va >= start && va < end) {
if (!callback(pm1, pm2, tp, va))
break;
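Finally, a sketch of the tsb_foreach() pattern after the change: the va of a valid tte is recovered straight from tte_vpn via TV_GET_VA(). TSB_SIZE and the callback signature are simplified for illustration (the kernel's callback also receives both pmaps).

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT      13
#define TD_V            (1UL << 63)
#define TV_GET_VA(vpn)  ((vpn) << PAGE_SHIFT)
#define TSB_SIZE        8       /* illustrative only */

struct tte {
        unsigned long tte_vpn;
        unsigned long tte_data;
};

typedef bool tsb_callback_t(struct tte *tp, unsigned long va);

static void
tsb_foreach_sketch(struct tte *tsb, unsigned long start, unsigned long end,
    tsb_callback_t *callback)
{
        unsigned long va;
        int i;

        for (i = 0; i < TSB_SIZE; i++) {
                struct tte *tp = &tsb[i];

                if ((tp->tte_data & TD_V) == 0)
                        continue;
                /* The va now comes straight from the vpn field. */
                va = TV_GET_VA(tp->tte_vpn);
                if (va >= start && va < end && !callback(tp, va))
                        break;
        }
}

static bool
print_tte(struct tte *tp, unsigned long va)
{
        printf("valid tte %#lx at va %#lx\n", tp->tte_data, va);
        return (true);
}

int
main(void)
{
        struct tte tsb[TSB_SIZE] = { { 0x40012000UL >> PAGE_SHIFT, TD_V } };

        tsb_foreach_sketch(tsb, 0, ~0UL, print_tte);
        return (0);
}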