author    Jake Burkholder <jake@FreeBSD.org>  2003-03-19 06:55:37 +0000
committer Jake Burkholder <jake@FreeBSD.org>  2003-03-19 06:55:37 +0000
commit    00aabd830da70e9235d302cdcb72c31b7dc3c43a (patch)
tree      144cd22224c382c87b331ba393ed9686c632a48c
parent    2b8cd5a6a2e094d03fb1de474486c6cef800eef8 (diff)
-rw-r--r--  sys/conf/files.sparc64             |   2
-rw-r--r--  sys/sparc64/include/cache.h        |  76
-rw-r--r--  sys/sparc64/include/pmap.h         |   8
-rw-r--r--  sys/sparc64/include/smp.h          |  20
-rw-r--r--  sys/sparc64/sparc64/cache.c        | 385
-rw-r--r--  sys/sparc64/sparc64/cheetah.c      |  71
-rw-r--r--  sys/sparc64/sparc64/mp_exception.S |  45
-rw-r--r--  sys/sparc64/sparc64/spitfire.c     | 109
8 files changed, 260 insertions(+), 456 deletions(-)
diff --git a/sys/conf/files.sparc64 b/sys/conf/files.sparc64
index 8f8ff7480e44..93887df20f0f 100644
--- a/sys/conf/files.sparc64
+++ b/sys/conf/files.sparc64
@@ -36,6 +36,7 @@ sparc64/sbus/sbus.c optional sbus
sparc64/sparc64/autoconf.c standard
sparc64/sparc64/bus_machdep.c standard
sparc64/sparc64/cache.c standard
+sparc64/sparc64/cheetah.c standard
sparc64/sparc64/clock.c standard
sparc64/sparc64/counter.c standard
sparc64/sparc64/critical.c standard
@@ -68,6 +69,7 @@ sparc64/sparc64/pmap.c standard
sparc64/sparc64/prof_machdep.c optional profiling-routine
sparc64/sparc64/rwindow.c standard
sparc64/sparc64/sparcbus_if.m standard
+sparc64/sparc64/spitfire.c standard
sparc64/sparc64/support.S standard
sparc64/sparc64/sys_machdep.c standard
sparc64/sparc64/swtch.S standard
diff --git a/sys/sparc64/include/cache.h b/sys/sparc64/include/cache.h
index 1ebffc1ffe9b..68b88c748d8e 100644
--- a/sys/sparc64/include/cache.h
+++ b/sys/sparc64/include/cache.h
@@ -47,65 +47,14 @@
#ifndef _MACHINE_CACHE_H_
#define _MACHINE_CACHE_H_
-#include <vm/vm.h>
-#include <vm/pmap.h>
-
#include <dev/ofw/openfirm.h>
-/*
- * Cache diagnostic access definitions.
- */
-/* ASI offsets for I$ diagnostic access */
-#define ICDA_SET_SHIFT 13
-#define ICDA_SET_MASK (1UL << ICDA_SET_SHIFT)
-#define ICDA_SET(a) (((a) << ICDA_SET_SHIFT) & ICDA_SET_MASK)
-/* I$ tag/valid format */
-#define ICDT_TAG_SHIFT 8
-#define ICDT_TAG_BITS 28
-#define ICDT_TAG_MASK (((1UL << ICDT_TAG_BITS) - 1) << ICDT_TAG_SHIFT)
-#define ICDT_TAG(x) (((x) & ICDT_TAG_MASK) >> ICDT_TAG_SHIFT)
-#define ICDT_VALID (1UL << 36)
-/* D$ tag/valid format */
-#define DCDT_TAG_SHIFT 2
-#define DCDT_TAG_BITS 28
-#define DCDT_TAG_MASK (((1UL << DCDT_TAG_BITS) - 1) << DCDT_TAG_SHIFT)
-#define DCDT_TAG(x) (((x) & DCDT_TAG_MASK) >> DCDT_TAG_SHIFT)
-#define DCDT_VALID_BITS 2
-#define DCDT_VALID_MASK ((1UL << DCDT_VALID_BITS) - 1)
-/* E$ ASI_ECACHE_W/ASI_ECACHE_R address flags */
-#define ECDA_DATA (1UL << 39)
-#define ECDA_TAG (1UL << 40)
-/* E$ tag/state/parity format */
-#define ECDT_TAG_BITS 13
-#define ECDT_TAG_SIZE (1UL << ECDT_TAG_BITS)
-#define ECDT_TAG_MASK (ECDT_TAG_SIZE - 1)
-
-/*
- * Do two virtual addresses (at which the same page is mapped) form an illegal
- * alias in D$? XXX: should use cache.dc_size here.
- */
-#define DCACHE_BOUNDARY 0x4000
-#define DCACHE_BMASK (DCACHE_BOUNDARY - 1)
-#define CACHE_BADALIAS(v1, v2) \
- (((v1) & DCACHE_BMASK) != ((v2) & DCACHE_BMASK))
-
-/*
- * Routines for dealing with the cache.
- */
-void cache_init(phandle_t); /* turn it on */
-void icache_flush(vm_offset_t, vm_offset_t);
-void icache_inval_phys(vm_offset_t, vm_offset_t);
-void dcache_flush(vm_offset_t, vm_offset_t);
-void dcache_inval(pmap_t, vm_offset_t, vm_offset_t);
-void dcache_inval_phys(vm_offset_t, vm_offset_t);
-void dcache_blast(void);
-void ecache_flush(vm_offset_t, vm_offset_t);
-#if 0
-void ecache_inval_phys(vm_offset_t, vm_offset_t);
-#endif
-
-void dcache_page_inval(vm_offset_t pa);
-void icache_page_inval(vm_offset_t pa);
+#define DCACHE_COLOR_BITS (1)
+#define DCACHE_COLORS (1 << DCACHE_COLOR_BITS)
+#define DCACHE_COLOR_MASK (DCACHE_COLORS - 1)
+#define DCACHE_COLOR(va) (((va) >> PAGE_SHIFT) & DCACHE_COLOR_MASK)
+#define DCACHE_OTHER_COLOR(color) \
+ ((color) ^ DCACHE_COLOR_BITS)
#define DC_TAG_SHIFT 2
#define DC_VALID_SHIFT 0
@@ -146,6 +95,19 @@ struct cacheinfo {
u_int ec_l2linesize;
};
+typedef void dcache_page_inval_t(vm_offset_t pa);
+typedef void icache_page_inval_t(vm_offset_t pa);
+
+void cache_init(phandle_t node);
+
+void cheetah_dcache_page_inval(vm_offset_t pa);
+void cheetah_icache_page_inval(vm_offset_t pa);
+void spitfire_dcache_page_inval(vm_offset_t pa);
+void spitfire_icache_page_inval(vm_offset_t pa);
+
+extern dcache_page_inval_t *dcache_page_inval;
+extern icache_page_inval_t *icache_page_inval;
+
extern struct cacheinfo cache;
#endif /* !_MACHINE_CACHE_H_ */
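The DCACHE_COLOR macros that move here from pmap.h compute the virtual color
of a page: with DCACHE_COLOR_BITS set to 1, the direct-mapped D$ spans two
page-sized regions, so two mappings of the same physical page avoid an illegal
alias only if they land on the same color. A minimal standalone sketch,
assuming sparc64's 8KB base pages (PAGE_SHIFT = 13); the macro copies are for
illustration only:

    #include <stdio.h>

    /* Standalone copies of the macros above; PAGE_SHIFT = 13 is an
     * assumption (sparc64 uses 8KB base pages). */
    #define PAGE_SHIFT          13
    #define DCACHE_COLOR_BITS   1
    #define DCACHE_COLORS       (1 << DCACHE_COLOR_BITS)
    #define DCACHE_COLOR_MASK   (DCACHE_COLORS - 1)
    #define DCACHE_COLOR(va)    (((va) >> PAGE_SHIFT) & DCACHE_COLOR_MASK)

    int
    main(void)
    {
            unsigned long v1 = 0x2000;      /* page 1 -> color 1 */
            unsigned long v2 = 0x4000;      /* page 2 -> color 0 */
            unsigned long v3 = 0x6000;      /* page 3 -> color 1 */

            printf("colors: %lu %lu %lu\n",
                DCACHE_COLOR(v1), DCACHE_COLOR(v2), DCACHE_COLOR(v3));
            /* v1 and v3 share a color and may map the same physical page
             * without creating an illegal D$ alias; v1 and v2 may not. */
            return (0);
    }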
diff --git a/sys/sparc64/include/pmap.h b/sys/sparc64/include/pmap.h
index b0325f9db4cf..a5fddf1dfa8c 100644
--- a/sys/sparc64/include/pmap.h
+++ b/sys/sparc64/include/pmap.h
@@ -44,15 +44,9 @@
#define _MACHINE_PMAP_H_
#include <sys/queue.h>
+#include <machine/cache.h>
#include <machine/tte.h>
-#define DCACHE_COLOR_BITS (1)
-#define DCACHE_COLORS (1 << DCACHE_COLOR_BITS)
-#define DCACHE_COLOR_MASK (DCACHE_COLORS - 1)
-#define DCACHE_COLOR(va) (((va) >> PAGE_SHIFT) & DCACHE_COLOR_MASK)
-#define DCACHE_OTHER_COLOR(color) \
- ((color) ^ DCACHE_COLOR_BITS)
-
#define PMAP_CONTEXT_MAX 8192
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.tte_list))
diff --git a/sys/sparc64/include/smp.h b/sys/sparc64/include/smp.h
index 270244b0865c..5dbfc76b5b30 100644
--- a/sys/sparc64/include/smp.h
+++ b/sys/sparc64/include/smp.h
@@ -96,8 +96,10 @@ extern u_long mp_tramp_func;
extern void mp_startup(void);
-extern char tl_ipi_dcache_page_inval[];
-extern char tl_ipi_icache_page_inval[];
+extern char tl_ipi_cheetah_dcache_page_inval[];
+extern char tl_ipi_spitfire_dcache_page_inval[];
+extern char tl_ipi_spitfire_icache_page_inval[];
+
extern char tl_ipi_level[];
extern char tl_ipi_tlb_context_demap[];
extern char tl_ipi_tlb_page_demap[];
@@ -108,7 +110,7 @@ extern char tl_ipi_tlb_range_demap[];
#if defined(_MACHINE_PMAP_H_) && defined(_SYS_MUTEX_H_)
static __inline void *
-ipi_dcache_page_inval(vm_offset_t pa)
+ipi_dcache_page_inval(void *func, vm_offset_t pa)
{
struct ipi_cache_args *ica;
@@ -118,13 +120,12 @@ ipi_dcache_page_inval(vm_offset_t pa)
mtx_lock_spin(&ipi_mtx);
ica->ica_mask = all_cpus;
ica->ica_pa = pa;
- cpu_ipi_selected(PCPU_GET(other_cpus), 0,
- (u_long)tl_ipi_dcache_page_inval, (u_long)ica);
+ cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
return (&ica->ica_mask);
}
static __inline void *
-ipi_icache_page_inval(vm_offset_t pa)
+ipi_icache_page_inval(void *func, vm_offset_t pa)
{
struct ipi_cache_args *ica;
@@ -134,8 +135,7 @@ ipi_icache_page_inval(vm_offset_t pa)
mtx_lock_spin(&ipi_mtx);
ica->ica_mask = all_cpus;
ica->ica_pa = pa;
- cpu_ipi_selected(PCPU_GET(other_cpus), 0,
- (u_long)tl_ipi_icache_page_inval, (u_long)ica);
+ cpu_ipi_selected(PCPU_GET(other_cpus), 0, (u_long)func, (u_long)ica);
return (&ica->ica_mask);
}
@@ -215,13 +215,13 @@ ipi_wait(void *cookie)
#else
static __inline void *
-ipi_dcache_page_inval(vm_offset_t pa)
+ipi_dcache_page_inval(void *func, vm_offset_t pa)
{
return (NULL);
}
static __inline void *
-ipi_icache_page_inval(vm_offset_t pa)
+ipi_icache_page_inval(void *func, vm_offset_t pa)
{
return (NULL);
}
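The smp.h helpers now take the trap-level IPI handler as an explicit argument
instead of hard-coding a single symbol, so each CPU family can supply its own
invalidation routine. A sketch of the resulting calling pattern (this mirrors
the new cheetah.c and spitfire.c below; it is not standalone code):

    void *cookie;

    /* Ask the other CPUs to run the cheetah handler for this page,
     * invalidate locally, then wait for them to check in. */
    cookie = ipi_dcache_page_inval(tl_ipi_cheetah_dcache_page_inval, pa);
    /* ... local D$ invalidation for pa ... */
    ipi_wait(cookie);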
diff --git a/sys/sparc64/sparc64/cache.c b/sys/sparc64/sparc64/cache.c
index 4ef6bb30c05f..a3e1a1c953c1 100644
--- a/sys/sparc64/sparc64/cache.c
+++ b/sys/sparc64/sparc64/cache.c
@@ -75,120 +75,18 @@
* $FreeBSD$
*/
-/*
- * Cache routines.
- *
- * UltraSPARCs have a virtually indexed, physically tagged (VIPT) level 1 data
- * cache (D$) and physically indexed, physically tagged (PIPT) level 1
- * instruction (I$) and Level 2 (E$) caches.
- * D$ is directly mapped, I$ is pseudo 2-way associative. The Level 2 cache (E$)
- * is documented to be directly mapped on the UltraSPARC IIi, but there are
- * apparently models (using the IIe version) that have a 4-way associative E$.
- *
- * D$ uses a write-through model, while E$ uses write-back and is
- * write-allocating. The lines present in D$ are forced to be a subset of those
- * in E$.
- * This means that lines that are present in D$ always have an identical
- * corresponding (sub-) line in E$.
- *
- * The term "main memory" is used in the following to refer to the non-cache
- * memory as well as to memory-mapped device i/o space.
- *
- * There are 3 documented ways to flush the D$ and E$ caches:
- * - displacement flushing (a sequence of loads of addresses that alias to
- * to-be-flushed ones in the caches). This only works for directly mapped
- * caches, and is recommended to flush D$ and E$ in the IIi manual. It is not
- * used to flush E$ because of the aforementioned models that have a
- * multiple-associative E$. Displacement flushing invalidates the cache
- * entries and writes modified lines back to main memory.
- * - diagnostic accesses can be used to invalidate cache pages. All lines
- * are discarded, which means that changes in D$/E$ that have not been
- * committed to main memory are lost.
- * - block-commit stores. Those use the block transfer ASIs to load a
- * 64-byte block to a set of FP registers and store them back using a
- * special ASI that will cause the data to be immediately committed to main
- * memory. This method has the same properties as the first method, but
- * (hopefully) works regardless of the associativity of the caches. It is
- * expected to be slow.
- *
- * I$ can be handled using the flush instruction.
- *
- * Some usage guidelines:
- *
- * The inval functions are variants of the flush ones that discard modified
- * cache lines.
- * PCI DMA transactions are cache-coherent and do not require flushing
- * before DMA reads or after DMA writes. It is unclear from the manual
- * how far this applies to UPA transactions.
- *
- * icache_flush(): needed before code that has been written to memory is
- * executed, because I$ is not necessarily consistent with D$, E$, or
- * main memory. An exception is that I$ snoops DMA transfers, so no
- * flush is required after to-be-executed data has been fetched this way.
- * icache_inval_phys(): has roughly same effect as icache_flush() since there
- * are no writes to I$.
- *
- * dcache_flush(): required when a page mapping is changed from cacheable to
- * noncacheable, or to resolve illegal aliases. Both cases should happen
- * seldom. Mapping address changes do not require this, since D$ is VIPT.
- * dcache_inval(): has roughly same effect as dcache_flush() since D$ is
- * write-through.
- * dcache_blast(): discards all lines in D$.
- *
- * ecache_flush(): needed to commit modified lines to main memory, and to make
- * sure that no stale data is used when the main memory has changed without
- * the cache controller noticing. This is e.g. needed for device i/o space.
- * It is usually better to use a non-cacheable mapping in this case.
- * ecache_flush() is guaranteed to also flush the relevant addresses out of
- * D$.
- * ecache_inval_phys(): like ecache_flush(), but invalidates a physical range
- * in the cache. This function is usually dangerous and should not be used.
- *
- * All operations have a line size granularity!
- *
- * All flush methods tend to be expensive, so unnecessary flushes should be
- * avoided.
- */
-
-#include "opt_pmap.h"
-
#include <sys/param.h>
-#include <sys/linker_set.h>
-#include <sys/proc.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/smp.h>
-#include <sys/sysctl.h>
#include <sys/systm.h>
-#include <vm/vm.h>
-#include <vm/pmap.h>
+#include <dev/ofw/openfirm.h>
#include <machine/cache.h>
-#include <machine/cpufunc.h>
-#include <machine/fp.h>
-#include <machine/fsr.h>
-#include <machine/pcb.h>
-#include <machine/pmap.h>
-#include <machine/smp.h>
-#include <machine/tte.h>
#include <machine/ver.h>
-#include <machine/vmparam.h>
struct cacheinfo cache;
-PMAP_STATS_VAR(dcache_npage_inval);
-PMAP_STATS_VAR(dcache_npage_inval_match);
-PMAP_STATS_VAR(icache_npage_inval);
-PMAP_STATS_VAR(icache_npage_inval_match);
-
-/* Read to %g0, needed for E$ access. */
-#define CDIAG_RDG0(asi, addr) \
- __asm __volatile("ldxa [%0] %1, %%g0" : : "r" (addr), "I" (asi))
-/* Sigh. I$ diagnostic registers want ldda. */
-#define ICDIAG_RD(asi, addr, r) \
- __asm __volatile("ldda [%1] %2, %%o4; mov %%o5, %0" : "=r" (r) :\
- "r" (addr), "I" (asi) : "%o4", "%o5");
+dcache_page_inval_t *dcache_page_inval;
+icache_page_inval_t *icache_page_inval;
#define OF_GET(h, n, v) OF_getprop((h), (n), &(v), sizeof(v))
@@ -224,277 +122,12 @@ cache_init(phandle_t node)
cache.ec_l2set = ffs(set) - 1;
if ((set & ~(1UL << cache.ec_l2set)) != 0)
panic("cache_init: E$ set size not a power of 2");
- cache.c_enabled = 1; /* enable cache flushing */
-}
-
-void
-dcache_page_inval(vm_offset_t pa)
-{
- u_long target;
- void *cookie;
- u_long addr;
- u_long tag;
-
- KASSERT((pa & PAGE_MASK) == 0,
- ("dcache_page_inval: pa not page aligned"));
-
- if (!cache.c_enabled)
- return;
- PMAP_STATS_INC(dcache_npage_inval);
- target = pa >> (PAGE_SHIFT - DC_TAG_SHIFT);
- cookie = ipi_dcache_page_inval(pa);
- for (addr = 0; addr < cache.dc_size; addr += cache.dc_linesize) {
- tag = ldxa(addr, ASI_DCACHE_TAG);
- if (((tag >> DC_VALID_SHIFT) & DC_VALID_MASK) == 0)
- continue;
- tag &= DC_TAG_MASK << DC_TAG_SHIFT;
- if (tag == target) {
- PMAP_STATS_INC(dcache_npage_inval_match);
- stxa_sync(addr, ASI_DCACHE_TAG, tag);
- }
- }
- ipi_wait(cookie);
-}
-
-void
-icache_page_inval(vm_offset_t pa)
-{
- register u_long tag __asm("%g1");
- u_long target;
- void *cookie;
- u_long addr;
-
- KASSERT((pa & PAGE_MASK) == 0,
- ("icache_page_inval: pa not page aligned"));
-
- if (!cache.c_enabled)
- return;
- PMAP_STATS_INC(icache_npage_inval);
- target = pa >> (PAGE_SHIFT - IC_TAG_SHIFT);
- cookie = ipi_icache_page_inval(pa);
- for (addr = 0; addr < cache.ic_size; addr += cache.ic_linesize) {
- __asm __volatile("ldda [%1] %2, %%g0" /*, %g1 */
- : "=r" (tag) : "r" (addr), "n" (ASI_ICACHE_TAG));
- if (((tag >> IC_VALID_SHIFT) & IC_VALID_MASK) == 0)
- continue;
- tag &= IC_TAG_MASK << IC_TAG_SHIFT;
- if (tag == target) {
- PMAP_STATS_INC(icache_npage_inval_match);
- stxa_sync(addr, ASI_ICACHE_TAG, tag);
- }
- }
- ipi_wait(cookie);
-}
-
-
-/* Flush a range of addresses from I$ using the flush instruction. */
-void
-icache_flush(vm_offset_t start, vm_offset_t end)
-{
- char *p, *ep;
- int ls;
-
- if (!cache.c_enabled)
- return;
-
- ls = cache.ic_linesize;
- ep = (char *)ulmin(end, start + cache.ic_size);
- for (p = (char *)start; p < ep; p += ls)
- flush(p);
-}
-
-/*
- * Invalidate an I$ physical range using diagnostic accesses.
- * NOTE: there is a race between checking the tag and invalidating it. It
- * cannot be closed by disabling interrupts, since the fetch for the next
- * instruction may be in that line, so we don't even bother.
- * Since blasting a line does not discard data, this has no ill effect except
- * a minor slowdown.
- */
-void
-icache_inval_phys(vm_offset_t start, vm_offset_t end)
-{
- vm_offset_t addr, ica;
- u_long tag;
- u_long j;
-
- if (!cache.c_enabled)
- return;
-
- for (addr = start & ~(cache.ic_linesize - 1); addr <= end;
- addr += cache.ic_linesize) {
- for (j = 0; j < 2; j++) {
- ica = (addr & (cache.ic_set - 1)) | ICDA_SET(j);
- ICDIAG_RD(ASI_ICACHE_TAG, ica, tag);
- if ((tag & ICDT_VALID) == 0 ||
- ICDT_TAG(tag) != addr >> cache.ic_l2set)
- continue;
- stxa_sync(ica, ASI_ICACHE_TAG, 0);
- }
- }
-}
-
-/*
- * Flush a range of addresses from D$ using displacement flushes. This does
- * not necessarily flush E$, because we do not take care of flushing the
- * correct physical colors and E$ may not be directly mapped.
- */
-void
-dcache_flush(vm_offset_t start, vm_offset_t end)
-{
- int j;
- vm_offset_t baseoff;
- u_long i, mask;
- char *kp;
-
- if (!cache.c_enabled)
- return;
-
- mask = cache.dc_size - 1;
- /* No need to flush lines more than once. */
- baseoff = start & mask;
- /*
- * Use a locked page for flushing. D$ should be smaller than 4M, which
- * is somewhat likely...
- */
- kp = (char *)KERNBASE;
- j = 0;
- for (i = 0; i <= ulmin((end - start), cache.dc_size);
- i += cache.dc_linesize)
- j += kp[(baseoff + i) & mask];
-}
-
-/*
- * Invalidate a D$ range using diagnostic accesses.
- * This has the same (harmless) races as icache_blast().
- */
-void
-dcache_inval(pmap_t pmap, vm_offset_t start, vm_offset_t end)
-{
- vm_offset_t va, pa, offs, dca;
- u_long tag;
-
- if (!cache.c_enabled)
- return;
- for (va = start & ~(cache.dc_linesize - 1); va <= end;
- va = (va + PAGE_SIZE_MIN) & ~PAGE_MASK_MIN) {
- if ((pa = pmap_extract(pmap, va)) == 0)
- continue;
- for (offs = start & PAGE_MASK_MIN;
- offs < ulmin(PAGE_SIZE_MIN, end - va + 1);
- offs += cache.dc_linesize) {
- dca = (va + offs) & (cache.dc_size - 1);
- tag = ldxa(dca, ASI_DCACHE_TAG);
- if (DCDT_TAG(tag) != (pa + offs) >> PAGE_SHIFT_MIN)
- continue;
- stxa_sync(dca, ASI_DCACHE_TAG, 0);
- }
- }
-}
-
-/*
- * Invalidate a physical D$ range using diagnostic accesses.
- * This has the same (harmless) races as icache_blast().
- */
-void
-dcache_inval_phys(vm_offset_t start, vm_offset_t end)
-{
- vm_offset_t pa, dca;
- u_long tag, color, ncolors;
-
- if (!cache.c_enabled)
- return;
- ncolors = 1 << (cache.dc_l2size - PAGE_SHIFT_MIN);
- for (pa = start & ~(cache.dc_linesize - 1); pa <= end;
- pa += cache.dc_linesize) {
- for (color = 0; color < ncolors; color++) {
- dca = (color << PAGE_SHIFT_MIN) | (pa & PAGE_MASK_MIN);
- tag = ldxa(dca, ASI_DCACHE_TAG);
- if (DCDT_TAG(tag) == pa >> PAGE_SHIFT_MIN) {
- stxa_sync(dca, ASI_DCACHE_TAG, 0);
- break;
- }
- }
- }
-}
-
-/* Discard all lines in D$. */
-void
-dcache_blast()
-{
- vm_offset_t dca;
-
- if (!cache.c_enabled)
- return;
- for (dca = 0; dca < cache.dc_size; dca += cache.dc_linesize)
- stxa_sync(dca, ASI_DCACHE_TAG, 0);
-}
-
-/* Flush an E$ physical range using block commit stores. */
-void
-ecache_flush(vm_offset_t start, vm_offset_t end)
-{
- vm_offset_t addr;
-
- if (!cache.c_enabled)
- return;
-
- /* XXX: not needed in all cases, provide a wrapper in fp.c */
- savefpctx(&curthread->td_pcb->pcb_fpstate);
- wr(fprs, 0, FPRS_FEF);
-
- for (addr = start & ~(cache.ec_linesize - 1); addr <= end;
- addr += cache.ec_linesize) {
- __asm __volatile("ldda [%0] %1, %%f0; membar #Sync; "
- "stda %%f0, [%0] %2" : : "r" (addr), "I" (ASI_BLK_S),
- "I" (ASI_BLK_COMMIT_S));
- }
- membar(Sync);
-
- restorefpctx(&curthread->td_pcb->pcb_fpstate);
-}
-
-#if 0
-/*
- * Invalidate an E$ range using diagnostic accesses.
- * This is disabled: it suffers from the same races as dcache_blast() and
- * icache_blast_phys(), but they may be fatal here because blasting an E$ line
- * can discard modified data.
- * There is really no use for this anyway.
- */
-void
-ecache_inval_phys(vm_offset_t start, vm_offset_t end)
-{
- vm_offset_t addr, eca;
- u_long tag, j;
-
- if (!cache.c_enabled)
- return;
- for (addr = start & ~(cache.ec_linesize - 1); addr <= end;
- addr += cache.ec_linesize) {
- for (j = 0; j < cache.ec_assoc; j++) {
- /* XXX: guesswork... */
- eca = (addr & (cache.ec_size - 1)) |
- (j << (cache.ec_l2set));
- /*
- * Retrieve the tag:
- * A read from the appropriate VA in ASI_ECACHE_R
- * will transfer the tag from the tag RAM to the
- * data register (ASI_ECACHE_TAG_DATA, VA 0).
- */
- CDIAG_RDG0(ASI_ECACHE_R, ECDA_TAG | eca);
- tag = ldxa(0, ASI_ECACHE_TAG_DATA);
- if ((addr & ~cache.ec_size) >> cache.ec_l2set ==
- (tag & ECDT_TAG_MASK)) {
- /*
- * Clear. Works like retrieving the tag, but
- * the other way round.
- */
- stxa_sync(0, ASI_ECACHE_TAG_DATA, 0);
- stxa_sync(ECDA_TAG | eca, ASI_ECACHE_W, 0);
- }
- }
+ if (cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
+ dcache_page_inval = cheetah_dcache_page_inval;
+ icache_page_inval = cheetah_icache_page_inval;
+ } else {
+ dcache_page_inval = spitfire_dcache_page_inval;
+ icache_page_inval = spitfire_icache_page_inval;
}
}
-#endif
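After this change cache.c only probes the cache geometry from the firmware
node and installs the per-implementation handlers: UltraSPARC III (cheetah)
and later get the invalidate-by-address routines, everything older gets the
tag-walking spitfire routines. Callers go through the pointers; a hedged
sketch of a call site (the vm_page_t variable m is assumed for illustration):

    /* After writing instructions into the page backed by m, flush it
     * through the handlers selected by cache_init(). */
    vm_offset_t pa;

    pa = VM_PAGE_TO_PHYS(m);
    dcache_page_inval(pa);      /* cheetah_* or spitfire_* variant */
    icache_page_inval(pa);      /* no-op on cheetah; I$ snoops there */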
diff --git a/sys/sparc64/sparc64/cheetah.c b/sys/sparc64/sparc64/cheetah.c
new file mode 100644
index 000000000000..27a5a1c3593a
--- /dev/null
+++ b/sys/sparc64/sparc64/cheetah.c
@@ -0,0 +1,71 @@
+/*-
+ * Copyright (c) 2003 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_pmap.h"
+
+#include <sys/param.h>
+#include <sys/linker_set.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/cache.h>
+#include <machine/cpufunc.h>
+#include <machine/smp.h>
+
+/*
+ * Flush a physical page from the data cache.
+ */
+void
+cheetah_dcache_page_inval(vm_offset_t spa)
+{
+ vm_offset_t pa;
+ void *cookie;
+
+ KASSERT((spa & PAGE_MASK) == 0,
+ ("dcache_page_inval: pa not page aligned"));
+ cookie = ipi_dcache_page_inval(tl_ipi_cheetah_dcache_page_inval, spa);
+ for (pa = spa; pa < spa + PAGE_SIZE; pa += cache.dc_linesize)
+ stxa_sync(pa, ASI_DCACHE_INVALIDATE, 0);
+ ipi_wait(cookie);
+}
+
+/*
+ * Flush a physical page from the instruction cache. Instruction cache
+ * consistency is maintained by hardware.
+ */
+void
+cheetah_icache_page_inval(vm_offset_t pa)
+{
+}
diff --git a/sys/sparc64/sparc64/mp_exception.S b/sys/sparc64/sparc64/mp_exception.S
index f6964c8893d4..d8e977e2d2f1 100644
--- a/sys/sparc64/sparc64/mp_exception.S
+++ b/sys/sparc64/sparc64/mp_exception.S
@@ -44,9 +44,9 @@
nop
/*
- * Invalidate a phsyical page in the data cache.
+ * Invalidate a physical page in the data cache. For UltraSPARC I and II.
*/
-ENTRY(tl_ipi_dcache_page_inval)
+ENTRY(tl_ipi_spitfire_dcache_page_inval)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "ipi_dcache_page_inval: pa=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
@@ -81,12 +81,13 @@ ENTRY(tl_ipi_dcache_page_inval)
IPI_WAIT(%g5, %g1, %g2, %g3)
retry
-END(tl_ipi_dcache_page_inval)
+END(tl_ipi_spitfire_dcache_page_inval)
/*
- * Invalidate a phsyical page in the instruction cache.
+ * Invalidate a physical page in the instruction cache. For UltraSPARC I and
+ * II.
*/
-ENTRY(tl_ipi_icache_page_inval)
+ENTRY(tl_ipi_spitfire_icache_page_inval)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "ipi_icache_page_inval: pa=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
@@ -121,7 +122,39 @@ ENTRY(tl_ipi_icache_page_inval)
IPI_WAIT(%g5, %g1, %g2, %g3)
retry
-END(tl_ipi_icache_page_inval)
+END(tl_ipi_spitfire_icache_page_inval)
+
+/*
+ * Invalidate a physical page in the data cache. For UltraSPARC III.
+ */
+ENTRY(tl_ipi_cheetah_dcache_page_inval)
+#if KTR_COMPILE & KTR_SMP
+ CATR(KTR_SMP, "ipi_dcache_page_inval: pa=%#lx"
+ , %g1, %g2, %g3, 7, 8, 9)
+ ldx [%g5 + ICA_PA], %g2
+ stx %g2, [%g1 + KTR_PARM1]
+9:
+#endif
+
+ ldx [%g5 + ICA_PA], %g1
+
+ set PAGE_SIZE, %g2
+ add %g1, %g2, %g3
+
+ SET(cache, %g4, %g2)
+ lduw [%g2 + DC_LINESIZE], %g2
+
+1: stxa %g0, [%g1] ASI_DCACHE_INVALIDATE
+ membar #Sync
+
+ add %g1, %g2, %g1
+ cmp %g1, %g3
+ blt,a,pt %xcc, 1b
+ nop
+
+ IPI_WAIT(%g5, %g1, %g2, %g3)
+ retry
+END(tl_ipi_cheetah_dcache_page_inval)
/*
* Trigger a softint at the desired level.
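The new cheetah IPI handler above is the trap-level twin of
cheetah_dcache_page_inval() from cheetah.c; in rough C (a sketch of what the
assembly does, using the same structures):

    /* Walk the page one D$ line at a time, invalidating by address;
     * stxa_sync() is the stxa + membar #Sync pair from the loop. */
    vm_offset_t va;

    for (va = ica->ica_pa; va < ica->ica_pa + PAGE_SIZE;
        va += cache.dc_linesize)
            stxa_sync(va, ASI_DCACHE_INVALIDATE, 0);
    /* IPI_WAIT then clears this CPU's bit in ica->ica_mask. */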
diff --git a/sys/sparc64/sparc64/spitfire.c b/sys/sparc64/sparc64/spitfire.c
new file mode 100644
index 000000000000..2d905d5ffef4
--- /dev/null
+++ b/sys/sparc64/sparc64/spitfire.c
@@ -0,0 +1,109 @@
+/*-
+ * Copyright (c) 2003 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "opt_pmap.h"
+
+#include <sys/param.h>
+#include <sys/linker_set.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/cache.h>
+#include <machine/cpufunc.h>
+#include <machine/smp.h>
+
+PMAP_STATS_VAR(spitfire_dcache_npage_inval);
+PMAP_STATS_VAR(spitfire_dcache_npage_inval_match);
+PMAP_STATS_VAR(spitfire_icache_npage_inval);
+PMAP_STATS_VAR(spitfire_icache_npage_inval_match);
+
+/*
+ * Flush a physical page from the data cache.
+ */
+void
+spitfire_dcache_page_inval(vm_offset_t pa)
+{
+ u_long target;
+ void *cookie;
+ u_long addr;
+ u_long tag;
+
+ KASSERT((pa & PAGE_MASK) == 0,
+ ("dcache_page_inval: pa not page aligned"));
+ PMAP_STATS_INC(spitfire_dcache_npage_inval);
+ target = pa >> (PAGE_SHIFT - DC_TAG_SHIFT);
+ cookie = ipi_dcache_page_inval(tl_ipi_spitfire_dcache_page_inval, pa);
+ for (addr = 0; addr < cache.dc_size; addr += cache.dc_linesize) {
+ tag = ldxa(addr, ASI_DCACHE_TAG);
+ if (((tag >> DC_VALID_SHIFT) & DC_VALID_MASK) == 0)
+ continue;
+ tag &= DC_TAG_MASK << DC_TAG_SHIFT;
+ if (tag == target) {
+ PMAP_STATS_INC(spitfire_dcache_npage_inval_match);
+ stxa_sync(addr, ASI_DCACHE_TAG, tag);
+ }
+ }
+ ipi_wait(cookie);
+}
+
+/*
+ * Flush a physical page from the instruction cache.
+ */
+void
+spitfire_icache_page_inval(vm_offset_t pa)
+{
+ register u_long tag __asm("%g1");
+ u_long target;
+ void *cookie;
+ u_long addr;
+
+ KASSERT((pa & PAGE_MASK) == 0,
+ ("icache_page_inval: pa not page aligned"));
+ PMAP_STATS_INC(spitfire_icache_npage_inval);
+ target = pa >> (PAGE_SHIFT - IC_TAG_SHIFT);
+ cookie = ipi_icache_page_inval(tl_ipi_spitfire_icache_page_inval, pa);
+ for (addr = 0; addr < cache.ic_size; addr += cache.ic_linesize) {
+ __asm __volatile("ldda [%1] %2, %%g0" /*, %g1 */
+ : "=r" (tag) : "r" (addr), "n" (ASI_ICACHE_TAG));
+ if (((tag >> IC_VALID_SHIFT) & IC_VALID_MASK) == 0)
+ continue;
+ tag &= IC_TAG_MASK << IC_TAG_SHIFT;
+ if (tag == target) {
+ PMAP_STATS_INC(spitfire_icache_npage_inval_match);
+ stxa_sync(addr, ASI_ICACHE_TAG, tag);
+ }
+ }
+ ipi_wait(cookie);
+}
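Spitfire-class CPUs have no invalidate-by-address ASI, so both routines walk
every cache line's tag and compare it against the page's physical tag.
Shifting pa right by (PAGE_SHIFT - DC_TAG_SHIFT) lines the physical page
number up with the tag field, so the masked tag can be compared directly. A
small standalone example of that arithmetic, assuming 8KB pages
(PAGE_SHIFT = 13) and the DC_TAG_SHIFT = 2 from cache.h:

    #include <stdio.h>

    #define PAGE_SHIFT      13      /* assumed: sparc64 8KB base pages */
    #define DC_TAG_SHIFT    2       /* from machine/cache.h above */

    int
    main(void)
    {
            unsigned long pa = 0x123456000UL;   /* page-aligned PA */
            unsigned long target;

            /* The physical page number of pa is pa >> PAGE_SHIFT; placing
             * it at bit DC_TAG_SHIFT matches the stored tag layout. */
            target = pa >> (PAGE_SHIFT - DC_TAG_SHIFT);
            printf("pa %#lx -> tag target %#lx\n", pa, target);
            return (0);
    }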