Diffstat (limited to 'sys/arch/m88k')
-rw-r--r--  sys/arch/m88k/m88k/pmap.c | 341
1 file changed, 204 insertions(+), 137 deletions(-)
diff --git a/sys/arch/m88k/m88k/pmap.c b/sys/arch/m88k/m88k/pmap.c
index 25cf380ce07..cca1356a8c1 100644
--- a/sys/arch/m88k/m88k/pmap.c
+++ b/sys/arch/m88k/m88k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.64 2011/10/09 17:01:34 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.65 2011/10/09 17:04:07 miod Exp $ */
/*
* Copyright (c) 2001-2004, 2010, Miodrag Vallat.
@@ -146,8 +146,6 @@ void pmap_remove_pte(pmap_t, vaddr_t, pt_entry_t *,
struct vm_page *, boolean_t);
void pmap_remove_range(pmap_t, vaddr_t, vaddr_t);
boolean_t pmap_testbit(struct vm_page *, int);
-void tlb_flush(pmap_t, vaddr_t);
-void tlb_kflush(vaddr_t);
static __inline pv_entry_t
pg_to_pvh(struct vm_page *pg)
@@ -195,66 +193,6 @@ pmap_pte(pmap_t pmap, vaddr_t va)
}
/*
- * [MD PUBLIC]
- * Change the cache control bits of the address range `sva'..`eva' in
- * pmap_kernel to `mode'.
- */
-void
-pmap_cache_ctrl(vaddr_t sva, vaddr_t eva, u_int mode)
-{
- int s;
- pt_entry_t opte, *pte;
- vaddr_t va;
- paddr_t pa;
- cpuid_t cpu;
-
- DPRINTF(CD_CACHE, ("pmap_cache_ctrl(%p, %p, %x)\n",
- sva, eva, mode));
-
- s = splvm();
- for (va = sva; va != eva; va += PAGE_SIZE) {
- if ((pte = pmap_pte(pmap_kernel(), va)) == NULL)
- continue;
- DPRINTF(CD_CACHE, ("cache_ctrl: pte@%p\n", pte));
-
- /*
- * Data cache should be copied back and invalidated if
- * the old mapping was cached and the new isn't, or if
- * we are downgrading from writeback to writethrough.
- */
- if (((*pte & CACHE_INH) == 0 && (mode & CACHE_INH) != 0) ||
- ((*pte & CACHE_WT) == 0 && (mode & CACHE_WT) != 0)) {
- pa = ptoa(PG_PFNUM(*pte));
-#ifdef MULTIPROCESSOR
- for (cpu = 0; cpu < MAX_CPUS; cpu++)
- if (ISSET(m88k_cpus[cpu].ci_flags, CIF_ALIVE)) {
-#else
- cpu = cpu_number();
-#endif
- if (mode & CACHE_INH)
- cmmu_cache_wbinv(cpu,
- pa, PAGE_SIZE);
- else
- cmmu_dcache_wb(cpu,
- pa, PAGE_SIZE);
-#ifdef MULTIPROCESSOR
- }
-#endif
- }
-
- /*
- * Invalidate pte temporarily to avoid being written back
- * the modified bit and/or the reference bit by any other cpu.
- */
-
- opte = invalidate_pte(pte);
- *pte = (opte & ~CACHE_MASK) | mode;
- tlb_kflush(va);
- }
- splx(s);
-}
-
-/*
* [MI]
* Checks how virtual address `va' would translate with `pmap' as the active
* pmap. Returns TRUE and matching physical address in `pap' (if not NULL) if
@@ -335,81 +273,130 @@ pmap_translation_info(pmap_t pmap, vaddr_t va, paddr_t *pap, uint32_t *ti)
* TLB (ATC) routines
*/
+#ifdef M88100
+void tlb_flush_88100(pmap_t, vaddr_t, pt_entry_t);
+void tlb_kflush_88100(vaddr_t, pt_entry_t);
+#endif
+#ifdef M88110
+void tlb_flush_88110(pmap_t, vaddr_t, pt_entry_t);
+void tlb_kflush_88110(vaddr_t, pt_entry_t);
+#endif
+#if defined(M88100) && defined(M88110)
+void (*tlb_flush_fnptr)(pmap_t, vaddr_t, pt_entry_t);
+void (*tlb_kflush_fnptr)(vaddr_t, pt_entry_t);
+#define tlb_flush(pm,va,pte) (*tlb_flush_fnptr)(pm,va,pte)
+#define tlb_kflush(va,pte) (*tlb_kflush_fnptr)(va,pte)
+#else
+#ifdef M88100
+#define tlb_flush tlb_flush_88100
+#define tlb_kflush tlb_kflush_88100
+#else
+#define tlb_flush tlb_flush_88110
+#define tlb_kflush tlb_kflush_88110
+#endif
+#endif
+
/*
* [INTERNAL]
* Flush translation cache entry for `va' in `pmap'. May act lazily.
*/
+#ifdef M88100
void
-tlb_flush(pmap_t pmap, vaddr_t va)
+tlb_flush_88100(pmap_t pmap, vaddr_t va, pt_entry_t pte)
{
struct cpu_info *ci;
boolean_t kernel = pmap == pmap_kernel();
-
-#ifdef MULTIPROCESSOR /* { */
+#ifdef MULTIPROCESSOR
CPU_INFO_ITERATOR cpu;
+#endif
/*
* On 88100, we take action immediately.
*/
- if (CPU_IS88100) {
- CPU_INFO_FOREACH(cpu, ci) {
- if (kernel || pmap == ci->ci_curpmap)
- cmmu_tlb_inv(ci->ci_cpuid, kernel, va);
- }
+#ifdef MULTIPROCESSOR
+ CPU_INFO_FOREACH(cpu, ci)
+#else
+ ci = curcpu();
+#endif
+ {
+ if (kernel || pmap == ci->ci_curpmap)
+ cmmu_tlb_inv(ci->ci_cpuid, kernel, va);
}
+}
+#endif
+#ifdef M88110
+void
+tlb_flush_88110(pmap_t pmap, vaddr_t va, pt_entry_t pte)
+{
+ struct cpu_info *ci;
+ boolean_t kernel = pmap == pmap_kernel();
+#ifdef MULTIPROCESSOR
+ CPU_INFO_ITERATOR cpu;
+#endif
/*
* On 88110, we only remember which TLBs need to be invalidated,
* and let pmap_update() do the actual work later.
*/
- if (CPU_IS88110) {
- CPU_INFO_FOREACH(cpu, ci) {
- if (kernel)
- ci->ci_pmap_ipi |= CI_IPI_TLB_FLUSH_KERNEL;
- else if (pmap == ci->ci_curpmap)
- ci->ci_pmap_ipi |= CI_IPI_TLB_FLUSH_USER;
- }
+#ifdef MULTIPROCESSOR /* { */
+ CPU_INFO_FOREACH(cpu, ci) {
+ if (kernel)
+ ci->ci_pmap_ipi |= CI_IPI_TLB_FLUSH_KERNEL;
+ else if (pmap == ci->ci_curpmap)
+ ci->ci_pmap_ipi |= CI_IPI_TLB_FLUSH_USER;
}
#else /* MULTIPROCESSOR */ /* } { */
ci = curcpu();
-
- if (kernel || pmap == ci->ci_curpmap) {
- if (CPU_IS88100)
- cmmu_tlb_inv(ci->ci_cpuid, kernel, va);
- if (CPU_IS88110)
- ci->ci_pmap_ipi |= kernel ?
- CI_IPI_TLB_FLUSH_KERNEL : CI_IPI_TLB_FLUSH_USER;
- }
+ if (kernel)
+ ci->ci_pmap_ipi |= CI_IPI_TLB_FLUSH_KERNEL;
+ else if (pmap == ci->ci_curpmap)
+ ci->ci_pmap_ipi |= CI_IPI_TLB_FLUSH_USER;
#endif /* MULTIPROCESSOR */ /* } */
}
+#endif
/*
* [INTERNAL]
* Flush translation cache entry for `va' in pmap_kernel(). Acts immediately.
*/
+#ifdef M88100
void
-tlb_kflush(vaddr_t va)
+tlb_kflush_88100(vaddr_t va, pt_entry_t pte)
{
struct cpu_info *ci;
-
-#ifdef MULTIPROCESSOR /* { */
+#ifdef MULTIPROCESSOR
CPU_INFO_ITERATOR cpu;
+#endif
- if (CPU_IS88100)
- CPU_INFO_FOREACH(cpu, ci)
- cmmu_tlb_inv(ci->ci_cpuid, TRUE, va);
- if (CPU_IS88110)
- CPU_INFO_FOREACH(cpu, ci)
- cmmu_tlb_inv(ci->ci_cpuid, TRUE, 0);
-#else /* MULTIPROCESSOR */ /* } { */
+#ifdef MULTIPROCESSOR
+ CPU_INFO_FOREACH(cpu, ci)
+#else
ci = curcpu();
-
- if (CPU_IS88100)
+#endif
+ {
cmmu_tlb_inv(ci->ci_cpuid, TRUE, va);
- if (CPU_IS88110)
+ }
+}
+#endif
+#ifdef M88110
+void
+tlb_kflush_88110(vaddr_t va, pt_entry_t pte)
+{
+ struct cpu_info *ci;
+#ifdef MULTIPROCESSOR
+ CPU_INFO_ITERATOR cpu;
+#endif
+
+#ifdef MULTIPROCESSOR /* { */
+ CPU_INFO_FOREACH(cpu, ci) {
cmmu_tlb_inv(ci->ci_cpuid, TRUE, 0);
+ }
+#else /* MULTIPROCESSOR */ /* } { */
+ ci = curcpu();
+ cmmu_tlb_inv(ci->ci_cpuid, TRUE, 0);
#endif /* MULTIPROCESSOR */ /* } */
}
+#endif
#ifdef M88110
/*
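
The split above makes the two flush strategies explicit: tlb_flush_88100() invalidates the ATC of every CPU that may hold the mapping immediately, while tlb_flush_88110() only records, per CPU, that an invalidation is owed and leaves the work to pmap_update(). Below is a minimal userland sketch of that deferred pattern, not the kernel code: struct cpu, the pending field and the printf bodies are stand-ins, and atomic_clear_int() is modeled with the GCC __atomic_exchange_n() builtin.

    #include <stdint.h>
    #include <stdio.h>

    #define FLUSH_KERNEL	0x01
    #define FLUSH_USER	0x02

    struct cpu {
    	uint32_t pending;	/* stand-in for ci_pmap_ipi */
    };

    /* cheap path, like tlb_flush_88110(): only record what is owed */
    static void
    flush_later(struct cpu *ci, int kernel)
    {
    	ci->pending |= kernel ? FLUSH_KERNEL : FLUSH_USER;
    }

    /* batched path, like pmap_update(): consume the flags exactly once */
    static void
    flush_now(struct cpu *ci)
    {
    	uint32_t ipi = __atomic_exchange_n(&ci->pending, 0, __ATOMIC_SEQ_CST);

    	if (ipi & FLUSH_KERNEL)
    		printf("invalidate kernel ATC\n");	/* cmmu_tlb_inv(cpu, TRUE, 0) */
    	if (ipi & FLUSH_USER)
    		printf("invalidate user ATC\n");	/* cmmu_tlb_inv(cpu, FALSE, 0) */
    }

    int
    main(void)
    {
    	struct cpu c = { 0 };

    	flush_later(&c, 1);
    	flush_later(&c, 1);	/* repeated flushes coalesce into one bit */
    	flush_now(&c);		/* the single point where work happens */
    	return 0;
    }
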
@@ -426,15 +413,17 @@ pmap_update(pmap_t pm)
if (CPU_IS88110) {
#endif
u_int ipi;
-#ifdef MULTIPROCESSOR
struct cpu_info *ci;
+#ifdef MULTIPROCESSOR
CPU_INFO_ITERATOR cpu;
+#endif
+#ifdef MULTIPROCESSOR
CPU_INFO_FOREACH(cpu, ci)
#else
- struct cpu_info *ci = curcpu();
+ ci = curcpu();
#endif
- /* CPU_INFO_FOREACH(cpu, ci) */ {
+ {
ipi = atomic_clear_int(&ci->ci_pmap_ipi);
if (ipi & CI_IPI_TLB_FLUSH_KERNEL)
cmmu_tlb_inv(ci->ci_cpuid, TRUE, 0);
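
pmap_update() and the rewritten flush routines share one idiom worth noting: under MULTIPROCESSOR, the brace-less CPU_INFO_FOREACH() loop adopts the following { } compound statement as its body, while on uniprocessor kernels ci = curcpu() leaves that same block as plain code, so the body is written exactly once. A compilable sketch of the trick, with NCPU, cpus[] and CPU_FOREACH() as hypothetical stand-ins for the real cpu_info machinery:

    #include <stdio.h>

    #define MULTIPROCESSOR		/* comment out to get the UP expansion */
    #define NCPU	2

    struct cpu { int pokes; };
    static struct cpu cpus[NCPU];

    /* stand-in for CPU_INFO_FOREACH: a brace-less for loop */
    #define CPU_FOREACH(i, ci) \
    	for ((i) = 0; (i) < NCPU && ((ci) = &cpus[(i)], 1); (i)++)

    static void
    poke_all(void)
    {
    	struct cpu *ci;
    #ifdef MULTIPROCESSOR
    	int i;

    	CPU_FOREACH(i, ci)
    #else
    	ci = &cpus[0];		/* stand-in for curcpu() */
    #endif
    	{
    		ci->pokes++;	/* single body, both configurations */
    	}
    }

    int
    main(void)
    {
    	poke_all();
    	printf("%d %d\n", cpus[0].pokes, cpus[1].pokes);
    	return 0;
    }
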
@@ -612,7 +601,7 @@ pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
void
pmap_map(paddr_t pa, psize_t sz, vm_prot_t prot, u_int cmode)
{
- pt_entry_t template, *pte;
+ pt_entry_t *pte, npte;
DPRINTF(CD_MAP, ("pmap_map(%p, %p, %x, %x)\n",
pa, sz, prot, cmode));
@@ -622,10 +611,10 @@ pmap_map(paddr_t pa, psize_t sz, vm_prot_t prot, u_int cmode)
pa, pa + sz);
#endif
- template = m88k_protection(prot) | cmode | PG_W | PG_V;
+ npte = m88k_protection(prot) | cmode | PG_W | PG_V;
#ifdef M88110
if (CPU_IS88110 && m88k_protection(prot) != PG_RO)
- template |= PG_M;
+ npte |= PG_M;
#endif
sz = atop(round_page(pa + sz) - trunc_page(pa));
@@ -634,7 +623,7 @@ pmap_map(paddr_t pa, psize_t sz, vm_prot_t prot, u_int cmode)
if ((pte = pmap_pte(pmap_kernel(), pa)) == NULL)
pte = pmap_expand_kmap(pa, 0);
- *pte = template | pa;
+ *pte = npte | pa;
pa += PAGE_SIZE;
pmap_kernel()->pm_stats.resident_count++;
pmap_kernel()->pm_stats.wired_count++;
@@ -658,6 +647,19 @@ pmap_bootstrap(paddr_t s_rom, paddr_t e_rom)
extern void *kernelstart;
extern void *etext;
+ /*
+ * Initialize function pointers depending on the CPU type
+ */
+#if defined(M88100) && defined(M88110)
+ if (CPU_IS88100) {
+ tlb_flush_fnptr = tlb_flush_88100;
+ tlb_kflush_fnptr = tlb_kflush_88100;
+ } else {
+ tlb_flush_fnptr = tlb_flush_88110;
+ tlb_kflush_fnptr = tlb_kflush_88110;
+ }
+#endif
+
virtual_avail = (vaddr_t)avail_end;
s_text = trunc_page((vaddr_t)&kernelstart);
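
This is the other half of the dispatch scheme declared before the flush routines: a kernel configured for both CPU models routes tlb_flush()/tlb_kflush() through function pointers assigned once here, based on CPU_IS88100, while a single-model kernel binds the names at compile time through macros and pays no indirection. A self-contained sketch under those assumptions; flush_88100()/flush_88110() and cpu_is_88100 are illustrative stand-ins:

    #include <stdio.h>

    #define M88100
    #define M88110

    static void flush_88100(unsigned va) { printf("88100: inv %x now\n", va); }
    static void flush_88110(unsigned va) { printf("88110: defer %x\n", va); }

    #if defined(M88100) && defined(M88110)
    static void (*flush_fnptr)(unsigned);
    #define flush(va)	(*flush_fnptr)(va)
    #elif defined(M88100)
    #define flush	flush_88100
    #else
    #define flush	flush_88110
    #endif

    int
    main(void)
    {
    #if defined(M88100) && defined(M88110)
    	int cpu_is_88100 = 1;	/* stand-in for the CPU_IS88100 test */

    	flush_fnptr = cpu_is_88100 ? flush_88100 : flush_88110;
    #endif
    	flush(0x1000);
    	return 0;
    }
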
@@ -915,7 +917,7 @@ int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
int s;
- pt_entry_t *pte, template;
+ pt_entry_t *pte, npte;
paddr_t old_pa;
pv_entry_t pv_e, pvl;
boolean_t wired = (flags & PMAP_WIRED) != 0;
@@ -924,7 +926,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
DPRINTF(CD_ENT, ("pmap_enter(%p, %p, %p, %x, %x)\n",
pmap, va, pa, prot, flags));
- template = m88k_protection(prot);
+ npte = m88k_protection(prot);
/*
* Expand pmap to include this pte.
@@ -986,7 +988,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
pv_e = pool_get(&pvpool, PR_NOWAIT);
if (pv_e == NULL) {
/* Invalidate the old pte anyway */
- tlb_flush(pmap, va);
+ tlb_flush(pmap, va, PG_NV);
if (flags & PMAP_CANFAIL) {
splx(s);
@@ -1011,9 +1013,9 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
pmap->pm_stats.wired_count++;
} /* if (pa == old_pa) ... else */
- template |= PG_V;
+ npte |= PG_V;
if (wired)
- template |= PG_W;
+ npte |= PG_W;
if (prot & VM_PROT_WRITE) {
/*
@@ -1022,27 +1024,28 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
*/
if (CPU_IS88110 && pmap != pmap_kernel() &&
pg != NULL && (pvl->pv_flags & PG_M) == 0)
- template |= PG_U;
+ npte |= PG_U;
else
- template |= PG_M_U;
+ npte |= PG_M_U;
} else if (prot & VM_PROT_ALL)
- template |= PG_U;
+ npte |= PG_U;
/*
* If outside physical memory, disable cache on this (device) page.
*/
if (pa >= last_addr)
- template |= CACHE_INH;
+ npte |= CACHE_INH;
/*
* Invalidate the pte temporarily, so that the modified
* and/or referenced bits can not be written back by any
* other cpu.
*/
- template |= invalidate_pte(pte) & PG_M_U;
- *pte = template | pa;
- tlb_flush(pmap, va);
- DPRINTF(CD_ENT, ("pmap_enter: new pte %p\n", *pte));
+ npte |= invalidate_pte(pte) & PG_M_U;
+ npte |= pa;
+ *pte = npte;
+ tlb_flush(pmap, va, npte);
+ DPRINTF(CD_ENT, ("pmap_enter: new pte %p\n", npte));
/*
* Cache attribute flags
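
The rewrite above also settles pmap_enter() on a single update protocol: invalidate the live PTE first, fold whatever PG_M/PG_U history another processor managed to write back into the fully built npte, then publish the mapping with one store and hand the final value to tlb_flush(). A simplified, compilable sketch of that ordering; pt_entry_t, the PG_M_U value and the __atomic_exchange_n() stand-in for invalidate_pte() are assumptions, not the kernel definitions:

    #include <stdint.h>

    typedef uint32_t pt_entry_t;

    #define PG_M_U	0x00000018	/* illustrative bit positions only */

    /* stand-in for invalidate_pte(): atomically clear, return old value */
    static pt_entry_t
    invalidate_pte(volatile pt_entry_t *pte)
    {
    	return __atomic_exchange_n(pte, 0, __ATOMIC_SEQ_CST);
    }

    /* the pmap_enter() tail, reduced to its ordering */
    static void
    publish_pte(volatile pt_entry_t *pte, pt_entry_t npte)
    {
    	npte |= invalidate_pte(pte) & PG_M_U;	/* keep M/U set elsewhere */
    	*pte = npte;				/* one visible store */
    	/* tlb_flush(pmap, va, npte) follows in the real code */
    }

    int
    main(void)
    {
    	volatile pt_entry_t pte = PG_M_U;	/* pretend another cpu set M/U */

    	publish_pte(&pte, 0x2000);
    	return (int)(pte & PG_M_U);		/* still set: history kept */
    }
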
@@ -1069,20 +1072,20 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
{
- pt_entry_t template, *pte;
+ pt_entry_t *pte, npte;
DPRINTF(CD_ENT, ("pmap_kenter_pa(%p, %p, %x)\n", va, pa, prot));
- template = m88k_protection(prot) | PG_W | PG_V;
+ npte = m88k_protection(prot) | PG_W | PG_V;
#ifdef M88110
if (CPU_IS88110 && m88k_protection(prot) != PG_RO)
- template |= PG_M;
+ npte |= PG_M;
#endif
/*
* If outside physical memory, disable cache on this (device) page.
*/
if (pa >= last_addr)
- template |= CACHE_INH;
+ npte |= CACHE_INH;
/*
* Expand pmap to include this pte.
@@ -1097,8 +1100,9 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
pmap_kernel()->pm_stats.wired_count++;
invalidate_pte(pte);
- *pte = template | pa;
- tlb_kflush(va);
+ npte |= pa;
+ *pte = npte;
+ tlb_kflush(va, npte);
}
/*
@@ -1133,7 +1137,7 @@ pmap_remove_pte(pmap_t pmap, vaddr_t va, pt_entry_t *pte, struct vm_page *pg,
opte = invalidate_pte(pte) & PG_M_U;
if (flush)
- tlb_flush(pmap, va);
+ tlb_flush(pmap, va, PG_NV);
if (pg == NULL) {
pg = PHYS_TO_VM_PAGE(pa);
@@ -1269,7 +1273,7 @@ pmap_kremove(vaddr_t va, vsize_t len)
e = va + len;
while (va != e) {
sdt_entry_t *sdt;
- pt_entry_t *pte;
+ pt_entry_t *pte, opte;
eseg = (va & SDT_MASK) + (1 << SDT_SHIFT);
if (eseg > e || eseg == 0)
@@ -1288,8 +1292,8 @@ pmap_kremove(vaddr_t va, vsize_t len)
pmap_kernel()->pm_stats.resident_count--;
pmap_kernel()->pm_stats.wired_count--;
- invalidate_pte(pte);
- tlb_kflush(va);
+ opte = invalidate_pte(pte);
+ tlb_kflush(va, PG_NV);
}
va += PAGE_SIZE;
pte++;
@@ -1350,7 +1354,7 @@ void
pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
int s;
- pt_entry_t *pte, ap;
+ pt_entry_t *pte, ap, opte, npte;
vaddr_t va, eseg;
if ((prot & VM_PROT_READ) == 0) {
@@ -1386,9 +1390,10 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
* reference bit being written back by
* any other cpu.
*/
- *pte = ap |
- (invalidate_pte(pte) & ~PG_PROT);
- tlb_flush(pmap, va);
+ opte = invalidate_pte(pte);
+ npte = ap | (opte & ~PG_PROT);
+ *pte = npte;
+ tlb_flush(pmap, va, npte);
}
va += PAGE_SIZE;
pte++;
@@ -1508,7 +1513,7 @@ pmap_changebit(struct vm_page *pg, int set, int mask)
if (npte != opte) {
invalidate_pte(pte);
*pte = npte;
- tlb_flush(pmap, va);
+ tlb_flush(pmap, va, npte);
}
}
}
@@ -1585,7 +1590,7 @@ pmap_unsetbit(struct vm_page *pg, int bit)
{
boolean_t rv = FALSE;
pv_entry_t pvl, pvep;
- pt_entry_t *pte, opte;
+ pt_entry_t *pte, opte, npte;
pmap_t pmap;
int s;
vaddr_t va;
@@ -1633,8 +1638,9 @@ pmap_unsetbit(struct vm_page *pg, int bit)
* other cpu.
*/
invalidate_pte(pte);
- *pte = opte ^ bit;
- tlb_flush(pmap, va);
+ npte = opte ^ bit;
+ *pte = npte;
+ tlb_flush(pmap, va, npte);
rv = TRUE;
}
}
@@ -1782,3 +1788,64 @@ pmap_set_modify(pmap_t pmap, vaddr_t va)
return (TRUE);
}
#endif
+
+/*
+ * [MD PUBLIC]
+ * Change the cache control bits of the address range `sva'..`eva' in
+ * pmap_kernel to `mode'.
+ */
+void
+pmap_cache_ctrl(vaddr_t sva, vaddr_t eva, u_int mode)
+{
+ int s;
+ pt_entry_t *pte, opte, npte;
+ vaddr_t va;
+ paddr_t pa;
+ cpuid_t cpu;
+
+ DPRINTF(CD_CACHE, ("pmap_cache_ctrl(%p, %p, %x)\n",
+ sva, eva, mode));
+
+ s = splvm();
+ for (va = sva; va != eva; va += PAGE_SIZE) {
+ if ((pte = pmap_pte(pmap_kernel(), va)) == NULL)
+ continue;
+ DPRINTF(CD_CACHE, ("cache_ctrl: pte@%p\n", pte));
+
+ /*
+ * Data cache should be copied back and invalidated if
+ * the old mapping was cached and the new isn't, or if
+ * we are downgrading from writeback to writethrough.
+ */
+ if (((*pte & CACHE_INH) == 0 && (mode & CACHE_INH) != 0) ||
+ ((*pte & CACHE_WT) == 0 && (mode & CACHE_WT) != 0)) {
+ pa = ptoa(PG_PFNUM(*pte));
+#ifdef MULTIPROCESSOR
+ for (cpu = 0; cpu < MAX_CPUS; cpu++)
+ if (ISSET(m88k_cpus[cpu].ci_flags, CIF_ALIVE)) {
+#else
+ cpu = cpu_number();
+#endif
+ if (mode & CACHE_INH)
+ cmmu_cache_wbinv(cpu,
+ pa, PAGE_SIZE);
+ else
+ cmmu_dcache_wb(cpu,
+ pa, PAGE_SIZE);
+#ifdef MULTIPROCESSOR
+ }
+#endif
+ }
+
+ /*
+	 * Invalidate the pte temporarily, so that the modified and/or
+	 * referenced bits can not be written back by any other cpu.
+ */
+
+ opte = invalidate_pte(pte);
+ npte = (opte & ~CACHE_MASK) | mode;
+ *pte = npte;
+ tlb_kflush(va, npte);
+ }
+ splx(s);
+}
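
Moving pmap_cache_ctrl() below the TLB helpers lets it pass the rebuilt PTE to tlb_kflush() like every other caller. The cache rule it applies deserves a gloss: disabling caching outright (CACHE_INH) needs a writeback-and-invalidate, since stale lines must never be used again, whereas a writeback-to-writethrough downgrade (CACHE_WT) only needs dirty data pushed to memory. A hypothetical call, say from a driver making an already-mapped device window uncacheable; fb_va and fb_size are illustrative only:

    pmap_cache_ctrl(fb_va, fb_va + round_page(fb_size), CACHE_INH);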