author    Dale Rahn <drahn@cvs.openbsd.org>    2003-10-31 03:06:18 +0000
committer Dale Rahn <drahn@cvs.openbsd.org>    2003-10-31 03:06:18 +0000
commit    7c2e57a9b5986b6fb19c6bc4ebf5050a6a1743d0 (patch)
tree      b0897f49dea0a6939c73f563ad0526aa61b0de3a
parent    9f5ab21dd6d83943475eff08f950c17219786400 (diff)
Fix ppc segment register restores; this fixes the 1GB RAM limit and
cleans up pieces of the pmap code. Tested by otto, brad, miod, pval.
-rw-r--r--  sys/arch/macppc/macppc/locore.S    103
-rw-r--r--  sys/arch/macppc/macppc/machdep.c    17
-rw-r--r--  sys/arch/powerpc/include/pmap.h      6
-rw-r--r--  sys/arch/powerpc/powerpc/pmap.c     76
4 files changed, 87 insertions, 115 deletions
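
The core of the change is in locore.S below: instead of restoring only USER_SR and KERNEL_SR from dedicated pmap fields, the trap entry and exit macros now reload all sixteen segment registers from a pmap's pm_sr[] array (kernel_pmap_ on trap entry, curpm when returning to user mode), which is what lets the kernel address memory above 1GB safely. The following C sketch is illustrative only and is not code from this commit; ppc_mtsrin(), ADDR_SR_SHIFT and the 16-entry pm_sr[] array are real names in this tree, while the struct layout, the stub ppc_mtsrin() below and the skip_user_sr flag are assumptions made for the example.

/*
 * Illustrative sketch only -- not code from this commit.  It models what
 * the new FRAME_SETUP/intr_exit assembly does: walk a pmap's 16-entry
 * pm_sr[] array and write each value into the matching segment register.
 * ppc_mtsrin(), ADDR_SR_SHIFT and pm_sr[] are real names in this tree;
 * the struct layout, the printing stub and skip_user_sr are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t sr_t;

#define ADDR_SR_SHIFT	28	/* segment number = top 4 bits of the EA */
#define USER_SR		13	/* the segment the trap-entry path skips */

struct pmap_sketch {
	sr_t pm_sr[16];		/* one entry per 256MB segment */
};

/* Stand-in for the mtsrin instruction: value -> SR selected by EA bits 0-3. */
static void
ppc_mtsrin(sr_t val, uint32_t ea)
{
	printf("mtsrin: SR%u <- 0x%08x\n",
	    (unsigned)(ea >> ADDR_SR_SHIFT), (unsigned)val);
}

static void
reload_segment_registers(const struct pmap_sketch *pm, int skip_user_sr)
{
	int sn;

	for (sn = 0; sn < 16; sn++) {
		/* trap entry leaves the user copyin/copyout segment alone */
		if (skip_user_sr && sn == USER_SR)
			continue;
		ppc_mtsrin(pm->pm_sr[sn], (uint32_t)sn << ADDR_SR_SHIFT);
	}
}

int
main(void)
{
	struct pmap_sketch kernel = { .pm_sr = { 0 } };
	int sn;

	for (sn = 0; sn < 16; sn++)
		kernel.pm_sr[sn] = 0x00000400 + sn;	/* dummy VSIDs */
	reload_segment_registers(&kernel, 1);		/* trap-entry path */
	return 0;
}

The assembly unrolls this loop into sixteen lwz/mtsr pairs because mtsr encodes the segment register number as an immediate; the old switch_exited loop removed in the diff below used mtsrin, which takes the segment from the top bits of a register operand instead.
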
diff --git a/sys/arch/macppc/macppc/locore.S b/sys/arch/macppc/macppc/locore.S
index b4494746597..44bb7e90ef7 100644
--- a/sys/arch/macppc/macppc/locore.S
+++ b/sys/arch/macppc/macppc/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.13 2003/10/24 19:56:44 drahn Exp $ */
+/* $OpenBSD: locore.S,v 1.14 2003/10/31 03:06:16 drahn Exp $ */
/* $NetBSD: locore.S,v 1.2 1996/10/16 19:33:09 ws Exp $ */
/*
@@ -284,15 +284,6 @@ switch_exited:
addic. %r5,%r5,64
li %r6,0
- mfsr %r8,KERNEL_SR /* save kernel SR */
-1:
- addis %r6,%r6,-0x10000000@ha /* set new procs segment registers */
- or. %r6,%r6,%r6 /* This is done from the real address pmap */
- lwzu %r7,-4(%r5) /* so we don't have to worry */
- mtsrin %r7,%r6 /* about accessibility */
- bne 1b
- mtsr KERNEL_SR,%r8 /* restore kernel SR */
- isync
lwz %r1,PCB_SP(%r4) /* get new procs SP */
@@ -400,7 +391,6 @@ _C_LABEL(dsitrap):
mfxer %r30 /* save XER */
mtsprg 2,%r30 /* in SPRG2 */
mfsrr1 %r31 /* test kernel mode */
-#if 0
mtcr %r31
bc 12,17,1f /* branch if PSL_PR is set */
mfdar %r31 /* get fault address */
@@ -427,7 +417,6 @@ _C_LABEL(dsitrap):
lmw %r28,disisave(0) /* restore r28-r31 */
rfi /* return to trapped code */
1:
-#endif
mflr %r28 /* save LR */
bla s_dsitrap
_C_LABEL(dsisize) = .-_C_LABEL(dsitrap)
@@ -443,7 +432,6 @@ _C_LABEL(isitrap):
mflr %r28 /* save LR */
mfcr %r29 /* save CR */
mfsrr1 %r31 /* test kernel mode */
-#if 0
mtcr %r31
bc 12,17,1f /* branch if PSL_PR is set */
mfsrr0 %r31 /* get fault address */
@@ -459,7 +447,6 @@ _C_LABEL(isitrap):
lmw %r28,disisave(0) /* restore r28-r31 */
rfi /* return to trapped code */
1:
-#endif
bla s_isitrap
_C_LABEL(isisize) = .-_C_LABEL(isitrap)
@@ -748,6 +735,25 @@ _C_LABEL(ddbsize) = .-_C_LABEL(ddblow)
mfsrr0 %r30; \
mfsrr1 %r31; \
stmw %r30,savearea+24(0); \
+ /* load all kernel segment registers. */ \
+ lis %r31,_C_LABEL(kernel_pmap_)@ha; \
+ addi %r31,%r31,_C_LABEL(kernel_pmap_)@l; \
+ lwz %r30,0(%r31); mtsr 0,%r30; \
+ lwz %r30,4(%r31); mtsr 1,%r30; \
+ lwz %r30,8(%r31); mtsr 2,%r30; \
+ lwz %r30,12(%r31); mtsr 3,%r30; \
+ lwz %r30,16(%r31); mtsr 4,%r30; \
+ lwz %r30,20(%r31); mtsr 5,%r30; \
+ lwz %r30,24(%r31); mtsr 6,%r30; \
+ lwz %r30,28(%r31); mtsr 7,%r30; \
+ lwz %r30,32(%r31); mtsr 8,%r30; \
+ lwz %r30,36(%r31); mtsr 9,%r30; \
+ lwz %r30,40(%r31); mtsr 10,%r30; \
+ lwz %r30,44(%r31); mtsr 11,%r30; \
+ lwz %r30,48(%r31); mtsr 12,%r30; \
+/* lwz %r30,52(%r31); mtsr 13,%r30; - dont load user SR - XXX? */ \
+ lwz %r30,56(%r31); mtsr 14,%r30; \
+ lwz %r30,60(%r31); mtsr 15,%r30; \
mfmsr %r30; \
ori %r30,%r30,(PSL_DR|PSL_IR); \
mtmsr %r30; \
@@ -805,11 +811,23 @@ _C_LABEL(ddbsize) = .-_C_LABEL(ddblow)
bc 4,17,1f; /* branch if PSL_PR is false */ \
/* Restore user & kernel access SR: */ \
lis %r2,_C_LABEL(curpm)@ha; /* get real address of pmap */ \
- lwz %r2,_C_LABEL(curpm)@l(2); \
- lwz %r3,PM_USRSR(%r2); \
- mtsr USER_SR,%r3; \
- lwz %r3,PM_KERNELSR(%r2); \
- mtsr KERNEL_SR,%r3; \
+ lwz %r2,_C_LABEL(curpm)@l(2); \
+ lwz %r3,0(%r2); mtsr 0,%r3; \
+ lwz %r3,4(%r2); mtsr 1,%r3; \
+ lwz %r3,8(%r2); mtsr 2,%r3; \
+ lwz %r3,12(%r2); mtsr 3,%r3; \
+ lwz %r3,16(%r2); mtsr 4,%r3; \
+ lwz %r3,20(%r2); mtsr 5,%r3; \
+ lwz %r3,24(%r2); mtsr 6,%r3; \
+ lwz %r3,28(%r2); mtsr 7,%r3; \
+ lwz %r3,32(%r2); mtsr 8,%r3; \
+ lwz %r3,36(%r2); mtsr 9,%r3; \
+ lwz %r3,40(%r2); mtsr 10,%r3; \
+ lwz %r3,44(%r2); mtsr 11,%r3; \
+ lwz %r3,48(%r2); mtsr 12,%r3; \
+ lwz %r3,52(%r2); mtsr 13,%r3; \
+ lwz %r3,56(%r2); mtsr 14,%r3; \
+ lwz %r3,60(%r2); mtsr 15,%r3; \
1: mfsprg %r2,1; /* restore cr */ \
mtcr %r2; \
lwz %r2,savearea(0); \
@@ -844,10 +862,6 @@ realtrap:
* Now the common trap catching code.
*/
s_trap:
-/* First have to enable KERNEL mapping */
- lis %r31,KERNEL_SEGMENT@ha
- addi %r31,%r31,KERNEL_SEGMENT@l
- mtsr KERNEL_SR,%r31
FRAME_SETUP(tempsave)
/* Now we can recover interrupts again: */
mfmsr %r7
@@ -954,7 +968,7 @@ s_pte_spill:
* ISI second stage fault handler
*/
s_isitrap:
- mfsrr1 %r31 /* test if this may be a spill fault */
+ mfsrr1 %r31 /* test if this is a spill fault */
mtcr %r31
mtsprg 1,%r1 /* save SP */
bc 4,%r1,disitrap /* branch if table miss is false */
@@ -1014,10 +1028,26 @@ s_isitrap:
stw %r5,20(%r1); \
stw %r4,12(%r1); \
stw %r3,8(%r1); \
+/* load all kernel segment registers. */ \
+ lis 3,_C_LABEL(kernel_pmap_)@ha; \
+ addi 3,3,_C_LABEL(kernel_pmap_)@l; \
+ lwz %r5,0(%r3); mtsr 0,%r5; \
+ lwz %r5,4(%r3); mtsr 1,%r5; \
+ lwz %r5,8(%r3); mtsr 2,%r5; \
+ lwz %r5,12(%r3); mtsr 3,%r5; \
+ lwz %r5,16(%r3); mtsr 4,%r5; \
+ lwz %r5,20(%r3); mtsr 5,%r5; \
+ lwz %r5,24(%r3); mtsr 6,%r5; \
+ lwz %r5,28(%r3); mtsr 7,%r5; \
+ lwz %r5,32(%r3); mtsr 8,%r5; \
+ lwz %r5,36(%r3); mtsr 9,%r5; \
+ lwz %r5,40(%r3); mtsr 10,%r5; \
+ lwz %r5,44(%r3); mtsr 11,%r5; \
+ lwz %r5,48(%r3); mtsr 12,%r5; \
+/* lwz %r5,52(%r3); mtsr 13,%r5; - dont load user SR - XXX? */ \
+ lwz %r5,56(%r3); mtsr 14,%r5; \
+ lwz %r5,60(%r3); mtsr 15,%r5; \
/* interrupts are recoverable here, and enable translation */ \
- lis %r3,(KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY)@ha; \
- addi %r3,%r3,(KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY)@l; \
- mtsr KERNEL_SR,%r3; \
mfmsr %r5; \
ori %r5,%r5,(PSL_IR|PSL_DR|PSL_RI); \
mtmsr %r5; \
@@ -1055,8 +1085,23 @@ intr_exit:
bc 4,17,1f /* branch if PSL_PR is false */
lis %r3,_C_LABEL(curpm)@ha /* get current pmap real address */
lwz %r3,_C_LABEL(curpm)@l(%r3)
- lwz %r3,PM_KERNELSR(%r3)
- mtsr KERNEL_SR,%r3 /* Restore kernel SR */
+ /* reload all segment registers. */
+ lwz %r4,0(3); mtsr 0,%r4;
+ lwz %r4,4(3); mtsr 1,%r4;
+ lwz %r4,8(3); mtsr 2,%r4;
+ lwz %r4,12(3); mtsr 3,%r4;
+ lwz %r4,16(3); mtsr 4,%r4;
+ lwz %r4,20(3); mtsr 5,%r4;
+ lwz %r4,24(3); mtsr 6,%r4;
+ lwz %r4,28(3); mtsr 7,%r4;
+ lwz %r4,32(3); mtsr 8,%r4;
+ lwz %r4,36(3); mtsr 9,%r4;
+ lwz %r4,40(3); mtsr 10,%r4;
+ lwz %r4,44(3); mtsr 11,%r4;
+ lwz %r4,48(3); mtsr 12,%r4;
+ lwz %r4,52(3); mtsr 13,%r4;
+ lwz %r4,56(3); mtsr 14,%r4;
+ lwz %r4,60(3); mtsr 15,%r4;
lis %r3,_C_LABEL(astpending)@ha /* Test AST pending */
lwz %r4,_C_LABEL(astpending)@l(%r3)
andi. %r4,%r4,1
diff --git a/sys/arch/macppc/macppc/machdep.c b/sys/arch/macppc/macppc/machdep.c
index 2aff9452fbf..2cbc8d6e429 100644
--- a/sys/arch/macppc/macppc/machdep.c
+++ b/sys/arch/macppc/macppc/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.62 2003/10/30 03:17:54 itojun Exp $ */
+/* $OpenBSD: machdep.c,v 1.63 2003/10/31 03:06:17 drahn Exp $ */
/* $NetBSD: machdep.c,v 1.4 1996/10/16 19:33:11 ws Exp $ */
/*
@@ -312,20 +312,6 @@ initppc(startkernel, endkernel, args)
*/
pmap_bootstrap(startkernel, endkernel);
- /* use BATs to map 1GB memory, no pageable BATs now */
- if (physmem > btoc(0x10000000)) {
- ppc_mtdbat1l(BATL(0x10000000, BAT_M));
- ppc_mtdbat1u(BATU(0x10000000));
- }
- if (physmem > btoc(0x20000000)) {
- ppc_mtdbat2l(BATL(0x20000000, BAT_M));
- ppc_mtdbat2u(BATU(0x20000000));
- }
- if (physmem > btoc(0x30000000)) {
- ppc_mtdbat3l(BATL(0x30000000, BAT_M));
- ppc_mtdbat3u(BATU(0x30000000));
- }
-#if 0
/* now that we know physmem size, map physical memory with BATs */
if (physmem > btoc(0x10000000)) {
battable[0x1].batl = BATL(0x10000000, BAT_M);
@@ -355,7 +341,6 @@ initppc(startkernel, endkernel, args)
battable[0x7].batl = BATL(0x70000000, BAT_M);
battable[0x7].batu = BATU(0x70000000);
}
-#endif
/*
* Now enable translation (and machine checks/recoverable interrupts).
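
With segment registers now reloaded from the pmap on every trap, machdep.c drops the hard-coded DBAT setup that stopped at 1GB and enables the previously #if 0'd battable[] entries, giving 256MB BAT mappings for physical memory up to 2GB. The sketch below is only a compact, loop-form restatement of that unrolled code; battable[], BATL(), BATU(), BAT_M, btoc() and physmem are real names from the tree, but the macro definitions here are simplified stand-ins and the loop is not what was committed.

/*
 * Compact restatement of the battable[] setup re-enabled in initppc().
 * battable[], BATL(), BATU(), BAT_M, btoc() and physmem are real names in
 * the tree; the macro definitions here are simplified stand-ins and the
 * loop form (the committed code is unrolled) is for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define SEGMENT_LENGTH	0x10000000u		/* 256MB covered per DBAT */
#define BAT_M		0x00000010u		/* memory-coherence bit (illustrative) */
#define BATL(pa, wimg)	((pa) | (wimg) | 0x2u)	/* lower BAT: BRPN|WIMG|PP=RW (illustrative) */
#define BATU(va)	((va) | 0x1ffcu | 0x2u)	/* upper BAT: BEPI|BL=256MB|Vs (illustrative) */
#define btoc(x)		((x) >> 12)		/* bytes to 4K pages (simplified) */

struct bat { uint32_t batu, batl; } battable[16];
uint32_t physmem;				/* page count, set from the memory map */

static void
map_physmem_with_bats(void)
{
	uint32_t seg;

	/* segment 0 is already mapped earlier in initppc() */
	for (seg = SEGMENT_LENGTH; seg < 0x80000000u; seg += SEGMENT_LENGTH) {
		if (physmem <= btoc(seg))
			break;			/* no RAM beyond this point */
		battable[seg >> 28].batl = BATL(seg, BAT_M);
		battable[seg >> 28].batu = BATU(seg);
	}
}

int
main(void)
{
	int i;

	physmem = btoc(0x60000000u);		/* pretend 1.5GB of RAM */
	map_physmem_with_bats();
	for (i = 1; i < 8; i++)
		if (battable[i].batu != 0)
			printf("DBAT for 0x%08x: batu=0x%08x batl=0x%08x\n",
			    (unsigned)(i << 28), (unsigned)battable[i].batu,
			    (unsigned)battable[i].batl);
	return 0;
}
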
diff --git a/sys/arch/powerpc/include/pmap.h b/sys/arch/powerpc/include/pmap.h
index d61d9e2c378..efcab9e177c 100644
--- a/sys/arch/powerpc/include/pmap.h
+++ b/sys/arch/powerpc/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.31 2002/09/15 09:01:59 deraadt Exp $ */
+/* $OpenBSD: pmap.h,v 1.32 2003/10/31 03:06:16 drahn Exp $ */
/* $NetBSD: pmap.h,v 1.1 1996/09/30 16:34:29 ws Exp $ */
/*-
@@ -71,7 +71,9 @@ typedef u_int sr_t;
#define VP_IDX2_MASK (VP_IDX2_SIZE-1)
#define VP_IDX2_POS 12
-void pmap_kenter_cache( vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);
+/* functions used by the bus layer for device accesses */
+void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);
+void pmap_kremove_pg(vaddr_t va);
/* cache flags */
#define PMAP_CACHE_DEFAULT 0 /* WB cache managed mem, devices not */
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index 74e04ec9758..d578529036e 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.82 2003/07/02 21:30:12 drahn Exp $ */
+/* $OpenBSD: pmap.c,v 1.83 2003/10/31 03:06:16 drahn Exp $ */
/*
* Copyright (c) 2001, 2002 Dale Rahn.
@@ -520,16 +520,9 @@ pmap_enter(pm, va, pa, prot, flags)
u_int sn = VP_SR(va);
pm->pm_exec[sn]++;
- if (pm->pm_sr[sn] & SR_NOEXEC) {
+ if (pm->pm_sr[sn] & SR_NOEXEC)
pm->pm_sr[sn] &= ~SR_NOEXEC;
- /* set the current sr if not kernel used segemnts
- * and this pmap is current active pmap
- */
- if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- ppc_mtsrin(pm->pm_sr[sn],
- sn << ADDR_SR_SHIFT);
- }
if (pattr != NULL)
*pattr |= (PTE_EXE >> ATTRSHIFT);
} else {
@@ -648,16 +641,8 @@ pmap_remove_pg(pmap_t pm, vaddr_t va)
pted->pted_va &= ~PTED_VA_EXEC_M;
pm->pm_exec[sn]--;
- if (pm->pm_exec[sn] == 0) {
+ if (pm->pm_exec[sn] == 0)
pm->pm_sr[sn] |= SR_NOEXEC;
-
- /* set the current sr if not kernel used segemnts
- * and this pmap is current active pmap
- */
- if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- ppc_mtsrin(pm->pm_sr[sn],
- sn << ADDR_SR_SHIFT);
- }
}
pted->pted_pte.pte_hi &= ~PTE_VALID;
@@ -732,16 +717,8 @@ _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
u_int sn = VP_SR(va);
pm->pm_exec[sn]++;
- if (pm->pm_sr[sn] & SR_NOEXEC) {
+ if (pm->pm_sr[sn] & SR_NOEXEC)
pm->pm_sr[sn] &= ~SR_NOEXEC;
-
- /* set the current sr if not kernel used segemnts
- * and this pmap is current active pmap
- */
- if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- ppc_mtsrin(pm->pm_sr[sn],
- sn << ADDR_SR_SHIFT);
- }
}
splx(s);
@@ -795,16 +772,8 @@ pmap_kremove_pg(vaddr_t va)
pted->pted_va &= ~PTED_VA_EXEC_M;
pm->pm_exec[sn]--;
- if (pm->pm_exec[sn] == 0) {
+ if (pm->pm_exec[sn] == 0)
pm->pm_sr[sn] |= SR_NOEXEC;
-
- /* set the current sr if not kernel used segemnts
- * and this pmap is current active pmap
- */
- if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- ppc_mtsrin(pm->pm_sr[sn],
- sn << ADDR_SR_SHIFT);
- }
}
if (PTED_MANAGED(pted))
@@ -1093,9 +1062,8 @@ again:
splx(s); /* pmap create unlock */
seg = try << 4;
- for (k = 0; k < 16; k++) {
+ for (k = 0; k < 16; k++)
pm->pm_sr[k] = (seg + k) | SR_NOEXEC;
- }
return;
}
}
@@ -1216,30 +1184,6 @@ pmap_avail_setup(void)
for (mp = pmap_mem; mp->size !=0; mp++)
physmem += btoc(mp->size);
- /* limit to 1GB available, for now -XXXGRR */
-#define MEMMAX 0x40000000
- for (mp = pmap_avail; mp->size !=0 ; /* increment in loop */) {
- if (mp->start + mp->size > MEMMAX) {
- int rm_start;
- int rm_end;
- if (mp->start > MEMMAX) {
- rm_start = mp->start;
- rm_end = mp->start+mp->size;
- } else {
- rm_start = MEMMAX;
- rm_end = mp->start+mp->size;
- }
- pmap_remove_avail(rm_start, rm_end);
-
- /* whack physmem, since we ignore more than 256MB */
- physmem = btoc(MEMMAX);
-
- /* start over at top, make sure not to skip any */
- mp = pmap_avail;
- continue;
- }
- mp++;
- }
for (mp = pmap_avail; mp->size !=0; mp++)
pmap_cnt_avail += 1;
}
@@ -1406,15 +1350,11 @@ pmap_bootstrap(u_int kernelstart, u_int kernelend)
msgbuf_addr = pmap_steal_avail(MSGBUFSIZE,4);
- for (mp = pmap_avail; mp->size; mp++) {
- bzero((void *)mp->start, mp->size);
- }
-
#ifndef HTABENTS
#define HTABENTS 1024
#endif
pmap_ptab_cnt = HTABENTS;
- while ((HTABSIZE << 7) < ctob(physmem)) {
+ while (HTABSIZE < (ctob(physmem) >> 7)) {
pmap_ptab_cnt <<= 1;
}
/*
@@ -1924,7 +1864,7 @@ pmap_init()
NULL);
pool_setlowat(&pmap_pted_pool, 20);
- /* pmap_pvh and pmap_attr must be allocated 1-1 so that pmap_save_attr
+ /* pmap_pvh and pmap_attr must be allocated 1-1 so that pmap_attr_save
* is callable from pte_spill_r (with vm disabled)
*/
pvh = (struct pted_pv_head *)pmap_pvh;