 sys/arch/macppc/macppc/locore.S  | 126
 sys/arch/macppc/macppc/machdep.c |  19
 sys/arch/powerpc/powerpc/pmap.c  |  96
 3 files changed, 121 insertions, 120 deletions
diff --git a/sys/arch/macppc/macppc/locore.S b/sys/arch/macppc/macppc/locore.S
index 9cfaab04552..fe074cdc3fe 100644
--- a/sys/arch/macppc/macppc/locore.S
+++ b/sys/arch/macppc/macppc/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.24 2005/10/03 04:40:09 deraadt Exp $ */
+/* $OpenBSD: locore.S,v 1.25 2005/10/03 04:47:30 drahn Exp $ */
/* $NetBSD: locore.S,v 1.2 1996/10/16 19:33:09 ws Exp $ */
/*
@@ -289,17 +289,6 @@ switch_exited:
addic. %r5,%r5,64
li %r6,0
- mfsr %r8,PPC_KERNEL_SR /* save kernel SR */
-nop64_1s:
-1:
- addis %r6,%r6,-0x10000000@ha /* set new procs segment registers */
- or. %r6,%r6,%r6 /* This is done from the real address pmap */
- lwzu %r7,-4(%r5) /* so we don't have to worry */
- mtsrin %r7,%r6 /* about accessibility */
- bne 1b
-nop64_1e:
- mtsr PPC_KERNEL_SR,%r8 /* restore kernel SR */
- isync
lwz %r1,PCB_SP(%r4) /* get new procs SP */
@@ -423,7 +412,7 @@ nop32_3e:
mfxer %r30 /* save XER */
mtsprg 2,%r30 /* in SPRG2 */
mfsrr1 %r31 /* test kernel mode */
-#if 0
+nop64_1s:
mtcr %r31
bc 12,17,1f /* branch if PSL_PR is set */
mfdar %r31 /* get fault address */
@@ -448,9 +437,9 @@ nop32_3e:
mtxer %r30
mtcr %r29 /* restore CR */
lmw %r28,disisave(0) /* restore r28-r31 */
-rfi1: rfi /* return to trapped code */
+ rfi /* return to trapped code */
1:
-#endif
+nop64_1e:
mflr %r28 /* save LR */
bla s_dsitrap
_C_LABEL(dsisize) = .-_C_LABEL(dsitrap)
@@ -473,7 +462,7 @@ nop32_4e:
mflr %r28 /* save LR */
mfcr %r29 /* save CR */
mfsrr1 %r31 /* test kernel mode */
-#if 0
+nop64_2s:
mtcr %r31
bc 12,17,1f /* branch if PSL_PR is set */
mfsrr0 %r31 /* get fault address */
@@ -487,9 +476,9 @@ nop32_4e:
mtibatl 3,%r30
mtcr %r29 /* restore CR */
lmw %r28,disisave(0) /* restore r28-r31 */
-rfi2: rfi /* return to trapped code */
+ rfi /* return to trapped code */
1:
-#endif
+nop64_2e:
bla s_isitrap
_C_LABEL(isisize) = .-_C_LABEL(isitrap)
@@ -793,6 +782,25 @@ _C_LABEL(ddbsize) = .-_C_LABEL(ddblow)
mfsrr0 %r30; \
mfsrr1 %r31; \
stmw %r30,savearea+24(0); \
+ /* load all kernel segment registers. */ \
+ lis %r31,_C_LABEL(kernel_pmap_)@ha; \
+ addi %r31,%r31,_C_LABEL(kernel_pmap_)@l; \
+ lwz %r30,0(%r31); mtsr 0,%r30; \
+ lwz %r30,4(%r31); mtsr 1,%r30; \
+ lwz %r30,8(%r31); mtsr 2,%r30; \
+ lwz %r30,12(%r31); mtsr 3,%r30; \
+ lwz %r30,16(%r31); mtsr 4,%r30; \
+ lwz %r30,20(%r31); mtsr 5,%r30; \
+ lwz %r30,24(%r31); mtsr 6,%r30; \
+ lwz %r30,28(%r31); mtsr 7,%r30; \
+ lwz %r30,32(%r31); mtsr 8,%r30; \
+ lwz %r30,36(%r31); mtsr 9,%r30; \
+ lwz %r30,40(%r31); mtsr 10,%r30; \
+ lwz %r30,44(%r31); mtsr 11,%r30; \
+ lwz %r30,48(%r31); mtsr 12,%r30; \
+/* lwz %r30,52(%r31); mtsr 13,%r30; - dont load user SR - XXX? */ \
+ lwz %r30,56(%r31); mtsr 14,%r30; \
+ lwz %r30,60(%r31); mtsr 15,%r30; \
mfmsr %r30; \
ori %r30,%r30,(PSL_DR|PSL_IR); \
mtmsr %r30; \
@@ -850,11 +858,23 @@ _C_LABEL(ddbsize) = .-_C_LABEL(ddblow)
bc 4,17,1f; /* branch if PSL_PR is false */ \
/* Restore user & kernel access SR: */ \
lis %r2,_C_LABEL(curpm)@ha; /* get real address of pmap */ \
- lwz %r2,_C_LABEL(curpm)@l(%r2); \
- lwz %r3,PM_USRSR(%r2); \
- mtsr PPC_USER_SR,%r3; \
- lwz %r3,PM_KERNELSR(%r2); \
- mtsr PPC_KERNEL_SR,%r3; \
+ lwz %r2,_C_LABEL(curpm)@l(2); \
+ lwz %r3,0(%r2); mtsr 0,%r3; \
+ lwz %r3,4(%r2); mtsr 1,%r3; \
+ lwz %r3,8(%r2); mtsr 2,%r3; \
+ lwz %r3,12(%r2); mtsr 3,%r3; \
+ lwz %r3,16(%r2); mtsr 4,%r3; \
+ lwz %r3,20(%r2); mtsr 5,%r3; \
+ lwz %r3,24(%r2); mtsr 6,%r3; \
+ lwz %r3,28(%r2); mtsr 7,%r3; \
+ lwz %r3,32(%r2); mtsr 8,%r3; \
+ lwz %r3,36(%r2); mtsr 9,%r3; \
+ lwz %r3,40(%r2); mtsr 10,%r3; \
+ lwz %r3,44(%r2); mtsr 11,%r3; \
+ lwz %r3,48(%r2); mtsr 12,%r3; \
+ lwz %r3,52(%r2); mtsr 13,%r3; \
+ lwz %r3,56(%r2); mtsr 14,%r3; \
+ lwz %r3,60(%r2); mtsr 15,%r3; \
1: mfsprg %r2,1; /* restore cr */ \
mtcr %r2; \
lwz %r2,savearea(0); \
@@ -889,10 +909,6 @@ realtrap:
* Now the common trap catching code.
*/
s_trap:
-/* First have to enable KERNEL mapping */
- lis %r31,PPC_KERNEL_SEGMENT@ha
- addi %r31,%r31,PPC_KERNEL_SEGMENT@l
- mtsr PPC_KERNEL_SR,%r31
FRAME_SETUP(tempsave)
/* Now we can recover interrupts again: */
mfmsr %r7
@@ -924,7 +940,7 @@ trapexit:
b trapagain
1:
FRAME_LEAVE(tempsave)
-rfi3: rfi
+rfi1: rfi
/*
* Child comes here at the end of a fork.
@@ -993,7 +1009,7 @@ s_pte_spill:
mtcr %r29 /* restore CR */
mtlr %r28 /* restore LR */
lmw %r28,disisave(0) /* restore r28-r31 */
-rfi4: rfi /* return to trapped code */
+rfi2: rfi /* return to trapped code */
/*
* ISI second stage fault handler
@@ -1059,10 +1075,26 @@ s_isitrap:
stw %r5,20(%r1); \
stw %r4,12(%r1); \
stw %r3,8(%r1); \
+/* load all kernel segment registers. */ \
+ lis 3,_C_LABEL(kernel_pmap_)@ha; \
+ addi 3,3,_C_LABEL(kernel_pmap_)@l; \
+ lwz %r5,0(%r3); mtsr 0,%r5; \
+ lwz %r5,4(%r3); mtsr 1,%r5; \
+ lwz %r5,8(%r3); mtsr 2,%r5; \
+ lwz %r5,12(%r3); mtsr 3,%r5; \
+ lwz %r5,16(%r3); mtsr 4,%r5; \
+ lwz %r5,20(%r3); mtsr 5,%r5; \
+ lwz %r5,24(%r3); mtsr 6,%r5; \
+ lwz %r5,28(%r3); mtsr 7,%r5; \
+ lwz %r5,32(%r3); mtsr 8,%r5; \
+ lwz %r5,36(%r3); mtsr 9,%r5; \
+ lwz %r5,40(%r3); mtsr 10,%r5; \
+ lwz %r5,44(%r3); mtsr 11,%r5; \
+ lwz %r5,48(%r3); mtsr 12,%r5; \
+/* lwz %r5,52(%r3); mtsr 13,%r5; - dont load user SR - XXX? */ \
+ lwz %r5,56(%r3); mtsr 14,%r5; \
+ lwz %r5,60(%r3); mtsr 15,%r5; \
/* interrupts are recoverable here, and enable translation */ \
- lis %r3,(PPC_KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY)@ha; \
- addi %r3,%r3,(PPC_KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY)@l; \
- mtsr PPC_KERNEL_SR,%r3; \
mfmsr %r5; \
ori %r5,%r5,(PSL_IR|PSL_DR|PSL_RI); \
mtmsr %r5; \
@@ -1100,8 +1132,23 @@ intr_exit:
bc 4,17,1f /* branch if PSL_PR is false */
lis %r3,_C_LABEL(curpm)@ha /* get current pmap real address */
lwz %r3,_C_LABEL(curpm)@l(%r3)
- lwz %r3,PM_KERNELSR(%r3)
- mtsr PPC_KERNEL_SR,%r3 /* Restore kernel SR */
+ /* reload all segment registers. */
+ lwz %r4,0(3); mtsr 0,%r4;
+ lwz %r4,4(3); mtsr 1,%r4;
+ lwz %r4,8(3); mtsr 2,%r4;
+ lwz %r4,12(3); mtsr 3,%r4;
+ lwz %r4,16(3); mtsr 4,%r4;
+ lwz %r4,20(3); mtsr 5,%r4;
+ lwz %r4,24(3); mtsr 6,%r4;
+ lwz %r4,28(3); mtsr 7,%r4;
+ lwz %r4,32(3); mtsr 8,%r4;
+ lwz %r4,36(3); mtsr 9,%r4;
+ lwz %r4,40(3); mtsr 10,%r4;
+ lwz %r4,44(3); mtsr 11,%r4;
+ lwz %r4,48(3); mtsr 12,%r4;
+ lwz %r4,52(3); mtsr 13,%r4;
+ lwz %r4,56(3); mtsr 14,%r4;
+ lwz %r4,60(3); mtsr 15,%r4;
lis %r3,_C_LABEL(astpending)@ha /* Test AST pending */
lwz %r4,_C_LABEL(astpending)@l(%r3)
andi. %r4,%r4,1
@@ -1140,7 +1187,7 @@ intr_exit:
lwz %r3,80(%r1)
lwz %r0,84(%r1)
lwz %r1,0(%r1)
-rfi5: rfi
+rfi3: rfi
/*
* Decrementer interrupt second level handler
@@ -1206,7 +1253,6 @@ _C_LABEL(sigcode):
mffs %f0
stfd %f0,112(%r6)
lfd %f0,0(%r6) /* restore the clobbered register */
-
blrl
addi %r6,%r1,8
lfd %f0,112(%r6)
@@ -1275,7 +1321,7 @@ ddbtrap:
b realtrap
ddbleave:
FRAME_LEAVE(ddbsave)
-rfi6: rfi
+rfi4: rfi
#endif /* DDB */
.globl _C_LABEL(rfi_inst)
@@ -1290,21 +1336,17 @@ _C_LABEL(rfid_inst):
.globl _C_LABEL(rfi_start)
_C_LABEL(rfi_start):
-#if 0
- /* rfi1 and rfi2 are currently inside if 0ed code */
.long rfi1, rfi1 + 4
.long rfi2, rfi2 + 4
-#endif
.long rfi3, rfi3 + 4
.long rfi4, rfi4 + 4
- .long rfi5, rfi5 + 4
- .long rfi6, rfi6 + 4
.long 0, 0
.globl _C_LABEL(nop64_start)
_C_LABEL(nop64_start):
.long nop64_1s, nop64_1e
+ .long nop64_2s, nop64_2e
.long 0, 0
.globl _C_LABEL(nop32_start)
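
The FRAME_SETUP and INTR_ENTER hunks above load sixteen 32-bit words from the very start of kernel_pmap_, at offsets 0 through 60, straight into SR0-SR15 (deliberately skipping slot 13, the user SR, on the entry paths, as the XXX comments note), and intr_exit does the same from curpm. That only works if the per-pmap segment register array, the pm_sr[] seen in the pmap.c hunks below, is the first member of struct pmap; the sketch below spells out that assumed layout and is illustrative only, not the actual header contents.

	/*
	 * Sketch only: assumes pm_sr[] sits at offset 0 of struct pmap,
	 * one 32-bit word per segment register, so byte offsets
	 * 0, 4, ..., 60 correspond to SR0..SR15.
	 */
	struct pmap_sr_sketch {
		unsigned int pm_sr[16];	/* SR images loaded by the macros */
		/* ... remaining pmap fields follow ... */
	};

	#ifndef ADDR_SR_SHIFT
	#define ADDR_SR_SHIFT	28	/* top 4 bits of a 32-bit EA pick the SR */
	#endif
	/* assumed signature, matching the calls removed from pmap.c below */
	extern void ppc_mtsrin(unsigned int val, unsigned int va);

	/* Rough C equivalent of the "load all kernel segment registers" block. */
	static void
	load_srs_sketch(const struct pmap_sr_sketch *pm)
	{
		int sn;

		for (sn = 0; sn < 16; sn++)
			ppc_mtsrin(pm->pm_sr[sn], sn << ADDR_SR_SHIFT);
	}
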
diff --git a/sys/arch/macppc/macppc/machdep.c b/sys/arch/macppc/macppc/machdep.c
index 800433f56ac..09ccd65f14d 100644
--- a/sys/arch/macppc/macppc/machdep.c
+++ b/sys/arch/macppc/macppc/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.73 2005/10/03 02:16:10 drahn Exp $ */
+/* $OpenBSD: machdep.c,v 1.74 2005/10/03 04:47:30 drahn Exp $ */
/* $NetBSD: machdep.c,v 1.4 1996/10/16 19:33:11 ws Exp $ */
/*
@@ -310,20 +310,6 @@ initppc(startkernel, endkernel, args)
*/
pmap_bootstrap(startkernel, endkernel);
- /* use BATs to map 1GB memory, no pageable BATs now */
- if (physmem > btoc(0x10000000)) {
- ppc_mtdbat1l(BATL(0x10000000, BAT_M));
- ppc_mtdbat1u(BATU(0x10000000));
- }
- if (physmem > btoc(0x20000000)) {
- ppc_mtdbat2l(BATL(0x20000000, BAT_M));
- ppc_mtdbat2u(BATU(0x20000000));
- }
- if (physmem > btoc(0x30000000)) {
- ppc_mtdbat3l(BATL(0x30000000, BAT_M));
- ppc_mtdbat3u(BATU(0x30000000));
- }
-#if 0
/* now that we know physmem size, map physical memory with BATs */
if (physmem > btoc(0x10000000)) {
battable[0x1].batl = BATL(0x10000000, BAT_M);
@@ -353,7 +339,6 @@ initppc(startkernel, endkernel, args)
battable[0x7].batl = BATL(0x70000000, BAT_M);
battable[0x7].batu = BATU(0x70000000);
}
-#endif
/*
* Now enable translation (and machine checks/recoverable interrupts).
@@ -410,7 +395,7 @@ initppc(startkernel, endkernel, args)
}
bootpath= &bootpathbuf[0];
-#ifdef DDB
+#ifdef notyet_DDB
ddb_init();
#endif
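
The machdep.c hunk stops pinning high memory into DBAT1-DBAT3 at boot and re-enables the battable[] setup instead, while the locore.S change un-#if-0s the DSI/ISI fast paths and brackets them with nop64_1s/nop64_2s, presumably so they can be patched out on 64-bit CPUs, which lack BAT registers. On 32-bit CPUs a reference above the low 256MB is then expected to be satisfied by loading a battable entry into BAT3 on fault. The following is a hypothetical C sketch of that lookup, reusing only the battable fields and ppc_mtdbat3l/ppc_mtdbat3u accessors that appear in the removed code; the real fast path does this in assembly inside dsitrap/isitrap.

	/*
	 * Hypothetical sketch: one battable entry per 256MB window, indexed
	 * by the top four bits of the faulting address, copied into DBAT3.
	 */
	struct bat_sketch { unsigned int batl, batu; };
	extern struct bat_sketch battable[16];		/* size assumed */
	extern void ppc_mtdbat3l(unsigned int);		/* as used in initppc() */
	extern void ppc_mtdbat3u(unsigned int);

	static void
	dbat_spill_sketch(unsigned int dar)	/* dar = faulting data address */
	{
		struct bat_sketch *b = &battable[dar >> 28];

		ppc_mtdbat3l(b->batl);	/* physical base + WIMG bits (BATL) */
		ppc_mtdbat3u(b->batu);	/* effective base + length + valid (BATU) */
	}
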
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index b028d1794ab..7063455d192 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.90 2005/10/03 02:18:50 drahn Exp $ */
+/* $OpenBSD: pmap.c,v 1.91 2005/10/03 04:47:30 drahn Exp $ */
/*
* Copyright (c) 2001, 2002 Dale Rahn.
@@ -155,6 +155,7 @@ struct pool pmap_pted_pool;
int pmap_initialized = 0;
int physmem;
+int physmaxaddr;
#define ATTRSHIFT 4
@@ -538,17 +539,9 @@ pmap_enter(pm, va, pa, prot, flags)
u_int sn = VP_SR(va);
pm->pm_exec[sn]++;
- if (pm->pm_sr[sn] & SR_NOEXEC) {
+ if (pm->pm_sr[sn] & SR_NOEXEC)
pm->pm_sr[sn] &= ~SR_NOEXEC;
- /* set the current sr if not kernel used segments
- * and this pmap is the currently active pmap
- */
- if (sn != PPC_USER_SR && sn != PPC_KERNEL_SR &&
- curpm == pm)
- ppc_mtsrin(pm->pm_sr[sn],
- sn << ADDR_SR_SHIFT);
- }
if (pattr != NULL)
*pattr |= (PTE_EXE_32 >> ATTRSHIFT);
} else {
@@ -667,17 +660,8 @@ pmap_remove_pg(pmap_t pm, vaddr_t va)
pted->pted_va &= ~PTED_VA_EXEC_M;
pm->pm_exec[sn]--;
- if (pm->pm_exec[sn] == 0) {
+ if (pm->pm_exec[sn] == 0)
pm->pm_sr[sn] |= SR_NOEXEC;
-
- /* set the current sr if not kernel used segments
- * and this pmap is the currently active pmap
- */
- if (sn != PPC_USER_SR && sn != PPC_KERNEL_SR &&
- curpm == pm)
- ppc_mtsrin(pm->pm_sr[sn],
- sn << ADDR_SR_SHIFT);
- }
}
if (ppc_proc_is_64b)
@@ -759,17 +743,8 @@ _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
u_int sn = VP_SR(va);
pm->pm_exec[sn]++;
- if (pm->pm_sr[sn] & SR_NOEXEC) {
+ if (pm->pm_sr[sn] & SR_NOEXEC)
pm->pm_sr[sn] &= ~SR_NOEXEC;
-
- /* set the current sr if not kernel used segments
- * and this pmap is the currently active pmap
- */
- if (sn != PPC_USER_SR && sn != PPC_KERNEL_SR &&
- curpm == pm)
- ppc_mtsrin(pm->pm_sr[sn],
- sn << ADDR_SR_SHIFT);
- }
}
splx(s);
@@ -823,17 +798,8 @@ pmap_kremove_pg(vaddr_t va)
pted->pted_va &= ~PTED_VA_EXEC_M;
pm->pm_exec[sn]--;
- if (pm->pm_exec[sn] == 0) {
+ if (pm->pm_exec[sn] == 0)
pm->pm_sr[sn] |= SR_NOEXEC;
-
- /* set the current sr if not kernel used segments
- * and this pmap is the currently active pmap
- */
- if (sn != PPC_USER_SR && sn != PPC_KERNEL_SR &&
- curpm == pm)
- ppc_mtsrin(pm->pm_sr[sn],
- sn << ADDR_SR_SHIFT);
- }
}
if (PTED_MANAGED(pted))
@@ -1338,30 +1304,38 @@ pmap_avail_setup(void)
for (mp = pmap_mem; mp->size !=0; mp++)
physmem += btoc(mp->size);
- /* limit to 1GB available, for now -XXXGRR */
-#define MEMMAX 0x40000000
- for (mp = pmap_avail; mp->size !=0 ; /* increment in loop */) {
- if (mp->start + mp->size > MEMMAX) {
- int rm_start;
- int rm_end;
- if (mp->start > MEMMAX) {
- rm_start = mp->start;
- rm_end = mp->start+mp->size;
- } else {
- rm_start = MEMMAX;
- rm_end = mp->start+mp->size;
+ if (ppc_proc_is_64b) {
+ /* limit to 256MB available, for now -XXXGRR */
+#define MEMMAX 0x10000000
+ for (mp = pmap_avail; mp->size !=0 ; /* increment in loop */) {
+ if (mp->start + mp->size > MEMMAX) {
+ int rm_start;
+ int rm_end;
+ if (mp->start > MEMMAX) {
+ rm_start = mp->start;
+ rm_end = mp->start+mp->size;
+ } else {
+ rm_start = MEMMAX;
+ rm_end = mp->start+mp->size;
+ }
+ pmap_remove_avail(rm_start, rm_end);
+
+ /* whack physmem, since we ignore more than
+ * 256MB
+ */
+ physmem = btoc(MEMMAX);
+
+ /*
+ * start over at top, make sure not to
+ * skip any
+ */
+ mp = pmap_avail;
+ continue;
}
- pmap_remove_avail(rm_start, rm_end);
-
- /* whack physmem, since we ignore more than 256MB */
- physmem = btoc(MEMMAX);
-
- /* start over at top, make sure not to skip any */
- mp = pmap_avail;
- continue;
+ mp++;
}
- mp++;
}
+
for (mp = pmap_avail; mp->size !=0; mp++)
pmap_cnt_avail += 1;
}
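
The reworked pmap_avail_setup() now clips the available-memory list only when ppc_proc_is_64b, capping it at 256MB for the time being. The restart-from-the-top pattern is there because pmap_remove_avail() can rewrite the list (shrinking, splitting, or dropping entries), which invalidates the scanning cursor. Below is a simplified standalone sketch of the same pattern; the types and the remove helper are made up for illustration and only mirror the loop's logic.

	/*
	 * Illustration only: clip every region in a zero-size-terminated
	 * array so nothing extends past `limit`, restarting the scan
	 * whenever the array may have been rewritten, as pmap_avail_setup()
	 * does with MEMMAX.
	 */
	struct avail_sketch { unsigned long start, size; };

	/* hypothetical stand-in for pmap_remove_avail() */
	void remove_range_sketch(struct avail_sketch *, unsigned long, unsigned long);

	static void
	clip_avail_sketch(struct avail_sketch *avail, unsigned long limit)
	{
		struct avail_sketch *mp;

		for (mp = avail; mp->size != 0; /* advance in loop */) {
			if (mp->start + mp->size > limit) {
				unsigned long rm_start, rm_end;

				rm_end = mp->start + mp->size;
				rm_start = (mp->start > limit) ? mp->start : limit;
				remove_range_sketch(avail, rm_start, rm_end);
				mp = avail;	/* list changed: start over, skip nothing */
				continue;
			}
			mp++;
		}
	}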