author     Dale Rahn <drahn@cvs.openbsd.org>    2002-07-24 02:19:29 +0000
committer  Dale Rahn <drahn@cvs.openbsd.org>    2002-07-24 02:19:29 +0000
commit     c7e7f672d73e4b0521e4932a18643e549b55316c (patch)
tree       ca0f87399b88b1ce0a18a98cf0a95c0ecdfe443e /sys/arch
parent     71762e55310fdb5bd048869aad2685129ba33e81 (diff)
- change pte_spill_X() to take an extra parameter indicating whether
  the fault is an EXE fault or an R/W fault.
- set or clear the SR_NOEXEC bit in the segment register value when the
  number of executable pages in the segment becomes zero or non-zero,
  respectively.
- create segments with SR_NOEXEC set; it is cleared when the first exec
  mapping in the segment is created.
- allow pte_spill_X() to deal with a new type of fault: page mapped but
  not executable, when execute was requested.
Adds up to non-exec stack support for powerpc.
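The segment-register bookkeeping described above can be summarized with a
small userland model. This is an illustration only, not code from the commit:
the helper names seg_exec_ref()/seg_exec_unref() and the printf standing in
for mtsrin are invented here, while SR_NOEXEC and the per-segment executable
page counter mirror the pmap.c changes shown further down.

/*
 * Minimal userland model of the per-segment exec accounting.  The real
 * code lives in pmap_enter()/pmap_remove_pg() and updates the hardware
 * segment register with mtsrin; that step is replaced by a printf here.
 */
#include <stdio.h>
#include <stdint.h>

#define SR_NOEXEC  0x10000000u      /* "no execute" bit in the SR value */
#define NSEGMENTS  16               /* 16 segment registers on 32-bit powerpc */

static uint32_t pm_sr[NSEGMENTS];   /* software copy of the segment registers */
static int      pm_exec[NSEGMENTS]; /* executable-page count per segment */

static void
update_hw_sr(int sn)
{
	/* the kernel would do: asm volatile("mtsrin %0,%1" ...); */
	printf("segment %2d -> SR %08x (%s)\n", sn, (unsigned)pm_sr[sn],
	    (pm_sr[sn] & SR_NOEXEC) ? "no-exec" : "exec allowed");
}

/* called when an executable mapping is added to segment sn */
static void
seg_exec_ref(int sn)
{
	if (pm_exec[sn]++ == 0 && (pm_sr[sn] & SR_NOEXEC)) {
		pm_sr[sn] &= ~SR_NOEXEC;  /* first exec page: allow execution */
		update_hw_sr(sn);
	}
}

/* called when an executable mapping is removed from segment sn */
static void
seg_exec_unref(int sn)
{
	if (--pm_exec[sn] == 0) {
		pm_sr[sn] |= SR_NOEXEC;   /* last exec page gone: forbid execution */
		update_hw_sr(sn);
	}
}

int
main(void)
{
	int sn;

	/* segments start out non-executable, as in pmap_bootstrap() below */
	for (sn = 0; sn < NSEGMENTS; sn++)
		pm_sr[sn] = (uint32_t)sn | SR_NOEXEC;

	seg_exec_ref(3);    /* first exec mapping clears SR_NOEXEC */
	seg_exec_ref(3);    /* second one only bumps the count */
	seg_exec_unref(3);
	seg_exec_unref(3);  /* count hits 0 again: SR_NOEXEC comes back */
	return 0;
}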
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/macppc/macppc/locore.S     4
-rw-r--r--  sys/arch/powerpc/include/pmap.h     5
-rw-r--r--  sys/arch/powerpc/powerpc/pmap.c   130
-rw-r--r--  sys/arch/powerpc/powerpc/trap.c     8
4 files changed, 107 insertions, 40 deletions
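On the fault-handling side, the checks added to pte_spill_r()/pte_spill_v()
reduce to the sketch below. Again this is only an illustration: struct
fake_pted, spill_would_succeed() and the flag values are invented for the
example, while the DSISR store bit and the exec_fault test mirror the diff
that follows.

#include <stdio.h>
#include <stdint.h>

#define DSISR_STORE     (1u << (31 - 6)) /* DSISR bit 6: access was a store */
#define PTE_RO          0x1u             /* low pte_lo bit: read-only (simplified) */
#define PTED_VA_EXEC_M  0x8u             /* "mapping is executable" flag (value invented) */

struct fake_pted {
	uint32_t pte_lo;   /* stands in for pted->pted_pte.pte_lo */
	uint32_t pted_va;  /* stands in for pted->pted_va flag bits */
	int      valid;    /* stands in for PTED_VALID(pted) */
};

/*
 * Decision logic of the spill handlers after this commit: refuse to
 * spill (return 0) for write faults on read-only mappings and, newly,
 * for execute faults on mappings not entered with VM_PROT_EXECUTE.
 */
static int
spill_would_succeed(const struct fake_pted *pted, uint32_t dsisr, int exec_fault)
{
	if (pted == NULL || !pted->valid)
		return 0;
	if ((dsisr & DSISR_STORE) && (pted->pte_lo & PTE_RO))
		return 0;   /* write fault, read-only mapping */
	if (exec_fault && (pted->pted_va & PTED_VA_EXEC_M) == 0)
		return 0;   /* execute fault, non-executable mapping */
	return 1;           /* the real code would pte_insert(pted) here */
}

int
main(void)
{
	struct fake_pted data = { .pte_lo = PTE_RO, .pted_va = 0, .valid = 1 };

	/* a plain read fault on a data page can still be spilled ... */
	printf("read fault: %d\n", spill_would_succeed(&data, 0, 0));
	/* ... but an instruction fetch from it is refused and goes to uvm_fault() */
	printf("exec fault: %d\n", spill_would_succeed(&data, 0, 1));
	return 0;
}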
diff --git a/sys/arch/macppc/macppc/locore.S b/sys/arch/macppc/macppc/locore.S
index 83e92875bae..50433e85933 100644
--- a/sys/arch/macppc/macppc/locore.S
+++ b/sys/arch/macppc/macppc/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.3 2002/03/21 03:02:32 drahn Exp $ */
+/* $OpenBSD: locore.S,v 1.4 2002/07/24 02:19:28 drahn Exp $ */
/* $NetBSD: locore.S,v 1.2 1996/10/16 19:33:09 ws Exp $ */
/*
@@ -981,6 +981,7 @@ s_dsitrap:
mfdar 3
mfsrr1 4
mfdsisr 5
+ li 6, 0
s_pte_spill:
bl _C_LABEL(pte_spill_r) /* try a spill */
cmpwi 0,3,0
@@ -1035,6 +1036,7 @@ s_isitrap:
mfsrr0 3
mfsrr1 4
li 5, 0
+ li 6, 1
b s_pte_spill /* above */
/*
diff --git a/sys/arch/powerpc/include/pmap.h b/sys/arch/powerpc/include/pmap.h
index 3e3066c8632..c13941fcc30 100644
--- a/sys/arch/powerpc/include/pmap.h
+++ b/sys/arch/powerpc/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.27 2002/07/15 17:01:26 drahn Exp $ */
+/* $OpenBSD: pmap.h,v 1.28 2002/07/24 02:19:28 drahn Exp $ */
/* $NetBSD: pmap.h,v 1.1 1996/09/30 16:34:29 ws Exp $ */
/*-
@@ -46,6 +46,7 @@ typedef u_int sr_t;
#define SR_TYPE 0x80000000
#define SR_SUKEY 0x40000000
#define SR_PRKEY 0x20000000
+#define SR_NOEXEC 0x10000000
#define SR_VSID 0x00ffffff
/*
* bit
@@ -127,7 +128,7 @@ void pmap_release(struct pmap *);
void pmap_real_memory(vm_offset_t *start, vm_size_t *size);
void switchexit(struct proc *);
-int pte_spill_v(struct pmap *pm, u_int32_t va, u_int32_t dsisr);
+int pte_spill_v(struct pmap *pm, u_int32_t va, u_int32_t dsisr, int exec_fault);
#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) ;
#endif /* _KERNEL */
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index be9e5d169d1..eaf45ef032e 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.72 2002/07/15 17:01:25 drahn Exp $ */
+/* $OpenBSD: pmap.c,v 1.73 2002/07/24 02:19:28 drahn Exp $ */
/*
* Copyright (c) 2001, 2002 Dale Rahn. All rights reserved.
@@ -132,7 +132,8 @@ void pmap_remove_avail(paddr_t base, paddr_t end);
void *pmap_steal_avail(size_t size, int align);
/* asm interface */
-int pte_spill_r(u_int32_t va, u_int32_t msr, u_int32_t access_type);
+int pte_spill_r(u_int32_t va, u_int32_t msr, u_int32_t access_type,
+ int exec_fault);
u_int32_t pmap_setusr(pmap_t pm, vaddr_t va);
void pmap_popusr(u_int32_t oldsr);
@@ -497,8 +498,22 @@ pmap_enter(pm, va, pa, prot, flags)
*/
pte_insert(pted);
- if (prot & VM_PROT_EXECUTE)
- pm->pm_exec[va >> ADDR_SR_SHIFT]++;
+ if (prot & VM_PROT_EXECUTE) {
+ u_int sn = VP_SR(va);
+
+ pm->pm_exec[sn]++;
+ if (pm->pm_sr[sn] & SR_NOEXEC) {
+ pm->pm_sr[sn] &= ~SR_NOEXEC;
+
+ /* set the current sr if this is not a kernel used segment
+ * and this pmap is the currently active pmap
+ */
+ if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
+ asm volatile ("mtsrin %0,%1"
+ :: "r"(pm->pm_sr[sn]),
+ "r"(sn << ADDR_SR_SHIFT) );
+ }
+ }
splx(s);
@@ -603,8 +618,21 @@ pmap_remove_pg(pmap_t pm, vaddr_t va)
pmap_hash_remove(pted);
if (pted->pted_va & PTED_VA_EXEC_M) {
- pm->pm_exec[pted->pted_va >> ADDR_SR_SHIFT]--;
+ u_int sn = VP_SR(va);
+
pted->pted_va &= ~PTED_VA_EXEC_M;
+ pm->pm_exec[sn]--;
+ if (pm->pm_exec[sn] == 0) {
+ pm->pm_sr[sn] |= SR_NOEXEC;
+
+ /* set the current sr if this is not a kernel used segment
+ * and this pmap is the currently active pmap
+ */
+ if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
+ asm volatile ("mtsrin %0,%1"
+ :: "r"(pm->pm_sr[sn]),
+ "r"(sn << ADDR_SR_SHIFT) );
+ }
}
pted->pted_pte.pte_hi &= ~PTE_VALID;
@@ -675,8 +703,22 @@ _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
pte_insert(pted);
pted->pted_va |= PTED_VA_WIRED_M;
- if (prot & VM_PROT_EXECUTE)
- pm->pm_exec[va >> ADDR_SR_SHIFT]++;
+ if (prot & VM_PROT_EXECUTE) {
+ u_int sn = VP_SR(va);
+
+ pm->pm_exec[sn]++;
+ if (pm->pm_sr[sn] & SR_NOEXEC) {
+ pm->pm_sr[sn] &= ~SR_NOEXEC;
+
+ /* set the current sr if this is not a kernel used segment
+ * and this pmap is the currently active pmap
+ */
+ if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
+ asm volatile ("mtsrin %0,%1"
+ :: "r"(pm->pm_sr[sn]),
+ "r"(sn << ADDR_SR_SHIFT) );
+ }
+ }
splx(s);
@@ -725,8 +767,21 @@ pmap_kremove_pg(vaddr_t va)
pmap_hash_remove(pted);
if (pted->pted_va & PTED_VA_EXEC_M) {
- pm->pm_exec[pted->pted_va >> ADDR_SR_SHIFT]--;
+ u_int sn = VP_SR(va);
+
pted->pted_va &= ~PTED_VA_EXEC_M;
+ pm->pm_exec[sn]--;
+ if (pm->pm_exec[sn] == 0) {
+ pm->pm_sr[sn] |= SR_NOEXEC;
+
+ /* set the current sr if this is not a kernel used segment
+ * and this pmap is the currently active pmap
+ */
+ if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
+ asm volatile ("mtsrin %0,%1"
+ :: "r"(pm->pm_sr[sn]),
+ "r"(sn << ADDR_SR_SHIFT) );
+ }
}
if (PTED_MANAGED(pted))
@@ -1010,7 +1065,7 @@ again:
seg = try << 4;
for (k = 0; k < 16; k++) {
- pm->pm_sr[k] = seg + k;
+ pm->pm_sr[k] = (seg + k) | SR_NOEXEC;
}
return;
}
@@ -1081,7 +1136,7 @@ pmap_release(pmap_t pm)
int s;
pmap_vp_destroy(pm);
- i = pm->pm_sr[0] >> 4;
+ i = (pm->pm_sr[0] & SR_VSID) >> 4;
tblidx = i / (8 * sizeof usedsr[0]);
tbloff = i % (8 * sizeof usedsr[0]);
@@ -1375,7 +1430,7 @@ pmap_bootstrap(u_int kernelstart, u_int kernelend)
|= 1 << ((KERNEL_SEGMENT / 16) % (sizeof usedsr[0] * 8));
#endif
for (i = 0; i < 16; i++) {
- pmap_kernel()->pm_sr[i] = KERNEL_SEG0 + i;
+ pmap_kernel()->pm_sr[i] = (KERNEL_SEG0 + i) | SR_NOEXEC;
asm volatile ("mtsrin %0,%1"
:: "r"( KERNEL_SEG0 + i), "r"(i << ADDR_SR_SHIFT) );
}
@@ -1867,17 +1922,15 @@ pmap_init()
* "user" accesses.
*/
int
-pte_spill_r(u_int32_t va, u_int32_t msr, u_int32_t dsisr)
+pte_spill_r(u_int32_t va, u_int32_t msr, u_int32_t dsisr, int exec_fault)
{
pmap_t pm;
struct pte_desc *pted;
- int retcode = 0;
/*
* This function only handles kernel faults, not supervisor copyins.
*/
if (!(msr & PSL_PR)) {
- /* PSL_PR is clear for supervisor, right? - XXXXX */
/* lookup is done physical to prevent faults */
if (VP_SR(va) == USER_SR) {
return 0;
@@ -1889,26 +1942,32 @@ pte_spill_r(u_int32_t va, u_int32_t msr, u_int32_t dsisr)
}
pted = pmap_vp_lookup(pm, va);
- if (pted != NULL) {
- /* if the current mapping is RO and the access was a write
- * we return 0
- */
- if (!PTED_VALID(pted) ||
- ((dsisr & (1 << (31-6)))
- && (pted->pted_pte.pte_lo & 0x1))) {
- /* write fault and we have a readonly mapping */
- retcode = 0;
- } else {
- retcode = 1;
- pte_insert(pted);
- }
+ if (pted == NULL) {
+ return 0;
+ }
+
+ /* if the current mapping is RO and the access was a write
+ * we return 0
+ */
+ if (!PTED_VALID(pted)) {
+ return 0;
+ }
+ if ((dsisr & (1 << (31-6))) && (pted->pted_pte.pte_lo & 0x1)) {
+ /* write fault and we have a readonly mapping */
+ return 0;
}
+ if ((exec_fault != 0)
+ && ((pted->pted_va & PTED_VA_EXEC_M) == 0)) {
+ /* attempted to execute a non-executable page */
+ return 0;
+ }
+ pte_insert(pted);
- return retcode;
+ return 1;
}
int
-pte_spill_v(pmap_t pm, u_int32_t va, u_int32_t dsisr)
+pte_spill_v(pmap_t pm, u_int32_t va, u_int32_t dsisr, int exec_fault)
{
struct pte_desc *pted;
@@ -1921,11 +1980,16 @@ pte_spill_v(pmap_t pm, u_int32_t va, u_int32_t dsisr)
* if the current mapping is RO and the access was a write
* we return 0
*/
- if (!PTED_VALID(pted) ||
- ((dsisr & (1 << (31-6))) && (pted->pted_pte.pte_lo & 0x1))) {
+ if (!PTED_VALID(pted)) {
+ return 0;
+ }
+ if ((dsisr & (1 << (31-6))) && (pted->pted_pte.pte_lo & 0x1)) {
/* write fault and we have a readonly mapping */
- if (PTED_VALID(pted))
- pmap_hash_remove(pted);
+ return 0;
+ }
+ if ((exec_fault != 0)
+ && ((pted->pted_va & PTED_VA_EXEC_M) == 0)) {
+ /* attempted to execute a non-executable page */
return 0;
}
pte_insert(pted);
diff --git a/sys/arch/powerpc/powerpc/trap.c b/sys/arch/powerpc/powerpc/trap.c
index d4e42379428..f9857554aa3 100644
--- a/sys/arch/powerpc/powerpc/trap.c
+++ b/sys/arch/powerpc/powerpc/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.51 2002/06/07 21:57:57 drahn Exp $ */
+/* $OpenBSD: trap.c,v 1.52 2002/07/24 02:19:28 drahn Exp $ */
/* $NetBSD: trap.c,v 1.3 1996/10/13 03:31:37 christos Exp $ */
/*
@@ -300,7 +300,7 @@ trap(frame)
va &= ADDR_PIDX | ADDR_POFF;
va |= user_sr << ADDR_SR_SHIFT;
map = &p->p_vmspace->vm_map;
- if (pte_spill_v(map->pmap, va, frame->dsisr)) {
+ if (pte_spill_v(map->pmap, va, frame->dsisr, 0)) {
return;
}
}
@@ -329,7 +329,7 @@ printf("kern dsi on addr %x iar %x\n", frame->dar, frame->srr0);
int ftype, vftype;
if (pte_spill_v(p->p_vmspace->vm_map.pmap,
- frame->dar, frame->dsisr))
+ frame->dar, frame->dsisr, 0))
{
/* fault handled by spill handler */
break;
@@ -359,7 +359,7 @@ printf("dsi on addr %x iar %x lr %x\n", frame->dar, frame->srr0,frame->lr);
int ftype;
if (pte_spill_v(p->p_vmspace->vm_map.pmap,
- frame->srr0, 0))
+ frame->srr0, 0, 1))
{
/* fault handled by spill handler */
break;