author		Artur Grabowski <art@cvs.openbsd.org>	2002-07-24 00:33:51 +0000
committer	Artur Grabowski <art@cvs.openbsd.org>	2002-07-24 00:33:51 +0000
commit		7e2bd6297d10d10c88eb737b0ef812b3100e865f (patch)
tree		fcc94f373cb90580d69267172ddd2a3864ee034f /sys
parent		d34f87bfce1c1f67a548fd1fc2bb3ee8049c94fd (diff)
Pretty nasty hack to make non-exec mappings work.
Instead of using FOE for just emulating references, we also keep track of a page's executability and don't remove the FOE bit if the page is not executable. This is implemented with horrible hacks. Maybe when I have time, I'll reimplement the whole pmap to allow this without ugly hacks (read: probably not this decade). The stack on alpha is now non-exec.
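
The idea in compressed form: the Alpha PTE has no execute-permission bit, only the fault-on-read/write/execute (FOR/FOW/FOE) bits normally used to emulate referenced and modified tracking. The hack is to leave FOE permanently set on any mapping that was entered without execute permission, so an instruction fetch from such a page always traps and the trap handler can turn it into a real protection fault. The toy model below illustrates that flow; it is a minimal stand-alone sketch with made-up helpers (PG_EXEC here stands in for the pmap's software "mapping is executable" bit), not the actual pmap code.

#include <stdio.h>

/*
 * Toy model of the non-exec trick.  PG_FOE plays the hardware
 * fault-on-execute bit, PG_EXEC the software executability bit.
 */
#define PG_EXEC	0x1
#define PG_FOE	0x2

/* Entering a mapping: force FOE on anything that is not executable. */
static unsigned int
enter_pte(int executable)
{
	unsigned int pte = PG_FOE;	/* new mappings start unreferenced */

	if (executable)
		pte |= PG_EXEC;
	return (pte);
}

/*
 * FOE trap: on an executable page this was only the emulated "referenced"
 * fault, so clear FOE and retry; on a non-exec page it is a genuine
 * protection violation and the caller should run the normal fault path.
 */
static int
emulate_foe(unsigned int *pte)
{
	if ((*pte & PG_EXEC) == 0)
		return (1);		/* non-exec: report access violation */
	*pte &= ~PG_FOE;		/* exec: reference recorded, resume */
	return (0);
}

int
main(void)
{
	unsigned int stack = enter_pte(0);	/* e.g. the now non-exec stack */
	unsigned int text = enter_pte(1);	/* executable text segment */

	printf("FOE on stack page -> %d (1 = real fault)\n", emulate_foe(&stack));
	printf("FOE on text page  -> %d (0 = emulated reference)\n", emulate_foe(&text));
	return (0);
}
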
Diffstat (limited to 'sys')
-rw-r--r--	sys/arch/alpha/alpha/pmap.c	38
-rw-r--r--	sys/arch/alpha/alpha/trap.c	40
-rw-r--r--	sys/arch/alpha/include/pmap.h	5
3 files changed, 53 insertions(+), 30 deletions(-)
diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c
index 2beed186321..80444e3e27c 100644
--- a/sys/arch/alpha/alpha/pmap.c
+++ b/sys/arch/alpha/alpha/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.34 2002/06/25 21:33:19 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.35 2002/07/24 00:33:49 art Exp $ */
/* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */
/*-
@@ -1637,6 +1637,8 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
PMAP_LOCK(pmap);
bits = pte_prot(pmap, prot);
+ if (!pmap_pte_exec(&bits))
+ bits |= PG_FOE;
isactive = PMAP_ISACTIVE(pmap, cpu_id);
l1pte = pmap_l1pte(pmap, sva);
@@ -1955,6 +1957,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
else if ((attrs & PGA_MODIFIED) == 0)
npte |= PG_FOW;
+ /* Always force FOE on non-exec mappings. */
+ if (!pmap_pte_exec(pte))
+ npte |= PG_FOE;
+
/*
* Mapping was entered on PV list.
*/
@@ -2805,6 +2811,9 @@ pmap_changebit(paddr_t pa, u_long set, u_long mask, long cpu_id)
if (pv->pv_pmap->pm_cpus != 0)
needisync = TRUE;
}
+ } else {
+ /* Never clear FOE on non-exec mappings. */
+ npte |= PG_FOE;
}
PMAP_SET_PTE(pte, npte);
if (needisync)
@@ -2825,9 +2834,12 @@ pmap_changebit(paddr_t pa, u_long set, u_long mask, long cpu_id)
* pmap_emulate_reference:
*
* Emulate reference and/or modified bit hits.
+ *
+ * return non-zero if this was a FOE fault and the pte is not
+ * executable.
*/
-void
-pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int write)
+int
+pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int type)
{
pt_entry_t faultoff, *pte;
paddr_t pa;
@@ -2838,7 +2850,7 @@ pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int write)
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
printf("pmap_emulate_reference: %p, 0x%lx, %d, %d\n",
- p, v, user, write);
+ p, v, user, type);
#endif
/*
@@ -2865,6 +2877,11 @@ pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int write)
* We'll unlock below where we're done with the PTE.
*/
}
+ if (!pmap_pte_exec(pte) && type == ALPHA_MMCSR_FOE) {
+ if (didlock)
+ PMAP_UNLOCK(p->p_vmspace->vm_map.pmap);
+ return (1);
+ }
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
printf("\tpte = %p, ", pte);
@@ -2880,7 +2897,7 @@ pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int write)
* pmap_emulate_reference(), and the bits aren't guaranteed,
* for them...
*/
- if (write) {
+ if (type == ALPHA_MMCSR_FOW) {
if (!(*pte & (user ? PG_UWE : PG_UWE | PG_KWE)))
panic("pmap_emulate_reference: write but unwritable");
if (!(*pte & PG_FOW))
@@ -2909,7 +2926,7 @@ pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int write)
#endif
#ifdef DIAGNOSTIC
if (!PAGE_IS_MANAGED(pa))
- panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): pa 0x%lx not managed", p, v, user, write, pa);
+ panic("pmap_emulate_reference(%p, 0x%lx, %d, %d): pa 0x%lx not managed", p, v, user, type, pa);
#endif
/*
@@ -2925,17 +2942,24 @@ pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int write)
PMAP_HEAD_TO_MAP_LOCK();
simple_lock(&pvh->pvh_slock);
- if (write) {
+ if (type == ALPHA_MMCSR_FOW) {
pvh->pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
faultoff = PG_FOR | PG_FOW | PG_FOE;
} else {
pvh->pvh_attrs |= PGA_REFERENCED;
faultoff = PG_FOR | PG_FOE;
}
+ /*
+ * If the page is not PG_EXEC, pmap_changebit will automagically
+ * set PG_FOE (gross, but necessary if I don't want to change the
+ * whole API).
+ */
pmap_changebit(pa, 0, ~faultoff, cpu_id);
simple_unlock(&pvh->pvh_slock);
PMAP_HEAD_TO_MAP_UNLOCK();
+
+ return (0);
}
#ifdef DEBUG
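
To avoid changing the pmap_changebit() interface, the patch makes that function re-assert PG_FOE on non-executable mappings even when pmap_emulate_reference() asks it to clear all the fault bits, which is what the "automagically" comment in the hunk above refers to. A condensed stand-alone model of that interaction (simplified bit handling and made-up names, not the real code):

#include <stdio.h>

#define PG_EXEC	0x1
#define PG_FOR	0x2
#define PG_FOW	0x4
#define PG_FOE	0x8

/*
 * Toy pmap_changebit(): apply set/mask to the PTE, but never let a
 * non-executable mapping lose FOE, even if the mask would clear it.
 */
static unsigned int
changebit(unsigned int pte, unsigned int set, unsigned int mask)
{
	unsigned int npte = (pte | set) & mask;

	if ((pte & PG_EXEC) == 0)
		npte |= PG_FOE;
	return (npte);
}

int
main(void)
{
	/* A non-exec page that just took an emulated "referenced" fault. */
	unsigned int pte = PG_FOR | PG_FOW | PG_FOE;
	unsigned int faultoff = PG_FOR | PG_FOE;	/* bits the caller wants cleared */

	pte = changebit(pte, 0, ~faultoff);
	printf("FOE still set on non-exec pte: %s\n",
	    (pte & PG_FOE) ? "yes" : "no");
	return (0);
}
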
diff --git a/sys/arch/alpha/alpha/trap.c b/sys/arch/alpha/alpha/trap.c
index fd01cc85b73..56c5a9b2f0e 100644
--- a/sys/arch/alpha/alpha/trap.c
+++ b/sys/arch/alpha/alpha/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.37 2002/06/28 16:50:38 art Exp $ */
+/* $OpenBSD: trap.c,v 1.38 2002/07/24 00:33:49 art Exp $ */
/* $NetBSD: trap.c,v 1.52 2000/05/24 16:48:33 thorpej Exp $ */
/*-
@@ -293,6 +293,7 @@ trap(a0, a1, a2, entry, framep)
caddr_t v;
int typ;
union sigval sv;
+ vm_prot_t ftype;
uvmexp.traps++;
p = curproc;
@@ -424,11 +425,12 @@ trap(a0, a1, a2, entry, framep)
switch (a1) {
case ALPHA_MMCSR_FOR:
case ALPHA_MMCSR_FOE:
- pmap_emulate_reference(p, a0, user, 0);
- goto out;
-
case ALPHA_MMCSR_FOW:
- pmap_emulate_reference(p, a0, user, 1);
+ if (pmap_emulate_reference(p, a0, user, a1)) {
+ /* XXX - stupid API right now. */
+ ftype = VM_PROT_EXECUTE|VM_PROT_READ;
+ goto do_fault;
+ }
goto out;
case ALPHA_MMCSR_INVALTRANS:
@@ -437,10 +439,22 @@ trap(a0, a1, a2, entry, framep)
vaddr_t va;
struct vmspace *vm = NULL;
struct vm_map *map;
- vm_prot_t ftype;
int rv;
extern struct vm_map *kernel_map;
+ switch (a2) {
+ case -1: /* instruction fetch fault */
+ ftype = VM_PROT_EXECUTE|VM_PROT_READ;
+ break;
+ case 0: /* load instruction */
+ ftype = VM_PROT_READ;
+ break;
+ case 1: /* store instruction */
+ ftype = VM_PROT_READ|VM_PROT_WRITE;
+ break;
+ }
+
+do_fault:
/*
* If it was caused by fuswintr or suswintr,
* just punt. Note that we check the faulting
@@ -475,20 +489,6 @@ trap(a0, a1, a2, entry, framep)
map = &vm->vm_map;
}
- switch (a2) {
- case -1: /* instruction fetch fault */
- case 0: /* load instruction */
- ftype = VM_PROT_READ;
- break;
- case 1: /* store instruction */
- ftype = VM_PROT_WRITE;
- break;
-#ifdef DIAGNOSTIC
- default: /* XXX gcc -Wuninitialized */
- goto dopanic;
-#endif
- }
-
va = trunc_page((vaddr_t)a0);
rv = uvm_fault(map, va, 0, ftype);
/*
diff --git a/sys/arch/alpha/include/pmap.h b/sys/arch/alpha/include/pmap.h
index 20ba20184c1..ce905446937 100644
--- a/sys/arch/alpha/include/pmap.h
+++ b/sys/arch/alpha/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.11 2001/12/05 00:11:51 millert Exp $ */
+/* $OpenBSD: pmap.h,v 1.12 2002/07/24 00:33:50 art Exp $ */
/* $NetBSD: pmap.h,v 1.37 2000/11/19 03:16:35 thorpej Exp $ */
/*-
@@ -210,8 +210,7 @@ paddr_t vtophys(vaddr_t);
/* Machine-specific functions. */
void pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids);
-void pmap_emulate_reference(struct proc *p, vaddr_t v,
- int user, int write);
+int pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int type);
#ifdef _PMAP_MAY_USE_PROM_CONSOLE
int pmap_uses_prom_console(void);
#endif