author     Artur Grabowski <art@cvs.openbsd.org>   2003-05-13 03:49:05 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>   2003-05-13 03:49:05 +0000
commit     d62b0a287052a9d4eeab55ad2d56f5e316e662d4
tree       cb51e3103595e6b266ec16d29af87cd7253d614d   /sys/arch/i386
parent     371cd3ebede8749d196eb400e88ac4b26c4b723c

The current solution to handle the protection fault trap is not
correct. It breaks down if we're trying to jump through a function
pointer. The protection fault trap on i386 must be one of the most
braindead traps ever invented in the history of humankind. It doesn't
give you any information about what went wrong except the instruction
that faulted. Since the problem we're trying to deal with is a
segmentation problem, we don't get the destination we want to jump to,
only the faulting instruction, and we won't add a disassembler to trap
handling just to try to figure out what went wrong.
What we want to do is to handle this as a normal fault to let noexec
accounting in pmap_enter deal with the changes to the code
segment. Unfortunately that's impossible. We don't know the faulting
address, so we need to change how the exec accounting works. Basically
the code segment must already cover the address we want to execute
before we can fault it in.
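For reference, the "cover the address first" rule shows up in the
sendsig()/setregs() paths in the diff below as a plain comparison against
the recorded high-water mark; condensed from those hunks:

	/* pm_hiexec is the highest executable mapping known for this pmap. */
	tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
	    GSEL(GUCODE1_SEL, SEL_UPL) :	/* wide cs, covers high exec mappings */
	    GSEL(GUCODE_SEL, SEL_UPL);		/* conservative cs */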
New scheme:
o Start with conservative code segment.
o If we get a protection fault, go through all mappings in the process
and find the highest executable mapping, fix up the code segment and
record that address (sketched below). If the code segment didn't change,
the protection fault wasn't fixable - just die.
o If the highest executable mapping is removed, just reset the code
segment to something conservative and let the next protection fault
deal with it. We can't read all the vm mappings of the process from
the pmap because of locking hell.
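Condensed from the pmap_exec_fixup() added in pmap.c below: the fixup
walks the map entries under the map lock, remembers the last page of the
highest executable mapping, and only touches %cs (and reports success)
when that high-water mark actually changes:

int
pmap_exec_fixup(struct vm_map *map, struct trapframe *tf, struct pcb *pcb)
{
	struct vm_map_entry *ent;
	struct pmap *pm = vm_map_pmap(map);
	vaddr_t va = 0;

	/* Entries are sorted by address; keep the last executable one. */
	vm_map_lock(map);
	for (ent = (&map->header)->next; ent != &map->header; ent = ent->next)
		if (ent->protection & VM_PROT_EXECUTE)
			va = trunc_page(ent->end) - PAGE_SIZE;
	vm_map_unlock(map);

	if (va == pm->pm_hiexec)
		return (0);	/* nothing changed - the fault isn't fixable */

	pm->pm_hiexec = va;
	pcb->pcb_cs = tf->tf_cs = (va > (vaddr_t)I386_MAX_EXE_ADDR) ?
	    GSEL(GUCODE1_SEL, SEL_UPL) : GSEL(GUCODE_SEL, SEL_UPL);
	return (1);
}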
This should allow a floating code segment whenever someone implements that.
Also, fix the pmap_protect function to behave more like the other
pmaps we have and be slightly more aggressive about forcing proper
protection changes.
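The reworked pmap_protect() inline (see the pmap.h hunk below) now reads:
keeping both write and execute is a no-op, keeping read and execute
narrows the mappings with pmap_write_protect(), and anything else removes
them so they fault back in through pmap_enter() with the proper protection:

	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
		return;				/* nothing is being taken away */

	if ((prot & (VM_PROT_READ|VM_PROT_EXECUTE)) ==
	    (VM_PROT_READ|VM_PROT_EXECUTE)) {
		pmap_write_protect(pmap, sva, eva, prot);
	} else {
		pmap_remove(pmap, sva, eva);	/* force re-entry via pmap_enter */
	}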
ok'd by various people.
Diffstat (limited to 'sys/arch/i386')
-rw-r--r--  sys/arch/i386/i386/freebsd_machdep.c  |  4
-rw-r--r--  sys/arch/i386/i386/linux_machdep.c    |  4
-rw-r--r--  sys/arch/i386/i386/machdep.c          |  6
-rw-r--r--  sys/arch/i386/i386/pmap.c             | 85
-rw-r--r--  sys/arch/i386/i386/svr4_machdep.c     |  4
-rw-r--r--  sys/arch/i386/i386/trap.c             | 17
-rw-r--r--  sys/arch/i386/include/pmap.h          | 21
7 files changed, 91 insertions, 50 deletions
diff --git a/sys/arch/i386/i386/freebsd_machdep.c b/sys/arch/i386/i386/freebsd_machdep.c
index 58f605738a8..0923b38ee98 100644
--- a/sys/arch/i386/i386/freebsd_machdep.c
+++ b/sys/arch/i386/i386/freebsd_machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: freebsd_machdep.c,v 1.15 2003/05/04 05:01:04 drahn Exp $	*/
+/*	$OpenBSD: freebsd_machdep.c,v 1.16 2003/05/13 03:49:04 art Exp $	*/
 /*	$NetBSD: freebsd_machdep.c,v 1.10 1996/05/03 19:42:05 christos Exp $	*/
 
 /*-
@@ -161,7 +161,7 @@ freebsd_sendsig(catcher, sig, mask, code, type, val)
 	tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
 	tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
 	tf->tf_eip = p->p_sigcode;
-	tf->tf_cs = pmap->pm_nxpages > 0?
+	tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
 	    GSEL(GUCODE1_SEL, SEL_UPL) : GSEL(GUCODE_SEL, SEL_UPL);
 	tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
 	tf->tf_esp = (int)fp;
diff --git a/sys/arch/i386/i386/linux_machdep.c b/sys/arch/i386/i386/linux_machdep.c
index 366b51bb92a..865767cec23 100644
--- a/sys/arch/i386/i386/linux_machdep.c
+++ b/sys/arch/i386/i386/linux_machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: linux_machdep.c,v 1.27 2003/05/04 05:01:04 drahn Exp $	*/
+/*	$OpenBSD: linux_machdep.c,v 1.28 2003/05/13 03:49:04 art Exp $	*/
 /*	$NetBSD: linux_machdep.c,v 1.29 1996/05/03 19:42:11 christos Exp $	*/
 
 /*
@@ -185,7 +185,7 @@ linux_sendsig(catcher, sig, mask, code, type, val)
 	tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
 	tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
 	tf->tf_eip = p->p_sigcode;
-	tf->tf_cs = pmap->pm_nxpages > 0?
+	tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
 	    GSEL(GUCODE1_SEL, SEL_UPL) : GSEL(GUCODE_SEL, SEL_UPL);
 	tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
 	tf->tf_esp = (int)fp;
diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c
index 02b1d8453f1..58807149298 100644
--- a/sys/arch/i386/i386/machdep.c
+++ b/sys/arch/i386/i386/machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: machdep.c,v 1.228 2003/05/05 17:54:59 drahn Exp $	*/
+/*	$OpenBSD: machdep.c,v 1.229 2003/05/13 03:49:04 art Exp $	*/
 /*	$NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $	*/
 
 /*-
@@ -1674,7 +1674,7 @@ sendsig(catcher, sig, mask, code, type, val)
 	tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
 	tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
 	tf->tf_eip = p->p_sigcode;
-	tf->tf_cs = pmap->pm_nxpages > 0?
+	tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
 	    GSEL(GUCODE1_SEL, SEL_UPL) : GSEL(GUCODE_SEL, SEL_UPL);
 	tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
 	tf->tf_esp = (int)fp;
@@ -2078,7 +2078,7 @@ setregs(p, pack, stack, retval)
 	tf->tf_ebp = 0;
 	tf->tf_ebx = (int)PS_STRINGS;
 	tf->tf_eip = pack->ep_entry;
-	tf->tf_cs = pmap->pm_nxpages > 0?
+	tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
 	    LSEL(LUCODE1_SEL, SEL_UPL) : LSEL(LUCODE_SEL, SEL_UPL);
 	tf->tf_eflags = PSL_USERSET;
 	tf->tf_esp = stack;
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index ad1e141b4eb..a6f02c0b7b0 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap.c,v 1.71 2003/05/09 23:51:23 art Exp $	*/
+/*	$OpenBSD: pmap.c,v 1.72 2003/05/13 03:49:04 art Exp $	*/
 /*	$NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $	*/
 
 /*
@@ -572,31 +572,67 @@ pmap_unmap_ptes(pmap)
 }
 
 __inline static void
-pmap_nxstack_account(struct pmap *pmap, vaddr_t va,
+pmap_exec_account(struct pmap *pm, vaddr_t va,
     pt_entry_t opte, pt_entry_t npte)
 {
-	if (((opte ^ npte) & PG_X) &&
-	    va < VM_MAXUSER_ADDRESS && va >= I386_MAX_EXE_ADDR) {
+	if (curproc == NULL || curproc->p_vmspace == NULL ||
+	    pm != vm_map_pmap(&curproc->p_vmspace->vm_map))
+		return;
+
+	if ((opte ^ npte) & PG_X)
+		pmap_update_pg(va);
+
+	/*
+	 * Executability was removed on the last executable change.
+	 * Reset the code segment to something conservative and
+	 * let the trap handler deal with setting the right limit.
+	 * We can't do that because of locking constraints on the vm map.
+	 *
+	 * XXX - floating cs - set this _really_ low.
+	 */
+	if ((opte & PG_X) && (npte & PG_X) == 0 && va == pm->pm_hiexec) {
 		struct trapframe *tf = curproc->p_md.md_regs;
-		struct vm_map *map = &curproc->p_vmspace->vm_map;
 		struct pcb *pcb = &curproc->p_addr->u_pcb;
 
-		if (npte & PG_X && !(opte & PG_X)) {
-			if (++pmap->pm_nxpages == 1 &&
-			    pmap == vm_map_pmap(map)) {
-				pcb->pcb_cs = tf->tf_cs =
-				    GSEL(GUCODE1_SEL, SEL_UPL);
-				pmap_update_pg(va);
-			}
-		} else {
-			if (!--pmap->pm_nxpages &&
-			    pmap == vm_map_pmap(map)) {
-				pcb->pcb_cs = tf->tf_cs =
-				    GSEL(GUCODE_SEL, SEL_UPL);
-				pmap_update_pg(va);
-			}
-		}
+		pcb->pcb_cs = tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
+		pm->pm_hiexec = I386_MAX_EXE_ADDR;
+	}
+}
+
+/*
+ * Fixup the code segment to cover all potential executable mappings.
+ * returns 0 if no changes to the code segment were made.
+ */
+int
+pmap_exec_fixup(struct vm_map *map, struct trapframe *tf, struct pcb *pcb)
+{
+	struct vm_map_entry *ent;
+	struct pmap *pm = vm_map_pmap(map);
+	vaddr_t va = 0;
+
+	vm_map_lock(map);
+	for (ent = (&map->header)->next; ent != &map->header; ent = ent->next) {
+		/*
+		 * This entry has greater va than the entries before.
+		 * We need to make it point to the last page, not past it.
+		 */
+		if (ent->protection & VM_PROT_EXECUTE)
+			va = trunc_page(ent->end) - PAGE_SIZE;
+	}
+	vm_map_unlock(map);
+
+	if (va == pm->pm_hiexec)
+		return (0);
+
+	pm->pm_hiexec = va;
+
+	if (pm->pm_hiexec > (vaddr_t)I386_MAX_EXE_ADDR) {
+		pcb->pcb_cs = tf->tf_cs = GSEL(GUCODE1_SEL, SEL_UPL);
+	} else {
+		pcb->pcb_cs = tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
 	}
+
+	return (1);
 }
 
 /*
@@ -1728,7 +1764,7 @@ pmap_pinit(pmap)
 	pmap->pm_stats.wired_count = 0;
 	pmap->pm_stats.resident_count = 1;	/* count the PDP allocd below */
 	pmap->pm_ptphint = NULL;
-	pmap->pm_nxpages = 0;
+	pmap->pm_hiexec = 0;
 	pmap->pm_flags = 0;
 
 	/* allocate PDP */
@@ -2243,7 +2279,7 @@ pmap_remove_pte(pmap, ptp, pte, va)
 
 	opte = *pte;		/* save the old PTE */
 	*pte = 0;		/* zap! */
-	pmap_nxstack_account(pmap, va, opte, 0);
+	pmap_exec_account(pmap, va, opte, 0);
 
 	if (opte & PG_W)
 		pmap->pm_stats.wired_count--;
@@ -2828,8 +2864,7 @@ pmap_write_protect(pmap, sva, eva, prot)
 			npte = (*spte & ~PG_PROT) | md_prot;
 
 			if (npte != *spte) {
-				/* account for executable pages on the stack */
-				pmap_nxstack_account(pmap, sva, *spte, npte);
+				pmap_exec_account(pmap, sva, *spte, npte);
 
 				*spte = npte;		/* zap! */
 
@@ -3125,7 +3160,7 @@ enter_now:
 	 */
 
 	npte = pa | protection_codes[prot] | PG_V;
-	pmap_nxstack_account(pmap, va, opte, npte);
+	pmap_exec_account(pmap, va, opte, npte);
 	if (pvh)
 		npte |= PG_PVLIST;
 	if (wired)
diff --git a/sys/arch/i386/i386/svr4_machdep.c b/sys/arch/i386/i386/svr4_machdep.c
index 27f5aa6f109..b50246f2108 100644
--- a/sys/arch/i386/i386/svr4_machdep.c
+++ b/sys/arch/i386/i386/svr4_machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: svr4_machdep.c,v 1.19 2003/05/04 05:01:04 drahn Exp $	*/
+/*	$OpenBSD: svr4_machdep.c,v 1.20 2003/05/13 03:49:04 art Exp $	*/
 /*	$NetBSD: svr4_machdep.c,v 1.24 1996/05/03 19:42:26 christos Exp $	*/
 
 /*
@@ -379,7 +379,7 @@ svr4_sendsig(catcher, sig, mask, code, type, val)
 	tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
 	tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
 	tf->tf_eip = p->p_sigcode;
-	tf->tf_cs = pmap->pm_nxpages > 0?
+	tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
 	    GSEL(GUCODE1_SEL, SEL_UPL) : GSEL(GUCODE_SEL, SEL_UPL);
 	tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
 	tf->tf_esp = (int)fp;
diff --git a/sys/arch/i386/i386/trap.c b/sys/arch/i386/i386/trap.c
index fc54a2430c0..fb730187ae7 100644
--- a/sys/arch/i386/i386/trap.c
+++ b/sys/arch/i386/i386/trap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: trap.c,v 1.56 2003/05/04 15:56:34 mickey Exp $	*/
+/*	$OpenBSD: trap.c,v 1.57 2003/05/13 03:49:04 art Exp $	*/
 /*	$NetBSD: trap.c,v 1.95 1996/05/05 06:50:02 mycroft Exp $	*/
 
 /*-
@@ -325,12 +325,14 @@ trap(frame)
 			goto out;
 		}
 #endif
-		if (ftype == VM_PROT_READ) {
-			ftype |= VM_PROT_EXECUTE;
-			/* XXX force %cr2 register have fault address */
-			__asm __volatile("movl %0,%%cr2" :: "r" (frame.tf_eip));
-		}
-		goto page_fault;
+		/* If pmap_exec_fixup does something, let's retry the trap. */
+		if (pmap_exec_fixup(&p->p_vmspace->vm_map, &frame,
+		    &p->p_addr->u_pcb))
+			goto out;
+
+		sv.sival_int = frame.tf_eip;
+		trapsignal(p, SIGSEGV, vftype, SEGV_MAPERR, sv);
+		goto out;
 
 	case T_TSSFLT|T_USER:
 		sv.sival_int = frame.tf_eip;
@@ -414,7 +416,6 @@ trap(frame)
 		goto we_re_toast;
 #endif
 		/* FALLTHROUGH */
-	page_fault:
 	case T_PAGEFLT|T_USER: {	/* page fault */
 		vaddr_t va, fa;
 		struct vmspace *vm = p->p_vmspace;
diff --git a/sys/arch/i386/include/pmap.h b/sys/arch/i386/include/pmap.h
index 8e086787f4f..cc806191c88 100644
--- a/sys/arch/i386/include/pmap.h
+++ b/sys/arch/i386/include/pmap.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap.h,v 1.31 2003/04/09 07:53:57 niklas Exp $	*/
+/*	$OpenBSD: pmap.h,v 1.32 2003/05/13 03:49:04 art Exp $	*/
 /*	$NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $	*/
 
 /*
@@ -266,7 +266,7 @@ struct pmap {
 	struct vm_page *pm_ptphint;	/* pointer to a PTP in our pmap */
 	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */
 
-	int pm_nxpages;			/* # of executable pages on stack */
+	vaddr_t pm_hiexec;		/* highest executable mapping */
 	int pm_flags;			/* see below */
 
 	union descriptor *pm_ldt;	/* user-set LDT */
@@ -387,6 +387,8 @@ static void pmap_update_pg(vaddr_t);
 static void pmap_update_2pg(vaddr_t,vaddr_t);
 void pmap_write_protect(struct pmap *, vaddr_t,
 				vaddr_t, vm_prot_t);
+int pmap_exec_fixup(struct vm_map *, struct trapframe *,
+    struct pcb *);
 
 vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
 
@@ -475,12 +477,15 @@ pmap_protect(pmap, sva, eva, prot)
 	vaddr_t sva, eva;
 	vm_prot_t prot;
 {
-	if ((prot & VM_PROT_WRITE) == 0) {
-		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
-			pmap_write_protect(pmap, sva, eva, prot);
-		} else {
-			pmap_remove(pmap, sva, eva);
-		}
+	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
+	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
+		return;
+
+	if ((prot & (VM_PROT_READ|VM_PROT_EXECUTE)) ==
+	    (VM_PROT_READ|VM_PROT_EXECUTE)) {
+		pmap_write_protect(pmap, sva, eva, prot);
+	} else {
+		pmap_remove(pmap, sva, eva);
 	}
 }