author     Artur Grabowski <art@cvs.openbsd.org>    2007-06-27 16:16:54 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>    2007-06-27 16:16:54 +0000
commit     90ed0d1897431034355c3e2f3a7f77f80049772b (patch)
tree       e0ed61e709edad05e6021a7e3ad4b68d8dcf26f2 /sys/arch
parent     dbb0c5c750b0620e2d8cabd5989405776473c704 (diff)
According to Intel errata:
"AI91 - Update of attribute bits on page directories without immediate
tlb shootdown may cause unexpected processor behavior.".
When we're allocating kernel page tables, we use the generic page table
allocation function, which sets PG_u, and then we immediately clear PG_u again.
This might not be enough, because the PDE can get preloaded into the
translation cache before we clear the PG_u. So even without the errata,
this could cause us horrible trouble.
Instead of first entering the PDE with PG_u and then removing it for
kernel page tables, just enter it with the right bits every time.
tom@ ok
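
To make the window concrete, here is a minimal sketch of the two patterns the
message describes. This is an illustration only, with simplified, hypothetical
names (enter_pde_racy, enter_pde) and types, not the actual pmap code; only the
PG_V/PG_RW/PG_u values match the real i386 PDE bits.

/*
 * Illustration only: hypothetical, simplified helpers, not the real
 * i386 pmap code.  PG_V/PG_RW/PG_u are the i386 present/read-write/user bits.
 */
#include <stdio.h>

typedef unsigned long pd_entry_t;

#define PG_V	0x001	/* valid (present) */
#define PG_RW	0x002	/* read/write */
#define PG_u	0x004	/* user-accessible */

/* Old pattern: the PDE is momentarily visible with PG_u set. */
static void
enter_pde_racy(volatile pd_entry_t *pde, unsigned long pa, int is_kernel)
{
	*pde = pa | PG_u | PG_RW | PG_V;	/* CPU may cache this PDE now */
	if (is_kernel)
		*pde &= ~PG_u;			/* too late if already cached */
}

/* New pattern: the caller passes the final flags, one write only. */
static void
enter_pde(volatile pd_entry_t *pde, unsigned long pa, pd_entry_t pde_flags)
{
	*pde = pa | PG_RW | PG_V | pde_flags;	/* PG_u only for user pmaps */
}

int
main(void)
{
	pd_entry_t kernel_pde = 0, user_pde = 0;

	enter_pde(&kernel_pde, 0x1000, 0);	/* kernel PTP: never PG_u */
	enter_pde(&user_pde, 0x2000, PG_u);	/* user PTP: PG_u set */

	printf("kernel PDE %#lx, user PDE %#lx\n",
	    (unsigned long)kernel_pde, (unsigned long)user_pde);
	return 0;
}

Passing the flags in lets pmap_get_ptp() request PG_u for user pmaps while
pmap_growkernel() passes 0, so a kernel PDE is never observable with the user
bit set, even briefly.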
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/i386/i386/pmap.c | 18
1 file changed, 8 insertions, 10 deletions
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index 8504bb6f5eb..d02f95d15e6 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.118 2007/06/19 09:41:39 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.119 2007/06/27 16:16:53 art Exp $ */
 /* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
 
 /*
@@ -324,7 +324,7 @@ extern vaddr_t pentium_idt_vaddr;
  */
 
 struct pv_entry *pmap_add_pvpage(struct pv_page *, boolean_t);
-struct vm_page *pmap_alloc_ptp(struct pmap *, int, boolean_t);
+struct vm_page *pmap_alloc_ptp(struct pmap *, int, boolean_t, pt_entry_t);
 struct pv_entry *pmap_alloc_pv(struct pmap *, int); /* see codes below */
 #define ALLOCPV_NEED 0 /* need PV now */
 #define ALLOCPV_TRY 1 /* just try to allocate */
@@ -1351,7 +1351,8 @@ pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
  */
 
 struct vm_page *
-pmap_alloc_ptp(struct pmap *pmap, int pde_index, boolean_t just_try)
+pmap_alloc_ptp(struct pmap *pmap, int pde_index, boolean_t just_try,
+    pt_entry_t pde_flags)
 {
 	struct vm_page *ptp;
 
@@ -1363,8 +1364,8 @@ pmap_alloc_ptp(struct pmap *pmap, int pde_index, boolean_t just_try)
 	/* got one! */
 	atomic_clearbits_int(&ptp->pg_flags, PG_BUSY);
 	ptp->wire_count = 1;	/* no mappings yet */
-	pmap->pm_pdir[pde_index] = (pd_entry_t)(VM_PAGE_TO_PHYS(ptp) | PG_u |
-	    PG_RW | PG_V | PG_M | PG_U);
+	pmap->pm_pdir[pde_index] = (pd_entry_t)(VM_PAGE_TO_PHYS(ptp) |
+	    PG_RW | PG_V | PG_M | PG_U | pde_flags);
 	pmap->pm_stats.resident_count++;	/* count PTP as resident */
 	pmap->pm_ptphint = ptp;
 	return(ptp);
@@ -1400,7 +1401,7 @@ pmap_get_ptp(struct pmap *pmap, int pde_index, boolean_t just_try)
 	}
 
 	/* allocate a new PTP (updates ptphint) */
-	return (pmap_alloc_ptp(pmap, pde_index, just_try));
+	return (pmap_alloc_ptp(pmap, pde_index, just_try, PG_u));
 }
 
 /*
@@ -2832,12 +2833,9 @@ pmap_growkernel(vaddr_t maxkvaddr)
 		 * INVOKED WHILE pmap_init() IS RUNNING!
 		 */
 
-		while (!pmap_alloc_ptp(kpm, PDSLOT_KERN + nkpde, FALSE))
+		while (!pmap_alloc_ptp(kpm, PDSLOT_KERN + nkpde, FALSE, 0))
 			uvm_wait("pmap_growkernel");
 
-		/* PG_u not for kernel */
-		kpm->pm_pdir[PDSLOT_KERN + nkpde] &= ~PG_u;
-
 		/* distribute new kernel PTP to all active pmaps */
 		simple_lock(&pmaps_lock);
 		LIST_FOREACH(pm, &pmaps, pm_list) {