diff options
author | Owain Ainsworth <oga@cvs.openbsd.org> | 2009-08-11 17:15:55 +0000 |
---|---|---|
committer | Owain Ainsworth <oga@cvs.openbsd.org> | 2009-08-11 17:15:55 +0000 |
commit | 98b05a003a84406b2feccb22b9cd67bae0c215c7 (patch) | |
tree | 8ffc766dcda9f986475939c1e1e6747c19eb4d54 /sys/arch/i386 | |
parent | 6900534ae3a09df07b8422e0a7e7d9f24493d9c5 (diff) |
fix some stupidity in x86 bus_space_map.
right now, we do a pmap_kenter_pa(), we then get the pte (behind pmap's
back) and check for the cache inhibit bit (if needed). If it isn't what
we want (this is the normal case) then we change it ourselves, and do a
manual tlb shootdown (i386 was a bit more stupid about it than amd64,
too).
Instead, make it so that like on some other archs (sparc64 comes to
mind) you can pass in flags in the low bits of the physical address,
pmap then does everything correctly for you.
Discovered this when I had some code doing a lot of bus_space_maps(), it
was incredibly slow, and profiling was dominated by
pmap_tlb_shootwait();
discussed with kettenis@, miod@, toby@ and art@.
ok art@
Diffstat (limited to 'sys/arch/i386')
-rw-r--r-- | sys/arch/i386/i386/bus_dma.c | 7 | ||||
-rw-r--r-- | sys/arch/i386/i386/machdep.c | 18 | ||||
-rw-r--r-- | sys/arch/i386/i386/pmap.c | 17 | ||||
-rw-r--r-- | sys/arch/i386/include/pmap.h | 7 |
4 files changed, 25 insertions, 24 deletions
diff --git a/sys/arch/i386/i386/bus_dma.c b/sys/arch/i386/i386/bus_dma.c index 0c784c7310e..f3cd5e4b6c6 100644 --- a/sys/arch/i386/i386/bus_dma.c +++ b/sys/arch/i386/i386/bus_dma.c @@ -1,4 +1,4 @@ -/* $OpenBSD: bus_dma.c,v 1.17 2009/06/06 05:43:13 oga Exp $ */ +/* $OpenBSD: bus_dma.c,v 1.18 2009/08/11 17:15:54 oga Exp $ */ /*- * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc. * All rights reserved. @@ -418,7 +418,7 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, { vaddr_t va; bus_addr_t addr; - int curseg, pmapflags = VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED; + int curseg, pmapflags = 0; if (flags & BUS_DMA_NOCACHE) pmapflags |= PMAP_NOCACHE; @@ -437,7 +437,8 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, if (size == 0) panic("_bus_dmamem_map: size botch"); pmap_enter(pmap_kernel(), va, addr, - VM_PROT_READ | VM_PROT_WRITE, pmapflags); + VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | + VM_PROT_WRITE | PMAP_WIRED); } } pmap_update(pmap_kernel()); diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c index 8a60f6c55c5..c708a5250ef 100644 --- a/sys/arch/i386/i386/machdep.c +++ b/sys/arch/i386/i386/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.457 2009/08/10 16:40:50 oga Exp $ */ +/* $OpenBSD: machdep.c,v 1.458 2009/08/11 17:15:54 oga Exp $ */ /* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */ /*- @@ -3527,7 +3527,6 @@ bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags, { u_long pa, endpa; vaddr_t va; - pt_entry_t *pte; bus_size_t map_size; pa = trunc_page(bpa); @@ -3547,18 +3546,9 @@ bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags, *bshp = (bus_space_handle_t)(va + (bpa & PGOFSET)); for (; map_size > 0; - pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE) { - pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE); - - pte = kvtopte(va); - if (flags & BUS_SPACE_MAP_CACHEABLE) - *pte &= ~PG_N; - else - *pte |= PG_N; - 
pmap_tlb_shootpage(pmap_kernel(), va); - } - - pmap_tlb_shootwait(); + pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE) + pmap_kenter_pa(va, pa | ((flags & BUS_SPACE_MAP_CACHEABLE) ? + 0 : PMAP_NOCACHE), VM_PROT_READ | VM_PROT_WRITE); pmap_update(pmap_kernel()); return 0; diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c index a553b083971..1dd8e011e9c 100644 --- a/sys/arch/i386/i386/pmap.c +++ b/sys/arch/i386/i386/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.144 2009/08/06 15:28:14 oga Exp $ */ +/* $OpenBSD: pmap.c,v 1.145 2009/08/11 17:15:54 oga Exp $ */ /* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */ /* @@ -686,8 +686,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) pt_entry_t *pte, opte, npte; pte = vtopte(va); - npte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) | PG_V | - PG_U | PG_M; + npte = (pa & PMAP_PA_MASK) | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) | + PG_V | PG_U | PG_M | ((pa & PMAP_NOCACHE) ? PG_N : 0); /* special 1:1 mappings in the first 4MB must not be global */ if (va >= (vaddr_t)NBPD) @@ -695,6 +695,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) opte = i386_atomic_testset_ul(pte, npte); if (pmap_valid_entry(opte)) { + if (pa & PMAP_NOCACHE && (opte & PG_N) == 0) + wbinvd(); /* NB. - this should not happen. */ pmap_tlb_shootpage(pmap_kernel(), va); pmap_tlb_shootwait(); @@ -2371,9 +2373,12 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, struct vm_page *ptp; struct pv_entry *pve = NULL, *freepve; boolean_t wired = (flags & PMAP_WIRED) != 0; + boolean_t nocache = (pa & PMAP_NOCACHE) != 0; struct vm_page *pg = NULL; int error, wired_count, resident_count, ptp_count; + pa &= PMAP_PA_MASK; /* nuke flags from pa */ + #ifdef DIAGNOSTIC /* sanity check: totally out of range? 
*/ if (va >= VM_MAX_KERNEL_ADDRESS) @@ -2525,7 +2530,7 @@ enter_now: pmap_exec_account(pmap, va, opte, npte); if (wired) npte |= PG_W; - if (flags & PMAP_NOCACHE) + if (nocache) npte |= PG_N; if (va < VM_MAXUSER_ADDRESS) npte |= PG_u; @@ -2548,7 +2553,9 @@ enter_now: pmap->pm_stats.resident_count += resident_count; pmap->pm_stats.wired_count += wired_count; - if (opte & PG_V) { + if (pmap_valid_entry(opte)) { + if (nocache && (opte & PG_N) == 0) + wbinvd(); /* XXX clflush before we enter? */ pmap_tlb_shootpage(pmap, va); pmap_tlb_shootwait(); } diff --git a/sys/arch/i386/include/pmap.h b/sys/arch/i386/include/pmap.h index 6b989e0f59e..c380b253d44 100644 --- a/sys/arch/i386/include/pmap.h +++ b/sys/arch/i386/include/pmap.h @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.h,v 1.52 2009/06/03 00:49:12 art Exp $ */ +/* $OpenBSD: pmap.h,v 1.53 2009/08/11 17:15:54 oga Exp $ */ /* $NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $ */ /* @@ -294,7 +294,10 @@ struct pv_entry { /* locked by its list's pvh_lock */ /* * MD flags to pmap_enter: */ -#define PMAP_NOCACHE PMAP_MD0 + +/* to get just the pa from params to pmap_enter */ +#define PMAP_PA_MASK ~((paddr_t)PAGE_MASK) +#define PMAP_NOCACHE 0x1 /* map uncached */ /* * We keep mod/ref flags in struct vm_page->pg_flags. |