summaryrefslogtreecommitdiff
path: root/sys/arch/amd64
diff options
context:
space:
mode:
authorOwain Ainsworth <oga@cvs.openbsd.org>2009-08-11 17:15:55 +0000
committerOwain Ainsworth <oga@cvs.openbsd.org>2009-08-11 17:15:55 +0000
commit98b05a003a84406b2feccb22b9cd67bae0c215c7 (patch)
tree8ffc766dcda9f986475939c1e1e6747c19eb4d54 /sys/arch/amd64
parent6900534ae3a09df07b8422e0a7e7d9f24493d9c5 (diff)
fix some stupidity in x86 bus_space_map.
right now, we do a pmap_kenter_pa(), we then get the pte (behind pmap's back) and check for the cache inhibit bit (if needed). If it isn't what we want (this is the normal case) then we change it ourselves, and do a manual tlb shootdown (i386 was a bit more stupid about it than amd64, too). Instead, make it so that like on some other archs (sparc64 comes to mind) you can pass in flags in the low bits of the physical address, pmap then does everything correctly for you. Discovered this when I had some code doing a lot of bus_space_maps(), it was incredibly slow, and profiling was dominated by pmap_tlb_shootwait(); discussed with kettenis@, miod@, toby@ and art@. ok art@
Diffstat (limited to 'sys/arch/amd64')
-rw-r--r--sys/arch/amd64/amd64/bus_dma.c9
-rw-r--r--sys/arch/amd64/amd64/bus_space.c20
-rw-r--r--sys/arch/amd64/amd64/pmap.c14
-rw-r--r--sys/arch/amd64/include/pmap.h7
4 files changed, 25 insertions, 25 deletions
diff --git a/sys/arch/amd64/amd64/bus_dma.c b/sys/arch/amd64/amd64/bus_dma.c
index 5a5c4de055b..7d76ad0747a 100644
--- a/sys/arch/amd64/amd64/bus_dma.c
+++ b/sys/arch/amd64/amd64/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.28 2009/06/07 02:30:34 oga Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.29 2009/08/11 17:15:54 oga Exp $ */
/* $NetBSD: bus_dma.c,v 1.3 2003/05/07 21:33:58 fvdl Exp $ */
/*-
@@ -473,7 +473,7 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
{
vaddr_t va;
bus_addr_t addr;
- int curseg, pmapflags = VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED;
+ int curseg, pmapflags = 0;
if (flags & BUS_DMA_NOCACHE)
pmapflags |= PMAP_NOCACHE;
@@ -491,8 +491,9 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
if (size == 0)
panic("_bus_dmamem_map: size botch");
- pmap_enter(pmap_kernel(), va, addr,
- VM_PROT_READ | VM_PROT_WRITE, pmapflags);
+ pmap_enter(pmap_kernel(), va, addr | pmapflags,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
+ VM_PROT_WRITE | PMAP_WIRED);
}
}
pmap_update(pmap_kernel());
diff --git a/sys/arch/amd64/amd64/bus_space.c b/sys/arch/amd64/amd64/bus_space.c
index 921f754467b..2e45908648a 100644
--- a/sys/arch/amd64/amd64/bus_space.c
+++ b/sys/arch/amd64/amd64/bus_space.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_space.c,v 1.14 2009/05/31 19:41:57 kettenis Exp $ */
+/* $OpenBSD: bus_space.c,v 1.15 2009/08/11 17:15:54 oga Exp $ */
/* $NetBSD: bus_space.c,v 1.2 2003/03/14 18:47:53 christos Exp $ */
/*-
@@ -239,8 +239,7 @@ x86_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
bus_space_handle_t *bshp)
{
u_long pa, endpa;
- vaddr_t va, sva;
- pt_entry_t *pte;
+ vaddr_t va;
bus_size_t map_size;
pa = trunc_page(bpa);
@@ -259,19 +258,10 @@ x86_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
*bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));
- sva = va;
for (; map_size > 0;
- pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE) {
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
-
- pte = kvtopte(va);
- if (flags & BUS_SPACE_MAP_CACHEABLE)
- *pte &= ~PG_N;
- else
- *pte |= PG_N;
- }
- pmap_tlb_shootrange(pmap_kernel(), sva, va);
- pmap_tlb_shootwait();
+ pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE)
+ pmap_kenter_pa(va, pa | ((flags & BUS_SPACE_MAP_CACHEABLE) ?
+ 0 : PMAP_NOCACHE), VM_PROT_READ | VM_PROT_WRITE);
pmap_update(pmap_kernel());
return 0;
diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index d7dfac863d5..4f8f1999906 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.50 2009/08/06 15:28:14 oga Exp $ */
+/* $OpenBSD: pmap.c,v 1.51 2009/08/11 17:15:54 oga Exp $ */
/* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
/*
@@ -447,7 +447,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
pte = kvtopte(va);
- npte = pa | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) | PG_V;
+ npte = (pa & PMAP_PA_MASK) | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
+ ((pa & PMAP_NOCACHE) ? PG_N : 0) | PG_V;
/* special 1:1 mappings in the first 2MB must not be global */
if (va >= (vaddr_t)NBPD_L2)
@@ -462,6 +463,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
panic("pmap_kenter_pa: PG_PS");
#endif
if (pmap_valid_entry(opte)) {
+ if (pa & PMAP_NOCACHE && (opte & PG_N) == 0)
+ wbinvd();
/* This shouldn't happen */
pmap_tlb_shootpage(pmap_kernel(), va);
pmap_tlb_shootwait();
@@ -1963,8 +1966,11 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
struct pv_entry *pve = NULL;
int ptpdelta, wireddelta, resdelta;
boolean_t wired = (flags & PMAP_WIRED) != 0;
+ boolean_t nocache = (pa & PMAP_NOCACHE) != 0;
int error;
+ pa &= PMAP_PA_MASK;
+
#ifdef DIAGNOSTIC
if (va == (vaddr_t) PDP_BASE || va == (vaddr_t) APDP_BASE)
panic("pmap_enter: trying to map over PDP/APDP!");
@@ -2127,7 +2133,7 @@ enter_now:
npte |= PG_PVLIST;
if (wired)
npte |= PG_W;
- if (flags & PMAP_NOCACHE)
+ if (nocache)
npte |= PG_N;
if (va < VM_MAXUSER_ADDRESS)
npte |= PG_u;
@@ -2143,6 +2149,8 @@ enter_now:
* flush the TLB. (is this overkill?)
*/
if (opte & PG_V) {
+ if (nocache && (opte & PG_N) == 0)
+ wbinvd();
pmap_tlb_shootpage(pmap, va);
pmap_tlb_shootwait();
}
diff --git a/sys/arch/amd64/include/pmap.h b/sys/arch/amd64/include/pmap.h
index 8c9ffcd1047..da9e160f0d7 100644
--- a/sys/arch/amd64/include/pmap.h
+++ b/sys/arch/amd64/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.27 2009/06/09 02:56:38 krw Exp $ */
+/* $OpenBSD: pmap.h,v 1.28 2009/08/11 17:15:54 oga Exp $ */
/* $NetBSD: pmap.h,v 1.1 2003/04/26 18:39:46 fvdl Exp $ */
/*
@@ -329,9 +329,10 @@ struct pmap {
};
/*
- * MD flags that we use for pmap_enter:
+ * MD flags that we use for pmap_enter (in the pa):
*/
-#define PMAP_NOCACHE PMAP_MD0 /* set the non-cacheable bit. */
+#define PMAP_PA_MASK ~((paddr_t)PAGE_MASK) /* to remove the flags */
+#define PMAP_NOCACHE 0x1 /* set the non-cacheable bit. */
/*
* We keep mod/ref flags in struct vm_page->pg_flags.