summaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorOwain Ainsworth <oga@cvs.openbsd.org>2009-08-11 17:15:55 +0000
committerOwain Ainsworth <oga@cvs.openbsd.org>2009-08-11 17:15:55 +0000
commit98b05a003a84406b2feccb22b9cd67bae0c215c7 (patch)
tree8ffc766dcda9f986475939c1e1e6747c19eb4d54 /sys
parent6900534ae3a09df07b8422e0a7e7d9f24493d9c5 (diff)
fix some stupidity in x86 bus_space_map.
right now, we do a pmap_kenter_pa(), we then get the pte (behind pmap's back) and check for the cache inhibit bit (if needed). If it isn't what we want (this is the normal case) then we change it ourselves, and do a manual tlb shootdown (i386 was a bit more stupid about it than amd64, too). Instead, make it so that like on some other archs (sparc64 comes to mind) you can pass in flags in the low bits of the physical address, pmap then does everything correctly for you. Discovered this when I had some code doing a lot of bus_space_maps(), it was incredibly slow, and profiling was dominated by pmap_tlb_shootwait(); discussed with kettenis@, miod@, toby@ and art@. ok art@
Diffstat (limited to 'sys')
-rw-r--r--sys/arch/amd64/amd64/bus_dma.c9
-rw-r--r--sys/arch/amd64/amd64/bus_space.c20
-rw-r--r--sys/arch/amd64/amd64/pmap.c14
-rw-r--r--sys/arch/amd64/include/pmap.h7
-rw-r--r--sys/arch/i386/i386/bus_dma.c7
-rw-r--r--sys/arch/i386/i386/machdep.c18
-rw-r--r--sys/arch/i386/i386/pmap.c17
-rw-r--r--sys/arch/i386/include/pmap.h7
8 files changed, 50 insertions, 49 deletions
diff --git a/sys/arch/amd64/amd64/bus_dma.c b/sys/arch/amd64/amd64/bus_dma.c
index 5a5c4de055b..7d76ad0747a 100644
--- a/sys/arch/amd64/amd64/bus_dma.c
+++ b/sys/arch/amd64/amd64/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.28 2009/06/07 02:30:34 oga Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.29 2009/08/11 17:15:54 oga Exp $ */
/* $NetBSD: bus_dma.c,v 1.3 2003/05/07 21:33:58 fvdl Exp $ */
/*-
@@ -473,7 +473,7 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
{
vaddr_t va;
bus_addr_t addr;
- int curseg, pmapflags = VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED;
+ int curseg, pmapflags = 0;
if (flags & BUS_DMA_NOCACHE)
pmapflags |= PMAP_NOCACHE;
@@ -491,8 +491,9 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
if (size == 0)
panic("_bus_dmamem_map: size botch");
- pmap_enter(pmap_kernel(), va, addr,
- VM_PROT_READ | VM_PROT_WRITE, pmapflags);
+ pmap_enter(pmap_kernel(), va, addr | pmapflags,
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
+ VM_PROT_WRITE | PMAP_WIRED);
}
}
pmap_update(pmap_kernel());
diff --git a/sys/arch/amd64/amd64/bus_space.c b/sys/arch/amd64/amd64/bus_space.c
index 921f754467b..2e45908648a 100644
--- a/sys/arch/amd64/amd64/bus_space.c
+++ b/sys/arch/amd64/amd64/bus_space.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_space.c,v 1.14 2009/05/31 19:41:57 kettenis Exp $ */
+/* $OpenBSD: bus_space.c,v 1.15 2009/08/11 17:15:54 oga Exp $ */
/* $NetBSD: bus_space.c,v 1.2 2003/03/14 18:47:53 christos Exp $ */
/*-
@@ -239,8 +239,7 @@ x86_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
bus_space_handle_t *bshp)
{
u_long pa, endpa;
- vaddr_t va, sva;
- pt_entry_t *pte;
+ vaddr_t va;
bus_size_t map_size;
pa = trunc_page(bpa);
@@ -259,19 +258,10 @@ x86_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
*bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));
- sva = va;
for (; map_size > 0;
- pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE) {
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
-
- pte = kvtopte(va);
- if (flags & BUS_SPACE_MAP_CACHEABLE)
- *pte &= ~PG_N;
- else
- *pte |= PG_N;
- }
- pmap_tlb_shootrange(pmap_kernel(), sva, va);
- pmap_tlb_shootwait();
+ pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE)
+ pmap_kenter_pa(va, pa | ((flags & BUS_SPACE_MAP_CACHEABLE) ?
+ 0 : PMAP_NOCACHE), VM_PROT_READ | VM_PROT_WRITE);
pmap_update(pmap_kernel());
return 0;
diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index d7dfac863d5..4f8f1999906 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.50 2009/08/06 15:28:14 oga Exp $ */
+/* $OpenBSD: pmap.c,v 1.51 2009/08/11 17:15:54 oga Exp $ */
/* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
/*
@@ -447,7 +447,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
pte = kvtopte(va);
- npte = pa | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) | PG_V;
+ npte = (pa & PMAP_PA_MASK) | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
+ ((pa & PMAP_NOCACHE) ? PG_N : 0) | PG_V;
/* special 1:1 mappings in the first 2MB must not be global */
if (va >= (vaddr_t)NBPD_L2)
@@ -462,6 +463,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
panic("pmap_kenter_pa: PG_PS");
#endif
if (pmap_valid_entry(opte)) {
+ if (pa & PMAP_NOCACHE && (opte & PG_N) == 0)
+ wbinvd();
/* This shouldn't happen */
pmap_tlb_shootpage(pmap_kernel(), va);
pmap_tlb_shootwait();
@@ -1963,8 +1966,11 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
struct pv_entry *pve = NULL;
int ptpdelta, wireddelta, resdelta;
boolean_t wired = (flags & PMAP_WIRED) != 0;
+ boolean_t nocache = (pa & PMAP_NOCACHE) != 0;
int error;
+ pa &= PMAP_PA_MASK;
+
#ifdef DIAGNOSTIC
if (va == (vaddr_t) PDP_BASE || va == (vaddr_t) APDP_BASE)
panic("pmap_enter: trying to map over PDP/APDP!");
@@ -2127,7 +2133,7 @@ enter_now:
npte |= PG_PVLIST;
if (wired)
npte |= PG_W;
- if (flags & PMAP_NOCACHE)
+ if (nocache)
npte |= PG_N;
if (va < VM_MAXUSER_ADDRESS)
npte |= PG_u;
@@ -2143,6 +2149,8 @@ enter_now:
* flush the TLB. (is this overkill?)
*/
if (opte & PG_V) {
+ if (nocache && (opte & PG_N) == 0)
+ wbinvd();
pmap_tlb_shootpage(pmap, va);
pmap_tlb_shootwait();
}
diff --git a/sys/arch/amd64/include/pmap.h b/sys/arch/amd64/include/pmap.h
index 8c9ffcd1047..da9e160f0d7 100644
--- a/sys/arch/amd64/include/pmap.h
+++ b/sys/arch/amd64/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.27 2009/06/09 02:56:38 krw Exp $ */
+/* $OpenBSD: pmap.h,v 1.28 2009/08/11 17:15:54 oga Exp $ */
/* $NetBSD: pmap.h,v 1.1 2003/04/26 18:39:46 fvdl Exp $ */
/*
@@ -329,9 +329,10 @@ struct pmap {
};
/*
- * MD flags that we use for pmap_enter:
+ * MD flags that we use for pmap_enter (in the pa):
*/
-#define PMAP_NOCACHE PMAP_MD0 /* set the non-cacheable bit. */
+#define PMAP_PA_MASK ~((paddr_t)PAGE_MASK) /* to remove the flags */
+#define PMAP_NOCACHE 0x1 /* set the non-cacheable bit. */
/*
* We keep mod/ref flags in struct vm_page->pg_flags.
diff --git a/sys/arch/i386/i386/bus_dma.c b/sys/arch/i386/i386/bus_dma.c
index 0c784c7310e..f3cd5e4b6c6 100644
--- a/sys/arch/i386/i386/bus_dma.c
+++ b/sys/arch/i386/i386/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.17 2009/06/06 05:43:13 oga Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.18 2009/08/11 17:15:54 oga Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -418,7 +418,7 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
{
vaddr_t va;
bus_addr_t addr;
- int curseg, pmapflags = VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED;
+ int curseg, pmapflags = 0;
if (flags & BUS_DMA_NOCACHE)
pmapflags |= PMAP_NOCACHE;
@@ -437,7 +437,8 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
if (size == 0)
panic("_bus_dmamem_map: size botch");
pmap_enter(pmap_kernel(), va, addr,
- VM_PROT_READ | VM_PROT_WRITE, pmapflags);
+ VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
+ VM_PROT_WRITE | PMAP_WIRED);
}
}
pmap_update(pmap_kernel());
diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c
index 8a60f6c55c5..c708a5250ef 100644
--- a/sys/arch/i386/i386/machdep.c
+++ b/sys/arch/i386/i386/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.457 2009/08/10 16:40:50 oga Exp $ */
+/* $OpenBSD: machdep.c,v 1.458 2009/08/11 17:15:54 oga Exp $ */
/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
/*-
@@ -3527,7 +3527,6 @@ bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
{
u_long pa, endpa;
vaddr_t va;
- pt_entry_t *pte;
bus_size_t map_size;
pa = trunc_page(bpa);
@@ -3547,18 +3546,9 @@ bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
*bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));
for (; map_size > 0;
- pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE) {
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
-
- pte = kvtopte(va);
- if (flags & BUS_SPACE_MAP_CACHEABLE)
- *pte &= ~PG_N;
- else
- *pte |= PG_N;
- pmap_tlb_shootpage(pmap_kernel(), va);
- }
-
- pmap_tlb_shootwait();
+ pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE)
+ pmap_kenter_pa(va, pa | ((flags & BUS_SPACE_MAP_CACHEABLE) ?
+ 0 : PMAP_NOCACHE), VM_PROT_READ | VM_PROT_WRITE);
pmap_update(pmap_kernel());
return 0;
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index a553b083971..1dd8e011e9c 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.144 2009/08/06 15:28:14 oga Exp $ */
+/* $OpenBSD: pmap.c,v 1.145 2009/08/11 17:15:54 oga Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -686,8 +686,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
pt_entry_t *pte, opte, npte;
pte = vtopte(va);
- npte = pa | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) | PG_V |
- PG_U | PG_M;
+ npte = (pa & PMAP_PA_MASK) | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
+ PG_V | PG_U | PG_M | ((pa & PMAP_NOCACHE) ? PG_N : 0);
/* special 1:1 mappings in the first 4MB must not be global */
if (va >= (vaddr_t)NBPD)
@@ -695,6 +695,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
opte = i386_atomic_testset_ul(pte, npte);
if (pmap_valid_entry(opte)) {
+ if (pa & PMAP_NOCACHE && (opte & PG_N) == 0)
+ wbinvd();
/* NB. - this should not happen. */
pmap_tlb_shootpage(pmap_kernel(), va);
pmap_tlb_shootwait();
@@ -2371,9 +2373,12 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa,
struct vm_page *ptp;
struct pv_entry *pve = NULL, *freepve;
boolean_t wired = (flags & PMAP_WIRED) != 0;
+ boolean_t nocache = (pa & PMAP_NOCACHE) != 0;
struct vm_page *pg = NULL;
int error, wired_count, resident_count, ptp_count;
+ pa &= PMAP_PA_MASK; /* nuke flags from pa */
+
#ifdef DIAGNOSTIC
/* sanity check: totally out of range? */
if (va >= VM_MAX_KERNEL_ADDRESS)
@@ -2525,7 +2530,7 @@ enter_now:
pmap_exec_account(pmap, va, opte, npte);
if (wired)
npte |= PG_W;
- if (flags & PMAP_NOCACHE)
+ if (nocache)
npte |= PG_N;
if (va < VM_MAXUSER_ADDRESS)
npte |= PG_u;
@@ -2548,7 +2553,9 @@ enter_now:
pmap->pm_stats.resident_count += resident_count;
pmap->pm_stats.wired_count += wired_count;
- if (opte & PG_V) {
+ if (pmap_valid_entry(opte)) {
+ if (nocache && (opte & PG_N) == 0)
+ wbinvd(); /* XXX clflush before we enter? */
pmap_tlb_shootpage(pmap, va);
pmap_tlb_shootwait();
}
diff --git a/sys/arch/i386/include/pmap.h b/sys/arch/i386/include/pmap.h
index 6b989e0f59e..c380b253d44 100644
--- a/sys/arch/i386/include/pmap.h
+++ b/sys/arch/i386/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.52 2009/06/03 00:49:12 art Exp $ */
+/* $OpenBSD: pmap.h,v 1.53 2009/08/11 17:15:54 oga Exp $ */
/* $NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $ */
/*
@@ -294,7 +294,10 @@ struct pv_entry { /* locked by its list's pvh_lock */
/*
* MD flags to pmap_enter:
*/
-#define PMAP_NOCACHE PMAP_MD0
+
+/* to get just the pa from params to pmap_enter */
+#define PMAP_PA_MASK ~((paddr_t)PAGE_MASK)
+#define PMAP_NOCACHE 0x1 /* map uncached */
/*
* We keep mod/ref flags in struct vm_page->pg_flags.