author     Artur Grabowski <art@cvs.openbsd.org>    2010-07-01 21:27:40 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>    2010-07-01 21:27:40 +0000
commit     9b5af8fa665fbe2d5508c1aac172c7a864618c3d
tree       235f6eb84a99018827a5c873505f0bfe799c5090 /sys/uvm
parent     de101667466fd782081d6f7cb241706f15b3d283
Implement vs{,un}lock_device and use it for physio.
Just like normal vs{,un}lock, but if the pages we get are not DMA
accessible, we bounce them; if they are DMA accessible, the functions
behave exactly like normal vslock. The plan for the future is to have
fault_wire allocate DMA-accessible pages so that we don't need to bounce
(especially when the same buffer is reused for physio over and over
again), but for now, keep it as simple as possible.
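
As a usage illustration (not part of this commit), here is a minimal
sketch of how a physio-style caller might drive the new pair; the proc
pointer p, the user buffer buf, and its length len are assumed to come
from the surrounding I/O code:

	void *bounce, *kva;
	int error;

	/*
	 * Wire the user pages; if any of them is not DMA reachable,
	 * uvm_vslock_device sets up a DMA-able bounce mapping and
	 * returns it via the last argument, otherwise it stores NULL.
	 */
	error = uvm_vslock_device(p, buf, len,
	    VM_PROT_READ | VM_PROT_WRITE, &bounce);
	if (error)
		return (error);

	/*
	 * Hand the device the bounce mapping when one was created,
	 * the original buffer otherwise.
	 */
	kva = (bounce != NULL) ? bounce : buf;
	/* ... perform the transfer against kva ... */

	/*
	 * Unwire; when a bounce mapping exists, the bounced data is
	 * copied back to the user buffer before the mapping is torn down.
	 */
	uvm_vsunlock_device(p, buf, len, bounce);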
Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm_extern.h |  7
-rw-r--r--  sys/uvm/uvm_glue.c   | 99
2 files changed, 101 insertions, 5 deletions
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index e9ea11298e5..b70dd6a903d 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_extern.h,v 1.87 2010/06/27 03:03:49 thib Exp $	*/
+/*	$OpenBSD: uvm_extern.h,v 1.88 2010/07/01 21:27:39 art Exp $	*/
 /*	$NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $	*/
 
 /*
@@ -503,6 +503,11 @@
 int			uvm_vslock(struct proc *, caddr_t, size_t,
 			    vm_prot_t);
 void			uvm_vsunlock(struct proc *, caddr_t, size_t);
+int			uvm_vslock_device(struct proc *, void *, size_t,
+			    vm_prot_t, void **);
+void			uvm_vsunlock_device(struct proc *, void *, size_t,
+			    void *);
+
 
 /* uvm_init.c */
 void			uvm_init(void);
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index 453e524f797..3783e418b0a 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_glue.c,v 1.51 2010/06/30 20:20:18 thib Exp $	*/
+/*	$OpenBSD: uvm_glue.c,v 1.52 2010/07/01 21:27:39 art Exp $	*/
 /*	$NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $	*/
 
 /*
@@ -157,13 +157,12 @@ uvm_chgkprot(caddr_t addr, size_t len, int rw)
  * uvm_vslock: wire user memory for I/O
  *
  * - called from physio and sys___sysctl
- * - XXXCDC: consider nuking this (or making it a macro?)
  */
 
 int
 uvm_vslock(struct proc *p, caddr_t addr, size_t len, vm_prot_t access_type)
 {
-	vm_map_t map;
+	struct vm_map *map;
 	vaddr_t start, end;
 	int rv;
 
@@ -182,7 +181,6 @@ uvm_vslock(struct proc *p, caddr_t addr, size_t len, vm_prot_t access_type)
  * uvm_vsunlock: unwire user memory wired by uvm_vslock()
  *
  * - called from physio and sys___sysctl
- * - XXXCDC: consider nuking this (or making it a macro?)
  */
 
 void
@@ -199,6 +197,99 @@ uvm_vsunlock(struct proc *p, caddr_t addr, size_t len)
 }
 
 /*
+ * uvm_vslock_device: wire user memory, make sure it's device reachable
+ * and bounce if necessary.
+ * Always bounces for now.
+ */
+int
+uvm_vslock_device(struct proc *p, void *addr, size_t len,
+    vm_prot_t access_type, void **retp)
+{
+	struct vm_page *pg;
+	struct pglist pgl;
+	int npages;
+	vaddr_t start, end, off;
+	vaddr_t sva, va;
+	vsize_t sz;
+	int error, i;
+
+	start = trunc_page((vaddr_t)addr);
+	end = round_page((vaddr_t)addr + len);
+	sz = end - start;
+	off = (vaddr_t)addr - start;
+	if (end <= start)
+		return (EINVAL);
+
+	if ((error = uvm_fault_wire(&p->p_vmspace->vm_map, start, end,
+	    access_type))) {
+		return (error);
+	}
+
+	npages = atop(sz);
+	for (i = 0; i < npages; i++) {
+		paddr_t pa;
+
+		if (!pmap_extract(p->p_vmspace->vm_map.pmap,
+		    start + ptoa(i), &pa))
+			return (EFAULT);
+		if (!PADDR_IS_DMA_REACHABLE(pa))
+			break;
+	}
+	if (i == npages) {
+		*retp = NULL;
+		return (0);
+	}
+
+	if ((va = uvm_km_valloc(kernel_map, sz)) == 0) {
+		return (ENOMEM);
+	}
+	TAILQ_INIT(&pgl);
+	if (uvm_pglistalloc(npages * PAGE_SIZE, dma_constraint.ucr_low,
+	    dma_constraint.ucr_high, 0, 0, &pgl, npages, UVM_PLA_WAITOK)) {
+		uvm_km_free(kernel_map, va, sz);
+		return (ENOMEM);
+	}
+
+	sva = va;
+	while ((pg = TAILQ_FIRST(&pgl)) != NULL) {
+		TAILQ_REMOVE(&pgl, pg, pageq);
+		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
+		va += PAGE_SIZE;
+	}
+	KASSERT(va == sva + sz);
+	*retp = (void *)(sva + off);
+
+	error = copyin(addr, *retp, len);
+	return (error);
+}
+
+void
+uvm_vsunlock_device(struct proc *p, void *addr, size_t len, void *map)
+{
+	vaddr_t start, end;
+	vaddr_t kva;
+	vsize_t sz;
+
+	start = trunc_page((vaddr_t)addr);
+	end = round_page((vaddr_t)addr + len);
+	sz = end - start;
+	if (end <= start)
+		return;
+
+	if (map)
+		copyout(map, addr, len);
+	uvm_fault_unwire(&p->p_vmspace->vm_map, start, end);
+
+	if (!map)
+		return;
+
+	kva = trunc_page((vaddr_t)map);
+	pmap_kremove(kva, sz);
+	uvm_km_pgremove_intrsafe(kva, kva + sz);
+	uvm_km_free(kernel_map, kva, sz);
+}
+
+/*
  * uvm_fork: fork a virtual address space
  *
  * - the address space is copied as per parent map's inherit values
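
For context on the reachability test in uvm_vslock_device above:
dma_constraint describes the platform's DMA-able physical address range
(the same bounds passed to uvm_pglistalloc when allocating the bounce
pages), and PADDR_IS_DMA_REACHABLE() is, in spirit, a range check
against it. A sketch of the idea (an assumption for illustration, not
the verbatim OpenBSD definition):

	/*
	 * Sketch: a physical address is device reachable when it lies
	 * inside the platform's DMA-able physical address range.
	 */
	#define PADDR_IS_DMA_REACHABLE(pa)		\
		(dma_constraint.ucr_low <= (pa) &&	\
		 (pa) <= dma_constraint.ucr_high)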