diff options
author | Mark Kettenis <kettenis@cvs.openbsd.org> | 2022-02-05 09:37:07 +0000 |
---|---|---|
committer | Mark Kettenis <kettenis@cvs.openbsd.org> | 2022-02-05 09:37:07 +0000 |
commit | 1d66f92e8ab22d48aa6033e5ff3816ca3355b2e2 (patch) | |
tree | c3cc632d92e500133c1e73078cb6ffc0399aacee /sys/arch | |
parent | 77d9aba6737bc75aa4aaa223480925b1c228f608 (diff) |
Switch vmapbuf() and vunmapbuf() over to km_alloc(9) and km_free(9) like
we did for arm64 and powerpc64 some time ago.
ok guenther@
Diffstat (limited to 'sys/arch')
-rw-r--r-- | sys/arch/amd64/amd64/vm_machdep.c | 16 |
1 file changed, 11 insertions, 5 deletions
diff --git a/sys/arch/amd64/amd64/vm_machdep.c b/sys/arch/amd64/amd64/vm_machdep.c index 39bf02bdc5c..e8c7ac59f29 100644 --- a/sys/arch/amd64/amd64/vm_machdep.c +++ b/sys/arch/amd64/amd64/vm_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: vm_machdep.c,v 1.44 2021/05/16 06:20:28 jsg Exp $ */ +/* $OpenBSD: vm_machdep.c,v 1.45 2022/02/05 09:37:06 kettenis Exp $ */ /* $NetBSD: vm_machdep.c,v 1.1 2003/04/26 18:39:33 fvdl Exp $ */ /*- @@ -140,10 +140,15 @@ setguardpage(struct proc *p) pmap_update(pmap_kernel()); } +struct kmem_va_mode kv_physwait = { + .kv_map = &phys_map, + .kv_wait = 1, +}; + /* * Map a user I/O request into kernel virtual address space. * Note: the pages are already locked by uvm_vslock(), so we - * do not need to pass an access_type to pmap_enter(). + * do not need to pass an access_type to pmap_enter(). */ void vmapbuf(struct buf *bp, vsize_t len) @@ -156,13 +161,13 @@ vmapbuf(struct buf *bp, vsize_t len) faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data)); off = (vaddr_t)bp->b_data - faddr; len = round_page(off + len); - taddr= uvm_km_valloc_wait(phys_map, len); + taddr = (vaddr_t)km_alloc(len, &kv_physwait, &kp_none, &kd_waitok); bp->b_data = (caddr_t)(taddr + off); /* * The region is locked, so we expect that pmap_pte() will return * non-NULL. * XXX: unwise to expect this in a multithreaded environment. - * anything can happen to a pmap between the time we lock a + * anything can happen to a pmap between the time we lock a * region, release the pmap lock, and then relock it for * the pmap_extract(). * @@ -178,6 +183,7 @@ vmapbuf(struct buf *bp, vsize_t len) taddr += PAGE_SIZE; len -= PAGE_SIZE; } + pmap_update(pmap_kernel()); } /* @@ -195,7 +201,7 @@ vunmapbuf(struct buf *bp, vsize_t len) len = round_page(off + len); pmap_kremove(addr, len); pmap_update(pmap_kernel()); - uvm_km_free_wakeup(phys_map, addr, len); + km_free((void *)addr, len, &kv_physwait, &kp_none); bp->b_data = bp->b_saveaddr; bp->b_saveaddr = NULL; } |