author     Mark Kettenis <kettenis@cvs.openbsd.org>  2022-02-21 19:15:59 +0000
committer  Mark Kettenis <kettenis@cvs.openbsd.org>  2022-02-21 19:15:59 +0000
commit     21f4cec0e6817ba47598712224700157937be5b9
tree       7ed7a370e1a7736884b1121332822dd2ea2941ae /sys
parent     06c3d8da2f0a3de1a6a5d474d972058c56264115
Convert KVA allocation to km_alloc(9).
ok mpi@
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/arm/arm/pmap7.c       |  7
-rw-r--r--  sys/arch/arm/arm/vm_machdep.c  | 34
2 files changed, 22 insertions, 19 deletions
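
The patch converts bare KVA reservations from uvm_km_valloc()/uvm_km_free() to km_alloc(9)/km_free(9). As a rough sketch of the pmap7.c half of the change (the helper names are hypothetical, and L1_TABLE_SIZE is the arm L1 page-table size from the machine headers), reserving kernel virtual address space without physical backing looks roughly like this:

#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm_extern.h>
#include <machine/pte.h>	/* L1_TABLE_SIZE on arm */

/*
 * Hypothetical helpers mirroring the pmap7.c change: reserve bare kernel
 * virtual address space (no physical backing) and release it again.
 */
static vaddr_t
l1_kva_alloc(void)
{
	vaddr_t va;

	for (;;) {
		/* kv_any: any KVA; kp_none: no backing pages; kd_nowait:
		 * fail instead of sleeping if no KVA is available. */
		va = (vaddr_t)km_alloc(L1_TABLE_SIZE, &kv_any, &kp_none,
		    &kd_nowait);
		if (va != 0)
			break;
		uvm_wait("l1_kva_alloc");	/* wait for free memory, retry */
	}
	return va;
}

static void
l1_kva_free(vaddr_t va)
{
	/* the va/pa modes given to km_free() must match the km_alloc() call */
	km_free((void *)va, L1_TABLE_SIZE, &kv_any, &kp_none);
}

Note that km_free() has to be called with the same kmem_va_mode/kmem_pa_mode pair that was used for the allocation, which is why both hunks below pass &kv_any, &kp_none (or &kv_physwait, &kp_none) symmetrically.
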
diff --git a/sys/arch/arm/arm/pmap7.c b/sys/arch/arm/arm/pmap7.c
index ffad79e0b5e..c8844b51d09 100644
--- a/sys/arch/arm/arm/pmap7.c
+++ b/sys/arch/arm/arm/pmap7.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap7.c,v 1.62 2022/02/01 19:57:28 kettenis Exp $ */
+/* $OpenBSD: pmap7.c,v 1.63 2022/02/21 19:15:58 kettenis Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
@@ -625,7 +625,8 @@ printf("%s: %d\n", __func__, ++nl1);
/* Allocate a L1 page table */
for (;;) {
- va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
+ va = (vaddr_t)km_alloc(L1_TABLE_SIZE, &kv_any, &kp_none,
+ &kd_nowait);
if (va != 0)
break;
uvm_wait("alloc_l1_va");
@@ -686,7 +687,7 @@ pmap_free_l1(pmap_t pm)
uvm_pglistfree(&mlist);
/* free backing va */
- uvm_km_free(kernel_map, (vaddr_t)l1->l1_kva, L1_TABLE_SIZE);
+ km_free(l1->l1_kva, L1_TABLE_SIZE, &kv_any, &kp_none);
free(l1, M_VMPMAP, 0);
}
diff --git a/sys/arch/arm/arm/vm_machdep.c b/sys/arch/arm/arm/vm_machdep.c
index 7ae64366c3c..75e19c2d64a 100644
--- a/sys/arch/arm/arm/vm_machdep.c
+++ b/sys/arch/arm/arm/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.27 2021/05/16 06:20:28 jsg Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.28 2022/02/21 19:15:58 kettenis Exp $ */
/* $NetBSD: vm_machdep.c,v 1.31 2004/01/04 11:33:29 jdolecek Exp $ */
/*
@@ -135,6 +135,11 @@ cpu_exit(struct proc *p)
sched_exit(p);
}
+struct kmem_va_mode kv_physwait = {
+ .kv_map = &phys_map,
+ .kv_wait = 1,
+};
+
/*
* Map a user I/O request into kernel virtual address space.
* Note: the pages are already locked by uvm_vslock(), so we
@@ -146,26 +151,29 @@ vmapbuf(struct buf *bp, vsize_t len)
vaddr_t faddr, taddr, off;
paddr_t fpa;
-
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
-
faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
off = (vaddr_t)bp->b_data - faddr;
len = round_page(off + len);
- taddr = uvm_km_valloc_wait(phys_map, len);
+ taddr = (vaddr_t)km_alloc(len, &kv_physwait, &kp_none, &kd_waitok);
bp->b_data = (caddr_t)(taddr + off);
-
/*
* The region is locked, so we expect that pmap_pte() will return
* non-NULL.
+ * XXX: unwise to expect this in a multithreaded environment.
+ * anything can happen to a pmap between the time we lock a
+ * region, release the pmap lock, and then relock it for
+ * the pmap_extract().
+ *
+ * no need to flush TLB since we expect nothing to be mapped
+ * where we just allocated (TLB will be flushed when our
+ * mapping is removed).
*/
while (len) {
(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
faddr, &fpa);
- pmap_enter(pmap_kernel(), taddr, fpa,
- PROT_READ | PROT_WRITE,
- PROT_READ | PROT_WRITE | PMAP_WIRED);
+ pmap_kenter_pa(taddr, fpa, PROT_READ | PROT_WRITE);
faddr += PAGE_SIZE;
taddr += PAGE_SIZE;
len -= PAGE_SIZE;
@@ -183,18 +191,12 @@ vunmapbuf(struct buf *bp, vsize_t len)
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
-
- /*
- * Make sure the cache does not have dirty data for the
- * pages we had mapped.
- */
addr = trunc_page((vaddr_t)bp->b_data);
off = (vaddr_t)bp->b_data - addr;
len = round_page(off + len);
-
- pmap_remove(pmap_kernel(), addr, addr + len);
+ pmap_kremove(addr, len);
pmap_update(pmap_kernel());
- uvm_km_free_wakeup(phys_map, addr, len);
+ km_free((void *)addr, len, &kv_physwait, &kp_none);
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = NULL;
}
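
For vm_machdep.c the patch goes one step further: it introduces a phys_map-backed struct kmem_va_mode (kv_physwait) so km_alloc(9) can sleep with kd_waitok until address space becomes available, and it switches the transient buffer mapping to the unmanaged pmap_kenter_pa()/pmap_kremove() interface. A condensed sketch of that pattern, with hypothetical helper names around the kv_physwait definition taken from the patch:

#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm_extern.h>

/* KVA from phys_map; kv_wait = 1 lets km_alloc() sleep until space frees up */
struct kmem_va_mode kv_physwait = {
	.kv_map = &phys_map,
	.kv_wait = 1,
};

/* Hypothetical helper: map one wired user page at a fresh kernel VA. */
static vaddr_t
phys_map_page(pmap_t userpmap, vaddr_t uva)
{
	vaddr_t kva;
	paddr_t pa;

	/* kd_waitok: sleep until KVA in phys_map becomes available */
	kva = (vaddr_t)km_alloc(PAGE_SIZE, &kv_physwait, &kp_none, &kd_waitok);
	(void) pmap_extract(userpmap, trunc_page(uva), &pa);
	pmap_kenter_pa(kva, pa, PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());
	return kva;
}

/* Hypothetical helper: undo the mapping and return the KVA to phys_map. */
static void
phys_unmap_page(vaddr_t kva)
{
	pmap_kremove(kva, PAGE_SIZE);
	pmap_update(pmap_kernel());
	/* modes must match the km_alloc() call above */
	km_free((void *)kva, PAGE_SIZE, &kv_physwait, &kp_none);
}
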