summary | refs | log | tree | commit | diff
path: root/sys/uvm
diff options
context:
space:
mode:
author: Martin Pieuchot <mpi@cvs.openbsd.org> 2024-11-01 20:26:19 +0000
committer: Martin Pieuchot <mpi@cvs.openbsd.org> 2024-11-01 20:26:19 +0000
commit 8b888e6b8fba8f91bf27465970f5c209a57c333d (patch)
tree f27a959f691a1956df038f5ac51c24bbf02de436 /sys/uvm
parent 5e1c44b849972d37384e3a2769cdd8b64d8b356c (diff)
Retire uvm_km_zalloc().
ok jsg@
Diffstat (limited to 'sys/uvm')
-rw-r--r--  sys/uvm/uvm_extern.h  5
-rw-r--r--  sys/uvm/uvm_km.c      82
2 files changed, 2 insertions(+), 85 deletions(-)
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 0191372ee11..6c527d33762 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.178 2024/10/08 02:29:10 jsg Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.179 2024/11/01 20:26:18 mpi Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -286,9 +286,6 @@ int uvm_io(vm_map_t, struct uio *, int);
#define UVM_IO_FIXPROT 0x01
-#ifdef __i386__
-vaddr_t uvm_km_zalloc(vm_map_t, vsize_t);
-#endif
void uvm_km_free(vm_map_t, vaddr_t, vsize_t);
vaddr_t uvm_km_kmemalloc_pla(struct vm_map *,
struct uvm_object *, vsize_t, vsize_t, int,
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index b2c4ba882e9..33a3beb4e81 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.154 2024/08/24 10:46:43 mpi Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.155 2024/11/01 20:26:18 mpi Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
@@ -433,86 +433,6 @@ uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size)
uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}
-#ifdef __i386__
-/*
- * uvm_km_zalloc: allocate wired down memory in the kernel map.
- *
- * => we can sleep if needed
- */
-vaddr_t
-uvm_km_zalloc(struct vm_map *map, vsize_t size)
-{
- vaddr_t kva, loopva;
- voff_t offset;
- struct vm_page *pg;
-
- KASSERT(vm_map_pmap(map) == pmap_kernel());
-
- size = round_page(size);
- kva = vm_map_min(map); /* hint */
-
- /* allocate some virtual space */
- if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
- UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(PROT_READ | PROT_WRITE,
- PROT_READ | PROT_WRITE | PROT_EXEC,
- MAP_INHERIT_NONE, MADV_RANDOM, 0)) != 0)) {
- return 0;
- }
-
- /* recover object offset from virtual address */
- offset = kva - vm_map_min(kernel_map);
-
- /* now allocate the memory. we must be careful about released pages. */
- loopva = kva;
- while (size) {
- rw_enter(uvm.kernel_object->vmobjlock, RW_WRITE);
- /* allocate ram */
- pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
- if (pg) {
- atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
- UVM_PAGE_OWN(pg, NULL);
- }
- rw_exit(uvm.kernel_object->vmobjlock);
- if (__predict_false(pg == NULL)) {
- if (curproc == uvm.pagedaemon_proc) {
- /*
- * It is unfeasible for the page daemon to
- * sleep for memory, so free what we have
- * allocated and fail.
- */
- uvm_unmap(map, kva, loopva - kva);
- return (0);
- } else {
- uvm_wait("km_zallocw"); /* wait for memory */
- continue;
- }
- }
-
- /*
- * map it in; note we're never called with an intrsafe
- * object, so we always use regular old pmap_enter().
- */
- pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
- PROT_READ | PROT_WRITE,
- PROT_READ | PROT_WRITE | PMAP_WIRED);
-
- loopva += PAGE_SIZE;
- offset += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- pmap_update(map->pmap);
-
- /*
- * zero on request (note that "size" is now zero due to the above loop
- * so we need to subtract kva from loopva to reconstruct the size).
- */
- memset((caddr_t)kva, 0, loopva - kva);
-
- return kva;
-}
-#endif
-
#if defined(__HAVE_PMAP_DIRECT)
/*
* uvm_km_page allocator, __HAVE_PMAP_DIRECT arch