diff options
author:    Mark Kettenis <kettenis@cvs.openbsd.org>  2024-10-21 18:27:35 +0000
committer: Mark Kettenis <kettenis@cvs.openbsd.org>  2024-10-21 18:27:35 +0000
commit:    aae803f7d9290f0c52ca254b2165359cc4ea951c (patch)
tree:      dfe4caf43dc932ae02473629f8920ecae1b7a36b /sys
parent:    2d27eb7a3635dbb66535856633ebd13bec608552 (diff)
We have not been swapping out kernel stacks since forever. So just
allocate the uarea with zeroed pages using km_alloc(9). Adjust the amd64
code that creates a guard page at the top of the kernel stack to use
pmap_kremove(9) instead of pmap_remove(9) to reflect that the uarea no
longer uses "managed" pages.
ok mpi@
Diffstat (limited to 'sys')
 sys/arch/amd64/amd64/vm_machdep.c |  5 ++---
 sys/uvm/uvm_glue.c                | 18 ++++++++---------
 2 files changed, 10 insertions(+), 13 deletions(-)
diff --git a/sys/arch/amd64/amd64/vm_machdep.c b/sys/arch/amd64/amd64/vm_machdep.c
index 8be22524d5d..5275969ed39 100644
--- a/sys/arch/amd64/amd64/vm_machdep.c
+++ b/sys/arch/amd64/amd64/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.47 2023/04/11 00:45:07 jsg Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.48 2024/10/21 18:27:34 kettenis Exp $ */
 /* $NetBSD: vm_machdep.c,v 1.1 2003/04/26 18:39:33 fvdl Exp $ */

 /*-
@@ -135,8 +135,7 @@ cpu_exit(struct proc *p)
 void
 setguardpage(struct proc *p)
 {
-	pmap_remove(pmap_kernel(), (vaddr_t)p->p_addr + PAGE_SIZE,
-	    (vaddr_t)p->p_addr + 2 * PAGE_SIZE);
+	pmap_kremove((vaddr_t)p->p_addr + PAGE_SIZE, PAGE_SIZE);
 	pmap_update(pmap_kernel());
 }

diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index 2d1a6b08fe4..603bdab582e 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_glue.c,v 1.85 2024/10/08 02:29:10 jsg Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.86 2024/10/21 18:27:34 kettenis Exp $ */
 /* $NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $ */

 /*
@@ -257,20 +257,18 @@ uvm_vsunlock_device(struct proc *p, void *addr, size_t len, void *map)
 	uvm_km_free(kernel_map, kva, sz);
 }

+const struct kmem_va_mode kv_uarea = {
+	.kv_map = &kernel_map,
+	.kv_align = USPACE_ALIGN
+};
+
 /*
  * uvm_uarea_alloc: allocate the u-area for a new thread
  */
 vaddr_t
 uvm_uarea_alloc(void)
 {
-	vaddr_t uaddr;
-
-	uaddr = uvm_km_kmemalloc_pla(kernel_map, uvm.kernel_object, USPACE,
-	    USPACE_ALIGN, UVM_KMF_ZERO,
-	    no_constraint.ucr_low, no_constraint.ucr_high,
-	    0, 0, USPACE/PAGE_SIZE);
-
-	return (uaddr);
+	return (vaddr_t)km_alloc(USPACE, &kv_uarea, &kp_zero, &kd_waitok);
 }

 /*
@@ -282,7 +280,7 @@ uvm_uarea_alloc(void)
 void
 uvm_uarea_free(struct proc *p)
 {
-	uvm_km_free(kernel_map, (vaddr_t)p->p_addr, USPACE);
+	km_free(p->p_addr, USPACE, &kv_uarea, &kp_zero);
 	p->p_addr = NULL;
 }