diff options
author | Martin Pieuchot <mpi@cvs.openbsd.org> | 2022-06-28 19:39:55 +0000 |
---|---|---|
committer | Martin Pieuchot <mpi@cvs.openbsd.org> | 2022-06-28 19:39:55 +0000 |
commit | 7f8092e653ca6580b18f9a03c66ae27b1b6be323 (patch) | |
tree | 872677a6530747e7638d96fd04081b76b1cf34ba | |
parent | e47e30f367bcf2c6defb282089207c7451eb4322 (diff) |
Make sure uvm_swap_get() always sleeps and does not return an error.
If no memory was immediately available to decrypt (bounce) a page from swap,
an error was returned to the fault handler, which would result in processes
dying when a lot of memory pressure was applied to the system.
Note that reading from swap is always done synchronously.
ok beck@, kettenis@
-rw-r--r-- | sys/uvm/uvm_swap.c | 20 |
1 files changed, 12 insertions, 8 deletions
diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c index 4915010b8fd..609c54c5034 100644 --- a/sys/uvm/uvm_swap.c +++ b/sys/uvm/uvm_swap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_swap.c,v 1.157 2022/06/28 19:19:34 mpi Exp $ */ +/* $OpenBSD: uvm_swap.c,v 1.158 2022/06/28 19:39:54 mpi Exp $ */ /* $NetBSD: uvm_swap.c,v 1.40 2000/11/17 11:39:39 mrg Exp $ */ /* @@ -239,7 +239,7 @@ void sw_reg_start(struct swapdev *); int uvm_swap_io(struct vm_page **, int, int, int); void swapmount(void); -int uvm_swap_allocpages(struct vm_page **, int); +int uvm_swap_allocpages(struct vm_page **, int, int); #ifdef UVM_SWAP_ENCRYPT /* for swap encrypt */ @@ -288,7 +288,7 @@ uvm_swap_init(void) "swp vnd", NULL); /* allocate pages for OOM situations. */ - error = uvm_swap_allocpages(oompps, SWCLUSTPAGES); + error = uvm_swap_allocpages(oompps, SWCLUSTPAGES, UVM_PLA_NOWAIT); KASSERT(error == 0); /* Setup the initial swap partition */ @@ -334,7 +334,7 @@ uvm_swap_initcrypt(struct swapdev *sdp, int npages) #endif /* UVM_SWAP_ENCRYPT */ int -uvm_swap_allocpages(struct vm_page **pps, int npages) +uvm_swap_allocpages(struct vm_page **pps, int npages, int flags) { struct pglist pgl; int error, i; @@ -344,7 +344,7 @@ uvm_swap_allocpages(struct vm_page **pps, int npages) TAILQ_INIT(&pgl); again: error = uvm_pglistalloc(npages * PAGE_SIZE, dma_constraint.ucr_low, - dma_constraint.ucr_high, 0, 0, &pgl, npages, UVM_PLA_NOWAIT); + dma_constraint.ucr_high, 0, 0, &pgl, npages, flags); if (error && (curproc == uvm.pagedaemon_proc)) { mtx_enter(&oommtx); if (oom) { @@ -394,6 +394,7 @@ uvm_swap_freepages(struct vm_page **pps, int npages) for (i = 0; i < npages; i++) uvm_pagefree(pps[i]); uvm_unlock_pageq(); + } #ifdef UVM_SWAP_ENCRYPT @@ -1707,13 +1708,16 @@ uvm_swap_io(struct vm_page **pps, int startslot, int npages, int flags) } if (bounce) { - int swmapflags; + int swmapflags, plaflags; /* We always need write access. 
*/ swmapflags = UVMPAGER_MAPIN_READ; - if (!async) + plaflags = UVM_PLA_NOWAIT; + if (!async) { swmapflags |= UVMPAGER_MAPIN_WAITOK; - if (uvm_swap_allocpages(tpps, npages)) { + plaflags = UVM_PLA_WAITOK; + } + if (uvm_swap_allocpages(tpps, npages, plaflags)) { pool_put(&bufpool, bp); uvm_pagermapout(kva, npages); return (VM_PAGER_AGAIN); |