author	Martin Pieuchot <mpi@cvs.openbsd.org>	2024-11-07 10:39:16 +0000
committer	Martin Pieuchot <mpi@cvs.openbsd.org>	2024-11-07 10:39:16 +0000
commit	a636e52f9ba3df8b94877d7fc4c2d2ee6c95c015 (patch)
tree	9d18211fd94dfa606e5ce2acc4ae49d75fd8c7e5 /sys/uvm
parent	44784db89cd1e5abcf915441c83f3f6e19b40f02 (diff)
Use a static request to notify failed nowait allocations.
As a side effect, the page daemon now considers releasing inactive pages when a nowait allocation for low pages fails.

Note that the hardcoded number of 16 pages (a 64K cluster on 4K archs), which corresponds to what the buffer cache currently wants, is left with the original XXX.

ok miod@
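The pattern replacing the volatile flag is small enough to illustrate outside the kernel. The sketch below is not the UVM code itself: pma_request, notify_nowait_failure and daemon_round are illustrative stand-ins for struct uvm_pmalloc, the failure path in uvm_pmemrange.c and the page daemon loop, and the 16-page size mirrors the XXX constant noted above. It only shows the idea of queuing a single static request at most once instead of setting a bare flag.

/*
 * Minimal userland sketch (illustrative names, not the kernel code):
 * a statically allocated request is linked onto the daemon's queue at
 * most once when a nowait allocation fails; the daemon then handles it
 * like any other pending allocation request.
 */
#include <sys/queue.h>
#include <stddef.h>
#include <stdio.h>

#define PMA_LINKED	0x01

struct pma_request {
	int			 flags;
	size_t			 size;
	TAILQ_ENTRY(pma_request) entry;
};

TAILQ_HEAD(pma_queue, pma_request);

static struct pma_queue pending = TAILQ_HEAD_INITIALIZER(pending);
/* One static request; 16 pages of 4K, matching the XXX constant above. */
static struct pma_request nowait_req = { .size = 16 * 4096 };

/* Failure path: queue the static request, but only if not already queued. */
static void
notify_nowait_failure(void)
{
	if (!(nowait_req.flags & PMA_LINKED)) {
		nowait_req.flags |= PMA_LINKED;
		TAILQ_INSERT_TAIL(&pending, &nowait_req, entry);
		/* in the kernel, wakeup() of the page daemon goes here */
	}
}

/* Daemon side: drain pending requests and unlink them again. */
static void
daemon_round(void)
{
	struct pma_request *req;

	while ((req = TAILQ_FIRST(&pending)) != NULL) {
		TAILQ_REMOVE(&pending, req, entry);
		req->flags &= ~PMA_LINKED;
		printf("reclaiming for a request of %zu bytes\n", req->size);
	}
}

int
main(void)
{
	notify_nowait_failure();
	notify_nowait_failure();	/* second failure: already linked, no-op */
	daemon_round();
	return 0;
}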
Diffstat (limited to 'sys/uvm')
-rw-r--r--	sys/uvm/uvm_pdaemon.c	27
-rw-r--r--	sys/uvm/uvm_pmemrange.c	13
2 files changed, 21 insertions, 19 deletions
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index 06359c190eb..6aedc5faa53 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.125 2024/11/07 10:31:11 mpi Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.126 2024/11/07 10:39:15 mpi Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */
/*
@@ -194,7 +194,7 @@ uvmpd_tune(void)
* recover at least some memory in the most restricted region (assumed
* to be dma_constraint).
*/
-volatile int uvm_nowait_failed;
+struct uvm_pmalloc nowait_pma;
static inline int
uvmpd_pma_done(struct uvm_pmalloc *pma)
@@ -219,11 +219,19 @@ uvm_pageout(void *arg)
(void) spl0();
uvmpd_tune();
+ /*
+ * XXX realistically, this is what our nowait callers probably
+ * care about.
+ */
+ nowait_pma.pm_constraint = dma_constraint;
+ nowait_pma.pm_size = (16 << PAGE_SHIFT); /* XXX */
+ nowait_pma.pm_flags = 0;
+
for (;;) {
long size;
uvm_lock_fpageq();
- if (!uvm_nowait_failed && TAILQ_EMPTY(&uvm.pmr_control.allocs)) {
+ if (TAILQ_EMPTY(&uvm.pmr_control.allocs)) {
msleep_nsec(&uvm.pagedaemon, &uvm.fpageqlock, PVM,
"pgdaemon", INFSLP);
uvmexp.pdwoke++;
@@ -233,15 +241,7 @@ uvm_pageout(void *arg)
pma->pm_flags |= UVM_PMA_BUSY;
constraint = pma->pm_constraint;
} else {
- if (uvm_nowait_failed) {
- /*
- * XXX realistically, this is what our
- * nowait callers probably care about
- */
- constraint = dma_constraint;
- uvm_nowait_failed = 0;
- } else
- constraint = no_constraint;
+ constraint = no_constraint;
}
/* How many pages do we need to free during this round? */
shortage = uvmexp.freetarg - uvmexp.free + BUFPAGES_DEFICIT;
@@ -303,8 +303,7 @@ uvm_pageout(void *arg)
pma->pm_flags &= ~UVM_PMA_BUSY;
if (pma->pm_flags & UVM_PMA_FREED) {
pma->pm_flags &= ~UVM_PMA_LINKED;
- TAILQ_REMOVE(&uvm.pmr_control.allocs, pma,
- pmq);
+ TAILQ_REMOVE(&uvm.pmr_control.allocs, pma, pmq);
wakeup(pma);
}
}
diff --git a/sys/uvm/uvm_pmemrange.c b/sys/uvm/uvm_pmemrange.c
index 287308db735..dc7d360d97c 100644
--- a/sys/uvm/uvm_pmemrange.c
+++ b/sys/uvm/uvm_pmemrange.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pmemrange.c,v 1.73 2024/11/06 10:41:12 mpi Exp $ */
+/* $OpenBSD: uvm_pmemrange.c,v 1.74 2024/11/07 10:39:15 mpi Exp $ */
/*
* Copyright (c) 2024 Martin Pieuchot <mpi@openbsd.org>
@@ -841,7 +841,7 @@ uvm_pmr_extract_range(struct uvm_pmemrange *pmr, struct vm_page *pg,
* recover at least some memory in the most restricted region (assumed
* to be dma_constraint).
*/
-extern volatile int uvm_nowait_failed;
+extern struct uvm_pmalloc nowait_pma;
/*
* Acquire a number of pages.
@@ -1190,9 +1190,12 @@ fail:
flags & UVM_PLA_FAILOK) == 0)
goto retry;
KASSERT(flags & UVM_PLA_FAILOK);
- } else {
- if (!(flags & UVM_PLA_NOWAKE)) {
- uvm_nowait_failed = 1;
+ } else if (!(flags & UVM_PLA_NOWAKE)) {
+ struct uvm_pmalloc *pma = &nowait_pma;
+
+ if (!(nowait_pma.pm_flags & UVM_PMA_LINKED)) {
+ nowait_pma.pm_flags = UVM_PMA_LINKED;
+ TAILQ_INSERT_TAIL(&uvm.pmr_control.allocs, pma, pmq);
wakeup(&uvm.pagedaemon);
}
}