From 6e54e233a46b586bad068b5ea60bf441cc49198c Mon Sep 17 00:00:00 2001 From: Martin Pieuchot Date: Mon, 25 Nov 2024 13:37:50 +0000 Subject: Push the KERNEL_LOCK() down in the aiodone_daemon(). Improve responsiveness during swapping for MP machines without bouncing when the page daemon is busy writing a lot of clusters without releasing the KERNEL_LOCK() and without allocating. This currently requires vm.swapencrypt.enable=0 and a dma_constraint covering the whole address range. Tested by sthen@ and miod@. ok claudio@, tb@ --- sys/uvm/uvm_pdaemon.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'sys') diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c index 6b0682bdfa4..4e5228e7be3 100644 --- a/sys/uvm/uvm_pdaemon.c +++ b/sys/uvm/uvm_pdaemon.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_pdaemon.c,v 1.131 2024/11/25 13:06:25 mpi Exp $ */ +/* $OpenBSD: uvm_pdaemon.c,v 1.132 2024/11/25 13:37:49 mpi Exp $ */ /* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */ /* @@ -343,6 +343,7 @@ uvm_aiodone_daemon(void *arg) struct buf *bp, *nbp; uvm.aiodoned_proc = curproc; + KERNEL_UNLOCK(); for (;;) { /* @@ -359,6 +360,7 @@ uvm_aiodone_daemon(void *arg) /* process each i/o that's done. */ npages = 0; + KERNEL_LOCK(); while (bp != NULL) { if (bp->b_flags & B_PDAEMON) { npages += bp->b_bufsize >> PAGE_SHIFT; @@ -371,6 +373,7 @@ uvm_aiodone_daemon(void *arg) sched_pause(yield); } + KERNEL_UNLOCK(); uvm_lock_fpageq(); atomic_sub_int(&uvmexp.paging, npages); -- cgit v1.2.3