author      Miod Vallat <miod@cvs.openbsd.org>    2022-09-10 20:35:30 +0000
committer   Miod Vallat <miod@cvs.openbsd.org>    2022-09-10 20:35:30 +0000
commit      b2eb2a5e4c4d57cbeea65dabf3d241852d62561f (patch)
tree        bfaf8e87034a4d90ec5ebff8be4519357b94e273 /sys
parent      377c83dc5717ad0ad4c69daa289e37335389cded (diff)
Remove pmap_collect() where it is a no-op; define __HAVE_PMAP_COLLECT otherwise.
Use that define to shunt uvm_swapout_threads(), which is a no-op when pmap_collect() does nothing. ok mpi@
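The change follows the usual machine-dependent feature-macro pattern: an architecture whose pmap_collect() actually reclaims resources advertises it by defining __HAVE_PMAP_COLLECT in its machine headers, and the machine-independent UVM code compiles the process swap-out path only when that macro is present. The standalone sketch below illustrates the pattern; pmap_collect(), uvm_swapout_threads() and uvmpd_scan() are simplified stand-ins written for this illustration, not the real kernel sources.

/*
 * Illustrative sketch of the __HAVE_PMAP_COLLECT pattern (simplified stubs,
 * not the actual kernel code).  Compile with -D__HAVE_PMAP_COLLECT to get
 * the swap-out path, or without it to compile that path away entirely.
 */
#include <stdio.h>

#ifdef __HAVE_PMAP_COLLECT
/* MD side: only architectures with a useful pmap_collect() provide one. */
static void
pmap_collect(void *pmap)
{
	/* A real implementation would release unused page-table pages here. */
	printf("pmap_collect(%p)\n", pmap);
}

/* MI side (cf. uvm_glue.c): the swap-out helper only exists when it can help. */
static void
uvm_swapout_threads(void)
{
	void *pm = (void *)0x1000;	/* placeholder pmap for the sketch */

	pmap_collect(pm);
}
#endif /* __HAVE_PMAP_COLLECT */

/* MI side (cf. uvm_pdaemon.c): the call site is guarded the same way. */
static void
uvmpd_scan(void)
{
#ifdef __HAVE_PMAP_COLLECT
	uvm_swapout_threads();
#endif
	printf("page daemon scan done\n");
}

int
main(void)
{
	uvmpd_scan();
	return 0;
}

Tying the swap-out code to __HAVE_PMAP_COLLECT turns the older per-architecture __SWAP_BROKEN opt-out (removed from hppa below) into an opt-in: the code only exists on architectures where running it can actually free memory.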
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/alpha/include/pmap.h           4
-rw-r--r--  sys/arch/amd64/amd64/pmap.c             7
-rw-r--r--  sys/arch/arm/arm/pmap7.c               17
-rw-r--r--  sys/arch/arm64/arm64/pmap.c            20
-rw-r--r--  sys/arch/hppa/hppa/pmap.c               9
-rw-r--r--  sys/arch/hppa/include/param.h           3
-rw-r--r--  sys/arch/m88k/include/pmap.h            3
-rw-r--r--  sys/arch/mips64/include/pmap.h          4
-rw-r--r--  sys/arch/powerpc/powerpc/pmap.c        20
-rw-r--r--  sys/arch/powerpc64/powerpc64/pmap.c     7
-rw-r--r--  sys/arch/riscv64/riscv64/pmap.c        20
-rw-r--r--  sys/arch/sh/include/pmap.h              3
-rw-r--r--  sys/arch/sparc64/include/pmap.h         2
-rw-r--r--  sys/arch/sparc64/sparc64/pmap.c         4
-rw-r--r--  sys/uvm/uvm_glue.c                      6
-rw-r--r--  sys/uvm/uvm_pdaemon.c                   4
-rw-r--r--  sys/uvm/uvm_pmap.h                      4
17 files changed, 32 insertions, 105 deletions
diff --git a/sys/arch/alpha/include/pmap.h b/sys/arch/alpha/include/pmap.h
index d611156bbc3..cfa4cc2400f 100644
--- a/sys/arch/alpha/include/pmap.h
+++ b/sys/arch/alpha/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.40 2016/04/20 05:24:18 landry Exp $ */
+/* $OpenBSD: pmap.h,v 1.41 2022/09/10 20:35:28 miod Exp $ */
/* $NetBSD: pmap.h,v 1.37 2000/11/19 03:16:35 thorpej Exp $ */
/*-
@@ -197,6 +197,8 @@ extern pt_entry_t *VPT; /* Virtual Page Table */
paddr_t vtophys(vaddr_t);
+#define __HAVE_PMAP_COLLECT
+
/* Machine-specific functions. */
void pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids);
int pmap_emulate_reference(struct proc *p, vaddr_t v, int user, int type);
diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index ad35db3a1ab..0078b47b6e4 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.153 2022/06/30 13:51:24 mlarkin Exp $ */
+/* $OpenBSD: pmap.c,v 1.154 2022/09/10 20:35:28 miod Exp $ */
/* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
/*
@@ -2206,6 +2206,7 @@ pmap_unwire(struct pmap *pmap, vaddr_t va)
#endif
}
+#if 0
/*
* pmap_collect: free resources held by a pmap
*
@@ -2221,10 +2222,10 @@ pmap_collect(struct pmap *pmap)
* for its entire address space.
*/
-/* pmap_do_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS,
+ pmap_do_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS,
PMAP_REMOVE_SKIPWIRED);
-*/
}
+#endif
/*
* pmap_copy: copy mappings from one pmap to another
diff --git a/sys/arch/arm/arm/pmap7.c b/sys/arch/arm/arm/pmap7.c
index c8844b51d09..d4eb442a7dc 100644
--- a/sys/arch/arm/arm/pmap7.c
+++ b/sys/arch/arm/arm/pmap7.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap7.c,v 1.63 2022/02/21 19:15:58 kettenis Exp $ */
+/* $OpenBSD: pmap7.c,v 1.64 2022/09/10 20:35:28 miod Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
@@ -1743,21 +1743,6 @@ dab_access(trapframe_t *tf, u_int fsr, u_int far, struct proc *p)
}
/*
- * pmap_collect: free resources held by a pmap
- *
- * => optional function.
- * => called when a process is swapped out to free memory.
- */
-void
-pmap_collect(pmap_t pm)
-{
- /*
- * Nothing to do.
- * We don't even need to free-up the process' L1.
- */
-}
-
-/*
* Routine: pmap_proc_iflush
*
* Function:
diff --git a/sys/arch/arm64/arm64/pmap.c b/sys/arch/arm64/arm64/pmap.c
index 0fe4b83b381..49a6308319e 100644
--- a/sys/arch/arm64/arm64/pmap.c
+++ b/sys/arch/arm64/arm64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.84 2022/01/10 09:20:27 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.85 2022/09/10 20:35:28 miod Exp $ */
/*
* Copyright (c) 2008-2009,2014-2016 Dale Rahn <drahn@dalerahn.com>
*
@@ -856,24 +856,6 @@ pmap_fill_pte(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
}
/*
- * Garbage collects the physical map system for pages which are
- * no longer used. Success need not be guaranteed -- that is, there
- * may well be pages which are not referenced, but others may be collected
- * Called by the pageout daemon when pages are scarce.
- */
-void
-pmap_collect(pmap_t pm)
-{
- /* This could return unused v->p table layers which
- * are empty.
- * could malicious programs allocate memory and eat
- * these wired pages? These are allocated via pool.
- * Are there pool functions which could be called
- * to lower the pool usage here?
- */
-}
-
-/*
* Fill the given physical page with zeros.
*/
void
diff --git a/sys/arch/hppa/hppa/pmap.c b/sys/arch/hppa/hppa/pmap.c
index b1dbcf437e7..657dfffa362 100644
--- a/sys/arch/hppa/hppa/pmap.c
+++ b/sys/arch/hppa/hppa/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.177 2021/09/14 16:16:51 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.178 2022/09/10 20:35:28 miod Exp $ */
/*
* Copyright (c) 1998-2004 Michael Shalayeff
@@ -734,13 +734,6 @@ pmap_reference(struct pmap *pmap)
atomic_inc_int(&pmap->pm_obj.uo_refs);
}
-void
-pmap_collect(struct pmap *pmap)
-{
- DPRINTF(PDB_FOLLOW|PDB_PMAP, ("pmap_collect(%p)\n", pmap));
- /* nothing yet */
-}
-
int
pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
diff --git a/sys/arch/hppa/include/param.h b/sys/arch/hppa/include/param.h
index c53885bb7c4..355b349c6fd 100644
--- a/sys/arch/hppa/include/param.h
+++ b/sys/arch/hppa/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.47 2018/09/14 13:58:20 claudio Exp $ */
+/* $OpenBSD: param.h,v 1.48 2022/09/10 20:35:28 miod Exp $ */
/*
* Copyright (c) 1988-1994, The University of Utah and
@@ -66,6 +66,5 @@
#endif /* _KERNEL */
#define MACHINE_STACK_GROWS_UP 1 /* stack grows to higher addresses */
-#define __SWAP_BROKEN
#endif /* _MACHINE_PARAM_H_ */
diff --git a/sys/arch/m88k/include/pmap.h b/sys/arch/m88k/include/pmap.h
index f7b038e72cf..3f35640563a 100644
--- a/sys/arch/m88k/include/pmap.h
+++ b/sys/arch/m88k/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.26 2015/07/25 20:45:05 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.27 2022/09/10 20:35:28 miod Exp $ */
/*
* Mach Operating System
* Copyright (c) 1991 Carnegie Mellon University
@@ -80,6 +80,7 @@ int pmap_translation_info(pmap_t, vaddr_t, paddr_t *, uint32_t *);
#define pmap_unmap_direct(va) PHYS_TO_VM_PAGE((paddr_t)va)
#define __HAVE_PMAP_DIRECT
#define PMAP_STEAL_MEMORY
+#define __HAVE_PMAP_COLLECT
#endif /* _KERNEL */
diff --git a/sys/arch/mips64/include/pmap.h b/sys/arch/mips64/include/pmap.h
index c58fa74542e..ea6cb7252e6 100644
--- a/sys/arch/mips64/include/pmap.h
+++ b/sys/arch/mips64/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.49 2021/05/01 16:11:11 visa Exp $ */
+/* $OpenBSD: pmap.h,v 1.50 2022/09/10 20:35:28 miod Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@@ -170,6 +170,8 @@ void pmap_page_cache(vm_page_t, u_int);
vaddr_t pmap_map_direct(vm_page_t);
vm_page_t pmap_unmap_direct(vaddr_t);
+#define __HAVE_PMAP_COLLECT
+
/*
* MD flags to pmap_enter:
*/
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index fae4c8c20cf..f27adad680a 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.176 2022/02/07 23:20:09 gkoehler Exp $ */
+/* $OpenBSD: pmap.c,v 1.177 2022/09/10 20:35:28 miod Exp $ */
/*
* Copyright (c) 2015 Martin Pieuchot
@@ -1052,24 +1052,6 @@ pmap_clear_attrs(struct vm_page *pg, u_int flagbit)
}
/*
- * Garbage collects the physical map system for pages which are
- * no longer used. Success need not be guaranteed -- that is, there
- * may well be pages which are not referenced, but others may be collected
- * Called by the pageout daemon when pages are scarce.
- */
-void
-pmap_collect(pmap_t pm)
-{
- /* This could return unused v->p table layers which
- * are empty.
- * could malicious programs allocate memory and eat
- * these wired pages? These are allocated via pool.
- * Are there pool functions which could be called
- * to lower the pool usage here?
- */
-}
-
-/*
* Fill the given physical page with zeros.
*/
void
diff --git a/sys/arch/powerpc64/powerpc64/pmap.c b/sys/arch/powerpc64/powerpc64/pmap.c
index 9d19c54295a..2fc2a10b378 100644
--- a/sys/arch/powerpc64/powerpc64/pmap.c
+++ b/sys/arch/powerpc64/powerpc64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.57 2021/10/12 18:06:15 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.58 2022/09/10 20:35:28 miod Exp $ */
/*
* Copyright (c) 2015 Martin Pieuchot
@@ -1465,11 +1465,6 @@ pmap_unwire(pmap_t pm, vaddr_t va)
}
void
-pmap_collect(pmap_t pm)
-{
-}
-
-void
pmap_zero_page(struct vm_page *pg)
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
diff --git a/sys/arch/riscv64/riscv64/pmap.c b/sys/arch/riscv64/riscv64/pmap.c
index a1dcc203222..d0413e01744 100644
--- a/sys/arch/riscv64/riscv64/pmap.c
+++ b/sys/arch/riscv64/riscv64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.22 2022/08/29 02:01:18 jsg Exp $ */
+/* $OpenBSD: pmap.c,v 1.23 2022/09/10 20:35:29 miod Exp $ */
/*
* Copyright (c) 2019-2020 Brian Bamsch <bbamsch@google.com>
@@ -761,24 +761,6 @@ pmap_fill_pte(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
}
/*
- * Garbage collects the physical map system for pages which are
- * no longer used. Success need not be guaranteed -- that is, there
- * may well be pages which are not referenced, but others may be collected
- * Called by the pageout daemon when pages are scarce.
- */
-void
-pmap_collect(pmap_t pm)
-{
- /* This could return unused v->p table layers which
- * are empty.
- * could malicious programs allocate memory and eat
- * these wired pages? These are allocated via pool.
- * Are there pool functions which could be called
- * to lower the pool usage here?
- */
-}
-
-/*
* Fill the given physical page with zeros.
*/
void
diff --git a/sys/arch/sh/include/pmap.h b/sys/arch/sh/include/pmap.h
index 06b1c48236c..dff64bce05c 100644
--- a/sys/arch/sh/include/pmap.h
+++ b/sys/arch/sh/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.14 2015/02/15 21:34:33 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.15 2022/09/10 20:35:29 miod Exp $ */
/* $NetBSD: pmap.h,v 1.28 2006/04/10 23:12:11 uwe Exp $ */
/*-
@@ -64,7 +64,6 @@ void pmap_bootstrap(void);
#define pmap_deactivate(pmap) do { /* nothing */ } while (0)
#define pmap_update(pmap) do { /* nothing */ } while (0)
#define pmap_copy(dp,sp,d,l,s) do { /* nothing */ } while (0)
-#define pmap_collect(pmap) do { /* nothing */ } while (0)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
diff --git a/sys/arch/sparc64/include/pmap.h b/sys/arch/sparc64/include/pmap.h
index c07d8c46a8a..3941383f91c 100644
--- a/sys/arch/sparc64/include/pmap.h
+++ b/sys/arch/sparc64/include/pmap.h
@@ -177,6 +177,8 @@ void pmap_bootstrap(u_long, u_long, u_int, u_int);
#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
+#define __HAVE_PMAP_COLLECT
+
/* SPARC specific? */
void pmap_redzone(void);
int pmap_dumpsize(void);
diff --git a/sys/arch/sparc64/sparc64/pmap.c b/sys/arch/sparc64/sparc64/pmap.c
index 5e5b80dd2d0..3800557252a 100644
--- a/sys/arch/sparc64/sparc64/pmap.c
+++ b/sys/arch/sparc64/sparc64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.105 2022/09/08 17:44:48 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.106 2022/09/10 20:35:29 miod Exp $ */
/* $NetBSD: pmap.c,v 1.107 2001/08/31 16:47:41 eeh Exp $ */
/*
*
@@ -1546,7 +1546,6 @@ pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
void
pmap_collect(struct pmap *pm)
{
-#if 1
int i, j, k, n, m, s;
paddr_t *pdir, *ptbl;
/* This is a good place to scan the pmaps for page tables with
@@ -1584,7 +1583,6 @@ pmap_collect(struct pmap *pm)
}
}
splx(s);
-#endif
}
void
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index c37b24720c5..9c45bfdb767 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_glue.c,v 1.83 2022/03/12 08:11:07 mpi Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.84 2022/09/10 20:35:29 miod Exp $ */
/* $NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $ */
/*
@@ -321,6 +321,8 @@ uvm_init_limits(struct plimit *limit0)
limit0->pl_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}
+#ifdef __HAVE_PMAP_COLLECT
+
#ifdef DEBUG
int enableswap = 1;
int swapdebug = 0;
@@ -414,6 +416,8 @@ next_process: ;
}
}
+#endif /* __HAVE_PMAP_COLLECT */
+
/*
* uvm_atopg: convert KVAs back to their page structures.
*/
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index 350f95edb6b..eeee53330cc 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.104 2022/08/31 09:26:04 mpi Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.105 2022/09/10 20:35:29 miod Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */
/*
@@ -890,7 +890,7 @@ uvmpd_scan(struct uvm_pmalloc *pma)
*/
free = uvmexp.free - BUFPAGES_DEFICIT;
-#ifndef __SWAP_BROKEN
+#ifdef __HAVE_PMAP_COLLECT
/*
* swap out some processes if we are below our free target.
* we need to unlock the page queues for this.
diff --git a/sys/uvm/uvm_pmap.h b/sys/uvm/uvm_pmap.h
index e971573c5f1..88bcc0d6fdc 100644
--- a/sys/uvm/uvm_pmap.h
+++ b/sys/uvm/uvm_pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pmap.h,v 1.30 2022/08/07 19:39:25 miod Exp $ */
+/* $OpenBSD: uvm_pmap.h,v 1.31 2022/09/10 20:35:29 miod Exp $ */
/* $NetBSD: uvm_pmap.h,v 1.1 2000/06/27 09:00:14 mrg Exp $ */
/*
@@ -126,7 +126,7 @@ boolean_t pmap_clear_modify(struct vm_page *);
boolean_t pmap_clear_reference(struct vm_page *);
#endif
-#if !defined(pmap_collect)
+#if !defined(pmap_collect) && defined(__HAVE_PMAP_COLLECT)
void pmap_collect(pmap_t);
#endif
#if !defined(pmap_copy)