| author | Ted Unangst <tedu@cvs.openbsd.org> | 2008-10-23 23:54:03 +0000 |
| --- | --- | --- |
| committer | Ted Unangst <tedu@cvs.openbsd.org> | 2008-10-23 23:54:03 +0000 |
| commit | a589fbadefc286c5b9f79ad996596a1006bcdea3 (patch) | |
| tree | 11917cdb46abf003114075e269bb90c39a4f349e /sys/arch | |
| parent | 8cf2bd98977117b4f40fc8cc454c9784708fb9a0 (diff) | |
a better fix for the "uvm_km thread runs out of memory" problem.
add a new arg to the backend so it can tell pool to slow down. when we get
this flag, yield *after* putting the page in the pool's free list. whatever
we do, don't let the thread sleep.
this makes things better by still letting the thread run when a huge pf
request comes in, but without artificially increasing pressure on the backend
by eating pages without feeding them forward.
ok deraadt
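
In isolation, the new backend contract is one extra out-parameter. The sketch below is illustrative only (`example_page_alloc` and the `malloc` stand-in are not kernel code); it shows the shape every allocator in the diff below now follows, in the no-pressure case of storing 0. Reporting pressure through an out-parameter keeps the backend itself non-blocking; backing off is left to the caller.

```c
#include <stdlib.h>

struct pool;	/* opaque stand-in for the kernel's struct pool */

/*
 * Old allocator hook:  void *alloc(struct pool *, int flags);
 * New allocator hook:  void *alloc(struct pool *, int flags, int *slowdown);
 *
 * A backend with nothing to report stores 0, as every pmap allocator in
 * the diff below does; a constrained backend would store 1 so the pool
 * code yields after queueing the page instead of sleeping.
 */
void *
example_page_alloc(struct pool *pp, int flags, int *slowdown)
{
	(void)pp;
	(void)flags;

	*slowdown = 0;		/* no pressure to report */
	return malloc(4096);	/* stand-in for grabbing a real page */
}
```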
Diffstat (limited to 'sys/arch')
-rw-r--r-- | sys/arch/alpha/alpha/pmap.c | 12
-rw-r--r-- | sys/arch/arm/arm/pmap.c | 11
-rw-r--r-- | sys/arch/sh/sh/pmap.c | 9
-rw-r--r-- | sys/arch/sparc/sparc/pmap.c | 7
4 files changed, 22 insertions, 17 deletions
```diff
diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c
index 21e931730df..255a9a19dd6 100644
--- a/sys/arch/alpha/alpha/pmap.c
+++ b/sys/arch/alpha/alpha/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.57 2008/09/12 12:27:26 blambert Exp $ */
+/* $OpenBSD: pmap.c,v 1.58 2008/10/23 23:54:02 tedu Exp $ */
 /* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */
 
 /*-
@@ -451,7 +451,7 @@ void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *, cpuid_t,
 void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, cpuid_t);
 void pmap_l1pt_delref(pmap_t, pt_entry_t *, cpuid_t);
 
-void *pmap_l1pt_alloc(struct pool *, int);
+void *pmap_l1pt_alloc(struct pool *, int, int *);
 void pmap_l1pt_free(struct pool *, void *);
 
 struct pool_allocator pmap_l1pt_allocator = {
@@ -468,7 +468,7 @@ void pmap_pv_remove(pmap_t, paddr_t, vaddr_t, boolean_t,
     struct pv_entry **);
 struct pv_entry *pmap_pv_alloc(void);
 void pmap_pv_free(struct pv_entry *);
-void *pmap_pv_page_alloc(struct pool *, int);
+void *pmap_pv_page_alloc(struct pool *, int, int *);
 void pmap_pv_page_free(struct pool *, void *);
 struct pool_allocator pmap_pv_allocator = {
        pmap_pv_page_alloc, pmap_pv_page_free, 0,
@@ -3159,10 +3159,11 @@ pmap_pv_free(struct pv_entry *pv)
  * Allocate a page for the pv_entry pool.
  */
 void *
-pmap_pv_page_alloc(struct pool *pp, int flags)
+pmap_pv_page_alloc(struct pool *pp, int flags, int *slowdown)
 {
        paddr_t pg;
 
+       *slowdown = 0;
        if (pmap_physpage_alloc(PGU_PVENT, &pg))
                return ((void *)ALPHA_PHYS_TO_K0SEG(pg));
        return (NULL);
@@ -3558,13 +3559,14 @@ pmap_l1pt_ctor(void *arg, void *object, int flags)
  * Page allocator for L1 PT pages.
  */
 void *
-pmap_l1pt_alloc(struct pool *pp, int flags)
+pmap_l1pt_alloc(struct pool *pp, int flags, int *slowdown)
 {
        paddr_t ptpa;
 
        /*
         * Attempt to allocate a free page.
         */
+       *slowdown = 0;
        if (pmap_physpage_alloc(PGU_L1PT, &ptpa) == FALSE)
                return (NULL);
 
diff --git a/sys/arch/arm/arm/pmap.c b/sys/arch/arm/arm/pmap.c
index 7f81a98e094..ba43c9042fe 100644
--- a/sys/arch/arm/arm/pmap.c
+++ b/sys/arch/arm/arm/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.17 2008/06/27 06:03:08 ray Exp $ */
+/* $OpenBSD: pmap.c,v 1.18 2008/10/23 23:54:02 tedu Exp $ */
 /* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
 
 /*
@@ -262,7 +262,7 @@ static LIST_HEAD(, pmap) pmap_pmaps;
  * Pool of PV structures
  */
 static struct pool pmap_pv_pool;
-void *pmap_bootstrap_pv_page_alloc(struct pool *, int);
+void *pmap_bootstrap_pv_page_alloc(struct pool *, int, int *);
 void pmap_bootstrap_pv_page_free(struct pool *, void *);
 struct pool_allocator pmap_bootstrap_pv_allocator = {
        pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
@@ -4055,14 +4055,15 @@ static vaddr_t last_bootstrap_page = 0;
 static void *free_bootstrap_pages = NULL;
 
 void *
-pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags)
+pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags, int *slowdown)
 {
-       extern void *pool_page_alloc(struct pool *, int);
+       extern void *pool_page_alloc(struct pool *, int, int *);
        vaddr_t new_page;
        void *rv;
 
        if (pmap_initialized)
-               return (pool_page_alloc(pp, flags));
+               return (pool_page_alloc(pp, flags, slowdown));
+       *slowdown = 0;
 
        if (free_bootstrap_pages) {
                rv = free_bootstrap_pages;
diff --git a/sys/arch/sh/sh/pmap.c b/sys/arch/sh/sh/pmap.c
index bc9b7e4a7bc..649293d0f14 100644
--- a/sys/arch/sh/sh/pmap.c
+++ b/sys/arch/sh/sh/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.10 2008/09/12 12:27:27 blambert Exp $ */
+/* $OpenBSD: pmap.c,v 1.11 2008/10/23 23:54:02 tedu Exp $ */
 /* $NetBSD: pmap.c,v 1.55 2006/08/07 23:19:36 tsutsui Exp $ */
 
 /*-
@@ -72,7 +72,7 @@ struct pv_entry {
 #define __pmap_pv_free(pv) pool_put(&__pmap_pv_pool, (pv))
 STATIC void __pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, vm_prot_t);
 STATIC void __pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t);
-STATIC void *__pmap_pv_page_alloc(struct pool *, int);
+STATIC void *__pmap_pv_page_alloc(struct pool *, int, int *);
 STATIC void __pmap_pv_page_free(struct pool *, void *);
 STATIC struct pool __pmap_pv_pool;
 STATIC struct pool_allocator pmap_pv_page_allocator = {
@@ -899,14 +899,15 @@ pmap_prefer(vaddr_t foff, vaddr_t *vap)
 
 /*
  * pv_entry pool allocator:
- *     void *__pmap_pv_page_alloc(struct pool *pool, int flags):
+ *     void *__pmap_pv_page_alloc(struct pool *pool, int flags, int *slowdown):
  *     void __pmap_pv_page_free(struct pool *pool, void *v):
  */
 void *
-__pmap_pv_page_alloc(struct pool *pool, int flags)
+__pmap_pv_page_alloc(struct pool *pool, int flags, int *slowdown)
 {
        struct vm_page *pg;
 
+       *slowdown = 0;
        pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
        if (pg == NULL)
                return (NULL);
diff --git a/sys/arch/sparc/sparc/pmap.c b/sys/arch/sparc/sparc/pmap.c
index 1a3e4fab3df..b800dacdce5 100644
--- a/sys/arch/sparc/sparc/pmap.c
+++ b/sys/arch/sparc/sparc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.149 2008/06/09 20:31:47 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.150 2008/10/23 23:54:02 tedu Exp $ */
 /* $NetBSD: pmap.c,v 1.118 1998/05/19 19:00:18 thorpej Exp $ */
 
 /*
@@ -193,7 +193,7 @@ struct pool pvpool;
  */
 static struct pool L1_pool;
 static struct pool L23_pool;
-void *pgt_page_alloc(struct pool *, int);
+void *pgt_page_alloc(struct pool *, int, int *);
 void pgt_page_free(struct pool *, void *);
 
 struct pool_allocator pgt_allocator = {
@@ -216,10 +216,11 @@ pcache_flush(va, pa, n)
  * Page table pool back-end.
  */
 void *
-pgt_page_alloc(struct pool *pp, int flags)
+pgt_page_alloc(struct pool *pp, int flags, int *slowdown)
 {
        caddr_t p;
 
+       *slowdown = 0;
        p = (caddr_t)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
            PAGE_SIZE, UVM_KMF_NOWAIT);
        if (p != NULL && ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)) {
```
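
To make the described behaviour concrete, here is a small self-contained userland model; it is not the kernel's pool(9)/uvm code, and every name in it is made up. The backend flags pressure through `*slowdown`, and the consumer inserts the page into its free list before yielding, so pages are always fed forward and the thread never sleeps.

```c
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>

#define MODEL_PAGE_SIZE 4096
#define MODEL_LOW_WATER 8

static int model_free_pages = 32;	/* pretend backend page count */

struct model_pool {
	void	*free_list[64];		/* big enough for every model page */
	int	 free_count;
};

/* Backend allocator: *slowdown plays the role of the new third argument. */
static void *
model_page_alloc(int *slowdown)
{
	if (model_free_pages <= 0)
		return NULL;
	model_free_pages--;
	/* Running low: ask the caller to throttle instead of sleeping. */
	*slowdown = (model_free_pages < MODEL_LOW_WATER);
	return malloc(MODEL_PAGE_SIZE);
}

/* Pool refill: feed the page forward first, then yield if asked. */
static int
model_pool_prime(struct model_pool *pp)
{
	int slowdown = 0;
	void *pg = model_page_alloc(&slowdown);

	if (pg == NULL)
		return -1;
	pp->free_list[pp->free_count++] = pg;	/* page is usable right away */
	if (slowdown)
		sched_yield();			/* back off, but never sleep */
	return 0;
}

int
main(void)
{
	struct model_pool pool = { .free_count = 0 };

	while (model_pool_prime(&pool) == 0)
		;
	printf("primed %d pages before the backend ran dry\n", pool.free_count);
	for (int i = 0; i < pool.free_count; i++)
		free(pool.free_list[i]);
	return 0;
}
```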