-rw-r--r--  sys/arch/alpha/alpha/pmap.c    12
-rw-r--r--  sys/arch/arm/arm/pmap.c        11
-rw-r--r--  sys/arch/sh/sh/pmap.c           9
-rw-r--r--  sys/arch/sparc/sparc/pmap.c     7
-rw-r--r--  sys/kern/subr_pool.c           58
-rw-r--r--  sys/kern/vfs_cache.c           18
-rw-r--r--  sys/sys/pool.h                  4
-rw-r--r--  sys/uvm/uvm_extern.h            4
-rw-r--r--  sys/uvm/uvm_km.c               28
-rw-r--r--  sys/uvm/uvm_map.c               5
10 files changed, 92 insertions, 64 deletions
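
The common thread in this commit is the back-end allocator hook: pa_alloc() grows an
int *slowdown out-parameter so a back end can report memory pressure without sleeping
while the pool mutex is held, and pool_do_get() yields once when the hint comes back
set and the caller passed PR_WAITOK. A minimal userland sketch of that contract,
using invented names (backend_alloc, have_spare_pages) purely for illustration:

    /*
     * Userland model of the new pa_alloc() contract (illustration only;
     * backend_alloc and have_spare_pages are invented names).
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sched.h>

    #define PR_WAITOK 1                 /* stand-in; the real flag lives in sys/pool.h */

    static int have_spare_pages = 0;    /* pretend the reserve is running low */

    static void *
    backend_alloc(int flags, int *slowdown)
    {
        (void)flags;                    /* a real back end hands this to its page source */
        *slowdown = 0;                  /* default: no pressure reported */
        if (!have_spare_pages)
            *slowdown = 1;              /* ask the caller to back off once */
        return malloc(4096);            /* the back end still returns memory */
    }

    int
    main(void)
    {
        int flags = PR_WAITOK, slowdown;
        void *v = backend_alloc(flags, &slowdown);

        if (slowdown && (flags & PR_WAITOK))
            sched_yield();              /* mirrors the new yield in pool_do_get() */
        printf("page %p, slowdown=%d\n", v, slowdown);
        free(v);
        return (0);
    }

The MD pmap back ends below only have to grow the extra argument and set
*slowdown = 0, since they have no low-watermark reserve of their own to report on.
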
diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c
index 21e931730df..255a9a19dd6 100644
--- a/sys/arch/alpha/alpha/pmap.c
+++ b/sys/arch/alpha/alpha/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.57 2008/09/12 12:27:26 blambert Exp $ */
+/* $OpenBSD: pmap.c,v 1.58 2008/10/23 23:54:02 tedu Exp $ */
/* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */
/*-
@@ -451,7 +451,7 @@ void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *, cpuid_t,
void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, cpuid_t);
void pmap_l1pt_delref(pmap_t, pt_entry_t *, cpuid_t);
-void *pmap_l1pt_alloc(struct pool *, int);
+void *pmap_l1pt_alloc(struct pool *, int, int *);
void pmap_l1pt_free(struct pool *, void *);
struct pool_allocator pmap_l1pt_allocator = {
@@ -468,7 +468,7 @@ void pmap_pv_remove(pmap_t, paddr_t, vaddr_t, boolean_t,
struct pv_entry **);
struct pv_entry *pmap_pv_alloc(void);
void pmap_pv_free(struct pv_entry *);
-void *pmap_pv_page_alloc(struct pool *, int);
+void *pmap_pv_page_alloc(struct pool *, int, int *);
void pmap_pv_page_free(struct pool *, void *);
struct pool_allocator pmap_pv_allocator = {
pmap_pv_page_alloc, pmap_pv_page_free, 0,
@@ -3159,10 +3159,11 @@ pmap_pv_free(struct pv_entry *pv)
* Allocate a page for the pv_entry pool.
*/
void *
-pmap_pv_page_alloc(struct pool *pp, int flags)
+pmap_pv_page_alloc(struct pool *pp, int flags, int *slowdown)
{
paddr_t pg;
+ *slowdown = 0;
if (pmap_physpage_alloc(PGU_PVENT, &pg))
return ((void *)ALPHA_PHYS_TO_K0SEG(pg));
return (NULL);
@@ -3558,13 +3559,14 @@ pmap_l1pt_ctor(void *arg, void *object, int flags)
* Page allocator for L1 PT pages.
*/
void *
-pmap_l1pt_alloc(struct pool *pp, int flags)
+pmap_l1pt_alloc(struct pool *pp, int flags, int *slowdown)
{
paddr_t ptpa;
/*
* Attempt to allocate a free page.
*/
+ *slowdown = 0;
if (pmap_physpage_alloc(PGU_L1PT, &ptpa) == FALSE)
return (NULL);
diff --git a/sys/arch/arm/arm/pmap.c b/sys/arch/arm/arm/pmap.c
index 7f81a98e094..ba43c9042fe 100644
--- a/sys/arch/arm/arm/pmap.c
+++ b/sys/arch/arm/arm/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.17 2008/06/27 06:03:08 ray Exp $ */
+/* $OpenBSD: pmap.c,v 1.18 2008/10/23 23:54:02 tedu Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
@@ -262,7 +262,7 @@ static LIST_HEAD(, pmap) pmap_pmaps;
* Pool of PV structures
*/
static struct pool pmap_pv_pool;
-void *pmap_bootstrap_pv_page_alloc(struct pool *, int);
+void *pmap_bootstrap_pv_page_alloc(struct pool *, int, int *);
void pmap_bootstrap_pv_page_free(struct pool *, void *);
struct pool_allocator pmap_bootstrap_pv_allocator = {
pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
@@ -4055,14 +4055,15 @@ static vaddr_t last_bootstrap_page = 0;
static void *free_bootstrap_pages = NULL;
void *
-pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags)
+pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags, int *slowdown)
{
- extern void *pool_page_alloc(struct pool *, int);
+ extern void *pool_page_alloc(struct pool *, int, int *);
vaddr_t new_page;
void *rv;
if (pmap_initialized)
- return (pool_page_alloc(pp, flags));
+ return (pool_page_alloc(pp, flags, slowdown));
+ *slowdown = 0;
if (free_bootstrap_pages) {
rv = free_bootstrap_pages;
diff --git a/sys/arch/sh/sh/pmap.c b/sys/arch/sh/sh/pmap.c
index bc9b7e4a7bc..649293d0f14 100644
--- a/sys/arch/sh/sh/pmap.c
+++ b/sys/arch/sh/sh/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.10 2008/09/12 12:27:27 blambert Exp $ */
+/* $OpenBSD: pmap.c,v 1.11 2008/10/23 23:54:02 tedu Exp $ */
/* $NetBSD: pmap.c,v 1.55 2006/08/07 23:19:36 tsutsui Exp $ */
/*-
@@ -72,7 +72,7 @@ struct pv_entry {
#define __pmap_pv_free(pv) pool_put(&__pmap_pv_pool, (pv))
STATIC void __pmap_pv_enter(pmap_t, struct vm_page *, vaddr_t, vm_prot_t);
STATIC void __pmap_pv_remove(pmap_t, struct vm_page *, vaddr_t);
-STATIC void *__pmap_pv_page_alloc(struct pool *, int);
+STATIC void *__pmap_pv_page_alloc(struct pool *, int, int *);
STATIC void __pmap_pv_page_free(struct pool *, void *);
STATIC struct pool __pmap_pv_pool;
STATIC struct pool_allocator pmap_pv_page_allocator = {
@@ -899,14 +899,15 @@ pmap_prefer(vaddr_t foff, vaddr_t *vap)
/*
* pv_entry pool allocator:
- * void *__pmap_pv_page_alloc(struct pool *pool, int flags):
+ * void *__pmap_pv_page_alloc(struct pool *pool, int flags, int *slowdown):
* void __pmap_pv_page_free(struct pool *pool, void *v):
*/
void *
-__pmap_pv_page_alloc(struct pool *pool, int flags)
+__pmap_pv_page_alloc(struct pool *pool, int flags, int *slowdown)
{
struct vm_page *pg;
+ *slowdown = 0;
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
if (pg == NULL)
return (NULL);
diff --git a/sys/arch/sparc/sparc/pmap.c b/sys/arch/sparc/sparc/pmap.c
index 1a3e4fab3df..b800dacdce5 100644
--- a/sys/arch/sparc/sparc/pmap.c
+++ b/sys/arch/sparc/sparc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.149 2008/06/09 20:31:47 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.150 2008/10/23 23:54:02 tedu Exp $ */
/* $NetBSD: pmap.c,v 1.118 1998/05/19 19:00:18 thorpej Exp $ */
/*
@@ -193,7 +193,7 @@ struct pool pvpool;
*/
static struct pool L1_pool;
static struct pool L23_pool;
-void *pgt_page_alloc(struct pool *, int);
+void *pgt_page_alloc(struct pool *, int, int *);
void pgt_page_free(struct pool *, void *);
struct pool_allocator pgt_allocator = {
@@ -216,10 +216,11 @@ pcache_flush(va, pa, n)
* Page table pool back-end.
*/
void *
-pgt_page_alloc(struct pool *pp, int flags)
+pgt_page_alloc(struct pool *pp, int flags, int *slowdown)
{
caddr_t p;
+ *slowdown = 0;
p = (caddr_t)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
PAGE_SIZE, UVM_KMF_NOWAIT);
if (p != NULL && ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)) {
diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index 8ee2f318845..fc01bd3e366 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_pool.c,v 1.62 2008/06/26 05:42:20 ray Exp $ */
+/* $OpenBSD: subr_pool.c,v 1.63 2008/10/23 23:54:02 tedu Exp $ */
/* $NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $ */
/*-
@@ -85,6 +85,7 @@ struct pool_item {
#endif
/* Other entries use only this list entry */
TAILQ_ENTRY(pool_item) pi_list;
+ int pi_fillstart;
};
#define POOL_NEEDS_CATCHUP(pp) \
@@ -106,7 +107,7 @@ void pr_rmpage(struct pool *, struct pool_item_header *,
int pool_chk_page(struct pool *, const char *, struct pool_item_header *);
struct pool_item_header *pool_alloc_item_header(struct pool *, caddr_t , int);
-void *pool_allocator_alloc(struct pool *, int);
+void *pool_allocator_alloc(struct pool *, int, int *);
void pool_allocator_free(struct pool *, void *);
#ifdef DDB
@@ -392,6 +393,7 @@ pool_do_get(struct pool *pp, int flags)
struct pool_item *pi;
struct pool_item_header *ph;
void *v;
+ int slowdown = 0;
#ifdef DIAGNOSTIC
if ((flags & PR_WAITOK) != 0)
@@ -462,7 +464,7 @@ startover:
/*
* Call the back-end page allocator for more memory.
*/
- v = pool_allocator_alloc(pp, flags);
+ v = pool_allocator_alloc(pp, flags, &slowdown);
if (__predict_true(v != NULL))
ph = pool_alloc_item_header(pp, v, flags);
@@ -490,6 +492,12 @@ startover:
pool_prime_page(pp, v, ph);
pp->pr_npagealloc++;
+ if (slowdown && (flags & PR_WAITOK)) {
+ mtx_leave(&pp->pr_mtx);
+ yield();
+ mtx_enter(&pp->pr_mtx);
+ }
+
/* Start the allocation process over. */
goto startover;
}
@@ -510,6 +518,19 @@ startover:
" item addr %p",
pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
}
+ {
+ int i, *ip = v;
+
+ for (i = 0; i < pp->pr_size / sizeof(int); i++) {
+ if (++ip < &pi->pi_fillstart)
+ continue;
+ if (*ip != PI_MAGIC) {
+ panic("pool_do_get(%s): free list modified: magic=%x; page %p;"
+ " item addr %p",
+ pp->pr_wchan, *ip, ph->ph_page, pi);
+ }
+ }
+ }
#endif
/*
@@ -615,9 +636,6 @@ pool_do_put(struct pool *pp, void *v)
* Return to item list.
*/
#ifdef DIAGNOSTIC
- pi->pi_magic = PI_MAGIC;
-#endif
-#ifdef DEBUG
{
int i, *ip = v;
@@ -625,9 +643,10 @@ pool_do_put(struct pool *pp, void *v)
*ip++ = PI_MAGIC;
}
}
+ pi->pi_magic = PI_MAGIC;
#endif
- TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
+ TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
ph->ph_nmissing--;
pp->pr_nitems++;
pp->pr_nout--;
@@ -688,12 +707,13 @@ pool_prime(struct pool *pp, int n)
struct pool_item_header *ph;
caddr_t cp;
int newpages;
+ int slowdown;
mtx_enter(&pp->pr_mtx);
newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
while (newpages-- > 0) {
- cp = pool_allocator_alloc(pp, PR_NOWAIT);
+ cp = pool_allocator_alloc(pp, PR_NOWAIT, &slowdown);
if (__predict_true(cp != NULL))
ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
if (__predict_false(cp == NULL || ph == NULL)) {
@@ -772,6 +792,13 @@ pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
/* Insert on page list */
TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
#ifdef DIAGNOSTIC
+ {
+ int i, *ip = (int *)pi;
+
+ for (i = 0; i < pp->pr_size / sizeof(int); i++) {
+ *ip++ = PI_MAGIC;
+ }
+ }
pi->pi_magic = PI_MAGIC;
#endif
cp = (caddr_t)(cp + pp->pr_size);
@@ -799,12 +826,13 @@ pool_catchup(struct pool *pp)
struct pool_item_header *ph;
caddr_t cp;
int error = 0;
+ int slowdown;
while (POOL_NEEDS_CATCHUP(pp)) {
/*
* Call the page back-end allocator for more memory.
*/
- cp = pool_allocator_alloc(pp, PR_NOWAIT);
+ cp = pool_allocator_alloc(pp, PR_NOWAIT, &slowdown);
if (__predict_true(cp != NULL))
ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
if (__predict_false(cp == NULL || ph == NULL)) {
@@ -1249,9 +1277,7 @@ sysctl_dopool(int *name, u_int namelen, char *where, size_t *sizep)
*
* Each pool has a backend allocator that handles allocation, deallocation
*/
-void *pool_page_alloc_oldnointr(struct pool *, int);
-void pool_page_free_oldnointr(struct pool *, void *);
-void *pool_page_alloc(struct pool *, int);
+void *pool_page_alloc(struct pool *, int, int *);
void pool_page_free(struct pool *, void *);
/*
@@ -1278,14 +1304,14 @@ struct pool_allocator pool_allocator_nointr = {
*/
void *
-pool_allocator_alloc(struct pool *pp, int flags)
+pool_allocator_alloc(struct pool *pp, int flags, int *slowdown)
{
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
void *v;
if (waitok)
mtx_leave(&pp->pr_mtx);
- v = pp->pr_alloc->pa_alloc(pp, flags);
+ v = pp->pr_alloc->pa_alloc(pp, flags, slowdown);
if (waitok)
mtx_enter(&pp->pr_mtx);
@@ -1301,11 +1327,11 @@ pool_allocator_free(struct pool *pp, void *v)
}
void *
-pool_page_alloc(struct pool *pp, int flags)
+pool_page_alloc(struct pool *pp, int flags, int *slowdown)
{
boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
- return (uvm_km_getpage(waitok));
+ return (uvm_km_getpage(waitok, slowdown));
}
void
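
The DIAGNOSTIC changes above extend the old single-word pi_magic check: pool_prime_page()
and pool_do_put() now fill the whole item with the magic pattern, and pool_do_get()
verifies that every word from pi_fillstart onward is still intact, which catches writes
to items sitting on the free list. A rough userland sketch of the technique; struct item,
POISON and the helper names are invented stand-ins, not the kernel's definitions:

    /*
     * Userland model of the item poisoning (illustration only; struct item
     * and POISON stand in for struct pool_item and PI_MAGIC).
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define POISON 0xdeadbeef           /* stand-in for PI_MAGIC */

    struct item {
        unsigned int magic;             /* models pi_magic */
        void *next;                     /* models the pi_list linkage */
        unsigned int fillstart;         /* models pi_fillstart: checking begins here */
    };

    /* fill the whole free item with the poison pattern (pool_do_put side) */
    static void
    poison(void *v, size_t size)
    {
        unsigned int *ip = v;
        size_t i;

        for (i = 0; i < size / sizeof(unsigned int); i++)
            *ip++ = POISON;
    }

    /* verify the words past the bookkeeping are untouched (pool_do_get side) */
    static int
    still_intact(void *v, size_t size)
    {
        struct item *pi = v;
        unsigned int *ip = v;
        size_t i;

        for (i = 0; i < size / sizeof(unsigned int); i++, ip++) {
            if (ip < &pi->fillstart)
                continue;               /* free-list bookkeeping may change */
            if (*ip != POISON)
                return (0);             /* somebody wrote to a free item */
        }
        return (1);
    }

    int
    main(void)
    {
        size_t size = 64;
        char *v = malloc(size);

        poison(v, size);
        v[40] = 'X';                    /* simulate a use-after-free write */
        printf("intact: %d\n", still_intact(v, size));
        free(v);
        return (0);
    }
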
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 9356cb32039..41b8263819d 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_cache.c,v 1.27 2008/10/20 20:33:07 deraadt Exp $ */
+/* $OpenBSD: vfs_cache.c,v 1.28 2008/10/23 23:54:02 tedu Exp $ */
/* $NetBSD: vfs_cache.c,v 1.13 1996/02/04 02:18:09 christos Exp $ */
/*
@@ -162,6 +162,9 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
goto remove;
}
+ /* remove from lru now to prevent races */
+ TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
+
vp = ncp->nc_vp;
vpid = vp->v_id;
if (vp == dvp) { /* lookup on "." */
@@ -178,6 +181,8 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
if (!error && (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) == 0) {
if ((error = vn_lock(dvp, LK_EXCLUSIVE, p)) != 0) {
vput(vp);
+ /* parent has permanent issues; recycle */
+ TAILQ_INSERT_HEAD(&nclruhead, ncp, nc_lru);
return (error);
}
cnp->cn_flags &= ~PDIRUNLOCK;
@@ -204,6 +209,8 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
nchstats.ncs_falsehits++;
} else
nchstats.ncs_badhits++;
+ /* cache entry is stale; recycle */
+ TAILQ_INSERT_HEAD(&nclruhead, ncp, nc_lru);
/*
* The parent needs to be locked when we return to VOP_LOOKUP().
* The `.' case here should be extremely rare (if it can happen
@@ -219,13 +226,8 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
}
nchstats.ncs_goodhits++;
- /*
- * Move this slot to end of LRU chain, if not already there.
- */
- if (TAILQ_NEXT(ncp, nc_lru) != NULL) {
- TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
- TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
- }
+ /* cache entry is valid; keep it */
+ TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
*vpp = vp;
return (0);
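
The vfs_cache.c change reworks the LRU discipline: a hit is pulled off nclruhead as soon
as it is found (so nothing else can grab it mid-lookup), then re-queued at the head when
the entry turns out to be stale (making it the next one recycled) or at the tail when the
hit is good. A small userland sketch of that head-versus-tail policy; the entry type and
helper are invented, only the TAILQ usage mirrors the diff:

    /*
     * Userland model of the head-versus-tail LRU policy (illustration only;
     * struct entry and lookup_done() are invented names).
     */
    #include <sys/queue.h>
    #include <stdio.h>

    struct entry {
        TAILQ_ENTRY(entry) lru;
        const char *name;
    };

    static TAILQ_HEAD(, entry) lruhead = TAILQ_HEAD_INITIALIZER(lruhead);

    static void
    lookup_done(struct entry *e, int good)
    {
        if (good)
            TAILQ_INSERT_TAIL(&lruhead, e, lru);    /* keep it around longest */
        else
            TAILQ_INSERT_HEAD(&lruhead, e, lru);    /* first in line for reuse */
    }

    int
    main(void)
    {
        struct entry a = { .name = "stale" }, b = { .name = "good" };
        struct entry *e;

        TAILQ_INSERT_TAIL(&lruhead, &a, lru);
        TAILQ_INSERT_TAIL(&lruhead, &b, lru);

        /* a lookup hits "a": take it off the list first, as the diff now does */
        TAILQ_REMOVE(&lruhead, &a, lru);
        lookup_done(&a, 0);             /* the hit was stale: recycle soon */

        TAILQ_REMOVE(&lruhead, &b, lru);
        lookup_done(&b, 1);             /* the hit was valid: keep it */

        TAILQ_FOREACH(e, &lruhead, lru) /* prints "stale" then "good" */
            printf("%s\n", e->name);
        return (0);
    }
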
diff --git a/sys/sys/pool.h b/sys/sys/pool.h
index 1354478c3fc..9015f0996b6 100644
--- a/sys/sys/pool.h
+++ b/sys/sys/pool.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pool.h,v 1.29 2008/06/26 05:42:20 ray Exp $ */
+/* $OpenBSD: pool.h,v 1.30 2008/10/23 23:54:02 tedu Exp $ */
/* $NetBSD: pool.h,v 1.27 2001/06/06 22:00:17 rafal Exp $ */
/*-
@@ -52,7 +52,7 @@
struct pool;
struct pool_allocator {
- void *(*pa_alloc)(struct pool *, int);
+ void *(*pa_alloc)(struct pool *, int, int *);
void (*pa_free)(struct pool *, void *);
int pa_pagesz;
int pa_pagemask;
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index f0e48816b5b..deedda2e841 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.70 2008/06/09 20:30:23 miod Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.71 2008/10/23 23:54:02 tedu Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -505,7 +505,7 @@ vaddr_t uvm_km_alloc_poolpage1(vm_map_t,
struct uvm_object *, boolean_t);
void uvm_km_free_poolpage1(vm_map_t, vaddr_t);
-void *uvm_km_getpage(boolean_t);
+void *uvm_km_getpage(boolean_t, int *);
void uvm_km_putpage(void *);
/* uvm_map.c */
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index 693392c8972..c0b99379e1f 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.67 2008/06/14 03:48:32 art Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.68 2008/10/23 23:54:02 tedu Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
@@ -760,9 +760,10 @@ uvm_km_page_init(void)
}
void *
-uvm_km_getpage(boolean_t waitok)
+uvm_km_getpage(boolean_t waitok, int *slowdown)
{
+ *slowdown = 0;
return ((void *)uvm_km_alloc_poolpage1(NULL, NULL, waitok));
}
@@ -791,6 +792,8 @@ struct km_page {
struct km_page *next;
} *uvm_km_pages_head;
+struct proc *uvm_km_proc;
+
void uvm_km_createthread(void *);
void uvm_km_thread(void *);
@@ -833,7 +836,7 @@ uvm_km_page_init(void)
void
uvm_km_createthread(void *arg)
{
- kthread_create(uvm_km_thread, NULL, NULL, "kmthread");
+ kthread_create(uvm_km_thread, NULL, &uvm_km_proc, "kmthread");
}
/*
@@ -878,10 +881,11 @@ uvm_km_thread(void *arg)
* permits it. Wake up the thread if we've dropped below lowat.
*/
void *
-uvm_km_getpage(boolean_t waitok)
+uvm_km_getpage(boolean_t waitok, int *slowdown)
{
struct km_page *page = NULL;
+ *slowdown = 0;
mtx_enter(&uvm_km_mtx);
for (;;) {
page = uvm_km_pages_head;
@@ -894,22 +898,12 @@ uvm_km_getpage(boolean_t waitok)
break;
msleep(&uvm_km_pages_free, &uvm_km_mtx, PVM, "getpage", 0);
}
+ mtx_leave(&uvm_km_mtx);
if (uvm_km_pages_free < uvm_km_pages_lowat) {
+ if (curproc != uvm_km_proc)
+ *slowdown = 1;
wakeup(&uvm_km_pages_head);
-
- /*
- * If we're below the low watermark and are allowed to
- * sleep, we should slow down our allocations a bit
- * to not exhaust the reserve of pages for nosleep
- * allocators.
- *
- * Just sleep once.
- */
- if (waitok)
- msleep(&uvm_km_pages_free, &uvm_km_mtx, PPAUSE,
- "getpg2", 0);
}
- mtx_leave(&uvm_km_mtx);
return (page);
}
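
With the msleep()-under-the-mutex path removed, uvm_km_getpage() reports pressure instead
of stalling: when the free count dips below uvm_km_pages_lowat it wakes the kmthread and,
unless the caller is that thread itself (the reason for the new uvm_km_proc handle), hands
back *slowdown = 1 so the pool layer can yield outside its own mutex. A compressed userland
sketch of that decision, with invented stand-ins for the free count, the watermark and the
thread check:

    /*
     * Userland model of the reworked uvm_km_getpage() tail (illustration only;
     * pages_free, pages_lowat and caller_is_km_thread stand in for
     * uvm_km_pages_free, uvm_km_pages_lowat and the curproc != uvm_km_proc test).
     */
    #include <stdio.h>
    #include <stdlib.h>

    static int pages_free = 3;
    static const int pages_lowat = 8;
    static int caller_is_km_thread = 0;

    static void *
    getpage_model(int *slowdown)
    {
        void *page = malloc(4096);      /* stands in for popping the free list */

        pages_free--;
        *slowdown = 0;
        if (pages_free < pages_lowat) {
            if (!caller_is_km_thread)
                *slowdown = 1;          /* a hint, instead of msleep()ing here */
            /* in the kernel, wakeup(&uvm_km_pages_head) refills the list */
        }
        return page;
    }

    int
    main(void)
    {
        int slowdown;
        void *p = getpage_model(&slowdown);

        printf("page %p, slowdown=%d\n", p, slowdown);
        free(p);
        return (0);
    }
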
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 61a69aad4d1..9741c57b276 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.105 2008/10/08 08:41:19 art Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.106 2008/10/23 23:54:02 tedu Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -390,6 +390,7 @@ uvm_mapent_alloc(struct vm_map *map)
{
struct vm_map_entry *me, *ne;
int s, i;
+ int slowdown;
UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
if (map->flags & VM_MAP_INTRSAFE || cold) {
@@ -397,7 +398,7 @@ uvm_mapent_alloc(struct vm_map *map)
simple_lock(&uvm.kentry_lock);
me = uvm.kentry_free;
if (me == NULL) {
- ne = uvm_km_getpage(0);
+ ne = uvm_km_getpage(0, &slowdown);
if (ne == NULL)
panic("uvm_mapent_alloc: cannot allocate map "
"entry");