author		Artur Grabowski <art@cvs.openbsd.org>	2002-01-23 00:39:49 +0000
committer	Artur Grabowski <art@cvs.openbsd.org>	2002-01-23 00:39:49 +0000
commit		428a9d0c41ac6e6a1e3b34fe87ab6ef38f3764cb (patch)
tree		bca3c796baa50ba1a667d9fc1450766d842763b4 /sys/arch/sparc
parent		ad9498378fb50081ca58cbd745f9705b789f2da8 (diff)
Pool deals fairly well with physical memory shortage, but it doesn't deal
well (not at all) with shortages of the vm_map where the pages are mapped
(usually kmem_map). Try to deal with it:
 - Group all information the backend allocator for a pool needs into a
   separate struct; the pool only keeps a pointer to that struct.
 - Change the pool_init API to reflect that.
 - Link all pools allocating from the same allocator on a linked list.
 - Since an allocator is responsible for waiting for physical memory, it
   will only fail (waitok) when it runs out of its backing vm_map; in that
   case, carefully drain pools using the same allocator so that va space
   is freed (see comments in code for caveats and details).
 - Change pool_reclaim to return whether it actually succeeded in freeing
   some memory, and use that information to make draining easier and more
   efficient.
 - Get rid of PR_URGENT; no one uses it.
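
For context, a minimal kernel-style sketch of the allocator API this commit
moves to, inferred from the pgt_allocator initializer and the pool_init()
calls in the diff below. The pa_* field names, the pa_list/pr_alloc_list
members, and pool_allocator_drain() are assumptions for illustration, not
taken from this diff:

	/*
	 * Sketch: the backend allocator grouped into its own struct.
	 * The two hooks plus a page size (0 meaning the default
	 * PAGE_SIZE) match the three-member pgt_allocator initializer
	 * below; field names are assumed.
	 */
	struct pool_allocator {
		void	*(*pa_alloc)(struct pool *, int);  /* get a page */
		void	(*pa_free)(struct pool *, void *); /* put it back */
		int	pa_pagesz;			   /* 0 = PAGE_SIZE */
		/* assumed, maintained by the pool code (<sys/queue.h>): */
		TAILQ_HEAD(, pool) pa_list;	/* pools on this allocator */
	};

	/*
	 * pool_init() now takes the allocator pointer instead of separate
	 * alloc/free functions and an mtype; NULL selects the default
	 * kernel-memory allocator (as the pvpool call below does).
	 */
	void	pool_init(struct pool *, size_t, u_int, u_int, int,
		    const char *, struct pool_allocator *);

The draining described above could then look roughly like this; the function
name and the pr_alloc_list entry member are hypothetical:

	/*
	 * Hypothetical sketch: when an allocator's backing vm_map is
	 * exhausted, walk the pools sharing it and reclaim idle pages
	 * until some va space is freed. pool_reclaim() now reports
	 * whether it freed anything.
	 */
	int
	pool_allocator_drain(struct pool_allocator *pa)
	{
		struct pool *pp;

		TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
			if (pool_reclaim(pp))
				return (1);	/* freed some va space */
		}
		return (0);
	}
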
Diffstat (limited to 'sys/arch/sparc')
-rw-r--r--	sys/arch/sparc/sparc/pmap.c	40
1 file changed, 18 insertions(+), 22 deletions(-)
diff --git a/sys/arch/sparc/sparc/pmap.c b/sys/arch/sparc/sparc/pmap.c
index f0ef4375d49..23cd2348e08 100644
--- a/sys/arch/sparc/sparc/pmap.c
+++ b/sys/arch/sparc/sparc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.117 2001/12/19 08:58:05 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.118 2002/01/23 00:39:47 art Exp $ */
/* $NetBSD: pmap.c,v 1.118 1998/05/19 19:00:18 thorpej Exp $ */
/*
@@ -214,8 +214,12 @@ pvfree(pv)
*/
static struct pool L1_pool;
static struct pool L23_pool;
-void *pgt_page_alloc __P((unsigned long, int, int));
-void pgt_page_free __P((void *, unsigned long, int));
+void *pgt_page_alloc(struct pool *, int);
+void pgt_page_free(struct pool *, void *);
+
+struct pool_allocator pgt_allocator = {
+ pgt_page_alloc, pgt_page_free, 0,
+};
void pcache_flush __P((caddr_t, caddr_t, int));
void
@@ -233,30 +237,23 @@ pcache_flush(va, pa, n)
* Page table pool back-end.
*/
void *
-pgt_page_alloc(sz, flags, mtype)
- unsigned long sz;
- int flags;
- int mtype;
+pgt_page_alloc(struct pool *pp, int flags)
{
caddr_t p;
p = (caddr_t)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
- (vsize_t)sz, UVM_KMF_NOWAIT);
-
+ PAGE_SIZE, UVM_KMF_NOWAIT);
if (p != NULL && ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)) {
- pcache_flush(p, (caddr_t)VA2PA(p), sz);
- kvm_uncache(p, atop(sz));
+ pcache_flush(p, (caddr_t)VA2PA(p), PAGE_SIZE);
+ kvm_uncache(p, atop(PAGE_SIZE));
}
return (p);
}
void
-pgt_page_free(v, sz, mtype)
- void *v;
- unsigned long sz;
- int mtype;
+pgt_page_free(struct pool *pp, void *v)
{
- uvm_km_free(kernel_map, (vaddr_t)v, sz);
+ uvm_km_free(kernel_map, (vaddr_t)v, PAGE_SIZE);
}
#endif /* SUN4M */
@@ -3359,8 +3356,7 @@ pmap_init()
sizeof(struct pvlist);
}
- pool_init(&pvpool, sizeof(struct pvlist), 0, 0, 0, "pvpl", 0,
- NULL, NULL, 0);
+ pool_init(&pvpool, sizeof(struct pvlist), 0, 0, 0, "pvpl", NULL);
/*
* We can set it here since it's only used in pmap_enter to see
@@ -3378,12 +3374,12 @@ pmap_init()
int n;
n = SRMMU_L1SIZE * sizeof(int);
- pool_init(&L1_pool, n, n, 0, 0, "L1 pagetable", 0,
- pgt_page_alloc, pgt_page_free, 0);
+ pool_init(&L1_pool, n, n, 0, 0, "L1 pagetable",
+	    &pgt_allocator);
n = SRMMU_L2SIZE * sizeof(int);
- pool_init(&L23_pool, n, n, 0, 0, "L2/L3 pagetable", 0,
- pgt_page_alloc, pgt_page_free, 0);
+ pool_init(&L23_pool, n, n, 0, 0, "L2/L3 pagetable",
+	    &pgt_allocator);
}
#endif
}
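
Callers are unaffected by the backend change: once initialized against
pgt_allocator, the page-table pools are used through the normal pool
interface. A brief illustrative sketch (the PR_WAITOK flag here is just an
example, not taken from this diff):

	int *l1;

	/* pool_get()/pool_put() route through pgt_allocator's hooks. */
	l1 = pool_get(&L1_pool, PR_WAITOK);	/* may sleep for pages */
	/* ... fill in the L1 page table ... */
	pool_put(&L1_pool, l1);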