author     Artur Grabowski <art@cvs.openbsd.org>  2002-01-23 00:39:49 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>  2002-01-23 00:39:49 +0000
commit     428a9d0c41ac6e6a1e3b34fe87ab6ef38f3764cb (patch)
tree       bca3c796baa50ba1a667d9fc1450766d842763b4 /sys/arch/alpha
parent     ad9498378fb50081ca58cbd745f9705b789f2da8 (diff)
Pool deals fairly well with physical memory shortage, but it doesn't deal
well (not at all) with shortages of the vm_map where the pages are mapped
(usually kmem_map). Try to deal with it:
- Group all the information about the backend allocator for a pool in a
  separate struct. The pool itself only keeps a pointer to that struct.
- Change the pool_init API to reflect that (see the sketch after this list).
- Link all pools allocating from the same allocator on a linked list.
- Since an allocator is responsible for waiting for physical memory, it
  will only fail (waitok) when it runs out of its backing vm_map; in that
  case, carefully drain pools using the same allocator so that va space is
  freed (see comments in the code for caveats and details, and the drain
  sketch after this list).
- Change pool_reclaim to return whether it actually succeeded in freeing
  some memory; use that information to make draining easier and more
  efficient.
- Get rid of PR_URGENT; no one uses it.
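
For reference, a minimal sketch of what a backend allocator looks like under
the new pool_init API, using a hypothetical "foo" pool. The hook signatures
and the three-field initializer style follow the hunks below; the field names
in the comments (pa_alloc, pa_free) and the reading of 0 as "default page
size" are assumptions, not verified against the tree.

	/* New-style hooks: they take the pool itself instead of the
	 * old (size, flags, mtype) / (addr, size, mtype) arguments. */
	void	*foo_page_alloc(struct pool *, int);
	void	 foo_page_free(struct pool *, void *);

	struct pool_allocator foo_allocator = {
		foo_page_alloc,		/* pa_alloc */
		foo_page_free,		/* pa_free */
		0,			/* page size; 0 assumed default */
	};

	struct pool foo_pool;

	void
	foo_init(void)
	{
		/* The old alloc/free/mtype triple is replaced by a
		 * single allocator pointer; passing NULL selects a
		 * default allocator, as the tlb shootdown pool in the
		 * diff below does. */
		pool_init(&foo_pool, 128, 0, 0, 0, "foopl",
		    &foo_allocator);
	}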
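
The drain behaviour described above can then lean on the new pool_reclaim
return value. A hedged sketch, assuming the allocator links its pools with
TAILQ fields named pa_list/pr_alloc_list; those identifiers are guesses,
not taken from this diff.

	/*
	 * Hypothetical drain: when an allocator's backing vm_map is
	 * exhausted, reclaim from the pools sharing that allocator
	 * until some va space is actually freed.  pool_reclaim() now
	 * returns nonzero only if it freed something.
	 */
	int
	foo_allocator_drain(struct pool_allocator *pa)
	{
		struct pool *pp;

		TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
			if (pool_reclaim(pp))
				return (1);	/* freed some va space */
		}
		return (0);
	}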
Diffstat (limited to 'sys/arch/alpha')
-rw-r--r--sys/arch/alpha/alpha/pmap.c39
1 file changed, 22 insertions, 17 deletions
diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c
index 22fb769b976..043179c55f9 100644
--- a/sys/arch/alpha/alpha/pmap.c
+++ b/sys/arch/alpha/alpha/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.32 2001/12/19 08:58:05 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.33 2002/01/23 00:39:46 art Exp $ */
/* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */
/*-
@@ -512,8 +512,12 @@ void pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *, long,
void pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *, long);
void pmap_l1pt_delref(pmap_t, pt_entry_t *, long);
-void *pmap_l1pt_alloc(unsigned long, int, int);
-void pmap_l1pt_free(void *, unsigned long, int);
+void *pmap_l1pt_alloc(struct pool *, int);
+void pmap_l1pt_free(struct pool *, void *);
+
+struct pool_allocator pmap_l1pt_allocator = {
+ pmap_l1pt_alloc, pmap_l1pt_free, 0,
+};
int pmap_l1pt_ctor(void *, void *, int);
@@ -525,8 +529,11 @@ void pmap_pv_remove(pmap_t, paddr_t, vaddr_t, boolean_t,
struct pv_entry **);
struct pv_entry *pmap_pv_alloc(void);
void pmap_pv_free(struct pv_entry *);
-void *pmap_pv_page_alloc(u_long, int, int);
-void pmap_pv_page_free(void *, u_long, int);
+void *pmap_pv_page_alloc(struct pool *, int);
+void pmap_pv_page_free(struct pool *, void *);
+struct pool_allocator pmap_pv_allocator = {
+ pmap_pv_page_alloc, pmap_pv_page_free, 0,
+};
#ifdef DEBUG
void pmap_pv_dump(paddr_t);
#endif
@@ -947,19 +954,17 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
*/
pmap_ncpuids = ncpuids;
pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+ &pool_allocator_nointr);
pool_init(&pmap_l1pt_pool, PAGE_SIZE, 0, 0, 0, "l1ptpl",
- 0, pmap_l1pt_alloc, pmap_l1pt_free, M_VMPMAP);
+ &pmap_l1pt_allocator);
pool_cache_init(&pmap_l1pt_cache, &pmap_l1pt_pool, pmap_l1pt_ctor,
NULL, NULL);
pool_init(&pmap_asn_pool, pmap_ncpuids * sizeof(u_int), 0, 0, 0,
- "pmasnpl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+ "pmasnpl", &pool_allocator_nointr);
pool_init(&pmap_asngen_pool, pmap_ncpuids * sizeof(u_long), 0, 0, 0,
- "pmasngenpl",
- 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
+ "pmasngenpl", &pool_allocator_nointr);
pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
- 0, pmap_pv_page_alloc, pmap_pv_page_free, M_VMPMAP);
+ &pmap_pv_allocator);
TAILQ_INIT(&pmap_all_pmaps);
@@ -1003,7 +1008,7 @@ pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids)
*/
pool_init(&pmap_tlb_shootdown_job_pool,
sizeof(struct pmap_tlb_shootdown_job), 0, 0, 0, "pmaptlbpl",
- 0, NULL, NULL, M_VMPMAP);
+ NULL);
for (i = 0; i < ALPHA_MAXPROCS; i++) {
TAILQ_INIT(&pmap_tlb_shootdown_q[i].pq_head);
simple_lock_init(&pmap_tlb_shootdown_q[i].pq_slock);
@@ -3203,7 +3208,7 @@ pmap_pv_free(struct pv_entry *pv)
* Allocate a page for the pv_entry pool.
*/
void *
-pmap_pv_page_alloc(u_long size, int flags, int mtype)
+pmap_pv_page_alloc(struct pool *pp, int flags)
{
paddr_t pg;
@@ -3218,7 +3223,7 @@ pmap_pv_page_alloc(u_long size, int flags, int mtype)
* Free a pv_entry pool page.
*/
void
-pmap_pv_page_free(void *v, u_long size, int mtype)
+pmap_pv_page_free(struct pool *pp, void *v)
{
pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t)v));
@@ -3600,7 +3605,7 @@ pmap_l1pt_ctor(void *arg, void *object, int flags)
* Page alloctor for L1 PT pages.
*/
void *
-pmap_l1pt_alloc(unsigned long sz, int flags, int mtype)
+pmap_l1pt_alloc(struct pool *pp, int flags)
{
paddr_t ptpa;
@@ -3627,7 +3632,7 @@ pmap_l1pt_alloc(unsigned long sz, int flags, int mtype)
* Page freer for L1 PT pages.
*/
void
-pmap_l1pt_free(void *v, unsigned long sz, int mtype)
+pmap_l1pt_free(struct pool *pp, void *v)
{
pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t) v));