author     Bob Beck <beck@cvs.openbsd.org>    2011-04-02 16:47:18 +0000
committer  Bob Beck <beck@cvs.openbsd.org>    2011-04-02 16:47:18 +0000
commit     11ae302ebcf8bbe27cb1c13a64dce31c551cdac8 (patch)
tree       ff5ae54716a7cff0ff2b2e11852beffcff447d67 /sys/uvm
parent     f3acb273af8efc67104501a764c2ac28a24d7ec3 (diff)
Constrain the buffer cache to use only the dma reachable region of memory.
With this change bufcachepercent will be the percentage of dma reachable memory that the buffer cache will attempt to use. ok deraadt@ thib@ oga@
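
As a rough worked example of the new meaning (numbers purely illustrative, not from this commit): on a machine with 8 GB of physical memory of which 4 GB is DMA reachable, bufcachepercent=20 now aims the cache at roughly 800 MB (20% of the DMA reachable region) rather than 20% of all memory. A minimal sketch of that arithmetic, using stand-in names rather than the actual kernel variables this commit touches:

#include <sys/types.h>

/*
 * Illustrative sketch only: derive a page-count ceiling for the buffer
 * cache from the number of DMA reachable pages.  "dma_pages" and
 * "bufcachepercent" are stand-ins, not the real kernel variables.
 */
u_int64_t
bufcache_page_target(u_int64_t dma_pages, int bufcachepercent)
{
	/* the cache may use at most this share of DMA reachable memory */
	return (dma_pages * bufcachepercent / 100);
}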
Diffstat (limited to 'sys/uvm')
-rw-r--r--    sys/uvm/uvm_extern.h    4
-rw-r--r--    sys/uvm/uvm_page.c      28
2 files changed, 30 insertions, 2 deletions
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 66f4ed1d22f..a6448d99b20 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.89 2010/07/02 01:25:06 art Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.90 2011/04/02 16:47:17 beck Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -583,6 +583,8 @@ struct vm_page *uvm_pagealloc(struct uvm_object *,
voff_t, struct vm_anon *, int);
vaddr_t uvm_pagealloc_contig(vaddr_t, vaddr_t,
vaddr_t, vaddr_t);
+void uvm_pagealloc_multi(struct uvm_object *, voff_t,
+ vsize_t, int);
void uvm_pagerealloc(struct vm_page *,
struct uvm_object *, voff_t);
/* Actually, uvm_page_physload takes PF#s which need their own type */
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index 82951d1c99f..a431450748a 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.103 2011/04/02 12:38:37 ariane Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.104 2011/04/02 16:47:17 beck Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -804,6 +804,32 @@ uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
}
/*
+ * interface used by the buffer cache to allocate a buffer at a time.
+ * The pages are allocated wired in DMA accessible memory
+ */
+void
+uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size, int flags)
+{
+ struct pglist plist;
+ struct vm_page *pg;
+ int i;
+
+
+ TAILQ_INIT(&plist);
+ (void) uvm_pglistalloc(size, dma_constraint.ucr_low,
+ dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
+ UVM_PLA_WAITOK);
+ i = 0;
+ while ((pg = TAILQ_FIRST(&plist)) != NULL) {
+ pg->wire_count = 1;
+ atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
+ KASSERT((pg->pg_flags & PG_DEV) == 0);
+ TAILQ_REMOVE(&plist, pg, pageq);
+ uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
+ }
+}
+
+/*
* uvm_pagealloc_strat: allocate vm_page from a particular free list.
*
* => return null if no pages free
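
For context, a hypothetical call site for the new allocator (a sketch under assumptions, not code from this commit: the object name and the MAXBSIZE-sized request are illustrative). Note that the committed body above always passes UVM_PLA_WAITOK to uvm_pglistalloc, so the flags argument is effectively ignored and the call may sleep until enough DMA reachable pages are available:

#include <sys/param.h>
#include <uvm/uvm_extern.h>

/*
 * Hypothetical caller sketch: back one buffer with wired, DMA reachable
 * pages via the new interface.  "buf_object" and the MAXBSIZE request
 * size are assumptions for illustration only.
 */
void
example_buffer_backing(struct uvm_object *buf_object, voff_t off)
{
	/*
	 * Allocates atop(round_page(MAXBSIZE)) pages from the
	 * dma_constraint range, marks them wired and PG_CLEAN|PG_FAKE,
	 * and inserts them into buf_object at off, off + PAGE_SIZE, ...
	 */
	uvm_pagealloc_multi(buf_object, off, MAXBSIZE, UVM_PLA_WAITOK);
}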