author    Niklas Hallqvist <niklas@cvs.openbsd.org>  1998-03-01 11:36:45 +0000
committer Niklas Hallqvist <niklas@cvs.openbsd.org>  1998-03-01 11:36:45 +0000
commit    bafd0a76f022d1620830385c9ef744396806c179 (patch)
tree      f43fd5d4fa3a3f46877cec9878ab3f15899aa49e /sys
parent    e7d453379447ac233df4d3bf297bce631de1ba92 (diff)
Put back the former pmap_steal_memory in the non-MNN (plain MACHINE_NONCONTIG,
without MACHINE_NEW_NONCONTIG) case; at least i386 still needs it.
Diffstat (limited to 'sys')
 -rw-r--r--  sys/vm/pmap.h    |   7
 -rw-r--r--  sys/vm/vm_page.c | 140
 2 files changed, 144 insertions(+), 3 deletions(-)
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 8edd35b77a8..e92ce8e4514 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.5 1998/03/01 00:37:58 niklas Exp $ */
+/* $OpenBSD: pmap.h,v 1.6 1998/03/01 11:36:44 niklas Exp $ */
/* $NetBSD: pmap.h,v 1.16 1996/03/31 22:15:32 pk Exp $ */

/*
@@ -196,8 +196,11 @@ void pmap_zero_page __P((vm_offset_t));
#ifdef MACHINE_NONCONTIG
u_int pmap_free_pages __P((void));
boolean_t pmap_next_page __P((vm_offset_t *));
+void pmap_startup __P((vm_offset_t *, vm_offset_t *));
+vm_offset_t pmap_steal_memory __P((vm_size_t));
+void pmap_virtual_space __P((vm_offset_t *, vm_offset_t *));
#endif
-#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
+#ifdef MACHINE_NEW_NONCONTIG
#if defined(PMAP_STEAL_MEMORY)
vm_offset_t pmap_steal_memory __P((vm_size_t, vm_offset_t *,
vm_offset_t *));
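
The two prototypes above differ in shape: the restored MACHINE_NONCONTIG variant
takes only a size, while the MACHINE_NEW_NONCONTIG one also takes pointers to the
kernel virtual-space bounds. A minimal sketch of a hypothetical machine-dependent
caller, for illustration only (not part of this commit; md_early_alloc and its
names are made up):

/*
 * Hypothetical MD bootstrap helper (illustrative only, not from
 * this commit): steal boot-time memory under either interface.
 */
vm_offset_t
md_early_alloc(size)
	vm_size_t size;
{
#if defined(MACHINE_NEW_NONCONTIG) && defined(PMAP_STEAL_MEMORY)
	vm_offset_t vstart, vend;

	/* MNN interface: also passes pointers to the kernel VA bounds. */
	return pmap_steal_memory(size, &vstart, &vend);
#else	/* plain MACHINE_NONCONTIG */
	/* Restored old interface: the VM layer picks the address itself. */
	return pmap_steal_memory(size);
#endif
}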
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 365acb01bdb..0f79856ccfc 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_page.c,v 1.11 1998/03/01 00:38:18 niklas Exp $ */
+/* $OpenBSD: vm_page.c,v 1.12 1998/03/01 11:36:42 niklas Exp $ */
/* $NetBSD: vm_page.c,v 1.41 1998/02/08 18:24:52 thorpej Exp $ */

#define VM_PAGE_ALLOC_MEMORY_STATS
@@ -1847,3 +1847,141 @@ vm_page_free_memory(list)
simple_unlock(&vm_page_queue_free_lock);
splx(s);
}
+
+#if defined(MACHINE_NONCONTIG) && !defined(MACHINE_PAGES)
+/*
+ * We implement pmap_steal_memory and pmap_startup with the help
+ * of two simpler functions, pmap_virtual_space and pmap_next_page.
+ */
+vm_offset_t
+pmap_steal_memory(size)
+ vm_size_t size;
+{
+ vm_offset_t addr, vaddr, paddr;
+
+#ifdef i386 /* XXX i386 calls pmap_steal_memory before vm_mem_init() */
+ if (cnt.v_page_size == 0) /* XXX */
+ vm_set_page_size();
+#endif
+
+ /*
+ * We round the size up to an integer multiple of 4 bytes.
+ */
+ size = (size + 3) &~ 3; /* XXX */
+
+ /*
+ * If this is the first call to pmap_steal_memory,
+ * we have to initialize ourselves.
+ */
+ if (virtual_space_start == virtual_space_end) {
+ pmap_virtual_space(&virtual_space_start, &virtual_space_end);
+
+ /*
+ * The initial values must be aligned properly, and
+ * we don't trust the pmap module to do it right.
+ */
+ virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
+ }
+
+ /*
+ * Allocate virtual memory for this request.
+ */
+ addr = virtual_space_start;
+ virtual_space_start += size;
+
+ /*
+ * Allocate and map physical pages to back new virtual pages.
+ */
+ for (vaddr = round_page(addr); vaddr < addr + size;
+ vaddr += PAGE_SIZE) {
+ if (!pmap_next_page(&paddr))
+ panic("pmap_steal_memory");
+
+ /*
+ * XXX Logically, these mappings should be wired,
+ * but some pmap modules barf if they are.
+ */
+ pmap_enter(pmap_kernel(), vaddr, paddr,
+ VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ }
+
+ return addr;
+}
+
+void
+pmap_startup(startp, endp)
+ vm_offset_t *startp;
+ vm_offset_t *endp;
+{
+ unsigned int i, freepages;
+ vm_offset_t paddr;
+
+ /*
+ * We calculate how many page frames we will have
+ * and then allocate the page structures in one chunk.
+ * The calculation is non-trivial. We want the array to cover
+ * every page left over after the array itself is stolen:
+ *
+ * vmpages >= freepages - (vmpages * sizeof(*vm_page_array)) / PAGE_SIZE
+ *
+ * which, with some algebra, becomes:
+ *
+ * vmpages >= (freepages * PAGE_SIZE) / (PAGE_SIZE + sizeof(*vm_page_array))
+ *
+ * The value of vm_page_count need not be exact, but must be
+ * large enough so vm_page_array handles the index range.
+ */
+ freepages = pmap_free_pages();
+ /* Fudge slightly upward to compensate for the truncation below. */
+ freepages += 1;
+
+ vm_page_count = (PAGE_SIZE * freepages) /
+ (PAGE_SIZE + sizeof(*vm_page_array));
+
+ vm_page_array = (vm_page_t)
+ pmap_steal_memory(vm_page_count * sizeof(*vm_page_array));
+ bzero(vm_page_array, vm_page_count * sizeof(*vm_page_array));
+
+#ifdef DIAGNOSTIC
+ /*
+ * Initialize everything in case the holes are stepped in,
+ * and set PA to something that will cause a panic...
+ */
+ for (i = 0; i < vm_page_count; i++)
+ vm_page_array[i].phys_addr = 0xdeadbeef;
+#endif
+
+ /*
+ * Initialize the page frames.
+ * Note that some page indices may not be usable
+ * when pmap_free_pages() counts pages in a hole.
+ */
+ if (!pmap_next_page(&paddr))
+ panic("pmap_startup: can't get first page");
+ first_page = pmap_page_index(paddr);
+ i = 0;
+ for (;;) {
+ /* Initialize a page array element. */
+ VM_PAGE_INIT(&vm_page_array[i], NULL, 0);
+ vm_page_array[i].phys_addr = paddr;
+ vm_page_free(&vm_page_array[i]);
+
+ /* Are there more physical pages? */
+ if (!pmap_next_page(&paddr))
+ break;
+ i = pmap_page_index(paddr) - first_page;
+
+ /* Don't trust pmap_page_index()... */
+ if (
+#if 0
+ /* Cannot happen; i is unsigned */
+ i < 0 ||
+#endif
+ i >= vm_page_count)
+ panic("pmap_startup: bad i=0x%x", i);
+ }
+
+ *startp = virtual_space_start;
+ *endp = virtual_space_end;
+}
+#endif /* MACHINE_NONCONTIG && !MACHINE_PAGES */
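
The functions above lean on the simpler MD hooks pmap_virtual_space(),
pmap_next_page() and pmap_free_pages(). A minimal sketch of how a pmap module
with a single contiguous RAM segment might provide them; this is illustrative
only, not from this commit, and avail_next, avail_end, virtual_avail and
virtual_end are assumed bootstrap bookkeeping variables:

/*
 * Hypothetical single-segment backend (not part of this commit).
 * avail_next/avail_end bound the physical RAM not yet handed out;
 * virtual_avail/virtual_end bound the free kernel virtual space.
 */
static vm_offset_t avail_next, avail_end;	/* physical */
static vm_offset_t virtual_avail, virtual_end;	/* virtual */

u_int
pmap_free_pages()
{
	/* Pages still available; atop() converts bytes to pages. */
	return atop(avail_end - avail_next);
}

boolean_t
pmap_next_page(paddrp)
	vm_offset_t *paddrp;
{
	if (avail_next >= avail_end)
		return FALSE;		/* out of physical memory */
	*paddrp = avail_next;
	avail_next += PAGE_SIZE;
	return TRUE;
}

void
pmap_virtual_space(startp, endp)
	vm_offset_t *startp, *endp;
{
	*startp = virtual_avail;
	*endp = virtual_end;
}

With hooks like these, pmap_steal_memory() carves bytes out of the kernel
virtual window and backs each new page from pmap_next_page(), and
pmap_startup() sizes vm_page_array from whatever pmap_free_pages() still
reports afterwards.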