-rw-r--r--  sys/arch/i386/i386/pmap.c     |   8
-rw-r--r--  sys/arch/i386/i386/pmap.old.c |   8
-rw-r--r--  sys/vm/pmap.h                 |   6
-rw-r--r--  sys/vm/vm_page.c              | 140
-rw-r--r--  sys/vm/vm_page.h              |   3
5 files changed, 14 insertions, 151 deletions
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index bf773284dde..a70c4141b38 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.23 1998/01/20 18:40:15 niklas Exp $ */
+/* $OpenBSD: pmap.c,v 1.24 1998/03/20 15:40:32 niklas Exp $ */
 /* $NetBSD: pmap.c,v 1.36 1996/05/03 19:42:22 christos Exp $ */
 
 /*
@@ -287,12 +287,15 @@ pmap_bootstrap(virtual_start)
         */
        virtual_avail = reserve_dumppages(virtual_avail);
 
+       /* Register the page size with the vm system */
+       vm_set_page_size();
+
        /* flawed, no mappings?? */
        if (ctob(physmem) > 31*1024*1024 && MAXKPDE != NKPDE) {
                vm_offset_t p;
                int i;
 
-               p = pmap_steal_memory((MAXKPDE-NKPDE+1) * NBPG);
+               p = vm_bootstrap_steal_memory((MAXKPDE-NKPDE+1) * NBPG);
                bzero((void *)p, (MAXKPDE-NKPDE+1) * NBPG);
                p = round_page(p);
                for (i = NKPDE; i < MAXKPDE; i++, p += NBPG)
@@ -308,7 +311,6 @@ pmap_virtual_space(startp, endp)
        vm_offset_t *startp;
        vm_offset_t *endp;
 {
-
        *startp = virtual_avail;
        *endp = virtual_end;
 }
diff --git a/sys/arch/i386/i386/pmap.old.c b/sys/arch/i386/i386/pmap.old.c
index fc10b16125b..ae9d1696b9b 100644
--- a/sys/arch/i386/i386/pmap.old.c
+++ b/sys/arch/i386/i386/pmap.old.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.old.c,v 1.23 1998/01/20 18:40:15 niklas Exp $ */
+/* $OpenBSD: pmap.old.c,v 1.24 1998/03/20 15:40:32 niklas Exp $ */
 /* $NetBSD: pmap.c,v 1.36 1996/05/03 19:42:22 christos Exp $ */
 
 /*
@@ -287,12 +287,15 @@ pmap_bootstrap(virtual_start)
         */
        virtual_avail = reserve_dumppages(virtual_avail);
 
+       /* Register the page size with the vm system */
+       vm_set_page_size();
+
        /* flawed, no mappings?? */
        if (ctob(physmem) > 31*1024*1024 && MAXKPDE != NKPDE) {
                vm_offset_t p;
                int i;
 
-               p = pmap_steal_memory((MAXKPDE-NKPDE+1) * NBPG);
+               p = vm_bootstrap_steal_memory((MAXKPDE-NKPDE+1) * NBPG);
                bzero((void *)p, (MAXKPDE-NKPDE+1) * NBPG);
                p = round_page(p);
                for (i = NKPDE; i < MAXKPDE; i++, p += NBPG)
@@ -308,7 +311,6 @@ pmap_virtual_space(startp, endp)
        vm_offset_t *startp;
        vm_offset_t *endp;
 {
-
        *startp = virtual_avail;
        *endp = virtual_end;
 }
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index a289e32b817..35fa35391de 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.7 1998/03/02 17:07:12 niklas Exp $ */
+/* $OpenBSD: pmap.h,v 1.8 1998/03/20 15:40:34 niklas Exp $ */
 /* $NetBSD: pmap.h,v 1.16 1996/03/31 22:15:32 pk Exp $ */
 
 /*
@@ -207,8 +207,4 @@ __END_DECLS
 #endif /* kernel*/
 #endif /* PMAP_EXCLUDE_DECLS */
 
-/* XXX these are about to disappear real soon */
-void           pmap_startup __P((vm_offset_t *, vm_offset_t *));
-vm_offset_t    pmap_steal_memory __P((vm_size_t));
-
 #endif /* _PMAP_VM_ */
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 0f79856ccfc..cf5e97a6683 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_page.c,v 1.12 1998/03/01 11:36:42 niklas Exp $ */
+/* $OpenBSD: vm_page.c,v 1.13 1998/03/20 15:40:36 niklas Exp $ */
 /* $NetBSD: vm_page.c,v 1.41 1998/02/08 18:24:52 thorpej Exp $ */
 
 #define VM_PAGE_ALLOC_MEMORY_STATS
@@ -1847,141 +1847,3 @@ vm_page_free_memory(list)
        simple_unlock(&vm_page_queue_free_lock);
        splx(s);
 }
-
-#if defined(MACHINE_NONCONTIG) && !defined(MACHINE_PAGES)
-/*
- * We implement pmap_steal_memory and pmap_startup with the help
- * of two simpler functions, pmap_virtual_space and pmap_next_page.
- */
-vm_offset_t
-pmap_steal_memory(size)
-       vm_size_t size;
-{
-       vm_offset_t addr, vaddr, paddr;
-
-#ifdef i386    /* XXX i386 calls pmap_steal_memory before vm_mem_init() */
-       if (cnt.v_page_size == 0)       /* XXX */
-               vm_set_page_size();
-#endif
-
-       /*
-        * We round the size to an integer multiple.
-        */
-       size = (size + 3) &~ 3; /* XXX */
-
-       /*
-        * If this is the first call to pmap_steal_memory,
-        * we have to initialize ourself.
-        */
-       if (virtual_space_start == virtual_space_end) {
-               pmap_virtual_space(&virtual_space_start, &virtual_space_end);
-
-               /*
-                * The initial values must be aligned properly, and
-                * we don't trust the pmap module to do it right.
-                */
-               virtual_space_start = round_page(virtual_space_start);
-               virtual_space_end = trunc_page(virtual_space_end);
-       }
-
-       /*
-        * Allocate virtual memory for this request.
-        */
-       addr = virtual_space_start;
-       virtual_space_start += size;
-
-       /*
-        * Allocate and map physical pages to back new virtual pages.
-        */
-       for (vaddr = round_page(addr); vaddr < addr + size;
-           vaddr += PAGE_SIZE) {
-               if (!pmap_next_page(&paddr))
-                       panic("pmap_steal_memory");
-
-               /*
-                * XXX Logically, these mappings should be wired,
-                * but some pmap modules barf if they are.
-                */
-               pmap_enter(pmap_kernel(), vaddr, paddr,
-                   VM_PROT_READ|VM_PROT_WRITE, FALSE);
-       }
-
-       return addr;
-}
-
-void
-pmap_startup(startp, endp)
-       vm_offset_t *startp;
-       vm_offset_t *endp;
-{
-       unsigned int i, freepages;
-       vm_offset_t paddr;
-
-       /*
-        * We calculate how many page frames we will have
-        * and then allocate the page structures in one chunk.
-        * The calculation is non-trivial.  We want:
-        *
-        *      vmpages > (freepages - (vmpages / sizeof(vm_page_t)))
-        *
-        * which, with some algebra, becomes:
-        *
-        *      vmpages > (freepages * sizeof(...) / (1 + sizeof(...)))
-        *
-        * The value of vm_page_count need not be exact, but must be
-        * large enough so vm_page_array handles the index range.
-        */
-       freepages = pmap_free_pages();
-       /* Fudge slightly to deal with truncation error. */
-       freepages += 1; /* fudge */
-
-       vm_page_count = (PAGE_SIZE * freepages) /
-           (PAGE_SIZE + sizeof(*vm_page_array));
-
-       vm_page_array = (vm_page_t)
-           pmap_steal_memory(vm_page_count * sizeof(*vm_page_array));
-       bzero(vm_page_array, vm_page_count * sizeof(*vm_page_array));
-
-#ifdef DIAGNOSTIC
-       /*
-        * Initialize everyting in case the holes are stepped in,
-        * and set PA to something that will cause a panic...
-        */
-       for (i = 0; i < vm_page_count; i++)
-               vm_page_array[i].phys_addr = 0xdeadbeef;
-#endif
-
-       /*
-        * Initialize the page frames.
-        * Note that some page indices may not be usable
-        * when pmap_free_pages() counts pages in a hole.
-        */
-       if (!pmap_next_page(&paddr))
-               panic("pmap_startup: can't get first page");
-       first_page = pmap_page_index(paddr);
-       i = 0;
-       for (;;) {
-               /* Initialize a page array element. */
-               VM_PAGE_INIT(&vm_page_array[i], NULL, 0);
-               vm_page_array[i].phys_addr = paddr;
-               vm_page_free(&vm_page_array[i]);
-
-               /* Are there more physical pages? */
-               if (!pmap_next_page(&paddr))
-                       break;
-               i = pmap_page_index(paddr) - first_page;
-
-               /* Don't trust pmap_page_index()... */
-               if (
-#if 0
-                   /* Cannot happen; i is unsigned */
-                   i < 0 ||
-#endif
-                   i >= vm_page_count)
-                       panic("pmap_startup: bad i=0x%x", i);
-       }
-
-       *startp = virtual_space_start;
-       *endp = virtual_space_end;
-}
-#endif /* MACHINE_NONCONTIG && !MACHINE_PAGES */
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index ab48d7dffd6..8ee9f33634a 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_page.h,v 1.5 1998/03/01 00:38:20 niklas Exp $ */
+/* $OpenBSD: vm_page.h,v 1.6 1998/03/20 15:40:38 niklas Exp $ */
 /* $NetBSD: vm_page.h,v 1.24 1998/02/10 14:09:03 mrg Exp $ */
 
 /*
@@ -333,6 +333,7 @@ int            vm_page_alloc_memory __P((vm_size_t size, vm_offset_t low,
 void           vm_page_free_memory __P((struct pglist *list));
 #if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
 void           vm_page_bootstrap __P((vm_offset_t *, vm_offset_t *));
+vm_offset_t    vm_bootstrap_steal_memory __P((vm_size_t));
 #endif
 void           vm_page_copy __P((vm_page_t, vm_page_t));
 void           vm_page_deactivate __P((vm_page_t));
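The pmap_steal_memory() routine deleted above, and the machine-independent vm_bootstrap_steal_memory() that the pmap code now calls instead, are bump-pointer bootstrap allocators: on the first call they fetch the managed kernel VA range from pmap_virtual_space(), align it, and then satisfy each request by advancing the start pointer and backing any newly touched page with the next free physical page from pmap_next_page(). The following standalone C program is a minimal userland sketch of that algorithm only; the fake_* helpers, the address constants, and the printf tracing are illustrative stand-ins for the kernel interfaces, not the kernel code itself.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ         4096UL          /* assumed page size */
#define round_page(x)   (((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))
#define trunc_page(x)   ((x) & ~(PAGE_SZ - 1))

static uintptr_t virtual_space_start, virtual_space_end;
static uintptr_t next_free_pa = 0x100000;       /* fake physical free list */

/* Stand-in for pmap_virtual_space(): report the managed kernel VA range. */
static void
fake_pmap_virtual_space(uintptr_t *startp, uintptr_t *endp)
{
        *startp = 0x400123;     /* deliberately unaligned */
        *endp = 0x800000;
}

/* Stand-in for pmap_next_page(): hand out the next free physical page. */
static int
fake_pmap_next_page(uintptr_t *pap)
{
        *pap = next_free_pa;
        next_free_pa += PAGE_SZ;
        return 1;
}

static uintptr_t
bootstrap_steal_memory(uintptr_t size)
{
        uintptr_t addr, va, pa;

        size = (size + 3) & ~(uintptr_t)3;      /* round to an int multiple */

        /* First call: fetch and align the virtual range we carve from. */
        if (virtual_space_start == virtual_space_end) {
                fake_pmap_virtual_space(&virtual_space_start,
                    &virtual_space_end);
                virtual_space_start = round_page(virtual_space_start);
                virtual_space_end = trunc_page(virtual_space_end);
        }

        /* Bump the start pointer past the stolen chunk. */
        addr = virtual_space_start;
        virtual_space_start += size;

        /* Back every not-yet-mapped page of the chunk with a physical page. */
        for (va = round_page(addr); va < addr + size; va += PAGE_SZ) {
                if (!fake_pmap_next_page(&pa))
                        abort();        /* the kernel version panics here */
                printf("map va 0x%lx -> pa 0x%lx\n",
                    (unsigned long)va, (unsigned long)pa);
        }
        return addr;
}

int
main(void)
{
        uintptr_t a = bootstrap_steal_memory(3 * PAGE_SZ + 100);
        uintptr_t b = bootstrap_steal_memory(200);

        assert(b == a + 3 * PAGE_SZ + 100);     /* contiguous bump allocation */
        printf("stole chunks at 0x%lx and 0x%lx\n",
            (unsigned long)a, (unsigned long)b);
        return 0;
}

Note that the second request maps no new pages because it still fits inside the page mapped for the first one, which is exactly how the original loop starting at round_page(addr) behaves.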
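The deleted pmap_startup() sizes vm_page_array with vm_page_count = (PAGE_SIZE * freepages) / (PAGE_SIZE + sizeof(*vm_page_array)), plus a one-page fudge on freepages, so that the array and the pages it describes together fit in the memory reported by pmap_free_pages(). The small check below exercises that arithmetic over a sweep of memory sizes; the 4 KB page and the 68-byte struct vm_page size are assumed example values, not figures taken from this tree.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ         4096ULL         /* assumed page size */
#define VM_PAGE_SZ      68ULL           /* assumed sizeof(struct vm_page) */

int
main(void)
{
        uint64_t freepages, fudged, count, array_pages, described;

        /* Sweep memory sizes up to roughly 8 GB worth of 4 KB pages. */
        for (freepages = 1; freepages < (2ULL << 20); freepages += 777) {
                fudged = freepages + 1;         /* the "fudge" for truncation */
                count = (PAGE_SZ * fudged) / (PAGE_SZ + VM_PAGE_SZ);

                /* Pages consumed by the vm_page array itself. */
                array_pages = (count * VM_PAGE_SZ + PAGE_SZ - 1) / PAGE_SZ;
                /* Pages left over that the array must be able to index. */
                described = freepages - array_pages;

                assert(count >= described);
        }
        printf("vm_page_count estimate covers the index range\n");
        return 0;
}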