Diffstat (limited to 'sys/vm/vm_page.c')
-rw-r--r--	sys/vm/vm_page.c	358
1 file changed, 166 insertions, 192 deletions
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 37131d3c021..c3c0bf6e460 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vm_page.c,v 1.4 1997/01/04 14:17:30 niklas Exp $	*/
+/*	$OpenBSD: vm_page.c,v 1.5 1997/04/17 01:25:21 niklas Exp $	*/
 /*	$NetBSD: vm_page.c,v 1.28 1996/02/05 01:54:05 christos Exp $	*/
 
 /*
@@ -36,7 +36,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	@(#)vm_page.c	8.3 (Berkeley) 3/21/94
+ *	@(#)vm_page.c	8.3 (Berkeley) 3/21/94
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -66,7 +66,7 @@
  */
 
 /*
- *	Resident memory management module.
+ *	Resident memory management module.
  */
 
 #include <sys/param.h>
@@ -82,17 +82,17 @@
 
 #ifdef	MACHINE_NONCONTIG
 /*
- *	These variables record the values returned by vm_page_bootstrap,
- *	for debugging purposes.  The implementation of pmap_steal_memory
- *	and pmap_startup here also uses them internally.
+ *	These variables record the values returned by vm_page_bootstrap,
+ *	for debugging purposes.  The implementation of pmap_steal_memory
+ *	and pmap_startup here also uses them internally.
  */
 vm_offset_t	virtual_space_start;
 vm_offset_t	virtual_space_end;
 #endif	/* MACHINE_NONCONTIG */
 
 /*
- *	Associated with page of user-allocatable memory is a
- *	page structure.
+ *	Associated with page of user-allocatable memory is a
+ *	page structure.
  */
 
 struct pglist	*vm_page_buckets;	/* Array of buckets */
@@ -123,13 +123,13 @@ vm_size_t	page_mask;
 int		page_shift;
 
 /*
- *	vm_set_page_size:
+ *	vm_set_page_size:
  *
- *	Sets the page size, perhaps based upon the memory
- *	size.  Must be called before any use of page-size
- *	dependent functions.
+ *	Sets the page size, perhaps based upon the memory
+ *	size.  Must be called before any use of page-size
+ *	dependent functions.
  *
- *	Sets page_shift and page_mask from cnt.v_page_size.
+ *	Sets page_shift and page_mask from cnt.v_page_size.
  */
 void
 vm_set_page_size()
@@ -148,14 +148,14 @@ vm_set_page_size()
 
 #ifdef	MACHINE_NONCONTIG
 /*
- *	vm_page_bootstrap:
+ *	vm_page_bootstrap:
  *
- *	Initializes the resident memory module.
+ *	Initializes the resident memory module.
  *
- *	Allocates memory for the page cells, and
- *	for the object/offset-to-page hash table headers.
- *	Each page cell is initialized and placed on the free list.
- *	Returns the range of available kernel virtual memory.
+ *	Allocates memory for the page cells, and
+ *	for the object/offset-to-page hash table headers.
+ *	Each page cell is initialized and placed on the free list.
+ *	Returns the range of available kernel virtual memory.
  */
 void
 vm_page_bootstrap(startp, endp)
@@ -170,55 +170,50 @@ vm_page_bootstrap(startp, endp)
 
 	/*
-	 *	Initialize the locks
+	 *	Initialize the locks
 	 */
-
 	simple_lock_init(&vm_page_queue_free_lock);
 	simple_lock_init(&vm_page_queue_lock);
 
 	/*
-	 *	Initialize the queue headers for the free queue,
-	 *	the active queue and the inactive queue.
+	 *	Initialize the queue headers for the free queue,
+	 *	the active queue and the inactive queue.
 	 */
-
 	TAILQ_INIT(&vm_page_queue_free);
 	TAILQ_INIT(&vm_page_queue_active);
 	TAILQ_INIT(&vm_page_queue_inactive);
 
 	/*
-	 *	Pre-allocate maps and map entries that cannot be dynamically
-	 *	allocated via malloc().  The maps include the kernel_map and
-	 *	kmem_map which must be initialized before malloc() will
-	 *	work (obviously).  Also could include pager maps which would
-	 *	be allocated before kmeminit.
+	 *	Pre-allocate maps and map entries that cannot be dynamically
+	 *	allocated via malloc().  The maps include the kernel_map and
+	 *	kmem_map which must be initialized before malloc() will
+	 *	work (obviously).  Also could include pager maps which would
+	 *	be allocated before kmeminit.
 	 *
-	 *	Allow some kernel map entries... this should be plenty
-	 *	since people shouldn't be cluttering up the kernel
-	 *	map (they should use their own maps).
+	 *	Allow some kernel map entries... this should be plenty
+	 *	since people shouldn't be cluttering up the kernel
+	 *	map (they should use their own maps).
 	 */
-
 	kentry_data_size = round_page(MAX_KMAP*sizeof(struct vm_map) +
 	    MAX_KMAPENT*sizeof(struct vm_map_entry));
 	kentry_data = (vm_offset_t) pmap_steal_memory(kentry_data_size);
 
 	/*
-	 *	Validate these zone addresses.
+	 *	Validate these zone addresses.
 	 */
-
 	bzero((caddr_t) kentry_data, kentry_data_size);
 
 	/*
-	 *	Allocate (and initialize) the virtual-to-physical
-	 *	table hash buckets.
+	 *	Allocate (and initialize) the virtual-to-physical
+	 *	table hash buckets.
 	 *
-	 *	The number of buckets MUST BE a power of 2, and
-	 *	the actual value is the next power of 2 greater
-	 *	than the number of physical pages in the system.
+	 *	The number of buckets MUST BE a power of 2, and
+	 *	the actual value is the next power of 2 greater
+	 *	than the number of physical pages in the system.
 	 *
-	 *	Note:
-	 *		This computation can be tweaked if desired.
+	 *	Note:
+	 *		This computation can be tweaked if desired.
 	 */
-
 	if (vm_page_bucket_count == 0) {
 		unsigned int npages = pmap_free_pages();
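The vm_set_page_size() hunk above notes that page_shift and page_mask are derived from cnt.v_page_size. A minimal userland sketch of that derivation follows; the 4096 page size is an assumed example, and the variable names merely mirror the kernel's, so this is an illustration rather than the kernel code:

#include <stdio.h>

/*
 * Derive a shift for byte/page conversion and a mask for the
 * offset within a page, from an assumed power-of-two page size.
 */
int
main(void)
{
	unsigned long page_size = 4096;		/* assumed cnt.v_page_size */
	unsigned long page_mask = page_size - 1;
	int page_shift = 0;
	unsigned long sz;

	for (sz = page_size; sz > 1; sz >>= 1)	/* log2 of the page size */
		page_shift++;

	printf("shift=%d mask=%#lx\n", page_shift, page_mask);
	printf("addr 0x12345 -> page %#lx, offset %#lx\n",
	    0x12345UL >> page_shift, 0x12345UL & page_mask);
	return 0;
}

With a power-of-two page size the shift and mask make page arithmetic a matter of bit operations, which is why the module insists on setting these before any page-size dependent function runs.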
@@ -241,13 +236,12 @@ vm_page_bootstrap(startp, endp)
 
 	simple_lock_init(&bucket_lock);
 
 	/*
-	 *	Machine-dependent code allocates the resident page table.
-	 *	It uses VM_PAGE_INIT to initialize the page frames.
-	 *	The code also returns to us the virtual space available
-	 *	to the kernel.  We don't trust the pmap module
-	 *	to get the alignment right.
+	 *	Machine-dependent code allocates the resident page table.
+	 *	It uses VM_PAGE_INIT to initialize the page frames.
+	 *	The code also returns to us the virtual space available
+	 *	to the kernel.  We don't trust the pmap module
+	 *	to get the alignment right.
 	 */
-
 	pmap_startup(&virtual_space_start, &virtual_space_end);
 	virtual_space_start = round_page(virtual_space_start);
 	virtual_space_end = trunc_page(virtual_space_end);
@@ -261,13 +255,13 @@ vm_page_bootstrap(startp, endp)
 #else	/* MACHINE_NONCONTIG */
 
 /*
- *	vm_page_startup:
+ *	vm_page_startup:
  *
- *	Initializes the resident memory module.
+ *	Initializes the resident memory module.
  *
- *	Allocates memory for the page cells, and
- *	for the object/offset-to-page hash table headers.
- *	Each page cell is initialized and placed on the free list.
+ *	Allocates memory for the page cells, and
+ *	for the object/offset-to-page hash table headers.
+ *	Each page cell is initialized and placed on the free list.
  */
 void
 vm_page_startup(start, end)
@@ -284,32 +278,29 @@ vm_page_startup(start, end)
 
 	/*
-	 *	Initialize the locks
+	 *	Initialize the locks
 	 */
-
 	simple_lock_init(&vm_page_queue_free_lock);
 	simple_lock_init(&vm_page_queue_lock);
 
 	/*
-	 *	Initialize the queue headers for the free queue,
-	 *	the active queue and the inactive queue.
+	 *	Initialize the queue headers for the free queue,
+	 *	the active queue and the inactive queue.
 	 */
-
 	TAILQ_INIT(&vm_page_queue_free);
 	TAILQ_INIT(&vm_page_queue_active);
 	TAILQ_INIT(&vm_page_queue_inactive);
 
 	/*
-	 *	Calculate the number of hash table buckets.
+	 *	Calculate the number of hash table buckets.
 	 *
-	 *	The number of buckets MUST BE a power of 2, and
-	 *	the actual value is the next power of 2 greater
-	 *	than the number of physical pages in the system.
+	 *	The number of buckets MUST BE a power of 2, and
+	 *	the actual value is the next power of 2 greater
+	 *	than the number of physical pages in the system.
 	 *
-	 *	Note:
-	 *		This computation can be tweaked if desired.
+	 *	Note:
+	 *		This computation can be tweaked if desired.
 	 */
-
 	if (vm_page_bucket_count == 0) {
 		vm_page_bucket_count = 1;
 		while (vm_page_bucket_count < atop(*end - *start))
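Both bootstrap paths size the hash table with the doubling loop shown at the end of this hunk: start at 1 and shift left until the count covers the managed pages. A self-contained sketch of the same computation; PAGE_SHIFT and the memory range are invented example values, and atop() is approximated as a byte-to-page shift:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KB pages */
#define atop(x)		((unsigned long)(x) >> PAGE_SHIFT)

int
main(void)
{
	unsigned long start = 0, end = 64UL * 1024 * 1024;	/* 64 MB example */
	unsigned long bucket_count = 1;

	/*
	 * Next power of 2 >= number of physical pages, as in
	 * vm_page_startup().  The power-of-2 property is what later
	 * lets vm_page_hash() mask instead of divide.
	 */
	while (bucket_count < atop(end - start))
		bucket_count <<= 1;

	printf("%lu pages -> %lu buckets, mask %#lx\n",
	    atop(end - start), bucket_count, bucket_count - 1);
	return 0;
}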
@@ -319,7 +310,7 @@ vm_page_startup(start, end)
 	vm_page_hash_mask = vm_page_bucket_count - 1;
 
 	/*
-	 *	Allocate (and initialize) the hash table buckets.
+	 *	Allocate (and initialize) the hash table buckets.
 	 */
 	vm_page_buckets = (struct pglist *)
 	    pmap_bootstrap_alloc(vm_page_bucket_count * sizeof(struct pglist));
@@ -333,41 +324,37 @@ vm_page_startup(start, end)
 
 	simple_lock_init(&bucket_lock);
 
 	/*
-	 *	Truncate the remainder of physical memory to our page size.
+	 *	Truncate the remainder of physical memory to our page size.
 	 */
-
 	*end = trunc_page(*end);
 
 	/*
-	 *	Pre-allocate maps and map entries that cannot be dynamically
-	 *	allocated via malloc().  The maps include the kernel_map and
-	 *	kmem_map which must be initialized before malloc() will
-	 *	work (obviously).  Also could include pager maps which would
-	 *	be allocated before kmeminit.
+	 *	Pre-allocate maps and map entries that cannot be dynamically
+	 *	allocated via malloc().  The maps include the kernel_map and
+	 *	kmem_map which must be initialized before malloc() will
+	 *	work (obviously).  Also could include pager maps which would
+	 *	be allocated before kmeminit.
 	 *
-	 *	Allow some kernel map entries... this should be plenty
-	 *	since people shouldn't be cluttering up the kernel
-	 *	map (they should use their own maps).
+	 *	Allow some kernel map entries... this should be plenty
+	 *	since people shouldn't be cluttering up the kernel
+	 *	map (they should use their own maps).
 	 */
-
 	kentry_data_size = round_page(MAX_KMAP*sizeof(struct vm_map) +
 	    MAX_KMAPENT*sizeof(struct vm_map_entry));
 	kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);
 
 	/*
-	 *	Compute the number of pages of memory that will be
-	 *	available for use (taking into account the overhead
-	 *	of a page structure per page).
+	 *	Compute the number of pages of memory that will be
+	 *	available for use (taking into account the overhead
+	 *	of a page structure per page).
 	 */
-
 	cnt.v_free_count = npages = (*end - *start + sizeof(struct vm_page)) /
 	    (PAGE_SIZE + sizeof(struct vm_page));
 
 	/*
-	 *	Record the extent of physical memory that the
-	 *	virtual memory system manages.
+	 *	Record the extent of physical memory that the
+	 *	virtual memory system manages.
 	 */
-
 	first_page = *start;
 	first_page += npages*sizeof(struct vm_page);
 	first_page = atop(round_page(first_page));
@@ -378,17 +365,15 @@ vm_page_startup(start, end)
 
 	/*
-	 *	Allocate and clear the mem entry structures.
+	 *	Allocate and clear the mem entry structures.
 	 */
-
 	m = vm_page_array = (vm_page_t)
 	    pmap_bootstrap_alloc(npages * sizeof(struct vm_page));
 
 	/*
-	 *	Initialize the mem entry structures now, and
-	 *	put them in the free queue.
+	 *	Initialize the mem entry structures now, and
+	 *	put them in the free queue.
 	 */
-
 	pa = first_phys_addr;
 	while (npages--) {
 		m->flags = 0;
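The cnt.v_free_count line above charges each usable page PAGE_SIZE bytes plus one struct vm_page of bookkeeping, so the managed range is divided by the sum. A worked version of that arithmetic; the structure size here is a hypothetical stand-in, not the real sizeof(struct vm_page):

#include <stdio.h>

#define PAGE_SIZE	4096UL		/* assumed */

int
main(void)
{
	unsigned long vm_page_sz = 104;	/* hypothetical sizeof(struct vm_page) */
	unsigned long start = 0, end = 64UL * 1024 * 1024;

	/*
	 * Each usable page consumes PAGE_SIZE bytes plus one page
	 * structure; the "+ vm_page_sz" in the numerator recovers the
	 * page otherwise lost to integer truncation.
	 */
	unsigned long npages =
	    (end - start + vm_page_sz) / (PAGE_SIZE + vm_page_sz);

	printf("%lu bytes -> %lu usable pages (%lu without overhead)\n",
	    end - start, npages, (end - start) / PAGE_SIZE);
	return 0;
}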
@@ -400,8 +385,8 @@ vm_page_startup(start, end)
 	}
 
 	/*
-	 *	Initialize vm_pages_needed lock here - don't wait for pageout
-	 *	daemon	XXX
+	 *	Initialize vm_pages_needed lock here - don't wait for pageout
+	 *	daemon	XXX
 	 */
 	simple_lock_init(&vm_pages_needed_lock);
@@ -412,8 +397,8 @@ vm_page_startup(start, end)
 
 #if	defined(MACHINE_NONCONTIG) && !defined(MACHINE_PAGES)
 /*
- *	We implement pmap_steal_memory and pmap_startup with the help
- *	of two simpler functions, pmap_virtual_space and pmap_next_page.
+ *	We implement pmap_steal_memory and pmap_startup with the help
+ *	of two simpler functions, pmap_virtual_space and pmap_next_page.
 */
 vm_offset_t
 pmap_steal_memory(size)
@@ -427,22 +412,22 @@ pmap_steal_memory(size)
 #endif
 
 	/*
-	 *	We round the size to an integer multiple.
+	 *	We round the size to an integer multiple.
 	 */
 	size = (size + 3) &~ 3; /* XXX */
 
 	/*
-	 *	If this is the first call to pmap_steal_memory,
-	 *	we have to initialize ourself.
+	 *	If this is the first call to pmap_steal_memory,
+	 *	we have to initialize ourself.
 	 */
 	if (virtual_space_start == virtual_space_end) {
 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
 
 		/*
-		 *	The initial values must be aligned properly, and
-		 *	we don't trust the pmap module to do it right.
+		 *	The initial values must be aligned properly, and
+		 *	we don't trust the pmap module to do it right.
 		 */
 		virtual_space_start = round_page(virtual_space_start);
@@ -450,14 +435,14 @@ pmap_steal_memory(size)
 	}
 
 	/*
-	 *	Allocate virtual memory for this request.
+	 *	Allocate virtual memory for this request.
 	 */
 	addr = virtual_space_start;
 	virtual_space_start += size;
 
 	/*
-	 *	Allocate and map physical pages to back new virtual pages.
+	 *	Allocate and map physical pages to back new virtual pages.
 	 */
 	for (vaddr = round_page(addr);
@@ -467,8 +452,8 @@ pmap_steal_memory(size)
 			panic("pmap_steal_memory");
 
 		/*
-		 *	XXX Logically, these mappings should be wired,
-		 *	but some pmap modules barf if they are.
+		 *	XXX Logically, these mappings should be wired,
+		 *	but some pmap modules barf if they are.
 		 */
 		pmap_enter(pmap_kernel(), vaddr, paddr,
@@ -491,25 +476,24 @@ pmap_startup(startp, endp)
 	 * and then allocate the page structures in one chunk.
 	 * The calculation is non-trivial.  We want:
 	 *
-	 *	vmpages > (freepages - (vmpages / sizeof(vm_page_t)))
+	 *	vmpages > (freepages - (vmpages / sizeof(vm_page_t)))
 	 *
 	 * which, with some algebra, becomes:
 	 *
-	 *	vmpages > (freepages * sizeof(...) / (1 + sizeof(...)))
+	 *	vmpages > (freepages * sizeof(...) / (1 + sizeof(...)))
 	 *
 	 * The value of vm_page_count need not be exact, but must be
 	 * large enough so vm_page_array handles the index range.
 	 */
-
 	freepages = pmap_free_pages();
 	/* Fudge slightly to deal with truncation error. */
 	freepages += 1;	/* fudge */
 
 	vm_page_count = (PAGE_SIZE * freepages) /
-	    (PAGE_SIZE + sizeof(*vm_page_array));
+	    (PAGE_SIZE + sizeof(*vm_page_array));
 
 	vm_page_array = (vm_page_t)
-	    pmap_steal_memory(vm_page_count * sizeof(*vm_page_array));
+	    pmap_steal_memory(vm_page_count * sizeof(*vm_page_array));
 
 #ifdef	DIAGNOSTIC
 	/*
@@ -523,9 +507,9 @@ pmap_startup(startp, endp)
 #endif
 
 	/*
-	 *	Initialize the page frames.
-	 *	Note that some page indices may not be usable
-	 *	when pmap_free_pages() counts pages in a hole.
+	 *	Initialize the page frames.
+	 *	Note that some page indices may not be usable
+	 *	when pmap_free_pages() counts pages in a hole.
 	 */
 	if (!pmap_next_page(&paddr))
 		panic("pmap_startup: can't get first page");
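The block comment in pmap_startup() above states the sizing inequality and its rearranged form; the code then evaluates it as (PAGE_SIZE * freepages) / (PAGE_SIZE + sizeof(*vm_page_array)). A quick numeric check of that algebra, with invented values standing in for the structure size and the pmap_free_pages() result:

#include <stdio.h>

#define PAGE_SIZE	4096UL		/* assumed */

int
main(void)
{
	unsigned long sz = 104;			/* hypothetical sizeof(struct vm_page) */
	unsigned long freepages = 16384;	/* hypothetical pmap_free_pages() */

	freepages += 1;				/* same truncation fudge as the source */

	/*
	 * The page structures describing vm_page_count pages must fit
	 * in the pages the array itself does not describe.
	 */
	unsigned long vm_page_count =
	    (PAGE_SIZE * freepages) / (PAGE_SIZE + sz);
	unsigned long array_pages =
	    (vm_page_count * sz + PAGE_SIZE - 1) / PAGE_SIZE;

	printf("count=%lu, array occupies %lu pages, %lu pages left free\n",
	    vm_page_count, array_pages, freepages - 1 - array_pages);
	return 0;
}

As the original comment says, the count need not be exact; it only has to be large enough that every index pmap_next_page() can produce lands inside vm_page_array.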
@@ -548,7 +532,7 @@ pmap_startup(startp, endp)
 		    /* Cannot happen; i is unsigned */
 		    i < 0 ||
 #endif
-		    i >= vm_page_count)
+		    i >= vm_page_count)
 			panic("pmap_startup: bad i=0x%x", i);
 	}
@@ -558,22 +542,22 @@ pmap_startup(startp, endp)
 
 #endif	/* MACHINE_NONCONTIG && !MACHINE_PAGES */
 
 /*
- *	vm_page_hash:
+ *	vm_page_hash:
  *
- *	Distributes the object/offset key pair among hash buckets.
+ *	Distributes the object/offset key pair among hash buckets.
  *
- *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
+ *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 */
 #define vm_page_hash(object, offset) \
 	(((unsigned long)object+(unsigned long)atop(offset))&vm_page_hash_mask)
 
 /*
- *	vm_page_insert:		[ internal use only ]
+ *	vm_page_insert:		[ internal use only ]
  *
- *	Inserts the given mem entry into the object/object-page
- *	table and object list.
+ *	Inserts the given mem entry into the object/object-page
+ *	table and object list.
  *
- *	The object and page must be locked.
+ *	The object and page must be locked.
 */
 void
 vm_page_insert(mem, object, offset)
@@ -590,16 +574,14 @@ vm_page_insert(mem, object, offset)
 		panic("vm_page_insert: already inserted");
 
 	/*
-	 *	Record the object/offset pair in this page
+	 *	Record the object/offset pair in this page
 	 */
-
 	mem->object = object;
 	mem->offset = offset;
 
 	/*
-	 *	Insert it into the object_object/offset hash table
+	 *	Insert it into the object_object/offset hash table
 	 */
-
 	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
 	spl = splimp();
 	simple_lock(&bucket_lock);
@@ -608,28 +590,26 @@ vm_page_insert(mem, object, offset)
 	(void) splx(spl);
 
 	/*
-	 *	Now link into the object's list of backed pages.
+	 *	Now link into the object's list of backed pages.
 	 */
-
 	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
 	mem->flags |= PG_TABLED;
 
 	/*
-	 *	And show that the object has one more resident
-	 *	page.
+	 *	And show that the object has one more resident
+	 *	page.
 	 */
-
 	object->resident_page_count++;
 }
 
 /*
- *	vm_page_remove:		[ internal use only ]
- *				NOTE: used by device pager as well -wfj
+ *	vm_page_remove:		[ internal use only ]
+ *				NOTE: used by device pager as well -wfj
  *
- *	Removes the given mem entry from the object/offset-page
- *	table and the object page list.
+ *	Removes the given mem entry from the object/offset-page
+ *	table and the object page list.
  *
- *	The object and page must be locked.
+ *	The object and page must be locked.
 */
 void
 vm_page_remove(mem)
@@ -649,9 +629,8 @@ vm_page_remove(mem)
 		return;
 
 	/*
-	 *	Remove from the object_object/offset hash table
+	 *	Remove from the object_object/offset hash table
 	 */
-
 	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
 	spl = splimp();
 	simple_lock(&bucket_lock);
@@ -660,28 +639,26 @@ vm_page_remove(mem)
 	(void) splx(spl);
 
 	/*
-	 *	Now remove from the object's list of backed pages.
+	 *	Now remove from the object's list of backed pages.
 	 */
-
 	TAILQ_REMOVE(&mem->object->memq, mem, listq);
 
 	/*
-	 *	And show that the object has one fewer resident
-	 *	page.
+	 *	And show that the object has one fewer resident
+	 *	page.
 	 */
-
 	mem->object->resident_page_count--;
 	mem->flags &= ~PG_TABLED;
 }
 
 /*
- *	vm_page_lookup:
+ *	vm_page_lookup:
  *
- *	Returns the page associated with the object/offset
- *	pair specified; if none is found, NULL is returned.
+ *	Returns the page associated with the object/offset
+ *	pair specified; if none is found, NULL is returned.
  *
- *	The object must be locked.  No side effects.
+ *	The object must be locked.  No side effects.
 */
 vm_page_t
 vm_page_lookup(object, offset)
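vm_page_hash() above adds the object pointer to the page index and masks the sum, which only distributes keys across all buckets because vm_page_bucket_count is a power of 2, so vm_page_hash_mask has every low bit set. A standalone sketch of the same technique; the bucket count, page size, and dummy object are assumed values:

#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed */
#define atop(x)		((unsigned long)(x) >> PAGE_SHIFT)

/* Same shape as the kernel macro: pointer + page index, masked down. */
#define page_hash(obj, off, mask) \
	(((unsigned long)(obj) + atop(off)) & (mask))

int
main(void)
{
	int dummy_object;			/* stands in for a vm_object */
	unsigned long mask = 1024 - 1;		/* bucket count must be 2^n */
	unsigned long off;

	/* Consecutive offsets within one object land in distinct buckets. */
	for (off = 0; off < 4UL * 4096; off += 4096)
		printf("offset %#lx -> bucket %lu\n", off,
		    page_hash(&dummy_object, off, mask));
	return 0;
}

Were the bucket count not a power of 2, the mask would zero out some bucket indices entirely; masking is chosen over a modulo precisely because the bootstrap code guarantees the power-of-2 invariant.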
@@ -693,9 +670,8 @@ vm_page_lookup(object, offset)
 	int	spl;
 
 	/*
-	 *	Search the hash table for this object/offset pair
+	 *	Search the hash table for this object/offset pair
 	 */
-
 	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
 
 	spl = splimp();
@@ -715,12 +691,12 @@ vm_page_lookup(object, offset)
 }
 
 /*
- *	vm_page_rename:
+ *	vm_page_rename:
  *
- *	Move the given memory entry from its
- *	current object to the specified target object/offset.
+ *	Move the given memory entry from its
+ *	current object to the specified target object/offset.
  *
- *	The object must be locked.
+ *	The object must be locked.
 */
 void
 vm_page_rename(mem, new_object, new_offset)
@@ -739,12 +715,12 @@ vm_page_rename(mem, new_object, new_offset)
 }
 
 /*
- *	vm_page_alloc:
+ *	vm_page_alloc:
  *
- *	Allocate and return a memory cell associated
- *	with this VM object/offset pair.
+ *	Allocate and return a memory cell associated
+ *	with this VM object/offset pair.
  *
- *	Object must be locked.
+ *	Object must be locked.
 */
 vm_page_t
 vm_page_alloc(object, offset)
@@ -772,16 +748,15 @@ vm_page_alloc(object, offset)
 	VM_PAGE_INIT(mem, object, offset);
 
 	/*
-	 *	Decide if we should poke the pageout daemon.
-	 *	We do this if the free count is less than the low
-	 *	water mark, or if the free count is less than the high
-	 *	water mark (but above the low water mark) and the inactive
-	 *	count is less than its target.
+	 *	Decide if we should poke the pageout daemon.
+	 *	We do this if the free count is less than the low
+	 *	water mark, or if the free count is less than the high
+	 *	water mark (but above the low water mark) and the inactive
+	 *	count is less than its target.
 	 *
-	 *	We don't have the counts locked ... if they change a little,
-	 *	it doesn't really matter.
+	 *	We don't have the counts locked ... if they change a little,
+	 *	it doesn't really matter.
 	 */
-
 	if (cnt.v_free_count < cnt.v_free_min ||
 	    (cnt.v_free_count < cnt.v_free_target &&
 	    cnt.v_inactive_count < cnt.v_inactive_target))
@@ -790,12 +765,12 @@ vm_page_alloc(object, offset)
 }
 
 /*
- *	vm_page_free:
+ *	vm_page_free:
  *
- *	Returns the given page to the free list,
- *	disassociating it with any VM object.
+ *	Returns the given page to the free list,
+ *	disassociating it with any VM object.
  *
- *	Object and page must be locked prior to entry.
+ *	Object and page must be locked prior to entry.
 */
 void
 vm_page_free(mem)
@@ -828,13 +803,13 @@ vm_page_free(mem)
 }
 
 /*
- *	vm_page_wire:
+ *	vm_page_wire:
  *
- *	Mark this page as wired down by yet
- *	another map, removing it from paging queues
- *	as necessary.
+ *	Mark this page as wired down by yet
+ *	another map, removing it from paging queues
+ *	as necessary.
  *
- *	The page queues must be locked.
+ *	The page queues must be locked.
 */
 void
 vm_page_wire(mem)
@@ -859,12 +834,12 @@ vm_page_wire(mem)
 }
 
 /*
- *	vm_page_unwire:
+ *	vm_page_unwire:
  *
- *	Release one wiring of this page, potentially
- *	enabling it to be paged again.
+ *	Release one wiring of this page, potentially
+ *	enabling it to be paged again.
  *
- *	The page queues must be locked.
+ *	The page queues must be locked.
 */
 void
 vm_page_unwire(mem)
@@ -882,13 +857,13 @@ vm_page_unwire(mem)
 }
 
 /*
- *	vm_page_deactivate:
+ *	vm_page_deactivate:
  *
- *	Returns the given page to the inactive list,
- *	indicating that no physical maps have access
- *	to this page.  [Used by the physical mapping system.]
+ *	Returns the given page to the inactive list,
+ *	indicating that no physical maps have access
+ *	to this page.  [Used by the physical mapping system.]
 *
- *	The page queues must be locked.
+ *	The page queues must be locked.
 */
 void
 vm_page_deactivate(m)
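The vm_page_alloc() hunk spells out the pageout-daemon wakeup policy in prose and then tests it against cnt.v_free_min, cnt.v_free_target, and cnt.v_inactive_target. A sketch of that predicate with the thresholds lifted out as plain parameters; all counts here are invented for illustration:

#include <stdio.h>

/*
 * Mirror of the vm_page_alloc() test: wake the pageout daemon when
 * free memory drops below the low-water mark, or sits between the
 * marks while the inactive queue is short of its target.
 */
static int
should_wake_pageout(unsigned free, unsigned free_min,
    unsigned free_target, unsigned inactive, unsigned inactive_target)
{
	return (free < free_min ||
	    (free < free_target && inactive < inactive_target));
}

int
main(void)
{
	/* plenty free, critically short, and borderline with few inactive */
	printf("%d\n", should_wake_pageout(900, 64, 256, 100, 128));	/* 0 */
	printf("%d\n", should_wake_pageout(32, 64, 256, 100, 128));	/* 1 */
	printf("%d\n", should_wake_pageout(128, 64, 256, 100, 128));	/* 1 */
	return 0;
}

The original comment is explicit that these counters are read unlocked; the predicate only steers a heuristic wakeup, so a slightly stale value is harmless.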
@@ -897,10 +872,9 @@ vm_page_deactivate(m)
 	VM_PAGE_CHECK(m);
 
 	/*
-	 *	Only move active pages -- ignore locked or already
-	 *	inactive ones.
+	 *	Only move active pages -- ignore locked or already
+	 *	inactive ones.
 	 */
-
 	if (m->flags & PG_ACTIVE) {
 		TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
 		m->flags &= ~PG_ACTIVE;
@@ -923,11 +897,11 @@ vm_page_deactivate(m)
 }
 
 /*
- *	vm_page_activate:
+ *	vm_page_activate:
  *
- *	Put the specified page on the active list (if appropriate).
+ *	Put the specified page on the active list (if appropriate).
  *
- *	The page queues must be locked.
+ *	The page queues must be locked.
 */
 void
 vm_page_activate(m)
@@ -951,11 +925,11 @@ vm_page_activate(m)
 }
 
 /*
- *	vm_page_zero_fill:
+ *	vm_page_zero_fill:
  *
- *	Zero-fill the specified page.
- *	Written as a standard pagein routine, to
- *	be used by the zero-fill object.
+ *	Zero-fill the specified page.
+ *	Written as a standard pagein routine, to
+ *	be used by the zero-fill object.
 */
 boolean_t
 vm_page_zero_fill(m)
@@ -969,9 +943,9 @@ vm_page_zero_fill(m)
 }
 
 /*
- *	vm_page_copy:
+ *	vm_page_copy:
  *
- *	Copy one page to another
+ *	Copy one page to another
 */
 void
 vm_page_copy(src_m, dest_m)
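The deactivate path in the final hunks is, at heart, a guarded transfer between the active and inactive queues, keyed off the page's flag bits. A toy model of that state change; struct toy_page and its flag values are inventions, and the TAILQ moves are only indicated by comments:

#include <stdio.h>

#define PG_ACTIVE	0x01
#define PG_INACTIVE	0x02

struct toy_page { int flags; };		/* stand-in for struct vm_page */

/*
 * Model of vm_page_deactivate(): only active pages move, and the
 * flag swap keeps a page on exactly one paging queue at a time.
 */
static void
deactivate(struct toy_page *m)
{
	if (m->flags & PG_ACTIVE) {
		/* Kernel: TAILQ_REMOVE from the active queue,
		 * TAILQ_INSERT_TAIL onto the inactive queue. */
		m->flags &= ~PG_ACTIVE;
		m->flags |= PG_INACTIVE;
	}
}

int
main(void)
{
	struct toy_page m = { PG_ACTIVE };

	deactivate(&m);
	printf("flags after deactivate: %#x\n", m.flags);	/* PG_INACTIVE */
	deactivate(&m);		/* second call is a no-op: page not active */
	printf("flags after second call: %#x\n", m.flags);
	return 0;
}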