From bef5c1203e8b821e3eeb308fe0fec1cd4a4b82aa Mon Sep 17 00:00:00 2001
From: Artur Grabowski
Date: Wed, 9 May 2001 15:31:29 +0000
Subject: More sync to NetBSD.

 - Change pmap_change_wiring to pmap_unwire because it's only called
   that way.
 - Remove pmap_pageable because it's seldom implemented and when it is,
   it's either almost useless or incorrect. The same information is
   already passed to the pmap anyway by pmap_enter and pmap_unwire.
---
 sys/vm/pmap.h     |  9 ++-------
 sys/vm/vm_fault.c | 21 ++-------------------
 2 files changed, 4 insertions(+), 26 deletions(-)

(limited to 'sys/vm')

diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 764572133fc..25b5cf2fe4b 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap.h,v 1.15 2001/03/26 08:36:20 art Exp $	*/
+/*	$OpenBSD: pmap.h,v 1.16 2001/05/09 15:31:22 art Exp $	*/
 /*	$NetBSD: pmap.h,v 1.16 1996/03/31 22:15:32 pk Exp $	*/
 
 /*
@@ -121,12 +121,9 @@ typedef struct pmap_statistics	*pmap_statistics_t;
 #ifdef _KERNEL
 __BEGIN_DECLS
 void		*pmap_bootstrap_alloc __P((int));
-void		 pmap_change_wiring __P((pmap_t, vaddr_t, boolean_t));
+void		 pmap_unwire __P((pmap_t, vaddr_t));
 
 #if defined(PMAP_NEW)
-#if 0
-void		 pmap_unwire __P((pmap_t, vaddr_t));
-#endif
 #if !defined(pmap_clear_modify)
 boolean_t	 pmap_clear_modify __P((struct vm_page *));
 #endif
@@ -199,8 +196,6 @@ void		 pmap_page_protect __P((struct vm_page *, vm_prot_t));
 
 void		 pmap_page_protect __P((paddr_t, vm_prot_t));
 #endif
-void		 pmap_pageable __P((pmap_t,
-			vaddr_t, vaddr_t, boolean_t));
 #if !defined(pmap_phys_address)
 paddr_t		 pmap_phys_address __P((int));
 #endif
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index cf141035a1d..0f0bbc7fc48 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vm_fault.c,v 1.17 1999/09/03 18:02:27 art Exp $	*/
+/*	$OpenBSD: vm_fault.c,v 1.18 2001/05/09 15:31:23 art Exp $	*/
 /*	$NetBSD: vm_fault.c,v 1.21 1998/01/31 04:02:39 ross Exp $	*/
 
 /*
@@ -860,14 +860,6 @@ vm_fault_wire(map, start, end)
 
 	pmap = vm_map_pmap(map);
 
-	/*
-	 * Inform the physical mapping system that the
-	 * range of addresses may not fault, so that
-	 * page tables and such can be locked down as well.
-	 */
-
-	pmap_pageable(pmap, start, end, FALSE);
-
 	/*
 	 * We simulate a fault to get the page and enter it
 	 * in the physical map.
@@ -912,19 +904,10 @@
 		if (pa == (vm_offset_t)0) {
 			panic("unwire: page not in pmap");
 		}
-		pmap_change_wiring(pmap, va, FALSE);
+		pmap_unwire(pmap, va);
 		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
 	}
 	vm_page_unlock_queues();
-
-	/*
-	 * Inform the physical mapping system that the range
-	 * of addresses may fault, so that page tables and
-	 * such may be unwired themselves.
-	 */
-
-	pmap_pageable(pmap, start, end, TRUE);
-
 }
 
 /*
--
cgit v1.2.3
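
Note (not part of the committed diff): for a port whose machine-dependent
pmap still provides the old pmap_change_wiring() interface, the new
pmap_unwire() entry point is just the wired == FALSE case of that function,
since the wired case is already covered by pmap_enter().  The sketch below
is a hypothetical illustration of that adaptation, not any particular
port's code; pte_lookup(), PG_WIRED and pm_stats.wired_count are made-up
stand-ins for whatever the real pmap uses.

	void
	pmap_unwire(pmap, va)
		pmap_t pmap;
		vaddr_t va;
	{
		pt_entry_t *pte;

		/* Find the mapping for va; pte_lookup() is a hypothetical helper. */
		pte = pte_lookup(pmap, va);
		if (pte == NULL)
			return;

		/* Clear the (illustrative) wired attribute and keep statistics honest. */
		if (*pte & PG_WIRED) {
			*pte &= ~PG_WIRED;
			pmap->pm_stats.wired_count--;
		}
	}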