Diffstat (limited to 'sys/arch/mac68k/mac68k/pmap.c')
-rw-r--r-- | sys/arch/mac68k/mac68k/pmap.c | 158
1 file changed, 1 insertion(+), 157 deletions(-)
diff --git a/sys/arch/mac68k/mac68k/pmap.c b/sys/arch/mac68k/mac68k/pmap.c
index 15bf41730d1..a97f05d39d0 100644
--- a/sys/arch/mac68k/mac68k/pmap.c
+++ b/sys/arch/mac68k/mac68k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.24 2001/06/27 04:02:05 beck Exp $ */
+/* $OpenBSD: pmap.c,v 1.25 2001/06/27 04:22:38 art Exp $ */
/* $NetBSD: pmap.c,v 1.55 1999/04/22 04:24:53 chs Exp $ */
/*
@@ -107,9 +107,7 @@
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
-#if defined(UVM)
#include <uvm/uvm.h>
-#endif
#include <machine/cpu.h>
@@ -216,9 +214,7 @@ vsize_t Sysptsize = VM_KERNEL_PT_PAGES;
struct pmap kernel_pmap_store;
vm_map_t st_map, pt_map;
-#if defined(UVM)
struct vm_map st_map_store, pt_map_store;
-#endif
paddr_t avail_start; /* PA of first available physical page */
paddr_t avail_end; /* PA of last available physical page */
@@ -343,7 +339,6 @@ pmap_init()
* Now that kernel map has been allocated, we can mark as
* unavailable regions which we have mapped in pmap_bootstrap().
*/
-#if defined(UVM)
addr = (vaddr_t)IOBase;
if (uvm_map(kernel_map, &addr,
m68k_ptob(IIOMAPSIZE + ROMMAPSIZE + VIDMAPSIZE),
@@ -366,26 +361,6 @@ pmap_init()
bogons:
panic("pmap_init: bogons in the VM system!\n");
}
-#else
- addr = (vaddr_t)IOBase;
- (void)vm_map_find(kernel_map, NULL, (vaddr_t)0, &addr,
- m68k_ptob(IIOMAPSIZE + ROMMAPSIZE + VIDMAPSIZE), FALSE);
- if (addr != (vaddr_t)IOBase)
- goto bogons;
-
- addr = (vaddr_t)Sysmap;
- vm_object_reference(kernel_object);
- (void)vm_map_find(kernel_map, kernel_object, addr,
- &addr, MAC_MAX_PTSIZE, FALSE);
- /*
- * If this fails it is probably because the static portion of
- * the kernel page table isn't big enough and we overran the
- * page table map. Need to adjust pmap_size() in mac68k_init.c.
- */
- if (addr != (vaddr_t)Sysmap)
- bogons:
- panic("pmap_init: bogons in the VM system!");
-#endif /* UVM */
PMAP_DPRINTF(PDB_INIT,
("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
@@ -404,13 +379,9 @@ pmap_init()
s += page_cnt * sizeof(struct pv_entry); /* pv table */
s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
-#if defined(UVM)
addr = uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: can't allocate data structures");
-#else
- addr = kmem_alloc(kernel_map, s);
-#endif
Segtabzero = (st_entry_t *)addr;
pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
@@ -451,7 +422,6 @@ pmap_init()
* Verify that space will be allocated in region for which
* we already have kernel PT pages.
*/
-#if defined(UVM)
addr = 0;
rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
@@ -461,25 +431,14 @@ pmap_init()
rv = uvm_unmap(kernel_map, addr, addr + s);
if (rv != KERN_SUCCESS)
panic("pmap_init: uvm_unmap failed");
-#else
- addr = 0;
- rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
- if (rv != KERN_SUCCESS || addr + s >= (vaddr_t)Sysmap)
- panic("pmap_init: kernel PT too small");
- vm_map_remove(kernel_map, addr, addr + s);
-#endif
/*
* Now allocate the space and link the pages together to
* form the KPT free list.
*/
-#if defined(UVM)
addr = uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: cannot allocate KPT free list");
-#else
- addr = kmem_alloc(kernel_map, s);
-#endif
s = ptoa(npages);
addr2 = addr + s;
kpt_pages = &((struct kpt_page *)addr2)[npages];
@@ -495,7 +454,6 @@ pmap_init()
PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
atop(s), addr, addr + s));
-#if defined(UVM)
/*
* Allocate the segment table map and the page table map.
*/
@@ -517,45 +475,6 @@ pmap_init()
s = (maxproc * MAC_MAX_PTSIZE);
pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE,
TRUE, &pt_map_store);
-#else
- /*
- * Allocate the segment table map
- */
- s = maxproc * MAC_STSIZE;
- st_map = kmem_suballoc(kernel_map, &addr, &addr2, s, TRUE);
-
- /*
- * Slightly modified version of kmem_suballoc() to get page table
- * map where we want it.
- */
- addr = MAC_PTBASE;
- if ((MAC_PTMAXSIZE / MAC_MAX_PTSIZE) < maxproc) {
- s = MAC_PTMAXSIZE;
- /*
- * XXX We don't want to hang when we run out of
- * page tables, so we lower maxproc so that fork()
- * will fail instead. Note that root could still raise
- * this value via sysctl(2).
- */
- maxproc = (MAC_PTMAXSIZE / MAC_MAX_PTSIZE);
- } else
- s = (maxproc * MAC_MAX_PTSIZE);
- addr2 = addr + s;
- rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
- if (rv != KERN_SUCCESS)
- panic("pmap_init: cannot allocate space for PT map");
- pmap_reference(vm_map_pmap(kernel_map));
- pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
- if (pt_map == NULL)
- panic("pmap_init: cannot create pt_map");
- rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
- if (rv != KERN_SUCCESS)
- panic("pmap_init: cannot map range to pt_map");
-#ifdef DEBUG
- if (pmapdebug & PDB_INIT)
- printf("pmap_init: pt_map [%lx - %lx)\n", addr, addr2);
-#endif
-#endif /* UVM */
#if defined(M68040)
if (mmutype == MMU_68040) {
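[Editor's sketch] The single uvm_km_suballoc() call kept by the hunk above replaces the whole hand-rolled Mach VM sequence being deleted: vm_map_find() to reserve the range, vm_map_create() to build the submap, and vm_map_submap() to install it. The matching st_map allocation is cut off by the hunk header; it presumably looks like this, with the pageable/fixed booleans assumed from the sibling m68k pmaps:

	s = maxproc * MAC_STSIZE;
	st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s,
	    TRUE, FALSE, &st_map_store);	/* pageable, not fixed */

pt_map, by contrast, is created with fixed == TRUE, since user page tables are placed at MAC_PTBASE just as in the deleted branch.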
@@ -590,15 +509,9 @@ pmap_alloc_pv()
int i;
if (pv_nfree == 0) {
-#if defined(UVM)
pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
if (pvp == 0)
panic("pmap_alloc_pv: uvm_km_zalloc() failed");
-#else
- pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
- if (pvp == 0)
- panic("pmap_alloc_pv: kmem_alloc() failed");
-#endif
pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
for (i = NPVPPG - 2; i; i--, pv++)
pv->pv_next = pv + 1;
@@ -645,11 +558,7 @@ pmap_free_pv(pv)
case NPVPPG:
pv_nfree -= NPVPPG - 1;
TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
-#if defined(UVM)
uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
-#else
- kmem_free(kernel_map, (vaddr_t)pvp, NBPG);
-#endif
break;
}
}
@@ -714,11 +623,7 @@ pmap_collect_pv()
for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
npvp = pvp->pvp_pgi.pgi_list.tqe_next;
-#if defined(UVM)
uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
-#else
- kmem_free(kernel_map, (vaddr_t)pvp, NBPG);
-#endif
}
}
@@ -859,21 +764,11 @@ pmap_release(pmap)
#endif
if (pmap->pm_ptab)
-#if defined(UVM)
uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
MAC_MAX_PTSIZE);
-#else
- kmem_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
- MAC_MAX_PTSIZE);
-#endif
if (pmap->pm_stab != Segtabzero)
-#if defined(UVM)
uvm_km_free_wakeup(st_map, (vaddr_t)pmap->pm_stab,
MAC_STSIZE);
-#else
- kmem_free_wakeup(st_map, (vaddr_t)pmap->pm_stab,
- MAC_STSIZE);
-#endif
}
/*
@@ -1197,13 +1092,8 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* For user mapping, allocate kernel VM resources if necessary.
*/
if (pmap->pm_ptab == NULL)
-#if defined(UVM)
pmap->pm_ptab = (pt_entry_t *)
uvm_km_valloc_wait(pt_map, MAC_MAX_PTSIZE);
-#else
- pmap->pm_ptab = (pt_entry_t *)
- kmem_alloc_wait(pt_map, MAC_MAX_PTSIZE);
-#endif
/*
* Segment table entry not valid, we need a new PT page
@@ -1260,13 +1150,8 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* is a valid mapping in the page.
*/
if (pmap != pmap_kernel())
-#if defined(UVM)
(void)uvm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
round_page((vaddr_t)(pte+1)), FALSE, FALSE);
-#else
- (void)vm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
- round_page((vaddr_t)(pte+1)), FALSE);
-#endif
/*
* Enter on the PV list if part of our managed memory
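[Editor's note] Both call sites in this commit follow the same pattern: uvm_map_pageable() gains a fifth lock-flags argument over the old vm_map_pageable(), while the fourth keeps its meaning (new_pageable: FALSE wires the PT page, TRUE lets it be paged again). Side by side, from this hunk and the pmap_remove_mapping() hunk further below:

	/* pmap_enter(): wire the page holding the new PTE */
	(void)uvm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
	    round_page((vaddr_t)(pte+1)), FALSE, FALSE);

	/* pmap_remove_mapping(): unwire it when the mapping goes away */
	(void)uvm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
	    round_page((vaddr_t)(pte+1)), TRUE, FALSE);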
@@ -1923,13 +1808,8 @@ pmap_remove_mapping(pmap, va, pte, flags)
* PT page.
*/
if (pmap != pmap_kernel()) {
-#if defined(UVM)
(void)uvm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
round_page((vaddr_t)(pte+1)), TRUE, FALSE);
-#else
- (void)vm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
- round_page((vaddr_t)(pte+1)), TRUE);
-#endif
#ifdef DEBUG
if (pmapdebug & PDB_WIRING)
pmap_check_wiring("remove", (vaddr_t)trunc_page(pte));
@@ -2018,13 +1898,8 @@ pmap_remove_mapping(pmap, va, pte, flags)
PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
("remove: free stab %p\n",
ptpmap->pm_stab));
-#if defined(UVM)
uvm_km_free_wakeup(st_map,
(vaddr_t)ptpmap->pm_stab, MAC_STSIZE);
-#else
- kmem_free_wakeup(st_map,
- (vaddr_t)ptpmap->pm_stab, MAC_STSIZE);
-#endif
ptpmap->pm_stab = Segtabzero;
ptpmap->pm_stpa = Segtabzeropa;
#if defined(M68040)
@@ -2162,15 +2037,8 @@ pmap_changebit(pa, set, mask)
* XXX don't write protect pager mappings
*/
if (set == PG_RO) {
-#if defined(UVM)
if (va >= uvm.pager_sva && va < uvm.pager_eva)
continue;
-#else
- extern vaddr_t pager_sva, pager_eva;
-
- if (va >= pager_sva && va < pager_eva)
- continue;
-#endif
}
pte = pmap_pte(pv->pv_pmap, va);
@@ -2227,12 +2095,8 @@ pmap_enter_ptpage(pmap, va)
* reference count drops to zero.
*/
if (pmap->pm_stab == Segtabzero) {
-#if defined(UVM)
pmap->pm_stab = (st_entry_t *)
uvm_km_zalloc(st_map, MAC_STSIZE);
-#else
- pmap->pm_stab = (st_entry_t *)kmem_alloc(st_map, MAC_STSIZE);
-#endif
pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
(paddr_t *)&pmap->pm_stpa);
#if defined(M68040)
@@ -2344,26 +2208,13 @@ pmap_enter_ptpage(pmap, va)
pmap->pm_sref++;
PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
("enter: about to fault UPT pg at %lx\n", va));
-#if defined(UVM)
s = uvm_fault(pt_map, va, 0, VM_PROT_READ|VM_PROT_WRITE);
if (s != KERN_SUCCESS) {
printf("uvm_fault(pt_map, 0x%lx, 0, RW) -> %d\n",
va, s);
panic("pmap_enter: uvm_fault failed");
}
-#else
- s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
- if (s != KERN_SUCCESS) {
- printf("vm_fault(pt_map, %lx, RW, 0) -> %d\n", va, s);
- panic("pmap_enter: vm_fault failed");
- }
-#endif
pmap_extract(pmap_kernel(), va, &ptpa);
-#if !defined (UVM)
-#ifdef DEBUG
- PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
-#endif
-#endif
}
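[Editor's note] Taken together with the pmap_extract() that follows, the kept code faults the user page-table page in through pt_map and then looks up its physical address, as one unit (the third uvm_fault() argument is the fault type, passed as 0 exactly as committed):

	s = uvm_fault(pt_map, va, 0, VM_PROT_READ|VM_PROT_WRITE);
	if (s != KERN_SUCCESS)
		panic("pmap_enter: uvm_fault failed");
	pmap_extract(pmap_kernel(), va, &ptpa);

The removed #if !defined(UVM) block drops the DEBUG-only PG_PTPAGE page marking along with the Mach vm_fault() path.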
#if defined(M68040)
/*
@@ -2488,17 +2339,10 @@ pmap_check_wiring(str, va)
!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
return;
-#if defined(UVM)
if (!uvm_map_lookup_entry(pt_map, va, &entry)) {
printf("wired_check: entry for %lx not found\n", va);
return;
}
-#else
- if (!vm_map_lookup_entry(pt_map, va, &entry)) {
- printf("wired_check: entry for %lx not found\n", va);
- return;
- }
-#endif
count = 0;
for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
if (*pte)
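[Editor's sketch] The diff ends just as the counting loop begins. Completed from memory of the m68k pmaps (an assumption, not part of this diff), pmap_check_wiring() then compares the number of valid PTEs on the page against the map entry's wired count and complains about any mismatch:

	count = 0;
	for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
		if (*pte)
			count++;
	if (entry->wired_count != count)
		printf("*%s*: 0x%lx: w%d/a%d\n",
		    str, va, entry->wired_count, count);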