author    Artur Grabowski <art@cvs.openbsd.org>  2001-06-27 04:19:18 +0000
committer Artur Grabowski <art@cvs.openbsd.org>  2001-06-27 04:19:18 +0000
commit    26ec1214c72f0a66f5e598b1ac4731aa0d500c9a (patch)
tree      455f7f2f0453839f493141dc4818ec9ea18cf9df /sys/arch
parent    b22d2a5b6097f6f0309a013c6e863b8bd00ce83e (diff)
zap old vm
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/mvme68k/mvme68k/genassym.cf      8
-rw-r--r--  sys/arch/mvme68k/mvme68k/hpux_machdep.c   17
-rw-r--r--  sys/arch/mvme68k/mvme68k/locore.s         10
-rw-r--r--  sys/arch/mvme68k/mvme68k/machdep.c        83
-rw-r--r--  sys/arch/mvme68k/mvme68k/mem.c            10
-rw-r--r--  sys/arch/mvme68k/mvme68k/pmap.c           147
-rw-r--r--  sys/arch/mvme68k/mvme68k/trap.c           42
-rw-r--r--  sys/arch/mvme68k/mvme68k/vm_machdep.c     16
8 files changed, 8 insertions, 325 deletions
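
The change applies one pattern across all eight files: wherever the code carried an old-Mach-VM fallback guarded by #if defined(UVM) / #else / #endif (or the equivalent "ifdef UVM" in genassym.cf), the #else branch and the conditionals are deleted and the UVM call becomes unconditional; the old statistics counters (cnt.v_trap, cnt.v_intr, ...) likewise give way to their uvmexp counterparts. A minimal before/after sketch of that pattern, using the buffer-mapping allocation from vm_machdep.c in the diff below as the example (identifiers are taken from the diff itself; the surrounding function body is elided, so this is an illustrative fragment, not a standalone program):

	/* before: both VM systems supported, selected at compile time */
	#if defined(UVM)
		kva = uvm_km_valloc_wait(phys_map, ctob(npf));
	#else
		kva = kmem_alloc_wait(phys_map, ctob(npf));
	#endif

	/* after "zap old vm": UVM is the only VM system left */
		kva = uvm_km_valloc_wait(phys_map, ctob(npf));
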
diff --git a/sys/arch/mvme68k/mvme68k/genassym.cf b/sys/arch/mvme68k/mvme68k/genassym.cf
index 1264deaae86..c77e07bdbd3 100644
--- a/sys/arch/mvme68k/mvme68k/genassym.cf
+++ b/sys/arch/mvme68k/mvme68k/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.7 2001/06/26 21:35:41 miod Exp $
+# $OpenBSD: genassym.cf,v 1.8 2001/06/27 04:19:17 art Exp $
#
# Copyright (c) 1995 Theo de Raadt
@@ -83,9 +83,7 @@ include <machine/prom.h>
include <machine/pte.h>
include <vm/vm.h>
-ifdef UVM
include <uvm/uvm_extern.h>
-endif
define __XXX_BUG_FODDER 0
@@ -131,11 +129,7 @@ define SSLEEP SSLEEP
define SRUN SRUN
# interrupt/fault metering
-ifdef UVM
define UVMEXP_INTRS offsetof(struct uvmexp, intrs)
-else
-define V_INTR offsetof(struct vmmeter, v_intr)
-endif
# trap types (should just include trap.h?)
define T_BUSERR T_BUSERR
diff --git a/sys/arch/mvme68k/mvme68k/hpux_machdep.c b/sys/arch/mvme68k/mvme68k/hpux_machdep.c
index 3fb571d117f..5c323874978 100644
--- a/sys/arch/mvme68k/mvme68k/hpux_machdep.c
+++ b/sys/arch/mvme68k/mvme68k/hpux_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: hpux_machdep.c,v 1.2 2001/06/26 21:35:41 miod Exp $ */
+/* $OpenBSD: hpux_machdep.c,v 1.3 2001/06/27 04:19:17 art Exp $ */
/* $NetBSD: hpux_machdep.c,v 1.9 1997/03/16 10:00:45 thorpej Exp $ */
/*
@@ -76,9 +76,7 @@
#include <vm/vm_param.h>
#include <vm/vm_map.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
#include <machine/cpu.h>
#include <machine/reg.h>
@@ -458,13 +456,8 @@ hpux_sendsig(catcher, sig, mask, code, type, val)
psp->ps_sigstk.ss_flags |= SS_ONSTACK;
} else
fp = (struct hpuxsigframe *)(frame->f_regs[SP] - fsize);
-#if defined(UVM)
if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
(void)uvm_grow(p, (unsigned)fp);
-#else
- if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
- (void)grow(p, (unsigned)fp);
-#endif
#ifdef DEBUG
if ((hpuxsigdebug & SDB_KSTACK) && p->p_pid == hpuxsigpid)
@@ -472,11 +465,7 @@ hpux_sendsig(catcher, sig, mask, code, type, val)
p->p_pid, sig, &oonstack, fp, &fp->sf_sc, ft);
#endif
-#if defined(UVM)
if (uvm_useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
-#else
- if (useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
-#endif
#ifdef DEBUG
if ((hpuxsigdebug & SDB_KSTACK) && p->p_pid == hpuxsigpid)
printf("hpux_sendsig(%d): useracc failed on sig %d\n",
@@ -633,11 +622,7 @@ hpux_sys_sigreturn(p, v, retval)
* Fetch and test the HP-UX context structure.
* We grab it all at once for speed.
*/
-#if defined(UVM)
if (uvm_useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
-#else
- if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
-#endif
copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc))
return (EINVAL);
scp = &tsigc;
diff --git a/sys/arch/mvme68k/mvme68k/locore.s b/sys/arch/mvme68k/mvme68k/locore.s
index b446c88c1f5..04dc5f5a35a 100644
--- a/sys/arch/mvme68k/mvme68k/locore.s
+++ b/sys/arch/mvme68k/mvme68k/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.25 2001/06/26 21:35:41 miod Exp $ */
+/* $OpenBSD: locore.s,v 1.26 2001/06/27 04:19:17 art Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -554,11 +554,7 @@ Lenab1:
*/
/* select the software page size now */
lea _ASM_LABEL(tmpstk),sp | temporary stack
-#if defined(UVM)
jbsr _C_LABEL(uvm_setpagesize) | select software page size
-#else
- jbsr _C_LABEL(vm_set_page_size) | select software page size
-#endif
/* set kernel stack, user SP, and initial pcb */
movl _C_LABEL(proc0paddr),a1 | get proc0 pcb addr
lea a1@(USPACE-4),sp | set kernel stack to end of area
@@ -1122,11 +1118,7 @@ Lbrkpt3:
ENTRY_NOPROFILE(spurintr)
addql #1,_C_LABEL(intrcnt)+0
-#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
-#else
- addql #1,_C_LABEL(cnt)+V_INTR
-#endif
jra _ASM_LABEL(rei) | all done
/*
diff --git a/sys/arch/mvme68k/mvme68k/machdep.c b/sys/arch/mvme68k/mvme68k/machdep.c
index 11e51304f39..1a557ddd1f4 100644
--- a/sys/arch/mvme68k/mvme68k/machdep.c
+++ b/sys/arch/mvme68k/mvme68k/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.40 2001/06/26 21:35:41 miod Exp $ */
+/* $OpenBSD: machdep.c,v 1.41 2001/06/27 04:19:17 art Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -117,29 +117,20 @@
#define MAXMEM 64*1024 /* XXX - from cmap.h */
#include <vm/vm_kern.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
/* the following is used externally (sysctl_hw) */
char machine[] = "mvme68k"; /* cpu "architecture" */
-#if defined(UVM)
vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;
-#else
-vm_map_t buffer_map;
-#endif
extern vm_offset_t avail_end;
/*
* Declare these as initialized data so we can patch them.
*/
-#if !defined(UVM)
-int nswbuf = 0;
-#endif
#ifdef NBUF
int nbuf = NBUF;
#else
@@ -203,15 +194,10 @@ mvme68k_init()
* mvme68k only has one segment.
*/
-#if defined(UVM)
uvmexp.pagesize = NBPG;
uvm_setpagesize();
uvm_page_physload(atop(avail_start), atop(avail_end),
atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
-#else
- vm_page_physload(atop(avail_start), atop(avail_end),
- atop(avail_start), atop(avail_end));
-#endif /* UVM */
#endif /* MACHINE_NEW_NONCONTIG */
/*
@@ -257,11 +243,7 @@ cpu_startup()
register caddr_t v, firstaddr;
int base, residual;
-#if defined(UVM)
vaddr_t minaddr, maxaddr;
-#else
- vm_offset_t minaddr, maxaddr;
-#endif
vm_size_t size;
#ifdef BUFFERS_UNMANAGED
vm_offset_t bufmemp;
@@ -347,33 +329,17 @@ again:
if (nbuf < 16)
nbuf = 16;
}
-#if !defined(UVM)
- if (nswbuf == 0) {
- nswbuf = (nbuf / 2) &~ 1; /* force even */
- if (nswbuf > 256)
- nswbuf = 256; /* sanity */
- }
- valloc(swbuf, struct buf, nswbuf);
-#endif
valloc(buf, struct buf, nbuf);
/*
* End of first pass, size has been calculated so allocate memory
*/
if (firstaddr == 0) {
size = (vm_size_t)(v - firstaddr);
-#if defined(UVM)
firstaddr = (caddr_t) uvm_km_zalloc(kernel_map, round_page(size));
-#else
- firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
-#endif
if (firstaddr == 0)
panic("startup: no room for tables");
#ifdef BUFFERS_UNMANAGED
-#if defined(UVM)
buffermem = (caddr_t) uvm_km_zalloc(kernel_map, bufpages*PAGE_SIZE);
-#else
- buffermem = (caddr_t) kmem_alloc(kernel_map, bufpages*PAGE_SIZE);
-#endif
if (buffermem == 0)
panic("startup: no room for buffers");
#endif
@@ -390,21 +356,12 @@ again:
*/
size = MAXBSIZE * nbuf;
-#if defined(UVM)
if (uvm_map(kernel_map, (vaddr_t *) &buffers, m68k_round_page(size),
NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
panic("cpu_startup: cannot allocate VM for buffers");
minaddr = (vaddr_t)buffers;
-#else
- buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
- &maxaddr, size, TRUE);
- minaddr = (vm_offset_t)buffers;
- if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
- (vm_offset_t *)&minaddr, size, FALSE) != KERN_SUCCESS)
- panic("startup: cannot allocate buffers");
-#endif
if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
/* don't want to alloc more physical mem than needed */
@@ -413,7 +370,6 @@ again:
base = bufpages / nbuf;
residual = bufpages % nbuf;
for (i = 0; i < nbuf; i++) {
-#if defined(UVM)
vsize_t curbufsize;
vaddr_t curbuf;
struct vm_page *pg;
@@ -438,53 +394,21 @@ again:
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
-#else
- vm_size_t curbufsize;
- vm_offset_t curbuf;
-
- /*
- * First <residual> buffers get (base+1) physical pages
- * allocated for them. The rest get (base) physical pages.
- *
- * The rest of each buffer occupies virtual space,
- * but has no physical memory allocated for it.
- */
- curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
- curbufsize = PAGE_SIZE * (i < residual ? base+1 : base);
- /* this faults in the required physical pages */
- vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
- vm_map_simplify(buffer_map, curbuf);
-#endif
}
/*
* Allocate a submap for exec arguments. This map effectively
* limits the number of processes exec'ing at any time.
*/
-#if defined(UVM)
exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
-#else
- exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- 16*NCARGS, TRUE);
-#endif
/*
* Allocate a submap for physio
*/
-#if defined(UVM)
phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, 0, FALSE, NULL);
-#else
- phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- VM_PHYS_SIZE, TRUE);
-#endif
-#if defined(UVM)
mb_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&mbutl, &maxaddr,
VM_MBUF_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
-#else
- mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
- VM_MBUF_SIZE, FALSE);
-#endif
/*
* Initialize timeouts
*/
@@ -493,12 +417,7 @@ again:
#ifdef DEBUG
pmapdebug = opmapdebug;
#endif
-#if defined(UVM)
printf("avail mem = %ld (%ld pages)\n", ptoa(uvmexp.free), uvmexp.free);
-#else
- printf("avail mem = %ld (%ld pages)\n", ptoa(cnt.v_free_count),
- ptoa(cnt.v_free_count)/NBPG);
-#endif
printf("using %d buffers containing %d bytes of memory\n",
nbuf, bufpages * PAGE_SIZE);
#ifdef MFS
diff --git a/sys/arch/mvme68k/mvme68k/mem.c b/sys/arch/mvme68k/mvme68k/mem.c
index 9c2453f8c85..bec83087daf 100644
--- a/sys/arch/mvme68k/mvme68k/mem.c
+++ b/sys/arch/mvme68k/mvme68k/mem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mem.c,v 1.13 2001/06/26 21:35:42 miod Exp $ */
+/* $OpenBSD: mem.c,v 1.14 2001/06/27 04:19:17 art Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -83,9 +83,7 @@
#include <machine/cpu.h>
#include <vm/vm.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
extern u_int lowram;
static caddr_t devzeropage;
@@ -181,15 +179,9 @@ mmrw(dev, uio, flags)
case 1:
v = uio->uio_offset;
c = min(iov->iov_len, MAXPHYS);
-#if defined(UVM)
if (!uvm_kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
-#else
- if (!kernacc((caddr_t)v, c,
- uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
- return (EFAULT);
-#endif
if (v < NBPG)
return (EFAULT);
error = uiomove((caddr_t)v, c, uio);
diff --git a/sys/arch/mvme68k/mvme68k/pmap.c b/sys/arch/mvme68k/mvme68k/pmap.c
index 9b804d45326..16c97c333e3 100644
--- a/sys/arch/mvme68k/mvme68k/pmap.c
+++ b/sys/arch/mvme68k/mvme68k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.22 2001/06/26 21:35:42 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.23 2001/06/27 04:19:17 art Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -135,10 +135,7 @@
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
-#if defined(UVM)
#include <uvm/uvm.h>
-#else
-#endif
#include <machine/cpu.h>
@@ -242,9 +239,7 @@ vm_size_t Sysptsize = VM_KERNEL_PT_PAGES;
struct pmap kernel_pmap_store;
vm_map_t st_map, pt_map;
-#if defined(UVM)
struct vm_map st_map_store, pt_map_store;
-#endif
vm_offset_t avail_start; /* PA of first available physical page */
vm_offset_t avail_end; /* PA of last available physical page */
@@ -406,7 +401,6 @@ pmap_init(phys_start, phys_end)
* Now that kernel map has been allocated, we can mark as
* unavailable regions which we have mapped in pmap_bootstrap().
*/
-#if defined(UVM)
addr = (vaddr_t) intiobase;
if (uvm_map(kernel_map, &addr,
m68k_ptob(iiomapsize+EIOMAPSIZE),
@@ -429,25 +423,6 @@ pmap_init(phys_start, phys_end)
bogons:
panic("pmap_init: bogons in the VM system!\n");
}
-#else
- addr = (vm_offset_t) intiobase;
- (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
- &addr, m68k_ptob(iiomapsize+EIOMAPSIZE), FALSE);
- if (addr != (vm_offset_t)intiobase)
- goto bogons;
- addr = (vm_offset_t) Sysmap;
- vm_object_reference(kernel_object);
- (void) vm_map_find(kernel_map, kernel_object, addr,
- &addr, M68K_MAX_PTSIZE, FALSE);
- /*
- * If this fails it is probably because the static portion of
- * the kernel page table isn't big enough and we overran the
- * page table map.
- */
- if (addr != (vm_offset_t)Sysmap)
-bogons:
- panic("pmap_init: bogons in the VM system!");
-#endif
#ifdef DEBUG
if (pmapdebug & PDB_INIT) {
@@ -472,13 +447,9 @@ bogons:
s += page_cnt * sizeof(struct pv_entry); /* pv table */
s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
-#if defined(UVM)
addr = uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: can't allocate data structures");
-#else
- addr = (vm_offset_t) kmem_alloc(kernel_map, s);
-#endif
Segtabzero = (st_entry_t *) addr;
pmap_extract(pmap_kernel(), addr, (paddr_t *)&Segtabzeropa);
@@ -536,7 +507,6 @@ bogons:
* we already have kernel PT pages.
*/
addr = 0;
-#if defined(UVM)
rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
@@ -545,24 +515,14 @@ bogons:
rv = uvm_unmap(kernel_map, addr, addr + s);
if (rv != KERN_SUCCESS)
panic("pmap_init: uvm_unmap failed");
-#else
- rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
- if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
- panic("pmap_init: kernel PT too small");
- vm_map_remove(kernel_map, addr, addr + s);
-#endif
/*
* Now allocate the space and link the pages together to
* form the KPT free list.
*/
-#if defined(UVM)
addr = uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: cannot allocate KPT free list");
-#else
- addr = (vm_offset_t) kmem_alloc(kernel_map, s);
-#endif
s = ptoa(npages);
addr2 = addr + s;
kpt_pages = &((struct kpt_page *)addr2)[npages];
@@ -591,12 +551,8 @@ bogons:
* Allocate the segment table map
*/
s = maxproc * M68K_STSIZE;
-#if defined(UVM)
st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, FALSE,
&st_map_store);
-#else
- st_map = kmem_suballoc(kernel_map, &addr, &addr2, s, TRUE);
-#endif
/*
* Slightly modified version of kmem_suballoc() to get page table
@@ -614,26 +570,8 @@ bogons:
maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
} else
s = (maxproc * M68K_MAX_PTSIZE);
-#if defined(UVM)
pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, VM_MAP_PAGEABLE,
TRUE, &pt_map_store);
-#else
- addr2 = addr + s;
- rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
- if (rv != KERN_SUCCESS)
- panic("pmap_init: cannot allocate space for PT map");
- pmap_reference(vm_map_pmap(kernel_map));
- pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
- if (pt_map == NULL)
- panic("pmap_init: cannot create pt_map");
- rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
- if (rv != KERN_SUCCESS)
- panic("pmap_init: cannot map range to pt_map");
-#ifdef DEBUG
- if (pmapdebug & PDB_INIT)
- printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
-#endif
-#endif
#if defined(M68040) || defined(M68060)
if (mmutype <= MMU_68040) {
@@ -661,15 +599,9 @@ pmap_alloc_pv()
int i;
if (pv_nfree == 0) {
-#if defined(UVM)
pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
if (pvp == 0)
panic("pmap_alloc_pv: uvm_km_zalloc() failed");
-#else
- pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
- if (pvp == 0)
- panic("pmap_alloc_pv: kmem_alloc() failed");
-#endif
pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
for (i = NPVPPG - 2; i; i--, pv++)
pv->pv_next = pv + 1;
@@ -712,11 +644,7 @@ pmap_free_pv(pv)
case NPVPPG:
pv_nfree -= NPVPPG - 1;
TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
-#if defined(UVM)
uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
-#else
- kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
-#endif
break;
}
}
@@ -774,11 +702,7 @@ pmap_collect_pv()
for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
npvp = pvp->pvp_pgi.pgi_list.tqe_next;
-#if defined(UVM)
uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
-#else
- kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
-#endif
}
}
@@ -929,21 +853,11 @@ pmap_release(pmap)
#endif
if (pmap->pm_ptab)
-#if defined(UVM)
uvm_km_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
M68K_MAX_PTSIZE);
-#else
- kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
- M68K_MAX_PTSIZE);
-#endif
if (pmap->pm_stab != Segtabzero)
-#if defined(UVM)
uvm_km_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab,
M68K_STSIZE);
-#else
- kmem_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab,
- M68K_STSIZE);
-#endif
}
/*
@@ -1224,13 +1138,8 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* For user mapping, allocate kernel VM resources if necessary.
*/
if (pmap->pm_ptab == NULL)
-#ifdef UVM
pmap->pm_ptab = (pt_entry_t *)
uvm_km_valloc_wait(pt_map, M68K_MAX_PTSIZE);
-#else
- pmap->pm_ptab = (pt_entry_t *)
- kmem_alloc_wait(pt_map, M68K_MAX_PTSIZE);
-#endif
/*
* Segment table entry not valid, we need a new PT page
@@ -1294,12 +1203,7 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* is a valid mapping in the page.
*/
if (pmap != pmap_kernel()) {
-#ifdef UVM
pmap_ptpage_addref(trunc_page((vaddr_t)pte));
-#else
- (void) vm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
- round_page((vaddr_t)(pte+1)), FALSE);
-#endif
}
/*
@@ -1938,7 +1842,6 @@ pmap_remove_mapping(pmap, va, pte, flags)
*/
if (pmap != pmap_kernel()) {
vaddr_t ptpva = trunc_page((vaddr_t)pte);
-#if defined(UVM)
int refs = pmap_ptpage_delref(ptpva);
/*
@@ -1979,10 +1882,6 @@ pmap_remove_mapping(pmap, va, pte, flags)
ptpva, pa));
#endif
}
-#else
- (void) vm_map_pageable(pt_map, ptpva,
- round_page((vaddr_t)(pte+1)), TRUE);
-#endif
#ifdef DEBUG
if (pmapdebug & PDB_WIRING)
pmap_check_wiring("remove", trunc_page(pte));
@@ -2074,15 +1973,9 @@ pmap_remove_mapping(pmap, va, pte, flags)
printf("remove: free stab %x\n",
ptpmap->pm_stab);
#endif
-#if defined(UVM)
uvm_km_free_wakeup(st_map,
(vm_offset_t)ptpmap->pm_stab,
M68K_STSIZE);
-#else
- kmem_free_wakeup(st_map,
- (vm_offset_t)ptpmap->pm_stab,
- M68K_STSIZE);
-#endif
ptpmap->pm_stab = Segtabzero;
ptpmap->pm_stpa = Segtabzeropa;
#if defined(M68040) || defined(M68060)
@@ -2207,15 +2100,8 @@ pmap_changebit(pa, bit, setem)
* XXX don't write protect pager mappings
*/
if (bit == PG_RO) {
-#if defined(UVM)
if (va >= uvm.pager_sva && va < uvm.pager_eva)
continue;
-#else
- extern vm_offset_t pager_sva, pager_eva;
-
- if (va >= pager_sva && va < pager_eva)
- continue;
-#endif
}
pte = pmap_pte(pv->pv_pmap, va);
@@ -2273,13 +2159,8 @@ pmap_enter_ptpage(pmap, va)
* reference count drops to zero.
*/
if (pmap->pm_stab == Segtabzero) {
-#if defined(UVM)
pmap->pm_stab = (st_entry_t *)
uvm_km_zalloc(st_map, M68K_STSIZE);
-#else
- pmap->pm_stab = (st_entry_t *)
- kmem_alloc(st_map, M68K_STSIZE);
-#endif
pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_stab,
(paddr_t *)&pmap->pm_stpa);
#if defined(M68040) || defined(M68060)
@@ -2418,28 +2299,11 @@ pmap_enter_ptpage(pmap, va)
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
printf("enter: about to fault UPT pg at %x\n", va);
#endif
-#if defined(UVM)
if (uvm_fault_wire(pt_map, va, va + PAGE_SIZE,
VM_PROT_READ|VM_PROT_WRITE)
!= KERN_SUCCESS)
panic("pmap_enter: uvm_fault failed");
-#else
- if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
- != KERN_SUCCESS)
- panic("pmap_enter: vm_fault failed");
-#endif
pmap_extract(pmap_kernel(), va, &ptpa);
- /*
- * Mark the page clean now to avoid its pageout (and
- * hence creation of a pager) between now and when it
- * is wired; i.e. while it is on a paging queue.
- */
- PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
-#if !defined(UVM)
-#ifdef DEBUG
- PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
-#endif
-#endif
}
#if defined(M68040) || defined(M68060)
/*
@@ -2543,7 +2407,6 @@ pmap_enter_ptpage(pmap, va)
splx(s);
}
-#ifdef UVM
/*
* pmap_ptpage_addref:
*
@@ -2579,7 +2442,6 @@ pmap_ptpage_delref(ptpva)
simple_unlock(&uvm.kernel_object->vmobjlock);
return (rv);
}
-#endif
#ifdef DEBUG
/* static */
@@ -2611,17 +2473,10 @@ pmap_check_wiring(str, va)
if (!pmap_ste_v(pmap_kernel(), va) ||
!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
return;
-#if defined(UVM)
if (!uvm_map_lookup_entry(pt_map, va, &entry)) {
printf("wired_check: entry for %lx not found\n", va);
return;
}
-#else
- if (!vm_map_lookup_entry(pt_map, va, &entry)) {
- printf("wired_check: entry for %lx not found\n", va);
- return;
- }
-#endif
count = 0;
for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
if (*pte)
diff --git a/sys/arch/mvme68k/mvme68k/trap.c b/sys/arch/mvme68k/mvme68k/trap.c
index bd7b1d664e7..2463a805a8e 100644
--- a/sys/arch/mvme68k/mvme68k/trap.c
+++ b/sys/arch/mvme68k/mvme68k/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.31 2001/06/26 21:35:43 miod Exp $ */
+/* $OpenBSD: trap.c,v 1.32 2001/06/27 04:19:17 art Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -98,9 +98,7 @@ extern struct emul emul_sunos;
#include <vm/vm.h>
#include <vm/pmap.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
#ifdef COMPAT_HPUX
#include <compat/hpux/hpux.h>
@@ -281,11 +279,7 @@ trap(type, code, v, frame)
#endif
register union sigval sv;
-#if defined(UVM)
uvmexp.traps++;
-#else
- cnt.v_trap++;
-#endif
p = curproc;
ucode = 0;
if (USERMODE(frame.f_sr)) {
@@ -524,11 +518,7 @@ copyfault:
while (bit = ffs(ssir)) {
--bit;
ssir &= ~(1 << bit);
-#if defined(UVM)
uvmexp.softs++;
-#else
- cnt.v_soft++;
-#endif
if (sir_routines[bit])
sir_routines[bit](sir_args[bit]);
}
@@ -536,11 +526,7 @@ copyfault:
* If this was not an AST trap, we are all done.
*/
if (type != (T_ASTFLT|T_USER)) {
-#if defined(UVM)
uvmexp.traps--;
-#else
- cnt.v_trap--;
-#endif
return;
}
spl0();
@@ -609,30 +595,17 @@ copyfault:
rv = pmap_mapmulti(map->pmap, va);
if (rv != KERN_SUCCESS) {
bva = HPMMBASEADDR(va);
-#if defined(UVM)
rv = uvm_fault(map, bva, 0, ftype);
-#else
- rv = vm_fault(map, bva, ftype, FALSE);
-#endif
if (rv == KERN_SUCCESS)
(void) pmap_mapmulti(map->pmap, va);
}
} else
#endif
-#if defined(UVM)
rv = uvm_fault(map, va, 0, ftype);
-#else
- rv = vm_fault(map, va, ftype, FALSE);
-#endif
#ifdef DEBUG
if (rv && MDB_ISPID(p->p_pid))
-#if defined(UVM)
printf("uvm_fault(%x, %x, 0, %x) -> %x\n",
map, va, ftype, rv);
-#else
- printf("vm_fault(%x, %x, %x, 0) -> %x\n",
- map, va, ftype, rv);
-#endif
#endif
/*
* If this was a stack access we keep track of the maximum
@@ -664,13 +637,8 @@ copyfault:
if (type == T_MMUFLT) {
if (p && p->p_addr->u_pcb.pcb_onfault)
goto copyfault;
-#if defined(UVM)
printf("uvm_fault(%x, %x, 0, %x) -> %x\n",
map, va, ftype, rv);
-#else
- printf("vm_fault(%x, %x, %x, 0) -> %x\n",
- map, va, ftype, rv);
-#endif
printf(" type %x, code [mmu,,ssw]: %x\n",
type, code);
goto dopanic;
@@ -1012,11 +980,7 @@ syscall(code, frame)
#ifdef COMPAT_SUNOS
extern struct emul emul_sunos;
#endif
-#if defined(UVM)
uvmexp.syscalls++;
-#else
- cnt.v_syscall++;
-#endif
if (!USERMODE(frame.f_sr))
panic("syscall");
@@ -1213,11 +1177,7 @@ hardintr(pc, evec, frame)
int count = 0;
int r;
-#if defined(UVM)
uvmexp.intrs++;
-#else
- cnt.v_intr++;
-#endif
/* intrcnt[level]++; */
for (ih = intrs[vec]; ih; ih = ih->ih_next) {
#if 0
diff --git a/sys/arch/mvme68k/mvme68k/vm_machdep.c b/sys/arch/mvme68k/mvme68k/vm_machdep.c
index 41bd463898e..980bf446a27 100644
--- a/sys/arch/mvme68k/mvme68k/vm_machdep.c
+++ b/sys/arch/mvme68k/mvme68k/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.25 2001/06/26 21:35:43 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.26 2001/06/27 04:19:17 art Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -57,9 +57,7 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
/*
* Finish a fork operation, with process p2 nearly set up.
@@ -141,11 +139,7 @@ cpu_exit(p)
{
(void) splimp();
-#if defined(UVM)
uvmexp.swtch++;
-#else
- cnt.v_swtch++;
-#endif
switch_exit(p);
/* NOTREACHED */
}
@@ -304,11 +298,7 @@ vmapbuf(bp, siz)
off = (int)addr & PGOFSET;
p = bp->b_proc;
npf = btoc(round_page(bp->b_bcount + off));
-#if defined(UVM)
kva = uvm_km_valloc_wait(phys_map, ctob(npf));
-#else
- kva = kmem_alloc_wait(phys_map, ctob(npf));
-#endif
bp->b_data = (caddr_t)(kva + off);
while (npf--) {
if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
@@ -338,11 +328,7 @@ vunmapbuf(bp, siz)
addr = bp->b_data;
npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
kva = (vm_offset_t)((int)addr & ~PGOFSET);
-#if defined(UVM)
uvm_km_free_wakeup(phys_map, kva, ctob(npf));
-#else
- kmem_free_wakeup(phys_map, kva, ctob(npf));
-#endif
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = NULL;
}