author     Artur Grabowski <art@cvs.openbsd.org>   2001-06-27 04:05:46 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>   2001-06-27 04:05:46 +0000
commit     3ba3e54b50196ea6bf59e2c3102d52fb9cdf4183 (patch)
tree       40fa1688f8f708b3cbd97ad86010f7054e2fbf68 /sys
parent     3d058ffa6a5c9e93e2c571c6b793547745da3d03 (diff)
no more old VM
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/hp300/dev/grf.c             |  14
-rw-r--r--  sys/arch/hp300/hp300/genassym.cf     |   8
-rw-r--r--  sys/arch/hp300/hp300/hpux_machdep.c  |  17
-rw-r--r--  sys/arch/hp300/hp300/intr.c          |   8
-rw-r--r--  sys/arch/hp300/hp300/locore.s        |  18
-rw-r--r--  sys/arch/hp300/hp300/machdep.c       |  79
-rw-r--r--  sys/arch/hp300/hp300/mem.c           |  10
-rw-r--r--  sys/arch/hp300/hp300/pmap.c          | 161
-rw-r--r--  sys/arch/hp300/hp300/trap.c          |  42
-rw-r--r--  sys/arch/hp300/hp300/vm_machdep.c    |  16
-rw-r--r--  sys/arch/hp300/include/param.h       |   4
11 files changed, 11 insertions, 366 deletions
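
The change is mechanical throughout: wherever the hp300 code chose between UVM and the old Mach-derived VM at compile time, the UVM path is kept unconditionally and the #if defined(UVM)/#else/#endif scaffolding is dropped along with the old-VM fallback calls. As an illustrative sketch only (the fragment below is lifted from the grf.c hunk further down, not new code), the pattern is:

/* Before: compile-time choice between UVM and the old VM. */
#if defined(UVM)
	rv = uvm_unmap(&p->p_vmspace->vm_map, (vaddr_t)addr,
	    (vaddr_t)addr + size);
#else
	rv = vm_deallocate(&p->p_vmspace->vm_map, (vaddr_t)addr, size);
#endif

/* After: only the UVM call remains, unconditionally. */
	rv = uvm_unmap(&p->p_vmspace->vm_map, (vaddr_t)addr,
	    (vaddr_t)addr + size);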
diff --git a/sys/arch/hp300/dev/grf.c b/sys/arch/hp300/dev/grf.c
index 1f934a423c0..dd8863dad08 100644
--- a/sys/arch/hp300/dev/grf.c
+++ b/sys/arch/hp300/dev/grf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: grf.c,v 1.11 2001/05/10 21:08:48 millert Exp $ */
+/* $OpenBSD: grf.c,v 1.12 2001/06/27 04:05:44 art Exp $ */
/* $NetBSD: grf.c,v 1.30 1998/08/20 08:33:41 kleink Exp $ */
/*
@@ -78,9 +78,7 @@ extern struct emul emul_hpux;
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
-#if defined(UVM)
#include <uvm/uvm.h>
-#endif
#include <miscfs/specfs/specdev.h>
@@ -642,16 +640,10 @@ grfmap(dev, addrp, p)
vn.v_type = VCHR; /* XXX */
vn.v_specinfo = &si; /* XXX */
vn.v_rdev = dev; /* XXX */
-#if defined(UVM)
error = uvm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
(vsize_t)len, VM_PROT_ALL, VM_PROT_ALL,
flags, (caddr_t)&vn, 0,
p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
-#else
- error = vm_mmap(&p->p_vmspace->vm_map, (vaddr_t *)addrp,
- (vsize_t)len, VM_PROT_ALL, VM_PROT_ALL,
- flags, (caddr_t)&vn, 0);
-#endif
if (error == 0)
(void) (*gp->g_sw->gd_mode)(gp, GM_MAP, *addrp);
return(error);
@@ -676,12 +668,8 @@ grfunmap(dev, addr, p)
return(EINVAL); /* XXX: how do we deal with this? */
(void) (*gp->g_sw->gd_mode)(gp, GM_UNMAP, 0);
size = round_page(gp->g_display.gd_regsize + gp->g_display.gd_fbsize);
-#if defined(UVM)
rv = uvm_unmap(&p->p_vmspace->vm_map, (vaddr_t)addr,
(vaddr_t)addr + size);
-#else
- rv = vm_deallocate(&p->p_vmspace->vm_map, (vaddr_t)addr, size);
-#endif
return(rv == KERN_SUCCESS ? 0 : EINVAL);
}
diff --git a/sys/arch/hp300/hp300/genassym.cf b/sys/arch/hp300/hp300/genassym.cf
index a8528516459..26ee920df2a 100644
--- a/sys/arch/hp300/hp300/genassym.cf
+++ b/sys/arch/hp300/hp300/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.8 2001/05/13 17:19:56 millert Exp $
+# $OpenBSD: genassym.cf,v 1.9 2001/06/27 04:05:45 art Exp $
# $NetBSD: genassym.cf,v 1.11 1998/02/16 20:58:29 thorpej Exp $
#
@@ -47,9 +47,7 @@ include <sys/user.h>
include <vm/vm.h>
-ifdef UVM
include <uvm/uvm_extern.h>
-endif
include <machine/hp300spu.h>
include <machine/cpu.h>
@@ -157,11 +155,7 @@ define SSLEEP SSLEEP
define SRUN SRUN
# interrupt/fault metering
-ifdef UVM
define UVMEXP_INTRS offsetof(struct uvmexp, intrs)
-else
-define V_INTR offsetof(struct vmmeter, v_intr)
-endif
# PSL values (should just include psl.h?)
define PSL_S PSL_S
diff --git a/sys/arch/hp300/hp300/hpux_machdep.c b/sys/arch/hp300/hp300/hpux_machdep.c
index ed43734daed..fb3ecb6b282 100644
--- a/sys/arch/hp300/hp300/hpux_machdep.c
+++ b/sys/arch/hp300/hp300/hpux_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: hpux_machdep.c,v 1.8 2001/05/04 22:48:59 aaron Exp $ */
+/* $OpenBSD: hpux_machdep.c,v 1.9 2001/06/27 04:05:45 art Exp $ */
/* $NetBSD: hpux_machdep.c,v 1.19 1998/02/16 20:58:30 thorpej Exp $ */
/*
@@ -78,9 +78,7 @@
#include <vm/vm_param.h>
#include <vm/vm_map.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
#include <sys/syscallargs.h>
@@ -418,13 +416,8 @@ hpux_sendsig(catcher, sig, mask, code, type, val)
psp->ps_sigstk.ss_flags |= SS_ONSTACK;
} else
fp = (struct hpuxsigframe *)(frame->f_regs[SP] - fsize);
-#if defined(UVM)
if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
(void)uvm_grow(p, (unsigned)fp);
-#else
- if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
- (void)grow(p, (unsigned)fp);
-#endif
#ifdef DEBUG
if ((hpuxsigdebug & SDB_KSTACK) && p->p_pid == hpuxsigpid)
@@ -432,11 +425,7 @@ hpux_sendsig(catcher, sig, mask, code, type, val)
p->p_pid, sig, &oonstack, fp, &fp->hsf_sc, ft);
#endif
-#if defined(UVM)
if (uvm_useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
-#else
- if (useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
-#endif
#ifdef DEBUG
if ((hpuxsigdebug & SDB_KSTACK) && p->p_pid == hpuxsigpid)
printf("hpux_sendsig(%d): useracc failed on sig %d\n",
@@ -594,11 +583,7 @@ hpux_sys_sigreturn(p, v, retval)
* Fetch and test the HP-UX context structure.
* We grab it all at once for speed.
*/
-#if defined(UVM)
if (uvm_useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
-#else
- if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
-#endif
copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc))
return (EINVAL);
scp = &tsigc;
diff --git a/sys/arch/hp300/hp300/intr.c b/sys/arch/hp300/hp300/intr.c
index 4acf8a1b52c..3995d0efa7d 100644
--- a/sys/arch/hp300/hp300/intr.c
+++ b/sys/arch/hp300/hp300/intr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intr.c,v 1.7 2001/05/04 22:48:59 aaron Exp $ */
+/* $OpenBSD: intr.c,v 1.8 2001/06/27 04:05:45 art Exp $ */
/* $NetBSD: intr.c,v 1.5 1998/02/16 20:58:30 thorpej Exp $ */
/*-
@@ -48,10 +48,8 @@
#include <sys/malloc.h>
#include <sys/vmmeter.h>
-#if defined(UVM)
#include <vm/vm.h>
#include <uvm/uvm_extern.h>
-#endif
#include <net/netisr.h>
#include "ppp.h"
@@ -263,11 +261,7 @@ intr_dispatch(evec)
ipl = vec - ISRLOC;
intrcnt[ipl]++;
-#if defined(UVM)
uvmexp.intrs++;
-#else
- cnt.v_intr++;
-#endif
list = &isr_list[ipl];
if (list->lh_first == NULL) {
diff --git a/sys/arch/hp300/hp300/locore.s b/sys/arch/hp300/hp300/locore.s
index d83e0306877..4834f6d5d32 100644
--- a/sys/arch/hp300/hp300/locore.s
+++ b/sys/arch/hp300/hp300/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.27 2001/05/13 17:27:24 aaron Exp $ */
+/* $OpenBSD: locore.s,v 1.28 2001/06/27 04:05:45 art Exp $ */
/* $NetBSD: locore.s,v 1.91 1998/11/11 06:41:25 thorpej Exp $ */
/*
@@ -496,11 +496,7 @@ Lehighcode:
Lenab1:
/* select the software page size now */
lea _ASM_LABEL(tmpstk),sp | temporary stack
-#if defined(UVM)
jbsr _C_LABEL(uvm_setpagesize) | select software page size
-#else
- jbsr _C_LABEL(vm_set_page_size) | select software page size
-#endif
/* set kernel stack, user SP, and initial pcb */
movl _C_LABEL(proc0paddr),a1 | get proc0 pcb addr
lea a1@(USPACE-4),sp | set kernel stack to end of area
@@ -1017,11 +1013,7 @@ Lbrkpt3:
ENTRY_NOPROFILE(spurintr) /* level 0 */
addql #1,_C_LABEL(intrcnt)+0
-#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
-#else
- addql #1,_C_LABEL(cnt)+V_INTR
-#endif
jra _ASM_LABEL(rei)
ENTRY_NOPROFILE(lev1intr) /* level 1: HIL XXX this needs to go away */
@@ -1029,11 +1021,7 @@ ENTRY_NOPROFILE(lev1intr) /* level 1: HIL XXX this needs to go away */
jbsr _C_LABEL(hilint)
INTERRUPT_RESTOREREG
addql #1,_C_LABEL(intrcnt)+4
-#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS
-#else
- addql #1,_C_LABEL(cnt)+V_INTR
-#endif
jra _ASM_LABEL(rei)
ENTRY_NOPROFILE(intrhand) /* levels 2 through 5 */
@@ -1104,11 +1092,7 @@ Lnoleds0:
addql #4,sp
CLKADDR(a0)
Lrecheck:
-#if defined(UVM)
addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS | chalk up another interrupt
-#else
- addql #1,_C_LABEL(cnt)+V_INTR | chalk up another interrupt
-#endif
movb a0@(CLKSR),d0 | see if anything happened
jmi Lclkagain | while we were in hardclock/statintr
INTERRUPT_RESTOREREG
diff --git a/sys/arch/hp300/hp300/machdep.c b/sys/arch/hp300/hp300/machdep.c
index 307c23e1bc7..2a3b084a811 100644
--- a/sys/arch/hp300/hp300/machdep.c
+++ b/sys/arch/hp300/hp300/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.51 2001/06/25 00:43:09 mickey Exp $ */
+/* $OpenBSD: machdep.c,v 1.52 2001/06/27 04:05:45 art Exp $ */
/* $NetBSD: machdep.c,v 1.121 1999/03/26 23:41:29 mycroft Exp $ */
/*
@@ -105,9 +105,7 @@
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
#include "opt_useleds.h"
@@ -121,13 +119,9 @@
/* the following is used externally (sysctl_hw) */
char machine[] = MACHINE; /* from <machine/param.h> */
-#if defined(UVM)
vm_map_t exec_map = NULL;
vm_map_t mb_map = NULL;
vm_map_t phys_map = NULL;
-#else
-vm_map_t buffer_map;
-#endif
extern paddr_t avail_start, avail_end;
@@ -208,13 +202,8 @@ hp300_init()
* Tell the VM system about available physical memory. The
* hp300 only has one segment.
*/
-#if defined(UVM)
uvm_page_physload(atop(avail_start), atop(avail_end),
atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
-#else
- vm_page_physload(atop(avail_start), atop(avail_end),
- atop(avail_start), atop(avail_end));
-#endif /* UVM */
/* Initialize the interrupt handlers. */
intr_init();
@@ -301,13 +290,8 @@ cpu_startup()
* and the give everything true virtual addresses.
*/
size = (vsize_t)allocsys((caddr_t)0);
-#if defined(UVM)
if ((v = (caddr_t)uvm_km_zalloc(kernel_map, round_page(size))) == 0)
panic("startup: no room for tables");
-#else
- if ((v = (caddr_t)kmem_alloc(kernel_map, round_page(size))) == 0)
- panic("startup: no room for tables");
-#endif
if ((allocsys(v) - v) != size)
panic("startup: table size inconsistency");
@@ -316,25 +300,15 @@ cpu_startup()
* in that they usually occupy more virtual memory than physical.
*/
size = MAXBSIZE * nbuf;
-#if defined(UVM)
if (uvm_map(kernel_map, (vaddr_t *) &buffers, round_page(size),
NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
panic("startup: cannot allocate VM for buffers");
minaddr = (vaddr_t)buffers;
-#else
- buffer_map = kmem_suballoc(kernel_map, (vaddr_t *)&buffers,
- &maxaddr, size, TRUE);
- minaddr = (vaddr_t)buffers;
- if (vm_map_find(buffer_map, vm_object_allocate(size), (vaddr_t)0,
- &minaddr, size, FALSE) != KERN_SUCCESS)
- panic("startup: cannot allocate buffers");
-#endif /* UVM */
base = bufpages / nbuf;
residual = bufpages % nbuf;
for (i = 0; i < nbuf; i++) {
-#if defined(UVM)
vsize_t curbufsize;
vaddr_t curbuf;
struct vm_page *pg;
@@ -363,54 +337,23 @@ cpu_startup()
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
-#else /* ! UVM */
- vsize_t curbufsize;
- vaddr_t curbuf;
-
- /*
- * First <residual> buffers get (base+1) physical pages
- * allocated for them. The rest get (base) physical pages.
- *
- * The rest of each buffer occupies virtual space,
- * but has no physical memory allocated for it.
- */
- curbuf = (vaddr_t)buffers + i * MAXBSIZE;
- curbufsize = PAGE_SIZE * (i < residual ? base+1 : base);
- vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
- vm_map_simplify(buffer_map, curbuf);
-#endif /* UVM */
}
/*
* Allocate a submap for exec arguments. This map effectively
* limits the number of processes exec'ing at any time.
*/
-#if defined(UVM)
exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
16*NCARGS, TRUE, FALSE, NULL);
-#else
- exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- 16*NCARGS, TRUE);
-#endif
/*
* Allocate a submap for physio
*/
-#if defined(UVM)
phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
VM_PHYS_SIZE, TRUE, FALSE, NULL);
-#else
- phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- VM_PHYS_SIZE, TRUE);
-#endif
-#if defined(UVM)
mb_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
VM_MBUF_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
-#else
- mb_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
- VM_MBUF_SIZE, FALSE);
-#endif
/*
* Initialize timeouts
@@ -420,13 +363,8 @@ cpu_startup()
#ifdef DEBUG
pmapdebug = opmapdebug;
#endif
-#if defined(UVM)
printf("avail mem = %lu (%uK)\n", ptoa(uvmexp.free),
ptoa(uvmexp.free)/1024);
-#else
- printf("avail mem = %lu (%uK)\n", ptoa(cnt.v_free_count),
- ptoa(cnt.v_free_count)/1024);
-#endif
printf("using %d buffers containing %u bytes (%uK) of memory\n",
nbuf, bufpages * PAGE_SIZE, bufpages * PAGE_SIZE / 1024);
@@ -436,15 +374,9 @@ cpu_startup()
* XXX This is bogus; should just fix KERNBASE and
* XXX VM_MIN_KERNEL_ADDRESS, but not right now.
*/
-#if defined(UVM)
if (uvm_map_protect(kernel_map, 0, NBPG, UVM_PROT_NONE, TRUE)
!= KERN_SUCCESS)
panic("can't mark page 0 off-limits");
-#else
- if (vm_map_protect(kernel_map, 0, NBPG, VM_PROT_NONE, TRUE)
- != KERN_SUCCESS)
- panic("can't mark page 0 off-limits");
-#endif
/*
* Tell the VM system that writing to kernel text isn't allowed.
@@ -453,15 +385,9 @@ cpu_startup()
* XXX Should be trunc_page(&kernel_text) instead
* XXX of NBPG.
*/
-#if defined(UVM)
if (uvm_map_protect(kernel_map, NBPG, round_page((vaddr_t)&etext),
UVM_PROT_READ|UVM_PROT_EXEC, TRUE) != KERN_SUCCESS)
panic("can't protect kernel text");
-#else
- if (vm_map_protect(kernel_map, NBPG, round_page((vaddr_t)&etext),
- VM_PROT_READ|VM_PROT_EXECUTE, TRUE) != KERN_SUCCESS)
- panic("can't protect kernel text");
-#endif
/*
* Set up CPU-specific registers, cache, etc.
@@ -544,9 +470,6 @@ allocsys(v)
if (nswbuf > 256)
nswbuf = 256; /* sanity */
}
-#if !defined(UVM)
- valloc(swbuf, struct buf, nswbuf);
-#endif
valloc(buf, struct buf, nbuf);
return (v);
}
diff --git a/sys/arch/hp300/hp300/mem.c b/sys/arch/hp300/hp300/mem.c
index 83d96809bbe..fb0cbef15e3 100644
--- a/sys/arch/hp300/hp300/mem.c
+++ b/sys/arch/hp300/hp300/mem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mem.c,v 1.13 2001/05/05 20:56:34 art Exp $ */
+/* $OpenBSD: mem.c,v 1.14 2001/06/27 04:05:45 art Exp $ */
/* $NetBSD: mem.c,v 1.25 1999/03/27 00:30:06 mycroft Exp $ */
/*
@@ -56,9 +56,7 @@
#include <machine/cpu.h>
#include <vm/vm.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
extern u_int lowram;
extern char *extiobase;
@@ -161,15 +159,9 @@ mmrw(dev, uio, flags)
case 1:
v = uio->uio_offset;
c = min(iov->iov_len, MAXPHYS);
-#if defined(UVM)
if (!uvm_kernacc((caddr_t)v, c,
uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
return (EFAULT);
-#else
- if (!kernacc((caddr_t)v, c,
- uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
- return (EFAULT);
-#endif
/*
* Don't allow reading intio or dio
diff --git a/sys/arch/hp300/hp300/pmap.c b/sys/arch/hp300/hp300/pmap.c
index 33f15e5b853..459b76b906e 100644
--- a/sys/arch/hp300/hp300/pmap.c
+++ b/sys/arch/hp300/hp300/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.24 2001/06/08 13:32:54 millert Exp $ */
+/* $OpenBSD: pmap.c,v 1.25 2001/06/27 04:05:45 art Exp $ */
/* $NetBSD: pmap.c,v 1.80 1999/09/16 14:52:06 chs Exp $ */
/*-
@@ -146,9 +146,7 @@
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
-#if defined(UVM)
#include <uvm/uvm.h>
-#endif
#include <machine/cpu.h>
@@ -255,9 +253,7 @@ vsize_t Sysptsize = VM_KERNEL_PT_PAGES;
struct pmap kernel_pmap_store;
vm_map_t st_map, pt_map;
-#if defined(UVM)
struct vm_map st_map_store, pt_map_store;
-#endif
paddr_t avail_start; /* PA of first available physical page */
paddr_t avail_end; /* PA of last available physical page */
@@ -387,7 +383,6 @@ pmap_init()
* Now that kernel map has been allocated, we can mark as
* unavailable regions which we have mapped in pmap_bootstrap().
*/
-#if defined(UVM)
addr = (vaddr_t) intiobase;
if (uvm_map(kernel_map, &addr,
m68k_ptob(IIOMAPSIZE+EIOMAPSIZE),
@@ -410,25 +405,6 @@ pmap_init()
bogons:
panic("pmap_init: bogons in the VM system!\n");
}
-#else
- addr = (vaddr_t) intiobase;
- (void) vm_map_find(kernel_map, NULL, (vaddr_t) 0,
- &addr, m68k_ptob(IIOMAPSIZE+EIOMAPSIZE), FALSE);
- if (addr != (vaddr_t)intiobase)
- goto bogons;
- addr = (vaddr_t) Sysmap;
- vm_object_reference(kernel_object);
- (void) vm_map_find(kernel_map, kernel_object, addr,
- &addr, HP_MAX_PTSIZE, FALSE);
- /*
- * If this fails it is probably because the static portion of
- * the kernel page table isn't big enough and we overran the
- * page table map.
- */
- if (addr != (vaddr_t)Sysmap)
-bogons:
- panic("pmap_init: bogons in the VM system!");
-#endif /* UVM */
PMAP_DPRINTF(PDB_INIT,
("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
@@ -447,13 +423,9 @@ bogons:
s += page_cnt * sizeof(struct pv_entry); /* pv table */
s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
-#if defined(UVM)
addr = uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: can't allocate data structures");
-#else
- addr = kmem_alloc(kernel_map, s);
-#endif
Segtabzero = (st_entry_t *) addr;
pmap_extract(pmap_kernel(), addr, (paddr_t *)Segtabzeropa);
@@ -494,7 +466,6 @@ bogons:
* Verify that space will be allocated in region for which
* we already have kernel PT pages.
*/
-#if defined(UVM)
addr = 0;
rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
@@ -504,25 +475,14 @@ bogons:
rv = uvm_unmap(kernel_map, addr, addr + s);
if (rv != KERN_SUCCESS)
panic("pmap_init: uvm_unmap failed");
-#else
- addr = 0;
- rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
- if (rv != KERN_SUCCESS || addr + s >= (vaddr_t)Sysmap)
- panic("pmap_init: kernel PT too small");
- vm_map_remove(kernel_map, addr, addr + s);
-#endif
/*
* Now allocate the space and link the pages together to
* form the KPT free list.
*/
-#if defined(UVM)
addr = uvm_km_zalloc(kernel_map, s);
if (addr == 0)
panic("pmap_init: cannot allocate KPT free list");
-#else
- addr = kmem_alloc(kernel_map, s);
-#endif
s = ptoa(npages);
addr2 = addr + s;
kpt_pages = &((struct kpt_page *)addr2)[npages];
@@ -538,7 +498,6 @@ bogons:
PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
atop(s), addr, addr + s));
-#if defined(UVM)
/*
* Allocate the segment table map and the page table map.
*/
@@ -560,44 +519,6 @@ bogons:
s = (maxproc * HP_MAX_PTSIZE);
pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, VM_MAP_PAGEABLE,
TRUE, &pt_map_store);
-#else
- /*
- * Allocate the segment table map
- */
- s = maxproc * HP_STSIZE;
- st_map = kmem_suballoc(kernel_map, &addr, &addr2, s, TRUE);
-
- /*
- * Slightly modified version of kmem_suballoc() to get page table
- * map where we want it.
- */
- addr = HP_PTBASE;
- if ((HP_PTMAXSIZE / HP_MAX_PTSIZE) < maxproc) {
- s = HP_PTMAXSIZE;
- /*
- * XXX We don't want to hang when we run out of
- * page tables, so we lower maxproc so that fork()
- * will fail instead. Note that root could still raise
- * this value via sysctl(2).
- */
- maxproc = (HP_PTMAXSIZE / HP_MAX_PTSIZE);
- } else
- s = (maxproc * HP_MAX_PTSIZE);
- addr2 = addr + s;
- rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
- if (rv != KERN_SUCCESS)
- panic("pmap_init: cannot allocate space for PT map");
- pmap_reference(vm_map_pmap(kernel_map));
- pt_map = vm_map_create(vm_map_pmap(kernel_map), addr, addr2, TRUE);
- if (pt_map == NULL)
- panic("pmap_init: cannot create pt_map");
- rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
- if (rv != KERN_SUCCESS)
- panic("pmap_init: cannot map range to pt_map");
-
- PMAP_DPRINTF(PDB_INIT,
- /* ( */ ("pmap_init: pt_map [%lx - %lx)\n", addr, addr2));
-#endif /* UVM */
#if defined(M68040)
if (mmutype == MMU_68040) {
@@ -632,15 +553,9 @@ pmap_alloc_pv()
int i;
if (pv_nfree == 0) {
-#if defined(UVM)
pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
if (pvp == 0)
panic("pmap_alloc_pv: uvm_km_zalloc() failed");
-#else
- pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
- if (pvp == 0)
- panic("pmap_alloc_pv: kmem_alloc() failed");
-#endif
pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
for (i = NPVPPG - 2; i; i--, pv++)
pv->pv_next = pv + 1;
@@ -687,11 +602,7 @@ pmap_free_pv(pv)
case NPVPPG:
pv_nfree -= NPVPPG - 1;
TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
-#if defined(UVM)
uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
-#else
- kmem_free(kernel_map, (vaddr_t)pvp, NBPG);
-#endif
break;
}
}
@@ -756,11 +667,7 @@ pmap_collect_pv()
for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
npvp = pvp->pvp_pgi.pgi_list.tqe_next;
-#if defined(UVM)
uvm_km_free(kernel_map, (vaddr_t)pvp, NBPG);
-#else
- kmem_free(kernel_map, (vaddr_t)pvp, NBPG);
-#endif
}
}
@@ -917,21 +824,11 @@ pmap_release(pmap)
#endif
if (pmap->pm_ptab)
-#if defined(UVM)
uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
HP_MAX_PTSIZE);
-#else
- kmem_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
- HP_MAX_PTSIZE);
-#endif
if (pmap->pm_stab != Segtabzero)
-#if defined(UVM)
uvm_km_free_wakeup(st_map, (vaddr_t)pmap->pm_stab,
HP_STSIZE);
-#else
- kmem_free_wakeup(st_map, (vaddr_t)pmap->pm_stab,
- HP_STSIZE);
-#endif
}
/*
@@ -1299,13 +1196,8 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* For user mapping, allocate kernel VM resources if necessary.
*/
if (pmap->pm_ptab == NULL)
-#if defined(UVM)
pmap->pm_ptab = (pt_entry_t *)
uvm_km_valloc_wait(pt_map, HP_MAX_PTSIZE);
-#else
- pmap->pm_ptab = (pt_entry_t *)
- kmem_alloc_wait(pt_map, HP_MAX_PTSIZE);
-#endif
/*
* Segment table entry not valid, we need a new PT page
@@ -1363,12 +1255,7 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* is a valid mapping in the page.
*/
if (pmap != pmap_kernel()) {
-#ifdef UVM
pmap_ptpage_addref(trunc_page((vaddr_t)pte));
-#else
- (void) vm_map_pageable(pt_map, trunc_page((vaddr_t)pte),
- round_page((vaddr_t)pte + 1), FALSE);
-#endif
}
/*
@@ -2278,7 +2165,6 @@ pmap_remove_mapping(pmap, va, pte, flags)
* For user mappings decrement the wiring count on
* the PT page.
*/
-#ifdef UVM
if (pmap != pmap_kernel()) {
vaddr_t ptpva = trunc_page((vaddr_t)pte);
int refs = pmap_ptpage_delref(ptpva);
@@ -2323,18 +2209,6 @@ pmap_remove_mapping(pmap, va, pte, flags)
ptpva, pa));
}
}
-#else
- if (pmap != pmap_kernel()) {
- vaddr_t ptpva = trunc_page((vaddr_t)pte);
-
- (void) vm_map_pageable(pt_map, ptpva,
- round_page((vaddr_t)pte + 1), TRUE);
-#ifdef DEBUG
- if (pmapdebug & PDB_WIRING)
- pmap_check_wiring("remove", ptpva);
-#endif
- }
-#endif
/*
* If this isn't a managed page, we are all done.
*/
@@ -2434,15 +2308,9 @@ pmap_remove_mapping(pmap, va, pte, flags)
PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
("remove: free stab %p\n",
ptpmap->pm_stab));
-#if defined(UVM)
uvm_km_free_wakeup(st_map,
(vaddr_t)ptpmap->pm_stab,
HP_STSIZE);
-#else
- kmem_free_wakeup(st_map,
- (vaddr_t)ptpmap->pm_stab,
- HP_STSIZE);
-#endif
ptpmap->pm_stab = Segtabzero;
ptpmap->pm_stpa = Segtabzeropa;
#if defined(M68040)
@@ -2586,15 +2454,8 @@ pmap_changebit(pa, set, mask)
* XXX don't write protect pager mappings
*/
if (set == PG_RO) {
-#if defined(UVM)
if (va >= uvm.pager_sva && va < uvm.pager_eva)
continue;
-#else
- extern vaddr_t pager_sva, pager_eva;
-
- if (va >= pager_sva && va < pager_eva)
- continue;
-#endif
}
pte = pmap_pte(pv->pv_pmap, va);
@@ -2661,13 +2522,8 @@ pmap_enter_ptpage(pmap, va)
* reference count drops to zero.
*/
if (pmap->pm_stab == Segtabzero) {
-#if defined(UVM)
pmap->pm_stab = (st_entry_t *)
uvm_km_zalloc(st_map, HP_STSIZE);
-#else
- pmap->pm_stab = (st_entry_t *)
- kmem_alloc(st_map, HP_STSIZE);
-#endif
pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
(paddr_t *)&pmap->pm_stpa);
#if defined(M68040)
@@ -2784,7 +2640,6 @@ pmap_enter_ptpage(pmap, va)
pmap->pm_sref++;
PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
("enter: about to fault UPT pg at %lx\n", va));
-#if defined(UVM)
s = uvm_fault_wire(pt_map, va, va + PAGE_SIZE,
VM_PROT_READ|VM_PROT_WRITE);
if (s != KERN_SUCCESS) {
@@ -2792,19 +2647,7 @@ pmap_enter_ptpage(pmap, va)
"-> %d\n", va, va + PAGE_SIZE, s);
panic("pmap_enter: uvm_fault_wire failed");
}
-#else
- s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
- if (s != KERN_SUCCESS) {
- printf("vm_fault(pt_map, %lx, RW, 0) -> %d\n", va, s);
- panic("pmap_enter: vm_fault failed");
- }
-#endif
ptpa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
-#if !defined(UVM)
-#ifdef DEBUG
- PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
-#endif
-#endif
}
#if defined(M68040)
/*
@@ -2886,7 +2729,6 @@ pmap_enter_ptpage(pmap, va)
splx(s);
}
-#ifdef UVM
/*
* pmap_ptpage_addref:
*
@@ -2922,7 +2764,6 @@ pmap_ptpage_delref(ptpva)
simple_unlock(&uvm.kernel_object->vmobjlock);
return (rv);
}
-#endif
#ifdef DEBUG
/*
diff --git a/sys/arch/hp300/hp300/trap.c b/sys/arch/hp300/hp300/trap.c
index 4c419d80d81..7808e616b70 100644
--- a/sys/arch/hp300/hp300/trap.c
+++ b/sys/arch/hp300/hp300/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.23 2001/06/08 08:08:45 art Exp $ */
+/* $OpenBSD: trap.c,v 1.24 2001/06/27 04:05:45 art Exp $ */
/* $NetBSD: trap.c,v 1.57 1998/02/16 20:58:31 thorpej Exp $ */
/*
@@ -100,9 +100,7 @@
#include <vm/vm.h>
#include <vm/pmap.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
#include <dev/cons.h>
@@ -308,11 +306,7 @@ trap(type, code, v, frame)
int typ = 0;
union sigval sv;
-#if defined(UVM)
uvmexp.traps++;
-#else
- cnt.v_trap++;
-#endif
p = curproc;
ucode = 0;
@@ -588,31 +582,19 @@ trap(type, code, v, frame)
if (ssir & SIR_NET) {
void netintr __P((void));
siroff(SIR_NET);
-#if defined(UVM)
uvmexp.softs++;
-#else
- cnt.v_soft++;
-#endif
netintr();
}
if (ssir & SIR_CLOCK) {
siroff(SIR_CLOCK);
-#if defined(UVM)
uvmexp.softs++;
-#else
- cnt.v_soft++;
-#endif
softclock();
}
/*
* If this was not an AST trap, we are all done.
*/
if (type != (T_ASTFLT|T_USER)) {
-#if defined(UVM)
uvmexp.traps--;
-#else
- cnt.v_trap--;
-#endif
return;
}
spl0();
@@ -683,31 +665,18 @@ trap(type, code, v, frame)
rv = pmap_mapmulti(map->pmap, va);
if (rv != KERN_SUCCESS) {
bva = HPMMBASEADDR(va);
-#if defined(UVM)
rv = uvm_fault(map, bva, 0, ftype);
-#else
- rv = vm_fault(map, bva, ftype, FALSE);
-#endif
if (rv == KERN_SUCCESS)
(void) pmap_mapmulti(map->pmap, va);
}
} else
#endif
-#if defined(UVM)
rv = uvm_fault(map, va, 0, ftype);
#ifdef DEBUG
if (rv && MDB_ISPID(p->p_pid))
printf("uvm_fault(%p, 0x%lx, 0, 0x%x) -> 0x%x\n",
map, va, ftype, rv);
#endif
-#else /* ! UVM */
- rv = vm_fault(map, va, ftype, FALSE);
-#ifdef DEBUG
- if (rv && MDB_ISPID(p->p_pid))
- printf("vm_fault(%p, %lx, %x, 0) -> %x\n",
- map, va, ftype, rv);
-#endif
-#endif /* UVM */
/*
* If this was a stack access we keep track of the maximum
* accessed stack size. Also, if vm_fault gets a protection
@@ -739,13 +708,8 @@ trap(type, code, v, frame)
if (type == T_MMUFLT) {
if (p->p_addr->u_pcb.pcb_onfault)
goto copyfault;
-#if defined(UVM)
printf("uvm_fault(%p, 0x%lx, 0, 0x%x) -> 0x%x\n",
map, va, ftype, rv);
-#else
- printf("vm_fault(%p, %lx, %x, 0) -> %x\n",
- map, va, ftype, rv);
-#endif
printf(" type %x, code [mmu,,ssw]: %x\n",
type, code);
goto dopanic;
@@ -1087,11 +1051,7 @@ syscall(code, frame)
register_t args[8], rval[2];
u_quad_t sticks;
-#if defined(UVM)
uvmexp.syscalls++;
-#else
- cnt.v_syscall++;
-#endif
if (!USERMODE(frame.f_sr))
panic("syscall");
p = curproc;
diff --git a/sys/arch/hp300/hp300/vm_machdep.c b/sys/arch/hp300/hp300/vm_machdep.c
index 0250250a7d9..3ba7f2d6c29 100644
--- a/sys/arch/hp300/hp300/vm_machdep.c
+++ b/sys/arch/hp300/hp300/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.26 2001/06/10 01:44:43 deraadt Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.27 2001/06/27 04:05:45 art Exp $ */
/* $NetBSD: vm_machdep.c,v 1.47 1999/03/26 23:41:29 mycroft Exp $ */
/*
@@ -62,9 +62,7 @@
#include <vm/vm.h>
#include <vm/vm_kern.h>
-#if defined(UVM)
#include <uvm/uvm_extern.h>
-#endif
/*
* Finish a fork operation, with process p2 nearly set up.
@@ -153,11 +151,7 @@ cpu_exit(p)
{
(void) splhigh();
-#if defined(UVM)
uvmexp.swtch++;
-#else
- cnt.v_swtch++;
-#endif
switch_exit(p);
/* NOTREACHED */
}
@@ -336,11 +330,7 @@ vmapbuf(bp, len)
uva = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
off = (vaddr_t)bp->b_data - uva;
len = round_page(off + len);
-#if defined(UVM)
kva = uvm_km_valloc_wait(phys_map, len);
-#else
- kva = kmem_alloc_wait(phys_map, len);
-#endif
bp->b_data = (caddr_t)(kva + off);
upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
@@ -377,11 +367,7 @@ vunmapbuf(bp, len)
* pmap_remove() is unnecessary here, as kmem_free_wakeup()
* will do it for us.
*/
-#if defined(UVM)
uvm_km_free_wakeup(phys_map, kva, len);
-#else
- kmem_free_wakeup(phys_map, kva, len);
-#endif
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;
}
diff --git a/sys/arch/hp300/include/param.h b/sys/arch/hp300/include/param.h
index 57e89bea0d0..1a5e31b2a06 100644
--- a/sys/arch/hp300/include/param.h
+++ b/sys/arch/hp300/include/param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: param.h,v 1.14 2001/05/16 17:40:41 millert Exp $ */
+/* $OpenBSD: param.h,v 1.15 2001/06/27 04:05:45 art Exp $ */
/* $NetBSD: param.h,v 1.35 1997/07/10 08:22:38 veego Exp $ */
/*
@@ -105,8 +105,6 @@ void _delay __P((u_int));
((unsigned)(v) & ~HPMMMASK)
#endif
-#ifdef UVM
#define PMAP_NEW
-#endif
#endif /* !_MACHINE_PARAM_H_ */