author    | Theo de Raadt <deraadt@cvs.openbsd.org> | 2001-04-05 20:39:42 +0000
committer | Theo de Raadt <deraadt@cvs.openbsd.org> | 2001-04-05 20:39:42 +0000
commit    | 754ddc37cfc4328b16f4cc36ccd84c57efb35d74 (patch)
tree      | 34a7d73bfad068f9ede76a2e18a04f6417f65331 /sys/arch
parent    | 1ffb7475839894d85cde279427f6b80741904ab1 (diff)
undo changes which did not even compile
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/mvme68k/mvme68k/genassym.cf        11
-rw-r--r--  sys/arch/mvme68k/mvme68k/locore.s             6
-rw-r--r--  sys/arch/mvme68k/mvme68k/machdep.c          113
-rw-r--r--  sys/arch/mvme68k/mvme68k/mem.c               21
-rw-r--r--  sys/arch/mvme68k/mvme68k/pmap.c             167
-rw-r--r--  sys/arch/mvme68k/mvme68k/pmap_bootstrap.c    10
-rw-r--r--  sys/arch/mvme68k/mvme68k/trap.c              41
-rw-r--r--  sys/arch/mvme68k/mvme68k/vm_machdep.c        21
8 files changed, 49 insertions, 341 deletions
diff --git a/sys/arch/mvme68k/mvme68k/genassym.cf b/sys/arch/mvme68k/mvme68k/genassym.cf
index 2159793ab3f..dfb1eec5957 100644
--- a/sys/arch/mvme68k/mvme68k/genassym.cf
+++ b/sys/arch/mvme68k/mvme68k/genassym.cf
@@ -1,4 +1,4 @@
-#	$OpenBSD: genassym.cf,v 1.5 2001/03/12 07:38:29 smurph Exp $
+#	$OpenBSD: genassym.cf,v 1.6 2001/04/05 20:39:38 deraadt Exp $
 #
 # Copyright (c) 1995 Theo de Raadt
@@ -83,10 +83,6 @@
 include <machine/prom.h>
 include <machine/pte.h>
 include <vm/vm.h>
-ifdef UVM
-include <uvm/uvm_extern.h>
-endif
-
 define	__XXX_BUG_FODDER	0
 
 # CPU options
@@ -132,12 +128,7 @@ define	SRUN	SRUN
 
 # interrupt/fault metering
 define	V_SWTCH		offsetof(struct vmmeter, v_swtch)
-
-ifdef UVM
-define	UVMEXP_INTRS	offsetof(struct uvmexp, intrs)
-else
 define	V_INTR		offsetof(struct vmmeter, v_intr)
-endif
 
 # trap types (should just include trap.h?)
 define	T_BUSERR	T_BUSERR
diff --git a/sys/arch/mvme68k/mvme68k/locore.s b/sys/arch/mvme68k/mvme68k/locore.s
index ba338ef4161..c20395ca18d 100644
--- a/sys/arch/mvme68k/mvme68k/locore.s
+++ b/sys/arch/mvme68k/mvme68k/locore.s
@@ -1,4 +1,4 @@
-/*	$OpenBSD: locore.s,v 1.23 2001/03/12 07:38:31 smurph Exp $	*/
+/*	$OpenBSD: locore.s,v 1.24 2001/04/05 20:39:39 deraadt Exp $	*/
 
 /*
  * Copyright (c) 1995 Theo de Raadt
@@ -488,7 +488,7 @@ Lmmu_enable:
 	.long	0x4e7b1806	| movc d1,urp
 	jra	Lstploaddone
 Lmotommu1:
-	RELOC(_protorp, a0)
+	RELOC(_protorp, a0)
 	movl	#0x80000202,a0@	| nolimit + share global + 4 byte PTEs
 	movl	d1,a0@(4)	| + segtable address
 	pmove	a0@,srp		| load the supervisor root pointer
@@ -1827,7 +1827,7 @@ _getdfc:
 /*
  * Load a new user segment table pointer.
  */
-ENTRY(loadustp)		/* XXX - smurph */
+ENTRY(loadustp)		/* XXX - smuprh */
 	movl	sp@(4),d0	| new USTP
 	moveq	#PGSHIFT,d1
 	lsll	d1,d0		| convert to addr
diff --git a/sys/arch/mvme68k/mvme68k/machdep.c b/sys/arch/mvme68k/mvme68k/machdep.c
index 74c749ef921..fd8820d2443 100644
--- a/sys/arch/mvme68k/mvme68k/machdep.c
+++ b/sys/arch/mvme68k/mvme68k/machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: machdep.c,v 1.34 2001/03/12 07:38:31 smurph Exp $ */
+/*	$OpenBSD: machdep.c,v 1.35 2001/04/05 20:39:39 deraadt Exp $ */
 
 /*
  * Copyright (c) 1995 Theo de Raadt
@@ -117,21 +117,10 @@
 #define	MAXMEM	64*1024*CLSIZE	/* XXX - from cmap.h */
 #include <vm/vm_kern.h>
-#if defined(UVM)
-#include <uvm/uvm_extern.h>
-#endif
-
 /* the following is used externally (sysctl_hw) */
 char	machine[] = "mvme68k";	/* cpu "architecture" */
 
-#if defined(UVM)
-vm_map_t exec_map = NULL;
-vm_map_t mb_map = NULL;
-vm_map_t phys_map = NULL;
-#else
 vm_map_t buffer_map;
-#endif
-
 extern vm_offset_t avail_end;
 
 /*
@@ -199,15 +188,12 @@ mvme68k_init()
 	 * Tell the VM system about available physical memory.  The
	 * hp300 only has one segment.
	 */
-
 #if defined(UVM)
-	uvmexp.pagesize = NBPG;
-	uvm_setpagesize();
 	uvm_page_physload(atop(avail_start), atop(avail_end),
-	    atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
+	    atop(avail_start), atop(avail_end));
 #else
 	vm_page_physload(atop(avail_start), atop(avail_end),
-	    atop(avail_start), atop(avail_end));
+	    atop(avail_start), atop(avail_end));
 #endif /* UVM */
 #endif /* MACHINE_NEW_NONCONTIG */
@@ -253,12 +239,7 @@ cpu_startup()
 	register unsigned i;
 	register caddr_t v, firstaddr;
 	int base, residual;
-
-#if defined(UVM)
-	vaddr_t minaddr, maxaddr;
-#else
 	vm_offset_t minaddr, maxaddr;
-#endif
 	vm_size_t size;
 #ifdef BUFFERS_UNMANAGED
 	vm_offset_t bufmemp;
@@ -349,28 +330,18 @@ cpu_startup()
 		if (nswbuf > 256)
 			nswbuf = 256;		/* sanity */
 	}
-#if !defined(UVM)
 	valloc(swbuf, struct buf, nswbuf);
-#endif
 	valloc(buf, struct buf, nbuf);
 	/*
 	 * End of first pass, size has been calculated so allocate memory
 	 */
 	if (firstaddr == 0) {
 		size = (vm_size_t)(v - firstaddr);
-#if defined(UVM)
-		firstaddr = (caddr_t) uvm_km_zalloc(kernel_map, round_page(size));
-#else
 		firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
-#endif
 		if (firstaddr == 0)
 			panic("startup: no room for tables");
 #ifdef BUFFERS_UNMANAGED
-#if defined(UVM)
-		buffermem = (caddr_t) uvm_km_zalloc(kernel_map, bufpages*CLBYTES);
-#else
 		buffermem = (caddr_t) kmem_alloc(kernel_map, bufpages*CLBYTES);
-#endif
 		if (buffermem == 0)
 			panic("startup: no room for buffers");
 #endif
@@ -386,55 +357,15 @@ cpu_startup()
 	 * in that they usually occupy more virtual memory than physical.
 	 */
 	size = MAXBSIZE * nbuf;
-
-#if defined(UVM)
-	if (uvm_map(kernel_map, (vaddr_t *) &buffers, m88k_round_page(size),
-		    NULL, UVM_UNKNOWN_OFFSET,
-		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
-				UVM_ADV_NORMAL, 0)) != KERN_SUCCESS)
-		panic("cpu_startup: cannot allocate VM for buffers");
-	minaddr = (vaddr_t)buffers;
-#else
 	buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
-				   &maxaddr, size, TRUE);
+	    &maxaddr, size, TRUE);
 	minaddr = (vm_offset_t)buffers;
 	if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
-			(vm_offset_t *)&minaddr, size, FALSE) != KERN_SUCCESS)
+	    &minaddr, size, FALSE) != KERN_SUCCESS)
 		panic("startup: cannot allocate buffers");
-#endif
-
-	if ((bufpages / nbuf) >= btoc(MAXBSIZE)) {
-		/* don't want to alloc more physical mem than needed */
-		bufpages = btoc(MAXBSIZE) * nbuf;
-	}
 	base = bufpages / nbuf;
 	residual = bufpages % nbuf;
-#if defined(UVM)
-	vsize_t curbufsize;
-	vaddr_t curbuf;
-	struct vm_page *pg;
-
-	/*
-	 * Each buffer has MAXBSIZE bytes of VM space allocated.  Of
-	 * that MAXBSIZE space, we allocate and map (base+1) pages
-	 * for the first "residual" buffers, and then we allocate
-	 * "base" pages for the rest.
-	 */
-	curbuf = (vm_offset_t) buffers + (i * MAXBSIZE);
-	curbufsize = CLBYTES * ((i < residual) ? (base+1) : base);
-
-	while (curbufsize) {
-		pg = uvm_pagealloc(NULL, 0, NULL, 0);
-		if (pg == NULL)
-			panic("cpu_startup: not enough memory for "
-			    "buffer cache");
-		pmap_enter(kernel_map->pmap, curbuf,
-		    VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE,
-		    VM_PROT_READ|VM_PROT_WRITE);
-		curbuf += PAGE_SIZE;
-		curbufsize -= PAGE_SIZE;
-	}
-#else
+
 	for (i = 0; i < nbuf; i++) {
 		vm_size_t curbufsize;
 		vm_offset_t curbuf;
@@ -447,32 +378,20 @@ cpu_startup()
 		 */
 		curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
 		curbufsize = CLBYTES * (i < residual ? base+1 : base);
-
 		/* this faults in the required physical pages */
 		vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
 		vm_map_simplify(buffer_map, curbuf);
-#endif
 	}
 
 	/*
 	 * Allocate a submap for exec arguments.  This map effectively
 	 * limits the number of processes exec'ing at any time.
 	 */
-#if defined(UVM)
-	exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
-	    16*NCARGS, VM_MAP_PAGEABLE, FALSE, NULL);
-#else
 	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
-				 16*NCARGS, TRUE);
-#endif
+	    16*NCARGS, TRUE);
 
 	/*
	 * Allocate a submap for physio
	 */
-#if defined(UVM)
-	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
-	    VM_PHYS_SIZE, 0, FALSE, NULL);
-#else
-	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
-				 VM_PHYS_SIZE, TRUE);
-#endif
+	phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
+	    VM_PHYS_SIZE, TRUE);
 
 	/*
 	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
@@ -481,13 +400,8 @@ cpu_startup()
 	mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
 	    M_MBUF, M_NOWAIT);
 	bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
-#if defined(UVM)
-	mb_map = uvm_km_suballoc(kernel_map, (vaddr_t *)&mbutl, &maxaddr,
-	    VM_MBUF_SIZE, VM_MAP_INTRSAFE, FALSE, NULL);
-#else
 	mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
-			       VM_MBUF_SIZE, FALSE);
-#endif
+	    VM_MBUF_SIZE, FALSE);
 	/*
	 * Initialize timeouts
	 */
@@ -496,12 +410,7 @@ cpu_startup()
 #ifdef DEBUG
 	pmapdebug = opmapdebug;
 #endif
-#if defined(UVM)
-	printf("avail mem = %ld (%ld pages)\n", ptoa(uvmexp.free), uvmexp.free);
-#else
-	printf("avail mem = %ld (%ld pages)\n", ptoa(cnt.v_free_count),
-	    ptoa(cnt.v_free_count)/NBPG);
-#endif
+	printf("avail mem = %d\n", ptoa(cnt.v_free_count));
 	printf("using %d buffers containing %d bytes of memory\n",
 	    nbuf, bufpages * CLBYTES);
 #ifdef MFS
diff --git a/sys/arch/mvme68k/mvme68k/mem.c b/sys/arch/mvme68k/mvme68k/mem.c
index 6f96bcc0a0e..0821f2a0645 100644
--- a/sys/arch/mvme68k/mvme68k/mem.c
+++ b/sys/arch/mvme68k/mvme68k/mem.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: mem.c,v 1.10 2001/03/12 07:38:32 smurph Exp $ */
+/*	$OpenBSD: mem.c,v 1.11 2001/04/05 20:39:39 deraadt Exp $ */
 
 /*
  * Copyright (c) 1995 Theo de Raadt
@@ -83,9 +83,6 @@
 #include <machine/cpu.h>
 
 #include <vm/vm.h>
-#if defined(UVM)
-#include <uvm/uvm_extern.h>
-#endif
 
 extern u_int lowram;
 static caddr_t devzeropage;
@@ -164,11 +161,11 @@ mmrw(dev, uio, flags)
 			}
 #endif
 
-			pmap_enter(pmap_kernel(), (vm_offset_t)vmmap,
-			    trunc_page(v),
-			    uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
-			    TRUE,
-			    uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE);
+			pmap_enter(pmap_kernel(), (vm_offset_t)vmmap,
+			    trunc_page(v),
+			    uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
+			    TRUE,
+			    uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE);
 
 			o = uio->uio_offset & PGOFSET;
 			c = min(uio->uio_resid, (int)(NBPG - o));
@@ -181,15 +178,9 @@ mmrw(dev, uio, flags)
 		case 1:
 			v = uio->uio_offset;
 			c = min(iov->iov_len, MAXPHYS);
-#if defined(UVM)
-			if (!uvm_kernacc((caddr_t)v, c,
-			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
-				return (EFAULT);
-#else
 			if (!kernacc((caddr_t)v, c,
 			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
 				return (EFAULT);
-#endif
 			if (v < NBPG)
 				return (EFAULT);
 			error = uiomove((caddr_t)v, c, uio);
diff --git a/sys/arch/mvme68k/mvme68k/pmap.c b/sys/arch/mvme68k/mvme68k/pmap.c
index c37d5f132f9..50141bc6b00 100644
--- a/sys/arch/mvme68k/mvme68k/pmap.c
+++ b/sys/arch/mvme68k/mvme68k/pmap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap.c,v 1.16 2001/03/12 07:38:32 smurph Exp $ */
+/*	$OpenBSD: pmap.c,v 1.17 2001/04/05 20:39:40 deraadt Exp $ */
 
 /*
  * Copyright (c) 1995 Theo de Raadt
@@ -133,9 +133,6 @@
 #include <vm/vm.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_page.h>
-#if defined(UVM)
-#include <uvm/uvm.h>
-#endif
 
 #include <machine/cpu.h>
 
@@ -289,10 +286,6 @@ vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES;
 
 struct pmap	kernel_pmap_store;
 vm_map_t	st_map, pt_map;
-#if defined(UVM)
-struct vm_map	pt_map_store;
-#endif
-
 vm_offset_t	avail_start;	/* PA of first available physical page */
 vm_offset_t	avail_end;	/* PA of last available physical page */
@@ -437,9 +430,7 @@ pmap_init(phys_start, phys_end)
 	char *attr;
 	int bank;
 #endif
-#if defined(UVM)
-	vm_map_entry_t *out_entry_list;
-#endif
+
 
 #ifdef DEBUG
 	if (pmapdebug & PDB_FOLLOW)
@@ -453,30 +444,6 @@ pmap_init(phys_start, phys_end)
 	 * Now that kernel map has been allocated, we can mark as
 	 * unavailable regions which we have mapped in pmap_bootstrap().
 	 */
-#if defined(UVM)
-	addr = (vm_offset_t) intiobase;
-	if (uvm_map(kernel_map, &addr,
-		    m68k_ptob(iiomapsize+EIOMAPSIZE),
-		    NULL, UVM_UNKNOWN_OFFSET,
-		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
-				UVM_INH_NONE, UVM_ADV_RANDOM,
-				UVM_FLAG_FIXED)) != KERN_SUCCESS)
-		goto bogons;
-	addr = (vm_offset_t) Sysmap;
-	if (uvm_map(kernel_map, &addr, M68K_MAX_PTSIZE,
-		    NULL, UVM_UNKNOWN_OFFSET,
-		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
-				UVM_INH_NONE, UVM_ADV_RANDOM,
-				UVM_FLAG_FIXED)) != KERN_SUCCESS) {
-		/*
-		 * If this fails, it is probably because the static
-		 * portion of the kernel page table isn't big enough
-		 * and we overran the page table map.
-		 */
-bogons:
-		panic("pmap_init: bogons in the VM system!\n");
-	}
-#else
 	addr = (vm_offset_t) intiobase;
 	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
 	    &addr, m68k_ptob(iiomapsize+EIOMAPSIZE), FALSE);
@@ -494,7 +461,7 @@ pmap_init(phys_start, phys_end)
 	if (addr != (vm_offset_t)Sysmap)
 bogons:
 		panic("pmap_init: bogons in the VM system!");
-#endif
+
 #ifdef DEBUG
 	if (pmapdebug & PDB_INIT) {
 		printf("pmap_init: Sysseg %x, Sysmap %x, Sysptmap %x\n",
@@ -518,11 +485,7 @@ bogons:
 	s += page_cnt * sizeof(struct pv_entry);	/* pv table */
 	s += page_cnt * sizeof(char);			/* attribute table */
 	s = round_page(s);
-#if defined(UVM)
-	addr = (vm_offset_t)uvm_km_zalloc(kernel_map, s);
-#else
-	addr = (vm_offset_t)kmem_alloc(kernel_map, s);
-#endif
+	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
 	Segtabzero = (st_entry_t *) addr;
 	Segtabzeropa = (st_entry_t *) pmap_extract(pmap_kernel(), addr);
@@ -579,32 +542,17 @@ bogons:
 	 * Verify that space will be allocated in region for which
	 * we already have kernel PT pages.
	 */
-#if defined(UVM)
-	addr = 0;
-	rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET,
-		     UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
-				 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
-	if (rv != KERN_SUCCESS || (addr + s) >= (vm_offset_t)Sysmap)
-		panic("pmap_init: kernel PT too small");
-	rv = uvm_unmap(kernel_map, addr, addr + s);
-	if (rv != KERN_SUCCESS)
-		panic("pmap_init: uvm_unmap failed");
-#else
 	addr = 0;
 	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
 	if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
 		panic("pmap_init: kernel PT too small");
-#endif
+
 	vm_map_remove(kernel_map, addr, addr + s);
 
 	/*
	 * Now allocate the space and link the pages together to
	 * form the KPT free list.
	 */
-#if defined(UVM)
-	addr = (vm_offset_t)uvm_km_zalloc(kernel_map, s);
-#else
-	addr = (vm_offset_t)kmem_alloc(kernel_map, s);
-#endif
+	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
 	s = ptoa(npages);
 	addr2 = addr + s;
 	kpt_pages = &((struct kpt_page *)addr2)[npages];
@@ -632,25 +580,6 @@ bogons:
 		    atop(s), addr, addr + s);
 #endif
 
-#if defined(UVM)
-	/*
-	 * Allocate the segment table map and the page table map.
-	 */
-	addr = M68K_PTBASE;
-	if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
-		s = M68K_PTMAXSIZE;
-		/*
-		 * XXX We don't want to hang when we run out of
-		 * page tables, so we lower maxproc so that fork()
-		 * will fail instead.  Note that root could still raise
-		 * this value via sysctl(2).
-		 */
-		maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);;
-	} else
-		s = (maxproc * M68K_MAX_PTSIZE);
-	pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, VM_MAP_PAGEABLE,
-	    TRUE, &pt_map_store);
-#else
 	/*
	 * Allocate the segment table map
	 */
@@ -684,7 +613,6 @@ bogons:
 	rv = vm_map_submap(kernel_map, addr, addr2, pt_map);
 	if (rv != KERN_SUCCESS)
 		panic("pmap_init: cannot map range to pt_map");
-#endif
 #ifdef DEBUG
 	if (pmapdebug & PDB_INIT)
 		printf("pmap_init: pt_map [%x - %x)\n", addr, addr2);
@@ -716,15 +644,9 @@ pmap_alloc_pv()
 	int i;
 
 	if (pv_nfree == 0) {
-#if defined(UVM)
-		pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
-		if (pvp == 0)
-			panic("pmap_alloc_pv: uvm_km_zalloc() failed");
-#else
 		pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
 		if (pvp == 0)
 			panic("pmap_alloc_pv: kmem_alloc() failed");
-#endif
 		pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
 		for (i = NPVPPG - 2; i; i--, pv++)
 			pv->pv_next = pv + 1;
@@ -767,11 +689,7 @@ pmap_free_pv(pv)
 	case NPVPPG:
 		pv_nfree -= NPVPPG - 1;
 		TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
-#if defined(UVM)
-		uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
-#else
 		kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
-#endif
 		break;
 	}
 }
@@ -829,11 +747,7 @@ pmap_collect_pv()
 
 	for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
 		npvp = pvp->pvp_pgi.pgi_list.tqe_next;
-#if defined(UVM)
-		uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
-#else
 		kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
-#endif
 	}
 }
@@ -982,21 +896,13 @@ pmap_release(pmap)
 	if (pmap->pm_count != 1)
 		panic("pmap_release count");
 #endif
-#if defined(UVM)
-	if (pmap->pm_ptab)
-		uvm_km_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
-		    M68K_MAX_PTSIZE);
-	if (pmap->pm_stab != Segtabzero)
-		uvm_km_free_wakeup(kernel_map, (vm_offset_t)pmap->pm_stab,
-		    M68K_STSIZE);
-#else
+
 	if (pmap->pm_ptab)
 		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
 		    M68K_MAX_PTSIZE);
 	if (pmap->pm_stab != Segtabzero)
-		kmem_free_wakeup(kernel_map, (vm_offset_t)pmap->pm_stab,
+		kmem_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab,
 		    M68K_STSIZE);
-#endif
 }
@@ -1301,11 +1207,7 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
 	 */
 	if (pmap->pm_ptab == NULL)
 		pmap->pm_ptab = (pt_entry_t *)
-#if defined(UVM)
-			uvm_km_valloc_wait(pt_map, M68K_MAX_PTSIZE);
-#else
 			kmem_alloc_wait(pt_map, M68K_MAX_PTSIZE);
-#endif
 
 	/*
	 * Segment table entry not valid, we need a new PT page
@@ -1384,13 +1286,8 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
 	 * is a valid mapping in the page.
	 */
 	if (pmap != pmap_kernel())
-#if defined(UVM)
-		(void) uvm_map_pageable(pt_map, trunc_page(pte),
-		    round_page(pte+1), FALSE);
-#else
 		(void) vm_map_pageable(pt_map, trunc_page(pte),
 		    round_page(pte+1), FALSE);
-#endif
 
 	/*
	 * Enter on the PV list if part of our managed memory
@@ -2127,13 +2024,8 @@ pmap_remove_mapping(pmap, va, pte, flags)
 	 * PT page.
	 */
 	if (pmap != pmap_kernel()) {
-#if defined(UVM)
-		(void) uvm_map_pageable(pt_map, trunc_page(pte),
-		    round_page(pte+1), TRUE);
-#else
 		(void) vm_map_pageable(pt_map, trunc_page(pte),
 		    round_page(pte+1), TRUE);
-#endif
 #ifdef DEBUG
 		if (pmapdebug & PDB_WIRING)
 			pmap_check_wiring("remove", trunc_page(pte));
@@ -2234,15 +2126,9 @@ pmap_remove_mapping(pmap, va, pte, flags)
 				printf("remove: free stab %x\n",
 				    ptpmap->pm_stab);
 #endif
-#if defined(UVM)
-			uvm_km_free_wakeup(st_map,
-			    (vm_offset_t)ptpmap->pm_stab,
-			    M68K_STSIZE);
-#else
 			kmem_free_wakeup(st_map,
 			    (vm_offset_t)ptpmap->pm_stab,
 			    M68K_STSIZE);
-#endif
 			ptpmap->pm_stab = Segtabzero;
 			ptpmap->pm_stpa = Segtabzeropa;
 #if defined(M68040) || defined(M68060)
@@ -2378,15 +2264,10 @@ pmap_changebit(pa, bit, setem)
 			 * XXX don't write protect pager mappings
			 */
 			if (bit == PG_RO) {
-#if defined(UVM)
-				if (va >= uvm.pager_sva && va < uvm.pager_eva)
-					continue;
-#else
 				extern vm_offset_t pager_sva, pager_eva;
 
 				if (va >= pager_sva && va < pager_eva)
					continue;
-#endif
 			}
 
 			pte = pmap_pte(pv->pv_pmap, va);
@@ -2461,12 +2342,8 @@ pmap_enter_ptpage(pmap, va)
 	 * reference count drops to zero.
	 */
 	if (pmap->pm_stab == Segtabzero) {
-		pmap->pm_stab = (st_entry_t *)
-#if defined(UVM)
-			uvm_km_zalloc(st_map, M68K_STSIZE);
-#else
+		pmap->pm_stab = (st_entry_t *)
 			kmem_alloc(st_map, M68K_STSIZE);
-#endif
 		pmap->pm_stpa = (st_entry_t *)
 		    pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_stab);
 #if defined(M68040) || defined(M68060)
@@ -2609,15 +2486,11 @@ pmap_enter_ptpage(pmap, va)
 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
 			printf("enter: about to fault UPT pg at %x\n", va);
 #endif
-#if defined(UVM)
-		if (uvm_fault(pt_map, va, 0, VM_PROT_READ|VM_PROT_WRITE)
-		    != KERN_SUCCESS)
-			panic("pmap_enter: uvm_fault failed");
-#else
-		if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
-		    != KERN_SUCCESS)
+		s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+		if (s != KERN_SUCCESS) {
+			printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va, s);
 			panic("pmap_enter: vm_fault failed");
-#endif
+		}
 		ptpa = pmap_extract(pmap_kernel(), va);
 		/*
		 * Mark the page clean now to avoid its pageout (and
@@ -2625,10 +2498,8 @@ pmap_enter_ptpage(pmap, va)
 		 * is wired; i.e. while it is on a paging queue.
		 */
 		PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
-#if !defined(UVM)
 #ifdef DEBUG
-		PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
-#endif
+		PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
 #endif
 	}
 #if defined(M68040) || defined(M68060)
@@ -2763,17 +2634,11 @@ pmap_check_wiring(str, va)
 	if (!pmap_ste_v(pmap_kernel(), va) ||
 	    !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
		return;
-#if defined(UVM)
-	if (!uvm_map_lookup_entry(pt_map, va, &entry)) {
-		printf("wired_check: entry for %lx not found\n", va);
-		return;
-	}
-#else
+
 	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
-		printf("wired_check: entry for %lx not found\n", va);
+		printf("wired_check: entry for %x not found\n", va);
		return;
	}
-#endif
 	count = 0;
 	for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++)
		if (*pte)
diff --git a/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c b/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c
index 1a8ce411869..3f0f296ba7b 100644
--- a/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c
+++ b/sys/arch/mvme68k/mvme68k/pmap_bootstrap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap_bootstrap.c,v 1.6 2001/03/12 07:38:32 smurph Exp $ */
+/*	$OpenBSD: pmap_bootstrap.c,v 1.7 2001/04/05 20:39:40 deraadt Exp $ */
 
 /*
  * Copyright (c) 1995 Theo de Raadt
@@ -364,10 +364,10 @@ register vm_offset_t firstpa;
 	 * iiomapsize pages prior to external IO space at end of static
	 * kernel page table.
	 */
-	RELOC(intiobase, char *) =
-	    (char *)m68k_ptob(nptpages*NPTEPG - (RELOC(iiomapsize, int)+EIOMAPSIZE));
-	RELOC(intiolimit, char *) =
-	    (char *)m68k_ptob(nptpages*NPTEPG - EIOMAPSIZE);
+	RELOC(intiobase, char *) = (char *)
+	    m68k_ptob(nptpages*NPTEPG - (RELOC(iiomapsize, int)+EIOMAPSIZE));
+	RELOC(intiolimit, char *) = (char *)
+	    m68k_ptob(nptpages*NPTEPG - EIOMAPSIZE);
 	/*
	 * extiobase: base of external (DIO-II) IO space.
	 *	EIOMAPSIZE pages at the end of the static kernel page table.
diff --git a/sys/arch/mvme68k/mvme68k/trap.c b/sys/arch/mvme68k/mvme68k/trap.c
index 62d58f3ebdf..41baa261208 100644
--- a/sys/arch/mvme68k/mvme68k/trap.c
+++ b/sys/arch/mvme68k/mvme68k/trap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: trap.c,v 1.26 2001/03/12 07:38:32 smurph Exp $ */
+/*	$OpenBSD: trap.c,v 1.27 2001/04/05 20:39:40 deraadt Exp $ */
 
 /*
  * Copyright (c) 1995 Theo de Raadt
@@ -98,9 +98,6 @@ extern struct emul emul_sunos;
 
 #include <vm/vm.h>
 #include <vm/pmap.h>
-#if defined(UVM)
-#include <uvm/uvm_extern.h>
-#endif
 
 #ifdef COMPAT_HPUX
 #include <compat/hpux/hpux.h>
@@ -281,11 +278,7 @@ struct frame frame;
 #endif
 	register union sigval sv;
 
-#if defined(UVM)
-	uvmexp.traps++;
-#else
 	cnt.v_trap++;
-#endif
 	p = curproc;
 	ucode = 0;
 	if (USERMODE(frame.f_sr)) {
@@ -525,11 +518,7 @@ copyfault:
 	while (bit = ffs(ssir)) {
 		--bit;
 		ssir &= ~(1 << bit);
-#if defined(UVM)
-		uvmexp.softs++;
-#else
 		cnt.v_soft++;
-#endif
 		if (sir_routines[bit])
 			sir_routines[bit](sir_args[bit]);
 	}
@@ -607,30 +596,17 @@ copyfault:
 			rv = pmap_mapmulti(map->pmap, va);
 			if (rv != KERN_SUCCESS) {
 				bva = HPMMBASEADDR(va);
-#if defined(UVM)
-				rv = uvm_fault(map, bva, 0, ftype);
-#else
 				rv = vm_fault(map, bva, ftype, FALSE);
-#endif
 				if (rv == KERN_SUCCESS)
 					(void) pmap_mapmulti(map->pmap, va);
 			}
 		} else
 #endif
-#if defined(UVM)
-		rv = uvm_fault(map, va, 0, ftype);
-#else
-		rv = vm_fault(map, va, ftype, FALSE);
-#endif
+		rv = vm_fault(map, va, ftype, FALSE);
 #ifdef DEBUG
 		if (rv && MDB_ISPID(p->p_pid))
-#if defined(UVM)
-			printf("uvm_fault(%x, %x, 0, %x) -> %x\n",
-			    map, va, ftype, rv);
-#else
 			printf("vm_fault(%x, %x, %x, 0) -> %x\n",
 			    map, va, ftype, rv);
-#endif
 #endif
 		/*
		 * If this was a stack access we keep track of the maximum
@@ -661,14 +637,9 @@ copyfault:
 		}
 		if (type == T_MMUFLT) {
 			if (p && p->p_addr->u_pcb.pcb_onfault)
-				goto copyfault;
-#if defined(UVM)
-			printf("uvm_fault(%x, %x, 0, %x) -> %x\n",
-			    map, va, ftype, rv);
-#else
+				goto copyfault;
 			printf("vm_fault(%x, %x, %x, 0) -> %x\n",
 			    map, va, ftype, rv);
-#endif
 			printf("  type %x, code [mmu,,ssw]: %x\n",
 			    type, code);
 			goto dopanic;
@@ -1006,12 +977,8 @@ struct frame frame;
 #ifdef COMPAT_SUNOS
 	extern struct emul emul_sunos;
 #endif
-#if defined(UVM)
-	uvmexp.syscalls++;
-#else
+
 	cnt.v_syscall++;
-#endif
-
 	if (!USERMODE(frame.f_sr))
 		panic("syscall");
 	p = curproc;
diff --git a/sys/arch/mvme68k/mvme68k/vm_machdep.c b/sys/arch/mvme68k/mvme68k/vm_machdep.c
index 9b05657c5cc..61a8878fc31 100644
--- a/sys/arch/mvme68k/mvme68k/vm_machdep.c
+++ b/sys/arch/mvme68k/mvme68k/vm_machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vm_machdep.c,v 1.19 2001/03/12 07:38:33 smurph Exp $ */
+/*	$OpenBSD: vm_machdep.c,v 1.20 2001/04/05 20:39:41 deraadt Exp $ */
 
 /*
  * Copyright (c) 1988 University of Utah.
@@ -57,9 +57,6 @@
 
 #include <vm/vm.h>
 #include <vm/vm_kern.h>
-#if defined(UVM)
-#include <uvm/uvm_extern.h>
-#endif
 
 /*
  * Finish a fork operation, with process p2 nearly set up.
@@ -95,13 +92,13 @@ cpu_fork(p1, p2, stack, stacksize)
 	p2->p_md.md_regs = (int *)tf;
 	*tf = *(struct trapframe *)p1->p_md.md_regs;
 
-	/*
+	/*
	 * If specified, give the child a different stack.
	 */
 	if (stack != NULL)
		tf->tf_regs[15] = (u_int)stack + stacksize;
 
-	sf = (struct switchframe *)tf - 1;
+	sf = (struct switchframe *)tf - 1;
 	sf->sf_pc = (u_int)proc_trampoline;
 	pcb->pcb_regs[6] = (int)child_return;	/* A2 */
@@ -133,11 +130,7 @@ cpu_exit(p)
 {
 	(void) splimp();
-#if defined(UVM)
-	uvmexp.swtch++;
-#else
 	cnt.v_swtch++;
-#endif
 	switch_exit(p);
 	/* NOTREACHED */
 }
@@ -293,11 +286,7 @@ vmapbuf(bp, siz)
 	off = (int)addr & PGOFSET;
 	p = bp->b_proc;
 	npf = btoc(round_page(bp->b_bcount + off));
-#if defined(UVM)
-	kva = uvm_km_valloc_wait(phys_map, ctob(npf));
-#else
 	kva = kmem_alloc_wait(phys_map, ctob(npf));
-#endif
 	bp->b_data = (caddr_t)(kva + off);
 	while (npf--) {
 		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
@@ -328,11 +317,7 @@ vunmapbuf(bp, siz)
 	addr = bp->b_data;
 	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
 	kva = (vm_offset_t)((int)addr & ~PGOFSET);
-#if defined(UVM)
-	uvm_km_free_wakeup(phys_map, kva, ctob(npf));
-#else
 	kmem_free_wakeup(phys_map, kva, ctob(npf));
-#endif
 	bp->b_data = bp->b_saveaddr;
 	bp->b_saveaddr = NULL;
 }