author    | Steve Murphree <smurph@cvs.openbsd.org> | 2001-01-12 07:29:28 +0000
committer | Steve Murphree <smurph@cvs.openbsd.org> | 2001-01-12 07:29:28 +0000
commit    | 0764426082e0e2480c3365cf453b99287858662c (patch)
tree      | baac8a0d30bf0e8e7e9674104178fc580472e62b
parent    | 36a7fbd69a94ac66a8f93f5d4471c4213500a12c (diff)
Update vm interface to MACHINE_NEW_NONCONTIG. Fix compile warning in pcctwo.c
-rw-r--r-- | sys/arch/mvme88k/dev/pcctwo.c         |   4
-rw-r--r-- | sys/arch/mvme88k/include/pcb.h        |  68
-rw-r--r-- | sys/arch/mvme88k/include/pmap.h       |  24
-rw-r--r-- | sys/arch/mvme88k/include/reg.h        |  86
-rw-r--r-- | sys/arch/mvme88k/include/vmparam.h    |  37
-rw-r--r-- | sys/arch/mvme88k/mvme88k/genassym.c   |  21
-rw-r--r-- | sys/arch/mvme88k/mvme88k/m18x_cmmu.c  |   3
-rw-r--r-- | sys/arch/mvme88k/mvme88k/machdep.c    |  40
-rw-r--r-- | sys/arch/mvme88k/mvme88k/pmap.c       | 458
-rw-r--r-- | sys/arch/mvme88k/mvme88k/process.S    |  34
-rw-r--r-- | sys/arch/mvme88k/mvme88k/vm_machdep.c |   6
11 files changed, 506 insertions, 275 deletions
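Before the diff itself, a brief orientation on the interface named in the commit message: under MACHINE_NEW_NONCONTIG the per-page pmap data (pv lists, modify attributes, page locks) no longer lives in flat arrays bounded by pmap_phys_start/pmap_phys_end; it hangs off the vm_physmem[] segment array via the pmap_physseg fields added in vmparam.h, and is reached through vm_physseg_find(), as the new PA_TO_PVH/LOCK_PVH/SET_ATTRIB macros in pmap.c show. Below is a minimal user-space sketch of that lookup pattern, not kernel code; the flattened segment structure and the physseg_find()/pa_to_pvh() helper names are illustrative assumptions, while the bank/offset computation mirrors the macros added in the diff.

/*
 * Standalone sketch of the MACHINE_NEW_NONCONTIG page lookup used by the
 * new PA_TO_PVH()/LOCK_PVH()/SET_ATTRIB() macros in pmap.c: per-page data
 * is kept per physical segment and found by searching vm_physmem[].
 * Types are flattened here for illustration; only the bank/offset logic
 * follows the diff.
 */
#include <stddef.h>

#define PAGE_SHIFT	12
#define atop(pa)	((pa) >> PAGE_SHIFT)	/* physical address -> page frame */

struct pv_entry {
	struct pv_entry	*next;	/* next mapping of the same physical page */
	void		*pmap;	/* pmap where the mapping lies */
	unsigned long	 va;	/* virtual address of the mapping */
};

struct phys_seg {
	unsigned long	 start, end;	/* first and last+1 page frame number */
	struct pv_entry	*pvent;		/* pv table for this segment */
	char		*attrs;		/* modify-bit array for this segment */
};

/* mvme88k loads exactly one segment (VM_PHYSSEG_MAX is 1 in vmparam.h) */
struct phys_seg	vm_physmem[1];
int		vm_nphysseg = 1;

/* analogue of vm_physseg_find(): which bank holds page frame pfn? */
int
physseg_find(unsigned long pfn, int *offp)
{
	int bank;

	for (bank = 0; bank < vm_nphysseg; bank++) {
		if (pfn >= vm_physmem[bank].start && pfn < vm_physmem[bank].end) {
			if (offp != NULL)
				*offp = (int)(pfn - vm_physmem[bank].start);
			return (bank);
		}
	}
	return (-1);
}

/* analogue of the PA_TO_PVH(pa) statement-expression macro */
struct pv_entry *
pa_to_pvh(unsigned long pa)
{
	int bank, pg;

	bank = physseg_find(atop(pa), &pg);
	return (bank == -1 ? NULL : &vm_physmem[bank].pvent[pg]);
}

The real macros additionally take the per-page simplelock from vm_physmem[bank].pmseg.plock, and PMAP_MANAGED(pa) now reduces to "vm_physseg_find() found a bank", replacing the old pmap_phys_start/pmap_phys_end range check.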
diff --git a/sys/arch/mvme88k/dev/pcctwo.c b/sys/arch/mvme88k/dev/pcctwo.c index abf812af171..e91514f62a9 100644 --- a/sys/arch/mvme88k/dev/pcctwo.c +++ b/sys/arch/mvme88k/dev/pcctwo.c @@ -1,5 +1,5 @@ -/* $OpenBSD: pcctwo.c,v 1.8 2000/03/26 23:32:00 deraadt Exp $ */ +/* $OpenBSD: pcctwo.c,v 1.9 2001/01/12 07:29:27 smurph Exp $ */ /* * Copyright (c) 1995 Theo de Raadt @@ -141,7 +141,7 @@ pcctwo_scan(parent, child, args) bzero(&oca, sizeof oca); oca.ca_offset = cf->cf_loc[0]; oca.ca_ipl = cf->cf_loc[1]; - if ((oca.ca_offset != (void*)-1) && ISIIOVA(sc->sc_vaddr + oca.ca_offset)) { + if (((int)oca.ca_offset != -1) && ISIIOVA(sc->sc_vaddr + oca.ca_offset)) { oca.ca_vaddr = sc->sc_vaddr + oca.ca_offset; oca.ca_paddr = sc->sc_paddr + oca.ca_offset; } else { diff --git a/sys/arch/mvme88k/include/pcb.h b/sys/arch/mvme88k/include/pcb.h index 3823e772504..26c20af5101 100644 --- a/sys/arch/mvme88k/include/pcb.h +++ b/sys/arch/mvme88k/include/pcb.h @@ -1,4 +1,4 @@ -/* $OpenBSD: pcb.h,v 1.6 2000/12/28 21:21:24 smurph Exp $ */ +/* $OpenBSD: pcb.h,v 1.7 2001/01/12 07:29:27 smurph Exp $ */ /* * Copyright (c) 1996 Nivas Madhur * Mach Operating System @@ -31,8 +31,10 @@ */ /* */ -#ifndef _PCB_H_ -#define _PCB_H_ +#ifndef _M88K_PCB_H_ +#define _M88K_PCB_H_ + +#include <machine/reg.h> /* * Our PCB is the regular PCB+Save area for kernel frame. @@ -75,63 +77,7 @@ struct m88100_pcb { unsigned pcb_sp; /* kernel stack pointer */ }; - -/* - * m88100_saved_state this structure corresponds to the state - * of the user registers as saved on the - * stack upon kernel entry. This structure - * is used internally only. Since this - * structure may change from version to - * version, it is hidden from the user. - */ - -/* This must always be an even number of words long */ - -struct m88100_saved_state { - unsigned r[32]; /* 0 - 31 */ -#define tf_sp r[31] - unsigned epsr; /* 32 */ - unsigned fpsr; - unsigned fpcr; -#define exip sxip - unsigned sxip; -#define enip snip - unsigned snip; - unsigned sfip; - unsigned ssbr; - unsigned dmt0; - unsigned dmd0; - unsigned dma0; - unsigned dmt1; - unsigned dmd1; - unsigned dma1; - unsigned dmt2; - unsigned dmd2; - unsigned dma2; - unsigned fpecr; - unsigned fphs1; - unsigned fpls1; - unsigned fphs2; - unsigned fpls2; - unsigned fppt; - unsigned fprh; - unsigned fprl; - unsigned fpit; - unsigned vector; /* exception vector number */ - unsigned mask; /* interrupt mask level */ - unsigned mode; /* interrupt mode */ - unsigned scratch1; /* used by locore trap handling code */ - unsigned ipfsr; /* P BUS status - used in inst fault handling */ - unsigned dpfsr; /* P BUS status - used in data fault handling */ - unsigned dsr; /* MVME197 */ - unsigned dlar; /* MVME197 */ - unsigned dpar; /* MVME197 */ - unsigned isr; /* MVME197 */ - unsigned ilar; /* MVME197 */ - unsigned ipar; /* MVME197 */ - unsigned pad; /* alignment */ -}; - +#define m88100_saved_state reg #define trapframe m88100_saved_state struct pcb @@ -159,4 +105,4 @@ struct md_coredump { struct trapframe md_tf; }; -#endif _PCB_H_ +#endif _M88K_PCB_H_ diff --git a/sys/arch/mvme88k/include/pmap.h b/sys/arch/mvme88k/include/pmap.h index 16b92109900..66d70af13b1 100644 --- a/sys/arch/mvme88k/include/pmap.h +++ b/sys/arch/mvme88k/include/pmap.h @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.h,v 1.9 1999/09/27 20:46:19 smurph Exp $ */ +/* $OpenBSD: pmap.h,v 1.10 2001/01/12 07:29:27 smurph Exp $ */ /* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University @@ -51,12 +51,24 @@ struct pmap { }; -#include <vm/vm.h> - #define PMAP_NULL 
((pmap_t) 0) - extern pmap_t kernel_pmap; +/* The PV (Physical to virtual) List. + * + * For each vm_page_t, pmap keeps a list of all currently valid virtual + * mappings of that page. An entry is a pv_entry_t; the list is the + * pv_head_table. This is used by things like pmap_remove, when we must + * find and remove all mappings for a particular physical page. + */ +typedef struct pv_entry { + struct pv_entry *next; /* next pv_entry */ + pmap_t pmap; /* pmap where mapping lies */ + vm_offset_t va; /* virtual address for mapping */ +} *pv_entry_t; + +#include <vm/vm.h> + #define PMAP_ACTIVATE(pmap, th, my_cpu) _pmap_activate(pmap, th, my_cpu) #define PMAP_DEACTIVATE(pmap, th, my_cpu) _pmap_deactivate(pmap, th, my_cpu) @@ -94,8 +106,8 @@ extern pmap_t kernel_pmap; void _pmap_activate(pmap_t pmap, pcb_t, int my_cpu); void _pmap_deactivate(pmap_t pmap, pcb_t, int my_cpu); -void pmap_activate(pmap_t my_pmap, pcb_t, int cpu); -void pmap_deactivate(pmap_t pmap, pcb_t, int cpu); +void pmap_activate(struct proc *p); +void pmap_deactivate(struct proc *p); int pmap_check_transaction(pmap_t pmap, vm_offset_t va, vm_prot_t type); vm_offset_t pmap_map( diff --git a/sys/arch/mvme88k/include/reg.h b/sys/arch/mvme88k/include/reg.h index 59827e812cb..00c3ae9060f 100644 --- a/sys/arch/mvme88k/include/reg.h +++ b/sys/arch/mvme88k/include/reg.h @@ -1,4 +1,4 @@ -/* $OpenBSD: reg.h,v 1.5 1999/09/27 20:46:19 smurph Exp $ */ +/* $OpenBSD: reg.h,v 1.6 2001/01/12 07:29:27 smurph Exp $ */ /* * Copyright (c) 1999 Steve Murphree, Jr. * Copyright (c) 1996 Nivas Madhur @@ -30,46 +30,56 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ -#include <machine/pcb.h> +#ifndef _M88K_REG_H_ +#define _M88K_REG_H_ -#define reg m88100_saved_state #define r_ -#if 0 +/* This must always be an even number of words long */ struct reg { - unsigned r_r[32]; - unsigned r_fpsr; - unsigned r_fpcr; - unsigned r_epsr; - unsigned r_sxip; - unsigned r_snip; - unsigned r_sfip; - unsigned r_ssbr; - unsigned r_dmt0; - unsigned r_dmd0; - unsigned r_dma0; - unsigned r_dmt1; - unsigned r_dmd1; - unsigned r_dma1; - unsigned r_dmt2; - unsigned r_dmd2; - unsigned r_dma2; - unsigned r_fpecr; - unsigned r_fphs1; - unsigned r_fpls1; - unsigned r_fphs2; - unsigned r_fpls2; - unsigned r_fppt; - unsigned r_fprh; - unsigned r_fprl; - unsigned r_fpit; - unsigned r_vector; /* exception vector number */ - unsigned r_mask; /* interrupt mask level */ - unsigned r_mode; /* interrupt mode */ - unsigned r_scratch1; /* used by locore trap handling code */ - unsigned r_pad; /* to make an even length */ -} ; -#endif + unsigned r[32]; /* 0 - 31 */ +#define tf_sp r[31] + unsigned epsr; /* 32 */ + unsigned fpsr; + unsigned fpcr; + unsigned sxip; +#define exip sxip + unsigned snip; +#define enip snip + unsigned sfip; + unsigned ssbr; + unsigned dmt0; + unsigned dmd0; + unsigned dma0; + unsigned dmt1; + unsigned dmd1; + unsigned dma1; + unsigned dmt2; + unsigned dmd2; + unsigned dma2; + unsigned fpecr; + unsigned fphs1; + unsigned fpls1; + unsigned fphs2; + unsigned fpls2; + unsigned fppt; + unsigned fprh; + unsigned fprl; + unsigned fpit; + unsigned vector; /* exception vector number */ + unsigned mask; /* interrupt mask level */ + unsigned mode; /* interrupt mode */ + unsigned scratch1; /* used by locore trap handling code */ + unsigned ipfsr; /* P BUS status - used in inst fault handling */ + unsigned dpfsr; /* P BUS status - used in data fault handling */ + unsigned dsr; /* MVME197 */ + unsigned dlar; /* MVME197 */ + unsigned dpar; /* MVME197 */ 
+ unsigned isr; /* MVME197 */ + unsigned ilar; /* MVME197 */ + unsigned ipar; /* MVME197 */ + unsigned pad; /* alignment */ +}; struct fpreg { unsigned fp_fpecr; @@ -82,3 +92,5 @@ struct fpreg { unsigned fp_fprl; unsigned fp_fpit; }; + +#endif /* _M88K_REG_H_ */ diff --git a/sys/arch/mvme88k/include/vmparam.h b/sys/arch/mvme88k/include/vmparam.h index 8823aaa8a62..1e10d3be943 100644 --- a/sys/arch/mvme88k/include/vmparam.h +++ b/sys/arch/mvme88k/include/vmparam.h @@ -1,4 +1,4 @@ -/* $OpenBSD: vmparam.h,v 1.6 1999/09/27 20:46:19 smurph Exp $ */ +/* $OpenBSD: vmparam.h,v 1.7 2001/01/12 07:29:27 smurph Exp $ */ /* * Mach Operating System * Copyright (c) 1992 Carnegie Mellon University @@ -68,6 +68,14 @@ #endif /* + * PTEs for mapping user space into the kernel for phyio operations. + * One page is enough to handle 4Mb of simultaneous raw IO operations. + */ +#ifndef USRIOSIZE +#define USRIOSIZE (1 * NPTEPG) /* 4mb */ +#endif + +/* * External IO space map size. */ #ifndef EIOMAPSIZE @@ -168,6 +176,7 @@ /* virtual sizes (bytes) for various kernel submaps */ #define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES) #define VM_KMEM_SIZE (NKMEMCLUSTERS*CLBYTES) +#define VM_PHYS_SIZE (USRIOSIZE*CLBYTES) /* * Conversion between MACHINE pages and VM pages @@ -177,6 +186,32 @@ #define round_m88k_to_vm(p) (atop(round_page(m88k_ptob(p)))) #define vm_to_m88k(p) (m88k_btop(ptoa(p))) +/* Use new VM page bootstrap interface. */ +#define MACHINE_NEW_NONCONTIG + +#if defined(MACHINE_NEW_NONCONTIG) +/* + * Constants which control the way the VM system deals with memory segments. + * The hp300 only has one physical memory segment. + */ +#define VM_PHYSSEG_MAX 1 +#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH +#define VM_PHYSSEG_NOADD + +#define VM_NFREELIST 1 +#define VM_FREELIST_DEFAULT 0 + +/* + * pmap-specific data stored in the vm_physmem[] array. + */ +struct pmap_physseg { + struct pv_entry *pvent; /* pv table for this seg */ + char *attrs; /* page modify list for this seg */ + struct simplelock *plock; /* page lock for this seg */ +}; +#endif /* MACHINE_NEW_NONCONTIG */ + + #if 1 /*Do we really need all this stuff*/ #if 1 /*Do we really need all this stuff*/ #if 1 /*Do we really need all this stuff*/ diff --git a/sys/arch/mvme88k/mvme88k/genassym.c b/sys/arch/mvme88k/mvme88k/genassym.c index 8090849ed48..0f09828b1dd 100644 --- a/sys/arch/mvme88k/mvme88k/genassym.c +++ b/sys/arch/mvme88k/mvme88k/genassym.c @@ -1,4 +1,4 @@ -/* $OpenBSD: genassym.c,v 1.5 1999/09/27 19:13:22 smurph Exp $ */ +/* $OpenBSD: genassym.c,v 1.6 2001/01/12 07:29:26 smurph Exp $ */ /* * Copyright (c) 1982, 1990 The Regents of the University of California. * All rights reserved. @@ -32,7 +32,7 @@ * SUCH DAMAGE. 
* * @(#)genassym.c 7.8 (Berkeley) 5/7/91 - * $Id: genassym.c,v 1.5 1999/09/27 19:13:22 smurph Exp $ + * $Id: genassym.c,v 1.6 2001/01/12 07:29:26 smurph Exp $ */ #ifndef KERNEL @@ -51,6 +51,9 @@ #include <machine/vmparam.h> #include <sys/syscall.h> #include <vm/vm.h> +#ifdef UVM +#include <uvm/uvm_extern.h> +#endif #include <sys/user.h> #define pair(TOKEN, ELEMENT) \ @@ -62,7 +65,11 @@ main() { register struct proc *p = (struct proc *)0; struct m88100_saved_state *ss = (struct m88100_saved_state *) 0; +#ifdef UVM + register struct uvmexp *uvm = (struct uvmexp *)0; +#else register struct vmmeter *vm = (struct vmmeter *)0; +#endif register struct user *up = (struct user *)0; register struct rusage *rup = (struct rusage *)0; struct vmspace *vms = (struct vmspace *)0; @@ -83,10 +90,16 @@ main() printf("#define\tP_STAT %d\n", &p->p_stat); printf("#define\tP_WCHAN %d\n", &p->p_wchan); printf("#define\tSRUN %d\n", SRUN); - +#if 1 printf("#define\tVM_PMAP %d\n", &vms->vm_pmap); +#else + printf("#define\tVM_PMAP %d\n", &vms->vm_map.pmap); +#endif +#ifdef UVM + printf("#define\tUVMEXP_INTRS %d\n", &uvm->intrs); +#else printf("#define\tV_INTR %d\n", &vm->v_intr); - +#endif printf("#define\tUPAGES %d\n", UPAGES); printf("#define\tPGSHIFT %d\n", PGSHIFT); printf("#define\tUSIZE %d\n", USPACE); diff --git a/sys/arch/mvme88k/mvme88k/m18x_cmmu.c b/sys/arch/mvme88k/mvme88k/m18x_cmmu.c index 861946d9acf..b55fe7027f8 100644 --- a/sys/arch/mvme88k/mvme88k/m18x_cmmu.c +++ b/sys/arch/mvme88k/mvme88k/m18x_cmmu.c @@ -1,4 +1,4 @@ -/* $OpenBSD: m18x_cmmu.c,v 1.3 2000/12/28 21:21:24 smurph Exp $ */ +/* $OpenBSD: m18x_cmmu.c,v 1.4 2001/01/12 07:29:26 smurph Exp $ */ /* * Copyright (c) 1998 Steve Murphree, Jr. * Copyright (c) 1996 Nivas Madhur @@ -61,6 +61,7 @@ #include <sys/param.h> #include <sys/types.h> #include <sys/simplelock.h> +#include <machine/asm_macro.h> #include <machine/board.h> #include <machine/cpus.h> #include <machine/cpu_number.h> diff --git a/sys/arch/mvme88k/mvme88k/machdep.c b/sys/arch/mvme88k/mvme88k/machdep.c index 88f5822d696..11558c41313 100644 --- a/sys/arch/mvme88k/mvme88k/machdep.c +++ b/sys/arch/mvme88k/mvme88k/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.21 2000/12/28 21:21:24 smurph Exp $ */ +/* $OpenBSD: machdep.c,v 1.22 2001/01/12 07:29:26 smurph Exp $ */ /* * Copyright (c) 1998, 1999 Steve Murphree, Jr. * Copyright (c) 1996 Nivas Madhur @@ -166,6 +166,14 @@ int BugWorks = 0; */ int safepri = 0; +#if defined(UVM) +vm_map_t exec_map = NULL; +vm_map_t mb_map = NULL; +vm_map_t phys_map = NULL; +#else +vm_map_t buffer_map; +#endif + /* * iomap stuff is for managing chunks of virtual address space that * can be allocated to IO devices. @@ -612,9 +620,9 @@ cpu_startup() /* * Allocate map for physio. */ - - phys_map = vm_map_create(kernel_pmap, PHYSIO_MAP_START, - PHYSIO_MAP_START + PHYSIO_MAP_SIZE, TRUE); + phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, VM_PHYS_SIZE, + TRUE); + if (phys_map == NULL) { panic("cpu_startup: unable to create phys_map"); } @@ -629,12 +637,13 @@ cpu_startup() * defined, as it checks (long)addr < 0. So as a workaround, I use * 0x10000000 as a base address. 
XXX smurph */ - - iomap_map = vm_map_create(kernel_pmap, (u_long)0x10000000, - (u_long)0x10000000 + IOMAP_SIZE, TRUE); + iomap_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, IOMAP_SIZE, + TRUE); + if (iomap_map == NULL) { panic("cpu_startup: unable to create iomap_map"); } + iomapbase = (void *)kmem_alloc_wait(iomap_map, IOMAP_SIZE); rminit(iomap, IOMAP_SIZE, (u_long)iomapbase, "iomap", NIOPMAP); @@ -1085,9 +1094,10 @@ boot(howto) register int howto; { /* take a snap shot before clobbering any registers */ +#if 0 if (curproc) savectx(curproc->p_addr->u_pcb); - +#endif boothowto = howto; if ((howto & RB_NOSYNC) == 0 && waittime < 0) { extern struct proc proc0; @@ -2158,6 +2168,20 @@ mvme_bootstrap(void) &avail_start, &avail_end, &virtual_avail, &virtual_end); +#if defined(MACHINE_NEW_NONCONTIG) + /* + * Tell the VM system about available physical memory. + * mvme88k only has one segment. + */ +#if defined(UVM) + uvm_page_physload(atop(avail_start), atop(avail_end), + atop(avail_start), atop(avail_end),VM_FREELIST_DEFAULT); +#else + vm_page_physload(atop(avail_start), atop(avail_end), + atop(avail_start), atop(avail_end)); +#endif /* UVM */ +#endif /* MACHINE_NEW_NONCONTIG */ + /* * Must initialize p_addr before autoconfig or * the fault handler will get a NULL reference. diff --git a/sys/arch/mvme88k/mvme88k/pmap.c b/sys/arch/mvme88k/mvme88k/pmap.c index 67acb97c46d..1b824ce0c0d 100644 --- a/sys/arch/mvme88k/mvme88k/pmap.c +++ b/sys/arch/mvme88k/mvme88k/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.15 2000/12/28 21:21:24 smurph Exp $ */ +/* $OpenBSD: pmap.c,v 1.16 2001/01/12 07:29:26 smurph Exp $ */ /* * Copyright (c) 1996 Nivas Madhur * All rights reserved. @@ -61,6 +61,7 @@ #include <sys/proc.h> #include <sys/malloc.h> #include <sys/msgbuf.h> +#include <sys/user.h> #include <machine/assert.h> #include <machine/cpu_number.h> #include <machine/pmap_table.h> @@ -189,32 +190,76 @@ int ptes_per_vm_page; /* no. of ptes required to map one VM page */ */ char *pmap_modify_list; - -/* The PV (Physical to virtual) List. - * - * For each vm_page_t, pmap keeps a list of all currently valid virtual - * mappings of that page. An entry is a pv_entry_t; the list is the - * pv_head_table. This is used by things like pmap_remove, when we must - * find and remove all mappings for a particular physical page. - */ -typedef struct pv_entry { - struct pv_entry *next; /* next pv_entry */ - pmap_t pmap; /* pmap where mapping lies */ - vm_offset_t va; /* virtual address for mapping */ -} *pv_entry_t; - #define PV_ENTRY_NULL ((pv_entry_t) 0) static struct simplelock *pv_lock_table; /* array */ static pv_entry_t pv_head_table; /* array of entries, one per page */ + +#if !defined(MACHINE_NEW_NONCONTIG) +/* + * First and last physical address that we maintain any information + * for. Initialized to zero so that pmap operations done before + * pmap_init won't touch any non-existent structures. + */ +static vm_offset_t pmap_phys_start = (vm_offset_t) 0; +static vm_offset_t pmap_phys_end = (vm_offset_t) 0; + /* * Index into pv_head table, its lock bits, and the modify bits * starting at pmap_phys_start. 
*/ #define PFIDX(pa) (atop(pa - pmap_phys_start)) #define PFIDX_TO_PVH(pfidx) (&pv_head_table[pfidx]) +#define PA_TO_PVH(pa) (&pv_head_table[PFIDX(pa)]) +#define PMAP_MANAGED(pa) (pmap_initialized && \ + ((pa) >= pmap_phys_start && (pa) < pmap_phys_end)) +#define LOCK_PVH(pa) simple_lock(&(pv_lock_table[PFIDX(pa)])) +#define UNLOCK_PVH(pa) simple_unlock(&(pv_lock_table[PFIDX(pa)])) +#define PA_TO_ATTRIB(pa) (pmap_modify_list[PFIDX(pa)]) +#define SET_ATTRIB(pa, attr) (pmap_modify_list[PFIDX(pa)] = (attr)) + +#else +#define PMAP_MANAGED(pa) (pmap_initialized && \ + vm_physseg_find(atop((pa)), NULL) != -1) + +#define PA_TO_PVH(pa) \ +({ \ + int bank_, pg_; \ + \ + bank_ = vm_physseg_find(atop((pa)), &pg_); \ + &vm_physmem[bank_].pmseg.pvent[pg_]; \ +}) +#define LOCK_PVH(pa) \ +({ \ + int bank_, pg_; \ + \ + bank_ = vm_physseg_find(atop((pa)), &pg_); \ + simple_lock(&vm_physmem[bank_].pmseg.plock[pg_]); \ +}) +#define UNLOCK_PVH(pa) \ +({ \ + int bank_, pg_; \ + \ + bank_ = vm_physseg_find(atop((pa)), &pg_); \ + simple_unlock(&vm_physmem[bank_].pmseg.plock[pg_]); \ +}) +#define PA_TO_ATTRIB(pa) \ +({ \ + int bank_, pg_; \ + \ + bank_ = vm_physseg_find(atop((pa)), &pg_); \ + vm_physmem[bank_].pmseg.attrs[pg_]; \ +}) +#define SET_ATTRIB(pa, attr) \ +({ \ + int bank_, pg_; \ + \ + bank_ = vm_physseg_find(atop((pa)), &pg_); \ + vm_physmem[bank_].pmseg.attrs[pg_] = (attr); \ +}) +#endif /* * Locking and TLB invalidation primitives @@ -274,25 +319,13 @@ static pv_entry_t pv_head_table; /* array of entries, one per page */ } #define PV_LOCK_TABLE_SIZE(n) ((vm_size_t)((n) * sizeof(struct simplelock))) -#define LOCK_PVH(index) simple_lock(&(pv_lock_table[index])) -#define UNLOCK_PVH(index) simple_unlock(&(pv_lock_table[index])) +#define PV_TABLE_SIZE(n) ((vm_size_t)((n) * sizeof(struct pv_entry))) #define ETHERPAGES 16 void *etherbuf=NULL; int etherlen; /* - * First and last physical address that we maintain any information - * for. Initialized to zero so that pmap operations done before - * pmap_init won't touch any non-existent structures. - */ - -static vm_offset_t pmap_phys_start = (vm_offset_t) 0; -static vm_offset_t pmap_phys_end = (vm_offset_t) 0; - -#define PMAP_MANAGED(pa) (pmap_initialized && ((pa) >= pmap_phys_start && (pa) < pmap_phys_end)) - -/* * This variable extract vax's pmap.c. * pmap_verify_free refer to this. * pmap_init initialize this. @@ -404,6 +437,7 @@ flush_atc_entry(long users, vm_offset_t va, int kernel) * _pmap_activate. * */ +#if 0 void _pmap_activate(pmap_t pmap, pcb_t pcb, int my_cpu) { @@ -469,6 +503,7 @@ _pmap_activate(pmap_t pmap, pcb_t pcb, int my_cpu) } } /* _pmap_activate */ +#endif /* * Routine: _PMAP_DEACTIVATE @@ -491,6 +526,7 @@ _pmap_activate(pmap_t pmap, pcb_t pcb, int my_cpu) * _pmap_deactivate. * */ +#if 0 void _pmap_deactivate(pmap_t pmap, pcb_t pcb, int my_cpu) { @@ -504,7 +540,7 @@ _pmap_deactivate(pmap_t pmap, pcb_t pcb, int my_cpu) simple_unlock(&pmap->lock); } } - +#endif /* * Author: Joe Uemura * Convert machine-independent protection code to M88K protection bits. @@ -1447,6 +1483,7 @@ pmap_bootstrap(vm_offset_t load_start, /* IN */ * memory will never be freed, and in essence it is wired down. 
*/ +#if !defined(MACHINE_NEW_NONCONTIG) void * pmap_bootstrap_alloc(int size) { @@ -1467,6 +1504,7 @@ pmap_bootstrap_alloc(int size) bzero((void *)mem, size); return (mem); } +#endif /* !defined(MACHINE_NEW_NONCONTIG) */ /* * Routine: PMAP_INIT @@ -1510,6 +1548,89 @@ pmap_bootstrap_alloc(int size) * zinit(segment zone) * */ +#ifdef MACHINE_NEW_NONCONTIG +void +pmap_init(void) +{ + register long npages; + register vm_offset_t addr; + register vm_size_t s; + register int i; + struct pv_entry *pv; + char *attr; + struct simplelock *lock; + int bank; + +#ifdef DEBUG + if ((pmap_con_dbg & (CD_INIT | CD_NORM)) == (CD_INIT | CD_NORM)) + printf("pmap_init()\n"); +#endif + + /* + * Allocate memory for the pv_head_table and its lock bits, + * the modify bit array, and the pte_page table. + */ + for (npages = 0, bank = 0; bank < vm_nphysseg; bank++) + npages += vm_physmem[bank].end - vm_physmem[bank].start; + + s = PV_TABLE_SIZE(npages); /* pv_list */ + s += PV_LOCK_TABLE_SIZE(npages); /* pv_lock_table */ + s += npages * sizeof(char); /* pmap_modify_list */ + +#ifdef DEBUG + if ((pmap_con_dbg & (CD_INIT | CD_FULL)) == (CD_INIT | CD_FULL)) { + printf("(pmap_init) nbr of managed pages = %x\n", npages); + printf("(pmap_init) size of pv_list = %x\n", + npages * sizeof(struct pv_entry)); + } +#endif + + s = round_page(s); +#if defined(UVM) + addr = (vaddr_t)uvm_km_zalloc(kernel_map, s); +#else + addr = (vm_offset_t)kmem_alloc(kernel_map, s); +#endif + + pv_head_table = (pv_entry_t)addr; + addr += PV_TABLE_SIZE(npages); + + /* + * Assume that 'simple_lock' is used to lock pv_lock_table + */ + pv_lock_table = (struct simplelock *)addr; /* XXX */ + addr += PV_LOCK_TABLE_SIZE(npages); + + pmap_modify_list = (char *)addr; + + /* + * Initialize pv_lock_table + */ + for (i = 0; i < npages; i++) + simple_lock_init(&(pv_lock_table[i])); + + /* + * Now that the pv, attribute, and lock tables have been allocated, + * assign them to the memory segments. + */ + pv = pv_head_table; + lock = pv_lock_table; + attr = pmap_modify_list; + for (bank = 0; bank < vm_nphysseg; bank++) { + npages = vm_physmem[bank].end - vm_physmem[bank].start; + vm_physmem[bank].pmseg.pvent = pv; + vm_physmem[bank].pmseg.attrs = attr; + vm_physmem[bank].pmseg.plock = lock; + pv += npages; + lock += npages; + attr += npages; + } + + pmap_initialized = TRUE; + +} /* pmap_init() */ + +#else void pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) { @@ -1529,10 +1650,9 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) * the modify bit array, and the pte_page table. 
*/ npages = atop(phys_end - phys_start); - pvl_table_size = PV_LOCK_TABLE_SIZE(npages); - s = (vm_size_t)(npages * sizeof(struct pv_entry) /* pv_list */ - + pvl_table_size /* pv_lock_table */ - + npages); /* pmap_modify_list */ + s = PV_TABLE_SIZE(npages); /* pv_list */ + s += PV_LOCK_TABLE_SIZE(npages); /* pv_lock_table */ + s += npages * sizeof(char); /* pmap_modify_list */ #ifdef DEBUG if ((pmap_con_dbg & (CD_INIT | CD_FULL)) == (CD_INIT | CD_FULL)) { @@ -1546,13 +1666,13 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) addr = (vm_offset_t)kmem_alloc(kernel_map, s); pv_head_table = (pv_entry_t)addr; - addr = (vm_offset_t)(pv_head_table + npages); + addr += PV_TABLE_SIZE(npages); /* * Assume that 'simple_lock' is used to lock pv_lock_table */ pv_lock_table = (struct simplelock *)addr; /* XXX */ - addr = (vm_offset_t)pv_lock_table + pvl_table_size; + addr += PV_LOCK_TABLE_SIZE(npages); pmap_modify_list = (char *)addr; @@ -1575,7 +1695,7 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) pmap_initialized = TRUE; } /* pmap_init() */ - +#endif /* * Routine: PMAP_ZERO_PAGE @@ -1986,10 +2106,9 @@ pmap_reference(pmap_t p) * PDT_VALID * M88K_PTOB * PMAP_MANAGED - * PFIDX * LOCK_PVH * UNLOCK_PVH - * PFIDX_TO_PVH + * PA_TO_PVH * CHECK_PV_LIST * zfree * invalidate_pte @@ -2082,13 +2201,12 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e) pa = M88K_PTOB(pfn); if (PMAP_MANAGED(pa)) { - pfi = PFIDX(pa); - LOCK_PVH(pfi); + LOCK_PVH(pa); /* * Remove the mapping from the pvlist for * this physical page. */ - pvl = PFIDX_TO_PVH(pfi); + pvl = PA_TO_PVH(pa); CHECK_PV_LIST(pa, pvl, "pmap_remove_range before"); if (pvl->pmap == PMAP_NULL) @@ -2127,7 +2245,7 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e) } CHECK_PV_LIST(pa, pvl, "pmap_remove_range after"); - UNLOCK_PVH(pfi); + UNLOCK_PVH(pa); } /* if PAGE_MANAGED */ @@ -2153,7 +2271,7 @@ pmap_remove_range(pmap_t pmap, vm_offset_t s, vm_offset_t e) } /* keep track ourselves too */ if (PMAP_MANAGED(pa)) - pmap_modify_list[pfi] = 1; + SET_ATTRIB(pa, 1); } pte++; tva += M88K_PGBYTES; @@ -2234,8 +2352,7 @@ pmap_remove(pmap_t map, vm_offset_t s, vm_offset_t e) * Calls: * PMAP_MANAGED * SPLVM, SPLX - * PFIDX - * PFIDX_TO_PVH + * PA_TO_PVH * CHECK_PV_LIST * simple_lock * M88K_PTOB @@ -2292,10 +2409,9 @@ pmap_remove_all(vm_offset_t phys) */ remove_all_Retry: - pfi = PFIDX(phys); - pvl = PFIDX_TO_PVH(pfi); + pvl = PA_TO_PVH(phys); CHECK_PV_LIST(phys, pvl, "pmap_remove_all before"); - LOCK_PVH(pfi); + LOCK_PVH(phys); /* * Loop for each entry on the pv list @@ -2303,7 +2419,7 @@ pmap_remove_all(vm_offset_t phys) while ((pmap = pvl->pmap) != PMAP_NULL) { va = pvl->va; if (!simple_lock_try(&pmap->lock)) { - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); goto remove_all_Retry; } @@ -2355,7 +2471,7 @@ pmap_remove_all(vm_offset_t phys) if (opte.pte.modified) { vm_page_set_modified((vm_page_t)PHYS_TO_VM_PAGE(phys)); /* keep track ourselves too */ - pmap_modify_list[pfi] = 1; + SET_ATTRIB(phys, 1); } pte++; va += M88K_PGBYTES; @@ -2370,7 +2486,7 @@ pmap_remove_all(vm_offset_t phys) } CHECK_PV_LIST(phys, pvl, "pmap_remove_all after"); - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); SPLX(spl); } /* pmap_remove_all() */ @@ -2386,7 +2502,7 @@ pmap_remove_all(vm_offset_t phys) * * Calls: * SPLVM, SPLX - * PFIDX_TO_PVH + * PA_TO_PVH * CHECK_PV_LIST * simple_lock, simple_unlock * panic @@ -2422,10 +2538,10 @@ pmap_copy_on_write(vm_offset_t phys) SPLVM(spl); - copy_on_write_Retry: - pv_e = PFIDX_TO_PVH(PFIDX(phys)); +copy_on_write_Retry: + pv_e = 
PA_TO_PVH(phys); CHECK_PV_LIST(phys, pv_e, "pmap_copy_on_write before"); - LOCK_PVH(PFIDX(phys)); + LOCK_PVH(phys); if (pv_e->pmap == PMAP_NULL) { #ifdef DEBUG @@ -2433,7 +2549,7 @@ pmap_copy_on_write(vm_offset_t phys) printf("(pmap_copy_on_write :%x) phys addr 0x%x not mapped\n", curproc, phys); #endif - UNLOCK_PVH(PFIDX(phys)); + UNLOCK_PVH(phys); SPLX(spl); return; /* no mappings */ @@ -2452,7 +2568,7 @@ pmap_copy_on_write(vm_offset_t phys) va = pv_e->va; if (!simple_lock_try(&pmap->lock)) { - UNLOCK_PVH(PFIDX(phys)); + UNLOCK_PVH(phys); goto copy_on_write_Retry; } @@ -2498,9 +2614,9 @@ pmap_copy_on_write(vm_offset_t phys) simple_unlock(&pmap->lock); pv_e = pv_e->next; } - CHECK_PV_LIST(phys, PFIDX_TO_PVH(PFIDX(phys)), "pmap_copy_on_write"); + CHECK_PV_LIST(phys, PA_TO_PVH(phys), "pmap_copy_on_write"); - UNLOCK_PVH(PFIDX(phys)); + UNLOCK_PVH(phys); SPLX(spl); } /* pmap_copy_on_write */ @@ -3013,9 +3129,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, * Enter the mappimg in the PV list for this * physical page. */ - pfi = PFIDX(pa); - LOCK_PVH(pfi); - pvl = PFIDX_TO_PVH(pfi); + LOCK_PVH(pa); + pvl = PA_TO_PVH(pa); CHECK_PV_LIST (pa, pvl, "pmap_enter before"); if (pvl->pmap == PMAP_NULL) { @@ -3045,7 +3160,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, * Add new pv_entry after header. */ if (pv_e == PV_ENTRY_NULL) { - UNLOCK_PVH(pfi); + UNLOCK_PVH(pa); PMAP_UNLOCK(pmap, spl); pv_e = (pv_entry_t) malloc(sizeof *pv_e, M_VMPVENT, M_NOWAIT); @@ -3060,7 +3175,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, */ pv_e = PV_ENTRY_NULL; } - UNLOCK_PVH(pfi); + UNLOCK_PVH(pa); } /* @@ -3487,18 +3602,74 @@ pmap_collect(pmap_t pmap) * to the physical address of the segment descriptor table. * * Parameters: - * pmap pointer to pmap structure - * pcbp pointer to current pcb - * cpu CPU number + * p pointer to proc structure */ void -pmap_activate(pmap_t pmap, pcb_t pcb, int cpu) +pmap_activate(struct proc *p) { -#ifdef lint - my_cpu++; + apr_template_t apr_data; + int n; + pmap_t pmap = p->p_vmspace->vm_map.pmap; + int my_cpu = cpu_number(); + +#ifdef DEBUG + if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM)) + printf("(_pmap_activate :%x) pmap 0x%x\n", p, (unsigned)pmap); #endif - cpu = cpu_number(); /* hack to fix bogus cpu number */ - PMAP_ACTIVATE(pmap, pcb, cpu); + + if (pmap != kernel_pmap) { + /* + * Lock the pmap to put this cpu in its active set. + */ + simple_lock(&pmap->lock); + + apr_data.bits = 0; + apr_data.field.st_base = M88K_BTOP(pmap->sdt_paddr); + apr_data.field.wt = 0; + apr_data.field.g = 1; + apr_data.field.ci = 0; + apr_data.field.te = 1; +#ifdef notyet + #ifdef OMRON_PMAP + /* + * cmmu_pmap_activate will set the uapr and the batc entries, then + * flush the *USER* TLB. IF THE KERNEL WILL EVER CARE ABOUT THE + * BATC ENTRIES, THE SUPERVISOR TLBs SHOULB BE FLUSHED AS WELL. + */ + cmmu_pmap_activate(my_cpu, apr_data.bits, pmap->i_batc, pmap->d_batc); + for (n = 0; n < BATC_MAX; n++) + *(unsigned*)&batc_entry[n] = pmap->i_batc[n].bits; + #else + cmmu_set_uapr(apr_data.bits); + cmmu_flush_tlb(0, 0, -1); + #endif +#endif /* notyet */ + /* + * I am forcing it to not program the BATC at all. pmap.c module + * needs major, major cleanup. XXX nivas + */ + cmmu_set_uapr(apr_data.bits); + cmmu_flush_tlb(0, 0, -1); + + /* + * Mark that this cpu is using the pmap. + */ + SETBIT_CPUSET(my_cpu, &(pmap->cpus_using)); + + simple_unlock(&pmap->lock); + + } else { + + /* + * kernel_pmap must be always active. 
+ */ + +#ifdef DEBUG + if ((pmap_con_dbg & (CD_ACTIVATE | CD_NORM)) == (CD_ACTIVATE | CD_NORM)) + printf("(_pmap_activate :%x) called for kernel_pmap\n", curproc); +#endif + + } } /* pmap_activate() */ @@ -3512,17 +3683,23 @@ pmap_activate(pmap_t pmap, pcb_t pcb, int cpu) * In a mono-processor the PMAP_DEACTIVATE macro is null. * * Parameters: - * pmap pointer to pmap structure - * pcb pointer to pcb - * cpu CPU number + * p pointer to proc structure */ void -pmap_deactivate(pmap_t pmap, pcb_t pcb,int cpu) +pmap_deactivate(struct proc *p) { -#ifdef lint - pmap++; th++; which_cpu++; -#endif - PMAP_DEACTIVATE(pmap, pcb, cpu); + pmap_t pmap = p->p_vmspace->vm_map.pmap; + int my_cpu = cpu_number(); + + if (pmap != kernel_pmap) { + + /* + * we expect the spl is already raised to sched level. + */ + simple_lock(&pmap->lock); + CLRBIT_CPUSET(my_cpu, &(pmap->cpus_using)); + simple_unlock(&pmap->lock); + } } /* pmap_deactivate() */ @@ -3862,8 +4039,7 @@ pmap_redzone(pmap_t pmap, vm_offset_t va) * Calls: * PMAP_MANAGED * SPLVM, SPLX - * PFIDX - * PFIDX_TO_PVH + * PA_TO_PVH * CHECK_PV_LIST * simple_lock, simple_unlock * pmap_pte @@ -3901,14 +4077,12 @@ pmap_clear_modify(vm_offset_t phys) SPLVM(spl); clear_modify_Retry: - pfi = PFIDX(phys); - pvl = PFIDX_TO_PVH(pfi); + pvl = PA_TO_PVH(phys); CHECK_PV_LIST (phys, pvl, "pmap_clear_modify"); - LOCK_PVH(pfi); - + LOCK_PVH(phys); /* update correspoinding pmap_modify_list element */ - pmap_modify_list[pfi] = 0; + SET_ATTRIB(phys, 0); if (pvl->pmap == PMAP_NULL) { #ifdef DEBUG @@ -3916,7 +4090,7 @@ pmap_clear_modify(vm_offset_t phys) printf("(pmap_clear_modify :%x) phys addr 0x%x not mapped\n", curproc, phys); #endif - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); SPLX(spl); return; } @@ -3927,7 +4101,7 @@ pmap_clear_modify(vm_offset_t phys) pmap = pvep->pmap; va = pvep->va; if (!simple_lock_try(&pmap->lock)) { - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); goto clear_modify_Retry; } @@ -3964,7 +4138,7 @@ pmap_clear_modify(vm_offset_t phys) pvep = pvep->next; } - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); SPLX(spl); } /* pmap_clear_modify() */ @@ -3990,8 +4164,7 @@ pmap_clear_modify(vm_offset_t phys) * simple_lock, simple_unlock * SPLVM, SPLX * PMAP_MANAGED - * PFIDX - * PFIDX_TO_PVH + * PA_TO_PVH * pmap_pte * * If the physical address specified is not a managed page, this @@ -4028,12 +4201,11 @@ pmap_is_modified(vm_offset_t phys) SPLVM(spl); - pfi = PFIDX(phys); - pvl = PFIDX_TO_PVH(pfi); + pvl = PA_TO_PVH(phys); CHECK_PV_LIST (phys, pvl, "pmap_is_modified"); - is_mod_Retry: +is_mod_Retry: - if ((boolean_t) pmap_modify_list[pfi]) { + if ((boolean_t) PA_TO_ATTRIB(phys)) { /* we've already cached a modify flag for this page, no use looking further... 
*/ #ifdef DBG @@ -4043,17 +4215,17 @@ pmap_is_modified(vm_offset_t phys) SPLX(spl); return (TRUE); } - LOCK_PVH(pfi); + LOCK_PVH(phys); if (pvl->pmap == PMAP_NULL) { /* unmapped page - get info from page_modified array maintained by pmap_remove_range/ pmap_remove_all */ - modified_flag = (boolean_t) pmap_modify_list[pfi]; + modified_flag = (boolean_t) PA_TO_ATTRIB(phys); #ifdef DBG if ((pmap_con_dbg & (CD_IMOD | CD_NORM)) == (CD_IMOD | CD_NORM)) printf("(pmap_is_modified :%x) phys addr 0x%x not mapped\n", curproc, phys); #endif - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); SPLX(spl); return (modified_flag); } @@ -4062,7 +4234,7 @@ pmap_is_modified(vm_offset_t phys) pvep = pvl; while (pvep != PV_ENTRY_NULL) { if (!simple_lock_try(&pvep->pmap->lock)) { - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); goto is_mod_Retry; } @@ -4078,7 +4250,7 @@ pmap_is_modified(vm_offset_t phys) if ((pmap_con_dbg & (CD_IMOD | CD_FULL)) == (CD_IMOD | CD_FULL)) printf("(pmap_is_modified :%x) modified page pte@0x%x\n", curproc, (unsigned)ptep); #endif - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); SPLX(spl); return (TRUE); } @@ -4089,7 +4261,7 @@ pmap_is_modified(vm_offset_t phys) pvep = pvep->next; } - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); SPLX(spl); return (FALSE); @@ -4115,8 +4287,7 @@ pmap_is_modified(vm_offset_t phys) * Calls: * PMAP_MANAGED * SPLVM, SPLX - * PFIDX - * PFIDX_TO_PVH + * PA_TO_PVH * CHECK_PV_LIST * simple_lock * pmap_pte @@ -4157,9 +4328,8 @@ pmap_clear_reference(vm_offset_t phys) SPLVM(spl); clear_reference_Retry: - pfi = PFIDX(phys); - LOCK_PVH(pfi); - pvl = PFIDX_TO_PVH(pfi); + LOCK_PVH(phys); + pvl = PA_TO_PVH(phys); CHECK_PV_LIST(phys, pvl, "pmap_clear_reference"); @@ -4168,7 +4338,7 @@ pmap_clear_reference(vm_offset_t phys) if ((pmap_con_dbg & (CD_CREF | CD_NORM)) == (CD_CREF | CD_NORM)) printf("(pmap_clear_reference :%x) phys addr 0x%x not mapped\n", curproc,phys); #endif - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); SPLX(spl); return; } @@ -4179,7 +4349,7 @@ pmap_clear_reference(vm_offset_t phys) pmap = pvep->pmap; va = pvep->va; if (!simple_lock_try(&pmap->lock)) { - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); goto clear_reference_Retry; } @@ -4216,7 +4386,7 @@ pmap_clear_reference(vm_offset_t phys) pvep = pvep->next; } - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); SPLX(spl); } /* pmap_clear_reference() */ @@ -4243,8 +4413,7 @@ pmap_clear_reference(vm_offset_t phys) * Calls: * PMAP_MANAGED * SPLVM - * PFIDX - * PFIDX_TO_PVH + * PA_TO_PVH * CHECK_PV_LIST * simple_lock * pmap_pte @@ -4272,24 +4441,23 @@ pmap_is_referenced(vm_offset_t phys) SPLVM(spl); - pfi = PFIDX(phys); - pvl = PFIDX_TO_PVH(pfi); + pvl = PA_TO_PVH(phys); CHECK_PV_LIST(phys, pvl, "pmap_is_referenced"); - is_ref_Retry: +is_ref_Retry: if (pvl->pmap == PMAP_NULL) { SPLX(spl); return (FALSE); } - LOCK_PVH(pfi); + LOCK_PVH(phys); /* for each listed pmap, check used bit for given page */ pvep = pvl; while (pvep != PV_ENTRY_NULL) { if (!simple_lock_try(&pvep->pmap->lock)) { - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); goto is_ref_Retry; } @@ -4299,7 +4467,7 @@ pmap_is_referenced(vm_offset_t phys) for (i = ptes_per_vm_page; i > 0; i--) { if (ptep->pg_used) { simple_unlock(&pvep->pmap->lock); - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); SPLX(spl); return (TRUE); } @@ -4310,7 +4478,7 @@ pmap_is_referenced(vm_offset_t phys) pvep = pvep->next; } - UNLOCK_PVH(pfi); + UNLOCK_PVH(phys); SPLX(spl); return (FALSE); } /* pmap_is referenced() */ @@ -4325,14 +4493,10 @@ pmap_is_referenced(vm_offset_t phys) * Variables changed below, * vm_first_phys --> pmap_phys_start * vm_last_phys --> 
pmap_phys_end - * Macro chnged below, - * pa_index --> PFIDX - * pai_to_pvh --> PFI_TO_PVH * * Calls: * SPLVM, SPLX - * PFIDX - * PFI_TO_PVH + * PA_TO_PVH * * Global/Extern: * pmap_initialized @@ -4360,11 +4524,11 @@ pmap_verify_free(vm_offset_t phys) SPLVM(spl); - pv_h = PFIDX_TO_PVH(PFIDX(phys)); - LOCK_PVH(PFIDX(phys)); + pv_h = PA_TO_PVH(phys); + LOCK_PVH(phys); result = (pv_h->pmap == PMAP_NULL); - UNLOCK_PVH(PFIDX(phys)); + UNLOCK_PVH(phys); SPLX(spl); return (result); @@ -4491,12 +4655,11 @@ pagemove(vm_offset_t from, vm_offset_t to, int size) */ pa = M88K_PTOB(srcpte->pfn); if (PMAP_MANAGED(pa)) { - pfi = PFIDX(pa); - LOCK_PVH(pfi); - pvl = PFIDX_TO_PVH(pfi); + LOCK_PVH(pa); + pvl = PA_TO_PVH(pa); CHECK_PV_LIST(pa, pvl, "pagemove"); pvl->va = (vm_offset_t)to; - UNLOCK_PVH(pfi); + UNLOCK_PVH(pa); } /* @@ -4734,7 +4897,7 @@ check_pv_list(vm_offset_t phys, pv_entry_t pv_h, char *who) pt_entry_t *pte; vm_offset_t pa; - if (pv_h != PFIDX_TO_PVH(PFIDX(phys))) { + if (pv_h != PA_TO_PVH(phys)) { printf("check_pv_list: incorrect pv_h supplied.\n"); panic(who); } @@ -4818,7 +4981,10 @@ check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who) pt_entry_t *ptep; boolean_t found; int loopcnt; - +#if defined(MACHINE_NEW_NONCONTIG) + int bank; + unsigned npages; +#endif /* * for each page in the address space, check to see if there's @@ -4865,11 +5031,18 @@ check_map(pmap_t map, vm_offset_t s, vm_offset_t e, char *who) reserved by vm_page_startup */ /* pmap_init also allocate some memory for itself. */ +#if defined(MACHINE_NEW_NONCONTIG) + for (npages = 0, bank = 0; bank < vm_nphysseg; bank++) + npages += vm_physmem[bank].end - vm_physmem[bank].start; + if (map == kernel_pmap && + va < round_page((vm_offset_t)(pmap_modify_list + npages))) + continue; +#else if (map == kernel_pmap && va < round_page((vm_offset_t)(pmap_modify_list + (pmap_phys_end - pmap_phys_start)))) continue; - - pv_h = PFIDX_TO_PVH(PFIDX(phys)); +#endif + pv_h = PA_TO_PVH(phys); found = FALSE; if (pv_h->pmap != PMAP_NULL) { @@ -4949,6 +5122,10 @@ check_pmap_consistency(char *who) vm_offset_t phys; pv_entry_t pv_h; int spl; +#ifdef MACHINE_NEW_NONCONTIG + int bank; + unsigned npages; +#endif if ((pmap_con_dbg & (CD_CHKPM | CD_NORM)) == (CD_CHKPM | CD_NORM)) printf("check_pmap_consistency (%s :%x) start.\n", who, curproc); @@ -4975,10 +5152,19 @@ check_pmap_consistency(char *who) } /* run through all managed paes, check pv_list for each one */ +#if defined(MACHINE_NEW_NONCONTIG) + for (npages = 0, bank = 0; bank < vm_nphysseg; bank++){ + for (phys = ptoa(vm_physmem[bank].start); phys < ptoa(vm_physmem[bank].end); phys += PAGE_SIZE) { + pv_h = PA_TO_PVH(phys); + check_pv_list(phys, pv_h, who); + } + } +#else for (phys = pmap_phys_start; phys < pmap_phys_end; phys += PAGE_SIZE) { - pv_h = PFIDX_TO_PVH(PFIDX(phys)); + pv_h = PA_TO_PVH(phys); check_pv_list(phys, pv_h, who); } +#endif /* defined(MACHINE_NEW_NONCONTIG) */ SPLX(spl); diff --git a/sys/arch/mvme88k/mvme88k/process.S b/sys/arch/mvme88k/mvme88k/process.S index a58707d6c81..34d7867ede5 100644 --- a/sys/arch/mvme88k/mvme88k/process.S +++ b/sys/arch/mvme88k/mvme88k/process.S @@ -1,4 +1,4 @@ -/* $OpenBSD: process.S,v 1.5 1999/02/09 06:36:30 smurph Exp $ */ +/* $OpenBSD: process.S,v 1.6 2001/01/12 07:29:26 smurph Exp $ */ /* * Copyright (c) 1996 Nivas Madhur * All rights reserved. @@ -70,12 +70,13 @@ Lswsrunpanic: * The mapping of the pcb at p->p_addr has already been deleted, * and the memory for the pcb+stack has been freed. 
* The ipl is high enough to prevent the memory from being reallocated. + * switch_exit(proc * p) */ ENTRY(switch_exit) /* * Change pcb to idle u. area, i.e., set r31 to top of stack - * and set curpcb to point to _idle_u. + * and set curpcb to point to _idle_u. r2 contains proc *p. */ or.u r30, r0, hi16(_idle_u) or r30, r30,lo16(_idle_u) @@ -84,7 +85,12 @@ ENTRY(switch_exit) st r30, r10,lo16(_curpcb) /* curpcb = &idle_u */ or.u r10, r0, hi16(_curproc) st r0, r10, lo16(_curproc) /* curproc = NULL */ - bsr.n _cpu_switch + + /* Schedule the vmspace and stack to be freed. */ + bsr.n _exit2 /* call exit2(p); */ + subu r31, r31, 48 /* allocate stack */ + addu r31, r31, 48 /* restore stack */ + bsr.n _cpu_switch /* goto final switch */ or r2, r0, r10 #if 0 @@ -254,18 +260,16 @@ Lsw2: ld r3, r9, P_ADDR or.u r10, r0, hi16(_curpcb) st r3, r10, lo16(_curpcb) /* curpcb = p->p_addr */ - + /* see if pmap_activate needs to be called */ - ld r2, r9, P_VMSPACE /* vmspace = p->p_vmspace */ - addu r2, r2, VM_PMAP /* pmap = &vmspace.vm_pmap */ -#ifdef notyet - ld r5, r2, PM_STCHG /* pmap->st_changed? */ - bcnd eq0, r5, Lswnochg /* no, skip */ -#endif /* notyet */ + /* _pmap_activate() now has proc * as parameter 01-11-2000 smurph */ + /* No more VM_PMAP to contend with!!! */ + or r2, r0, r9 /* r2 = p */ or r14, r0, r9 /* save p in r14 */ subu r31, r31,48 - bsr _pmap_activate /* pmap_activate(pmap, pcb) */ - addu r31, r31,48 + /* r2 = pmap, r3 = pcb, r4 = cpu number */ + bsr _pmap_activate /* _pmap_activate(proc *p)*/ + addu r31, r31,48 or r9, r0, r14 /* restore p saved in r14 */ Lswnochg: @@ -276,9 +280,11 @@ Lswnochg: or r2, r0, r9 addu r31, r31,48 /* flush tlb of any user addresses */ - or r2, r0, 0 - or r3, r0, 0 + or r2, r0, 0 /* 0 = user space */ + or r3, r0, 0 /* start at addr 0 */ subu r31, r31,48 + /* r2 = 1 : kernel ? user, r3 = address, r4 = size */ + /* cmmu_flush_tlb(0, 0, 0xffff) */ bsr.n _cmmu_flush_tlb or r4, r0, 0xffff /* cmmu_flush_tlb flushes entire tlb */ /* for sizes > 4096 */ diff --git a/sys/arch/mvme88k/mvme88k/vm_machdep.c b/sys/arch/mvme88k/mvme88k/vm_machdep.c index 41cc589a8cd..bcf30882ccb 100644 --- a/sys/arch/mvme88k/mvme88k/vm_machdep.c +++ b/sys/arch/mvme88k/mvme88k/vm_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: vm_machdep.c,v 1.15 2000/12/28 21:21:25 smurph Exp $ */ +/* $OpenBSD: vm_machdep.c,v 1.16 2001/01/12 07:29:26 smurph Exp $ */ /* * Copyright (c) 1998 Steve Murphree, Jr. @@ -180,10 +180,6 @@ cpu_exit(struct proc *p) #else cnt.v_swtch++; #endif - -#if 1 - exit2(p); /* XXX - can't be right! */ -#endif switch_exit(p); /* NOTREACHED */ } |