author     Miod Vallat <miod@cvs.openbsd.org>   2004-09-17 19:19:09 +0000
committer  Miod Vallat <miod@cvs.openbsd.org>   2004-09-17 19:19:09 +0000
commit     37f2204d0d07e6d4c2940b86430d865cf839d483 (patch)
tree       af5518e22e0a5234c738fd2450aed77950215332 /sys
parent     df0919718c4a19aeb0dcffc3df0d246f121f25a3 (diff)
Long-overdue mips pmap potpourri:
- un-__P()ify (drop the __P() wrappers from prototypes).
- remove splimp() protection.
- allow pmap_extract() to return FALSE, dammit! (see the first sketch after
this list)
XXX some code under arch/mips64 considers pa == 0 as failure, instead
of checking the return value of pmap_extract(). Free commits.
- do not peek at uvm vm_page flags for ref/mod accounting, but use real
MD flags in pv lists, and implement pmap_is_* and pmap_clear_*, so uvm
(which knows better) does the right thing (accessors sketched after this list).
- debug code #ifdef PMAPDEBUG, not #ifdef DIAGNOSTIC.
- merge pmap_init() and pmap_pinit(), pmap_destroy() and pmap_release(),
and update comments.
- better pmap_enter() logic, from NetBSD.
- do not use IS_VM_PHYSADDR() to know if we can use PHYS_TO_VM_PAGE()
unchecked. Instead, compute PHYS_TO_VM_PAGE(), and check it against
NULL. Saves a vm_physseg_find() call each time (before/after pattern
shown below).
- pass vm_page_t to internal functions whenever possible, rather than
paddr_t, so that PHYS_TO_VM_PAGE() need not be computed again.
- in the same way, do not compute PHYS_TO_KSEG0(foo) several times in a
row (though PHYS_TO_KSEG0 is trivial).
- allow userspace addresses to cross 0x80000000 (may be useful in the
future).
- implement PMAP_CANFAIL (error path sketched below).
As a bonus:
- switch from HAVE_PMAP_PHYSSEG to HAVE_VM_PAGE_MD.
- remove dead (pre-pmap_pv_pool) code.
- KNF fixes.
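
A minimal sketch of the calling convention the pmap_extract() bullet asks
for; this fragment is not part of the diff, and the error path (EFAULT) is
hypothetical. The point is that callers test the boolean return, never
pa == 0, since physical address 0 can be a perfectly valid page:

	paddr_t pa;

	if (pmap_extract(pmap, va, &pa) == FALSE) {
		/* no valid mapping at va; do not look at pa */
		return (EFAULT);	/* hypothetical error handling */
	}
	/* mapping exists; pa is usable here even if it happens to be 0 */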
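
The referenced/modified bookkeeping now lives in MD flags on the per-page
pv list head (struct vm_page_md, reached through pg_to_pvh() in the diff
below). Condensed from the pmap.c hunks, the modify accessors reduce to:

boolean_t
pmap_is_modified(struct vm_page *pg)
{
	pv_entry_t pv = pg_to_pvh(pg);		/* &pg->mdpage.pvent */

	return (pv->pv_flags & PV_ATTR_MOD) != 0;
}

boolean_t
pmap_clear_modify(struct vm_page *pg)
{
	pv_entry_t pv = pg_to_pvh(pg);
	boolean_t rv = (pv->pv_flags & PV_ATTR_MOD) != 0;

	/* XXX should also walk the pv list and flush dirty mappings */
	pv->pv_flags &= ~PV_ATTR_MOD;
	return rv;
}

pmap_is_referenced() and pmap_clear_reference() follow the same pattern
with PV_ATTR_REF.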
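
The IS_VM_PHYSADDR() change boils down to the before/after pattern below
(the "after" half is lifted from the trap.c hunk further down). Both the
macro and PHYS_TO_VM_PAGE() go through vm_physseg_find(), so checking the
PHYS_TO_VM_PAGE() result directly does one lookup instead of two:

	/* before: two physseg lookups per page */
	if (!IS_VM_PHYSADDR(pa))
		panic("trap: ktlbmod: unmanaged page");
	PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN;

	/* after: one lookup, checked against NULL */
	pg = PHYS_TO_VM_PAGE(pa);
	if (pg == NULL)
		panic("trap: ktlbmod: unmanaged page");
	pmap_set_modify(pg);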
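
PMAP_CANFAIL is honoured wherever pmap_enter() may fail to allocate a pv
entry or a segment table page; the shape, straight from the pmap.c hunk
below, is:

	if (pmap_enter_pv(pmap, va, pg, &npte) != 0) {
		if (flags & PMAP_CANFAIL)
			return ENOMEM;
		panic("pmap_enter: pmap_enter_pv() failed");
	}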
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/mips64/include/pmap.h     |  25
-rw-r--r--  sys/arch/mips64/include/vmparam.h  |  31
-rw-r--r--  sys/arch/mips64/mips64/pmap.c      | 842
-rw-r--r--  sys/arch/mips64/mips64/trap.c      |  31
4 files changed, 440 insertions, 489 deletions
diff --git a/sys/arch/mips64/include/pmap.h b/sys/arch/mips64/include/pmap.h index f1ba4bf8e1c..a67011ac8db 100644 --- a/sys/arch/mips64/include/pmap.h +++ b/sys/arch/mips64/include/pmap.h @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.h,v 1.3 2004/08/10 20:28:13 deraadt Exp $ */ +/* $OpenBSD: pmap.h,v 1.4 2004/09/17 19:19:05 miod Exp $ */ /* * Copyright (c) 1987 Carnegie-Mellon University @@ -88,14 +88,7 @@ typedef struct pmap { struct segtab *pm_segtab; /* pointers to pages of PTEs */ } *pmap_t; -/* - * Defines for pmap_attributes[phys_mach_page]; - */ -#define PMAP_ATTR_MOD 0x01 /* page has been modified */ -#define PMAP_ATTR_REF 0x02 /* page has been referenced */ - #ifdef _KERNEL -extern char *pmap_attributes; /* reference and modify bits */ extern struct pmap kernel_pmap_store; #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) @@ -108,21 +101,15 @@ extern struct pmap kernel_pmap_store; #define pmap_update(x) /* nothing */ -void pmap_prefer(vaddr_t, vaddr_t *); - -void pmap_bootstrap(void); -int pmap_is_page_ro( pmap_t, vaddr_t, int); -int pmap_alloc_tlbpid(struct proc *); -void pmap_remove_pv(pmap_t, vaddr_t, vaddr_t); -int pmap_is_pa_mapped(vaddr_t); -vaddr_t pmap_pa_to_va(paddr_t); -void pmap_page_cache(vaddr_t, int); +void pmap_bootstrap(void); +int pmap_is_page_ro( pmap_t, vaddr_t, int); +void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cache); +void pmap_prefer(vaddr_t, vaddr_t *); +void pmap_set_modify(vm_page_t); #define pmap_proc_iflush(p,va,len) /* nothing yet (handled in trap now) */ #define pmap_unuse_final(p) /* nothing yet */ -void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cache); - paddr_t vtophys(void *); #endif /* _KERNEL */ diff --git a/sys/arch/mips64/include/vmparam.h b/sys/arch/mips64/include/vmparam.h index 7484d6832dd..e808f7ef88b 100644 --- a/sys/arch/mips64/include/vmparam.h +++ b/sys/arch/mips64/include/vmparam.h @@ -1,4 +1,4 @@ -/* $OpenBSD: vmparam.h,v 1.4 2004/09/09 22:21:41 pefo Exp $ */ +/* $OpenBSD: vmparam.h,v 1.5 2004/09/17 19:19:05 miod Exp $ */ /* $NetBSD: vmparam.h,v 1.5 1994/10/26 21:10:10 cgd Exp $ */ /* @@ -129,13 +129,32 @@ vaddr_t virtual_end; /* virtual sizes (bytes) for various kernel submaps */ #define VM_PHYS_SIZE (USRIOSIZE*PAGE_SIZE) +#if defined(_KERNEL) && !defined(_LOCORE) /* - * pmap-specific data stored in the vm_physmem[] array. 
+ * pmap-specific data */ -#define __HAVE_PMAP_PHYSSEG -struct pmap_physseg { - struct pv_entry *pvent; /* pv list of this seg */ - char *attrs; + +/* XXX - belongs in pmap.h, but put here because of ordering issues */ +typedef struct pv_entry { + struct pv_entry *pv_next; /* next pv_entry */ + struct pmap *pv_pmap; /* pmap where mapping lies */ + vaddr_t pv_va; /* virtual address for mapping */ + int pv_flags; /* Some flags for the mapping */ +} *pv_entry_t; + +#define __HAVE_VM_PAGE_MD +struct vm_page_md { + struct pv_entry pvent; /* pv list of this seg */ }; +#define VM_MDPAGE_INIT(pg) \ + do { \ + (pg)->mdpage.pvent.pv_next = NULL; \ + (pg)->mdpage.pvent.pv_pmap = NULL; \ + (pg)->mdpage.pvent.pv_va = 0; \ + (pg)->mdpage.pvent.pv_flags = 0; \ + } while (0) + +#endif /* _KERNEL && !_LOCORE */ + #endif /* !_MIPS_VMPARAM_H_ */ diff --git a/sys/arch/mips64/mips64/pmap.c b/sys/arch/mips64/mips64/pmap.c index dd8eed7b405..b748063cfd1 100644 --- a/sys/arch/mips64/mips64/pmap.c +++ b/sys/arch/mips64/mips64/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.8 2004/09/16 07:25:26 miod Exp $ */ +/* $OpenBSD: pmap.c,v 1.9 2004/09/17 19:19:08 miod Exp $ */ /* * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -38,38 +38,26 @@ #include <sys/shm.h> #endif -#include <uvm/uvm.h> - #include <machine/pte.h> #include <machine/cpu.h> #include <machine/autoconf.h> #include <machine/memconf.h> +#include <machine/vmparam.h> #include <mips64/archtype.h> -extern void mem_zero_page (vaddr_t); +#include <uvm/uvm.h> + +extern void mem_zero_page(vaddr_t); -typedef struct pv_entry { - struct pv_entry *pv_next; /* next pv_entry */ - struct pmap *pv_pmap; /* pmap where mapping lies */ - vaddr_t pv_va; /* virtual address for mapping */ - int pv_flags; /* Some flags for the mapping */ -} *pv_entry_t; +/* flags for pv_entry */ #define PV_UNCACHED 0x0001 /* Page is mapped unchached */ #define PV_CACHED 0x0002 /* Page has been cached */ - -/* - * Local pte bits used only here - */ -#define PG_RO 0x40000000 - -pv_entry_t pv_table; /* array of entries, one per page */ +#define PV_ATTR_MOD 0x0004 +#define PV_ATTR_REF 0x0008 struct pool pmap_pmap_pool; struct pool pmap_pv_pool; -void *pmap_pv_page_alloc(u_long, int, int); -void pmap_pv_page_free(void *, u_long, int); - #define pmap_pv_alloc() (pv_entry_t)pool_get(&pmap_pv_pool, PR_NOWAIT) #define pmap_pv_free(pv) pool_put(&pmap_pv_pool, (pv)) @@ -78,12 +66,12 @@ void pmap_pv_page_free(void *, u_long, int); #endif int pmap_pv_lowat = PMAP_PV_LOWAT; -void pmap_pinit __P((struct pmap *pmap)); -void pmap_release __P((pmap_t pmap)); -boolean_t pmap_physpage_alloc(paddr_t *); -void pmap_physpage_free(paddr_t); +int pmap_alloc_tlbpid(struct proc *); +int pmap_enter_pv(pmap_t, vaddr_t, vm_page_t, u_int *); +void pmap_page_cache(vm_page_t, int); +void pmap_remove_pv(pmap_t, vaddr_t, paddr_t); -#ifdef DIAGNOSTIC +#ifdef PMAPDEBUG struct { int kernel; /* entering kernel mapping */ int user; /* entering user mapping */ @@ -127,13 +115,7 @@ struct { int pmapdebugflag = 0x0; #define pmapdebug pmapdebugflag - -#else - -#define stat_count(what) -#define pmapdebug (0) - -#endif /* DIAGNOSTIC */ +#endif /* PMAPDEBUG */ struct pmap kernel_pmap_store; @@ -158,7 +140,6 @@ pmap_bootstrap() { int i; pt_entry_t *spte; - int n; /* @@ -177,24 +158,9 @@ pmap_bootstrap() Sysmap = (pt_entry_t *)uvm_pageboot_alloc(sizeof(pt_entry_t) * Sysmapsize); - /* - * Allocate memory for pv_table. - * This will allocate more entries than we really need. 
- * We could do this in pmap_init when we know the actual - * phys_start and phys_end but its better to use kseg0 addresses - * rather than kernel virtual addresses mapped through the TLB. - */ - i = 0; - for( n = 0; n < MAXMEMSEGS; n++) { - i += mem_layout[n].mem_last_page - mem_layout[n].mem_first_page + 1; - } - pv_table = (struct pv_entry *)uvm_pageboot_alloc(sizeof(struct pv_entry) * i); - pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0,"pmappl", NULL); pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0,"pvpl", NULL); - /* XXX need to decide how to set cnt.v_page_size */ - simple_lock_init(&pmap_kernel()->pm_lock); pmap_kernel()->pm_count = 1; @@ -222,9 +188,11 @@ pmap_steal_memory(size, vstartp, vendp) vaddr_t va; paddr_t pa; +#ifdef DIAGNOSTIC if (uvm.page_init_done) { panic("pmap_steal_memory: to late, vm is running!"); } +#endif size = round_page(size); npgs = atop(size); @@ -279,118 +247,83 @@ pmap_steal_memory(size, vstartp, vendp) void pmap_init() { - vsize_t s; - int bank; - pv_entry_t pv; +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_INIT)) { printf("pmap_init()\n"); } - - pv = pv_table; - for(bank = 0; bank < vm_nphysseg; bank++) { - s = vm_physmem[bank].end - vm_physmem[bank].start; - vm_physmem[bank].pmseg.pvent = pv; - pv += s; - } +#endif #if 0 /* too early */ pool_setlowat(&pmap_pv_pool, pmap_pv_lowat); #endif } -inline struct pv_entry *pa_to_pvh __P((paddr_t)); -inline struct pv_entry * -pa_to_pvh(pa) - paddr_t pa; +static pv_entry_t pg_to_pvh(struct vm_page *); +static __inline pv_entry_t +pg_to_pvh(struct vm_page *pg) { - int i, p; - - i = vm_physseg_find(atop((pa)), &p); - return(&vm_physmem[i].pmseg.pvent[p]); + return &pg->mdpage.pvent; } /* * Create and return a physical map. - * - * If the size specified for the map - * is zero, the map is an actual physical - * map, and may be referenced by the - * hardware. - * - * If the size specified is non-zero, - * the map will be used in software only, and - * is bounded by that size. */ pmap_t pmap_create() { pmap_t pmap; + int i; +extern struct vmspace vmspace0; +extern struct user *proc0paddr; +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) { printf("pmap_create()\n"); } +#endif pmap = pool_get(&pmap_pmap_pool, PR_WAITOK); bzero(pmap, sizeof(*pmap)); - pmap_pinit(pmap); - return (pmap); -} - -/* - * Initialize a preallocated and zeroed pmap structure, - * such as one in a vmspace structure. - */ -void -pmap_pinit(pmap) - struct pmap *pmap; -{ - int i; - int s; -extern struct vmspace vmspace0; -extern struct user *proc0paddr; simple_lock_init(&pmap->pm_lock); pmap->pm_count = 1; if (free_segtab) { - s = splimp(); pmap->pm_segtab = free_segtab; free_segtab = *(struct segtab **)free_segtab; pmap->pm_segtab->seg_tab[0] = NULL; - splx(s); - } - else { + } else { struct segtab *stp; - vm_page_t mem; + vm_page_t pg; pv_entry_t pv; + vaddr_t va; do { - mem = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO); - if (mem == NULL) { + pg = uvm_pagealloc(NULL, 0, NULL, + UVM_PGA_USERESERVE | UVM_PGA_ZERO); + if (pg == NULL) { /* XXX What else can we do? Deadlocks? 
*/ - uvm_wait("ppinit"); + uvm_wait("pmap_create"); } - } while (mem == NULL); + } while (pg == NULL); - pv = pa_to_pvh(VM_PAGE_TO_PHYS(mem)); - if (pv->pv_flags & PV_CACHED && - ((pv->pv_va ^ PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem))) & CpuCacheAliasMask) != 0) { + pv = pg_to_pvh(pg); + va = PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)); + if ((pv->pv_flags & PV_CACHED) && + ((pv->pv_va ^ va) & CpuCacheAliasMask) != 0) { Mips_SyncDCachePage(pv->pv_va); } - pv->pv_va = PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem)); + pv->pv_va = va; pv->pv_flags = PV_CACHED; - pmap->pm_segtab = stp = (struct segtab *) - (long)PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem)); + pmap->pm_segtab = stp = (struct segtab *)va; i = NBPG / sizeof(struct segtab); - s = splimp(); while (--i != 0) { stp++; *(struct segtab **)stp = free_segtab; free_segtab = stp; } - splx(s); } if (pmap == vmspace0.vm_map.pmap) { /* @@ -405,6 +338,8 @@ extern struct user *proc0paddr; pmap->pm_tlbpid = 0; pmap->pm_tlbgen = 0; } + + return (pmap); } /* @@ -418,39 +353,22 @@ pmap_destroy(pmap) { int count; +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) { printf("pmap_destroy(%x)\n", pmap); } - if (pmap) { - simple_lock(&pmap->pm_lock); - count = --pmap->pm_count; - simple_unlock(&pmap->pm_lock); - if (count == 0) { - pmap_release(pmap); - pool_put(&pmap_pmap_pool, pmap); - } - } -} - -/* - * Release any resources held by the given physical map. - * Called when a pmap initialized by pmap_pinit is being released. - * Should only be called if the map contains no valid mappings. - */ -void -pmap_release(pmap) - pmap_t pmap; -{ +#endif - if (pmapdebug & (PDB_FOLLOW|PDB_CREATE)) { - printf("pmap_release(%x)\n", pmap); - } + simple_lock(&pmap->pm_lock); + count = --pmap->pm_count; + simple_unlock(&pmap->pm_lock); + if (count > 0) + return; if (pmap->pm_segtab) { pt_entry_t *pte; int i; - int s; -#ifdef PARANIOA +#ifdef PARANOIA int j; #endif @@ -462,19 +380,19 @@ pmap_release(pmap) #ifdef PARANOIA for (j = 0; j < NPTEPG; j++) { if ((pte+j)->pt_entry) - panic("pmap_release: segmap not empty"); + panic("pmap_destroy: segmap not empty"); } #endif Mips_HitInvalidateDCache((vaddr_t)pte, PAGE_SIZE); uvm_pagefree(PHYS_TO_VM_PAGE(KSEG0_TO_PHYS(pte))); pmap->pm_segtab->seg_tab[i] = NULL; } - s = splimp(); *(struct segtab **)pmap->pm_segtab = free_segtab; free_segtab = pmap->pm_segtab; - splx(s); pmap->pm_segtab = NULL; } + + pool_put(&pmap_pmap_pool, pmap); } /* @@ -485,9 +403,11 @@ pmap_reference(pmap) pmap_t pmap; { +#ifdef PMAPDEBUG if (pmapdebug & PDB_FOLLOW) { printf("pmap_reference(%x)\n", pmap); } +#endif if (pmap) { simple_lock(&pmap->pm_lock); pmap->pm_count++; @@ -534,11 +454,12 @@ pmap_remove(pmap, sva, eva) pt_entry_t *pte; unsigned entry; - stat_count(remove_stats.calls); - +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT)) { printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva); } + stat_count(remove_stats.calls); +#endif if (pmap == NULL) { return; } @@ -563,7 +484,9 @@ pmap_remove(pmap, sva, eva) */ pte->pt_entry = PG_NV | PG_G; /* See above about G bit */ tlb_flush_addr(sva); +#ifdef PMAPDEBUG stat_count(remove_stats.flushes); +#endif } return; } @@ -603,7 +526,9 @@ pmap_remove(pmap, sva, eva) if (pmap->pm_tlbgen == tlbpid_gen) { tlb_flush_addr(sva | (pmap->pm_tlbpid << VMTLB_PID_SHIFT)); +#ifdef PMAPDEBUG stat_count(remove_stats.flushes); +#endif } } } @@ -619,18 +544,17 @@ pmap_page_protect(pg, prot) struct vm_page *pg; vm_prot_t prot; { - paddr_t pa = VM_PAGE_TO_PHYS(pg); pv_entry_t pv; vaddr_t va; - int s, i; +#ifdef PMAPDEBUG if ((pmapdebug 
& (PDB_FOLLOW|PDB_PROTECT)) || ((prot == VM_PROT_NONE) && (pmapdebug & PDB_REMOVE))) { - printf("pmap_page_protect(%x, %x)\n", pa, prot); + printf("pmap_page_protect(%x, %x)\n", pg, prot); } - if (!IS_VM_PHYSADDR(pa)) { +#endif + if (pg == NULL) return; - } switch (prot) { case VM_PROT_READ|VM_PROT_WRITE: @@ -640,37 +564,26 @@ pmap_page_protect(pg, prot) /* copy_on_write */ case VM_PROT_READ: case VM_PROT_READ|VM_PROT_EXECUTE: - pv = pa_to_pvh(pa); - s = splimp(); + pv = pg_to_pvh(pg); /* * Loop over all current mappings setting/clearing as apropos. */ if (pv->pv_pmap != NULL) { for (; pv; pv = pv->pv_next) { va = pv->pv_va; - - /* - * XXX don't write protect pager mappings - */ - if (va >= uvm.pager_sva && va < uvm.pager_eva) - continue; - pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE, prot); + pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE, + prot); } } - splx(s); break; /* remove_all */ default: - i = 0; - pv = pa_to_pvh(pa); - s = splimp(); - while (pv->pv_pmap != NULL && i < 10) { - i++; - pmap_remove(pv->pv_pmap, pv->pv_va, - pv->pv_va + PAGE_SIZE); + pv = pg_to_pvh(pg); + while (pv->pv_pmap != NULL) { + va = pv->pv_va; + pmap_remove(pv->pv_pmap, va, va + PAGE_SIZE); } - splx(s); } } @@ -689,9 +602,11 @@ pmap_protect(pmap, sva, eva, prot) unsigned entry; u_int p; +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) { printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot); } +#endif if (pmap == NULL) return; @@ -702,7 +617,7 @@ pmap_protect(pmap, sva, eva, prot) p = (prot & VM_PROT_WRITE) ? PG_M : PG_RO; - if (!pmap->pm_segtab) { + if (pmap == pmap_kernel()) { /* * Change entries in kernel pmap. * This will trap if the page is writeable (in order to set @@ -775,7 +690,7 @@ pmap_is_page_ro(pmap, va, entry) vaddr_t va; int entry; { - return(entry & PG_RO); + return (entry & PG_RO); } /* @@ -784,32 +699,28 @@ pmap_is_page_ro(pmap, va, entry) * Change all mappings of a page to cached/uncached. */ void -pmap_page_cache(pa,mode) - vaddr_t pa; +pmap_page_cache(pg,mode) + vm_page_t pg; int mode; { pv_entry_t pv; pt_entry_t *pte; unsigned entry; unsigned newmode; - int s; +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) { printf("pmap_page_uncache(%x)\n", pa); } - if (!IS_VM_PHYSADDR(pa)) { - return; - } - +#endif newmode = mode & PV_UNCACHED ? PG_UNCACHED : PG_CACHED; - pv = pa_to_pvh(pa); - s = splimp(); + pv = pg_to_pvh(pg); while (pv) { pv->pv_flags = (pv->pv_flags & ~PV_UNCACHED) | mode; - if (!pv->pv_pmap->pm_segtab) { - /* - * Change entries in kernel pmap. - */ + if (pv->pv_pmap == pmap_kernel()) { + /* + * Change entries in kernel pmap. + */ pte = kvtopte(pv->pv_va); entry = pte->pt_entry; if (entry & PG_V) { @@ -817,8 +728,7 @@ pmap_page_cache(pa,mode) pte->pt_entry = entry; tlb_update(pv->pv_va, entry); } - } - else { + } else { if ((pte = pmap_segmap(pv->pv_pmap, pv->pv_va))) { pte += (pv->pv_va >> PGSHIFT) & (NPTEPG - 1); entry = pte->pt_entry; @@ -833,8 +743,6 @@ pmap_page_cache(pa,mode) } pv = pv->pv_next; } - - splx(s); } /* @@ -847,57 +755,63 @@ pmap_page_cache(pa,mode) * insert this page into the given map NOW. 
*/ int -pmap_enter(pmap, va, pa, prot, stat) +pmap_enter(pmap, va, pa, prot, flags) pmap_t pmap; vaddr_t va; paddr_t pa; vm_prot_t prot; - int stat; + int flags; { pt_entry_t *pte; u_int npte; - vm_page_t mem; - int is_physaddr; + vm_page_t pg; +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) { printf("pmap_enter(%x, %x, %x, %x, %x)\n", pmap, va, pa, prot, stat); } +#endif #ifdef DIAGNOSTIC if (pmap == pmap_kernel()) { +#ifdef PMAPDEBUG enter_stats.kernel++; +#endif if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end) panic("pmap_enter: kva %p", va); } else { +#ifdef PMAPDEBUG enter_stats.user++; +#endif if (va >= VM_MAXUSER_ADDRESS) panic("pmap_enter: uva %p", va); } #endif - is_physaddr = IS_VM_PHYSADDR(pa); + pg = PHYS_TO_VM_PAGE(pa); + + if (pg != NULL) { + pv_entry_t pv; + + pv = pg_to_pvh(pg); - if (is_physaddr) { - pv_entry_t pv, npv; - int s; + /* Set page referenced/modified status based on flags */ + if (flags & VM_PROT_WRITE) + pv->pv_flags |= PV_ATTR_MOD | PV_ATTR_REF; + else if (flags & VM_PROT_ALL) + pv->pv_flags |= PV_ATTR_REF; if (!(prot & VM_PROT_WRITE)) { npte = PG_ROPAGE; } else { - vm_page_t mem; - - mem = PHYS_TO_VM_PAGE(pa); - if ((int)va < 0) { + if ((int64_t)va < 0) { /* * Don't bother to trap on kernel writes, * just record page as dirty. */ npte = PG_RWPAGE; -#if 0 /*XXX*/ - mem->flags &= ~PG_CLEAN; -#endif } else { - if (!(mem->flags & PG_CLEAN)) { + if (pv->pv_flags & PV_ATTR_MOD) { npte = PG_RWPAGE; } else { npte = PG_CWPAGE; @@ -905,104 +819,17 @@ pmap_enter(pmap, va, pa, prot, stat) } } +#ifdef PMAPDEBUG stat_count(enter_stats.managed); - /* - * Enter the pmap and virtual address into the - * physical to virtual map table. - */ - pv = pa_to_pvh(pa); - s = splimp(); - - if (pmapdebug & PDB_ENTER) { - printf("pmap_enter: pv %x: was %x/%x/%x\n", - pv, pv->pv_va, pv->pv_pmap, pv->pv_next); - } - - if (pv->pv_pmap == NULL) { - /* - * No entries yet, use header as the first entry - */ - - if (pmapdebug & PDB_PVENTRY) { - printf("pmap_enter: first pv: pmap %x va %x pa %p\n", - pmap, va, pa); - } - stat_count(enter_stats.firstpv); - - Mips_SyncDCachePage(pv->pv_va); - - pv->pv_va = va; - pv->pv_flags = PV_CACHED; - pv->pv_pmap = pmap; - pv->pv_next = NULL; - } else { - if (pv->pv_flags & PV_UNCACHED) { - npte = (npte & ~PG_CACHEMODE) | PG_UNCACHED; - } else if (CpuCacheAliasMask != 0) { - /* - * There is at least one other VA mapping this page. - * Check if they are cache index compatible. If not - * remove all mappings, flush the cache and set page - * to be mapped uncached. Caching will be restored - * when pages are mapped compatible again. NOT! - */ - for (npv = pv; npv; npv = npv->pv_next) { - /* - * Check cache aliasing incompatibility - */ - if (((npv->pv_va ^ va) & CpuCacheAliasMask) != 0) { - printf("pmap_enter: uncached mapping for pa %p, va %p != %p.\n", pa, npv->pv_va, va); - pmap_page_cache(pa,PV_UNCACHED); - Mips_SyncCache(); - pv->pv_flags &= ~PV_CACHED; - npte = (npte & ~PG_CACHEMODE) | PG_UNCACHED; - break; - } - } - } - - /* - * There is at least one other VA mapping this page. - * Place this entry after the header. - * - * Note: the entry may already be in the table if - * we are only changing the protection bits. - */ - for (npv = pv; npv; npv = npv->pv_next) { - if (pmap == npv->pv_pmap && va == npv->pv_va) { - goto fnd; - } - } - - if (pmapdebug & PDB_PVENTRY) { - printf("pmap_enter: new pv: pmap %x va %x pa %p\n", - pmap, va, pa); - } - - /* can this cause us to recurse forever? 
*/ - npv = pmap_pv_alloc(); - if (npv == NULL) { - panic("pmap_pv_alloc() failed"); - } - npv->pv_va = va; - npv->pv_pmap = pmap; - npv->pv_next = pv->pv_next; - npv->pv_flags = pv->pv_flags; - pv->pv_next = npv; - - if (!npv->pv_next) - stat_count(enter_stats.secondpv); - fnd: - ; - } - splx(s); - } - else { +#endif + } else { /* * Assumption: if it is not part of our managed memory - * then it must be device memory which may be volitile. + * then it must be device memory which may be volatile. */ +#ifdef PMAPDEBUG stat_count(enter_stats.unmanaged); +#endif if (prot & VM_PROT_WRITE) { npte = PG_IOPAGE & ~PG_G; } else { @@ -1011,14 +838,24 @@ pmap_enter(pmap, va, pa, prot, stat) } if (pmap == pmap_kernel()) { + if (pg != NULL) { + if (pmap_enter_pv(pmap, va, pg, &npte) != 0) { + if (flags & PMAP_CANFAIL) + return ENOMEM; + panic("pmap_enter: pmap_enter_pv() failed"); + } + } + pte = kvtopte(va); npte |= vad_to_pfn(pa) | PG_ROPAGE | PG_G; if (!(pte->pt_entry & PG_V)) { pmap->pm_stats.resident_count++; } - if (pa != pfn_to_pad(pte->pt_entry)) { + if ((pte->pt_entry & PG_V) && pa != pfn_to_pad(pte->pt_entry)) { pmap_remove(pmap, va, va + NBPG); +#ifdef PMAPDEBUG stat_count(enter_stats.mchange); +#endif } /* @@ -1026,33 +863,37 @@ pmap_enter(pmap, va, pa, prot, stat) */ pte->pt_entry = npte; tlb_update(va, npte); - return (KERN_SUCCESS); + return 0; } /* * User space mapping. Do table build. */ if (!(pte = pmap_segmap(pmap, va))) { - pv_entry_t pv; + vm_page_t npg; /* do not clobber pg! */ + do { - mem = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE|UVM_PGA_ZERO); - if (mem == NULL) { - /* XXX What else can we do? Deadlocks? */ - uvm_wait("penter"); + npg = uvm_pagealloc(NULL, 0, NULL, + UVM_PGA_USERESERVE | UVM_PGA_ZERO); + if (npg == NULL) { + if (flags & PMAP_CANFAIL) + return ENOMEM; + uvm_wait("pmap_enter"); } - } while (mem == NULL); - - pv = pa_to_pvh(VM_PAGE_TO_PHYS(mem)); - if (pv->pv_flags & PV_CACHED && - ((pv->pv_va ^ PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem))) & CpuCacheAliasMask) != 0) { - Mips_SyncDCachePage(pv->pv_va); - } - pv->pv_va = PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem)); - pv->pv_flags = PV_CACHED; + } while (npg == NULL); pmap_segmap(pmap, va) = pte = (pt_entry_t *) - (long)PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(mem)); + (vaddr_t)PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(npg)); } + + if (pg != NULL) { + if (pmap_enter_pv(pmap, va, pg, &npte) != 0) { + if (flags & PMAP_CANFAIL) + return ENOMEM; + panic("pmap_enter: pmap_enter_pv() failed"); + } + } + pte += (va >> PGSHIFT) & (NPTEPG - 1); /* @@ -1060,22 +901,21 @@ pmap_enter(pmap, va, pa, prot, stat) * Assume uniform modified and referenced status for all * MIPS pages in a OpenBSD page. */ - if (is_physaddr) { - npte |= vad_to_pfn(pa); - } - else { - npte |= vad_to_pfn(pa); - } + npte |= vad_to_pfn(pa); +#ifdef PMAPDEBUG if (pmapdebug & PDB_ENTER) { printf("pmap_enter: new pte %x", npte); if (pmap->pm_tlbgen == tlbpid_gen) printf(" tlbpid %d", pmap->pm_tlbpid); printf("\n"); } +#endif - if (pa != pfn_to_pad(pte->pt_entry)) { + if ((pte->pt_entry & PG_V) && pa != pfn_to_pad(pte->pt_entry)) { pmap_remove(pmap, va, va + NBPG); +#ifdef PMAPDEBUG stat_count(enter_stats.mchange); +#endif } if (!(pte->pt_entry & PG_V)) { @@ -1083,20 +923,18 @@ pmap_enter(pmap, va, pa, prot, stat) } pte->pt_entry = npte; if (pmap->pm_tlbgen == tlbpid_gen) { - int s, i; - s = splimp(); - i = tlb_update(va | (pmap->pm_tlbpid << VMTLB_PID_SHIFT), npte); + tlb_update(va | (pmap->pm_tlbpid << VMTLB_PID_SHIFT), npte); /* * If mapping a memory space address invalidate ICache. 
*/ - if (is_physaddr) { + if (pg != NULL && + (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == + (VM_PROT_READ | VM_PROT_EXECUTE)) Mips_InvalidateICachePage(va); - } - splx(s); } - return (KERN_SUCCESS); + return 0; } void @@ -1108,16 +946,16 @@ pmap_kenter_pa(va, pa, prot) pt_entry_t *pte; u_int npte; +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) { printf("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot); } - +#endif npte = vad_to_pfn(pa) | PG_G; if (prot & VM_PROT_WRITE) { npte |= PG_RWPAGE; - } - else { + } else { npte |= PG_ROPAGE; } pte = kvtopte(va); @@ -1135,17 +973,17 @@ pmap_kenter_cache(va, pa, prot, cache) pt_entry_t *pte; u_int npte; +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_ENTER)) { printf("pmap_kenter_cache(%lx, %lx, %x)\n", va, pa, prot); } - +#endif npte = vad_to_pfn(pa) | PG_G; if (prot & VM_PROT_WRITE) { - npte |= PG_M | cache; - } - else { - npte |= PG_RO | cache; + npte |= PG_V | PG_M | cache; + } else { + npte |= PG_V | PG_RO | cache; } pte = kvtopte(va); pte->pt_entry = npte; @@ -1161,9 +999,11 @@ pmap_kremove(va, len) vaddr_t eva; u_int entry; +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE)) { printf("pmap_kremove(%lx, %lx)\n", va, len); } +#endif pte = kvtopte(va); eva = va + len; @@ -1183,6 +1023,7 @@ pmap_unwire(pmap, va) pmap_t pmap; vaddr_t va; { + /* XXX this pmap does not handle wired mappings yet... */ } /* @@ -1197,43 +1038,50 @@ pmap_extract(pmap, va, pa) vaddr_t va; paddr_t *pa; { + boolean_t rv = TRUE; + pt_entry_t *pte; + +#ifdef PMAPDEBUG if (pmapdebug & PDB_FOLLOW) { printf("pmap_extract(%x, %x) -> ", pmap, va); } +#endif - if (!pmap->pm_segtab) { - if (va >= (long)KSEG0_BASE && va < (long)(KSEG0_BASE + KSEG_SIZE)) { + if (pmap == pmap_kernel()) { + if (va >= (long)KSEG0_BASE && + va < (long)(KSEG0_BASE + KSEG_SIZE)) { *pa = (long)KSEG0_TO_PHYS(va); - } - else { + } else { #ifdef DIAGNOSTIC if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end) { printf("pmap_extract(%x, %x) -> ", pmap, va); panic("pmap_extract"); } #endif - *pa = pfn_to_pad(kvtopte(va)->pt_entry); - } - } - else { - pt_entry_t *pte; - - if (!(pte = pmap_segmap(pmap, va))) { - *pa = 0; + pte = kvtopte(va); + if (pte->pt_entry & PG_V) + *pa = pfn_to_pad(pte->pt_entry); + else + rv = FALSE; } + } else { + if (!(pte = pmap_segmap(pmap, va))) + rv = FALSE; else { pte += (va >> PGSHIFT) & (NPTEPG - 1); *pa = pfn_to_pad(pte->pt_entry); } } - if (*pa) + if (rv != FALSE) *pa |= va & PGOFSET; +#ifdef PMAPDEBUG if (pmapdebug & PDB_FOLLOW) { - printf("pmap_extract: pa %x\n", *pa); + printf("pmap_extract: rv %d pa %x\n", rv, *pa); } +#endif - return (TRUE); + return (rv); } /* @@ -1278,10 +1126,12 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) vaddr_t src_addr; { +#ifdef PMAPDEBUG if (pmapdebug & PDB_FOLLOW) { printf("pmap_copy(%x, %x, %x, %x, %x)\n", dst_pmap, src_pmap, dst_addr, len, src_addr); } +#endif } #ifndef pmap_update @@ -1297,9 +1147,11 @@ void pmap_update(pmap) pmap_t pmap; { +#ifdef PMAPDEBUG if (pmapdebug & PDB_FOLLOW) { printf("pmap_update()\n"); } +#endif } #endif @@ -1310,21 +1162,23 @@ void pmap_zero_page(struct vm_page *pg) { paddr_t phys = VM_PAGE_TO_PHYS(pg); - vaddr_t p; + vaddr_t va; pv_entry_t pv; +#ifdef PMAPDEBUG if (pmapdebug & PDB_FOLLOW) { printf("pmap_zero_page(%x)\n", phys); } +#endif - p = (vaddr_t)PHYS_TO_KSEG0(phys); - pv = pa_to_pvh(phys); - if (pv->pv_flags & PV_CACHED && - ((pv->pv_va ^ (int)p) & CpuCacheAliasMask) != 0) { + va = (vaddr_t)PHYS_TO_KSEG0(phys); + pv = pg_to_pvh(pg); + if ((pv->pv_flags & PV_CACHED) && + 
((pv->pv_va ^ va) & CpuCacheAliasMask) != 0) { Mips_SyncDCachePage(pv->pv_va); } - mem_zero_page(p); - Mips_HitSyncDCache(p, PAGE_SIZE); + mem_zero_page(va); + Mips_HitSyncDCache(va, PAGE_SIZE); } /* @@ -1354,19 +1208,21 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg) int tmp0, tmp1, tmp2, tmp3; pv_entry_t pv; +#ifdef PMAPDEBUG if (pmapdebug & PDB_FOLLOW) { printf("pmap_copy_page(%x, %x)\n", src, dst); } - s = (int *)(long)PHYS_TO_KSEG0(src); - d = (int *)(long)PHYS_TO_KSEG0(dst); +#endif + s = (int *)(vaddr_t)PHYS_TO_KSEG0(src); + d = (int *)(vaddr_t)PHYS_TO_KSEG0(dst); - pv = pa_to_pvh(src); - if (pv->pv_flags & PV_CACHED && + pv = pg_to_pvh(srcpg); + if ((pv->pv_flags & PV_CACHED) && (sf = ((pv->pv_va ^ (long)s) & CpuCacheAliasMask) != 0)) { Mips_SyncDCachePage(pv->pv_va); } - pv = pa_to_pvh(dst); - if (pv->pv_flags & PV_CACHED && + pv = pg_to_pvh(dstpg); + if ((pv->pv_flags & PV_CACHED) && (df = ((pv->pv_va ^ (long)d) & CpuCacheAliasMask) != 0)) { Mips_SyncDCachePage(pv->pv_va); } @@ -1399,13 +1255,33 @@ boolean_t pmap_clear_modify(pg) struct vm_page *pg; { - paddr_t pa = VM_PAGE_TO_PHYS(pg); - boolean_t rv = FALSE; + pv_entry_t pv; + boolean_t rv; +#ifdef PMAPDEBUG if (pmapdebug & PDB_FOLLOW) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); printf("pmap_clear_modify(%x)\n", pa); } - return(rv); +#endif + + /* + * XXX - we should walk the pv list, and flush any dirty pages too. + */ + pv = pg_to_pvh(pg); + rv = (pv->pv_flags & PV_ATTR_MOD) != 0; + pv->pv_flags &= ~PV_ATTR_MOD; + return rv; +} + +void +pmap_set_modify(pg) + struct vm_page *pg; +{ + pv_entry_t pv; + + pv = pg_to_pvh(pg); + pv->pv_flags |= PV_ATTR_MOD | PV_ATTR_REF; } /* @@ -1417,12 +1293,20 @@ boolean_t pmap_clear_reference(pg) struct vm_page *pg; { - paddr_t pa = VM_PAGE_TO_PHYS(pg); + pv_entry_t pv; + boolean_t rv; +#ifdef PMAPDEBUG if (pmapdebug & PDB_FOLLOW) { + paddr_t pa = VM_PAGE_TO_PHYS(pg); printf("pmap_clear_reference(%x)\n", pa); } - return(FALSE); +#endif + + pv = pg_to_pvh(pg); + rv = (pv->pv_flags & PV_ATTR_REF) != 0; + pv->pv_flags &= ~PV_ATTR_REF; + return rv; } /* @@ -1435,7 +1319,10 @@ boolean_t pmap_is_referenced(pg) struct vm_page *pg; { - return (FALSE); + pv_entry_t pv; + + pv = pg_to_pvh(pg); + return (pv->pv_flags & PV_ATTR_REF) != 0; } /* @@ -1448,7 +1335,10 @@ boolean_t pmap_is_modified(pg) struct vm_page *pg; { - return (FALSE); + pv_entry_t pv; + + pv = pg_to_pvh(pg); + return (pv->pv_flags & PV_ATTR_MOD) != 0; } paddr_t @@ -1456,9 +1346,11 @@ pmap_phys_address(ppn) int ppn; { +#ifdef PMAPDEBUG if (pmapdebug & PDB_FOLLOW) { printf("pmap_phys_address(%x)\n", ppn); } +#endif return (ptoa(ppn)); } @@ -1500,22 +1392,128 @@ pmap_alloc_tlbpid(p) id = pmap->pm_tlbpid; } +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_TLBPID)) { if (curproc) { printf("pmap_alloc_tlbpid: curproc %d '%s' ", curproc->p_pid, curproc->p_comm); - } - else { + } else { printf("pmap_alloc_tlbpid: curproc <none> "); } printf("segtab %x tlbpid %d pid %d '%s'\n", pmap->pm_segtab, id, p->p_pid, p->p_comm); } +#endif return (id); } /* + * Enter the pmap and virtual address into the + * physical to virtual map table. 
+ */ +int +pmap_enter_pv(pmap, va, pg, npte) + pmap_t pmap; + vaddr_t va; + vm_page_t pg; + u_int *npte; +{ + pv_entry_t pv, npv; +#ifdef PMAPDEBUG + paddr_t pa = VM_PAGE_TO_PHYS(pg); +#endif + + pv = pg_to_pvh(pg); + + if (pv->pv_pmap == NULL) { + /* + * No entries yet, use header as the first entry + */ + +#ifdef PMAPDEBUG + if (pmapdebug & PDB_PVENTRY) { + printf("pmap_enter: first pv: pmap %x va %x pa %p\n", + pmap, va, pa); + } + stat_count(enter_stats.firstpv); +#endif + + Mips_SyncDCachePage(pv->pv_va); + + pv->pv_va = va; + pv->pv_flags = PV_CACHED; + pv->pv_pmap = pmap; + pv->pv_next = NULL; + } else { + if (pv->pv_flags & PV_UNCACHED) { + *npte = (*npte & ~PG_CACHEMODE) | PG_UNCACHED; + } else if (CpuCacheAliasMask != 0) { + /* + * There is at least one other VA mapping this page. + * Check if they are cache index compatible. If not + * remove all mappings, flush the cache and set page + * to be mapped uncached. Caching will be restored + * when pages are mapped compatible again. NOT! + */ + for (npv = pv; npv; npv = npv->pv_next) { + /* + * Check cache aliasing incompatibility + */ + if (((npv->pv_va ^ va) & CpuCacheAliasMask) != 0) { +#ifdef PMAPDEBUG + printf("pmap_enter: uncached mapping for pa %p, va %p != %p.\n", + pa, npv->pv_va, va); +#endif + pmap_page_cache(pg, PV_UNCACHED); + Mips_SyncCache(); + pv->pv_flags &= ~PV_CACHED; + *npte = (*npte & ~PG_CACHEMODE) | PG_UNCACHED; + break; + } + } + } + + /* + * There is at least one other VA mapping this page. + * Place this entry after the header. + * + * Note: the entry may already be in the table if + * we are only changing the protection bits. + */ + for (npv = pv; npv; npv = npv->pv_next) { + if (pmap == npv->pv_pmap && va == npv->pv_va) { + return 0; + } + } + +#ifdef PMAPDEBUG + if (pmapdebug & PDB_PVENTRY) { + printf("pmap_enter: new pv: pmap %x va %x pa %p\n", + pmap, va, pa); + } +#endif + + /* can this cause us to recurse forever? */ + npv = pmap_pv_alloc(); + if (npv == NULL) + return ENOMEM; + npv->pv_va = va; + npv->pv_pmap = pmap; + npv->pv_next = pv->pv_next; + npv->pv_flags = pv->pv_flags; + pv->pv_next = npv; + +#ifdef PMAPDEBUG + if (!npv->pv_next) + stat_count(enter_stats.secondpv); +#endif + } + + return 0; +} + +/* * Remove a physical to virtual address translation. * Returns TRUE if it was the last mapping and cached, else FALSE. */ @@ -1526,22 +1524,23 @@ pmap_remove_pv(pmap, va, pa) paddr_t pa; { pv_entry_t pv, npv; - int s; + vm_page_t pg; +#ifdef PMAPDEBUG if (pmapdebug & (PDB_FOLLOW|PDB_PVENTRY)) { printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa); } +#endif /* * Remove page from the PV table (raise IPL since we * may be called at interrupt time). 
*/ - if (!IS_VM_PHYSADDR(pa)) { + pg = PHYS_TO_VM_PAGE(pa); + if (pg == NULL) return; - } - pv = pa_to_pvh(pa); - s = splimp(); /* XXX not in nbsd */ + pv = pg_to_pvh(pg); /* * If it is the first entry on the list, it is actually * in the header and we must copy the following entry up @@ -1551,93 +1550,34 @@ pmap_remove_pv(pmap, va, pa) if (pmap == pv->pv_pmap && va == pv->pv_va) { npv = pv->pv_next; if (npv) { + npv->pv_flags |= + (pv->pv_flags & (PV_ATTR_MOD | PV_ATTR_REF)); *pv = *npv; pmap_pv_free(npv); - } - else { + } else { pv->pv_pmap = NULL; } +#ifdef PMAPDEBUG stat_count(remove_stats.pvfirst); - } - else { +#endif + } else { for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) { +#ifdef PMAPDEBUG stat_count(remove_stats.pvsearch); +#endif if (pmap == npv->pv_pmap && va == npv->pv_va) - goto fnd; + break; } + if (npv != NULL) { + pv->pv_next = npv->pv_next; + pmap_pv_free(npv); + } else { #ifdef DIAGNOSTIC - printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa); - panic("pmap_remove_pv"); + panic("pmap_remove_pv(%x, %x, %x) not found", + pmap, va, pa); #endif - fnd: - pv->pv_next = npv->pv_next; - pmap_pv_free(npv); - } - splx(s); - return; -} - -/* - * pmap_pv_page_alloc: - * - * Allocate a page for the pv_entry pool. - */ -void * -pmap_pv_page_alloc(u_long size, int flags, int mtype) -{ - paddr_t pg; - - if (pmap_physpage_alloc(&pg)) - return ((void *)(long)PHYS_TO_KSEG0(pg)); - return (NULL); -} - -/* - * pmap_pv_page_free: - * - * Free a pv_entry pool page. - */ -void -pmap_pv_page_free(void *v, u_long size, int mtype) -{ - pmap_physpage_free(KSEG0_TO_PHYS((vaddr_t)v)); -} - -/* - * pmap_physpage_alloc: - * - * Allocate a single page from the VM system and return the - * physical address for that page. - */ -boolean_t -pmap_physpage_alloc(paddr_t *pap) -{ - struct vm_page *pg; - paddr_t pa; - - pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); - if (pg != NULL) { - pa = VM_PAGE_TO_PHYS(pg); - *pap = pa; - return (TRUE); - } - return (FALSE); -} - -/* - * pmap_physpage_free: - * - * Free a single pmap metadate page at the specified physical address. 
- */ -void -pmap_physpage_free(paddr_t pa) -{ - struct vm_page *pg; - - if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { - panic("pmap_physpage_free: bogus physical page address"); + } } - uvm_pagefree(pg); } /*==================================================================*/ @@ -1657,11 +1597,12 @@ bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int cacheable, off = bpa - spa; len = size+off; - if (phys_map == NULL) { - printf("ouch, add mapping when phys map not ready!\n"); - } else { - vaddr = uvm_km_valloc_wait(kernel_map, len); - } +#ifdef DEBUG + if (phys_map == NULL) + panic("bus_mem_add_mapping when phys map not ready!"); +#endif + + vaddr = uvm_km_valloc_wait(kernel_map, len); *bshp = vaddr + off; #ifdef DEBUG_BUS_MEM_ADD_MAPPING printf("map bus %x size %x to %x vbase %x\n", bpa, size, *bshp, spa); @@ -1675,4 +1616,3 @@ bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int cacheable, } return 0; } - diff --git a/sys/arch/mips64/mips64/trap.c b/sys/arch/mips64/mips64/trap.c index fd9e07a1d53..ba840ac097c 100644 --- a/sys/arch/mips64/mips64/trap.c +++ b/sys/arch/mips64/mips64/trap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: trap.c,v 1.7 2004/09/16 09:58:56 miod Exp $ */ +/* $OpenBSD: trap.c,v 1.8 2004/09/17 19:19:08 miod Exp $ */ /* tracked to 1.23 */ /* @@ -195,10 +195,11 @@ trap(trapframe) switch (type) { case T_TLB_MOD: /* check for kernel address */ - if ((int)trapframe->badvaddr < 0) { + if (trapframe->badvaddr < 0) { pt_entry_t *pte; unsigned int entry; - vaddr_t pa; + paddr_t pa; + vm_page_t pg; pte = kvtopte(trapframe->badvaddr); entry = pte->pt_entry; @@ -206,7 +207,8 @@ trap(trapframe) if (!(entry & PG_V) || (entry & PG_M)) panic("trap: ktlbmod: invalid pte"); #endif - if (pmap_is_page_ro(pmap_kernel(), mips_trunc_page(trapframe->badvaddr), entry)) { + if (pmap_is_page_ro(pmap_kernel(), + mips_trunc_page(trapframe->badvaddr), entry)) { /* write to read only page in the kernel */ ftype = VM_PROT_WRITE; goto kernel_fault; @@ -216,9 +218,10 @@ trap(trapframe) trapframe->badvaddr &= ~PGOFSET; tlb_update(trapframe->badvaddr, entry); pa = pfn_to_pad(entry); - if (!IS_VM_PHYSADDR(pa)) + pg = PHYS_TO_VM_PAGE(pa); + if (pg == NULL) panic("trap: ktlbmod: unmanaged page"); - PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN; + pmap_set_modify(pg); return (trapframe->pc); } /* FALLTHROUGH */ @@ -228,6 +231,7 @@ trap(trapframe) pt_entry_t *pte; unsigned int entry; paddr_t pa; + vm_page_t pg; pmap_t pmap = p->p_vmspace->vm_map.pmap; if (!(pte = pmap_segmap(pmap, trapframe->badvaddr))) @@ -235,24 +239,25 @@ trap(trapframe) pte += (trapframe->badvaddr >> PGSHIFT) & (NPTEPG - 1); entry = pte->pt_entry; #ifdef DIAGNOSTIC - if (!(entry & PG_V) || (entry & PG_M)) { + if (!(entry & PG_V) || (entry & PG_M)) panic("trap: utlbmod: invalid pte"); - } #endif - if (pmap_is_page_ro(pmap, (vaddr_t)mips_trunc_page(trapframe->badvaddr), entry)) { + if (pmap_is_page_ro(pmap, + mips_trunc_page(trapframe->badvaddr), entry)) { /* write to read only page */ ftype = VM_PROT_WRITE; goto dofault; } entry |= PG_M; pte->pt_entry = entry; - trapframe->badvaddr = (trapframe->badvaddr & ~PGOFSET) | (pmap->pm_tlbpid << VMTLB_PID_SHIFT); + trapframe->badvaddr = (trapframe->badvaddr & ~PGOFSET) | + (pmap->pm_tlbpid << VMTLB_PID_SHIFT); tlb_update(trapframe->badvaddr, entry); pa = pfn_to_pad(entry); - if (!IS_VM_PHYSADDR(pa)) { + pg = PHYS_TO_VM_PAGE(pa); + if (pg == NULL) panic("trap: utlbmod: unmanaged page"); - } - PHYS_TO_VM_PAGE(pa)->flags &= ~PG_CLEAN; + pmap_set_modify(pg); if (!USERMODE(trapframe->sr)) return (trapframe->pc); 
goto out;