author    | Artur Grabowski <art@cvs.openbsd.org> | 1999-02-26 10:37:52 +0000
committer | Artur Grabowski <art@cvs.openbsd.org> | 1999-02-26 10:37:52 +0000
commit    | f50de1503a4294f039e2c06e159f97c68e31b74d (patch)
tree      | 682d6fc478e0e4b6760067440a2d4fd53aea57e2 /sys/arch/i386
parent    | 3c211f25f64dbb1b0c6932c027f6b8b49682082c (diff)
deal with uvm. Mostly name changes.
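
As the message says, most of this change is mechanical: every old Mach-VM call site gains a compile-time switch so the tree builds with either VM system. A minimal illustrative sketch of the idiom (not itself part of the commit), with the old/new pairings taken from the diff below:

```c
/*
 * Illustrative sketch of the #if defined(UVM) idiom used throughout.
 * The pairings visible in this commit's diff:
 *
 *   vm_set_page_size()             -> uvm_setpagesize()
 *   kmem_alloc()/kmem_free()       -> uvm_km_alloc()/uvm_km_zalloc()/uvm_km_free()
 *   kmem_alloc_wait()              -> uvm_km_valloc_wait()
 *   kmem_free_wakeup()             -> uvm_km_free_wakeup()
 *   vm_fault(map, va, prot, FALSE) -> uvm_fault(map, va, 0, prot)
 *   cnt.v_swtch                    -> uvmexp.swtch
 */
#if defined(UVM)
	pmap->pm_pdir = (pd_entry_t *)uvm_km_zalloc(kernel_map, NBPG);
#else
	pmap->pm_pdir = (pd_entry_t *)kmem_alloc(kernel_map, NBPG);
#endif
```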
Diffstat (limited to 'sys/arch/i386')
-rw-r--r-- | sys/arch/i386/i386/pmap.c       | 83
-rw-r--r-- | sys/arch/i386/i386/pmap.old.c   | 83
-rw-r--r-- | sys/arch/i386/i386/vm_machdep.c | 24
3 files changed, 173 insertions, 17 deletions
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index 0d80435f439..8501fb1e4b6 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.26 1999/02/26 10:26:57 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.27 1999/02/26 10:37:51 art Exp $ */
 /* $NetBSD: pmap.c,v 1.36 1996/05/03 19:42:22 christos Exp $ */
 
 /*
@@ -89,6 +89,10 @@
 #include <vm/vm_kern.h>
 #include <vm/vm_page.h>
 
+#if defined(UVM)
+#include <uvm/uvm.h>
+#endif
+
 #include <machine/cpu.h>
 
 #include <dev/isa/isareg.h>
@@ -230,6 +234,17 @@ pmap_bootstrap(virtual_start)
         vm_offset_t va;
         pt_entry_t *pte;
 #endif
+#if defined(UVM)
+        int first16q;
+#endif
+
+        /* Register the page size with the vm system */
+#if defined(UVM)
+        uvm_setpagesize();
+#else
+        vm_set_page_size();
+#endif
+
         /* XXX: allow for msgbuf */
         avail_end -= i386_round_page(sizeof(struct msgbuf));
@@ -290,9 +305,6 @@ pmap_bootstrap(virtual_start)
          */
         virtual_avail = reserve_dumppages(virtual_avail);
 
-        /* Register the page size with the vm system */
-        vm_set_page_size();
-
         /* flawed, no mappings?? */
         if (ctob(physmem) > 31*1024*1024 && MAXKPDE != NKPDE) {
                 vm_offset_t p;
@@ -313,17 +325,30 @@
          * [i.e. here]
          */
 #if defined(UVM)
+        if (avail_end < (16 * 1024 * 1024))
+                first16q = VM_FREELIST_DEFAULT;
+        else
+                first16q = VM_FREELIST_FIRST16;
+
         if (avail_start < hole_start)
                 uvm_page_physload(atop(avail_start), atop(hole_start),
-                    atop(avail_start), atop(hole_start));
+                    atop(avail_start), atop(hole_start), first16q);
+        if (first16q == VM_FREELIST_FIRST16) {
+                uvm_page_physload(atop(hole_end), atop(16 * 1024 * 1024),
+                    atop(hole_end), atop(16 * 1024 * 1024), first16q);
+                uvm_page_physload(atop(16 * 1024 * 1024), atop(avail_end),
+                    atop(16 * 1024 * 1024), atop(avail_end),
+                    VM_FREELIST_DEFAULT);
+        } else {
                 uvm_page_physload(atop(hole_end), atop(avail_end),
-                    atop(hole_end), atop(avail_end));
+                    atop(hole_end), atop(avail_end), first16q);
+        }
 #else
         if (avail_start < hole_start)
                 vm_page_physload(atop(avail_start), atop(hole_start),
                     atop(avail_start), atop(hole_start));
-        vm_page_physload(atop(hole_end), atop(avail_end),
-            atop(hole_end), atop(avail_end));
+        vm_page_physload(atop(hole_end), atop(avail_end),
+            atop(hole_end), atop(avail_end));
 #endif
 #endif
         pmap_update();
@@ -438,7 +463,12 @@ pmap_alloc_pv()
         int i;
 
         if (pv_nfree == 0) {
+#if defined(UVM)
+                /* NOTE: can't lock kernel_map here */
+                MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
+#else
                 pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
+#endif
                 if (pvp == 0)
                         panic("pmap_alloc_pv: kmem_alloc() failed");
                 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
@@ -482,7 +512,11 @@ pmap_free_pv(pv)
         case NPVPPG:
                 pv_nfree -= NPVPPG - 1;
                 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+#if defined(UVM)
+                FREE((vaddr_t) pvp, M_VMPVENT);
+#else
                 kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#endif
                 break;
         }
 }
@@ -552,7 +586,11 @@ pmap_collect_pv()
 
         for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
                 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
+#if defined(UVM)
+                FREE((vaddr_t) pvp, M_VMPVENT);
+#else
                 kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#endif
         }
 }
@@ -731,8 +769,16 @@ pmap_pinit(pmap)
          * No need to allocate page table space yet but we do need a
          * valid page directory table.
          */
+#if defined(UVM)
+        pmap->pm_pdir = (pd_entry_t *) uvm_km_zalloc(kernel_map, NBPG);
+#else
         pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
+#endif
+#ifdef DIAGNOSTIC
+        if (pmap->pm_pdir == NULL)
+                panic("pmap_pinit: alloc failed");
+#endif
 
         /* wire in kernel global address entries */
         bcopy(&PTD[KPTDI], &pmap->pm_pdir[KPTDI],
             MAXKPDE * sizeof(pd_entry_t));
@@ -794,7 +840,11 @@ pmap_release(pmap)
                 panic("pmap_release count");
 #endif
 
+#if defined(UVM)
+        uvm_km_free(kernel_map, (vaddr_t)pmap->pm_pdir, NBPG);
+#else
         kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
+#endif
 }
 
 /*
@@ -1251,10 +1301,18 @@ pmap_enter(pmap, va, pa, prot, wired)
                 vmap = &curproc->p_vmspace->vm_map;
                 v = trunc_page(vtopte(va));
                 printf("faulting in a pt page map %x va %x\n", vmap, v);
+#if defined(UVM)
+                rv = uvm_fault(vmap, v, 0, VM_PROT_READ|VM_PROT_WRITE);
+#else
                 rv = vm_fault(vmap, v, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+#endif
                 if (rv != KERN_SUCCESS)
                         panic("ptdi2 %x", pmap->pm_pdir[PTDPTDI]);
+#if defined(UVM)
+                uvm_map_pageable(vmap, v, round_page(v+1), FALSE);
+#else
                 vm_map_pageable(vmap, v, round_page(v+1), FALSE);
+#endif
                 pte = pmap_pte(pmap, va);
                 if (!pte)
                         panic("ptdi3 %x", pmap->pm_pdir[PTDPTDI]);
@@ -1896,10 +1954,15 @@ pmap_changebit(pa, setbits, maskbits)
                  */
                 if ((PG_RO && setbits == PG_RO) ||
                     (PG_RW && maskbits == ~PG_RW)) {
+#if defined(UVM)
+                        if (va >= uvm.pager_sva && va < uvm.pager_eva)
+                                continue;
+#else
                         extern vm_offset_t pager_sva, pager_eva;
 
                         if (va >= pager_sva && va < pager_eva)
                                 continue;
+#endif
                 }
 
                 pte = pmap_pte(pv->pv_pmap, va);
@@ -1921,7 +1984,11 @@ pmap_prefault(map, v, l)
         for (pv = v; pv < v + l ; pv += ~PD_MASK + 1) {
                 if (!pmap_pde_v(pmap_pde(map->pmap, pv))) {
                         pv2 = trunc_page(vtopte(pv));
+#if defined(UVM)
+                        uvm_fault(map, pv2, 0, VM_PROT_READ);
+#else
                         vm_fault(map, pv2, VM_PROT_READ, FALSE);
+#endif
                 }
                 pv &= PD_MASK;
         }
diff --git a/sys/arch/i386/i386/pmap.old.c b/sys/arch/i386/i386/pmap.old.c
index a71b15f3fc0..6e7cb48a249 100644
--- a/sys/arch/i386/i386/pmap.old.c
+++ b/sys/arch/i386/i386/pmap.old.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.old.c,v 1.26 1999/02/26 10:26:57 art Exp $ */
+/* $OpenBSD: pmap.old.c,v 1.27 1999/02/26 10:37:51 art Exp $ */
 /* $NetBSD: pmap.c,v 1.36 1996/05/03 19:42:22 christos Exp $ */
 
 /*
@@ -89,6 +89,10 @@
 #include <vm/vm_kern.h>
 #include <vm/vm_page.h>
 
+#if defined(UVM)
+#include <uvm/uvm.h>
+#endif
+
 #include <machine/cpu.h>
 
 #include <dev/isa/isareg.h>
@@ -230,6 +234,17 @@ pmap_bootstrap(virtual_start)
         vm_offset_t va;
         pt_entry_t *pte;
 #endif
+#if defined(UVM)
+        int first16q;
+#endif
+
+        /* Register the page size with the vm system */
+#if defined(UVM)
+        uvm_setpagesize();
+#else
+        vm_set_page_size();
+#endif
+
         /* XXX: allow for msgbuf */
         avail_end -= i386_round_page(sizeof(struct msgbuf));
@@ -290,9 +305,6 @@ pmap_bootstrap(virtual_start)
          */
         virtual_avail = reserve_dumppages(virtual_avail);
 
-        /* Register the page size with the vm system */
-        vm_set_page_size();
-
         /* flawed, no mappings?? */
         if (ctob(physmem) > 31*1024*1024 && MAXKPDE != NKPDE) {
                 vm_offset_t p;
@@ -313,17 +325,30 @@
          * [i.e. here]
          */
 #if defined(UVM)
+        if (avail_end < (16 * 1024 * 1024))
+                first16q = VM_FREELIST_DEFAULT;
+        else
+                first16q = VM_FREELIST_FIRST16;
+
         if (avail_start < hole_start)
                 uvm_page_physload(atop(avail_start), atop(hole_start),
-                    atop(avail_start), atop(hole_start));
+                    atop(avail_start), atop(hole_start), first16q);
+        if (first16q == VM_FREELIST_FIRST16) {
+                uvm_page_physload(atop(hole_end), atop(16 * 1024 * 1024),
+                    atop(hole_end), atop(16 * 1024 * 1024), first16q);
+                uvm_page_physload(atop(16 * 1024 * 1024), atop(avail_end),
+                    atop(16 * 1024 * 1024), atop(avail_end),
+                    VM_FREELIST_DEFAULT);
+        } else {
                 uvm_page_physload(atop(hole_end), atop(avail_end),
-                    atop(hole_end), atop(avail_end));
+                    atop(hole_end), atop(avail_end), first16q);
+        }
 #else
         if (avail_start < hole_start)
                 vm_page_physload(atop(avail_start), atop(hole_start),
                     atop(avail_start), atop(hole_start));
-        vm_page_physload(atop(hole_end), atop(avail_end),
-            atop(hole_end), atop(avail_end));
+        vm_page_physload(atop(hole_end), atop(avail_end),
+            atop(hole_end), atop(avail_end));
 #endif
 #endif
         pmap_update();
@@ -438,7 +463,12 @@ pmap_alloc_pv()
         int i;
 
         if (pv_nfree == 0) {
+#if defined(UVM)
+                /* NOTE: can't lock kernel_map here */
+                MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
+#else
                 pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
+#endif
                 if (pvp == 0)
                         panic("pmap_alloc_pv: kmem_alloc() failed");
                 pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
@@ -482,7 +512,11 @@ pmap_free_pv(pv)
         case NPVPPG:
                 pv_nfree -= NPVPPG - 1;
                 TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+#if defined(UVM)
+                FREE((vaddr_t) pvp, M_VMPVENT);
+#else
                 kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#endif
                 break;
         }
 }
@@ -552,7 +586,11 @@ pmap_collect_pv()
 
         for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
                 npvp = pvp->pvp_pgi.pgi_list.tqe_next;
+#if defined(UVM)
+                FREE((vaddr_t) pvp, M_VMPVENT);
+#else
                 kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#endif
         }
 }
@@ -731,8 +769,16 @@ pmap_pinit(pmap)
          * No need to allocate page table space yet but we do need a
          * valid page directory table.
          */
+#if defined(UVM)
+        pmap->pm_pdir = (pd_entry_t *) uvm_km_zalloc(kernel_map, NBPG);
+#else
         pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, NBPG);
+#endif
+#ifdef DIAGNOSTIC
+        if (pmap->pm_pdir == NULL)
+                panic("pmap_pinit: alloc failed");
+#endif
 
         /* wire in kernel global address entries */
         bcopy(&PTD[KPTDI], &pmap->pm_pdir[KPTDI],
             MAXKPDE * sizeof(pd_entry_t));
@@ -794,7 +840,11 @@ pmap_release(pmap)
                 panic("pmap_release count");
 #endif
 
+#if defined(UVM)
+        uvm_km_free(kernel_map, (vaddr_t)pmap->pm_pdir, NBPG);
+#else
         kmem_free(kernel_map, (vm_offset_t)pmap->pm_pdir, NBPG);
+#endif
 }
 
 /*
@@ -1251,10 +1301,18 @@ pmap_enter(pmap, va, pa, prot, wired)
                 vmap = &curproc->p_vmspace->vm_map;
                 v = trunc_page(vtopte(va));
                 printf("faulting in a pt page map %x va %x\n", vmap, v);
+#if defined(UVM)
+                rv = uvm_fault(vmap, v, 0, VM_PROT_READ|VM_PROT_WRITE);
+#else
                 rv = vm_fault(vmap, v, VM_PROT_READ|VM_PROT_WRITE, FALSE);
+#endif
                 if (rv != KERN_SUCCESS)
                         panic("ptdi2 %x", pmap->pm_pdir[PTDPTDI]);
+#if defined(UVM)
+                uvm_map_pageable(vmap, v, round_page(v+1), FALSE);
+#else
                 vm_map_pageable(vmap, v, round_page(v+1), FALSE);
+#endif
                 pte = pmap_pte(pmap, va);
                 if (!pte)
                         panic("ptdi3 %x", pmap->pm_pdir[PTDPTDI]);
@@ -1896,10 +1954,15 @@ pmap_changebit(pa, setbits, maskbits)
                  */
                 if ((PG_RO && setbits == PG_RO) ||
                     (PG_RW && maskbits == ~PG_RW)) {
+#if defined(UVM)
+                        if (va >= uvm.pager_sva && va < uvm.pager_eva)
+                                continue;
+#else
                         extern vm_offset_t pager_sva, pager_eva;
 
                         if (va >= pager_sva && va < pager_eva)
                                 continue;
+#endif
                 }
 
                 pte = pmap_pte(pv->pv_pmap, va);
@@ -1921,7 +1984,11 @@ pmap_prefault(map, v, l)
         for (pv = v; pv < v + l ; pv += ~PD_MASK + 1) {
                 if (!pmap_pde_v(pmap_pde(map->pmap, pv))) {
                         pv2 = trunc_page(vtopte(pv));
+#if defined(UVM)
+                        uvm_fault(map, pv2, 0, VM_PROT_READ);
+#else
                         vm_fault(map, pv2, VM_PROT_READ, FALSE);
+#endif
                 }
                 pv &= PD_MASK;
         }
diff --git a/sys/arch/i386/i386/vm_machdep.c b/sys/arch/i386/i386/vm_machdep.c
index 24aedecdeb4..76ef0ac24f3 100644
--- a/sys/arch/i386/i386/vm_machdep.c
+++ b/sys/arch/i386/i386/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.15 1999/02/26 10:26:57 art Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.16 1999/02/26 10:37:51 art Exp $ */
 /* $NetBSD: vm_machdep.c,v 1.61 1996/05/03 19:42:35 christos Exp $ */
 
 /*-
@@ -60,6 +60,10 @@
 #include <vm/vm.h>
 #include <vm/vm_kern.h>
 
+#if defined(UVM)
+#include <uvm/uvm_extern.h>
+#endif
+
 #include <machine/cpu.h>
 #include <machine/gdt.h>
 #include <machine/reg.h>
@@ -128,7 +132,11 @@ cpu_fork(p1, p2)
                 union descriptor *new_ldt;
 
                 len = pcb->pcb_ldt_len * sizeof(union descriptor);
+#if defined(UVM)
+                new_ldt = (union descriptor *)uvm_km_alloc(kernel_map, len);
+#else
                 new_ldt = (union descriptor *)kmem_alloc(kernel_map, len);
+#endif
                 bcopy(pcb->pcb_ldt, new_ldt, len);
                 pcb->pcb_ldt = new_ldt;
                 ldt_alloc(pcb, new_ldt, len);
@@ -207,10 +215,16 @@ cpu_exit(p)
 #endif
 
         vm = p->p_vmspace;
+#if !defined(UVM)
         if (vm->vm_refcnt == 1)
                 vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS,
                     VM_MAXUSER_ADDRESS);
+#endif
 
+#if defined(UVM)
+        uvmexp.swtch++;
+#else
         cnt.v_swtch++;
+#endif
         switch_exit(p);
 }
 
@@ -362,7 +376,11 @@ vmapbuf(bp, len)
         faddr = trunc_page(bp->b_saveaddr = bp->b_data);
         off = (vm_offset_t)bp->b_data - faddr;
         len = round_page(off + len);
+#if defined(UVM)
+        taddr= uvm_km_valloc_wait(phys_map, len);
+#else
         taddr = kmem_alloc_wait(phys_map, len);
+#endif
         bp->b_data = (caddr_t)(taddr + off);
         /*
          * The region is locked, so we expect that pmap_pte() will return
@@ -392,7 +410,11 @@ vunmapbuf(bp, len)
         addr = trunc_page(bp->b_data);
         off = (vm_offset_t)bp->b_data - addr;
         len = round_page(off + len);
+#if defined(UVM)
+        uvm_km_free_wakeup(phys_map, addr, len);
+#else
         kmem_free_wakeup(phys_map, addr, len);
+#endif
         bp->b_data = bp->b_saveaddr;
         bp->b_saveaddr = 0;
 }
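
Beyond the renames, the one substantive change is in pmap_bootstrap(): UVM's uvm_page_physload() takes an extra free-list argument, and the commit uses it to segregate physical memory below 16MB onto VM_FREELIST_FIRST16 (presumably so pages reachable by ISA DMA, which on i386 is limited to the first 16MB, are not exhausted by ordinary allocations), unless the machine has less than 16MB in total. A condensed sketch of that decision, with names and ranges taken from the diff above:

```c
/*
 * Condensed from pmap_bootstrap() above; identifiers are the commit's
 * own. hole_start/hole_end bound the hole in low physical memory, and
 * atop() converts a byte address to a page frame number.
 */
int first16q;

if (avail_end < 16 * 1024 * 1024)
	first16q = VM_FREELIST_DEFAULT;	/* all memory is below 16MB anyway */
else
	first16q = VM_FREELIST_FIRST16;	/* keep low pages on their own list */

if (avail_start < hole_start)		/* memory below the hole */
	uvm_page_physload(atop(avail_start), atop(hole_start),
	    atop(avail_start), atop(hole_start), first16q);
if (first16q == VM_FREELIST_FIRST16) {
	/* split the range above the hole at the 16MB boundary */
	uvm_page_physload(atop(hole_end), atop(16 * 1024 * 1024),
	    atop(hole_end), atop(16 * 1024 * 1024), first16q);
	uvm_page_physload(atop(16 * 1024 * 1024), atop(avail_end),
	    atop(16 * 1024 * 1024), atop(avail_end), VM_FREELIST_DEFAULT);
} else {
	uvm_page_physload(atop(hole_end), atop(avail_end),
	    atop(hole_end), atop(avail_end), first16q);
}
```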