Diffstat (limited to 'sys')
 sys/arch/amiga/amiga/pmap.c | 168 +++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 165 insertions(+), 3 deletions(-)
diff --git a/sys/arch/amiga/amiga/pmap.c b/sys/arch/amiga/amiga/pmap.c
index 45777618543..524480c732b 100644
--- a/sys/arch/amiga/amiga/pmap.c
+++ b/sys/arch/amiga/amiga/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.19 2000/05/27 20:14:18 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.20 2000/05/27 21:17:59 art Exp $ */
/* $NetBSD: pmap.c,v 1.39 1997/06/10 18:26:41 veego Exp $ */
/*
@@ -85,6 +85,9 @@
#include <sys/user.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
+#if defined(UVM)
+#include <uvm/uvm.h>
+#endif
#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/vmparam.h>
@@ -235,7 +238,10 @@ u_int *Segtabzero, *Segtabzeropa;
vm_size_t Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;
struct pmap kernel_pmap_store;
-vm_map_t pt_map;
+vm_map_t st_map, pt_map;
+#if defined(UVM)
+struct vm_map st_map_store, pt_map_store;
+#endif
vm_size_t mem_size; /* memory size in bytes */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
@@ -325,8 +331,13 @@ pmap_bootstrap(firstaddr, loadaddr)
* first segment of memory is always the one loadbsd found
* for loading the kernel into.
*/
+#if defined(UVM)
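+ /*
+  * uvm_page_physload() takes a fifth argument naming the
+  * freelist this segment feeds; the first segment goes on
+  * the default list.
+  */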
+ uvm_page_physload(atop(fromads), atop(toads),
+ atop(fromads), atop(toads), VM_FREELIST_DEFAULT);
+#else
vm_page_physload(atop(fromads), atop(toads),
atop(fromads), atop(toads));
+#endif
sp = memlist->m_seg;
esp = sp + memlist->m_nseg;
@@ -366,8 +377,14 @@ pmap_bootstrap(firstaddr, loadaddr)
if ((fromads <= z2mem_start) && (toads > z2mem_start))
toads = z2mem_start;
+#if defined(UVM)
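+ /*
+  * Segments lying entirely within the 24-bit Zorro II
+  * address space (no bits set above 0x00ffffff) go on their
+  * own freelist, so Zorro II DMA-able memory can be handed
+  * out preferentially.
+  */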
+ uvm_page_physload(atop(fromads), atop(toads),
+ atop(fromads), atop(toads), (fromads & 0xff000000) ?
+ VM_FREELIST_DEFAULT : VM_FREELIST_ZORROII);
+#else
vm_page_physload(atop(fromads), atop(toads),
atop(fromads), atop(toads));
+#endif
physmem += (toads - fromads) / NBPG;
++i;
if (noncontig_enable == 1)
@@ -448,6 +465,30 @@ pmap_init()
* unavailable regions which we have mapped in locore.
* XXX in pmap_bootstrap() ???
*/
+#if defined(UVM)
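+ /*
+  * Reserve the hardware register area and the Sysmap range,
+  * both already mapped in locore, as fixed PROT_NONE entries
+  * so UVM never allocates that virtual space to anyone else.
+  */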
+ addr = (vm_offset_t) amigahwaddr;
+ if (uvm_map(kernel_map, &addr,
+ ptoa(namigahwpg),
+ NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED)) != KERN_SUCCESS)
+ goto bogons;
+ addr = (vm_offset_t) Sysmap;
+ if (uvm_map(kernel_map, &addr, AMIGA_KPTSIZE,
+ NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+ UVM_INH_NONE, UVM_ADV_RANDOM,
+ UVM_FLAG_FIXED)) != KERN_SUCCESS) {
+ /*
+ * If this fails, it is probably because the static
+ * portion of the kernel page table isn't big enough
+ * and we overran the page table map.
+ */
+bogons:
+ panic("pmap_init: bogons in the VM system!\n");
+ }
+#else
addr = amigahwaddr;
(void)vm_map_find(kernel_map, NULL, 0, &addr, ptoa(namigahwpg), FALSE);
if (addr != amigahwaddr)
@@ -464,6 +505,7 @@ pmap_init()
*/
if (addr != (vm_offset_t)Sysmap)
panic("pmap_init: bogons in the VM system!");
+#endif
#ifdef DEBUG
if (pmapdebug & PDB_INIT) {
printf("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
@@ -493,7 +535,11 @@ pmap_init()
s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
+#if defined(UVM)
+ addr = (vaddr_t)uvm_km_zalloc(kernel_map, s);
+#else
addr = (vm_offset_t)kmem_alloc(kernel_map, s);
+#endif
Segtabzero = (u_int *)addr;
Segtabzeropa = (u_int *)pmap_extract(pmap_kernel(), addr);
@@ -546,17 +592,34 @@ pmap_init()
* Verify that space will be allocated in region for which
* we already have kernel PT pages.
*/
+#if defined(UVM)
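+ /*
+  * This map/unmap pair is only a probe: allocate s bytes
+  * anywhere, check that the space found still lies below
+  * Sysmap, then release it again.
+  */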
+ addr = 0;
+ rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+ UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
+ if (rv != KERN_SUCCESS || (addr + s) >= (vm_offset_t)Sysmap)
+ panic("pmap_init: kernel PT too small");
+ rv = uvm_unmap(kernel_map, addr, addr + s, FALSE);
+ if (rv != KERN_SUCCESS)
+ panic("pmap_init: uvm_unmap failed");
+#else
addr = 0;
rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
panic("pmap_init: kernel PT too small");
vm_map_remove(kernel_map, addr, addr + s);
-
+#endif
/*
* Now allocate the space and link the pages together to
* form the KPT free list.
*/
+#if defined(UVM)
+ addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
+ if (addr == 0)
+ panic("pmap_init: cannot allocate KPT free list");
+#else
addr = (vm_offset_t)kmem_alloc(kernel_map, s);
+#endif
s = ptoa(npages);
addr2 = addr + s;
kpt_pages = &((struct kpt_page *)addr2)[npages];
@@ -576,6 +639,29 @@ pmap_init()
addr, addr + s);
#endif
+#if defined(UVM)
+ /*
+ * Allocate the segment table map and the page table map.
+ */
+ s = maxproc * AMIGA_STSIZE;
+ st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE,
+ FALSE, &st_map_store);
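+ /*
+  * The &st_map_store/&pt_map_store arguments make
+  * uvm_km_suballoc() use static storage for the submap
+  * rather than allocating a map structure.
+  */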
+
+ addr = AMIGA_UPTBASE;
+ if ((AMIGA_UPTMAXSIZE / AMIGA_MAX_PTSIZE) < maxproc) {
+ s = AMIGA_UPTMAXSIZE;
+ /*
+ * XXX We don't want to hang when we run out of
+ * page tables, so we lower maxproc so that fork()
+ * will fail instead. Note that root could still raise
+ * this value via sysctl(2).
+ */
+ maxproc = (AMIGA_UPTMAXSIZE / AMIGA_MAX_PTSIZE);
+ } else
+ s = (maxproc * AMIGA_MAX_PTSIZE);
+ pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE,
+ TRUE, &pt_map_store);
+#else
/*
* Slightly modified version of kmem_suballoc() to get page table
* map where we want it.
@@ -598,6 +684,7 @@ pmap_init()
if (pmapdebug & PDB_INIT)
printf("pmap_init: pt_map [%lx - %lx)\n", addr, addr2);
#endif
+#endif /* UVM */
#if defined(M68040) || defined(M68060)
if (mmutype == MMU_68040) {
@@ -645,9 +732,15 @@ pmap_alloc_pv()
int i;
if (pv_nfree == 0) {
+#if defined(UVM)
+ pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
+ if (pvp == 0)
+ panic("pmap_alloc_pv: uvm_km_zalloc() failed");
+#else
pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
if (pvp == 0)
panic("pmap_alloc_pv: kmem_alloc() failed");
+#endif
pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
for (i = NPVPPG - 2; i; i--, pv++)
pv->pv_next = pv + 1;
@@ -689,7 +782,11 @@ pmap_free_pv(pv)
case NPVPPG:
pv_nfree -= NPVPPG - 1;
TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+#if defined(UVM)
+ uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#else
kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#endif
break;
}
}
@@ -751,7 +848,11 @@ pmap_collect_pv()
for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
npvp = pvp->pvp_pgi.pgi_list.tqe_next;
+#if defined(UVM)
+ uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#else
kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#endif
}
}
#endif
@@ -899,12 +1000,21 @@ pmap_release(pmap)
if (pmap->pm_count != 1)
panic("pmap_release count");
#endif
+#if defined(UVM)
+ if (pmap->pm_ptab)
+ uvm_km_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
+ AMIGA_UPTSIZE);
+ if (pmap->pm_stab != Segtabzero)
+ uvm_km_free_wakeup(kernel_map, (vm_offset_t)pmap->pm_stab,
+ AMIGA_STSIZE);
+#else
if (pmap->pm_ptab)
kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
AMIGA_UPTSIZE);
if (pmap->pm_stab != Segtabzero)
kmem_free_wakeup(kernel_map, (vm_offset_t)pmap->pm_stab,
AMIGA_STSIZE);
+#endif
}
/*
@@ -1008,8 +1118,13 @@ pmap_remove(pmap, sva, eva)
*/
if (pmap != pmap_kernel()) {
pte = pmap_pte(pmap, va);
+#if defined(UVM)
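+ /*
+  * TRUE = make the page holding this PTE pageable again
+  * (unwire it) now that the mapping is gone.
+  */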
+ uvm_map_pageable(pt_map, trunc_page(pte),
+ round_page(pte+1), TRUE);
+#else
vm_map_pageable(pt_map, trunc_page(pte),
round_page(pte+1), TRUE);
+#endif
#ifdef DEBUG
if (pmapdebug & PDB_WIRING)
pmap_check_wiring("remove", trunc_page(pte));
@@ -1128,9 +1243,15 @@ pmap_remove(pmap, sva, eva)
printf("remove: free stab %p\n",
ptpmap->pm_stab);
#endif
+#if defined(UVM)
+ uvm_km_free_wakeup(kernel_map,
+ (vm_offset_t)ptpmap->pm_stab,
+ AMIGA_STSIZE);
+#else
kmem_free_wakeup(kernel_map,
(vm_offset_t)ptpmap->pm_stab,
AMIGA_STSIZE);
+#endif
ptpmap->pm_stab = Segtabzero;
ptpmap->pm_stpa = Segtabzeropa;
#if defined(M68040) || defined(M68060)
@@ -1352,8 +1473,13 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* For user mapping, allocate kernel VM resources if necessary.
*/
if (pmap->pm_ptab == NULL)
+#if defined(UVM)
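+ /*
+  * uvm_km_valloc_wait() reserves virtual space in pt_map
+  * only, sleeping until space is available; the PT pages
+  * themselves are faulted in by pmap_enter_ptpage().
+  */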
+ pmap->pm_ptab = (pt_entry_t *)
+ uvm_km_valloc_wait(pt_map, AMIGA_UPTSIZE);
+#else
pmap->pm_ptab = (u_int *)
kmem_alloc_wait(pt_map, AMIGA_UPTSIZE);
+#endif
/*
* Segment table entry not valid, we need a new PT page
@@ -1425,8 +1551,13 @@ pmap_enter(pmap, va, pa, prot, wired, access_type)
* is a valid mapping in the page.
*/
if (pmap != pmap_kernel())
+#if defined(UVM)
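+ /*
+  * FALSE = wire the page holding this PTE down for as long
+  * as the mapping exists.
+  */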
+ uvm_map_pageable(pt_map, trunc_page(pte),
+ round_page(pte+1), FALSE);
+#else
vm_map_pageable(pt_map, trunc_page(pte),
round_page(pte+1), FALSE);
+#endif
/*
* Enter on the PV list if part of our managed memory
@@ -2192,10 +2323,15 @@ pmap_changebit(pa, bit, setem)
* XXX don't write protect pager mappings
*/
if (bit == PG_RO) {
+#if defined(UVM)
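+ /*
+  * UVM exports the pager VA range through the global uvm
+  * structure instead of separate pager_sva/pager_eva symbols.
+  */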
+ if (va >= uvm.pager_sva && va < uvm.pager_eva)
+ continue;
+#else
extern vm_offset_t pager_sva, pager_eva;
if (va >= pager_sva && va < pager_eva)
continue;
+#endif
}
pte = (int *)pmap_pte(pv->pv_pmap, va);
@@ -2252,8 +2388,13 @@ pmap_enter_ptpage(pmap, va)
* reference count drops to zero.
*/
if (pmap->pm_stab == Segtabzero) {
+#if defined(UVM)
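+ /*
+  * uvm_km_zalloc() hands back zero-filled wired memory, so
+  * every entry of the new segment table starts out invalid.
+  */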
+ pmap->pm_stab = (st_entry_t *)
+ uvm_km_zalloc(kernel_map, AMIGA_STSIZE);
+#else
pmap->pm_stab = (u_int *)
kmem_alloc(kernel_map, AMIGA_STSIZE);
+#endif
pmap->pm_stpa = (u_int *)pmap_extract(
pmap_kernel(), (vm_offset_t)pmap->pm_stab);
#if defined(M68040) || defined(M68060)
@@ -2403,13 +2544,27 @@ pmap_enter_ptpage(pmap, va)
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
printf("enter_pt: about to fault UPT pg at %lx\n", va);
#endif
+#if defined(UVM)
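+ /*
+  * Fault the new PT page into pt_map here so that its
+  * physical address can be extracted below.
+  */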
+ if (uvm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
+ != KERN_SUCCESS)
+ panic("pmap_enter: uvm_fault failed");
+#else
if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
!= KERN_SUCCESS)
panic("pmap_enter: vm_fault failed");
+#endif
ptpa = pmap_extract(pmap_kernel(), va);
+ /*
+ * Mark the page clean now to avoid its pageout (and
+ * hence creation of a pager) between now and when it
+ * is wired; i.e. while it is on a paging queue.
+ */
+ PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
+#if !defined(UVM)
#ifdef DEBUG
PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
#endif
+#endif
}
#ifdef M68060
@@ -2513,10 +2668,17 @@ pmap_check_wiring(str, va)
!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
return;
+#if defined(UVM)
+ if (!uvm_map_lookup_entry(pt_map, va, &entry)) {
+ printf("wired_check: entry for %lx not found\n", va);
+ return;
+ }
+#else
if (!vm_map_lookup_entry(pt_map, va, &entry)) {
printf("wired_check: entry for %lx not found\n", va);
return;
}
+#endif
count = 0;
for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
if (*pte)