author:    Michael Shalayeff <mickey@cvs.openbsd.org>  2002-07-18 04:35:04 +0000
committer: Michael Shalayeff <mickey@cvs.openbsd.org>  2002-07-18 04:35:04 +0000
commit:    4ecc3f854ec00bbfaf1815d308f11fca93183e1e
tree:      5d141a9312c807234e33b9be02490d7cb73c48f9 /sys/arch
parent:    a12af22dd53de0816c7e2b0d0a2932e41984d69c
Make the number of pages reserved for the pv_pool vary with physmem (should pool get preloading in the future, this is to be changed immediately). Leave the page above the kernel stack unmapped (red zone).
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/hppa/hppa/locore.S |  4
-rw-r--r--  sys/arch/hppa/hppa/pmap.c   | 37
2 files changed, 27 insertions(+), 14 deletions(-)
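The first pmap.c change makes the pv-page reserve scale with installed memory instead of being a single page: one page is set aside for roughly every 16K physical pages (see the pmap_bootstrap() hunk below). A minimal, self-contained C sketch of that sizing arithmetic, assuming totalphysmem counts 4KB physical pages and using a hypothetical 512MB machine:

#include <stdio.h>

#define PAGE_SIZE	4096	/* hppa NBPG */

int
main(void)
{
	unsigned long totalphysmem = 131072;	/* hypothetical: 512MB / 4KB pages */

	/* one reserved pv page per 16K pages of physical memory */
	unsigned long reserve = (totalphysmem / (16 * 1024)) * PAGE_SIZE;

	printf("pv reserve: %lu bytes (%lu pages)\n",
	    reserve, reserve / PAGE_SIZE);
	return (0);
}

On this hypothetical machine the reserve comes out to 8 pages (32KB), compared with the single page reserved before the change.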
diff --git a/sys/arch/hppa/hppa/locore.S b/sys/arch/hppa/hppa/locore.S
index bb5754c6f28..831af05ee89 100644
--- a/sys/arch/hppa/hppa/locore.S
+++ b/sys/arch/hppa/hppa/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.70 2002/06/09 02:50:29 mickey Exp $ */
+/* $OpenBSD: locore.S,v 1.71 2002/07/18 04:35:03 mickey Exp $ */
/*
* Copyright (c) 1998-2002 Michael Shalayeff
@@ -199,7 +199,7 @@ $start_zero_tf
stw r0, U_PCB+PCB_ONFAULT(arg3)
stw r0, U_PCB+PCB_SPACE(arg3) /* XXX HPPA_SID_KERNEL == 0 */
stw arg3, U_PCB+PCB_UVA(arg3)
- ldil L%USPACE, arg0
+ ldil L%(USPACE+NBPG), arg0 /* normal U plus red zone */
add arg0, arg3, arg0
ldil L%proc0paddr, t1
stw arg3, R%proc0paddr(t1)
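The locore.S hunk grows the space set aside for proc0's u-area from USPACE to USPACE + NBPG; the extra page is the red zone mentioned in the commit message. It is deliberately never mapped (see the pmap_bootstrap() hunk further down), so running off the end of the kernel stack faults immediately instead of scribbling over adjacent memory. A hedged C sketch of the address layout, with assumed values for NBPG and USPACE:

#define NBPG	4096			/* hppa page size */
#define USPACE	(4 * NBPG)		/* assumed u-area + kernel stack size */

/*
 * The red-zone page starts right above proc0's u-area and stays unmapped;
 * the next usable kernel address is one page beyond it, matching the
 * ldil L%(USPACE+NBPG) computation in the assembly above.
 */
unsigned long
first_addr_past_proc0(unsigned long proc0paddr)
{
	unsigned long redzone = proc0paddr + USPACE;	/* left unmapped */

	return (redzone + NBPG);
}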
diff --git a/sys/arch/hppa/hppa/pmap.c b/sys/arch/hppa/hppa/pmap.c
index 1ba51e301a2..a5cb8594a37 100644
--- a/sys/arch/hppa/hppa/pmap.c
+++ b/sys/arch/hppa/hppa/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.77 2002/07/17 22:08:07 mickey Exp $ */
+/* $OpenBSD: pmap.c,v 1.78 2002/07/18 04:35:03 mickey Exp $ */
/*
* Copyright (c) 1998-2002 Michael Shalayeff
@@ -89,7 +89,7 @@ int pmapdebug = 0
vaddr_t virtual_avail;
paddr_t physical_steal, physical_end;
-vaddr_t pmap_pv_page;
+vaddr_t pmap_pv_page, pmap_pv_page_end;
#if defined(HP7100LC_CPU) || defined(HP7300LC_CPU)
int pmap_hptsize = 256; /* patchable */
@@ -200,13 +200,18 @@ pmap_pde_alloc(struct pmap *pm, vaddr_t va, struct vm_page **pdep)
/* special hacking for pre-mapping the kernel */
if (!pmap_initialized) {
+ register u_int32_t sm;
+
if (physical_steal >= physical_end)
panic("pmap_pde_alloc: out of steallage");
pa = physical_steal;
physical_steal += PAGE_SIZE;
- bzero((void *)pa, PAGE_SIZE);
- fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
+ rsm(0, sm);
+ if (sm & PSW_D)
+ pmap_zero_page(pa);
+ else
+ bzero((void *)pa, PAGE_SIZE);
pmap_pde_set(pm, va, pa);
pm->pm_stats.resident_count++; /* count PTP as resident */
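During bootstrap, pmap_pde_alloc() steals physical pages for page-table pages before the VM system is initialized. The new code reads the PSW (via rsm) and only clears the stolen page with a direct bzero() of its physical address while data translation (PSW_D) is still off; once translation is enabled it goes through pmap_zero_page() instead, which zeroes the page through a mapping. A rough C sketch of that decision, with the PSW access and kernel primitives stubbed out as assumptions:

#include <string.h>

#define PAGE_SIZE	4096
#define PSW_D		0x02000000	/* assumed encoding of the data-translation bit */

/* stand-ins for the real hppa/kernel primitives */
extern unsigned int read_psw(void);		/* the diff uses rsm(0, sm) */
extern void pmap_zero_page(unsigned long pa);	/* zeroes a page via a temporary mapping */

static void
zero_stolen_page(unsigned long pa)
{
	if (read_psw() & PSW_D)
		pmap_zero_page(pa);		/* translation on: pa is not directly addressable */
	else
		memset((void *)pa, 0, PAGE_SIZE); /* translation off: physical address is usable as-is */
}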
@@ -292,7 +297,7 @@ pmap_pte_set(pt_entry_t *pde, vaddr_t va, pt_entry_t pte)
if (!pde)
panic("pmap_pte_set: zero pde");
- if (pte && pte < physical_end &&
+ if (pte && pmap_initialized && pte < physical_end &&
hppa_trunc_page(pte) != (paddr_t)&gateway_page)
panic("pmap_pte_set: invalid pte");
@@ -528,7 +533,8 @@ pmap_bootstrap(vstart)
addr = hppa_round_page(addr);
pmap_pv_page = addr;
- addr += PAGE_SIZE;
+ pmap_pv_page_end =
+ addr += (totalphysmem / (16 * 1024)) * PAGE_SIZE;
size = hppa_round_page(sizeof(struct pv_head) * totalphysmem);
bzero ((caddr_t)addr, size);
virtual_avail = addr + size;
@@ -542,12 +548,19 @@ pmap_bootstrap(vstart)
vm_physmem[0].pmseg.pvhead = (struct pv_head *)addr;
addr += size;
+ /* map .text to avoid reloading the btlb on heavy faults */
+ for (va = 0; va < (vaddr_t)&etext1; va += PAGE_SIZE)
+ pmap_kenter_pa(va, va, UVM_PROT_RX);
/* now we know how much to map */
- for (va = (vaddr_t)&etext1; va < addr; va += PAGE_SIZE)
- pmap_kenter_pa(va, va, UVM_PROT_RW);
+ for (; va < addr; va += PAGE_SIZE) {
+ extern struct user *proc0paddr;
+
+ pmap_kenter_pa(va, va,
+ (va == (vaddr_t)proc0paddr + USPACE)?
+ UVM_PROT_NONE : UVM_PROT_RW);
+ }
- DPRINTF(PDB_INIT, ("bootstrap: mapped %p - 0x%x\n",
- &etext1, addr));
+ DPRINTF(PDB_INIT, ("bootstrap: mapped %p - 0x%x\n", &etext1, addr));
}
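The bootstrap mapping loop is now split in two: kernel text (virtual address 0 up to etext1) is entered read-only and executable so that heavy faulting does not keep reloading the BTLB entry, and the remaining bootstrap data is entered read-write, except for the one red-zone page above proc0's u-area, which gets no permissions at all. A simplified C sketch of that control flow, using placeholder names for the kernel symbols involved:

/* placeholder protections and prototypes; the real ones come from uvm/pmap */
enum prot { PROT_NONE, PROT_RX, PROT_RW };

extern void kenter(unsigned long va, unsigned long pa, enum prot prot);
extern unsigned long etext1_va, bootstrap_end, proc0paddr_va, uspace, pgsz;

static void
map_bootstrap_range(void)
{
	unsigned long va;

	/* kernel text: executable, never writable */
	for (va = 0; va < etext1_va; va += pgsz)
		kenter(va, va, PROT_RX);

	/* data and early allocations: writable, except the red-zone page */
	for (; va < bootstrap_end; va += pgsz)
		kenter(va, va,
		    va == proc0paddr_va + uspace ? PROT_NONE : PROT_RW);
}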
void
@@ -1264,9 +1277,9 @@ pmap_pv_page_alloc(struct pool *pp, int flags)
DPRINTF(PDB_FOLLOW|PDB_POOL,
("pmap_pv_page_alloc(%p, %x)\n", pp, flags));
- if (pmap_pv_page) {
+ if (pmap_pv_page < pmap_pv_page_end) {
void *v = (void *)pmap_pv_page;
- pmap_pv_page = 0;
+ pmap_pv_page += PAGE_SIZE;
return (v);
}
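With a whole reserved range instead of a single page, pmap_pv_page_alloc() becomes a bump allocator: it hands out pages from [pmap_pv_page, pmap_pv_page_end) until the reserve is exhausted, after which the function falls through to its normal allocation path (not shown in the hunk). A small self-contained sketch of the same bump-allocation pattern:

#include <stddef.h>

#define PAGE_SIZE	4096

/* reserved, already-mapped region set up at bootstrap time */
static unsigned long pv_page;		/* next free page in the reserve */
static unsigned long pv_page_end;	/* first address past the reserve */

/* hand out one page from the reserve, or NULL once it is used up */
static void *
pv_page_alloc(void)
{
	if (pv_page < pv_page_end) {
		void *v = (void *)pv_page;

		pv_page += PAGE_SIZE;
		return (v);
	}
	return (NULL);	/* caller falls back to the regular allocator */
}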