author		Artur Grabowski <art@cvs.openbsd.org>	2001-03-16 09:06:04 +0000
committer	Artur Grabowski <art@cvs.openbsd.org>	2001-03-16 09:06:04 +0000
commit		95f138edd63968725ea7388add15c9e46089832a (patch)
tree		2072b0da57e608919cf5217ee6ef971d1a2ae6fb /sys/arch
parent		a62b60c2323478cd3f35a8fd5e92aec617c74774 (diff)
Implement pmap_growkernel. From NetBSD.
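For context, the interface this commit fills in: when PMAP_GROWKERNEL is defined, the machine-independent VM layer may call pmap_growkernel() with the highest kernel virtual address it is about to map; the pmap pre-allocates the kernel PT pages needed to reach that address and returns the new end of mappable kernel VA. A minimal caller-side sketch, assuming the uvm_maxkaddr bookkeeping used by the UVM kernel-map path of that era (the call site below is paraphrased, not quoted from this commit):

#ifdef PMAP_GROWKERNEL
	/*
	 * Sketch: before carving a new range out of kernel_map, make
	 * sure the pmap has PT pages covering the end of that range.
	 * map, start and size are the kernel-map allocation parameters.
	 */
	if (map == kernel_map && uvm_maxkaddr < (start + size))
		uvm_maxkaddr = pmap_growkernel(start + size);
#endif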
Diffstat (limited to 'sys/arch')
-rw-r--r--	sys/arch/alpha/alpha/pmap.c	116
-rw-r--r--	sys/arch/alpha/include/pmap.h	3
2 files changed, 113 insertions, 6 deletions
diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c
index b9a8aeec24a..4820d12bca1 100644
--- a/sys/arch/alpha/alpha/pmap.c
+++ b/sys/arch/alpha/alpha/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.9 2001/03/04 13:37:44 art Exp $ */
+/* $OpenBSD: pmap.c,v 1.10 2001/03/16 09:06:03 art Exp $ */
 /* $NetBSD: pmap.c,v 1.132 2000/05/23 05:12:54 thorpej Exp $ */
 
 /*-
@@ -379,6 +379,9 @@ u_long	pmap_asn_generation[ALPHA_MAXPROCS];	/* current ASN generation */
  *	  all pmaps.  Note that a pm_slock must never be held while this
  *	  lock is held.
  *
+ *	* pmap_growkernel_slock - This lock protects pmap_growkernel()
+ *	  and the virtual_end variable.
+ *
  *	Address space number management (global ASN counters and per-pmap
  *	ASN state) are not locked; they use arrays of values indexed
  *	per-processor.
@@ -389,6 +392,7 @@ u_long	pmap_asn_generation[ALPHA_MAXPROCS];	/* current ASN generation */
  */
 struct lock pmap_main_lock;
 struct simplelock pmap_all_pmaps_slock;
+struct simplelock pmap_growkernel_slock;
 
 #ifdef __OpenBSD__
 #define spinlockinit(lock, name, flags) lockinit(lock, 0, name, 0, flags)
@@ -876,6 +880,9 @@ pmap_bootstrap(ptaddr, maxasn, ncpuids)
 		    (i*PAGE_SIZE*NPTEPG))] = pte;
 	}
 
+	/* Initialize the pmap_growkernel_slock. */
+	simple_lock_init(&pmap_growkernel_slock);
+
 	/*
 	 * Set up level three page table (lev3map)
 	 */
@@ -889,7 +896,7 @@ pmap_bootstrap(ptaddr, maxasn, ncpuids)
 	avail_start = ptoa(vm_physmem[0].start);
 	avail_end = ptoa(vm_physmem[vm_nphysseg - 1].end);
 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
-	virtual_end = VM_MIN_KERNEL_ADDRESS + lev3mapsize * NBPG;
+	virtual_end = VM_MIN_KERNEL_ADDRESS + lev3mapsize * PAGE_SIZE;
 
 #if 0
 	printf("avail_start = 0x%lx\n", avail_start);
@@ -1088,7 +1095,7 @@ pmap_steal_memory(size, vstartp, vendp)
 	if (vstartp)
 		*vstartp = round_page(virtual_avail);
 	if (vendp)
-		*vendp = trunc_page(virtual_end);
+		*vendp = VM_MAX_KERNEL_ADDRESS;
 
 	va = ALPHA_PHYS_TO_K0SEG(pa);
 	bzero((caddr_t)va, size);
@@ -3390,9 +3397,108 @@ pmap_physpage_delref(kva)
  *
  *	Grow the kernel address space.  This is a hint from the
  *	upper layer to pre-allocate more kernel PT pages.
- *
- *	XXX Implement XXX
  */
+vaddr_t
+pmap_growkernel(vaddr_t maxkvaddr)
+{
+	struct pmap *kpm = pmap_kernel(), *pm;
+	paddr_t ptaddr;
+	pt_entry_t *l1pte, *l2pte, pte;
+	vaddr_t va;
+	int s, l1idx;
+
+	if (maxkvaddr <= virtual_end)
+		goto out;		/* we are OK */
+
+	s = splhigh();			/* to be safe */
+	simple_lock(&pmap_growkernel_slock);
+
+	va = virtual_end;
+
+	while (va < maxkvaddr) {
+		/*
+		 * If there is no valid L1 PTE (i.e. no L2 PT page),
+		 * allocate a new L2 PT page and insert it into the
+		 * L1 map.
+		 */
+		l1pte = pmap_l1pte(kpm, va);
+		if (pmap_pte_v(l1pte) == 0) {
+			/*
+			 * XXX PGU_NORMAL?  It's not a "traditional" PT page.
+			 */
+#ifdef notyet
+			if (uvm.page_init_done == FALSE) {
+#else
+			if (vm_physmem[0].pgs == NULL) {
+#endif
+				/*
+				 * We're growing the kernel pmap early (from
+				 * uvm_pageboot_alloc()).  This case must
+				 * be handled a little differently.
+				 */
+				ptaddr = ALPHA_K0SEG_TO_PHYS(
+				    pmap_steal_memory(PAGE_SIZE, NULL, NULL));
+			} else if (pmap_physpage_alloc(PGU_NORMAL,
+			    &ptaddr) == FALSE)
+				goto die;
+			pte = (atop(ptaddr) << PG_SHIFT) |
+			    PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED;
+			*l1pte = pte;
+
+			l1idx = l1pte_index(va);
+
+			/* Update all the user pmaps. */
+			simple_lock(&pmap_all_pmaps_slock);
+			for (pm = TAILQ_FIRST(&pmap_all_pmaps);
+			     pm != NULL; pm = TAILQ_NEXT(pm, pm_list)) {
+				/* Skip the kernel pmap. */
+				if (pm == pmap_kernel())
+					continue;
+
+				PMAP_LOCK(pm);
+				if (pm->pm_lev1map == kernel_lev1map) {
+					PMAP_UNLOCK(pm);
+					continue;
+				}
+				pm->pm_lev1map[l1idx] = pte;
+				PMAP_UNLOCK(pm);
+			}
+			simple_unlock(&pmap_all_pmaps_slock);
+		}
+
+		/*
+		 * Have an L2 PT page now, add the L3 PT page.
+		 */
+		l2pte = pmap_l2pte(kpm, va, l1pte);
+		KASSERT(pmap_pte_v(l2pte) == 0);
+#ifdef notyet
+		if (uvm.page_init_done == FALSE) {
+#else
+		if (vm_physmem[0].pgs == NULL) {
+#endif
+			/*
+			 * See above.
+			 */
+			ptaddr = ALPHA_K0SEG_TO_PHYS(
+			    pmap_steal_memory(PAGE_SIZE, NULL, NULL));
+		} else if (pmap_physpage_alloc(PGU_NORMAL, &ptaddr) == FALSE)
+			goto die;
+		*l2pte = (atop(ptaddr) << PG_SHIFT) |
+		    PG_V | PG_ASM | PG_KRE | PG_KWE | PG_WIRED;
+		va += ALPHA_L2SEG_SIZE;
+	}
+
+	virtual_end = va;
+
+	simple_unlock(&pmap_growkernel_slock);
+	splx(s);
+
+ out:
+	return (virtual_end);
+
+ die:
+	panic("pmap_growkernel: out of memory");
+}
 
 /*
  * pmap_lev1map_create:
diff --git a/sys/arch/alpha/include/pmap.h b/sys/arch/alpha/include/pmap.h
index ad9aedf06b2..007931c4723 100644
--- a/sys/arch/alpha/include/pmap.h
+++ b/sys/arch/alpha/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.6 2000/11/08 21:27:21 ericj Exp $ */
+/* $OpenBSD: pmap.h,v 1.7 2001/03/16 09:06:02 art Exp $ */
 /* $NetBSD: pmap.h,v 1.35 2000/06/08 03:10:06 thorpej Exp $ */
 
 /*-
@@ -192,6 +192,7 @@ void	pmap_do_tlb_shootdown(void);
 extern	pt_entry_t *VPT;		/* Virtual Page Table */
 
 #define	PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
+#define	PMAP_GROWKERNEL		/* enable pmap_growkernel() */
 
 /*
  * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
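A note on the growth step in the loop above: each iteration wires in one L3 PT page, and a single L3 PT page maps ALPHA_L2SEG_SIZE bytes of kernel VA, which is why va advances by that amount. Assuming the alpha's usual 8 KB page size and 8-byte PTEs (an assumption here, not quoted from pte.h), the arithmetic works out as follows:

/*
 * Sketch of why one pass of the loop advances va by ALPHA_L2SEG_SIZE.
 * The SKETCH_* names are illustrative stand-ins, not the real macros.
 */
#define SKETCH_PAGE_SIZE	(1UL << 13)			/* 8 KB pages */
#define SKETCH_NPTEPG		(SKETCH_PAGE_SIZE / 8)		/* 1024 PTEs per PT page */
#define SKETCH_L2SEG_SIZE	(SKETCH_NPTEPG * SKETCH_PAGE_SIZE)	/* 8 MB of KVA per L3 PT page */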