author     Michael Shalayeff <mickey@cvs.openbsd.org>  1998-04-25 20:31:38 +0000
committer  Michael Shalayeff <mickey@cvs.openbsd.org>  1998-04-25 20:31:38 +0000
commit     54701e3e035b78e17d77c7002543a05ff7d84cfb (patch)
tree       98df59b339290c3845d5b978c00dd10e7e2268f7 /sys
parent     e8dd2cc15e205b9739ac6b6b7c8f16e863727869 (diff)
convert i386 to MNN
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/i386/i386/machdep.c        21
-rw-r--r--  sys/arch/i386/i386/pmap.c          244
-rw-r--r--  sys/arch/i386/i386/pmap.old.c      244
-rw-r--r--  sys/arch/i386/include/pmap.h         4
-rw-r--r--  sys/arch/i386/include/pmap.old.h     4
-rw-r--r--  sys/arch/i386/include/vmparam.h     31
6 files changed, 490 insertions, 58 deletions
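
Background for the diff that follows: MNN (MACHINE_NEW_NONCONTIG) replaces the single pmap_page_index()-indexed pv_table[] and pmap_attributes[] arrays with an array of physical memory segments, where each segment carries its own pv-entry and attribute arrays (vm_physmem[bank].pmseg.pvent and .attrs), located with vm_physseg_find(). The sketch below is a simplified, self-contained userland model of that layout and lookup; the names mirror the patch, but none of this is the kernel implementation. The two segments registered in the model mirror the below-hole and above-hole RAM ranges that pmap_bootstrap() hands to vm_page_physload() later in the diff.

/*
 * Simplified userland model of the MACHINE_NEW_NONCONTIG layout this
 * patch converts i386 to: RAM is a set of segments, and each segment
 * carries its own pv-entry and attribute arrays.  Illustrative only;
 * the real vm_physmem[]/vm_physseg_find() live in the VM system.
 */
#include <stdio.h>
#include <stdlib.h>

struct pv_entry { struct pv_entry *pv_next; };

struct phys_seg {
	unsigned long start, end;	/* page frame numbers */
	struct pv_entry *pvent;		/* one pv head per page */
	char *attrs;			/* one attribute byte per page */
};

static struct phys_seg physmem[4];	/* cf. VM_PHYSSEG_MAX in vmparam.h */
static int nphysseg;

/* analogue of vm_physseg_find(): map a page frame to (segment, offset) */
static int
physseg_find(unsigned long pfn, int *offp)
{
	int bank;

	for (bank = 0; bank < nphysseg; bank++)
		if (pfn >= physmem[bank].start && pfn < physmem[bank].end) {
			*offp = (int)(pfn - physmem[bank].start);
			return bank;
		}
	return -1;
}

int
main(void)
{
	int bank, off;

	/* two segments, mimicking RAM below and above the ISA hole */
	physmem[0].start = 0x001; physmem[0].end = 0x0a0;
	physmem[1].start = 0x100; physmem[1].end = 0x2000;
	nphysseg = 2;

	for (bank = 0; bank < nphysseg; bank++) {
		size_t npg = physmem[bank].end - physmem[bank].start;
		physmem[bank].pvent = calloc(npg, sizeof(struct pv_entry));
		physmem[bank].attrs = calloc(npg, 1);
	}

	bank = physseg_find(0x123, &off);
	printf("pfn 0x123 -> bank %d off %d\n", bank, off);
	bank = physseg_find(0xb8, &off);	/* inside the ISA hole */
	printf("pfn 0xb8  -> bank %d\n", bank);
	return 0;
}
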
diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c
index 8acec722ba6..9b5699e9c3e 100644
--- a/sys/arch/i386/i386/machdep.c
+++ b/sys/arch/i386/i386/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.84 1998/03/04 07:22:02 downsj Exp $ */
+/* $OpenBSD: machdep.c,v 1.85 1998/04/25 20:31:27 mickey Exp $ */
/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
/*-
@@ -196,8 +196,10 @@ bootarg_t *bootargp;
vm_map_t buffer_map;
extern vm_offset_t avail_start, avail_end;
-static vm_offset_t hole_start, hole_end;
+vm_offset_t hole_start, hole_end;
+#if !defined(MACHINE_NEW_NONCONTIG)
static vm_offset_t avail_next;
+#endif
/*
* Extent maps to manage I/O and ISA memory hole space. Allocate
@@ -1638,8 +1640,10 @@ init386(first_avail)
* BIOS leaves data in low memory and VM system doesn't work with
* phys 0, /boot leaves arguments at page 1.
*/
- avail_next = avail_start =
- bootapiver >= 2 ? NBPG + i386_round_page(bootargc) : NBPG;
+#if !defined(MACHINE_NEW_NONCONTIG)
+ avail_next =
+#endif
+ avail_start = bootapiver >= 2 ? NBPG + i386_round_page(bootargc) : NBPG;
avail_end = extmem ? IOM_END + extmem * 1024
: cnvmem * 1024; /* just temporary use */
@@ -1694,6 +1698,13 @@ init386(first_avail)
/* call pmap initialization to make new kernel address space */
pmap_bootstrap((vm_offset_t)atdevbase + IOM_SIZE);
+#if !defined(MACHINE_NEW_NONCONTIG)
+ /*
+ * Initialize for pmap_free_pages and pmap_next_page
+ */
+ avail_next = avail_start;
+#endif
+
#ifdef DDB
ddb_init();
if (boothowto & RB_KDB)
@@ -1759,6 +1770,7 @@ cpu_exec_aout_makecmds(p, epp)
return ENOEXEC;
}
+#if !defined(MACHINE_NEW_NONCONTIG)
u_int
pmap_free_pages()
{
@@ -1797,6 +1809,7 @@ pmap_page_index(pa)
return i386_btop(pa - hole_end + hole_start - avail_start);
return -1;
}
+#endif
/*
* consinit:
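
The machdep.c hunks above keep the old MACHINE_NONCONTIG callbacks (pmap_free_pages(), pmap_next_page(), pmap_page_index()) compiled only when MACHINE_NEW_NONCONTIG is off. The arithmetic visible in the last hunk treats RAM as one run from avail_start to avail_end with the ISA hole cut out of it. The following is a self-contained reconstruction of that indexing; the branch for addresses below the hole is not shown in the hunk, so it is an assumption, and the memory layout constants are invented.

/*
 * Model of the old page indexing that machdep.c retains under
 * !MACHINE_NEW_NONCONTIG: one run of RAM with the ISA hole
 * [hole_start, hole_end) excised.  Addresses are made up; the
 * below-hole branch is an assumed reconstruction.
 */
#include <stdio.h>

#define PGSHIFT 12			/* i386 NBPG == 4096 */
#define btop(x) ((unsigned long)(x) >> PGSHIFT)

static unsigned long avail_start = 0x0001000;	/* skip page 0 and /boot args */
static unsigned long hole_start  = 0x00a0000;	/* 640K: start of ISA hole */
static unsigned long hole_end    = 0x0100000;	/* 1M: end of ISA hole */
static unsigned long avail_end   = 0x2000000;	/* 32M of RAM, say */

static long
page_index(unsigned long pa)
{
	if (pa >= avail_start && pa < hole_start)	/* assumed branch */
		return btop(pa - avail_start);
	if (pa >= hole_end && pa < avail_end)		/* as in the hunk above */
		return btop(pa - hole_end + hole_start - avail_start);
	return -1;					/* not managed RAM */
}

int
main(void)
{
	printf("index of 0x9f000  = %ld\n", page_index(0x9f000));
	printf("index of 0xb8000  = %ld\n", page_index(0xb8000));  /* in hole: -1 */
	printf("index of 0x100000 = %ld\n", page_index(0x100000)); /* contiguous with 0x9f000 */
	return 0;
}
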
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index a70c4141b38..0fee47d13a9 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.24 1998/03/20 15:40:32 niklas Exp $ */
+/* $OpenBSD: pmap.c,v 1.25 1998/04/25 20:31:30 mickey Exp $ */
/* $NetBSD: pmap.c,v 1.36 1996/05/03 19:42:22 christos Exp $ */
/*
@@ -176,10 +176,14 @@ vm_offset_t avail_start; /* PA of first available physical page */
vm_offset_t avail_end; /* PA of last available physical page */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+extern vm_offset_t hole_start, hole_end;
int npages;
boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
+#if !defined(MACHINE_NEW_NONCONTIG)
char *pmap_attributes; /* reference and modify bits */
+struct pv_entry *pv_table; /* array of entries, one per page */
+#endif
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
@@ -188,8 +192,8 @@ struct pv_entry * pmap_alloc_pv __P((void));
void pmap_free_pv __P((struct pv_entry *));
void i386_protection_init __P((void));
void pmap_collect_pv __P((void));
-__inline void pmap_remove_pv __P((pmap_t, vm_offset_t, u_int));
-__inline void pmap_enter_pv __P((pmap_t, vm_offset_t, u_int));
+__inline void pmap_remove_pv __P((pmap_t, vm_offset_t, struct pv_entry *));
+__inline void pmap_enter_pv __P((pmap_t, vm_offset_t, struct pv_entry *));
void pmap_deactivate __P((pmap_t, struct pcb *));
void pmap_remove_all __P((vm_offset_t));
void pads __P((pmap_t pm));
@@ -303,6 +307,26 @@ pmap_bootstrap(virtual_start)
PG_V | PG_KW;
}
+#if defined(MACHINE_NEW_NONCONTIG)
+ /*
+ * we must call vm_page_physload() after we are done playing
+ * with virtual_avail but before we call pmap_steal_memory.
+ * [i.e. here]
+ */
+#if defined(UVM)
+ if (avail_start < hole_start)
+ uvm_page_physload(atop(avail_start), atop(hole_start),
+ atop(avail_start), atop(hole_start));
+ uvm_page_physload(atop(hole_end), atop(avail_end),
+ atop(hole_end), atop(avail_end));
+#else
+ if (avail_start < hole_start)
+ vm_page_physload(atop(avail_start), atop(hole_start),
+ atop(avail_start), atop(hole_start));
+ vm_page_physload(atop(hole_end), atop(avail_end),
+ atop(hole_end), atop(avail_end));
+#endif
+#endif
pmap_update();
}
@@ -315,6 +339,62 @@ pmap_virtual_space(startp, endp)
*endp = virtual_end;
}
+#if defined(MACHINE_NEW_NONCONTIG)
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void
+pmap_init()
+{
+ vm_offset_t addr;
+ vm_size_t s;
+ int lcv;
+
+ if (PAGE_SIZE != NBPG)
+ panic("pmap_init: CLSIZE != 1");
+
+ npages = 0;
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+ npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
+ s = (vm_size_t) (sizeof(struct pv_entry) * npages + npages);
+ s = round_page(s);
+#if defined(UVM)
+ addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
+ if (addr == NULL)
+ panic("pmap_init");
+#else
+ addr = (vm_offset_t) kmem_alloc(kernel_map, s);
+#endif
+
+ /* allocate pv_entry stuff first */
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
+ vm_physmem[lcv].pmseg.pvent = (struct pv_entry *) addr;
+ addr = (vm_offset_t)(vm_physmem[lcv].pmseg.pvent +
+ (vm_physmem[lcv].end - vm_physmem[lcv].start));
+ }
+ /* allocate attrs next */
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
+ vm_physmem[lcv].pmseg.attrs = (char *) addr;
+ addr = (vm_offset_t)(vm_physmem[lcv].pmseg.attrs +
+ (vm_physmem[lcv].end - vm_physmem[lcv].start));
+ }
+ TAILQ_INIT(&pv_page_freelist);
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_INIT)
+ printf("pmap_init: %lx bytes (%x pgs)\n",
+ s, npages);
+#endif
+
+ /*
+ * Now it is safe to enable pv_entry recording.
+ */
+ pmap_initialized = TRUE;
+}
+
+#else /* MACHINE_NEW_NONCONTIG */
/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
@@ -349,6 +429,7 @@ pmap_init()
*/
pmap_initialized = TRUE;
}
+#endif /* MACHINE_NEW_NONCONTIG */
struct pv_entry *
pmap_alloc_pv()
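
The new pmap_init() above sizes a single allocation for the whole machine (one struct pv_entry plus one attribute byte per managed page) and then walks vm_physmem[] twice, handing each segment first its slice of pv entries and then its slice of attribute bytes. Below is a userland model of that carving; the segment page counts are invented and calloc stands in for uvm_km_zalloc()/kmem_alloc().

/*
 * Userland model of how the new pmap_init() carves one allocation into
 * per-segment pvent[] and attrs[] arrays.  Illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

struct pv_entry { void *pv_pmap; unsigned long pv_va; struct pv_entry *pv_next; };

struct seg {
	unsigned long npages;
	struct pv_entry *pvent;
	char *attrs;
};

int
main(void)
{
	struct seg segs[2] = { { 159 }, { 8192 } };	/* pages per segment (invented) */
	int nseg = 2, lcv;
	unsigned long npages = 0;
	char *addr;

	for (lcv = 0; lcv < nseg; lcv++)
		npages += segs[lcv].npages;

	/* sizeof(struct pv_entry) per page plus one attribute byte per page */
	addr = calloc(npages, sizeof(struct pv_entry) + 1);
	if (addr == NULL)
		return 1;

	/* pv_entry arrays first... */
	for (lcv = 0; lcv < nseg; lcv++) {
		segs[lcv].pvent = (struct pv_entry *)addr;
		addr = (char *)(segs[lcv].pvent + segs[lcv].npages);
	}
	/* ...then the attribute arrays */
	for (lcv = 0; lcv < nseg; lcv++) {
		segs[lcv].attrs = addr;
		addr += segs[lcv].npages;
	}

	printf("%lu pages total; seg1 pvent=%p attrs=%p\n",
	    npages, (void *)segs[1].pvent, (void *)segs[1].attrs);
	return 0;
}
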
@@ -414,6 +495,9 @@ pmap_collect_pv()
struct pv_page *pvp, *npvp;
struct pv_entry *ph, *ppv, *pv, *npv;
int s;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#endif
TAILQ_INIT(&pv_page_collectlist);
@@ -432,7 +516,16 @@ pmap_collect_pv()
if (pv_page_collectlist.tqh_first == 0)
return;
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(0), &off)) == -1) {
+ printf("INVALID PA!");
+ return;
+ }
+
+ for (ph = &vm_physmem[bank].pmseg.pvent[off]; ph; ph = ph->pv_next) {
+#else
for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
+#endif
if (ph->pv_pmap == 0)
continue;
s = splimp();
@@ -465,12 +558,12 @@ pmap_collect_pv()
}
__inline void
-pmap_enter_pv(pmap, va, pind)
+pmap_enter_pv(pmap, va, pv)
register pmap_t pmap;
vm_offset_t va;
- u_int pind;
+ struct pv_entry *pv;
{
- register struct pv_entry *pv, *npv;
+ register struct pv_entry *npv;
int s;
if (!pmap_initialized)
@@ -481,8 +574,6 @@ pmap_enter_pv(pmap, va, pind)
printf("pmap_enter_pv: pv %x: %x/%x/%x\n",
pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
-
- pv = &pv_table[pind];
s = splimp();
if (pv->pv_pmap == NULL) {
@@ -519,19 +610,18 @@ pmap_enter_pv(pmap, va, pind)
}
__inline void
-pmap_remove_pv(pmap, va, pind)
+pmap_remove_pv(pmap, va, pv)
register pmap_t pmap;
vm_offset_t va;
- u_int pind;
+ struct pv_entry *pv;
{
- register struct pv_entry *pv, *npv;
+ register struct pv_entry *npv;
int s;
/*
* Remove from the PV table (raise IPL since we
* may be called at interrupt time).
*/
- pv = &pv_table[pind];
s = splimp();
/*
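
pmap_enter_pv() and pmap_remove_pv() now take a pointer to the page's pv head instead of a pv_table index, because the caller resolves the head per segment. The head itself is an element of an array (pv_table[] before, vm_physmem[bank].pmseg.pvent[] after), with pv_pmap == NULL meaning the page has no mappings; further mappings are allocated and chained behind it. A minimal toy model of that chain, assuming the pv_entry fields visible in the diff:

/*
 * Toy model of the pv ("physical to virtual") chains this patch
 * re-plumbs: each managed page owns a head pv_entry embedded in an
 * array, and extra mappings of the same page are allocated and chained
 * behind it.  The kernel uses pmap_alloc_pv() and spl protection; this
 * sketch uses malloc and no locking.
 */
#include <stdio.h>
#include <stdlib.h>

struct pv_entry {
	struct pv_entry *pv_next;
	void            *pv_pmap;	/* NULL in the head == no mappings */
	unsigned long    pv_va;
};

static void
pv_enter(struct pv_entry *head, void *pmap, unsigned long va)
{
	struct pv_entry *npv;

	if (head->pv_pmap == NULL) {		/* first mapping: use the head */
		head->pv_pmap = pmap;
		head->pv_va = va;
		head->pv_next = NULL;
		return;
	}
	npv = malloc(sizeof(*npv));		/* extra mapping: chain it */
	npv->pv_pmap = pmap;
	npv->pv_va = va;
	npv->pv_next = head->pv_next;
	head->pv_next = npv;
}

int
main(void)
{
	struct pv_entry page = { NULL, NULL, 0 };	/* one pvent[] slot */
	int kernel_pmap, user_pmap;
	struct pv_entry *pv;

	pv_enter(&page, &kernel_pmap, 0xd0000000UL);
	pv_enter(&page, &user_pmap, 0x00001000UL);
	for (pv = &page; pv != NULL; pv = pv->pv_next)
		printf("pmap %p va 0x%lx\n", pv->pv_pmap, pv->pv_va);
	return 0;
}
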
@@ -764,7 +854,11 @@ pmap_remove(pmap, sva, eva)
{
register pt_entry_t *pte;
vm_offset_t pa;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#else
u_int pind;
+#endif
int flush = 0;
sva &= PG_FRAME;
@@ -849,10 +943,20 @@ pmap_remove(pmap, sva, eva)
reduce wiring count on page table pages as references drop
#endif
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
+ vm_physmem[bank].pmseg.attrs[off] |=
+ *pte & (PG_M | PG_U);
+ pmap_remove_pv(pmap, sva,
+ &vm_physmem[bank].pmseg.pvent[off]);
+ }
+#else
+
if ((pind = pmap_page_index(pa)) != -1) {
pmap_attributes[pind] |= *pte & (PG_M | PG_U);
- pmap_remove_pv(pmap, sva, pind);
+ pmap_remove_pv(pmap, sva, &pv_table[pind]);
}
+#endif /* MACHINE_NEW_NONCONTIG */
*pte = 0;
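
The hunk above shows the substitution that repeats through the rest of pmap.c: pind = pmap_page_index(pa) followed by pmap_attributes[pind] and pv_table[pind] becomes bank = vm_physseg_find(atop(pa), &off) followed by vm_physmem[bank].pmseg.attrs[off] and .pvent[off]. The attribute fold itself is unchanged: before a translation is zapped, the referenced and modified bits are harvested from the PTE into the per-page attribute byte. A toy illustration follows; the PG_U/PG_M values are the conventional i386 accessed/dirty PTE bits and are assumed here, not taken from the patch.

/*
 * The fold repeated throughout the converted pmap:
 *   attrs[off] |= *pte & (PG_M | PG_U);
 * saves the hardware-maintained referenced/modified bits before the
 * PTE is cleared.  Bit values below are assumptions for illustration.
 */
#include <stdio.h>

typedef unsigned int pt_entry_t;

#define PG_U	0x020	/* accessed ("used")  -- assumed value */
#define PG_M	0x040	/* modified (dirty)   -- assumed value */

int
main(void)
{
	pt_entry_t pte = 0x00001067;	/* a dirty, referenced mapping */
	char attrs[1] = { 0 };		/* per-page attribute byte */

	attrs[0] |= pte & (PG_M | PG_U);
	printf("saved attrs = 0x%02x (referenced=%d modified=%d)\n",
	    (unsigned char)attrs[0],
	    (attrs[0] & PG_U) != 0, (attrs[0] & PG_M) != 0);
	return 0;
}
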
@@ -881,7 +985,11 @@ pmap_remove_all(pa)
struct pv_entry *ph, *pv, *npv;
register pmap_t pmap;
register pt_entry_t *pte;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#else
u_int pind;
+#endif
int s;
#ifdef DEBUG
@@ -890,10 +998,18 @@ pmap_remove_all(pa)
/*pmap_pvdump(pa);*/
#endif
+#if defined(MACHINE_NEW_NONCONTIG)
+ bank = vm_physseg_find(atop(pa), &off);
+ if (bank == -1)
+ return;
+
+ pv = ph = &vm_physmem[bank].pmseg.pvent[off];
+#else
if ((pind = pmap_page_index(pa)) == -1)
return;
pv = ph = &pv_table[pind];
+#endif
s = splimp();
if (ph->pv_pmap == NULL) {
@@ -934,8 +1050,11 @@ reduce wiring count on page table pages as references drop
/*
* Update saved attributes for managed page
*/
+#if defined(MACHINE_NEW_NONCONTIG)
+ vm_physmem[bank].pmseg.attrs[off] |= *pte & (PG_M | PG_U);
+#else
pmap_attributes[pind] |= *pte & (PG_M | PG_U);
-
+#endif
*pte = 0;
npv = pv->pv_next;
@@ -1069,7 +1188,11 @@ pmap_enter(pmap, va, pa, prot, wired)
{
register pt_entry_t *pte;
register pt_entry_t npte;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#else
u_int pind;
+#endif
int flush = 0;
boolean_t cacheable;
@@ -1193,10 +1316,19 @@ pmap_enter(pmap, va, pa, prot, wired)
if (pmapdebug & PDB_ENTER)
printf("enter: removing old mapping %x pa %x ", va, opa);
#endif
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
+ vm_physmem[bank].pmseg.attrs[off] |=
+ *pte & (PG_M | PG_U);
+ pmap_remove_pv(pmap, va,
+ &vm_physmem[bank].pmseg.pvent[off]);
+ }
+#else
if ((pind = pmap_page_index(opa)) != -1) {
pmap_attributes[pind] |= *pte & (PG_M | PG_U);
- pmap_remove_pv(pmap, va, pind);
+ pmap_remove_pv(pmap, va, &pv_table[pind]);
}
+#endif
#ifdef DEBUG
enter_stats.mchange++;
#endif
@@ -1212,13 +1344,24 @@ pmap_enter(pmap, va, pa, prot, wired)
/*
* Enter on the PV list if part of our managed memory
*/
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
+#ifdef DEBUG
+ enter_stats.managed++;
+#endif
+ pmap_enter_pv(pmap, va, &vm_physmem[bank].pmseg.pvent[off]);
+ cacheable = TRUE;
+ }
+#else
if ((pind = pmap_page_index(pa)) != -1) {
#ifdef DEBUG
enter_stats.managed++;
#endif
- pmap_enter_pv(pmap, va, pind);
+ pmap_enter_pv(pmap, va, &pv_table[pind]);
cacheable = TRUE;
- } else if (pmap_initialized) {
+ }
+#endif
+ else if (pmap_initialized) {
#ifdef DEBUG
enter_stats.unmanaged++;
#endif
@@ -1452,6 +1595,9 @@ pmap_dump_pvlist(phys, m)
char *m;
{
register struct pv_entry *pv;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#endif
if (!(pmapdebug & PDB_PARANOIA))
return;
@@ -1459,7 +1605,12 @@ pmap_dump_pvlist(phys, m)
if (!pmap_initialized)
return;
printf("%s %08x:", m, phys);
+#if defined(MACHINE_NEW_NONCONTIG)
+ bank = vm_physseg_find(atop(phys), &off);
+ pv = &vm_physmem[bank].pmseg.pvent[off];
+#else
pv = &pv_table[pmap_page_index(phys)];
+#endif
if (pv->pv_pmap == NULL) {
printf(" no mappings\n");
return;
@@ -1561,7 +1712,11 @@ pmap_pageable(pmap, sva, eva, pageable)
register vm_offset_t pa;
register pt_entry_t *pte;
#ifdef DIAGNOSTIC
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#else
u_int pind;
+#endif
register struct pv_entry *pv;
#endif
@@ -1583,14 +1738,18 @@ pmap_pageable(pmap, sva, eva, pageable)
if ((*pte & (PG_u | PG_RW)) != (PG_u | PG_RW))
printf("pmap_pageable: unexpected pte=%x va %x\n",
*pte, sva);
-
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
+ return;
+ pv = &vm_physmem[bank].pmseg.pvent[off];
+#else
if ((pind = pmap_page_index(pa)) == -1) {
printf("pmap_pageable: invalid pa %x va %x\n",
pa, sva);
return;
}
-
pv = &pv_table[pind];
+#endif
if (pv->pv_va != sva || pv->pv_next) {
printf("pmap_pageable: bad PT page va %x next %x\n",
pv->pv_va, pv->pv_next);
@@ -1637,19 +1796,30 @@ pmap_testbit(pa, setbits)
{
register struct pv_entry *pv;
register pt_entry_t *pte;
- u_int pind;
int s;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
- if ((pind = pmap_page_index(pa)) == -1)
+ if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
return FALSE;
+ pv = &vm_physmem[bank].pmseg.pvent[off];
+#else
+ u_int pind;
+ if ((pind = pmap_page_index(pa)) == -1)
+ return FALSE;
pv = &pv_table[pind];
+#endif
s = splimp();
/*
* Check saved info first
*/
+#if defined(MACHINE_NEW_NONCONTIG)
+ if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
+#else
if (pmap_attributes[pind] & setbits) {
+#endif
splx(s);
return TRUE;
}
@@ -1684,8 +1854,12 @@ pmap_changebit(pa, setbits, maskbits)
register struct pv_entry *pv;
register pt_entry_t *pte;
vm_offset_t va;
- u_int pind;
int s;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#else
+ u_int pind;
+#endif
#ifdef DEBUG
if (pmapdebug & PDB_BITS)
@@ -1693,17 +1867,26 @@ pmap_changebit(pa, setbits, maskbits)
pa, setbits, ~maskbits);
#endif
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
+ return;
+ pv = &vm_physmem[bank].pmseg.pvent[off];
+#else
if ((pind = pmap_page_index(pa)) == -1)
return;
-
pv = &pv_table[pind];
+#endif
s = splimp();
/*
* Clear saved attributes (modify, reference)
*/
if (~maskbits)
+#if defined(MACHINE_NEW_NONCONTIG)
+ vm_physmem[bank].pmseg.attrs[off] &= maskbits;
+#else
pmap_attributes[pind] &= maskbits;
+#endif
/*
* Loop over all current mappings setting/clearing as appropos
@@ -1755,12 +1938,27 @@ pmap_pvdump(pa)
vm_offset_t pa;
{
register struct pv_entry *pv;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#endif
printf("pa %x", pa);
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(pa), &off)) == -1) {
+ printf("INVALID PA!");
+ } else {
+ for (pv = &vm_physmem[bank].pmseg.pvent[off] ; pv ;
+ pv = pv->pv_next) {
+ printf(" -> pmap %p, va %lx", pv->pv_pmap, pv->pv_va);
+ pads(pv->pv_pmap);
+ }
+ }
+#else
for (pv = &pv_table[pmap_page_index(pa)]; pv; pv = pv->pv_next) {
printf(" -> pmap %x, va %x", pv->pv_pmap, pv->pv_va);
pads(pv->pv_pmap);
}
+#endif
printf(" ");
}
diff --git a/sys/arch/i386/i386/pmap.old.c b/sys/arch/i386/i386/pmap.old.c
index ae9d1696b9b..7912f70780f 100644
--- a/sys/arch/i386/i386/pmap.old.c
+++ b/sys/arch/i386/i386/pmap.old.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.old.c,v 1.24 1998/03/20 15:40:32 niklas Exp $ */
+/* $OpenBSD: pmap.old.c,v 1.25 1998/04/25 20:31:30 mickey Exp $ */
/* $NetBSD: pmap.c,v 1.36 1996/05/03 19:42:22 christos Exp $ */
/*
@@ -176,10 +176,14 @@ vm_offset_t avail_start; /* PA of first available physical page */
vm_offset_t avail_end; /* PA of last available physical page */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+extern vm_offset_t hole_start, hole_end;
int npages;
boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
+#if !defined(MACHINE_NEW_NONCONTIG)
char *pmap_attributes; /* reference and modify bits */
+struct pv_entry *pv_table; /* array of entries, one per page */
+#endif
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
@@ -188,8 +192,8 @@ struct pv_entry * pmap_alloc_pv __P((void));
void pmap_free_pv __P((struct pv_entry *));
void i386_protection_init __P((void));
void pmap_collect_pv __P((void));
-__inline void pmap_remove_pv __P((pmap_t, vm_offset_t, u_int));
-__inline void pmap_enter_pv __P((pmap_t, vm_offset_t, u_int));
+__inline void pmap_remove_pv __P((pmap_t, vm_offset_t, struct pv_entry *));
+__inline void pmap_enter_pv __P((pmap_t, vm_offset_t, struct pv_entry *));
void pmap_deactivate __P((pmap_t, struct pcb *));
void pmap_remove_all __P((vm_offset_t));
void pads __P((pmap_t pm));
@@ -303,6 +307,26 @@ pmap_bootstrap(virtual_start)
PG_V | PG_KW;
}
+#if defined(MACHINE_NEW_NONCONTIG)
+ /*
+ * we must call vm_page_physload() after we are done playing
+ * with virtual_avail but before we call pmap_steal_memory.
+ * [i.e. here]
+ */
+#if defined(UVM)
+ if (avail_start < hole_start)
+ uvm_page_physload(atop(avail_start), atop(hole_start),
+ atop(avail_start), atop(hole_start));
+ uvm_page_physload(atop(hole_end), atop(avail_end),
+ atop(hole_end), atop(avail_end));
+#else
+ if (avail_start < hole_start)
+ vm_page_physload(atop(avail_start), atop(hole_start),
+ atop(avail_start), atop(hole_start));
+ vm_page_physload(atop(hole_end), atop(avail_end),
+ atop(hole_end), atop(avail_end));
+#endif
+#endif
pmap_update();
}
@@ -315,6 +339,62 @@ pmap_virtual_space(startp, endp)
*endp = virtual_end;
}
+#if defined(MACHINE_NEW_NONCONTIG)
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void
+pmap_init()
+{
+ vm_offset_t addr;
+ vm_size_t s;
+ int lcv;
+
+ if (PAGE_SIZE != NBPG)
+ panic("pmap_init: CLSIZE != 1");
+
+ npages = 0;
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+ npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
+ s = (vm_size_t) (sizeof(struct pv_entry) * npages + npages);
+ s = round_page(s);
+#if defined(UVM)
+ addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
+ if (addr == NULL)
+ panic("pmap_init");
+#else
+ addr = (vm_offset_t) kmem_alloc(kernel_map, s);
+#endif
+
+ /* allocate pv_entry stuff first */
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
+ vm_physmem[lcv].pmseg.pvent = (struct pv_entry *) addr;
+ addr = (vm_offset_t)(vm_physmem[lcv].pmseg.pvent +
+ (vm_physmem[lcv].end - vm_physmem[lcv].start));
+ }
+ /* allocate attrs next */
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
+ vm_physmem[lcv].pmseg.attrs = (char *) addr;
+ addr = (vm_offset_t)(vm_physmem[lcv].pmseg.attrs +
+ (vm_physmem[lcv].end - vm_physmem[lcv].start));
+ }
+ TAILQ_INIT(&pv_page_freelist);
+
+#ifdef DEBUG
+ if (pmapdebug & PDB_INIT)
+ printf("pmap_init: %lx bytes (%x pgs)\n",
+ s, npages);
+#endif
+
+ /*
+ * Now it is safe to enable pv_entry recording.
+ */
+ pmap_initialized = TRUE;
+}
+
+#else /* MACHINE_NEW_NONCONTIG */
/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
@@ -349,6 +429,7 @@ pmap_init()
*/
pmap_initialized = TRUE;
}
+#endif /* MACHINE_NEW_NONCONTIG */
struct pv_entry *
pmap_alloc_pv()
@@ -414,6 +495,9 @@ pmap_collect_pv()
struct pv_page *pvp, *npvp;
struct pv_entry *ph, *ppv, *pv, *npv;
int s;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#endif
TAILQ_INIT(&pv_page_collectlist);
@@ -432,7 +516,16 @@ pmap_collect_pv()
if (pv_page_collectlist.tqh_first == 0)
return;
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(0), &off)) == -1) {
+ printf("INVALID PA!");
+ return;
+ }
+
+ for (ph = &vm_physmem[bank].pmseg.pvent[off]; ph; ph = ph->pv_next) {
+#else
for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
+#endif
if (ph->pv_pmap == 0)
continue;
s = splimp();
@@ -465,12 +558,12 @@ pmap_collect_pv()
}
__inline void
-pmap_enter_pv(pmap, va, pind)
+pmap_enter_pv(pmap, va, pv)
register pmap_t pmap;
vm_offset_t va;
- u_int pind;
+ struct pv_entry *pv;
{
- register struct pv_entry *pv, *npv;
+ register struct pv_entry *npv;
int s;
if (!pmap_initialized)
@@ -481,8 +574,6 @@ pmap_enter_pv(pmap, va, pind)
printf("pmap_enter_pv: pv %x: %x/%x/%x\n",
pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
-
- pv = &pv_table[pind];
s = splimp();
if (pv->pv_pmap == NULL) {
@@ -519,19 +610,18 @@ pmap_enter_pv(pmap, va, pind)
}
__inline void
-pmap_remove_pv(pmap, va, pind)
+pmap_remove_pv(pmap, va, pv)
register pmap_t pmap;
vm_offset_t va;
- u_int pind;
+ struct pv_entry *pv;
{
- register struct pv_entry *pv, *npv;
+ register struct pv_entry *npv;
int s;
/*
* Remove from the PV table (raise IPL since we
* may be called at interrupt time).
*/
- pv = &pv_table[pind];
s = splimp();
/*
@@ -764,7 +854,11 @@ pmap_remove(pmap, sva, eva)
{
register pt_entry_t *pte;
vm_offset_t pa;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#else
u_int pind;
+#endif
int flush = 0;
sva &= PG_FRAME;
@@ -849,10 +943,20 @@ pmap_remove(pmap, sva, eva)
reduce wiring count on page table pages as references drop
#endif
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
+ vm_physmem[bank].pmseg.attrs[off] |=
+ *pte & (PG_M | PG_U);
+ pmap_remove_pv(pmap, sva,
+ &vm_physmem[bank].pmseg.pvent[off]);
+ }
+#else
+
if ((pind = pmap_page_index(pa)) != -1) {
pmap_attributes[pind] |= *pte & (PG_M | PG_U);
- pmap_remove_pv(pmap, sva, pind);
+ pmap_remove_pv(pmap, sva, &pv_table[pind]);
}
+#endif /* MACHINE_NEW_NONCONTIG */
*pte = 0;
@@ -881,7 +985,11 @@ pmap_remove_all(pa)
struct pv_entry *ph, *pv, *npv;
register pmap_t pmap;
register pt_entry_t *pte;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#else
u_int pind;
+#endif
int s;
#ifdef DEBUG
@@ -890,10 +998,18 @@ pmap_remove_all(pa)
/*pmap_pvdump(pa);*/
#endif
+#if defined(MACHINE_NEW_NONCONTIG)
+ bank = vm_physseg_find(atop(pa), &off);
+ if (bank == -1)
+ return;
+
+ pv = ph = &vm_physmem[bank].pmseg.pvent[off];
+#else
if ((pind = pmap_page_index(pa)) == -1)
return;
pv = ph = &pv_table[pind];
+#endif
s = splimp();
if (ph->pv_pmap == NULL) {
@@ -934,8 +1050,11 @@ reduce wiring count on page table pages as references drop
/*
* Update saved attributes for managed page
*/
+#if defined(MACHINE_NEW_NONCONTIG)
+ vm_physmem[bank].pmseg.attrs[off] |= *pte & (PG_M | PG_U);
+#else
pmap_attributes[pind] |= *pte & (PG_M | PG_U);
-
+#endif
*pte = 0;
npv = pv->pv_next;
@@ -1069,7 +1188,11 @@ pmap_enter(pmap, va, pa, prot, wired)
{
register pt_entry_t *pte;
register pt_entry_t npte;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#else
u_int pind;
+#endif
int flush = 0;
boolean_t cacheable;
@@ -1193,10 +1316,19 @@ pmap_enter(pmap, va, pa, prot, wired)
if (pmapdebug & PDB_ENTER)
printf("enter: removing old mapping %x pa %x ", va, opa);
#endif
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
+ vm_physmem[bank].pmseg.attrs[off] |=
+ *pte & (PG_M | PG_U);
+ pmap_remove_pv(pmap, va,
+ &vm_physmem[bank].pmseg.pvent[off]);
+ }
+#else
if ((pind = pmap_page_index(opa)) != -1) {
pmap_attributes[pind] |= *pte & (PG_M | PG_U);
- pmap_remove_pv(pmap, va, pind);
+ pmap_remove_pv(pmap, va, &pv_table[pind]);
}
+#endif
#ifdef DEBUG
enter_stats.mchange++;
#endif
@@ -1212,13 +1344,24 @@ pmap_enter(pmap, va, pa, prot, wired)
/*
* Enter on the PV list if part of our managed memory
*/
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
+#ifdef DEBUG
+ enter_stats.managed++;
+#endif
+ pmap_enter_pv(pmap, va, &vm_physmem[bank].pmseg.pvent[off]);
+ cacheable = TRUE;
+ }
+#else
if ((pind = pmap_page_index(pa)) != -1) {
#ifdef DEBUG
enter_stats.managed++;
#endif
- pmap_enter_pv(pmap, va, pind);
+ pmap_enter_pv(pmap, va, &pv_table[pind]);
cacheable = TRUE;
- } else if (pmap_initialized) {
+ }
+#endif
+ else if (pmap_initialized) {
#ifdef DEBUG
enter_stats.unmanaged++;
#endif
@@ -1452,6 +1595,9 @@ pmap_dump_pvlist(phys, m)
char *m;
{
register struct pv_entry *pv;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#endif
if (!(pmapdebug & PDB_PARANOIA))
return;
@@ -1459,7 +1605,12 @@ pmap_dump_pvlist(phys, m)
if (!pmap_initialized)
return;
printf("%s %08x:", m, phys);
+#if defined(MACHINE_NEW_NONCONTIG)
+ bank = vm_physseg_find(atop(phys), &off);
+ pv = &vm_physmem[bank].pmseg.pvent[off];
+#else
pv = &pv_table[pmap_page_index(phys)];
+#endif
if (pv->pv_pmap == NULL) {
printf(" no mappings\n");
return;
@@ -1561,7 +1712,11 @@ pmap_pageable(pmap, sva, eva, pageable)
register vm_offset_t pa;
register pt_entry_t *pte;
#ifdef DIAGNOSTIC
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#else
u_int pind;
+#endif
register struct pv_entry *pv;
#endif
@@ -1583,14 +1738,18 @@ pmap_pageable(pmap, sva, eva, pageable)
if ((*pte & (PG_u | PG_RW)) != (PG_u | PG_RW))
printf("pmap_pageable: unexpected pte=%x va %x\n",
*pte, sva);
-
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
+ return;
+ pv = &vm_physmem[bank].pmseg.pvent[off];
+#else
if ((pind = pmap_page_index(pa)) == -1) {
printf("pmap_pageable: invalid pa %x va %x\n",
pa, sva);
return;
}
-
pv = &pv_table[pind];
+#endif
if (pv->pv_va != sva || pv->pv_next) {
printf("pmap_pageable: bad PT page va %x next %x\n",
pv->pv_va, pv->pv_next);
@@ -1637,19 +1796,30 @@ pmap_testbit(pa, setbits)
{
register struct pv_entry *pv;
register pt_entry_t *pte;
- u_int pind;
int s;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
- if ((pind = pmap_page_index(pa)) == -1)
+ if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
return FALSE;
+ pv = &vm_physmem[bank].pmseg.pvent[off];
+#else
+ u_int pind;
+ if ((pind = pmap_page_index(pa)) == -1)
+ return FALSE;
pv = &pv_table[pind];
+#endif
s = splimp();
/*
* Check saved info first
*/
+#if defined(MACHINE_NEW_NONCONTIG)
+ if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
+#else
if (pmap_attributes[pind] & setbits) {
+#endif
splx(s);
return TRUE;
}
@@ -1684,8 +1854,12 @@ pmap_changebit(pa, setbits, maskbits)
register struct pv_entry *pv;
register pt_entry_t *pte;
vm_offset_t va;
- u_int pind;
int s;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#else
+ u_int pind;
+#endif
#ifdef DEBUG
if (pmapdebug & PDB_BITS)
@@ -1693,17 +1867,26 @@ pmap_changebit(pa, setbits, maskbits)
pa, setbits, ~maskbits);
#endif
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
+ return;
+ pv = &vm_physmem[bank].pmseg.pvent[off];
+#else
if ((pind = pmap_page_index(pa)) == -1)
return;
-
pv = &pv_table[pind];
+#endif
s = splimp();
/*
* Clear saved attributes (modify, reference)
*/
if (~maskbits)
+#if defined(MACHINE_NEW_NONCONTIG)
+ vm_physmem[bank].pmseg.attrs[off] &= maskbits;
+#else
pmap_attributes[pind] &= maskbits;
+#endif
/*
* Loop over all current mappings setting/clearing as appropos
@@ -1755,12 +1938,27 @@ pmap_pvdump(pa)
vm_offset_t pa;
{
register struct pv_entry *pv;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, off;
+#endif
printf("pa %x", pa);
+#if defined(MACHINE_NEW_NONCONTIG)
+ if ((bank = vm_physseg_find(atop(pa), &off)) == -1) {
+ printf("INVALID PA!");
+ } else {
+ for (pv = &vm_physmem[bank].pmseg.pvent[off] ; pv ;
+ pv = pv->pv_next) {
+ printf(" -> pmap %p, va %lx", pv->pv_pmap, pv->pv_va);
+ pads(pv->pv_pmap);
+ }
+ }
+#else
for (pv = &pv_table[pmap_page_index(pa)]; pv; pv = pv->pv_next) {
printf(" -> pmap %x, va %x", pv->pv_pmap, pv->pv_va);
pads(pv->pv_pmap);
}
+#endif
printf(" ");
}
diff --git a/sys/arch/i386/include/pmap.h b/sys/arch/i386/include/pmap.h
index 67415f52349..c025d2330a6 100644
--- a/sys/arch/i386/include/pmap.h
+++ b/sys/arch/i386/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.8 1997/01/07 05:37:34 tholo Exp $ */
+/* $OpenBSD: pmap.h,v 1.9 1998/04/25 20:31:35 mickey Exp $ */
/* $NetBSD: pmap.h,v 1.23 1996/05/03 19:26:30 christos Exp $ */
/*
@@ -156,7 +156,6 @@ struct pv_page {
#ifdef _KERNEL
extern struct pmap kernel_pmap_store;
-struct pv_entry *pv_table; /* array of entries, one per page */
#define pmap_kernel() (&kernel_pmap_store)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
@@ -201,6 +200,7 @@ pmap_phys_address(int ppn)
}
void pmap_activate __P((pmap_t, struct pcb *));
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
#endif /* _KERNEL */
diff --git a/sys/arch/i386/include/pmap.old.h b/sys/arch/i386/include/pmap.old.h
index bbe198f994f..73661d92bb9 100644
--- a/sys/arch/i386/include/pmap.old.h
+++ b/sys/arch/i386/include/pmap.old.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.old.h,v 1.8 1997/01/07 05:37:34 tholo Exp $ */
+/* $OpenBSD: pmap.old.h,v 1.9 1998/04/25 20:31:35 mickey Exp $ */
/* $NetBSD: pmap.h,v 1.23 1996/05/03 19:26:30 christos Exp $ */
/*
@@ -156,7 +156,6 @@ struct pv_page {
#ifdef _KERNEL
extern struct pmap kernel_pmap_store;
-struct pv_entry *pv_table; /* array of entries, one per page */
#define pmap_kernel() (&kernel_pmap_store)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
@@ -201,6 +200,7 @@ pmap_phys_address(int ppn)
}
void pmap_activate __P((pmap_t, struct pcb *));
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
#endif /* _KERNEL */
diff --git a/sys/arch/i386/include/vmparam.h b/sys/arch/i386/include/vmparam.h
index eb467044061..532d0c7c993 100644
--- a/sys/arch/i386/include/vmparam.h
+++ b/sys/arch/i386/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.9 1997/08/26 21:19:34 deraadt Exp $ */
+/* $OpenBSD: vmparam.h,v 1.10 1998/04/25 20:31:37 mickey Exp $ */
/* $NetBSD: vmparam.h,v 1.15 1994/10/27 04:16:34 cgd Exp $ */
/*-
@@ -39,7 +39,8 @@
* @(#)vmparam.h 5.9 (Berkeley) 5/12/91
*/
-
+#ifndef _MACHINE_VM_PARAM_H_
+#define _MACHINE_VM_PARAM_H_
/*
* Machine dependent constants for 386.
*/
@@ -140,9 +141,31 @@
/* APTDPTDI<<PDSHIFT */
#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)0xffc00000)
-#define MACHINE_NONCONTIG /* VM <=> pmap interface modifier */
-
/* virtual sizes (bytes) for various kernel submaps */
#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
#define VM_KMEM_SIZE (NKMEMCLUSTERS*CLBYTES)
#define VM_PHYS_SIZE (USRIOSIZE*CLBYTES)
+
+#define MACHINE_NEW_NONCONTIG /* VM <=> pmap interface modifier */
+
+#define VM_PHYSSEG_MAX 4 /* actually we could have this many segments */
+#define VM_PHYSSEG_STRAT VM_PSTRAT_BIGFIRST
+#define VM_PHYSSEG_NOADD /* can't add RAM after vm_mem_init */
+
+/*
+ * pmap specific data stored in the vm_physmem[] array
+ */
+#if defined(PMAP_NEW)
+struct pmap_physseg {
+ struct pv_head *pvhead; /* pv_head array */
+ char *attrs; /* attrs array */
+};
+#else
+struct pmap_physseg {
+ struct pv_entry *pvent; /* pv_entry array */
+ char *attrs; /* attrs array */
+};
+#endif
+
+
+#endif /* _MACHINE_VM_PARAM_H_ */
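
The vmparam.h hunk is what actually flips the port: it drops MACHINE_NONCONTIG, defines MACHINE_NEW_NONCONTIG, caps the number of RAM segments at VM_PHYSSEG_MAX (4), selects the VM_PSTRAT_BIGFIRST load strategy, and declares struct pmap_physseg, the machine-dependent payload the VM system embeds in each vm_physmem[] entry. The sketch below shows that embedding with a simplified stand-in for the VM side's segment descriptor; only struct pmap_physseg is taken from the patch, the rest is illustrative.

/*
 * Why struct pmap_physseg lives in vmparam.h: the machine-independent
 * VM code embeds it as the "pmseg" member of its per-segment
 * descriptor, so pmap.c can reach vm_physmem[bank].pmseg.pvent[off]
 * and .attrs[off] without the VM layer knowing the layout.  The
 * vm_physseg structure here is a simplified stand-in, not the real one.
 */
#include <stdio.h>

struct pv_entry;				/* opaque in this sketch */

struct pmap_physseg {				/* as added above (non-PMAP_NEW case) */
	struct pv_entry *pvent;			/* pv_entry array */
	char *attrs;				/* attrs array */
};

struct vm_physseg {				/* simplified stand-in for the VM side */
	unsigned long start, end;		/* page frame numbers */
	unsigned long avail_start, avail_end;	/* allocatable subrange */
	struct pmap_physseg pmseg;		/* machine-dependent part */
};

struct vm_physseg vm_physmem[4];		/* 4 == VM_PHYSSEG_MAX */
int vm_nphysseg;

int
main(void)
{
	printf("per-segment descriptor: %zu bytes, pmap part: %zu bytes\n",
	    sizeof(struct vm_physseg), sizeof(struct pmap_physseg));
	return 0;
}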