author:    Miod Vallat <miod@cvs.openbsd.org>  2010-06-29 21:28:12 +0000
committer: Miod Vallat <miod@cvs.openbsd.org>  2010-06-29 21:28:12 +0000
commit:    de3823732bf5afd3afd9f14563e9818df599ec58
tree:      43b9dcd928c8ba882aaeeb1e061e5d9e1a90fc4d /sys
parent:    e8b3fae2e0b91ce09d03d15f1cdf27e46f2683fd
During kernel bootstrap, stop assuming the kernel image has been loaded in
low physical memory, but instead figure out where it has been loaded from the current MMU setup. From NetBSD.
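
The change, in a nutshell: locore and pmap_bootstrap() now ask the MMU where the kernel text actually sits and keep the resulting VA-to-PA bias (`va2pa_offset' in pmap.c, the %l7 relocation register in locore.s) instead of hard-coding physical address 0. Below is a minimal, self-contained C sketch of that calculation; getpte() here is a fake stand-in for the real MMU probe (getpte4() on sun4/sun4c, VA2PA() on sun4m), and the PG_PFNUM/PGSHIFT values are illustrative rather than the machine's.

#include <stdio.h>

/*
 * Hedged sketch of the patch's central idea: do not assume KERNBASE is
 * backed by physical address 0; instead ask the MMU which physical page
 * holds the kernel text and remember the difference.
 */
#define KERNBASE	0xf8000000UL	/* virtual address the kernel is linked at */
#define PROM_LOADADDR	0x4000UL
#define PGSHIFT		12
#define PG_PFNUM	0x000fffffUL	/* PTE page-frame-number field (illustrative) */

/* Fake MMU probe: pretend the boot loader put the kernel at physical 4MB. */
static unsigned long
getpte(unsigned long va)
{
	return ((va - KERNBASE + 0x00400000UL) >> PGSHIFT) & PG_PFNUM;
}

static unsigned long va2pa_offset;

/* The patch's PMAP_BOOTSTRAP_VA2PA(), spelled out. */
#define BOOTSTRAP_VA2PA(v)	((unsigned long)(v) - va2pa_offset)

int
main(void)
{
	unsigned long kernel_text = KERNBASE + PROM_LOADADDR;

	/* Offset = VA of the text minus the PA the MMU says backs it. */
	va2pa_offset = kernel_text -
	    ((getpte(kernel_text) & PG_PFNUM) << PGSHIFT);

	printf("text VA %#lx -> PA %#lx (offset %#lx)\n",
	    kernel_text, BOOTSTRAP_VA2PA(kernel_text), va2pa_offset);
	return 0;
}

With the offset in hand, every early physical address in the patch is derived as PMAP_BOOTSTRAP_VA2PA(va) rather than va - KERNBASE.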
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/sparc/include/autoconf.h      |  15
-rw-r--r--  sys/arch/sparc/include/bsd_openprom.h  |  18
-rw-r--r--  sys/arch/sparc/sparc/autoconf.c        | 127
-rw-r--r--  sys/arch/sparc/sparc/db_interface.c    |   6
-rw-r--r--  sys/arch/sparc/sparc/locore.s          | 142
-rw-r--r--  sys/arch/sparc/sparc/machdep.c         |  24
-rw-r--r--  sys/arch/sparc/sparc/pmap.c            | 408
7 files changed, 416 insertions, 324 deletions
diff --git a/sys/arch/sparc/include/autoconf.h b/sys/arch/sparc/include/autoconf.h
index af812cebb7f..f3b3088d138 100644
--- a/sys/arch/sparc/include/autoconf.h
+++ b/sys/arch/sparc/include/autoconf.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: autoconf.h,v 1.17 2009/07/12 20:07:53 kettenis Exp $ */
+/* $OpenBSD: autoconf.h,v 1.18 2010/06/29 21:28:08 miod Exp $ */
/* $NetBSD: autoconf.h,v 1.20 1997/05/24 20:03:03 pk Exp $ */
/*
@@ -166,19 +166,6 @@ int romprop(struct romaux *ra, const char *name, int node);
*/
char *clockfreq(int freq);
-/*
- * Memory description arrays. Shared between pmap.c and autoconf.c; no
- * one else should use this (except maybe mem.c, e.g., if we fix the VM to
- * handle discontiguous physical memory).
- */
-struct memarr {
- u_int addr;
- u_int len;
-};
-int makememarr(struct memarr *, int max, int which);
-#define MEMARR_AVAILPHYS 0
-#define MEMARR_TOTALPHYS 1
-
/* Pass a string to the FORTH interpreter. May fail silently. */
void rominterpret(char *);
diff --git a/sys/arch/sparc/include/bsd_openprom.h b/sys/arch/sparc/include/bsd_openprom.h
index a14ba5d246b..bc60caf4855 100644
--- a/sys/arch/sparc/include/bsd_openprom.h
+++ b/sys/arch/sparc/include/bsd_openprom.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: bsd_openprom.h,v 1.11 2003/11/14 19:05:36 miod Exp $ */
+/* $OpenBSD: bsd_openprom.h,v 1.12 2010/06/29 21:28:08 miod Exp $ */
/* $NetBSD: bsd_openprom.h,v 1.11 1996/05/18 12:27:43 mrg Exp $ */
/*
@@ -254,7 +254,7 @@ struct promvec {
* easily.
*/
void (*pv_setctxt)(int ctxt, caddr_t va, int pmeg);
-#if defined(SUN4M) && defined(notyet)
+#if (defined(SUN4D) || defined(SUN4M)) && defined(notyet)
/*
* The following are V3 ROM functions to handle MP machines in the
* Sun4m series. They have undefined results when run on a uniprocessor!
@@ -320,3 +320,17 @@ __dead void romhalt(void);
__dead void romboot(char *);
extern struct promvec *promvec;
+
+/*
+ * Memory description arrays, matching version 2 memory information layout.
+ * Shared between boot blocks, pmap.c and autoconf.c; no one else should use
+ * this.
+ */
+struct memarr {
+ uint32_t addr_hi;
+ uint32_t addr_lo;
+ uint32_t len;
+};
+int makememarr(struct memarr *, u_int max, int which);
+#define MEMARR_AVAILPHYS 0
+#define MEMARR_TOTALPHYS 1
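
For reference, a hedged sketch of how the relocated struct memarr is meant to be consumed. It matches the version 2 PROM "available"/"reg" property layout (64-bit base split into two 32-bit words plus a 32-bit length), which is why the new makememarr() can getprop() straight into the caller's array; the sample banks and total_avail() helper below are made up, the kernel's real consumer being get_phys_mem() in pmap.c.

#include <stdio.h>
#include <stdint.h>

/* Same layout as the new declaration in bsd_openprom.h. */
struct memarr {
	uint32_t addr_hi;
	uint32_t addr_lo;
	uint32_t len;
};

/* Sum up the lengths, the way get_phys_mem() computes `physmem'
 * (in bytes here; the kernel converts to pages with atop()). */
static uint64_t
total_avail(const struct memarr *ap, int n)
{
	uint64_t total = 0;
	int i;

	for (i = 0; i < n; i++)
		total += ap[i].len;
	return total;
}

int
main(void)
{
	/* Two fake banks: 16MB at physical 0 and 48MB at 256MB. */
	struct memarr banks[] = {
		{ 0, 0x00000000, 0x01000000 },
		{ 0, 0x10000000, 0x03000000 },
	};

	printf("%llu bytes available\n",
	    (unsigned long long)total_avail(banks, 2));
	return 0;
}

Note that the new prototype also allows a NULL array (or a zero max) to be passed when only the entry count is wanted, as the rewritten makememarr() in autoconf.c shows.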
diff --git a/sys/arch/sparc/sparc/autoconf.c b/sys/arch/sparc/sparc/autoconf.c
index 34d8f2fcbc9..914d857cdc7 100644
--- a/sys/arch/sparc/sparc/autoconf.c
+++ b/sys/arch/sparc/sparc/autoconf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: autoconf.c,v 1.88 2010/06/27 05:52:01 beck Exp $ */
+/* $OpenBSD: autoconf.c,v 1.89 2010/06/29 21:28:10 miod Exp $ */
/* $NetBSD: autoconf.c,v 1.73 1997/07/29 09:41:53 fair Exp $ */
/*
@@ -800,7 +800,8 @@ cpu_configure()
node = findroot();
#endif
- *promvec->pv_synchook = sync_crash;
+ if (!CPU_ISSUN4)
+ *promvec->pv_synchook = sync_crash;
oca.ca_ra.ra_node = node;
oca.ca_ra.ra_name = cp = "mainbus";
@@ -1361,130 +1362,76 @@ findzs(zs)
/* NOTREACHED */
}
-#if defined(SUN4C) || defined(SUN4M)
-struct v2rmi {
- int zero;
- int addr;
- int len;
-} v2rmi[200]; /* version 2 rom meminfo layout */
-#endif
-
int
-makememarr(ap, max, which)
- register struct memarr *ap;
- int max, which;
+makememarr(struct memarr *ap, u_int xmax, int which)
{
#if defined(SUN4C) || defined(SUN4M)
-#define MAXMEMINFO (sizeof(v2rmi) / sizeof(*v2rmi))
- register struct v0mlist *mp;
- register int i, node, len;
+ struct v0mlist *mp;
+ int node, n;
char *prop;
#endif
+#ifdef DIAGNOSTIC
+ if (which != MEMARR_AVAILPHYS && which != MEMARR_TOTALPHYS)
+ panic("makememarr");
+#endif
+
#if defined(SUN4)
if (CPU_ISSUN4) {
- switch (which) {
- case MEMARR_AVAILPHYS:
- ap[0].addr = 0;
- ap[0].len = *oldpvec->memoryAvail;
- break;
- case MEMARR_TOTALPHYS:
- ap[0].addr = 0;
- ap[0].len = *oldpvec->memorySize;
- break;
- default:
- printf("pre_panic: makememarr");
- break;
+ if (ap != NULL && xmax != 0) {
+ ap[0].addr_hi = 0;
+ ap[0].addr_lo = 0;
+ ap[0].len = which == MEMARR_AVAILPHYS ?
+ *oldpvec->memoryAvail : *oldpvec->memorySize;
}
- return (1);
+ return 1;
}
#endif
#if defined(SUN4C) || defined(SUN4M)
- switch (i = promvec->pv_romvec_vers) {
-
+ switch (n = promvec->pv_romvec_vers) {
case 0:
/*
* Version 0 PROMs use a linked list to describe these
* guys.
*/
- switch (which) {
-
- case MEMARR_AVAILPHYS:
- mp = *promvec->pv_v0mem.v0_physavail;
- break;
-
- case MEMARR_TOTALPHYS:
- mp = *promvec->pv_v0mem.v0_phystot;
- break;
+ mp = which == MEMARR_AVAILPHYS ?
+ *promvec->pv_v0mem.v0_physavail :
+ *promvec->pv_v0mem.v0_phystot;
- default:
- panic("makememarr");
- }
- for (i = 0; mp != NULL; mp = mp->next, i++) {
- if (i >= max)
- goto overflow;
- ap->addr = (u_int)mp->addr;
+ for (n = 0; mp != NULL; mp = mp->next, n++) {
+ if (ap == NULL || n >= xmax)
+ continue;
+ ap->addr_hi = 0;
+ ap->addr_lo = (uint32_t)mp->addr;
ap->len = mp->nbytes;
ap++;
}
break;
-
default:
printf("makememarr: hope version %d PROM is like version 2\n",
- i);
+ n);
/* FALLTHROUGH */
-
- case 3:
+ case 3:
case 2:
/*
* Version 2 PROMs use a property array to describe them.
*/
- if (max > MAXMEMINFO) {
- printf("makememarr: limited to %d\n", MAXMEMINFO);
- max = MAXMEMINFO;
- }
if ((node = findnode(firstchild(findroot()), "memory")) == 0)
panic("makememarr: cannot find \"memory\" node");
- switch (which) {
-
- case MEMARR_AVAILPHYS:
- prop = "available";
- break;
-
- case MEMARR_TOTALPHYS:
- prop = "reg";
- break;
-
- default:
- panic("makememarr");
- }
- len = getprop(node, prop, (void *)v2rmi, sizeof v2rmi) /
- sizeof(struct v2rmi);
- for (i = 0; i < len; i++) {
- if (i >= max)
- goto overflow;
- ap->addr = v2rmi[i].addr;
- ap->len = v2rmi[i].len;
- ap++;
+ prop = which == MEMARR_AVAILPHYS ? "available" : "reg";
+ n = getproplen(node, prop) / sizeof(struct memarr);
+ if (ap != NULL) {
+ if (getprop(node, prop, ap,
+ xmax * sizeof(struct memarr)) <= 0)
+ panic("makememarr: cannot get property");
}
break;
}
- /*
- * Success! (Hooray)
- */
- if (i == 0)
+ if (n <= 0)
panic("makememarr: no memory found");
- return (i);
-
-overflow:
- /*
- * Oops, there are more things in the PROM than our caller
- * provided space for. Truncate any extras.
- */
- printf("makememarr: WARNING: lost some memory\n");
- return (i);
-#endif
+ return (n);
+#endif /* SUN4C || SUN4M */
}
/*
diff --git a/sys/arch/sparc/sparc/db_interface.c b/sys/arch/sparc/sparc/db_interface.c
index 1f843f749ea..dd0ced0f574 100644
--- a/sys/arch/sparc/sparc/db_interface.c
+++ b/sys/arch/sparc/sparc/db_interface.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: db_interface.c,v 1.12 2005/04/19 21:30:20 miod Exp $ */
+/* $OpenBSD: db_interface.c,v 1.13 2010/06/29 21:28:11 miod Exp $ */
/* $NetBSD: db_interface.c,v 1.18 1997/09/01 00:16:31 pk Exp $ */
/*
@@ -84,12 +84,12 @@ db_write_bytes(addr, size, data)
size_t size;
char *data;
{
- extern char etext[];
+ extern char __data_start[];
char *dst;
dst = (char *)addr;
while (size-- > 0) {
- if ((dst >= (char *)VM_MIN_KERNEL_ADDRESS) && (dst < etext))
+ if (dst >= (char *)VM_MIN_KERNEL_ADDRESS && dst < __data_start)
pmap_writetext(dst, *data);
else
*dst = *data;
diff --git a/sys/arch/sparc/sparc/locore.s b/sys/arch/sparc/sparc/locore.s
index 5e08de6adc8..2f25726fb26 100644
--- a/sys/arch/sparc/sparc/locore.s
+++ b/sys/arch/sparc/sparc/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.85 2010/06/17 16:11:19 miod Exp $ */
+/* $OpenBSD: locore.s,v 1.86 2010/06/29 21:28:11 miod Exp $ */
/* $NetBSD: locore.s,v 1.73 1997/09/13 20:36:48 pk Exp $ */
/*
@@ -236,6 +236,10 @@ _C_LABEL(trapbase):
sun4m_notsup:
.asciz "cr .( OpenBSD/sparc: this kernel does not support the sun4m) cr"
#endif
+#if !defined(SUN4D)
+sun4d_notsup:
+ .asciz "cr .( OpenBSD/sparc: this kernel does not support the sun4d) cr"
+#endif
#if !defined(SUN4C)
sun4c_notsup:
.asciz "cr .( OpenBSD/sparc: this kernel does not support the sun4c) cr"
@@ -2311,7 +2315,7 @@ return_from_syscall:
* interrupt request can come in while we're in the handler. If
* the handler deals with everything for both the original & the
* new request, we'll erroneously report a stray interrupt when
- * we take the software interrupt for the new request.
+ * we take the software interrupt for the new request).
*
* Inputs:
* %l0 = %psr
@@ -3363,12 +3367,30 @@ dostart:
/*
* Startup.
*
- * We have been loaded in low RAM, at some address which
- * is page aligned (0x4000 actually) rather than where we
- * want to run (KERNBASE+0x4000). Until we get everything set,
+ * We may have been loaded in low RAM, at some address which
+ * is page aligned (PROM_LOADADDR actually) rather than where we
+ * want to run (KERNBASE+PROM_LOADADDR). Until we get everything set,
* we have to be sure to use only pc-relative addressing.
*/
+ /*
+ * Find out if the above is the case.
+ */
+0: call 1f
+ sethi %hi(0b), %l0 ! %l0 = virtual address of 0:
+1: or %l0, %lo(0b), %l0
+ sub %l0, %o7, %l7 ! subtract actual physical address of 0:
+
+ /*
+ * If we're already running at our desired virtual load address,
+ * %l7 will be set to 0, otherwise it will be KERNBASE.
+ * From now on until the end of locore bootstrap code, %l7 will
+ * be used to relocate memory references.
+ */
+#define RELOCATE(l,r) \
+ set l, r; \
+ sub r, %l7, r
+
#if defined(DDB) || NKSYMS > 0
/*
* First, check for DDB arguments. The loader passes `_esym' in %o4.
@@ -3403,8 +3425,9 @@ dostart:
tst %o4 ! do we have the symbols?
bz 2f
sub %o4, %l4, %o4 ! apply compat correction
- sethi %hi(_C_LABEL(esym) - KERNBASE), %l3 ! store _esym
- st %o4, [%l3 + %lo(_C_LABEL(esym) - KERNBASE)]
+
+ RELOCATE(_C_LABEL(esym), %l3)
+ st %o4, [%l3] ! store _esym
2:
#endif
/*
@@ -3420,31 +3443,34 @@ dostart:
mov %o0, %g7 ! save prom vector pointer
/*
- * are we on a sun4c or a sun4m?
+ * are we on a sun4c, sun4d or a sun4m?
*/
ld [%g7 + PV_NODEOPS], %o4 ! node = pv->pv_nodeops->no_nextnode(0)
ld [%o4 + NO_NEXTNODE], %o4
call %o4
mov 0, %o0 ! node
- mov %o0, %l0
- set _C_LABEL(cputypvar)-KERNBASE, %o1 ! name = "compatible"
- set _C_LABEL(cputypval)-KERNBASE, %o2 ! buffer ptr (assume buffer long enough)
+ !mov %o0, %l0
+ RELOCATE(_C_LABEL(cputypvar), %o1) ! name = "compatible"
+ RELOCATE(_C_LABEL(cputypval), %l2) ! buffer ptr (assume buffer long enough)
ld [%g7 + PV_NODEOPS], %o4 ! (void)pv->pv_nodeops->no_getprop(...)
ld [%o4 + NO_GETPROP], %o4
call %o4
- nop
- set _C_LABEL(cputypval)-KERNBASE, %o2 ! buffer ptr
- ldub [%o2 + 4], %o0 ! which is it... "sun4c", "sun4m", "sun4d"?
+ mov %l2, %o2
+ !RELOCATE(_C_LABEL(cputypval), %l2) ! buffer ptr
+ ldub [%l2 + 4], %o0 ! which is it... "sun4c", "sun4m", "sun4d"?
cmp %o0, 'c'
be is_sun4c
nop
+ cmp %o0, 'd'
+ be is_sun4d
+ nop
cmp %o0, 'm'
be is_sun4m
nop
#endif /* SUN4C || SUN4M */
- ! ``on a sun4d?! hell no!''
+ ! ``on a sun4e or sun4u? hell no!''
ld [%g7 + PV_HALT], %o1 ! by this kernel, then halt
call %o1
nop
@@ -3456,7 +3482,7 @@ is_sun4m:
b start_havetype
mov CPU_SUN4M, %g4
#else
- set sun4m_notsup-KERNBASE, %o0
+ RELOCATE(sun4m_notsup, %o0)
ld [%g7 + PV_EVAL], %o1
call %o1 ! print a message saying that the
nop ! sun4m architecture is not supported
@@ -3465,6 +3491,22 @@ is_sun4m:
nop
/*NOTREACHED*/
#endif
+is_sun4d:
+#if defined(SUN4D)
+ set trapbase_sun4m, %g6
+ mov SUN4CM_PGSHIFT, %g5
+ b start_havetype
+ mov CPU_SUN4D, %g4
+#else
+ RELOCATE(sun4d_notsup, %o0)
+ ld [%g7 + PV_EVAL], %o1
+ call %o1 ! print a message saying that the
+ nop ! sun4d architecture is not supported
+ ld [%g7 + PV_HALT], %o1 ! by this kernel, then halt
+ call %o1
+ nop
+ /*NOTREACHED*/
+#endif
is_sun4c:
#if defined(SUN4C)
set trapbase_sun4c, %g6
@@ -3476,7 +3518,7 @@ is_sun4c:
b start_havetype
mov CPU_SUN4C, %g4 ! XXX CPU_SUN4
#else
- set sun4c_notsup-KERNBASE, %o0
+ RELOCATE(sun4c_notsup, %o0)
ld [%g7 + PV_ROMVEC_VERS], %o1
cmp %o1, 0
@@ -3486,6 +3528,7 @@ is_sun4c:
! stupid version 0 rom interface is pv_eval(int length, char *string)
mov %o0, %o1
2: ldub [%o0], %o4
+ tst %o4
bne 2b
inc %o0
dec %o0
@@ -3512,7 +3555,7 @@ is_sun4:
#else
set PROM_BASE, %g7
- set sun4_notsup-KERNBASE, %o0
+ RELOCATE(sun4_notsup, %o0)
ld [%g7 + OLDMON_PRINTF], %o1
call %o1 ! print a message saying that the
nop ! sun4 architecture is not supported
@@ -3523,6 +3566,9 @@ is_sun4:
#endif
start_havetype:
+ cmp %l7, 0
+ be startmap_done
+
/*
* Step 1: double map low RAM (addresses [0.._end-start-1])
* to KERNBASE (addresses [KERNBASE.._end-1]). None of these
@@ -3563,29 +3609,10 @@ start_havetype:
cmp %l1, %l2 ! done?
blu 0b ! no, loop
add %l3, %l0, %l0 ! (and lowva += segsz)
-
-#if 0 /* moved to autoconf */
- /*
- * Now map the interrupt enable register and clear any interrupts,
- * enabling NMIs. Note that we will not take NMIs until we change
- * %tbr.
- */
- set IE_reg_addr, %l0
-
- set IE_REG_PTE_PG, %l1
- set INT_ENABLE_REG_PHYSADR, %l2
- srl %l2, %g5, %l2
- or %l2, %l1, %l1
-
- sta %l1, [%l0] ASI_PTE
- mov IE_ALLIE, %l1
- nop; nop ! paranoia
- stb %l1, [%l0]
-#endif
- b startmap_done
- nop
+ b,a startmap_done
1:
#endif /* SUN4C */
+
#if defined(SUN4)
cmp %g4, CPU_SUN4
bne 2f
@@ -3594,14 +3621,26 @@ start_havetype:
lduba [%l3] ASI_CONTROL, %l3
cmp %l3, 0x24 ! XXX - SUN4_400
bne no_3mmu
+ nop
+
+ /*
+ * Three-level sun4 MMU.
+ * Double-map by duplicating a single region entry (which covers
+ * 16MB) corresponding to the kernel's virtual load address.
+ */
add %l0, 2, %l0 ! get to proper half-word in RG space
add %l1, 2, %l1
lduha [%l0] ASI_REGMAP, %l4 ! regmap[highva] = regmap[lowva];
stha %l4, [%l1] ASI_REGMAP
- b,a remap_done
-
+ b,a startmap_done
no_3mmu:
#endif
+
+ /*
+ * Two-level sun4 MMU.
+ * Double-map by duplicating the required number of segment
+ * entries corresponding to the kernel's virtual load address.
+ */
set 1 << 18, %l3 ! segment size in bytes
0:
lduha [%l0] ASI_SEGMAP, %l4 ! segmap[highva] = segmap[lowva];
@@ -3610,27 +3649,6 @@ no_3mmu:
cmp %l1, %l2 ! done?
blu 0b ! no, loop
add %l3, %l0, %l0 ! (and lowva += segsz)
-
-remap_done:
-
-#if 0 /* moved to autoconf */
- /*
- * Now map the interrupt enable register and clear any interrupts,
- * enabling NMIs. Note that we will not take NMIs until we change
- * %tbr.
- */
- set IE_reg_addr, %l0
-
- set IE_REG_PTE_PG, %l1
- set INT_ENABLE_REG_PHYSADR, %l2
- srl %l2, %g5, %l2
- or %l2, %l1, %l1
-
- sta %l1, [%l0] ASI_PTE
- mov IE_ALLIE, %l1
- nop; nop ! paranoia
- stb %l1, [%l0]
-#endif
b,a startmap_done
2:
#endif /* SUN4 */
diff --git a/sys/arch/sparc/sparc/machdep.c b/sys/arch/sparc/sparc/machdep.c
index ead377a6ad5..f8edd86ae7f 100644
--- a/sys/arch/sparc/sparc/machdep.c
+++ b/sys/arch/sparc/sparc/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.127 2010/06/29 21:26:12 miod Exp $ */
+/* $OpenBSD: machdep.c,v 1.128 2010/06/29 21:28:11 miod Exp $ */
/* $NetBSD: machdep.c,v 1.85 1997/09/12 08:55:02 pk Exp $ */
/*
@@ -149,6 +149,7 @@ cpu_startup()
int opmapdebug = pmapdebug;
#endif
vaddr_t minaddr, maxaddr;
+ paddr_t msgbufpa;
extern struct user *proc0paddr;
#ifdef DEBUG
@@ -162,9 +163,22 @@ cpu_startup()
}
/*
- * fix message buffer mapping, note phys addr of msgbuf is 0
+ * Re-map the message buffer from its temporary address
+ * at KERNBASE to MSGBUF_VA.
*/
- pmap_map(MSGBUF_VA, 0, MSGBUFSIZE, VM_PROT_READ|VM_PROT_WRITE);
+
+ /* Get physical address of the message buffer */
+ pmap_extract(pmap_kernel(), (vaddr_t)KERNBASE, &msgbufpa);
+
+ /* Invalidate the current mapping at KERNBASE. */
+ pmap_kremove((vaddr_t)KERNBASE, PAGE_SIZE);
+ pmap_update(pmap_kernel());
+
+ /* Enter the new mapping */
+ pmap_map(MSGBUF_VA, msgbufpa, msgbufpa + PAGE_SIZE,
+ VM_PROT_READ | VM_PROT_WRITE);
+
+ /* Re-initialize the message buffer. */
initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)), MSGBUFSIZE);
proc0.p_addr = proc0paddr;
@@ -697,7 +711,7 @@ dumpsys()
int error = 0;
struct memarr *mp;
int nmem;
- extern struct memarr pmemarr[];
+ extern struct memarr *pmemarr;
extern int npmemarr;
/* copy registers to memory */
@@ -738,7 +752,7 @@ dumpsys()
printf("memory ");
for (mp = pmemarr, nmem = npmemarr; --nmem >= 0 && error == 0; mp++) {
unsigned i = 0, n;
- unsigned maddr = mp->addr;
+ unsigned maddr = mp->addr_lo;
/* XXX - what's so special about PA 0 that we can't dump it? */
if (maddr == 0) {
diff --git a/sys/arch/sparc/sparc/pmap.c b/sys/arch/sparc/sparc/pmap.c
index 3bd7db6b5cc..5b7418978fd 100644
--- a/sys/arch/sparc/sparc/pmap.c
+++ b/sys/arch/sparc/sparc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.154 2010/06/17 16:11:19 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.155 2010/06/29 21:28:11 miod Exp $ */
/* $NetBSD: pmap.c,v 1.118 1998/05/19 19:00:18 thorpej Exp $ */
/*
@@ -371,21 +371,17 @@ u_int *kernel_segtable_store; /* 2k of storage to map the kernel */
u_int *kernel_pagtable_store; /* 128k of storage to map the kernel */
#endif
+struct memarr *pmemarr; /* physical memory regions */
+int npmemarr; /* number of entries in pmemarr */
+
+vaddr_t avail_start; /* first available physical page */
vaddr_t virtual_avail; /* first free virtual page number */
vaddr_t virtual_end; /* last free virtual page number */
paddr_t phys_avail; /* first free physical page
XXX - pmap_pa_exists needs this */
vaddr_t pagetables_start, pagetables_end;
-/*
- * XXX - these have to be global for dumpsys()
- */
-#define MA_SIZE 32 /* size of memory descriptor arrays */
-struct memarr pmemarr[MA_SIZE];/* physical memory regions */
-int npmemarr; /* number of entries in pmemarr */
-
-static void pmap_page_upload(paddr_t);
-void pmap_pinit(pmap_t);
+static void pmap_page_upload(void);
void pmap_release(pmap_t);
#if defined(SUN4) || defined(SUN4C)
@@ -744,7 +740,7 @@ sparc_protection_init4m(void)
} while (0)
-static void sortm(struct memarr *, int);
+static void get_phys_mem(void **);
void ctx_alloc(struct pmap *);
void ctx_free(struct pmap *);
void pg_flushcache(struct vm_page *);
@@ -755,40 +751,73 @@ void pm_check_u(char *, struct pmap *);
#endif
/*
- * Sort a memory array by address.
+ * During the PMAP bootstrap, we can use a simple translation to map a
+ * kernel virtual address to a physical memory address (this is arranged
+ * in locore). Usually, KERNBASE maps to physical address 0. This is always
+ * the case on sun4 and sun4c machines (unless the kernel is too large to fit
+ * under the second stage bootloader in memory). On sun4m machines, if no
+ * memory is installed in the bank corresponding to physical address 0, or
+ * again if the kernel is large, the boot blocks may elect to load us at
+ * some other address, presumably at the start of the first memory bank that
+ * is large enough to hold the kernel image. We set up the variable
+ * `va2pa_offset' to hold the physical address corresponding to KERNBASE.
*/
-static void
-sortm(mp, n)
- struct memarr *mp;
- int n;
+
+static u_long va2pa_offset;
+#define PMAP_BOOTSTRAP_VA2PA(v) ((paddr_t)((u_long)(v) - va2pa_offset))
+#define PMAP_BOOTSTRAP_PA2VA(p) ((vaddr_t)((u_long)(p) + va2pa_offset))
+
+/*
+ * Grab physical memory list.
+ * While here, compute `physmem'.
+ */
+void
+get_phys_mem(void **top)
{
- struct memarr *mpj;
- int i, j;
- paddr_t addr;
- psize_t len;
-
- /* Insertion sort. This is O(n^2), but so what? */
- for (i = 1; i < n; i++) {
- /* save i'th entry */
- addr = mp[i].addr;
- len = mp[i].len;
- /* find j such that i'th entry goes before j'th */
- for (j = 0, mpj = mp; j < i; j++, mpj++)
- if (addr < mpj->addr)
- break;
- /* slide up any additional entries */
- ovbcopy(mpj, mpj + 1, (i - j) * sizeof(*mp));
- mpj->addr = addr;
- mpj->len = len;
+ struct memarr *mp;
+ char *p;
+ int i;
+
+ /* Load the memory descriptor array at the current kernel top */
+ p = (void *)ALIGN(*top);
+ pmemarr = (struct memarr *)p;
+ npmemarr = makememarr(pmemarr, 1000, MEMARR_AVAILPHYS);
+
+ /* Update kernel top */
+ p += npmemarr * sizeof(struct memarr);
+ *top = p;
+
+ for (physmem = 0, mp = pmemarr, i = npmemarr; --i >= 0; mp++) {
+#ifdef SUN4D
+ if (CPU_ISSUN4D) {
+ /*
+ * XXX Limit ourselves to 2GB of physical memory
+ * XXX for now.
+ */
+ uint32_t addr, len;
+ int skip = 0;
+
+ addr = mp->addr_lo;
+ len = mp->len;
+ if (mp->addr_hi != 0 || addr >= 0x80000000)
+ skip = 1;
+ else {
+ if (len >= 0x80000000)
+ len = 0x80000000;
+ if (addr + len > 0x80000000)
+ len = 0x80000000 - addr;
+ }
+ if (skip)
+ len = 0; /* disable this entry */
+ mp->len = len;
+ }
+#endif
+ physmem += atop(mp->len);
}
}
/*
- * For our convenience, vm_page.c implements:
- * vm_bootstrap_steal_memory()
- * using the functions:
- * pmap_virtual_space(), pmap_free_pages(), pmap_next_page(),
- * which are much simpler to implement.
+ * Support functions for vm_page_bootstrap();
*/
/*
@@ -808,44 +837,75 @@ pmap_virtual_space(v_start, v_end)
* Helper routine that hands off available physical pages to the VM system.
*/
static void
-pmap_page_upload(first_pa)
- paddr_t first_pa;
+pmap_page_upload(void)
{
- int n = 0;
- paddr_t start, end;
+ int n;
+ paddr_t start, end;
- phys_avail = first_pa;
-
- npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
- sortm(pmemarr, npmemarr);
+ for (n = 0; n < npmemarr; n++) {
+ start = pmemarr[n].addr_lo;
+ end = start + pmemarr[n].len;
- if (pmemarr[0].addr != 0)
- panic("pmap_page_upload: no memory?");
+ /*
+ * Exclude any memory allocated for the kernel as computed
+ * by pmap_bootstrap(), i.e. the range
+ * [KERNBASE_PA, avail_start>.
+ */
+ if (start < PMAP_BOOTSTRAP_VA2PA(KERNBASE)) {
+ /*
+ * This segment starts below the kernel load address.
+ * Chop it off at the start of the kernel.
+ */
+ paddr_t chop = PMAP_BOOTSTRAP_VA2PA(KERNBASE);
- /*
- * Compute physmem
- */
- physmem = 0;
- for (n = 0; n < npmemarr; n++)
- physmem += atop(pmemarr[n].len);
+ if (end < chop)
+ chop = end;
+#ifdef DEBUG
+ prom_printf("bootstrap gap: start %lx, chop %lx, end %lx\n",
+ start, chop, end);
+#endif
+ uvm_page_physload(atop(start), atop(chop),
+ atop(start), atop(chop), VM_FREELIST_DEFAULT);
- for (n = 0; n < npmemarr; n++) {
- start = (first_pa > pmemarr[n].addr) ? first_pa :
- pmemarr[n].addr;
- end = pmemarr[n].addr + pmemarr[n].len;
- if (start >= end)
+ /*
+ * Adjust the start address to reflect the
+ * uploaded portion of this segment.
+ */
+ start = chop;
+ }
+
+ /* Skip the current kernel address range */
+ if (start <= avail_start && avail_start < end)
+ start = avail_start;
+
+ if (start == end)
continue;
+ /* Upload (the rest of) this segment */
uvm_page_physload(atop(start), atop(end),
- atop(start), atop(end), VM_FREELIST_DEFAULT);
+ atop(start), atop(end), VM_FREELIST_DEFAULT);
}
}
+/*
+ * This routine is used by mmrw() to validate access to `/dev/mem'.
+ */
int
-pmap_pa_exists(pa)
- paddr_t pa;
+pmap_pa_exists(paddr_t pa)
{
- return (pa < phys_avail || (pvhead(atop(pa)) != NULL));
+ int nmem;
+ struct memarr *mp;
+
+ for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
+#ifdef SUN4D
+ if (mp->len == 0)
+ continue;
+#endif
+ if (pa >= mp->addr_lo && pa < mp->addr_lo + mp->len)
+ return 1;
+ }
+
+ return 0;
}
/* update pv_flags given a valid pte */
@@ -2575,10 +2635,10 @@ int nptesg;
#endif
#if defined(SUN4M)
-static void pmap_bootstrap4m(void);
+static void pmap_bootstrap4m(void *);
#endif
#if defined(SUN4) || defined(SUN4C)
-static void pmap_bootstrap4_4c(int, int, int);
+static void pmap_bootstrap4_4c(void *, int, int, int);
#endif
/*
@@ -2589,9 +2649,13 @@ static void pmap_bootstrap4_4c(int, int, int);
* nctx is the number of contexts.
*/
void
-pmap_bootstrap(nctx, nregion, nsegment)
- int nsegment, nctx, nregion;
+pmap_bootstrap(int nctx, int nregion, int nsegment)
{
+ void *p;
+ extern char end[];
+#ifdef DDB
+ extern char *esym;
+#endif
extern int nbpg; /* locore.s */
uvmexp.pagesize = nbpg;
@@ -2602,46 +2666,58 @@ pmap_bootstrap(nctx, nregion, nsegment)
nptesg = (NBPSG >> uvmexp.pageshift);
#endif
-#if 0
- ncontext = nctx;
+ /*
+ * Grab physical memory list.
+ */
+ p = end;
+#ifdef DDB
+ if (esym != 0)
+ p = esym;
#endif
+ get_phys_mem(&p);
-#if defined(SUN4M)
if (CPU_ISSUN4M) {
- pmap_bootstrap4m();
- return;
- }
+#if defined(SUN4M)
+ pmap_bootstrap4m(p);
#endif
+ } else if (CPU_ISSUN4OR4C) {
#if defined(SUN4) || defined(SUN4C)
- if (CPU_ISSUN4OR4C) {
- pmap_bootstrap4_4c(nctx, nregion, nsegment);
- return;
- }
+ pmap_bootstrap4_4c(p, nctx, nregion, nsegment);
#endif
+ }
+
+ pmap_page_upload();
}
#if defined(SUN4) || defined(SUN4C)
void
-pmap_bootstrap4_4c(nctx, nregion, nsegment)
- int nsegment, nctx, nregion;
+pmap_bootstrap4_4c(void *top, int nctx, int nregion, int nsegment)
{
union ctxinfo *ci;
struct mmuentry *mmuseg;
#if defined(SUN4_MMU3L)
struct mmuentry *mmureg;
#endif
- struct regmap *rp;
+ struct regmap *rp;
int i, j;
int npte, zseg, vr, vs;
- int rcookie, scookie;
+ int startscookie, scookie;
+#if defined(SUN4_MMU3L)
+ int startrcookie, rcookie;
+#endif
caddr_t p;
+ vaddr_t va;
void (*rom_setmap)(int ctx, caddr_t va, int pmeg);
int lastpage;
- paddr_t avail_start;
- extern char end[];
-#ifdef DDB
- extern char *esym;
-#endif
+ extern char kernel_text[];
+
+ /*
+ * Compute `va2pa_offset'.
+ * Use `kernel_text' to probe the MMU translation since
+ * the pages at KERNBASE might not be mapped.
+ */
+ va2pa_offset = (vaddr_t)kernel_text -
+ ((getpte4(kernel_text) & PG_PFNUM) << PGSHIFT);
switch (cputyp) {
case CPU_SUN4C:
@@ -2735,17 +2811,13 @@ pmap_bootstrap4_4c(nctx, nregion, nsegment)
/*
* Allocate and clear mmu entries and context structures.
*/
- p = end;
-#ifdef DDB
- if (esym != 0)
- p = esym;
-#endif
+ p = top;
#if defined(SUN4_MMU3L)
- mmuregions = mmureg = (struct mmuentry *)p;
+ mmuregions = (struct mmuentry *)p;
p += nregion * sizeof(struct mmuentry);
bzero(mmuregions, nregion * sizeof(struct mmuentry));
#endif
- mmusegments = mmuseg = (struct mmuentry *)p;
+ mmusegments = (struct mmuentry *)p;
p += nsegment * sizeof(struct mmuentry);
bzero(mmusegments, nsegment * sizeof(struct mmuentry));
@@ -2772,7 +2844,7 @@ pmap_bootstrap4_4c(nctx, nregion, nsegment)
* for dumpsys(), all with no associated physical memory.
*/
p = (caddr_t)round_page((vaddr_t)p);
- avail_start = (paddr_t)p - KERNBASE;
+ avail_start = PMAP_BOOTSTRAP_VA2PA(p);
i = (int)p;
vpage[0] = p, p += NBPG;
@@ -2804,8 +2876,8 @@ pmap_bootstrap4_4c(nctx, nregion, nsegment)
*
* All the other MMU entries are free.
*
- * THIS ASSUMES SEGMENT i IS MAPPED BY MMU ENTRY i DURING THE
- * BOOT PROCESS
+ * THIS ASSUMES THE KERNEL IS MAPPED BY A CONTIGUOUS RANGE OF
+ * MMU SEGMENTS/REGIONS DURING THE BOOT PROCESS
*/
rom_setmap = promvec->pv_setctxt;
@@ -2824,7 +2896,17 @@ pmap_bootstrap4_4c(nctx, nregion, nsegment)
vr = VA_VREG(VM_MIN_KERNEL_ADDRESS); /* first virtual region */
rp = &pmap_kernel()->pm_regmap[vr];
- for (rcookie = 0, scookie = 0;;) {
+ /* Get region/segment where kernel addresses start */
+#if defined(SUN4_MMU3L)
+ if (HASSUN4_MMU3L)
+ startrcookie = rcookie = getregmap(p);
+ mmureg = &mmuregions[rcookie];
+#endif
+ startscookie = scookie = getsegmap(p);
+ mmuseg = &mmusegments[scookie];
+ zseg += scookie; /* First free segment */
+
+ for (;;) {
/*
* Distribute each kernel region/segment into all contexts.
@@ -2901,20 +2983,50 @@ pmap_bootstrap4_4c(nctx, nregion, nsegment)
*/
for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG)
setsegmap(p, seginval);
- }
+
+ /*
+ * Unmap any kernel regions that we aren't using.
+ */
+ for (i = 0; i < nctx; i++) {
+ setcontext4(i);
+ for (va = (vaddr_t)p;
+ va < (OPENPROM_STARTVADDR & ~(NBPRG - 1));
+ va += NBPRG)
+ setregmap(va, reginval);
+ }
+ } else
#endif
+ {
+ /*
+ * Unmap any kernel regions that we aren't using.
+ */
+ for (i = 0; i < nctx; i++) {
+ setcontext4(i);
+ for (va = (vaddr_t)p;
+ va < (OPENPROM_STARTVADDR & ~(NBPSG - 1));
+ va += NBPSG)
+ setsegmap(va, seginval);
+ }
+ }
break;
}
#if defined(SUN4_MMU3L)
if (HASSUN4_MMU3L)
- for (; rcookie < nregion; rcookie++, mmureg++) {
+ for (rcookie = 0; rcookie < nregion; rcookie++) {
+ if (rcookie == startrcookie)
+ /* Kernel must fit in one region! */
+ rcookie++;
+ mmureg = &mmuregions[rcookie];
mmureg->me_cookie = rcookie;
TAILQ_INSERT_TAIL(&region_freelist, mmureg, me_list);
}
#endif
- for (; scookie < nsegment; scookie++, mmuseg++) {
+ for (scookie = 0; scookie < nsegment; scookie++) {
+ if (scookie == startscookie)
+ scookie = zseg;
+ mmuseg = &mmusegments[scookie];
mmuseg->me_cookie = scookie;
TAILQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list);
pmap_stats.ps_npmeg_free++;
@@ -2943,18 +3055,21 @@ pmap_bootstrap4_4c(nctx, nregion, nsegment)
* set red zone at kernel base; enable cache on message buffer.
*/
{
- extern char etext[];
-#ifdef KGDB
- int mask = ~PG_NC; /* XXX chgkprot is busted */
-#else
- int mask = ~(PG_W | PG_NC);
-#endif
+ extern char __data_start[];
+ caddr_t sdata = (caddr_t)trunc_page((vaddr_t)__data_start);
- for (p = (caddr_t)trapbase; p < etext; p += NBPG)
- setpte4(p, getpte4(p) & mask);
- }
+ /* Enable cache on message buffer */
+ for (p = (caddr_t)KERNBASE; p < (caddr_t)trapbase; p += NBPG)
+ setpte4(p, getpte4(p) & ~PG_NC);
+
+ /* Enable cache and write protect kernel text and rodata */
+ for (; p < sdata; p += NBPG)
+ setpte4(p, getpte4(p) & ~(PG_W | PG_NC));
- pmap_page_upload(avail_start);
+ /* Enable cache on data & bss */
+ for (; p < (caddr_t)virtual_avail; p += NBPG)
+ setpte4(p, getpte4(p) & ~PG_NC);
+ }
}
#endif
@@ -2965,7 +3080,7 @@ pmap_bootstrap4_4c(nctx, nregion, nsegment)
* Switches from ROM to kernel page tables, and sets up initial mappings.
*/
static void
-pmap_bootstrap4m(void)
+pmap_bootstrap4m(void *top)
{
int i, j;
caddr_t p;
@@ -2973,13 +3088,16 @@ pmap_bootstrap4m(void)
union ctxinfo *ci;
int reg, seg;
unsigned int ctxtblsize;
- paddr_t avail_start;
- extern char end[];
- extern char etext[];
+ paddr_t pagetables_start_pa;
+ extern char kernel_text[];
extern caddr_t reserve_dumppages(caddr_t);
-#ifdef DDB
- extern char *esym;
-#endif
+
+ /*
+ * Compute `va2pa_offset'.
+ * Use `kernel_text' to probe the MMU translation since
+ * the pages at KERNBASE might not be mapped.
+ */
+ va2pa_offset = (vaddr_t)kernel_text - VA2PA(kernel_text);
#if defined(SUN4) || defined(SUN4C) /* setup 4M fn. ptrs for dual-arch kernel */
pmap_clear_modify_p = pmap_clear_modify4m;
@@ -3024,11 +3142,8 @@ pmap_bootstrap4m(void)
kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL;
}
- p = end; /* p points to top of kernel mem */
-#ifdef DDB
- if (esym != 0)
- p = esym;
-#endif
+ p = top; /* p points to top of kernel mem */
+ p = (caddr_t)round_page((vaddr_t)p);
/* Allocate context administration */
pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
@@ -3054,6 +3169,8 @@ pmap_bootstrap4m(void)
* alignment restrictions.
*/
pagetables_start = (vaddr_t)p;
+ pagetables_start_pa = PMAP_BOOTSTRAP_VA2PA(p);
+
/*
* Allocate context table.
* To keep supersparc happy, minimum alignment is on a 4K boundary.
@@ -3077,26 +3194,23 @@ pmap_bootstrap4m(void)
p = (caddr_t) roundup((u_int)p, SRMMU_L1SIZE * sizeof(long));
kernel_regtable_store = (u_int *)p;
p += SRMMU_L1SIZE * sizeof(long);
- bzero(kernel_regtable_store,
- p - (caddr_t) kernel_regtable_store);
+ bzero(kernel_regtable_store, p - (caddr_t)kernel_regtable_store);
p = (caddr_t) roundup((u_int)p, SRMMU_L2SIZE * sizeof(long));
kernel_segtable_store = (u_int *)p;
p += (SRMMU_L2SIZE * sizeof(long)) * NKREG;
- bzero(kernel_segtable_store,
- p - (caddr_t) kernel_segtable_store);
+ bzero(kernel_segtable_store, p - (caddr_t)kernel_segtable_store);
p = (caddr_t) roundup((u_int)p, SRMMU_L3SIZE * sizeof(long));
kernel_pagtable_store = (u_int *)p;
p += ((SRMMU_L3SIZE * sizeof(long)) * NKREG) * NSEGRG;
- bzero(kernel_pagtable_store,
- p - (caddr_t) kernel_pagtable_store);
+ bzero(kernel_pagtable_store, p - (caddr_t)kernel_pagtable_store);
/* Round to next page and mark end of stolen pages */
p = (caddr_t)round_page((vaddr_t)p);
pagetables_end = (vaddr_t)p;
- avail_start = (paddr_t)p - KERNBASE;
+ avail_start = PMAP_BOOTSTRAP_VA2PA(p);
/*
* Since we've statically allocated space to map the entire kernel,
@@ -3106,9 +3220,9 @@ pmap_bootstrap4m(void)
* XXX WHY DO WE HAVE THIS CACHING PROBLEM WITH L1/L2 PTPS????? %%%
*/
- pmap_kernel()->pm_reg_ptps = (int *) kernel_regtable_store;
+ pmap_kernel()->pm_reg_ptps = (int *)kernel_regtable_store;
pmap_kernel()->pm_reg_ptps_pa =
- VA2PA((caddr_t)pmap_kernel()->pm_reg_ptps);
+ PMAP_BOOTSTRAP_VA2PA(kernel_regtable_store);
/* Install L1 table in context 0 */
setpgt4m(&cpuinfo.ctx_tbl[0],
@@ -3131,15 +3245,11 @@ pmap_bootstrap4m(void)
&kernel_segtable_store[reg * SRMMU_L2SIZE];
setpgt4m(&pmap_kernel()->pm_reg_ptps[reg + VA_VREG(VM_MIN_KERNEL_ADDRESS)],
- (VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
+ (PMAP_BOOTSTRAP_VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) |
+ SRMMU_TEPTD);
rp->rg_seg_ptps = (int *)kphyssegtbl;
- if (rp->rg_segmap == NULL) {
- printf("rp->rg_segmap == NULL!\n");
- rp->rg_segmap = &kernel_segmap_store[reg * NSEGRG];
- }
-
for (seg = 0; seg < NSEGRG; seg++) {
struct segmap *sp;
caddr_t kphyspagtbl;
@@ -3152,7 +3262,7 @@ pmap_bootstrap4m(void)
[((reg * NSEGRG) + seg) * SRMMU_L3SIZE];
setpgt4m(&rp->rg_seg_ptps[seg],
- (VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
+ (PMAP_BOOTSTRAP_VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
SRMMU_TEPTD);
sp->sg_pte = (int *) kphyspagtbl;
}
@@ -3208,28 +3318,32 @@ pmap_bootstrap4m(void)
for (q = (caddr_t) VM_MIN_KERNEL_ADDRESS; q < p; q += NBPG) {
struct regmap *rp;
struct segmap *sp;
- int pte;
+ int pte, *ptep;
+ extern char __data_start[];
+ caddr_t sdata = (caddr_t)trunc_page((vaddr_t)__data_start);
/*
* Now install entry for current page.
*/
rp = &pmap_kernel()->pm_regmap[VA_VREG(q)];
sp = &rp->rg_segmap[VA_VSEG(q)];
- sp->sg_npte++;
+ ptep = &sp->sg_pte[VA_VPG(q)];
- pte = ((int)q - VM_MIN_KERNEL_ADDRESS) >> SRMMU_PPNPASHIFT;
+ pte = PMAP_BOOTSTRAP_VA2PA(q) >> SRMMU_PPNPASHIFT;
pte |= PPROT_N_RX | SRMMU_TEPTE;
+ /* Deal with the cacheable bit for pagetable memory */
if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 ||
q < (caddr_t)pagetables_start ||
q >= (caddr_t)pagetables_end)
pte |= SRMMU_PG_C;
/* write-protect kernel text */
- if (q < (caddr_t) trapbase || q >= etext)
+ if (q < (caddr_t)trapbase || q >= sdata)
pte |= PPROT_WRITE;
- setpgt4m(&sp->sg_pte[VA_VPG(q)], pte);
+ setpgt4m(ptep, pte);
+ sp->sg_npte++;
pmap_kernel()->pm_stats.resident_count++;
}
@@ -3250,7 +3364,7 @@ pmap_bootstrap4m(void)
if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
/* Flush page tables from cache */
pcache_flush((caddr_t)pagetables_start,
- (caddr_t)VA2PA((caddr_t)pagetables_start),
+ (caddr_t)pagetables_start_pa,
pagetables_end - pagetables_start);
/*
@@ -3258,7 +3372,6 @@ pmap_bootstrap4m(void)
*/
mmu_install_tables(&cpuinfo);
- pmap_page_upload(avail_start);
sparc_protection_init4m();
}
@@ -3525,7 +3638,6 @@ pmap_destroy(pm)
/*
* Release any resources held by the given physical map.
- * Called when a pmap initialized by pmap_pinit is being released.
*/
void
pmap_release(pm)
@@ -6465,7 +6577,7 @@ pmap_dumpmmu(dump, blkno)
EXPEDITE(&dummy, 4);
}
for (i = 0; i < npmemarr; i++) {
- memseg.start = pmemarr[i].addr;
+ memseg.start = pmemarr[i].addr_lo;
memseg.size = pmemarr[i].len;
EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
}
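
Finally, a self-contained sketch of what the reworked pmap_page_upload() does with those entries: hand each PROM-reported segment of available memory to the VM system while carving out the physical range occupied by the kernel image and its bootstrap allocations, i.e. [kernel load address, avail_start). upload() stands in for uvm_page_physload(), and the bank and address values are invented for the example.

#include <stdio.h>
#include <stdint.h>

struct seg { uint32_t start, len; };

static void
upload(uint32_t start, uint32_t end)
{
	printf("  upload [0x%x, 0x%x)\n", (unsigned)start, (unsigned)end);
}

/* Mirror of the new pmap_page_upload() loop, with invented names. */
static void
page_upload(const struct seg *segs, int nsegs,
    uint32_t kernbase_pa, uint32_t avail_start)
{
	int n;

	for (n = 0; n < nsegs; n++) {
		uint32_t start = segs[n].start;
		uint32_t end = start + segs[n].len;

		if (start < kernbase_pa) {
			/* Part of this segment lies below the kernel:
			 * upload it, chopped at the kernel load address. */
			uint32_t chop = end < kernbase_pa ? end : kernbase_pa;
			upload(start, chop);
			start = chop;
		}
		/* Skip the range the kernel itself occupies. */
		if (start <= avail_start && avail_start < end)
			start = avail_start;
		if (start != end)
			upload(start, end);
	}
}

int
main(void)
{
	/* One 64MB bank; kernel loaded at 4MB, bootstrap allocations end at 6MB. */
	struct seg banks[] = { { 0x00000000, 0x04000000 } };

	page_upload(banks, 1, 0x00400000, 0x00600000);
	return 0;
}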