summary | refs | log | tree | commit | diff
path: root/sys
diff options
context:
space:
mode:
author	Mike Larkin <mlarkin@cvs.openbsd.org>	2013-08-24 04:26:17 +0000
committer	Mike Larkin <mlarkin@cvs.openbsd.org>	2013-08-24 04:26:17 +0000
commit	398ef4b00f2ccad148d88165915f772c9440c1b8 (patch)
tree	603c4ba3c775e4229820bddb16110029cb502521 /sys
parent	f97c761004a7ec7eedf26d2ed6805c3de1b36abd (diff)
Cleanup amd64 and i386 MTRR code -
1. Makes amd64 and i386 MTRR code nearly identical
2. Removes support for per-process MTRRs (which were never implemented)
3. Treat "unknown" MTRR types as uncacheable instead of trying to preserve
   bogus settings made by the BIOS
4. Various KNF cleanups

Should be no functional change. ok jsg@, deraadt@
Diffstat (limited to 'sys')
-rw-r--r--sys/arch/amd64/amd64/amd64_mem.c262
-rw-r--r--sys/arch/amd64/amd64/mtrr.c6
-rw-r--r--sys/arch/amd64/include/specialreg.h43
-rw-r--r--sys/arch/i386/i386/i686_mem.c281
-rw-r--r--sys/arch/i386/i386/mtrr.c6
-rw-r--r--sys/arch/i386/include/specialreg.h79
-rw-r--r--sys/sys/memrange.h3
7 files changed, 314 insertions, 366 deletions
diff --git a/sys/arch/amd64/amd64/amd64_mem.c b/sys/arch/amd64/amd64/amd64_mem.c
index 94e7705a124..d709b9cbafe 100644
--- a/sys/arch/amd64/amd64/amd64_mem.c
+++ b/sys/arch/amd64/amd64/amd64_mem.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: amd64_mem.c,v 1.6 2012/08/01 15:44:14 mikeb Exp $ */
-/*-
+/* $OpenBSD: amd64_mem.c,v 1.7 2013/08/24 04:26:15 mlarkin Exp $ */
+/*
* Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
* All rights reserved.
*
@@ -38,15 +38,13 @@
#include <machine/specialreg.h>
/*
- * AMD64 memory range operations
- *
- * This code implements a set of MSRs known as MTRR which are defined in
- * AMD64 Arch Programming Manual Vol2, section 7.7
+ * This code implements a set of MSRs known as MTRR which define caching
+ * modes/behavior for various memory ranges.
*/
char *mem_owner_bios = "BIOS";
-#define MRAMD64_FIXMTRR (1<<0)
+#define MR_FIXMTRR (1<<0)
#define mrwithin(mr, a) \
(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
@@ -59,43 +57,46 @@ char *mem_owner_bios = "BIOS";
powerof2((len)) && /* ... and power of two */ \
!((base) & ((len) - 1))) /* range is not discontiuous */
-#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))
+#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | \
+ ((new) & MDF_ATTRMASK))
-void amd64_mrinit(struct mem_range_softc *sc);
-int amd64_mrset(struct mem_range_softc *sc,
+#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + \
+ (MTRR_N4K * 0x1000))
+
+void mrinit(struct mem_range_softc *sc);
+int mrset(struct mem_range_softc *sc,
struct mem_range_desc *mrd, int *arg);
-void amd64_mrinit_cpu(struct mem_range_softc *sc);
-void amd64_mrreload_cpu(struct mem_range_softc *sc);
-
-struct mem_range_ops amd64_mrops = {
- amd64_mrinit,
- amd64_mrset,
- amd64_mrinit_cpu,
- amd64_mrreload_cpu
+void mrinit_cpu(struct mem_range_softc *sc);
+void mrreload_cpu(struct mem_range_softc *sc);
+
+struct mem_range_ops mrops = {
+ mrinit,
+ mrset,
+ mrinit_cpu,
+ mrreload_cpu
};
-/* XXX for AP startup hook */
u_int64_t mtrrcap, mtrrdef;
u_int64_t mtrrmask = 0x0000000ffffff000ULL;
struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
struct mem_range_desc *mrd);
-void amd64_mrfetch(struct mem_range_softc *sc);
-int amd64_mtrrtype(u_int64_t flags);
-int amd64_mrt2mtrr(u_int64_t flags, int oldval);
-int amd64_mtrr2mrt(int val);
-int amd64_mtrrconflict(u_int64_t flag1, u_int64_t flag2);
-void amd64_mrstore(struct mem_range_softc *sc);
-void amd64_mrstoreone(struct mem_range_softc *sc);
-struct mem_range_desc *amd64_mtrrfixsearch(struct mem_range_softc *sc,
+void mrfetch(struct mem_range_softc *sc);
+int mtrrtype(u_int64_t flags);
+int mrt2mtrr(u_int64_t flags);
+int mtrr2mrt(int val);
+int mtrrconflict(u_int64_t flag1, u_int64_t flag2);
+void mrstore(struct mem_range_softc *sc);
+void mrstoreone(struct mem_range_softc *sc);
+struct mem_range_desc *mtrrfixsearch(struct mem_range_softc *sc,
u_int64_t addr);
-int amd64_mrsetlow(struct mem_range_softc *sc,
+int mrsetlow(struct mem_range_softc *sc,
struct mem_range_desc *mrd, int *arg);
-int amd64_mrsetvariable(struct mem_range_softc *sc,
+int mrsetvariable(struct mem_range_softc *sc,
struct mem_range_desc *mrd, int *arg);
-/* AMD64 MTRR type to memory range type conversion */
-int amd64_mtrrtomrt[] = {
+/* MTRR type to memory range type conversion */
+int mtrrtomrt[] = {
MDF_UNCACHEABLE,
MDF_WRITECOMBINE,
MDF_UNKNOWN,
@@ -105,23 +106,22 @@ int amd64_mtrrtomrt[] = {
MDF_WRITEBACK
};
-#define MTRRTOMRTLEN (sizeof(amd64_mtrrtomrt) / sizeof(amd64_mtrrtomrt[0]))
+#define MTRRTOMRTLEN (sizeof(mtrrtomrt) / sizeof(mtrrtomrt[0]))
int
-amd64_mtrr2mrt(int val)
+mtrr2mrt(int val)
{
if (val < 0 || val >= MTRRTOMRTLEN)
return MDF_UNKNOWN;
- return amd64_mtrrtomrt[val];
+ return mtrrtomrt[val];
}
/*
- * AMD64 MTRR conflicts. Writeback and uncachable may overlap.
+ * MTRR conflicts. Writeback and uncachable may overlap.
*/
int
-amd64_mtrrconflict(u_int64_t flag1, u_int64_t flag2)
+mtrrconflict(u_int64_t flag1, u_int64_t flag2)
{
-
flag1 &= MDF_ATTRMASK;
flag2 &= MDF_ATTRMASK;
if (flag1 == flag2 ||
@@ -153,49 +153,60 @@ mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
* that MTRRs are enabled, and we may or may not have fixed MTRRs.
*/
void
-amd64_mrfetch(struct mem_range_softc *sc)
+mrfetch(struct mem_range_softc *sc)
{
struct mem_range_desc *mrd;
u_int64_t msrv;
- int i, j, msr;
+ int i, j, msr, mrt;
mrd = sc->mr_desc;
+
+ /* We should never be fetching MTRRs from an AP */
+ KASSERT(CPU_IS_PRIMARY(curcpu()));
- /* Get fixed-range MTRRs */
- if (sc->mr_cap & MRAMD64_FIXMTRR) {
+ /* Get fixed-range MTRRs, if the CPU supports them */
+ if (sc->mr_cap & MR_FIXMTRR) {
msr = MSR_MTRRfix64K_00000;
for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
msrv = rdmsr(msr);
for (j = 0; j < 8; j++, mrd++) {
+ mrt = mtrr2mrt(msrv & 0xff);
+ if (mrt == MDF_UNKNOWN)
+ mrt = MDF_UNCACHEABLE;
mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
- amd64_mtrr2mrt(msrv & 0xff) |
- MDF_ACTIVE;
+ mrt | MDF_ACTIVE;
if (mrd->mr_owner[0] == 0)
strlcpy(mrd->mr_owner, mem_owner_bios,
sizeof(mrd->mr_owner));
msrv = msrv >> 8;
}
}
+
msr = MSR_MTRRfix16K_80000;
for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
msrv = rdmsr(msr);
for (j = 0; j < 8; j++, mrd++) {
+ mrt = mtrr2mrt(msrv & 0xff);
+ if (mrt == MDF_UNKNOWN)
+ mrt = MDF_UNCACHEABLE;
mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
- amd64_mtrr2mrt(msrv & 0xff) |
- MDF_ACTIVE;
+ mrt | MDF_ACTIVE;
if (mrd->mr_owner[0] == 0)
strlcpy(mrd->mr_owner, mem_owner_bios,
sizeof(mrd->mr_owner));
msrv = msrv >> 8;
}
}
+
msr = MSR_MTRRfix4K_C0000;
for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
msrv = rdmsr(msr);
for (j = 0; j < 8; j++, mrd++) {
+ mrt = mtrr2mrt(msrv & 0xff);
+ if (mrt == MDF_UNKNOWN)
+ mrt = MDF_UNCACHEABLE;
mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
- amd64_mtrr2mrt(msrv & 0xff) |
- MDF_ACTIVE;
+ mrt | MDF_ACTIVE;
if (mrd->mr_owner[0] == 0)
strlcpy(mrd->mr_owner, mem_owner_bios,
sizeof(mrd->mr_owner));
@@ -205,11 +216,13 @@ amd64_mrfetch(struct mem_range_softc *sc)
}
/* Get remainder which must be variable MTRRs */
- msr = MSR_MTRRphysBase0;
+ msr = MSR_MTRRvarBase;
for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
msrv = rdmsr(msr);
- mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
- amd64_mtrr2mrt(msrv & 0xff);
+ mrt = mtrr2mrt(msrv & 0xff);
+ if (mrt == MDF_UNKNOWN)
+ mrt = MDF_UNCACHEABLE;
+ mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | mrt;
mrd->mr_base = msrv & mtrrmask;
msrv = rdmsr(msr + 1);
mrd->mr_flags = (msrv & 0x800) ?
@@ -230,28 +243,28 @@ amd64_mrfetch(struct mem_range_softc *sc)
* Return the MTRR memory type matching a region's flags
*/
int
-amd64_mtrrtype(u_int64_t flags)
+mtrrtype(u_int64_t flags)
{
- int i;
+ int i;
flags &= MDF_ATTRMASK;
for (i = 0; i < MTRRTOMRTLEN; i++) {
- if (amd64_mtrrtomrt[i] == MDF_UNKNOWN)
+ if (mtrrtomrt[i] == MDF_UNKNOWN)
continue;
- if (flags == amd64_mtrrtomrt[i])
+ if (flags == mtrrtomrt[i])
return(i);
}
- return(-1);
+ return MDF_UNCACHEABLE;
}
int
-amd64_mrt2mtrr(u_int64_t flags, int oldval)
+mrt2mtrr(u_int64_t flags)
{
int val;
- if ((val = amd64_mtrrtype(flags)) == -1)
- return oldval & 0xff;
+ val = mtrrtype(flags);
+
return val & 0xff;
}
@@ -262,13 +275,13 @@ amd64_mrt2mtrr(u_int64_t flags, int oldval)
* XXX Must be called with interrupts enabled.
*/
void
-amd64_mrstore(struct mem_range_softc *sc)
+mrstore(struct mem_range_softc *sc)
{
disable_intr(); /* disable interrupts */
#ifdef MULTIPROCESSOR
x86_broadcast_ipi(X86_IPI_MTRR);
#endif
- amd64_mrstoreone(sc);
+ mrstoreone(sc);
enable_intr();
}
@@ -278,10 +291,10 @@ amd64_mrstore(struct mem_range_softc *sc)
* just stuffing one entry; this is simpler (but slower, of course).
*/
void
-amd64_mrstoreone(struct mem_range_softc *sc)
+mrstoreone(struct mem_range_softc *sc)
{
struct mem_range_desc *mrd;
- u_int64_t omsrv, msrv;
+ u_int64_t msrv;
int i, j, msr;
u_int cr4save;
@@ -290,44 +303,40 @@ amd64_mrstoreone(struct mem_range_softc *sc)
cr4save = rcr4(); /* save cr4 */
if (cr4save & CR4_PGE)
lcr4(cr4save & ~CR4_PGE);
- lcr0((rcr0() & ~CR0_NW) | CR0_CD); /* disable caches (CD = 1, NW = 0) */
- wbinvd(); /* flush caches */
- wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800); /* disable MTRRs (E = 0) */
+
+ /* Flush caches, then disable caches, then disable MTRRs */
+ wbinvd();
+ lcr0((rcr0() & ~CR0_NW) | CR0_CD);
+ wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800);
/* Set fixed-range MTRRs */
- if (sc->mr_cap & MRAMD64_FIXMTRR) {
+ if (sc->mr_cap & MR_FIXMTRR) {
msr = MSR_MTRRfix64K_00000;
for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
msrv = 0;
- omsrv = rdmsr(msr);
for (j = 7; j >= 0; j--) {
msrv = msrv << 8;
- msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
- omsrv >> (j*8));
+ msrv |= mrt2mtrr((mrd + j)->mr_flags);
}
wrmsr(msr, msrv);
mrd += 8;
}
+
msr = MSR_MTRRfix16K_80000;
- for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
- msrv = 0;
- omsrv = rdmsr(msr);
+ for (i = 0, msrv = 0; i < (MTRR_N16K / 8); i++, msr++) {
for (j = 7; j >= 0; j--) {
msrv = msrv << 8;
- msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
- omsrv >> (j*8));
+ msrv |= mrt2mtrr((mrd + j)->mr_flags);
}
wrmsr(msr, msrv);
mrd += 8;
}
+
msr = MSR_MTRRfix4K_C0000;
- for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
- msrv = 0;
- omsrv = rdmsr(msr);
+ for (i = 0, msrv = 0; i < (MTRR_N4K / 8); i++, msr++) {
for (j = 7; j >= 0; j--) {
msrv = msrv << 8;
- msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
- omsrv >> (j*8));
+ msrv |= mrt2mtrr((mrd + j)->mr_flags);
}
wrmsr(msr, msrv);
mrd += 8;
@@ -335,38 +344,36 @@ amd64_mrstoreone(struct mem_range_softc *sc)
}
/* Set remainder which must be variable MTRRs */
- msr = MSR_MTRRphysBase0;
+ msr = MSR_MTRRvarBase;
for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
- /* base/type register */
- omsrv = rdmsr(msr);
if (mrd->mr_flags & MDF_ACTIVE) {
msrv = mrd->mr_base & mtrrmask;
- msrv |= amd64_mrt2mtrr(mrd->mr_flags, omsrv);
- } else {
+ msrv |= mrt2mtrr(mrd->mr_flags);
+ } else
msrv = 0;
- }
+
wrmsr(msr, msrv);
/* mask/active register */
if (mrd->mr_flags & MDF_ACTIVE) {
msrv = 0x800 | (~(mrd->mr_len - 1) & mtrrmask);
- } else {
+ } else
msrv = 0;
- }
+
wrmsr(msr + 1, msrv);
}
- wbinvd(); /* flush caches */
- tlbflushg(); /* flush TLB */
- wrmsr(MSR_MTRRdefType, mtrrdef | 0x800); /* set MTRR behaviour to match BSP and enable it */
- lcr0(rcr0() & ~(CR0_CD | CR0_NW)); /* enable caches CD = 0 and NW = 0 */
- lcr4(cr4save); /* restore cr4 */
+
+ /* Re-enable caches and MTRRs */
+ wrmsr(MSR_MTRRdefType, mtrrdef | 0x800);
+ lcr0(rcr0() & ~(CR0_CD | CR0_NW));
+ lcr4(cr4save);
}
/*
* Hunt for the fixed MTRR referencing (addr)
*/
struct mem_range_desc *
-amd64_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
+mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
struct mem_range_desc *mrd;
int i;
@@ -384,17 +391,15 @@ amd64_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
* Note that we try to be generous here; we'll bloat the range out to the
* next higher/lower boundary to avoid the consumer having to know too much
* about the mechanisms here.
- *
- * XXX note that this will have to be updated when we start supporting "busy" ranges.
*/
int
-amd64_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
+mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
struct mem_range_desc *first_md, *last_md, *curr_md;
/* range check */
- if (((first_md = amd64_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
- ((last_md = amd64_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
+ if (((first_md = mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
+ ((last_md = mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
return(EINVAL);
/* check we aren't doing something risky */
@@ -416,12 +421,9 @@ amd64_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
/*
* Modify/add a variable MTRR to satisfy the request.
- *
- * XXX needs to be updated to properly support "busy" ranges.
*/
int
-amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
- int *arg)
+mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
struct mem_range_desc *curr_md, *free_md;
int i;
@@ -433,7 +435,7 @@ amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
* Keep track of the first empty variable descriptor in case we
* can't perform a takeover.
*/
- i = (sc->mr_cap & MRAMD64_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
+ i = (sc->mr_cap & MR_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
curr_md = sc->mr_desc + i;
free_md = NULL;
for (; i < sc->mr_ndesc; i++, curr_md++) {
@@ -441,9 +443,6 @@ amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
/* exact match? */
if ((curr_md->mr_base == mrd->mr_base) &&
(curr_md->mr_len == mrd->mr_len)) {
- /* whoops, owned by someone */
- if (curr_md->mr_flags & MDF_BUSY)
- return(EBUSY);
/* check we aren't doing something risky */
if (!(mrd->mr_flags & MDF_FORCE) &&
((curr_md->mr_flags & MDF_ATTRMASK)
@@ -456,7 +455,7 @@ amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
/* non-exact overlap ? */
if (mroverlap(curr_md, mrd)) {
/* between conflicting region types? */
- if (amd64_mtrrconflict(curr_md->mr_flags,
+ if (mtrrconflict(curr_md->mr_flags,
mrd->mr_flags))
return(EINVAL);
}
@@ -478,31 +477,28 @@ amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
/*
* Handle requests to set memory range attributes by manipulating MTRRs.
- *
*/
int
-amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
+mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
struct mem_range_desc *targ;
int error = 0;
switch(*arg) {
case MEMRANGE_SET_UPDATE:
- /* make sure that what's being asked for is even possible at all */
+ /* make sure that what's being asked for is possible */
if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
- amd64_mtrrtype(mrd->mr_flags) == -1)
+ mtrrtype(mrd->mr_flags) == -1)
return(EINVAL);
-#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
-
/* are the "low memory" conditions applicable? */
- if ((sc->mr_cap & MRAMD64_FIXMTRR) &&
+ if ((sc->mr_cap & MR_FIXMTRR) &&
((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
- if ((error = amd64_mrsetlow(sc, mrd, arg)) != 0)
+ if ((error = mrsetlow(sc, mrd, arg)) != 0)
return(error);
} else {
/* it's time to play with variable MTRRs */
- if ((error = amd64_mrsetvariable(sc, mrd, arg)) != 0)
+ if ((error = mrsetvariable(sc, mrd, arg)) != 0)
return(error);
}
break;
@@ -512,8 +508,6 @@ amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
return(ENOENT);
if (targ->mr_flags & MDF_FIXACTIVE)
return(EPERM);
- if (targ->mr_flags & MDF_BUSY)
- return(EBUSY);
targ->mr_flags &= ~MDF_ACTIVE;
targ->mr_owner[0] = 0;
break;
@@ -523,8 +517,7 @@ amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
}
/* update the hardware */
- amd64_mrstore(sc);
- amd64_mrfetch(sc); /* refetch to see where we're at */
+ mrstore(sc);
return(0);
}
@@ -533,7 +526,7 @@ amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
* fetch the initial settings.
*/
void
-amd64_mrinit(struct mem_range_softc *sc)
+mrinit(struct mem_range_softc *sc)
{
struct mem_range_desc *mrd;
uint32_t regs[4];
@@ -545,17 +538,20 @@ amd64_mrinit(struct mem_range_softc *sc)
/* For now, bail out if MTRRs are not enabled */
if (!(mtrrdef & 0x800)) {
- printf("mtrr: CPU supports MTRRs but not enabled\n");
+ printf("mtrr: CPU supports MTRRs but not enabled by BIOS\n");
return;
}
nmdesc = mtrrcap & 0xff;
- printf("mtrr: Pentium Pro MTRR support\n");
+ printf("mtrr: Pentium Pro MTRR support, %d var ranges", nmdesc);
/* If fixed MTRRs supported and enabled */
if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) {
- sc->mr_cap = MRAMD64_FIXMTRR;
+ sc->mr_cap = MR_FIXMTRR;
nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
+ printf(", %d fixed ranges", MTRR_N64K + MTRR_N16K + MTRR_N4K);
}
+
+ printf("\n");
sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc),
M_MEMDESC, M_WAITOK|M_ZERO);
@@ -564,17 +560,19 @@ amd64_mrinit(struct mem_range_softc *sc)
mrd = sc->mr_desc;
/* Populate the fixed MTRR entries' base/length */
- if (sc->mr_cap & MRAMD64_FIXMTRR) {
+ if (sc->mr_cap & MR_FIXMTRR) {
for (i = 0; i < MTRR_N64K; i++, mrd++) {
mrd->mr_base = i * 0x10000;
mrd->mr_len = 0x10000;
mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
}
+
for (i = 0; i < MTRR_N16K; i++, mrd++) {
mrd->mr_base = i * 0x4000 + 0x80000;
mrd->mr_len = 0x4000;
mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
}
+
for (i = 0; i < MTRR_N4K; i++, mrd++) {
mrd->mr_base = i * 0x1000 + 0xc0000;
mrd->mr_len = 0x1000;
@@ -599,9 +597,9 @@ amd64_mrinit(struct mem_range_softc *sc)
/*
* Get current settings, anything set now is considered to have
- * been set by the firmware. (XXX has something already played here?)
+ * been set by the firmware.
*/
- amd64_mrfetch(sc);
+ mrfetch(sc);
mrd = sc->mr_desc;
for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
if (mrd->mr_flags & MDF_ACTIVE)
@@ -613,15 +611,15 @@ amd64_mrinit(struct mem_range_softc *sc)
* Initialise MTRRs on a cpu from the software state.
*/
void
-amd64_mrinit_cpu(struct mem_range_softc *sc)
+mrinit_cpu(struct mem_range_softc *sc)
{
- amd64_mrstoreone(sc); /* set MTRRs to match BSP */
+ mrstoreone(sc); /* set MTRRs to match BSP */
}
void
-amd64_mrreload_cpu(struct mem_range_softc *sc)
+mrreload_cpu(struct mem_range_softc *sc)
{
- disable_intr(); /* disable interrupts */
- amd64_mrstoreone(sc); /* set MTRRs to match BSP */
+ disable_intr();
+ mrstoreone(sc); /* set MTRRs to match BSP */
enable_intr();
}
diff --git a/sys/arch/amd64/amd64/mtrr.c b/sys/arch/amd64/amd64/mtrr.c
index ed6907843d1..04cde53edc7 100644
--- a/sys/arch/amd64/amd64/mtrr.c
+++ b/sys/arch/amd64/amd64/mtrr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mtrr.c,v 1.2 2009/06/01 20:46:50 phessler Exp $ */
+/* $OpenBSD: mtrr.c,v 1.3 2013/08/24 04:26:15 mlarkin Exp $ */
/*-
* Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
* Copyright (c) 1999 Brian Fundakowski Feldman
@@ -33,7 +33,7 @@
#include <machine/specialreg.h>
-extern struct mem_range_ops amd64_mrops;
+extern struct mem_range_ops mrops;
void mtrrattach(int);
@@ -55,7 +55,7 @@ mtrrattach(int num)
(strcmp(cpu_vendor, "AuthenticAMD") == 0)) &&
(family == 0x6 || family == 0xf) &&
cpu_feature & CPUID_MTRR) {
- mem_range_softc.mr_op = &amd64_mrops;
+ mem_range_softc.mr_op = &mrops;
}
/* Initialise memory range handling */
diff --git a/sys/arch/amd64/include/specialreg.h b/sys/arch/amd64/include/specialreg.h
index cc245c69547..f8ae621d0a8 100644
--- a/sys/arch/amd64/include/specialreg.h
+++ b/sys/arch/amd64/include/specialreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: specialreg.h,v 1.26 2013/06/02 16:34:15 guenther Exp $ */
+/* $OpenBSD: specialreg.h,v 1.27 2013/08/24 04:26:16 mlarkin Exp $ */
/* $NetBSD: specialreg.h,v 1.1 2003/04/26 18:39:48 fvdl Exp $ */
/* $NetBSD: x86/specialreg.h,v 1.2 2003/04/25 21:54:30 fvdl Exp $ */
@@ -309,46 +309,23 @@
#define MSR_LASTINTFROMIP 0x1dd
#define MSR_LASTINTTOIP 0x1de
#define MSR_ROB_CR_BKUPTMPDR6 0x1e0
-#define MSR_MTRRphysBase0 0x200
-#define MSR_MTRRphysMask0 0x201
-#define MSR_MTRRphysBase1 0x202
-#define MSR_MTRRphysMask1 0x203
-#define MSR_MTRRphysBase2 0x204
-#define MSR_MTRRphysMask2 0x205
-#define MSR_MTRRphysBase3 0x206
-#define MSR_MTRRphysMask3 0x207
-#define MSR_MTRRphysBase4 0x208
-#define MSR_MTRRphysMask4 0x209
-#define MSR_MTRRphysBase5 0x20a
-#define MSR_MTRRphysMask5 0x20b
-#define MSR_MTRRphysBase6 0x20c
-#define MSR_MTRRphysMask6 0x20d
-#define MSR_MTRRphysBase7 0x20e
-#define MSR_MTRRphysMask7 0x20f
+#define MSR_MTRRvarBase 0x200
#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
-#define MSR_MTRRfix16K_A0000 0x259
#define MSR_MTRRfix4K_C0000 0x268
-#define MSR_MTRRfix4K_C8000 0x269
-#define MSR_MTRRfix4K_D0000 0x26a
-#define MSR_MTRRfix4K_D8000 0x26b
-#define MSR_MTRRfix4K_E0000 0x26c
-#define MSR_MTRRfix4K_E8000 0x26d
-#define MSR_MTRRfix4K_F0000 0x26e
-#define MSR_MTRRfix4K_F8000 0x26f
#define MSR_CR_PAT 0x277
#define MSR_MTRRdefType 0x2ff
#define MSR_PERF_FIXED_CTR1 0x30a /* CPU_CLK_Unhalted.Core */
#define MSR_PERF_FIXED_CTR2 0x30b /* CPU_CLK.Unhalted.Ref */
#define MSR_PERF_FIXED_CTR_CTRL 0x38d
-#define MSR_PERF_FIXED_CTR_FC_DIS 0x0 /* disable counter */
-#define MSR_PERF_FIXED_CTR_FC_1 0x1 /* count ring 1 */
-#define MSR_PERF_FIXED_CTR_FC_123 0x2 /* count rings 1,2,3 */
-#define MSR_PERF_FIXED_CTR_FC_ANY 0x3 /* count everything */
-#define MSR_PERF_FIXED_CTR_FC_MASK 0x3
-#define MSR_PERF_FIXED_CTR_FC(_i, _v) ((_v) << (4 * (_i)))
-#define MSR_PERF_FIXED_CTR_ANYTHR(_i) (0x4 << (4 * (_i)))
-#define MSR_PERF_FIXED_CTR_INT(_i) (0x8 << (4 * (_i)))
+#define MSR_PERF_FIXED_CTR_FC_DIS 0x0 /* disable counter */
+#define MSR_PERF_FIXED_CTR_FC_1 0x1 /* count ring 1 */
+#define MSR_PERF_FIXED_CTR_FC_123 0x2 /* count rings 1,2,3 */
+#define MSR_PERF_FIXED_CTR_FC_ANY 0x3 /* count everything */
+#define MSR_PERF_FIXED_CTR_FC_MASK 0x3
+#define MSR_PERF_FIXED_CTR_FC(_i, _v) ((_v) << (4 * (_i)))
+#define MSR_PERF_FIXED_CTR_ANYTHR(_i) (0x4 << (4 * (_i)))
+#define MSR_PERF_FIXED_CTR_INT(_i) (0x8 << (4 * (_i)))
#define MSR_PERF_GLOBAL_CTRL 0x38f
#define MSR_PERF_GLOBAL_CTR1_EN (1ULL << 33)
#define MSR_PERF_GLOBAL_CTR2_EN (1ULL << 34)
diff --git a/sys/arch/i386/i386/i686_mem.c b/sys/arch/i386/i386/i686_mem.c
index 3a38cf751e4..b2a4786db36 100644
--- a/sys/arch/i386/i386/i686_mem.c
+++ b/sys/arch/i386/i386/i686_mem.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: i686_mem.c,v 1.14 2012/08/01 15:44:14 mikeb Exp $ */
-/*-
+/* $OpenBSD: i686_mem.c,v 1.15 2013/08/24 04:26:16 mlarkin Exp $ */
+/*
* Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
* All rights reserved.
*
@@ -38,15 +38,13 @@
#include <machine/specialreg.h>
/*
- * i686 memory range operations
- *
- * This code will probably be impenetrable without reference to the
- * Intel Pentium Pro documentation.
+ * This code implements a set of MSRs known as MTRR which define caching
+ * modes/behavior for various memory ranges.
*/
char *mem_owner_bios = "BIOS";
-#define MR686_FIXMTRR (1<<0)
+#define MR_FIXMTRR (1<<0)
#define mrwithin(mr, a) \
(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
@@ -59,43 +57,46 @@ char *mem_owner_bios = "BIOS";
powerof2((len)) && /* ... and power of two */ \
!((base) & ((len) - 1))) /* range is not discontiuous */
-#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))
+#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | \
+ ((new) & MDF_ATTRMASK))
-void i686_mrinit(struct mem_range_softc *sc);
-int i686_mrset(struct mem_range_softc *sc,
+#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + \
+ (MTRR_N4K * 0x1000))
+
+void mrinit(struct mem_range_softc *sc);
+int mrset(struct mem_range_softc *sc,
struct mem_range_desc *mrd, int *arg);
-void i686_mrinit_cpu(struct mem_range_softc *sc);
-void i686_mrreload_cpu(struct mem_range_softc *sc);
-
-struct mem_range_ops i686_mrops = {
- i686_mrinit,
- i686_mrset,
- i686_mrinit_cpu,
- i686_mrreload_cpu
+void mrinit_cpu(struct mem_range_softc *sc);
+void mrreload_cpu(struct mem_range_softc *sc);
+
+struct mem_range_ops mrops = {
+ mrinit,
+ mrset,
+ mrinit_cpu,
+ mrreload_cpu
};
-/* XXX for AP startup hook */
u_int64_t mtrrcap, mtrrdef;
u_int64_t mtrrmask = 0x0000000ffffff000ULL;
struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
struct mem_range_desc *mrd);
-void i686_mrfetch(struct mem_range_softc *sc);
-int i686_mtrrtype(int flags);
-int i686_mrt2mtrr(int flags, int oldval);
-int i686_mtrr2mrt(int val);
-int i686_mtrrconflict(int flag1, int flag2);
-void i686_mrstore(struct mem_range_softc *sc);
-void i686_mrstoreone(struct mem_range_softc *sc);
-struct mem_range_desc *i686_mtrrfixsearch(struct mem_range_softc *sc,
+void mrfetch(struct mem_range_softc *sc);
+int mtrrtype(u_int64_t flags);
+int mrt2mtrr(u_int64_t flags);
+int mtrr2mrt(int val);
+int mtrrconflict(u_int64_t flag1, u_int64_t flag2);
+void mrstore(struct mem_range_softc *sc);
+void mrstoreone(struct mem_range_softc *sc);
+struct mem_range_desc *mtrrfixsearch(struct mem_range_softc *sc,
u_int64_t addr);
-int i686_mrsetlow(struct mem_range_softc *sc,
+int mrsetlow(struct mem_range_softc *sc,
struct mem_range_desc *mrd, int *arg);
-int i686_mrsetvariable(struct mem_range_softc *sc,
+int mrsetvariable(struct mem_range_softc *sc,
struct mem_range_desc *mrd, int *arg);
-/* i686 MTRR type to memory range type conversion */
-int i686_mtrrtomrt[] = {
+/* MTRR type to memory range type conversion */
+int mtrrtomrt[] = {
MDF_UNCACHEABLE,
MDF_WRITECOMBINE,
MDF_UNKNOWN,
@@ -105,23 +106,22 @@ int i686_mtrrtomrt[] = {
MDF_WRITEBACK
};
-#define MTRRTOMRTLEN (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0]))
+#define MTRRTOMRTLEN (sizeof(mtrrtomrt) / sizeof(mtrrtomrt[0]))
int
-i686_mtrr2mrt(int val)
+mtrr2mrt(int val)
{
if (val < 0 || val >= MTRRTOMRTLEN)
return MDF_UNKNOWN;
- return i686_mtrrtomrt[val];
+ return mtrrtomrt[val];
}
/*
- * i686 MTRR conflicts. Writeback and uncachable may overlap.
+ * MTRR conflicts. Writeback and uncachable may overlap.
*/
int
-i686_mtrrconflict(int flag1, int flag2)
+mtrrconflict(u_int64_t flag1, u_int64_t flag2)
{
-
flag1 &= MDF_ATTRMASK;
flag2 &= MDF_ATTRMASK;
if (flag1 == flag2 ||
@@ -153,49 +153,60 @@ mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
* that MTRRs are enabled, and we may or may not have fixed MTRRs.
*/
void
-i686_mrfetch(struct mem_range_softc *sc)
+mrfetch(struct mem_range_softc *sc)
{
struct mem_range_desc *mrd;
u_int64_t msrv;
- int i, j, msr;
+ int i, j, msr, mrt;
mrd = sc->mr_desc;
+
+ /* We should never be fetching MTRRs from an AP */
+ KASSERT(CPU_IS_PRIMARY(curcpu()));
- /* Get fixed-range MTRRs */
- if (sc->mr_cap & MR686_FIXMTRR) {
- msr = MSR_MTRR64kBase;
+ /* Get fixed-range MTRRs, if the CPU supports them */
+ if (sc->mr_cap & MR_FIXMTRR) {
+ msr = MSR_MTRRfix64K_00000;
for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
msrv = rdmsr(msr);
for (j = 0; j < 8; j++, mrd++) {
+ mrt = mtrr2mrt(msrv & 0xff);
+ if (mrt == MDF_UNKNOWN)
+ mrt = MDF_UNCACHEABLE;
mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
- i686_mtrr2mrt(msrv & 0xff) |
- MDF_ACTIVE;
+ mrt | MDF_ACTIVE;
if (mrd->mr_owner[0] == 0)
strlcpy(mrd->mr_owner, mem_owner_bios,
sizeof(mrd->mr_owner));
msrv = msrv >> 8;
}
}
- msr = MSR_MTRR16kBase;
+
+ msr = MSR_MTRRfix16K_80000;
for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
msrv = rdmsr(msr);
for (j = 0; j < 8; j++, mrd++) {
+ mrt = mtrr2mrt(msrv & 0xff);
+ if (mrt == MDF_UNKNOWN)
+ mrt = MDF_UNCACHEABLE;
mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
- i686_mtrr2mrt(msrv & 0xff) |
- MDF_ACTIVE;
+ mrt | MDF_ACTIVE;
if (mrd->mr_owner[0] == 0)
strlcpy(mrd->mr_owner, mem_owner_bios,
sizeof(mrd->mr_owner));
msrv = msrv >> 8;
}
}
- msr = MSR_MTRR4kBase;
+
+ msr = MSR_MTRRfix4K_C0000;
for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
msrv = rdmsr(msr);
for (j = 0; j < 8; j++, mrd++) {
+ mrt = mtrr2mrt(msrv & 0xff);
+ if (mrt == MDF_UNKNOWN)
+ mrt = MDF_UNCACHEABLE;
mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
- i686_mtrr2mrt(msrv & 0xff) |
- MDF_ACTIVE;
+ mrt | MDF_ACTIVE;
if (mrd->mr_owner[0] == 0)
strlcpy(mrd->mr_owner, mem_owner_bios,
sizeof(mrd->mr_owner));
@@ -205,11 +216,13 @@ i686_mrfetch(struct mem_range_softc *sc)
}
/* Get remainder which must be variable MTRRs */
- msr = MSR_MTRRVarBase;
+ msr = MSR_MTRRvarBase;
for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
msrv = rdmsr(msr);
- mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
- i686_mtrr2mrt(msrv & 0xff);
+ mrt = mtrr2mrt(msrv & 0xff);
+ if (mrt == MDF_UNKNOWN)
+ mrt = MDF_UNCACHEABLE;
+ mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | mrt;
mrd->mr_base = msrv & mtrrmask;
msrv = rdmsr(msr + 1);
mrd->mr_flags = (msrv & 0x800) ?
@@ -230,28 +243,28 @@ i686_mrfetch(struct mem_range_softc *sc)
* Return the MTRR memory type matching a region's flags
*/
int
-i686_mtrrtype(int flags)
+mtrrtype(u_int64_t flags)
{
- int i;
+ int i;
flags &= MDF_ATTRMASK;
for (i = 0; i < MTRRTOMRTLEN; i++) {
- if (i686_mtrrtomrt[i] == MDF_UNKNOWN)
+ if (mtrrtomrt[i] == MDF_UNKNOWN)
continue;
- if (flags == i686_mtrrtomrt[i])
+ if (flags == mtrrtomrt[i])
return(i);
}
- return(-1);
+ return MDF_UNCACHEABLE;
}
int
-i686_mrt2mtrr(int flags, int oldval)
+mrt2mtrr(u_int64_t flags)
{
int val;
- if ((val = i686_mtrrtype(flags)) == -1)
- return oldval & 0xff;
+ val = mtrrtype(flags);
+
return val & 0xff;
}
@@ -262,13 +275,13 @@ i686_mrt2mtrr(int flags, int oldval)
* XXX Must be called with interrupts enabled.
*/
void
-i686_mrstore(struct mem_range_softc *sc)
+mrstore(struct mem_range_softc *sc)
{
disable_intr(); /* disable interrupts */
#ifdef MULTIPROCESSOR
i386_broadcast_ipi(I386_IPI_MTRR);
#endif
- i686_mrstoreone(sc);
+ mrstoreone(sc);
enable_intr();
}
@@ -278,10 +291,10 @@ i686_mrstore(struct mem_range_softc *sc)
* just stuffing one entry; this is simpler (but slower, of course).
*/
void
-i686_mrstoreone(struct mem_range_softc *sc)
+mrstoreone(struct mem_range_softc *sc)
{
struct mem_range_desc *mrd;
- u_int64_t omsrv, msrv;
+ u_int64_t msrv;
int i, j, msr;
u_int cr4save;
@@ -290,44 +303,40 @@ i686_mrstoreone(struct mem_range_softc *sc)
cr4save = rcr4(); /* save cr4 */
if (cr4save & CR4_PGE)
lcr4(cr4save & ~CR4_PGE);
- lcr0((rcr0() & ~CR0_NW) | CR0_CD); /* disable caches (CD = 1, NW = 0) */
- wbinvd(); /* flush caches */
- wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800); /* disable MTRRs (E = 0) */
+
+ /* Flush caches, then disable caches, then disable MTRRs */
+ wbinvd();
+ lcr0((rcr0() & ~CR0_NW) | CR0_CD);
+ wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800);
/* Set fixed-range MTRRs */
- if (sc->mr_cap & MR686_FIXMTRR) {
- msr = MSR_MTRR64kBase;
+ if (sc->mr_cap & MR_FIXMTRR) {
+ msr = MSR_MTRRfix64K_00000;
for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
msrv = 0;
- omsrv = rdmsr(msr);
for (j = 7; j >= 0; j--) {
msrv = msrv << 8;
- msrv |= i686_mrt2mtrr((mrd + j)->mr_flags,
- omsrv >> (j*8));
+ msrv |= mrt2mtrr((mrd + j)->mr_flags);
}
wrmsr(msr, msrv);
mrd += 8;
}
- msr = MSR_MTRR16kBase;
- for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
- msrv = 0;
- omsrv = rdmsr(msr);
+
+ msr = MSR_MTRRfix16K_80000;
+ for (i = 0, msrv = 0; i < (MTRR_N16K / 8); i++, msr++) {
for (j = 7; j >= 0; j--) {
msrv = msrv << 8;
- msrv |= i686_mrt2mtrr((mrd + j)->mr_flags,
- omsrv >> (j*8));
+ msrv |= mrt2mtrr((mrd + j)->mr_flags);
}
wrmsr(msr, msrv);
mrd += 8;
}
- msr = MSR_MTRR4kBase;
- for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
- msrv = 0;
- omsrv = rdmsr(msr);
+
+ msr = MSR_MTRRfix4K_C0000;
+ for (i = 0, msrv = 0; i < (MTRR_N4K / 8); i++, msr++) {
for (j = 7; j >= 0; j--) {
msrv = msrv << 8;
- msrv |= i686_mrt2mtrr((mrd + j)->mr_flags,
- omsrv >> (j*8));
+ msrv |= mrt2mtrr((mrd + j)->mr_flags);
}
wrmsr(msr, msrv);
mrd += 8;
@@ -335,38 +344,36 @@ i686_mrstoreone(struct mem_range_softc *sc)
}
/* Set remainder which must be variable MTRRs */
- msr = MSR_MTRRVarBase;
+ msr = MSR_MTRRvarBase;
for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
- /* base/type register */
- omsrv = rdmsr(msr);
if (mrd->mr_flags & MDF_ACTIVE) {
msrv = mrd->mr_base & mtrrmask;
- msrv |= i686_mrt2mtrr(mrd->mr_flags, omsrv);
- } else {
+ msrv |= mrt2mtrr(mrd->mr_flags);
+ } else
msrv = 0;
- }
+
wrmsr(msr, msrv);
/* mask/active register */
if (mrd->mr_flags & MDF_ACTIVE) {
msrv = 0x800 | (~(mrd->mr_len - 1) & mtrrmask);
- } else {
+ } else
msrv = 0;
- }
+
wrmsr(msr + 1, msrv);
}
- wbinvd(); /* flush caches */
- tlbflushg(); /* flush TLB */
- wrmsr(MSR_MTRRdefType, mtrrdef | 0x800); /* restore MTRR state */
- lcr0(rcr0() & ~(CR0_CD | CR0_NW)); /* enable caches CD = 0 and NW = 0 */
- lcr4(cr4save); /* restore cr4 */
+
+ /* Re-enable caches and MTRRs */
+ wrmsr(MSR_MTRRdefType, mtrrdef | 0x800);
+ lcr0(rcr0() & ~(CR0_CD | CR0_NW));
+ lcr4(cr4save);
}
/*
* Hunt for the fixed MTRR referencing (addr)
*/
struct mem_range_desc *
-i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
+mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
struct mem_range_desc *mrd;
int i;
@@ -384,17 +391,15 @@ i686_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
* Note that we try to be generous here; we'll bloat the range out to the
* next higher/lower boundary to avoid the consumer having to know too much
* about the mechanisms here.
- *
- * XXX note that this will have to be updated when we start supporting "busy" ranges.
*/
int
-i686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
+mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
struct mem_range_desc *first_md, *last_md, *curr_md;
/* range check */
- if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
- ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
+ if (((first_md = mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
+ ((last_md = mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
return(EINVAL);
/* check we aren't doing something risky */
@@ -416,12 +421,9 @@ i686_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
/*
* Modify/add a variable MTRR to satisfy the request.
- *
- * XXX needs to be updated to properly support "busy" ranges.
*/
int
-i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
- int *arg)
+mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
struct mem_range_desc *curr_md, *free_md;
int i;
@@ -433,7 +435,7 @@ i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
* Keep track of the first empty variable descriptor in case we
* can't perform a takeover.
*/
- i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
+ i = (sc->mr_cap & MR_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
curr_md = sc->mr_desc + i;
free_md = NULL;
for (; i < sc->mr_ndesc; i++, curr_md++) {
@@ -441,9 +443,6 @@ i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
/* exact match? */
if ((curr_md->mr_base == mrd->mr_base) &&
(curr_md->mr_len == mrd->mr_len)) {
- /* whoops, owned by someone */
- if (curr_md->mr_flags & MDF_BUSY)
- return(EBUSY);
/* check we aren't doing something risky */
if (!(mrd->mr_flags & MDF_FORCE) &&
((curr_md->mr_flags & MDF_ATTRMASK)
@@ -456,7 +455,7 @@ i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
/* non-exact overlap ? */
if (mroverlap(curr_md, mrd)) {
/* between conflicting region types? */
- if (i686_mtrrconflict(curr_md->mr_flags,
+ if (mtrrconflict(curr_md->mr_flags,
mrd->mr_flags))
return(EINVAL);
}
@@ -478,31 +477,28 @@ i686_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
/*
* Handle requests to set memory range attributes by manipulating MTRRs.
- *
*/
int
-i686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
+mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
struct mem_range_desc *targ;
int error = 0;
switch(*arg) {
case MEMRANGE_SET_UPDATE:
- /* make sure that what's being asked for is even possible at all */
+ /* make sure that what's being asked for is possible */
if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
- i686_mtrrtype(mrd->mr_flags) == -1)
+ mtrrtype(mrd->mr_flags) == -1)
return(EINVAL);
-#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
-
/* are the "low memory" conditions applicable? */
- if ((sc->mr_cap & MR686_FIXMTRR) &&
+ if ((sc->mr_cap & MR_FIXMTRR) &&
((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
- if ((error = i686_mrsetlow(sc, mrd, arg)) != 0)
+ if ((error = mrsetlow(sc, mrd, arg)) != 0)
return(error);
} else {
/* it's time to play with variable MTRRs */
- if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0)
+ if ((error = mrsetvariable(sc, mrd, arg)) != 0)
return(error);
}
break;
@@ -512,8 +508,6 @@ i686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
return(ENOENT);
if (targ->mr_flags & MDF_FIXACTIVE)
return(EPERM);
- if (targ->mr_flags & MDF_BUSY)
- return(EBUSY);
targ->mr_flags &= ~MDF_ACTIVE;
targ->mr_owner[0] = 0;
break;
@@ -523,8 +517,7 @@ i686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
}
/* update the hardware */
- i686_mrstore(sc);
- i686_mrfetch(sc); /* refetch to see where we're at */
+ mrstore(sc);
return(0);
}
@@ -533,7 +526,7 @@ i686_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
* fetch the initial settings.
*/
void
-i686_mrinit(struct mem_range_softc *sc)
+mrinit(struct mem_range_softc *sc)
{
struct mem_range_desc *mrd;
uint32_t regs[4];
@@ -545,17 +538,20 @@ i686_mrinit(struct mem_range_softc *sc)
/* For now, bail out if MTRRs are not enabled */
if (!(mtrrdef & 0x800)) {
- printf("mtrr: CPU supports MTRRs but not enabled\n");
+ printf("mtrr: CPU supports MTRRs but not enabled by BIOS\n");
return;
}
nmdesc = mtrrcap & 0xff;
- printf("mtrr: Pentium Pro MTRR support\n");
+ printf("mtrr: Pentium Pro MTRR support, %d var ranges", nmdesc);
/* If fixed MTRRs supported and enabled */
if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) {
- sc->mr_cap = MR686_FIXMTRR;
+ sc->mr_cap = MR_FIXMTRR;
nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
+ printf(", %d fixed ranges", MTRR_N64K + MTRR_N16K + MTRR_N4K);
}
+
+ printf("\n");
sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc),
M_MEMDESC, M_WAITOK|M_ZERO);
@@ -564,17 +560,19 @@ i686_mrinit(struct mem_range_softc *sc)
mrd = sc->mr_desc;
/* Populate the fixed MTRR entries' base/length */
- if (sc->mr_cap & MR686_FIXMTRR) {
+ if (sc->mr_cap & MR_FIXMTRR) {
for (i = 0; i < MTRR_N64K; i++, mrd++) {
mrd->mr_base = i * 0x10000;
mrd->mr_len = 0x10000;
mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
}
+
for (i = 0; i < MTRR_N16K; i++, mrd++) {
mrd->mr_base = i * 0x4000 + 0x80000;
mrd->mr_len = 0x4000;
mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
}
+
for (i = 0; i < MTRR_N4K; i++, mrd++) {
mrd->mr_base = i * 0x1000 + 0xc0000;
mrd->mr_len = 0x1000;
@@ -588,9 +586,9 @@ i686_mrinit(struct mem_range_softc *sc)
* If CPUID does not support leaf function 0x80000008, use the
* default a 36-bit address size.
*/
- cpuid(0x80000000, regs);
+ CPUID(0x80000000, regs[0], regs[1], regs[2], regs[3]);
if (regs[0] >= 0x80000008) {
- cpuid(0x80000008, regs);
+ CPUID(0x80000008, regs[0], regs[1], regs[2], regs[3]);
if (regs[0] & 0xff) {
mtrrmask = (1ULL << (regs[0] & 0xff)) - 1;
mtrrmask &= ~0x0000000000000fffULL;
@@ -599,9 +597,9 @@ i686_mrinit(struct mem_range_softc *sc)
/*
* Get current settings, anything set now is considered to have
- * been set by the firmware. (XXX has something already played here?)
+ * been set by the firmware.
*/
- i686_mrfetch(sc);
+ mrfetch(sc);
mrd = sc->mr_desc;
for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
if (mrd->mr_flags & MDF_ACTIVE)
@@ -610,19 +608,18 @@ i686_mrinit(struct mem_range_softc *sc)
}
/*
- * Initialise MTRRs on an AP after the BSP has run the init code (or
- * re-initialise the MTRRs on the BSP after suspend).
+ * Initialise MTRRs on a cpu from the software state.
*/
void
-i686_mrinit_cpu(struct mem_range_softc *sc)
+mrinit_cpu(struct mem_range_softc *sc)
{
- i686_mrstoreone(sc); /* set MTRRs to match BSP */
+ mrstoreone(sc); /* set MTRRs to match BSP */
}
void
-i686_mrreload_cpu(struct mem_range_softc *sc)
+mrreload_cpu(struct mem_range_softc *sc)
{
- disable_intr(); /* disable interrupts */
- i686_mrstoreone(sc); /* set MTRRs to match BSP */
+ disable_intr();
+ mrstoreone(sc); /* set MTRRs to match BSP */
enable_intr();
}
diff --git a/sys/arch/i386/i386/mtrr.c b/sys/arch/i386/i386/mtrr.c
index c5358a0970b..6da64026304 100644
--- a/sys/arch/i386/i386/mtrr.c
+++ b/sys/arch/i386/i386/mtrr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mtrr.c,v 1.11 2009/06/01 20:46:50 phessler Exp $ */
+/* $OpenBSD: mtrr.c,v 1.12 2013/08/24 04:26:16 mlarkin Exp $ */
/*-
* Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
* Copyright (c) 1999 Brian Fundakowski Feldman
@@ -33,7 +33,7 @@
#include <machine/specialreg.h>
-extern struct mem_range_ops i686_mrops;
+extern struct mem_range_ops mrops;
extern struct mem_range_ops k6_mrops;
void mtrrattach(int);
@@ -63,7 +63,7 @@ mtrrattach(int num)
(strcmp(cpu_vendor, "AuthenticAMD") == 0)) &&
(family == 0x6 || family == 0xf) &&
(cpu_feature & CPUID_MTRR)) {
- mem_range_softc.mr_op = &i686_mrops;
+ mem_range_softc.mr_op = &mrops;
}
/* Initialise memory range handling */
diff --git a/sys/arch/i386/include/specialreg.h b/sys/arch/i386/include/specialreg.h
index 13e036c36df..1447eea1186 100644
--- a/sys/arch/i386/include/specialreg.h
+++ b/sys/arch/i386/include/specialreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: specialreg.h,v 1.45 2013/05/06 00:15:11 dlg Exp $ */
+/* $OpenBSD: specialreg.h,v 1.46 2013/08/24 04:26:16 mlarkin Exp $ */
/* $NetBSD: specialreg.h,v 1.7 1994/10/27 04:16:26 cgd Exp $ */
/*-
@@ -308,46 +308,23 @@
#define MSR_LASTINTFROMIP 0x1dd
#define MSR_LASTINTTOIP 0x1de
#define MSR_ROB_CR_BKUPTMPDR6 0x1e0
-#define MSR_MTRRVarBase 0x200
-#define MSR_MTRRphysMask0 0x201
-#define MSR_MTRRphysBase1 0x202
-#define MSR_MTRRphysMask1 0x203
-#define MSR_MTRRphysBase2 0x204
-#define MSR_MTRRphysMask2 0x205
-#define MSR_MTRRphysBase3 0x206
-#define MSR_MTRRphysMask3 0x207
-#define MSR_MTRRphysBase4 0x208
-#define MSR_MTRRphysMask4 0x209
-#define MSR_MTRRphysBase5 0x20a
-#define MSR_MTRRphysMask5 0x20b
-#define MSR_MTRRphysBase6 0x20c
-#define MSR_MTRRphysMask6 0x20d
-#define MSR_MTRRphysBase7 0x20e
-#define MSR_MTRRphysMask7 0x20f
-#define MSR_MTRR64kBase 0x250
-#define MSR_MTRR16kBase 0x258
-#define MSR_MTRRfix16K_A0000 0x259
-#define MSR_MTRR4kBase 0x268
-#define MSR_MTRRfix4K_C8000 0x269
-#define MSR_MTRRfix4K_D0000 0x26a
-#define MSR_MTRRfix4K_D8000 0x26b
-#define MSR_MTRRfix4K_E0000 0x26c
-#define MSR_MTRRfix4K_E8000 0x26d
-#define MSR_MTRRfix4K_F0000 0x26e
-#define MSR_MTRRfix4K_F8000 0x26f
+#define MSR_MTRRvarBase 0x200
+#define MSR_MTRRfix64K_00000 0x250
+#define MSR_MTRRfix16K_80000 0x258
+#define MSR_MTRRfix4K_C0000 0x268
#define MSR_CR_PAT 0x277
#define MSR_MTRRdefType 0x2ff
#define MSR_PERF_FIXED_CTR1 0x30a /* CPU_CLK_Unhalted.Core */
#define MSR_PERF_FIXED_CTR2 0x30b /* CPU_CLK.Unhalted.Ref */
#define MSR_PERF_FIXED_CTR_CTRL 0x38d
-#define MSR_PERF_FIXED_CTR_FC_DIS 0x0 /* disable counter */
-#define MSR_PERF_FIXED_CTR_FC_1 0x1 /* count ring 1 */
-#define MSR_PERF_FIXED_CTR_FC_123 0x2 /* count rings 1,2,3 */
-#define MSR_PERF_FIXED_CTR_FC_ANY 0x3 /* count everything */
-#define MSR_PERF_FIXED_CTR_FC_MASK 0x3
-#define MSR_PERF_FIXED_CTR_FC(_i, _v) ((_v) << (4 * (_i)))
-#define MSR_PERF_FIXED_CTR_ANYTHR(_i) (0x4 << (4 * (_i)))
-#define MSR_PERF_FIXED_CTR_INT(_i) (0x8 << (4 * (_i)))
+#define MSR_PERF_FIXED_CTR_FC_DIS 0x0 /* disable counter */
+#define MSR_PERF_FIXED_CTR_FC_1 0x1 /* count ring 1 */
+#define MSR_PERF_FIXED_CTR_FC_123 0x2 /* count rings 1,2,3 */
+#define MSR_PERF_FIXED_CTR_FC_ANY 0x3 /* count everything */
+#define MSR_PERF_FIXED_CTR_FC_MASK 0x3
+#define MSR_PERF_FIXED_CTR_FC(_i, _v) ((_v) << (4 * (_i)))
+#define MSR_PERF_FIXED_CTR_ANYTHR(_i) (0x4 << (4 * (_i)))
+#define MSR_PERF_FIXED_CTR_INT(_i) (0x8 << (4 * (_i)))
#define MSR_PERF_GLOBAL_CTRL 0x38f
#define MSR_PERF_GLOBAL_CTR1_EN (1ULL << 33)
#define MSR_PERF_GLOBAL_CTR2_EN (1ULL << 34)
@@ -393,10 +370,10 @@
#define MSR_SYSCFG 0xc0000010
#define MSR_EFER 0xc0000080 /* Extended feature enable */
-#define EFER_SCE 0x00000001 /* SYSCALL extension */
-#define EFER_LME 0x00000100 /* Long Mode Active */
-#define EFER_LMA 0x00000400 /* Long Mode Enabled */
-#define EFER_NXE 0x00000800 /* No-Execute Enabled */
+#define EFER_SCE 0x00000001 /* SYSCALL extension */
+#define EFER_LME 0x00000100 /* Long Mode Active */
+#define EFER_LMA 0x00000400 /* Long Mode Enabled */
+#define EFER_NXE 0x00000800 /* No-Execute Enabled */
#define MSR_STAR 0xc0000081 /* 32 bit syscall gate addr */
#define MSR_LSTAR 0xc0000082 /* 64 bit syscall gate addr */
@@ -409,7 +386,7 @@
#define MSR_INT_PEN_MSG 0xc0010055 /* Interrupt pending message */
#define MSR_DE_CFG 0xc0011029 /* Decode Configuration */
-#define DE_CFG_721 0x00000001 /* errata 721 */
+#define DE_CFG_721 0x00000001 /* errata 721 */
#define IPM_C1E_CMP_HLT 0x10000000
#define IPM_SMI_CMP_HLT 0x08000000
@@ -418,26 +395,26 @@
* These require a 'passcode' for access. See cpufunc.h.
*/
#define MSR_HWCR 0xc0010015
-#define HWCR_FFDIS 0x00000040
+#define HWCR_FFDIS 0x00000040
#define MSR_NB_CFG 0xc001001f
-#define NB_CFG_DISIOREQLOCK 0x0000000000000004ULL
-#define NB_CFG_DISDATMSK 0x0000001000000000ULL
+#define NB_CFG_DISIOREQLOCK 0x0000000000000004ULL
+#define NB_CFG_DISDATMSK 0x0000001000000000ULL
#define MSR_LS_CFG 0xc0011020
-#define LS_CFG_DIS_LS2_SQUISH 0x02000000
+#define LS_CFG_DIS_LS2_SQUISH 0x02000000
#define MSR_IC_CFG 0xc0011021
-#define IC_CFG_DIS_SEQ_PREFETCH 0x00000800
+#define IC_CFG_DIS_SEQ_PREFETCH 0x00000800
#define MSR_DC_CFG 0xc0011022
-#define DC_CFG_DIS_CNV_WC_SSO 0x00000004
-#define DC_CFG_DIS_SMC_CHK_BUF 0x00000400
+#define DC_CFG_DIS_CNV_WC_SSO 0x00000004
+#define DC_CFG_DIS_SMC_CHK_BUF 0x00000400
#define MSR_BU_CFG 0xc0011023
-#define BU_CFG_THRL2IDXCMPDIS 0x0000080000000000ULL
-#define BU_CFG_WBPFSMCCHKDIS 0x0000200000000000ULL
-#define BU_CFG_WBENHWSBDIS 0x0001000000000000ULL
+#define BU_CFG_THRL2IDXCMPDIS 0x0000080000000000ULL
+#define BU_CFG_WBPFSMCCHKDIS 0x0000200000000000ULL
+#define BU_CFG_WBENHWSBDIS 0x0001000000000000ULL
/*
* Constants related to MTRRs
diff --git a/sys/sys/memrange.h b/sys/sys/memrange.h
index 823f9b06f56..5d947efffd0 100644
--- a/sys/sys/memrange.h
+++ b/sys/sys/memrange.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: memrange.h,v 1.6 2013/04/17 16:24:59 deraadt Exp $ */
+/* $OpenBSD: memrange.h,v 1.7 2013/08/24 04:26:16 mlarkin Exp $ */
/*-
* Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
* All rights reserved.
@@ -43,7 +43,6 @@
#define MDF_ACTIVE (1<<27) /* currently active */
#define MDF_BOGUS (1<<28) /* we don't like it */
#define MDF_FIXACTIVE (1<<29) /* can't be turned off */
-#define MDF_BUSY (1<<30) /* range is in use */
#define MDF_FORCE (1<<31) /* force risky changes */
struct mem_range_desc {