author		Peter Hessler <phessler@cvs.openbsd.org>	2008-06-11 09:22:40 +0000
committer	Peter Hessler <phessler@cvs.openbsd.org>	2008-06-11 09:22:40 +0000
commit		c1eea89f58e9f16b5e20fa9e85638d8c73a29beb (patch)
tree		988c4b201d5ae709dd0af50a247e78ce77e2396c
parent		8f16e6762c7026470a87ebe8d49972a2f938507b (diff)
Synchronize the MTRR API with i386, and enable
"just commit it" deraadt@
-rw-r--r--	sys/arch/amd64/amd64/amd64_mem.c	601
-rw-r--r--	sys/arch/amd64/amd64/cpu.c		8
-rw-r--r--	sys/arch/amd64/amd64/mem.c		106
-rw-r--r--	sys/arch/amd64/amd64/mtrr.c		64
-rw-r--r--	sys/arch/amd64/amd64/sys_machdep.c	69
-rw-r--r--	sys/arch/amd64/conf/GENERIC		5
-rw-r--r--	sys/arch/amd64/conf/files.amd64		6
7 files changed, 780 insertions, 79 deletions
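
With this change the old AMD64_GET_MTRR/AMD64_SET_MTRR sysarch(2) calls go away (see the sys_machdep.c hunk below) and userland manages ranges through the i386-style MEMRANGE_GET/MEMRANGE_SET ioctls on /dev/mem instead. A minimal sketch of a userland consumer, assuming the structures and flags from <sys/memrange.h> as used in the diff; the framebuffer address, size, and owner string are made up for illustration:

/*
 * Sketch: ask for a write-combining mapping of a (hypothetical)
 * framebuffer via the memrange interface this commit enables on amd64.
 * MEMRANGE_SET, MEMRANGE_SET_UPDATE, struct mem_range_desc and
 * struct mem_range_op are the names used in the diff below.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/memrange.h>

#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct mem_range_desc mrd;
	struct mem_range_op mo;
	int fd;

	fd = open("/dev/mem", O_RDWR);	/* mem_ioctl() answers on /dev/mem */
	if (fd == -1)
		err(1, "open /dev/mem");

	memset(&mrd, 0, sizeof(mrd));
	mrd.mr_base = 0xd0000000ULL;	/* hypothetical framebuffer base */
	mrd.mr_len = 0x02000000ULL;	/* 32MB: power of two, base-aligned */
	mrd.mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrd.mr_owner, "demo", sizeof(mrd.mr_owner));

	memset(&mo, 0, sizeof(mo));
	mo.mo_desc = &mrd;
	mo.mo_arg[0] = MEMRANGE_SET_UPDATE;	/* add/update, per amd64_mrset() */

	if (ioctl(fd, MEMRANGE_SET, &mo) == -1)
		err(1, "MEMRANGE_SET");
	close(fd);
	return 0;
}

The base/length pair has to satisfy the mrvalid() rules enforced in amd64_mrset() below: a 4k-aligned base, a power-of-two length of at least 4k, and a base aligned to that length.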
diff --git a/sys/arch/amd64/amd64/amd64_mem.c b/sys/arch/amd64/amd64/amd64_mem.c
new file mode 100644
index 00000000000..7ee4bb35ed7
--- /dev/null
+++ b/sys/arch/amd64/amd64/amd64_mem.c
@@ -0,0 +1,601 @@
+/*	$OpenBSD: amd64_mem.c,v 1.1 2008/06/11 09:22:38 phessler Exp $ */
+/*-
+ * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/i386/i386/i686_mem.c,v 1.8 1999/10/12 22:53:05 green Exp $
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/memrange.h>
+
+#include <machine/cpufunc.h>
+#include <machine/specialreg.h>
+
+/*
+ * AMD64 memory range operations
+ *
+ * This code implements a set of MSRs known as MTRR which are defined in
+ * AMD64 Arch Programming Manual Vol2, section 7.7
+ */
+
+char *mem_owner_bios = "BIOS";
+
+#define MRAMD64_FIXMTRR	(1<<0)
+
+#define mrwithin(mr, a) \
+	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
+#define mroverlap(mra, mrb) \
+	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))
+
+#define mrvalid(base, len) \
+	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */ \
+	 ((len) >= (1 << 12)) &&	/* length is >= 4k */ \
+	 powerof2((len)) &&		/* ... and power of two */ \
+	 !((base) & ((len) - 1)))	/* range is not discontiuous */
+
+#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))
+
+void	amd64_mrinit(struct mem_range_softc *sc);
+int	amd64_mrset(struct mem_range_softc *sc,
+	    struct mem_range_desc *mrd,
+	    int *arg);
+void	amd64_mrAPinit(struct mem_range_softc *sc);
+
+struct mem_range_ops amd64_mrops = {
+	amd64_mrinit,
+	amd64_mrset,
+	amd64_mrAPinit
+};
+
+/* XXX for AP startup hook */
+u_int64_t mtrrcap, mtrrdef;
+
+struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
+	    struct mem_range_desc *mrd);
+void	amd64_mrfetch(struct mem_range_softc *sc);
+int	amd64_mtrrtype(u_int64_t flags);
+int	amd64_mrt2mtrr(u_int64_t flags, int oldval);
+int	amd64_mtrr2mrt(int val);
+int	amd64_mtrrconflict(u_int64_t flag1, u_int64_t flag2);
+void	amd64_mrstore(struct mem_range_softc *sc);
+void	amd64_mrstoreone(void *arg);
+struct mem_range_desc *amd64_mtrrfixsearch(struct mem_range_softc *sc,
+	    u_int64_t addr);
+int	amd64_mrsetlow(struct mem_range_softc *sc,
+	    struct mem_range_desc *mrd,
+	    int *arg);
+int	amd64_mrsetvariable(struct mem_range_softc *sc,
+	    struct mem_range_desc *mrd,
+	    int *arg);
+
+/* AMD64 MTRR type to memory range type conversion */
+int amd64_mtrrtomrt[] = {
+	MDF_UNCACHEABLE,
+	MDF_WRITECOMBINE,
+	MDF_UNKNOWN,
+	MDF_UNKNOWN,
+	MDF_WRITETHROUGH,
+	MDF_WRITEPROTECT,
+	MDF_WRITEBACK
+};
+
+#define MTRRTOMRTLEN (sizeof(amd64_mtrrtomrt) / sizeof(amd64_mtrrtomrt[0]))
+
+int
+amd64_mtrr2mrt(int val)
+{
+	if (val < 0 || val >= MTRRTOMRTLEN)
+		return MDF_UNKNOWN;
+	return amd64_mtrrtomrt[val];
+}
+
+/*
+ * AMD64 MTRR conflicts. Writeback and uncachable may overlap.
+ */
+int
+amd64_mtrrconflict(u_int64_t flag1, u_int64_t flag2)
+{
+
+	flag1 &= MDF_ATTRMASK;
+	flag2 &= MDF_ATTRMASK;
+	if (flag1 == flag2 ||
+	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
+	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
+		return 0;
+	return 1;
+}
+
+/*
+ * Look for an exactly-matching range.
+ */
+struct mem_range_desc *
+mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
+{
+	struct mem_range_desc *cand;
+	int i;
+
+	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
+		if ((cand->mr_base == mrd->mr_base) &&
+		    (cand->mr_len == mrd->mr_len))
+			return(cand);
+	return(NULL);
+}
+
+/*
+ * Fetch the current mtrr settings from the current CPU (assumed to all
+ * be in sync in the SMP case). Note that if we are here, we assume
+ * that MTRRs are enabled, and we may or may not have fixed MTRRs.
+ */
+void
+amd64_mrfetch(struct mem_range_softc *sc)
+{
+	struct mem_range_desc *mrd;
+	u_int64_t msrv;
+	int i, j, msr;
+
+	mrd = sc->mr_desc;
+
+	/* Get fixed-range MTRRs */
+	if (sc->mr_cap & MRAMD64_FIXMTRR) {
+		msr = MSR_MTRRfix64K_00000;
+		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
+			msrv = rdmsr(msr);
+			for (j = 0; j < 8; j++, mrd++) {
+				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
+				    amd64_mtrr2mrt(msrv & 0xff) |
+				    MDF_ACTIVE;
+				if (mrd->mr_owner[0] == 0)
+					strlcpy(mrd->mr_owner, mem_owner_bios,
+					    sizeof(mrd->mr_owner));
+				msrv = msrv >> 8;
+			}
+		}
+		msr = MSR_MTRRfix16K_80000;
+		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
+			msrv = rdmsr(msr);
+			for (j = 0; j < 8; j++, mrd++) {
+				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
+				    amd64_mtrr2mrt(msrv & 0xff) |
+				    MDF_ACTIVE;
+				if (mrd->mr_owner[0] == 0)
+					strlcpy(mrd->mr_owner, mem_owner_bios,
+					    sizeof(mrd->mr_owner));
+				msrv = msrv >> 8;
+			}
+		}
+		msr = MSR_MTRRfix4K_C0000;
+		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
+			msrv = rdmsr(msr);
+			for (j = 0; j < 8; j++, mrd++) {
+				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
+				    amd64_mtrr2mrt(msrv & 0xff) |
+				    MDF_ACTIVE;
+				if (mrd->mr_owner[0] == 0)
+					strlcpy(mrd->mr_owner, mem_owner_bios,
+					    sizeof(mrd->mr_owner));
+				msrv = msrv >> 8;
+			}
+		}
+	}
+
+	/* Get remainder which must be variable MTRRs */
+	msr = MSR_MTRRphysBase0;
+	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
+		msrv = rdmsr(msr);
+		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
+		    amd64_mtrr2mrt(msrv & 0xff);
+		mrd->mr_base = msrv & 0x0000000ffffff000LL;
+		msrv = rdmsr(msr + 1);
+		mrd->mr_flags = (msrv & 0x800) ?
+		    (mrd->mr_flags | MDF_ACTIVE) :
+		    (mrd->mr_flags & ~MDF_ACTIVE);
+		/* Compute the range from the mask. Ick. */
+		mrd->mr_len = (~(msrv & 0xfffffffffffff000LL) & 0x0000000fffffffffLL) + 1;
+		if (!mrvalid(mrd->mr_base, mrd->mr_len))
+			mrd->mr_flags |= MDF_BOGUS;
+		/* If unclaimed and active, must be the BIOS */
+		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
+			strlcpy(mrd->mr_owner, mem_owner_bios,
+			    sizeof(mrd->mr_owner));
+	}
+}
+
+/*
+ * Return the MTRR memory type matching a region's flags
+ */
+int
+amd64_mtrrtype(u_int64_t flags)
+{
+	int i;
+
+	flags &= MDF_ATTRMASK;
+
+	for (i = 0; i < MTRRTOMRTLEN; i++) {
+		if (amd64_mtrrtomrt[i] == MDF_UNKNOWN)
+			continue;
+		if (flags == amd64_mtrrtomrt[i])
+			return(i);
+	}
+	return(-1);
+}
+
+int
+amd64_mrt2mtrr(u_int64_t flags, int oldval)
+{
+	int val;
+
+	if ((val = amd64_mtrrtype(flags)) == -1)
+		return oldval & 0xff;
+	return val & 0xff;
+}
+
+/*
+ * Update running CPU(s) MTRRs to match the ranges in the descriptor
+ * list.
+ *
+ * XXX Must be called with interrupts enabled.
+ */
+void
+amd64_mrstore(struct mem_range_softc *sc)
+{
+	disable_intr();				/* disable interrupts */
+	amd64_mrstoreone((void *)sc);
+	enable_intr();
+}
+
+/*
+ * Update the current CPU's MTRRs with those represented in the
+ * descriptor list. Note that we do this wholesale rather than
+ * just stuffing one entry; this is simpler (but slower, of course).
+ */
+void
+amd64_mrstoreone(void *arg)
+{
+	struct mem_range_softc *sc = (struct mem_range_softc *)arg;
+	struct mem_range_desc *mrd;
+	u_int64_t omsrv, msrv;
+	int i, j, msr;
+	u_int cr4save;
+
+	mrd = sc->mr_desc;
+
+	cr4save = rcr4();	/* save cr4 */
+	if (cr4save & CR4_PGE)
+		lcr4(cr4save & ~CR4_PGE);
+	lcr0((rcr0() & ~CR0_NW) | CR0_CD);	/* disable caches (CD = 1, NW = 0) */
+	wbinvd();				/* flush caches, TLBs */
+	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800);	/* disable MTRRs (E = 0) */
+
+	/* Set fixed-range MTRRs */
+	if (sc->mr_cap & MRAMD64_FIXMTRR) {
+		msr = MSR_MTRRfix64K_00000;
+		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
+			msrv = 0;
+			omsrv = rdmsr(msr);
+			for (j = 7; j >= 0; j--) {
+				msrv = msrv << 8;
+				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
+				    omsrv >> (j*8));
+			}
+			wrmsr(msr, msrv);
+			mrd += 8;
+		}
+		msr = MSR_MTRRfix16K_80000;
+		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
+			msrv = 0;
+			omsrv = rdmsr(msr);
+			for (j = 7; j >= 0; j--) {
+				msrv = msrv << 8;
+				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
+				    omsrv >> (j*8));
+			}
+			wrmsr(msr, msrv);
+			mrd += 8;
+		}
+		msr = MSR_MTRRfix4K_C0000;
+		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
+			msrv = 0;
+			omsrv = rdmsr(msr);
+			for (j = 7; j >= 0; j--) {
+				msrv = msrv << 8;
+				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
+				    omsrv >> (j*8));
+			}
+			wrmsr(msr, msrv);
+			mrd += 8;
+		}
+	}
+
+	/* Set remainder which must be variable MTRRs */
+	msr = MSR_MTRRphysBase0;
+	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
+		/* base/type register */
+		omsrv = rdmsr(msr);
+		if (mrd->mr_flags & MDF_ACTIVE) {
+			msrv = mrd->mr_base & 0xfffffffffffff000LL;
+			msrv |= amd64_mrt2mtrr(mrd->mr_flags, omsrv);
+		} else {
+			msrv = 0;
+		}
+		wrmsr(msr, msrv);
+
+		/* mask/active register */
+		if (mrd->mr_flags & MDF_ACTIVE) {
+			msrv = 0x800 | (~(mrd->mr_len - 1) & 0x0000000ffffff000LL);
+		} else {
+			msrv = 0;
+		}
+		wrmsr(msr + 1, msrv);
+	}
+	wbinvd();	/* flush caches, TLBs */
+	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | 0x800);	/* restore MTRR state */
+	lcr0(rcr0() & ~(CR0_CD | CR0_NW));	/* enable caches CD = 0 and NW = 0 */
+	lcr4(cr4save);	/* restore cr4 */
+}
+
+/*
+ * Hunt for the fixed MTRR referencing (addr)
+ */
+struct mem_range_desc *
+amd64_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
+{
+	struct mem_range_desc *mrd;
+	int i;
+
+	for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
+		if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len)))
+			return(mrd);
+	return(NULL);
+}
+
+/*
+ * Try to satisfy the given range request by manipulating the fixed MTRRs that
+ * cover low memory.
+ *
+ * Note that we try to be generous here; we'll bloat the range out to the
+ * next higher/lower boundary to avoid the consumer having to know too much
+ * about the mechanisms here.
+ *
+ * XXX note that this will have to be updated when we start supporting "busy" ranges.
+ */
+int
+amd64_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
+{
+	struct mem_range_desc *first_md, *last_md, *curr_md;
+
+	/* range check */
+	if (((first_md = amd64_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
+	    ((last_md = amd64_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
+		return(EINVAL);
+
+	/* check we aren't doing something risky */
+	if (!(mrd->mr_flags & MDF_FORCE))
+		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
+			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
+				return (EACCES);
+		}
+
+	/* set flags, clear set-by-firmware flag */
+	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
+		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags);
+		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
+	}
+
+	return(0);
+}
+
+
+/*
+ * Modify/add a variable MTRR to satisfy the request.
+ *
+ * XXX needs to be updated to properly support "busy" ranges.
+ */
+int
+amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
+    int *arg)
+{
+	struct mem_range_desc *curr_md, *free_md;
+	int i;
+
+	/*
+	 * Scan the currently active variable descriptors, look for
+	 * one we exactly match (straight takeover) and for possible
+	 * accidental overlaps.
+	 * Keep track of the first empty variable descriptor in case we
+	 * can't perform a takeover.
+	 */
+	i = (sc->mr_cap & MRAMD64_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
+	curr_md = sc->mr_desc + i;
+	free_md = NULL;
+	for (; i < sc->mr_ndesc; i++, curr_md++) {
+		if (curr_md->mr_flags & MDF_ACTIVE) {
+			/* exact match? */
+			if ((curr_md->mr_base == mrd->mr_base) &&
+			    (curr_md->mr_len == mrd->mr_len)) {
+				/* whoops, owned by someone */
+				if (curr_md->mr_flags & MDF_BUSY)
+					return(EBUSY);
+				/* check we aren't doing something risky */
+				if (!(mrd->mr_flags & MDF_FORCE) &&
+				    ((curr_md->mr_flags & MDF_ATTRMASK)
+				    == MDF_UNKNOWN))
+					return (EACCES);
+				/* Ok, just hijack this entry */
+				free_md = curr_md;
+				break;
+			}
+			/* non-exact overlap ? */
+			if (mroverlap(curr_md, mrd)) {
+				/* between conflicting region types? */
+				if (amd64_mtrrconflict(curr_md->mr_flags,
+				    mrd->mr_flags))
+					return(EINVAL);
+			}
+		} else if (free_md == NULL) {
+			free_md = curr_md;
+		}
+	}
+	/* got somewhere to put it? */
+	if (free_md == NULL)
+		return(ENOSPC);
+
+	/* Set up new descriptor */
+	free_md->mr_base = mrd->mr_base;
+	free_md->mr_len = mrd->mr_len;
+	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
+	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
+	return(0);
+}
+
+/*
+ * Handle requests to set memory range attributes by manipulating MTRRs.
+ *
+ */
+int
+amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
+{
+	struct mem_range_desc *targ;
+	int error = 0;
+
+	switch(*arg) {
+	case MEMRANGE_SET_UPDATE:
+		/* make sure that what's being asked for is even possible at all */
+		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
+		    amd64_mtrrtype(mrd->mr_flags) == -1)
+			return(EINVAL);
+
+#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
+
+		/* are the "low memory" conditions applicable? */
+		if ((sc->mr_cap & MRAMD64_FIXMTRR) &&
+		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
+			if ((error = amd64_mrsetlow(sc, mrd, arg)) != 0)
+				return(error);
+		} else {
+			/* it's time to play with variable MTRRs */
+			if ((error = amd64_mrsetvariable(sc, mrd, arg)) != 0)
+				return(error);
+		}
+		break;
+
+	case MEMRANGE_SET_REMOVE:
+		if ((targ = mem_range_match(sc, mrd)) == NULL)
+			return(ENOENT);
+		if (targ->mr_flags & MDF_FIXACTIVE)
+			return(EPERM);
+		if (targ->mr_flags & MDF_BUSY)
+			return(EBUSY);
+		targ->mr_flags &= ~MDF_ACTIVE;
+		targ->mr_owner[0] = 0;
+		break;
+
+	default:
+		return(EOPNOTSUPP);
+	}
+
+	/* update the hardware */
+	amd64_mrstore(sc);
+	amd64_mrfetch(sc);	/* refetch to see where we're at */
+	return(0);
+}
+
+/*
+ * Work out how many ranges we support, initialise storage for them,
+ * fetch the initial settings.
+ */
+void
+amd64_mrinit(struct mem_range_softc *sc)
+{
+	struct mem_range_desc *mrd;
+	int nmdesc = 0;
+	int i;
+
+	mtrrcap = rdmsr(MSR_MTRRcap);
+	mtrrdef = rdmsr(MSR_MTRRdefType);
+
+	/* For now, bail out if MTRRs are not enabled */
+	if (!(mtrrdef & 0x800)) {
+		printf("mtrr: CPU supports MTRRs but not enabled\n");
+		return;
+	}
+	nmdesc = mtrrcap & 0xff;
+	printf("mtrr: Pentium Pro MTRR support\n");
+
+	/* If fixed MTRRs supported and enabled */
+	if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) {
+		sc->mr_cap = MRAMD64_FIXMTRR;
+		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
+	}
+
+	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc),
+	    M_MEMDESC, M_WAITOK|M_ZERO);
+	sc->mr_ndesc = nmdesc;
+
+	mrd = sc->mr_desc;
+
+	/* Populate the fixed MTRR entries' base/length */
+	if (sc->mr_cap & MRAMD64_FIXMTRR) {
+		for (i = 0; i < MTRR_N64K; i++, mrd++) {
+			mrd->mr_base = i * 0x10000;
+			mrd->mr_len = 0x10000;
+			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
+		}
+		for (i = 0; i < MTRR_N16K; i++, mrd++) {
+			mrd->mr_base = i * 0x4000 + 0x80000;
+			mrd->mr_len = 0x4000;
+			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
+		}
+		for (i = 0; i < MTRR_N4K; i++, mrd++) {
+			mrd->mr_base = i * 0x1000 + 0xc0000;
+			mrd->mr_len = 0x1000;
+			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE;
+		}
+	}
+
+	/*
+	 * Get current settings, anything set now is considered to have
+	 * been set by the firmware. (XXX has something already played here?)
+	 */
+	amd64_mrfetch(sc);
+	mrd = sc->mr_desc;
+	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
+		if (mrd->mr_flags & MDF_ACTIVE)
+			mrd->mr_flags |= MDF_FIRMWARE;
+	}
+}
+
+/*
+ * Initialise MTRRs on an AP after the BSP has run the init code.
+ */
+void
+amd64_mrAPinit(struct mem_range_softc *sc)
+{
+	amd64_mrstoreone((void *)sc);		/* set MTRRs to match BSP */
+	wrmsr(MSR_MTRRdefType, mtrrdef);	/* set MTRR behaviour to match BSP */
+}
+
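A note on the variable-range arithmetic above: the hardware's MTRRphysMask register stores a mask, while the softc stores a length, and amd64_mrfetch()/amd64_mrstoreone() convert between the two with ~(len - 1) and ~mask + 1. A standalone sketch of that round trip, with the 36-bit physical-address constants copied from the diff and an arbitrary test length:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Physical-address mask bits used in the diff (36-bit PA, 4k granularity). */
#define MTRR_PA_MASK	0x0000000ffffff000ULL

/* Length -> MTRRphysMask encoding, as in amd64_mrstoreone(). */
static uint64_t
len_to_mask(uint64_t len)
{
	return ~(len - 1) & MTRR_PA_MASK;
}

/* MTRRphysMask -> length decoding, as in amd64_mrfetch(). */
static uint64_t
mask_to_len(uint64_t mask)
{
	return (~(mask & 0xfffffffffffff000ULL) & 0x0000000fffffffffULL) + 1;
}

int
main(void)
{
	uint64_t len = 0x02000000ULL;	/* 32MB, power of two */

	assert(mask_to_len(len_to_mask(len)) == len);
	printf("len %#llx -> mask %#llx\n",
	    (unsigned long long)len, (unsigned long long)len_to_mask(len));
	return 0;
}

This is also why mrvalid() demands a power-of-two length: only then does ~(len - 1) produce a contiguous mask the hardware can represent.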
diff --git a/sys/arch/amd64/amd64/cpu.c b/sys/arch/amd64/amd64/cpu.c
index c334b36567b..c86cd4ac502 100644
--- a/sys/arch/amd64/amd64/cpu.c
+++ b/sys/arch/amd64/amd64/cpu.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.c,v 1.17 2008/04/13 11:35:55 thib Exp $ */
+/*	$OpenBSD: cpu.c,v 1.18 2008/06/11 09:22:38 phessler Exp $ */
 /*	$NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
 
 /*-
@@ -387,12 +387,6 @@ cpu_init(struct cpu_info *ci)
 	lcr0(rcr0() | CR0_WP);
 	lcr4(rcr4() | CR4_DEFAULT);
 
-#ifdef MTRR
-	if ((ci->ci_flags & CPUF_AP) == 0)
-		i686_mtrr_init_first();
-	mtrr_init_cpu(ci);
-#endif
-
 #ifdef MULTIPROCESSOR
 	ci->ci_flags |= CPUF_RUNNING;
 #endif
diff --git a/sys/arch/amd64/amd64/mem.c b/sys/arch/amd64/amd64/mem.c
index 923f1ea33ec..1c5f01e1746 100644
--- a/sys/arch/amd64/amd64/mem.c
+++ b/sys/arch/amd64/amd64/mem.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: mem.c,v 1.10 2008/06/10 02:55:39 weingart Exp $ */
+/*	$OpenBSD: mem.c,v 1.11 2008/06/11 09:22:38 phessler Exp $ */
 /*
  * Copyright (c) 1988 University of Utah.
  * Copyright (c) 1982, 1986, 1990, 1993
@@ -63,6 +63,8 @@
 
 #include <uvm/uvm_extern.h>
 
+#include "mtrr.h"
+
 caddr_t zeropage;
 extern int start, end, etext;
 
@@ -79,6 +81,14 @@ extern int allowaperture;
 #define BIOS_END	0xFFFFF
 #endif
 
+#if NMTRR > 0
+struct mem_range_softc mem_range_softc;
+int mem_ioctl(dev_t, u_long, caddr_t, int, struct proc *);
+int mem_range_attr_get(struct mem_range_desc *, int *);
+int mem_range_attr_set(struct mem_range_desc *, int *);
+#endif
+
+
 /*ARGSUSED*/
 int
 mmopen(dev_t dev, int flag, int mode, struct proc *p)
@@ -240,6 +250,100 @@ mmmmap(dev_t dev, off_t off, int prot)
 int
 mmioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
 {
+#if NMTRR > 0
+	switch (minor(dev)) {
+	case 0:
+	case 4:
+		return mem_ioctl(dev, cmd, data, flags, p);
+	}
+#endif
 	return (ENODEV);
 }
 
+#if NMTRR > 0
+/*
+ * Operations for changing memory attributes.
+ *
+ * This is basically just an ioctl shim for mem_range_attr_get
+ * and mem_range_attr_set.
+ */
+int
+mem_ioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+	int nd, error = 0;
+	struct mem_range_op *mo = (struct mem_range_op *)data;
+	struct mem_range_desc *md;
+
+	/* is this for us? */
+	if ((cmd != MEMRANGE_GET) &&
+	    (cmd != MEMRANGE_SET))
+		return (ENOTTY);
+
+	/* any chance we can handle this? */
+	if (mem_range_softc.mr_op == NULL)
+		return (EOPNOTSUPP);
+	/* do we have any descriptors? */
+	if (mem_range_softc.mr_ndesc == 0)
+		return (ENXIO);
+
+	switch (cmd) {
+	case MEMRANGE_GET:
+		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
+		if (nd > 0) {
+			md = (struct mem_range_desc *)
+			    malloc(nd * sizeof(struct mem_range_desc),
+			    M_MEMDESC, M_WAITOK);
+			error = mem_range_attr_get(md, &nd);
+			if (!error)
+				error = copyout(md, mo->mo_desc,
+				    nd * sizeof(struct mem_range_desc));
+			free(md, M_MEMDESC);
+		} else {
+			nd = mem_range_softc.mr_ndesc;
+		}
+		mo->mo_arg[0] = nd;
+		break;
+
+	case MEMRANGE_SET:
+		md = malloc(sizeof(struct mem_range_desc), M_MEMDESC, M_WAITOK);
+		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
+		/* clamp description string */
+		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
+		if (error == 0)
+			error = mem_range_attr_set(md, &mo->mo_arg[0]);
+		free(md, M_MEMDESC);
+		break;
+	}
+	return (error);
+}
+
+/*
+ * Implementation-neutral, kernel-callable functions for manipulating
+ * memory range attributes.
+ */
+int
+mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
+{
+	/* can we handle this? */
+	if (mem_range_softc.mr_op == NULL)
+		return (EOPNOTSUPP);
+
+	if (*arg == 0) {
+		*arg = mem_range_softc.mr_ndesc;
+	} else {
+		bcopy(mem_range_softc.mr_desc, mrd, (*arg) * sizeof(struct mem_range_desc));
+	}
+	return (0);
+}
+
+int
+mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
+{
+	/* can we handle this? */
+	if (mem_range_softc.mr_op == NULL)
+		return (EOPNOTSUPP);
+
+	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));
+}
+
+#endif /* NMTRR > 0 */
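Besides the ioctl shim, the mem.c hunk above exports mem_range_attr_get() and mem_range_attr_set() for in-kernel callers. A hedged sketch of how a driver might use them to request a write-combining mapping; demo_map_wc() and its caller-supplied range are hypothetical, the arg convention follows mem_ioctl() above:

/*
 * Sketch: in-kernel consumer of the wrappers added to mem.c.
 * The commit only declares mem_range_attr_set() inside mem.c, so a
 * real caller would need a shared prototype; one is repeated here
 * for the sake of a self-contained example.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/memrange.h>

extern int mem_range_attr_set(struct mem_range_desc *, int *);

int
demo_map_wc(paddr_t base, psize_t len)
{
	struct mem_range_desc mrd;
	int arg, error;

	mrd.mr_base = base;		/* must satisfy mrvalid() */
	mrd.mr_len = len;
	mrd.mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrd.mr_owner, "demo", sizeof(mrd.mr_owner));

	arg = MEMRANGE_SET_UPDATE;	/* same request code the ioctl path uses */
	error = mem_range_attr_set(&mrd, &arg);
	if (error)
		printf("demo: mem_range_attr_set failed (%d)\n", error);
	return (error);
}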
diff --git a/sys/arch/amd64/amd64/mtrr.c b/sys/arch/amd64/amd64/mtrr.c
new file mode 100644
index 00000000000..32048871538
--- /dev/null
+++ b/sys/arch/amd64/amd64/mtrr.c
@@ -0,0 +1,64 @@
+/*	$OpenBSD: mtrr.c,v 1.1 2008/06/11 09:22:38 phessler Exp $ */
+/*-
+ * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
+ * Copyright (c) 1999 Brian Fundakowski Feldman
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/param.h>
+#include <sys/memrange.h>
+#include <sys/systm.h>
+
+#include <machine/specialreg.h>
+
+extern struct mem_range_ops amd64_mrops;
+
+void	mtrrattach(int);
+
+void
+mtrrattach(int num)
+{
+	int family, model, step;
+
+	if (num > 1)
+		return;
+
+	family = (cpu_id >> 8) & 0xf;
+	model = (cpu_id >> 4) & 0xf;
+	step = (cpu_id >> 0) & 0xf;
+
+	/* Try for i686 MTRRs */
+	if ((cpu_feature & CPUID_MTRR) &&
+	    (family == 0x6 || family == 0xf) &&
+	    ((strcmp(cpu_vendor, "GenuineIntel") == 0) ||
+	    (strcmp(cpu_vendor, "AuthenticAMD") == 0))) {
+		mem_range_softc.mr_op = &amd64_mrops;
+
+	}
+	/* Initialise memory range handling */
+	if (mem_range_softc.mr_op != NULL)
+		mem_range_softc.mr_op->init(&mem_range_softc);
+}
+
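mtrrattach() above gates on the CPUID signature in cpu_id, accepting family 6 and family 0xf parts from Intel and AMD. A small userland illustration of the same bit-slicing; the example signature value is illustrative, while the shift/mask constants match the diff:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Example CPUID(1).EAX signature: family 0xf, model 4, stepping 8;
	 * the value itself is made up for demonstration. */
	uint32_t cpu_id = 0x00000f48;

	/* Same slicing as mtrrattach() in the diff. */
	int family = (cpu_id >> 8) & 0xf;
	int model  = (cpu_id >> 4) & 0xf;
	int step   = (cpu_id >> 0) & 0xf;

	printf("family %#x model %#x stepping %#x\n", family, model, step);
	return 0;
}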
diff --git a/sys/arch/amd64/amd64/sys_machdep.c b/sys/arch/amd64/amd64/sys_machdep.c
index a8d37ff41a0..8394f0b8b2f 100644
--- a/sys/arch/amd64/amd64/sys_machdep.c
+++ b/sys/arch/amd64/amd64/sys_machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: sys_machdep.c,v 1.6 2008/05/23 15:39:43 jasper Exp $ */
+/*	$OpenBSD: sys_machdep.c,v 1.7 2008/06/11 09:22:38 phessler Exp $ */
 /*	$NetBSD: sys_machdep.c,v 1.1 2003/04/26 18:39:32 fvdl Exp $ */
 
 /*-
@@ -73,8 +73,6 @@ int amd64_get_ioperm(struct proc *, void *, register_t *);
 int amd64_set_ioperm(struct proc *, void *, register_t *);
 #endif
 int amd64_iopl(struct proc *, void *, register_t *);
-int amd64_get_mtrr(struct proc *, void *, register_t *);
-int amd64_set_mtrr(struct proc *, void *, register_t *);
 
 #ifdef APERTURE
 extern int allowaperture;
@@ -145,63 +143,6 @@ amd64_set_ioperm(struct proc *p, void *args, register_t *retval)
 
 #endif
 
-#ifdef MTRR
-
-int
-amd64_get_mtrr(struct proc *p, void *args, register_t *retval)
-{
-	struct amd64_get_mtrr_args ua;
-	int error, n;
-
-	if (mtrr_funcs == NULL)
-		return ENOSYS;
-
-	error = copyin(args, &ua, sizeof ua);
-	if (error != 0)
-		return error;
-
-	error = copyin(ua.n, &n, sizeof n);
-	if (error != 0)
-		return error;
-
-	error = mtrr_get(ua.mtrrp, &n, p, MTRR_GETSET_USER);
-
-	copyout(&n, ua.n, sizeof (int));
-
-	return error;
-}
-
-int
-amd64_set_mtrr(struct proc *p, void *args, register_t *retval)
-{
-	int error, n;
-	struct amd64_set_mtrr_args ua;
-
-	if (mtrr_funcs == NULL)
-		return ENOSYS;
-
-	error = suser(p, 0);
-	if (error != 0)
-		return error;
-
-	error = copyin(args, &ua, sizeof ua);
-	if (error != 0)
-		return error;
-
-	error = copyin(ua.n, &n, sizeof n);
-	if (error != 0)
-		return error;
-
-	error = mtrr_set(ua.mtrrp, &n, p, MTRR_GETSET_USER);
-	if (n != 0)
-		mtrr_commit();
-
-	copyout(&n, ua.n, sizeof n);
-
-	return error;
-}
-#endif
-
 int
 sys_sysarch(struct proc *p, void *v, register_t *retval)
 {
@@ -225,14 +166,6 @@ sys_sysarch(struct proc *p, void *v, register_t *retval)
 		error = amd64_set_ioperm(p, SCARG(uap, parms), retval);
 		break;
 #endif
-#ifdef MTRR
-	case AMD64_GET_MTRR:
-		error = amd64_get_mtrr(p, SCARG(uap, parms), retval);
-		break;
-	case AMD64_SET_MTRR:
-		error = amd64_set_mtrr(p, SCARG(uap, parms), retval);
-		break;
-#endif
 
 #if defined(PERFCTRS) && 0
 	case AMD64_PMC_INFO:
diff --git a/sys/arch/amd64/conf/GENERIC b/sys/arch/amd64/conf/GENERIC
index 0bb94c73392..54837df157a 100644
--- a/sys/arch/amd64/conf/GENERIC
+++ b/sys/arch/amd64/conf/GENERIC
@@ -1,4 +1,4 @@
-#	$OpenBSD: GENERIC,v 1.232 2008/06/07 17:00:38 marco Exp $
+#	$OpenBSD: GENERIC,v 1.233 2008/06/11 09:22:39 phessler Exp $
 #
 # For further information on compiling OpenBSD kernels, see the config(8)
 # man page.
@@ -17,7 +17,7 @@ option		USER_PCICONF	# user-space PCI configuration
 #option		VM86		# Virtual 8086 emulation
 option		APERTURE	# in-kernel aperture driver for XFree86
 
-#option		MTRR		# CPU memory range attributes control
+option		MTRR		# CPU memory range attributes control
 
 #option		KGDB		# Remote debugger support; exclusive of DDB
 #option		"KGDB_DEVNAME=\"com\"",KGDBADDR=0x2f8,KGDBRATE=9600
@@ -561,6 +561,7 @@ pseudo-device	nvram	1
 pseudo-device	sequencer	1
 #pseudo-device	raid		4	# RAIDframe disk driver
 pseudo-device	hotplug	1	# devices hot plugging
+pseudo-device	mtrr	1	# Memory range attributes control
 
 # mouse & keyboard multiplexor pseudo-devices
 pseudo-device	wsmux	2
diff --git a/sys/arch/amd64/conf/files.amd64 b/sys/arch/amd64/conf/files.amd64
index 7ee92539d4b..35bc57acdbe 100644
--- a/sys/arch/amd64/conf/files.amd64
+++ b/sys/arch/amd64/conf/files.amd64
@@ -1,4 +1,4 @@
-#	$OpenBSD: files.amd64,v 1.39 2008/03/16 19:00:28 oga Exp $
+#	$OpenBSD: files.amd64,v 1.40 2008/06/11 09:22:39 phessler Exp $
 
 maxpartitions 16
 maxusers 2 16 128
@@ -11,6 +11,7 @@ file	arch/amd64/amd64/machdep.c
 file	arch/amd64/amd64/identcpu.c
 file	arch/amd64/amd64/amd64errata.c
 file	arch/amd64/amd64/mem.c
+file	arch/amd64/amd64/amd64_mem.c	mtrr
 file	arch/amd64/amd64/pmap.c
 file	arch/amd64/amd64/process_machdep.c
 file	arch/amd64/amd64/sys_machdep.c
@@ -177,6 +178,9 @@ file	dev/isa/fd.c			fd needs-flag
 
 pseudo-device	pctr
 file	arch/amd64/amd64/pctr.c		pctr needs-flag
 
+pseudo-device	mtrr
+file	arch/amd64/amd64/mtrr.c		mtrr needs-flag
+
 pseudo-device	nvram
 file	arch/amd64/amd64/nvram.c	nvram needs-flag
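
The files.amd64 glue is what makes the `#include "mtrr.h"` and the NMTRR guards in mem.c work: `needs-flag` tells config(8) to generate a small counting header for the mtrr pseudo-device in the kernel build directory, conventionally along these lines (an illustrative reconstruction, not a file from this commit):

/* mtrr.h -- generated by config(8) for "pseudo-device mtrr 1";
 * sketch of the conventional needs-flag output, not part of the diff. */
#define	NMTRR	1

With `pseudo-device mtrr 1` now enabled in GENERIC, NMTRR is 1 and the memory-range code is compiled in; a kernel configured without the pseudo-device gets NMTRR 0, and mmioctl() falls straight through to ENODEV.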