| author | Matthieu Herrb <matthieu@cvs.openbsd.org> | 1999-11-20 11:11:29 +0000 |
|---|---|---|
| committer | Matthieu Herrb <matthieu@cvs.openbsd.org> | 1999-11-20 11:11:29 +0000 |
| commit | 100c78df16f1a855aebc02c646996cdaaa22f580 (patch) | |
| tree | 4e3559e2b00bdd9cfe875f9ff3f0cfe4ef2334d9 /sys | |
| parent | 5c49efebb293cf607c240efa71357e01440f9384 (diff) | |
add MTRR support from FreeBSD
Diffstat (limited to 'sys')
| -rw-r--r-- | sys/arch/i386/i386/i686_mem.c | 576 |
| -rw-r--r-- | sys/arch/i386/i386/k6_mem.c | 190 |
| -rw-r--r-- | sys/arch/i386/i386/mem.c | 129 |
| -rw-r--r-- | sys/arch/i386/i386/mtrr.c | 45 |
| -rw-r--r-- | sys/sys/conf.h | 6 |
| -rw-r--r-- | sys/sys/malloc.h | 7 |
| -rw-r--r-- | sys/sys/memrange.h | 68 |
7 files changed, 1015 insertions, 6 deletions
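
For orientation before the diff itself: the new sys/sys/memrange.h exposes the MTRR machinery to userland through MEMRANGE_GET/MEMRANGE_SET ioctls on /dev/mem, wired up in mem.c when the kernel is built with NMTRR > 0. Below is a minimal userland sketch of how a program might mark a region write-combining through that interface; the base address, length and owner string are made-up example values, error handling is minimal, and running it requires root on a kernel with a supported CPU handler attached.

```c
/*
 * Sketch only: request a write-combining range via the MEMRANGE_SET
 * ioctl added to /dev/mem by this commit.  The 0xf0000000 base, 4 MB
 * length and "demo" owner string are illustrative values.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/memrange.h>

#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct mem_range_desc mrd;
	struct mem_range_op mo;
	int fd;

	if ((fd = open("/dev/mem", O_RDWR)) == -1)	/* needs root */
		err(1, "open /dev/mem");

	memset(&mrd, 0, sizeof(mrd));
	mrd.mr_base = 0xf0000000ULL;		/* hypothetical framebuffer base */
	mrd.mr_len = 4 * 1024 * 1024;		/* power of two, >= 4 KB, aligned to its size */
	mrd.mr_flags = MDF_WRITECOMBINE;
	strlcpy(mrd.mr_owner, "demo", sizeof(mrd.mr_owner));

	memset(&mo, 0, sizeof(mo));
	mo.mo_desc = &mrd;
	mo.mo_arg[0] = MEMRANGE_SET_UPDATE;	/* MEMRANGE_SET_REMOVE undoes it */

	if (ioctl(fd, MEMRANGE_SET, &mo) == -1)
		err(1, "MEMRANGE_SET");

	close(fd);
	return 0;
}
```

MEMRANGE_GET works the other way around: pass mo_arg[0] = 0 to learn how many descriptors the kernel tracks, then call again with mo_desc pointing at a buffer of that size.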
diff --git a/sys/arch/i386/i386/i686_mem.c b/sys/arch/i386/i386/i686_mem.c new file mode 100644 index 00000000000..7b1457de362 --- /dev/null +++ b/sys/arch/i386/i386/i686_mem.c @@ -0,0 +1,576 @@ +/* $OpenBSD: i686_mem.c,v 1.1 1999/11/20 11:11:28 matthieu Exp $ */ +/*- + * Copyright (c) 1999 Michael Smith <msmith@freebsd.org> + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD: src/sys/i386/i386/i686_mem.c,v 1.8 1999/10/12 22:53:05 green Exp $ + */ + +#include <sys/param.h> +#include <sys/kernel.h> +#include <sys/systm.h> +#include <sys/malloc.h> +#include <sys/memrange.h> + +#include <machine/cpufunc.h> +#include <machine/specialreg.h> + +/* + * i686 memory range operations + * + * This code will probably be impenetrable without reference to the + * Intel Pentium Pro documentation. + */ + +char *mem_owner_bios = "BIOS"; + +#define MR686_FIXMTRR (1<<0) + +#define mrwithin(mr, a) \ + (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len))) +#define mroverlap(mra, mrb) \ + (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base)) + +#define mrvalid(base, len) \ + ((!(base & ((1 << 12) - 1))) && /* base is multiple of 4k */ \ + ((len) >= (1 << 12)) && /* length is >= 4k */ \ + powerof2((len)) && /* ... 
and power of two */ \ + !((base) & ((len) - 1))) /* range is not discontiuous */ + +#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK)) + +void i686_mrinit __P((struct mem_range_softc *sc)); +int i686_mrset __P((struct mem_range_softc *sc, + struct mem_range_desc *mrd, + int *arg)); +void i686_mrAPinit __P((struct mem_range_softc *sc)); + +struct mem_range_ops i686_mrops = { + i686_mrinit, + i686_mrset, + i686_mrAPinit +}; + +/* XXX for AP startup hook */ +u_int64_t mtrrcap, mtrrdef; + +struct mem_range_desc *mem_range_match __P((struct mem_range_softc *sc, + struct mem_range_desc *mrd)); +void i686_mrfetch __P((struct mem_range_softc *sc)); +int i686_mtrrtype __P((int flags)); +void i686_mrstore __P((struct mem_range_softc *sc)); +void i686_mrstoreone __P((void *arg)); +struct mem_range_desc *i686_mtrrfixsearch __P((struct mem_range_softc *sc, + u_int64_t addr)); +int i686_mrsetlow __P((struct mem_range_softc *sc, + struct mem_range_desc *mrd, + int *arg)); +int i686_mrsetvariable __P((struct mem_range_softc *sc, + struct mem_range_desc *mrd, + int *arg)); + +/* i686 MTRR type to memory range type conversion */ +int i686_mtrrtomrt[] = { + MDF_UNCACHEABLE, + MDF_WRITECOMBINE, + 0, + 0, + MDF_WRITETHROUGH, + MDF_WRITEPROTECT, + MDF_WRITEBACK +}; + +/* + * i686 MTRR conflict matrix for overlapping ranges + * + * Specifically, this matrix allows writeback and uncached ranges + * to overlap (the overlapped region is uncached). The array index + * is the translated i686 code for the flags (because they map well). + */ +int i686_mtrrconflict[] = { + MDF_WRITECOMBINE | MDF_WRITETHROUGH | MDF_WRITEPROTECT, + MDF_ATTRMASK, + 0, + 0, + MDF_ATTRMASK, + MDF_ATTRMASK, + MDF_WRITECOMBINE | MDF_WRITETHROUGH | MDF_WRITEPROTECT +}; + +/* + * Look for an exactly-matching range. + */ +struct mem_range_desc * +mem_range_match(sc, mrd) + struct mem_range_softc *sc; + struct mem_range_desc *mrd; +{ + struct mem_range_desc *cand; + int i; + + for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++) + if ((cand->mr_base == mrd->mr_base) && + (cand->mr_len == mrd->mr_len)) + return(cand); + return(NULL); +} + +/* + * Fetch the current mtrr settings from the current CPU (assumed to all + * be in sync in the SMP case). Note that if we are here, we assume + * that MTRRs are enabled, and we may or may not have fixed MTRRs. 
+ */ +void +i686_mrfetch(sc) + struct mem_range_softc *sc; +{ + struct mem_range_desc *mrd; + u_int64_t msrv; + int i, j, msr; + + mrd = sc->mr_desc; + + /* Get fixed-range MTRRs */ + if (sc->mr_cap & MR686_FIXMTRR) { + msr = MSR_MTRR64kBase; + for (i = 0; i < (MTRR_N64K / 8); i++, msr++) { + msrv = rdmsr(msr); + for (j = 0; j < 8; j++, mrd++) { + mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | + i686_mtrrtomrt[msrv & 0xff] | + MDF_ACTIVE; + if (mrd->mr_owner[0] == 0) + strcpy(mrd->mr_owner, mem_owner_bios); + msrv = msrv >> 8; + } + } + msr = MSR_MTRR16kBase; + for (i = 0; i < (MTRR_N16K / 8); i++, msr++) { + msrv = rdmsr(msr); + for (j = 0; j < 8; j++, mrd++) { + mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | + i686_mtrrtomrt[msrv & 0xff] | + MDF_ACTIVE; + if (mrd->mr_owner[0] == 0) + strcpy(mrd->mr_owner, mem_owner_bios); + msrv = msrv >> 8; + } + } + msr = MSR_MTRR4kBase; + for (i = 0; i < (MTRR_N4K / 8); i++, msr++) { + msrv = rdmsr(msr); + for (j = 0; j < 8; j++, mrd++) { + mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | + i686_mtrrtomrt[msrv & 0xff] | + MDF_ACTIVE; + if (mrd->mr_owner[0] == 0) + strcpy(mrd->mr_owner, mem_owner_bios); + msrv = msrv >> 8; + } + } + } + + /* Get remainder which must be variable MTRRs */ + msr = MSR_MTRRVarBase; + for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) { + msrv = rdmsr(msr); + mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | + i686_mtrrtomrt[msrv & 0xff]; + mrd->mr_base = msrv & 0x0000000ffffff000LL; + msrv = rdmsr(msr + 1); + mrd->mr_flags = (msrv & 0x800) ? + (mrd->mr_flags | MDF_ACTIVE) : + (mrd->mr_flags & ~MDF_ACTIVE); + /* Compute the range from the mask. Ick. */ + mrd->mr_len = (~(msrv & 0x0000000ffffff000LL) & 0x0000000fffffffffLL) + 1; + if (!mrvalid(mrd->mr_base, mrd->mr_len)) + mrd->mr_flags |= MDF_BOGUS; + /* If unclaimed and active, must be the BIOS */ + if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0)) + strcpy(mrd->mr_owner, mem_owner_bios); + } +} + +/* + * Return the MTRR memory type matching a region's flags + */ +int +i686_mtrrtype(flags) + int flags; +{ + int i; + + flags &= MDF_ATTRMASK; + + for (i = 0; i < (sizeof(i686_mtrrtomrt) / sizeof(i686_mtrrtomrt[0])); i++) { + if (i686_mtrrtomrt[i] == 0) + continue; + if (flags == i686_mtrrtomrt[i]) + return(i); + } + return(-1); +} + +/* + * Update running CPU(s) MTRRs to match the ranges in the descriptor + * list. + * + * XXX Must be called with interrupts enabled. + */ +void +i686_mrstore(sc) + struct mem_range_softc *sc; +{ + disable_intr(); /* disable interrupts */ + i686_mrstoreone((void *)sc); + enable_intr(); +} + +/* + * Update the current CPU's MTRRs with those represented in the + * descriptor list. Note that we do this wholesale rather than + * just stuffing one entry; this is simpler (but slower, of course). 
+ */ +void +i686_mrstoreone(arg) + void *arg; +{ + struct mem_range_softc *sc = (struct mem_range_softc *)arg; + struct mem_range_desc *mrd; + u_int64_t msrv; + int i, j, msr; + u_int cr4save; + + mrd = sc->mr_desc; + + cr4save = rcr4(); /* save cr4 */ + if (cr4save & CR4_PGE) + lcr4(cr4save & ~CR4_PGE); + lcr0((rcr0() & ~CR0_NW) | CR0_CD); /* disable caches (CD = 1, NW = 0) */ + wbinvd(); /* flush caches, TLBs */ + wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800); /* disable MTRRs (E = 0) */ + + /* Set fixed-range MTRRs */ + if (sc->mr_cap & MR686_FIXMTRR) { + msr = MSR_MTRR64kBase; + for (i = 0; i < (MTRR_N64K / 8); i++, msr++) { + msrv = 0; + for (j = 7; j >= 0; j--) { + msrv = msrv << 8; + msrv |= (i686_mtrrtype((mrd + j)->mr_flags) & 0xff); + } + wrmsr(msr, msrv); + mrd += 8; + } + msr = MSR_MTRR16kBase; + for (i = 0; i < (MTRR_N16K / 8); i++, msr++) { + msrv = 0; + for (j = 7; j >= 0; j--) { + msrv = msrv << 8; + msrv |= (i686_mtrrtype((mrd + j)->mr_flags) & 0xff); + } + wrmsr(msr, msrv); + mrd += 8; + } + msr = MSR_MTRR4kBase; + for (i = 0; i < (MTRR_N4K / 8); i++, msr++) { + msrv = 0; + for (j = 7; j >= 0; j--) { + msrv = msrv << 8; + msrv |= (i686_mtrrtype((mrd + j)->mr_flags) & 0xff); + } + wrmsr(msr, msrv); + mrd += 8; + } + } + + /* Set remainder which must be variable MTRRs */ + msr = MSR_MTRRVarBase; + for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) { + /* base/type register */ + if (mrd->mr_flags & MDF_ACTIVE) { + msrv = mrd->mr_base & 0x0000000ffffff000LL; + msrv |= (i686_mtrrtype(mrd->mr_flags) & 0xff); + } else { + msrv = 0; + } + wrmsr(msr, msrv); + + /* mask/active register */ + if (mrd->mr_flags & MDF_ACTIVE) { + msrv = 0x800 | (~(mrd->mr_len - 1) & 0x0000000ffffff000LL); + } else { + msrv = 0; + } + wrmsr(msr + 1, msrv); + } + wbinvd(); /* flush caches, TLBs */ + wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | 0x800); /* restore MTRR state */ + lcr0(rcr0() & ~(CR0_CD | CR0_NW)); /* enable caches CD = 0 and NW = 0 */ + lcr4(cr4save); /* restore cr4 */ +} + +/* + * Hunt for the fixed MTRR referencing (addr) + */ +struct mem_range_desc * +i686_mtrrfixsearch(sc, addr) + struct mem_range_softc *sc; + u_int64_t addr; +{ + struct mem_range_desc *mrd; + int i; + + for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++) + if ((addr >= mrd->mr_base) && (addr < (mrd->mr_base + mrd->mr_len))) + return(mrd); + return(NULL); +} + +/* + * Try to satisfy the given range request by manipulating the fixed MTRRs that + * cover low memory. + * + * Note that we try to be generous here; we'll bloat the range out to the + * next higher/lower boundary to avoid the consumer having to know too much + * about the mechanisms here. + * + * XXX note that this will have to be updated when we start supporting "busy" ranges. + */ +int +i686_mrsetlow(sc, mrd, arg) + struct mem_range_softc *sc; + struct mem_range_desc *mrd; + int *arg; +{ + struct mem_range_desc *first_md, *last_md, *curr_md; + + /* range check */ + if (((first_md = i686_mtrrfixsearch(sc, mrd->mr_base)) == NULL) || + ((last_md = i686_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL)) + return(EINVAL); + + /* set flags, clear set-by-firmware flag */ + for (curr_md = first_md; curr_md <= last_md; curr_md++) { + curr_md->mr_flags = mrcopyflags(curr_md->mr_flags & ~MDF_FIRMWARE, mrd->mr_flags); + bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner)); + } + + return(0); +} + + +/* + * Modify/add a variable MTRR to satisfy the request. 
+ * + * XXX needs to be updated to properly support "busy" ranges. + */ +int +i686_mrsetvariable(sc, mrd, arg) + struct mem_range_softc *sc; + struct mem_range_desc *mrd; + int *arg; +{ + struct mem_range_desc *curr_md, *free_md; + int i; + + /* + * Scan the currently active variable descriptors, look for + * one we exactly match (straight takeover) and for possible + * accidental overlaps. + * Keep track of the first empty variable descriptor in case we + * can't perform a takeover. + */ + i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0; + curr_md = sc->mr_desc + i; + free_md = NULL; + for (; i < sc->mr_ndesc; i++, curr_md++) { + if (curr_md->mr_flags & MDF_ACTIVE) { + /* exact match? */ + if ((curr_md->mr_base == mrd->mr_base) && + (curr_md->mr_len == mrd->mr_len)) { + /* whoops, owned by someone */ + if (curr_md->mr_flags & MDF_BUSY) + return(EBUSY); + /* Ok, just hijack this entry */ + free_md = curr_md; + break; + } + /* non-exact overlap ? */ + if (mroverlap(curr_md, mrd)) { + /* between conflicting region types? */ + if ((i686_mtrrconflict[i686_mtrrtype(curr_md->mr_flags)] & mrd->mr_flags) || + (i686_mtrrconflict[i686_mtrrtype(mrd->mr_flags)] & curr_md->mr_flags)) + return(EINVAL); + } + } else if (free_md == NULL) { + free_md = curr_md; + } + } + /* got somewhere to put it? */ + if (free_md == NULL) + return(ENOSPC); + + /* Set up new descriptor */ + free_md->mr_base = mrd->mr_base; + free_md->mr_len = mrd->mr_len; + free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags); + bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner)); + return(0); +} + +/* + * Handle requests to set memory range attributes by manipulating MTRRs. + * + */ +int +i686_mrset(sc, mrd, arg) + struct mem_range_softc *sc; + struct mem_range_desc *mrd; + int *arg; +{ + struct mem_range_desc *targ; + int error = 0; + + switch(*arg) { + case MEMRANGE_SET_UPDATE: + /* make sure that what's being asked for is even possible at all */ + if (!mrvalid(mrd->mr_base, mrd->mr_len) || + (i686_mtrrtype(mrd->mr_flags & MDF_ATTRMASK) == -1)) + return(EINVAL); + +#define FIXTOP ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000)) + + /* are the "low memory" conditions applicable? */ + if ((sc->mr_cap & MR686_FIXMTRR) && + ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) { + if ((error = i686_mrsetlow(sc, mrd, arg)) != 0) + return(error); + } else { + /* it's time to play with variable MTRRs */ + if ((error = i686_mrsetvariable(sc, mrd, arg)) != 0) + return(error); + } + break; + + case MEMRANGE_SET_REMOVE: + if ((targ = mem_range_match(sc, mrd)) == NULL) + return(ENOENT); + if (targ->mr_flags & MDF_FIXACTIVE) + return(EPERM); + if (targ->mr_flags & MDF_BUSY) + return(EBUSY); + targ->mr_flags &= ~MDF_ACTIVE; + targ->mr_owner[0] = 0; + break; + + default: + return(EOPNOTSUPP); + } + + /* update the hardware */ + i686_mrstore(sc); + i686_mrfetch(sc); /* refetch to see where we're at */ + return(0); +} + +/* + * Work out how many ranges we support, initialise storage for them, + * fetch the initial settings. 
+ */ +void +i686_mrinit(sc) + struct mem_range_softc *sc; +{ + struct mem_range_desc *mrd; + int nmdesc = 0; + int i; + + mtrrcap = rdmsr(MSR_MTRRcap); + mtrrdef = rdmsr(MSR_MTRRdefType); + + /* For now, bail out if MTRRs are not enabled */ + if (!(mtrrdef & 0x800)) { + printf("CPU supports MTRRs but not enabled\n"); + return; + } + nmdesc = mtrrcap & 0xff; + printf("Pentium Pro MTRR support enabled\n"); + + /* If fixed MTRRs supported and enabled */ + if ((mtrrcap & 0x100) && (mtrrdef & 0x400)) { + sc->mr_cap = MR686_FIXMTRR; + nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K; + } + + sc->mr_desc = + (struct mem_range_desc *)malloc(nmdesc * sizeof(struct mem_range_desc), + M_MEMDESC, M_WAITOK); + bzero(sc->mr_desc, nmdesc * sizeof(struct mem_range_desc)); + sc->mr_ndesc = nmdesc; + + mrd = sc->mr_desc; + + /* Populate the fixed MTRR entries' base/length */ + if (sc->mr_cap & MR686_FIXMTRR) { + for (i = 0; i < MTRR_N64K; i++, mrd++) { + mrd->mr_base = i * 0x10000; + mrd->mr_len = 0x10000; + mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE; + } + for (i = 0; i < MTRR_N16K; i++, mrd++) { + mrd->mr_base = i * 0x4000 + 0x80000; + mrd->mr_len = 0x4000; + mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE; + } + for (i = 0; i < MTRR_N4K; i++, mrd++) { + mrd->mr_base = i * 0x1000 + 0xc0000; + mrd->mr_len = 0x1000; + mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN | MDF_FIXACTIVE; + } + } + + /* + * Get current settings, anything set now is considered to have + * been set by the firmware. (XXX has something already played here?) + */ + i686_mrfetch(sc); + mrd = sc->mr_desc; + for (i = 0; i < sc->mr_ndesc; i++, mrd++) { + if (mrd->mr_flags & MDF_ACTIVE) + mrd->mr_flags |= MDF_FIRMWARE; + } +} + +/* + * Initialise MTRRs on an AP after the BSP has run the init code. + */ +void +i686_mrAPinit(sc) + struct mem_range_softc *sc; +{ + i686_mrstoreone((void *)sc); /* set MTRRs to match BSP */ + wrmsr(MSR_MTRRdefType, mtrrdef); /* set MTRR behaviour to match BSP */ +} + diff --git a/sys/arch/i386/i386/k6_mem.c b/sys/arch/i386/i386/k6_mem.c new file mode 100644 index 00000000000..54e5a760b5b --- /dev/null +++ b/sys/arch/i386/i386/k6_mem.c @@ -0,0 +1,190 @@ +/* $OpenBSD: k6_mem.c,v 1.1 1999/11/20 11:11:28 matthieu Exp $ */ +/*- + * Copyright (c) 1999 Brian Fundakowski Feldman + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD: src/sys/i386/i386/k6_mem.c,v 1.4 1999/09/05 15:45:57 green Exp $ + * + */ + +#include <sys/param.h> +#include <sys/kernel.h> +#include <sys/systm.h> +#include <sys/ioccom.h> +#include <sys/malloc.h> +#include <sys/memrange.h> + +#include <machine/cpufunc.h> +#include <machine/specialreg.h> + +/* + * A K6-2 MTRR is defined as the highest 15 bits having the address, the next + * 15 having the mask, the 1st bit being "write-combining" and the 0th bit + * being "uncacheable". + * + * Address Mask WC UC + * | XXXXXXXXXXXXXXX | XXXXXXXXXXXXXXX | X | X | + * + * There are two of these in the 64-bit UWCCR. + */ + +/* + * NOTE: I do _not_ comment my code unless it's truly necessary. Don't + * expect anything frivolous here, and do NOT touch my bit-shifts + * unless you want to break this. + */ + +#define UWCCR 0xc0000085 + +#define k6_reg_get(reg, addr, mask, wc, uc) do { \ + addr = (reg) & 0xfffe0000; \ + mask = ((reg) & 0x1fffc) >> 2; \ + wc = ((reg) & 0x2) >> 1; \ + uc = (reg) & 0x1; \ + } while (0) + +#define k6_reg_make(addr, mask, wc, uc) \ + ((addr) | ((mask) << 2) | ((wc) << 1) | uc) + +void k6_mrinit __P((struct mem_range_softc *sc)); +int k6_mrset __P((struct mem_range_softc *, struct mem_range_desc *, int *)); +__inline int k6_mrmake __P((struct mem_range_desc *, u_int32_t *)); + +struct mem_range_ops k6_mrops = { + k6_mrinit, + k6_mrset, + NULL +}; + +__inline int +k6_mrmake(desc, mtrr) + struct mem_range_desc *desc; + u_int32_t *mtrr; +{ + u_int32_t len = 0, wc, uc; + register int bit; + + if (desc->mr_base &~ 0xfffe0000) + return EINVAL; + if (desc->mr_len < 131072 || !powerof2(desc->mr_len)) + return EINVAL; + if (desc->mr_flags &~ (MDF_WRITECOMBINE|MDF_UNCACHEABLE)) + return EOPNOTSUPP; + + for (bit = ffs(desc->mr_len >> 17) - 1; bit < 15; bit++) + len |= 1 << (14 - bit); + wc = (desc->mr_flags & MDF_WRITECOMBINE) ? 1 : 0; + uc = (desc->mr_flags & MDF_UNCACHEABLE) ? 
1 : 0; + + *mtrr = k6_reg_make(desc->mr_base, len, wc, uc); + return 0; +} + +void +k6_mrinit(sc) + struct mem_range_softc *sc; +{ + u_int64_t reg; + u_int32_t addr, mask, wc, uc; + int d; + + sc->mr_cap = 0; + sc->mr_ndesc = 2; /* XXX (BFF) For now, we only have one msr for this */ + sc->mr_desc = malloc(sc->mr_ndesc * sizeof(struct mem_range_desc), + M_MEMDESC, M_NOWAIT); + if (sc->mr_desc == NULL) + panic("k6_mrinit: malloc returns NULL"); + + bzero(sc->mr_desc, sc->mr_ndesc * sizeof(struct mem_range_desc)); + + reg = rdmsr(UWCCR); + for (d = 0; d < sc->mr_ndesc; d++) { + u_int32_t one = (reg & (0xffffffff << (32 * d))) >> (32 * d); + + k6_reg_get(one, addr, mask, wc, uc); + sc->mr_desc[d].mr_base = addr; + sc->mr_desc[d].mr_len = ffs(mask) << 17; + if (wc) + sc->mr_desc[d].mr_flags |= MDF_WRITECOMBINE; + if (uc) + sc->mr_desc[d].mr_flags |= MDF_UNCACHEABLE; + } + + printf("K6-family MTRR support enabled (%d registers)\n", sc->mr_ndesc); +} + +int +k6_mrset(sc, desc, arg) + struct mem_range_softc *sc; + struct mem_range_desc *desc; + int *arg; +{ + u_int64_t reg; + u_int32_t mtrr; + int error, d; + + switch (*arg) { + case MEMRANGE_SET_UPDATE: + error = k6_mrmake(desc, &mtrr); + if (error) + return error; + for (d = 0; d < sc->mr_ndesc; d++) { + if (!sc->mr_desc[d].mr_len) { + sc->mr_desc[d] = *desc; + goto out; + } + if (sc->mr_desc[d].mr_base == desc->mr_base && + sc->mr_desc[d].mr_len == desc->mr_len) + return EEXIST; + } + + return ENOSPC; + case MEMRANGE_SET_REMOVE: + mtrr = 0; + for (d = 0; d < sc->mr_ndesc; d++) + if (sc->mr_desc[d].mr_base == desc->mr_base && + sc->mr_desc[d].mr_len == desc->mr_len) { + bzero(&sc->mr_desc[d], sizeof(sc->mr_desc[d])); + goto out; + } + + return ENOENT; + default: + return EOPNOTSUPP; + } + +out: + + disable_intr(); + wbinvd(); + reg = rdmsr(UWCCR); + reg &= ~(0xffffffff << (32 * d)); + reg |= mtrr << (32 * d); + wrmsr(UWCCR, reg); + wbinvd(); + enable_intr(); + + return 0; +} + diff --git a/sys/arch/i386/i386/mem.c b/sys/arch/i386/i386/mem.c index f88d03048b6..f9c74f05c08 100644 --- a/sys/arch/i386/i386/mem.c +++ b/sys/arch/i386/i386/mem.c @@ -1,5 +1,5 @@ /* $NetBSD: mem.c,v 1.31 1996/05/03 19:42:19 christos Exp $ */ -/* $OpenBSD: mem.c,v 1.13 1999/09/06 06:19:08 matthieu Exp $ */ +/* $OpenBSD: mem.c,v 1.14 1999/11/20 11:11:28 matthieu Exp $ */ /* * Copyright (c) 1988 University of Utah. * Copyright (c) 1982, 1986, 1990, 1993 @@ -48,7 +48,9 @@ #include <sys/buf.h> #include <sys/systm.h> #include <sys/uio.h> +#include <sys/ioccom.h> #include <sys/malloc.h> +#include <sys/memrange.h> #include <sys/proc.h> #include <sys/fcntl.h> @@ -61,6 +63,8 @@ #include <uvm/uvm_extern.h> #endif +#include "mtrr.h" + extern char *vmmap; /* poor name! */ caddr_t zeropage; @@ -73,6 +77,11 @@ extern int allowaperture; #define BIOS_END 0xFFFFF #endif +#if NMTRR > 0 +struct mem_range_softc mem_range_softc; +static int mem_ioctl __P((dev_t, u_long, caddr_t, int, struct proc *)); +#endif + /*ARGSUSED*/ int mmopen(dev, flag, mode, p) @@ -270,3 +279,121 @@ mmmmap(dev, off, prot) return -1; } } + +int +mmioctl(dev, cmd, data, flags, p) + dev_t dev; + u_long cmd; + caddr_t data; + int flags; + struct proc *p; +{ +#if NMTRR > 0 + switch (minor(dev)) { + case 0: + case 4: + return mem_ioctl(dev, cmd, data, flags, p); + } +#endif + return (ENODEV); +} + +#if NMTRR > 0 +/* + * Operations for changing memory attributes. + * + * This is basically just an ioctl shim for mem_range_attr_get + * and mem_range_attr_set. 
+ */ +static int +mem_ioctl(dev, cmd, data, flags, p) + dev_t dev; + u_long cmd; + caddr_t data; + int flags; + struct proc *p; +{ + int nd, error = 0; + struct mem_range_op *mo = (struct mem_range_op *)data; + struct mem_range_desc *md; + + /* is this for us? */ + if ((cmd != MEMRANGE_GET) && + (cmd != MEMRANGE_SET)) + return (ENOTTY); + + /* any chance we can handle this? */ + if (mem_range_softc.mr_op == NULL) + return (EOPNOTSUPP); + + /* do we have any descriptors? */ + if (mem_range_softc.mr_ndesc == 0) + return (ENXIO); + + switch (cmd) { + case MEMRANGE_GET: + nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc); + if (nd > 0) { + md = (struct mem_range_desc *) + malloc(nd * sizeof(struct mem_range_desc), + M_MEMDESC, M_WAITOK); + error = mem_range_attr_get(md, &nd); + if (!error) + error = copyout(md, mo->mo_desc, + nd * sizeof(struct mem_range_desc)); + free(md, M_MEMDESC); + } else { + nd = mem_range_softc.mr_ndesc; + } + mo->mo_arg[0] = nd; + break; + + case MEMRANGE_SET: + md = (struct mem_range_desc *)malloc(sizeof(struct mem_range_desc), + M_MEMDESC, M_WAITOK); + error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc)); + /* clamp description string */ + md->mr_owner[sizeof(md->mr_owner) - 1] = 0; + if (error == 0) + error = mem_range_attr_set(md, &mo->mo_arg[0]); + free(md, M_MEMDESC); + break; + } + return (error); +} + +/* + * Implementation-neutral, kernel-callable functions for manipulating + * memory range attributes. + */ +int +mem_range_attr_get(mrd, arg) + struct mem_range_desc *mrd; + int *arg; +{ + /* can we handle this? */ + if (mem_range_softc.mr_op == NULL) + return (EOPNOTSUPP); + + if (*arg == 0) { + *arg = mem_range_softc.mr_ndesc; + } else { + bcopy(mem_range_softc.mr_desc, mrd, (*arg) * sizeof(struct mem_range_desc)); + } + return (0); +} + +int +mem_range_attr_set(mrd, arg) + struct mem_range_desc *mrd; + int *arg; +{ + /* can we handle this? 
*/ + if (mem_range_softc.mr_op == NULL) + return (EOPNOTSUPP); + + return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg)); +} + +#endif /* NMTRR > 0 */ + diff --git a/sys/arch/i386/i386/mtrr.c b/sys/arch/i386/i386/mtrr.c new file mode 100644 index 00000000000..8662f058926 --- /dev/null +++ b/sys/arch/i386/i386/mtrr.c @@ -0,0 +1,45 @@ +/* $OpenBSD: mtrr.c,v 1.1 1999/11/20 11:11:28 matthieu Exp $ */ + +#include <sys/param.h> +#include <sys/memrange.h> + +#include <machine/specialreg.h> + +/* Pull in the cpuid values from locore.s */ +extern int cpu_id; +extern int cpu_feature; +extern char cpu_vendor[]; + +extern struct mem_range_ops i686_mrops; +extern struct mem_range_ops k6_mrops; + +void mtrrattach __P((int)); + +void +mtrrattach (num) + int num; +{ + if (num > 1) + return; + + if (strcmp(cpu_vendor, "AuthenticAMD") == 0 && + (cpu_id & 0xf00) == 0x500 && + ((cpu_id & 0xf0) > 0x80 || + ((cpu_id & 0xf0) == 0x80 && + (cpu_id & 0xf) > 0x7))) { + mem_range_softc.mr_op = &k6_mrops; + + /* Try for i686 MTRRs */ + } else if ((cpu_feature & CPUID_MTRR) && + ((cpu_id & 0xf00) == 0x600) && + ((strcmp(cpu_vendor, "GenuineIntel") == 0) || + (strcmp(cpu_vendor, "AuthenticAMD") == 0))) { + mem_range_softc.mr_op = &i686_mrops; + + } + /* Initialise memory range handling */ + if (mem_range_softc.mr_op != NULL) + mem_range_softc.mr_op->init(&mem_range_softc); +} + + diff --git a/sys/sys/conf.h b/sys/sys/conf.h index bd52cbaa8ed..353744bd7bf 100644 --- a/sys/sys/conf.h +++ b/sys/sys/conf.h @@ -1,4 +1,4 @@ -/* $OpenBSD: conf.h,v 1.30 1999/08/13 05:38:05 fgsch Exp $ */ +/* $OpenBSD: conf.h,v 1.31 1999/11/20 11:11:27 matthieu Exp $ */ /* $NetBSD: conf.h,v 1.33 1996/05/03 20:03:32 christos Exp $ */ /*- @@ -239,10 +239,10 @@ extern struct cdevsw cdevsw[]; dev_init(c,n,write), dev_init(c,n,ioctl), (dev_type_stop((*))) nullop, \ 0, dev_init(c,n,select), (dev_type_mmap((*))) enodev, D_TTY } -/* open, close, read, write, mmap */ +/* open, close, read, write, ioctl, mmap */ #define cdev_mm_init(c,n) { \ dev_init(c,n,open), dev_init(c,n,close), dev_init(c,n,read), \ - dev_init(c,n,write), (dev_type_ioctl((*))) enodev, \ + dev_init(c,n,write), dev_init(c,n,ioctl), \ (dev_type_stop((*))) enodev, 0, seltrue, dev_init(c,n,mmap) } /* read, write */ diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h index e1562cb3076..3d405cc1f8f 100644 --- a/sys/sys/malloc.h +++ b/sys/sys/malloc.h @@ -1,4 +1,4 @@ -/* $OpenBSD: malloc.h,v 1.25 1999/10/29 14:08:13 art Exp $ */ +/* $OpenBSD: malloc.h,v 1.26 1999/11/20 11:11:27 matthieu Exp $ */ /* $NetBSD: malloc.h,v 1.39 1998/07/12 19:52:01 augustss Exp $ */ /* @@ -160,6 +160,8 @@ #define M_PIPE 104 /* Pipe structures */ +#define M_MEMDESC 105 /* Memory range */ + #define M_TEMP 127 /* misc temporary data buffers */ #define M_LAST 128 /* Must be last type + 1 */ @@ -270,7 +272,8 @@ "USB device", /* 102 M_USBDEV */ \ "USB HC", /* 103 M_USBHC */ \ "pipe", /* 104 M_PIPE */ \ - NULL, NULL, \ + "memdesc", /* 105 M_MEMDESC */ \ + NULL, \ NULL, NULL, NULL, NULL, NULL, \ NULL, NULL, NULL, NULL, NULL, \ NULL, NULL, NULL, NULL, NULL, \ diff --git a/sys/sys/memrange.h b/sys/sys/memrange.h new file mode 100644 index 00000000000..79daffc57ee --- /dev/null +++ b/sys/sys/memrange.h @@ -0,0 +1,68 @@ +/* $OpenBSD: memrange.h,v 1.1 1999/11/20 11:11:27 matthieu Exp $ */ +/* + * Memory range attribute operations, peformed on /dev/mem + */ + +/* Memory range attributes */ +#define MDF_UNCACHEABLE (1<<0) /* region not cached */ +#define MDF_WRITECOMBINE (1<<1) /* region supports "write combine" action */ 
+#define MDF_WRITETHROUGH (1<<2) /* write-through cached */ +#define MDF_WRITEBACK (1<<3) /* write-back cached */ +#define MDF_WRITEPROTECT (1<<4) /* read-only region */ +#define MDF_ATTRMASK (0x00ffffff) + +#define MDF_FIXBASE (1<<24) /* fixed base */ +#define MDF_FIXLEN (1<<25) /* fixed length */ +#define MDF_FIRMWARE (1<<26) /* set by firmware (XXX not useful?) */ +#define MDF_ACTIVE (1<<27) /* currently active */ +#define MDF_BOGUS (1<<28) /* we don't like it */ +#define MDF_FIXACTIVE (1<<29) /* can't be turned off */ +#define MDF_BUSY (1<<30) /* range is in use */ + +struct mem_range_desc +{ + u_int64_t mr_base; + u_int64_t mr_len; + int mr_flags; + char mr_owner[8]; +}; + +struct mem_range_op +{ + struct mem_range_desc *mo_desc; + int mo_arg[2]; +#define MEMRANGE_SET_UPDATE 0 +#define MEMRANGE_SET_REMOVE 1 + /* XXX want a flag that says "set and undo when I exit" */ +}; + +#define MEMRANGE_GET _IOWR('m', 50, struct mem_range_op) +#define MEMRANGE_SET _IOW('m', 51, struct mem_range_op) + +#ifdef _KERNEL + +struct mem_range_softc; +struct mem_range_ops +{ + void (*init) __P((struct mem_range_softc *sc)); + int (*set) __P((struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)); + void (*initAP) __P((struct mem_range_softc *sc)); +}; + +struct mem_range_softc +{ + struct mem_range_ops *mr_op; + int mr_cap; + int mr_ndesc; + struct mem_range_desc *mr_desc; +}; + +extern struct mem_range_softc mem_range_softc; + +__BEGIN_DECLS +extern int mem_range_attr_get __P((struct mem_range_desc *mrd, int *arg)); +extern int mem_range_attr_set __P((struct mem_range_desc *mrd, int *arg)); +extern void mem_range_AP_init __P((void)); +__END_DECLS +#endif + |
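
One detail worth calling out from i686_mem.c above is how a variable-range MTRR encodes its range: the base MSR holds the physical base, and the mask MSR holds the valid bit (0x800) plus a mask whose inversion within the 36-bit physical address space yields the length. The standalone sketch below mirrors the expressions in i686_mrfetch() and i686_mrstoreone(); the macro names are local to the sketch (the kernel code uses the literals directly) and the 4 MB length is just an illustrative value.

```c
/*
 * Sketch of the variable-range MTRR mask arithmetic used in i686_mem.c
 * (36-bit physical addresses, 4 KB granularity).
 */
#include <stdio.h>

typedef unsigned long long u64;

#define MTRR_PHYSMASK	0x0000000ffffff000ULL	/* bits 35..12 of the base/mask MSRs */
#define MTRR_SPACE	0x0000000fffffffffULL	/* 36-bit physical address space */
#define MTRR_VALID	0x800ULL		/* "valid" bit in the mask MSR */

int
main(void)
{
	u64 len = 4ULL * 1024 * 1024;		/* illustrative power-of-two length */

	/* Encode: what i686_mrstoreone() writes into the mask MSR. */
	u64 maskmsr = MTRR_VALID | (~(len - 1) & MTRR_PHYSMASK);

	/* Decode: how i686_mrfetch() recovers the length from the mask. */
	u64 declen = (~(maskmsr & MTRR_PHYSMASK) & MTRR_SPACE) + 1;

	printf("mask MSR 0x%llx -> length 0x%llx (valid bit %s)\n",
	    maskmsr, declen, (maskmsr & MTRR_VALID) ? "set" : "clear");
	return 0;
}
```

This encoding is also why mrvalid() insists on power-of-two lengths aligned to their own size: any other range cannot be expressed as a single base/mask pair.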