-rw-r--r-- | sys/arch/loongson/include/cpu.h | 16
-rw-r--r-- | sys/arch/loongson/loongson/bus_dma.c | 5
-rw-r--r-- | sys/arch/mips64/include/cache.h | 41
-rw-r--r-- | sys/arch/mips64/mips64/cache_loongson2.S | 60
-rw-r--r-- | sys/arch/mips64/mips64/cache_octeon.c | 12
-rw-r--r-- | sys/arch/mips64/mips64/cache_r10k.S | 162
-rw-r--r-- | sys/arch/mips64/mips64/cache_r4k.c | 33
-rw-r--r-- | sys/arch/mips64/mips64/cache_r5k.S | 220
-rw-r--r-- | sys/arch/mips64/mips64/db_machdep.c | 8
-rw-r--r-- | sys/arch/mips64/mips64/pmap.c | 20
-rw-r--r-- | sys/arch/mips64/mips64/sys_machdep.c | 4
-rw-r--r-- | sys/arch/octeon/include/cpu.h | 16
-rw-r--r-- | sys/arch/octeon/octeon/bus_dma.c | 21
-rw-r--r-- | sys/arch/sgi/include/autoconf.h | 12
-rw-r--r-- | sys/arch/sgi/include/cpu.h | 10
-rw-r--r-- | sys/arch/sgi/sgi/bus_dma.c | 11
16 files changed, 241 insertions, 410 deletions
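
The change these diffs implement is an interface cleanup: the mips64 cache routines now uniformly take virtual addresses (vaddr_t), the SyncDCachePage routines additionally receive the physical address for ports whose L2 is physically indexed, and the redundant va/pa argument pair threaded through the Mips_* wrapper macros is dropped. As a rough orientation before the per-file hunks, here is a minimal sketch of the old and new prototypes for one port (Loongson2), paraphrased from the CACHE_PROTOS() and <machine/cpu.h> hunks below rather than copied verbatim, and showing only a subset of the routines:

/*
 * Old interface (before this commit): cache routines took uint64_t
 * addresses, which were either virtual or physical depending on the
 * port, e.g.:
 *
 *	void Loongson2_SyncDCachePage(struct cpu_info *, uint64_t);
 *	void Loongson2_HitSyncDCache(struct cpu_info *, uint64_t, size_t);
 *	void Loongson2_IOSyncDCache(struct cpu_info *, uint64_t, size_t, int);
 */

/*
 * New interface: virtual addresses everywhere; SyncDCachePage also
 * receives the physical address so a physically indexed L2 can compute
 * its index.
 */
void	Loongson2_SyncDCachePage(struct cpu_info *, vaddr_t, paddr_t);
void	Loongson2_HitSyncDCache(struct cpu_info *, vaddr_t, size_t);
void	Loongson2_IOSyncDCache(struct cpu_info *, vaddr_t, size_t, int);
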
diff --git a/sys/arch/loongson/include/cpu.h b/sys/arch/loongson/include/cpu.h index c18ce1eee6e..de97c2dfbbe 100644 --- a/sys/arch/loongson/include/cpu.h +++ b/sys/arch/loongson/include/cpu.h @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.h,v 1.2 2010/01/09 23:34:26 miod Exp $ */ +/* $OpenBSD: cpu.h,v 1.3 2012/04/21 12:20:30 miod Exp $ */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. @@ -48,13 +48,13 @@ #define Mips_InvalidateICache(ci, va, l) \ Loongson2_InvalidateICache((ci), (va), (l)) #define Mips_SyncDCachePage(ci, va, pa) \ - Loongson2_SyncDCachePage((ci), (pa)) -#define Mips_HitSyncDCache(ci, va, pa, l) \ - Loongson2_HitSyncDCache((ci), (pa), (l)) -#define Mips_IOSyncDCache(ci, va, pa, l, h) \ - Loongson2_IOSyncDCache((ci), (pa), (l), (h)) -#define Mips_HitInvalidateDCache(ci, va, pa, l) \ - Loongson2_HitInvalidateDCache((ci), (pa), (l)) + Loongson2_SyncDCachePage((ci), (va), (pa)) +#define Mips_HitSyncDCache(ci, va, l) \ + Loongson2_HitSyncDCache((ci), (va), (l)) +#define Mips_IOSyncDCache(ci, va, l, h) \ + Loongson2_IOSyncDCache((ci), (va), (l), (h)) +#define Mips_HitInvalidateDCache(ci, va, l) \ + Loongson2_HitInvalidateDCache((ci), (va), (l)) #endif /* _KERNEL */ diff --git a/sys/arch/loongson/loongson/bus_dma.c b/sys/arch/loongson/loongson/bus_dma.c index 53f6f5cfe38..19e7712930c 100644 --- a/sys/arch/loongson/loongson/bus_dma.c +++ b/sys/arch/loongson/loongson/bus_dma.c @@ -1,4 +1,4 @@ -/* $OpenBSD: bus_dma.c,v 1.9 2012/03/25 13:52:52 miod Exp $ */ +/* $OpenBSD: bus_dma.c,v 1.10 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -365,8 +365,7 @@ _dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr, } if (cacheop >= 0) - Mips_IOSyncDCache(ci, vaddr, paddr, - ssize, cacheop); + Mips_IOSyncDCache(ci, vaddr, ssize, cacheop); size -= ssize; } curseg++; diff --git a/sys/arch/mips64/include/cache.h b/sys/arch/mips64/include/cache.h index d41e44ad75a..6e0591f2e1f 100644 --- a/sys/arch/mips64/include/cache.h +++ b/sys/arch/mips64/include/cache.h @@ -1,4 +1,4 @@ -/* $OpenBSD: cache.h,v 1.2 2012/04/06 20:11:18 miod Exp $ */ +/* $OpenBSD: cache.h,v 1.3 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 2012 Miodrag Vallat. @@ -22,16 +22,11 @@ /* * Declare canonical cache functions for a given processor. * - * Note that the uint64_t arguments are addresses, which can be either - * virtual or physical addresses, depending upon the particular processor - * model. The high-level functions, invoked from pmap, pass both virtual - * and physical addresses to the Mips_* cache macros declared in - * <machine/cpu.h>. It is the responsibility of a given port, when - * implementing these macros, to pass either the virtual or the physical - * address to the final cache routines. + * The following assumptions are made: + * - only L1 has split instruction and data caches. + * - L1 I$ is virtually indexed. * - * Note that there are no ports where the supported processors use a mix - * of virtual and physical addresses. + * Processor-specific routines will make extra assumptions. 
*/ #define CACHE_PROTOS(chip) \ @@ -40,48 +35,38 @@ void chip##_ConfigCache(struct cpu_info *); \ /* Writeback and invalidate all caches */ \ void chip##_SyncCache(struct cpu_info *); \ /* Invalidate all I$ for the given range */ \ -void chip##_InvalidateICache(struct cpu_info *, uint64_t, size_t); \ +void chip##_InvalidateICache(struct cpu_info *, vaddr_t, size_t); \ /* Writeback all D$ for the given page */ \ -void chip##_SyncDCachePage(struct cpu_info *, uint64_t); \ +void chip##_SyncDCachePage(struct cpu_info *, vaddr_t, paddr_t); \ /* Writeback all D$ for the given range */ \ -void chip##_HitSyncDCache(struct cpu_info *, uint64_t, size_t); \ +void chip##_HitSyncDCache(struct cpu_info *, vaddr_t, size_t); \ /* Invalidate all D$ for the given range */ \ -void chip##_HitInvalidateDCache(struct cpu_info *, uint64_t, size_t); \ +void chip##_HitInvalidateDCache(struct cpu_info *, vaddr_t, size_t); \ /* Enforce coherency of the given range */ \ -void chip##_IOSyncDCache(struct cpu_info *, uint64_t, size_t, int); +void chip##_IOSyncDCache(struct cpu_info *, vaddr_t, size_t, int); /* * Cavium Octeon. - * ICache routines take virtual addresses. - * DCache routines take physical addresses. */ CACHE_PROTOS(Octeon); /* - * STC Loongson 2e and 2f. - * ICache routines take virtual addresses. - * DCache routines take physical addresses. + * STC Loongson 2E and 2F. */ CACHE_PROTOS(Loongson2); /* * MIPS R4000 and R4400. - * ICache routines take virtual addresses. - * DCache routines take virtual addresses. */ CACHE_PROTOS(Mips4k); /* - * IDT/QED/PMC-Sierra R5000, RM52xx, RM7xxx, RM9xxx - * ICache routines take virtual addresses. - * DCache routines take virtual addresses. + * IDT/QED/PMC-Sierra R4600, R4700, R5000, RM52xx, RM7xxx, RM9xxx. */ CACHE_PROTOS(Mips5k); /* - * MIPS/NEC R10000/R120000/R140000/R16000 - * ICache routines take virtual addresses. - * DCache routines take virtual addresses. + * MIPS/NEC R10000/R120000/R140000/R16000. */ CACHE_PROTOS(Mips10k); diff --git a/sys/arch/mips64/mips64/cache_loongson2.S b/sys/arch/mips64/mips64/cache_loongson2.S index cb0e336dd27..480e37a291e 100644 --- a/sys/arch/mips64/mips64/cache_loongson2.S +++ b/sys/arch/mips64/mips64/cache_loongson2.S @@ -1,4 +1,4 @@ -/* $OpenBSD: cache_loongson2.S,v 1.7 2012/04/06 20:11:18 miod Exp $ */ +/* $OpenBSD: cache_loongson2.S,v 1.8 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 2009 Miodrag Vallat. @@ -238,9 +238,7 @@ END(Loongson2_SyncCache) /*---------------------------------------------------------------------------- * - * Loongson2_InvalidateICache -- - * - * void Loongson2_SyncICache(struct cpu_info *ci, vaddr_t va, size_t len) + * Loongson2_SyncICache(struct cpu_info *ci, vaddr_t va, size_t len) * * Invalidate the L1 instruction cache for at least range * of va to va + len - 1. @@ -270,18 +268,22 @@ END(Loongson2_InvalidateICache) /*---------------------------------------------------------------------------- * - * Loongson2_SyncDCachePage -- - * - * void Loongson2_SyncDCachePage(struct cpu_info *ci, paddr_t pa) + * Loongson2_SyncDCachePage(struct cpu_info *ci, vaddr_t va, paddr_t pa) * * Sync the L1 and L2 data cache page for address pa. + * The virtual address is not used. + * + * The index for L1 is the low 14 bits of the virtual address. Since + * the page size is 2**14 bits, it is possible to access the page + * through any valid address. + * The index for L2 is the low 17 bits of the physical address. 
* *---------------------------------------------------------------------------- */ LEAF(Loongson2_SyncDCachePage, 0) sync - LOAD_XKPHYS(a2, CCA_CACHED) + LOAD_XKPHYS(a1, CCA_CACHED) or a1, a2 # a1 now new L1 address dsrl a1, a1, PAGE_SHIFT dsll a1, a1, PAGE_SHIFT # page align pa @@ -307,12 +309,9 @@ END(Loongson2_SyncDCachePage) /*---------------------------------------------------------------------------- * - * Loongson2_HitSyncDCache -- + * Loongson2_HitSyncDCache(struct cpu_info *ci, vaddr_t va, size_t len) * - * void Loongson2_HitSyncDCache(struct cpu_info *ci, - * paddr_t pa, size_t len) - * - * Sync L1 and L2 data caches for range of pa to pa + len - 1. + * Sync L1 and L2 data caches for range of va to va + len - 1. * Since L2 is writeback, we need to operate on L1 first, to make sure * L1 is clean. The usual mips strategy of doing L2 first, and then * the L1 orphans, will not work as the orphans would only be pushed @@ -330,8 +329,6 @@ LEAF(Loongson2_HitSyncDCache, 0) dsll a1, a1, 5 # align to cacheline boundary PTR_SUBU a2, a2, a1 dsrl a2, a2, 5 # Compute number of cache lines - LOAD_XKPHYS(a3, CCA_CACHED) - or a1, a3 # build suitable va move a4, a2 # save for L2 move a3, a1 @@ -357,12 +354,9 @@ END(Loongson2_HitSyncDCache) /*---------------------------------------------------------------------------- * - * Loongson2_HitInvalidateDCache -- + * Loongson2_HitInvalidateDCache(struct cpu_info *ci, vaddr_t va, size_t len) * - * void Loongson2_HitInvalidateDCache(struct cpu_info *ci, - * paddr_t pa, size_t len) - * - * Invalidate L1 and L2 data caches for range of pa to pa + len - 1. + * Invalidate L1 and L2 data caches for range of va to va + len - 1. * *---------------------------------------------------------------------------- */ @@ -376,8 +370,6 @@ LEAF(Loongson2_HitInvalidateDCache, 0) dsll a1, a1, 5 # align to cacheline boundary PTR_SUBU a2, a2, a1 dsrl a2, a2, 5 # Compute number of cache lines - LOAD_XKPHYS(a3, CCA_CACHED) - or a1, a3 # build suitable va move a4, a2 # save for L2 move a3, a1 @@ -403,13 +395,10 @@ END(Loongson2_HitInvalidateDCache) /*---------------------------------------------------------------------------- * - * Loongson2_IOSyncDCache -- - * - * void Loongson2_IOSyncDCache(struct cpu_info *ci, - * paddr_t pa, size_t len, int how) + * Loongson2_IOSyncDCache(struct cpu_info *ci, vaddr_t va, size_t len, int how) * - * Invalidate or flush L1 and L2 data caches for range of pa to - * pa + len - 1. + * Invalidate or flush L1 and L2 data caches for range of va to + * va + len - 1. 
* * If how == 0 (invalidate): * L1 and L2 caches are invalidated or flushed if the area @@ -421,19 +410,15 @@ END(Loongson2_HitInvalidateDCache) * *---------------------------------------------------------------------------- */ -NON_LEAF(Loongson2_IOSyncDCache, FRAMESZ(CF_SZ+REGSZ), ra) +LEAF(Loongson2_IOSyncDCache, 0) sync - PTR_SUBU sp, FRAMESZ(CF_SZ+REGSZ) - PTR_S ra, CF_RA_OFFS+REGSZ(sp) beqz a3, SyncInv # Sync PREREAD nop SyncWBInv: - jal Loongson2_HitSyncDCache + j Loongson2_HitSyncDCache nop - b SyncDone - PTR_L ra, CF_RA_OFFS+REGSZ(sp) SyncInv: or t0, a1, a2 # check if invalidate possible @@ -441,11 +426,6 @@ SyncInv: bnez t0, SyncWBInv # be aligned to the cache size nop - jal Loongson2_HitInvalidateDCache + j Loongson2_HitInvalidateDCache nop - PTR_L ra, CF_RA_OFFS+REGSZ(sp) - -SyncDone: - j ra - PTR_ADDU sp, FRAMESZ(CF_SZ+REGSZ) END(Loongson2_IOSyncDCache) diff --git a/sys/arch/mips64/mips64/cache_octeon.c b/sys/arch/mips64/mips64/cache_octeon.c index 59d53c14b56..b81584802c1 100644 --- a/sys/arch/mips64/mips64/cache_octeon.c +++ b/sys/arch/mips64/mips64/cache_octeon.c @@ -1,4 +1,4 @@ -/* $OpenBSD: cache_octeon.c,v 1.3 2012/03/25 13:52:52 miod Exp $ */ +/* $OpenBSD: cache_octeon.c,v 1.4 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 2010 Takuya ASADA. * @@ -77,29 +77,29 @@ Octeon_SyncCache(struct cpu_info *ci) } void -Octeon_InvalidateICache(struct cpu_info *ci, uint64_t va, size_t len) +Octeon_InvalidateICache(struct cpu_info *ci, vaddr_t va, size_t len) { /* A SYNCI flushes the entire icache on OCTEON */ SYNCI(); } void -Octeon_SyncDCachePage(struct cpu_info *ci, uint64_t pa) +Octeon_SyncDCachePage(struct cpu_info *ci, vaddr_t va, paddr_t pa) { } void -Octeon_HitSyncDCache(struct cpu_info *ci, uint64_t pa, size_t len) +Octeon_HitSyncDCache(struct cpu_info *ci, vaddr_t va, size_t len) { } void -Octeon_HitInvalidateDCache(struct cpu_info *ci, uint64_t pa, size_t len) +Octeon_HitInvalidateDCache(struct cpu_info *ci, vaddr_t va, size_t len) { } void -Octeon_IOSyncDCache(struct cpu_info *ci, uint64_t pa, size_t len, int how) +Octeon_IOSyncDCache(struct cpu_info *ci, vaddr_t va, size_t len, int how) { switch (how) { default: diff --git a/sys/arch/mips64/mips64/cache_r10k.S b/sys/arch/mips64/mips64/cache_r10k.S index 43809059f2c..3325ec31d7b 100644 --- a/sys/arch/mips64/mips64/cache_r10k.S +++ b/sys/arch/mips64/mips64/cache_r10k.S @@ -1,4 +1,4 @@ -/* $OpenBSD: cache_r10k.S,v 1.15 2012/04/06 20:11:18 miod Exp $ */ +/* $OpenBSD: cache_r10k.S,v 1.16 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 2004 Opsycon AB (www.opsycon.se) @@ -29,7 +29,11 @@ /* * Processors supported: * R10000, R12000, R14000 and R16000. - * The cache line and number of ways are hardcoded. + * + * The following assumptions are made: + * - L1 I$ is 2 way, 64 bytes/line + * - L1 D$ is WB, 2 way, 32 bytes/line + * - L2 is WT, 2 way */ #include <sys/errno.h> @@ -83,22 +87,17 @@ #define CTYPE_4WAY 0x0004 /* Cache is FOUR way */ #define CTYPE_WAYMASK 0x0007 -#define CTYPE_HAS_IL2 0x0100 /* Internal L2 Cache present */ #define CTYPE_HAS_XL2 0x0200 /* External L2 Cache present */ -#define CTYPE_HAS_XL3 0x0400 /* External L3 Cache present */ .set noreorder # Noreorder is default style! /*---------------------------------------------------------------------------- * - * Mips10k_ConfigCache(struct cpu_info *ci) -- + * Mips10k_ConfigCache(struct cpu_info *ci) * * Size and configure the caches. * NOTE: should only be called from mips_init(). * - * Results: - * Returns the value of the cpu configuration register. 
- * * Side effects: * The size of the data cache is stored into ci_l1datacachesize. * The size of instruction cache is stored into ci_l1instcachesize. @@ -184,15 +183,12 @@ END(Mips10k_ConfigCache) /*---------------------------------------------------------------------------- * - * Mips10k_SyncCache(struct cpu_info *ci) -- + * Mips10k_SyncCache(struct cpu_info *ci) * * Sync ALL caches. * No need to look at number of sets since we are cleaning out * the entire cache and thus will address all sets anyway. * - * Results: - * None. - * * Side effects: * The contents of ALL caches are Invalidated or Synched. * @@ -260,19 +256,13 @@ END(Mips10k_SyncCache) /*---------------------------------------------------------------------------- * - * Mips10k_InvalidateICache -- - * - * void Mips10k_SyncICache(struct cpu_info *ci, vaddr_t addr, size_t len) + * Mips10k_InvalidateICache(struct cpu_info *ci, vaddr_t va, size_t len) * - * Invalidate the L1 instruction cache for at least range - * of addr to addr + len - 1. - * The address is reduced to a XKPHYS index to avoid TLB faults. - * - * Results: - * None. + * Invalidate the L1 instruction cache for at least range of va to + * va + len - 1. * * Side effects: - * The contents of the L1 Instruction cache is flushed. + * The contents of the L1 Instruction cache are invalidated. * *---------------------------------------------------------------------------- */ @@ -300,15 +290,10 @@ END(Mips10k_InvalidateICache) /*---------------------------------------------------------------------------- * - * Mips10k_SyncDCachePage -- - * - * void Mips10k_SyncDCachePage(struct cpu_info *ci, vaddr_t addr) - * - * Sync the L1 data cache page for address addr. - * The address is reduced to a XKPHYS index to avoid TLB faults. + * Mips10k_SyncDCachePage(struct cpu_info *ci, vaddr_t va, paddr_t pa) * - * Results: - * None. + * Sync the L1 data cache page for address va. + * The physical address is not used. * * Side effects: * The contents of the cache is written back to primary memory. @@ -344,18 +329,10 @@ END(Mips10k_SyncDCachePage) /*---------------------------------------------------------------------------- * - * Mips10k_HitSyncDCache -- + * Mips10k_HitSyncDCache(struct cpu_info *ci, vaddr_t va, size_t len) * - * void Mips10k_HitSyncDCache(struct cpu_info *ci, - * vaddr_t addr, size_t len) - * - * Sync data cache for range of addr to addr + len - 1. - * The address can be any valid virtual address as long - * as no TLB invalid traps occur. Only lines with matching - * addr are flushed. - * - * Results: - * None. + * Sync data cache for range of va to va + len - 1. + * Only lines with matching address are flushed. * * Side effects: * The contents of the L1 cache is written back to primary memory. @@ -382,21 +359,12 @@ LEAF(Mips10k_HitSyncDCache, 0) nop END(Mips10k_HitSyncDCache) - /*---------------------------------------------------------------------------- * - * Mips10k_HitSyncSCache -- - * - * static void Mips10k_HitSyncSCache(struct cpu_info *ci, - * vaddr_t addr, size_t len) + * _mips10k_HitSyncSCache(struct cpu_info *ci, vaddr_t va, size_t len) * - * Sync secondary cache for range of addr to addr + len - 1. - * The address can be any valid virtual address as long - * as no TLB invalid traps occur. Only lines with matching - * addr are flushed. - * - * Results: - * None. + * Sync secondary cache for range of va to va + len - 1. + * Only lines with matching address are flushed. * * Side effects: * The contents of the L2 cache is written back to primary memory. 
@@ -404,7 +372,7 @@ END(Mips10k_HitSyncDCache) * *---------------------------------------------------------------------------- */ -LEAF(Mips10k_HitSyncSCache, 0) +ALEAF(_mips10k_HitSyncSCache) beq a2, zero, 3f # size is zero! PTR_ADDU a2, a2, a1 # Add in extra from align and a1, a1, -32 # Align address @@ -420,23 +388,14 @@ LEAF(Mips10k_HitSyncSCache, 0) 3: j ra nop -END(Mips10k_HitSyncSCache) /*---------------------------------------------------------------------------- * - * Mips10k_HitInvalidateDCache -- - * - * void Mips10k_HitInvalidateDCache(struct cpu_info *ci, - * vaddr_t addr, size_t len) + * Mips10k_HitInvalidateDCache(struct cpu_info *ci, vaddr_t va, size_t len) * - * Invalidate data cache for range of addr to addr + len - 1. - * The address can be any valid address as long as no TLB misses occur. - * (Be sure to use cached K0SEG kernel addresses or mapped addresses) + * Invalidate data cache for range of va to va + len - 1. * Only lines with matching addresses are invalidated. * - * Results: - * None. - * * Side effects: * The L1 cache line is invalidated. * @@ -461,28 +420,19 @@ LEAF(Mips10k_HitInvalidateDCache, 0) nop END(Mips10k_HitInvalidateDCache) - /*---------------------------------------------------------------------------- * - * Mips10k_HitInvalidateSCache -- - * - * static void Mips10k_HitInvalidateSCache(struct cpu_info *ci, - * vaddr_t addr, size_t len) + * _mips10k_HitInvalidateSCache(struct cpu_info *ci, vaddr_t va, size_t len) * - * Invalidate secondary cache for range of addr to addr + len - 1. - * The address can be any valid address as long as no TLB misses occur. - * (Be sure to use cached K0SEG kernel addresses or mapped addresses) + * Invalidate secondary cache for range of va to va + len - 1. * Only lines with matching addresses are invalidated. * - * Results: - * None. - * * Side effects: * The L2 cache line is invalidated. * *---------------------------------------------------------------------------- */ -LEAF(Mips10k_HitInvalidateSCache, 0) +ALEAF(_mips10k_HitInvalidateSCache) beq a2, zero, 3f # size is zero! PTR_ADDU a2, a2, a1 # Add in extra from align and a1, a1, -32 # Align address @@ -498,48 +448,33 @@ LEAF(Mips10k_HitInvalidateSCache, 0) 3: j ra nop -END(Mips10k_HitInvalidateSCache) /*---------------------------------------------------------------------------- * - * Mips10k_IOSyncDCache -- - * - * void Mips10k_IOSyncDCache(struct cpu_info *ci, vaddr_t addr, - * size_t len, int rw) + * Mips10k_IOSyncDCache(struct cpu_info *ci, vaddr_t va, size_t len, int how) * - * Invalidate or flush data cache for range of addr to addr + len - 1. - * The address can be any valid address as long as no TLB misses occur. - * (Be sure to use cached K0SEG kernel addresses or mapped addresses) - * - * Results: - * None. + * Invalidate or flush data cache for range of va to va + len - 1. * * Side effects: - * If rw == 0 (read), L1 and L2 caches are invalidated or - * flushed if the area does not match the alignment - * requirements. - * If rw == 1 (write), L1 and L2 caches are written back + * If how == 0 (read), L1 and L2 caches are invalidated or flushed if + * the area does not match the alignment requirements. + * If how == 1 (write), L1 and L2 caches are written back * to memory and invalidated. - * If rw == 2 (write-read), L1 and L2 caches are written back + * If how == 2 (write-read), L1 and L2 caches are written back * to memory and invalidated. 
* *---------------------------------------------------------------------------- */ -NON_LEAF(Mips10k_IOSyncDCache, FRAMESZ(CF_SZ+2*REGSZ), ra) - PTR_SUBU sp, FRAMESZ(CF_SZ+2*REGSZ) - PTR_S ra, CF_RA_OFFS+2*REGSZ(sp) - REG_S a1, CF_ARGSZ(sp) # save args +LEAF(Mips10k_IOSyncDCache, 0) beqz a3, SyncRD # Sync PREREAD - REG_S a2, CF_ARGSZ+REGSZ(sp) + nop addiu a3, -1 bnez a3, SyncRDWB # Sync PREWRITE+PREREAD nop SyncWR: - jal Mips10k_HitSyncSCache # Do L2 cache + j _mips10k_HitSyncSCache # Do L2 cache nop # L1 done in parallel - b SyncDone - PTR_L ra, CF_RA_OFFS+2*REGSZ(sp) SyncRD: and t0, a1, 63 # check if invalidate possible @@ -548,25 +483,16 @@ SyncRD: bnez t0, SyncRDWB nop -/* - * Sync for aligned read, no writeback required. - */ - jal Mips10k_HitInvalidateSCache # L2 cache + /* + * Sync for aligned read, no writeback required. + */ + j _mips10k_HitInvalidateSCache # L2 cache nop # L1 done in parallel - b SyncDone - PTR_L ra, CF_RA_OFFS+2*REGSZ(sp) - -/* - * Sync for unaligned read or write-read. - */ SyncRDWB: - jal Mips10k_HitSyncSCache # L2 cache + /* + * Sync for unaligned read or write-read. + */ + j _mips10k_HitSyncSCache # L2 cache nop # L1 done in parallel - - PTR_L ra, CF_RA_OFFS+2*REGSZ(sp) - -SyncDone: - j ra - PTR_ADDU sp, FRAMESZ(CF_SZ+2*REGSZ) END(Mips10k_IOSyncDCache) diff --git a/sys/arch/mips64/mips64/cache_r4k.c b/sys/arch/mips64/mips64/cache_r4k.c index 754a1ff8c75..e15544b6e61 100644 --- a/sys/arch/mips64/mips64/cache_r4k.c +++ b/sys/arch/mips64/mips64/cache_r4k.c @@ -1,4 +1,4 @@ -/* $OpenBSD: cache_r4k.c,v 1.2 2012/04/06 20:11:18 miod Exp $ */ +/* $OpenBSD: cache_r4k.c,v 1.3 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 2012 Miodrag Vallat. @@ -47,7 +47,7 @@ static __inline__ void mips4k_hitwbinv_secondary(vaddr_t, vsize_t, vsize_t); void Mips4k_ConfigCache(struct cpu_info *ci) { - uint32_t cfg; + uint32_t cfg, ncfg; cfg = cp0_get_config(); @@ -91,7 +91,9 @@ Mips4k_ConfigCache(struct cpu_info *ci) pmap_prefer_mask |= cache_valias_mask; } - if ((cfg & 7) != CCA_CACHED) { + ncfg = (cfg & ~7) | CCA_CACHED; + ncfg &= ~(1 << 4); + if (cfg != ncfg) { void (*fn)(uint32_t); vaddr_t va; paddr_t pa; @@ -106,8 +108,7 @@ Mips4k_ConfigCache(struct cpu_info *ci) } fn = (void (*)(uint32_t))va; - cfg = (cfg & ~7) | CCA_CACHED; - (*fn)(cfg); + (*fn)(ncfg); } } @@ -153,7 +154,7 @@ Mips4k_SyncCache(struct cpu_info *ci) * Invalidate I$ for the given range. */ void -Mips4k_InvalidateICache(struct cpu_info *ci, uint64_t _va, size_t _sz) +Mips4k_InvalidateICache(struct cpu_info *ci, vaddr_t _va, size_t _sz) { vaddr_t va, sva, eva; vsize_t sz; @@ -163,10 +164,10 @@ Mips4k_InvalidateICache(struct cpu_info *ci, uint64_t _va, size_t _sz) /* extend the range to integral cache lines */ if (line == 16) { va = _va & ~(16UL - 1); - sz = ((_va + _sz + 16 - 1) & ~(16UL - 1)) - _va; + sz = ((_va + _sz + 16 - 1) & ~(16UL - 1)) - va; } else { va = _va & ~(32UL - 1); - sz = ((_va + _sz + 32 - 1) & ~(32UL - 1)) - _va; + sz = ((_va + _sz + 32 - 1) & ~(32UL - 1)) - va; } sva = PHYS_TO_XKPHYS(0, CCA_CACHED); @@ -185,7 +186,7 @@ Mips4k_InvalidateICache(struct cpu_info *ci, uint64_t _va, size_t _sz) * Writeback D$ for the given page. 
*/ void -Mips4k_SyncDCachePage(struct cpu_info *ci, uint64_t va) +Mips4k_SyncDCachePage(struct cpu_info *ci, vaddr_t va, paddr_t pa) { vaddr_t sva, eva; vsize_t line; @@ -202,9 +203,7 @@ Mips4k_SyncDCachePage(struct cpu_info *ci, uint64_t va) if (ci->ci_l2size != 0) { line = ci->ci_cacheconfiguration; /* L2 line size */ - sva = PHYS_TO_XKPHYS(0, CCA_CACHED); - /* keep only the index bits */ - sva += va & ((1UL << 22) - 1); /* largest L2 is 4MB */ + sva = PHYS_TO_XKPHYS(pa, CCA_CACHED); eva = sva + PAGE_SIZE; while (sva != eva) { cache(IndexWBInvalidate_S, sva); @@ -246,7 +245,7 @@ mips4k_hitwbinv_secondary(vaddr_t va, vsize_t sz, vsize_t line) } void -Mips4k_HitSyncDCache(struct cpu_info *ci, uint64_t _va, size_t _sz) +Mips4k_HitSyncDCache(struct cpu_info *ci, vaddr_t _va, size_t _sz) { vaddr_t va; vsize_t sz; @@ -305,7 +304,7 @@ mips4k_hitinv_secondary(vaddr_t va, vsize_t sz, vsize_t line) } void -Mips4k_HitInvalidateDCache(struct cpu_info *ci, uint64_t _va, size_t _sz) +Mips4k_HitInvalidateDCache(struct cpu_info *ci, vaddr_t _va, size_t _sz) { vaddr_t va; vsize_t sz; @@ -315,10 +314,10 @@ Mips4k_HitInvalidateDCache(struct cpu_info *ci, uint64_t _va, size_t _sz) /* extend the range to integral cache lines */ if (line == 16) { va = _va & ~(16UL - 1); - sz = ((_va + _sz + 16 - 1) & ~(16UL - 1)) - _va; + sz = ((_va + _sz + 16 - 1) & ~(16UL - 1)) - va; } else { va = _va & ~(32UL - 1); - sz = ((_va + _sz + 32 - 1) & ~(32UL - 1)) - _va; + sz = ((_va + _sz + 32 - 1) & ~(32UL - 1)) - va; } mips4k_hitinv_primary(va, sz, line); @@ -339,7 +338,7 @@ Mips4k_HitInvalidateDCache(struct cpu_info *ci, uint64_t _va, size_t _sz) * operations. */ void -Mips4k_IOSyncDCache(struct cpu_info *ci, uint64_t _va, size_t _sz, int how) +Mips4k_IOSyncDCache(struct cpu_info *ci, vaddr_t _va, size_t _sz, int how) { vaddr_t va; vsize_t sz; diff --git a/sys/arch/mips64/mips64/cache_r5k.S b/sys/arch/mips64/mips64/cache_r5k.S index ecd1960cbdb..6aa93b33802 100644 --- a/sys/arch/mips64/mips64/cache_r5k.S +++ b/sys/arch/mips64/mips64/cache_r5k.S @@ -1,4 +1,4 @@ -/* $OpenBSD: cache_r5k.S,v 1.34 2012/04/06 20:11:18 miod Exp $ */ +/* $OpenBSD: cache_r5k.S,v 1.35 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 1998-2004 Opsycon AB (www.opsycon.se) @@ -27,18 +27,17 @@ */ /* - * NOTE! - * - * This code does not support caches with other linesize than 32. - * Neither will it support R4000 or R4400 Secondary caches. These - * configurations will need another set of cache functions. 
- * * Processors supported: - * R4600/R4700 (if option CPUR4600) - * R5000 - * RM52xx - * RM7xxx - * RM9xxx + * R4600/R4700 (if option CPU_R4600) + * R5000, RM52xx, RM7xxx, RM9xxx + * + * The following assumptions are made: + * - L1 I$ is 2 way, 32 bytes/line + * - L1 D$ is WB, 2 way, 32 bytes/line + * - L2 may not exist + * - L3 may not exist + * - L3 implies internal L2 + * - all external caches are WT */ #include <sys/errno.h> @@ -59,35 +58,18 @@ #define IndexInvalidate_I 0x00 #define IndexWBInvalidate_D 0x01 -#define IndexFlashInvalidate_T 0x02 #define IndexWBInvalidate_S 0x03 -#define IndexLoadTag_I 0x04 -#define IndexLoadTag_D 0x05 -#define IndexLoadTag_T 0x06 -#define IndexLoadTag_S 0x07 - -#define IndexStoreTag_I 0x08 -#define IndexStoreTag_D 0x09 -#define IndexStoreTag_T 0x0a #define IndexStoreTag_S 0x0b -#define CreateDirtyExclusive 0x0d - -#define HitInvalidate_I 0x10 #define HitInvalidate_D 0x11 #define HitInvalidate_S 0x13 -#define Fill_I 0x14 #define HitWBInvalidate_D 0x15 #define InvalidatePage_T 0x16 #define HitWBInvalidate_S 0x17 #define InvalidatePage_S 0x17 /* Only RM527[0-1] */ -#define HitWB_I 0x18 -#define HitWB_D 0x19 -#define HitWB_S 0x1b - /* * R5000 config register bits. */ @@ -143,9 +125,6 @@ * Size and configure the caches. * NOTE: should only be called from mips_init(). * - * Results: - * Returns the value of the cpu configuration register. - * * Side effects: * The size of the data cache is stored into ci_l1datacachesize. * The size of instruction cache is stored into ci_l1instcachesize. @@ -196,11 +175,7 @@ LEAF(Mips5k_ConfigCache, 0) li ta3, 0 # Tertiary size 0. and v1, 0xff00 # Recognize CPU's with - li t1, (MIPS_R4600 << 8) # N way L1 caches only. - beq v1, t1, ConfResult # R4K 2 way, no L2 control - li t1, (MIPS_R4700 << 8) - beq v1, t1, ConfResult # R4K 2 way, No L2 control - li t1, (MIPS_R5000 << 8) + li t1, (MIPS_R5000 << 8) # N way L1 caches only. beq v1, t1, Conf5K # R5K 2 way, check L2 li t1, (MIPS_RM52X0 << 8) beq v1, t1, Conf52K # R52K 2 way, check L2 @@ -209,9 +184,8 @@ LEAF(Mips5k_ConfigCache, 0) li t1, (MIPS_RM9000 << 8) beq v1, t1, Conf7K nop - # R4000PC/R4400PC or unknown. - li t2, CTYPE_DIR # default direct mapped cache - b ConfResult + + b ConfResult # R4[67]00 2 way, No L2 control nop #---- R5K ------------------------------ @@ -400,9 +374,6 @@ END(Mips5k_ConfigCache) * No need to look at number of sets since we are cleaning out * the entire cache and thus will address all sets anyway. * - * Results: - * None. - * * Side effects: * The contents of ALL caches are Invalidated or Synched. * @@ -511,16 +482,10 @@ END(Mips5k_SyncCache) /*---------------------------------------------------------------------------- * - * Mips5k_InvalidateICache -- - * - * void Mips5k_SyncICache(struct cpu_info *, vaddr_t addr, size_t len) + * Mips5k_SyncICache(struct cpu_info *, vaddr_t va, size_t len) * - * Invalidate the L1 instruction cache for at least range - * of addr to addr + len - 1. - * The address is reduced to a XKPHYS index to avoid TLB faults. - * - * Results: - * None. + * Invalidate the L1 instruction cache for at least range of va to + * va + len - 1. * * Side effects: * The contents of the L1 Instruction cache is flushed. @@ -576,15 +541,10 @@ END(Mips5k_InvalidateICache) /*---------------------------------------------------------------------------- * - * Mips5k_SyncDCachePage -- - * - * void Mips5k_SyncDCachePage(struct cpu_info *ci, vaddr_t addr) - * - * Sync the L1 data cache page for address addr. 
- * The address is reduced to a XKPHYS index to avoid TLB faults. + * Mips5k_SyncDCachePage(struct cpu_info *ci, vaddr_t va, paddr_t pa) * - * Results: - * None. + * Sync the L1 data cache page for address va. + * The physical address is used to compute the L2 index. * * Side effects: * The contents of the cache is written back to primary memory. @@ -604,7 +564,7 @@ LEAF(Mips5k_SyncDCachePage, 0) dsrl a1, 34 PTR_ADDU a1, a3 # a1 now new XKPHYS address and a1, ~PAGE_MASK # Page align start address - PTR_ADDU a2, a1, PAGE_SIZE-128 + PTR_ADDU a4, a1, PAGE_SIZE-128 addiu v0, -2 # <0 1way, 0 = two, >0 four lw a3, CI_L1DATACACHESET(a0) @@ -634,9 +594,48 @@ LEAF(Mips5k_SyncDCachePage, 0) cache IndexWBInvalidate_D, 64(a1) cache IndexWBInvalidate_D, 96(a1) - bne a2, a1, 1b + bne a4, a1, 1b PTR_ADDU a1, 128 + lw t0, CI_CACHECONFIGURATION(a0) + and t0, CTYPE_HAS_IL2 # Have internal L2? + beqz t0, 9f + + LOAD_XKPHYS(a3, CCA_CACHED) # Yes, do L2 with the physical + PTR_ADDU a1, a2, a3 # address for the index + and a1, ~PAGE_MASK # Page align start address + PTR_ADDU a4, a1, PAGE_SIZE-128 + lw a3, CI_L2SIZE(a0) + srl a3, 2 # Hardcoded 4-way + +1: + cache IndexWBInvalidate_S, 0(a1) # do set A + cache IndexWBInvalidate_S, 32(a1) + cache IndexWBInvalidate_S, 64(a1) + cache IndexWBInvalidate_S, 96(a1) + + PTR_ADDU t1, a1, a3 + cache IndexWBInvalidate_S, 0(t1) # do set B. + cache IndexWBInvalidate_S, 32(t1) + cache IndexWBInvalidate_S, 64(t1) + cache IndexWBInvalidate_S, 96(t1) + + PTR_ADDU t1, a3 + cache IndexWBInvalidate_S, 0(t1) # do set C + cache IndexWBInvalidate_S, 32(t1) + cache IndexWBInvalidate_S, 64(t1) + cache IndexWBInvalidate_S, 96(t1) + + PTR_ADDU t1, a3 # do set D + cache IndexWBInvalidate_S, 0(t1) + cache IndexWBInvalidate_S, 32(t1) + cache IndexWBInvalidate_S, 64(t1) + cache IndexWBInvalidate_S, 96(t1) + + bne a4, a1, 1b + PTR_ADDU a1, 128 + +9: #ifdef CPUR4600 mtc0 v1, COP_0_STATUS_REG # Restore the status register. #endif @@ -647,18 +646,10 @@ END(Mips5k_SyncDCachePage) /*---------------------------------------------------------------------------- * - * Mips5k_HitSyncDCache -- - * - * void Mips5k_HitSyncDCache(struct cpu_info *ci, - * vaddr_t addr, size_t len) - * - * Sync data cache for range of addr to addr + len - 1. - * The address can be any valid virtual address as long - * as no TLB invalid traps occur. Only lines with matching - * addr are flushed. + * Mips5k_HitSyncDCache(struct cpu_info *ci, vaddr_t va, size_t len) * - * Results: - * None. + * Sync data cache for range of va to va + len - 1. + * Only lines with matching addresses are flushed. * * Side effects: * The contents of the L1 cache is written back to primary memory. @@ -703,18 +694,10 @@ END(Mips5k_HitSyncDCache) /*---------------------------------------------------------------------------- * - * Mips5k_HitSyncSCache -- + * _mips5k_HitSyncSCache(struct cpu_info *ci, vaddr_t va, size_t len) * - * static void Mips5k_HitSyncSCache(struct cpu_info *ci, - * vaddr_t addr, size_t len) - * - * Sync secondary cache for range of addr to addr + len - 1. - * The address can be any valid virtual address as long - * as no TLB invalid traps occur. Only lines with matching - * addr are flushed. - * - * Results: - * None. + * Sync secondary cache for range of va to va + len - 1. + * Only lines with matching addresses are flushed. * * Side effects: * The contents of the L2 cache is written back to primary memory. 
@@ -726,7 +709,7 @@ END(Mips5k_HitSyncDCache) * *---------------------------------------------------------------------------- */ -LEAF(Mips5k_HitSyncSCache, 0) +ALEAF(_mips5k_HitSyncSCache) #ifdef CPUR4600 mfc0 v1, COP_0_STATUS_REG # Save the status register. li v0, SR_DIAG_DE @@ -754,23 +737,14 @@ LEAF(Mips5k_HitSyncSCache, 0) sync j ra nop -END(Mips5k_HitSyncSCache) /*---------------------------------------------------------------------------- * - * Mips5k_HitInvalidateDCache -- - * - * void Mips5k_HitInvalidateDCache(struct cpu_info *ci, - * vaddr_t addr, size_t len) + * Mips5k_HitInvalidateDCache(struct cpu_info *ci, vaddr_t va, size_t len) * - * Invalidate data cache for range of addr to addr + len - 1. - * The address can be any valid address as long as no TLB misses occur. - * (Be sure to use cached K0SEG kernel addresses or mapped addresses) + * Invalidate data cache for range of va to va + len - 1. * Only lines with matching addresses are invalidated. * - * Results: - * None. - * * Side effects: * The L1 cache line is invalidated. * @@ -806,28 +780,19 @@ LEAF(Mips5k_HitInvalidateDCache, 0) nop END(Mips5k_HitInvalidateDCache) - /*---------------------------------------------------------------------------- * - * Mips5k_HitInvalidateSCache -- + * _mips5k_HitInvalidateSCache(struct cpu_info *ci, vaddr_t va, size_t len) * - * static void Mips5k_HitInvalidateSCache(struct cpu_info *ci, - * vaddr_t addr, size_t len) - * - * Invalidate secondary cache for range of addr to addr + len - 1. - * The address can be any valid address as long as no TLB misses occur. - * (Be sure to use cached K0SEG kernel addresses or mapped addresses) + * Invalidate secondary cache for range of va to va + len - 1. * Only lines with matching addresses are invalidated. * - * Results: - * None. - * * Side effects: * The L2 cache line is invalidated. * *---------------------------------------------------------------------------- */ -LEAF(Mips5k_HitInvalidateSCache, 0) +ALEAF(_mips5k_HitInvalidateSCache) #ifdef CPUR4600 mfc0 v1, COP_0_STATUS_REG # Save the status register. li v0, SR_DIAG_DE @@ -855,38 +820,29 @@ LEAF(Mips5k_HitInvalidateSCache, 0) sync j ra nop -END(Mips5k_HitInvalidateSCache) /*---------------------------------------------------------------------------- * - * Mips5k_IOSyncDCache -- + * Mips5k_IOSyncDCache(struct cpu_info *ci, vaddr_t va, size_t len, int how) * - * void Mips5k_IOSyncDCache(struct cpu_info *ci, - * vaddr_t addr, size_t len, int rw) - * - * Invalidate or flush data cache for range of addr to addr + len - 1. - * The address can be any valid address as long as no TLB misses occur. - * (Be sure to use cached K0SEG kernel addresses or mapped addresses) + * Invalidate or flush data cache for range of va to va + len - 1. * * In case of the existence of an external cache we invalidate pages * which are in the given range ONLY if transfer direction is READ. * The assumption here is a 'write through' external cache which is * true for all now supported processors. * - * Results: - * None. - * * Side effects: - * If rw == 0 (read), L1 and on-chip L2 caches are invalidated or - * flushed if the area does not match the alignment - * requirements. Writethrough L2 and L3 cache are - * invalidated for the address range. - * If rw == 1 (write), L1 and on-chip L2 caches are written back - * to memory and invalidated. Writethrough L2 and L3 caches - * are left alone. - * If rw == 2 (write-read), L1 and on-chip L2 caches are written back - * to memory and invalidated. 
Writethrough L2 and L3 caches - * are invalidated. + * If how == 0 (read), L1 and on-chip L2 caches are invalidated or + * flushed if the area does not match the alignment requirements. + * Writethrough L2 and L3 cache are invalidated for the address + * range. + * If how == 1 (write), L1 and on-chip L2 caches are written back to + * memory and invalidated. Writethrough L2 and L3 caches are + * left alone. + * If how == 2 (write-read), L1 and on-chip L2 caches are written back + * to memory and invalidated. Writethrough L2 and L3 caches are + * invalidated. * *---------------------------------------------------------------------------- */ @@ -910,7 +866,7 @@ NON_LEAF(Mips5k_IOSyncDCache, FRAMESZ(CF_SZ+2*REGSZ), ra) PTR_L ra, CF_RA_OFFS+2*REGSZ(sp) SyncSC: - jal Mips5k_HitSyncSCache # Do internal L2 cache + jal _mips5k_HitSyncSCache # Do internal L2 cache nop # L1 done in parallel b SyncDone PTR_L ra, CF_RA_OFFS+2*REGSZ(sp) @@ -937,7 +893,7 @@ SyncRD: PTR_L ra, CF_RA_OFFS+2*REGSZ(sp) # External L2 if present SyncRDL2: - jal Mips5k_HitInvalidateSCache # Internal L2 cache + jal _mips5k_HitInvalidateSCache # Internal L2 cache nop # L1 done in parallel b SyncRDL3 @@ -959,7 +915,7 @@ SyncRDWB: PTR_L ra, CF_RA_OFFS+2*REGSZ(sp) # External L2 if present SyncRDWBL2: - jal Mips5k_HitSyncSCache # Internal L2 cache + jal _mips5k_HitSyncSCache # Internal L2 cache nop # L1 done in parallel b SyncRDL3 diff --git a/sys/arch/mips64/mips64/db_machdep.c b/sys/arch/mips64/mips64/db_machdep.c index ce1049f0a7b..5011f330b11 100644 --- a/sys/arch/mips64/mips64/db_machdep.c +++ b/sys/arch/mips64/mips64/db_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: db_machdep.c,v 1.34 2012/04/19 18:15:08 miod Exp $ */ +/* $OpenBSD: db_machdep.c,v 1.35 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 1998-2003 Opsycon AB (www.opsycon.se) @@ -335,10 +335,8 @@ db_write_bytes(addr, size, data) if (addr < VM_MAXUSER_ADDRESS) { struct cpu_info *ci = curcpu(); - /* XXX we don't know where this page is mapped... 
*/ - Mips_HitSyncDCache(ci, addr, PHYS_TO_XKPHYS(addr, CCA_CACHED), - size); - Mips_InvalidateICache(ci, PHYS_TO_CKSEG0(addr & 0xffff), size); + Mips_HitSyncDCache(ci, addr, size); + Mips_InvalidateICache(ci, addr, size); } } diff --git a/sys/arch/mips64/mips64/pmap.c b/sys/arch/mips64/mips64/pmap.c index 8ef94263b94..d8bad751c85 100644 --- a/sys/arch/mips64/mips64/pmap.c +++ b/sys/arch/mips64/mips64/pmap.c @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.c,v 1.59 2012/04/19 18:12:40 miod Exp $ */ +/* $OpenBSD: pmap.c,v 1.60 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -797,8 +797,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) continue; if ((entry & PG_M) != 0 /* && p != PG_M */) if ((entry & PG_CACHEMODE) == PG_CACHED) - Mips_HitSyncDCache(ci, sva, - pfn_to_pad(entry), PAGE_SIZE); + Mips_HitSyncDCache(ci, sva, PAGE_SIZE); entry = (entry & ~(PG_M | PG_RO)) | p; *pte = entry; /* @@ -1027,6 +1026,8 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) else npte |= PG_ROPAGE; pte = kvtopte(va); + if ((*pte & PG_V) == 0) + pmap_kernel()->pm_stats.resident_count++; *pte = npte; pmap_update_kernel_page(va, npte); } @@ -1056,9 +1057,10 @@ pmap_kremove(vaddr_t va, vsize_t len) entry = *pte; if (!(entry & PG_V)) continue; - Mips_HitSyncDCache(ci, va, pfn_to_pad(entry), PAGE_SIZE); + Mips_HitSyncDCache(ci, va, PAGE_SIZE); *pte = PG_NV | PG_G; pmap_invalidate_kernel_page(va); + pmap_kernel()->pm_stats.resident_count--; } } @@ -1171,7 +1173,7 @@ pmap_zero_page(struct vm_page *pg) Mips_SyncDCachePage(ci, pv->pv_va, phys); } mem_zero_page(va); - Mips_HitSyncDCache(ci, va, phys, PAGE_SIZE); + Mips_HitSyncDCache(ci, va, PAGE_SIZE); } /* @@ -1212,9 +1214,9 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg) memcpy((void *)d, (void *)s, PAGE_SIZE); if (sf) { - Mips_HitSyncDCache(ci, s, src, PAGE_SIZE); + Mips_HitSyncDCache(ci, s, PAGE_SIZE); } - Mips_HitSyncDCache(ci, d, dst, PAGE_SIZE); + Mips_HitSyncDCache(ci, d, PAGE_SIZE); } /* @@ -1626,7 +1628,7 @@ pmap_pg_free(struct pool *pp, void *item) paddr_t pa = XKPHYS_TO_PHYS(va); vm_page_t pg = PHYS_TO_VM_PAGE(pa); - Mips_HitInvalidateDCache(curcpu(), va, pa, PAGE_SIZE); + Mips_HitInvalidateDCache(curcpu(), va, PAGE_SIZE); uvm_pagefree(pg); } @@ -1688,7 +1690,7 @@ pmap_unmap_direct(vaddr_t va) pg = PHYS_TO_VM_PAGE(pa); if (cache_valias_mask) - Mips_HitInvalidateDCache(curcpu(), va, pa, PAGE_SIZE); + Mips_HitInvalidateDCache(curcpu(), va, PAGE_SIZE); return pg; } diff --git a/sys/arch/mips64/mips64/sys_machdep.c b/sys/arch/mips64/mips64/sys_machdep.c index e4e2316a931..5773eda01c4 100644 --- a/sys/arch/mips64/mips64/sys_machdep.c +++ b/sys/arch/mips64/mips64/sys_machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: sys_machdep.c,v 1.6 2012/03/25 13:52:52 miod Exp $ */ +/* $OpenBSD: sys_machdep.c,v 1.7 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 1992, 1993 @@ -127,7 +127,7 @@ mips64_cacheflush(struct proc *p, struct mips64_cacheflush_args *cfa) if (cfa->which & ICACHE) Mips_InvalidateICache(p->p_cpu, va, chunk); if (cfa->which & DCACHE) - Mips_HitSyncDCache(p->p_cpu, va, pa, chunk); + Mips_HitSyncDCache(p->p_cpu, va, chunk); } else { if (uvm_map_lookup_entry(map, va, &entry) == FALSE) { rc = EFAULT; diff --git a/sys/arch/octeon/include/cpu.h b/sys/arch/octeon/include/cpu.h index edaea5ce55b..b8e76774aab 100644 --- a/sys/arch/octeon/include/cpu.h +++ b/sys/arch/octeon/include/cpu.h @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.h,v 1.3 2011/03/23 16:54:36 pirofti Exp $ */ +/* $OpenBSD: 
cpu.h,v 1.4 2012/04/21 12:20:30 miod Exp $ */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. @@ -63,13 +63,13 @@ void hw_cpu_init_secondary(struct cpu_info *); #define Mips_InvalidateICache(ci, va, l) \ Octeon_InvalidateICache((ci), (va), (l)) #define Mips_SyncDCachePage(ci, va, pa) \ - Octeon_SyncDCachePage((ci), (pa)) -#define Mips_HitSyncDCache(ci, va, pa, l) \ - Octeon_HitSyncDCache((ci), (pa), (l)) -#define Mips_IOSyncDCache(ci, va, pa, l, h) \ - Octeon_IOSyncDCache((ci), (pa), (l), (h)) -#define Mips_HitInvalidateDCache(ci, va, pa, l) \ - Octeon_HitInvalidateDCache((ci), (pa), (l)) + Octeon_SyncDCachePage((ci), (va), (pa)) +#define Mips_HitSyncDCache(ci, va, l) \ + Octeon_HitSyncDCache((ci), (va), (l)) +#define Mips_IOSyncDCache(ci, va, l, h) \ + Octeon_IOSyncDCache((ci), (va), (l), (h)) +#define Mips_HitInvalidateDCache(ci, va, l) \ + Octeon_HitInvalidateDCache((ci), (va), (l)) #endif/* _KERNEL */ diff --git a/sys/arch/octeon/octeon/bus_dma.c b/sys/arch/octeon/octeon/bus_dma.c index a0addf3ceee..b9a5a3c2648 100644 --- a/sys/arch/octeon/octeon/bus_dma.c +++ b/sys/arch/octeon/octeon/bus_dma.c @@ -1,4 +1,4 @@ -/* $OpenBSD: bus_dma.c,v 1.5 2012/03/25 13:52:52 miod Exp $ */ +/* $OpenBSD: bus_dma.c,v 1.6 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -350,24 +350,16 @@ _dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr, * Otherwise, just invalidate (if noncoherent). */ if (op & BUS_DMASYNC_PREWRITE) { -#ifdef TGT_COHERENT - Mips_IOSyncDCache(ci, vaddr, paddr, - ssize, CACHE_SYNC_W); -#else if (op & BUS_DMASYNC_PREREAD) - Mips_IOSyncDCache(ci, vaddr, paddr, + Mips_IOSyncDCache(ci, vaddr, ssize, CACHE_SYNC_X); else - Mips_IOSyncDCache(ci, vaddr, paddr, + Mips_IOSyncDCache(ci, vaddr, ssize, CACHE_SYNC_W); -#endif } else if (op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD)) { -#ifdef TGT_COHERENT -#else - Mips_IOSyncDCache(ci, vaddr, paddr, + Mips_IOSyncDCache(ci, vaddr, ssize, CACHE_SYNC_R); -#endif } size -= ssize; } @@ -435,11 +427,6 @@ _dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size, bus_addr_t addr; int curseg, error; -#ifdef TGT_COHERENT - if (ISSET(flags, BUS_DMA_COHERENT)) - CLR(flags, BUS_DMA_COHERENT); -#endif - if (nsegs == 1) { pa = (*t->_device_to_pa)(segs[0].ds_addr); if (flags & BUS_DMA_COHERENT) diff --git a/sys/arch/sgi/include/autoconf.h b/sys/arch/sgi/include/autoconf.h index f6317ad7c9b..eba8e8b5786 100644 --- a/sys/arch/sgi/include/autoconf.h +++ b/sys/arch/sgi/include/autoconf.h @@ -1,4 +1,4 @@ -/* $OpenBSD: autoconf.h,v 1.34 2012/04/03 21:17:35 miod Exp $ */ +/* $OpenBSD: autoconf.h,v 1.35 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -46,11 +46,11 @@ struct sys_rec { /* Published cache operations. 
*/ void (*_SyncCache)(struct cpu_info *); - void (*_InvalidateICache)(struct cpu_info *, uint64_t, size_t); - void (*_SyncDCachePage)(struct cpu_info *, uint64_t); - void (*_HitSyncDCache)(struct cpu_info *, uint64_t, size_t); - void (*_IOSyncDCache)(struct cpu_info *, uint64_t, size_t, int); - void (*_HitInvalidateDCache)(struct cpu_info *, uint64_t, size_t); + void (*_InvalidateICache)(struct cpu_info *, vaddr_t, size_t); + void (*_SyncDCachePage)(struct cpu_info *, vaddr_t, paddr_t); + void (*_HitSyncDCache)(struct cpu_info *, vaddr_t, size_t); + void (*_IOSyncDCache)(struct cpu_info *, vaddr_t, size_t, int); + void (*_HitInvalidateDCache)(struct cpu_info *, vaddr_t, size_t); /* Serial console configuration. */ struct mips_bus_space console_io; diff --git a/sys/arch/sgi/include/cpu.h b/sys/arch/sgi/include/cpu.h index 41ba8aa4834..2aa2e9e4877 100644 --- a/sys/arch/sgi/include/cpu.h +++ b/sys/arch/sgi/include/cpu.h @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.h,v 1.12 2011/03/23 16:54:36 pirofti Exp $ */ +/* $OpenBSD: cpu.h,v 1.13 2012/04/21 12:20:30 miod Exp $ */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. @@ -66,12 +66,12 @@ void hw_cpu_init_secondary(struct cpu_info *); #define Mips_InvalidateICache(ci, va, l) \ (*(sys_config._InvalidateICache))((ci), (va), (l)) #define Mips_SyncDCachePage(ci, va, pa) \ - (*(sys_config._SyncDCachePage))((ci), (va)) -#define Mips_HitSyncDCache(ci, va, pa, l) \ + (*(sys_config._SyncDCachePage))((ci), (va), (pa)) +#define Mips_HitSyncDCache(ci, va, l) \ (*(sys_config._HitSyncDCache))((ci), (va), (l)) -#define Mips_IOSyncDCache(ci, va, pa, l, h) \ +#define Mips_IOSyncDCache(ci, va, l, h) \ (*(sys_config._IOSyncDCache))((ci), (va), (l), (h)) -#define Mips_HitInvalidateDCache(ci, va, pa, l) \ +#define Mips_HitInvalidateDCache(ci, va, l) \ (*(sys_config._HitInvalidateDCache))((ci), (va), (l)) #endif/* _KERNEL */ diff --git a/sys/arch/sgi/sgi/bus_dma.c b/sys/arch/sgi/sgi/bus_dma.c index 6f11e25af23..87caaf041aa 100644 --- a/sys/arch/sgi/sgi/bus_dma.c +++ b/sys/arch/sgi/sgi/bus_dma.c @@ -1,4 +1,4 @@ -/* $OpenBSD: bus_dma.c,v 1.26 2012/04/05 21:49:58 miod Exp $ */ +/* $OpenBSD: bus_dma.c,v 1.27 2012/04/21 12:20:30 miod Exp $ */ /* * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -351,8 +351,7 @@ _dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr, if (ssize != 0) { #ifdef TGT_COHERENT /* we only need to writeback here */ - Mips_IOSyncDCache(ci, vaddr, paddr, - ssize, CACHE_SYNC_W); + Mips_IOSyncDCache(ci, vaddr, ssize, CACHE_SYNC_W); #else /* * If only PREWRITE is requested, writeback. @@ -362,14 +361,14 @@ _dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr, */ if (op & BUS_DMASYNC_PREWRITE) { if (op & BUS_DMASYNC_PREREAD) - Mips_IOSyncDCache(ci, vaddr, paddr, + Mips_IOSyncDCache(ci, vaddr, ssize, CACHE_SYNC_X); else - Mips_IOSyncDCache(ci, vaddr, paddr, + Mips_IOSyncDCache(ci, vaddr, ssize, CACHE_SYNC_W); } else if (op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD)) { - Mips_IOSyncDCache(ci, vaddr, paddr, + Mips_IOSyncDCache(ci, vaddr, ssize, CACHE_SYNC_R); } #endif |
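
To close, a hedged usage sketch of the new single-address hook as the noncoherent bus_dma code now drives it. The structure condenses the _dmamap_sync() hunks above (octeon and sgi); the local names op, ci, vaddr and ssize are assumed from those hunks rather than reproduced exactly:

/* Sketch only -- which `how' value _dmamap_sync() picks per operation. */
if (op & BUS_DMASYNC_PREWRITE) {
	if (op & BUS_DMASYNC_PREREAD)
		/* write+read: write back, then invalidate */
		Mips_IOSyncDCache(ci, vaddr, ssize, CACHE_SYNC_X);
	else
		/* write only: write back */
		Mips_IOSyncDCache(ci, vaddr, ssize, CACHE_SYNC_W);
} else if (op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD)) {
	/*
	 * read: invalidate; per the IOSyncDCache comments above, the cache
	 * code falls back to write back and invalidate when the range is
	 * not cache-line aligned.
	 */
	Mips_IOSyncDCache(ci, vaddr, ssize, CACHE_SYNC_R);
}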