author     Miod Vallat <miod@cvs.openbsd.org>    2010-01-09 23:34:30 +0000
committer  Miod Vallat <miod@cvs.openbsd.org>    2010-01-09 23:34:30 +0000
commit     37b05ee009caca11d0f57263b3a3902de27bca0c (patch)
tree       47378705ad7c72e3d82faccd4f012a00060d6007 /sys/arch
parent     966ea9e5a601fa2a77dd33a8791b3729ef5d0e93 (diff)
Move cache information from global variables to per-cpu_info fields; this
allows processors with different cache sizes to be used.
Cache management routines now take a struct cpu_info * as first parameter.
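In short (a condensed sketch for orientation, not part of the commit itself): the cache description now lives in struct cpu_info in sys/arch/mips64/include/cpu.h, and each cache routine takes a struct cpu_info * as its first argument. The field names and prototypes below are copied from the diff; the caller at the end is a hypothetical example of the pattern used throughout, where curcpu() supplies the per-cpu state.

struct cpu_info {
	/* ... existing members ... */
	uint	ci_cacheconfiguration;	/* replaces CpuCacheType */
	uint	ci_cacheways;		/* replaces CpuNWayCache */
	uint	ci_l1instcachesize;	/* replaces CpuPrimaryInstCacheSize */
	uint	ci_l1instcacheline;	/* replaces CpuPrimaryInstCacheLSize */
	uint	ci_l1instcacheset;	/* replaces CpuPrimaryInstSetSize */
	uint	ci_l1datacachesize;	/* replaces CpuPrimaryDataCacheSize */
	uint	ci_l1datacacheline;	/* replaces CpuPrimaryDataCacheLSize */
	uint	ci_l1datacacheset;	/* replaces CpuPrimaryDataSetSize */
	uint	ci_l2size;		/* replaces CpuSecondaryCacheSize */
	uint	ci_l3size;		/* replaces CpuTertiaryCacheSize */
	/* ... */
};

/* cache routines gain a struct cpu_info * first parameter, e.g. */
int	Mips5k_ConfigCache(struct cpu_info *);
void	Mips5k_HitSyncDCache(struct cpu_info *, vaddr_t, size_t);

/* hypothetical caller illustrating the new calling convention */
void
example_writeback(vaddr_t va, paddr_t pa, size_t len)
{
	struct cpu_info *ci = curcpu();	/* per-cpu cache sizes live here now */

	Mips_HitSyncDCache(ci, va, pa, len);
}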
Diffstat (limited to 'sys/arch')
-rw-r--r--  sys/arch/loongson/include/cpu.h           |  26
-rw-r--r--  sys/arch/loongson/loongson/bus_dma.c      |  28
-rw-r--r--  sys/arch/loongson/loongson/machdep.c      |   4
-rw-r--r--  sys/arch/mips64/include/asm.h             |   6
-rw-r--r--  sys/arch/mips64/include/cpu.h             |  88
-rw-r--r--  sys/arch/mips64/mips64/cache_loongson2.S  | 198
-rw-r--r--  sys/arch/mips64/mips64/cache_r10k.S       | 244
-rw-r--r--  sys/arch/mips64/mips64/cache_r5k.S        | 323
-rw-r--r--  sys/arch/mips64/mips64/cp0access.S        |   8
-rw-r--r--  sys/arch/mips64/mips64/cpu.c              |  83
-rw-r--r--  sys/arch/mips64/mips64/db_machdep.c       |   8
-rw-r--r--  sys/arch/mips64/mips64/genassym.cf        |  12
-rw-r--r--  sys/arch/mips64/mips64/pmap.c             |  40
-rw-r--r--  sys/arch/mips64/mips64/sys_machdep.c      |   6
-rw-r--r--  sys/arch/mips64/mips64/trap.c             |   8
-rw-r--r--  sys/arch/sgi/include/autoconf.h           |  16
-rw-r--r--  sys/arch/sgi/include/cpu.h                |  26
-rw-r--r--  sys/arch/sgi/sgi/bus_dma.c                |  17
-rw-r--r--  sys/arch/sgi/sgi/ip30_machdep.c           |   6
-rw-r--r--  sys/arch/sgi/sgi/machdep.c                |   8
20 files changed, 571 insertions, 584 deletions
diff --git a/sys/arch/loongson/include/cpu.h b/sys/arch/loongson/include/cpu.h index 121181e899b..c18ce1eee6e 100644 --- a/sys/arch/loongson/include/cpu.h +++ b/sys/arch/loongson/include/cpu.h @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.h,v 1.1 2009/11/23 10:32:35 miod Exp $ */ +/* $OpenBSD: cpu.h,v 1.2 2010/01/09 23:34:26 miod Exp $ */ /*- * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. @@ -43,18 +43,18 @@ #ifdef _KERNEL -#define Mips_SyncCache() \ - Loongson2_SyncCache() -#define Mips_InvalidateICache(va, l) \ - Loongson2_InvalidateICache((va), (l)) -#define Mips_SyncDCachePage(va, pa) \ - Loongson2_SyncDCachePage((pa)) -#define Mips_HitSyncDCache(va, pa, l) \ - Loongson2_HitSyncDCache((pa), (l)) -#define Mips_IOSyncDCache(va, pa, l, h) \ - Loongson2_IOSyncDCache((pa), (l), (h)) -#define Mips_HitInvalidateDCache(va, pa, l) \ - Loongson2_HitInvalidateDCache((pa), (l)) +#define Mips_SyncCache(ci) \ + Loongson2_SyncCache((ci)) +#define Mips_InvalidateICache(ci, va, l) \ + Loongson2_InvalidateICache((ci), (va), (l)) +#define Mips_SyncDCachePage(ci, va, pa) \ + Loongson2_SyncDCachePage((ci), (pa)) +#define Mips_HitSyncDCache(ci, va, pa, l) \ + Loongson2_HitSyncDCache((ci), (pa), (l)) +#define Mips_IOSyncDCache(ci, va, pa, l, h) \ + Loongson2_IOSyncDCache((ci), (pa), (l), (h)) +#define Mips_HitInvalidateDCache(ci, va, pa, l) \ + Loongson2_HitInvalidateDCache((ci), (pa), (l)) #endif /* _KERNEL */ diff --git a/sys/arch/loongson/loongson/bus_dma.c b/sys/arch/loongson/loongson/bus_dma.c index b47df55f4f1..77134d175ee 100644 --- a/sys/arch/loongson/loongson/bus_dma.c +++ b/sys/arch/loongson/loongson/bus_dma.c @@ -1,4 +1,4 @@ -/* $OpenBSD: bus_dma.c,v 1.1 2009/12/11 17:23:29 miod Exp $ */ +/* $OpenBSD: bus_dma.c,v 1.2 2010/01/09 23:34:29 miod Exp $ */ /* * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -313,6 +313,7 @@ _dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr, int nsegs; int curseg; int cacheop; + struct cpu_info *ci = curcpu(); nsegs = map->dm_nsegs; curseg = 0; @@ -357,21 +358,18 @@ _dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr, cacheop = SYNC_X; else cacheop = SYNC_W; - } else -#if 0 - if (op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD)) - cacheop = SYNC_R; -#else - if (op & BUS_DMASYNC_PREREAD) - cacheop = SYNC_X; - else if (op & BUS_DMASYNC_POSTREAD) - cacheop = SYNC_R; -#endif - else - cacheop = -1; - if (cacheop >= 0) { - Mips_IOSyncDCache(vaddr, paddr, ssize, cacheop); + } else { + if (op & BUS_DMASYNC_PREREAD) + cacheop = SYNC_X; + else if (op & BUS_DMASYNC_POSTREAD) + cacheop = SYNC_R; + else + cacheop = -1; } + + if (cacheop >= 0) + Mips_IOSyncDCache(ci, vaddr, paddr, + ssize, cacheop); size -= ssize; } curseg++; diff --git a/sys/arch/loongson/loongson/machdep.c b/sys/arch/loongson/loongson/machdep.c index 5b59fedf80b..b8e4e3e81bd 100644 --- a/sys/arch/loongson/loongson/machdep.c +++ b/sys/arch/loongson/loongson/machdep.c @@ -1,4 +1,4 @@ -/* $OpenBSD: machdep.c,v 1.2 2010/01/09 20:33:16 miod Exp $ */ +/* $OpenBSD: machdep.c,v 1.3 2010/01/09 23:34:29 miod Exp $ */ /* * Copyright (c) 2009 Miodrag Vallat. @@ -398,7 +398,7 @@ mips_init(int32_t argc, int32_t argv, int32_t envp, int32_t cv) * need to invalidate or flush it. 
*/ - Loongson2_ConfigCache(); + Loongson2_ConfigCache(curcpu()); tlb_set_page_mask(TLB_PAGE_MASK); tlb_set_wired(0); diff --git a/sys/arch/mips64/include/asm.h b/sys/arch/mips64/include/asm.h index e44fa221c95..c35d23a53b1 100644 --- a/sys/arch/mips64/include/asm.h +++ b/sys/arch/mips64/include/asm.h @@ -1,4 +1,4 @@ -/* $OpenBSD: asm.h,v 1.9 2009/12/11 05:10:17 miod Exp $ */ +/* $OpenBSD: asm.h,v 1.10 2010/01/09 23:34:29 miod Exp $ */ /* * Copyright (c) 2001-2002 Opsycon AB (www.opsycon.se / www.opsycon.com) @@ -293,6 +293,10 @@ x: ; \ .asciiz str; \ .align 3 +#define LOAD_XKPHYS(reg, cca) \ + li reg, cca | 0x10; \ + dsll reg, reg, 59 + #ifdef MULTIPROCESSOR #define GET_CPU_INFO(ci, tmp) \ HW_CPU_NUMBER(tmp); \ diff --git a/sys/arch/mips64/include/cpu.h b/sys/arch/mips64/include/cpu.h index e60cd3f65d1..e4ed017564f 100644 --- a/sys/arch/mips64/include/cpu.h +++ b/sys/arch/mips64/include/cpu.h @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.h,v 1.52 2010/01/09 20:33:16 miod Exp $ */ +/* $OpenBSD: cpu.h,v 1.53 2010/01/09 23:34:29 miod Exp $ */ /*- * Copyright (c) 1992, 1993 @@ -381,6 +381,19 @@ struct cpu_info { struct proc *ci_fpuproc; /* pointer to last proc to use FP */ struct cpu_hwinfo ci_hw; + + /* cache information */ + uint ci_cacheconfiguration; + uint ci_cacheways; + uint ci_l1instcachesize; + uint ci_l1instcacheline; + uint ci_l1instcacheset; + uint ci_l1datacachesize; + uint ci_l1datacacheline; + uint ci_l1datacacheset; + uint ci_l2size; + uint ci_l3size; + struct schedstate_percpu ci_schedstate; int ci_want_resched; /* need_resched() invoked */ @@ -550,27 +563,14 @@ extern int int_nest_cntr; #if defined(_KERNEL) && !defined(_LOCORE) -extern u_int CpuPrimaryInstCacheSize; -extern u_int CpuPrimaryInstCacheLSize; -extern u_int CpuPrimaryInstSetSize; -extern u_int CpuPrimaryDataCacheSize; -extern u_int CpuPrimaryDataCacheLSize; -extern u_int CpuPrimaryDataSetSize; -extern u_int CpuCacheAliasMask; -extern u_int CpuSecondaryCacheSize; -extern u_int CpuTertiaryCacheSize; -extern u_int CpuNWayCache; -extern u_int CpuCacheType; /* R4K, R5K, RM7K */ -extern u_int CpuConfigRegister; -extern u_int CpuStatusRegister; -extern u_int CpuExternalCacheOn; /* R5K, RM7K */ -extern u_int CpuOnboardCacheOn; /* RM7K */ +extern vaddr_t CpuCacheAliasMask; struct tlb_entry; struct user; u_int cp0_get_count(void); -u_int cp0_get_prid(void); +uint32_t cp0_get_config(void); +uint32_t cp0_get_prid(void); void cp0_set_compare(u_int); u_int cp1_get_prid(void); void tlb_set_page_mask(uint32_t); @@ -581,29 +581,29 @@ void tlb_set_wired(int); * Available cache operation routines. See <machine/cpu.h> for more. 
*/ -int Loongson2_ConfigCache(void); -void Loongson2_SyncCache(void); -void Loongson2_InvalidateICache(vaddr_t, size_t); -void Loongson2_SyncDCachePage(paddr_t); -void Loongson2_HitSyncDCache(paddr_t, size_t); -void Loongson2_HitInvalidateDCache(paddr_t, size_t); -void Loongson2_IOSyncDCache(paddr_t, size_t, int); - -int Mips5k_ConfigCache(void); -void Mips5k_SyncCache(void); -void Mips5k_InvalidateICache(vaddr_t, size_t); -void Mips5k_SyncDCachePage(vaddr_t); -void Mips5k_HitSyncDCache(vaddr_t, size_t); -void Mips5k_HitInvalidateDCache(vaddr_t, size_t); -void Mips5k_IOSyncDCache(vaddr_t, size_t, int); - -int Mips10k_ConfigCache(void); -void Mips10k_SyncCache(void); -void Mips10k_InvalidateICache(vaddr_t, size_t); -void Mips10k_SyncDCachePage(vaddr_t); -void Mips10k_HitSyncDCache(vaddr_t, size_t); -void Mips10k_HitInvalidateDCache(vaddr_t, size_t); -void Mips10k_IOSyncDCache(vaddr_t, size_t, int); +int Loongson2_ConfigCache(struct cpu_info *); +void Loongson2_SyncCache(struct cpu_info *); +void Loongson2_InvalidateICache(struct cpu_info *, vaddr_t, size_t); +void Loongson2_SyncDCachePage(struct cpu_info *, paddr_t); +void Loongson2_HitSyncDCache(struct cpu_info *, paddr_t, size_t); +void Loongson2_HitInvalidateDCache(struct cpu_info *, paddr_t, size_t); +void Loongson2_IOSyncDCache(struct cpu_info *, paddr_t, size_t, int); + +int Mips5k_ConfigCache(struct cpu_info *); +void Mips5k_SyncCache(struct cpu_info *); +void Mips5k_InvalidateICache(struct cpu_info *, vaddr_t, size_t); +void Mips5k_SyncDCachePage(struct cpu_info *, vaddr_t); +void Mips5k_HitSyncDCache(struct cpu_info *, vaddr_t, size_t); +void Mips5k_HitInvalidateDCache(struct cpu_info *, vaddr_t, size_t); +void Mips5k_IOSyncDCache(struct cpu_info *, vaddr_t, size_t, int); + +int Mips10k_ConfigCache(struct cpu_info *); +void Mips10k_SyncCache(struct cpu_info *); +void Mips10k_InvalidateICache(struct cpu_info *, vaddr_t, size_t); +void Mips10k_SyncDCachePage(struct cpu_info *, vaddr_t); +void Mips10k_HitSyncDCache(struct cpu_info *, vaddr_t, size_t); +void Mips10k_HitInvalidateDCache(struct cpu_info *, vaddr_t, size_t); +void Mips10k_IOSyncDCache(struct cpu_info *, vaddr_t, size_t, int); void tlb_flush(int); void tlb_flush_addr(vaddr_t); @@ -630,10 +630,10 @@ void setsoftintr0(void); void clearsoftintr0(void); void setsoftintr1(void); void clearsoftintr1(void); -u_int32_t enableintr(void); -u_int32_t disableintr(void); -void setsr(u_int32_t); -u_int32_t getsr(void); +uint32_t enableintr(void); +uint32_t disableintr(void); +uint32_t getsr(void); +uint32_t setsr(uint32_t); #endif /* _KERNEL */ #endif /* !_MIPS_CPU_H_ */ diff --git a/sys/arch/mips64/mips64/cache_loongson2.S b/sys/arch/mips64/mips64/cache_loongson2.S index db0b64d5ec0..f05d3aee032 100644 --- a/sys/arch/mips64/mips64/cache_loongson2.S +++ b/sys/arch/mips64/mips64/cache_loongson2.S @@ -1,4 +1,4 @@ -/* $OpenBSD: cache_loongson2.S,v 1.3 2009/12/25 21:02:15 miod Exp $ */ +/* $OpenBSD: cache_loongson2.S,v 1.4 2010/01/09 23:34:29 miod Exp $ */ /* * Copyright (c) 2009 Miodrag Vallat. @@ -45,6 +45,8 @@ * Processors supported: * Loongson 2E/2F (code could be modified to work on 2C by not hardcoding * the number of ways). + * The L1 virtual index bits, as well as the cache line size (32 bytes), + * are hardcoded. 
*/ #include <machine/param.h> @@ -56,10 +58,6 @@ .set mips3 -#define LOAD_XKPHYS(reg, cca) \ - li reg, cca | 0x10; \ - dsll reg, reg, 59 - /* L1 cache operations */ #define IndexInvalidate_I 0x00 #define IndexWBInvalidate_D 0x01 @@ -95,15 +93,15 @@ /*---------------------------------------------------------------------------- * - * Loongson2_ConfigCache -- + * Loongson2_ConfigCache(struct cpu_info *ci) -- * * Setup various cache-dependent variables: - * The size of the data cache is stored into CpuPrimaryDataCacheSize. - * The size of instruction cache is stored into CpuPrimaryInstCacheSize. + * The size of the data cache is stored into ci_l1datacachesize. + * The size of instruction cache is stored into ci_l1instcachesize. * Alignment mask for cache aliasing test is stored in CpuCacheAliasMask. - * CpuSecondaryCacheSize is set to the size of the secondary cache. - * CpuTertiaryCacheSize is set to the size of the tertiary cache. - * CpuNWayCache is set to 0 for direct mapped caches, 2 for two way + * ci_l2size is set to the size of the secondary cache. + * ci_l3size is set to the size of the tertiary cache. + * ci_cacheways is set to 0 for direct mapped caches, 2 for two way * caches and 4 for four way caches. This primarily indicates the * primary cache associativity. * @@ -123,7 +121,7 @@ LEAF(Loongson2_ConfigCache, 0) and t2, v0, 0x20 srl t2, t2, 1 # Get I cache line size. addu t2, t2, 16 - sw t2, CpuPrimaryInstCacheLSize + sw t2, CI_L1INSTCACHELINE(a0) srl t1, v0, 6 # Get D cache size. and t1, 7 @@ -132,7 +130,7 @@ LEAF(Loongson2_ConfigCache, 0) and t2, v0, 0x10 addu t2, t2, 16 # Get D cache line size. - sw t2, CpuPrimaryDataCacheLSize + sw t2, CI_L1DATACACHELINE(a0) li ta3, 0 # Tertiary size 0. @@ -144,24 +142,21 @@ LEAF(Loongson2_ConfigCache, 0) * ta2 = secondary size, ta3 = tertiary size. */ ConfResult: - sw v0, CpuConfigRegister - mfc0 t3, COP_0_STATUS_REG - sw t2, CpuCacheType # Save cache attributes - sw t3, CpuStatusRegister + sw t2, CI_CACHECONFIGURATION(a0) # Save cache attributes and t2, CTYPE_WAYMASK # isolate number of sets. - sw t2, CpuNWayCache + sw t2, CI_CACHEWAYS(a0) srl t2, 1 # get div shift for set size. - sw ta2, CpuSecondaryCacheSize - sw ta3, CpuTertiaryCacheSize + sw ta2, CI_L2SIZE(a0) + sw ta3, CI_L3SIZE(a0) addu t1, ta0, -1 # Use icache for alias mask srl t1, t2 # Some cpus have different and t1, ~(PAGE_SIZE - 1) # i and d cache sizes... - sw t1, CpuCacheAliasMask + PTR_S t1, CpuCacheAliasMask - sw ta0, CpuPrimaryInstCacheSize # store cache size. - sw ta1, CpuPrimaryDataCacheSize # store cache size. + sw ta0, CI_L1INSTCACHESIZE(a0) # store cache size. + sw ta1, CI_L1DATACACHESIZE(a0) # store cache size. /* * Cache way number encoding is done in the lowest bits, and @@ -169,8 +164,8 @@ ConfResult: * that `mi' code can divide by them if necessary. */ li ta1, 1 - sw ta1, CpuPrimaryInstSetSize - sw ta1, CpuPrimaryDataSetSize + sw ta1, CI_L1INSTCACHESET(a0) + sw ta1, CI_L1DATACACHESET(a0) j ra nop @@ -178,7 +173,7 @@ END(Loongson2_ConfigCache) /*---------------------------------------------------------------------------- * - * Loongson2_SyncCache -- + * Loongson2_SyncCache(struct cpu_info *ci) -- * * Sync ALL caches. 
* No need to look at number of sets since we are cleaning out @@ -189,9 +184,9 @@ END(Loongson2_ConfigCache) LEAF(Loongson2_SyncCache, 0) sync - lw t1, CpuPrimaryInstCacheSize + lw t1, CI_L1INSTCACHESIZE(a0) srl t1, t1, 2 # / 4ways - lw t2, CpuPrimaryDataCacheSize + lw t2, CI_L1DATACACHESIZE(a0) srl t2, t2, 2 # / 4ways /* L1 I$ */ @@ -220,7 +215,7 @@ LEAF(Loongson2_SyncCache, 0) /* L2 */ LOAD_XKPHYS(t0, CCA_CACHED) - lw t2, CpuSecondaryCacheSize + lw t2, CI_L2SIZE(a0) srl t2, 2 # because cache is 4 way PTR_ADDU t1, t0, t2 PTR_SUBU t1, 32 @@ -240,7 +235,7 @@ END(Loongson2_SyncCache) * * Loongson2_InvalidateICache -- * - * void Loongson2_SyncICache(vaddr_t va, size_t len) + * void Loongson2_SyncICache(struct cpu_info *ci, vaddr_t va, size_t len) * * Invalidate the L1 instruction cache for at least range * of va to va + len - 1. @@ -249,20 +244,20 @@ END(Loongson2_SyncCache) *---------------------------------------------------------------------------- */ LEAF(Loongson2_InvalidateICache, 0) - andi a0, ((1 << 14) - 1) # only keep index bits - PTR_ADDU a1, 31 # Round up size - LOAD_XKPHYS(a2, CCA_CACHED) - PTR_ADDU a1, a0 # Add extra from address - dsrl a0, a0, 5 - dsll a0, a0, 5 # align address - PTR_SUBU a1, a1, a0 - PTR_ADDU a0, a2 # a0 now new XKPHYS address - dsrl a1, a1, 5 # Number of unrolled loops + andi a1, ((1 << 14) - 1) # only keep index bits + PTR_ADDU a2, 31 # Round up size + LOAD_XKPHYS(a3, CCA_CACHED) + PTR_ADDU a2, a1 # Add extra from address + dsrl a1, a1, 5 + dsll a1, a1, 5 # align address + PTR_SUBU a2, a2, a1 + PTR_ADDU a1, a3 # a1 now new XKPHYS address + dsrl a2, a2, 5 # Number of unrolled loops 1: - PTR_ADDU a1, -1 - cache IndexInvalidate_I, 0(a0) - bne a1, zero, 1b - PTR_ADDU a0, 32 + PTR_ADDU a2, -1 + cache IndexInvalidate_I, 0(a1) + bne a2, zero, 1b + PTR_ADDU a1, 32 j ra nop @@ -272,7 +267,7 @@ END(Loongson2_InvalidateICache) * * Loongson2_SyncDCachePage -- * - * void Loongson2_SyncDCachePage(paddr_t pa) + * void Loongson2_SyncDCachePage(struct cpu_info *ci, paddr_t pa) * * Sync the L1 and L2 data cache page for address pa. * @@ -281,25 +276,25 @@ END(Loongson2_InvalidateICache) LEAF(Loongson2_SyncDCachePage, 0) sync - LOAD_XKPHYS(a1, CCA_CACHED) - or a0, a1 # a0 now new L1 address - dsrl a0, a0, PAGE_SHIFT - dsll a0, a0, PAGE_SHIFT # page align pa - move a1, a0 # save for L2 + LOAD_XKPHYS(a2, CCA_CACHED) + or a1, a2 # a1 now new L1 address + dsrl a1, a1, PAGE_SHIFT + dsll a1, a1, PAGE_SHIFT # page align pa + move a2, a1 # save for L2 /* L1 */ - PTR_ADDU a2, a0, PAGE_SIZE-32 + PTR_ADDU a3, a1, PAGE_SIZE-32 1: - cache HitWBInvalidate_D, 0(a0) - bne a2, a0, 1b - PTR_ADDU a0, 32 + cache HitWBInvalidate_D, 0(a1) + bne a3, a1, 1b + PTR_ADDU a1, 32 /* L2 */ - PTR_ADDU a2, a1, PAGE_SIZE-32 + PTR_ADDU a3, a2, PAGE_SIZE-32 2: - cache HitWBInvalidate_S, 0(a1) - bne a2, a1, 2b - PTR_ADDU a1, 32 + cache HitWBInvalidate_S, 0(a2) + bne a3, a2, 2b + PTR_ADDU a2, 32 j ra nop @@ -309,7 +304,8 @@ END(Loongson2_SyncDCachePage) * * Loongson2_HitSyncDCache -- * - * void Loongson2_HitSyncDCache(paddr_t pa, size_t len) + * void Loongson2_HitSyncDCache(struct cpu_info *ci, + * paddr_t pa, size_t len) * * Sync L1 and L2 data caches for range of pa to pa + len - 1. * Since L2 is writeback, we need to operate on L1 first, to make sure @@ -322,32 +318,32 @@ END(Loongson2_SyncDCachePage) LEAF(Loongson2_HitSyncDCache, 0) sync - beq a1, zero, 3f # size is zero! 
- PTR_ADDU a1, 31 # Round up - PTR_ADDU a1, a1, a0 # Add extra from address - dsrl a0, a0, 5 - dsll a0, a0, 5 # align to cacheline boundary - PTR_SUBU a1, a1, a0 - dsrl a1, a1, 5 # Compute number of cache lines - LOAD_XKPHYS(a2, CCA_CACHED) - or a0, a2 # build suitable va + beq a2, zero, 3f # size is zero! + PTR_ADDU a2, 31 # Round up + PTR_ADDU a2, a2, a1 # Add extra from address + dsrl a1, a1, 5 + dsll a1, a1, 5 # align to cacheline boundary + PTR_SUBU a2, a2, a1 + dsrl a2, a2, 5 # Compute number of cache lines + LOAD_XKPHYS(a3, CCA_CACHED) + or a1, a3 # build suitable va - move a3, a1 # save for L2 - move a2, a0 + move a4, a2 # save for L2 + move a3, a1 /* L1 */ 1: - PTR_ADDU a1, -1 - cache HitWBInvalidate_D, 0(a0) - bne a1, zero, 1b - PTR_ADDU a0, 32 + PTR_ADDU a2, -1 + cache HitWBInvalidate_D, 0(a1) + bne a2, zero, 1b + PTR_ADDU a1, 32 /* L2 */ 2: - PTR_ADDU a3, -1 - cache HitWBInvalidate_S, 0(a2) - bne a3, zero, 2b - PTR_ADDU a2, 32 + PTR_ADDU a4, -1 + cache HitWBInvalidate_S, 0(a3) + bne a4, zero, 2b + PTR_ADDU a3, 32 3: j ra @@ -358,7 +354,8 @@ END(Loongson2_HitSyncDCache) * * Loongson2_HitInvalidateDCache -- * - * void Loongson2_HitInvalidateDCache(paddr_t pa, size_t len) + * void Loongson2_HitInvalidateDCache(struct cpu_info *ci, + * paddr_t pa, size_t len) * * Invalidate L1 and L2 data caches for range of pa to pa + len - 1. * @@ -367,32 +364,32 @@ END(Loongson2_HitSyncDCache) LEAF(Loongson2_HitInvalidateDCache, 0) sync - beq a1, zero, 3f # size is zero! - PTR_ADDU a1, 31 # Round up - PTR_ADDU a1, a1, a0 # Add extra from address - dsrl a0, a0, 5 - dsll a0, a0, 5 # align to cacheline boundary - PTR_SUBU a1, a1, a0 - dsrl a1, a1, 5 # Compute number of cache lines - LOAD_XKPHYS(a2, CCA_CACHED) - or a0, a2 # build suitable va + beq a2, zero, 3f # size is zero! + PTR_ADDU a2, 31 # Round up + PTR_ADDU a2, a2, a1 # Add extra from address + dsrl a1, a1, 5 + dsll a1, a1, 5 # align to cacheline boundary + PTR_SUBU a2, a2, a1 + dsrl a2, a2, 5 # Compute number of cache lines + LOAD_XKPHYS(a3, CCA_CACHED) + or a1, a3 # build suitable va - move a3, a1 # save for L2 - move a2, a0 + move a4, a2 # save for L2 + move a3, a1 /* L1 */ 1: - PTR_ADDU a1, -1 - cache HitInvalidate_D, 0(a0) - bne a1, zero, 1b - PTR_ADDU a0, 32 + PTR_ADDU a2, -1 + cache HitInvalidate_D, 0(a1) + bne a2, zero, 1b + PTR_ADDU a1, 32 /* L2 */ 2: - PTR_ADDU a3, -1 - cache HitInvalidate_S, 0(a2) - bne a3, zero, 2b - PTR_ADDU a2, 32 + PTR_ADDU a4, -1 + cache HitInvalidate_S, 0(a3) + bne a4, zero, 2b + PTR_ADDU a3, 32 3: j ra @@ -403,7 +400,8 @@ END(Loongson2_HitInvalidateDCache) * * Loongson2_IOSyncDCache -- * - * void Loongson2_IOSyncDCache(paddr_t pa, size_t len, int how) + * void Loongson2_IOSyncDCache(struct cpu_info *ci, + * paddr_t pa, size_t len, int how) * * Invalidate or flush L1 and L2 data caches for range of pa to * pa + len - 1. 
@@ -423,7 +421,7 @@ NON_LEAF(Loongson2_IOSyncDCache, FRAMESZ(CF_SZ+REGSZ), ra) PTR_SUBU sp, FRAMESZ(CF_SZ+REGSZ) PTR_S ra, CF_RA_OFFS+REGSZ(sp) - beqz a2, SyncInv # Sync PREREAD + beqz a3, SyncInv # Sync PREREAD nop SyncWBInv: @@ -433,7 +431,7 @@ SyncWBInv: PTR_L ra, CF_RA_OFFS+REGSZ(sp) SyncInv: - or t0, a0, a1 # check if invalidate possible + or t0, a1, a2 # check if invalidate possible and t0, t0, 31 # both address and size must bnez t0, SyncWBInv # be aligned to the cache size nop diff --git a/sys/arch/mips64/mips64/cache_r10k.S b/sys/arch/mips64/mips64/cache_r10k.S index 6b3a89ff058..b09c8dd6be7 100644 --- a/sys/arch/mips64/mips64/cache_r10k.S +++ b/sys/arch/mips64/mips64/cache_r10k.S @@ -1,4 +1,4 @@ -/* $OpenBSD: cache_r10k.S,v 1.10 2009/12/25 20:59:45 miod Exp $ */ +/* $OpenBSD: cache_r10k.S,v 1.11 2010/01/09 23:34:29 miod Exp $ */ /* * Copyright (c) 2004 Opsycon AB (www.opsycon.se) @@ -27,11 +27,9 @@ */ /* - * Processors supported: - * R10000 - * R12000 - * R14000 - * R16000 + * Processors supported: + * R10000, R12000, R14000 and R16000. + * The cache line and number of ways are hardcoded. */ #include <sys/errno.h> @@ -46,10 +44,6 @@ .set mips3 -#define LOAD_XKPHYS(reg, cca) \ - li reg, cca | 0x10; \ - dsll reg, reg, 59 - /* * Skip the .h file. Noone else need to know! */ @@ -97,7 +91,7 @@ /*---------------------------------------------------------------------------- * - * Mips10k_ConfigCache -- + * Mips10k_ConfigCache(struct cpu_info *ci) -- * * Size and configure the caches. * NOTE: should only be called from mips_init(). @@ -106,12 +100,12 @@ * Returns the value of the cpu configuration register. * * Side effects: - * The size of the data cache is stored into CpuPrimaryDataCacheSize. - * The size of instruction cache is stored into CpuPrimaryInstCacheSize. + * The size of the data cache is stored into ci_l1datacachesize. + * The size of instruction cache is stored into ci_l1instcachesize. * Alignment mask for cache aliasing test is stored in CpuCacheAliasMask. - * CpuSecondaryCacheSize is set to the size of the secondary cache. - * CpuTertiaryCacheSize is set to the size of the tertiary cache. - * CpuNWayCache is set to 0 for direct mapped caches, 2 for two way + * ci_l2size is set to the size of the secondary cache. + * ci_l3size is set to the size of the tertiary cache. + * ci_cacheways is set to 0 for direct mapped caches, 2 for two way * caches and 4 for four way caches. This primarily indicates the * primary cache associativity. * @@ -130,7 +124,7 @@ LEAF(Mips10k_ConfigCache, 0) sllv ta0, t2, t1 # ta0 = Initial I set size. li t2, 64 - sw t2, CpuPrimaryInstCacheLSize + sw t2, CI_L1INSTCACHELINE(a0) srl t1, v0, 26 # Get D cache size. and t1, 7 @@ -138,14 +132,13 @@ LEAF(Mips10k_ConfigCache, 0) sllv ta1, t2, t1 li t2, 32 # Get D cache line size. - sw t2, CpuPrimaryDataCacheLSize + sw t2, CI_L1DATACACHELINE(a0) li t2, CTYPE_2WAY # Assume two way cache - li ta2, 0 # Secondary size 0. li ta3, 0 # Tertiary size 0. or t2, CTYPE_HAS_XL2 # External L2 present. - srl t1, v0, 16 # Get I cache size. + srl t1, v0, 16 # Get L2 cache size. and t1, 7 li ta2, 512*1024 # 512k per 'click'. sll ta2, t1 @@ -155,42 +148,38 @@ LEAF(Mips10k_ConfigCache, 0) * ta2 = secondary size, ta3 = tertiary size. */ ConfResult: - sw v0, CpuConfigRegister - mfc0 t3, COP_0_STATUS_REG - sw t2, CpuCacheType # Save cache attributes - sw t3, CpuStatusRegister + sw t2, CI_CACHECONFIGURATION(a0) # Save cache attributes and t2, CTYPE_WAYMASK # isolate number of sets. 
- sw t2, CpuNWayCache + sw t2, CI_CACHEWAYS(a0) srl t2, 1 # get div shift for set size. - sw ta2, CpuSecondaryCacheSize - sw ta3, CpuTertiaryCacheSize + sw ta2, CI_L2SIZE(a0) + sw ta3, CI_L3SIZE(a0) addu t1, ta0, -1 # Use icache for alias mask srl t1, t2 and t1, ~(NBPG - 1) - sw t1, CpuCacheAliasMask +#ifdef MULTIPROCESSOR + PTR_L ta2, CpuCacheAliasMask + or t1, ta2 # Pick largest mask +#endif + PTR_S t1, CpuCacheAliasMask - sw ta0, CpuPrimaryInstCacheSize # store cache size. + sw ta0, CI_L1INSTCACHESIZE(a0) # store cache size. srl ta0, t2 # calculate set size. - sw ta0, CpuPrimaryInstSetSize + sw ta0, CI_L1INSTCACHESET(a0) - sw ta1, CpuPrimaryDataCacheSize # store cache size. + sw ta1, CI_L1DATACACHESIZE(a0) # store cache size. srl ta1, t2 # calculate set size. - sw ta1, CpuPrimaryDataSetSize + sw ta1, CI_L1DATACACHESET(a0) -#if 0 - and v0, ~7 - or v0, CCA_CACHED # set cachable writeback kseg0 - mtc0 v0, COP_0_CONFIG # establish any new config -#endif j ra nop END(Mips10k_ConfigCache) /*---------------------------------------------------------------------------- * - * Mips10k_SyncCache -- + * Mips10k_SyncCache(struct cpu_info *ci) -- * * Sync ALL caches. * No need to look at number of sets since we are cleaning out @@ -206,8 +195,8 @@ END(Mips10k_ConfigCache) */ LEAF(Mips10k_SyncCache, 0) .set noreorder - lw t1, CpuPrimaryInstSetSize - lw t2, CpuPrimaryDataSetSize + lw t1, CI_L1INSTCACHESET(a0) + lw t2, CI_L1DATACACHESET(a0) /* * Sync the instruction cache. @@ -251,11 +240,11 @@ LEAF(Mips10k_SyncCache, 0) /* Do L2 */ LOAD_XKPHYS(t3, CCA_CACHED) - lw ta0, CpuSecondaryCacheSize # XXX Need set size here. + lw ta0, CI_L2SIZE(a0) # XXX Need set size here. 10: cache IndexWBInvalidate_S, 0(t3) cache IndexWBInvalidate_S, 1(t3) - PTR_SUBU ta0, 32 # Fixed cache line size. + PTR_SUBU ta0, 32 # Fixed L2 cache line size. bgtz ta0, 10b PTR_ADDU t3, 32 @@ -267,8 +256,7 @@ END(Mips10k_SyncCache) * * Mips10k_InvalidateICache -- * - * void Mips10k_SyncICache(addr, len) - * vaddr_t addr, len; + * void Mips10k_SyncICache(struct cpu_info *ci, vaddr_t addr, size_t len) * * Invalidate the L1 instruction cache for at least range * of addr to addr + len - 1. @@ -283,22 +271,22 @@ END(Mips10k_SyncCache) *---------------------------------------------------------------------------- */ LEAF(Mips10k_InvalidateICache, 0) - LOAD_XKPHYS(a2, CCA_CACHED) - and a0, 0x00ffffff # Reduce addr to cache index - PTR_ADDU a1, 63 # Round up size - PTR_ADDU a1, a0 # Add extra from address - and a0, -64 # Align start address - PTR_SUBU a1, a1, a0 - PTR_ADDU a0, a2 # a0 now new XKPHYS address - srl a1, a1, 6 # Number of unrolled loops + LOAD_XKPHYS(a3, CCA_CACHED) + and a1, 0x00ffffff # Reduce addr to cache index + PTR_ADDU a2, 63 # Round up size + PTR_ADDU a2, a1 # Add extra from address + and a1, -64 # Align start address + PTR_SUBU a2, a2, a1 + PTR_ADDU a1, a3 # a1 now new XKPHYS address + srl a2, a2, 6 # Number of unrolled loops 1: - addu a1, -1 + addu a2, -1 - cache IndexInvalidate_I, 0(a0) # do set A - cache IndexInvalidate_I, 1(a0) # do set B + cache IndexInvalidate_I, 0(a1) # do set A + cache IndexInvalidate_I, 1(a1) # do set B - bne a1, zero, 1b - PTR_ADDU a0, 64 + bne a2, zero, 1b + PTR_ADDU a1, 64 j ra nop @@ -308,8 +296,7 @@ END(Mips10k_InvalidateICache) * * Mips10k_SyncDCachePage -- * - * void Mips10k_SyncDCachePage(addr) - * vaddr_t addr; + * void Mips10k_SyncDCachePage(struct cpu_info *ci, vaddr_t addr) * * Sync the L1 data cache page for address addr. * The address is reduced to a XKPHYS index to avoid TLB faults. 
@@ -324,26 +311,26 @@ END(Mips10k_InvalidateICache) *---------------------------------------------------------------------------- */ LEAF(Mips10k_SyncDCachePage, 0) - LOAD_XKPHYS(a2, CCA_CACHED) - dsll a0, 34 - dsrl a0, 34 - PTR_ADDU a0, a2 # a0 now new XKPHYS address - and a0, ~PAGE_MASK # Page align start address - PTR_ADDU a1, a0, PAGE_SIZE-128 + LOAD_XKPHYS(a3, CCA_CACHED) + dsll a1, 34 + dsrl a1, 34 + PTR_ADDU a1, a3 # a1 now new XKPHYS address + and a1, ~PAGE_MASK # Page align start address + PTR_ADDU a2, a1, PAGE_SIZE-128 1: - cache IndexWBInvalidate_D, 0(a0) # do set A - cache IndexWBInvalidate_D, 32(a0) - cache IndexWBInvalidate_D, 64(a0) - cache IndexWBInvalidate_D, 96(a0) + cache IndexWBInvalidate_D, 0(a1) # do set A + cache IndexWBInvalidate_D, 32(a1) + cache IndexWBInvalidate_D, 64(a1) + cache IndexWBInvalidate_D, 96(a1) - cache IndexWBInvalidate_D, 1(a0) # do set B - cache IndexWBInvalidate_D, 33(a0) - cache IndexWBInvalidate_D, 65(a0) - cache IndexWBInvalidate_D, 97(a0) + cache IndexWBInvalidate_D, 1(a1) # do set B + cache IndexWBInvalidate_D, 33(a1) + cache IndexWBInvalidate_D, 65(a1) + cache IndexWBInvalidate_D, 97(a1) - bne a1, a0, 1b - PTR_ADDU a0, 128 + bne a2, a1, 1b + PTR_ADDU a1, 128 j ra nop @@ -353,17 +340,14 @@ END(Mips10k_SyncDCachePage) * * Mips10k_HitSyncDCache -- * - * void Mips10k_HitSyncDCache(addr, len) - * vaddr_t addr, len; + * void Mips10k_HitSyncDCache(struct cpu_info *ci, + * vaddr_t addr, size_t len) * * Sync data cache for range of addr to addr + len - 1. * The address can be any valid virtual address as long * as no TLB invalid traps occur. Only lines with matching * addr are flushed. * - * Note: Use the CpuNWayCache flag to select 16 or 32 byte linesize. - * All Nway cpu's now available have a fixed 32byte linesize. - * * Results: * None. * @@ -374,18 +358,18 @@ END(Mips10k_SyncDCachePage) *---------------------------------------------------------------------------- */ LEAF(Mips10k_HitSyncDCache, 0) - beq a1, zero, 3f # size is zero! - PTR_ADDU a1, 31 # Round up - PTR_ADDU a1, a1, a0 # Add extra from address - and a0, a0, -32 # align address - PTR_SUBU a1, a1, a0 - srl a1, a1, 5 # Compute number of cache lines + beq a2, zero, 3f # size is zero! + PTR_ADDU a2, 31 # Round up + PTR_ADDU a2, a2, a1 # Add extra from address + and a1, a1, -32 # align address + PTR_SUBU a2, a2, a1 + srl a2, a2, 5 # Compute number of cache lines 1: - PTR_ADDU a1, -1 - cache HitWBInvalidate_D, 0(a0) - bne a1, zero, 1b - PTR_ADDU a0, 32 + PTR_ADDU a2, -1 + cache HitWBInvalidate_D, 0(a1) + bne a2, zero, 1b + PTR_ADDU a1, 32 3: j ra @@ -397,8 +381,8 @@ END(Mips10k_HitSyncDCache) * * Mips10k_HitSyncSCache -- * - * void Mips10k_HitSyncSCache(addr, len) - * vaddr_t addr, len; + * static void Mips10k_HitSyncSCache(struct cpu_info *ci, + * vaddr_t addr, size_t len) * * Sync secondary cache for range of addr to addr + len - 1. * The address can be any valid virtual address as long @@ -415,17 +399,17 @@ END(Mips10k_HitSyncDCache) *---------------------------------------------------------------------------- */ LEAF(Mips10k_HitSyncSCache, 0) - beq a1, zero, 3f # size is zero! - PTR_ADDU a1, a1, a0 # Add in extra from align - and a0, a0, -32 # Align address - PTR_SUBU a1, a1, a0 + beq a2, zero, 3f # size is zero! 
+ PTR_ADDU a2, a2, a1 # Add in extra from align + and a1, a1, -32 # Align address + PTR_SUBU a2, a2, a1 1: - PTR_ADDU a1, -32 + PTR_ADDU a2, -32 - cache HitWBInvalidate_S, 0(a0) + cache HitWBInvalidate_S, 0(a1) - bgtz a1, 1b - PTR_ADDU a0, 32 + bgtz a2, 1b + PTR_ADDU a1, 32 3: j ra @@ -436,8 +420,8 @@ END(Mips10k_HitSyncSCache) * * Mips10k_HitInvalidateDCache -- * - * void Mips10k_HitInvalidateDCache(addr, len) - * vaddr_t addr, len; + * void Mips10k_HitInvalidateDCache(struct cpu_info *ci, + * vaddr_t addr, size_t len) * * Invalidate data cache for range of addr to addr + len - 1. * The address can be any valid address as long as no TLB misses occur. @@ -453,18 +437,18 @@ END(Mips10k_HitSyncSCache) *---------------------------------------------------------------------------- */ LEAF(Mips10k_HitInvalidateDCache, 0) - beq a1, zero, 3f # size is zero! - PTR_ADDU a1, a1, a0 # Add in extra from align - and a0, a0, -32 # Align address - PTR_SUBU a1, a1, a0 + beq a2, zero, 3f # size is zero! + PTR_ADDU a2, a2, a1 # Add in extra from align + and a1, a1, -32 # Align address + PTR_SUBU a2, a2, a1 1: - PTR_ADDU a1, -32 + PTR_ADDU a2, -32 - cache HitInvalidate_D, 0(a0) + cache HitInvalidate_D, 0(a1) - bgtz a1, 1b - PTR_ADDU a0, 32 + bgtz a2, 1b + PTR_ADDU a1, 32 3: j ra @@ -476,8 +460,8 @@ END(Mips10k_HitInvalidateDCache) * * Mips10k_HitInvalidateSCache -- * - * void Mips10k_HitInvalidateSCache(addr, len) - * vaddr_t addr, len; + * static void Mips10k_HitInvalidateSCache(struct cpu_info *ci, + * vaddr_t addr, size_t len) * * Invalidate secondary cache for range of addr to addr + len - 1. * The address can be any valid address as long as no TLB misses occur. @@ -493,17 +477,17 @@ END(Mips10k_HitInvalidateDCache) *---------------------------------------------------------------------------- */ LEAF(Mips10k_HitInvalidateSCache, 0) - beq a1, zero, 3f # size is zero! - PTR_ADDU a1, a1, a0 # Add in extra from align - and a0, a0, -32 # Align address - PTR_SUBU a1, a1, a0 + beq a2, zero, 3f # size is zero! + PTR_ADDU a2, a2, a1 # Add in extra from align + and a1, a1, -32 # Align address + PTR_SUBU a2, a2, a1 1: - PTR_ADDU a1, -32 + PTR_ADDU a2, -32 - cache HitInvalidate_S, 0(a0) + cache HitInvalidate_S, 0(a1) - bgtz a1, 1b - PTR_ADDU a0, 32 + bgtz a2, 1b + PTR_ADDU a1, 32 3: j ra @@ -514,9 +498,8 @@ END(Mips10k_HitInvalidateSCache) * * Mips10k_IOSyncDCache -- * - * void Mips10k_IOSyncDCache(addr, len, rw) - * vaddr_t addr; - * int len, rw; + * void Mips10k_IOSyncDCache(struct cpu_info *ci, vaddr_t addr, + * size_t len, int rw) * * Invalidate or flush data cache for range of addr to addr + len - 1. * The address can be any valid address as long as no TLB misses occur. 
@@ -537,14 +520,13 @@ END(Mips10k_HitInvalidateSCache) *---------------------------------------------------------------------------- */ NON_LEAF(Mips10k_IOSyncDCache, FRAMESZ(CF_SZ+2*REGSZ), ra) - PTR_SUBU sp, FRAMESZ(CF_SZ+2*REGSZ) PTR_S ra, CF_RA_OFFS+2*REGSZ(sp) - REG_S a0, CF_ARGSZ(sp) # save args - beqz a2, SyncRD # Sync PREREAD - REG_S a1, CF_ARGSZ+REGSZ(sp) - addiu a2, -1 - bnez a2, SyncRDWB # Sync PREWRITE+PREREAD + REG_S a1, CF_ARGSZ(sp) # save args + beqz a3, SyncRD # Sync PREREAD + REG_S a2, CF_ARGSZ+REGSZ(sp) + addiu a3, -1 + bnez a3, SyncRDWB # Sync PREWRITE+PREREAD nop SyncWR: @@ -554,9 +536,9 @@ SyncWR: PTR_L ra, CF_RA_OFFS+2*REGSZ(sp) SyncRD: - and t0, a0, 63 # check if invalidate possible + and t0, a1, 63 # check if invalidate possible bnez t0, SyncRDWB # both address and size must - and t0, a1, 63 # be aligned at the cache size + and t0, a2, 63 # be aligned at the cache size bnez t0, SyncRDWB nop diff --git a/sys/arch/mips64/mips64/cache_r5k.S b/sys/arch/mips64/mips64/cache_r5k.S index a9b54934bbb..8164f5e0745 100644 --- a/sys/arch/mips64/mips64/cache_r5k.S +++ b/sys/arch/mips64/mips64/cache_r5k.S @@ -1,4 +1,4 @@ -/* $OpenBSD: cache_r5k.S,v 1.27 2009/12/25 20:59:45 miod Exp $ */ +/* $OpenBSD: cache_r5k.S,v 1.28 2010/01/09 23:34:29 miod Exp $ */ /* * Copyright (c) 1998-2004 Opsycon AB (www.opsycon.se) @@ -27,18 +27,18 @@ */ /* - * NOTE! - * - * This code does not support caches with other linesize than 32. - * Neither will it support R4000 or R4400 Secondary caches. These - * configurations will need another set of cache functions. - * - * Processors supported: - * R4600/R4700 (if option CPUR4600) - * R5000 - * RM52xx - * RM7xxx - * RM9xxx + * NOTE! + * + * This code does not support caches with other linesize than 32. + * Neither will it support R4000 or R4400 Secondary caches. These + * configurations will need another set of cache functions. + * + * Processors supported: + * R4600/R4700 (if option CPUR4600) + * R5000 + * RM52xx + * RM7xxx + * RM9xxx */ #include <sys/errno.h> @@ -53,10 +53,6 @@ .set mips3 -#define LOAD_XKPHYS(reg, cca) \ - li reg, cca | 0x10; \ - dsll reg, reg, 59 - /* * Skip the .h file. Noone else need to know! */ @@ -145,7 +141,7 @@ /*---------------------------------------------------------------------------- * - * Mips5k_ConfigCache -- + * Mips5k_ConfigCache(struct cpu_info *ci) -- * * Size and configure the caches. * NOTE: should only be called from mips_init(). @@ -154,12 +150,12 @@ * Returns the value of the cpu configuration register. * * Side effects: - * The size of the data cache is stored into CpuPrimaryDataCacheSize. - * The size of instruction cache is stored into CpuPrimaryInstCacheSize. + * The size of the data cache is stored into ci_l1datacachesize. + * The size of instruction cache is stored into ci_l1instcachesize. * Alignment mask for cache aliasing test is stored in CpuCacheAliasMask. - * CpuSecondaryCacheSize is set to the size of the secondary cache. - * CpuTertiaryCacheSize is set to the size of the tertiary cache. - * CpuNWayCache is set to 0 for direct mapped caches, 2 for two way + * ci_l2size is set to the size of the secondary cache. + * ci_l3size is set to the size of the tertiary cache. + * ci_cacheways is set to 0 for direct mapped caches, 2 for two way * caches and 4 for four way caches. This primarily indicates the * primary cache associativity. * @@ -187,7 +183,7 @@ LEAF(Mips5k_ConfigCache, 0) and t2, v0, 0x20 srl t2, t2, 1 # Get I cache line size. 
addu t2, t2, 16 - sw t2, CpuPrimaryInstCacheLSize + sw t2, CI_L1INSTCACHELINE(a0) srl t1, v0, 6 # Get D cache size. and t1, 7 @@ -196,7 +192,7 @@ LEAF(Mips5k_ConfigCache, 0) and t2, v0, 0x10 addu t2, t2, 16 # Get D cache line size. - sw t2, CpuPrimaryDataCacheLSize + sw t2, CI_L1DATACACHELINE(a0) li t2, CTYPE_2WAY # Assume two way cache li ta2, 0 # Secondary size 0. @@ -300,7 +296,7 @@ Conf7K: # RM7000, check for L2 and L3 cache li ta3, 512*1024 # 512k per 'click'. sll ta3, t1 #else - lw ta3, CpuTertiaryCacheSize + lw ta3, CI_L3SIZE(a0) and t2, ~CTYPE_HAS_XL3 beqz ta3, Conf7KL2 # No L3 cache present nop @@ -362,29 +358,26 @@ Conf7KL2: * ta2 = secondary size, ta3 = tertiary size. */ ConfResult: - sw v0, CpuConfigRegister - mfc0 t3, COP_0_STATUS_REG - sw t2, CpuCacheType # Save cache attributes - sw t3, CpuStatusRegister + sw t2, CI_CACHECONFIGURATION(a0) # Save cache attributes and t2, CTYPE_WAYMASK # isolate number of sets. - sw t2, CpuNWayCache + sw t2, CI_CACHEWAYS(a0) srl t2, 1 # get div shift for set size. - sw ta2, CpuSecondaryCacheSize - sw ta3, CpuTertiaryCacheSize + sw ta2, CI_L2SIZE(a0) + sw ta3, CI_L3SIZE(a0) addu t1, ta0, -1 # Use icache for alias mask srl t1, t2 # Some cpus have different and t1, ~(NBPG - 1) # i and d cache sizes... - sw t1, CpuCacheAliasMask + PTR_S t1, CpuCacheAliasMask - sw ta0, CpuPrimaryInstCacheSize # store cache size. + sw ta0, CI_L1INSTCACHESIZE(a0) # store cache size. srl ta0, t2 # calculate set size. - sw ta0, CpuPrimaryInstSetSize + sw ta0, CI_L1INSTCACHESET(a0) - sw ta1, CpuPrimaryDataCacheSize # store cache size. + sw ta1, CI_L1DATACACHESIZE(a0) # store cache size. srl ta1, t2 # calculate set size. - sw ta1, CpuPrimaryDataSetSize + sw ta1, CI_L1DATACACHESET(a0) and v0, ~7 or v0, CCA_CACHED # set cachable writeback kseg0 @@ -396,7 +389,7 @@ END(Mips5k_ConfigCache) /*---------------------------------------------------------------------------- * - * Mips5k_SyncCache -- + * Mips5k_SyncCache(struct cpu_info *ci) -- * * Sync ALL caches. * No need to look at number of sets since we are cleaning out @@ -412,8 +405,8 @@ END(Mips5k_ConfigCache) */ LEAF(Mips5k_SyncCache, 0) .set noreorder - lw t1, CpuPrimaryInstCacheSize - lw t2, CpuPrimaryDataCacheSize + lw t1, CI_L1INSTCACHESIZE(a0) + lw t2, CI_L1DATACACHESIZE(a0) /* * Sync the instruction cache. @@ -455,14 +448,14 @@ LEAF(Mips5k_SyncCache, 0) PTR_ADDU t0, t0, 128 /* Do on chip L2 if present */ - lw t0, CpuCacheType + lw t0, CI_CACHECONFIGURATION(a0) and t0, CTYPE_HAS_IL2 beqz t0, 20f nop 3: LOAD_XKPHYS(t3, CCA_CACHED) - lw ta0, CpuSecondaryCacheSize + lw ta0, CI_L2SIZE(a0) 10: cache IndexWBInvalidate_S, 0(t3) PTR_SUBU ta0, 32 # Fixed cache line size. @@ -471,14 +464,14 @@ LEAF(Mips5k_SyncCache, 0) /* Do off chip L2 if present */ 20: - lw t0, CpuCacheType + lw t0, CI_CACHECONFIGURATION(a0) and t0, CTYPE_HAS_XL2 beqz t0, 30f nop mtc0 zero, COP_0_TAG_LO LOAD_XKPHYS(t3, CCA_CACHED) - lw ta0, CpuSecondaryCacheSize + lw ta0, CI_L2SIZE(a0) 21: cache InvalidateSecondaryPage, 0(t3) PTR_SUBU ta0, 4096 # Fixed cache page size. @@ -487,14 +480,14 @@ LEAF(Mips5k_SyncCache, 0) /* Do off chip L3 if present */ 30: - lw t0, CpuCacheType + lw t0, CI_CACHECONFIGURATION(a0) and t0, CTYPE_HAS_XL3 beqz t0, 99f nop mtc0 zero, COP_0_TAG_LO LOAD_XKPHYS(t3, CCA_CACHED) - lw ta0, CpuTertiaryCacheSize + lw ta0, CI_L3SIZE(a0) 31: cache InvalidatePage_T, 0(t3) PTR_SUBU ta0, 4096 # Fixed cache page size. 
@@ -514,8 +507,7 @@ END(Mips5k_SyncCache) * * Mips5k_InvalidateICache -- * - * void Mips5k_SyncICache(addr, len) - * vaddr_t addr, len; + * void Mips5k_SyncICache(struct cpu_info *, vaddr_t addr, size_t len) * * Invalidate the L1 instruction cache for at least range * of addr to addr + len - 1. @@ -535,25 +527,25 @@ LEAF(Mips5k_InvalidateICache, 0) li v0, SR_DIAG_DE mtc0 v0, COP_0_STATUS_REG # Disable interrupts #endif - lw v0, CpuNWayCache # Cache properties - lw t0, CpuPrimaryInstSetSize # Set size - and a0, 0x00ffffff # Reduce addr to cache index - LOAD_XKPHYS(a2, CCA_CACHED) - PTR_ADDU a1, 31 # Round up size - PTR_ADDU a1, a0 # Add extra from address - and a0, -32 # Align start address - PTR_SUBU a1, a1, a0 - PTR_ADDU a0, a2 # a0 now new XKPHYS address - srl a1, a1, 5 # Number of unrolled loops + lw v0, CI_CACHEWAYS(a0) # Cache properties + lw t0, CI_L1INSTCACHESET(a0) # Set size + and a1, 0x00ffffff # Reduce addr to cache index + LOAD_XKPHYS(a3, CCA_CACHED) + PTR_ADDU a2, 31 # Round up size + PTR_ADDU a2, a1 # Add extra from address + and a1, -32 # Align start address + PTR_SUBU a2, a2, a1 + PTR_ADDU a1, a3 # a1 now new XKPHYS address + srl a2, a2, 5 # Number of unrolled loops addiu v0, -2 # <0 1way, 0 = two, >0 four 1: bltz v0, 3f - PTR_ADDU a1, -1 + PTR_ADDU a2, -1 2: - PTR_ADDU t1, t0, a0 # Nway cache, flush set B. + PTR_ADDU t1, t0, a1 # Nway cache, flush set B. cache IndexInvalidate_I, 0(t1) - beqz v0, 3f # Is two way do set A + beqz v0, 3f # If two way do set A PTR_ADDU t1, t0 # else step to set C. cache IndexInvalidate_I, 0(t1) @@ -562,10 +554,10 @@ LEAF(Mips5k_InvalidateICache, 0) cache IndexInvalidate_I, 0(t1) 3: - cache IndexInvalidate_I, 0(a0) # do set (A if NWay) + cache IndexInvalidate_I, 0(a1) # do set (A if NWay) - bne a1, zero, 1b - PTR_ADDU a0, 32 + bne a2, zero, 1b + PTR_ADDU a1, 32 #ifdef CPUR4600 mtc0 v1, COP_0_STATUS_REG # Restore the status register. @@ -579,8 +571,7 @@ END(Mips5k_InvalidateICache) * * Mips5k_SyncDCachePage -- * - * void Mips5k_SyncDCachePage(addr) - * vaddr_t addr; + * void Mips5k_SyncDCachePage(struct cpu_info *ci, vaddr_t addr) * * Sync the L1 data cache page for address addr. * The address is reduced to a XKPHYS index to avoid TLB faults. @@ -600,45 +591,44 @@ LEAF(Mips5k_SyncDCachePage, 0) li v0, SR_DIAG_DE mtc0 v0, COP_0_STATUS_REG # Disable interrupts #endif - LOAD_XKPHYS(a2, CCA_CACHED) - lw v0, CpuNWayCache - dsll a0, 34 - dsrl a0, 34 - PTR_ADDU a0, a2 # a0 now new XKPHYS address - and a0, ~PAGE_MASK # Page align start address - PTR_ADDU a1, a0, PAGE_SIZE-128 + LOAD_XKPHYS(a3, CCA_CACHED) + lw v0, CI_CACHEWAYS(a0) + dsll a1, 34 + dsrl a1, 34 + PTR_ADDU a1, a3 # a1 now new XKPHYS address + and a1, ~PAGE_MASK # Page align start address + PTR_ADDU a2, a1, PAGE_SIZE-128 addiu v0, -2 # <0 1way, 0 = two, >0 four - lw a2, CpuPrimaryDataSetSize + lw a3, CI_L1DATACACHESET(a0) 1: bltz v0, 3f - PTR_ADDU t1, a0, a2 # flush set B. - cache IndexWBInvalidate_D, 0(t1) + PTR_ADDU t1, a1, a3 + cache IndexWBInvalidate_D, 0(t1) # flush set B. cache IndexWBInvalidate_D, 32(t1) cache IndexWBInvalidate_D, 64(t1) cache IndexWBInvalidate_D, 96(t1) - beqz v0, 3f # Two way, do set A, - PTR_ADDU t1, a2 + beqz v0, 3f # two way, skip C and D. 
+ PTR_ADDU t1, a3 cache IndexWBInvalidate_D, 0(t1) # do set C cache IndexWBInvalidate_D, 32(t1) cache IndexWBInvalidate_D, 64(t1) cache IndexWBInvalidate_D, 96(t1) - PTR_ADDU t1, a2 # do set D + PTR_ADDU t1, a3 # do set D cache IndexWBInvalidate_D, 0(t1) cache IndexWBInvalidate_D, 32(t1) cache IndexWBInvalidate_D, 64(t1) cache IndexWBInvalidate_D, 96(t1) - 3: - cache IndexWBInvalidate_D, 0(a0) # do set A - cache IndexWBInvalidate_D, 32(a0) - cache IndexWBInvalidate_D, 64(a0) - cache IndexWBInvalidate_D, 96(a0) + cache IndexWBInvalidate_D, 0(a1) # do set A + cache IndexWBInvalidate_D, 32(a1) + cache IndexWBInvalidate_D, 64(a1) + cache IndexWBInvalidate_D, 96(a1) - bne a1, a0, 1b - PTR_ADDU a0, 128 + bne a2, a1, 1b + PTR_ADDU a1, 128 #ifdef CPUR4600 mtc0 v1, COP_0_STATUS_REG # Restore the status register. @@ -651,8 +641,8 @@ END(Mips5k_SyncDCachePage) * * Mips5k_HitSyncDCache -- * - * void Mips5k_HitSyncDCache(addr, len) - * vaddr_t addr, len; + * void Mips5k_HitSyncDCache(struct cpu_info *ci, + * vaddr_t addr, size_t len) * * Sync data cache for range of addr to addr + len - 1. * The address can be any valid virtual address as long @@ -679,18 +669,18 @@ LEAF(Mips5k_HitSyncDCache, 0) mtc0 v0, COP_0_STATUS_REG # Disable interrupts #endif - beq a1, zero, 3f # size is zero! - PTR_ADDU a1, 31 # Round up - PTR_ADDU a1, a1, a0 # Add extra from address - and a0, a0, -32 # align address - PTR_SUBU a1, a1, a0 - srl a1, a1, 5 # Compute number of cache lines + beq a2, zero, 3f # size is zero! + PTR_ADDU a2, 31 # Round up + PTR_ADDU a2, a2, a1 # Add extra from address + and a1, a1, -32 # align address + PTR_SUBU a2, a2, a1 + srl a2, a2, 5 # Compute number of cache lines 1: - PTR_ADDU a1, -1 - cache HitWBInvalidate_D, 0(a0) - bne a1, zero, 1b - PTR_ADDU a0, 32 + PTR_ADDU a2, -1 + cache HitWBInvalidate_D, 0(a1) + bne a2, zero, 1b + PTR_ADDU a1, 32 3: #ifdef CPUR4600 @@ -706,8 +696,8 @@ END(Mips5k_HitSyncDCache) * * Mips5k_HitSyncSCache -- * - * void Mips5k_HitSyncSCache(addr, len) - * vaddr_t addr, len; + * static void Mips5k_HitSyncSCache(struct cpu_info *ci, + * vaddr_t addr, size_t len) * * Sync secondary cache for range of addr to addr + len - 1. * The address can be any valid virtual address as long @@ -734,18 +724,18 @@ LEAF(Mips5k_HitSyncSCache, 0) mtc0 v0, COP_0_STATUS_REG # Disable interrupts #endif - beq a1, zero, 3f # size is zero! - PTR_ADDU a1, a1, a0 # Add in extra from align - and a0, a0, -32 # Align address - PTR_SUBU a1, a1, a0 + beq a2, zero, 3f # size is zero! + PTR_ADDU a2, a2, a1 # Add in extra from align + and a1, a1, -32 # Align address + PTR_SUBU a2, a2, a1 1: - PTR_ADDU a1, -32 + PTR_ADDU a2, -32 - cache HitWBInvalidate_S, 0(a0) - cache HitWBInvalidate_D, 0(a0) # Kill any orphans... + cache HitWBInvalidate_S, 0(a1) + cache HitWBInvalidate_D, 0(a1) # Kill any orphans... - bgtz a1, 1b - PTR_ADDU a0, 32 + bgtz a2, 1b + PTR_ADDU a1, 32 3: #ifdef CPUR4600 @@ -760,8 +750,8 @@ END(Mips5k_HitSyncSCache) * * Mips5k_HitInvalidateDCache -- * - * void Mips5k_HitInvalidateDCache(addr, len) - * vaddr_t addr, len; + * void Mips5k_HitInvalidateDCache(struct cpu_info *ci, + * vaddr_t addr, size_t len) * * Invalidate data cache for range of addr to addr + len - 1. * The address can be any valid address as long as no TLB misses occur. @@ -783,18 +773,18 @@ LEAF(Mips5k_HitInvalidateDCache, 0) mtc0 v0, COP_0_STATUS_REG # Disable interrupts #endif - beq a1, zero, 3f # size is zero! 
- PTR_ADDU a1, a1, a0 # Add in extra from align - and a0, a0, -32 # Align address - PTR_SUBU a1, a1, a0 + beq a2, zero, 3f # size is zero! + PTR_ADDU a2, a2, a1 # Add in extra from align + and a1, a1, -32 # Align address + PTR_SUBU a2, a2, a1 1: - PTR_ADDU a1, -32 + PTR_ADDU a2, -32 - cache HitInvalidate_D, 0(a0) + cache HitInvalidate_D, 0(a1) - bgtz a1, 1b - PTR_ADDU a0, 32 + bgtz a2, 1b + PTR_ADDU a1, 32 3: #ifdef CPUR4600 @@ -810,8 +800,8 @@ END(Mips5k_HitInvalidateDCache) * * Mips5k_HitInvalidateSCache -- * - * void Mips5k_HitInvalidateSCache(addr, len) - * vaddr_t addr, len; + * static void Mips5k_HitInvalidateSCache(struct cpu_info *ci, + * vaddr_t addr, size_t len) * * Invalidate secondary cache for range of addr to addr + len - 1. * The address can be any valid address as long as no TLB misses occur. @@ -833,18 +823,18 @@ LEAF(Mips5k_HitInvalidateSCache, 0) mtc0 v0, COP_0_STATUS_REG # Disable interrupts #endif - beq a1, zero, 3f # size is zero! - PTR_ADDU a1, a1, a0 # Add in extra from align - and a0, a0, -32 # Align address - PTR_SUBU a1, a1, a0 + beq a2, zero, 3f # size is zero! + PTR_ADDU a2, a2, a1 # Add in extra from align + and a1, a1, -32 # Align address + PTR_SUBU a2, a2, a1 1: - PTR_ADDU a1, -32 + PTR_ADDU a2, -32 - cache HitInvalidate_S, 0(a0) - cache HitInvalidate_D, 0(a0) # Orphans in L1 + cache HitInvalidate_S, 0(a1) + cache HitInvalidate_D, 0(a1) # Orphans in L1 - bgtz a1, 1b - PTR_ADDU a0, 32 + bgtz a2, 1b + PTR_ADDU a1, 32 3: #ifdef CPUR4600 @@ -859,9 +849,8 @@ END(Mips5k_HitInvalidateSCache) * * Mips5k_IOSyncDCache -- * - * void Mips5k_IOSyncDCache(addr, len, rw) - * vaddr_t addr; - * int len, rw; + * void Mips5k_IOSyncDCache(struct cpu_info *ci, + * vaddr_t addr, size_t len, int rw) * * Invalidate or flush data cache for range of addr to addr + len - 1. * The address can be any valid address as long as no TLB misses occur. @@ -893,14 +882,14 @@ NON_LEAF(Mips5k_IOSyncDCache, FRAMESZ(CF_SZ+2*REGSZ), ra) PTR_SUBU sp, FRAMESZ(CF_SZ+2*REGSZ) PTR_S ra, CF_RA_OFFS+2*REGSZ(sp) - REG_S a0, CF_ARGSZ(sp) # save args - beqz a2, SyncRD # Sync PREREAD - REG_S a1, CF_ARGSZ+REGSZ(sp) - addiu a2, -1 - bnez a2, SyncRDWB # Sync PREWRITE+PREREAD + REG_S a1, CF_ARGSZ(sp) # save args + beqz a3, SyncRD # Sync PREREAD + REG_S a2, CF_ARGSZ+REGSZ(sp) + addiu a3, -1 + bnez a3, SyncRDWB # Sync PREWRITE+PREREAD nop - lw t0, CpuCacheType # Sync PREWRITE + lw t0, CI_CACHECONFIGURATION(a0) # Sync PREWRITE and t0, CTYPE_HAS_IL2 # Have internal L2? bnez t0, SyncSC # Yes nop @@ -916,16 +905,16 @@ SyncSC: PTR_L ra, CF_RA_OFFS+2*REGSZ(sp) SyncRD: - and t0, a0, 31 # check if invalidate possible + and t0, a1, 31 # check if invalidate possible bnez t0, SyncRDWB # both address and size must - and t0, a1, 31 # be aligned at the cache size + and t0, a2, 31 # be aligned at the cache size bnez t0, SyncRDWB nop /* * Sync for aligned read, no writeback required. */ - lw t0, CpuCacheType # Aligned, do invalidate + lw t0, CI_CACHECONFIGURATION(a0) # Aligned, do invalidate and t0, CTYPE_HAS_IL2 # Have internal L2? bnez t0, SyncRDL2 nop @@ -941,13 +930,13 @@ SyncRDL2: nop # L1 done in parallel b SyncRDL3 - PTR_L ra, CF_RA_OFFS+2*REGSZ(sp) # L3 invalidate if present + PTR_L ra, CF_RA_OFFS+2*REGSZ(sp) # L3 invalidate if present /* * Sync for unaligned read or write-read. */ SyncRDWB: - lw t0, CpuCacheType + lw t0, CI_CACHECONFIGURATION(a0) and t0, CTYPE_HAS_IL2 # Have internal L2? 
	bnez	t0, SyncRDWBL2		# Yes, do L2
	nop
@@ -956,48 +945,48 @@ SyncRDWB:
	nop
	b	SyncRDXL2
-	PTR_L	ra, CF_RA_OFFS+2*REGSZ(sp)	# External L2 if present
+	PTR_L	ra, CF_RA_OFFS+2*REGSZ(sp)	# External L2 if present
SyncRDWBL2:
	jal	Mips5k_HitSyncSCache	# Internal L2 cache
	nop				# L1 done in parallel
	b	SyncRDL3
-	PTR_L	ra, CF_RA_OFFS+2*REGSZ(sp)	# L3 invalidate if present
+	PTR_L	ra, CF_RA_OFFS+2*REGSZ(sp)	# L3 invalidate if present
SyncRDXL2:
-	lw	t0, CpuCacheType
+	lw	t0, CI_CACHECONFIGURATION(a0)
	and	t0, CTYPE_HAS_XL2	# Have external L2?
	beqz	t0, SyncRDL3		# Nope.
-	REG_L	a0, CF_ARGSZ(sp)
-	REG_L	a1, CF_ARGSZ+REGSZ(sp)
-	and	a2, a0, 4095		# align on page size
-	PTR_SUBU a0, a2
-	PTR_ADDU a1, a2
+	REG_L	a1, CF_ARGSZ(sp)
+	REG_L	a2, CF_ARGSZ+REGSZ(sp)
+	and	a3, a1, 4095		# align on page size
+	PTR_SUBU a1, a3
+	PTR_ADDU a2, a3
50:
-	blez	a1, SyncDone
-	PTR_SUBU a1, 4096		# Fixed cache page size.
+	blez	a2, SyncDone
+	PTR_SUBU a2, 4096		# Fixed cache page size.
-	cache	InvalidateSecondaryPage, 0(a0)
+	cache	InvalidateSecondaryPage, 0(a1)
	b	50b
-	PTR_ADDU a0, 4096
+	PTR_ADDU a1, 4096
SyncRDL3:
-	lw	t0, CpuCacheType
+	lw	t0, CI_CACHECONFIGURATION(a0)
	and	t0, CTYPE_HAS_XL3	# Have L3?
	beqz	t0, SyncDone		# Nope.
-	REG_L	a0, CF_ARGSZ(sp)
-	REG_L	a1, CF_ARGSZ+REGSZ(sp)
-	and	a2, a0, 4095		# align on page size
-	PTR_SUBU a0, a2
-	PTR_ADDU a1, a2
+	REG_L	a1, CF_ARGSZ(sp)
+	REG_L	a2, CF_ARGSZ+REGSZ(sp)
+	and	a3, a1, 4095		# align on page size
+	PTR_SUBU a1, a3
+	PTR_ADDU a2, a3
40:
-	blez	a1, SyncDone
-	PTR_SUBU a1, 4096		# Fixed cache page size.
+	blez	a2, SyncDone
+	PTR_SUBU a2, 4096		# Fixed cache page size.
-	cache	InvalidatePage_T, 0(a0)
+	cache	InvalidatePage_T, 0(a1)
	b	40b
-	PTR_ADDU a0, 4096
+	PTR_ADDU a1, 4096
SyncDone:
	j	ra
diff --git a/sys/arch/mips64/mips64/cp0access.S b/sys/arch/mips64/mips64/cp0access.S
index 3b95bba058a..5888e611b66 100644
--- a/sys/arch/mips64/mips64/cp0access.S
+++ b/sys/arch/mips64/mips64/cp0access.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: cp0access.S,v 1.11 2009/11/19 20:15:04 miod Exp $ */
+/* $OpenBSD: cp0access.S,v 1.12 2010/01/09 23:34:29 miod Exp $ */
/*
 * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -148,6 +148,12 @@ LEAF(getsr, 0)
	nop
END(getsr)
+LEAF(cp0_get_config, 0)
+	mfc0	v0, COP_0_CONFIG
+	j	ra
+	nop
+END(cp0_get_config)
+
LEAF(cp0_get_prid, 0)
	mfc0	v0, COP_0_PRID
	j	ra
diff --git a/sys/arch/mips64/mips64/cpu.c b/sys/arch/mips64/mips64/cpu.c
index 5c7a6e5d2e3..802f8357eae 100644
--- a/sys/arch/mips64/mips64/cpu.c
+++ b/sys/arch/mips64/mips64/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.24 2010/01/09 20:33:16 miod Exp $ */
+/* $OpenBSD: cpu.c,v 1.25 2010/01/09 23:34:29 miod Exp $ */
/*
 * Copyright (c) 1997-2004 Opsycon AB (www.opsycon.se)
@@ -52,21 +52,7 @@ struct cpuset cpus_running;
struct cpu_info *cpu_info[MAXCPUS] = { &cpu_info_primary };
#endif
-u_int	CpuPrimaryInstCacheSize;
-u_int	CpuPrimaryInstCacheLSize;
-u_int	CpuPrimaryInstSetSize;
-u_int	CpuPrimaryDataCacheSize;
-u_int	CpuPrimaryDataCacheLSize;
-u_int	CpuPrimaryDataSetSize;
-u_int	CpuCacheAliasMask;
-u_int	CpuSecondaryCacheSize;
-u_int	CpuTertiaryCacheSize;
-u_int	CpuNWayCache;
-u_int	CpuCacheType;		/* R4K, R5K, RM7K */
-u_int	CpuConfigRegister;
-u_int	CpuStatusRegister;
-u_int	CpuExternalCacheOn;	/* R5K, RM7K */
-u_int	CpuOnboardCacheOn;	/* RM7K */
+vaddr_t	CpuCacheAliasMask;
int cpu_is_rm7k = 0;
@@ -131,7 +117,7 @@ cpuattach(struct device *parent, struct device *dev, void *aux)
	vers_min = ch->c0prid & 0x0f;
	switch (ch->type) {
	case MIPS_R4000:
-		if (CpuPrimaryInstCacheSize == 16384)
+		if (ci->ci_l1instcachesize == 16384)
			printf("MIPS R4400 CPU");
		else
			printf("MIPS R4000 CPU");
@@ -245,10 +231,10 @@ cpuattach(struct device *parent, struct device *dev, void *aux)
		printf(" rev %d.%d", vers_maj, vers_min);
	printf("\n");
-	printf("cpu%d: cache L1-I %dKB", cpuno, CpuPrimaryInstCacheSize / 1024);
-	printf(" D %dKB ", CpuPrimaryDataCacheSize / 1024);
+	printf("cpu%d: cache L1-I %dKB D %dKB ", cpuno,
+	    ci->ci_l1instcachesize / 1024, ci->ci_l1datacachesize / 1024);
-	switch (CpuNWayCache) {
+	switch (ci->ci_cacheways) {
	case 2:
		printf("2 way");
		break;
@@ -260,58 +246,59 @@ cpuattach(struct device *parent, struct device *dev, void *aux)
		break;
	}
-	if (CpuSecondaryCacheSize != 0) {
+	if (ci->ci_l2size != 0) {
		switch (ch->type) {
		case MIPS_R10000:
		case MIPS_R12000:
		case MIPS_R14000:
-			printf(", L2 %dKB 2 way", CpuSecondaryCacheSize / 1024);
+			printf(", L2 %dKB 2 way", ci->ci_l2size / 1024);
			break;
		case MIPS_RM7000:
		case MIPS_RM9000:
		case MIPS_LOONGSON2:
-			printf(", L2 %dKB 4 way", CpuSecondaryCacheSize / 1024);
+			printf(", L2 %dKB 4 way", ci->ci_l2size / 1024);
			break;
		default:
-			printf(", L2 %dKB direct", CpuSecondaryCacheSize / 1024);
+			printf(", L2 %dKB direct", ci->ci_l2size / 1024);
			break;
		}
	}
-	if (CpuTertiaryCacheSize != 0)
-		printf(", L3 %dKB direct", CpuTertiaryCacheSize / 1024);
+	if (ci->ci_l3size != 0)
+		printf(", L3 %dKB direct", ci->ci_l3size / 1024);
	printf("\n");
#ifdef DEBUG
	printf("cpu%d: Setsize %d:%d\n", cpuno,
-	    CpuPrimaryInstSetSize, CpuPrimaryDataSetSize);
-	printf("cpu%d: Alias mask 0x%x\n", cpuno, CpuCacheAliasMask);
-	printf("cpu%d: Config Register %x\n", cpuno, CpuConfigRegister);
-	printf("cpu%d: Cache type %x\n", cpuno, CpuCacheType);
+	    ci->ci_l1instset, ci->ci_l1dataset);
+	printf("cpu%d: Alias mask %p\n", cpuno, CpuCacheAliasMask);
+	printf("cpu%d: Config Register %08x\n", cpuno, cp0_get_config());
+	printf("cpu%d: Cache configuration %x\n",
+	    cpuno, ci->ci_cacheconfiguration);
	if (ch->type == MIPS_RM7000) {
-		u_int tmp = CpuConfigRegister;
+		uint32_t tmp = cp0_get_config();
		printf("cpu%d: ", cpuno);
-		printf("K0 = %1d ",0x7 & tmp);
-		printf("SE = %1d ",0x1 & (tmp>>3));
-		printf("DB = %1d ",0x1 & (tmp>>4));
-		printf("IB = %1d\n",0x1 & (tmp>>5));
+		printf("K0 = %1d ", 0x7 & tmp);
+		printf("SE = %1d ", 0x1 & (tmp>>3));
+		printf("DB = %1d ", 0x1 & (tmp>>4));
+		printf("IB = %1d\n", 0x1 & (tmp>>5));
		printf("cpu%d: ", cpuno);
-		printf("DC = %1d ",0x7 & (tmp>>6));
-		printf("IC = %1d ",0x7 & (tmp>>9));
-		printf("TE = %1d ",0x1 & (tmp>>12));
-		printf("EB = %1d\n",0x1 & (tmp>>13));
+		printf("DC = %1d ", 0x7 & (tmp>>6));
+		printf("IC = %1d ", 0x7 & (tmp>>9));
+		printf("TE = %1d ", 0x1 & (tmp>>12));
+		printf("EB = %1d\n", 0x1 & (tmp>>13));
		printf("cpu%d: ", cpuno);
-		printf("EM = %1d ",0x1 & (tmp>>14));
-		printf("BE = %1d ",0x1 & (tmp>>15));
-		printf("TC = %1d ",0x1 & (tmp>>17));
-		printf("EW = %1d\n",0x3 & (tmp>>18));
+		printf("EM = %1d ", 0x1 & (tmp>>14));
+		printf("BE = %1d ", 0x1 & (tmp>>15));
+		printf("TC = %1d ", 0x1 & (tmp>>17));
+		printf("EW = %1d\n", 0x3 & (tmp>>18));
		printf("cpu%d: ", cpuno);
-		printf("TS = %1d ",0x3 & (tmp>>20));
-		printf("EP = %1d ",0xf & (tmp>>24));
-		printf("EC = %1d ",0x7 & (tmp>>28));
-		printf("SC = %1d\n",0x1 & (tmp>>31));
+		printf("TS = %1d ", 0x3 & (tmp>>20));
+		printf("EP = %1d ", 0xf & (tmp>>24));
+		printf("EC = %1d ", 0x7 & (tmp>>28));
+		printf("SC = %1d\n", 0x1 & (tmp>>31));
	}
-	printf("cpu%d: Status Register %x\n", cpuno, CpuStatusRegister);
+	printf("cpu%d: Status Register %08x\n", cpuno, getsr());
#endif
}
diff --git a/sys/arch/mips64/mips64/db_machdep.c b/sys/arch/mips64/mips64/db_machdep.c
index 9cd02ddadf2..010f2e7efd5 100644
--- a/sys/arch/mips64/mips64/db_machdep.c
+++ b/sys/arch/mips64/mips64/db_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: db_machdep.c,v 1.22 2010/01/09 20:33:16 miod Exp $ */
+/* $OpenBSD: db_machdep.c,v 1.23 2010/01/09 23:34:29 miod Exp $ */
/*
 * Copyright (c) 1998-2003 Opsycon AB (www.opsycon.se)
@@ -197,10 +197,12 @@ db_write_bytes(addr, size, data)
			kdbpokeb(ptr, *data++);
	}
	if (addr < VM_MAXUSER_ADDRESS) {
+		struct cpu_info *ci = curcpu();
+
		/* XXX we don't know where this page is mapped... */
-		Mips_HitSyncDCache(addr, PHYS_TO_XKPHYS(addr, CCA_CACHED),
+		Mips_HitSyncDCache(ci, addr, PHYS_TO_XKPHYS(addr, CCA_CACHED),
		    size);
-		Mips_InvalidateICache(PHYS_TO_CKSEG0(addr & 0xffff), size);
+		Mips_InvalidateICache(ci, PHYS_TO_CKSEG0(addr & 0xffff), size);
	}
}
diff --git a/sys/arch/mips64/mips64/genassym.cf b/sys/arch/mips64/mips64/genassym.cf
index 9185d298736..3b455fdf17b 100644
--- a/sys/arch/mips64/mips64/genassym.cf
+++ b/sys/arch/mips64/mips64/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.4 2010/01/08 06:35:16 syuu Exp $
+# $OpenBSD: genassym.cf,v 1.5 2010/01/09 23:34:29 miod Exp $
#
# Copyright (c) 1997 Per Fogelstrom / Opsycon AB
#
@@ -62,6 +62,16 @@ member ci_curproc
member ci_curprocpaddr
member ci_fpuproc
member ci_ipl
+member ci_cacheconfiguration
+member ci_cacheways
+member ci_l1instcachesize
+member ci_l1instcacheline
+member ci_l1instcacheset
+member ci_l1datacachesize
+member ci_l1datacacheline
+member ci_l1datacacheset
+member ci_l2size
+member ci_l3size
export CKSEG0_BASE
export CKSEG1_BASE
diff --git a/sys/arch/mips64/mips64/pmap.c b/sys/arch/mips64/mips64/pmap.c
index 6f274949350..bf54b232223 100644
--- a/sys/arch/mips64/mips64/pmap.c
+++ b/sys/arch/mips64/mips64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.47 2010/01/09 20:33:16 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.48 2010/01/09 23:34:29 miod Exp $ */
/*
 * Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -754,6 +754,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
	vaddr_t nssva;
	pt_entry_t *pte, entry;
	u_int p;
+	struct cpu_info *ci = curcpu();
	DPRINTF(PDB_FOLLOW|PDB_PROTECT, ("pmap_protect(%p, %p, %p, %p)\n", pmap, sva, eva, prot));
@@ -786,7 +787,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
				continue;
			if ((entry & PG_M) != 0 /* && p != PG_M */)
				if ((entry & PG_CACHEMODE) == PG_CACHED)
-					Mips_HitSyncDCache(sva,
+					Mips_HitSyncDCache(ci, sva,
					    pfn_to_pad(entry), PAGE_SIZE);
			entry = (entry & ~(PG_M | PG_RO)) | p;
			*pte = entry;
@@ -824,7 +825,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
			continue;
		if ((entry & PG_M) != 0 /* && p != PG_M */)
			if ((entry & PG_CACHEMODE) == PG_CACHED)
-				Mips_SyncDCachePage(sva,
+				Mips_SyncDCachePage(ci, sva,
				    pfn_to_pad(entry));
		entry = (entry & ~(PG_M | PG_RO)) | p;
		*pte = entry;
@@ -847,7 +848,8 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
	pt_entry_t *pte, npte;
	vm_page_t pg;
-	u_long cpuid = cpu_number();
+	struct cpu_info *ci = curcpu();
+	u_long cpuid = ci->ci_cpuid;
	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("pmap_enter(%p, %p, %p, %p, %p)\n", pmap, va, pa, prot, flags));
@@ -990,7 +992,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
	 * If mapping a memory space address invalidate ICache.
	 */
	if (pg != NULL && (prot & VM_PROT_EXECUTE))
-		Mips_InvalidateICache(va, PAGE_SIZE);
+		Mips_InvalidateICache(ci, va, PAGE_SIZE);
	return 0;
}
@@ -1029,6 +1031,7 @@ pmap_kremove(vaddr_t va, vsize_t len)
{
	pt_entry_t *pte, entry;
	vaddr_t eva;
+	struct cpu_info *ci = curcpu();
	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("pmap_kremove(%p, %p)\n", va, len));
@@ -1043,7 +1046,7 @@ pmap_kremove(vaddr_t va, vsize_t len)
		entry = *pte;
		if (!(entry & PG_V))
			continue;
-		Mips_HitSyncDCache(va, pfn_to_pad(entry), PAGE_SIZE);
+		Mips_HitSyncDCache(ci, va, pfn_to_pad(entry), PAGE_SIZE);
		*pte = PG_NV | PG_G;
		pmap_invalidate_kernel_page(va);
	}
@@ -1150,6 +1153,7 @@ pmap_zero_page(struct vm_page *pg)
	paddr_t phys = VM_PAGE_TO_PHYS(pg);
	vaddr_t va;
	pv_entry_t pv;
+	struct cpu_info *ci = curcpu();
	DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%p)\n", phys));
@@ -1157,10 +1161,10 @@ pmap_zero_page(struct vm_page *pg)
	pv = pg_to_pvh(pg);
	if ((pg->pg_flags & PV_CACHED) &&
	    ((pv->pv_va ^ va) & CpuCacheAliasMask) != 0) {
-		Mips_SyncDCachePage(pv->pv_va, phys);
+		Mips_SyncDCachePage(ci, pv->pv_va, phys);
	}
	mem_zero_page(va);
-	Mips_HitSyncDCache(va, phys, PAGE_SIZE);
+	Mips_HitSyncDCache(ci, va, phys, PAGE_SIZE);
}
/*
@@ -1178,6 +1182,7 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
	int df = 1;
	int sf = 1;
	pv_entry_t pv;
+	struct cpu_info *ci = curcpu();
	src = VM_PAGE_TO_PHYS(srcpg);
	dst = VM_PAGE_TO_PHYS(dstpg);
@@ -1189,20 +1194,20 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
	pv = pg_to_pvh(srcpg);
	if ((srcpg->pg_flags & PV_CACHED) &&
	    (sf = ((pv->pv_va ^ s) & CpuCacheAliasMask) != 0)) {
-		Mips_SyncDCachePage(pv->pv_va, src);
+		Mips_SyncDCachePage(ci, pv->pv_va, src);
	}
	pv = pg_to_pvh(dstpg);
	if ((dstpg->pg_flags & PV_CACHED) &&
	    (df = ((pv->pv_va ^ d) & CpuCacheAliasMask) != 0)) {
-		Mips_SyncDCachePage(pv->pv_va, dst);
+		Mips_SyncDCachePage(ci, pv->pv_va, dst);
	}
	memcpy((void *)d, (void *)s, PAGE_SIZE);
	if (sf) {
-		Mips_HitSyncDCache(s, src, PAGE_SIZE);
+		Mips_HitSyncDCache(ci, s, src, PAGE_SIZE);
	}
-	Mips_HitSyncDCache(d, dst, PAGE_SIZE);
+	Mips_HitSyncDCache(ci, d, dst, PAGE_SIZE);
}
/*
@@ -1226,7 +1231,7 @@ pmap_clear_modify(struct vm_page *pg)
		rv = TRUE;
	}
	if (pg->pg_flags & PV_CACHED)
-		Mips_SyncDCachePage(pv->pv_va, VM_PAGE_TO_PHYS(pg));
+		Mips_SyncDCachePage(curcpu(), pv->pv_va, VM_PAGE_TO_PHYS(pg));
	for (; pv != NULL; pv = pv->pv_next) {
		if (pv->pv_pmap == pmap_kernel()) {
@@ -1472,7 +1477,7 @@ pmap_enter_pv(pmap_t pmap, vaddr_t va, vm_page_t pg, pt_entry_t *npte)
				    VM_PAGE_TO_PHYS(pg), npv->pv_va, va);
#endif
				pmap_page_cache(pg, PV_UNCACHED);
-				Mips_SyncDCachePage(pv->pv_va,
+				Mips_SyncDCachePage(curcpu(), pv->pv_va,
				    VM_PAGE_TO_PHYS(pg));
				*npte = (*npte & ~PG_CACHEMODE) | PG_UNCACHED;
			}
@@ -1522,6 +1527,7 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, paddr_t pa)
	pv_entry_t pv, npv;
	vm_page_t pg;
	int s;
+	struct cpu_info *ci = curcpu();
	DPRINTF(PDB_FOLLOW|PDB_PVENTRY, ("pmap_remove_pv(%p, %p, %p)\n", pmap, va, pa));
@@ -1542,7 +1548,7 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, paddr_t pa)
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		if (pg->pg_flags & PV_CACHED)
-			Mips_SyncDCachePage(va, pa);
+			Mips_SyncDCachePage(ci, va, pa);
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
@@ -1562,7 +1568,7 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, paddr_t pa)
		}
		if (npv != NULL) {
			if (pg->pg_flags & PV_CACHED)
-				Mips_SyncDCachePage(va, pa);
+				Mips_SyncDCachePage(ci, va, pa);
			pv->pv_next = npv->pv_next;
			pmap_pv_free(npv);
		} else {
@@ -1613,6 +1619,6 @@ pmap_pg_free(struct pool *pp, void *item)
	paddr_t pa = XKPHYS_TO_PHYS(va);
	vm_page_t pg = PHYS_TO_VM_PAGE(pa);
-	Mips_HitInvalidateDCache(va, pa, PAGE_SIZE);
+	Mips_HitInvalidateDCache(curcpu(), va, pa, PAGE_SIZE);
	uvm_pagefree(pg);
}
diff --git a/sys/arch/mips64/mips64/sys_machdep.c b/sys/arch/mips64/mips64/sys_machdep.c
index ab0c1eec0f1..930f4b60768 100644
--- a/sys/arch/mips64/mips64/sys_machdep.c
+++ b/sys/arch/mips64/mips64/sys_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sys_machdep.c,v 1.4 2009/12/25 21:02:15 miod Exp $ */
+/* $OpenBSD: sys_machdep.c,v 1.5 2010/01/09 23:34:29 miod Exp $ */
/*
 * Copyright (c) 1992, 1993
@@ -124,9 +124,9 @@ mips64_cacheflush(struct proc *p, struct mips64_cacheflush_args *cfa)
	 */
	if (pmap_extract(pm, va, &pa) != FALSE) {
		if (cfa->which & ICACHE)
-			Mips_InvalidateICache(va, chunk);
+			Mips_InvalidateICache(p->p_cpu, va, chunk);
		if (cfa->which & DCACHE)
-			Mips_HitSyncDCache(va, pa, chunk);
+			Mips_HitSyncDCache(p->p_cpu, va, pa, chunk);
	} else {
		if (uvm_map_lookup_entry(map, va, &entry) == FALSE) {
			rc = EFAULT;
diff --git a/sys/arch/mips64/mips64/trap.c b/sys/arch/mips64/mips64/trap.c
index f6a1de4982f..6fb16bcb460 100644
--- a/sys/arch/mips64/mips64/trap.c
+++ b/sys/arch/mips64/mips64/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.57 2010/01/08 01:35:52 syuu Exp $ */
+/* $OpenBSD: trap.c,v 1.58 2010/01/09 23:34:29 miod Exp $ */
/*
 * Copyright (c) 1988 University of Utah.
@@ -580,7 +580,7 @@ printf("SIG-BUSB @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapfr
			locr0->a3 = 1;
		}
		if (code == SYS_ptrace)
-			Mips_SyncCache();
+			Mips_SyncCache(curcpu());
#ifdef SYSCALL_DEBUG
		KERNEL_PROC_LOCK(p);
		scdebug_ret(p, code, i, rval);
@@ -668,7 +668,7 @@ printf("SIG-BUSB @%p pc %p, ra %p\n", trapframe->badvaddr, trapframe->pc, trapfr
		uio.uio_procp = curproc;
		error = process_domem(curproc, p, &uio, PT_WRITE_I);
-		Mips_SyncCache();
+		Mips_SyncCache(curcpu());
		if (error)
			printf("Warning: can't restore instruction at %x: %x\n",
@@ -1105,7 +1105,7 @@ cpu_singlestep(p)
	uio.uio_rw = UIO_WRITE;
	uio.uio_procp = curproc;
	error = process_domem(curproc, p, &uio, PT_WRITE_I);
-	Mips_SyncCache();
+	Mips_SyncCache(curcpu());
	if (error)
		return (EFAULT);
diff --git a/sys/arch/sgi/include/autoconf.h b/sys/arch/sgi/include/autoconf.h
index f638cd0a560..b2325bd0212 100644
--- a/sys/arch/sgi/include/autoconf.h
+++ b/sys/arch/sgi/include/autoconf.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: autoconf.h,v 1.28 2010/01/09 20:33:16 miod Exp $ */
+/* $OpenBSD: autoconf.h,v 1.29 2010/01/09 23:34:29 miod Exp $ */
/*
 * Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -38,17 +38,19 @@
/*
 * Structure holding all misc config information.
 */
+struct cpu_info;
+
struct sys_rec {
	int	system_type;
	int	system_subtype;		/* IP35 only */
	/* Published cache operations. */
-	void	(*_SyncCache)(void);
-	void	(*_InvalidateICache)(vaddr_t, size_t);
-	void	(*_SyncDCachePage)(vaddr_t);
-	void	(*_HitSyncDCache)(vaddr_t, size_t);
-	void	(*_IOSyncDCache)(vaddr_t, size_t, int);
-	void	(*_HitInvalidateDCache)(vaddr_t, size_t);
+	void	(*_SyncCache)(struct cpu_info *);
+	void	(*_InvalidateICache)(struct cpu_info *, vaddr_t, size_t);
+	void	(*_SyncDCachePage)(struct cpu_info *, vaddr_t);
+	void	(*_HitSyncDCache)(struct cpu_info *, vaddr_t, size_t);
+	void	(*_IOSyncDCache)(struct cpu_info *, vaddr_t, size_t, int);
+	void	(*_HitInvalidateDCache)(struct cpu_info *, vaddr_t, size_t);
	/* Serial console configuration. */
	struct mips_bus_space console_io;
diff --git a/sys/arch/sgi/include/cpu.h b/sys/arch/sgi/include/cpu.h
index 9bfd6a126c6..cfa370c3622 100644
--- a/sys/arch/sgi/include/cpu.h
+++ b/sys/arch/sgi/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.7 2009/12/28 07:18:39 syuu Exp $ */
+/* $OpenBSD: cpu.h,v 1.8 2010/01/09 23:34:29 miod Exp $ */
/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California. All rights reserved.
@@ -75,18 +75,18 @@ void hw_ipi_intr_clear(u_long);
/*
 * Define soft selected cache functions.
 */
-#define	Mips_SyncCache() \
-	(*(sys_config._SyncCache))()
-#define	Mips_InvalidateICache(va, l) \
-	(*(sys_config._InvalidateICache))((va), (l))
-#define	Mips_SyncDCachePage(va, pa) \
-	(*(sys_config._SyncDCachePage))((va))
-#define	Mips_HitSyncDCache(va, pa, l) \
-	(*(sys_config._HitSyncDCache))((va), (l))
-#define	Mips_IOSyncDCache(va, pa, l, h) \
-	(*(sys_config._IOSyncDCache))((va), (l), (h))
-#define	Mips_HitInvalidateDCache(va, pa, l) \
-	(*(sys_config._HitInvalidateDCache))((va), (l))
+#define	Mips_SyncCache(ci) \
+	(*(sys_config._SyncCache))((ci))
+#define	Mips_InvalidateICache(ci, va, l) \
+	(*(sys_config._InvalidateICache))((ci), (va), (l))
+#define	Mips_SyncDCachePage(ci, va, pa) \
+	(*(sys_config._SyncDCachePage))((ci), (va))
+#define	Mips_HitSyncDCache(ci, va, pa, l) \
+	(*(sys_config._HitSyncDCache))((ci), (va), (l))
+#define	Mips_IOSyncDCache(ci, va, pa, l, h) \
+	(*(sys_config._IOSyncDCache))((ci), (va), (l), (h))
+#define	Mips_HitInvalidateDCache(ci, va, pa, l) \
+	(*(sys_config._HitInvalidateDCache))((ci), (va), (l))
#endif/* _KERNEL */
diff --git a/sys/arch/sgi/sgi/bus_dma.c b/sys/arch/sgi/sgi/bus_dma.c
index 7a226d4bf79..f5967a1c60f 100644
--- a/sys/arch/sgi/sgi/bus_dma.c
+++ b/sys/arch/sgi/sgi/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.16 2009/12/25 21:02:18 miod Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.17 2010/01/09 23:34:29 miod Exp $ */
/*
 * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -312,6 +312,7 @@ _dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
#define SYNC_X 2	/* WB writeback + invalidate, WT invalidate */
	int nsegs;
	int curseg;
+	struct cpu_info *ci = curcpu();
	nsegs = map->dm_nsegs;
	curseg = 0;
@@ -353,20 +354,22 @@ _dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
			 */
			if (op & BUS_DMASYNC_PREWRITE) {
#ifdef TGT_COHERENT
-				Mips_IOSyncDCache(vaddr, paddr, ssize, SYNC_W);
+				Mips_IOSyncDCache(ci, vaddr, paddr,
+				    ssize, SYNC_W);
#else
				if (op & BUS_DMASYNC_PREREAD)
-					Mips_IOSyncDCache(vaddr, paddr, ssize,
-					    SYNC_X);
+					Mips_IOSyncDCache(ci, vaddr, paddr,
+					    ssize, SYNC_X);
				else
-					Mips_IOSyncDCache(vaddr, paddr, ssize,
-					    SYNC_W);
+					Mips_IOSyncDCache(ci, vaddr, paddr,
+					    ssize, SYNC_W);
#endif
			} else
			if (op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD)) {
#ifdef TGT_COHERENT
#else
-				Mips_IOSyncDCache(vaddr, paddr, ssize, SYNC_R);
+				Mips_IOSyncDCache(ci, vaddr, paddr,
+				    ssize, SYNC_R);
#endif
			}
			size -= ssize;
diff --git a/sys/arch/sgi/sgi/ip30_machdep.c b/sys/arch/sgi/sgi/ip30_machdep.c
index 64392b292ed..a5802857308 100644
--- a/sys/arch/sgi/sgi/ip30_machdep.c
+++ b/sys/arch/sgi/sgi/ip30_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ip30_machdep.c,v 1.30 2010/01/09 20:33:16 miod Exp $ */
+/* $OpenBSD: ip30_machdep.c,v 1.31 2010/01/09 23:34:29 miod Exp $ */
/*
 * Copyright (c) 2008, 2009 Miodrag Vallat.
@@ -416,7 +416,7 @@ hw_cpu_hatch(struct cpu_info *ci)
	 */
	setsr(getsr() | SR_KX | SR_UX);
-	Mips10k_ConfigCache();
+	Mips10k_ConfigCache(ci);
	tlb_set_page_mask(TLB_PAGE_MASK);
	tlb_set_wired(0);
@@ -433,7 +433,7 @@ hw_cpu_hatch(struct cpu_info *ci)
	/*
	 * Clear out the I and D caches.
	 */
-	Mips_SyncCache();
+	Mips_SyncCache(ci);
	cpu_startclock(ci);
diff --git a/sys/arch/sgi/sgi/machdep.c b/sys/arch/sgi/sgi/machdep.c
index b05941d6829..1b26fdef6b8 100644
--- a/sys/arch/sgi/sgi/machdep.c
+++ b/sys/arch/sgi/sgi/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.96 2010/01/09 20:33:16 miod Exp $ */
+/* $OpenBSD: machdep.c,v 1.97 2010/01/09 23:34:29 miod Exp $ */
/*
 * Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -385,7 +385,7 @@ mips_init(int argc, void *argv, caddr_t boot_esym)
	default:
#if defined(CPU_R5000) || defined(CPU_RM7000)
	case MIPS_R5000:
-		Mips5k_ConfigCache();
+		Mips5k_ConfigCache(curcpu());
		sys_config._SyncCache = Mips5k_SyncCache;
		sys_config._InvalidateICache = Mips5k_InvalidateICache;
		sys_config._SyncDCachePage = Mips5k_SyncDCachePage;
@@ -396,7 +396,7 @@ mips_init(int argc, void *argv, caddr_t boot_esym)
#endif
#ifdef CPU_R10000
	case MIPS_R10000:
-		Mips10k_ConfigCache();
+		Mips10k_ConfigCache(curcpu());
		sys_config._SyncCache = Mips10k_SyncCache;
		sys_config._InvalidateICache = Mips10k_InvalidateICache;
		sys_config._SyncDCachePage = Mips10k_SyncDCachePage;
@@ -494,7 +494,7 @@ mips_init(int argc, void *argv, caddr_t boot_esym)
	/*
	 * Clear out the I and D caches.
	 */
-	Mips_SyncCache();
+	Mips_SyncCache(curcpu());
#ifdef DDB
	db_machine_init();