author    Miod Vallat <miod@cvs.openbsd.org>    2009-12-25 21:02:19 +0000
committer Miod Vallat <miod@cvs.openbsd.org>    2009-12-25 21:02:19 +0000
commit    3836ca8306b726cffaf7e80d5dfb0a635b09c6b0 (patch)
tree      add3a4bf286f00beaa238d506fad53482f7f0d2e
parent    ca308c11ec65dd43d8b9a8592692d0479e41c60b (diff)
Pass both the virtual address and the physical address of the memory range
when invoking the cache functions.

The physical address is needed when operating on physically-indexed caches,
such as the L2 cache on Loongson processors. Preprocessor abuse makes sure
that the physical address computation gets compiled out when running on a
kernel compiled for virtually-indexed caches only, such as the sgi kernel.
-rw-r--r--  sys/arch/mips64/include/cpu.h              43
-rw-r--r--  sys/arch/mips64/mips64/cache_loongson2.S  471
-rw-r--r--  sys/arch/mips64/mips64/db_machdep.c         6
-rw-r--r--  sys/arch/mips64/mips64/pmap.c              41
-rw-r--r--  sys/arch/mips64/mips64/sys_machdep.c        6
-rw-r--r--  sys/arch/sgi/include/autoconf.h            10
-rw-r--r--  sys/arch/sgi/include/bus.h                  5
-rw-r--r--  sys/arch/sgi/include/cpu.h                 89
-rw-r--r--  sys/arch/sgi/sgi/bus_dma.c                 23
-rw-r--r--  sys/arch/sgi/xbow/xbridge.c                16
10 files changed, 310 insertions, 400 deletions
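In caller terms the interface change looks as follows; a minimal sketch
assembled from the pmap.c hunks below (the macro names are real, the lines
themselves are illustrative):

    /* before: cache routines took a virtual address and an int length */
    Mips_HitSyncDCache(va, PAGE_SIZE);

    /* after: callers pass both addresses (and size_t lengths); each port's
     * <machine/cpu.h> forwards whichever address its caches are indexed by */
    Mips_HitSyncDCache(va, pa, PAGE_SIZE);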
diff --git a/sys/arch/mips64/include/cpu.h b/sys/arch/mips64/include/cpu.h
index 8ef35ced8ef..34cb7ecc529 100644
--- a/sys/arch/mips64/include/cpu.h
+++ b/sys/arch/mips64/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.47 2009/12/07 19:05:57 miod Exp $ */
+/* $OpenBSD: cpu.h,v 1.48 2009/12/25 21:02:13 miod Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -212,7 +212,7 @@ extern vaddr_t uncached_base;
/*
* Location of exception vectors.
*/
-#define RESET_EXC_VEC (CKSEG0_BASE + 0x3fc00000)
+#define RESET_EXC_VEC (CKSEG1_BASE + 0x1fc00000)
#define TLB_MISS_EXC_VEC (CKSEG0_BASE + 0x00000000)
#define XTLB_MISS_EXC_VEC (CKSEG0_BASE + 0x00000080)
#define CACHE_ERR_EXC_VEC (CKSEG0_BASE + 0x00000100)
@@ -563,43 +563,32 @@ void tlb_set_pid(int);
void tlb_set_wired(int);
/*
- * Define soft selected cache functions.
+ * Available cache operation routines. See <machine/cpu.h> for more.
*/
-#define Mips_SyncCache() (*(sys_config._SyncCache))()
-#define Mips_InvalidateICache(a, l) \
- (*(sys_config._InvalidateICache))((a), (l))
-#define Mips_SyncDCachePage(a) \
- (*(sys_config._SyncDCachePage))((a))
-#define Mips_HitSyncDCache(a, l) \
- (*(sys_config._HitSyncDCache))((a), (l))
-#define Mips_IOSyncDCache(a, l, h) \
- (*(sys_config._IOSyncDCache))((a), (l), (h))
-#define Mips_HitInvalidateDCache(a, l) \
- (*(sys_config._HitInvalidateDCache))((a), (l))
int Loongson2_ConfigCache(void);
void Loongson2_SyncCache(void);
-void Loongson2_InvalidateICache(vaddr_t, int);
-void Loongson2_SyncDCachePage(vaddr_t);
-void Loongson2_HitSyncDCache(vaddr_t, int);
-void Loongson2_IOSyncDCache(vaddr_t, int, int);
-void Loongson2_HitInvalidateDCache(vaddr_t, int);
+void Loongson2_InvalidateICache(vaddr_t, size_t);
+void Loongson2_SyncDCachePage(paddr_t);
+void Loongson2_HitSyncDCache(paddr_t, size_t);
+void Loongson2_HitInvalidateDCache(paddr_t, size_t);
+void Loongson2_IOSyncDCache(paddr_t, size_t, int);
int Mips5k_ConfigCache(void);
void Mips5k_SyncCache(void);
-void Mips5k_InvalidateICache(vaddr_t, int);
+void Mips5k_InvalidateICache(vaddr_t, size_t);
void Mips5k_SyncDCachePage(vaddr_t);
-void Mips5k_HitSyncDCache(vaddr_t, int);
-void Mips5k_IOSyncDCache(vaddr_t, int, int);
-void Mips5k_HitInvalidateDCache(vaddr_t, int);
+void Mips5k_HitSyncDCache(vaddr_t, size_t);
+void Mips5k_HitInvalidateDCache(vaddr_t, size_t);
+void Mips5k_IOSyncDCache(vaddr_t, size_t, int);
int Mips10k_ConfigCache(void);
void Mips10k_SyncCache(void);
-void Mips10k_InvalidateICache(vaddr_t, int);
+void Mips10k_InvalidateICache(vaddr_t, size_t);
void Mips10k_SyncDCachePage(vaddr_t);
-void Mips10k_HitSyncDCache(vaddr_t, int);
-void Mips10k_IOSyncDCache(vaddr_t, int, int);
-void Mips10k_HitInvalidateDCache(vaddr_t, int);
+void Mips10k_HitSyncDCache(vaddr_t, size_t);
+void Mips10k_HitInvalidateDCache(vaddr_t, size_t);
+void Mips10k_IOSyncDCache(vaddr_t, size_t, int);
void tlb_flush(int);
void tlb_flush_addr(vaddr_t);
diff --git a/sys/arch/mips64/mips64/cache_loongson2.S b/sys/arch/mips64/mips64/cache_loongson2.S
index 1b725fc7e10..db0b64d5ec0 100644
--- a/sys/arch/mips64/mips64/cache_loongson2.S
+++ b/sys/arch/mips64/mips64/cache_loongson2.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: cache_loongson2.S,v 1.2 2009/11/19 20:16:27 miod Exp $ */
+/* $OpenBSD: cache_loongson2.S,v 1.3 2009/12/25 21:02:15 miod Exp $ */
/*
* Copyright (c) 2009 Miodrag Vallat.
@@ -42,12 +42,11 @@
*/
/*
- * Processors supported:
- * Loongson 2E/2F
+ * Processors supported:
+ * Loongson 2E/2F (code could be modified to work on 2C by not hardcoding
+ * the number of ways).
*/
-#include <sys/syscall.h>
-
#include <machine/param.h>
#include <machine/asm.h>
#include <machine/cpu.h>
@@ -61,32 +60,24 @@
li reg, cca | 0x10; \
dsll reg, reg, 59
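What this macro computes, in C terms: or-ing 0x10 into the CCA before the
59-bit shift sets bit 63, the XKPHYS segment bit, while bits 61:59 carry the
cache coherency attribute. A sketch, helper name assumed:

    #include <stdint.h>

    /* hypothetical C rendering of LOAD_XKPHYS(reg, cca) */
    static inline uint64_t
    xkphys_base(uint64_t cca)
    {
            return (cca | 0x10) << 59;  /* == (1ULL << 63) | (cca << 59) */
    }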
-/*
- * Skip the .h file. Noone else need to know!
- */
-
+/* L1 cache operations */
#define IndexInvalidate_I 0x00
#define IndexWBInvalidate_D 0x01
-#define IndexWBInvalidate_S 0x03
-
-#define IndexLoadTag_I 0x04
#define IndexLoadTag_D 0x05
-#define IndexLoadTag_S 0x07
-
-#define IndexStoreTag_I 0x08
#define IndexStoreTag_D 0x09
-#define IndexStoreTag_S 0x0b
-
#define HitInvalidate_D 0x11
-#define HitInvalidate_S 0x13
-
-#define Fill_I 0x14
#define HitWBInvalidate_D 0x15
-#define HitWBInvalidate_S 0x17
+#define IndexLoadData_D 0x19
+#define IndexStoreData_D 0x1d
-#define HitWB_I 0x18
-#define HitWB_D 0x19
-#define HitWB_S 0x1b
+/* L2 cache operations */
+#define IndexWBInvalidate_S 0x03
+#define IndexLoadTag_S 0x07
+#define IndexStoreTag_S 0x0b
+#define HitInvalidate_S 0x13
+#define HitWBInvalidate_S 0x17
+#define IndexLoadData_S 0x1b
+#define IndexStoreData_S 0x1f
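The regrouping follows the MIPS cache-op encoding: the two low bits select
the target cache (0 = primary I, 1 = primary D, 3 = secondary) and the three
upper bits the operation, so each _S value is the matching _D value with
bit 1 set. Sketch, helper macro assumed:

    #define CACHEOP(op, which)  (((op) << 2) | (which))  /* hypothetical */

    /* e.g. HitWBInvalidate_D == CACHEOP(5, 1) == 0x15,
     *      HitWBInvalidate_S == CACHEOP(5, 3) == 0x17 */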
/*
* Define cache type definition bits. NOTE! the 3 lsb may NOT change!
@@ -106,10 +97,7 @@
*
* Loongson2_ConfigCache --
*
- * Size and configure the caches.
- * NOTE: should only be called from mips_init().
- *
- * Side effects:
+ * Set up various cache-dependent variables:
* The size of the data cache is stored into CpuPrimaryDataCacheSize.
* The size of instruction cache is stored into CpuPrimaryInstCacheSize.
* Alignment mask for cache aliasing test is stored in CpuCacheAliasMask.
@@ -125,13 +113,6 @@
*----------------------------------------------------------------------------
*/
LEAF(Loongson2_ConfigCache, 0)
- .set noreorder
- LA v0, 1f
- LA v1, CKSEG1_BASE
- or v0, v1
- jr v0 # Switch to uncached.
- nop
-1:
mfc0 v0, COP_0_CONFIG # Get configuration register
srl t1, v0, 9 # Get I cache size.
@@ -155,19 +136,7 @@ LEAF(Loongson2_ConfigCache, 0)
li ta3, 0 # Tertiary size 0.
- LOAD_XKPHYS(t0, CCA_CACHED)
- li ta2, 128 * 1024 # fixed L2 size...
- PTR_ADDU t1, t0, ta2
- sll ta2, 2 # ...is 512KB
-1:
- cache IndexWBInvalidate_S, 0(t0)
- cache IndexWBInvalidate_S, 1(t0)
- cache IndexWBInvalidate_S, 2(t0)
- cache IndexWBInvalidate_S, 3(t0)
- PTR_ADDU t0, t0, 32
- bne t0, t1, 1b
- nop
-
+ li ta2, 512 * 1024 # fixed L2 size
li t2, (CTYPE_4WAY|CTYPE_HAS_IL2) # caches are 4-way, internal L2
/*
@@ -188,20 +157,21 @@ ConfResult:
addu t1, ta0, -1 # Use icache for alias mask
srl t1, t2 # Some cpus have different
- and t1, ~(NBPG - 1) # i and d cache sizes...
+ and t1, ~(PAGE_SIZE - 1) # i and d cache sizes...
sw t1, CpuCacheAliasMask
sw ta0, CpuPrimaryInstCacheSize # store cache size.
sw ta1, CpuPrimaryDataCacheSize # store cache size.
- /* Loongson 2E/2F: cache way information is in the lowest bits */
- sw zero, CpuPrimaryInstSetSize
+ /*
+ * Cache way number encoding is done in the lowest bits, and
+ * these variables are not used. We make them nonzero so
+ * that `mi' code can divide by them if necessary.
+ */
li ta1, 1
+ sw ta1, CpuPrimaryInstSetSize
sw ta1, CpuPrimaryDataSetSize
- and v0, ~7
- or v0, CCA_CACHED # set cachable writeback kseg0
- mtc0 v0, COP_0_CONFIG # establish any new config
j ra
nop
END(Loongson2_ConfigCache)
@@ -214,64 +184,53 @@ END(Loongson2_ConfigCache)
 * No need to look at the number of sets since we are cleaning out
* the entire cache and thus will address all sets anyway.
*
- * Results:
- * None.
- *
- * Side effects:
- * The contents of ALL caches are Invalidated or Synched.
- *
*----------------------------------------------------------------------------
*/
LEAF(Loongson2_SyncCache, 0)
- .set noreorder
+ sync
+
lw t1, CpuPrimaryInstCacheSize
+ srl t1, t1, 2 # / 4ways
lw t2, CpuPrimaryDataCacheSize
+ srl t2, t2, 2 # / 4ways
+
+ /* L1 I$ */
-/*
- * Sync the instruction cache.
- */
LOAD_XKPHYS(t0, CCA_CACHED)
PTR_ADDU t1, t0, t1 # Compute end address
- PTR_SUBU t1, 128
-
+ PTR_SUBU t1, 32
1:
cache IndexInvalidate_I, 0(t0)
- cache IndexInvalidate_I, 32(t0)
- cache IndexInvalidate_I, 64(t0)
- cache IndexInvalidate_I, 96(t0)
-
bne t0, t1, 1b
- PTR_ADDU t0, t0, 128
+ PTR_ADDU t0, 32
-/*
- * Sync the data cache. Do L1 first. Indexed only operate on
- * the selected cache and differs from Hit in that sense.
- */
+ /* L1 D$ */
LOAD_XKPHYS(t0, CCA_CACHED)
PTR_ADDU t1, t0, t2 # End address
- PTR_SUBU t1, t1, 128
+ PTR_SUBU t1, 32
1:
cache IndexWBInvalidate_D, 0(t0)
- cache IndexWBInvalidate_D, 32(t0)
- cache IndexWBInvalidate_D, 64(t0)
- cache IndexWBInvalidate_D, 96(t0)
+ cache IndexWBInvalidate_D, 1(t0)
+ cache IndexWBInvalidate_D, 2(t0)
+ cache IndexWBInvalidate_D, 3(t0)
+ bne t0, t1, 1b
+ PTR_ADDU t0, 32
+ /* L2 */
+
+ LOAD_XKPHYS(t0, CCA_CACHED)
+ lw t2, CpuSecondaryCacheSize
+ srl t2, 2 # because cache is 4 way
+ PTR_ADDU t1, t0, t2
+ PTR_SUBU t1, 32
+1:
+ cache IndexWBInvalidate_S, 0(t0)
+ cache IndexWBInvalidate_S, 1(t0)
+ cache IndexWBInvalidate_S, 2(t0)
+ cache IndexWBInvalidate_S, 3(t0)
bne t0, t1, 1b
- PTR_ADDU t0, t0, 128
-
-/* Do on chip L2 */
- LOAD_XKPHYS(t3, CCA_CACHED)
- lw ta0, CpuSecondaryCacheSize
- sll ta0, 2 # because cache is 4 way
-10:
- cache IndexWBInvalidate_S, 0(t3)
- cache IndexWBInvalidate_S, 1(t3)
- cache IndexWBInvalidate_S, 2(t3)
- cache IndexWBInvalidate_S, 3(t3)
- PTR_SUBU ta0, 32 # Fixed cache line size.
- bgtz ta0, 10b
- PTR_ADDU t3, 32
+ PTR_ADDU t0, 32
j ra
nop
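A pseudo-C view of the rewritten loops, assuming (per the comments above)
that way selection rides in the two low address bits and that a single I$
index op covers all four ways; xkphys_base() and cache_op() are assumed
helpers standing in for the cache instruction:

    vaddr_t va, base = xkphys_base(CCA_CACHED);
    int w;

    for (va = base; va < base + CpuPrimaryInstCacheSize / 4; va += 32)
            cache_op(IndexInvalidate_I, va);
    for (va = base; va < base + CpuPrimaryDataCacheSize / 4; va += 32)
            for (w = 0; w < 4; w++)
                    cache_op(IndexWBInvalidate_D, va + w);  /* way in low bits */
    for (va = base; va < base + CpuSecondaryCacheSize / 4; va += 32)
            for (w = 0; w < 4; w++)
                    cache_op(IndexWBInvalidate_S, va + w);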
@@ -281,32 +240,24 @@ END(Loongson2_SyncCache)
*
* Loongson2_InvalidateICache --
*
- * void Loongson2_SyncICache(addr, len)
- * vaddr_t addr, len;
+ * void Loongson2_InvalidateICache(vaddr_t va, size_t len)
*
 * Invalidate the L1 instruction cache for at least the range
- * of addr to addr + len - 1.
+ * of va to va + len - 1.
* The address is reduced to a XKPHYS index to avoid TLB faults.
*
- * Results:
- * None.
- *
- * Side effects:
- * The contents of the L1 Instruction cache is flushed.
- * Must not touch v0.
- *
*----------------------------------------------------------------------------
*/
LEAF(Loongson2_InvalidateICache, 0)
- /* Loongson2: I$ index ops affect all ways */
- and a0, 0x00ffffff # Reduce addr to cache index
- LOAD_XKPHYS(a2, CCA_CACHED)
+ andi a0, ((1 << 14) - 1) # only keep index bits
PTR_ADDU a1, 31 # Round up size
+ LOAD_XKPHYS(a2, CCA_CACHED)
PTR_ADDU a1, a0 # Add extra from address
- and a0, -32 # Align start address
+ dsrl a0, a0, 5
+ dsll a0, a0, 5 # align address
PTR_SUBU a1, a1, a0
PTR_ADDU a0, a2 # a0 now new XKPHYS address
- srl a1, a1, 5 # Number of unrolled loops
+ dsrl a1, a1, 5 # Number of unrolled loops
1:
PTR_ADDU a1, -1
cache IndexInvalidate_I, 0(a0)
@@ -314,59 +265,41 @@ LEAF(Loongson2_InvalidateICache, 0)
PTR_ADDU a0, 32
j ra
- move v0, zero
+ nop
END(Loongson2_InvalidateICache)
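The new entry sequence in C terms (sketch; the 14 index bits match the fixed
64KB, 4-way L1 I$ of the 2E/2F, i.e. 16KB per way):

    va &= (1UL << 14) - 1;                  /* keep cache index bits only */
    lines = ((va & 31) + len + 31) >> 5;    /* 32-byte lines to invalidate */
    va = (va & ~31UL) | xkphys_base(CCA_CACHED);
    while (lines-- != 0) {
            cache_op(IndexInvalidate_I, va);
            va += 32;
    }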
/*----------------------------------------------------------------------------
*
* Loongson2_SyncDCachePage --
*
- * void Loongson2_SyncDCachePage(addr)
- * vaddr_t addr;
- *
- * Sync the L1 data cache page for address addr.
- * The address is reduced to a XKPHYS index to avoid TLB faults.
- *
- * Results:
- * None.
+ * void Loongson2_SyncDCachePage(paddr_t pa)
*
- * Side effects:
- * The contents of the cache is written back to primary memory.
- * The cache line is invalidated.
+ * Sync the L1 and L2 data cache page for address pa.
*
*----------------------------------------------------------------------------
*/
LEAF(Loongson2_SyncDCachePage, 0)
- LOAD_XKPHYS(a2, CCA_CACHED)
- dsll a0, 34
- dsrl a0, 34
- PTR_ADDU a0, a2 # a0 now new XKPHYS address
- and a0, ~PAGE_MASK # Page align start address
- PTR_ADDU a1, a0, PAGE_SIZE-128
-
-1:
- cache IndexWBInvalidate_D, 0(a0) # do set A
- cache IndexWBInvalidate_D, 32(a0)
- cache IndexWBInvalidate_D, 64(a0)
- cache IndexWBInvalidate_D, 96(a0)
+ sync
- cache IndexWBInvalidate_D, 1(a0) # do set B
- cache IndexWBInvalidate_D, 33(a0)
- cache IndexWBInvalidate_D, 65(a0)
- cache IndexWBInvalidate_D, 97(a0)
+ LOAD_XKPHYS(a1, CCA_CACHED)
+ or a0, a1 # a0 now new L1 address
+ dsrl a0, a0, PAGE_SHIFT
+ dsll a0, a0, PAGE_SHIFT # page align pa
+ move a1, a0 # save for L2
- cache IndexWBInvalidate_D, 2(a0) # do set C
- cache IndexWBInvalidate_D, 34(a0)
- cache IndexWBInvalidate_D, 66(a0)
- cache IndexWBInvalidate_D, 98(a0)
-
- cache IndexWBInvalidate_D, 3(a0) # do set D
- cache IndexWBInvalidate_D, 35(a0)
- cache IndexWBInvalidate_D, 67(a0)
- cache IndexWBInvalidate_D, 99(a0)
+ /* L1 */
+ PTR_ADDU a2, a0, PAGE_SIZE-32
+1:
+ cache HitWBInvalidate_D, 0(a0)
+ bne a2, a0, 1b
+ PTR_ADDU a0, 32
- bne a1, a0, 1b
- PTR_ADDU a0, 128
+ /* L2 */
+ PTR_ADDU a2, a1, PAGE_SIZE-32
+2:
+ cache HitWBInvalidate_S, 0(a1)
+ bne a2, a1, 2b
+ PTR_ADDU a1, 32
j ra
nop
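The rewritten routine in C terms (sketch): with a physical address in hand,
the page is reached through cached XKPHYS and cleaned with Hit ops, L1
first, then L2:

    vaddr_t va = (pa & ~PAGE_MASK) | xkphys_base(CCA_CACHED);
    vsize_t off;

    for (off = 0; off < PAGE_SIZE; off += 32)
            cache_op(HitWBInvalidate_D, va + off);  /* L1 */
    for (off = 0; off < PAGE_SIZE; off += 32)
            cache_op(HitWBInvalidate_S, va + off);  /* L2 */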
@@ -376,232 +309,140 @@ END(Loongson2_SyncDCachePage)
*
* Loongson2_HitSyncDCache --
*
- * void Loongson2_HitSyncDCache(addr, len)
- * vaddr_t addr, len;
+ * void Loongson2_HitSyncDCache(paddr_t pa, size_t len)
*
- * Sync data cache for range of addr to addr + len - 1.
- * The address can be any valid virtual address as long
- * as no TLB invalid traps occur. Only lines with matching
- * addr are flushed.
- *
- * Results:
- * None.
- *
- * Side effects:
- * The contents of the L1 cache is written back to primary memory.
- * The cache line is invalidated.
- *
- * IMPORTANT NOTE:
- * Since orphaned L1 cache entries will not be synched it is
- * mandatory to pass over the L1 cache once after the L2 is done.
+ * Sync L1 and L2 data caches for range of pa to pa + len - 1.
+ * Since L2 is writeback, we need to operate on L1 first, to make sure
+ * L1 is clean. The usual mips strategy of doing L2 first, and then
+ * the L1 orphans, will not work as the orphans would only be pushed
+ * to L2, and not to physical memory.
*
*----------------------------------------------------------------------------
*/
LEAF(Loongson2_HitSyncDCache, 0)
+ sync
+
beq a1, zero, 3f # size is zero!
PTR_ADDU a1, 31 # Round up
PTR_ADDU a1, a1, a0 # Add extra from address
- and a0, a0, -32 # align address
+ dsrl a0, a0, 5
+ dsll a0, a0, 5 # align to cacheline boundary
PTR_SUBU a1, a1, a0
- srl a1, a1, 5 # Compute number of cache lines
+ dsrl a1, a1, 5 # Compute number of cache lines
+ LOAD_XKPHYS(a2, CCA_CACHED)
+ or a0, a2 # build suitable va
+
+ move a3, a1 # save for L2
+ move a2, a0
+ /* L1 */
1:
PTR_ADDU a1, -1
cache HitWBInvalidate_D, 0(a0)
bne a1, zero, 1b
PTR_ADDU a0, 32
-3:
- j ra
- nop
-END(Loongson2_HitSyncDCache)
-
-
-/*----------------------------------------------------------------------------
- *
- * Loongson2_HitSyncSCache --
- *
- * void Loongson2_HitSyncSCache(addr, len)
- * vaddr_t addr, len;
- *
- * Sync secondary cache for range of addr to addr + len - 1.
- * The address can be any valid virtual address as long
- * as no TLB invalid traps occur. Only lines with matching
- * addr are flushed.
- *
- * Results:
- * None.
- *
- * Side effects:
- * The contents of the L2 cache is written back to primary memory.
- * The cache line is invalidated.
- *
- * IMPORTANT NOTE:
- * Since orphaned L1 cache entries will not be synched it is
- * mandatory to pass over the L1 cache once after the L2 is done.
- *
- *----------------------------------------------------------------------------
- */
-LEAF(Loongson2_HitSyncSCache, 0)
- beq a1, zero, 3f # size is zero!
- PTR_ADDU a1, a1, a0 # Add in extra from align
- and a0, a0, -32 # Align address
- PTR_SUBU a1, a1, a0
-1:
- PTR_ADDU a1, -32
-
- cache HitWBInvalidate_S, 0(a0)
- cache HitWBInvalidate_D, 0(a0) # Kill any orphans...
-
- bgtz a1, 1b
- PTR_ADDU a0, 32
+ /* L2 */
+2:
+ PTR_ADDU a3, -1
+ cache HitWBInvalidate_S, 0(a2)
+ bne a3, zero, 2b
+ PTR_ADDU a2, 32
3:
j ra
nop
-END(Loongson2_HitSyncSCache)
+END(Loongson2_HitSyncDCache)
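The ordering constraint from the comment above, reduced to a sketch:

    for (p = start; p < end; p += 32)
            cache_op(HitWBInvalidate_D, p); /* L1 first: dirty lines land in L2 */
    for (p = start; p < end; p += 32)
            cache_op(HitWBInvalidate_S, p); /* then L2 pushes them to memory */
    /* doing L2 first would strand the L1 orphans in L2, never reaching memory */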
/*----------------------------------------------------------------------------
*
* Loongson2_HitInvalidateDCache --
*
- * void Loongson2_HitInvalidateDCache(addr, len)
- * vaddr_t addr, len;
- *
- * Invalidate data cache for range of addr to addr + len - 1.
- * The address can be any valid address as long as no TLB misses occur.
- * (Be sure to use cached K0SEG kernel addresses or mapped addresses)
- * Only lines with matching addresses are invalidated.
+ * void Loongson2_HitInvalidateDCache(paddr_t pa, size_t len)
*
- * Results:
- * None.
- *
- * Side effects:
- * The L1 cache line is invalidated.
+ * Invalidate L1 and L2 data caches for range of pa to pa + len - 1.
*
*----------------------------------------------------------------------------
*/
LEAF(Loongson2_HitInvalidateDCache, 0)
+ sync
+
beq a1, zero, 3f # size is zero!
- PTR_ADDU a1, a1, a0 # Add in extra from align
- and a0, a0, -32 # Align address
+ PTR_ADDU a1, 31 # Round up
+ PTR_ADDU a1, a1, a0 # Add extra from address
+ dsrl a0, a0, 5
+ dsll a0, a0, 5 # align to cacheline boundary
PTR_SUBU a1, a1, a0
+ dsrl a1, a1, 5 # Compute number of cache lines
+ LOAD_XKPHYS(a2, CCA_CACHED)
+ or a0, a2 # build suitable va
-1:
- PTR_ADDU a1, -32
+ move a3, a1 # save for L2
+ move a2, a0
+ /* L1 */
+1:
+ PTR_ADDU a1, -1
cache HitInvalidate_D, 0(a0)
-
- bgtz a1, 1b
+ bne a1, zero, 1b
PTR_ADDU a0, 32
-3:
- j ra
- nop
-END(Loongson2_HitInvalidateDCache)
-
-
-/*----------------------------------------------------------------------------
- *
- * Loongson2_HitInvalidateSCache --
- *
- * void Loongson2_HitInvalidateSCache(addr, len)
- * vaddr_t addr, len;
- *
- * Invalidate secondary cache for range of addr to addr + len - 1.
- * The address can be any valid address as long as no TLB misses occur.
- * (Be sure to use cached K0SEG kernel addresses or mapped addresses)
- * Only lines with matching addresses are invalidated.
- *
- * Results:
- * None.
- *
- * Side effects:
- * The L2 cache line is invalidated.
- *
- *----------------------------------------------------------------------------
- */
-LEAF(Loongson2_HitInvalidateSCache, 0)
- beq a1, zero, 3f # size is zero!
- PTR_ADDU a1, a1, a0 # Add in extra from align
- and a0, a0, -32 # Align address
- PTR_SUBU a1, a1, a0
-1:
- PTR_ADDU a1, -32
-
- cache HitInvalidate_S, 0(a0)
- cache HitInvalidate_D, 0(a0) # Orphans in L1
-
- bgtz a1, 1b
- PTR_ADDU a0, 32
+ /* L2 */
+2:
+ PTR_ADDU a3, -1
+ cache HitInvalidate_S, 0(a2)
+ bne a3, zero, 2b
+ PTR_ADDU a2, 32
3:
j ra
nop
-END(Loongson2_HitInvalidateSCache)
+END(Loongson2_HitInvalidateDCache)
/*----------------------------------------------------------------------------
*
* Loongson2_IOSyncDCache --
*
- * void Loongson2_IOSyncDCache(addr, len, rw)
- * vaddr_t addr;
- * int len, rw;
- *
- * Invalidate or flush data cache for range of addr to addr + len - 1.
- * The address can be any valid address as long as no TLB misses occur.
- * (Be sure to use cached K0SEG kernel addresses or mapped addresses)
- *
- * In case of the existence of an external cache we invalidate pages
- * which are in the given range ONLY if transfer direction is READ.
- * The assumption here is a 'write through' external cache which is
- * true for all now supported processors.
+ * void Loongson2_IOSyncDCache(paddr_t pa, size_t len, int how)
*
- * Results:
- * None.
+ * Invalidate or flush L1 and L2 data caches for range of pa to
+ * pa + len - 1.
*
- * Side effects:
- * If rw == 0 (read), L1 and on-chip L2 caches are invalidated or
- * flushed if the area does not match the alignment
- * requirements.
- * If rw == 1 (write) or rw == 2 (write-read), L1 and on-chip L2 caches
- * are written back to memory and invalidated.
+ * If how == 0 (invalidate):
+ * L1 and L2 caches are invalidated or flushed if the area
+ * does not match the alignment requirements.
+ * If how == 1 (writeback):
+ * L1 and L2 are written back.
+ * If how == 2 (writeback and invalidate):
+ * L1 and L2 are written back to memory and invalidated (flushed).
*
*----------------------------------------------------------------------------
*/
-NON_LEAF(Loongson2_IOSyncDCache, FRAMESZ(CF_SZ+2*REGSZ), ra)
+NON_LEAF(Loongson2_IOSyncDCache, FRAMESZ(CF_SZ+REGSZ), ra)
+ sync
- PTR_SUBU sp, FRAMESZ(CF_SZ+2*REGSZ)
- PTR_S ra, CF_RA_OFFS+2*REGSZ(sp)
- REG_S a0, CF_ARGSZ(sp) # save args
- beqz a2, SyncRD # Sync PREREAD
- REG_S a1, CF_ARGSZ+REGSZ(sp)
+ PTR_SUBU sp, FRAMESZ(CF_SZ+REGSZ)
+ PTR_S ra, CF_RA_OFFS+REGSZ(sp)
+ beqz a2, SyncInv # Sync PREREAD
+ nop
-/*
- * Sync for unaligned read or write-read.
- */
-SyncRDWB:
- jal Loongson2_HitSyncSCache # Do internal L2 cache
- nop # L1 done in parallel
+SyncWBInv:
+ jal Loongson2_HitSyncDCache
+ nop
b SyncDone
- PTR_L ra, CF_RA_OFFS+2*REGSZ(sp)
+ PTR_L ra, CF_RA_OFFS+REGSZ(sp)
-SyncRD:
- and t0, a0, 31 # check if invalidate possible
- bnez t0, SyncRDWB # both address and size must
- and t0, a1, 31 # be aligned at the cache size
- bnez t0, SyncRDWB
+SyncInv:
+ or t0, a0, a1 # check if invalidate possible
+ and t0, t0, 31 # both address and size must
+ bnez t0, SyncWBInv # be aligned to the cache size
nop
-/*
- * Sync for aligned read, no writeback required.
- */
- jal Loongson2_HitInvalidateSCache # Internal L2 cache
- nop # L1 done in parallel
-
- PTR_L ra, CF_RA_OFFS+2*REGSZ(sp)
+ jal Loongson2_HitInvalidateDCache
+ nop
+ PTR_L ra, CF_RA_OFFS+REGSZ(sp)
SyncDone:
j ra
- PTR_ADDU sp, FRAMESZ(CF_SZ+2*REGSZ)
+ PTR_ADDU sp, FRAMESZ(CF_SZ+REGSZ)
END(Loongson2_IOSyncDCache)
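The dispatch in C terms (sketch): the invalidate-only fast path is taken for
a PREREAD sync only when both address and length are 32-byte aligned;
anything else falls back to a full writeback + invalidate:

    if (how == 0 && ((pa | len) & 31) == 0)
            Loongson2_HitInvalidateDCache(pa, len);
    else
            Loongson2_HitSyncDCache(pa, len);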
diff --git a/sys/arch/mips64/mips64/db_machdep.c b/sys/arch/mips64/mips64/db_machdep.c
index ed82826b099..4dc4e3b8f80 100644
--- a/sys/arch/mips64/mips64/db_machdep.c
+++ b/sys/arch/mips64/mips64/db_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: db_machdep.c,v 1.20 2009/11/21 23:56:56 miod Exp $ */
+/* $OpenBSD: db_machdep.c,v 1.21 2009/12/25 21:02:15 miod Exp $ */
/*
* Copyright (c) 1998-2003 Opsycon AB (www.opsycon.se)
@@ -197,7 +197,9 @@ db_write_bytes(addr, size, data)
kdbpokeb(ptr, *data++);
}
if (addr < VM_MAXUSER_ADDRESS) {
- Mips_HitSyncDCache(addr, size);
+ /* XXX we don't know where this page is mapped... */
+ Mips_HitSyncDCache(addr, PHYS_TO_XKPHYS(addr, CCA_CACHED),
+ size);
Mips_InvalidateICache(PHYS_TO_CKSEG0(addr & 0xffff), size);
}
}
diff --git a/sys/arch/mips64/mips64/pmap.c b/sys/arch/mips64/mips64/pmap.c
index 899c92538c7..6e2eeecb834 100644
--- a/sys/arch/mips64/mips64/pmap.c
+++ b/sys/arch/mips64/mips64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.41 2009/12/07 18:58:34 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.42 2009/12/25 21:02:15 miod Exp $ */
/*
* Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -583,7 +583,8 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
continue;
if ((entry & PG_M) != 0 /* && p != PG_M */)
if ((entry & PG_CACHEMODE) == PG_CACHED)
- Mips_HitSyncDCache(sva, PAGE_SIZE);
+ Mips_HitSyncDCache(sva,
+ pfn_to_pad(entry), PAGE_SIZE);
entry = (entry & ~(PG_M | PG_RO)) | p;
*pte = entry;
/*
@@ -620,7 +621,8 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
continue;
if ((entry & PG_M) != 0 /* && p != PG_M */)
if ((entry & PG_CACHEMODE) == PG_CACHED)
- Mips_SyncDCachePage(sva);
+ Mips_SyncDCachePage(sva,
+ pfn_to_pad(entry));
entry = (entry & ~(PG_M | PG_RO)) | p;
*pte = entry;
if (pmap->pm_tlbgen == tlbpid_gen)
@@ -841,7 +843,7 @@ pmap_kremove(vaddr_t va, vsize_t len)
entry = *pte;
if (!(entry & PG_V))
continue;
- Mips_HitSyncDCache(va, PAGE_SIZE);
+ Mips_HitSyncDCache(va, pfn_to_pad(entry), PAGE_SIZE);
*pte = PG_NV | PG_G;
tlb_flush_addr(va);
}
@@ -912,9 +914,9 @@ pmap_prefer(paddr_t foff, vaddr_t *vap)
{
if (CpuCacheAliasMask != 0) {
#if 1
- *vap += (foff - *vap) & (CpuCacheAliasMask | PAGE_MASK);
+ *vap += (foff - *vap) & (CpuCacheAliasMask | PAGE_MASK);
#else
- *vap += (*vap ^ foff) & CpuCacheAliasMask;
+ *vap += (*vap ^ foff) & CpuCacheAliasMask;
#endif
}
}
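What this computes (sketch): the suggested va is bumped until it is
congruent to the file offset modulo the alias span, so every mapping of a
given object offset lands on the same cache index:

    va += (foff - va) & (CpuCacheAliasMask | PAGE_MASK);
    /* afterwards: ((va ^ foff) & CpuCacheAliasMask) == 0 */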
@@ -955,10 +957,10 @@ pmap_zero_page(struct vm_page *pg)
pv = pg_to_pvh(pg);
if ((pg->pg_flags & PV_CACHED) &&
((pv->pv_va ^ va) & CpuCacheAliasMask) != 0) {
- Mips_SyncDCachePage(pv->pv_va);
+ Mips_SyncDCachePage(pv->pv_va, phys);
}
mem_zero_page(va);
- Mips_HitSyncDCache(va, PAGE_SIZE);
+ Mips_HitSyncDCache(va, phys, PAGE_SIZE);
}
/*
@@ -987,20 +989,20 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
pv = pg_to_pvh(srcpg);
if ((srcpg->pg_flags & PV_CACHED) &&
(sf = ((pv->pv_va ^ s) & CpuCacheAliasMask) != 0)) {
- Mips_SyncDCachePage(pv->pv_va);
+ Mips_SyncDCachePage(pv->pv_va, src);
}
pv = pg_to_pvh(dstpg);
if ((dstpg->pg_flags & PV_CACHED) &&
(df = ((pv->pv_va ^ d) & CpuCacheAliasMask) != 0)) {
- Mips_SyncDCachePage(pv->pv_va);
+ Mips_SyncDCachePage(pv->pv_va, dst);
}
memcpy((void *)d, (void *)s, PAGE_SIZE);
if (sf) {
- Mips_HitSyncDCache(s, PAGE_SIZE);
+ Mips_HitSyncDCache(s, src, PAGE_SIZE);
}
- Mips_HitSyncDCache(d, PAGE_SIZE);
+ Mips_HitSyncDCache(d, dst, PAGE_SIZE);
}
/*
@@ -1024,7 +1026,7 @@ pmap_clear_modify(struct vm_page *pg)
rv = TRUE;
}
if (pg->pg_flags & PV_CACHED)
- Mips_SyncDCachePage(pv->pv_va);
+ Mips_SyncDCachePage(pv->pv_va, VM_PAGE_TO_PHYS(pg));
for (; pv != NULL; pv = pv->pv_next) {
if (pv->pv_pmap == pmap_kernel()) {
@@ -1272,7 +1274,8 @@ pmap_enter_pv(pmap_t pmap, vaddr_t va, vm_page_t pg, pt_entry_t *npte)
VM_PAGE_TO_PHYS(pg), npv->pv_va, va);
#endif
pmap_page_cache(pg, PV_UNCACHED);
- Mips_SyncDCachePage(pv->pv_va);
+ Mips_SyncDCachePage(pv->pv_va,
+ VM_PAGE_TO_PHYS(pg));
*npte = (*npte & ~PG_CACHEMODE) | PG_UNCACHED;
}
}
@@ -1341,7 +1344,7 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, paddr_t pa)
*/
if (pmap == pv->pv_pmap && va == pv->pv_va) {
if (pg->pg_flags & PV_CACHED)
- Mips_SyncDCachePage(va);
+ Mips_SyncDCachePage(va, pa);
npv = pv->pv_next;
if (npv) {
*pv = *npv;
@@ -1361,7 +1364,7 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, paddr_t pa)
}
if (npv != NULL) {
if (pg->pg_flags & PV_CACHED)
- Mips_SyncDCachePage(va);
+ Mips_SyncDCachePage(va, pa);
pv->pv_next = npv->pv_next;
pmap_pv_free(npv);
} else {
@@ -1409,9 +1412,9 @@ void
pmap_pg_free(struct pool *pp, void *item)
{
vaddr_t va = (vaddr_t)item;
- vm_page_t pg;
+ paddr_t pa = XKPHYS_TO_PHYS(va);
+ vm_page_t pg = PHYS_TO_VM_PAGE(pa);
- Mips_HitInvalidateDCache(va, PAGE_SIZE);
- pg = PHYS_TO_VM_PAGE(XKPHYS_TO_PHYS(va));
+ Mips_HitInvalidateDCache(va, pa, PAGE_SIZE);
uvm_pagefree(pg);
}
diff --git a/sys/arch/mips64/mips64/sys_machdep.c b/sys/arch/mips64/mips64/sys_machdep.c
index e492c10e565..ab0c1eec0f1 100644
--- a/sys/arch/mips64/mips64/sys_machdep.c
+++ b/sys/arch/mips64/mips64/sys_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sys_machdep.c,v 1.3 2009/09/27 18:20:13 miod Exp $ */
+/* $OpenBSD: sys_machdep.c,v 1.4 2009/12/25 21:02:15 miod Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -122,11 +122,11 @@ mips64_cacheflush(struct proc *p, struct mips64_cacheflush_args *cfa)
* Check for a resident mapping first, this is faster than
* uvm_map_lookup_entry().
*/
- if (pmap_extract(pm, va, &pa) != 0) {
+ if (pmap_extract(pm, va, &pa) != FALSE) {
if (cfa->which & ICACHE)
Mips_InvalidateICache(va, chunk);
if (cfa->which & DCACHE)
- Mips_HitSyncDCache(va, chunk);
+ Mips_HitSyncDCache(va, pa, chunk);
} else {
if (uvm_map_lookup_entry(map, va, &entry) == FALSE) {
rc = EFAULT;
diff --git a/sys/arch/sgi/include/autoconf.h b/sys/arch/sgi/include/autoconf.h
index abaf1715f3a..f3466677cea 100644
--- a/sys/arch/sgi/include/autoconf.h
+++ b/sys/arch/sgi/include/autoconf.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: autoconf.h,v 1.26 2009/11/12 19:46:46 miod Exp $ */
+/* $OpenBSD: autoconf.h,v 1.27 2009/12/25 21:02:18 miod Exp $ */
/*
* Copyright (c) 2001-2003 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -58,11 +58,11 @@ struct sys_rec {
/* Published cache operations. */
void (*_SyncCache)(void);
- void (*_InvalidateICache)(vaddr_t, int);
+ void (*_InvalidateICache)(vaddr_t, size_t);
void (*_SyncDCachePage)(vaddr_t);
- void (*_HitSyncDCache)(vaddr_t, int);
- void (*_IOSyncDCache)(vaddr_t, int, int);
- void (*_HitInvalidateDCache)(vaddr_t, int);
+ void (*_HitSyncDCache)(vaddr_t, size_t);
+ void (*_IOSyncDCache)(vaddr_t, size_t, int);
+ void (*_HitInvalidateDCache)(vaddr_t, size_t);
/* Serial console configuration. */
struct mips_bus_space console_io;
diff --git a/sys/arch/sgi/include/bus.h b/sys/arch/sgi/include/bus.h
index 6d14065c540..f74d4d354b2 100644
--- a/sys/arch/sgi/include/bus.h
+++ b/sys/arch/sgi/include/bus.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus.h,v 1.21 2009/07/30 21:39:15 miod Exp $ */
+/* $OpenBSD: bus.h,v 1.22 2009/12/25 21:02:18 miod Exp $ */
/*
* Copyright (c) 2003-2004 Opsycon AB Sweden. All rights reserved.
@@ -334,7 +334,8 @@ struct machine_bus_dma_segment {
bus_addr_t ds_addr; /* DMA address */
bus_size_t ds_len; /* length of transfer */
- bus_addr_t _ds_vaddr; /* CPU address */
+ paddr_t _ds_paddr; /* CPU physical address */
+ vaddr_t _ds_vaddr; /* CPU virtual address */
};
typedef struct machine_bus_dma_segment bus_dma_segment_t;
diff --git a/sys/arch/sgi/include/cpu.h b/sys/arch/sgi/include/cpu.h
index da585d19f37..56d0188192f 100644
--- a/sys/arch/sgi/include/cpu.h
+++ b/sys/arch/sgi/include/cpu.h
@@ -1,29 +1,94 @@
-/* $OpenBSD: cpu.h,v 1.5 2009/11/25 17:39:51 syuu Exp $ */
+/* $OpenBSD: cpu.h,v 1.6 2009/12/25 21:02:18 miod Exp $ */
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Ralph Campbell and Rick Macklem.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Copyright (C) 1989 Digital Equipment Corporation.
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies.
+ * Digital Equipment Corporation makes no representations about the
+ * suitability of this software for any purpose. It is provided "as is"
+ * without express or implied warranty.
+ *
+ * from: @(#)cpu.h 8.4 (Berkeley) 1/4/94
+ */
#ifndef _SGI_CPU_H_
#define _SGI_CPU_H_
#ifdef _KERNEL
+
#ifdef MULTIPROCESSOR
+
#if defined(TGT_OCTANE)
#define HW_CPU_NUMBER_REG 0x900000000ff50000 /* HEART_PRID */
-#else /* TGT_OCTANE */
+#else
#error MULTIPROCESSOR kernel not supported on this configuration
-#endif /* TGT_OCTANE */
-#define hw_cpu_number() (*(uint64_t *)HW_CPU_NUMBER_REG)
-#else/* MULTIPROCESSOR */
-#define hw_cpu_number() 0
-#endif/* MULTIPROCESSOR */
-#endif/* _KERNEL */
+#endif
-#include <mips64/cpu.h>
-
-#if defined(_KERNEL) && defined(MULTIPROCESSOR) && !defined(_LOCORE)
+#if !defined(_LOCORE)
void hw_cpu_boot_secondary(struct cpu_info *);
void hw_cpu_hatch(struct cpu_info *);
void hw_cpu_spinup_trampoline(struct cpu_info *);
int hw_ipi_intr_establish(int (*)(void *), u_long);
void hw_ipi_intr_set(u_long);
void hw_ipi_intr_clear(u_long);
-#endif/* _KERNEL && MULTIPROCESSOR && !_LOCORE */
+#endif
+
+#define hw_cpu_number() (*(uint64_t *)HW_CPU_NUMBER_REG)
+
+#else /* MULTIPROCESSOR */
+
+#define hw_cpu_number() 0
+
+#endif /* MULTIPROCESSOR */
+
+/*
+ * Define soft selected cache functions.
+ */
+#define Mips_SyncCache() \
+ (*(sys_config._SyncCache))()
+#define Mips_InvalidateICache(va, l) \
+ (*(sys_config._InvalidateICache))((va), (l))
+#define Mips_SyncDCachePage(va, pa) \
+ (*(sys_config._SyncDCachePage))((va))
+#define Mips_HitSyncDCache(va, pa, l) \
+ (*(sys_config._HitSyncDCache))((va), (l))
+#define Mips_IOSyncDCache(va, pa, l, h) \
+ (*(sys_config._IOSyncDCache))((va), (l), (h))
+#define Mips_HitInvalidateDCache(va, pa, l) \
+ (*(sys_config._HitInvalidateDCache))((va), (l))
+
+#endif/* _KERNEL */
+
+#include <mips64/cpu.h>
+
#endif /* !_SGI_CPU_H_ */
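This is the "preprocessor abuse" from the commit message: the pa parameter
is accepted but never substituted, so on sgi its computation vanishes at
compile time. For instance, with a call from the pmap.c hunk earlier in this
diff:

    Mips_SyncDCachePage(sva, pfn_to_pad(entry));
    /* expands on sgi to: */
    (*(sys_config._SyncDCachePage))((sva));
    /* pfn_to_pad(entry) is gone, never evaluated */

A physically-indexed port (the loongson kernel, not part of this diff) would
presumably define the same macros to forward pa instead.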
diff --git a/sys/arch/sgi/sgi/bus_dma.c b/sys/arch/sgi/sgi/bus_dma.c
index 19c177a3c23..7a226d4bf79 100644
--- a/sys/arch/sgi/sgi/bus_dma.c
+++ b/sys/arch/sgi/sgi/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.15 2009/10/14 21:26:54 miod Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.16 2009/12/25 21:02:18 miod Exp $ */
/*
* Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -317,10 +317,12 @@ _dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
curseg = 0;
while (size && nsegs) {
- bus_addr_t vaddr;
+ paddr_t paddr;
+ vaddr_t vaddr;
bus_size_t ssize;
ssize = map->dm_segs[curseg].ds_len;
+ paddr = map->dm_segs[curseg]._ds_paddr;
vaddr = map->dm_segs[curseg]._ds_vaddr;
if (addr != 0) {
@@ -329,6 +331,7 @@ _dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
ssize = 0;
} else {
vaddr += addr;
+ paddr += addr;
ssize -= addr;
addr = 0;
}
@@ -350,18 +353,20 @@ _dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t addr,
*/
if (op & BUS_DMASYNC_PREWRITE) {
#ifdef TGT_COHERENT
- Mips_IOSyncDCache(vaddr, ssize, SYNC_W);
+ Mips_IOSyncDCache(vaddr, paddr, ssize, SYNC_W);
#else
if (op & BUS_DMASYNC_PREREAD)
- Mips_IOSyncDCache(vaddr, ssize, SYNC_X);
+ Mips_IOSyncDCache(vaddr, paddr, ssize,
+ SYNC_X);
else
- Mips_IOSyncDCache(vaddr, ssize, SYNC_W);
+ Mips_IOSyncDCache(vaddr, paddr, ssize,
+ SYNC_W);
#endif
} else
if (op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD)) {
#ifdef TGT_COHERENT
#else
- Mips_IOSyncDCache(vaddr, ssize, SYNC_R);
+ Mips_IOSyncDCache(vaddr, paddr, ssize, SYNC_R);
#endif
}
size -= ssize;
@@ -584,7 +589,8 @@ _dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
map->dm_segs[seg].ds_addr =
(*t->_pa_to_device)(curaddr);
map->dm_segs[seg].ds_len = sgsize;
- map->dm_segs[seg]._ds_vaddr = (vaddr_t)vaddr;
+ map->dm_segs[seg]._ds_paddr = curaddr;
+ map->dm_segs[seg]._ds_vaddr = vaddr;
first = 0;
} else {
if ((bus_addr_t)curaddr == lastaddr &&
@@ -600,7 +606,8 @@ _dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
map->dm_segs[seg].ds_addr =
(*t->_pa_to_device)(curaddr);
map->dm_segs[seg].ds_len = sgsize;
- map->dm_segs[seg]._ds_vaddr = (vaddr_t)vaddr;
+ map->dm_segs[seg]._ds_paddr = curaddr;
+ map->dm_segs[seg]._ds_vaddr = vaddr;
}
}
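Each DMA segment now carries both CPU views of its memory, and
_dmamap_sync() hands both, offset into the segment, to the cache hook. A
condensed sketch of the PREWRITE path:

    paddr_t paddr = map->dm_segs[curseg]._ds_paddr + addr;
    vaddr_t vaddr = map->dm_segs[curseg]._ds_vaddr + addr;

    Mips_IOSyncDCache(vaddr, paddr, ssize, SYNC_W);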
diff --git a/sys/arch/sgi/xbow/xbridge.c b/sys/arch/sgi/xbow/xbridge.c
index da82dc060aa..de3b00be75b 100644
--- a/sys/arch/sgi/xbow/xbridge.c
+++ b/sys/arch/sgi/xbow/xbridge.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: xbridge.c,v 1.64 2009/11/25 11:23:30 miod Exp $ */
+/* $OpenBSD: xbridge.c,v 1.65 2009/12/25 21:02:18 miod Exp $ */
/*
* Copyright (c) 2008, 2009 Miodrag Vallat.
@@ -867,7 +867,7 @@ xbridge_intr_map(struct pci_attach_args *pa, pci_intr_handle_t *ihp)
}
#endif
- xbridge_decompose_tag(pa->pa_pc, pa->pa_tag, &bus, &device, NULL);
+ pci_decompose_tag(pa->pa_pc, pa->pa_tag, &bus, &device, NULL);
if (pa->pa_bridgetag) {
pin = PPB_INTERRUPT_SWIZZLE(pa->pa_rawintrpin, device);
@@ -1933,9 +1933,9 @@ xbridge_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
* Make sure we don't cross any boundaries.
*/
if (map->_dm_boundary > 0) {
- baddr = (pa + map->_dm_boundary) & bmask;
- if (sgsize > (baddr - pa))
- sgsize = baddr - pa;
+ baddr = (busaddr + map->_dm_boundary) & bmask;
+ if (sgsize > (baddr - busaddr))
+ sgsize = baddr - busaddr;
}
/*
@@ -1945,7 +1945,8 @@ xbridge_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
if (first) {
map->dm_segs[seg].ds_addr = busaddr;
map->dm_segs[seg].ds_len = sgsize;
- map->dm_segs[seg]._ds_vaddr = (vaddr_t)vaddr;
+ map->dm_segs[seg]._ds_paddr = pa;
+ map->dm_segs[seg]._ds_vaddr = vaddr;
first = 0;
} else {
if (busaddr == lastaddr &&
@@ -1964,7 +1965,8 @@ xbridge_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
}
map->dm_segs[seg].ds_addr = busaddr;
map->dm_segs[seg].ds_len = sgsize;
- map->dm_segs[seg]._ds_vaddr = (vaddr_t)vaddr;
+ map->dm_segs[seg]._ds_paddr = pa;
+ map->dm_segs[seg]._ds_vaddr = vaddr;
}
}