author     Dale Rahn <drahn@cvs.openbsd.org>	2003-07-02 21:30:14 +0000
committer  Dale Rahn <drahn@cvs.openbsd.org>	2003-07-02 21:30:14 +0000
commit     5035c30707e318455efb4630f1bade1beb524b25 (patch)
tree       dd8de754f63df3af8948c3c6d5bf201a7d5e354a /sys/arch/powerpc
parent     a3cd3a488774228f4f17f1843c18df95f43a9089 (diff)
Reduce the amount of asm code in powerpc/macppc by replacing it with
inlined functions; this helps improve readability and fixes a couple of bugs. ok miod@
Diffstat (limited to 'sys/arch/powerpc')
-rw-r--r--	sys/arch/powerpc/include/cpu.h	94
-rw-r--r--	sys/arch/powerpc/powerpc/fpu.c	24
-rw-r--r--	sys/arch/powerpc/powerpc/pmap.c	25
-rw-r--r--	sys/arch/powerpc/powerpc/trap.c	22
4 files changed, 123 insertions(+), 42 deletions(-)
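
The theme of the diff below: each open-coded inline asm statement is
replaced by a call to a static inline function defined once in cpu.h.
An illustrative sketch of the payoff (not part of the commit): where
every caller previously had to repeat the instruction and its operand
constraints,

	__asm__ volatile ("mfmsr %0" : "=r" (msr));

the constraints now live in a single definition,

	static __inline u_int32_t
	ppc_mfmsr(void)
	{
		int ret;
		__asm __volatile ("mfmsr %0" : "=r" (ret));
		return ret;
	}

and a caller simply writes msr = ppc_mfmsr(); a mistaken constraint
only has to be fixed in one place.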
diff --git a/sys/arch/powerpc/include/cpu.h b/sys/arch/powerpc/include/cpu.h
index 9d4e4705c61..bc4607e7a31 100644
--- a/sys/arch/powerpc/include/cpu.h
+++ b/sys/arch/powerpc/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.14 2003/07/02 21:23:35 drahn Exp $ */
+/* $OpenBSD: cpu.h,v 1.15 2003/07/02 21:30:12 drahn Exp $ */
/* $NetBSD: cpu.h,v 1.1 1996/09/30 16:34:21 ws Exp $ */
/*
@@ -100,6 +100,90 @@ invdcache(void *from, int len)
__asm__ __volatile__ ("sync");
}
+#define FUNC_SPR(n, name) \
+static __inline u_int32_t ppc_mf ## name (void) \
+{ \
+ int ret; \
+ __asm __volatile ("mfspr %0," # n : "=r" (ret)); \
+ return ret; \
+} \
+static __inline void ppc_mt ## name (u_int32_t val) \
+{ \
+ __asm __volatile ("mtspr "# n ",%0" :: "r" (val)); \
+} \
+
+FUNC_SPR(0, mq)
+FUNC_SPR(1, xer)
+FUNC_SPR(4, rtcu)
+FUNC_SPR(5, rtcl)
+FUNC_SPR(8, lr)
+FUNC_SPR(9, ctr)
+FUNC_SPR(18, dsisr)
+FUNC_SPR(19, dar)
+FUNC_SPR(22, dec)
+FUNC_SPR(25, sdr1)
+FUNC_SPR(26, srr0)
+FUNC_SPR(27, srr1)
+FUNC_SPR(256, vrsave)
+FUNC_SPR(272, sprg0)
+FUNC_SPR(273, sprg1)
+FUNC_SPR(274, sprg2)
+FUNC_SPR(275, sprg3)
+FUNC_SPR(282, ear)
+FUNC_SPR(287, pvr)
+FUNC_SPR(528, ibat0u)
+FUNC_SPR(529, ibat0l)
+FUNC_SPR(530, ibat1u)
+FUNC_SPR(531, ibat1l)
+FUNC_SPR(532, ibat2u)
+FUNC_SPR(533, ibat2l)
+FUNC_SPR(534, ibat3u)
+FUNC_SPR(535, ibat3l)
+FUNC_SPR(536, dbat0u)
+FUNC_SPR(537, dbat0l)
+FUNC_SPR(538, dbat1u)
+FUNC_SPR(539, dbat1l)
+FUNC_SPR(540, dbat2u)
+FUNC_SPR(541, dbat2l)
+FUNC_SPR(542, dbat3u)
+FUNC_SPR(543, dbat3l)
+FUNC_SPR(1008, hid0)
+FUNC_SPR(1009, hid1)
+FUNC_SPR(1010, iabr)
+FUNC_SPR(1017, l2cr)
+FUNC_SPR(1018, l3cr)
+FUNC_SPR(1013, dabr)
+FUNC_SPR(1023, pir)
+
+static __inline u_int32_t
+ppc_mftbl (void)
+{
+ int ret;
+ __asm __volatile ("mftb %0" : "=r" (ret));
+ return ret;
+}
+
+static __inline u_int32_t
+ppc_mfmsr (void)
+{
+ int ret;
+ __asm __volatile ("mfmsr %0" : "=r" (ret));
+ return ret;
+}
+
+static __inline void
+ppc_mtmsr (u_int32_t val)
+{
+ __asm __volatile ("mtmsr %0" :: "r" (val));
+}
+
+static __inline void
+ppc_mtsrin(u_int32_t val, u_int32_t sn_shifted)
+{
+ asm volatile ("mtsrin %0,%1" :: "r"(val), "r"(sn_shifted) );
+
+}
+
/*
* General functions to enable and disable interrupts
* without having inlined assembly code in many functions.
@@ -109,9 +193,9 @@ ppc_intr_enable(int enable)
{
u_int32_t msr;
if (enable != 0) {
- __asm__ volatile("mfmsr %0" : "=r"(msr));
+ msr = ppc_mfmsr();
msr |= PSL_EE;
- __asm__ volatile("mtmsr %0" :: "r"(msr));
+ ppc_mtmsr(msr);
}
}
@@ -119,9 +203,9 @@ static __inline int
ppc_intr_disable(void)
{
u_int32_t emsr, dmsr;
- __asm__ volatile("mfmsr %0" : "=r"(emsr));
+ emsr = ppc_mfmsr();
dmsr = emsr & ~PSL_EE;
- __asm__ volatile("mtmsr %0" :: "r"(dmsr));
+ ppc_mtmsr(dmsr);
return (emsr & PSL_EE);
}
#endif /* _POWERPC_CPU_H_ */
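
For reference, each FUNC_SPR(n, name) line above generates a matched
read/write pair of accessors. FUNC_SPR(287, pvr), for instance, expands
(roughly, after token pasting and stringizing) to:

	static __inline u_int32_t
	ppc_mfpvr(void)
	{
		int ret;
		__asm __volatile ("mfspr %0,287" : "=r" (ret));
		return ret;
	}

	static __inline void
	ppc_mtpvr(u_int32_t val)
	{
		__asm __volatile ("mtspr 287,%0" :: "r" (val));
	}

A hypothetical caller of the rewritten interrupt helpers would use the
usual save/restore idiom (the variable name s is illustrative):

	int s = ppc_intr_disable();	/* returns the old PSL_EE state */
	/* ... critical section ... */
	ppc_intr_enable(s);		/* re-enables only if it was set */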
diff --git a/sys/arch/powerpc/powerpc/fpu.c b/sys/arch/powerpc/powerpc/fpu.c
index b2bc9600b9f..b7338c2d31a 100644
--- a/sys/arch/powerpc/powerpc/fpu.c
+++ b/sys/arch/powerpc/powerpc/fpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: fpu.c,v 1.4 2002/09/15 09:01:59 deraadt Exp $ */
+/* $OpenBSD: fpu.c,v 1.5 2003/07/02 21:30:12 drahn Exp $ */
/* $NetBSD: fpu.c,v 1.1 1996/09/30 16:34:44 ws Exp $ */
/*
@@ -42,7 +42,7 @@ void
enable_fpu(p)
struct proc *p;
{
- int msr, scratch;
+ int msr;
struct pcb *pcb = &p->p_addr->u_pcb;
struct trapframe *tf = trapframe(p);
@@ -51,8 +51,10 @@ enable_fpu(p)
bzero(&pcb->pcb_fpu, sizeof pcb->pcb_fpu);
pcb->pcb_flags |= PCB_FPU;
}
- asm volatile ("mfmsr %0; ori %1,%0,%2; mtmsr %1; isync"
- : "=r"(msr), "=r"(scratch) : "K"(PSL_FP));
+ msr = ppc_mfmsr();
+ ppc_mtmsr(msr | PSL_FP);
+ __asm volatile("isync");
+
asm volatile ("lfd 0,0(%0); mtfsf 0xff,0" :: "b"(&pcb->pcb_fpu.fpcsr));
asm ("lfd 0,0(%0);"
"lfd 1,8(%0);"
@@ -86,18 +88,21 @@ enable_fpu(p)
"lfd 29,232(%0);"
"lfd 30,240(%0);"
"lfd 31,248(%0)" :: "b"(&pcb->pcb_fpu.fpr[0]));
- asm volatile ("mtmsr %0; isync" :: "r"(msr));
+ ppc_mtmsr(msr);
+ __asm volatile("isync");
}
void
save_fpu(p)
struct proc *p;
{
- int msr, scratch;
+ int msr;
struct pcb *pcb = &p->p_addr->u_pcb;
- asm volatile ("mfmsr %0; ori %1,%0,%2; mtmsr %1; isync"
- : "=r"(msr), "=r"(scratch) : "K"(PSL_FP));
+ msr = ppc_mfmsr();
+ ppc_mtmsr(msr | PSL_FP);
+ __asm volatile("isync");
+
asm ("stfd 0,0(%0);"
"stfd 1,8(%0);"
"stfd 2,16(%0);"
@@ -131,5 +136,6 @@ save_fpu(p)
"stfd 30,240(%0);"
"stfd 31,248(%0)" :: "b"(&pcb->pcb_fpu.fpr[0]));
asm volatile ("mffs 0; stfd 0,0(%0)" :: "b"(&pcb->pcb_fpu.fpcsr));
- asm volatile ("mtmsr %0; isync" :: "r"(msr));
+ ppc_mtmsr(msr);
+ __asm volatile("isync");
}
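
Isolated from the fpu.c hunks above, the rewritten MSR sequence is (a
sketch using only names the diff introduces): set PSL_FP so
floating-point instructions may execute in kernel mode, issue isync so
the MSR write takes effect before the first lfd/stfd, then restore the
saved MSR:

	u_int32_t msr;

	msr = ppc_mfmsr();		/* remember the current MSR */
	ppc_mtmsr(msr | PSL_FP);	/* enable FP in kernel mode */
	__asm volatile ("isync");	/* synchronize before touching FPRs */
	/* ... lfd/stfd loads or stores of fpr[0..31] and fpcsr ... */
	ppc_mtmsr(msr);			/* put the original MSR back */
	__asm volatile ("isync");

The old code did mfmsr/ori/mtmsr/isync inside one asm statement; the
split form relies on volatile asm statements keeping their relative
order, so the register loads and stores cannot be scheduled ahead of
the mtmsr/isync pair.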
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index d9e1baaabae..74e04ec9758 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.81 2003/06/03 01:35:30 drahn Exp $ */
+/* $OpenBSD: pmap.c,v 1.82 2003/07/02 21:30:12 drahn Exp $ */
/*
* Copyright (c) 2001, 2002 Dale Rahn.
@@ -527,9 +527,8 @@ pmap_enter(pm, va, pa, prot, flags)
* and this pmap is current active pmap
*/
if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- asm volatile ("mtsrin %0,%1"
- :: "r"(pm->pm_sr[sn]),
- "r"(sn << ADDR_SR_SHIFT) );
+ ppc_mtsrin(pm->pm_sr[sn],
+ sn << ADDR_SR_SHIFT);
}
if (pattr != NULL)
*pattr |= (PTE_EXE >> ATTRSHIFT);
@@ -656,9 +655,8 @@ pmap_remove_pg(pmap_t pm, vaddr_t va)
* and this pmap is current active pmap
*/
if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- asm volatile ("mtsrin %0,%1"
- :: "r"(pm->pm_sr[sn]),
- "r"(sn << ADDR_SR_SHIFT) );
+ ppc_mtsrin(pm->pm_sr[sn],
+ sn << ADDR_SR_SHIFT);
}
}
@@ -741,9 +739,8 @@ _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
* and this pmap is current active pmap
*/
if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- asm volatile ("mtsrin %0,%1"
- :: "r"(pm->pm_sr[sn]),
- "r"(sn << ADDR_SR_SHIFT) );
+ ppc_mtsrin(pm->pm_sr[sn],
+ sn << ADDR_SR_SHIFT);
}
}
@@ -805,9 +802,8 @@ pmap_kremove_pg(vaddr_t va)
* and this pmap is current active pmap
*/
if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- asm volatile ("mtsrin %0,%1"
- :: "r"(pm->pm_sr[sn]),
- "r"(sn << ADDR_SR_SHIFT) );
+ ppc_mtsrin(pm->pm_sr[sn],
+ sn << ADDR_SR_SHIFT);
}
}
@@ -1464,8 +1460,7 @@ pmap_bootstrap(u_int kernelstart, u_int kernelend)
#endif
for (i = 0; i < 16; i++) {
pmap_kernel()->pm_sr[i] = (KERNEL_SEG0 + i) | SR_NOEXEC;
- asm volatile ("mtsrin %0,%1"
- :: "r"( KERNEL_SEG0 + i), "r"(i << ADDR_SR_SHIFT) );
+ ppc_mtsrin(KERNEL_SEG0 + i, i << ADDR_SR_SHIFT);
}
asm volatile ("sync; mtsdr1 %0; isync"
:: "r"((u_int)pmap_ptable | (pmap_ptab_mask >> 10)));
diff --git a/sys/arch/powerpc/powerpc/trap.c b/sys/arch/powerpc/powerpc/trap.c
index 74f0a3f076e..cb8f69c20bc 100644
--- a/sys/arch/powerpc/powerpc/trap.c
+++ b/sys/arch/powerpc/powerpc/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.58 2003/03/04 19:11:37 deraadt Exp $ */
+/* $OpenBSD: trap.c,v 1.59 2003/07/02 21:30:12 drahn Exp $ */
/* $NetBSD: trap.c,v 1.3 1996/10/13 03:31:37 christos Exp $ */
/*
@@ -114,17 +114,15 @@ save_vec(struct proc *p)
struct pcb *pcb = &p->p_addr->u_pcb;
struct vreg *pcb_vr = pcb->pcb_vr;
u_int32_t oldmsr, msr;
- u_int32_t tmp;
/* first we enable vector so that we dont throw an exception
* in kernel mode
*/
- __asm__ volatile ("mfmsr %0" : "=r" (oldmsr));
+ oldmsr = ppc_mfmsr();
msr = oldmsr | PSL_VEC;
- __asm__ volatile ("mtmsr %0" :: "r" (msr));
+ ppc_mtmsr(msr);
__asm__ volatile ("sync;isync");
- __asm__ volatile ("mfspr %0, 256" : "=r" (tmp));
- pcb->pcb_vr->vrsave = tmp;
+ pcb->pcb_vr->vrsave = ppc_mfvrsave();
#define STR(x) #x
#define SAVE_VEC_REG(reg, addr) \
@@ -166,7 +164,7 @@ save_vec(struct proc *p)
SAVE_VEC_REG(0,&pcb_vr->vscr);
/* fix kernel msr back */
- __asm__ volatile ("mfmsr %0" :: "r" (oldmsr));
+ ppc_mtmsr(oldmsr);
}
/*
@@ -178,7 +176,6 @@ enable_vec(struct proc *p)
struct pcb *pcb = &p->p_addr->u_pcb;
struct vreg *pcb_vr = pcb->pcb_vr;
u_int32_t oldmsr, msr;
- u_int32_t tmp;
/* If this is the very first altivec instruction executed
* by this process, create a context.
@@ -191,9 +188,9 @@ enable_vec(struct proc *p)
/* first we enable vector so that we dont throw an exception
* in kernel mode
*/
- __asm__ volatile ("mfmsr %0" : "=r" (oldmsr));
+ oldmsr = ppc_mfmsr();
msr = oldmsr | PSL_VEC;
- __asm__ volatile ("mtmsr %0" :: "r" (msr));
+ ppc_mtmsr(msr);
__asm__ volatile ("sync;isync");
#define LOAD_VEC_REG(reg, addr) \
@@ -201,8 +198,7 @@ enable_vec(struct proc *p)
LOAD_VEC_REG(0, &pcb_vr->vscr);
__asm__ volatile ("mtvscr 0");
- tmp = pcb_vr->vrsave;
- __asm__ volatile ("mtspr 256, %0" :: "r" (tmp));
+ ppc_mtvrsave(pcb_vr->vrsave);
LOAD_VEC_REG(0, &pcb_vr->vreg[0]);
LOAD_VEC_REG(1, &pcb_vr->vreg[1]);
@@ -238,7 +234,7 @@ enable_vec(struct proc *p)
LOAD_VEC_REG(31, &pcb_vr->vreg[31]);
/* fix kernel msr back */
- __asm__ volatile ("mfmsr %0" :: "r" (oldmsr));
+ ppc_mtmsr(oldmsr);
}
#endif /* ALTIVEC */
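
One detail worth flagging (an observation about the diff, not part of
the commit text): the two removed "fix kernel msr back" lines in trap.c
used mfmsr with an input-only operand, so the saved MSR was never
actually written back and the kernel continued running with PSL_VEC
set; routing the restore through ppc_mtmsr() corrects this, and is
presumably among the "couple of bugs" the log message mentions. The
fixed save/restore shape is:

	u_int32_t oldmsr, msr;

	oldmsr = ppc_mfmsr();		/* remember the current MSR */
	msr = oldmsr | PSL_VEC;
	ppc_mtmsr(msr);			/* enable AltiVec in kernel mode */
	__asm__ volatile ("sync;isync");
	/* ... save or load vector registers, vscr, and vrsave ... */
	ppc_mtmsr(oldmsr);		/* now genuinely restores the MSR */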