author     Dale Rahn <drahn@cvs.openbsd.org>    2003-07-02 21:30:14 +0000
committer  Dale Rahn <drahn@cvs.openbsd.org>    2003-07-02 21:30:14 +0000
commit     5035c30707e318455efb4630f1bade1beb524b25 (patch)
tree       dd8de754f63df3af8948c3c6d5bf201a7d5e354a /sys
parent     a3cd3a488774228f4f17f1843c18df95f43a9089 (diff)
Reduce the amount of asm code in powerpc/macppc by replacing it with
inlined functions; this improves readability and fixes a couple of bugs. ok miod@
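The heart of the change is the set of static inline SPR/MSR accessors added to
powerpc/include/cpu.h; the per-file diffs below just switch callers over to them.
A condensed sketch of that pattern, trimmed to two representative SPRs (the real
header instantiates FUNC_SPR for several dozen registers, and PSL_EE comes from
<machine/psl.h>):

/*
 * Sketch of the helpers added to <powerpc/include/cpu.h>, not the
 * complete header.  FUNC_SPR(n, name) expands to a ppc_mf<name>()/
 * ppc_mt<name>() pair for one SPR; the interrupt enable/disable
 * helpers are built on plain wrappers around mfmsr/mtmsr.
 */
#include <sys/types.h>
#include <machine/psl.h>	/* PSL_EE */

#define FUNC_SPR(n, name)						\
static __inline u_int32_t ppc_mf ## name (void)			\
{									\
	u_int32_t ret;							\
	__asm __volatile ("mfspr %0," # n : "=r" (ret));		\
	return ret;							\
}									\
static __inline void ppc_mt ## name (u_int32_t val)			\
{									\
	__asm __volatile ("mtspr " # n ",%0" :: "r" (val));		\
}

FUNC_SPR(287, pvr)		/* ppc_mfpvr()/ppc_mtpvr() */
FUNC_SPR(1017, l2cr)		/* ppc_mfl2cr()/ppc_mtl2cr() */

static __inline u_int32_t
ppc_mfmsr(void)
{
	u_int32_t ret;

	__asm __volatile ("mfmsr %0" : "=r" (ret));
	return ret;
}

static __inline void
ppc_mtmsr(u_int32_t val)
{
	__asm __volatile ("mtmsr %0" :: "r" (val));
}

/*
 * Callers that used to open-code "mfmsr; andi.; mtmsr" now do:
 *	s = ppc_intr_disable();
 *	...critical section...
 *	ppc_intr_enable(s);
 */
static __inline int
ppc_intr_disable(void)
{
	u_int32_t emsr, dmsr;

	emsr = ppc_mfmsr();
	dmsr = emsr & ~PSL_EE;
	ppc_mtmsr(dmsr);
	return (emsr & PSL_EE);
}

static __inline void
ppc_intr_enable(int enable)
{
	u_int32_t msr;

	if (enable != 0) {
		msr = ppc_mfmsr();
		msr |= PSL_EE;
		ppc_mtmsr(msr);
	}
}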
Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/macppc/dev/if_bm.c            4
-rw-r--r--  sys/arch/macppc/dev/macintr.c         10
-rw-r--r--  sys/arch/macppc/dev/openpic.c         10
-rw-r--r--  sys/arch/macppc/macppc/clock.c        33
-rw-r--r--  sys/arch/macppc/macppc/cpu.c          28
-rw-r--r--  sys/arch/macppc/macppc/machdep.c      51
-rw-r--r--  sys/arch/macppc/macppc/ofw_machdep.c  12
-rw-r--r--  sys/arch/powerpc/include/cpu.h        94
-rw-r--r--  sys/arch/powerpc/powerpc/fpu.c        24
-rw-r--r--  sys/arch/powerpc/powerpc/pmap.c       25
-rw-r--r--  sys/arch/powerpc/powerpc/trap.c       22
11 files changed, 190 insertions, 123 deletions
diff --git a/sys/arch/macppc/dev/if_bm.c b/sys/arch/macppc/dev/if_bm.c
index 58b6153c375..abdc5765f01 100644
--- a/sys/arch/macppc/dev/if_bm.c
+++ b/sys/arch/macppc/dev/if_bm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_bm.c,v 1.13 2002/09/15 09:01:58 deraadt Exp $ */
+/* $OpenBSD: if_bm.c,v 1.14 2003/07/02 21:30:13 drahn Exp $ */
/* $NetBSD: if_bm.c,v 1.1 1999/01/01 01:27:52 tsubai Exp $ */
/*-
@@ -400,7 +400,7 @@ bmac_init(sc)
bmac_set_bits(sc, XCVRIF, ClkBit|SerialMode|COLActiveLow);
}
- __asm __volatile ("mftb %0" : "=r"(tb));
+ tb = ppc_mftbl();
bmac_write_reg(sc, RSEED, tb);
bmac_set_bits(sc, XIFC, TxOutputEnable);
bmac_read_reg(sc, PAREG);
diff --git a/sys/arch/macppc/dev/macintr.c b/sys/arch/macppc/dev/macintr.c
index ba086122650..7268676fc4d 100644
--- a/sys/arch/macppc/dev/macintr.c
+++ b/sys/arch/macppc/dev/macintr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: macintr.c,v 1.17 2003/06/02 23:27:49 millert Exp $ */
+/* $OpenBSD: macintr.c,v 1.18 2003/07/02 21:30:13 drahn Exp $ */
/*-
* Copyright (c) 1995 Per Fogelstrom
@@ -568,7 +568,7 @@ mac_intr_do_pending_int()
int irq;
int pcpl;
int hwpend;
- int emsr, dmsr;
+ int s;
static int processing;
if (processing)
@@ -576,9 +576,7 @@ mac_intr_do_pending_int()
processing = 1;
pcpl = splhigh(); /* Turn off all */
- asm volatile("mfmsr %0" : "=r"(emsr));
- dmsr = emsr & ~PSL_EE;
- asm volatile("mtmsr %0" :: "r"(dmsr));
+ s = ppc_intr_disable();
hwpend = ipending & ~pcpl; /* Do now unmasked pendings */
imen_m &= ~hwpend;
@@ -617,7 +615,7 @@ mac_intr_do_pending_int()
} while (ipending & (SINT_NET|SINT_CLOCK|SINT_TTY) & ~cpl);
ipending &= pcpl;
cpl = pcpl; /* Don't use splx... we are here already! */
- asm volatile("mtmsr %0" :: "r"(emsr));
+ ppc_intr_enable(s);
processing = 0;
}
diff --git a/sys/arch/macppc/dev/openpic.c b/sys/arch/macppc/dev/openpic.c
index 04deaeb1332..f42d5681472 100644
--- a/sys/arch/macppc/dev/openpic.c
+++ b/sys/arch/macppc/dev/openpic.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: openpic.c,v 1.20 2003/06/02 23:27:49 millert Exp $ */
+/* $OpenBSD: openpic.c,v 1.21 2003/07/02 21:30:13 drahn Exp $ */
/*-
* Copyright (c) 1995 Per Fogelstrom
@@ -474,7 +474,7 @@ openpic_do_pending_int()
int irq;
int pcpl;
int hwpend;
- int emsr, dmsr;
+ int s;
static int processing;
if (processing)
@@ -482,9 +482,7 @@ openpic_do_pending_int()
processing = 1;
pcpl = splhigh(); /* Turn off all */
- asm volatile("mfmsr %0" : "=r"(emsr));
- dmsr = emsr & ~PSL_EE;
- asm volatile("mtmsr %0" :: "r"(dmsr));
+ s = ppc_intr_disable();
hwpend = ipending & ~pcpl; /* Do now unmasked pendings */
imen_o &= ~hwpend;
@@ -523,7 +521,7 @@ openpic_do_pending_int()
} while (ipending & (SINT_NET|SINT_CLOCK|SINT_TTY) & ~cpl);
ipending &= pcpl;
cpl = pcpl; /* Don't use splx... we are here already! */
- asm volatile("mtmsr %0" :: "r"(emsr));
+ ppc_intr_enable(s);
processing = 0;
}
diff --git a/sys/arch/macppc/macppc/clock.c b/sys/arch/macppc/macppc/clock.c
index 44ae7ace03c..0dfdcb4f0fd 100644
--- a/sys/arch/macppc/macppc/clock.c
+++ b/sys/arch/macppc/macppc/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.5 2002/09/15 09:01:58 deraadt Exp $ */
+/* $OpenBSD: clock.c,v 1.6 2003/07/02 21:30:13 drahn Exp $ */
/* $NetBSD: clock.c,v 1.1 1996/09/30 16:34:40 ws Exp $ */
/*
@@ -39,6 +39,7 @@
#include <machine/autoconf.h>
#include <machine/pio.h>
#include <machine/intr.h>
+#include <machine/powerpc.h>
#include <dev/ofw/openfirm.h>
void resettodr(void);
@@ -231,7 +232,7 @@ decr_intr(frame)
for (nticks = 0; tick < 0; nticks++)
tick += ticks_per_intr;
- asm volatile ("mtdec %0" :: "r"(tick));
+ ppc_mtdec(tick);
/*
* lasttb is used during microtime. Set it to the virtual
* start of this tick interval.
@@ -267,12 +268,11 @@ decr_intr(frame)
void
cpu_initclocks()
{
- int msr, scratch;
- asm volatile ("mfmsr %0; andi. %1, %0, %2; mtmsr %1"
- : "=r"(msr), "=r"(scratch) : "K"((u_short)~PSL_EE));
- asm volatile ("mftb %0" : "=r"(lasttb));
- asm volatile ("mtdec %0" :: "r"(ticks_per_intr));
- asm volatile ("mtmsr %0" :: "r"(msr));
+ int s;
+ s = ppc_intr_disable();
+ lasttb = ppc_mftbl();
+ ppc_mtdec(ticks_per_intr);
+ ppc_intr_enable(s);
}
void
@@ -280,7 +280,7 @@ calc_delayconst()
{
int qhandle, phandle;
char name[32];
- int msr, scratch;
+ int s;
/*
* Get this info during autoconf? XXX
@@ -293,12 +293,10 @@ calc_delayconst()
/*
* Should check for correct CPU here? XXX
*/
- asm volatile ("mfmsr %0; andi. %1, %0, %2; mtmsr %1"
- : "=r"(msr), "=r"(scratch)
- : "K"((u_short)~PSL_EE));
+ s = ppc_intr_disable();
ns_per_tick = 1000000000 / ticks_per_sec;
ticks_per_intr = ticks_per_sec / hz;
- asm volatile ("mtmsr %0" :: "r"(msr));
+ ppc_intr_enable(s);
break;
}
if ((phandle = OF_child(qhandle)))
@@ -333,14 +331,13 @@ microtime(tvp)
{
u_long tb;
u_long ticks;
- int msr, scratch;
+ int s;
- asm volatile ("mfmsr %0; andi. %1,%0,%2; mtmsr %1"
- : "=r"(msr), "=r"(scratch) : "K"((u_short)~PSL_EE));
- asm ("mftb %0" : "=r"(tb));
+ s = ppc_intr_disable();
+ tb = ppc_mftbl();
ticks = (tb - lasttb) * ns_per_tick;
*tvp = time;
- asm volatile ("mtmsr %0" :: "r"(msr));
+ ppc_intr_enable(s);
ticks /= 1000;
tvp->tv_usec += ticks;
while (tvp->tv_usec >= 1000000) {
diff --git a/sys/arch/macppc/macppc/cpu.c b/sys/arch/macppc/macppc/cpu.c
index ab78d9875b0..19fd81436de 100644
--- a/sys/arch/macppc/macppc/cpu.c
+++ b/sys/arch/macppc/macppc/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.12 2003/06/23 01:38:05 drahn Exp $ */
+/* $OpenBSD: cpu.c,v 1.13 2003/07/02 21:30:13 drahn Exp $ */
/*
* Copyright (c) 1997 Per Fogelstrom
@@ -110,7 +110,7 @@ cpuattach(parent, dev, aux)
int qhandle, phandle;
unsigned int clock_freq = 0;
- __asm__ ("mfpvr %0" : "=r"(pvr));
+ pvr = ppc_mfpvr();
cpu = pvr >> 16;
switch (cpu) {
case MPC601:
@@ -187,7 +187,7 @@ cpuattach(parent, dev, aux)
}
/* power savings mode */
- __asm __volatile ("mfspr %0,1008" : "=r" (hid0));
+ hid0 = ppc_mfhid0();
switch (cpu) {
case MPC603:
case MPC603e:
@@ -211,7 +211,7 @@ cpuattach(parent, dev, aux)
if (cpu == MPC7450 && (pvr & 0xffff) < 0x0200)
hid0 &= ~HID0_BTIC;
}
- __asm __volatile ("mtspr 1008,%0" :: "r" (hid0));
+ ppc_mthid0(hid0);
/* if processor is G3 or G4, configure l2 cache */
if ( (cpu == MPC750) || (cpu == MPC7400) || (cpu == IBM750FX)
@@ -223,8 +223,7 @@ cpuattach(parent, dev, aux)
}
-#define L2CR 1017
-
+/* L2CR bit definitions */
#define L2CR_L2E 0x80000000 /* 0: L2 enable */
#define L2CR_L2PE 0x40000000 /* 1: L2 data parity enable */
#define L2CR_L2SIZ 0x30000000 /* 2-3: L2 size */
@@ -257,14 +256,14 @@ cpuattach(parent, dev, aux)
#define L2CR_L2DF 0x00004000 /* 17: L2 differential clock. */
#define L2CR_L2BYP 0x00002000 /* 18: L2 DLL bypass. */
#define L2CR_L2IP 0x00000001 /* 31: L2 global invalidate in progress
- (read only). */
+ (read only). */
#ifdef L2CR_CONFIG
u_int l2cr_config = L2CR_CONFIG;
#else
u_int l2cr_config = 0;
#endif
-#define SPR_L3CR 0x3fa /* .6. L3 Control Register */
+/* L3CR bit definitions */
#define L3CR_L3E 0x80000000 /* 0: L3 enable */
#define L3CR_L3SIZ 0x10000000 /* 3: L3 size (0=1MB, 1=2MB) */
@@ -273,29 +272,29 @@ config_l2cr(int cpu)
{
u_int l2cr, x;
- __asm __volatile ("mfspr %0, 1017" : "=r"(l2cr));
+ l2cr = ppc_mfl2cr();
/*
* Configure L2 cache if not enabled.
*/
if ((l2cr & L2CR_L2E) == 0 && l2cr_config != 0) {
l2cr = l2cr_config;
- asm volatile ("mtspr 1017,%0" :: "r"(l2cr));
+ ppc_mtl2cr(l2cr);
/* Wait for L2 clock to be stable (640 L2 clocks). */
delay(100);
/* Invalidate all L2 contents. */
l2cr |= L2CR_L2I;
- asm volatile ("mtspr 1017,%0" :: "r"(l2cr));
+ ppc_mtl2cr(l2cr);
do {
- asm volatile ("mfspr %0, 1017" : "=r"(x));
+ x = ppc_mfl2cr();
} while (x & L2CR_L2IP);
/* Enable L2 cache. */
l2cr &= ~L2CR_L2I;
l2cr |= L2CR_L2E;
- asm volatile ("mtspr 1017,%0" :: "r"(l2cr));
+ ppc_mtl2cr(l2cr);
}
if (l2cr & L2CR_L2E) {
@@ -304,8 +303,7 @@ config_l2cr(int cpu)
printf(": 256KB L2 cache");
- __asm__ volatile("mfspr %0, %1" : "=r"(l3cr) :
- "n"(SPR_L3CR) );
+ l3cr = ppc_mfl3cr();
if (l3cr & L3CR_L3E)
printf(", %cMB L3 cache",
l3cr & L3CR_L3SIZ ? '2' : '1');
diff --git a/sys/arch/macppc/macppc/machdep.c b/sys/arch/macppc/macppc/machdep.c
index ac252e14dbb..3513c6fefab 100644
--- a/sys/arch/macppc/macppc/machdep.c
+++ b/sys/arch/macppc/macppc/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.52 2003/07/02 21:23:35 drahn Exp $ */
+/* $OpenBSD: machdep.c,v 1.53 2003/07/02 21:30:13 drahn Exp $ */
/* $NetBSD: machdep.c,v 1.4 1996/10/16 19:33:11 ws Exp $ */
/*
@@ -219,14 +219,14 @@ where = 3;
* Initialize BAT registers to unmapped to not generate
* overlapping mappings below.
*/
- __asm__ volatile ("mtibatu 0,%0" :: "r"(0));
- __asm__ volatile ("mtibatu 1,%0" :: "r"(0));
- __asm__ volatile ("mtibatu 2,%0" :: "r"(0));
- __asm__ volatile ("mtibatu 3,%0" :: "r"(0));
- __asm__ volatile ("mtdbatu 0,%0" :: "r"(0));
- __asm__ volatile ("mtdbatu 1,%0" :: "r"(0));
- __asm__ volatile ("mtdbatu 2,%0" :: "r"(0));
- __asm__ volatile ("mtdbatu 3,%0" :: "r"(0));
+ ppc_mtibat0u(0);
+ ppc_mtibat1u(0);
+ ppc_mtibat2u(0);
+ ppc_mtibat3u(0);
+ ppc_mtdbat0u(0);
+ ppc_mtdbat1u(0);
+ ppc_mtdbat2u(0);
+ ppc_mtdbat3u(0);
/*
* Set up initial BAT table to only map the lowest 256 MB area
@@ -241,13 +241,12 @@ where = 3;
* registers were cleared above.
*/
/* IBAT0 used for initial 256 MB segment */
- __asm__ volatile ("mtibatl 0,%0; mtibatu 0,%1"
- :: "r"(battable[0].batl), "r"(battable[0].batu));
- /* DBAT0 used similar */
- __asm__ volatile ("mtdbatl 0,%0; mtdbatu 0,%1"
- :: "r"(battable[0].batl), "r"(battable[0].batu));
-
+ ppc_mtibat0l(battable[0].batl);
+ ppc_mtibat0u(battable[0].batu);
+ /* DBAT0 used similar */
+ ppc_mtdbat0l(battable[0].batl);
+ ppc_mtdbat0u(battable[0].batu);
/*
* Set up trap vectors
@@ -322,19 +321,16 @@ where = 3;
/* use BATs to map 1GB memory, no pageable BATs now */
if (physmem > btoc(0x10000000)) {
- __asm__ volatile ("mtdbatl 1,%0; mtdbatu 1,%1"
- :: "r"(BATL(0x10000000, BAT_M)),
- "r"(BATU(0x10000000)));
+ ppc_mtdbat1l(BATL(0x10000000, BAT_M));
+ ppc_mtdbat1u(BATU(0x10000000));
}
if (physmem > btoc(0x20000000)) {
- __asm__ volatile ("mtdbatl 2,%0; mtdbatu 2,%1"
- :: "r"(BATL(0x20000000, BAT_M)),
- "r"(BATU(0x20000000)));
+ ppc_mtdbat2l(BATL(0x20000000, BAT_M));
+ ppc_mtdbat2u(BATU(0x20000000));
}
if (physmem > btoc(0x30000000)) {
- __asm__ volatile ("mtdbatl 3,%0; mtdbatu 3,%1"
- :: "r"(BATL(0x30000000, BAT_M)),
- "r"(BATU(0x30000000)));
+ ppc_mtdbat3l(BATL(0x30000000, BAT_M));
+ ppc_mtdbat3u(BATU(0x30000000));
}
#if 0
/* now that we know physmem size, map physical memory with BATs */
@@ -510,13 +506,14 @@ install_extint(handler)
if (offset > 0x1ffffff)
panic("install_extint: too far away");
#endif
- __asm__ volatile ("mfmsr %0; andi. %1, %0, %2; mtmsr %1"
- : "=r"(omsr), "=r"(msr) : "K"((u_short)~PSL_EE));
+ omsr = ppc_mfmsr();
+ msr = omsr & ~PSL_EE;
+ ppc_mtmsr(msr);
extint_call = (extint_call & 0xfc000003) | offset;
bcopy(&extint, (void *)EXC_EXI, (size_t)&extsize);
syncicache((void *)&extint_call, sizeof extint_call);
syncicache((void *)EXC_EXI, (int)&extsize);
- __asm__ volatile ("mtmsr %0" :: "r"(omsr));
+ ppc_mtmsr(omsr);
}
/*
diff --git a/sys/arch/macppc/macppc/ofw_machdep.c b/sys/arch/macppc/macppc/ofw_machdep.c
index 0e450cf34c7..d2017b84bd0 100644
--- a/sys/arch/macppc/macppc/ofw_machdep.c
+++ b/sys/arch/macppc/macppc/ofw_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ofw_machdep.c,v 1.18 2003/06/16 00:37:35 drahn Exp $ */
+/* $OpenBSD: ofw_machdep.c,v 1.19 2003/07/02 21:30:13 drahn Exp $ */
/* $NetBSD: ofw_machdep.c,v 1.1 1996/09/30 16:34:50 ws Exp $ */
/*
@@ -273,17 +273,15 @@ void
ofw_do_pending_int()
{
int pcpl;
- int emsr, dmsr;
+ int s;
+
static int processing;
if(processing)
return;
processing = 1;
- __asm__ volatile("mfmsr %0" : "=r"(emsr));
- dmsr = emsr & ~PSL_EE;
- __asm__ volatile("mtmsr %0" :: "r"(dmsr));
-
+ s = ppc_intr_disable();
pcpl = splhigh(); /* Turn off all */
if((ipending & SINT_CLOCK) && ((pcpl & imask[IPL_CLOCK]) == 0)) {
@@ -299,7 +297,7 @@ ofw_do_pending_int()
}
ipending &= pcpl;
cpl = pcpl; /* Don't use splx... we are here already! */
- __asm__ volatile("mtmsr %0" :: "r"(emsr));
+ ppc_intr_enable(s);
processing = 0;
}
diff --git a/sys/arch/powerpc/include/cpu.h b/sys/arch/powerpc/include/cpu.h
index 9d4e4705c61..bc4607e7a31 100644
--- a/sys/arch/powerpc/include/cpu.h
+++ b/sys/arch/powerpc/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.14 2003/07/02 21:23:35 drahn Exp $ */
+/* $OpenBSD: cpu.h,v 1.15 2003/07/02 21:30:12 drahn Exp $ */
/* $NetBSD: cpu.h,v 1.1 1996/09/30 16:34:21 ws Exp $ */
/*
@@ -100,6 +100,90 @@ invdcache(void *from, int len)
__asm__ __volatile__ ("sync");
}
+#define FUNC_SPR(n, name) \
+static __inline u_int32_t ppc_mf ## name (void) \
+{ \
+ int ret; \
+ __asm __volatile ("mfspr %0," # n : "=r" (ret)); \
+ return ret; \
+} \
+static __inline void ppc_mt ## name (u_int32_t val) \
+{ \
+ __asm __volatile ("mtspr "# n ",%0" :: "r" (val)); \
+} \
+
+FUNC_SPR(0, mq)
+FUNC_SPR(1, xer)
+FUNC_SPR(4, rtcu)
+FUNC_SPR(5, rtcl)
+FUNC_SPR(8, lr)
+FUNC_SPR(9, ctr)
+FUNC_SPR(18, dsisr)
+FUNC_SPR(19, dar)
+FUNC_SPR(22, dec)
+FUNC_SPR(25, sdr1)
+FUNC_SPR(26, srr0)
+FUNC_SPR(27, srr1)
+FUNC_SPR(256, vrsave)
+FUNC_SPR(272, sprg0)
+FUNC_SPR(273, sprg1)
+FUNC_SPR(274, sprg2)
+FUNC_SPR(275, sprg3)
+FUNC_SPR(282, ear)
+FUNC_SPR(287, pvr)
+FUNC_SPR(528, ibat0u)
+FUNC_SPR(529, ibat0l)
+FUNC_SPR(530, ibat1u)
+FUNC_SPR(531, ibat1l)
+FUNC_SPR(532, ibat2u)
+FUNC_SPR(533, ibat2l)
+FUNC_SPR(534, ibat3u)
+FUNC_SPR(535, ibat3l)
+FUNC_SPR(536, dbat0u)
+FUNC_SPR(537, dbat0l)
+FUNC_SPR(538, dbat1u)
+FUNC_SPR(539, dbat1l)
+FUNC_SPR(540, dbat2u)
+FUNC_SPR(541, dbat2l)
+FUNC_SPR(542, dbat3u)
+FUNC_SPR(543, dbat3l)
+FUNC_SPR(1008, hid0)
+FUNC_SPR(1009, hid1)
+FUNC_SPR(1010, iabr)
+FUNC_SPR(1017, l2cr)
+FUNC_SPR(1018, l3cr)
+FUNC_SPR(1013, dabr)
+FUNC_SPR(1023, pir)
+
+static __inline u_int32_t
+ppc_mftbl (void)
+{
+ int ret;
+ __asm __volatile ("mftb %0" : "=r" (ret));
+ return ret;
+}
+
+static __inline u_int32_t
+ppc_mfmsr (void)
+{
+ int ret;
+ __asm __volatile ("mfmsr %0" : "=r" (ret));
+ return ret;
+}
+
+static __inline void
+ppc_mtmsr (u_int32_t val)
+{
+ __asm __volatile ("mtmsr %0" :: "r" (val));
+}
+
+static __inline void
+ppc_mtsrin(u_int32_t val, u_int32_t sn_shifted)
+{
+ asm volatile ("mtsrin %0,%1" :: "r"(val), "r"(sn_shifted) );
+
+}
+
/*
* General functions to enable and disable interrupts
* without having inlined assembly code in many functions.
@@ -109,9 +193,9 @@ ppc_intr_enable(int enable)
{
u_int32_t msr;
if (enable != 0) {
- __asm__ volatile("mfmsr %0" : "=r"(msr));
+ msr = ppc_mfmsr();
msr |= PSL_EE;
- __asm__ volatile("mtmsr %0" :: "r"(msr));
+ ppc_mtmsr(msr);
}
}
@@ -119,9 +203,9 @@ static __inline int
ppc_intr_disable(void)
{
u_int32_t emsr, dmsr;
- __asm__ volatile("mfmsr %0" : "=r"(emsr));
+ emsr = ppc_mfmsr();
dmsr = emsr & ~PSL_EE;
- __asm__ volatile("mtmsr %0" :: "r"(dmsr));
+ ppc_mtmsr(dmsr);
return (emsr & PSL_EE);
}
#endif /* _POWERPC_CPU_H_ */
diff --git a/sys/arch/powerpc/powerpc/fpu.c b/sys/arch/powerpc/powerpc/fpu.c
index b2bc9600b9f..b7338c2d31a 100644
--- a/sys/arch/powerpc/powerpc/fpu.c
+++ b/sys/arch/powerpc/powerpc/fpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: fpu.c,v 1.4 2002/09/15 09:01:59 deraadt Exp $ */
+/* $OpenBSD: fpu.c,v 1.5 2003/07/02 21:30:12 drahn Exp $ */
/* $NetBSD: fpu.c,v 1.1 1996/09/30 16:34:44 ws Exp $ */
/*
@@ -42,7 +42,7 @@ void
enable_fpu(p)
struct proc *p;
{
- int msr, scratch;
+ int msr;
struct pcb *pcb = &p->p_addr->u_pcb;
struct trapframe *tf = trapframe(p);
@@ -51,8 +51,10 @@ enable_fpu(p)
bzero(&pcb->pcb_fpu, sizeof pcb->pcb_fpu);
pcb->pcb_flags |= PCB_FPU;
}
- asm volatile ("mfmsr %0; ori %1,%0,%2; mtmsr %1; isync"
- : "=r"(msr), "=r"(scratch) : "K"(PSL_FP));
+ msr = ppc_mfmsr();
+ ppc_mtmsr(msr | PSL_FP);
+ __asm volatile("isync");
+
asm volatile ("lfd 0,0(%0); mtfsf 0xff,0" :: "b"(&pcb->pcb_fpu.fpcsr));
asm ("lfd 0,0(%0);"
"lfd 1,8(%0);"
@@ -86,18 +88,21 @@ enable_fpu(p)
"lfd 29,232(%0);"
"lfd 30,240(%0);"
"lfd 31,248(%0)" :: "b"(&pcb->pcb_fpu.fpr[0]));
- asm volatile ("mtmsr %0; isync" :: "r"(msr));
+ ppc_mtmsr(msr);
+ __asm volatile("isync");
}
void
save_fpu(p)
struct proc *p;
{
- int msr, scratch;
+ int msr;
struct pcb *pcb = &p->p_addr->u_pcb;
- asm volatile ("mfmsr %0; ori %1,%0,%2; mtmsr %1; isync"
- : "=r"(msr), "=r"(scratch) : "K"(PSL_FP));
+ msr = ppc_mfmsr();
+ ppc_mtmsr(msr | PSL_FP);
+ __asm volatile("isync");
+
asm ("stfd 0,0(%0);"
"stfd 1,8(%0);"
"stfd 2,16(%0);"
@@ -131,5 +136,6 @@ save_fpu(p)
"stfd 30,240(%0);"
"stfd 31,248(%0)" :: "b"(&pcb->pcb_fpu.fpr[0]));
asm volatile ("mffs 0; stfd 0,0(%0)" :: "b"(&pcb->pcb_fpu.fpcsr));
- asm volatile ("mtmsr %0; isync" :: "r"(msr));
+ ppc_mtmsr(msr);
+ __asm volatile("isync");
}
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index d9e1baaabae..74e04ec9758 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.81 2003/06/03 01:35:30 drahn Exp $ */
+/* $OpenBSD: pmap.c,v 1.82 2003/07/02 21:30:12 drahn Exp $ */
/*
* Copyright (c) 2001, 2002 Dale Rahn.
@@ -527,9 +527,8 @@ pmap_enter(pm, va, pa, prot, flags)
* and this pmap is current active pmap
*/
if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- asm volatile ("mtsrin %0,%1"
- :: "r"(pm->pm_sr[sn]),
- "r"(sn << ADDR_SR_SHIFT) );
+ ppc_mtsrin(pm->pm_sr[sn],
+ sn << ADDR_SR_SHIFT);
}
if (pattr != NULL)
*pattr |= (PTE_EXE >> ATTRSHIFT);
@@ -656,9 +655,8 @@ pmap_remove_pg(pmap_t pm, vaddr_t va)
* and this pmap is current active pmap
*/
if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- asm volatile ("mtsrin %0,%1"
- :: "r"(pm->pm_sr[sn]),
- "r"(sn << ADDR_SR_SHIFT) );
+ ppc_mtsrin(pm->pm_sr[sn],
+ sn << ADDR_SR_SHIFT);
}
}
@@ -741,9 +739,8 @@ _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
* and this pmap is current active pmap
*/
if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- asm volatile ("mtsrin %0,%1"
- :: "r"(pm->pm_sr[sn]),
- "r"(sn << ADDR_SR_SHIFT) );
+ ppc_mtsrin(pm->pm_sr[sn],
+ sn << ADDR_SR_SHIFT);
}
}
@@ -805,9 +802,8 @@ pmap_kremove_pg(vaddr_t va)
* and this pmap is current active pmap
*/
if (sn != USER_SR && sn != KERNEL_SR && curpm == pm)
- asm volatile ("mtsrin %0,%1"
- :: "r"(pm->pm_sr[sn]),
- "r"(sn << ADDR_SR_SHIFT) );
+ ppc_mtsrin(pm->pm_sr[sn],
+ sn << ADDR_SR_SHIFT);
}
}
@@ -1464,8 +1460,7 @@ pmap_bootstrap(u_int kernelstart, u_int kernelend)
#endif
for (i = 0; i < 16; i++) {
pmap_kernel()->pm_sr[i] = (KERNEL_SEG0 + i) | SR_NOEXEC;
- asm volatile ("mtsrin %0,%1"
- :: "r"( KERNEL_SEG0 + i), "r"(i << ADDR_SR_SHIFT) );
+ ppc_mtsrin(KERNEL_SEG0 + i, i << ADDR_SR_SHIFT);
}
asm volatile ("sync; mtsdr1 %0; isync"
:: "r"((u_int)pmap_ptable | (pmap_ptab_mask >> 10)));
diff --git a/sys/arch/powerpc/powerpc/trap.c b/sys/arch/powerpc/powerpc/trap.c
index 74f0a3f076e..cb8f69c20bc 100644
--- a/sys/arch/powerpc/powerpc/trap.c
+++ b/sys/arch/powerpc/powerpc/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.58 2003/03/04 19:11:37 deraadt Exp $ */
+/* $OpenBSD: trap.c,v 1.59 2003/07/02 21:30:12 drahn Exp $ */
/* $NetBSD: trap.c,v 1.3 1996/10/13 03:31:37 christos Exp $ */
/*
@@ -114,17 +114,15 @@ save_vec(struct proc *p)
struct pcb *pcb = &p->p_addr->u_pcb;
struct vreg *pcb_vr = pcb->pcb_vr;
u_int32_t oldmsr, msr;
- u_int32_t tmp;
/* first we enable vector so that we dont throw an exception
* in kernel mode
*/
- __asm__ volatile ("mfmsr %0" : "=r" (oldmsr));
+ oldmsr = ppc_mfmsr();
msr = oldmsr | PSL_VEC;
- __asm__ volatile ("mtmsr %0" :: "r" (msr));
+ ppc_mtmsr(msr);
__asm__ volatile ("sync;isync");
- __asm__ volatile ("mfspr %0, 256" : "=r" (tmp));
- pcb->pcb_vr->vrsave = tmp;
+ pcb->pcb_vr->vrsave = ppc_mfvrsave();
#define STR(x) #x
#define SAVE_VEC_REG(reg, addr) \
@@ -166,7 +164,7 @@ save_vec(struct proc *p)
SAVE_VEC_REG(0,&pcb_vr->vscr);
/* fix kernel msr back */
- __asm__ volatile ("mfmsr %0" :: "r" (oldmsr));
+ ppc_mtmsr(oldmsr);
}
/*
@@ -178,7 +176,6 @@ enable_vec(struct proc *p)
struct pcb *pcb = &p->p_addr->u_pcb;
struct vreg *pcb_vr = pcb->pcb_vr;
u_int32_t oldmsr, msr;
- u_int32_t tmp;
/* If this is the very first altivec instruction executed
* by this process, create a context.
@@ -191,9 +188,9 @@ enable_vec(struct proc *p)
/* first we enable vector so that we dont throw an exception
* in kernel mode
*/
- __asm__ volatile ("mfmsr %0" : "=r" (oldmsr));
+ oldmsr = ppc_mfmsr();
msr = oldmsr | PSL_VEC;
- __asm__ volatile ("mtmsr %0" :: "r" (msr));
+ ppc_mtmsr(msr);
__asm__ volatile ("sync;isync");
#define LOAD_VEC_REG(reg, addr) \
@@ -201,8 +198,7 @@ enable_vec(struct proc *p)
LOAD_VEC_REG(0, &pcb_vr->vscr);
__asm__ volatile ("mtvscr 0");
- tmp = pcb_vr->vrsave;
- __asm__ volatile ("mtspr 256, %0" :: "r" (tmp));
+ ppc_mtvrsave(pcb_vr->vrsave);
LOAD_VEC_REG(0, &pcb_vr->vreg[0]);
LOAD_VEC_REG(1, &pcb_vr->vreg[1]);
@@ -238,7 +234,7 @@ enable_vec(struct proc *p)
LOAD_VEC_REG(31, &pcb_vr->vreg[31]);
/* fix kernel msr back */
- __asm__ volatile ("mfmsr %0" :: "r" (oldmsr));
+ ppc_mtmsr(oldmsr);
}
#endif /* ALTIVEC */