| | | |
|---|---|---|
| author | Dale Rahn <drahn@cvs.openbsd.org> | 2002-01-21 17:18:47 +0000 |
| committer | Dale Rahn <drahn@cvs.openbsd.org> | 2002-01-21 17:18:47 +0000 |
| commit | 160845aaacd9696e989d1441d7281ae99f263350 (patch) | |
| tree | 1b851eecaac4d8ec8ae6bdf9b99c7b8ce71034b3 /sys/arch/powerpc | |
| parent | 4851de89f7fc84cee471b74a09dec6f4f8f1b0fe (diff) | |
Better barrier protection. pefo found an instance where the previous
protection was not good enough. work from theo, pefo, toby.
Diffstat (limited to 'sys/arch/powerpc')
-rw-r--r-- | sys/arch/powerpc/include/intr.h | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/sys/arch/powerpc/include/intr.h b/sys/arch/powerpc/include/intr.h
index f84c00c85b6..626120e0a82 100644
--- a/sys/arch/powerpc/include/intr.h
+++ b/sys/arch/powerpc/include/intr.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: intr.h,v 1.13 2001/11/19 05:13:50 drahn Exp $ */
+/*	$OpenBSD: intr.h,v 1.14 2002/01/21 17:18:46 drahn Exp $ */
 
 /*
  * Copyright (c) 1997 Per Fogelstrom, Opsycon AB and RTMX Inc, USA.
@@ -72,39 +72,39 @@ extern int imask[7];
  * achived with an empty asm volatile statement. the compiler
  * will not move instructions past asm volatiles.
  */
-static __inline int
+volatile static __inline int
 splraise(int newcpl)
 {
 	int oldcpl;
 
-	__asm__ volatile("");		/* don't reorder.... */
+	__asm__ volatile("":::"memory");	/* don't reorder.... */
 	oldcpl = cpl;
 	cpl = oldcpl | newcpl;
-	__asm__ volatile("");		/* reorder protect */
+	__asm__ volatile("":::"memory");	/* don't reorder.... */
 	return(oldcpl);
 }
 
-static __inline void
+static __volatile __inline void
 splx(int newcpl)
 {
-	__asm__ volatile("");		/* reorder protect */
+	__asm__ volatile("":::"memory");	/* reorder protect */
 	cpl = newcpl;
 	if(ipending & ~newcpl)
 		do_pending_int();
-	__asm__ volatile("");		/* reorder protect */
+	__asm__ volatile("":::"memory");	/* reorder protect */
 }
 
-static __inline int
+volatile static __inline int
 spllower(int newcpl)
 {
 	int oldcpl;
 
-	__asm__ volatile("");		/* reorder protect */
+	__asm__ volatile("":::"memory");	/* reorder protect */
 	oldcpl = cpl;
 	cpl = newcpl;
 	if(ipending & ~newcpl)
 		do_pending_int();
-	__asm__ volatile("");		/* reorder protect */
+	__asm__ volatile("":::"memory");	/* reorder protect */
 	return(oldcpl);
 }
 
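A note on the change itself: an empty `__asm__ volatile("")` only keeps the asm statement from being reordered or removed; GCC is still free to cache globals such as `cpl` and `ipending` in registers and to move those loads and stores across the statement. Declaring a `"memory"` clobber tells the compiler the statement may read or write arbitrary memory, so pending stores must be completed before it and memory values reloaded after it. The sketch below illustrates that difference; it is not part of the commit, and the names `cpl_demo`, `weak_raise` and `strong_raise` are hypothetical.

```c
/* Illustration only: hypothetical names, not from the committed file. */
int cpl_demo;			/* stand-in for a global like cpl */

int
weak_raise(int newcpl)
{
	int oldcpl;

	/*
	 * Empty asm volatile: the asm itself stays put, but the compiler
	 * may still keep cpl_demo in a register and move the surrounding
	 * accesses across this point.
	 */
	__asm__ volatile("");
	oldcpl = cpl_demo;
	cpl_demo = oldcpl | newcpl;
	__asm__ volatile("");
	return (oldcpl);
}

int
strong_raise(int newcpl)
{
	int oldcpl;

	/*
	 * "memory" clobber: the compiler must assume any memory may be
	 * read or written here, so cpl_demo is loaded fresh after the
	 * first barrier and the store completes before the second.
	 */
	__asm__ volatile("" ::: "memory");
	oldcpl = cpl_demo;
	cpl_demo = oldcpl | newcpl;
	__asm__ volatile("" ::: "memory");
	return (oldcpl);
}
```

Either way this is purely a compiler barrier: it emits no PowerPC sync or eieio instruction, which is presumably sufficient here since the code guards against reordering relative to interrupt handlers on the same CPU rather than against another processor.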