author		Miod Vallat <miod@cvs.openbsd.org>	2014-10-12 20:39:47 +0000
committer	Miod Vallat <miod@cvs.openbsd.org>	2014-10-12 20:39:47 +0000
commit		bc1910b452482848a4b55d1b17bd7e6cf0f4ae86 (patch)
tree		eef45e53a8361e4146efcb281166aae40a51c70c
parent		83b916f266473564c92e86d84dca20b6f3654351 (diff)
Rough sync with hppa to make this compile again.
-rw-r--r--	sys/arch/hppa64/conf/RAMDISK		|   8
-rw-r--r--	sys/arch/hppa64/hppa64/autoconf.c	|   9
-rw-r--r--	sys/arch/hppa64/hppa64/machdep.c	|   4
-rw-r--r--	sys/arch/hppa64/hppa64/mutex.c		|  23
-rw-r--r--	sys/arch/hppa64/hppa64/pmap.c		|  32
-rw-r--r--	sys/arch/hppa64/hppa64/trap.c		|   8
-rw-r--r--	sys/arch/hppa64/include/atomic.h	| 237
-rw-r--r--	sys/arch/hppa64/include/mutex.h		|  34

8 files changed, 290 insertions, 65 deletions
diff --git a/sys/arch/hppa64/conf/RAMDISK b/sys/arch/hppa64/conf/RAMDISK
index d73cb721a40..8626998b771 100644
--- a/sys/arch/hppa64/conf/RAMDISK
+++ b/sys/arch/hppa64/conf/RAMDISK
@@ -1,4 +1,4 @@
-#	$OpenBSD: RAMDISK,v 1.21 2014/10/10 05:43:35 deraadt Exp $
+#	$OpenBSD: RAMDISK,v 1.22 2014/10/12 20:39:46 miod Exp $
 
 machine		hppa64
 maxusers	4
@@ -26,11 +26,11 @@ mainbus0 at root
 mem*	at mainbus0 flags 0x00	# /dev/*mem and memory controller
 pdc0	at mainbus0		# PDC/IODC wrapper for boot console
-power0	at mainbus0		# power/fail manager (iv 30)
+#power0	at mainbus0		# power/fail manager (iv 30)
 cpu*	at mainbus0		# HP PA-RISC cpu
-plut0	at mainbus0		# Astro Runway-Ropes, MIOC and IOA
+astro0	at mainbus0		# Astro Runway-Ropes, MIOC and IOA
 
-elroy*	at plut0
+elroy*	at astro0
 pci*	at elroy?
 ppb*	at pci?
diff --git a/sys/arch/hppa64/hppa64/autoconf.c b/sys/arch/hppa64/hppa64/autoconf.c
index 23b06d05dc0..0df825000b6 100644
--- a/sys/arch/hppa64/hppa64/autoconf.c
+++ b/sys/arch/hppa64/hppa64/autoconf.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: autoconf.c,v 1.21 2014/09/15 19:08:21 miod Exp $	*/
+/*	$OpenBSD: autoconf.c,v 1.22 2014/10/12 20:39:46 miod Exp $	*/
 
 /*
  * Copyright (c) 1998-2005 Michael Shalayeff
@@ -232,8 +232,9 @@ print_devpath(const char *label, struct pz_device *pz)
 	for (i = 1; i < 6 && pz->pz_layers[i]; i++)
 		printf(".%x", pz->pz_layers[i]);
 
-	printf(" class=%d flags=%b hpa=%p spa=%p io=%p\n", pz->pz_class,
-	    pz->pz_flags, PZF_BITS, pz->pz_hpa, pz->pz_spa, pz->pz_iodc_io);
+	printf(" class=%d flags=%b hpa=0x%08x spa=0x%08x io=0x%08x\n",
+	    pz->pz_class, pz->pz_flags, PZF_BITS, pz->pz_hpa, pz->pz_spa,
+	    pz->pz_iodc_io);
 }
 
 u_int32_t pdc_rt[16 / 4 * sizeof(struct pdc_pat_pci_rt)] PDC_ALIGNMENT;
@@ -374,7 +375,7 @@ pdc_getirt(int *pn)
 		return (NULL);
 	}
 
-printf("num %ld ", pdc_pat_io_num.num);
+printf("num %d ", pdc_pat_io_num.num);
 	*pn = num = pdc_pat_io_num.num;
 	if (num > sizeof(pdc_rt) / sizeof(*rt)) {
 		printf("\nPCI IRT is too big %d\n", num);
diff --git a/sys/arch/hppa64/hppa64/machdep.c b/sys/arch/hppa64/hppa64/machdep.c
index 7eb043828ec..c3437abd796 100644
--- a/sys/arch/hppa64/hppa64/machdep.c
+++ b/sys/arch/hppa64/hppa64/machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: machdep.c,v 1.73 2014/09/19 18:21:14 kettenis Exp $	*/
+/*	$OpenBSD: machdep.c,v 1.74 2014/10/12 20:39:46 miod Exp $	*/
 
 /*
  * Copyright (c) 2005 Michael Shalayeff
@@ -378,7 +378,7 @@ cpu_startup(void)
 	printf("%s%s\n", version, cpu_model);
 	printf("real mem = %lu (%luMB)\n", ptoa((psize_t)physmem),
 	    ptoa((psize_t)physmem) / 1024 / 1024);
-	printf("rsvd mem = %u (%uKB)\n", ptoa(resvmem), ptoa(resvmem) / 1024);
+	printf("rsvd mem = %lu (%luKB)\n", ptoa(resvmem), ptoa(resvmem) / 1024);
 
 	/*
 	 * Allocate a submap for exec arguments.  This map effectively
diff --git a/sys/arch/hppa64/hppa64/mutex.c b/sys/arch/hppa64/hppa64/mutex.c
index a6206e0817f..1e24878eafd 100644
--- a/sys/arch/hppa64/hppa64/mutex.c
+++ b/sys/arch/hppa64/hppa64/mutex.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: mutex.c,v 1.8 2012/06/05 11:43:41 jsing Exp $	*/
+/*	$OpenBSD: mutex.c,v 1.9 2014/10/12 20:39:46 miod Exp $	*/
 
 /*
  * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
@@ -28,6 +28,7 @@
 #include <sys/param.h>
 #include <sys/mutex.h>
 #include <sys/systm.h>
+#include <sys/atomic.h>
 
 #include <machine/intr.h>
 
@@ -36,11 +37,12 @@ static inline int
 try_lock(struct mutex *mtx)
 {
-	volatile int *lock = &mtx->mtx_lock;
+	volatile int *lock = (int *)(((vaddr_t)mtx->mtx_lock + 0xf) & ~0xf);
 	volatile register_t ret = 0;
 
+	/* Note: lock must be 16-byte aligned. */
 	asm volatile (
-		"ldcw,co 0(%2), %0"
+		"ldcw,co 0(%2), %0"
 		: "=&r" (ret), "+m" (lock)
 		: "r" (lock)
 	);
@@ -49,9 +51,12 @@ try_lock(struct mutex *mtx)
 }
 
 void
-mtx_init(struct mutex *mtx, int wantipl)
+__mtx_init(struct mutex *mtx, int wantipl)
 {
-	mtx->mtx_lock = MUTEX_UNLOCKED;
+	mtx->mtx_lock[0] = 1;
+	mtx->mtx_lock[1] = 1;
+	mtx->mtx_lock[2] = 1;
+	mtx->mtx_lock[3] = 1;
 	mtx->mtx_wantipl = wantipl;
 	mtx->mtx_oldipl = IPL_NONE;
 }
@@ -65,6 +70,7 @@ mtx_enter(struct mutex *mtx)
 		if (mtx->mtx_wantipl != IPL_NONE)
 			s = splraise(mtx->mtx_wantipl);
 		if (try_lock(mtx)) {
+			membar_enter();
 			if (mtx->mtx_wantipl != IPL_NONE)
 				mtx->mtx_oldipl = s;
 			mtx->mtx_owner = curcpu();
@@ -86,6 +92,7 @@ mtx_enter_try(struct mutex *mtx)
 	if (mtx->mtx_wantipl != IPL_NONE)
 		s = splraise(mtx->mtx_wantipl);
 	if (try_lock(mtx)) {
+		membar_enter();
 		if (mtx->mtx_wantipl != IPL_NONE)
 			mtx->mtx_oldipl = s;
 		mtx->mtx_owner = curcpu();
@@ -112,8 +119,12 @@ mtx_leave(struct mutex *mtx)
 #endif
 	s = mtx->mtx_oldipl;
 	mtx->mtx_owner = NULL;
+	membar_exit();
-	mtx->mtx_lock = MUTEX_UNLOCKED;
+	mtx->mtx_lock[0] = 1;
+	mtx->mtx_lock[1] = 1;
+	mtx->mtx_lock[2] = 1;
+	mtx->mtx_lock[3] = 1;
 	if (mtx->mtx_wantipl != IPL_NONE)
 		splx(s);
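
The mutex.c rework above hinges on PA-RISC's only atomic primitive, ldcw (load and clear word), whose operand must be 16-byte aligned; keeping four lock words and rounding up to the next 16-byte boundary at run time guarantees one usable word. A minimal stand-alone sketch of that technique follows -- the names are illustrative, not the kernel's, and the asm assembles only for a PA-RISC target:

	#include <stdint.h>

	/* Four words guarantee a 16-byte-aligned word lies inside. */
	struct ldcw_lock {
		volatile int words[4];	/* all 1 = free; aligned word 0 = held */
	};

	/* Round up to the next 16-byte boundary, as try_lock() does above. */
	static inline volatile int *
	ldcw_word(struct ldcw_lock *l)
	{
		return (volatile int *)(((uintptr_t)l->words + 0xf) &
		    ~(uintptr_t)0xf);
	}

	/*
	 * ldcw atomically returns the old word and stores zero into it;
	 * reading back nonzero means this CPU is the one that took the lock.
	 */
	static inline int
	ldcw_try_lock(struct ldcw_lock *l)
	{
		volatile int *w = ldcw_word(l);
		int old;

		__asm volatile("ldcw,co 0(%2), %0"
		    : "=&r" (old), "+m" (*w)
		    : "r" (w));
		return (old != 0);
	}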
diff --git a/sys/arch/hppa64/hppa64/pmap.c b/sys/arch/hppa64/hppa64/pmap.c
index 26a59f7684d..0b066cd6439 100644
--- a/sys/arch/hppa64/hppa64/pmap.c
+++ b/sys/arch/hppa64/hppa64/pmap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap.c,v 1.23 2012/06/03 13:28:40 jsing Exp $	*/
+/*	$OpenBSD: pmap.c,v 1.24 2014/10/12 20:39:46 miod Exp $	*/
 
 /*
  * Copyright (c) 2005 Michael Shalayeff
@@ -17,7 +17,9 @@
  * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#ifndef SMALL_KERNEL
 #define PMAPDEBUG
+#endif
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -253,7 +255,7 @@ static __inline void
 pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte)
 {
 	DPRINTF(PDB_FOLLOW|PDB_VP,
-	    ("pmap_pte_set(%p, 0x%lx, 0x%lx)\n", pde, va, pte));
+	    ("pmap_pte_set(%p, 0x%lx, 0x%lx)\n", pde, va, (long)pte));
 
 	pde[(va & PTE_MASK) >> PTE_SHIFT] = pte;
 }
@@ -309,8 +311,8 @@ pmap_dump_table(pa_space_t space, vaddr_t sva)
 			if (!(pte = pmap_pte_get(pde, va)))
 				continue;
 
-			printf("0x%08lx-0x%08lx:%b\n",
-			    va, PTE_PAGE(pte), PTE_GETBITS(pte), PTE_BITS);
+			printf("0x%08lx-0x%08llx:%lb\n",
+			    va, PTE_PAGE(pte), (long)PTE_GETBITS(pte), PTE_BITS);
 		}
 }
@@ -460,7 +462,7 @@
 printf("pa 0x%lx tpa 0x%lx\n", pa, tpa);
 		epde = pde + (PTE_MASK >> PTE_SHIFT) + 1;
 		if (pa + (PTE_MASK + (1 << PTE_SHIFT)) > tpa)
 			epde = pde + ((tpa & PTE_MASK) >> PTE_SHIFT);
-printf("pde %p epde %p pte 0x%lx\n", pde, epde, pte);
+printf("pde %p epde %p pte 0x%lx\n", pde, epde, (long)pte);
 		for (pde += (pa & PTE_MASK) >> PTE_SHIFT; pde < epde;)
 			*pde++ = pte;
 		pa += PTE_MASK + (1 << PTE_SHIFT);
@@ -480,6 +482,7 @@ pmap_bootstrap(vaddr_t vstart)
 
 	DPRINTF(PDB_FOLLOW|PDB_INIT, ("pmap_bootstrap(0x%lx)\n", vstart));
 
+	uvmexp.pagesize = PAGE_SIZE;
 	uvm_setpagesize();
 
 	hppa_prot[UVM_PROT_NONE] = PTE_ORDER|PTE_ACC_NONE;
@@ -536,7 +539,7 @@ pmap_bootstrap(vaddr_t vstart)
 	eaddr = physmem - atop(round_page(MSGBUFSIZE));
 	resvphysmem = atop(addr);
 
-	DPRINTF(PDB_INIT, ("physmem: 0x%lx - 0x%lx\n", resvphysmem, eaddr));
+	DPRINTF(PDB_INIT, ("physmem: 0x%x - 0x%lx\n", resvphysmem, eaddr));
 	uvm_page_physload(0, physmem, resvphysmem, eaddr, 0);
 }
@@ -580,7 +583,7 @@ pmap_init(void)
 
 #ifdef PMAP_STEAL_MEMORY
 vaddr_t
-pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
+pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp, int zero)
 {
 	vaddr_t va;
 	int npg;
@@ -602,7 +605,8 @@ pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
 	vm_physmem[0].end -= npg;
 	vm_physmem[0].avail_end -= npg;
 	va = ptoa(vm_physmem[0].avail_end);
-	bzero((void *)va, size);
+	if (zero)
+		bzero((void *)va, size);
 
 	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("pmap_steal_memory: 0x%lx\n", va));
@@ -637,7 +641,7 @@ pmap_growkernel(vaddr_t kva)
 		} else {
 			paddr_t pa;
 
-			pa = pmap_steal_memory(PAGE_SIZE, NULL, NULL);
+			pa = pmap_steal_memory(PAGE_SIZE, NULL, NULL, 1);
 			if (pa)
 				panic("pmap_growkernel: out of memory");
 			pmap_pde_set(pmap_kernel(), va, pa);
@@ -739,7 +743,7 @@ pmap_destroy(struct pmap *pmap)
 			npv = pv->pv_next;
 			if (pv->pv_pmap == pmap) {
 #ifdef PMAPDEBUG
-				printf(" 0x%x", pv->pv_va);
+				printf(" 0x%lx", pv->pv_va);
 #endif
 				pmap_remove(pmap, pv->pv_va,
 				    pv->pv_va + PAGE_SIZE);
@@ -818,7 +822,7 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 	if ((pte = pmap_pte_get(pde, va))) {
 		DPRINTF(PDB_ENTER,
-		    ("pmap_enter: remapping 0x%lx -> 0x%lx\n", pte, pa));
+		    ("pmap_enter: remapping 0x%lx -> 0x%lx\n", (long)pte, pa));
 
 		pmap_pte_flush(pmap, va, pte);
 		if (wired && !(pte & PTE_WIRED))
@@ -979,7 +983,7 @@ pmap_write_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 
 			DPRINTF(PDB_PMAP,
 			    ("pmap_write_protect: va=0x%lx pte=0x%lx\n",
-			    sva, pte));
+			    sva, (long)pte));
 
 			/*
 			 * Determine if mapping is changing.
			 * If not, nothing to do.
@@ -1078,7 +1082,7 @@ pmap_changebit(struct vm_page *pg, pt_entry_t set, pt_entry_t clear)
 	pt_entry_t res;
 
 	DPRINTF(PDB_FOLLOW|PDB_BITS,
-	    ("pmap_changebit(%p, %lx, %lx)\n", pg, set, clear));
+	    ("pmap_changebit(%p, %lx, %lx)\n", pg, (long)set, (long)clear));
 
 	simple_lock(&pg->mdpage.pvh_lock);
 	res = pg->mdpage.pvh_attrs = 0;
@@ -1121,7 +1125,7 @@ pmap_testbit(struct vm_page *pg, pt_entry_t bit)
 	struct pv_entry *pve;
 	pt_entry_t pte;
 
-	DPRINTF(PDB_FOLLOW|PDB_BITS, ("pmap_testbit(%p, %lx)\n", pg, bit));
+	DPRINTF(PDB_FOLLOW|PDB_BITS, ("pmap_testbit(%p, %lx)\n", pg, (long)bit));
 
 	simple_lock(&pg->mdpage.pvh_lock);
 	for(pve = pg->mdpage.pvh_list; !(pg->mdpage.pvh_attrs & bit) && pve;
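
Most of the pmap.c churn is printf-format hygiene rather than behavior: pt_entry_t is 64 bits wide here, so handing it to a %lx conversion needs an explicit cast to keep the compiler's format checking honest. A tiny illustration of the pattern (stand-in typedef, not the kernel's):

	#include <stdio.h>

	typedef unsigned long long pt_entry_t;	/* stand-in; real width is per-arch */

	int
	main(void)
	{
		pt_entry_t pte = 0xdeadbeef;

		/* printf("pte=0x%lx\n", pte);  -- mismatched: %lx wants a long */
		printf("pte=0x%lx\n", (long)pte);	/* the diff's fix */
		return (0);
	}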
diff --git a/sys/arch/hppa64/hppa64/trap.c b/sys/arch/hppa64/hppa64/trap.c
index 857268e43ca..afa646c7370 100644
--- a/sys/arch/hppa64/hppa64/trap.c
+++ b/sys/arch/hppa64/hppa64/trap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: trap.c,v 1.39 2014/05/11 00:12:44 guenther Exp $	*/
+/*	$OpenBSD: trap.c,v 1.40 2014/10/12 20:39:46 miod Exp $	*/
 
 /*
  * Copyright (c) 2005 Michael Shalayeff
@@ -250,7 +250,7 @@ trap(int type, struct trapframe *frame)
 	}
 #else
 		if (type == T_DATALIGN)
-			panic ("trap: %s at 0x%x", tts, va);
+			panic ("trap: %s at 0x%lx", tts, va);
 		else
 			panic ("trap: no debugger for \"%s\" (%d)", tts, type);
 #endif
@@ -378,7 +378,7 @@ trap(int type, struct trapframe *frame)
 			sv.sival_int = va;
 			trapsignal(p, SIGILL, type & ~T_USER, ILL_ILLTRP, sv);
 		} else
-			panic("trap: %s @ 0x%x:0x%x for 0x%x:0x%x irr 0x%08x",
+			panic("trap: %s @ 0x%lx:0x%lx for 0x%x:0x%lx irr 0x%08x",
 			    tts, frame->tf_iisq[0], frame->tf_iioq[0],
 			    space, va, opcode);
 		break;
@@ -635,7 +635,7 @@ syscall(struct trapframe *frame)
 #ifdef DIAGNOSTIC
 	if (curcpu()->ci_cpl != oldcpl) {
 		printf("WARNING: SPL (0x%x) NOT LOWERED ON "
-		    "syscall(0x%x, 0x%x, 0x%x, 0x%x...) EXIT, PID %d\n",
+		    "syscall(0x%x, 0x%lx, 0x%lx, 0x%lx...) EXIT, PID %d\n",
 		    curcpu()->ci_cpl, code, args[0], args[1], args[2],
 		    p->p_pid);
 		curcpu()->ci_cpl = oldcpl;
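
Everything in the atomic.h rewrite that follows is built from one pattern: save the external interrupt enable mask (control register %cr15, "eiem"), write zero to it to block interrupts, perform the read-modify-write, then restore the mask. Roughly, assuming a PA-RISC target (helper names here are hypothetical):

	/*
	 * Save the interrupt mask and block external interrupts; on a
	 * single CPU this makes the section between the two calls
	 * effectively atomic.  (On MP the real code additionally takes
	 * a ldcw-based spinlock.)
	 */
	static inline unsigned long
	eiem_disable(void)
	{
		unsigned long eiem;

		__asm volatile("mfctl	%%cr15, %0" : "=r" (eiem));	/* save */
		__asm volatile("mtctl	%r0, %cr15");			/* mask all */
		return (eiem);
	}

	static inline void
	eiem_restore(unsigned long eiem)
	{
		__asm volatile("mtctl	%0, %%cr15" :: "r" (eiem));	/* restore */
	}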
diff --git a/sys/arch/hppa64/include/atomic.h b/sys/arch/hppa64/include/atomic.h
index 9cfe677db9c..e1c122ea09e 100644
--- a/sys/arch/hppa64/include/atomic.h
+++ b/sys/arch/hppa64/include/atomic.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: atomic.h,v 1.6 2014/03/29 18:09:29 guenther Exp $	*/
+/*	$OpenBSD: atomic.h,v 1.7 2014/10/12 20:39:46 miod Exp $	*/
 
 /* Public Domain */
 
@@ -7,41 +7,219 @@
 
 #if defined(_KERNEL)
 
-#include <sys/mutex.h>
+typedef volatile u_int __cpu_simple_lock_t __attribute__((__aligned__(16)));
+
+#define	__SIMPLELOCK_LOCKED	0
+#define	__SIMPLELOCK_UNLOCKED	1
+
+static inline void
+__cpu_simple_lock_init(__cpu_simple_lock_t *l)
+{
+	*l = __SIMPLELOCK_UNLOCKED;
+}
+
+static inline unsigned int
+__cpu_simple_lock_ldcws(__cpu_simple_lock_t *l)
+{
+	unsigned int o;
+
+	asm volatile("ldcws 0(%2), %0" : "=&r" (o), "+m" (l) : "r" (l));
+
+	return (o);
+}
+
+static inline void
+__cpu_simple_lock(__cpu_simple_lock_t *l)
+{
+	while (__cpu_simple_lock_ldcws(l) != __SIMPLELOCK_UNLOCKED)
+		;
+}
+
+static inline int
+__cpu_simple_lock_try(__cpu_simple_lock_t *l)
+{
+	return (__cpu_simple_lock_ldcws(l) == __SIMPLELOCK_UNLOCKED);
+}
+
+static inline void
+__cpu_simple_unlock(__cpu_simple_lock_t *l)
+{
+	*l = __SIMPLELOCK_UNLOCKED;
+}
 
 #ifdef MULTIPROCESSOR
-extern struct mutex mtx_atomic;
-#define ATOMIC_LOCK	mtx_enter(&mtx_atomic)
-#define ATOMIC_UNLOCK	mtx_leave(&mtx_atomic)
+extern __cpu_simple_lock_t atomic_lock;
+#define ATOMIC_LOCK	__cpu_simple_lock(&atomic_lock);
+#define ATOMIC_UNLOCK	__cpu_simple_unlock(&atomic_lock);
 #else
 #define ATOMIC_LOCK
 #define ATOMIC_UNLOCK
 #endif
 
-static __inline void
-atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
+static inline register_t
+atomic_enter(void)
 {
 	register_t eiem;
 
 	__asm volatile("mfctl	%%cr15, %0": "=r" (eiem));
 	__asm volatile("mtctl	%r0, %cr15");
 	ATOMIC_LOCK;
-	*uip |= v;
+
+	return (eiem);
+}
+
+static inline void
+atomic_leave(register_t eiem)
+{
 	ATOMIC_UNLOCK;
 	__asm volatile("mtctl	%0, %%cr15":: "r" (eiem));
 }
 
+static inline unsigned int
+_atomic_cas_uint(volatile unsigned int *uip, unsigned int o, unsigned int n)
+{
+	register_t eiem;
+	unsigned int rv;
+
+	eiem = atomic_enter();
+	rv = *uip;
+	if (rv == o)
+		*uip = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))
+
+static inline unsigned long
+_atomic_cas_ulong(volatile unsigned long *uip, unsigned long o, unsigned long n)
+{
+	register_t eiem;
+	unsigned long rv;
+
+	eiem = atomic_enter();
+	rv = *uip;
+	if (rv == o)
+		*uip = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))
+
+static inline void *
+_atomic_cas_ptr(volatile void *uip, void *o, void *n)
+{
+	register_t eiem;
+	void * volatile *uipp = (void * volatile *)uip;
+	void *rv;
+
+	eiem = atomic_enter();
+	rv = *uipp;
+	if (rv == o)
+		*uipp = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))
+
+static inline unsigned int
+_atomic_swap_uint(volatile unsigned int *uip, unsigned int n)
+{
+	register_t eiem;
+	unsigned int rv;
+
+	eiem = atomic_enter();
+	rv = *uip;
+	*uip = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_swap_uint(_p, _n) _atomic_swap_uint((_p), (_n))
+
+static inline unsigned long
+_atomic_swap_ulong(volatile unsigned long *uip, unsigned long n)
+{
+	register_t eiem;
+	unsigned long rv;
+
+	eiem = atomic_enter();
+	rv = *uip;
+	*uip = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_swap_ulong(_p, _n) _atomic_swap_ulong((_p), (_n))
+
+static inline void *
+_atomic_swap_ptr(volatile void *uip, void *n)
+{
+	register_t eiem;
+	void * volatile *uipp = (void * volatile *)uip;
+	void *rv;
+
+	eiem = atomic_enter();
+	rv = *uipp;
+	*uipp = n;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_swap_ptr(_p, _n) _atomic_swap_ptr((_p), (_n))
+
+static __inline unsigned int
+_atomic_add_int_nv(volatile unsigned int *uip, unsigned int v)
+{
+	register_t eiem;
+	unsigned int rv;
+
+	eiem = atomic_enter();
+	rv = *uip + v;
+	*uip = rv;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_add_int_nv(_uip, _v) _atomic_add_int_nv((_uip), (_v))
+#define atomic_sub_int_nv(_uip, _v) _atomic_add_int_nv((_uip), 0 - (_v))
+
+static __inline unsigned long
+_atomic_add_long_nv(volatile unsigned long *uip, unsigned long v)
+{
+	register_t eiem;
+	unsigned long rv;
+
+	eiem = atomic_enter();
+	rv = *uip + v;
+	*uip = rv;
+	atomic_leave(eiem);
+
+	return (rv);
+}
+#define atomic_add_long_nv(_uip, _v) _atomic_add_long_nv((_uip), (_v))
+#define atomic_sub_long_nv(_uip, _v) _atomic_add_long_nv((_uip), 0 - (_v))
+
+static __inline void
+atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
+{
+	register_t eiem;
+
+	eiem = atomic_enter();
+	*uip |= v;
+	atomic_leave(eiem);
+}
+
 static __inline void
 atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
 {
 	register_t eiem;
 
-	__asm volatile("mfctl	%%cr15, %0": "=r" (eiem));
-	__asm volatile("mtctl	%r0, %cr15");
-	ATOMIC_LOCK;
+	eiem = atomic_enter();
 	*uip &= ~v;
-	ATOMIC_UNLOCK;
-	__asm volatile("mtctl	%0, %%cr15":: "r" (eiem));
+	atomic_leave(eiem);
 }
 
 static __inline void
@@ -49,12 +227,9 @@ atomic_setbits_long(volatile unsigned long *uip, unsigned long v)
 {
 	register_t eiem;
 
-	__asm volatile("mfctl	%%cr15, %0": "=r" (eiem));
-	__asm volatile("mtctl	%r0, %cr15");
-	ATOMIC_LOCK;
+	eiem = atomic_enter();
 	*uip |= v;
-	ATOMIC_UNLOCK;
-	__asm volatile("mtctl	%0, %%cr15":: "r" (eiem));
+	atomic_leave(eiem);
 }
 
 static __inline void
@@ -62,13 +237,29 @@ atomic_clearbits_long(volatile unsigned long *uip, unsigned long v)
 {
 	register_t eiem;
 
-	__asm volatile("mfctl	%%cr15, %0": "=r" (eiem));
-	__asm volatile("mtctl	%r0, %cr15");
-	ATOMIC_LOCK;
+	eiem = atomic_enter();
 	*uip &= ~v;
-	ATOMIC_UNLOCK;
-	__asm volatile("mtctl	%0, %%cr15":: "r" (eiem));
+	atomic_leave(eiem);
+}
+
+/*
+ * Although the PA-RISC 2.0 architecture allows an implementation to
+ * be weakly ordered, all PA-RISC processors to date implement a
+ * strong memory ordering model.  So all we need is a compiler
+ * barrier.
+ */
+
+static inline void
+__insn_barrier(void)
+{
+	__asm volatile("" : : : "memory");
+}
+
+#define membar_enter()		__insn_barrier()
+#define membar_exit()		__insn_barrier()
+#define membar_producer()	__insn_barrier()
+#define membar_consumer()	__insn_barrier()
+#define membar_sync()		__insn_barrier()
+
 #endif /* defined(_KERNEL) */
 #endif /* _MACHINE_ATOMIC_H_ */
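
Even though these compare-and-swap routines are built from interrupt masking rather than a hardware CAS, callers use them in the conventional retry loop. A hypothetical caller (not part of this commit):

	/* Atomically increment a counter with the CAS primitive above. */
	static inline unsigned int
	counter_inc(volatile unsigned int *p)
	{
		unsigned int old;

		do {
			old = *p;
			/*
			 * A return value other than `old` means another CPU
			 * or an interrupt handler changed *p first; retry.
			 */
		} while (atomic_cas_uint(p, old, old + 1) != old);

		return (old + 1);
	}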
diff --git a/sys/arch/hppa64/include/mutex.h b/sys/arch/hppa64/include/mutex.h
index 6f20f5cb85b..a4b19bfa63e 100644
--- a/sys/arch/hppa64/include/mutex.h
+++ b/sys/arch/hppa64/include/mutex.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: mutex.h,v 1.2 2012/06/05 11:43:41 jsing Exp $	*/
+/*	$OpenBSD: mutex.h,v 1.3 2014/10/12 20:39:46 miod Exp $	*/
 
 /*
  * Copyright (c) 2004 Artur Grabowski <art@openbsd.org>
@@ -28,28 +28,46 @@
 #ifndef _MACHINE_MUTEX_H_
 #define _MACHINE_MUTEX_H_
 
-#define MUTEX_LOCKED	0
-#define MUTEX_UNLOCKED	1
+#define MUTEX_LOCKED	{ 0, 0, 0, 0 }
+#define MUTEX_UNLOCKED	{ 1, 1, 1, 1 }
 
+/* Note: mtx_lock must be 16-byte aligned. */
 struct mutex {
-	volatile int mtx_lock;
+	volatile int mtx_lock[4];
 	int mtx_wantipl;
 	int mtx_oldipl;
 	void *mtx_owner;
 };
 
-void mtx_init(struct mutex *, int);
+/*
+ * To prevent lock ordering problems with the kernel lock, we need to
+ * make sure we block all interrupts that can grab the kernel lock.
+ * The simplest way to achieve this is to make sure mutexes always
+ * raise the interrupt priority level to the highest level that has
+ * interrupts that grab the kernel lock.
+ */
+#ifdef MULTIPROCESSOR
+#define __MUTEX_IPL(ipl) \
+	(((ipl) > IPL_NONE && (ipl) < IPL_AUDIO) ? IPL_AUDIO : (ipl))
+#else
+#define __MUTEX_IPL(ipl) (ipl)
+#endif
+
+#define MUTEX_INITIALIZER(ipl) { MUTEX_UNLOCKED, __MUTEX_IPL((ipl)), 0, NULL }
 
-#define MUTEX_INITIALIZER(ipl) { MUTEX_UNLOCKED, (ipl), 0, NULL }
+void __mtx_init(struct mutex *, int);
+#define mtx_init(mtx, ipl) __mtx_init((mtx), __MUTEX_IPL((ipl)))
 
 #ifdef DIAGNOSTIC
 #define MUTEX_ASSERT_LOCKED(mtx) do {					\
-	if ((mtx)->mtx_lock != MUTEX_LOCKED)				\
+	if ((mtx)->mtx_lock[0] == 1 && (mtx)->mtx_lock[1] == 1 &&	\
+	    (mtx)->mtx_lock[2] == 1 && (mtx)->mtx_lock[3] == 1)		\
 		panic("mutex %p not held in %s", (mtx), __func__);	\
 } while (0)
 
 #define MUTEX_ASSERT_UNLOCKED(mtx) do {					\
-	if ((mtx)->mtx_lock != MUTEX_UNLOCKED)				\
+	if ((mtx)->mtx_lock[0] != 1 || (mtx)->mtx_lock[1] != 1 ||	\
+	    (mtx)->mtx_lock[2] != 1 || (mtx)->mtx_lock[3] != 1)		\
 		panic("mutex %p held in %s", (mtx), __func__);		\
 } while (0)
 #else
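
One consequence of the new __MUTEX_IPL() wrapper: on MULTIPROCESSOR kernels every mutex initialized between IPL_NONE and IPL_AUDIO is silently promoted to IPL_AUDIO, so no interrupt that might take the kernel lock can fire while a mutex is held. Callers are unchanged; a hypothetical use:

	/*
	 * Static and dynamic initialization look exactly as before; the
	 * IPL promotion happens inside MUTEX_INITIALIZER() and mtx_init().
	 */
	struct mutex example_mtx = MUTEX_INITIALIZER(IPL_BIO);

	void
	example_setup(struct mutex *mtx)
	{
		mtx_init(mtx, IPL_NET);	/* raised to IPL_AUDIO on MP kernels */
	}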