Diffstat (limited to 'lib')
-rw-r--r--  lib/libpthread/include/spinlock.h           |  78
-rw-r--r--  lib/librthread/arch/alpha/_atomic_lock.S    |   4
-rw-r--r--  lib/librthread/arch/amd64/_atomic_lock.c    |  12
-rw-r--r--  lib/librthread/arch/arm/_atomic_lock.c      |  14
-rw-r--r--  lib/librthread/arch/hppa/_atomic_lock.c     |  10
-rw-r--r--  lib/librthread/arch/hppa64/_atomic_lock.c   |  10
-rw-r--r--  lib/librthread/arch/i386/_atomic_lock.c     |  12
-rw-r--r--  lib/librthread/arch/m68k/_atomic_lock.c     |  14
-rw-r--r--  lib/librthread/arch/m88k/_atomic_lock.c     |  12
-rw-r--r--  lib/librthread/arch/mips64/_atomic_lock.c   |  12
-rw-r--r--  lib/librthread/arch/powerpc/_atomic_lock.c  |  12
-rw-r--r--  lib/librthread/arch/sh/_atomic_lock.c       |  15
-rw-r--r--  lib/librthread/arch/sparc/_atomic_lock.c    |  12
-rw-r--r--  lib/librthread/arch/sparc64/_atomic_lock.c  |  12
-rw-r--r--  lib/librthread/arch/vax/_atomic_lock.c      |  21
-rw-r--r--  lib/librthread/rthread.c                    |  59
-rw-r--r--  lib/librthread/rthread.h                    |  42
-rw-r--r--  lib/librthread/rthread_file.c               |   6
-rw-r--r--  lib/librthread/rthread_fork.c               |  10
-rw-r--r--  lib/librthread/rthread_libc.c               |   8
-rw-r--r--  lib/librthread/rthread_rwlock.c             |  14
-rw-r--r--  lib/librthread/rthread_sem.c                |   8
-rw-r--r--  lib/librthread/rthread_spin_lock.c          |   6
-rw-r--r--  lib/librthread/rthread_stack.c              |   4
-rw-r--r--  lib/librthread/rthread_sync.c               |  22
-rw-r--r--  lib/librthread/rthread_tls.c                |   4
26 files changed, 189 insertions, 244 deletions
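
The heart of this commit: libpthread's shared spinlock.h (a bare test-and-set
word, _spinlock_lock_t) is deleted, every architecture's _atomic_lock() is
retargeted at the _atomic_lock_t type from <machine/spinlock.h>, and librthread
gains a FIFO ticket lock, struct _spinlock, layered on top of _atomic_lock().
A minimal sketch of the new scheme, condensed from the rthread.h and rthread.c
hunks below (the real _spinlock() additionally aborts if a ticket is ever
skipped and yields only when more than one ticket behind; this is illustrative,
not the verbatim patched source):

    #include <machine/spinlock.h>   /* _atomic_lock_t, _ATOMIC_LOCK_UNLOCKED */
    #include <sched.h>              /* sched_yield() */
    #include <stdint.h>

    int _atomic_lock(volatile _atomic_lock_t *);    /* per-arch, in this diff */

    struct _spinlock {
        _atomic_lock_t atomiclock;  /* short-lived test-and-set latch */
        uint32_t waiter;            /* next ticket to hand out */
        uint32_t ready;             /* ticket currently being served */
        int pad;
    };

    void
    _spinlock(volatile struct _spinlock *lock)
    {
        uint32_t me;

        /* take the latch, draw a ticket, release the latch */
        while (_atomic_lock(&lock->atomiclock))
            sched_yield();
        me = lock->waiter++;
        lock->atomiclock = _ATOMIC_LOCK_UNLOCKED;

        /* spin until our number comes up; contenders are served FIFO */
        while (me != lock->ready)
            sched_yield();
    }

    void
    _spinunlock(volatile struct _spinlock *lock)
    {
        lock->ready++;              /* hand the lock to the next ticket */
    }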
diff --git a/lib/libpthread/include/spinlock.h b/lib/libpthread/include/spinlock.h
deleted file mode 100644
index 1e836d41605..00000000000
--- a/lib/libpthread/include/spinlock.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by John Birrell.
- * 4. Neither the name of the author nor the names of any co-contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $Id: spinlock.h,v 1.9 2012/12/05 23:20:05 deraadt Exp $
- * $OpenBSD: spinlock.h,v 1.9 2012/12/05 23:20:05 deraadt Exp $
- *
- * Lock definitions used in both libc and libpthread.
- *
- */
-
-#ifndef _SPINLOCK_H_
-#define _SPINLOCK_H_
-#include <sys/types.h>
-#include <machine/spinlock.h>
-
-/*
- * Lock structure with room for debugging information.
- */
-typedef volatile struct {
- _spinlock_lock_t access_lock;
- void * lock_owner;
- const char * fname;
- int lineno;
-} spinlock_t;
-
-#define _SPINLOCK_INITIALIZER { _SPINLOCK_UNLOCKED, 0, 0, 0 }
-
-#define _SPINUNLOCK(_lck) (_lck)->access_lock = _SPINLOCK_UNLOCKED
-#ifdef _LOCK_DEBUG
-#define _SPINLOCK(_lck) _spinlock_debug(_lck, __FILE__, __LINE__)
-#else
-#define _SPINLOCK(_lck) _spinlock(_lck)
-#endif
-
-#define _SPINLOCK_INIT(_lck) _SPINUNLOCK(_lck)
-
-/*
- * Thread function prototype definitions:
- */
-__BEGIN_DECLS
-void _spinlock(spinlock_t *);
-void _spinlock_debug(spinlock_t *, const char *, int);
-
-/* lock() functions return 0 if lock was acquired. */
-/* is_locked functions() return 1 if lock is locked. */
-int _atomic_lock(volatile _spinlock_lock_t *);
-int _atomic_is_locked(volatile _spinlock_lock_t *);
-__END_DECLS
-
-#endif /* _SPINLOCK_H_ */
diff --git a/lib/librthread/arch/alpha/_atomic_lock.S b/lib/librthread/arch/alpha/_atomic_lock.S
index 41481216c4a..a0b2993cc80 100644
--- a/lib/librthread/arch/alpha/_atomic_lock.S
+++ b/lib/librthread/arch/alpha/_atomic_lock.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: _atomic_lock.S,v 1.1 2005/12/04 05:47:38 brad Exp $ */
+/* $OpenBSD: _atomic_lock.S,v 1.2 2013/06/01 20:47:40 tedu Exp $ */
/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
#include <machine/asm.h>
@@ -8,7 +8,7 @@ LEAF(_atomic_lock,1)
/* NOTE: using ldl_l/stl_c instead of
ldq_l and stq_c as machine/spinlock.h
- defines _spinlock_lock_t as int */
+ defines _atomic_lock_t as int */
0: ldl_l v0, 0(a0) /* read existing lock value */
mov 1, t0 /* locked value to store */
stl_c t0, 0(a0) /* attempt to store, status in t0 */
diff --git a/lib/librthread/arch/amd64/_atomic_lock.c b/lib/librthread/arch/amd64/_atomic_lock.c
index 9f60c785f40..f3527aaf081 100644
--- a/lib/librthread/arch/amd64/_atomic_lock.c
+++ b/lib/librthread/arch/amd64/_atomic_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: _atomic_lock.c,v 1.3 2009/06/01 22:52:38 guenther Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.4 2013/06/01 20:47:40 tedu Exp $ */
/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
@@ -6,21 +6,21 @@
* Atomic lock for amd64 -- taken from i386 code.
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _atomic_lock_t *lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
/*
* Use the eXCHanGe instruction to swap the lock value with
* a local variable containing the locked state.
*/
- old = _SPINLOCK_LOCKED;
+ old = _ATOMIC_LOCK_LOCKED;
__asm__("xchg %0,(%2)"
: "=r" (old)
: "0" (old), "r" (lock));
- return (old != _SPINLOCK_UNLOCKED);
+ return (old != _ATOMIC_LOCK_UNLOCKED);
}
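
The per-architecture files in this diff change only the type name and the
include path; the contract is untouched: _atomic_lock() atomically stores the
locked value and returns nonzero when the lock was already held. A usage
sketch (spin_acquire is a hypothetical name; the equivalent loop appears in
the rthread.c hunk further down):

    #include <machine/spinlock.h>
    #include <sched.h>

    int _atomic_lock(volatile _atomic_lock_t *);    /* per-arch, in this diff */

    static void
    spin_acquire(volatile _atomic_lock_t *l)
    {
        /* nonzero return: someone else got there first, so try again */
        while (_atomic_lock(l))
            sched_yield();
    }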
diff --git a/lib/librthread/arch/arm/_atomic_lock.c b/lib/librthread/arch/arm/_atomic_lock.c
index 48bca7da276..6a5956d26d7 100644
--- a/lib/librthread/arch/arm/_atomic_lock.c
+++ b/lib/librthread/arch/arm/_atomic_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: _atomic_lock.c,v 1.3 2013/01/23 20:49:55 patrick Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.4 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2004 Dale Rahn. All rights reserved.
@@ -28,12 +28,12 @@
* Atomic lock for arm
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _atomic_lock_t *lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
#ifdef ARM_V7PLUS_LOCKS
uint32_t scratch = 0;
@@ -43,12 +43,12 @@ _atomic_lock(volatile _spinlock_lock_t *lock)
" cmp %2, #0 \n"
" bne 1b \n"
: "+r" (old), "+r" (lock), "+r" (scratch)
- : "r" (_SPINLOCK_LOCKED));
+ : "r" (_ATOMIC_LOCK_LOCKED));
#else
__asm__("swp %0, %2, [%1]"
: "=r" (old), "=r" (lock)
- : "r" (_SPINLOCK_LOCKED), "1" (lock) );
+ : "r" (_ATOMIC_LOCK_LOCKED), "1" (lock) );
#endif
- return (old != _SPINLOCK_UNLOCKED);
+ return (old != _ATOMIC_LOCK_UNLOCKED);
}
diff --git a/lib/librthread/arch/hppa/_atomic_lock.c b/lib/librthread/arch/hppa/_atomic_lock.c
index c01fa9f78f7..cdde57c2a0e 100644
--- a/lib/librthread/arch/hppa/_atomic_lock.c
+++ b/lib/librthread/arch/hppa/_atomic_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: _atomic_lock.c,v 1.6 2012/03/03 14:42:33 miod Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.7 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2005 Marco Peereboom <marco@openbsd.org>
*
@@ -15,16 +15,16 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
#ifdef DIAGNOSTIC
#include <stdio.h>
#include <stdlib.h>
#endif
int
-_atomic_lock(volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _atomic_lock_t *lock)
{
- volatile _spinlock_lock_t old;
+ volatile _atomic_lock_t old;
#ifdef DIAGNOSTIC
if ((unsigned long)lock & 0xf) {
@@ -37,5 +37,5 @@ _atomic_lock(volatile _spinlock_lock_t *lock)
: "=&r" (old), "+m" (lock)
: "r" (lock));
- return (old == _SPINLOCK_LOCKED);
+ return (old == _ATOMIC_LOCK_LOCKED);
}
diff --git a/lib/librthread/arch/hppa64/_atomic_lock.c b/lib/librthread/arch/hppa64/_atomic_lock.c
index 756af207428..886d2d132a0 100644
--- a/lib/librthread/arch/hppa64/_atomic_lock.c
+++ b/lib/librthread/arch/hppa64/_atomic_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: _atomic_lock.c,v 1.1 2012/04/13 14:38:22 jsing Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.2 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2005 Marco Peereboom <marco@openbsd.org>
*
@@ -15,16 +15,16 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
#ifdef DIAGNOSTIC
#include <stdio.h>
#include <stdlib.h>
#endif
int
-_atomic_lock(volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _atomic_lock_t *lock)
{
- volatile _spinlock_lock_t old;
+ volatile _atomic_lock_t old;
#ifdef DIAGNOSTIC
if ((unsigned long)lock & 0xf) {
@@ -37,5 +37,5 @@ _atomic_lock(volatile _spinlock_lock_t *lock)
: "=&r" (old), "+m" (lock)
: "r" (lock));
- return (old == _SPINLOCK_LOCKED);
+ return (old == _ATOMIC_LOCK_LOCKED);
}
diff --git a/lib/librthread/arch/i386/_atomic_lock.c b/lib/librthread/arch/i386/_atomic_lock.c
index 2a42259680e..b765644fb82 100644
--- a/lib/librthread/arch/i386/_atomic_lock.c
+++ b/lib/librthread/arch/i386/_atomic_lock.c
@@ -1,25 +1,25 @@
-/* $OpenBSD: _atomic_lock.c,v 1.3 2010/12/03 19:44:22 miod Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.4 2013/06/01 20:47:40 tedu Exp $ */
/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
/*
* Atomic lock for i386
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _atomic_lock_t *lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
/*
* Use the eXCHanGe instruction to swap the lock value with
* a local variable containing the locked state.
*/
- old = _SPINLOCK_LOCKED;
+ old = _ATOMIC_LOCK_LOCKED;
__asm__("xchg %0,(%2)"
: "=r" (old)
: "0" (old), "r" (lock));
- return (old != _SPINLOCK_UNLOCKED);
+ return (old != _ATOMIC_LOCK_UNLOCKED);
}
diff --git a/lib/librthread/arch/m68k/_atomic_lock.c b/lib/librthread/arch/m68k/_atomic_lock.c
index 50f5dcab8a0..6d1bdacfc37 100644
--- a/lib/librthread/arch/m68k/_atomic_lock.c
+++ b/lib/librthread/arch/m68k/_atomic_lock.c
@@ -1,16 +1,16 @@
-/* $OpenBSD: _atomic_lock.c,v 1.3 2008/10/02 23:29:26 deraadt Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.4 2013/06/01 20:47:40 tedu Exp $ */
/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
/*
* Atomic lock for m68k
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _atomic_lock_t *lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
/*
* The Compare And Swap instruction (mc68020 and above)
@@ -29,10 +29,10 @@ _atomic_lock(volatile _spinlock_lock_t *lock)
* if Z then Du -> <ea>
* else <ea> -> Dc;
*/
- old = _SPINLOCK_UNLOCKED;
+ old = _ATOMIC_LOCK_UNLOCKED;
__asm__("casl %0, %2, %1" : "=d" (old), "=m" (*lock)
- : "d" (_SPINLOCK_LOCKED),
+ : "d" (_ATOMIC_LOCK_LOCKED),
"0" (old), "1" (*lock)
: "cc");
- return (old != _SPINLOCK_UNLOCKED);
+ return (old != _ATOMIC_LOCK_UNLOCKED);
}
diff --git a/lib/librthread/arch/m88k/_atomic_lock.c b/lib/librthread/arch/m88k/_atomic_lock.c
index e1ff84b76c4..445ee7ac065 100644
--- a/lib/librthread/arch/m88k/_atomic_lock.c
+++ b/lib/librthread/arch/m88k/_atomic_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: _atomic_lock.c,v 1.3 2013/01/05 11:20:55 miod Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.4 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2003, Miodrag Vallat.
@@ -29,16 +29,16 @@
* Atomic lock for m88k
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _atomic_lock_t *lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
- old = _SPINLOCK_LOCKED;
+ old = _ATOMIC_LOCK_LOCKED;
__asm__ __volatile__
("xmem %0, %2, %%r0" : "=r" (old) : "0" (old), "r" (lock));
- return (old != _SPINLOCK_UNLOCKED);
+ return (old != _ATOMIC_LOCK_UNLOCKED);
}
diff --git a/lib/librthread/arch/mips64/_atomic_lock.c b/lib/librthread/arch/mips64/_atomic_lock.c
index 3b000eb0914..e02abd40af4 100644
--- a/lib/librthread/arch/mips64/_atomic_lock.c
+++ b/lib/librthread/arch/mips64/_atomic_lock.c
@@ -1,16 +1,16 @@
-/* $OpenBSD: _atomic_lock.c,v 1.5 2013/05/06 00:23:49 guenther Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.6 2013/06/01 20:47:40 tedu Exp $ */
/*
* Atomic lock for mips
* Written by Miodrag Vallat <miod@openbsd.org> - placed in the public domain.
*/
-#include "spinlock.h"
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _atomic_lock_t *lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
__asm__ __volatile__ (
".set noreorder\n"
@@ -20,8 +20,8 @@ _atomic_lock(volatile _spinlock_lock_t *lock)
" addi %2, $0, %3\n"
".set reorder\n"
: "=&r"(old)
- : "r"(lock), "r"(_SPINLOCK_LOCKED), "i"(_SPINLOCK_LOCKED)
+ : "r"(lock), "r"(_ATOMIC_LOCK_LOCKED), "i"(_SPINLOCK_LOCKED)
: "memory");
- return (old != _SPINLOCK_UNLOCKED);
+ return (old != _ATOMIC_LOCK_UNLOCKED);
}
diff --git a/lib/librthread/arch/powerpc/_atomic_lock.c b/lib/librthread/arch/powerpc/_atomic_lock.c
index 68035588c22..705f682114c 100644
--- a/lib/librthread/arch/powerpc/_atomic_lock.c
+++ b/lib/librthread/arch/powerpc/_atomic_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: _atomic_lock.c,v 1.4 2008/10/01 14:59:18 drahn Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.5 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 1998 Dale Rahn <drahn@openbsd.org>
*
@@ -19,22 +19,22 @@
* Atomic lock for powerpc
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _atomic_lock_t *lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
__asm__("1: lwarx 0,0,%1 \n"
" stwcx. %2,0,%1 \n"
" bne- 1b \n"
" mr %0, 0 \n"
: "=r" (old), "=r" (lock)
- : "r" (_SPINLOCK_LOCKED), "1" (lock) : "0"
+ : "r" (_ATOMIC_LOCK_LOCKED), "1" (lock) : "0"
);
- return (old != _SPINLOCK_UNLOCKED);
+ return (old != _ATOMIC_LOCK_UNLOCKED);
/*
* Dale <drahn@openbsd.org> says:
diff --git a/lib/librthread/arch/sh/_atomic_lock.c b/lib/librthread/arch/sh/_atomic_lock.c
index ec68b6d09ef..4dca89705db 100644
--- a/lib/librthread/arch/sh/_atomic_lock.c
+++ b/lib/librthread/arch/sh/_atomic_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: _atomic_lock.c,v 1.2 2008/06/26 05:42:05 ray Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.3 2013/06/01 20:47:40 tedu Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc.
@@ -29,12 +29,12 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include "spinlock.h"
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _atomic_lock_t *lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
__asm volatile(
" tas.b %0 \n"
@@ -44,10 +44,3 @@ _atomic_lock(volatile _spinlock_lock_t *lock)
return (old == 0);
}
-
-int
-_atomic_is_locked(volatile _spinlock_lock_t *lock)
-{
-
- return (*lock != _SPINLOCK_UNLOCKED);
-}
diff --git a/lib/librthread/arch/sparc/_atomic_lock.c b/lib/librthread/arch/sparc/_atomic_lock.c
index 036a7abb6e9..9c95d05e844 100644
--- a/lib/librthread/arch/sparc/_atomic_lock.c
+++ b/lib/librthread/arch/sparc/_atomic_lock.c
@@ -1,23 +1,23 @@
-/* $OpenBSD: _atomic_lock.c,v 1.4 2011/10/13 05:41:06 guenther Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.5 2013/06/01 20:47:40 tedu Exp $ */
/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
/*
* Atomic lock for sparc
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t * lock)
+_atomic_lock(volatile _atomic_lock_t * lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
/*
* " ldstub [address], reg_rd
*
* The atomic load-store instructions copy a byte from memory
* into r[rd]m then rewrite the addressed byte in memory to all
- * ones [_SPINLOCK_LOCKED]. The operation is performed
+ * ones [_ATOMIC_LOCK_LOCKED]. The operation is performed
* atomically, that is, without allowing intervening interrupts
* or deferred traps. In a multiprocessor system, two or more
* processors executing atomic load-store unsigned byte [...]
@@ -37,5 +37,5 @@ _atomic_lock(volatile _spinlock_lock_t * lock)
*/
__asm__("ldstub [%1], %0" : "=&r" (old) : "r" (lock) : "memory");
- return (old == _SPINLOCK_LOCKED);
+ return (old == _ATOMIC_LOCK_LOCKED);
}
diff --git a/lib/librthread/arch/sparc64/_atomic_lock.c b/lib/librthread/arch/sparc64/_atomic_lock.c
index 750d9e5aa14..e18426b848e 100644
--- a/lib/librthread/arch/sparc64/_atomic_lock.c
+++ b/lib/librthread/arch/sparc64/_atomic_lock.c
@@ -1,23 +1,23 @@
-/* $OpenBSD: _atomic_lock.c,v 1.4 2011/10/13 05:41:06 guenther Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.5 2013/06/01 20:47:40 tedu Exp $ */
/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
/*
* Atomic lock for sparc64
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t * lock)
+_atomic_lock(volatile _atomic_lock_t * lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
/*
* " ldstub [address], reg_rd
*
* The atomic load-store instructions copy a byte from memory
* into r[rd]m then rewrite the addressed byte in memory to all
- * ones [_SPINLOCK_LOCKED]. The operation is performed
+ * ones [_ATOMIC_LOCK_LOCKED]. The operation is performed
* atomically, that is, without allowing intervening interrupts
* or deferred traps. In a multiprocessor system, two or more
* processors executing atomic load-store unsigned byte [...]
@@ -37,5 +37,5 @@ _atomic_lock(volatile _spinlock_lock_t * lock)
*/
__asm__("ldstub [%1], %0" : "=&r" (old) : "r" (lock) : "memory");
- return (old == _SPINLOCK_LOCKED);
+ return (old == _ATOMIC_LOCK_LOCKED);
}
diff --git a/lib/librthread/arch/vax/_atomic_lock.c b/lib/librthread/arch/vax/_atomic_lock.c
index ac8b8bf704a..7357d01f1b6 100644
--- a/lib/librthread/arch/vax/_atomic_lock.c
+++ b/lib/librthread/arch/vax/_atomic_lock.c
@@ -1,16 +1,16 @@
-/* $OpenBSD: _atomic_lock.c,v 1.2 2006/01/05 22:33:24 marc Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.3 2013/06/01 20:47:40 tedu Exp $ */
/*
* Atomic lock for vax
* Written by Miodrag Vallat <miod@openbsd.org> - placed in the public domain.
*/
-#include <spinlock.h>
+#include <machine/spinlock.h>
int
-_atomic_lock(volatile _spinlock_lock_t *lock)
+_atomic_lock(volatile _atomic_lock_t *lock)
{
- _spinlock_lock_t old;
+ _atomic_lock_t old;
/*
* The Branch on Bit Set and Set Interlocked instruction
@@ -24,19 +24,12 @@ _atomic_lock(volatile _spinlock_lock_t *lock)
* ``Control instructions''.
*/
__asm__ (
- "movl $1, %1\n" /* _SPINLOCK_LOCKED */
+ "movl $1, %1\n" /* _ATOMIC_LOCK_LOCKED */
"bbssi $0, %0, 1f\n"
- "movl $0, %1\n" /* _SPINLOCK_UNLOCKED */
+ "movl $0, %1\n" /* _ATOMIC_LOCK_UNLOCKED */
"1: \n"
: "=m" (*lock), "=r" (old) : "0" (*lock)
);
- return (old != _SPINLOCK_UNLOCKED);
-}
-
-int
-_atomic_is_locked(volatile _spinlock_lock_t *lock)
-{
-
- return (*lock != _SPINLOCK_UNLOCKED);
+ return (old != _ATOMIC_LOCK_UNLOCKED);
}
diff --git a/lib/librthread/rthread.c b/lib/librthread/rthread.c
index 49b4f1ddd14..3a2e6bf55b2 100644
--- a/lib/librthread/rthread.c
+++ b/lib/librthread/rthread.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread.c,v 1.69 2013/04/06 04:25:01 tedu Exp $ */
+/* $OpenBSD: rthread.c,v 1.70 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -49,13 +49,15 @@
static int concurrency_level; /* not used */
+struct _spinlock _SPINLOCK_UNLOCKED_ASSIGN = _SPINLOCK_UNLOCKED;
+
int _threads_ready;
size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
-_spinlock_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
+struct _spinlock _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
= TAILQ_HEAD_INITIALIZER(_thread_gc_list);
-static _spinlock_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
+static struct _spinlock _thread_gc_lock = _SPINLOCK_UNLOCKED;
struct pthread _initial_thread;
struct thread_control_block _initial_thread_tcb;
@@ -78,18 +80,45 @@ struct pthread_attr _rthread_attr_default = {
* internal support functions
*/
void
-_spinlock(_spinlock_lock_t *lock)
+_spinlock(volatile struct _spinlock *lock)
{
+ uint32_t me;
- while (_atomic_lock(lock))
+ while (_atomic_lock(&lock->atomiclock))
sched_yield();
+ me = lock->waiter++;
+ lock->atomiclock = _ATOMIC_LOCK_UNLOCKED;
+ while (me != lock->ready) {
+ if (me < lock->ready) {
+ _rthread_debug(0, "SPINLOCK FAIL: %d %d\n",
+ me, lock->ready);
+ _exit(1);
+ }
+ if (me > lock->ready + 1)
+ sched_yield();
+ }
}
-void
-_spinunlock(_spinlock_lock_t *lock)
+int
+_spinlocktry(volatile struct _spinlock *lock)
{
+ int gotit = 0;
+
+ while (_atomic_lock(&lock->atomiclock))
+ sched_yield();
+ if (lock->waiter == lock->ready) {
+ lock->waiter++;
+ gotit = 1;
+ }
+ lock->atomiclock = _ATOMIC_LOCK_UNLOCKED;
+
+ return gotit;
+}
- *lock = _SPINLOCK_UNLOCKED;
+void
+_spinunlock(volatile struct _spinlock *lock)
+{
+ lock->ready++;
}
/*
@@ -164,9 +193,9 @@ _rthread_init(void)
struct sigaction sa;
thread->tid = getthrid();
- thread->donesem.lock = _SPINLOCK_UNLOCKED;
+ thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
- thread->flags_lock = _SPINLOCK_UNLOCKED;
+ thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
strlcpy(thread->name, "Main process", sizeof(thread->name));
LIST_INSERT_HEAD(&_thread_list, thread, threads);
_rthread_debug_init();
@@ -403,8 +432,8 @@ pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
thread = calloc(1, sizeof(*thread));
if (!thread)
return (errno);
- thread->donesem.lock = _SPINLOCK_UNLOCKED;
- thread->flags_lock = _SPINLOCK_UNLOCKED;
+ thread->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
+ thread->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
thread->fn = start_routine;
thread->arg = arg;
thread->tid = -1;
@@ -603,7 +632,7 @@ _thread_dump_info(void)
void
_rthread_dl_lock(int what)
{
- static _spinlock_lock_t lock = _SPINLOCK_UNLOCKED;
+ static struct _spinlock lock = _SPINLOCK_UNLOCKED;
static pthread_t owner = NULL;
static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
static int count = 0;
@@ -619,7 +648,7 @@ _rthread_dl_lock(int what)
} else if (owner != self) {
TAILQ_INSERT_TAIL(&lockers, self, waiting);
while (owner != self) {
- __thrsleep(self, 0, NULL, &lock, NULL);
+ __thrsleep(self, 0 | 0x8, NULL, &lock.ready, NULL);
_spinlock(&lock);
}
}
@@ -646,7 +675,7 @@ _rthread_dl_lock(int what)
void
_rthread_bind_lock(int what)
{
- static _spinlock_lock_t lock = _SPINLOCK_UNLOCKED;
+ static struct _spinlock lock = _SPINLOCK_UNLOCKED;
if (what == 0)
_spinlock(&lock);
diff --git a/lib/librthread/rthread.h b/lib/librthread/rthread.h
index 186a3459041..df83b7dcc43 100644
--- a/lib/librthread/rthread.h
+++ b/lib/librthread/rthread.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread.h,v 1.41 2013/03/24 19:55:45 guenther Exp $ */
+/* $OpenBSD: rthread.h,v 1.42 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -20,7 +20,7 @@
* Since only the thread library cares about their size or arrangement,
* it should be possible to switch libraries without relinking.
*
- * Do not reorder _spinlock_lock_t and sem_t variables in the structs.
+ * Do not reorder struct _spinlock and sem_t variables in the structs.
* This is due to alignment requirements of certain arches like hppa.
* The current requirement is 16 bytes.
*/
@@ -36,6 +36,16 @@
#define RTHREAD_STACK_SIZE_DEF (256 * 1024)
#endif
+struct _spinlock {
+ _atomic_lock_t atomiclock;
+ uint32_t waiter;
+ uint32_t ready;
+ int pad;
+};
+
+#define _SPINLOCK_UNLOCKED { _ATOMIC_LOCK_UNLOCKED, 0, 0 }
+extern struct _spinlock _SPINLOCK_UNLOCKED_ASSIGN;
+
struct stack {
SLIST_ENTRY(stack) link; /* link for free default stacks */
void *sp; /* machine stack pointer */
@@ -46,16 +56,15 @@ struct stack {
};
struct __sem {
- _spinlock_lock_t lock;
+ struct _spinlock lock;
volatile int waitcount;
volatile int value;
- int pad;
};
TAILQ_HEAD(pthread_queue, pthread);
struct pthread_mutex {
- _spinlock_lock_t lock;
+ struct _spinlock lock;
struct pthread_queue lockers;
int type;
pthread_t owner;
@@ -70,7 +79,7 @@ struct pthread_mutex_attr {
};
struct pthread_cond {
- _spinlock_lock_t lock;
+ struct _spinlock lock;
struct pthread_queue waiters;
struct pthread_mutex *mutex;
clockid_t clock;
@@ -81,7 +90,7 @@ struct pthread_cond_attr {
};
struct pthread_rwlock {
- _spinlock_lock_t lock;
+ struct _spinlock lock;
pthread_t owner;
struct pthread_queue writers;
int readers;
@@ -135,18 +144,18 @@ struct pthread_barrierattr {
};
struct pthread_spinlock {
- _spinlock_lock_t lock;
+ struct _spinlock lock;
pthread_t owner;
};
struct pthread {
struct __sem donesem;
+ pid_t tid;
+ unsigned int flags;
+ struct _spinlock flags_lock;
#if TLS_VARIANT == 1
int *errno_ptr;
#endif
- pid_t tid;
- unsigned int flags;
- _spinlock_lock_t flags_lock;
void *retval;
void *(*fn)(void *);
void *arg;
@@ -184,14 +193,15 @@ extern int _threads_ready;
extern size_t _thread_pagesize;
extern LIST_HEAD(listhead, pthread) _thread_list;
extern struct pthread _initial_thread;
-extern _spinlock_lock_t _thread_lock;
+extern struct _spinlock _thread_lock;
extern struct pthread_attr _rthread_attr_default;
#define ROUND_TO_PAGE(size) \
(((size) + (_thread_pagesize - 1)) & ~(_thread_pagesize - 1))
-void _spinlock(_spinlock_lock_t *);
-void _spinunlock(_spinlock_lock_t *);
+void _spinlock(volatile struct _spinlock *);
+int _spinlocktry(volatile struct _spinlock *);
+void _spinunlock(volatile struct _spinlock *);
int _sem_wait(sem_t, int, const struct timespec *, int *);
int _sem_post(sem_t);
@@ -217,13 +227,11 @@ void _leave_delayed_cancel(pthread_t, int);
void _thread_dump_info(void);
-int _atomic_lock(register volatile _spinlock_lock_t *);
-
/* syscalls */
int getthrid(void);
void __threxit(pid_t *);
int __thrsleep(const volatile void *, clockid_t, const struct timespec *,
- void *, const int *);
+ volatile void *, const int *);
int __thrwakeup(const volatile void *, int n);
int __thrsigdivert(sigset_t, siginfo_t *, const struct timespec *);
int sched_yield(void);
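
A side effect of the struct conversion: _SPINLOCK_UNLOCKED is now a
brace-enclosed initializer list, which C accepts only in declarations, never
on the right-hand side of an assignment to an existing object. That is why
rthread.c defines the _SPINLOCK_UNLOCKED_ASSIGN object, used everywhere the
old code assigned _SPINLOCK_UNLOCKED at run time. A short illustration
(sem_lock_reset is a hypothetical helper):

    #include "rthread.h"    /* struct _spinlock, struct __sem, the macros */

    static struct _spinlock lk = _SPINLOCK_UNLOCKED;    /* ok: initializer */

    static void
    sem_lock_reset(struct __sem *sem)
    {
        /* "sem->lock = _SPINLOCK_UNLOCKED;" would not compile: a brace
         * list is not an expression.  Assign from a real object instead
         * (a C99 compound literal would also work): */
        sem->lock = _SPINLOCK_UNLOCKED_ASSIGN;
    }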
diff --git a/lib/librthread/rthread_file.c b/lib/librthread/rthread_file.c
index 28bad52d0c2..06afc366015 100644
--- a/lib/librthread/rthread_file.c
+++ b/lib/librthread/rthread_file.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_file.c,v 1.4 2012/01/17 02:34:18 guenther Exp $ */
+/* $OpenBSD: rthread_file.c,v 1.5 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
@@ -86,7 +86,7 @@ static struct static_file_lock {
} flh[NUM_HEADS];
/* Lock for accesses to the hash table: */
-static _spinlock_lock_t hash_lock = _SPINLOCK_UNLOCKED;
+static struct _spinlock hash_lock = _SPINLOCK_UNLOCKED;
/*
* Find a lock structure for a FILE, return NULL if the file is
@@ -204,7 +204,7 @@ void
*/
TAILQ_INSERT_TAIL(&p->lockers,self,waiting);
while (p->owner != self) {
- __thrsleep(self, 0, NULL, &hash_lock, NULL);
+ __thrsleep(self, 0 | 0x8, NULL, &hash_lock.ready, NULL);
_spinlock(&hash_lock);
}
}
diff --git a/lib/librthread/rthread_fork.c b/lib/librthread/rthread_fork.c
index c5847363929..b46698b75c9 100644
--- a/lib/librthread/rthread_fork.c
+++ b/lib/librthread/rthread_fork.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_fork.c,v 1.6 2012/08/22 23:43:32 matthew Exp $ */
+/* $OpenBSD: rthread_fork.c,v 1.7 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2008 Kurt Miller <kurt@openbsd.org>
@@ -55,7 +55,7 @@ struct rthread_atfork {
static TAILQ_HEAD(atfork_listhead, rthread_atfork) _atfork_list =
TAILQ_HEAD_INITIALIZER(_atfork_list);
-static _spinlock_lock_t _atfork_lock = _SPINLOCK_UNLOCKED;
+static struct _spinlock _atfork_lock = _SPINLOCK_UNLOCKED;
pid_t _thread_sys_fork(void);
pid_t _thread_sys_vfork(void);
@@ -116,9 +116,9 @@ _dofork(int is_vfork)
if (newid == 0) {
/* update this thread's structure */
me->tid = getthrid();
- me->donesem.lock = _SPINLOCK_UNLOCKED;
+ me->donesem.lock = _SPINLOCK_UNLOCKED_ASSIGN;
me->flags &= ~THREAD_DETACHED;
- me->flags_lock = _SPINLOCK_UNLOCKED;
+ me->flags_lock = _SPINLOCK_UNLOCKED_ASSIGN;
/* this thread is the initial thread for the new process */
_initial_thread = *me;
@@ -126,7 +126,7 @@ _dofork(int is_vfork)
/* reinit the thread list */
LIST_INIT(&_thread_list);
LIST_INSERT_HEAD(&_thread_list, &_initial_thread, threads);
- _thread_lock = _SPINLOCK_UNLOCKED;
+ _thread_lock = _SPINLOCK_UNLOCKED_ASSIGN;
/* single threaded now */
__isthreaded = 0;
diff --git a/lib/librthread/rthread_libc.c b/lib/librthread/rthread_libc.c
index fad70524c75..13df27c498e 100644
--- a/lib/librthread/rthread_libc.c
+++ b/lib/librthread/rthread_libc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_libc.c,v 1.10 2012/04/17 15:10:11 miod Exp $ */
+/* $OpenBSD: rthread_libc.c,v 1.11 2013/06/01 20:47:40 tedu Exp $ */
/* $snafu: libc_tag.c,v 1.4 2004/11/30 07:00:06 marc Exp $ */
/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
@@ -152,7 +152,7 @@ _thread_mutex_destroy(void **mutex)
/*
* the malloc lock
*/
-static _spinlock_lock_t malloc_lock = _SPINLOCK_UNLOCKED;
+static struct _spinlock malloc_lock = _SPINLOCK_UNLOCKED;
void
_thread_malloc_lock(void)
@@ -169,7 +169,7 @@ _thread_malloc_unlock(void)
/*
* atexit lock
*/
-static _spinlock_lock_t atexit_lock = _SPINLOCK_UNLOCKED;
+static struct _spinlock atexit_lock = _SPINLOCK_UNLOCKED;
void
_thread_atexit_lock(void)
@@ -186,7 +186,7 @@ _thread_atexit_unlock(void)
/*
* arc4random lock
*/
-static _spinlock_lock_t arc4_lock = _SPINLOCK_UNLOCKED;
+static struct _spinlock arc4_lock = _SPINLOCK_UNLOCKED;
void
_thread_arc4_lock(void)
diff --git a/lib/librthread/rthread_rwlock.c b/lib/librthread/rthread_rwlock.c
index c148089d996..f3b68367ead 100644
--- a/lib/librthread/rthread_rwlock.c
+++ b/lib/librthread/rthread_rwlock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_rwlock.c,v 1.2 2012/01/17 02:34:18 guenther Exp $ */
+/* $OpenBSD: rthread_rwlock.c,v 1.3 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
@@ -31,7 +31,7 @@
#include "rthread.h"
-static _spinlock_lock_t rwlock_init_lock = _SPINLOCK_UNLOCKED;
+static struct _spinlock rwlock_init_lock = _SPINLOCK_UNLOCKED;
/* ARGSUSED1 */
int
@@ -43,7 +43,7 @@ pthread_rwlock_init(pthread_rwlock_t *lockp,
lock = calloc(1, sizeof(*lock));
if (!lock)
return (errno);
- lock->lock = _SPINLOCK_UNLOCKED;
+ lock->lock = _SPINLOCK_UNLOCKED_ASSIGN;
TAILQ_INIT(&lock->writers);
*lockp = lock;
@@ -117,8 +117,8 @@ _rthread_rwlock_rdlock(pthread_rwlock_t *lockp, const struct timespec *abstime,
error = EDEADLK;
else {
do {
- if (__thrsleep(lock, CLOCK_REALTIME, abstime,
- &lock->lock, NULL) == EWOULDBLOCK)
+ if (__thrsleep(lock, CLOCK_REALTIME | 0x8, abstime,
+ &lock->lock.ready, NULL) == EWOULDBLOCK)
return (ETIMEDOUT);
_spinlock(&lock->lock);
} while (lock->owner != NULL || !TAILQ_EMPTY(&lock->writers));
@@ -180,8 +180,8 @@ _rthread_rwlock_wrlock(pthread_rwlock_t *lockp, const struct timespec *abstime,
/* gotta block */
TAILQ_INSERT_TAIL(&lock->writers, thread, waiting);
do {
- do_wait = __thrsleep(thread, CLOCK_REALTIME, abstime,
- &lock->lock, NULL) != EWOULDBLOCK;
+ do_wait = __thrsleep(thread, CLOCK_REALTIME | 0x8, abstime,
+ &lock->lock.ready, NULL) != EWOULDBLOCK;
_spinlock(&lock->lock);
} while (lock->owner != thread && do_wait);
diff --git a/lib/librthread/rthread_sem.c b/lib/librthread/rthread_sem.c
index 40b24a1122f..81d9b951aa4 100644
--- a/lib/librthread/rthread_sem.c
+++ b/lib/librthread/rthread_sem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_sem.c,v 1.7 2012/03/03 11:09:19 guenther Exp $ */
+/* $OpenBSD: rthread_sem.c,v 1.8 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -42,8 +42,8 @@ _sem_wait(sem_t sem, int tryonly, const struct timespec *abstime,
} else {
sem->waitcount++;
do {
- r = __thrsleep(&sem->waitcount, CLOCK_REALTIME,
- abstime, &sem->lock, delayed_cancel);
+ r = __thrsleep(&sem->waitcount, CLOCK_REALTIME | 0x8,
+ abstime, &sem->lock.ready, delayed_cancel);
_spinlock(&sem->lock);
/* ignore interruptions other than cancelation */
if (r == EINTR && (delayed_cancel == NULL ||
@@ -97,7 +97,7 @@ sem_init(sem_t *semp, int pshared, unsigned int value)
errno = ENOSPC;
return (-1);
}
- sem->lock = _SPINLOCK_UNLOCKED;
+ sem->lock = _SPINLOCK_UNLOCKED_ASSIGN;
sem->value = value;
*semp = sem;
diff --git a/lib/librthread/rthread_spin_lock.c b/lib/librthread/rthread_spin_lock.c
index ad1adfa1587..0957486b11b 100644
--- a/lib/librthread/rthread_spin_lock.c
+++ b/lib/librthread/rthread_spin_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_spin_lock.c,v 1.2 2012/05/06 10:01:18 pirofti Exp $ */
+/* $OpenBSD: rthread_spin_lock.c,v 1.3 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2012 Paul Irofti <pirofti@openbsd.org>
*
@@ -37,7 +37,7 @@ pthread_spin_init(pthread_spinlock_t *lock, int pshared)
if (l == NULL)
return (ENOMEM);
- l->lock = _SPINLOCK_UNLOCKED;
+ l->lock = _SPINLOCK_UNLOCKED_ASSIGN;
*lock = l;
return (0);
}
@@ -69,7 +69,7 @@ pthread_spin_trylock(pthread_spinlock_t *lock)
if (l->owner == self)
return (EDEADLK);
- if (_atomic_lock(&l->lock))
+ if (!_spinlocktry(&l->lock))
return (EBUSY);
l->owner = self;
diff --git a/lib/librthread/rthread_stack.c b/lib/librthread/rthread_stack.c
index b194907534a..557a6b4267b 100644
--- a/lib/librthread/rthread_stack.c
+++ b/lib/librthread/rthread_stack.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_stack.c,v 1.9 2013/03/21 21:59:31 deraadt Exp $ */
+/* $OpenBSD: rthread_stack.c,v 1.10 2013/06/01 20:47:40 tedu Exp $ */
/* $snafu: rthread_stack.c,v 1.12 2005/01/11 02:45:28 marc Exp $ */
/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
@@ -19,7 +19,7 @@
* attributes for possible reuse.
*/
static SLIST_HEAD(, stack) def_stacks = SLIST_HEAD_INITIALIZER(head);
-static _spinlock_lock_t def_stacks_lock = _SPINLOCK_UNLOCKED;
+static struct _spinlock def_stacks_lock = _SPINLOCK_UNLOCKED;
struct stack *
_rthread_alloc_stack(pthread_t thread)
diff --git a/lib/librthread/rthread_sync.c b/lib/librthread/rthread_sync.c
index 394c0d4b503..d8ee607194d 100644
--- a/lib/librthread/rthread_sync.c
+++ b/lib/librthread/rthread_sync.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_sync.c,v 1.37 2013/06/01 19:47:28 tedu Exp $ */
+/* $OpenBSD: rthread_sync.c,v 1.38 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
@@ -31,7 +31,7 @@
#include "rthread.h"
-static _spinlock_lock_t static_init_lock = _SPINLOCK_UNLOCKED;
+static struct _spinlock static_init_lock = _SPINLOCK_UNLOCKED;
/*
* mutexen
@@ -44,7 +44,7 @@ pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
mutex = calloc(1, sizeof(*mutex));
if (!mutex)
return (errno);
- mutex->lock = _SPINLOCK_UNLOCKED;
+ mutex->lock = _SPINLOCK_UNLOCKED_ASSIGN;
TAILQ_INIT(&mutex->lockers);
if (attr == NULL) {
mutex->type = PTHREAD_MUTEX_DEFAULT;
@@ -127,8 +127,8 @@ _rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait,
abort();
/* self-deadlock, possibly until timeout */
- while (__thrsleep(self, CLOCK_REALTIME, abstime,
- &mutex->lock, NULL) != EWOULDBLOCK)
+ while (__thrsleep(self, CLOCK_REALTIME | 0x8, abstime,
+ &mutex->lock.ready, NULL) != EWOULDBLOCK)
_spinlock(&mutex->lock);
return (ETIMEDOUT);
}
@@ -144,8 +144,8 @@ _rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait,
/* add to the wait queue and block until at the head */
TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
while (mutex->owner != self) {
- ret = __thrsleep(self, CLOCK_REALTIME, abstime,
- &mutex->lock, NULL);
+ ret = __thrsleep(self, CLOCK_REALTIME | 0x8, abstime,
+ &mutex->lock.ready, NULL);
_spinlock(&mutex->lock);
assert(mutex->owner != NULL);
if (ret == EWOULDBLOCK) {
@@ -245,7 +245,7 @@ pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attr)
cond = calloc(1, sizeof(*cond));
if (!cond)
return (errno);
- cond->lock = _SPINLOCK_UNLOCKED;
+ cond->lock = _SPINLOCK_UNLOCKED_ASSIGN;
TAILQ_INIT(&cond->waiters);
if (attr == NULL)
cond->clock = CLOCK_REALTIME;
@@ -350,8 +350,8 @@ pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
/* wait until we're the owner of the mutex again */
while (mutex->owner != self) {
- error = __thrsleep(self, cond->clock, abstime, &mutex->lock,
- &self->delayed_cancel);
+ error = __thrsleep(self, cond->clock | 0x8, abstime,
+ &mutex->lock.ready, &self->delayed_cancel);
/*
* If abstime == NULL, then we're definitely waiting
@@ -497,7 +497,7 @@ pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
/* wait until we're the owner of the mutex again */
while (mutex->owner != self) {
- error = __thrsleep(self, 0, NULL, &mutex->lock,
+ error = __thrsleep(self, 0 | 0x8, NULL, &mutex->lock.ready,
&self->delayed_cancel);
/*
diff --git a/lib/librthread/rthread_tls.c b/lib/librthread/rthread_tls.c
index dfa14b0aa34..df02f3c0262 100644
--- a/lib/librthread/rthread_tls.c
+++ b/lib/librthread/rthread_tls.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_tls.c,v 1.13 2011/11/06 11:48:59 guenther Exp $ */
+/* $OpenBSD: rthread_tls.c,v 1.14 2013/06/01 20:47:40 tedu Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -27,7 +27,7 @@
#include "rthread.h"
static struct rthread_key rkeys[PTHREAD_KEYS_MAX];
-static _spinlock_lock_t rkeyslock = _SPINLOCK_UNLOCKED;
+static struct _spinlock rkeyslock = _SPINLOCK_UNLOCKED;
int
pthread_key_create(pthread_key_t *key, void (*destructor)(void*))