author | Philip Guenther <guenther@cvs.openbsd.org> | 2012-01-17 02:34:19 +0000
committer | Philip Guenther <guenther@cvs.openbsd.org> | 2012-01-17 02:34:19 +0000
commit | b954f00c23f17debc35a7aa94bac39a336fc3359 (patch)
tree | aec06661a73fe3843a229172ea9cbd95181c2a90
parent | c01d953b375c486f2bad253c49e4aed9a2f0938f (diff)
Reimplement mutexes, condvars, and rwlocks to eliminate bugs,
particularly the "consume the signal you just sent" hang, and put
the wait queues in userspace.
Do cancellation handling in pthread_cond_*wait(), pthread_join(),
and sem_wait().
Add __ prefix to thr{sleep,wakeup,exit,sigdivert}() syscalls; add
'abort" argument to thrsleep to close cancellation race; make
thr{sleep,wakeup} return errno values via *retval to avoid touching
userspace errno.
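
The core of the rework shows up in rthread_sync.c further down: each primitive now keeps its wait queue in userspace, guarded by a spinlock, and blocks with __thrsleep(), which gives up that spinlock atomically with going to sleep. Below is a minimal sketch of that pattern for the plain mutex case, assuming the librthread-internal declarations from rthread.h; the struct and function names are illustrative only, and the recursive/errorcheck handling and lock count of the committed code are omitted.

/*
 * Sketch only (not the committed code): the userspace wait-queue pattern
 * this diff introduces for mutexes.  A spinlock protects a TAILQ of
 * blocked threads; blocking is done with __thrsleep(), whose 4th argument
 * is that spinlock, released atomically with going to sleep; unlock hands
 * ownership straight to the head of the queue and wakes it with
 * __thrwakeup().  "struct sketch_mutex" and the helpers are illustrative.
 */
#include <sys/queue.h>
#include <pthread.h>
#include "rthread.h"	/* librthread-internal: _spinlock(), __thrsleep(), ... */

struct sketch_mutex {
	_spinlock_lock_t	lock;		/* protects owner + lockers */
	struct pthread_queue	lockers;	/* threads blocked on this mutex */
	pthread_t		owner;
};

static void
sketch_mutex_lock(struct sketch_mutex *m)
{
	pthread_t self = pthread_self();

	_spinlock(&m->lock);
	if (m->owner == NULL && TAILQ_EMPTY(&m->lockers)) {
		m->owner = self;		/* uncontended: take it */
	} else {
		/* queue up; unlock() below makes the head the new owner */
		TAILQ_INSERT_TAIL(&m->lockers, self, waiting);
		while (m->owner != self) {
			/* releases m->lock atomically with sleeping */
			__thrsleep(self, 0, NULL, &m->lock, NULL);
			_spinlock(&m->lock);
		}
	}
	_spinunlock(&m->lock);
}

static void
sketch_mutex_unlock(struct sketch_mutex *m)
{
	pthread_t next;

	_spinlock(&m->lock);
	/* hand ownership directly to the first waiter, if any */
	m->owner = next = TAILQ_FIRST(&m->lockers);
	if (next != NULL)
		TAILQ_REMOVE(&m->lockers, next, waiting);
	_spinunlock(&m->lock);
	if (next != NULL)
		__thrwakeup(next, 1);	/* wake exactly that thread */
}

On the kernel side (kern_synch.c in this diff), a wakeup that lands between the spinlock release and the actual sleep clears the sleeper's p_thrslpid, so __thrsleep() returns immediately instead of sleeping on a wakeup that already happened.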
34 files changed, 839 insertions, 390 deletions
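
On the cancellation side, the new fifth ("abort") argument to __thrsleep() is a pointer the kernel re-checks after the userspace lock has been dropped: if the SIGTHR handler has already set the flag, the syscall returns EINTR instead of sleeping, which is what closes the race between deciding to block and a cancel arriving. The following is a rough sketch of how the reworked _sem_wait() in rthread_sem.c uses it, with the try-only path trimmed; it leans on the librthread-internal struct sem layout and is not the committed function verbatim.

/*
 * Sketch of the blocking path of the new _sem_wait(), simplified from
 * rthread_sem.c in this diff.  delayed_cancel points at the flag that
 * sigthr_handler() sets when a cancel arrives inside a delayed-cancel
 * block; __thrsleep() refuses to sleep (returns EINTR) once it is set.
 */
#include <semaphore.h>
#include "rthread.h"	/* librthread-internal struct sem, _spinlock(), ... */

static int
sem_wait_sketch(sem_t sem, int *delayed_cancel)
{
	int ok;

	_spinlock(&sem->lock);
	if (sem->value) {
		sem->value--;
		ok = 1;
	} else {
		sem->waitcount++;
		do {
			/* kernel checks *delayed_cancel after unlocking */
			ok = __thrsleep(&sem->waitcount, 0, NULL, &sem->lock,
			    delayed_cancel) == 0;
			_spinlock(&sem->lock);
		} while (ok && sem->value == 0);
		sem->waitcount--;
		if (ok)
			sem->value--;
	}
	_spinunlock(&sem->lock);
	return (ok);	/* 1 = decremented, 0 = interrupted by a cancel */
}

Callers such as sem_wait() and pthread_join() bracket this with _enter_delayed_cancel()/_leave_delayed_cancel(), which arm the flag and, once it is safe, turn a recorded cancel into pthread_exit(PTHREAD_CANCELED).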
diff --git a/lib/libc/arch/amd64/Makefile.inc b/lib/libc/arch/amd64/Makefile.inc index 2cfeef61734..891bde15d16 100644 --- a/lib/libc/arch/amd64/Makefile.inc +++ b/lib/libc/arch/amd64/Makefile.inc @@ -1,4 +1,4 @@ -# $OpenBSD: Makefile.inc,v 1.3 2005/06/29 14:03:25 mickey Exp $ +# $OpenBSD: Makefile.inc,v 1.4 2012/01/17 02:34:18 guenther Exp $ KMINCLUDES= arch/amd64/SYS.h -KMSRCS= ffs.S strlen.S htonl.S ntohl.S htons.S ntohs.S +KMSRCS= ffs.S htonl.S ntohl.S htons.S ntohs.S diff --git a/lib/libc/arch/amd64/string/Makefile.inc b/lib/libc/arch/amd64/string/Makefile.inc index a384f0f20ae..e9662393875 100644 --- a/lib/libc/arch/amd64/string/Makefile.inc +++ b/lib/libc/arch/amd64/string/Makefile.inc @@ -1,8 +1,8 @@ -# $OpenBSD: Makefile.inc,v 1.2 2007/05/15 18:42:31 otto Exp $ +# $OpenBSD: Makefile.inc,v 1.3 2012/01/17 02:34:18 guenther Exp $ SRCS+= bcmp.c ffs.S index.c memchr.c memcmp.c bcopy.c bzero.c \ - rindex.c strcat.c strcmp.c strcpy.c strcspn.c strlen.S \ + rindex.c strcat.c strcmp.c strcpy.c strcspn.c strlen.c \ strncat.c strncmp.c strncpy.c strpbrk.c strsep.c \ strspn.c strstr.c swab.c memset.c strlcpy.c strlcat.c -LSRCS+= ffs.c strlen.c +LSRCS+= ffs.c diff --git a/lib/libc/shlib_version b/lib/libc/shlib_version index 73198ebb5bc..d9553a8cf66 100644 --- a/lib/libc/shlib_version +++ b/lib/libc/shlib_version @@ -1,4 +1,4 @@ -major=61 +major=62 minor=0 # note: If changes were made to include/thread_private.h or if system # calls were added/changed then libpthread must also be updated. diff --git a/lib/libc/sys/Makefile.inc b/lib/libc/sys/Makefile.inc index 2902075c49e..affed8158c4 100644 --- a/lib/libc/sys/Makefile.inc +++ b/lib/libc/sys/Makefile.inc @@ -1,4 +1,4 @@ -# $OpenBSD: Makefile.inc,v 1.96 2011/11/22 21:13:30 guenther Exp $ +# $OpenBSD: Makefile.inc,v 1.97 2012/01/17 02:34:18 guenther Exp $ # $NetBSD: Makefile.inc,v 1.35 1995/10/16 23:49:07 jtc Exp $ # @(#)Makefile.inc 8.1 (Berkeley) 6/17/93 @@ -66,7 +66,7 @@ ASM= accept.o access.o acct.o adjfreq.o adjtime.o bind.o chdir.o chflags.o \ symlink.o sync.o sysarch.o umask.o unlink.o unmount.o \ utimes.o wait4.o write.o writev.o nnpfspioctl.o __semctl.o \ __syscall.o __sysctl.o __getcwd.o sched_yield.o getthrid.o \ - thrsleep.o thrwakeup.o threxit.o thrsigdivert.o \ + __thrsleep.o __thrwakeup.o __threxit.o __thrsigdivert.o \ setrtable.o getrtable.o __set_tcb.o __get_tcb.o \ openat.o fchmodat.o fstatat.o mkdirat.o mkfifoat.o mknodat.o \ faccessat.o fchownat.o linkat.o readlinkat.o renameat.o symlinkat.o \ diff --git a/lib/librthread/arch/alpha/rfork_thread.S b/lib/librthread/arch/alpha/rfork_thread.S index 3205958f6a9..3cd2ab68106 100644 --- a/lib/librthread/arch/alpha/rfork_thread.S +++ b/lib/librthread/arch/alpha/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.3 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.4 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2005, Miodrag Vallat @@ -52,6 +52,6 @@ LEAF(__tfork_thread,0) jsr ra, (pv) mov zero, a0 - CALLSYS_NOERROR(threxit) + CALLSYS_NOERROR(__threxit) END(__tfork_thread) diff --git a/lib/librthread/arch/amd64/rfork_thread.S b/lib/librthread/arch/amd64/rfork_thread.S index 77b62ef8005..d5bd9a428eb 100644 --- a/lib/librthread/arch/amd64/rfork_thread.S +++ b/lib/librthread/arch/amd64/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.5 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.6 2012/01/17 02:34:18 guenther Exp $ */ /*- * Copyright (c) 2000 Peter Wemm <peter@FreeBSD.org> * Copyright (c) 2003 Alan L. 
Cox <alc@cs.rice.edu> @@ -68,7 +68,7 @@ ENTRY(__tfork_thread) /* * If we are in the child (new thread), then * set-up the call to the internal subroutine. If it - * returns, then call threxit. + * returns, then call __threxit. */ 1: movq %rsi, %rsp @@ -78,7 +78,7 @@ ENTRY(__tfork_thread) /* * Thread exit system call */ - movl $SYS_threxit, %eax + movl $SYS___threxit, %eax xorl %edi, %edi syscall diff --git a/lib/librthread/arch/arm/rfork_thread.S b/lib/librthread/arch/arm/rfork_thread.S index a631b1b8ce3..749ba0490fa 100644 --- a/lib/librthread/arch/arm/rfork_thread.S +++ b/lib/librthread/arch/arm/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.3 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.4 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2005 Dale Rahn <drahn@openbsd.org> * @@ -42,7 +42,7 @@ ENTRY(__tfork_thread) mov lr, pc mov pc, r2 nop - SYSTRAP(threxit) + SYSTRAP(__threxit) 1: ldmia sp!, {r4} b PIC_SYM(CERROR, PLT) diff --git a/lib/librthread/arch/hppa/rfork_thread.S b/lib/librthread/arch/hppa/rfork_thread.S index c98d4ee3bac..e29488db4d1 100644 --- a/lib/librthread/arch/hppa/rfork_thread.S +++ b/lib/librthread/arch/hppa/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2005, Miodrag Vallat @@ -56,7 +56,7 @@ ENTRY(__tfork_thread, 0) copy r31, rp copy r0, arg0 - SYSCALL(threxit) + SYSCALL(__threxit) 1: bv r0(rp) diff --git a/lib/librthread/arch/i386/rfork_thread.S b/lib/librthread/arch/i386/rfork_thread.S index 1bc1d3e9e32..9fe8da9310c 100644 --- a/lib/librthread/arch/i386/rfork_thread.S +++ b/lib/librthread/arch/i386/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.4 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.5 2012/01/17 02:34:18 guenther Exp $ */ /*- * Copyright (c) 2000 Peter Wemm <peter@FreeBSD.org> * All rights reserved. 
@@ -101,7 +101,7 @@ ENTRY(__tfork_thread) */ pushl %eax pushl $0 - movl $SYS_threxit, %eax + movl $SYS___threxit, %eax int $0x80 /* diff --git a/lib/librthread/arch/m68k/rfork_thread.S b/lib/librthread/arch/m68k/rfork_thread.S index 80c682baf50..3931a4a8eb1 100644 --- a/lib/librthread/arch/m68k/rfork_thread.S +++ b/lib/librthread/arch/m68k/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2005, Miodrag Vallat @@ -61,7 +61,7 @@ ENTRY(__tfork_thread) jsr a1@ /* func */ addq #4, sp - __DO_SYSCALL(threxit) + __DO_SYSCALL(__threxit) 9: /* diff --git a/lib/librthread/arch/m88k/rfork_thread.S b/lib/librthread/arch/m88k/rfork_thread.S index 2c820232a40..d8f7657bd30 100644 --- a/lib/librthread/arch/m88k/rfork_thread.S +++ b/lib/librthread/arch/m88k/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2005, Miodrag Vallat @@ -51,5 +51,5 @@ ENTRY(__tfork_thread) jsr.n r4 /* func */ or r2, r5, r0 /* arg */ - or r13, r0, __SYSCALLNAME(SYS_,threxit) + or r13, r0, __SYSCALLNAME(SYS_,__threxit) tb0 0, r0, 128 diff --git a/lib/librthread/arch/mips64/rfork_thread.S b/lib/librthread/arch/mips64/rfork_thread.S index 32ee5307bee..adb73dbc24d 100644 --- a/lib/librthread/arch/mips64/rfork_thread.S +++ b/lib/librthread/arch/mips64/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2005, Miodrag Vallat @@ -64,7 +64,7 @@ LEAF(__tfork_thread, 32) move v0, zero move a0, zero - __DO_SYSCALL(threxit) + __DO_SYSCALL(__threxit) 9: /* diff --git a/lib/librthread/arch/powerpc/rfork_thread.S b/lib/librthread/arch/powerpc/rfork_thread.S index 5731553fbe7..103ff9f10a0 100644 --- a/lib/librthread/arch/powerpc/rfork_thread.S +++ b/lib/librthread/arch/powerpc/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.4 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.5 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2005 Tim Wiess <tim@nop.cx> @@ -45,7 +45,7 @@ ENTRY(__tfork_thread) blrl /* child returned, call _exit */ - li %r0, SYS_threxit + li %r0, SYS___threxit sc 1: li %r3, -1 diff --git a/lib/librthread/arch/sh/rfork_thread.S b/lib/librthread/arch/sh/rfork_thread.S index fb6002477fc..51100162285 100644 --- a/lib/librthread/arch/sh/rfork_thread.S +++ b/lib/librthread/arch/sh/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2007 Miodrag Vallat. 
@@ -45,7 +45,7 @@ ENTRY(__tfork_thread) jsr @r6 mov r7, r4 - mov.l .LSYS_threxit, r0 + mov.l .LSYS___threxit, r0 .word 0xc380 /* trapa #0x80 */ 9: @@ -56,6 +56,6 @@ ENTRY(__tfork_thread) .align 2 .LSYS___tfork: .long SYS___tfork -.LSYS_threxit: .long SYS_threxit +.LSYS___threxit: .long SYS___threxit SET_ENTRY_SIZE(__tfork_thread) diff --git a/lib/librthread/arch/sparc/rfork_thread.S b/lib/librthread/arch/sparc/rfork_thread.S index fc4b4f998c7..2feac006077 100644 --- a/lib/librthread/arch/sparc/rfork_thread.S +++ b/lib/librthread/arch/sparc/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2005, Miodrag Vallat @@ -63,7 +63,7 @@ ENTRY(__tfork_thread) call %o2 /* func */ mov %o3, %o0 /* arg */ - mov SYS_threxit, %g1 + mov SYS___threxit, %g1 clr %o0 t ST_SYSCALL /* will not return */ diff --git a/lib/librthread/arch/sparc64/rfork_thread.S b/lib/librthread/arch/sparc64/rfork_thread.S index d35f6ba1d74..86b876db714 100644 --- a/lib/librthread/arch/sparc64/rfork_thread.S +++ b/lib/librthread/arch/sparc64/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.3 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.4 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2005, Miodrag Vallat @@ -62,7 +62,7 @@ ENTRY(__tfork_thread) call %o2 /* func */ mov %o3, %o0 /* arg */ - mov SYS_threxit, %g1 + mov SYS___threxit, %g1 clr %o0 t ST_SYSCALL /* will not return */ diff --git a/lib/librthread/arch/vax/rfork_thread.S b/lib/librthread/arch/vax/rfork_thread.S index a7e6259b9e0..f5cc4231c58 100644 --- a/lib/librthread/arch/vax/rfork_thread.S +++ b/lib/librthread/arch/vax/rfork_thread.S @@ -1,4 +1,4 @@ -/* $OpenBSD: rfork_thread.S,v 1.3 2011/10/17 06:39:20 guenther Exp $ */ +/* $OpenBSD: rfork_thread.S,v 1.4 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2005, Miodrag Vallat @@ -60,7 +60,7 @@ ENTRY(__tfork_thread, R2|R3|R4) pushl r4 /* arg */ calls $1, *4(sp) /* func */ - __DO_SYSCALL(threxit) + __DO_SYSCALL(__threxit) 9: /* diff --git a/lib/librthread/rthread.c b/lib/librthread/rthread.c index b4016f656d5..8f761c3896d 100644 --- a/lib/librthread/rthread.c +++ b/lib/librthread/rthread.c @@ -1,4 +1,4 @@ -/* $OpenBSD: rthread.c,v 1.49 2011/12/28 04:59:31 guenther Exp $ */ +/* $OpenBSD: rthread.c,v 1.50 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org> * All Rights Reserved. 
@@ -103,9 +103,31 @@ sigthr_handler(__unused int sig) { pthread_t self = pthread_self(); - if ((self->flags & (THREAD_CANCELED | THREAD_CANCEL_COND)) == - THREAD_CANCELED && (self->cancel_point || - (self->flags & THREAD_CANCEL_DEFERRED) == 0)) + /* + * Do nothing unless + * 1) pthread_cancel() has been called on this thread, + * 2) cancelation is enabled for it, and + * 3) we're not already in cancelation processing + */ + if ((self->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE|THREAD_DYING)) + != (THREAD_CANCELED|THREAD_CANCEL_ENABLE)) + return; + + /* + * If delaying cancels inside complex ops (pthread_cond_wait, + * pthread_join, etc), just mark that this has happened to + * prevent a race with going to sleep + */ + if (self->flags & THREAD_CANCEL_DELAY) { + self->delayed_cancel = 1; + return; + } + + /* + * otherwise, if in a cancel point or async cancels are + * enabled, then exit + */ + if (self->cancel_point || (self->flags & THREAD_CANCEL_DEFERRED) == 0) pthread_exit(PTHREAD_CANCELED); } @@ -123,10 +145,12 @@ _rthread_init(void) strlcpy(thread->name, "Main process", sizeof(thread->name)); LIST_INSERT_HEAD(&_thread_list, thread, threads); _rthread_debug_init(); - _rthread_debug(1, "rthread init\n"); + _threads_ready = 1; __isthreaded = 1; + _rthread_debug(1, "rthread init\n"); + #if defined(__ELF__) && defined(PIC) /* * To avoid recursion problems in ld.so, we need to trigger the @@ -177,7 +201,7 @@ _rthread_free(pthread_t thread) } } -static void +void _rthread_setflag(pthread_t thread, int flag) { _spinlock(&thread->flags_lock); @@ -185,7 +209,7 @@ _rthread_setflag(pthread_t thread, int flag) _spinunlock(&thread->flags_lock); } -static void +void _rthread_clearflag(pthread_t thread, int flag) { _spinlock(&thread->flags_lock); @@ -269,34 +293,38 @@ pthread_exit(void *retval) _sem_post(&thread->donesem); } - threxit(&thread->tid); + __threxit(&thread->tid); for(;;); } int pthread_join(pthread_t thread, void **retval) { - int e; + int e, r; pthread_t self = pthread_self(); + e = r = 0; + _enter_delayed_cancel(self); if (thread == NULL) e = EINVAL; else if (thread == self) e = EDEADLK; else if (thread->flags & THREAD_DETACHED) e = EINVAL; - else { - _sem_wait(&thread->donesem, 0); + else if ((r = _sem_wait(&thread->donesem, 0, &self->delayed_cancel))) { if (retval) *retval = thread->retval; - e = 0; - /* We should be the last having a ref to this thread, but - * someone stupid or evil might haved detached it; - * in that case the thread will cleanup itself */ + + /* + * We should be the last having a ref to this thread, + * but someone stupid or evil might haved detached it; + * in that case the thread will clean up itself + */ if ((thread->flags & THREAD_DETACHED) == 0) _rthread_free(thread); } + _leave_delayed_cancel(self, !r); _rthread_reaper(); return (e); } @@ -439,7 +467,6 @@ pthread_setcancelstate(int state, int *oldstatep) PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE; if (state == PTHREAD_CANCEL_ENABLE) { _rthread_setflag(self, THREAD_CANCEL_ENABLE); - pthread_testcancel(); } else if (state == PTHREAD_CANCEL_DISABLE) { _rthread_clearflag(self, THREAD_CANCEL_ENABLE); } else { diff --git a/lib/librthread/rthread.h b/lib/librthread/rthread.h index e20bf11d734..391f0746699 100644 --- a/lib/librthread/rthread.h +++ b/lib/librthread/rthread.h @@ -1,4 +1,4 @@ -/* $OpenBSD: rthread.h,v 1.30 2011/12/21 00:49:47 guenther Exp $ */ +/* $OpenBSD: rthread.h,v 1.31 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org> * All Rights Reserved. 
@@ -54,7 +54,8 @@ struct sem { TAILQ_HEAD(pthread_queue, pthread); struct pthread_mutex { - struct sem sem; + _spinlock_lock_t lock; + struct pthread_queue lockers; int type; pthread_t owner; int count; @@ -68,7 +69,9 @@ struct pthread_mutex_attr { }; struct pthread_cond { - struct sem sem; + _spinlock_lock_t lock; + struct pthread_queue waiters; + struct pthread_mutex *mutex; }; struct pthread_cond_attr { @@ -76,10 +79,10 @@ struct pthread_cond_attr { }; struct pthread_rwlock { - struct sem sem; _spinlock_lock_t lock; + pthread_t owner; + struct pthread_queue writers; int readers; - int writer; }; struct pthread_rwlockattr { @@ -133,22 +136,32 @@ struct pthread { struct stack *stack; LIST_ENTRY(pthread) threads; TAILQ_ENTRY(pthread) waiting; + pthread_cond_t blocking_cond; int sched_policy; struct pthread_attr attr; struct sched_param sched_param; struct rthread_storage *local_storage; struct rthread_cleanup_fn *cleanup_fns; int myerrno; + + /* currently in a cancel point? */ int cancel_point; + + /* cancel received in a delayed cancel block? */ + int delayed_cancel; }; #define THREAD_DONE 0x001 #define THREAD_DETACHED 0x002 #define THREAD_CANCELED 0x004 #define THREAD_CANCEL_ENABLE 0x008 #define THREAD_CANCEL_DEFERRED 0x010 -#define THREAD_CANCEL_COND 0x020 +#define THREAD_CANCEL_DELAY 0x020 #define THREAD_DYING 0x040 +#define IS_CANCELED(thread) \ + (((thread)->flags & (THREAD_CANCELED|THREAD_DYING)) == THREAD_CANCELED) + + extern int _threads_ready; extern LIST_HEAD(listhead, pthread) _thread_list; extern struct pthread _initial_thread; @@ -156,12 +169,11 @@ extern _spinlock_lock_t _thread_lock; void _spinlock(_spinlock_lock_t *); void _spinunlock(_spinlock_lock_t *); -int _sem_wait(sem_t, int); -int _sem_waitl(sem_t, int, clockid_t, const struct timespec *); +int _sem_wait(sem_t, int, int *); int _sem_post(sem_t); -int _sem_wakeup(sem_t); -int _sem_wakeall(sem_t); +void _rthread_setflag(pthread_t, int); +void _rthread_clearflag(pthread_t, int); struct stack *_rthread_alloc_stack(pthread_t); void _rthread_free_stack(struct stack *); void _rthread_tls_destructors(pthread_t); @@ -174,19 +186,22 @@ void _rthread_bind_lock(int); #endif /* rthread_cancel.c */ -void _leave_cancel(pthread_t); void _enter_cancel(pthread_t); +void _leave_cancel(pthread_t); +void _enter_delayed_cancel(pthread_t); +void _leave_delayed_cancel(pthread_t, int); void _thread_dump_info(void); int _atomic_lock(register volatile _spinlock_lock_t *); /* syscalls */ -int getthrid(void); -void threxit(pid_t *); -int thrsleep(const volatile void *, clockid_t, const struct timespec *, - volatile void *); -int thrwakeup(void *, int n); -int sched_yield(void); -int thrsigdivert(sigset_t, siginfo_t *, const struct timespec *); -int _thread_sys_sigaction(int, const struct sigaction *, struct sigaction *); +int getthrid(void); +void __threxit(pid_t *); +int __thrsleep(const volatile void *, clockid_t, const struct timespec *, + void *, const int *); +int __thrwakeup(const volatile void *, int n); +int __thrsigdivert(sigset_t, siginfo_t *, const struct timespec *); +int sched_yield(void); +int _thread_sys_sigaction(int, const struct sigaction *, + struct sigaction *); diff --git a/lib/librthread/rthread_cancel.c b/lib/librthread/rthread_cancel.c index 4d57c8ee530..1db785a9a9e 100644 --- a/lib/librthread/rthread_cancel.c +++ b/lib/librthread/rthread_cancel.c @@ -1,4 +1,4 @@ -/* $OpenBSD: rthread_cancel.c,v 1.3 2012/01/04 05:46:38 guenther Exp $ */ +/* $OpenBSD: rthread_cancel.c,v 1.4 2012/01/17 02:34:18 guenther Exp $ */ /* $snafu: 
libc_tag.c,v 1.4 2004/11/30 07:00:06 marc Exp $ */ /* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */ @@ -66,8 +66,7 @@ _enter_cancel(pthread_t self) { if (self->flags & THREAD_CANCEL_ENABLE) { self->cancel_point++; - if ((self->flags & (THREAD_CANCELED | THREAD_DYING)) == - THREAD_CANCELED) + if (IS_CANCELED(self)) pthread_exit(PTHREAD_CANCELED); } } @@ -79,6 +78,31 @@ _leave_cancel(pthread_t self) self->cancel_point--; } +void +_enter_delayed_cancel(pthread_t self) +{ + if (self->flags & THREAD_CANCEL_ENABLE) { + self->delayed_cancel = 0; + self->cancel_point++; + if (IS_CANCELED(self)) + pthread_exit(PTHREAD_CANCELED); + _rthread_setflag(self, THREAD_CANCEL_DELAY); + } +} + +void +_leave_delayed_cancel(pthread_t self, int can_cancel) +{ + if (self->flags & THREAD_CANCEL_ENABLE) { + if (self->flags & THREAD_CANCEL_DELAY) { + self->cancel_point--; + _rthread_clearflag(self, THREAD_CANCEL_DELAY); + } + if (IS_CANCELED(self) && can_cancel) + pthread_exit(PTHREAD_CANCELED); + self->delayed_cancel = 0; + } +} int accept(int fd, struct sockaddr *addr, socklen_t *addrlen) @@ -419,7 +443,7 @@ select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, #if 0 sem_timedwait() /* don't have yet */ -sem_wait() /* don't have yet */ +sem_wait() /* in rthread_sem.c */ send() /* built on sendto() */ #endif diff --git a/lib/librthread/rthread_file.c b/lib/librthread/rthread_file.c index 14190662885..28bad52d0c2 100644 --- a/lib/librthread/rthread_file.c +++ b/lib/librthread/rthread_file.c @@ -1,4 +1,4 @@ -/* $OpenBSD: rthread_file.c,v 1.3 2011/11/06 11:48:59 guenther Exp $ */ +/* $OpenBSD: rthread_file.c,v 1.4 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -204,7 +204,7 @@ void */ TAILQ_INSERT_TAIL(&p->lockers,self,waiting); while (p->owner != self) { - thrsleep(self, 0, NULL, &hash_lock); + __thrsleep(self, 0, NULL, &hash_lock, NULL); _spinlock(&hash_lock); } } @@ -292,7 +292,7 @@ void */ p->count = 1; - thrwakeup(p->owner, 1); + __thrwakeup(p->owner, 1); } } } diff --git a/lib/librthread/rthread_rwlock.c b/lib/librthread/rthread_rwlock.c index 0d391fe9afd..c148089d996 100644 --- a/lib/librthread/rthread_rwlock.c +++ b/lib/librthread/rthread_rwlock.c @@ -1,6 +1,7 @@ -/* $OpenBSD: rthread_rwlock.c,v 1.1 2011/12/21 23:59:03 guenther Exp $ */ +/* $OpenBSD: rthread_rwlock.c,v 1.2 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org> + * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org> * All Rights Reserved. 
* * Permission to use, copy, modify, and distribute this software for any @@ -20,6 +21,7 @@ */ +#include <assert.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> @@ -31,8 +33,10 @@ static _spinlock_lock_t rwlock_init_lock = _SPINLOCK_UNLOCKED; +/* ARGSUSED1 */ int -pthread_rwlock_init(pthread_rwlock_t *lockp, const pthread_rwlockattr_t *attrp) +pthread_rwlock_init(pthread_rwlock_t *lockp, + const pthread_rwlockattr_t *attrp __unused) { pthread_rwlock_t lock; @@ -40,7 +44,8 @@ pthread_rwlock_init(pthread_rwlock_t *lockp, const pthread_rwlockattr_t *attrp) if (!lock) return (errno); lock->lock = _SPINLOCK_UNLOCKED; - lock->sem.lock = _SPINLOCK_UNLOCKED; + TAILQ_INIT(&lock->writers); + *lockp = lock; return (0); @@ -49,13 +54,19 @@ pthread_rwlock_init(pthread_rwlock_t *lockp, const pthread_rwlockattr_t *attrp) int pthread_rwlock_destroy(pthread_rwlock_t *lockp) { - if ((*lockp) && ((*lockp)->readers || (*lockp)->writer)) { + pthread_rwlock_t lock; + + assert(lockp); + lock = *lockp; + if (lock) { + if (lock->readers || !TAILQ_EMPTY(&lock->writers)) { #define MSG "pthread_rwlock_destroy on rwlock with waiters!\n" - write(2, MSG, sizeof(MSG) - 1); + write(2, MSG, sizeof(MSG) - 1); #undef MSG - return (EBUSY); + return (EBUSY); + } + free(lock); } - free(*lockp); *lockp = NULL; return (0); @@ -81,86 +92,72 @@ _rthread_rwlock_ensure_init(pthread_rwlock_t *lockp) } -int -pthread_rwlock_rdlock(pthread_rwlock_t *lockp) +static int +_rthread_rwlock_rdlock(pthread_rwlock_t *lockp, const struct timespec *abstime, + int try) { pthread_rwlock_t lock; + pthread_t thread = pthread_self(); int error; if ((error = _rthread_rwlock_ensure_init(lockp))) return (error); lock = *lockp; -again: + _rthread_debug(5, "%p: rwlock_rdlock %p\n", (void *)thread, + (void *)lock); _spinlock(&lock->lock); - if (lock->writer) { - _spinlock(&lock->sem.lock); - _spinunlock(&lock->lock); - _sem_waitl(&lock->sem, 0, 0, NULL); - goto again; + + /* writers have precedence */ + if (lock->owner == NULL && TAILQ_EMPTY(&lock->writers)) + lock->readers++; + else if (try) + error = EBUSY; + else if (lock->owner == thread) + error = EDEADLK; + else { + do { + if (__thrsleep(lock, CLOCK_REALTIME, abstime, + &lock->lock, NULL) == EWOULDBLOCK) + return (ETIMEDOUT); + _spinlock(&lock->lock); + } while (lock->owner != NULL || !TAILQ_EMPTY(&lock->writers)); + lock->readers++; } - lock->readers++; _spinunlock(&lock->lock); - return (0); + return (error); } int -pthread_rwlock_timedrdlock(pthread_rwlock_t *lockp, - const struct timespec *abstime) +pthread_rwlock_rdlock(pthread_rwlock_t *lockp) { - pthread_rwlock_t lock; - int do_wait = 1; - int error; - - if ((error = _rthread_rwlock_ensure_init(lockp))) - return (error); - - lock = *lockp; - _spinlock(&lock->lock); - while (lock->writer && do_wait) { - _spinlock(&lock->sem.lock); - _spinunlock(&lock->lock); - do_wait = _sem_waitl(&lock->sem, 0, CLOCK_REALTIME, abstime); - _spinlock(&lock->lock); - } - if (lock->writer) { - /* do_wait must be 0, so timed out */ - _spinunlock(&lock->lock); - return (ETIMEDOUT); - } - lock->readers++; - _spinunlock(&lock->lock); - - return (0); + return (_rthread_rwlock_rdlock(lockp, NULL, 0)); } int pthread_rwlock_tryrdlock(pthread_rwlock_t *lockp) { - pthread_rwlock_t lock; - int error; - - if ((error = _rthread_rwlock_ensure_init(lockp))) - return (error); - - lock = *lockp; - - _spinlock(&lock->lock); - if (lock->writer) { - _spinunlock(&lock->lock); - return (EBUSY); - } - lock->readers++; - _spinunlock(&lock->lock); - - return (0); + return 
(_rthread_rwlock_rdlock(lockp, NULL, 1)); } int -pthread_rwlock_wrlock(pthread_rwlock_t *lockp) +pthread_rwlock_timedrdlock(pthread_rwlock_t *lockp, + const struct timespec *abstime) +{ + if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || + abstime->tv_nsec > 1000000000) + return (EINVAL); + return (_rthread_rwlock_rdlock(lockp, abstime, 0)); +} + + +static int +_rthread_rwlock_wrlock(pthread_rwlock_t *lockp, const struct timespec *abstime, + int try) { pthread_rwlock_t lock; + pthread_t thread = pthread_self(); int error; if ((error = _rthread_rwlock_ensure_init(lockp))) @@ -168,95 +165,98 @@ pthread_rwlock_wrlock(pthread_rwlock_t *lockp) lock = *lockp; + _rthread_debug(5, "%p: rwlock_timedwrlock %p\n", (void *)thread, + (void *)lock); _spinlock(&lock->lock); - lock->writer++; - while (lock->readers) { - _spinlock(&lock->sem.lock); - _spinunlock(&lock->lock); - _sem_waitl(&lock->sem, 0, 0, NULL); - _spinlock(&lock->lock); + if (lock->readers == 0 && lock->owner == NULL) + lock->owner = thread; + else if (try) + error = EBUSY; + else if (lock->owner == thread) + error = EDEADLK; + else { + int do_wait; + + /* gotta block */ + TAILQ_INSERT_TAIL(&lock->writers, thread, waiting); + do { + do_wait = __thrsleep(thread, CLOCK_REALTIME, abstime, + &lock->lock, NULL) != EWOULDBLOCK; + _spinlock(&lock->lock); + } while (lock->owner != thread && do_wait); + + if (lock->owner != thread) { + /* timed out, sigh */ + TAILQ_REMOVE(&lock->writers, thread, waiting); + error = ETIMEDOUT; + } } - lock->readers = -pthread_self()->tid; _spinunlock(&lock->lock); - return (0); + return (error); } int -pthread_rwlock_timedwrlock(pthread_rwlock_t *lockp, - const struct timespec *abstime) +pthread_rwlock_wrlock(pthread_rwlock_t *lockp) { - pthread_rwlock_t lock; - int do_wait = 1; - int error; - - if ((error = _rthread_rwlock_ensure_init(lockp))) - return (error); - - lock = *lockp; - - _spinlock(&lock->lock); - lock->writer++; - while (lock->readers && do_wait) { - _spinlock(&lock->sem.lock); - _spinunlock(&lock->lock); - do_wait = _sem_waitl(&lock->sem, 0, CLOCK_REALTIME, abstime); - _spinlock(&lock->lock); - } - if (lock->readers) { - /* do_wait must be 0, so timed out */ - lock->writer--; - _spinunlock(&lock->lock); - return (ETIMEDOUT); - } - lock->readers = -pthread_self()->tid; - _spinunlock(&lock->lock); - - return (0); + return (_rthread_rwlock_wrlock(lockp, NULL, 0)); } int pthread_rwlock_trywrlock(pthread_rwlock_t *lockp) { - pthread_rwlock_t lock; - int error; - - if ((error = _rthread_rwlock_ensure_init(lockp))) - return (error); - - lock = *lockp; - - _spinlock(&lock->lock); - if (lock->readers || lock->writer) { - _spinunlock(&lock->lock); - return (EBUSY); - } - lock->writer = 1; - lock->readers = -pthread_self()->tid; - _spinunlock(&lock->lock); + return (_rthread_rwlock_wrlock(lockp, NULL, 1)); +} - return (0); +int +pthread_rwlock_timedwrlock(pthread_rwlock_t *lockp, + const struct timespec *abstime) +{ + if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || + abstime->tv_nsec > 1000000000) + return (EINVAL); + return (_rthread_rwlock_wrlock(lockp, abstime, 0)); } + int pthread_rwlock_unlock(pthread_rwlock_t *lockp) { pthread_rwlock_t lock; + pthread_t thread = pthread_self(); + pthread_t next; + int was_writer; lock = *lockp; + _rthread_debug(5, "%p: rwlock_unlock %p\n", (void *)thread, + (void *)lock); _spinlock(&lock->lock); - if (lock->readers == -pthread_self()->tid) { - lock->readers = 0; - lock->writer--; - } else if (lock->readers > 0) { - 
lock->readers--; + if (lock->owner != NULL) { + assert(lock->owner == thread); + was_writer = 1; } else { + assert(lock->readers > 0); + lock->readers--; + if (lock->readers > 0) + goto out; + was_writer = 0; + } + + lock->owner = next = TAILQ_FIRST(&lock->writers); + if (next != NULL) { + /* dequeue and wake first writer */ + TAILQ_REMOVE(&lock->writers, next, waiting); _spinunlock(&lock->lock); - return (EPERM); + __thrwakeup(next, 1); + return (0); } + + /* could there have been blocked readers? wake them all */ + if (was_writer) + __thrwakeup(lock, 0); +out: _spinunlock(&lock->lock); - _sem_wakeall(&lock->sem); return (0); } diff --git a/lib/librthread/rthread_sem.c b/lib/librthread/rthread_sem.c index 6c065cc4b78..66c22af4b9a 100644 --- a/lib/librthread/rthread_sem.c +++ b/lib/librthread/rthread_sem.c @@ -1,4 +1,4 @@ -/* $OpenBSD: rthread_sem.c,v 1.3 2012/01/04 21:01:25 guenther Exp $ */ +/* $OpenBSD: rthread_sem.c,v 1.4 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org> * All Rights Reserved. @@ -28,42 +28,29 @@ * Internal implementation of semaphores */ int -_sem_wait(sem_t sem, int tryonly) +_sem_wait(sem_t sem, int tryonly, int *delayed_cancel) { + int r; _spinlock(&sem->lock); - return (_sem_waitl(sem, tryonly, 0, NULL)); -} - -int -_sem_waitl(sem_t sem, int tryonly, clockid_t clock_id, - const struct timespec *abstime) -{ - int do_sleep; - -again: - if (sem->value == 0) { - if (tryonly) { - _spinunlock(&sem->lock); - return (0); - } - sem->waitcount++; - do_sleep = 1; - } else { + if (sem->value) { sem->value--; - do_sleep = 0; - } - - if (do_sleep) { - if (thrsleep(sem, clock_id, abstime, &sem->lock) == -1 && - errno == EWOULDBLOCK) - return (0); - _spinlock(&sem->lock); + r = 1; + } else if (tryonly) { + r = 0; + } else { + sem->waitcount++; + do { + r = __thrsleep(&sem->waitcount, 0, NULL, &sem->lock, + delayed_cancel) == 0; + _spinlock(&sem->lock); + } while (r && sem->value == 0); sem->waitcount--; - goto again; + if (r) + sem->value--; } _spinunlock(&sem->lock); - return (1); + return (r); } /* always increment count */ @@ -75,44 +62,13 @@ _sem_post(sem_t sem) _spinlock(&sem->lock); sem->value++; if (sem->waitcount) { - thrwakeup(sem, 1); + __thrwakeup(&sem->waitcount, 1); rv = 1; } _spinunlock(&sem->lock); return (rv); } -/* only increment count if a waiter */ -int -_sem_wakeup(sem_t sem) -{ - int rv = 0; - - _spinlock(&sem->lock); - if (sem->waitcount) { - sem->value++; - thrwakeup(sem, 1); - rv = 1; - } - _spinunlock(&sem->lock); - return (rv); -} - - -int -_sem_wakeall(sem_t sem) -{ - int rv; - - _spinlock(&sem->lock); - rv = sem->waitcount; - sem->value += rv; - thrwakeup(sem, 0); - _spinunlock(&sem->lock); - - return (rv); -} - /* * exported semaphores */ @@ -199,13 +155,17 @@ int sem_wait(sem_t *semp) { sem_t sem = *semp; + pthread_t self = pthread_self(); + int r; if (!semp || !*semp) { errno = EINVAL; return (-1); } - _sem_wait(sem, 0); + _enter_delayed_cancel(self); + r = _sem_wait(sem, 0, &self->delayed_cancel); + _leave_delayed_cancel(self, !r); return (0); } @@ -221,7 +181,7 @@ sem_trywait(sem_t *semp) return (-1); } - rv = _sem_wait(sem, 1); + rv = _sem_wait(sem, 1, NULL); if (!rv) { errno = EAGAIN; @@ -231,22 +191,25 @@ sem_trywait(sem_t *semp) return (0); } +/* ARGSUSED */ sem_t * -sem_open(const char *name, int oflag, ...) +sem_open(const char *name __unused, int oflag __unused, ...) 
{ errno = ENOSYS; return (SEM_FAILED); } +/* ARGSUSED */ int -sem_close(sem_t *sem) +sem_close(sem_t *sem __unused) { errno = ENOSYS; return (-1); } +/* ARGSUSED */ int -sem_unlink(const char *name) +sem_unlink(const char *name __unused) { errno = ENOSYS; return (-1); diff --git a/lib/librthread/rthread_sig.c b/lib/librthread/rthread_sig.c index 11351ddafc3..c38fed3998b 100644 --- a/lib/librthread/rthread_sig.c +++ b/lib/librthread/rthread_sig.c @@ -1,4 +1,4 @@ -/* $OpenBSD: rthread_sig.c,v 1.11 2011/12/27 17:36:59 guenther Exp $ */ +/* $OpenBSD: rthread_sig.c,v 1.12 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2005 Ted Unangst <tedu@openbsd.org> * All Rights Reserved. @@ -56,7 +56,7 @@ sigwait(const sigset_t *set, int *sig) sigdelset(&s, SIGTHR); _enter_cancel(self); - ret = thrsigdivert(s, NULL, NULL); + ret = __thrsigdivert(s, NULL, NULL); _leave_cancel(self); if (ret == -1) return (errno); diff --git a/lib/librthread/rthread_sync.c b/lib/librthread/rthread_sync.c index cbe529a83aa..ae3bdbb40d9 100644 --- a/lib/librthread/rthread_sync.c +++ b/lib/librthread/rthread_sync.c @@ -1,6 +1,7 @@ -/* $OpenBSD: rthread_sync.c,v 1.28 2012/01/04 17:43:34 mpi Exp $ */ +/* $OpenBSD: rthread_sync.c,v 1.29 2012/01/17 02:34:18 guenther Exp $ */ /* * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org> + * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org> * All Rights Reserved. * * Permission to use, copy, modify, and distribute this software for any @@ -20,7 +21,9 @@ */ +#include <assert.h> #include <stdlib.h> +#include <string.h> #include <unistd.h> #include <errno.h> @@ -36,16 +39,16 @@ static _spinlock_lock_t static_init_lock = _SPINLOCK_UNLOCKED; int pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr) { - pthread_mutex_t mutex; + struct pthread_mutex *mutex; mutex = calloc(1, sizeof(*mutex)); if (!mutex) return (errno); - mutex->sem.lock = _SPINLOCK_UNLOCKED; - mutex->sem.value = 1; /* unlocked */ + mutex->lock = _SPINLOCK_UNLOCKED; + TAILQ_INIT(&mutex->lockers); if (attr == NULL) { mutex->type = PTHREAD_MUTEX_ERRORCHECK; - mutex->prioceiling = PTHREAD_PRIO_NONE; + mutex->prioceiling = -1; } else { mutex->type = (*attr)->ma_type; mutex->prioceiling = (*attr)->ma_protocol == @@ -59,23 +62,29 @@ pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr) int pthread_mutex_destroy(pthread_mutex_t *mutexp) { + struct pthread_mutex *mutex; - if ((*mutexp) && (*mutexp)->count) { + assert(mutexp); + mutex = (struct pthread_mutex *)*mutexp; + if (mutex) { + if (mutex->count || mutex->owner != NULL || + !TAILQ_EMPTY(&mutex->lockers)) { #define MSG "pthread_mutex_destroy on mutex with waiters!\n" - write(2, MSG, sizeof(MSG) - 1); + write(2, MSG, sizeof(MSG) - 1); #undef MSG - return (EBUSY); + return (EBUSY); + } + free(mutex); + *mutexp = NULL; } - free((void *)*mutexp); - *mutexp = NULL; return (0); } static int _rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait) { - pthread_mutex_t mutex; - pthread_t thread = pthread_self(); + struct pthread_mutex *mutex; + pthread_t self = pthread_self(); int ret = 0; /* @@ -92,19 +101,42 @@ _rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait) if (ret != 0) return (EINVAL); } - mutex = *mutexp; - if (mutex->owner == thread) { - if (mutex->type == PTHREAD_MUTEX_RECURSIVE) { - mutex->count++; - return (0); + mutex = (struct pthread_mutex *)*mutexp; + + _rthread_debug(5, "%p: mutex_lock %p\n", (void *)self, (void *)mutex); + _spinlock(&mutex->lock); + if (mutex->owner == NULL && 
TAILQ_EMPTY(&mutex->lockers)) { + assert(mutex->count == 0); + mutex->owner = self; + } else if (mutex->owner == self) { + assert(mutex->count > 0); + + /* already owner? handle recursive behavior */ + if (mutex->type != PTHREAD_MUTEX_RECURSIVE) + { + if (trywait || + mutex->type == PTHREAD_MUTEX_ERRORCHECK) { + _spinunlock(&mutex->lock); + return (trywait ? EBUSY : EDEADLK); + } + abort(); } - if (mutex->type == PTHREAD_MUTEX_ERRORCHECK) - return (trywait ? EBUSY : EDEADLK); - } - if (!_sem_wait((void *)&mutex->sem, trywait)) + } else if (trywait) { + /* try failed */ + _spinunlock(&mutex->lock); return (EBUSY); - mutex->owner = thread; - mutex->count = 1; + } else { + /* add to the wait queue and block until at the head */ + TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting); + while (mutex->owner != self) { + __thrsleep(self, 0, NULL, &mutex->lock, NULL); + _spinlock(&mutex->lock); + assert(mutex->owner != NULL); + } + } + + mutex->count++; + _spinunlock(&mutex->lock); return (0); } @@ -124,15 +156,25 @@ pthread_mutex_trylock(pthread_mutex_t *p) int pthread_mutex_unlock(pthread_mutex_t *mutexp) { - pthread_t thread = pthread_self(); - pthread_mutex_t mutex = *mutexp; + pthread_t self = pthread_self(); + struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp; + + _rthread_debug(5, "%p: mutex_unlock %p\n", (void *)self, + (void *)mutex); - if (mutex->owner != thread) + if (mutex->owner != self) return (EPERM); if (--mutex->count == 0) { - mutex->owner = NULL; - _sem_post((void *)&mutex->sem); + pthread_t next; + + _spinlock(&mutex->lock); + mutex->owner = next = TAILQ_FIRST(&mutex->lockers); + if (next != NULL) + TAILQ_REMOVE(&mutex->lockers, next, waiting); + _spinunlock(&mutex->lock); + if (next != NULL) + __thrwakeup(next, 1); } return (0); @@ -141,15 +183,18 @@ pthread_mutex_unlock(pthread_mutex_t *mutexp) /* * condition variables */ +/* ARGSUSED1 */ int -pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attrp) +pthread_cond_init(pthread_cond_t *condp, + const pthread_condattr_t *attrp __unused) { pthread_cond_t cond; cond = calloc(1, sizeof(*cond)); if (!cond) return (errno); - cond->sem.lock = _SPINLOCK_UNLOCKED; + cond->lock = _SPINLOCK_UNLOCKED; + TAILQ_INIT(&cond->waiters); *condp = cond; @@ -159,8 +204,19 @@ pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attrp) int pthread_cond_destroy(pthread_cond_t *condp) { + pthread_cond_t cond; - free(*condp); + assert(condp); + cond = *condp; + if (cond) { + if (!TAILQ_EMPTY(&cond->waiters)) { +#define MSG "pthread_cond_destroy on condvar with waiters!\n" + write(2, MSG, sizeof(MSG) - 1); +#undef MSG + return (EBUSY); + } + free(cond); + } *condp = NULL; return (0); @@ -170,37 +226,310 @@ int pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp, const struct timespec *abstime) { + pthread_cond_t cond; + struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp; + pthread_t self = pthread_self(); + pthread_t next; + int mutex_count; + int canceled = 0; + int rv = 0; int error; - int rv; if (!*condp) if ((error = pthread_cond_init(condp, NULL))) return (error); + cond = *condp; + _rthread_debug(5, "%p: cond_timed %p,%p\n", (void *)self, + (void *)cond, (void *)mutex); - _spinlock(&(*condp)->sem.lock); - pthread_mutex_unlock(mutexp); - rv = _sem_waitl(&(*condp)->sem, 0, CLOCK_REALTIME, abstime); - error = pthread_mutex_lock(mutexp); + if (mutex->owner != self) + return (EPERM); + if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 || + abstime->tv_nsec >= 
1000000000) + return (EINVAL); + + _enter_delayed_cancel(self); + + _spinlock(&cond->lock); + + /* mark the condvar as being associated with this mutex */ + if (cond->mutex == NULL) { + cond->mutex = mutex; + assert(TAILQ_EMPTY(&cond->waiters)); + } else if (cond->mutex != mutex) { + assert(cond->mutex == mutex); + _spinunlock(&cond->lock); + _leave_delayed_cancel(self, 1); + return (EINVAL); + } else + assert(! TAILQ_EMPTY(&cond->waiters)); + + /* snag the count in case this is a recursive mutex */ + mutex_count = mutex->count; + + /* transfer from the mutex queue to the condvar queue */ + _spinlock(&mutex->lock); + self->blocking_cond = cond; + TAILQ_INSERT_TAIL(&cond->waiters, self, waiting); + _spinunlock(&cond->lock); + + /* wake the next guy blocked on the mutex */ + mutex->count = 0; + mutex->owner = next = TAILQ_FIRST(&mutex->lockers); + if (next != NULL) { + TAILQ_REMOVE(&mutex->lockers, next, waiting); + __thrwakeup(next, 1); + } - return (error ? error : rv ? 0 : ETIMEDOUT); + /* wait until we're the owner of the mutex again */ + while (mutex->owner != self) { + error = __thrsleep(self, CLOCK_REALTIME, abstime, &mutex->lock, + &self->delayed_cancel); + + /* + * If abstime == NULL, then we're definitely waiting + * on the mutex instead of the condvar, and are + * just waiting for mutex ownership, regardless of + * why we woke up. + */ + if (abstime == NULL) { + _spinlock(&mutex->lock); + continue; + } + + /* + * If we took a normal signal (not from + * cancellation) then we should just go back to + * sleep without changing state (timeouts, etc). + */ + if (error == EINTR && !IS_CANCELED(self)) { + _spinlock(&mutex->lock); + continue; + } + + /* + * The remaining reasons for waking up (normal + * wakeup, timeout, and cancellation) all mean that + * we won't be staying in the condvar queue and + * we'll no longer time out or be cancelable. + */ + abstime = NULL; + _leave_delayed_cancel(self, 0); + + /* + * If we're no longer in the condvar's queue then + * we're just waiting for mutex ownership. Need + * cond->lock here to prevent race with cond_signal(). + */ + _spinlock(&cond->lock); + if (self->blocking_cond == NULL) { + _spinunlock(&cond->lock); + _spinlock(&mutex->lock); + continue; + } + assert(self->blocking_cond == cond); + + /* if timeout or canceled, make note of that */ + if (error == EWOULDBLOCK) + rv = ETIMEDOUT; + else if (error == EINTR) + canceled = 1; + + /* transfer between the queues */ + TAILQ_REMOVE(&cond->waiters, self, waiting); + assert(mutex == cond->mutex); + if (TAILQ_EMPTY(&cond->waiters)) + cond->mutex = NULL; + self->blocking_cond = NULL; + _spinunlock(&cond->lock); + _spinlock(&mutex->lock); + + /* mutex unlocked right now? 
*/ + if (mutex->owner == NULL && + TAILQ_EMPTY(&mutex->lockers)) { + assert(mutex->count == 0); + mutex->owner = self; + break; + } + TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting); + } + + /* restore the mutex's count */ + mutex->count = mutex_count; + _spinunlock(&mutex->lock); + + _leave_delayed_cancel(self, canceled); + + return (rv); } int pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp) { - return (pthread_cond_timedwait(condp, mutexp, NULL)); + pthread_cond_t cond; + struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp; + pthread_t self = pthread_self(); + pthread_t next; + int mutex_count; + int canceled = 0; + int error; + + if (!*condp) + if ((error = pthread_cond_init(condp, NULL))) + return (error); + cond = *condp; + _rthread_debug(5, "%p: cond_timed %p,%p\n", (void *)self, + (void *)cond, (void *)mutex); + + if (mutex->owner != self) + return (EPERM); + + _enter_delayed_cancel(self); + + _spinlock(&cond->lock); + + /* mark the condvar as being associated with this mutex */ + if (cond->mutex == NULL) { + cond->mutex = mutex; + assert(TAILQ_EMPTY(&cond->waiters)); + } else if (cond->mutex != mutex) { + assert(cond->mutex == mutex); + _spinunlock(&cond->lock); + _leave_delayed_cancel(self, 1); + return (EINVAL); + } else + assert(! TAILQ_EMPTY(&cond->waiters)); + + /* snag the count in case this is a recursive mutex */ + mutex_count = mutex->count; + + /* transfer from the mutex queue to the condvar queue */ + _spinlock(&mutex->lock); + self->blocking_cond = cond; + TAILQ_INSERT_TAIL(&cond->waiters, self, waiting); + _spinunlock(&cond->lock); + + /* wake the next guy blocked on the mutex */ + mutex->count = 0; + mutex->owner = next = TAILQ_FIRST(&mutex->lockers); + if (next != NULL) { + TAILQ_REMOVE(&mutex->lockers, next, waiting); + __thrwakeup(next, 1); + } + + /* wait until we're the owner of the mutex again */ + while (mutex->owner != self) { + error = __thrsleep(self, 0, NULL, &mutex->lock, + &self->delayed_cancel); + + /* + * If we took a normal signal (not from + * cancellation) then we should just go back to + * sleep without changing state (timeouts, etc). + */ + if (error == EINTR && !IS_CANCELED(self)) { + _spinlock(&mutex->lock); + continue; + } + + /* + * The remaining reasons for waking up (normal + * wakeup and cancellation) all mean that we won't + * be staying in the condvar queue and we'll no + * longer be cancelable. + */ + _leave_delayed_cancel(self, 0); + + /* + * If we're no longer in the condvar's queue then + * we're just waiting for mutex ownership. Need + * cond->lock here to prevent race with cond_signal(). + */ + _spinlock(&cond->lock); + if (self->blocking_cond == NULL) { + _spinunlock(&cond->lock); + _spinlock(&mutex->lock); + continue; + } + assert(self->blocking_cond == cond); + + /* if canceled, make note of that */ + if (error == EINTR) + canceled = 1; + + /* transfer between the queues */ + TAILQ_REMOVE(&cond->waiters, self, waiting); + assert(mutex == cond->mutex); + if (TAILQ_EMPTY(&cond->waiters)) + cond->mutex = NULL; + self->blocking_cond = NULL; + _spinunlock(&cond->lock); + _spinlock(&mutex->lock); + + /* mutex unlocked right now? 
*/ + if (mutex->owner == NULL && + TAILQ_EMPTY(&mutex->lockers)) { + assert(mutex->count == 0); + mutex->owner = self; + break; + } + TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting); + } + + /* restore the mutex's count */ + mutex->count = mutex_count; + _spinunlock(&mutex->lock); + + _leave_delayed_cancel(self, canceled); + + return (0); } + int pthread_cond_signal(pthread_cond_t *condp) { - int error; + pthread_cond_t cond; + struct pthread_mutex *mutex; + pthread_t thread; + int wakeup; + /* uninitialized? Then there's obviously no one waiting! */ if (!*condp) - if ((error = pthread_cond_init(condp, NULL))) - return (error); + return 0; + + cond = *condp; + _rthread_debug(5, "%p: cond_signal %p,%p\n", (void *)pthread_self(), + (void *)cond, (void *)cond->mutex); + _spinlock(&cond->lock); + thread = TAILQ_FIRST(&cond->waiters); + if (thread == NULL) { + assert(cond->mutex == NULL); + _spinunlock(&cond->lock); + return (0); + } - _sem_wakeup(&(*condp)->sem); + assert(thread->blocking_cond == cond); + TAILQ_REMOVE(&cond->waiters, thread, waiting); + thread->blocking_cond = NULL; + + mutex = cond->mutex; + assert(mutex != NULL); + if (TAILQ_EMPTY(&cond->waiters)) + cond->mutex = NULL; + + /* link locks to prevent race with timedwait */ + _spinlock(&mutex->lock); + _spinunlock(&cond->lock); + + wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers); + if (wakeup) + mutex->owner = thread; + else + TAILQ_INSERT_TAIL(&mutex->lockers, thread, waiting); + _spinunlock(&mutex->lock); + if (wakeup) + __thrwakeup(thread, 1); return (0); } @@ -208,10 +537,65 @@ pthread_cond_signal(pthread_cond_t *condp) int pthread_cond_broadcast(pthread_cond_t *condp) { + pthread_cond_t cond; + struct pthread_mutex *mutex; + pthread_t thread; + pthread_t p; + int wakeup; + + /* uninitialized? Then there's obviously no one waiting! */ if (!*condp) - pthread_cond_init(condp, NULL); + return 0; + + cond = *condp; + _rthread_debug(5, "%p: cond_broadcast %p,%p\n", (void *)pthread_self(), + (void *)cond, (void *)cond->mutex); + _spinlock(&cond->lock); + thread = TAILQ_FIRST(&cond->waiters); + if (thread == NULL) { + assert(cond->mutex == NULL); + _spinunlock(&cond->lock); + return (0); + } + + mutex = cond->mutex; + assert(mutex != NULL); + + /* walk the list, clearing the "blocked on condvar" pointer */ + p = thread; + do + p->blocking_cond = NULL; + while ((p = TAILQ_NEXT(p, waiting)) != NULL); + + /* + * We want to transfer all the threads from the condvar's list + * to the mutex's list. The TAILQ_* macros don't let us do that + * efficiently, so this is direct list surgery. Pay attention! 
+ */ - _sem_wakeall(&(*condp)->sem); + /* 1) attach the first thread to the end of the mutex's list */ + _spinlock(&mutex->lock); + wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers); + thread->waiting.tqe_prev = mutex->lockers.tqh_last; + *(mutex->lockers.tqh_last) = thread; + + /* 2) fix up the end pointer for the mutex's list */ + mutex->lockers.tqh_last = cond->waiters.tqh_last; + _spinunlock(&mutex->lock); + + if (wakeup) { + TAILQ_REMOVE(&mutex->lockers, thread, waiting); + mutex->owner = thread; + _spinunlock(&mutex->lock); + __thrwakeup(thread, 1); + } else + _spinunlock(&mutex->lock); + + /* 3) reset the condvar's list and mutex pointer */ + TAILQ_INIT(&cond->waiters); + assert(cond->mutex != NULL); + cond->mutex = NULL; + _spinunlock(&cond->lock); return (0); } diff --git a/lib/librthread/shlib_version b/lib/librthread/shlib_version index 890c57389b5..3066b9771e7 100644 --- a/lib/librthread/shlib_version +++ b/lib/librthread/shlib_version @@ -1,2 +1,2 @@ -major=4 -minor=1 +major=5 +minor=0 diff --git a/sys/kern/init_sysent.c b/sys/kern/init_sysent.c index 215f213454e..be822ad146e 100644 --- a/sys/kern/init_sysent.c +++ b/sys/kern/init_sysent.c @@ -1,4 +1,4 @@ -/* $OpenBSD: init_sysent.c,v 1.131 2011/10/15 23:36:31 guenther Exp $ */ +/* $OpenBSD: init_sysent.c,v 1.132 2012/01/17 02:34:18 guenther Exp $ */ /* * System call switch table. @@ -731,14 +731,14 @@ struct sysent sysent[] = { sys_sched_yield }, /* 298 = sched_yield */ { 0, 0, SY_NOLOCK | 0, sys_getthrid }, /* 299 = getthrid */ - { 4, s(struct sys_thrsleep_args), 0, - sys_thrsleep }, /* 300 = thrsleep */ - { 2, s(struct sys_thrwakeup_args), 0, - sys_thrwakeup }, /* 301 = thrwakeup */ - { 1, s(struct sys_threxit_args), 0, - sys_threxit }, /* 302 = threxit */ - { 3, s(struct sys_thrsigdivert_args), 0, - sys_thrsigdivert }, /* 303 = thrsigdivert */ + { 5, s(struct sys___thrsleep_args), 0, + sys___thrsleep }, /* 300 = __thrsleep */ + { 2, s(struct sys___thrwakeup_args), 0, + sys___thrwakeup }, /* 301 = __thrwakeup */ + { 1, s(struct sys___threxit_args), 0, + sys___threxit }, /* 302 = __threxit */ + { 3, s(struct sys___thrsigdivert_args), 0, + sys___thrsigdivert }, /* 303 = __thrsigdivert */ { 2, s(struct sys___getcwd_args), 0, sys___getcwd }, /* 304 = __getcwd */ { 2, s(struct sys_adjfreq_args), 0, diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index ecd63cf0fda..50e9e0e3cf9 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_exit.c,v 1.105 2011/12/14 07:32:16 guenther Exp $ */ +/* $OpenBSD: kern_exit.c,v 1.106 2012/01/17 02:34:18 guenther Exp $ */ /* $NetBSD: kern_exit.c,v 1.39 1996/04/22 01:38:25 christos Exp $ */ /* @@ -91,9 +91,9 @@ sys_exit(struct proc *p, void *v, register_t *retval) } int -sys_threxit(struct proc *p, void *v, register_t *retval) +sys___threxit(struct proc *p, void *v, register_t *retval) { - struct sys_threxit_args /* { + struct sys___threxit_args /* { syscallarg(pid_t *) notdead; } */ *uap = v; diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index 0775a1edf52..859e1ff75aa 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_sig.c,v 1.131 2011/12/11 19:42:28 guenther Exp $ */ +/* $OpenBSD: kern_sig.c,v 1.132 2012/01/17 02:34:18 guenther Exp $ */ /* $NetBSD: kern_sig.c,v 1.54 1996/04/22 01:38:32 christos Exp $ */ /* @@ -1503,9 +1503,9 @@ sys_nosys(struct proc *p, void *v, register_t *retval) } int -sys_thrsigdivert(struct proc *p, void *v, register_t *retval) +sys___thrsigdivert(struct proc *p, 
void *v, register_t *retval) { - struct sys_thrsigdivert_args /* { + struct sys___thrsigdivert_args /* { syscallarg(sigset_t) sigmask; syscallarg(siginfo_t *) info; syscallarg(const struct timespec *) timeout; diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 4b711711c02..6b7f80ab747 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -1,4 +1,4 @@ -/* $OpenBSD: kern_synch.c,v 1.98 2011/12/11 19:42:28 guenther Exp $ */ +/* $OpenBSD: kern_synch.c,v 1.99 2012/01/17 02:34:18 guenther Exp $ */ /* $NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $ */ /* @@ -404,37 +404,49 @@ sys_sched_yield(struct proc *p, void *v, register_t *retval) } int -sys_thrsleep(struct proc *p, void *v, register_t *revtal) +sys___thrsleep(struct proc *p, void *v, register_t *retval) { - struct sys_thrsleep_args /* { - syscallarg(void *) ident; + struct sys___thrsleep_args /* { + syscallarg(const volatile void *) ident; syscallarg(clockid_t) clock_id; syscallarg(struct timespec *) tp; syscallarg(void *) lock; + syscallarg(const int *) abort; } */ *uap = v; long ident = (long)SCARG(uap, ident); _spinlock_lock_t *lock = SCARG(uap, lock); static _spinlock_lock_t unlocked = _SPINLOCK_UNLOCKED; long long to_ticks = 0; - int error; + int abort, error; - if (!rthreads_enabled) - return (ENOTSUP); + if (!rthreads_enabled) { + *retval = ENOTSUP; + return (0); + } + if (ident == 0) { + *retval = EINVAL; + return (0); + } if (SCARG(uap, tp) != NULL) { struct timespec now, ats; - if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0 || - (error = clock_gettime(p, SCARG(uap, clock_id), &now)) != 0) - return (error); + if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) || + (error = clock_gettime(p, SCARG(uap, clock_id), &now))) { + *retval = error; + return (0); + } if (timespeccmp(&ats, &now, <)) { /* already passed: still do the unlock */ if (lock) { if ((error = copyout(&unlocked, lock, - sizeof(unlocked))) != 0) - return (error); + sizeof(unlocked))) != 0) { + *retval = error; + return (0); + } } - return (EWOULDBLOCK); + *retval = EWOULDBLOCK; + return (0); } timespecsub(&ats, &now, &ats); @@ -450,23 +462,41 @@ sys_thrsleep(struct proc *p, void *v, register_t *revtal) if (lock) { if ((error = copyout(&unlocked, lock, sizeof(unlocked))) != 0) - return (error); + goto out; } - error = tsleep(&p->p_thrslpid, PUSER | PCATCH, "thrsleep", - (int)to_ticks); + + if (SCARG(uap, abort) != NULL) { + if ((error = copyin(SCARG(uap, abort), &abort, + sizeof(abort))) != 0) + goto out; + if (abort) { + error = EINTR; + goto out; + } + } + + if (p->p_thrslpid == 0) + error = 0; + else + error = tsleep(&p->p_thrslpid, PUSER | PCATCH, "thrsleep", + (int)to_ticks); + +out: + p->p_thrslpid = 0; if (error == ERESTART) error = EINTR; - return (error); + *retval = error; + return (0); } int -sys_thrwakeup(struct proc *p, void *v, register_t *retval) +sys___thrwakeup(struct proc *p, void *v, register_t *retval) { - struct sys_thrwakeup_args /* { - syscallarg(void *) ident; + struct sys___thrwakeup_args /* { + syscallarg(const volatile void *) ident; syscallarg(int) n; } */ *uap = v; long ident = (long)SCARG(uap, ident); @@ -475,17 +505,20 @@ sys_thrwakeup(struct proc *p, void *v, register_t *retval) int found = 0; if (!rthreads_enabled) - return (ENOTSUP); - TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) { - if (q->p_thrslpid == ident) { - wakeup_one(&q->p_thrslpid); - q->p_thrslpid = 0; - if (++found == n) - return (0); + *retval = ENOTSUP; + else if (ident == 0) + *retval = EINVAL; + else { + 
TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) { + if (q->p_thrslpid == ident) { + wakeup_one(&q->p_thrslpid); + q->p_thrslpid = 0; + if (++found == n) + break; + } } + *retval = found ? 0 : ESRCH; } - if (!found) - return (ESRCH); return (0); } diff --git a/sys/kern/syscalls.c b/sys/kern/syscalls.c index 29bad4c5909..a7880b9ef64 100644 --- a/sys/kern/syscalls.c +++ b/sys/kern/syscalls.c @@ -1,4 +1,4 @@ -/* $OpenBSD: syscalls.c,v 1.132 2011/10/15 23:36:31 guenther Exp $ */ +/* $OpenBSD: syscalls.c,v 1.133 2012/01/17 02:34:18 guenther Exp $ */ /* * System call names. @@ -382,10 +382,10 @@ char *syscallnames[] = { #endif "sched_yield", /* 298 = sched_yield */ "getthrid", /* 299 = getthrid */ - "thrsleep", /* 300 = thrsleep */ - "thrwakeup", /* 301 = thrwakeup */ - "threxit", /* 302 = threxit */ - "thrsigdivert", /* 303 = thrsigdivert */ + "__thrsleep", /* 300 = __thrsleep */ + "__thrwakeup", /* 301 = __thrwakeup */ + "__threxit", /* 302 = __threxit */ + "__thrsigdivert", /* 303 = __thrsigdivert */ "__getcwd", /* 304 = __getcwd */ "adjfreq", /* 305 = adjfreq */ "getfsstat", /* 306 = getfsstat */ diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master index c8011d97d42..af289aab619 100644 --- a/sys/kern/syscalls.master +++ b/sys/kern/syscalls.master @@ -1,4 +1,4 @@ -; $OpenBSD: syscalls.master,v 1.119 2011/10/15 23:35:29 guenther Exp $ +; $OpenBSD: syscalls.master,v 1.120 2012/01/17 02:34:18 guenther Exp $ ; $NetBSD: syscalls.master,v 1.32 1996/04/23 10:24:21 mycroft Exp $ ; @(#)syscalls.master 8.2 (Berkeley) 1/13/94 @@ -524,11 +524,13 @@ #endif 298 STD NOLOCK { int sys_sched_yield(void); } 299 STD NOLOCK { pid_t sys_getthrid(void); } -300 STD { int sys_thrsleep(void *ident, clockid_t clock_id, \ - const struct timespec *tp, void *lock); } -301 STD { int sys_thrwakeup(void *ident, int n); } -302 STD { void sys_threxit(pid_t *notdead); } -303 STD { int sys_thrsigdivert(sigset_t sigmask, \ +300 STD { int sys___thrsleep(const volatile void *ident, \ + clockid_t clock_id, const struct timespec *tp, \ + void *lock, const int *abort); } +301 STD { int sys___thrwakeup(const volatile void *ident, \ + int n); } +302 STD { void sys___threxit(pid_t *notdead); } +303 STD { int sys___thrsigdivert(sigset_t sigmask, \ siginfo_t *info, const struct timespec *timeout); } 304 STD { int sys___getcwd(char *buf, size_t len); } 305 STD { int sys_adjfreq(const int64_t *freq, \ diff --git a/sys/sys/syscall.h b/sys/sys/syscall.h index 0145f63cc6b..ccd89b97435 100644 --- a/sys/sys/syscall.h +++ b/sys/sys/syscall.h @@ -1,4 +1,4 @@ -/* $OpenBSD: syscall.h,v 1.131 2011/10/15 23:36:31 guenther Exp $ */ +/* $OpenBSD: syscall.h,v 1.132 2012/01/17 02:34:18 guenther Exp $ */ /* * System call numbers. 
@@ -589,17 +589,17 @@ /* syscall: "getthrid" ret: "pid_t" args: */ #define SYS_getthrid 299 -/* syscall: "thrsleep" ret: "int" args: "void *" "clockid_t" "const struct timespec *" "void *" */ -#define SYS_thrsleep 300 +/* syscall: "__thrsleep" ret: "int" args: "const volatile void *" "clockid_t" "const struct timespec *" "void *" "const int *" */ +#define SYS___thrsleep 300 -/* syscall: "thrwakeup" ret: "int" args: "void *" "int" */ -#define SYS_thrwakeup 301 +/* syscall: "__thrwakeup" ret: "int" args: "const volatile void *" "int" */ +#define SYS___thrwakeup 301 -/* syscall: "threxit" ret: "void" args: "pid_t *" */ -#define SYS_threxit 302 +/* syscall: "__threxit" ret: "void" args: "pid_t *" */ +#define SYS___threxit 302 -/* syscall: "thrsigdivert" ret: "int" args: "sigset_t" "siginfo_t *" "const struct timespec *" */ -#define SYS_thrsigdivert 303 +/* syscall: "__thrsigdivert" ret: "int" args: "sigset_t" "siginfo_t *" "const struct timespec *" */ +#define SYS___thrsigdivert 303 /* syscall: "__getcwd" ret: "int" args: "char *" "size_t" */ #define SYS___getcwd 304 diff --git a/sys/sys/syscallargs.h b/sys/sys/syscallargs.h index 5e3077f901a..f43f66d46f3 100644 --- a/sys/sys/syscallargs.h +++ b/sys/sys/syscallargs.h @@ -1,4 +1,4 @@ -/* $OpenBSD: syscallargs.h,v 1.133 2011/10/15 23:36:31 guenther Exp $ */ +/* $OpenBSD: syscallargs.h,v 1.134 2012/01/17 02:34:18 guenther Exp $ */ /* * System call argument lists. @@ -880,23 +880,24 @@ struct sys_msgctl_args { syscallarg(struct msqid_ds *) buf; }; -struct sys_thrsleep_args { - syscallarg(void *) ident; +struct sys___thrsleep_args { + syscallarg(const volatile void *) ident; syscallarg(clockid_t) clock_id; syscallarg(const struct timespec *) tp; syscallarg(void *) lock; + syscallarg(const int *) abort; }; -struct sys_thrwakeup_args { - syscallarg(void *) ident; +struct sys___thrwakeup_args { + syscallarg(const volatile void *) ident; syscallarg(int) n; }; -struct sys_threxit_args { +struct sys___threxit_args { syscallarg(pid_t *) notdead; }; -struct sys_thrsigdivert_args { +struct sys___thrsigdivert_args { syscallarg(sigset_t) sigmask; syscallarg(siginfo_t *) info; syscallarg(const struct timespec *) timeout; @@ -1279,10 +1280,10 @@ int sys_msgctl(struct proc *, void *, register_t *); #endif int sys_sched_yield(struct proc *, void *, register_t *); int sys_getthrid(struct proc *, void *, register_t *); -int sys_thrsleep(struct proc *, void *, register_t *); -int sys_thrwakeup(struct proc *, void *, register_t *); -int sys_threxit(struct proc *, void *, register_t *); -int sys_thrsigdivert(struct proc *, void *, register_t *); +int sys___thrsleep(struct proc *, void *, register_t *); +int sys___thrwakeup(struct proc *, void *, register_t *); +int sys___threxit(struct proc *, void *, register_t *); +int sys___thrsigdivert(struct proc *, void *, register_t *); int sys___getcwd(struct proc *, void *, register_t *); int sys_adjfreq(struct proc *, void *, register_t *); int sys_getfsstat(struct proc *, void *, register_t *); |