author | David Leonard <d@cvs.openbsd.org> | 1999-05-26 00:18:27 +0000
committer | David Leonard <d@cvs.openbsd.org> | 1999-05-26 00:18:27 +0000
commit | ce75e095891d0e7855c5b4c94f24393cf7c2b610 (patch)
tree | d4a6264e2baa2c3ecbb2de438c063ad6883e6078
parent | 8fd8f5d111ec2aa383bf3a5dd4a7f0394d0f9f57 (diff)
sync with FreeBSD
90 files changed, 6562 insertions, 1664 deletions
diff --git a/lib/libc_r/include/pthread.h b/lib/libc_r/include/pthread.h index 1d997eb7e0b..41b198b67b6 100644 --- a/lib/libc_r/include/pthread.h +++ b/lib/libc_r/include/pthread.h @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: pthread.h,v 1.6 1999/03/10 10:00:47 d Exp $ + * $OpenBSD: pthread.h,v 1.7 1999/05/26 00:17:41 d Exp $ * */ #ifndef _PTHREAD_H_ @@ -65,9 +65,9 @@ #define _POSIX_THREAD_ATTR_STACKADDR #define _POSIX_THREAD_ATTR_STACKSIZE -/* #define _POSIX_THREAD_PRIORITY_SCHEDULING */ -/* #define _POSIX_THREAD_PRIO_INHERIT */ -/* #define _POSIX_THREAD_PRIO_PROTECT */ +#define _POSIX_THREAD_PRIORITY_SCHEDULING +#define _POSIX_THREAD_PRIO_INHERIT +#define _POSIX_THREAD_PRIO_PROTECT /* #define _POSIX_THREAD_PROCESS_SHARED */ #define _POSIX_THREAD_SAFE_FUNCTIONS @@ -164,19 +164,37 @@ struct pthread_once { /* * Static initialization values. */ -#define PTHREAD_MUTEX_INITIALIZER ((pthread_mutex_t) NULL) -#define PTHREAD_COND_INITIALIZER ((pthread_cond_t) NULL) -#define PTHREAD_RWLOCK_INITIALIZER ((pthread_rwlock_t) NULL) +#define PTHREAD_MUTEX_INITIALIZER NULL +#define PTHREAD_COND_INITIALIZER NULL +#define PTHREAD_RWLOCK_INITIALIZER NULL +#define PTHREAD_PRIO_NONE 0 +#ifdef _POSIX_THREAD_PRIO_PROTECT +#define PTHREAD_PRIO_INHERIT 1 +#define PTHREAD_PRIO_PROTECT 2 +#endif + +/* + * Mutex types (Single UNIX Specification, Version 2, 1997). + * + * Note that a mutex attribute with one of the following types: + * + * PTHREAD_MUTEX_NORMAL + * PTHREAD_MUTEX_RECURSIVE + * MUTEX_TYPE_FAST (deprecated) + * MUTEX_TYPE_COUNTING_FAST (deprecated) + * + * will deviate from POSIX specified semantics. + */ enum pthread_mutextype { - PTHREAD_MUTEX_DEFAULT = 1, - PTHREAD_MUTEX_RECURSIVE = 2, - PTHREAD_MUTEX_NORMAL = 3, - PTHREAD_MUTEX_ERRORCHECK = 4, + PTHREAD_MUTEX_ERRORCHECK = 1, /* Default POSIX mutex */ + PTHREAD_MUTEX_RECURSIVE = 2, /* Recursive mutex */ + PTHREAD_MUTEX_NORMAL = 3, /* No error checking */ MUTEX_TYPE_MAX }; -#define MUTEX_TYPE_FAST PTHREAD_MUTEX_DEFAULT +#define PTHREAD_MUTEX_DEFAULT PTHREAD_MUTEX_ERRORCHECK +#define MUTEX_TYPE_FAST PTHREAD_MUTEX_NORMAL #define MUTEX_TYPE_COUNTING_FAST PTHREAD_MUTEX_RECURSIVE /* @@ -185,20 +203,10 @@ enum pthread_mutextype { __BEGIN_DECLS int pthread_atfork(void (*)(void), void (*)(void), void (*)(void)); int pthread_attr_destroy __P((pthread_attr_t *)); -int pthread_attr_getinheritsched __P((const pthread_attr_t *, int *)); -int pthread_attr_getschedparam __P((const pthread_attr_t *, - struct sched_param *)); -int pthread_attr_getschedpolicy __P((const pthread_attr_t *, int *)); -int pthread_attr_getscope __P((const pthread_attr_t *, int *)); int pthread_attr_getstacksize __P((pthread_attr_t *, size_t *)); int pthread_attr_getstackaddr __P((pthread_attr_t *, void **)); int pthread_attr_getdetachstate __P((pthread_attr_t *, int *)); int pthread_attr_init __P((pthread_attr_t *)); -int pthread_attr_setinheritsched __P((pthread_attr_t *, int)); -int pthread_attr_setschedparam __P((pthread_attr_t *, - const struct sched_param *)); -int pthread_attr_setschedpolicy __P((pthread_attr_t *, int)); -int pthread_attr_setscope __P((pthread_attr_t *, int)); int pthread_attr_setstacksize __P((pthread_attr_t *, size_t)); int pthread_attr_setstackaddr __P((pthread_attr_t *, void *)); int pthread_attr_setdetachstate __P((pthread_attr_t *, int)); @@ -208,10 +216,14 @@ void pthread_cleanup_push __P((void (*routine) (void *), void *routine_arg)); int pthread_condattr_destroy 
__P((pthread_condattr_t *attr)); int pthread_condattr_init __P((pthread_condattr_t *attr)); + +#if defined(_POSIX_THREAD_PROCESS_SHARED) int pthread_condattr_getpshared __P((const pthread_condattr_t *attr, int *pshared)); int pthread_condattr_setpshared __P((pthread_condattr_t *attr, int pshared)); +#endif + int pthread_cond_broadcast __P((pthread_cond_t *)); int pthread_cond_destroy __P((pthread_cond_t *)); int pthread_cond_init __P((pthread_cond_t *, @@ -231,27 +243,13 @@ int pthread_key_create __P((pthread_key_t *, void (*routine) (void *))); int pthread_key_delete __P((pthread_key_t)); int pthread_kill __P((struct pthread *, int)); -int pthread_mutexattr_destroy __P((pthread_mutexattr_t *)); -int pthread_mutexattr_getprioceiling __P((pthread_mutexattr_t *, - int *prioceiling)); -int pthread_mutexattr_getprotocol __P((pthread_mutexattr_t *, - int *protocol)); -int pthread_mutexattr_getpshared __P((pthread_mutexattr_t *, - int *pshared)); int pthread_mutexattr_init __P((pthread_mutexattr_t *)); -int pthread_mutexattr_setprioceiling __P((pthread_mutexattr_t *, - int prioceiling)); -int pthread_mutexattr_setprotocol __P((pthread_mutexattr_t *, - int protocol)); -int pthread_mutexattr_setpshared __P((pthread_mutexattr_t *, - int pshared)); +int pthread_mutexattr_destroy __P((pthread_mutexattr_t *)); int pthread_mutexattr_settype __P((pthread_mutexattr_t *, int)); int pthread_mutex_destroy __P((pthread_mutex_t *)); -int pthread_mutex_getprioceiling __P((pthread_mutex_t *)); int pthread_mutex_init __P((pthread_mutex_t *, const pthread_mutexattr_t *)); int pthread_mutex_lock __P((pthread_mutex_t *)); -int pthread_mutex_setprioceiling __P((pthread_mutex_t *)); int pthread_mutex_trylock __P((pthread_mutex_t *)); int pthread_mutex_unlock __P((pthread_mutex_t *)); int pthread_once __P((pthread_once_t *, @@ -281,10 +279,48 @@ void pthread_testcancel __P((void)); int pthread_getprio __P((pthread_t)); int pthread_setprio __P((pthread_t, int)); void pthread_yield __P((void)); -int pthread_setschedparam __P((pthread_t pthread, int policy, - const struct sched_param * param)); + +#if defined(_POSIX_THREAD_PROCESS_SHARED) +int pthread_mutexattr_getpshared __P((pthread_mutexattr_t *, + int *pshared)); +int pthread_mutexattr_setpshared __P((pthread_mutexattr_t *, + int pshared)); +#endif + +#if defined(_POSIX_THREAD_PRIO_PROTECT) +int pthread_mutexattr_getprioceiling __P((pthread_mutexattr_t *, + int *prioceiling)); +int pthread_mutexattr_setprioceiling __P((pthread_mutexattr_t *, + int prioceiling)); +int pthread_mutex_getprioceiling __P((pthread_mutex_t *, int *)); +int pthread_mutex_setprioceiling __P((pthread_mutex_t *, int, + int *)); +#endif + +#if defined(_POSIX_THREAD_PRIO_PROTECT) || defined (_POSIX_THREAD_PRIO_INHERIT) +int pthread_mutexattr_getprotocol __P((pthread_mutexattr_t *, + int *protocol)); +int pthread_mutexattr_setprotocol __P((pthread_mutexattr_t *, + int protocol)); +#endif + +#if defined(_POSIX_THREAD_PRIORITY_SCHEDULING) +int pthread_attr_getinheritsched __P((const pthread_attr_t *, int *)); +int pthread_attr_getschedparam __P((const pthread_attr_t *, + struct sched_param *)); +int pthread_attr_getschedpolicy __P((const pthread_attr_t *, int *)); +int pthread_attr_getscope __P((const pthread_attr_t *, int *)); +int pthread_attr_setinheritsched __P((pthread_attr_t *, int)); +int pthread_attr_setschedparam __P((pthread_attr_t *, + const struct sched_param *)); +int pthread_attr_setschedpolicy __P((pthread_attr_t *, int)); +int pthread_attr_setscope __P((pthread_attr_t *, int)); int 
pthread_getschedparam __P((pthread_t pthread, int *policy, struct sched_param * param)); +int pthread_setschedparam __P((pthread_t pthread, int policy, + const struct sched_param * param)); +#endif + int pthread_attr_setfloatstate __P((pthread_attr_t *, int)); int pthread_attr_getfloatstate __P((pthread_attr_t *, int *)); int pthread_attr_setcleanup __P((pthread_attr_t *, diff --git a/lib/libc_r/include/pthread_np.h b/lib/libc_r/include/pthread_np.h index 51cc4e23ee1..6f8b7b7ef44 100644 --- a/lib/libc_r/include/pthread_np.h +++ b/lib/libc_r/include/pthread_np.h @@ -1,3 +1,4 @@ +/* $OpenBSD: pthread_np.h,v 1.2 1999/05/26 00:17:41 d Exp $ */ /* * Copyright (c) 1996-98 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -29,12 +30,16 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: pthread_np.h,v 1.1 1998/09/05 07:40:47 d Exp $ */ #ifndef _PTHREAD_NP_H_ #define _PTHREAD_NP_H_ /* + * Non-POSIX type definitions: + */ +typedef void (*pthread_switch_routine_t) __P((pthread_t, pthread_t)); + +/* * Non-POSIX thread function prototype definitions: */ __BEGIN_DECLS @@ -46,6 +51,8 @@ int pthread_suspend_np __P((pthread_t)); int pthread_mutexattr_getkind_np __P((pthread_mutexattr_t attr)); int pthread_mutexattr_setkind_np __P((pthread_mutexattr_t *attr, int kind)); void pthread_set_name_np __P((pthread_t, char *)); +int pthread_switch_add_np (pthread_switch_routine_t routine); +int pthread_switch_delete_np (pthread_switch_routine_t routine); __END_DECLS #endif diff --git a/lib/libc_r/uthread/Makefile.inc b/lib/libc_r/uthread/Makefile.inc index e525110ebb3..22bfb740e07 100644 --- a/lib/libc_r/uthread/Makefile.inc +++ b/lib/libc_r/uthread/Makefile.inc @@ -1,5 +1,5 @@ -# $Id: Makefile.inc,v 1.6 1999/01/18 00:00:32 d Exp $ -# $OpenBSD: Makefile.inc,v 1.6 1999/01/18 00:00:32 d Exp $ +# $Id: Makefile.inc,v 1.7 1999/05/26 00:18:21 d Exp $ +# $OpenBSD: Makefile.inc,v 1.7 1999/05/26 00:18:21 d Exp $ # uthread sources .PATH: ${.CURDIR}/uthread @@ -12,11 +12,19 @@ SRCS+= \ uthread_attr_destroy.c \ uthread_attr_init.c \ uthread_attr_getdetachstate.c \ + uthread_attr_getinheritsched.c \ + uthread_attr_getschedparam.c \ + uthread_attr_getschedpolicy.c \ + uthread_attr_getscope.c \ + uthread_attr_setstackaddr.c \ uthread_attr_getstackaddr.c \ uthread_attr_getstacksize.c \ - uthread_attr_priosched.c \ uthread_attr_setcreatesuspend_np.c \ uthread_attr_setdetachstate.c \ + uthread_attr_setinheritsched.c \ + uthread_attr_setschedparam.c \ + uthread_attr_setschedpolicy.c \ + uthread_attr_setscope.c \ uthread_attr_setstackaddr.c \ uthread_attr_setstacksize.c \ uthread_autoinit.c \ @@ -53,6 +61,7 @@ SRCS+= \ uthread_getdirentries.c \ uthread_getpeername.c \ uthread_getprio.c \ + uthread_getschedparam.c \ uthread_getsockname.c \ uthread_getsockopt.c \ uthread_info.c \ @@ -67,11 +76,14 @@ SRCS+= \ uthread_msync.c \ uthread_multi_np.c \ uthread_mutex.c \ + uthread_mutex_prioceiling.c \ + uthread_mutex_protocol.c \ uthread_mutexattr_destroy.c \ uthread_nanosleep.c \ uthread_once.c \ uthread_open.c \ uthread_pipe.c \ + uthread_priority_queue.c \ uthread_poll.c \ uthread_queue.c \ uthread_read.c \ @@ -87,6 +99,7 @@ SRCS+= \ uthread_sendto.c \ uthread_seterrno.c \ uthread_setprio.c \ + uthread_setschedparam.c \ uthread_setsockopt.c \ uthread_shutdown.c \ uthread_sig.c \ @@ -94,6 +107,7 @@ SRCS+= \ uthread_sigaltstack.c \ uthread_sigblock.c \ uthread_sigmask.c \ + uthread_sigpending.c \ uthread_sigprocmask.c \ uthread_sigsetmask.c \ uthread_sigsuspend.c \ @@ -104,6 
+118,7 @@ SRCS+= \ uthread_spec.c \ uthread_spinlock.c \ uthread_suspend_np.c \ + uthread_switch_np.c \ uthread_vfork.c \ uthread_wait4.c \ uthread_write.c \ diff --git a/lib/libc_r/uthread/pthread_private.h b/lib/libc_r/uthread/pthread_private.h index 529c647fa57..7643b99a19c 100644 --- a/lib/libc_r/uthread/pthread_private.h +++ b/lib/libc_r/uthread/pthread_private.h @@ -31,7 +31,7 @@ * * Private thread definitions for the uthread kernel. * - * $OpenBSD: pthread_private.h,v 1.13 1999/02/16 16:44:07 millert Exp $ + * $OpenBSD: pthread_private.h,v 1.14 1999/05/26 00:18:21 d Exp $ * */ @@ -50,10 +50,17 @@ #include <sys/time.h> #include <sched.h> #include <spinlock.h> +#include <pthread_np.h> #ifndef _NO_UTHREAD_MACHDEP #include "uthread_machdep.h" #endif +#ifdef __OpenBSD__ +/* Steal TAILQ_FOREACH from FreeBSD's <sys/queue.h> */ +#define TAILQ_FOREACH(var, head, field) \ + for (var = TAILQ_FIRST(head); var; var = TAILQ_NEXT(var, field)) +#endif + /* * Kernel fatal error handler macro. */ @@ -63,16 +70,59 @@ #define stdout_debug(_x) _thread_sys_write(1,_x,strlen(_x)); #define stderr_debug(_x) _thread_sys_write(2,_x,strlen(_x)); + /* - * State change macro: + * Priority queue manipulation macros: */ -#define PTHREAD_NEW_STATE(thrd, newstate) { \ +#define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd) +#define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd) +#define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd) +#define PTHREAD_PRIOQ_FIRST _pq_first(&_readyq) + +/* + * Waiting queue manipulation macros: + */ +#define PTHREAD_WAITQ_INSERT(thrd) TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe) +#define PTHREAD_WAITQ_REMOVE(thrd) TAILQ_REMOVE(&_waitingq,thrd,pqe) + +/* + * State change macro without scheduling queue change: + */ +#define PTHREAD_SET_STATE(thrd, newstate) { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } /* + * State change macro with scheduling queue change - This must be + * called with preemption deferred (see thread_kern_sched_[un]defer). + */ +#define PTHREAD_NEW_STATE(thrd, newstate) { \ + if ((thrd)->state != newstate) { \ + if ((thrd)->state == PS_RUNNING) { \ + PTHREAD_PRIOQ_REMOVE(thrd); \ + PTHREAD_WAITQ_INSERT(thrd); \ + } else if (newstate == PS_RUNNING) { \ + PTHREAD_WAITQ_REMOVE(thrd); \ + PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ + } \ + } \ + PTHREAD_SET_STATE(thrd, newstate); \ +} + +/* + * Define the signals to be used for scheduling. + */ +#if defined(_PTHREADS_COMPAT_SCHED) +#define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL +#define _SCHED_SIGNAL SIGVTALRM +#else +#define _ITIMER_SCHED_TIMER ITIMER_PROF +#define _SCHED_SIGNAL SIGPROF +#endif + +/* * Queue definitions. */ struct pthread_queue { @@ -82,10 +132,34 @@ struct pthread_queue { }; /* + * Priority queues. + * + * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. + */ +typedef struct pq_list { + TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ + TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ + int pl_prio; /* the priority of this list */ + int pl_queued; /* is this in the priority queue */ +} pq_list_t; + +typedef struct pq_queue { + TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ + pq_list_t *pq_lists; /* array of all priority lists */ + int pq_size; /* number of priority lists */ +} pq_queue_t; + + +/* * Static queue initialization values. */ #define PTHREAD_QUEUE_INITIALIZER { NULL, NULL, NULL } +/* + * TailQ initialization values. 
+ */ +#define TAILQ_INITIALIZER { NULL, NULL } + /* * Mutex definitions. */ @@ -96,10 +170,31 @@ union pthread_mutex_data { struct pthread_mutex { enum pthread_mutextype m_type; - struct pthread_queue m_queue; + int m_protocol; + TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; union pthread_mutex_data m_data; long m_flags; + int m_refcount; + + /* + * Used for priority inheritence and protection. + * + * m_prio - For priority inheritence, the highest active + * priority (threads locking the mutex inherit + * this priority). For priority protection, the + * ceiling priority of this mutex. + * m_saved_prio - mutex owners inherited priority before + * taking the mutex, restored when the owner + * unlocks the mutex. + */ + int m_prio; + int m_saved_prio; + + /* + * Link for list of all mutexes a thread currently owns. + */ + TAILQ_ENTRY(pthread_mutex) m_qe; /* * Lock for accesses to this structure. @@ -118,11 +213,13 @@ struct pthread_mutex { * Static mutex initialization values. */ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ - { MUTEX_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, \ - NULL, { NULL }, MUTEX_FLAGS_INITED } + { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ + NULL, { NULL }, MUTEX_FLAGS_INITED, 0, 0, 0, TAILQ_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; + int m_protocol; + int m_ceiling; long m_flags; }; @@ -135,15 +232,16 @@ enum pthread_cond_type { }; struct pthread_cond { - enum pthread_cond_type c_type; - struct pthread_queue c_queue; - void *c_data; - long c_flags; + enum pthread_cond_type c_type; + TAILQ_HEAD(cond_head, pthread) c_queue; + pthread_mutex_t c_mutex; + void *c_data; + long c_flags; /* * Lock for accesses to this structure. */ - spinlock_t lock; + spinlock_t lock; }; struct pthread_cond_attr { @@ -162,7 +260,8 @@ struct pthread_cond_attr { * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ - { COND_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, NULL, COND_FLAGS_INITED } + { COND_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, NULL, NULL, \ + COND_FLAGS_INITED } /* * Cleanup definitions. @@ -174,7 +273,9 @@ struct pthread_cleanup { }; struct pthread_attr { - int schedparam_policy; + int sched_policy; + int sched_inherit; + int sched_interval; int prio; int suspend; int flags; @@ -256,9 +357,11 @@ enum pthread_state { PS_WAIT_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, + PS_SPINBLOCK, PS_JOIN, PS_SUSPENDED, PS_DEAD, + PS_DEADLOCK, PS_STATE_MAX }; @@ -302,8 +405,8 @@ struct pthread_select_data { }; union pthread_wait_data { - pthread_mutex_t *mutex; - pthread_cond_t *cond; + pthread_mutex_t mutex; + pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct { short fd; /* Used when thread waiting on fd */ @@ -311,6 +414,7 @@ union pthread_wait_data { const char *fname; /* Source file name for debugging.*/ } fd; struct pthread_select_data * select_data; + spinlock_t *spinlock; }; /* @@ -419,7 +523,11 @@ struct pthread { struct pthread_queue join_queue; /* - * The current thread can belong to only one queue at a time. + * The current thread can belong to only one scheduling queue + * at a time (ready or waiting queue). It can also belong to + * a queue of threads waiting on mutexes or condition variables. + * Use pqe for the scheduling queue link (both ready and waiting), + * and qe for other links (mutexes and condition variables). * * Pointer to queue (if any) on which the current thread is waiting. * @@ -431,8 +539,11 @@ struct pthread { /* Pointer to next element in queue. 
*/ struct pthread *qnxt; + /* Priority queue entry for this thread: */ + TAILQ_ENTRY(pthread) pqe; + /* Queue entry for this thread: */ - TAILQ_ENTRY(pthread) qe; + TAILQ_ENTRY(pthread) qe; /* Wait data. */ union pthread_wait_data data; @@ -446,12 +557,61 @@ struct pthread { /* Signal number when in state PS_SIGWAIT: */ int signo; + /* + * Set to non-zero when this thread has deferred thread + * scheduling. We allow for recursive deferral. + */ + int sched_defer_count; + + /* + * Set to TRUE if this thread should yield after undeferring + * thread scheduling. + */ + int yield_on_sched_undefer; + /* Miscellaneous data. */ int flags; -#define PTHREAD_EXITING (0x0100) -#define PTHREAD_CANCELLING (0x0200) /* thread has been cancelled */ -#define PTHREAD_AT_CANCEL_POINT (0x0400) /* thread at cancel point */ - char pthread_priority; +#define PTHREAD_FLAGS_PRIVATE 0x0001 +#define PTHREAD_EXITING 0x0002 +#define PTHREAD_FLAGS_QUEUED 0x0004 /* in queue (qe is used) */ +#define PTHREAD_FLAGS_TRACE 0x0008 +#define PTHREAD_CANCELLING 0x0010 /* thread has been cancelled */ +#define PTHREAD_AT_CANCEL_POINT 0x0020 /* thread at cancel point */ + + /* + * Base priority is the user setable and retrievable priority + * of the thread. It is only affected by explicit calls to + * set thread priority and upon thread creation via a thread + * attribute or default priority. + */ + char base_priority; + + /* + * Inherited priority is the priority a thread inherits by + * taking a priority inheritence or protection mutex. It + * is not affected by base priority changes. Inherited + * priority defaults to and remains 0 until a mutex is taken + * that is being waited on by any other thread whose priority + * is non-zero. + */ + char inherited_priority; + + /* + * Active priority is always the maximum of the threads base + * priority and inherited priority. When there is a change + * in either the real or inherited priority, the active + * priority must be recalculated. + */ + char active_priority; + + /* Number of priority ceiling or protection mutexes owned. */ + int priority_mutex_count; + + /* + * Queue of currently owned mutexes. + */ + TAILQ_HEAD(, pthread_mutex) mutexq; + void *ret; const void **specific_data; int specific_data_count; @@ -484,6 +644,9 @@ extern struct pthread * volatile _thread_kern_threadp; /* Ptr to the thread structure for the running thread: */ extern struct pthread * volatile _thread_run; +/* Ptr to the thread structure for the last user thread to run: */ +extern struct pthread * volatile _last_user_thread; + /* * Ptr to the thread running in single-threaded mode or NULL if * running multi-threaded (default POSIX behaviour). @@ -528,6 +691,7 @@ extern int _pthread_stdio_flags[3]; /* File table information: */ extern struct fd_table_entry **_thread_fd_table; +extern const int dtablecount; extern int _thread_dtablesize; /* Garbage collector mutex and condition variable. */ @@ -540,6 +704,19 @@ extern pthread_cond_t _gc_cond; extern struct sigaction _thread_sigact[NSIG]; /* + * Scheduling queues: + */ +extern pq_queue_t _readyq; +typedef TAILQ_HEAD(, pthread) _waitingq_t; +extern _waitingq_t _waitingq; + +/* Indicates that the waitingq now has threads ready to run. */ +extern volatile int _waitingq_check_reqd; + +/* Thread switch hook. 
*/ +extern pthread_switch_routine_t _sched_switch_hook; + +/* * Where SIGINFO writes thread states when /dev/tty cannot be opened */ #define INFO_DUMP_FILE "/tmp/uthread.dump" @@ -569,6 +746,14 @@ void _lock_thread(void); void _lock_thread_list(void); void _unlock_thread(void); void _unlock_thread_list(void); +int _mutex_cv_lock(pthread_mutex_t *); +int _mutex_cv_unlock(pthread_mutex_t *); +void _mutex_notify_priochange(struct pthread *); +int _pq_init(struct pq_queue *pq, int, int); +void _pq_remove(struct pq_queue *pq, struct pthread *); +void _pq_insert_head(struct pq_queue *pq, struct pthread *); +void _pq_insert_tail(struct pq_queue *pq, struct pthread *); +struct pthread *_pq_first(struct pq_queue *pq); void _thread_exit(const char *, int, const char *) __attribute__((noreturn)); void _thread_fd_unlock(int, int); @@ -576,12 +761,14 @@ void _thread_fd_unlock_debug(int, int, char *, int); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); -void _thread_init(void) /* __attribute__((constructor)) */; +void _thread_init(void); void _thread_kern_sched(struct sigcontext *); void _thread_kern_sched_state(enum pthread_state, const char *, int); void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno); void _thread_kern_set_timeout(struct timespec *); +void _thread_kern_sched_defer(void); +void _thread_kern_sched_undefer(void); void _thread_sig_handler(int, int, struct sigcontext *); void _thread_start(void); void _thread_start_sig_handler(void); @@ -723,7 +910,7 @@ int _thread_sys_fchdir(int); int _thread_sys_fchown(int, uid_t, gid_t); int _thread_sys_fsync(int); int _thread_sys_ftruncate(int, off_t); -long _thread_sys_fpathconf(int, int); +long _thread_sys_fpathconf(int, int); int _thread_sys_pause(void); int _thread_sys_pipe(int *); int _thread_sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); diff --git a/lib/libc_r/uthread/uthread_attr_getinheritsched.c b/lib/libc_r/uthread/uthread_attr_getinheritsched.c new file mode 100644 index 00000000000..54294c2e74b --- /dev/null +++ b/lib/libc_r/uthread/uthread_attr_getinheritsched.c @@ -0,0 +1,51 @@ +/* $OpenBSD: uthread_attr_getinheritsched.c,v 1.1 1999/05/26 00:18:22 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_getinheritsched(const pthread_attr_t *attr, int *sched_inherit) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL)) + ret = EINVAL; + else + *sched_inherit = (*attr)->sched_inherit; + + return(ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_attr_getschedparam.c b/lib/libc_r/uthread/uthread_attr_getschedparam.c new file mode 100644 index 00000000000..1a51d23b069 --- /dev/null +++ b/lib/libc_r/uthread/uthread_attr_getschedparam.c @@ -0,0 +1,51 @@ +/* $OpenBSD: uthread_attr_getschedparam.c,v 1.1 1999/05/26 00:18:22 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || (param == NULL)) + ret = EINVAL; + else + param->sched_priority = (*attr)->prio; + + return(ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_attr_getschedpolicy.c b/lib/libc_r/uthread/uthread_attr_getschedpolicy.c new file mode 100644 index 00000000000..54977df53a0 --- /dev/null +++ b/lib/libc_r/uthread/uthread_attr_getschedpolicy.c @@ -0,0 +1,51 @@ +/* $OpenBSD: uthread_attr_getschedpolicy.c,v 1.1 1999/05/26 00:18:22 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || (policy == NULL)) + ret = EINVAL; + else + *policy = (*attr)->sched_policy; + + return(ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_attr_getscope.c b/lib/libc_r/uthread/uthread_attr_getscope.c new file mode 100644 index 00000000000..5dd4772ae46 --- /dev/null +++ b/lib/libc_r/uthread/uthread_attr_getscope.c @@ -0,0 +1,54 @@ +/* $OpenBSD: uthread_attr_getscope.c,v 1.1 1999/05/26 00:18:22 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_getscope(const pthread_attr_t *attr, int *contentionscope) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || (contentionscope == NULL)) + /* Return an invalid argument: */ + ret = EINVAL; + + else + *contentionscope = (*attr)->flags & PTHREAD_SCOPE_SYSTEM ? + PTHREAD_SCOPE_SYSTEM : PTHREAD_SCOPE_PROCESS; + + return(ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_attr_priosched.c b/lib/libc_r/uthread/uthread_attr_priosched.c deleted file mode 100644 index cc415768165..00000000000 --- a/lib/libc_r/uthread/uthread_attr_priosched.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * David Leonard <d@openbsd.org>, 1998. Public Domain. 
- * - * $OpenBSD: uthread_attr_priosched.c,v 1.1 1999/01/18 00:06:56 d Exp $ - */ -#include <errno.h> -#ifdef _THREAD_SAFE -#include <pthread.h> -#include "pthread_private.h" - -int -pthread_attr_setscope(attr, contentionscope) - pthread_attr_t *attr; - int contentionscope; -{ - - return (ENOSYS); -} - -int -pthread_attr_getscope(attr, contentionscope) - const pthread_attr_t *attr; - int *contentionscope; -{ - - return (ENOSYS); -} - -int -pthread_attr_setinheritsched(attr, inheritsched) - pthread_attr_t *attr; - int inheritsched; -{ - - return (ENOSYS); -} - -int -pthread_attr_getinheritsched(attr, inheritsched) - const pthread_attr_t *attr; - int *inheritsched; -{ - - return (ENOSYS); -} - -int -pthread_attr_setschedpolicy(attr, policy) - pthread_attr_t *attr; - int policy; -{ - - return (ENOSYS); -} - -int -pthread_attr_getschedpolicy(attr, policy) - const pthread_attr_t *attr; - int *policy; -{ - - return (ENOSYS); -} - -int -pthread_attr_setschedparam(attr, param) - pthread_attr_t *attr; - const struct sched_param *param; -{ - - return (ENOSYS); -} - -int -pthread_attr_getschedparam(attr, param) - const pthread_attr_t *attr; - struct sched_param *param; -{ - - return (ENOSYS); -} -#endif diff --git a/lib/libc_r/uthread/uthread_attr_setinheritsched.c b/lib/libc_r/uthread/uthread_attr_setinheritsched.c new file mode 100644 index 00000000000..0e0e015558a --- /dev/null +++ b/lib/libc_r/uthread/uthread_attr_setinheritsched.c @@ -0,0 +1,51 @@ +/* $OpenBSD: uthread_attr_setinheritsched.c,v 1.1 1999/05/26 00:18:22 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_setinheritsched(pthread_attr_t *attr, int sched_inherit) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL)) + ret = EINVAL; + else + (*attr)->sched_inherit = sched_inherit; + + return(ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_attr_setschedparam.c b/lib/libc_r/uthread/uthread_attr_setschedparam.c new file mode 100644 index 00000000000..2ff67680fe8 --- /dev/null +++ b/lib/libc_r/uthread/uthread_attr_setschedparam.c @@ -0,0 +1,51 @@ +/* $OpenBSD: uthread_attr_setschedparam.c,v 1.1 1999/05/26 00:18:23 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || (param == NULL)) + ret = EINVAL; + else + (*attr)->prio = param->sched_priority; + + return(ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_attr_setschedpolicy.c b/lib/libc_r/uthread/uthread_attr_setschedpolicy.c new file mode 100644 index 00000000000..2b47a9d9339 --- /dev/null +++ b/lib/libc_r/uthread/uthread_attr_setschedpolicy.c @@ -0,0 +1,52 @@ +/* $OpenBSD: uthread_attr_setschedpolicy.c,v 1.1 1999/05/26 00:18:23 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || (policy < SCHED_FIFO) || + (policy > SCHED_RR)) + ret = EINVAL; + else + (*attr)->sched_policy = policy; + + return(ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_attr_setscope.c b/lib/libc_r/uthread/uthread_attr_setscope.c new file mode 100644 index 00000000000..26fdaf6f32c --- /dev/null +++ b/lib/libc_r/uthread/uthread_attr_setscope.c @@ -0,0 +1,63 @@ +/* $OpenBSD: uthread_attr_setscope.c,v 1.1 1999/05/26 00:18:23 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_setscope(pthread_attr_t *attr, int contentionscope) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || + (contentionscope != PTHREAD_SCOPE_PROCESS) || + (contentionscope != PTHREAD_SCOPE_SYSTEM)) + /* Return an invalid argument: */ + ret = EINVAL; + + else if (contentionscope == PTHREAD_SCOPE_SYSTEM) + /* We don't support system wide contention: */ +#ifdef NOT_YET + ret = ENOTSUP; +#else + ret = EOPNOTSUPP; +#endif + + else + (*attr)->flags |= contentionscope; + + return(ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_cond.c b/lib/libc_r/uthread/uthread_cond.c index 525f86049b0..501ad84478a 100644 --- a/lib/libc_r/uthread/uthread_cond.c +++ b/lib/libc_r/uthread/uthread_cond.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_cond.c,v 1.6 1999/05/26 00:18:23 d Exp $ */ /* * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,7 +30,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $OpenBSD: uthread_cond.c,v 1.5 1999/01/17 23:57:27 d Exp $ */ #include <stdlib.h> #include <errno.h> @@ -38,6 +38,14 @@ #include <pthread.h> #include "pthread_private.h" +/* + * Prototypes + */ +static inline pthread_t cond_queue_deq(pthread_cond_t); +static inline void cond_queue_remove(pthread_cond_t, pthread_t); +static inline void cond_queue_enq(pthread_cond_t, pthread_t); + + int pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr) { @@ -84,9 +92,10 @@ pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr) * Initialise the condition variable * structure: */ - _thread_queue_init(&pcond->c_queue); + TAILQ_INIT(&pcond->c_queue); pcond->c_flags |= COND_FLAGS_INITED; pcond->c_type = type; + pcond->c_mutex = NULL; _SPINUNLOCK(&pcond->lock); *cond = pcond; } @@ -137,6 +146,7 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) */ else if (*cond != NULL || (rval = pthread_cond_init(cond,NULL)) == 0) { + /* This is a cancellation point: */ _thread_enter_cancellation_point(); /* Lock the condition variable structure: */ @@ -146,34 +156,57 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: - /* Wait forever: */ - _thread_run->wakeup_time.tv_sec = -1; - - /* - * Queue the running thread for the condition - * variable: - */ - _thread_queue_enq(&(*cond)->c_queue, _thread_run); - _thread_run->data.cond = cond; - - /* Unlock the mutex: */ - if ((rval = pthread_mutex_unlock(mutex)) != 0) { - /* - * Cannot unlock the mutex, so remove the - * running thread from the condition - * variable queue: - */ - _thread_queue_deq(&(*cond)->c_queue); - + if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && + ((*cond)->c_mutex != *mutex))) { /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); + + /* Return invalid argument error: */ + rval = EINVAL; } else { - /* Schedule the next thread: */ - _thread_kern_sched_state_unlock(PS_COND_WAIT, - &(*cond)->lock, __FILE__, __LINE__); + /* Reset the timeout flag: */ + _thread_run->timeout = 0; - /* Lock the mutex: */ - rval = pthread_mutex_lock(mutex); + /* + * Queue the running thread for the condition + * variable: + */ + cond_queue_enq(*cond, _thread_run); + + /* Remember the mutex that is being used: */ + (*cond)->c_mutex = *mutex; + + /* Wait forever: */ + _thread_run->wakeup_time.tv_sec = -1; + + /* Unlock the mutex: */ + if ((rval = _mutex_cv_unlock(mutex)) != 0) { + /* + * Cannot unlock the mutex, so remove + * the running thread from the condition + * variable queue: + */ + cond_queue_remove(*cond, _thread_run); + + /* Check for no more waiters: */ + if (TAILQ_FIRST(&(*cond)->c_queue) == + NULL) + (*cond)->c_mutex = NULL; + + /* Unlock the condition variable structure: */ + _SPINUNLOCK(&(*cond)->lock); + } + else { + /* + * Schedule the next thread and unlock + * the condition variable structure: + */ + _thread_kern_sched_state_unlock(PS_COND_WAIT, + &(*cond)->lock, __FILE__, __LINE__); + + /* Lock the mutex: */ + rval = _mutex_cv_lock(mutex); + } } break; @@ -187,6 +220,7 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) break; } + /* No longer in a cancellation point: */ _thread_leave_cancellation_point(); } @@ -209,7 +243,9 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, */ else if (*cond != NULL || (rval = pthread_cond_init(cond,NULL)) == 0) { + /* This is a cancellation point: */ _thread_enter_cancellation_point(); + /* Lock 
the condition variable structure: */ _SPINLOCK(&(*cond)->lock); @@ -217,43 +253,88 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: - /* Set the wakeup time: */ - _thread_run->wakeup_time.tv_sec = abstime->tv_sec; - _thread_run->wakeup_time.tv_nsec = abstime->tv_nsec; - - /* Reset the timeout flag: */ - _thread_run->timeout = 0; - - /* - * Queue the running thread for the condition - * variable: - */ - _thread_queue_enq(&(*cond)->c_queue, _thread_run); - _thread_run->data.cond = cond; - - /* Unlock the mutex: */ - if ((rval = pthread_mutex_unlock(mutex)) != 0) { - /* - * Cannot unlock the mutex, so remove the - * running thread from the condition - * variable queue: - */ - _thread_queue_deq(&(*cond)->c_queue); + if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && + ((*cond)->c_mutex != *mutex))) { + /* Return invalid argument error: */ + rval = EINVAL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { - /* Schedule the next thread: */ - _thread_kern_sched_state_unlock(PS_COND_WAIT, - &(*cond)->lock, __FILE__, __LINE__); + /* Set the wakeup time: */ + _thread_run->wakeup_time.tv_sec = + abstime->tv_sec; + _thread_run->wakeup_time.tv_nsec = + abstime->tv_nsec; - /* Lock the mutex: */ - if ((rval = pthread_mutex_lock(mutex)) != 0) { - } - /* Check if the wait timed out: */ - else if (_thread_run->timeout) { - /* Return a timeout error: */ - rval = ETIMEDOUT; + /* Reset the timeout flag: */ + _thread_run->timeout = 0; + + /* + * Queue the running thread for the condition + * variable: + */ + cond_queue_enq(*cond, _thread_run); + + /* Remember the mutex that is being used: */ + (*cond)->c_mutex = *mutex; + + /* Unlock the mutex: */ + if ((rval = _mutex_cv_unlock(mutex)) != 0) { + /* + * Cannot unlock the mutex, so remove + * the running thread from the condition + * variable queue: + */ + cond_queue_remove(*cond, _thread_run); + + /* Check for no more waiters: */ + if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) + (*cond)->c_mutex = NULL; + + /* Unlock the condition variable structure: */ + _SPINUNLOCK(&(*cond)->lock); + } else { + /* + * Schedule the next thread and unlock + * the condition variable structure: + */ + _thread_kern_sched_state_unlock(PS_COND_WAIT, + &(*cond)->lock, __FILE__, __LINE__); + + /* Check if the wait timedout: */ + if (_thread_run->timeout == 0) { + /* Lock the mutex: */ + rval = _mutex_cv_lock(mutex); + } + else { + /* Lock the condition variable structure: */ + _SPINLOCK(&(*cond)->lock); + + /* + * The wait timed out; remove + * the thread from the condition + * variable queue: + */ + cond_queue_remove(*cond, + _thread_run); + + /* Check for no more waiters: */ + if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) + (*cond)->c_mutex = NULL; + + /* Unock the condition variable structure: */ + _SPINUNLOCK(&(*cond)->lock); + + /* Return a timeout error: */ + rval = ETIMEDOUT; + + /* + * Lock the mutex and ignore + * any errors: + */ + (void)_mutex_cv_lock(mutex); + } } } break; @@ -267,6 +348,8 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, rval = EINVAL; break; } + + /* No longer in a cancellation point: */ _thread_leave_cancellation_point(); } @@ -290,11 +373,22 @@ pthread_cond_signal(pthread_cond_t * cond) switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: - /* Bring the next thread off the condition queue: */ - if ((pthread = _thread_queue_deq(&(*cond)->c_queue)) != NULL) { + /* + * 
Enter a loop to dequeue threads from the condition + * queue until we find one that hasn't previously + * timed out. + */ + while (((pthread = cond_queue_deq(*cond)) != NULL) && + (pthread->timeout != 0)) { + } + + if (pthread != NULL) /* Allow the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); - } + + /* Check for no more waiters: */ + if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) + (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ @@ -321,6 +415,16 @@ pthread_cond_broadcast(pthread_cond_t * cond) if (cond == NULL || *cond == NULL) rval = EINVAL; else { + /* + * Guard against preemption by a scheduling signal. + * A change of thread state modifies the waiting + * and priority queues. In addition, we must assure + * that all threads currently waiting on the condition + * variable are signaled and are not timedout by a + * scheduling signal that causes a preemption. + */ + _thread_kern_sched_defer(); + /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); @@ -332,11 +436,17 @@ pthread_cond_broadcast(pthread_cond_t * cond) * Enter a loop to bring all threads off the * condition queue: */ - while ((pthread = - _thread_queue_deq(&(*cond)->c_queue)) != NULL) { - /* Allow the thread to run: */ - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + while ((pthread = cond_queue_deq(*cond)) != NULL) { + /* + * The thread is already running if the + * timeout flag is set. + */ + if (pthread->timeout == 0) + PTHREAD_NEW_STATE(pthread,PS_RUNNING); } + + /* There are no more waiting threads: */ + (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ @@ -348,9 +458,74 @@ pthread_cond_broadcast(pthread_cond_t * cond) /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); + + /* Reenable preemption and yield if necessary. + */ + _thread_kern_sched_undefer(); } /* Return the completion status: */ return (rval); } + +/* + * Dequeue a waiting thread from the head of a condition queue in + * descending priority order. + */ +static inline pthread_t +cond_queue_deq(pthread_cond_t cond) +{ + pthread_t pthread; + + if ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) { + TAILQ_REMOVE(&cond->c_queue, pthread, qe); + pthread->flags &= ~PTHREAD_FLAGS_QUEUED; + } + + return(pthread); +} + +/* + * Remove a waiting thread from a condition queue in descending priority + * order. + */ +static inline void +cond_queue_remove(pthread_cond_t cond, pthread_t pthread) +{ + /* + * Because pthread_cond_timedwait() can timeout as well + * as be signaled by another thread, it is necessary to + * guard against removing the thread from the queue if + * it isn't in the queue. + */ + if (pthread->flags & PTHREAD_FLAGS_QUEUED) { + TAILQ_REMOVE(&cond->c_queue, pthread, qe); + pthread->flags &= ~PTHREAD_FLAGS_QUEUED; + } +} + +/* + * Enqueue a waiting thread to a condition queue in descending priority + * order. + */ +static inline void +cond_queue_enq(pthread_cond_t cond, pthread_t pthread) +{ + pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head); + + /* + * For the common case of all threads having equal priority, + * we perform a quick check against the priority of the thread + * at the tail of the queue. 
+ */ + if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) + TAILQ_INSERT_TAIL(&cond->c_queue, pthread, qe); + else { + tid = TAILQ_FIRST(&cond->c_queue); + while (pthread->active_priority <= tid->active_priority) + tid = TAILQ_NEXT(tid, qe); + TAILQ_INSERT_BEFORE(tid, pthread, qe); + } + pthread->flags |= PTHREAD_FLAGS_QUEUED; +} #endif diff --git a/lib/libc_r/uthread/uthread_create.c b/lib/libc_r/uthread/uthread_create.c index eede7c6b5e9..002f563d14a 100644 --- a/lib/libc_r/uthread/uthread_create.c +++ b/lib/libc_r/uthread/uthread_create.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_create.c,v 1.9 1999/05/12 06:00:00 d Exp $ + * $OpenBSD: uthread_create.c,v 1.10 1999/05/26 00:18:23 d Exp $ */ #include <errno.h> #include <stdlib.h> @@ -135,21 +135,26 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr, */ if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) { /* Copy the scheduling attributes: */ - new_thread->pthread_priority = _thread_run->pthread_priority; - new_thread->attr.prio = _thread_run->pthread_priority; - new_thread->attr.schedparam_policy = _thread_run->attr.schedparam_policy; + new_thread->base_priority = _thread_run->base_priority; + new_thread->attr.prio = _thread_run->base_priority; + new_thread->attr.sched_policy = _thread_run->attr.sched_policy; } else { /* * Use just the thread priority, leaving the * other scheduling attributes as their * default values: */ - new_thread->pthread_priority = new_thread->attr.prio; + new_thread->base_priority = new_thread->attr.prio; } + new_thread->active_priority = new_thread->base_priority; + new_thread->inherited_priority = 0; /* Initialise the join queue for the new thread: */ _thread_queue_init(&(new_thread->join_queue)); + /* Initialize the mutex queue: */ + TAILQ_INIT(&new_thread->mutexq); + /* Initialise hooks in the thread structure: */ new_thread->specific_data = NULL; new_thread->cleanup = NULL; @@ -173,6 +178,27 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr, /* Unlock the thread list: */ _unlock_thread_list(); + /* + * Guard against preemption by a scheduling signal. + * A change of thread state modifies the waiting + * and priority queues. + */ + _thread_kern_sched_defer(); + + if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) { + new_thread->state = PS_SUSPENDED; + PTHREAD_WAITQ_INSERT(new_thread); + } else { + new_thread->state = PS_RUNNING; + PTHREAD_PRIOQ_INSERT_TAIL(new_thread); + } + + /* + * Reenable preemption and yield if a scheduling + * signal occurred while in the critical region. + */ + _thread_kern_sched_undefer(); + /* Return a pointer to the thread structure: */ if (thread != NULL) (*thread) = new_thread; diff --git a/lib/libc_r/uthread/uthread_detach.c b/lib/libc_r/uthread/uthread_detach.c index d4fa639fef8..cb51ff0e43d 100644 --- a/lib/libc_r/uthread/uthread_detach.c +++ b/lib/libc_r/uthread/uthread_detach.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_detach.c,v 1.3 1999/01/06 05:29:23 d Exp $ + * $OpenBSD: uthread_detach.c,v 1.4 1999/05/26 00:18:23 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE @@ -52,11 +52,24 @@ pthread_detach(pthread_t pthread) /* Flag the thread as detached: */ pthread->attr.flags |= PTHREAD_DETACHED; + /* + * Guard against preemption by a scheduling signal. + * A change of thread state modifies the waiting + * and priority queues. 
+ */ + _thread_kern_sched_defer(); + /* Enter a loop to bring all threads off the join queue: */ while ((next_thread = _thread_queue_deq(&pthread->join_queue)) != NULL) { /* Make the thread run: */ PTHREAD_NEW_STATE(next_thread,PS_RUNNING); } + + /* + * Reenable preemption and yield if a scheduling signal + * occurred while in the critical region. + */ + _thread_kern_sched_undefer(); } else /* Return an error: */ rval = EINVAL; diff --git a/lib/libc_r/uthread/uthread_execve.c b/lib/libc_r/uthread/uthread_execve.c index a223527d635..ba87ccf8d98 100644 --- a/lib/libc_r/uthread/uthread_execve.c +++ b/lib/libc_r/uthread/uthread_execve.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_execve.c,v 1.3 1999/01/06 05:29:23 d Exp $ + * $OpenBSD: uthread_execve.c,v 1.4 1999/05/26 00:18:23 d Exp $ */ #include <errno.h> #include <fcntl.h> @@ -53,7 +53,7 @@ execve(const char *name, char *const * argv, char *const * envp) itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; - setitimer(ITIMER_VIRTUAL, &itimer, NULL); + setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ _thread_sys_close(_thread_kern_pipe[0]); diff --git a/lib/libc_r/uthread/uthread_exit.c b/lib/libc_r/uthread/uthread_exit.c index 780e99cb4e1..fdd0bdd4d7c 100644 --- a/lib/libc_r/uthread/uthread_exit.c +++ b/lib/libc_r/uthread/uthread_exit.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_exit.c,v 1.7 1999/01/06 05:29:23 d Exp $ + * $OpenBSD: uthread_exit.c,v 1.8 1999/05/26 00:18:23 d Exp $ */ #include <errno.h> #include <unistd.h> @@ -52,7 +52,7 @@ void _exit(int status) itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; - setitimer(ITIMER_VIRTUAL, &itimer, NULL); + setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ _thread_sys_close(_thread_kern_pipe[0]); @@ -126,8 +126,8 @@ _thread_exit(const char *fname, int lineno, const char *string) /* Write a dump of the current thread status: */ _thread_dump_info(); - /* Force this process to exit: */ - _exit(1); + /* Try to dump a core file: */ + abort(); } void @@ -160,6 +160,13 @@ pthread_exit(void *status) /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } + + /* + * Guard against preemption by a scheduling signal. A change of + * thread state modifies the waiting and priority queues. + */ + _thread_kern_sched_defer(); + /* Check if there are any threads joined to this one: */ while ((pthread = _thread_queue_deq(&(_thread_run->join_queue))) != NULL) { /* Wake the joined thread and let it detach this thread: */ @@ -167,6 +174,12 @@ pthread_exit(void *status) } /* + * Reenable preemption and yield if a scheduling signal + * occurred while in the critical region. + */ + _thread_kern_sched_undefer(); + + /* * Lock the garbage collector mutex to ensure that the garbage * collector is not using the dead thread list. */ @@ -184,12 +197,18 @@ pthread_exit(void *status) if (pthread_cond_signal(&_gc_cond) != 0) PANIC("Cannot signal gc cond"); + /* + * Mark the thread as dead so it will not return if it + * gets context switched out when the mutex is unlocked. + */ + PTHREAD_SET_STATE(_thread_run, PS_DEAD); + /* Unlock the garbage collector mutex: */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); - /* This thread will never be re-scheduled. 
*/
-    _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__);
+    /* This thread will never be re-scheduled. */
+    _thread_kern_sched(NULL);
     /* This point should not be reached. */
     PANIC("Dead thread has resumed");
diff --git a/lib/libc_r/uthread/uthread_fd.c b/lib/libc_r/uthread/uthread_fd.c
index c46615b0270..0b51f75b737 100644
--- a/lib/libc_r/uthread/uthread_fd.c
+++ b/lib/libc_r/uthread/uthread_fd.c
@@ -29,8 +29,8 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: uthread_fd.c,v 1.9 1998/09/13 15:33:42 dt Exp $
- * $OpenBSD: uthread_fd.c,v 1.4 1999/01/10 23:09:36 d Exp $
+ * $FreeBSD: uthread_fd.c,v 1.10 1999/03/23 05:07:55 jb Exp $
+ * $OpenBSD: uthread_fd.c,v 1.5 1999/05/26 00:18:23 d Exp $
  *
  */
 #include <errno.h>
@@ -200,7 +200,7 @@ _thread_fd_unlock(int fd, int lock_type)
             } else {
                 /*
                  * Set the state of the new owner of
-                 * the thread to running: 
+                 * the thread to running:
                  */
                 PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING);
diff --git a/lib/libc_r/uthread/uthread_fork.c b/lib/libc_r/uthread/uthread_fork.c
index 913e79e0bdd..9dc90825233 100644
--- a/lib/libc_r/uthread/uthread_fork.c
+++ b/lib/libc_r/uthread/uthread_fork.c
@@ -29,7 +29,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $OpenBSD: uthread_fork.c,v 1.4 1999/01/17 23:46:26 d Exp $
+ * $OpenBSD: uthread_fork.c,v 1.5 1999/05/26 00:18:23 d Exp $
 */
 #include <errno.h>
 #include <string.h>
@@ -43,7 +43,7 @@
 pid_t
 fork(void)
 {
-    int     flags;
+    int     i, flags;
     pid_t   ret;
     pthread_t   pthread;
     pthread_t   pthread_next;
@@ -95,6 +95,11 @@ fork(void)
     else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) {
         /* Abort this application: */
         abort();
+    /* Initialize the ready queue: */
+    } else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY,
+        PTHREAD_MAX_PRIORITY) != 0) {
+        /* Abort this application: */
+        PANIC("Cannot allocate priority ready queue.");
     } else {
         /* Point to the first thread in the list: */
         pthread = _thread_link_list;
@@ -126,6 +131,34 @@ fork(void)
             /* Point to the next thread: */
             pthread = pthread_next;
         }
+
+        /* Re-init the waiting queues. */
+        TAILQ_INIT(&_waitingq);
+
+        /* Initialize the scheduling switch hook routine: */
+        _sched_switch_hook = NULL;
+
+        /* Clear out any locks in the file descriptor table: */
+        for (i = 0; i < _thread_dtablesize; i++) {
+            if (_thread_fd_table[i] != NULL) {
+                /* Initialise the file locks: */
+                _SPINUNLOCK(&_thread_fd_table[i]->lock);
+                _thread_fd_table[i]->r_owner = NULL;
+                _thread_fd_table[i]->w_owner = NULL;
+                _thread_fd_table[i]->r_fname = NULL;
+                _thread_fd_table[i]->w_fname = NULL;
+                _thread_fd_table[i]->r_lineno = 0;
+                _thread_fd_table[i]->w_lineno = 0;
+                _thread_fd_table[i]->r_lockcount = 0;
+                _thread_fd_table[i]->w_lockcount = 0;
+
+                /* Initialise the read/write queues: */
+                _thread_queue_init(&_thread_fd_table[i]->r_queue);
+                _thread_queue_init(&_thread_fd_table[i]->w_queue);
+            }
+        }
+
+        /* Initialise the atfork handler: */
         _thread_atfork(PTHREAD_ATFORK_CHILD);
     }
 }
diff --git a/lib/libc_r/uthread/uthread_gc.c b/lib/libc_r/uthread/uthread_gc.c
index 56fbbb79e12..1a2790c74bd 100644
--- a/lib/libc_r/uthread/uthread_gc.c
+++ b/lib/libc_r/uthread/uthread_gc.c
@@ -30,7 +30,7 @@
  * SUCH DAMAGE.
  *
  * $FreeBSD: uthread_gc.c,v 1.2 1998/09/30 19:17:51 dt Exp $
- * $OpenBSD: uthread_gc.c,v 1.4 1999/02/01 08:24:42 d Exp $
+ * $OpenBSD: uthread_gc.c,v 1.5 1999/05/26 00:18:24 d Exp $
  *
  * Garbage collector thread. Frees memory allocated for dead threads.
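Aside (not part of the patch): the pthread_create(), pthread_detach() and pthread_exit() hunks above all bracket their queue manipulation with the new _thread_kern_sched_defer()/_thread_kern_sched_undefer() pair, so a scheduling signal cannot preempt a thread while the waiting and priority queues are being modified. A minimal sketch of that usage pattern, assuming the private libc_r interfaces the patch uses; the helper name wake_all_joiners() is made up purely for illustration:

    #include <pthread.h>
    #include "pthread_private.h"    /* private libc_r types and macros used below */

    static void
    wake_all_joiners(pthread_t pthread)
    {
        pthread_t waiter;

        /* Keep the scheduling signal from preempting the queue changes: */
        _thread_kern_sched_defer();

        /* Each state change below modifies the waiting and priority queues: */
        while ((waiter = _thread_queue_deq(&pthread->join_queue)) != NULL)
            PTHREAD_NEW_STATE(waiter, PS_RUNNING);

        /* Re-enable preemption; may yield if a signal arrived meanwhile: */
        _thread_kern_sched_undefer();
    }

Because the deferral count nests, an undefer call only re-checks the queues and yields once the outermost critical region exits, as uthread_kern.c's _thread_kern_sched_undefer() below shows.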
* @@ -52,18 +52,20 @@ _thread_gc(pthread_addr_t arg) int f_debug; int f_done = 0; int ret; + sigset_t mask; pthread_t pthread; pthread_t pthread_cln; pthread_t pthread_nxt; pthread_t pthread_prv; struct timespec abstime; void *p_stack; - sigset_t ss; - /* Don't handle signals in this thread */ - sigfillset(&ss); - if (ret = pthread_sigmask(SIG_BLOCK, &ss, NULL)) - PANIC("Can't block signals in GC thread"); + /* Block all signals */ + sigfillset (&mask); + sigprocmask (SIG_BLOCK, &mask, NULL); + + /* Mark this thread as a library thread (not a user thread). */ + _thread_run->flags |= PTHREAD_FLAGS_PRIVATE; /* Set a debug flag based on an environment variable. */ f_debug = (getenv("LIBC_R_DEBUG") != NULL); diff --git a/lib/libc_r/uthread/uthread_getprio.c b/lib/libc_r/uthread/uthread_getprio.c index b304013efa8..074bff3e514 100644 --- a/lib/libc_r/uthread/uthread_getprio.c +++ b/lib/libc_r/uthread/uthread_getprio.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_getprio.c,v 1.3 1999/05/26 00:18:24 d Exp $ */ /* * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,7 +30,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_getprio.c,v 1.2 1999/01/06 05:29:24 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE @@ -39,12 +39,11 @@ int pthread_getprio(pthread_t pthread) { - int ret; + int policy, ret; + struct sched_param param; - /* Find the thread in the list of active threads: */ - if ((ret = _find_thread(pthread)) == 0) - /* Get the thread priority: */ - ret = pthread->pthread_priority; + if ((ret = pthread_getschedparam(pthread, &policy, ¶m)) == 0) + ret = param.sched_priority; else { /* Invalid thread: */ errno = ret; diff --git a/lib/libc_r/uthread/uthread_getschedparam.c b/lib/libc_r/uthread/uthread_getschedparam.c new file mode 100644 index 00000000000..7905c1960f4 --- /dev/null +++ b/lib/libc_r/uthread/uthread_getschedparam.c @@ -0,0 +1,57 @@ +/* $OpenBSD: uthread_getschedparam.c,v 1.1 1999/05/26 00:18:24 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. 
Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param) +{ + int ret; + + if ((param == NULL) || (policy == NULL)) + /* Return an invalid argument error: */ + ret = EINVAL; + + /* Find the thread in the list of active threads: */ + else if ((ret = _find_thread(pthread)) == 0) { + /* Return the threads base priority and scheduling policy: */ + param->sched_priority = pthread->base_priority; + *policy = pthread->attr.sched_policy; + } + + return(ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_info.c b/lib/libc_r/uthread/uthread_info.c index 3e682752474..870a283701d 100644 --- a/lib/libc_r/uthread/uthread_info.c +++ b/lib/libc_r/uthread/uthread_info.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_info.c,v 1.7 1999/02/01 08:23:46 d Exp $ + * $OpenBSD: uthread_info.c,v 1.8 1999/05/26 00:18:24 d Exp $ */ #include <stdio.h> #include <fcntl.h> @@ -64,9 +64,11 @@ static const struct s_thread_info thread_info[] = { {PS_WAIT_WAIT , "wait_wait"}, {PS_SIGSUSPEND , "sigsuspend"}, {PS_SIGWAIT , "sigwait"}, + {PS_SPINBLOCK , "spinblock"}, {PS_JOIN , "join"}, {PS_SUSPENDED , "suspended"}, {PS_DEAD , "dead"}, + {PS_DEADLOCK , "deadlock"}, {PS_STATE_MAX , "xxx"} }; @@ -151,7 +153,7 @@ _thread_dump_info(void) (void *)pthread, (pthread == _thread_run) ? '*' : ' ', state, - pthread->pthread_priority, + pthread->base_priority, (pthread->flags & PTHREAD_EXITING) ? 'E' : (pthread->flags & PTHREAD_CANCELLING) ? 'C' : (pthread->flags & PTHREAD_AT_CANCEL_POINT) ? 'c' : ' ', @@ -207,7 +209,7 @@ _thread_dump_info(void) snprintf(s, sizeof(s), "%s owner %p\n", info_lead, - (*pthread->data.mutex)->m_owner); + NULL /* (*pthread->data.mutex)->m_owner*/); _thread_sys_write(fd, s, strlen(s)); } break; diff --git a/lib/libc_r/uthread/uthread_init.c b/lib/libc_r/uthread/uthread_init.c index 0cd5bd376c4..281808e75f4 100644 --- a/lib/libc_r/uthread/uthread_init.c +++ b/lib/libc_r/uthread/uthread_init.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_init.c,v 1.9 1999/05/26 00:18:24 d Exp $ */ /* * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au> * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,7 +30,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_init.c,v 1.8 1999/01/17 23:57:27 d Exp $ */ #include <errno.h> @@ -54,6 +54,7 @@ static struct pthread kern_thread; struct pthread * volatile _thread_kern_threadp = &kern_thread; struct pthread * volatile _thread_run = &kern_thread; +struct pthread * volatile _last_user_thread = &kern_thread; struct pthread * volatile _thread_single = NULL; struct pthread * volatile _thread_link_list = NULL; int _thread_kern_pipe[2] = { -1, -1 }; @@ -63,7 +64,9 @@ struct timeval kern_inc_prio_time = { 0, 0 }; struct pthread * volatile _thread_dead = NULL; struct pthread * _thread_initial = NULL; struct pthread_attr pthread_attr_default = { - SCHED_RR, /* schedparam_policy */ + SCHED_RR, /* sched_policy */ + 0, /* sched_inherit */ + TIMESLICE_USEC, /* sched_interval */ PTHREAD_DEFAULT_PRIORITY, /* prio */ PTHREAD_CREATE_RUNNING, /* suspend */ PTHREAD_CREATE_JOINABLE, /* flags */ @@ -73,7 +76,9 @@ struct pthread_attr pthread_attr_default = { PTHREAD_STACK_DEFAULT /* stacksize_attr */ }; struct pthread_mutex_attr pthread_mutexattr_default = { - MUTEX_TYPE_FAST, /* m_type */ + PTHREAD_MUTEX_DEFAULT, /* m_type */ + PTHREAD_PRIO_NONE, /* m_protocol */ + 0, /* m_ceiling */ 0 /* m_flags */ }; struct pthread_cond_attr pthread_condattr_default = { @@ -87,6 +92,12 @@ pthread_mutex_t _gc_mutex = NULL; pthread_cond_t _gc_cond = NULL; struct sigaction _thread_sigact[NSIG]; +const int dtablecount = 4096/sizeof(struct fd_table_entry); +pq_queue_t _readyq; +_waitingq_t _waitingq; +volatile int _waitingq_check_reqd = 0; +pthread_switch_routine_t _sched_switch_hook = NULL; + /* Automatic init module. 
*/ extern int _thread_autoinit_dummy_decl; @@ -189,7 +200,13 @@ _thread_init(void) /* Make the write pipe non-blocking: */ else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ - PANIC("Cannot make kernel write pipe non-blocking"); + PANIC("Cannot get kernel write pipe flags"); + } + /* Initialize the ready queue: */ + else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) +!= 0) { + /* Abort this application: */ + PANIC("Cannot allocate priority ready queue."); } /* Allocate memory for the thread structure of the initial thread: */ else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) { @@ -202,13 +219,32 @@ _thread_init(void) /* Zero the global kernel thread structure: */ memset(_thread_kern_threadp, 0, sizeof(struct pthread)); _thread_kern_threadp->magic = PTHREAD_MAGIC; + + /* Set the kernel's name for the debugger: */ pthread_set_name_np(_thread_kern_threadp, "kern"); + /* The kernel thread is a library thread: */ + _thread_kern_threadp->flags = PTHREAD_FLAGS_PRIVATE; + + /* Initialize the waiting queue: */ + TAILQ_INIT(&_waitingq); + + /* Initialize the scheduling switch hook routine: */ + _sched_switch_hook = NULL; + /* Zero the initial thread: */ memset(_thread_initial, 0, sizeof(struct pthread)); + /* + * Write a magic value to the thread structure + * to help identify valid ones: + */ + _thread_initial->magic = PTHREAD_MAGIC; + /* Default the priority of the initial thread: */ - _thread_initial->pthread_priority = PTHREAD_DEFAULT_PRIORITY; + _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY; + _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY; + _thread_initial->inherited_priority = 0; /* Initialise the state of the initial thread: */ _thread_initial->state = PS_RUNNING; @@ -216,7 +252,13 @@ _thread_init(void) /* Initialise the queue: */ _thread_queue_init(&(_thread_initial->join_queue)); + /* Initialize the owned mutex queue and count: */ + TAILQ_INIT(&(_thread_initial->mutexq)); + _thread_initial->priority_mutex_count = 0; + /* Initialise the rest of the fields: */ + _thread_initial->sched_defer_count = 0; + _thread_initial->yield_on_sched_undefer = 0; _thread_initial->specific_data = NULL; _thread_initial->cleanup = NULL; _thread_initial->queue = NULL; @@ -226,7 +268,6 @@ _thread_init(void) _thread_initial->error = 0; _thread_initial->cancelstate = PTHREAD_CANCEL_ENABLE; _thread_initial->canceltype = PTHREAD_CANCEL_DEFERRED; - _thread_initial->magic = PTHREAD_MAGIC; pthread_set_name_np(_thread_initial, "init"); _SPINUNLOCK(&_thread_initial->lock); _thread_link_list = _thread_initial; @@ -259,9 +300,9 @@ _thread_init(void) * signals that the user-thread kernel needs. Actually * SIGINFO isn't really needed, but it is nice to have. */ - if (_thread_sys_sigaction(SIGVTALRM, &act, NULL) != 0 || - _thread_sys_sigaction(SIGINFO , &act, NULL) != 0 || - _thread_sys_sigaction(SIGCHLD , &act, NULL) != 0) { + if (_thread_sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 || + _thread_sys_sigaction(SIGINFO, &act, NULL) != 0 || + _thread_sys_sigaction(SIGCHLD, &act, NULL) != 0) { /* * Abort this process if signal initialisation fails: */ @@ -309,6 +350,8 @@ _thread_init(void) pthread_cond_init(&_gc_cond,NULL) != 0) PANIC("Failed to initialise garbage collector mutex or condvar"); + gettimeofday(&kern_inc_prio_time, NULL); + /* Pull in automatic thread unit. 
*/ _thread_autoinit_dummy_decl = 1; diff --git a/lib/libc_r/uthread/uthread_join.c b/lib/libc_r/uthread/uthread_join.c index 52baee31c33..ab7b7860ecc 100644 --- a/lib/libc_r/uthread/uthread_join.c +++ b/lib/libc_r/uthread/uthread_join.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_join.c,v 1.3 1999/01/17 23:57:27 d Exp $ + * $OpenBSD: uthread_join.c,v 1.4 1999/05/26 00:18:24 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE @@ -42,6 +42,7 @@ pthread_join(pthread_t pthread, void **thread_return) int ret = 0; pthread_t pthread1 = NULL; + /* This operation is a cancel point: */ _thread_enter_cancellation_point(); /* Check if the caller has specified an invalid thread: */ @@ -99,6 +100,7 @@ pthread_join(pthread_t pthread, void **thread_return) /* Return the thread's return value: */ *thread_return = pthread->ret; + /* This operation was a cancel point: */ _thread_leave_cancellation_point(); /* Return the completion status: */ diff --git a/lib/libc_r/uthread/uthread_kern.c b/lib/libc_r/uthread/uthread_kern.c index 88fa55e156c..78223f4ad10 100644 --- a/lib/libc_r/uthread/uthread_kern.c +++ b/lib/libc_r/uthread/uthread_kern.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_kern.c,v 1.8 1999/05/26 00:18:24 d Exp $ */ /* * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au> * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,8 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $FreeBSD: uthread_kern.c,v 1.15 1998/11/15 09:58:26 jb Exp $ - * $OpenBSD: uthread_kern.c,v 1.7 1999/02/01 08:23:46 d Exp $ + * $FreeBSD: uthread_kern.c,v 1.18 1999/05/08 07:50:05 jasone Exp $ * */ #include <errno.h> @@ -43,7 +43,9 @@ #include <sys/stat.h> #include <sys/time.h> #include <sys/socket.h> +#ifdef _THREAD_RUSAGE #include <sys/resource.h> +#endif #include <sys/uio.h> #include <sys/syscall.h> #include <fcntl.h> @@ -55,13 +57,15 @@ static void _thread_kern_select(int wait_reqd); +static inline void +thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in); + void _thread_kern_sched(struct sigcontext * scp) { - int prio = -1; pthread_t pthread; pthread_t pthread_h = NULL; - pthread_t pthread_s = NULL; + pthread_t last_thread = NULL; struct itimerval itimer; struct timespec ts; struct timespec ts1; @@ -103,6 +107,11 @@ _thread_kern_sched(struct sigcontext * scp) */ _thread_kern_in_sched = 0; + if (_sched_switch_hook != NULL) { + /* Run the installed switch hook: */ + thread_run_switch_hook(_last_user_thread, _thread_run); + } + if (!(_thread_run->flags & PTHREAD_AT_CANCEL_POINT) && (_thread_run->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS)) { /* @@ -122,11 +131,16 @@ _thread_kern_sched(struct sigcontext * scp) * dispatch any that aren't blocked: */ _dispatch_signals(); + return; } else /* Flag the jump buffer was the last state saved: */ _thread_run->sig_saved = 0; + /* If the currently running thread is a user thread, save it: */ + if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0) + _last_user_thread = _thread_run; + /* Save errno. */ _thread_run->error = errno; @@ -143,7 +157,7 @@ _thread_kern_sched(struct sigcontext * scp) #endif /* _THREAD_RUSAGE */ /* - * Enter a the scheduling loop that finds the next thread that is + * Enter a scheduling loop that finds the next thread that is * ready to run. This loop completes when there are no more threads * in the global list or when a thread has its state restored by * either a sigreturn (if the state was saved as a sigcontext) or a @@ -161,12 +175,48 @@ _thread_kern_sched(struct sigcontext * scp) _thread_kern_select(0); /* - * Enter a loop to look for sleeping threads that are ready: + * Define the maximum time before a scheduling signal + * is required: + */ + itimer.it_value.tv_sec = 0; + itimer.it_value.tv_usec = TIMESLICE_USEC; + + /* + * The interval timer is not reloaded when it + * times out. The interval time needs to be + * calculated every time. + */ + itimer.it_interval.tv_sec = 0; + itimer.it_interval.tv_usec = 0; + + /* + * Enter a loop to look for sleeping threads that are ready + * or timedout. While we're at it, also find the smallest + * timeout value for threads waiting for a time. */ - for (pthread = _thread_link_list; pthread != NULL; - pthread = pthread->nxt) { + _waitingq_check_reqd = 0; /* reset flag before loop */ + TAILQ_FOREACH(pthread, &_waitingq, pqe) { + /* Check if this thread is ready: */ + if (pthread->state == PS_RUNNING) { + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); + } + + /* + * Check if this thread is blocked by an + * atomic lock: + */ + else if (pthread->state == PS_SPINBLOCK) { + /* + * If the lock is available, let + * the thread run. 
+ */ + if (pthread->data.spinlock->access_lock == 0) { + PTHREAD_NEW_STATE(pthread,PS_RUNNING); + } + /* Check if this thread is to timeout: */ - if (pthread->state == PS_COND_WAIT || + } else if (pthread->state == PS_COND_WAIT || pthread->state == PS_SLEEP_WAIT || pthread->state == PS_FDR_WAIT || pthread->state == PS_FDW_WAIT || @@ -190,9 +240,9 @@ _thread_kern_sched(struct sigcontext * scp) */ if (pthread->state == PS_SELECT_WAIT) { /* - * The select has timed out, - * so zero the file - * descriptor sets: + * The select has timed out, so + * zero the file descriptor + * sets: */ FD_ZERO(&pthread->data.select_data->readfds); FD_ZERO(&pthread->data.select_data->writefds); @@ -216,13 +266,72 @@ _thread_kern_sched(struct sigcontext * scp) * it to be restarted: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); + } else { + /* + * Calculate the time until this thread + * is ready, allowing for the clock + * resolution: + */ + ts1.tv_sec = pthread->wakeup_time.tv_sec + - ts.tv_sec; + ts1.tv_nsec = pthread->wakeup_time.tv_nsec + - ts.tv_nsec + CLOCK_RES_NSEC; + + /* + * Check for underflow of the + * nanosecond field: + */ + if (ts1.tv_nsec < 0) { + /* + * Allow for the underflow + * of the nanosecond field: + */ + ts1.tv_sec--; + ts1.tv_nsec += 1000000000; + } + /* + * Check for overflow of the nanosecond + * field: + */ + if (ts1.tv_nsec >= 1000000000) { + /* + * Allow for the overflow of + * the nanosecond field: + */ + ts1.tv_sec++; + ts1.tv_nsec -= 1000000000; + } + /* + * Convert the timespec structure + * to a timeval structure: + */ + TIMESPEC_TO_TIMEVAL(&tv1, &ts1); + + /* + * Check if the thread will be ready + * sooner than the earliest ones found + * so far: + */ + if (timercmp(&tv1, &itimer.it_value, <)) { + /* + * Update the time value: + */ + itimer.it_value.tv_sec = tv1.tv_sec; + itimer.it_value.tv_usec = tv1.tv_usec; + } } + } } /* Check if there is a current thread: */ if (_thread_run != _thread_kern_threadp) { /* + * This thread no longer needs to yield the CPU. 
+ */ + _thread_run->yield_on_sched_undefer = 0; + + /* * Save the current time as the time that the thread * became inactive: */ @@ -231,202 +340,64 @@ _thread_kern_sched(struct sigcontext * scp) /* * Accumulate the number of microseconds that this - * thread has run for: + * thread has run for: */ - if (_thread_run->slice_usec != -1) { - if (timerisset(&_thread_run->last_active)) { - struct timeval s; - - timersub(&_thread_run->last_inactive, - &_thread_run->last_active, - &s); - _thread_run->slice_usec = - s.tv_usec + 1000000 * s.tv_sec; - if (_thread_run->slice_usec < 0) - PANIC("slice_usec"); - } else + if ((_thread_run->slice_usec != -1) && + (_thread_run->attr.sched_policy != SCHED_FIFO)) { + _thread_run->slice_usec += + (_thread_run->last_inactive.tv_sec - + _thread_run->last_active.tv_sec) * 1000000 + + _thread_run->last_inactive.tv_usec - + _thread_run->last_active.tv_usec; + + /* Check for time quantum exceeded: */ + if (_thread_run->slice_usec > TIMESLICE_USEC) _thread_run->slice_usec = -1; - } - - /* - * Check if this thread has reached its allocated - * time slice period: - */ - if (_thread_run->slice_usec > TIMESLICE_USEC) { - /* - * Flag the allocated time slice period as - * up: - */ - _thread_run->slice_usec = -1; } - } - /* Check if an incremental priority update is required: */ - if (((tv.tv_sec - kern_inc_prio_time.tv_sec) * 1000000 + - tv.tv_usec - kern_inc_prio_time.tv_usec) > INC_PRIO_USEC) { - /* - * Enter a loop to look for run-enabled threads that - * have not run since the last time that an - * incremental priority update was performed: - */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { - /* Check if this thread is unable to run: */ - if (pthread->state != PS_RUNNING) { - } - /* - * Check if the last time that this thread - * was run (as indicated by the last time it - * became inactive) is before the time that - * the last incremental priority check was - * made: - */ - else if (timercmp(&pthread->last_inactive, &kern_inc_prio_time, <)) { + if (_thread_run->state == PS_RUNNING) { + if (_thread_run->slice_usec == -1) { /* - * Increment the incremental priority - * for this thread in the hope that - * it will eventually get a chance to - * run: + * The thread exceeded its time + * quantum or it yielded the CPU; + * place it at the tail of the + * queue for its priority. */ - (pthread->inc_prio)++; + PTHREAD_PRIOQ_INSERT_TAIL(_thread_run); + } else { + /* + * The thread hasn't exceeded its + * interval. Place it at the head + * of the queue for its priority. + */ + PTHREAD_PRIOQ_INSERT_HEAD(_thread_run); } } - - /* Save the new incremental priority update time: */ - kern_inc_prio_time.tv_sec = tv.tv_sec; - kern_inc_prio_time.tv_usec = tv.tv_usec; - } - /* - * Enter a loop to look for the first thread of the highest - * priority that is ready to run: - */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { - /* Check if the current thread is unable to run: */ - if (pthread->state != PS_RUNNING) { - } - /* - * Check if no run-enabled thread has been seen or if - * the current thread has a priority higher than the - * highest seen so far: - */ - else if (pthread_h == NULL || (pthread->pthread_priority + pthread->inc_prio) > prio) { - /* - * Save this thread as the highest priority - * thread seen so far: - */ - pthread_h = pthread; - prio = pthread->pthread_priority + pthread->inc_prio; - } - } - - /* - * Enter a loop to look for a thread that: 1. Is run-enabled. - * 2. Has the required agregate priority. 3. 
Has not been - * allocated its allocated time slice. 4. Became inactive - * least recently. - */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { - /* Check if the current thread is unable to run: */ - if (pthread->state != PS_RUNNING) { - /* Ignore threads that are not ready to run. */ - } - - /* - * Check if the current thread as an agregate - * priority not equal to the highest priority found - * above: - */ - else if ((pthread->pthread_priority + pthread->inc_prio) != prio) { + else if (_thread_run->state == PS_DEAD) { /* - * Ignore threads which have lower agregate - * priority. + * Don't add dead threads to the waiting + * queue, because when they're reaped, it + * will corrupt the queue. */ } - - /* - * Check if the current thread reached its time slice - * allocation last time it ran (or if it has not run - * yet): - */ - else if (pthread->slice_usec == -1) { - } - - /* - * Check if an eligible thread has not been found - * yet, or if the current thread has an inactive time - * earlier than the last one seen: - */ - else if (pthread_s == NULL || timercmp(&pthread->last_inactive, &tv1, <)) { + else { /* - * Save the pointer to the current thread as - * the most eligible thread seen so far: + * This thread has changed state and needs + * to be placed in the waiting queue. */ - pthread_s = pthread; + PTHREAD_WAITQ_INSERT(_thread_run); - /* - * Save the time that the selected thread - * became inactive: - */ - tv1.tv_sec = pthread->last_inactive.tv_sec; - tv1.tv_usec = pthread->last_inactive.tv_usec; + /* Restart the time slice: */ + _thread_run->slice_usec = -1; } } /* - * Check if no thread was selected according to incomplete - * time slice allocation: + * Get the highest priority thread in the ready queue. */ - if (pthread_s == NULL) { - /* - * Enter a loop to look for any other thread that: 1. - * Is run-enabled. 2. Has the required agregate - * priority. 3. Became inactive least recently. - */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { - /* - * Check if the current thread is unable to - * run: - */ - if (pthread->state != PS_RUNNING) { - /* - * Ignore threads that are not ready - * to run. - */ - } - /* - * Check if the current thread as an agregate - * priority not equal to the highest priority - * found above: - */ - else if ((pthread->pthread_priority + pthread->inc_prio) != prio) { - /* - * Ignore threads which have lower - * agregate priority. - */ - } - /* - * Check if an eligible thread has not been - * found yet, or if the current thread has an - * inactive time earlier than the last one - * seen: - */ - else if (pthread_s == NULL || timercmp(&pthread->last_inactive, &tv1, <)) { - /* - * Save the pointer to the current - * thread as the most eligible thread - * seen so far: - */ - pthread_s = pthread; + pthread_h = PTHREAD_PRIOQ_FIRST; - /* - * Save the time that the selected - * thread became inactive: - */ - tv1.tv_sec = pthread->last_inactive.tv_sec; - tv1.tv_usec = pthread->last_inactive.tv_usec; - } - } - } /* Check if there are no threads ready to run: */ - if (pthread_s == NULL) { + if (pthread_h == NULL) { /* * Lock the pthread kernel by changing the pointer to * the running thread to point to the global kernel @@ -441,7 +412,10 @@ _thread_kern_sched(struct sigcontext * scp) _thread_kern_select(1); } else { /* Make the selected thread the current thread: */ - _thread_run = pthread_s; + _thread_run = pthread_h; + + /* Remove the thread from the ready queue. 
*/ + PTHREAD_PRIOQ_REMOVE(_thread_run); /* * Save the current time as the time that the thread @@ -459,116 +433,19 @@ _thread_kern_sched(struct sigcontext * scp) /* Reset the accumulated time slice period: */ _thread_run->slice_usec = 0; } - /* - * Reset the incremental priority now that this - * thread has been given the chance to run: - */ - _thread_run->inc_prio = 0; /* Check if there is more than one thread: */ if (_thread_run != _thread_link_list || _thread_run->nxt != NULL) { /* - * Define the maximum time before a SIGVTALRM - * is required: - */ - itimer.it_value.tv_sec = 0; - itimer.it_value.tv_usec = TIMESLICE_USEC; - - /* - * The interval timer is not reloaded when it - * times out. The interval time needs to be - * calculated every time. - */ - timerclear(&itimer.it_interval); - - /* - * Enter a loop to look for threads waiting - * for a time: - */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { - /* - * Check if this thread is to - * timeout: - */ - if (pthread->state == PS_COND_WAIT || - pthread->state == PS_SLEEP_WAIT || - pthread->state == PS_FDR_WAIT || - pthread->state == PS_FDW_WAIT || - pthread->state == PS_SELECT_WAIT) { - /* - * Check if this thread is to - * wait forever: - */ - if (pthread->wakeup_time.tv_sec == -1) { - } - /* - * Check if this thread is to - * wakeup immediately: - */ - else if (pthread->wakeup_time.tv_sec == 0 && - pthread->wakeup_time.tv_nsec == 0) { - } - /* - * Check if the current time - * is after the wakeup time: - */ - else if (timespeccmp(&ts, - &pthread->wakeup_time, > )){ - } else { - /* - * Calculate the time - * until this thread - * is ready, allowing - * for the clock - * resolution: - */ - struct timespec - clock_res - = {0,CLOCK_RES_NSEC}; - timespecsub( - &pthread->wakeup_time, - &ts, &ts1); - timespecadd( - &ts1, &clock_res, - &ts1); - /* - * Convert the - * timespec structure - * to a timeval - * structure: - */ - TIMESPEC_TO_TIMEVAL(&tv, &ts1); - - /* - * Check if the - * thread will be - * ready sooner than - * the earliest one - * found so far: - */ - if (timercmp(&tv, &itimer.it_value, <)) { - /* - * Update the - * time - * value: - */ - itimer.it_value.tv_sec = tv.tv_sec; - itimer.it_value.tv_usec = tv.tv_usec; - } - } - } - } - - /* * Start the interval timer for the * calculated time interval: */ - if (setitimer(ITIMER_VIRTUAL, &itimer, NULL) != 0) { + if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) { /* * Cannot initialise the timer, so * abort this process: */ - PANIC("Cannot set virtual timer"); + PANIC("Cannot set scheduling timer"); } } @@ -585,7 +462,17 @@ _thread_kern_sched(struct sigcontext * scp) * Do a sigreturn to restart the thread that * was interrupted by a signal: */ - _thread_kern_in_sched = 0; + _thread_kern_in_sched = 0; + + /* + * If we had a context switch, run any + * installed switch hooks. 
+ */ + if ((_sched_switch_hook != NULL) && + (_last_user_thread != _thread_run)) { + thread_run_switch_hook(_last_user_thread, + _thread_run); + } _thread_sys_sigreturn(&_thread_run->saved_sigcontext); } else /* @@ -680,7 +567,8 @@ _thread_kern_select(int wait_reqd) * Enter a loop to process threads waiting on either file descriptors * or times: */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { + _waitingq_check_reqd = 0; /* reset flag before loop */ + TAILQ_FOREACH (pthread, &_waitingq, pqe) { /* Assume that this state does not time out: */ settimeout = 0; @@ -691,12 +579,12 @@ _thread_kern_select(int wait_reqd) * operations or timeouts: */ case PS_DEAD: + case PS_DEADLOCK: case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: - case PS_RUNNING: case PS_SIGTHREAD: case PS_SIGWAIT: case PS_STATE_MAX: @@ -706,6 +594,16 @@ _thread_kern_select(int wait_reqd) /* Nothing to do here. */ break; + case PS_RUNNING: + /* + * A signal occurred and made this thread ready + * while in the scheduler or while the scheduling + * queues were protected. + */ + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); + break; + /* File descriptor read wait: */ case PS_FDR_WAIT: /* Add the file descriptor to the read set: */ @@ -1012,16 +910,16 @@ _thread_kern_select(int wait_reqd) * descriptors that are flagged as available by the * _select syscall: */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { + TAILQ_FOREACH (pthread, &_waitingq, pqe) { /* Process according to thread state: */ switch (pthread->state) { /* * States which do not depend on file * descriptor I/O operations: */ - case PS_RUNNING: case PS_COND_WAIT: case PS_DEAD: + case PS_DEADLOCK: case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: @@ -1037,6 +935,15 @@ _thread_kern_select(int wait_reqd) /* Nothing to do here. */ break; + case PS_RUNNING: + /* + * A signal occurred and made this thread + * ready while in the scheduler. + */ + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); + break; + /* File descriptor read wait: */ case PS_FDR_WAIT: /* @@ -1050,6 +957,13 @@ _thread_kern_select(int wait_reqd) * is scheduled next: */ pthread->state = PS_RUNNING; + + /* + * Remove it from the waiting queue + * and add it to the ready queue: + */ + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); } break; @@ -1066,6 +980,13 @@ _thread_kern_select(int wait_reqd) * scheduled next: */ pthread->state = PS_RUNNING; + + /* + * Remove it from the waiting queue + * and add it to the ready queue: + */ + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); } break; @@ -1272,6 +1193,13 @@ _thread_kern_select(int wait_reqd) * thread to run: */ pthread->state = PS_RUNNING; + + /* + * Remove it from the waiting queue + * and add it to the ready queue: + */ + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); } break; } @@ -1323,4 +1251,80 @@ _thread_kern_set_timeout(struct timespec * timeout) } return; } + +void +_thread_kern_sched_defer(void) +{ + /* Allow scheduling deferral to be recursive. */ + _thread_run->sched_defer_count++; +} + +void +_thread_kern_sched_undefer(void) +{ + pthread_t pthread; + int need_resched = 0; + + /* + * Perform checks to yield only if we are about to undefer + * scheduling. 
+     */
+    if (_thread_run->sched_defer_count == 1) {
+        /*
+         * Check if the waiting queue needs to be examined for
+         * threads that are now ready:
+         */
+        while (_waitingq_check_reqd != 0) {
+            /* Clear the flag before checking the waiting queue: */
+            _waitingq_check_reqd = 0;
+
+            TAILQ_FOREACH(pthread, &_waitingq, pqe) {
+                if (pthread->state == PS_RUNNING) {
+                    PTHREAD_WAITQ_REMOVE(pthread);
+                    PTHREAD_PRIOQ_INSERT_TAIL(pthread);
+                }
+            }
+        }
+
+        /*
+         * We need to yield if a thread change of state caused a
+         * higher priority thread to become ready, or if a
+         * scheduling signal occurred while preemption was disabled.
+         */
+        if ((((pthread = PTHREAD_PRIOQ_FIRST) != NULL) &&
+            (pthread->active_priority > _thread_run->active_priority)) ||
+            (_thread_run->yield_on_sched_undefer != 0)) {
+            _thread_run->yield_on_sched_undefer = 0;
+            need_resched = 1;
+        }
+    }
+
+    if (_thread_run->sched_defer_count > 0) {
+        /* Decrement the scheduling deferral count. */
+        _thread_run->sched_defer_count--;
+
+        /* Yield the CPU if necessary: */
+        if (need_resched)
+            _thread_kern_sched(NULL);
+    }
+}
+
+static inline void
+thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
+{
+    pthread_t tid_out = thread_out;
+    pthread_t tid_in = thread_in;
+
+    if ((tid_out != NULL) &&
+        ((tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0))
+        tid_out = NULL;
+    if ((tid_in != NULL) &&
+        ((tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0))
+        tid_in = NULL;
+
+    if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
+        /* Run the scheduler switch hook: */
+        _sched_switch_hook(tid_out, tid_in);
+    }
+}
 #endif
diff --git a/lib/libc_r/uthread/uthread_kill.c b/lib/libc_r/uthread/uthread_kill.c
index 132dea74464..dc698ff1660 100644
--- a/lib/libc_r/uthread/uthread_kill.c
+++ b/lib/libc_r/uthread/uthread_kill.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: uthread_kill.c,v 1.6 1999/05/26 00:18:24 d Exp $ */
 /*
  * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
  * All rights reserved.
@@ -29,7 +30,6 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $OpenBSD: uthread_kill.c,v 1.5 1999/01/06 05:29:24 d Exp $
 */
 #include <errno.h>
 #include <signal.h>
@@ -53,6 +53,13 @@ pthread_kill(pthread_t pthread, int sig)
     /* Find the thread in the list of active threads: */
     else if ((ret = _find_thread(pthread)) == 0) {
+        /*
+         * Guard against preemption by a scheduling signal.
+         * A change of thread state modifies the waiting
+         * and priority queues.
+         */
+        _thread_kern_sched_defer();
+
         switch (pthread->state) {
         case PS_SIGSUSPEND:
             /*
@@ -109,6 +116,12 @@ pthread_kill(pthread_t pthread, int sig)
             sigaddset(&pthread->sigpend,sig);
             break;
         }
+
+        /*
+         * Reenable preemption and yield if a scheduling signal
+         * occurred while in the critical region.
+         */
+        _thread_kern_sched_undefer();
     }
 
     /* Return the completion status: */
diff --git a/lib/libc_r/uthread/uthread_mattr_init.c b/lib/libc_r/uthread/uthread_mattr_init.c
index d24958b111e..65b79e9539f 100644
--- a/lib/libc_r/uthread/uthread_mattr_init.c
+++ b/lib/libc_r/uthread/uthread_mattr_init.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: uthread_mattr_init.c,v 1.3 1999/05/26 00:18:24 d Exp $ */
 /*
  * Copyright (c) 1996 Jeffrey Hsu <hsu@freebsd.org>.
  * All rights reserved.
@@ -20,7 +21,7 @@
  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,7 +30,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_mattr_init.c,v 1.2 1999/01/06 05:29:24 d Exp $ */ #include <string.h> #include <stdlib.h> diff --git a/lib/libc_r/uthread/uthread_mattr_kind_np.c b/lib/libc_r/uthread/uthread_mattr_kind_np.c index 76311dfec2e..5f5d1b3a992 100644 --- a/lib/libc_r/uthread/uthread_mattr_kind_np.c +++ b/lib/libc_r/uthread/uthread_mattr_kind_np.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_mattr_kind_np.c,v 1.3 1999/01/06 05:29:24 d Exp $ + * $OpenBSD: uthread_mattr_kind_np.c,v 1.4 1999/05/26 00:18:25 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE @@ -68,8 +68,7 @@ pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type) { int ret; if (attr == NULL || *attr == NULL || type >= MUTEX_TYPE_MAX) { - errno = EINVAL; - ret = -1; + return EINVAL; } else { (*attr)->m_type = type; ret = 0; diff --git a/lib/libc_r/uthread/uthread_mutex.c b/lib/libc_r/uthread/uthread_mutex.c index 74127fde790..1968c953a27 100644 --- a/lib/libc_r/uthread/uthread_mutex.c +++ b/lib/libc_r/uthread/uthread_mutex.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_mutex.c,v 1.7 1999/05/26 00:18:25 d Exp $ */ /* * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,83 +30,120 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $OpenBSD: uthread_mutex.c,v 1.6 1999/01/06 05:29:25 d Exp $ - * */ #include <stdlib.h> #include <errno.h> #include <string.h> +#include <sys/param.h> +#include <sys/queue.h> #ifdef _THREAD_SAFE #include <pthread.h> #include "pthread_private.h" + +/* + * Prototypes + */ +static inline int mutex_self_trylock(pthread_mutex_t); +static inline int mutex_self_lock(pthread_mutex_t); +static inline int mutex_unlock_common(pthread_mutex_t *, int); +static void mutex_priority_adjust(pthread_mutex_t); +static void mutex_rescan_owned (pthread_t, pthread_mutex_t); +static inline pthread_t mutex_queue_deq(pthread_mutex_t); +static inline void mutex_queue_remove(pthread_mutex_t, pthread_t); +static inline void mutex_queue_enq(pthread_mutex_t, pthread_t); + + static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER; + int pthread_mutex_init(pthread_mutex_t * mutex, const pthread_mutexattr_t * mutex_attr) { - enum pthread_mutextype type; + enum pthread_mutextype type; + int protocol; + int ceiling; pthread_mutex_t pmutex; int ret = 0; - if (mutex == NULL) { + if (mutex == NULL) ret = EINVAL; - } else { - /* Check if default mutex attributes: */ - if (mutex_attr == NULL || *mutex_attr == NULL) - /* Default to a fast mutex: */ - type = PTHREAD_MUTEX_DEFAULT; - else if ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX) - /* Return an invalid argument error: */ - ret = EINVAL; - else - /* Use the requested mutex type: */ - type = (*mutex_attr)->m_type; - - /* Check no errors so far: */ - if (ret == 0) { - if ((pmutex = (pthread_mutex_t) - malloc(sizeof(struct pthread_mutex))) == NULL) - ret = ENOMEM; - else { - /* Reset the mutex flags: */ - pmutex->m_flags = 0; - - /* Process according to mutex type: */ - switch (type) { - /* Fast mutex: */ - case PTHREAD_MUTEX_DEFAULT: - case PTHREAD_MUTEX_NORMAL: - case PTHREAD_MUTEX_ERRORCHECK: - /* Nothing to do here. 
*/ - break; - - /* Counting mutex: */ - case PTHREAD_MUTEX_RECURSIVE: - /* Reset the mutex count: */ - pmutex->m_data.m_count = 0; - break; - - /* Trap invalid mutex types: */ - default: - /* Return an invalid argument error: */ - ret = EINVAL; - break; - } - if (ret == 0) { - /* Initialise the rest of the mutex: */ - _thread_queue_init(&pmutex->m_queue); - pmutex->m_flags |= MUTEX_FLAGS_INITED; - pmutex->m_owner = NULL; - pmutex->m_type = type; - _SPINUNLOCK(&pmutex->lock); - *mutex = pmutex; - } else { - free(pmutex); - *mutex = NULL; - } + /* Check if default mutex attributes: */ + else if (mutex_attr == NULL || *mutex_attr == NULL) { + /* Default to a (error checking) POSIX mutex: */ + type = PTHREAD_MUTEX_ERRORCHECK; + protocol = PTHREAD_PRIO_NONE; + ceiling = PTHREAD_MAX_PRIORITY; + } + + /* Check mutex type: */ + else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) || + ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX)) + /* Return an invalid argument error: */ + ret = EINVAL; + + /* Check mutex protocol: */ + else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) || + ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE)) + /* Return an invalid argument error: */ + ret = EINVAL; + + else { + /* Use the requested mutex type and protocol: */ + type = (*mutex_attr)->m_type; + protocol = (*mutex_attr)->m_protocol; + ceiling = (*mutex_attr)->m_ceiling; + } + + /* Check no errors so far: */ + if (ret == 0) { + if ((pmutex = (pthread_mutex_t) + malloc(sizeof(struct pthread_mutex))) == NULL) + ret = ENOMEM; + else { + /* Reset the mutex flags: */ + pmutex->m_flags = 0; + + /* Process according to mutex type: */ + switch (type) { + /* case PTHREAD_MUTEX_DEFAULT: */ + case PTHREAD_MUTEX_ERRORCHECK: + case PTHREAD_MUTEX_NORMAL: + /* Nothing to do here. */ + break; + + /* Single UNIX Spec 2 recursive mutex: */ + case PTHREAD_MUTEX_RECURSIVE: + /* Reset the mutex count: */ + pmutex->m_data.m_count = 0; + break; + + /* Trap invalid mutex types: */ + default: + /* Return an invalid argument error: */ + ret = EINVAL; + break; + } + if (ret == 0) { + /* Initialise the rest of the mutex: */ + TAILQ_INIT(&pmutex->m_queue); + pmutex->m_flags |= MUTEX_FLAGS_INITED; + pmutex->m_owner = NULL; + pmutex->m_type = type; + pmutex->m_protocol = protocol; + pmutex->m_refcount = 0; + if (protocol == PTHREAD_PRIO_PROTECT) + pmutex->m_prio = ceiling; + else + pmutex->m_prio = 0; + pmutex->m_saved_prio = 0; + _SPINUNLOCK(&pmutex->lock); + *mutex = pmutex; + } else { + free(pmutex); + *mutex = NULL; } } } @@ -125,16 +163,29 @@ pthread_mutex_destroy(pthread_mutex_t * mutex) _SPINLOCK(&(*mutex)->lock); /* - * Free the memory allocated for the mutex - * structure: + * Check to see if this mutex is in use: */ - free(*mutex); + if (((*mutex)->m_owner != NULL) || + (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) || + ((*mutex)->m_refcount != 0)) { + ret = EBUSY; - /* - * Leave the caller's pointer NULL now that - * the mutex has been destroyed: - */ - *mutex = NULL; + /* Unlock the mutex structure: */ + _SPINUNLOCK(&(*mutex)->lock); + } + else { + /* + * Free the memory allocated for the mutex + * structure: + */ + free(*mutex); + + /* + * Leave the caller's pointer NULL now that + * the mutex has been destroyed: + */ + *mutex = NULL; + } } /* Return the completion status: */ @@ -171,44 +222,100 @@ pthread_mutex_trylock(pthread_mutex_t * mutex) * initialization: */ else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { + /* + * Guard against being preempted by a scheduling signal. 
+ * To support priority inheritence mutexes, we need to + * maintain lists of mutex ownerships for each thread as + * well as lists of waiting threads for each mutex. In + * order to propagate priorities we need to atomically + * walk these lists and cannot rely on a single mutex + * lock to provide protection against modification. + */ + _thread_kern_sched_defer(); + /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ - switch ((*mutex)->m_type) { - /* Fast mutex: */ - case PTHREAD_MUTEX_NORMAL: - case PTHREAD_MUTEX_DEFAULT: - case PTHREAD_MUTEX_ERRORCHECK: + switch ((*mutex)->m_protocol) { + /* Default POSIX mutex: */ + case PTHREAD_PRIO_NONE: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; - } else { + + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_trylock(*mutex); + else /* Return a busy error: */ ret = EBUSY; - } break; - /* Counting mutex: */ - case PTHREAD_MUTEX_RECURSIVE: - /* Check if this mutex is locked: */ - if ((*mutex)->m_owner != NULL) { + /* POSIX priority inheritence mutex: */ + case PTHREAD_PRIO_INHERIT: + /* Check if this mutex is not locked: */ + if ((*mutex)->m_owner == NULL) { + /* Lock the mutex for the running thread: */ + (*mutex)->m_owner = _thread_run; + + /* Track number of priority mutexes owned: */ + _thread_run->priority_mutex_count++; + /* - * Check if the mutex is locked by the running - * thread: + * The mutex takes on the attributes of the + * running thread when there are no waiters. */ - if ((*mutex)->m_owner == _thread_run) { - /* Increment the lock count: */ - (*mutex)->m_data.m_count++; - } else { - /* Return a busy error: */ - ret = EBUSY; - } - } else { + (*mutex)->m_prio = _thread_run->active_priority; + (*mutex)->m_saved_prio = + _thread_run->inherited_priority; + + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_trylock(*mutex); + else + /* Return a busy error: */ + ret = EBUSY; + break; + + /* POSIX priority protection mutex: */ + case PTHREAD_PRIO_PROTECT: + /* Check for a priority ceiling violation: */ + if (_thread_run->active_priority > (*mutex)->m_prio) + ret = EINVAL; + + /* Check if this mutex is not locked: */ + else if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; - } + + /* Track number of priority mutexes owned: */ + _thread_run->priority_mutex_count++; + + /* + * The running thread inherits the ceiling + * priority of the mutex and executes at that + * priority. 
+ */ + _thread_run->active_priority = (*mutex)->m_prio; + (*mutex)->m_saved_prio = + _thread_run->inherited_priority; + _thread_run->inherited_priority = + (*mutex)->m_prio; + + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_trylock(*mutex); + else + /* Return a busy error: */ + ret = EBUSY; break; /* Trap invalid mutex types: */ @@ -220,6 +327,12 @@ pthread_mutex_trylock(pthread_mutex_t * mutex) /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); + + /* + * Renable preemption and yield if a scheduling signal + * arrived while in the critical region: + */ + _thread_kern_sched_undefer(); } /* Return the completion status: */ @@ -239,94 +352,200 @@ pthread_mutex_lock(pthread_mutex_t * mutex) * initialization: */ else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { + /* + * Guard against being preempted by a scheduling signal. + * To support priority inheritence mutexes, we need to + * maintain lists of mutex ownerships for each thread as + * well as lists of waiting threads for each mutex. In + * order to propagate priorities we need to atomically + * walk these lists and cannot rely on a single mutex + * lock to provide protection against modification. + */ + _thread_kern_sched_defer(); + /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ - switch ((*mutex)->m_type) { - /* What SS2 define as a 'normal' mutex. This has to deadlock - on attempts to get a lock you already own. */ - case PTHREAD_MUTEX_NORMAL: - if ((*mutex)->m_owner == _thread_run) { - /* Intentionally deadlock: */ - _thread_run->data.mutex = mutex; - for (;;) - _thread_kern_sched_state(PS_MUTEX_WAIT, __FILE__, __LINE__); - } - goto COMMON_LOCK; - - /* Return error (not OK) on attempting to re-lock */ - case PTHREAD_MUTEX_ERRORCHECK: - if ((*mutex)->m_owner == _thread_run) { - ret = EDEADLK; - break; - } - - /* Fast mutexes do not check for any error conditions: */ - case PTHREAD_MUTEX_DEFAULT: - COMMON_LOCK: - /* - * Enter a loop to wait for the mutex to be locked by the - * current thread: - */ - while ((*mutex)->m_owner != _thread_run) { - /* Check if the mutex is not locked: */ - if ((*mutex)->m_owner == NULL) { - /* Lock the mutex for this thread: */ - (*mutex)->m_owner = _thread_run; - } else { - /* - * Join the queue of threads waiting to lock - * the mutex: - */ - _thread_queue_enq(&(*mutex)->m_queue, _thread_run); - _thread_run->data.mutex = mutex; + switch ((*mutex)->m_protocol) { + /* Default POSIX mutex: */ + case PTHREAD_PRIO_NONE: + if ((*mutex)->m_owner == NULL) { + /* Lock the mutex for this thread: */ + (*mutex)->m_owner = _thread_run; - /* Wait for the mutex: */ - _thread_kern_sched_state_unlock( - PS_MUTEX_WAIT, &(*mutex)->lock, - __FILE__, __LINE__); + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); - /* Lock the mutex again: */ - _SPINLOCK(&(*mutex)->lock); - } + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_lock(*mutex); + else { + /* + * Join the queue of threads waiting to lock + * the mutex: + */ + mutex_queue_enq(*mutex, _thread_run); + + /* + * Keep a pointer to the mutex this thread + * is waiting on: + */ + _thread_run->data.mutex = *mutex; + + /* + * Unlock the mutex structure and schedule the + * next thread: + */ + _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, + &(*mutex)->lock, __FILE__, __LINE__); + + /* Lock the mutex structure again: */ 
+ _SPINLOCK(&(*mutex)->lock); + + /* + * This thread is no longer waiting for + * the mutex: + */ + _thread_run->data.mutex = NULL; } break; - /* Counting mutex: */ - case PTHREAD_MUTEX_RECURSIVE: - /* - * Enter a loop to wait for the mutex to be locked by the - * current thread: - */ - while ((*mutex)->m_owner != _thread_run) { - /* Check if the mutex is not locked: */ - if ((*mutex)->m_owner == NULL) { - /* Lock the mutex for this thread: */ - (*mutex)->m_owner = _thread_run; - - /* Reset the lock count for this mutex: */ - (*mutex)->m_data.m_count = 0; - } else { - /* - * Join the queue of threads waiting to lock - * the mutex: - */ - _thread_queue_enq(&(*mutex)->m_queue, _thread_run); - _thread_run->data.mutex = mutex; + /* POSIX priority inheritence mutex: */ + case PTHREAD_PRIO_INHERIT: + /* Check if this mutex is not locked: */ + if ((*mutex)->m_owner == NULL) { + /* Lock the mutex for this thread: */ + (*mutex)->m_owner = _thread_run; - /* Wait for the mutex: */ - _thread_kern_sched_state_unlock( - PS_MUTEX_WAIT, &(*mutex)->lock, - __FILE__, __LINE__); + /* Track number of priority mutexes owned: */ + _thread_run->priority_mutex_count++; - /* Lock the mutex again: */ - _SPINLOCK(&(*mutex)->lock); - } + /* + * The mutex takes on attributes of the + * running thread when there are no waiters. + */ + (*mutex)->m_prio = _thread_run->active_priority; + (*mutex)->m_saved_prio = + _thread_run->inherited_priority; + _thread_run->inherited_priority = + (*mutex)->m_prio; + + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); + + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_lock(*mutex); + else { + /* + * Join the queue of threads waiting to lock + * the mutex: + */ + mutex_queue_enq(*mutex, _thread_run); + + /* + * Keep a pointer to the mutex this thread + * is waiting on: + */ + _thread_run->data.mutex = *mutex; + + if (_thread_run->active_priority > + (*mutex)->m_prio) + /* Adjust priorities: */ + mutex_priority_adjust(*mutex); + + /* + * Unlock the mutex structure and schedule the + * next thread: + */ + _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, + &(*mutex)->lock, __FILE__, __LINE__); + + /* Lock the mutex structure again: */ + _SPINLOCK(&(*mutex)->lock); + + /* + * This thread is no longer waiting for + * the mutex: + */ + _thread_run->data.mutex = NULL; } + break; + + /* POSIX priority protection mutex: */ + case PTHREAD_PRIO_PROTECT: + /* Check for a priority ceiling violation: */ + if (_thread_run->active_priority > (*mutex)->m_prio) + ret = EINVAL; + + /* Check if this mutex is not locked: */ + else if ((*mutex)->m_owner == NULL) { + /* + * Lock the mutex for the running + * thread: + */ + (*mutex)->m_owner = _thread_run; + + /* Track number of priority mutexes owned: */ + _thread_run->priority_mutex_count++; - /* Increment the lock count for this mutex: */ - (*mutex)->m_data.m_count++; + /* + * The running thread inherits the ceiling + * priority of the mutex and executes at that + * priority: + */ + _thread_run->active_priority = (*mutex)->m_prio; + (*mutex)->m_saved_prio = + _thread_run->inherited_priority; + _thread_run->inherited_priority = + (*mutex)->m_prio; + + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_lock(*mutex); + else { + /* + * Join the queue of threads waiting to lock + * the mutex: + */ + mutex_queue_enq(*mutex, _thread_run); + + /* + * Keep a pointer to the mutex this 
thread + * is waiting on: + */ + _thread_run->data.mutex = *mutex; + + /* Clear any previous error: */ + _thread_run->error = 0; + + /* + * Unlock the mutex structure and schedule the + * next thread: + */ + _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, + &(*mutex)->lock, __FILE__, __LINE__); + + /* Lock the mutex structure again: */ + _SPINLOCK(&(*mutex)->lock); + + /* + * The threads priority may have changed while + * waiting for the mutex causing a ceiling + * violation. + */ + ret = _thread_run->error; + _thread_run->error = 0; + + /* + * This thread is no longer waiting for + * the mutex: + */ + _thread_run->data.mutex = NULL; + } break; /* Trap invalid mutex types: */ @@ -338,6 +557,12 @@ pthread_mutex_lock(pthread_mutex_t * mutex) /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); + + /* + * Renable preemption and yield if a scheduling signal + * arrived while in the critical region: + */ + _thread_kern_sched_undefer(); } /* Return the completion status: */ @@ -347,56 +572,375 @@ pthread_mutex_lock(pthread_mutex_t * mutex) int pthread_mutex_unlock(pthread_mutex_t * mutex) { - int ret = 0; + return (mutex_unlock_common(mutex, /* add reference */ 0)); +} + +int +_mutex_cv_unlock(pthread_mutex_t * mutex) +{ + return (mutex_unlock_common(mutex, /* add reference */ 1)); +} + +int +_mutex_cv_lock(pthread_mutex_t * mutex) +{ + int ret; + if ((ret = pthread_mutex_lock(mutex)) == 0) + (*mutex)->m_refcount--; + return (ret); +} + +static inline int +mutex_self_trylock(pthread_mutex_t mutex) +{ + int ret = 0; + + switch (mutex->m_type) { + + /* case PTHREAD_MUTEX_DEFAULT: */ + case PTHREAD_MUTEX_ERRORCHECK: + case PTHREAD_MUTEX_NORMAL: + /* + * POSIX specifies that mutexes should return EDEADLK if a + * recursive lock is detected. + */ + ret = EBUSY; + break; + + case PTHREAD_MUTEX_RECURSIVE: + /* Increment the lock count: */ + mutex->m_data.m_count++; + break; + + default: + /* Trap invalid mutex types; */ + ret = EINVAL; + } + + return(ret); +} + +static inline int +mutex_self_lock(pthread_mutex_t mutex) +{ + int ret = 0; + + switch (mutex->m_type) { + /* case PTHREAD_MUTEX_DEFAULT: */ + case PTHREAD_MUTEX_ERRORCHECK: + /* + * POSIX specifies that mutexes should return EDEADLK if a + * recursive lock is detected. + */ + ret = EDEADLK; + break; + + case PTHREAD_MUTEX_NORMAL: + /* + * What SS2 define as a 'normal' mutex. Intentionally + * deadlock on attempts to get a lock you already own. + */ + _thread_kern_sched_state_unlock(PS_DEADLOCK, + &mutex->lock, __FILE__, __LINE__); + break; + + case PTHREAD_MUTEX_RECURSIVE: + /* Increment the lock count: */ + mutex->m_data.m_count++; + break; + + default: + /* Trap invalid mutex types; */ + ret = EINVAL; + } + + return(ret); +} + +static inline int +mutex_unlock_common(pthread_mutex_t * mutex, int add_reference) +{ + int ret = 0; if (mutex == NULL || *mutex == NULL) { ret = EINVAL; } else { + /* + * Guard against being preempted by a scheduling signal. + * To support priority inheritence mutexes, we need to + * maintain lists of mutex ownerships for each thread as + * well as lists of waiting threads for each mutex. In + * order to propagate priorities we need to atomically + * walk these lists and cannot rely on a single mutex + * lock to provide protection against modification. 
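The PTHREAD_PRIO_INHERIT lock path above raises the mutex priority, and with it the owner's inherited and active priority, when a higher-priority thread queues on the mutex. A self-contained sketch of just that propagation step, using simplified hypothetical types rather than the library's struct pthread:

#include <stdio.h>

/* Simplified, hypothetical stand-ins for the thread/mutex fields used above. */
struct toy_thread {
    int base_priority;          /* priority requested by the application */
    int inherited_priority;     /* boost received from owned PI mutexes */
    int active_priority;        /* max(base, inherited) */
};

struct toy_mutex {
    struct toy_thread *owner;
    int prio;                   /* highest active priority among waiters */
};

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* A waiter that outranks the mutex raises the mutex priority and, in turn,
 * the owner's inherited and active priority. */
static void toy_pi_wait(struct toy_mutex *m, struct toy_thread *waiter)
{
    if (waiter->active_priority > m->prio) {
        m->prio = waiter->active_priority;
        m->owner->inherited_priority =
            MAX(m->owner->inherited_priority, m->prio);
        m->owner->active_priority =
            MAX(m->owner->base_priority, m->owner->inherited_priority);
    }
}

int main(void)
{
    struct toy_thread owner  = { 5, 0, 5 };
    struct toy_thread waiter = { 20, 0, 20 };
    struct toy_mutex m = { &owner, 5 };

    toy_pi_wait(&m, &waiter);
    printf("owner now runs at priority %d\n", owner.active_priority); /* 20 */
    return 0;
}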
+ */ + _thread_kern_sched_defer(); + /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ - switch ((*mutex)->m_type) { - /* Default & normal mutexes do not really need to check for - any error conditions: */ - case PTHREAD_MUTEX_NORMAL: - case PTHREAD_MUTEX_DEFAULT: - case PTHREAD_MUTEX_ERRORCHECK: - /* Check if the running thread is not the owner of the mutex: */ + switch ((*mutex)->m_protocol) { + /* Default POSIX mutex: */ + case PTHREAD_PRIO_NONE: + /* + * Check if the running thread is not the owner of the + * mutex: + */ if ((*mutex)->m_owner != _thread_run) { - /* This thread doesn't have permission: */ + /* + * Return a permission error when the thread + * doesn't own the lock: + */ ret = EPERM; } + else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && + ((*mutex)->m_data.m_count > 1)) { + /* Decrement the count: */ + (*mutex)->m_data.m_count--; + } else { + /* + * Clear the count in case this is recursive + * mutex. + */ + (*mutex)->m_data.m_count = 0; + + /* Remove the mutex from the threads queue. */ + TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, + (*mutex), m_qe); + + /* + * Get the next thread from the queue of + * threads waiting on the mutex: + */ + if (((*mutex)->m_owner = + mutex_queue_deq(*mutex)) != NULL) { + /* + * Allow the new owner of the mutex to + * run: + */ + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); + } + } + break; + + /* POSIX priority inheritence mutex: */ + case PTHREAD_PRIO_INHERIT: /* - * Get the next thread from the queue of threads waiting on - * the mutex: + * Check if the running thread is not the owner of the + * mutex: */ - else if (((*mutex)->m_owner = _thread_queue_deq(&(*mutex)->m_queue)) != NULL) { - /* Allow the new owner of the mutex to run: */ - PTHREAD_NEW_STATE((*mutex)->m_owner,PS_RUNNING); + if ((*mutex)->m_owner != _thread_run) { + /* + * Return a permission error when the thread + * doesn't own the lock: + */ + ret = EPERM; + } + else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && + ((*mutex)->m_data.m_count > 1)) { + /* Decrement the count: */ + (*mutex)->m_data.m_count--; + } else { + /* + * Clear the count in case this is recursive + * mutex. + */ + (*mutex)->m_data.m_count = 0; + + /* + * Restore the threads inherited priority and + * recompute the active priority (being careful + * not to override changes in the threads base + * priority subsequent to locking the mutex). + */ + _thread_run->inherited_priority = + (*mutex)->m_saved_prio; + _thread_run->active_priority = + MAX(_thread_run->inherited_priority, + _thread_run->base_priority); + + /* + * This thread now owns one less priority mutex. + */ + _thread_run->priority_mutex_count--; + + /* Remove the mutex from the threads queue. */ + TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, + (*mutex), m_qe); + + /* + * Get the next thread from the queue of threads + * waiting on the mutex: + */ + if (((*mutex)->m_owner = + mutex_queue_deq(*mutex)) == NULL) + /* This mutex has no priority. */ + (*mutex)->m_prio = 0; + else { + /* + * Track number of priority mutexes owned: + */ + (*mutex)->m_owner->priority_mutex_count++; + + /* + * Add the mutex to the threads list + * of owned mutexes: + */ + TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, + (*mutex), m_qe); + + /* + * The owner is no longer waiting for + * this mutex: + */ + (*mutex)->m_owner->data.mutex = NULL; + + /* + * Set the priority of the mutex. 
Since + * our waiting threads are in descending + * priority order, the priority of the + * mutex becomes the active priority of + * the thread we just dequeued. + */ + (*mutex)->m_prio = + (*mutex)->m_owner->active_priority; + + /* + * Save the owning threads inherited + * priority: + */ + (*mutex)->m_saved_prio = + (*mutex)->m_owner->inherited_priority; + + /* + * The owning threads inherited priority + * now becomes his active priority (the + * priority of the mutex). + */ + (*mutex)->m_owner->inherited_priority = + (*mutex)->m_prio; + + /* + * Allow the new owner of the mutex to + * run: + */ + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); + } } break; - /* Counting mutex: */ - case PTHREAD_MUTEX_RECURSIVE: - /* Check if the running thread is not the owner of the mutex: */ + /* POSIX priority ceiling mutex: */ + case PTHREAD_PRIO_PROTECT: + /* + * Check if the running thread is not the owner of the + * mutex: + */ if ((*mutex)->m_owner != _thread_run) { - /* Return an invalid argument error: */ - ret = EINVAL; + /* + * Return a permission error when the thread + * doesn't own the lock: + */ + ret = EPERM; } - /* Check if there are still counts: */ - else if ((*mutex)->m_data.m_count > 1) { + else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && + ((*mutex)->m_data.m_count > 1)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { + /* + * Clear the count in case this is recursive + * mutex. + */ (*mutex)->m_data.m_count = 0; + /* - * Get the next thread from the queue of threads waiting on - * the mutex: + * Restore the threads inherited priority and + * recompute the active priority (being careful + * not to override changes in the threads base + * priority subsequent to locking the mutex). + */ + _thread_run->inherited_priority = + (*mutex)->m_saved_prio; + _thread_run->active_priority = + MAX(_thread_run->inherited_priority, + _thread_run->base_priority); + + /* + * This thread now owns one less priority mutex. + */ + _thread_run->priority_mutex_count--; + + /* Remove the mutex from the threads queue. */ + TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, + (*mutex), m_qe); + + /* + * Enter a loop to find a waiting thread whose + * active priority will not cause a ceiling + * violation: */ - if (((*mutex)->m_owner = _thread_queue_deq(&(*mutex)->m_queue)) != NULL) { - /* Allow the new owner of the mutex to run: */ - PTHREAD_NEW_STATE((*mutex)->m_owner,PS_RUNNING); + while ((((*mutex)->m_owner = + mutex_queue_deq(*mutex)) != NULL) && + ((*mutex)->m_owner->active_priority > + (*mutex)->m_prio)) { + /* + * Either the mutex ceiling priority + * been lowered and/or this threads + * priority has been raised subsequent + * to this thread being queued on the + * waiting list. 
+ */ + (*mutex)->m_owner->error = EINVAL; + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); + } + + /* Check for a new owner: */ + if ((*mutex)->m_owner != NULL) { + /* + * Track number of priority mutexes owned: + */ + (*mutex)->m_owner->priority_mutex_count++; + + /* + * Add the mutex to the threads list + * of owned mutexes: + */ + TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, + (*mutex), m_qe); + + /* + * The owner is no longer waiting for + * this mutex: + */ + (*mutex)->m_owner->data.mutex = NULL; + + /* + * Save the owning threads inherited + * priority: + */ + (*mutex)->m_saved_prio = + (*mutex)->m_owner->inherited_priority; + + /* + * The owning thread inherits the + * ceiling priority of the mutex and + * executes at that priority: + */ + (*mutex)->m_owner->inherited_priority = + (*mutex)->m_prio; + (*mutex)->m_owner->active_priority = + (*mutex)->m_prio; + + /* + * Allow the new owner of the mutex to + * run: + */ + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); } } break; @@ -408,11 +952,348 @@ pthread_mutex_unlock(pthread_mutex_t * mutex) break; } + if ((ret == 0) && (add_reference != 0)) { + /* Increment the reference count: */ + (*mutex)->m_refcount++; + } + /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); + + /* + * Renable preemption and yield if a scheduling signal + * arrived while in the critical region: + */ + _thread_kern_sched_undefer(); } /* Return the completion status: */ return (ret); } + + +/* + * This function is called when a change in base priority occurs + * for a thread that is thread holding, or waiting for, a priority + * protection or inheritence mutex. A change in a threads base + * priority can effect changes to active priorities of other threads + * and to the ordering of mutex locking by waiting threads. + * + * This must be called while thread scheduling is deferred. + */ +void +_mutex_notify_priochange(pthread_t pthread) +{ + /* Adjust the priorites of any owned priority mutexes: */ + if (pthread->priority_mutex_count > 0) { + /* + * Rescan the mutexes owned by this thread and correct + * their priorities to account for this threads change + * in priority. This has the side effect of changing + * the threads active priority. + */ + mutex_rescan_owned(pthread, /* rescan all owned */ NULL); + } + + /* + * If this thread is waiting on a priority inheritence mutex, + * check for priority adjustments. A change in priority can + * also effect a ceiling violation(*) for a thread waiting on + * a priority protection mutex; we don't perform the check here + * as it is done in pthread_mutex_unlock. + * + * (*) It should be noted that a priority change to a thread + * _after_ taking and owning a priority ceiling mutex + * does not affect ownership of that mutex; the ceiling + * priority is only checked before mutex ownership occurs. + */ + if (pthread->state == PS_MUTEX_WAIT) { + /* Lock the mutex structure: */ + _SPINLOCK(&pthread->data.mutex->lock); + + /* + * Check to make sure this thread is still in the same state + * (the spinlock above can yield the CPU to another thread): + */ + if (pthread->state == PS_MUTEX_WAIT) { + /* + * Remove and reinsert this thread into the list of + * waiting threads to preserve decreasing priority + * order. 
+ */ + mutex_queue_remove(pthread->data.mutex, pthread); + mutex_queue_enq(pthread->data.mutex, pthread); + + if (pthread->data.mutex->m_protocol == + PTHREAD_PRIO_INHERIT) { + /* Adjust priorities: */ + mutex_priority_adjust(pthread->data.mutex); + } + } + + /* Unlock the mutex structure: */ + _SPINUNLOCK(&pthread->data.mutex->lock); + } +} + +/* + * Called when a new thread is added to the mutex waiting queue or + * when a threads priority changes that is already in the mutex + * waiting queue. + */ +static void +mutex_priority_adjust(pthread_mutex_t mutex) +{ + pthread_t pthread_next, pthread = mutex->m_owner; + int temp_prio; + pthread_mutex_t m = mutex; + + /* + * Calculate the mutex priority as the maximum of the highest + * active priority of any waiting threads and the owning threads + * active priority(*). + * + * (*) Because the owning threads current active priority may + * reflect priority inherited from this mutex (and the mutex + * priority may have changed) we must recalculate the active + * priority based on the threads saved inherited priority + * and its base priority. + */ + pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */ + temp_prio = MAX(pthread_next->active_priority, + MAX(m->m_saved_prio, pthread->base_priority)); + + /* See if this mutex really needs adjusting: */ + if (temp_prio == m->m_prio) + /* No need to propagate the priority: */ + return; + + /* Set new priority of the mutex: */ + m->m_prio = temp_prio; + + while (m != NULL) { + /* + * Save the threads priority before rescanning the + * owned mutexes: + */ + temp_prio = pthread->active_priority; + + /* + * Fix the priorities for all the mutexes this thread has + * locked since taking this mutex. This also has a + * potential side-effect of changing the threads priority. + */ + mutex_rescan_owned(pthread, m); + + /* + * If the thread is currently waiting on a mutex, check + * to see if the threads new priority has affected the + * priority of the mutex. + */ + if ((temp_prio != pthread->active_priority) && + (pthread->state == PS_MUTEX_WAIT) && + (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) { + /* Grab the mutex this thread is waiting on: */ + m = pthread->data.mutex; + + /* + * The priority for this thread has changed. Remove + * and reinsert this thread into the list of waiting + * threads to preserve decreasing priority order. + */ + mutex_queue_remove(m, pthread); + mutex_queue_enq(m, pthread); + + /* Grab the waiting thread with highest priority: */ + pthread_next = TAILQ_FIRST(&m->m_queue); + + /* + * Calculate the mutex priority as the maximum of the + * highest active priority of any waiting threads and + * the owning threads active priority. + */ + temp_prio = MAX(pthread_next->active_priority, + MAX(m->m_saved_prio, m->m_owner->base_priority)); + + if (temp_prio != m->m_prio) { + /* + * The priority needs to be propagated to the + * mutex this thread is waiting on and up to + * the owner of that mutex. + */ + m->m_prio = temp_prio; + pthread = m->m_owner; + } + else + /* We're done: */ + m = NULL; + + } + else + /* We're done: */ + m = NULL; + } +} + +static void +mutex_rescan_owned (pthread_t pthread, pthread_mutex_t mutex) +{ + int active_prio, inherited_prio; + pthread_mutex_t m; + pthread_t pthread_next; + + /* + * Start walking the mutexes the thread has taken since + * taking this mutex. + */ + if (mutex == NULL) { + /* + * A null mutex means start at the beginning of the owned + * mutex list. 
+ */ + m = TAILQ_FIRST(&pthread->mutexq); + + /* There is no inherited priority yet. */ + inherited_prio = 0; + } + else { + /* + * The caller wants to start after a specific mutex. It + * is assumed that this mutex is a priority inheritence + * mutex and that its priority has been correctly + * calculated. + */ + m = TAILQ_NEXT(mutex, m_qe); + + /* Start inheriting priority from the specified mutex. */ + inherited_prio = mutex->m_prio; + } + active_prio = MAX(inherited_prio, pthread->base_priority); + + while (m != NULL) { + /* + * We only want to deal with priority inheritence + * mutexes. This might be optimized by only placing + * priority inheritence mutexes into the owned mutex + * list, but it may prove to be useful having all + * owned mutexes in this list. Consider a thread + * exiting while holding mutexes... + */ + if (m->m_protocol == PTHREAD_PRIO_INHERIT) { + /* + * Fix the owners saved (inherited) priority to + * reflect the priority of the previous mutex. + */ + m->m_saved_prio = inherited_prio; + + if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL) + /* Recalculate the priority of the mutex: */ + m->m_prio = MAX(active_prio, + pthread_next->active_priority); + else + m->m_prio = active_prio; + + /* Recalculate new inherited and active priorities: */ + inherited_prio = m->m_prio; + active_prio = MAX(m->m_prio, pthread->base_priority); + } + + /* Advance to the next mutex owned by this thread: */ + m = TAILQ_NEXT(m, m_qe); + } + + /* + * Fix the threads inherited priority and recalculate its + * active priority. + */ + pthread->inherited_priority = inherited_prio; + active_prio = MAX(inherited_prio, pthread->base_priority); + + if (active_prio != pthread->active_priority) { + /* + * If this thread is in the priority queue, it must be + * removed and reinserted for its new priority. + */ + if ((pthread != _thread_run) && + (pthread->state == PS_RUNNING)) { + /* + * Remove the thread from the priority queue + * before changing its priority: + */ + PTHREAD_PRIOQ_REMOVE(pthread); + + /* + * POSIX states that if the priority is being + * lowered, the thread must be inserted at the + * head of the queue for its priority if it owns + * any priority protection or inheritence mutexes. + */ + if ((active_prio < pthread->active_priority) && + (pthread->priority_mutex_count > 0)) { + /* Set the new active priority. */ + pthread->active_priority = active_prio; + + PTHREAD_PRIOQ_INSERT_HEAD(pthread); + } + else { + /* Set the new active priority. */ + pthread->active_priority = active_prio; + + PTHREAD_PRIOQ_INSERT_TAIL(pthread); + } + } + else { + /* Set the new active priority. */ + pthread->active_priority = active_prio; + } + } +} + +/* + * Dequeue a waiting thread from the head of a mutex queue in descending + * priority order. + */ +static inline pthread_t +mutex_queue_deq(pthread_mutex_t mutex) +{ + pthread_t pthread; + + if ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) + TAILQ_REMOVE(&mutex->m_queue, pthread, qe); + + return(pthread); +} + +/* + * Remove a waiting thread from a mutex queue in descending priority order. + */ +static inline void +mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) +{ + TAILQ_REMOVE(&mutex->m_queue, pthread, qe); +} + +/* + * Enqueue a waiting thread to a queue in descending priority order. 
+ */ +static inline void +mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) +{ + pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head); + + /* + * For the common case of all threads having equal priority, + * we perform a quick check against the priority of the thread + * at the tail of the queue. + */ + if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) + TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe); + else { + tid = TAILQ_FIRST(&mutex->m_queue); + while (pthread->active_priority <= tid->active_priority) + tid = TAILQ_NEXT(tid, qe); + TAILQ_INSERT_BEFORE(tid, pthread, qe); + } +} + #endif diff --git a/lib/libc_r/uthread/uthread_mutex_prioceiling.c b/lib/libc_r/uthread/uthread_mutex_prioceiling.c new file mode 100644 index 00000000000..779c238cfe7 --- /dev/null +++ b/lib/libc_r/uthread/uthread_mutex_prioceiling.c @@ -0,0 +1,110 @@ +/* $OpenBSD: uthread_mutex_prioceiling.c,v 1.1 1999/05/26 00:18:25 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
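mutex_queue_enq() in the preceding uthread_mutex.c hunk keeps waiters sorted by decreasing active priority, so mutex_queue_deq() always hands the lock to the highest-priority waiter. A standalone sketch of the same insertion policy using <sys/queue.h>, with toy types instead of the thread structures used above:

#include <stdio.h>
#include <sys/queue.h>

struct toy_thread {
    int active_priority;
    TAILQ_ENTRY(toy_thread) qe;
};
TAILQ_HEAD(toy_head, toy_thread);

static void toy_queue_enq(struct toy_head *q, struct toy_thread *t)
{
    struct toy_thread *tid = TAILQ_LAST(q, toy_head);

    /* Common case: equal or lower priority goes straight to the tail. */
    if (tid == NULL || t->active_priority <= tid->active_priority)
        TAILQ_INSERT_TAIL(q, t, qe);
    else {
        tid = TAILQ_FIRST(q);
        while (t->active_priority <= tid->active_priority)
            tid = TAILQ_NEXT(tid, qe);
        TAILQ_INSERT_BEFORE(tid, t, qe);
    }
}

int main(void)
{
    struct toy_head q = TAILQ_HEAD_INITIALIZER(q);
    struct toy_thread a = { 5 }, b = { 20 }, c = { 10 };
    struct toy_thread *t;

    toy_queue_enq(&q, &a);
    toy_queue_enq(&q, &b);
    toy_queue_enq(&q, &c);
    TAILQ_FOREACH(t, &q, qe)
        printf("%d ", t->active_priority);      /* prints: 20 10 5 */
    printf("\n");
    return 0;
}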
+ * + */ +#include <string.h> +#include <stdlib.h> +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_mutexattr_getprioceiling(pthread_mutexattr_t *mattr, int *prioceiling) +{ + int ret = 0; + + if ((mattr == NULL) || (*mattr == NULL)) + ret = EINVAL; + else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT) + ret = EINVAL; + else + *prioceiling = (*mattr)->m_ceiling; + + return(ret); +} + +int +pthread_mutexattr_setprioceiling(pthread_mutexattr_t *mattr, int prioceiling) +{ + int ret = 0; + + if ((mattr == NULL) || (*mattr == NULL)) + ret = EINVAL; + else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT) + ret = EINVAL; + else + (*mattr)->m_ceiling = prioceiling; + + return(ret); +} + +int +pthread_mutex_getprioceiling(pthread_mutex_t *mutex, + int *prioceiling) +{ + int ret; + + if ((mutex == NULL) || (*mutex == NULL)) + ret = EINVAL; + else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT) + ret = EINVAL; + else + ret = (*mutex)->m_prio; + + return(ret); +} + +int +pthread_mutex_setprioceiling(pthread_mutex_t *mutex, + int prioceiling, int *old_ceiling) +{ + int ret = 0; + + if ((mutex == NULL) || (*mutex == NULL)) + ret = EINVAL; + else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT) + ret = EINVAL; + else { + /* Lock the mutex: */ + if ((ret = pthread_mutex_lock(mutex)) == 0) { + /* Return the old ceiling and set the new ceiling: */ + *old_ceiling = (*mutex)->m_prio; + (*mutex)->m_prio = prioceiling; + + /* Unlock the mutex: */ + ret = pthread_mutex_unlock(mutex); + } + } + return(ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_mutex_protocol.c b/lib/libc_r/uthread/uthread_mutex_protocol.c new file mode 100644 index 00000000000..fa0b9804d57 --- /dev/null +++ b/lib/libc_r/uthread/uthread_mutex_protocol.c @@ -0,0 +1,69 @@ +/* $OpenBSD: uthread_mutex_protocol.c,v 1.1 1999/05/26 00:18:25 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <string.h> +#include <stdlib.h> +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_mutexattr_getprotocol(pthread_mutexattr_t *mattr, int *protocol) +{ + int ret = 0; + + if ((mattr == NULL) || (*mattr == NULL)) + ret = EINVAL; + else + *protocol = (*mattr)->m_protocol; + + return(ret); +} + +int +pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol) +{ + int ret = 0; + + if ((mattr == NULL) || (*mattr == NULL) || + (protocol < PTHREAD_PRIO_NONE) || (protocol > PTHREAD_PRIO_PROTECT)) + ret = EINVAL; + else { + (*mattr)->m_protocol = protocol; + (*mattr)->m_ceiling = PTHREAD_MAX_PRIORITY; + } + return(ret); +} + +#endif diff --git a/lib/libc_r/uthread/uthread_priority_queue.c b/lib/libc_r/uthread/uthread_priority_queue.c new file mode 100644 index 00000000000..a5f45ef7064 --- /dev/null +++ b/lib/libc_r/uthread/uthread_priority_queue.c @@ -0,0 +1,156 @@ +/* $OpenBSD: uthread_priority_queue.c,v 1.1 1999/05/26 00:18:25 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
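With the prioceiling and protocol attribute routines above in place, an application can request a priority-protection mutex through the standard interfaces. A usage sketch (hypothetical application code; the ceiling value 20 is only assumed to lie between PTHREAD_MIN_PRIORITY and PTHREAD_MAX_PRIORITY for this library):

#include <pthread.h>
#include <stdio.h>

int main(void)
{
    pthread_mutexattr_t attr;
    pthread_mutex_t m;
    int ret;

    ret = pthread_mutexattr_init(&attr);
    if (ret == 0)
        /* The protocol must be set before a ceiling can be assigned. */
        ret = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
    if (ret == 0)
        ret = pthread_mutexattr_setprioceiling(&attr, 20);
    if (ret == 0)
        ret = pthread_mutex_init(&m, &attr);

    if (ret != 0) {
        fprintf(stderr, "mutex setup failed: %d\n", ret);
        return 1;
    }

    pthread_mutex_lock(&m);
    /* ... critical section runs at the ceiling priority ... */
    pthread_mutex_unlock(&m);

    pthread_mutex_destroy(&m);
    pthread_mutexattr_destroy(&attr);
    return 0;
}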
+ * + */ +#include <stdlib.h> +#include <sys/queue.h> +#include <string.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +/* Prototypes: */ +static void pq_insert_prio_list(pq_queue_t *pq, int prio); + + +int +_pq_init(pq_queue_t *pq, int minprio, int maxprio) +{ + int i, ret = 0; + int prioslots = maxprio - minprio + 1; + + if (pq == NULL) + ret = -1; + + /* Create the priority queue with (maxprio - minprio + 1) slots: */ + else if ((pq->pq_lists = + (pq_list_t *) malloc(sizeof(pq_list_t) * prioslots)) == NULL) + ret = -1; + + else { + /* Initialize the queue for each priority slot: */ + for (i = 0; i < prioslots; i++) { + TAILQ_INIT(&pq->pq_lists[i].pl_head); + pq->pq_lists[i].pl_prio = i; + pq->pq_lists[i].pl_queued = 0; + } + + /* Initialize the priority queue: */ + TAILQ_INIT(&pq->pq_queue); + + /* Remember the queue size: */ + pq->pq_size = prioslots; + } + return (ret); +} + +void +_pq_remove(pq_queue_t *pq, pthread_t pthread) +{ + int prio = pthread->active_priority; + + TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe); +} + + +void +_pq_insert_head(pq_queue_t *pq, pthread_t pthread) +{ + int prio = pthread->active_priority; + + TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe); + if (pq->pq_lists[prio].pl_queued == 0) + /* Insert the list into the priority queue: */ + pq_insert_prio_list(pq, prio); +} + + +void +_pq_insert_tail(pq_queue_t *pq, pthread_t pthread) +{ + int prio = pthread->active_priority; + + TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe); + if (pq->pq_lists[prio].pl_queued == 0) + /* Insert the list into the priority queue: */ + pq_insert_prio_list(pq, prio); +} + + +pthread_t +_pq_first(pq_queue_t *pq) +{ + pq_list_t *pql; + pthread_t pthread = NULL; + + while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) && + (pthread == NULL)) { + if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) { + /* + * The priority list is empty; remove the list + * from the queue. + */ + TAILQ_REMOVE(&pq->pq_queue, pql, pl_link); + + /* Mark the list as not being in the queue: */ + pql->pl_queued = 0; + } + } + return (pthread); +} + + +static void +pq_insert_prio_list(pq_queue_t *pq, int prio) +{ + pq_list_t *pql; + + /* + * The priority queue is in descending priority order. Start at + * the beginning of the queue and find the list before which the + * new list should to be inserted. + */ + pql = TAILQ_FIRST(&pq->pq_queue); + while ((pql != NULL) && (pql->pl_prio > prio)) + pql = TAILQ_NEXT(pql, pl_link); + + /* Insert the list: */ + if (pql == NULL) + TAILQ_INSERT_TAIL(&pq->pq_queue, &pq->pq_lists[prio], pl_link); + else + TAILQ_INSERT_BEFORE(pql, &pq->pq_lists[prio], pl_link); + + /* Mark this list as being in the queue: */ + pq->pq_lists[prio].pl_queued = 1; +} + +#endif diff --git a/lib/libc_r/uthread/uthread_resume_np.c b/lib/libc_r/uthread/uthread_resume_np.c index 6e211f34ece..e4be286bde6 100644 --- a/lib/libc_r/uthread/uthread_resume_np.c +++ b/lib/libc_r/uthread/uthread_resume_np.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_resume_np.c,v 1.2 1999/01/06 05:29:26 d Exp $ + * $OpenBSD: uthread_resume_np.c,v 1.3 1999/05/26 00:18:25 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE @@ -46,8 +46,21 @@ pthread_resume_np(pthread_t thread) if ((ret = _find_thread(thread)) == 0) { /* The thread exists. Is it suspended? */ if (thread->state != PS_SUSPENDED) { + /* + * Guard against preemption by a scheduling signal. 
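The new uthread_priority_queue.c above implements the ready queue as one FIFO list per priority plus a descending-order list of the non-empty levels. A much-reduced standalone model of the lookup order, using hypothetical fixed-size arrays instead of the pq_list_t/TAILQ machinery (and with no overflow checks; illustration only):

#include <stdio.h>
#include <string.h>

#define TOY_LEVELS 32
#define TOY_SLOTS  8

struct toy_pq {
    int threads[TOY_LEVELS][TOY_SLOTS];     /* thread ids, per priority */
    int count[TOY_LEVELS];
};

static void toy_pq_insert_tail(struct toy_pq *pq, int prio, int tid)
{
    pq->threads[prio][pq->count[prio]++] = tid;
}

static int toy_pq_first(struct toy_pq *pq)
{
    int prio;

    /* Highest priority first; within a level, oldest entry first. */
    for (prio = TOY_LEVELS - 1; prio >= 0; prio--)
        if (pq->count[prio] > 0)
            return (pq->threads[prio][0]);
    return (-1);                            /* nothing runnable */
}

int main(void)
{
    struct toy_pq pq;

    memset(&pq, 0, sizeof(pq));
    toy_pq_insert_tail(&pq, 5, 100);
    toy_pq_insert_tail(&pq, 20, 200);
    toy_pq_insert_tail(&pq, 5, 101);
    printf("next to run: thread %d\n", toy_pq_first(&pq));   /* 200 */
    return 0;
}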
+ * A change of thread state modifies the waiting + * and priority queues. + */ + _thread_kern_sched_defer(); + /* Allow the thread to run. */ PTHREAD_NEW_STATE(thread,PS_RUNNING); + + /* + * Reenable preemption and yield if a scheduling + * signal occurred while in the critical region. + */ + _thread_kern_sched_undefer(); } } return(ret); diff --git a/lib/libc_r/uthread/uthread_select.c b/lib/libc_r/uthread/uthread_select.c index 7793633fd75..dd9714e7b3e 100644 --- a/lib/libc_r/uthread/uthread_select.c +++ b/lib/libc_r/uthread/uthread_select.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_select.c,v 1.2 1999/01/06 05:29:26 d Exp $ + * $OpenBSD: uthread_select.c,v 1.3 1999/05/26 00:18:25 d Exp $ */ #include <unistd.h> #include <errno.h> @@ -48,6 +48,7 @@ select(int numfds, fd_set * readfds, fd_set * writefds, struct timespec ts; struct timeval zero_timeout = {0, 0}; int i, ret = 0, got_all_locks = 1; + int f_wait = 1; struct pthread_select_data data; if (numfds > _thread_dtablesize) { @@ -60,6 +61,8 @@ select(int numfds, fd_set * readfds, fd_set * writefds, /* Set the wake up time: */ _thread_kern_set_timeout(&ts); + if (ts.tv_sec == 0 && ts.tv_nsec == 0) + f_wait = 0; } else { /* Wait for ever: */ _thread_kern_set_timeout(NULL); @@ -111,7 +114,7 @@ select(int numfds, fd_set * readfds, fd_set * writefds, if (exceptfds != NULL) { memcpy(&data.exceptfds, exceptfds, sizeof(data.exceptfds)); } - if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0) { + if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0 && f_wait) { data.nfds = numfds; FD_ZERO(&data.readfds); FD_ZERO(&data.writefds); diff --git a/lib/libc_r/uthread/uthread_setprio.c b/lib/libc_r/uthread/uthread_setprio.c index 7de34d3c7c1..575eb62da2c 100644 --- a/lib/libc_r/uthread/uthread_setprio.c +++ b/lib/libc_r/uthread/uthread_setprio.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_setprio.c,v 1.4 1999/05/26 00:18:25 d Exp $ */ /* * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,60 +30,24 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
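The select() change above adds an f_wait flag so that a caller passing a zero timeout polls the descriptors once and returns instead of sleeping. A small usage sketch of that polling pattern (ordinary application code, nothing library-specific):

#include <sys/types.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
    fd_set rfds;
    struct timeval tv = { 0, 0 };   /* zero timeout: poll, do not wait */
    int n;

    FD_ZERO(&rfds);
    FD_SET(STDIN_FILENO, &rfds);

    n = select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv);
    if (n > 0 && FD_ISSET(STDIN_FILENO, &rfds))
        printf("stdin is readable now\n");
    else if (n == 0)
        printf("nothing ready; select returned immediately\n");
    else
        perror("select");
    return 0;
}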
* - * $OpenBSD: uthread_setprio.c,v 1.3 1999/01/17 23:57:16 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE #include <pthread.h> -#include <sched.h> #include "pthread_private.h" int pthread_setprio(pthread_t pthread, int prio) { - int ret; + int ret, policy; + struct sched_param param; - /* Check if the priority is invalid: */ - if (prio < PTHREAD_MIN_PRIORITY || prio > PTHREAD_MAX_PRIORITY) - /* Return an invalid argument error: */ - ret = EINVAL; - - /* Find the thread in the list of active threads: */ - else if ((ret = _find_thread(pthread)) == 0) - /* Set the thread priority: */ - pthread->pthread_priority = prio; + if ((ret = pthread_getschedparam(pthread, &policy, ¶m)) == 0) { + param.sched_priority = prio; + ret = pthread_setschedparam(pthread, policy, ¶m); + } /* Return the error status: */ return (ret); } - -int -pthread_getschedparam(thread, policy, param) - pthread_t thread; - int *policy; - struct sched_param *param; -{ - int ret = 0; - - if ((ret = _find_thread(thread)) == 0) { - if (policy) - *policy = SCHED_RR; - if (param) - param->sched_priority = thread->pthread_priority; - } - return (ret); -} - -int -pthread_setschedparam(thread, policy, param) - pthread_t thread; - int policy; - const struct sched_param *param; -{ - - if (policy == SCHED_RR) - return pthread_setprio(thread, param->sched_priority); - else - return (EINVAL); -} #endif diff --git a/lib/libc_r/uthread/uthread_setschedparam.c b/lib/libc_r/uthread/uthread_setschedparam.c new file mode 100644 index 00000000000..0024460c003 --- /dev/null +++ b/lib/libc_r/uthread/uthread_setschedparam.c @@ -0,0 +1,114 @@ +/* $OpenBSD: uthread_setschedparam.c,v 1.1 1999/05/26 00:18:25 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ +#include <errno.h> +#include <sys/param.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_setschedparam(pthread_t pthread, int policy, const struct sched_param *param) +{ + int old_prio, in_readyq = 0, ret = 0; + + if ((param == NULL) || (param->sched_priority < PTHREAD_MIN_PRIORITY) || + (param->sched_priority > PTHREAD_MAX_PRIORITY) || + (policy < SCHED_FIFO) || (policy > SCHED_RR)) + /* Return an invalid argument error: */ + ret = EINVAL; + + /* Find the thread in the list of active threads: */ + else if ((ret = _find_thread(pthread)) == 0) { + /* + * Guard against being preempted by a scheduling + * signal: + */ + _thread_kern_sched_defer(); + + if (param->sched_priority != pthread->base_priority) { + /* + * Remove the thread from its current priority + * queue before any adjustments are made to its + * active priority: + */ + if ((pthread != _thread_run) && + (pthread->state == PS_RUNNING)) { + in_readyq = 1; + old_prio = pthread->active_priority; + PTHREAD_PRIOQ_REMOVE(pthread); + } + + /* Set the thread base priority: */ + pthread->base_priority = param->sched_priority; + + /* Recalculate the active priority: */ + pthread->active_priority = MAX(pthread->base_priority, + pthread->inherited_priority); + + if (in_readyq) { + if ((pthread->priority_mutex_count > 0) && + (old_prio > pthread->active_priority)) { + /* + * POSIX states that if the priority is + * being lowered, the thread must be + * inserted at the head of the queue for + * its priority if it owns any priority + * protection or inheritence mutexes. + */ + PTHREAD_PRIOQ_INSERT_HEAD(pthread); + } + else + PTHREAD_PRIOQ_INSERT_TAIL(pthread); + } + + /* + * Check for any mutex priority adjustments. This + * includes checking for a priority mutex on which + * this thread is waiting. + */ + _mutex_notify_priochange(pthread); + } + + /* Set the scheduling policy: */ + pthread->attr.sched_policy = policy; + + /* + * Renable preemption and yield if a scheduling signal + * arrived while in the critical region: + */ + _thread_kern_sched_undefer(); + } + return(ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_sig.c b/lib/libc_r/uthread/uthread_sig.c index 350ce71c6cf..1d32d376fe0 100644 --- a/lib/libc_r/uthread/uthread_sig.c +++ b/lib/libc_r/uthread/uthread_sig.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_sig.c,v 1.5 1999/05/26 00:18:26 d Exp $ */ /* * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au> * All rights reserved. @@ -29,7 +30,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
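pthread_setprio() is now a thin wrapper over pthread_getschedparam()/pthread_setschedparam(), with the full implementation in the new uthread_setschedparam.c above. A usage sketch from the application side (hypothetical; assumes the incremented priority stays within the valid range):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

int main(void)
{
    struct sched_param param;
    int policy, ret;

    /* Read the current policy and priority of the calling thread. */
    ret = pthread_getschedparam(pthread_self(), &policy, &param);
    if (ret == 0) {
        param.sched_priority += 1;      /* assumed to remain in range */
        ret = pthread_setschedparam(pthread_self(), policy, &param);
    }
    if (ret != 0)
        fprintf(stderr, "priority change failed: %d\n", ret);
    return 0;
}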
* - * $OpenBSD: uthread_sig.c,v 1.4 1999/01/06 05:29:27 d Exp $ */ #include <signal.h> #include <fcntl.h> @@ -39,6 +39,19 @@ #include <pthread.h> #include "pthread_private.h" +/* + * State change macro for signal handler: + */ +#define PTHREAD_SIG_NEW_STATE(thrd, newstate) { \ + if ((_thread_run->sched_defer_count == 0) && \ + (_thread_kern_in_sched == 0)) { \ + PTHREAD_NEW_STATE(thrd, newstate); \ + } else { \ + _waitingq_check_reqd = 1; \ + PTHREAD_SET_STATE(thrd, newstate); \ + } \ +} + /* Static variables: */ static int volatile yield_on_unlock_thread = 0; static spinlock_t thread_link_list_lock = _SPINLOCK_INITIALIZER; @@ -109,7 +122,7 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp) _thread_dump_info(); /* Check if an interval timer signal: */ - else if (sig == SIGVTALRM) { + else if (sig == _SCHED_SIGNAL) { /* Check if the scheduler interrupt has come at an * unfortunate time which one of the threads is * modifying the thread list: @@ -123,6 +136,14 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp) yield_on_unlock_thread = 1; /* + * Check if the scheduler interrupt has come when + * the currently running thread has deferred thread + * scheduling. + */ + else if (_thread_run->sched_defer_count) + _thread_run->yield_on_sched_undefer = 1; + + /* * Check if the kernel has not been interrupted while * executing scheduler code: */ @@ -178,18 +199,17 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp) } /* - * Enter a loop to process each thread in the linked + * Enter a loop to process each thread in the waiting * list that is sigwait-ing on a signal. Since POSIX * doesn't specify which thread will get the signal * if there are multiple waiters, we'll give it to the * first one we find. */ - for (pthread = _thread_link_list; pthread != NULL; - pthread = pthread->nxt) { + TAILQ_FOREACH(pthread, &_waitingq, pqe) { if ((pthread->state == PS_SIGWAIT) && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; @@ -209,11 +229,18 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp) * list: */ for (pthread = _thread_link_list; pthread != NULL; - pthread = pthread->nxt) - _thread_signal(pthread,sig); + pthread = pthread->nxt) { + pthread_t pthread_saved = _thread_run; - /* Dispatch pending signals to the running thread: */ - _dispatch_signals(); + _thread_run = pthread; + _thread_signal(pthread,sig); + /* + * Dispatch pending signals to the + * running thread: + */ + _dispatch_signals(); + _thread_run = pthread_saved; + } } /* Returns nothing. 
*/ @@ -265,7 +292,7 @@ _thread_signal(pthread_t pthread, int sig) pthread->interrupted = 1; /* Change the state of the thread to run: */ - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; @@ -285,7 +312,7 @@ _thread_signal(pthread_t pthread, int sig) pthread->interrupted = 1; /* Change the state of the thread to run: */ - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; @@ -300,7 +327,7 @@ _thread_signal(pthread_t pthread, int sig) if (!sigismember(&pthread->sigmask, sig) && _thread_sigact[sig - 1].sa_handler != SIG_DFL) { /* Change the state of the thread to run: */ - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; diff --git a/lib/libc_r/uthread/uthread_sigaction.c b/lib/libc_r/uthread/uthread_sigaction.c index 156d60a6583..6d709a8803f 100644 --- a/lib/libc_r/uthread/uthread_sigaction.c +++ b/lib/libc_r/uthread/uthread_sigaction.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_sigaction.c,v 1.3 1999/01/06 05:29:27 d Exp $ + * $OpenBSD: uthread_sigaction.c,v 1.4 1999/05/26 00:18:26 d Exp $ */ #include <signal.h> #include <errno.h> @@ -72,7 +72,7 @@ sigaction(int sig, const struct sigaction * act, struct sigaction * oact) * Check if the kernel needs to be advised of a change * in signal action: */ - if (act != NULL && sig != SIGVTALRM && sig != SIGCHLD && + if (act != NULL && sig != _SCHED_SIGNAL && sig != SIGCHLD && sig != SIGINFO) { /* Initialise the global signal action structure: */ gact.sa_mask = act->sa_mask; diff --git a/lib/libc_r/uthread/uthread_sigpending.c b/lib/libc_r/uthread/uthread_sigpending.c new file mode 100644 index 00000000000..2daf3159b59 --- /dev/null +++ b/lib/libc_r/uthread/uthread_sigpending.c @@ -0,0 +1,56 @@ +/* $OpenBSD: uthread_sigpending.c,v 1.1 1999/05/26 00:18:26 d Exp $ */ +/* + * Copyright (c) 1999 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by John Birrell. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <signal.h> +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +sigpending(sigset_t * set) +{ + int ret = 0; + + /* Check for a null signal set pointer: */ + if (set == NULL) { + /* Return an invalid argument: */ + ret = EINVAL; + } + else { + *set = _thread_run->sigpend; + } + /* Return the completion status: */ + return (ret); +} +#endif diff --git a/lib/libc_r/uthread/uthread_sigwait.c b/lib/libc_r/uthread/uthread_sigwait.c index 3593b72853f..b4277133310 100644 --- a/lib/libc_r/uthread/uthread_sigwait.c +++ b/lib/libc_r/uthread/uthread_sigwait.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_sigwait.c,v 1.4 1999/01/17 23:57:27 d Exp $ + * $OpenBSD: uthread_sigwait.c,v 1.5 1999/05/26 00:18:26 d Exp $ */ #include <signal.h> #include <errno.h> @@ -58,7 +58,7 @@ sigwait(const sigset_t * set, int *sig) */ sigdelset(&act.sa_mask, SIGKILL); sigdelset(&act.sa_mask, SIGSTOP); - sigdelset(&act.sa_mask, SIGVTALRM); + sigdelset(&act.sa_mask, _SCHED_SIGNAL); sigdelset(&act.sa_mask, SIGCHLD); sigdelset(&act.sa_mask, SIGINFO); diff --git a/lib/libc_r/uthread/uthread_spinlock.c b/lib/libc_r/uthread/uthread_spinlock.c index a7284cd0139..3ea96013bc6 100644 --- a/lib/libc_r/uthread/uthread_spinlock.c +++ b/lib/libc_r/uthread/uthread_spinlock.c @@ -29,8 +29,8 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $FreeBSD: uthread_spinlock.c,v 1.4 1998/06/09 23:13:10 jb Exp $ - * $OpenBSD: uthread_spinlock.c,v 1.4 1999/01/10 23:13:24 d Exp $ + * $FreeBSD: uthread_spinlock.c,v 1.5 1999/03/23 05:07:56 jb Exp $ + * $OpenBSD: uthread_spinlock.c,v 1.5 1999/05/26 00:18:26 d Exp $ * */ @@ -57,12 +57,9 @@ _spinlock(spinlock_t *lck) * it before we do. */ while(_atomic_lock(&lck->access_lock)) { - /* Give up the time slice: */ - sched_yield(); - - /* Check if already locked by the running thread: */ - if (lck->lock_owner == _thread_run) - return; + /* Block the thread until the lock. */ + _thread_run->data.spinlock = lck; + _thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__); } /* The running thread now owns the lock: */ @@ -82,24 +79,25 @@ _spinlock(spinlock_t *lck) void _spinlock_debug(spinlock_t *lck, const char *fname, int lineno) { + int cnt = 0; + /* * Try to grab the lock and loop if another thread grabs * it before we do. 
*/ while(_atomic_lock(&lck->access_lock)) { - /* Give up the time slice: */ - sched_yield(); - - /* Check if already locked by the running thread: */ - if (lck->lock_owner == _thread_run) { + cnt++; + if (cnt > 100) { char str[256]; - snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) which it had already locked in %s (%d)\n", __progname, _thread_run, lck, fname, lineno, lck->fname, lck->lineno); + snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", __progname, _thread_run, lck, fname, lineno, lck->fname, lck->lineno); _thread_sys_write(2,str,strlen(str)); - - /* Create a thread dump to help debug this problem: */ - _thread_dump_info(); - return; + sleep(1); + cnt = 0; } + + /* Block the thread until the lock. */ + _thread_run->data.spinlock = lck; + _thread_kern_sched_state(PS_SPINBLOCK, fname, lineno); } /* The running thread now owns the lock: */ diff --git a/lib/libc_r/uthread/uthread_suspend_np.c b/lib/libc_r/uthread/uthread_suspend_np.c index 632dbf5169b..daeb60a661a 100644 --- a/lib/libc_r/uthread/uthread_suspend_np.c +++ b/lib/libc_r/uthread/uthread_suspend_np.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_suspend_np.c,v 1.2 1999/01/06 05:29:29 d Exp $ + * $OpenBSD: uthread_suspend_np.c,v 1.3 1999/05/26 00:18:26 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE @@ -52,8 +52,21 @@ pthread_suspend_np(pthread_t thread) thread->interrupted = 1; } + /* + * Guard against preemption by a scheduling signal. + * A change of thread state modifies the waiting + * and priority queues. + */ + _thread_kern_sched_defer(); + /* Suspend the thread. */ PTHREAD_NEW_STATE(thread,PS_SUSPENDED); + + /* + * Reenable preemption and yield if a scheduling signal + * occurred while in the critical region. + */ + _thread_kern_sched_undefer(); } return(ret); } diff --git a/lib/libc_r/uthread/uthread_switch_np.c b/lib/libc_r/uthread/uthread_switch_np.c new file mode 100644 index 00000000000..598edacd2bb --- /dev/null +++ b/lib/libc_r/uthread/uthread_switch_np.c @@ -0,0 +1,70 @@ +/* $OpenBSD: uthread_switch_np.c,v 1.1 1999/05/26 00:18:26 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
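The spinlock changes above replace the sched_yield() retry with blocking the thread in PS_SPINBLOCK, since with userland threads the lock holder cannot make progress while another thread spins. For reference, a hypothetical analogue of the _atomic_lock() test-and-set loop written with C11 atomics (not the library's machine-dependent implementation):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag lock = ATOMIC_FLAG_INIT;

static void toy_spin_lock(void)
{
    /* Test-and-set returns the previous value, so a nonzero result means
     * the lock was already held and we must retry. */
    while (atomic_flag_test_and_set(&lock)) {
        /* a userland-threads implementation would block/yield here */
    }
}

static void toy_spin_unlock(void)
{
    atomic_flag_clear(&lock);
}

int main(void)
{
    toy_spin_lock();
    printf("lock held\n");
    toy_spin_unlock();
    return 0;
}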
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include <pthread_np.h> +#include "pthread_private.h" + + +int +pthread_switch_add_np(pthread_switch_routine_t routine) +{ + int ret = 0; + + if (routine == NULL) + /* Return an invalid argument error: */ + ret = EINVAL; + else + /* Shouldn't need a lock to protect this assigment. */ + _sched_switch_hook = routine; + + return(ret); +} + +int +pthread_switch_delete_np(pthread_switch_routine_t routine) +{ + int ret = 0; + + if (routine != _sched_switch_hook) + /* Return an invalid argument error: */ + ret = EINVAL; + else + /* Shouldn't need a lock to protect this assigment. */ + _sched_switch_hook = NULL; + + return(ret); +} +#endif diff --git a/lib/libpthread/include/pthread.h b/lib/libpthread/include/pthread.h index 1d997eb7e0b..41b198b67b6 100644 --- a/lib/libpthread/include/pthread.h +++ b/lib/libpthread/include/pthread.h @@ -30,7 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: pthread.h,v 1.6 1999/03/10 10:00:47 d Exp $ + * $OpenBSD: pthread.h,v 1.7 1999/05/26 00:17:41 d Exp $ * */ #ifndef _PTHREAD_H_ @@ -65,9 +65,9 @@ #define _POSIX_THREAD_ATTR_STACKADDR #define _POSIX_THREAD_ATTR_STACKSIZE -/* #define _POSIX_THREAD_PRIORITY_SCHEDULING */ -/* #define _POSIX_THREAD_PRIO_INHERIT */ -/* #define _POSIX_THREAD_PRIO_PROTECT */ +#define _POSIX_THREAD_PRIORITY_SCHEDULING +#define _POSIX_THREAD_PRIO_INHERIT +#define _POSIX_THREAD_PRIO_PROTECT /* #define _POSIX_THREAD_PROCESS_SHARED */ #define _POSIX_THREAD_SAFE_FUNCTIONS @@ -164,19 +164,37 @@ struct pthread_once { /* * Static initialization values. */ -#define PTHREAD_MUTEX_INITIALIZER ((pthread_mutex_t) NULL) -#define PTHREAD_COND_INITIALIZER ((pthread_cond_t) NULL) -#define PTHREAD_RWLOCK_INITIALIZER ((pthread_rwlock_t) NULL) +#define PTHREAD_MUTEX_INITIALIZER NULL +#define PTHREAD_COND_INITIALIZER NULL +#define PTHREAD_RWLOCK_INITIALIZER NULL +#define PTHREAD_PRIO_NONE 0 +#ifdef _POSIX_THREAD_PRIO_PROTECT +#define PTHREAD_PRIO_INHERIT 1 +#define PTHREAD_PRIO_PROTECT 2 +#endif + +/* + * Mutex types (Single UNIX Specification, Version 2, 1997). + * + * Note that a mutex attribute with one of the following types: + * + * PTHREAD_MUTEX_NORMAL + * PTHREAD_MUTEX_RECURSIVE + * MUTEX_TYPE_FAST (deprecated) + * MUTEX_TYPE_COUNTING_FAST (deprecated) + * + * will deviate from POSIX specified semantics. 
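The mutex-type comment above notes that any type other than the error-checking default deviates from strict POSIX semantics. A short example of selecting the recursive type through the attribute interface declared in this header:

#include <pthread.h>

static void
use_recursive_mutex(void)
{
	pthread_mutex_t m;
	pthread_mutexattr_t ma;

	pthread_mutexattr_init(&ma);
	pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE);
	pthread_mutex_init(&m, &ma);
	pthread_mutexattr_destroy(&ma);

	pthread_mutex_lock(&m);
	pthread_mutex_lock(&m);		/* relocking is legal for the recursive type */
	pthread_mutex_unlock(&m);
	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
}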
+ */ enum pthread_mutextype { - PTHREAD_MUTEX_DEFAULT = 1, - PTHREAD_MUTEX_RECURSIVE = 2, - PTHREAD_MUTEX_NORMAL = 3, - PTHREAD_MUTEX_ERRORCHECK = 4, + PTHREAD_MUTEX_ERRORCHECK = 1, /* Default POSIX mutex */ + PTHREAD_MUTEX_RECURSIVE = 2, /* Recursive mutex */ + PTHREAD_MUTEX_NORMAL = 3, /* No error checking */ MUTEX_TYPE_MAX }; -#define MUTEX_TYPE_FAST PTHREAD_MUTEX_DEFAULT +#define PTHREAD_MUTEX_DEFAULT PTHREAD_MUTEX_ERRORCHECK +#define MUTEX_TYPE_FAST PTHREAD_MUTEX_NORMAL #define MUTEX_TYPE_COUNTING_FAST PTHREAD_MUTEX_RECURSIVE /* @@ -185,20 +203,10 @@ enum pthread_mutextype { __BEGIN_DECLS int pthread_atfork(void (*)(void), void (*)(void), void (*)(void)); int pthread_attr_destroy __P((pthread_attr_t *)); -int pthread_attr_getinheritsched __P((const pthread_attr_t *, int *)); -int pthread_attr_getschedparam __P((const pthread_attr_t *, - struct sched_param *)); -int pthread_attr_getschedpolicy __P((const pthread_attr_t *, int *)); -int pthread_attr_getscope __P((const pthread_attr_t *, int *)); int pthread_attr_getstacksize __P((pthread_attr_t *, size_t *)); int pthread_attr_getstackaddr __P((pthread_attr_t *, void **)); int pthread_attr_getdetachstate __P((pthread_attr_t *, int *)); int pthread_attr_init __P((pthread_attr_t *)); -int pthread_attr_setinheritsched __P((pthread_attr_t *, int)); -int pthread_attr_setschedparam __P((pthread_attr_t *, - const struct sched_param *)); -int pthread_attr_setschedpolicy __P((pthread_attr_t *, int)); -int pthread_attr_setscope __P((pthread_attr_t *, int)); int pthread_attr_setstacksize __P((pthread_attr_t *, size_t)); int pthread_attr_setstackaddr __P((pthread_attr_t *, void *)); int pthread_attr_setdetachstate __P((pthread_attr_t *, int)); @@ -208,10 +216,14 @@ void pthread_cleanup_push __P((void (*routine) (void *), void *routine_arg)); int pthread_condattr_destroy __P((pthread_condattr_t *attr)); int pthread_condattr_init __P((pthread_condattr_t *attr)); + +#if defined(_POSIX_THREAD_PROCESS_SHARED) int pthread_condattr_getpshared __P((const pthread_condattr_t *attr, int *pshared)); int pthread_condattr_setpshared __P((pthread_condattr_t *attr, int pshared)); +#endif + int pthread_cond_broadcast __P((pthread_cond_t *)); int pthread_cond_destroy __P((pthread_cond_t *)); int pthread_cond_init __P((pthread_cond_t *, @@ -231,27 +243,13 @@ int pthread_key_create __P((pthread_key_t *, void (*routine) (void *))); int pthread_key_delete __P((pthread_key_t)); int pthread_kill __P((struct pthread *, int)); -int pthread_mutexattr_destroy __P((pthread_mutexattr_t *)); -int pthread_mutexattr_getprioceiling __P((pthread_mutexattr_t *, - int *prioceiling)); -int pthread_mutexattr_getprotocol __P((pthread_mutexattr_t *, - int *protocol)); -int pthread_mutexattr_getpshared __P((pthread_mutexattr_t *, - int *pshared)); int pthread_mutexattr_init __P((pthread_mutexattr_t *)); -int pthread_mutexattr_setprioceiling __P((pthread_mutexattr_t *, - int prioceiling)); -int pthread_mutexattr_setprotocol __P((pthread_mutexattr_t *, - int protocol)); -int pthread_mutexattr_setpshared __P((pthread_mutexattr_t *, - int pshared)); +int pthread_mutexattr_destroy __P((pthread_mutexattr_t *)); int pthread_mutexattr_settype __P((pthread_mutexattr_t *, int)); int pthread_mutex_destroy __P((pthread_mutex_t *)); -int pthread_mutex_getprioceiling __P((pthread_mutex_t *)); int pthread_mutex_init __P((pthread_mutex_t *, const pthread_mutexattr_t *)); int pthread_mutex_lock __P((pthread_mutex_t *)); -int pthread_mutex_setprioceiling __P((pthread_mutex_t *)); int pthread_mutex_trylock 
__P((pthread_mutex_t *)); int pthread_mutex_unlock __P((pthread_mutex_t *)); int pthread_once __P((pthread_once_t *, @@ -281,10 +279,48 @@ void pthread_testcancel __P((void)); int pthread_getprio __P((pthread_t)); int pthread_setprio __P((pthread_t, int)); void pthread_yield __P((void)); -int pthread_setschedparam __P((pthread_t pthread, int policy, - const struct sched_param * param)); + +#if defined(_POSIX_THREAD_PROCESS_SHARED) +int pthread_mutexattr_getpshared __P((pthread_mutexattr_t *, + int *pshared)); +int pthread_mutexattr_setpshared __P((pthread_mutexattr_t *, + int pshared)); +#endif + +#if defined(_POSIX_THREAD_PRIO_PROTECT) +int pthread_mutexattr_getprioceiling __P((pthread_mutexattr_t *, + int *prioceiling)); +int pthread_mutexattr_setprioceiling __P((pthread_mutexattr_t *, + int prioceiling)); +int pthread_mutex_getprioceiling __P((pthread_mutex_t *, int *)); +int pthread_mutex_setprioceiling __P((pthread_mutex_t *, int, + int *)); +#endif + +#if defined(_POSIX_THREAD_PRIO_PROTECT) || defined (_POSIX_THREAD_PRIO_INHERIT) +int pthread_mutexattr_getprotocol __P((pthread_mutexattr_t *, + int *protocol)); +int pthread_mutexattr_setprotocol __P((pthread_mutexattr_t *, + int protocol)); +#endif + +#if defined(_POSIX_THREAD_PRIORITY_SCHEDULING) +int pthread_attr_getinheritsched __P((const pthread_attr_t *, int *)); +int pthread_attr_getschedparam __P((const pthread_attr_t *, + struct sched_param *)); +int pthread_attr_getschedpolicy __P((const pthread_attr_t *, int *)); +int pthread_attr_getscope __P((const pthread_attr_t *, int *)); +int pthread_attr_setinheritsched __P((pthread_attr_t *, int)); +int pthread_attr_setschedparam __P((pthread_attr_t *, + const struct sched_param *)); +int pthread_attr_setschedpolicy __P((pthread_attr_t *, int)); +int pthread_attr_setscope __P((pthread_attr_t *, int)); int pthread_getschedparam __P((pthread_t pthread, int *policy, struct sched_param * param)); +int pthread_setschedparam __P((pthread_t pthread, int policy, + const struct sched_param * param)); +#endif + int pthread_attr_setfloatstate __P((pthread_attr_t *, int)); int pthread_attr_getfloatstate __P((pthread_attr_t *, int *)); int pthread_attr_setcleanup __P((pthread_attr_t *, diff --git a/lib/libpthread/include/pthread_np.h b/lib/libpthread/include/pthread_np.h index 51cc4e23ee1..6f8b7b7ef44 100644 --- a/lib/libpthread/include/pthread_np.h +++ b/lib/libpthread/include/pthread_np.h @@ -1,3 +1,4 @@ +/* $OpenBSD: pthread_np.h,v 1.2 1999/05/26 00:17:41 d Exp $ */ /* * Copyright (c) 1996-98 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -29,12 +30,16 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
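With _POSIX_THREAD_PRIORITY_SCHEDULING now defined, the scheduling-attribute calls above are always declared. A sketch of starting a thread with an explicit round-robin policy; SCHED_RR comes from <sched.h>, and the worker function and priority value are arbitrary examples:

#include <pthread.h>
#include <sched.h>

static void *
worker(void *arg)
{
	return (arg);		/* illustrative thread body */
}

static int
start_rr_thread(pthread_t *tid)
{
	pthread_attr_t attr;
	struct sched_param param;

	pthread_attr_init(&attr);
	pthread_attr_setschedpolicy(&attr, SCHED_RR);
	param.sched_priority = 10;	/* arbitrary example priority */
	pthread_attr_setschedparam(&attr, &param);

	return (pthread_create(tid, &attr, worker, NULL));
}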
* - * $OpenBSD: pthread_np.h,v 1.1 1998/09/05 07:40:47 d Exp $ */ #ifndef _PTHREAD_NP_H_ #define _PTHREAD_NP_H_ /* + * Non-POSIX type definitions: + */ +typedef void (*pthread_switch_routine_t) __P((pthread_t, pthread_t)); + +/* * Non-POSIX thread function prototype definitions: */ __BEGIN_DECLS @@ -46,6 +51,8 @@ int pthread_suspend_np __P((pthread_t)); int pthread_mutexattr_getkind_np __P((pthread_mutexattr_t attr)); int pthread_mutexattr_setkind_np __P((pthread_mutexattr_t *attr, int kind)); void pthread_set_name_np __P((pthread_t, char *)); +int pthread_switch_add_np (pthread_switch_routine_t routine); +int pthread_switch_delete_np (pthread_switch_routine_t routine); __END_DECLS #endif diff --git a/lib/libpthread/uthread/Makefile.inc b/lib/libpthread/uthread/Makefile.inc index e525110ebb3..22bfb740e07 100644 --- a/lib/libpthread/uthread/Makefile.inc +++ b/lib/libpthread/uthread/Makefile.inc @@ -1,5 +1,5 @@ -# $Id: Makefile.inc,v 1.6 1999/01/18 00:00:32 d Exp $ -# $OpenBSD: Makefile.inc,v 1.6 1999/01/18 00:00:32 d Exp $ +# $Id: Makefile.inc,v 1.7 1999/05/26 00:18:21 d Exp $ +# $OpenBSD: Makefile.inc,v 1.7 1999/05/26 00:18:21 d Exp $ # uthread sources .PATH: ${.CURDIR}/uthread @@ -12,11 +12,19 @@ SRCS+= \ uthread_attr_destroy.c \ uthread_attr_init.c \ uthread_attr_getdetachstate.c \ + uthread_attr_getinheritsched.c \ + uthread_attr_getschedparam.c \ + uthread_attr_getschedpolicy.c \ + uthread_attr_getscope.c \ + uthread_attr_setstackaddr.c \ uthread_attr_getstackaddr.c \ uthread_attr_getstacksize.c \ - uthread_attr_priosched.c \ uthread_attr_setcreatesuspend_np.c \ uthread_attr_setdetachstate.c \ + uthread_attr_setinheritsched.c \ + uthread_attr_setschedparam.c \ + uthread_attr_setschedpolicy.c \ + uthread_attr_setscope.c \ uthread_attr_setstackaddr.c \ uthread_attr_setstacksize.c \ uthread_autoinit.c \ @@ -53,6 +61,7 @@ SRCS+= \ uthread_getdirentries.c \ uthread_getpeername.c \ uthread_getprio.c \ + uthread_getschedparam.c \ uthread_getsockname.c \ uthread_getsockopt.c \ uthread_info.c \ @@ -67,11 +76,14 @@ SRCS+= \ uthread_msync.c \ uthread_multi_np.c \ uthread_mutex.c \ + uthread_mutex_prioceiling.c \ + uthread_mutex_protocol.c \ uthread_mutexattr_destroy.c \ uthread_nanosleep.c \ uthread_once.c \ uthread_open.c \ uthread_pipe.c \ + uthread_priority_queue.c \ uthread_poll.c \ uthread_queue.c \ uthread_read.c \ @@ -87,6 +99,7 @@ SRCS+= \ uthread_sendto.c \ uthread_seterrno.c \ uthread_setprio.c \ + uthread_setschedparam.c \ uthread_setsockopt.c \ uthread_shutdown.c \ uthread_sig.c \ @@ -94,6 +107,7 @@ SRCS+= \ uthread_sigaltstack.c \ uthread_sigblock.c \ uthread_sigmask.c \ + uthread_sigpending.c \ uthread_sigprocmask.c \ uthread_sigsetmask.c \ uthread_sigsuspend.c \ @@ -104,6 +118,7 @@ SRCS+= \ uthread_spec.c \ uthread_spinlock.c \ uthread_suspend_np.c \ + uthread_switch_np.c \ uthread_vfork.c \ uthread_wait4.c \ uthread_write.c \ diff --git a/lib/libpthread/uthread/pthread_private.h b/lib/libpthread/uthread/pthread_private.h index 529c647fa57..7643b99a19c 100644 --- a/lib/libpthread/uthread/pthread_private.h +++ b/lib/libpthread/uthread/pthread_private.h @@ -31,7 +31,7 @@ * * Private thread definitions for the uthread kernel. 
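pthread_np.h above gains pthread_switch_routine_t along with the add/delete hook calls implemented earlier in this commit. A hedged sketch of installing a switch hook; the (outgoing, incoming) argument order is an assumption about how the scheduler invokes it:

#include <pthread.h>
#include <pthread_np.h>

static unsigned long switch_count;	/* illustrative counter, not part of the library */

/*
 * Signature matches pthread_switch_routine_t; the (outgoing, incoming)
 * argument order is assumed, not confirmed by this hunk.
 */
static void
note_switch(pthread_t out, pthread_t in)
{
	(void)out;
	(void)in;
	switch_count++;
}

static int
install_switch_hook(void)
{
	/* Returns EINVAL for a NULL routine, 0 on success. */
	return (pthread_switch_add_np(note_switch));
}

static int
remove_switch_hook(void)
{
	/* The argument must be the routine currently installed. */
	return (pthread_switch_delete_np(note_switch));
}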
* - * $OpenBSD: pthread_private.h,v 1.13 1999/02/16 16:44:07 millert Exp $ + * $OpenBSD: pthread_private.h,v 1.14 1999/05/26 00:18:21 d Exp $ * */ @@ -50,10 +50,17 @@ #include <sys/time.h> #include <sched.h> #include <spinlock.h> +#include <pthread_np.h> #ifndef _NO_UTHREAD_MACHDEP #include "uthread_machdep.h" #endif +#ifdef __OpenBSD__ +/* Steal TAILQ_FOREACH from FreeBSD's <sys/queue.h> */ +#define TAILQ_FOREACH(var, head, field) \ + for (var = TAILQ_FIRST(head); var; var = TAILQ_NEXT(var, field)) +#endif + /* * Kernel fatal error handler macro. */ @@ -63,16 +70,59 @@ #define stdout_debug(_x) _thread_sys_write(1,_x,strlen(_x)); #define stderr_debug(_x) _thread_sys_write(2,_x,strlen(_x)); + /* - * State change macro: + * Priority queue manipulation macros: */ -#define PTHREAD_NEW_STATE(thrd, newstate) { \ +#define PTHREAD_PRIOQ_INSERT_HEAD(thrd) _pq_insert_head(&_readyq,thrd) +#define PTHREAD_PRIOQ_INSERT_TAIL(thrd) _pq_insert_tail(&_readyq,thrd) +#define PTHREAD_PRIOQ_REMOVE(thrd) _pq_remove(&_readyq,thrd) +#define PTHREAD_PRIOQ_FIRST _pq_first(&_readyq) + +/* + * Waiting queue manipulation macros: + */ +#define PTHREAD_WAITQ_INSERT(thrd) TAILQ_INSERT_TAIL(&_waitingq,thrd,pqe) +#define PTHREAD_WAITQ_REMOVE(thrd) TAILQ_REMOVE(&_waitingq,thrd,pqe) + +/* + * State change macro without scheduling queue change: + */ +#define PTHREAD_SET_STATE(thrd, newstate) { \ (thrd)->state = newstate; \ (thrd)->fname = __FILE__; \ (thrd)->lineno = __LINE__; \ } /* + * State change macro with scheduling queue change - This must be + * called with preemption deferred (see thread_kern_sched_[un]defer). + */ +#define PTHREAD_NEW_STATE(thrd, newstate) { \ + if ((thrd)->state != newstate) { \ + if ((thrd)->state == PS_RUNNING) { \ + PTHREAD_PRIOQ_REMOVE(thrd); \ + PTHREAD_WAITQ_INSERT(thrd); \ + } else if (newstate == PS_RUNNING) { \ + PTHREAD_WAITQ_REMOVE(thrd); \ + PTHREAD_PRIOQ_INSERT_TAIL(thrd); \ + } \ + } \ + PTHREAD_SET_STATE(thrd, newstate); \ +} + +/* + * Define the signals to be used for scheduling. + */ +#if defined(_PTHREADS_COMPAT_SCHED) +#define _ITIMER_SCHED_TIMER ITIMER_VIRTUAL +#define _SCHED_SIGNAL SIGVTALRM +#else +#define _ITIMER_SCHED_TIMER ITIMER_PROF +#define _SCHED_SIGNAL SIGPROF +#endif + +/* * Queue definitions. */ struct pthread_queue { @@ -82,10 +132,34 @@ struct pthread_queue { }; /* + * Priority queues. + * + * XXX It'd be nice if these were contained in uthread_priority_queue.[ch]. + */ +typedef struct pq_list { + TAILQ_HEAD(, pthread) pl_head; /* list of threads at this priority */ + TAILQ_ENTRY(pq_list) pl_link; /* link for queue of priority lists */ + int pl_prio; /* the priority of this list */ + int pl_queued; /* is this in the priority queue */ +} pq_list_t; + +typedef struct pq_queue { + TAILQ_HEAD(, pq_list) pq_queue; /* queue of priority lists */ + pq_list_t *pq_lists; /* array of all priority lists */ + int pq_size; /* number of priority lists */ +} pq_queue_t; + + +/* * Static queue initialization values. */ #define PTHREAD_QUEUE_INITIALIZER { NULL, NULL, NULL } +/* + * TailQ initialization values. + */ +#define TAILQ_INITIALIZER { NULL, NULL } + /* * Mutex definitions. */ @@ -96,10 +170,31 @@ union pthread_mutex_data { struct pthread_mutex { enum pthread_mutextype m_type; - struct pthread_queue m_queue; + int m_protocol; + TAILQ_HEAD(mutex_head, pthread) m_queue; struct pthread *m_owner; union pthread_mutex_data m_data; long m_flags; + int m_refcount; + + /* + * Used for priority inheritence and protection. 
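uthread_priority_queue.c itself is not part of this hunk, so the following is only a plausible reconstruction of how _pq_first() could walk the pq_queue_t/pq_list_t structures defined above; the name and details are inferred from the fields, not taken from the new file:

#include "pthread_private.h"

/* Hypothetical reconstruction; the real code lives in uthread_priority_queue.c. */
static struct pthread *
pq_first_sketch(pq_queue_t *pq)
{
	pq_list_t *pql;
	struct pthread *pthread = NULL;

	TAILQ_FOREACH(pql, &pq->pq_queue, pl_link) {
		if ((pthread = TAILQ_FIRST(&pql->pl_head)) != NULL)
			break;		/* first non-empty list is the highest priority */
	}
	return (pthread);
}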
+ * + * m_prio - For priority inheritence, the highest active + * priority (threads locking the mutex inherit + * this priority). For priority protection, the + * ceiling priority of this mutex. + * m_saved_prio - mutex owners inherited priority before + * taking the mutex, restored when the owner + * unlocks the mutex. + */ + int m_prio; + int m_saved_prio; + + /* + * Link for list of all mutexes a thread currently owns. + */ + TAILQ_ENTRY(pthread_mutex) m_qe; /* * Lock for accesses to this structure. @@ -118,11 +213,13 @@ struct pthread_mutex { * Static mutex initialization values. */ #define PTHREAD_MUTEX_STATIC_INITIALIZER \ - { MUTEX_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, \ - NULL, { NULL }, MUTEX_FLAGS_INITED } + { PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \ + NULL, { NULL }, MUTEX_FLAGS_INITED, 0, 0, 0, TAILQ_INITIALIZER } struct pthread_mutex_attr { enum pthread_mutextype m_type; + int m_protocol; + int m_ceiling; long m_flags; }; @@ -135,15 +232,16 @@ enum pthread_cond_type { }; struct pthread_cond { - enum pthread_cond_type c_type; - struct pthread_queue c_queue; - void *c_data; - long c_flags; + enum pthread_cond_type c_type; + TAILQ_HEAD(cond_head, pthread) c_queue; + pthread_mutex_t c_mutex; + void *c_data; + long c_flags; /* * Lock for accesses to this structure. */ - spinlock_t lock; + spinlock_t lock; }; struct pthread_cond_attr { @@ -162,7 +260,8 @@ struct pthread_cond_attr { * Static cond initialization values. */ #define PTHREAD_COND_STATIC_INITIALIZER \ - { COND_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, NULL, COND_FLAGS_INITED } + { COND_TYPE_FAST, PTHREAD_QUEUE_INITIALIZER, NULL, NULL, \ + COND_FLAGS_INITED } /* * Cleanup definitions. @@ -174,7 +273,9 @@ struct pthread_cleanup { }; struct pthread_attr { - int schedparam_policy; + int sched_policy; + int sched_inherit; + int sched_interval; int prio; int suspend; int flags; @@ -256,9 +357,11 @@ enum pthread_state { PS_WAIT_WAIT, PS_SIGSUSPEND, PS_SIGWAIT, + PS_SPINBLOCK, PS_JOIN, PS_SUSPENDED, PS_DEAD, + PS_DEADLOCK, PS_STATE_MAX }; @@ -302,8 +405,8 @@ struct pthread_select_data { }; union pthread_wait_data { - pthread_mutex_t *mutex; - pthread_cond_t *cond; + pthread_mutex_t mutex; + pthread_cond_t cond; const sigset_t *sigwait; /* Waiting on a signal in sigwait */ struct { short fd; /* Used when thread waiting on fd */ @@ -311,6 +414,7 @@ union pthread_wait_data { const char *fname; /* Source file name for debugging.*/ } fd; struct pthread_select_data * select_data; + spinlock_t *spinlock; }; /* @@ -419,7 +523,11 @@ struct pthread { struct pthread_queue join_queue; /* - * The current thread can belong to only one queue at a time. + * The current thread can belong to only one scheduling queue + * at a time (ready or waiting queue). It can also belong to + * a queue of threads waiting on mutexes or condition variables. + * Use pqe for the scheduling queue link (both ready and waiting), + * and qe for other links (mutexes and condition variables). * * Pointer to queue (if any) on which the current thread is waiting. * @@ -431,8 +539,11 @@ struct pthread { /* Pointer to next element in queue. */ struct pthread *qnxt; + /* Priority queue entry for this thread: */ + TAILQ_ENTRY(pthread) pqe; + /* Queue entry for this thread: */ - TAILQ_ENTRY(pthread) qe; + TAILQ_ENTRY(pthread) qe; /* Wait data. */ union pthread_wait_data data; @@ -446,12 +557,61 @@ struct pthread { /* Signal number when in state PS_SIGWAIT: */ int signo; + /* + * Set to non-zero when this thread has deferred thread + * scheduling. 
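The m_prio/m_saved_prio fields described earlier in this structure back both priority inheritance and priority ceilings. A hedged example of requesting the ceiling (protection) protocol through the attribute calls declared in pthread.h earlier in this commit; the ceiling value is arbitrary:

#include <pthread.h>

static int
init_ceiling_mutex(pthread_mutex_t *m)
{
	pthread_mutexattr_t ma;
	int error;

	pthread_mutexattr_init(&ma);
	pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_PROTECT);
	pthread_mutexattr_setprioceiling(&ma, 10);	/* arbitrary example ceiling */
	error = pthread_mutex_init(m, &ma);
	pthread_mutexattr_destroy(&ma);
	return (error);
}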
We allow for recursive deferral. + */ + int sched_defer_count; + + /* + * Set to TRUE if this thread should yield after undeferring + * thread scheduling. + */ + int yield_on_sched_undefer; + /* Miscellaneous data. */ int flags; -#define PTHREAD_EXITING (0x0100) -#define PTHREAD_CANCELLING (0x0200) /* thread has been cancelled */ -#define PTHREAD_AT_CANCEL_POINT (0x0400) /* thread at cancel point */ - char pthread_priority; +#define PTHREAD_FLAGS_PRIVATE 0x0001 +#define PTHREAD_EXITING 0x0002 +#define PTHREAD_FLAGS_QUEUED 0x0004 /* in queue (qe is used) */ +#define PTHREAD_FLAGS_TRACE 0x0008 +#define PTHREAD_CANCELLING 0x0010 /* thread has been cancelled */ +#define PTHREAD_AT_CANCEL_POINT 0x0020 /* thread at cancel point */ + + /* + * Base priority is the user setable and retrievable priority + * of the thread. It is only affected by explicit calls to + * set thread priority and upon thread creation via a thread + * attribute or default priority. + */ + char base_priority; + + /* + * Inherited priority is the priority a thread inherits by + * taking a priority inheritence or protection mutex. It + * is not affected by base priority changes. Inherited + * priority defaults to and remains 0 until a mutex is taken + * that is being waited on by any other thread whose priority + * is non-zero. + */ + char inherited_priority; + + /* + * Active priority is always the maximum of the threads base + * priority and inherited priority. When there is a change + * in either the real or inherited priority, the active + * priority must be recalculated. + */ + char active_priority; + + /* Number of priority ceiling or protection mutexes owned. */ + int priority_mutex_count; + + /* + * Queue of currently owned mutexes. + */ + TAILQ_HEAD(, pthread_mutex) mutexq; + void *ret; const void **specific_data; int specific_data_count; @@ -484,6 +644,9 @@ extern struct pthread * volatile _thread_kern_threadp; /* Ptr to the thread structure for the running thread: */ extern struct pthread * volatile _thread_run; +/* Ptr to the thread structure for the last user thread to run: */ +extern struct pthread * volatile _last_user_thread; + /* * Ptr to the thread running in single-threaded mode or NULL if * running multi-threaded (default POSIX behaviour). @@ -528,6 +691,7 @@ extern int _pthread_stdio_flags[3]; /* File table information: */ extern struct fd_table_entry **_thread_fd_table; +extern const int dtablecount; extern int _thread_dtablesize; /* Garbage collector mutex and condition variable. */ @@ -540,6 +704,19 @@ extern pthread_cond_t _gc_cond; extern struct sigaction _thread_sigact[NSIG]; /* + * Scheduling queues: + */ +extern pq_queue_t _readyq; +typedef TAILQ_HEAD(, pthread) _waitingq_t; +extern _waitingq_t _waitingq; + +/* Indicates that the waitingq now has threads ready to run. */ +extern volatile int _waitingq_check_reqd; + +/* Thread switch hook. 
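The comments above define active_priority as the maximum of base_priority and inherited_priority, recalculated whenever either one changes. As a small illustration (not library code):

#include "pthread_private.h"

/* Illustration only: recompute a thread's active priority. */
static void
recalc_active_priority(struct pthread *pthread)
{
	pthread->active_priority =
	    (pthread->base_priority > pthread->inherited_priority) ?
	    pthread->base_priority : pthread->inherited_priority;
}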
*/ +extern pthread_switch_routine_t _sched_switch_hook; + +/* * Where SIGINFO writes thread states when /dev/tty cannot be opened */ #define INFO_DUMP_FILE "/tmp/uthread.dump" @@ -569,6 +746,14 @@ void _lock_thread(void); void _lock_thread_list(void); void _unlock_thread(void); void _unlock_thread_list(void); +int _mutex_cv_lock(pthread_mutex_t *); +int _mutex_cv_unlock(pthread_mutex_t *); +void _mutex_notify_priochange(struct pthread *); +int _pq_init(struct pq_queue *pq, int, int); +void _pq_remove(struct pq_queue *pq, struct pthread *); +void _pq_insert_head(struct pq_queue *pq, struct pthread *); +void _pq_insert_tail(struct pq_queue *pq, struct pthread *); +struct pthread *_pq_first(struct pq_queue *pq); void _thread_exit(const char *, int, const char *) __attribute__((noreturn)); void _thread_fd_unlock(int, int); @@ -576,12 +761,14 @@ void _thread_fd_unlock_debug(int, int, char *, int); void *_thread_cleanup(pthread_t); void _thread_cleanupspecific(void); void _thread_dump_info(void); -void _thread_init(void) /* __attribute__((constructor)) */; +void _thread_init(void); void _thread_kern_sched(struct sigcontext *); void _thread_kern_sched_state(enum pthread_state, const char *, int); void _thread_kern_sched_state_unlock(enum pthread_state state, spinlock_t *lock, char *fname, int lineno); void _thread_kern_set_timeout(struct timespec *); +void _thread_kern_sched_defer(void); +void _thread_kern_sched_undefer(void); void _thread_sig_handler(int, int, struct sigcontext *); void _thread_start(void); void _thread_start_sig_handler(void); @@ -723,7 +910,7 @@ int _thread_sys_fchdir(int); int _thread_sys_fchown(int, uid_t, gid_t); int _thread_sys_fsync(int); int _thread_sys_ftruncate(int, off_t); -long _thread_sys_fpathconf(int, int); +long _thread_sys_fpathconf(int, int); int _thread_sys_pause(void); int _thread_sys_pipe(int *); int _thread_sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *); diff --git a/lib/libpthread/uthread/uthread_attr_getinheritsched.c b/lib/libpthread/uthread/uthread_attr_getinheritsched.c new file mode 100644 index 00000000000..54294c2e74b --- /dev/null +++ b/lib/libpthread/uthread/uthread_attr_getinheritsched.c @@ -0,0 +1,51 @@ +/* $OpenBSD: uthread_attr_getinheritsched.c,v 1.1 1999/05/26 00:18:22 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
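With the TAILQ_FOREACH compatibility macro and the _waitingq list introduced above, internal code can walk the waiting queue directly over the pqe links. An illustrative sketch, not taken from the scheduler:

#include "pthread_private.h"

/* Sketch: count waiting threads that have a finite wakeup time. */
static int
count_timed_waiters(void)
{
	struct pthread *pthread;
	int n = 0;

	TAILQ_FOREACH(pthread, &_waitingq, pqe) {
		if (pthread->wakeup_time.tv_sec != -1)
			n++;
	}
	return (n);
}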
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_getinheritsched(const pthread_attr_t *attr, int *sched_inherit) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL)) + ret = EINVAL; + else + *sched_inherit = (*attr)->sched_inherit; + + return(ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_attr_getschedparam.c b/lib/libpthread/uthread/uthread_attr_getschedparam.c new file mode 100644 index 00000000000..1a51d23b069 --- /dev/null +++ b/lib/libpthread/uthread/uthread_attr_getschedparam.c @@ -0,0 +1,51 @@ +/* $OpenBSD: uthread_attr_getschedparam.c,v 1.1 1999/05/26 00:18:22 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || (param == NULL)) + ret = EINVAL; + else + param->sched_priority = (*attr)->prio; + + return(ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_attr_getschedpolicy.c b/lib/libpthread/uthread/uthread_attr_getschedpolicy.c new file mode 100644 index 00000000000..54977df53a0 --- /dev/null +++ b/lib/libpthread/uthread/uthread_attr_getschedpolicy.c @@ -0,0 +1,51 @@ +/* $OpenBSD: uthread_attr_getschedpolicy.c,v 1.1 1999/05/26 00:18:22 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || (policy == NULL)) + ret = EINVAL; + else + *policy = (*attr)->sched_policy; + + return(ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_attr_getscope.c b/lib/libpthread/uthread/uthread_attr_getscope.c new file mode 100644 index 00000000000..5dd4772ae46 --- /dev/null +++ b/lib/libpthread/uthread/uthread_attr_getscope.c @@ -0,0 +1,54 @@ +/* $OpenBSD: uthread_attr_getscope.c,v 1.1 1999/05/26 00:18:22 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_getscope(const pthread_attr_t *attr, int *contentionscope) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || (contentionscope == NULL)) + /* Return an invalid argument: */ + ret = EINVAL; + + else + *contentionscope = (*attr)->flags & PTHREAD_SCOPE_SYSTEM ? + PTHREAD_SCOPE_SYSTEM : PTHREAD_SCOPE_PROCESS; + + return(ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_attr_priosched.c b/lib/libpthread/uthread/uthread_attr_priosched.c deleted file mode 100644 index cc415768165..00000000000 --- a/lib/libpthread/uthread/uthread_attr_priosched.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * David Leonard <d@openbsd.org>, 1998. Public Domain. 
- * - * $OpenBSD: uthread_attr_priosched.c,v 1.1 1999/01/18 00:06:56 d Exp $ - */ -#include <errno.h> -#ifdef _THREAD_SAFE -#include <pthread.h> -#include "pthread_private.h" - -int -pthread_attr_setscope(attr, contentionscope) - pthread_attr_t *attr; - int contentionscope; -{ - - return (ENOSYS); -} - -int -pthread_attr_getscope(attr, contentionscope) - const pthread_attr_t *attr; - int *contentionscope; -{ - - return (ENOSYS); -} - -int -pthread_attr_setinheritsched(attr, inheritsched) - pthread_attr_t *attr; - int inheritsched; -{ - - return (ENOSYS); -} - -int -pthread_attr_getinheritsched(attr, inheritsched) - const pthread_attr_t *attr; - int *inheritsched; -{ - - return (ENOSYS); -} - -int -pthread_attr_setschedpolicy(attr, policy) - pthread_attr_t *attr; - int policy; -{ - - return (ENOSYS); -} - -int -pthread_attr_getschedpolicy(attr, policy) - const pthread_attr_t *attr; - int *policy; -{ - - return (ENOSYS); -} - -int -pthread_attr_setschedparam(attr, param) - pthread_attr_t *attr; - const struct sched_param *param; -{ - - return (ENOSYS); -} - -int -pthread_attr_getschedparam(attr, param) - const pthread_attr_t *attr; - struct sched_param *param; -{ - - return (ENOSYS); -} -#endif diff --git a/lib/libpthread/uthread/uthread_attr_setinheritsched.c b/lib/libpthread/uthread/uthread_attr_setinheritsched.c new file mode 100644 index 00000000000..0e0e015558a --- /dev/null +++ b/lib/libpthread/uthread/uthread_attr_setinheritsched.c @@ -0,0 +1,51 @@ +/* $OpenBSD: uthread_attr_setinheritsched.c,v 1.1 1999/05/26 00:18:22 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_setinheritsched(pthread_attr_t *attr, int sched_inherit) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL)) + ret = EINVAL; + else + (*attr)->sched_inherit = sched_inherit; + + return(ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_attr_setschedparam.c b/lib/libpthread/uthread/uthread_attr_setschedparam.c new file mode 100644 index 00000000000..2ff67680fe8 --- /dev/null +++ b/lib/libpthread/uthread/uthread_attr_setschedparam.c @@ -0,0 +1,51 @@ +/* $OpenBSD: uthread_attr_setschedparam.c,v 1.1 1999/05/26 00:18:23 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || (param == NULL)) + ret = EINVAL; + else + (*attr)->prio = param->sched_priority; + + return(ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_attr_setschedpolicy.c b/lib/libpthread/uthread/uthread_attr_setschedpolicy.c new file mode 100644 index 00000000000..2b47a9d9339 --- /dev/null +++ b/lib/libpthread/uthread/uthread_attr_setschedpolicy.c @@ -0,0 +1,52 @@ +/* $OpenBSD: uthread_attr_setschedpolicy.c,v 1.1 1999/05/26 00:18:23 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || (policy < SCHED_FIFO) || + (policy > SCHED_RR)) + ret = EINVAL; + else + (*attr)->sched_policy = policy; + + return(ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_attr_setscope.c b/lib/libpthread/uthread/uthread_attr_setscope.c new file mode 100644 index 00000000000..26fdaf6f32c --- /dev/null +++ b/lib/libpthread/uthread/uthread_attr_setscope.c @@ -0,0 +1,63 @@ +/* $OpenBSD: uthread_attr_setscope.c,v 1.1 1999/05/26 00:18:23 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_attr_setscope(pthread_attr_t *attr, int contentionscope) +{ + int ret = 0; + + if ((attr == NULL) || (*attr == NULL) || + (contentionscope != PTHREAD_SCOPE_PROCESS) || + (contentionscope != PTHREAD_SCOPE_SYSTEM)) + /* Return an invalid argument: */ + ret = EINVAL; + + else if (contentionscope == PTHREAD_SCOPE_SYSTEM) + /* We don't support system wide contention: */ +#ifdef NOT_YET + ret = ENOTSUP; +#else + ret = EOPNOTSUPP; +#endif + + else + (*attr)->flags |= contentionscope; + + return(ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_cond.c b/lib/libpthread/uthread/uthread_cond.c index 525f86049b0..501ad84478a 100644 --- a/lib/libpthread/uthread/uthread_cond.c +++ b/lib/libpthread/uthread/uthread_cond.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_cond.c,v 1.6 1999/05/26 00:18:23 d Exp $ */ /* * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,7 +30,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
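One nit in the pthread_attr_setscope() hunk above: the two inequality tests are joined with ||, which is true for every contentionscope value, so the committed function can only return EINVAL. The intended validation is presumably the conjunction, roughly:

#include <errno.h>
#include <pthread.h>
#include "pthread_private.h"

/* Sketch of the presumably intended validation in pthread_attr_setscope(). */
static int
setscope_intended(pthread_attr_t *attr, int contentionscope)
{
	int ret = 0;

	if ((attr == NULL) || (*attr == NULL) ||
	    ((contentionscope != PTHREAD_SCOPE_PROCESS) &&
	     (contentionscope != PTHREAD_SCOPE_SYSTEM)))
		ret = EINVAL;
	else if (contentionscope == PTHREAD_SCOPE_SYSTEM)
		/* System-wide contention is still unsupported: */
		ret = EOPNOTSUPP;
	else
		(*attr)->flags |= contentionscope;

	return (ret);
}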
* - * $OpenBSD: uthread_cond.c,v 1.5 1999/01/17 23:57:27 d Exp $ */ #include <stdlib.h> #include <errno.h> @@ -38,6 +38,14 @@ #include <pthread.h> #include "pthread_private.h" +/* + * Prototypes + */ +static inline pthread_t cond_queue_deq(pthread_cond_t); +static inline void cond_queue_remove(pthread_cond_t, pthread_t); +static inline void cond_queue_enq(pthread_cond_t, pthread_t); + + int pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr) { @@ -84,9 +92,10 @@ pthread_cond_init(pthread_cond_t * cond, const pthread_condattr_t * cond_attr) * Initialise the condition variable * structure: */ - _thread_queue_init(&pcond->c_queue); + TAILQ_INIT(&pcond->c_queue); pcond->c_flags |= COND_FLAGS_INITED; pcond->c_type = type; + pcond->c_mutex = NULL; _SPINUNLOCK(&pcond->lock); *cond = pcond; } @@ -137,6 +146,7 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) */ else if (*cond != NULL || (rval = pthread_cond_init(cond,NULL)) == 0) { + /* This is a cancellation point: */ _thread_enter_cancellation_point(); /* Lock the condition variable structure: */ @@ -146,34 +156,57 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: - /* Wait forever: */ - _thread_run->wakeup_time.tv_sec = -1; - - /* - * Queue the running thread for the condition - * variable: - */ - _thread_queue_enq(&(*cond)->c_queue, _thread_run); - _thread_run->data.cond = cond; - - /* Unlock the mutex: */ - if ((rval = pthread_mutex_unlock(mutex)) != 0) { - /* - * Cannot unlock the mutex, so remove the - * running thread from the condition - * variable queue: - */ - _thread_queue_deq(&(*cond)->c_queue); - + if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && + ((*cond)->c_mutex != *mutex))) { /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); + + /* Return invalid argument error: */ + rval = EINVAL; } else { - /* Schedule the next thread: */ - _thread_kern_sched_state_unlock(PS_COND_WAIT, - &(*cond)->lock, __FILE__, __LINE__); + /* Reset the timeout flag: */ + _thread_run->timeout = 0; - /* Lock the mutex: */ - rval = pthread_mutex_lock(mutex); + /* + * Queue the running thread for the condition + * variable: + */ + cond_queue_enq(*cond, _thread_run); + + /* Remember the mutex that is being used: */ + (*cond)->c_mutex = *mutex; + + /* Wait forever: */ + _thread_run->wakeup_time.tv_sec = -1; + + /* Unlock the mutex: */ + if ((rval = _mutex_cv_unlock(mutex)) != 0) { + /* + * Cannot unlock the mutex, so remove + * the running thread from the condition + * variable queue: + */ + cond_queue_remove(*cond, _thread_run); + + /* Check for no more waiters: */ + if (TAILQ_FIRST(&(*cond)->c_queue) == + NULL) + (*cond)->c_mutex = NULL; + + /* Unlock the condition variable structure: */ + _SPINUNLOCK(&(*cond)->lock); + } + else { + /* + * Schedule the next thread and unlock + * the condition variable structure: + */ + _thread_kern_sched_state_unlock(PS_COND_WAIT, + &(*cond)->lock, __FILE__, __LINE__); + + /* Lock the mutex: */ + rval = _mutex_cv_lock(mutex); + } } break; @@ -187,6 +220,7 @@ pthread_cond_wait(pthread_cond_t * cond, pthread_mutex_t * mutex) break; } + /* No longer in a cancellation point: */ _thread_leave_cancellation_point(); } @@ -209,7 +243,9 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, */ else if (*cond != NULL || (rval = pthread_cond_init(cond,NULL)) == 0) { + /* This is a cancellation point: */ _thread_enter_cancellation_point(); + /* Lock 
the condition variable structure: */ _SPINLOCK(&(*cond)->lock); @@ -217,43 +253,88 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: - /* Set the wakeup time: */ - _thread_run->wakeup_time.tv_sec = abstime->tv_sec; - _thread_run->wakeup_time.tv_nsec = abstime->tv_nsec; - - /* Reset the timeout flag: */ - _thread_run->timeout = 0; - - /* - * Queue the running thread for the condition - * variable: - */ - _thread_queue_enq(&(*cond)->c_queue, _thread_run); - _thread_run->data.cond = cond; - - /* Unlock the mutex: */ - if ((rval = pthread_mutex_unlock(mutex)) != 0) { - /* - * Cannot unlock the mutex, so remove the - * running thread from the condition - * variable queue: - */ - _thread_queue_deq(&(*cond)->c_queue); + if ((mutex == NULL) || (((*cond)->c_mutex != NULL) && + ((*cond)->c_mutex != *mutex))) { + /* Return invalid argument error: */ + rval = EINVAL; /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); } else { - /* Schedule the next thread: */ - _thread_kern_sched_state_unlock(PS_COND_WAIT, - &(*cond)->lock, __FILE__, __LINE__); + /* Set the wakeup time: */ + _thread_run->wakeup_time.tv_sec = + abstime->tv_sec; + _thread_run->wakeup_time.tv_nsec = + abstime->tv_nsec; - /* Lock the mutex: */ - if ((rval = pthread_mutex_lock(mutex)) != 0) { - } - /* Check if the wait timed out: */ - else if (_thread_run->timeout) { - /* Return a timeout error: */ - rval = ETIMEDOUT; + /* Reset the timeout flag: */ + _thread_run->timeout = 0; + + /* + * Queue the running thread for the condition + * variable: + */ + cond_queue_enq(*cond, _thread_run); + + /* Remember the mutex that is being used: */ + (*cond)->c_mutex = *mutex; + + /* Unlock the mutex: */ + if ((rval = _mutex_cv_unlock(mutex)) != 0) { + /* + * Cannot unlock the mutex, so remove + * the running thread from the condition + * variable queue: + */ + cond_queue_remove(*cond, _thread_run); + + /* Check for no more waiters: */ + if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) + (*cond)->c_mutex = NULL; + + /* Unlock the condition variable structure: */ + _SPINUNLOCK(&(*cond)->lock); + } else { + /* + * Schedule the next thread and unlock + * the condition variable structure: + */ + _thread_kern_sched_state_unlock(PS_COND_WAIT, + &(*cond)->lock, __FILE__, __LINE__); + + /* Check if the wait timedout: */ + if (_thread_run->timeout == 0) { + /* Lock the mutex: */ + rval = _mutex_cv_lock(mutex); + } + else { + /* Lock the condition variable structure: */ + _SPINLOCK(&(*cond)->lock); + + /* + * The wait timed out; remove + * the thread from the condition + * variable queue: + */ + cond_queue_remove(*cond, + _thread_run); + + /* Check for no more waiters: */ + if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) + (*cond)->c_mutex = NULL; + + /* Unock the condition variable structure: */ + _SPINUNLOCK(&(*cond)->lock); + + /* Return a timeout error: */ + rval = ETIMEDOUT; + + /* + * Lock the mutex and ignore + * any errors: + */ + (void)_mutex_cv_lock(mutex); + } } } break; @@ -267,6 +348,8 @@ pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex, rval = EINVAL; break; } + + /* No longer in a cancellation point: */ _thread_leave_cancellation_point(); } @@ -290,11 +373,22 @@ pthread_cond_signal(pthread_cond_t * cond) switch ((*cond)->c_type) { /* Fast condition variable: */ case COND_TYPE_FAST: - /* Bring the next thread off the condition queue: */ - if ((pthread = _thread_queue_deq(&(*cond)->c_queue)) != NULL) { + /* + * 
Enter a loop to dequeue threads from the condition + * queue until we find one that hasn't previously + * timed out. + */ + while (((pthread = cond_queue_deq(*cond)) != NULL) && + (pthread->timeout != 0)) { + } + + if (pthread != NULL) /* Allow the thread to run: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); - } + + /* Check for no more waiters: */ + if (TAILQ_FIRST(&(*cond)->c_queue) == NULL) + (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ @@ -321,6 +415,16 @@ pthread_cond_broadcast(pthread_cond_t * cond) if (cond == NULL || *cond == NULL) rval = EINVAL; else { + /* + * Guard against preemption by a scheduling signal. + * A change of thread state modifies the waiting + * and priority queues. In addition, we must assure + * that all threads currently waiting on the condition + * variable are signaled and are not timedout by a + * scheduling signal that causes a preemption. + */ + _thread_kern_sched_defer(); + /* Lock the condition variable structure: */ _SPINLOCK(&(*cond)->lock); @@ -332,11 +436,17 @@ pthread_cond_broadcast(pthread_cond_t * cond) * Enter a loop to bring all threads off the * condition queue: */ - while ((pthread = - _thread_queue_deq(&(*cond)->c_queue)) != NULL) { - /* Allow the thread to run: */ - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + while ((pthread = cond_queue_deq(*cond)) != NULL) { + /* + * The thread is already running if the + * timeout flag is set. + */ + if (pthread->timeout == 0) + PTHREAD_NEW_STATE(pthread,PS_RUNNING); } + + /* There are no more waiting threads: */ + (*cond)->c_mutex = NULL; break; /* Trap invalid condition variable types: */ @@ -348,9 +458,74 @@ pthread_cond_broadcast(pthread_cond_t * cond) /* Unlock the condition variable structure: */ _SPINUNLOCK(&(*cond)->lock); + + /* Reenable preemption and yield if necessary. + */ + _thread_kern_sched_undefer(); } /* Return the completion status: */ return (rval); } + +/* + * Dequeue a waiting thread from the head of a condition queue in + * descending priority order. + */ +static inline pthread_t +cond_queue_deq(pthread_cond_t cond) +{ + pthread_t pthread; + + if ((pthread = TAILQ_FIRST(&cond->c_queue)) != NULL) { + TAILQ_REMOVE(&cond->c_queue, pthread, qe); + pthread->flags &= ~PTHREAD_FLAGS_QUEUED; + } + + return(pthread); +} + +/* + * Remove a waiting thread from a condition queue in descending priority + * order. + */ +static inline void +cond_queue_remove(pthread_cond_t cond, pthread_t pthread) +{ + /* + * Because pthread_cond_timedwait() can timeout as well + * as be signaled by another thread, it is necessary to + * guard against removing the thread from the queue if + * it isn't in the queue. + */ + if (pthread->flags & PTHREAD_FLAGS_QUEUED) { + TAILQ_REMOVE(&cond->c_queue, pthread, qe); + pthread->flags &= ~PTHREAD_FLAGS_QUEUED; + } +} + +/* + * Enqueue a waiting thread to a condition queue in descending priority + * order. + */ +static inline void +cond_queue_enq(pthread_cond_t cond, pthread_t pthread) +{ + pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head); + + /* + * For the common case of all threads having equal priority, + * we perform a quick check against the priority of the thread + * at the tail of the queue. 
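The reworked condition variables above bind each condition to a single mutex (c_mutex) and fail with EINVAL when waiters mix mutexes, and pthread_cond_timedwait() expects an absolute deadline. A usage sketch consistent with those rules; the queue and predicate are illustrative:

#include <sys/time.h>
#include <time.h>
#include <pthread.h>

static pthread_mutex_t qlock;
static pthread_cond_t qcond;
static int qlen;			/* illustrative shared state */

static void
queue_setup(void)
{
	pthread_mutex_init(&qlock, NULL);
	pthread_cond_init(&qcond, NULL);
}

/* Wait up to `secs' seconds for the queue to become non-empty. */
static int
wait_for_item(int secs)
{
	struct timeval now;
	struct timespec abstime;
	int error = 0;

	gettimeofday(&now, NULL);
	abstime.tv_sec = now.tv_sec + secs;	/* timedwait takes an absolute time */
	abstime.tv_nsec = now.tv_usec * 1000;

	pthread_mutex_lock(&qlock);
	/* Every waiter uses the same mutex; mixing mutexes now returns EINVAL. */
	while (qlen == 0 && error == 0)
		error = pthread_cond_timedwait(&qcond, &qlock, &abstime);
	if (error == 0)
		qlen--;
	pthread_mutex_unlock(&qlock);
	return (error);
}

static void
post_item(void)
{
	pthread_mutex_lock(&qlock);
	qlen++;
	pthread_cond_signal(&qcond);
	pthread_mutex_unlock(&qlock);
}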
+ */ + if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) + TAILQ_INSERT_TAIL(&cond->c_queue, pthread, qe); + else { + tid = TAILQ_FIRST(&cond->c_queue); + while (pthread->active_priority <= tid->active_priority) + tid = TAILQ_NEXT(tid, qe); + TAILQ_INSERT_BEFORE(tid, pthread, qe); + } + pthread->flags |= PTHREAD_FLAGS_QUEUED; +} #endif diff --git a/lib/libpthread/uthread/uthread_create.c b/lib/libpthread/uthread/uthread_create.c index eede7c6b5e9..002f563d14a 100644 --- a/lib/libpthread/uthread/uthread_create.c +++ b/lib/libpthread/uthread/uthread_create.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_create.c,v 1.9 1999/05/12 06:00:00 d Exp $ + * $OpenBSD: uthread_create.c,v 1.10 1999/05/26 00:18:23 d Exp $ */ #include <errno.h> #include <stdlib.h> @@ -135,21 +135,26 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr, */ if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) { /* Copy the scheduling attributes: */ - new_thread->pthread_priority = _thread_run->pthread_priority; - new_thread->attr.prio = _thread_run->pthread_priority; - new_thread->attr.schedparam_policy = _thread_run->attr.schedparam_policy; + new_thread->base_priority = _thread_run->base_priority; + new_thread->attr.prio = _thread_run->base_priority; + new_thread->attr.sched_policy = _thread_run->attr.sched_policy; } else { /* * Use just the thread priority, leaving the * other scheduling attributes as their * default values: */ - new_thread->pthread_priority = new_thread->attr.prio; + new_thread->base_priority = new_thread->attr.prio; } + new_thread->active_priority = new_thread->base_priority; + new_thread->inherited_priority = 0; /* Initialise the join queue for the new thread: */ _thread_queue_init(&(new_thread->join_queue)); + /* Initialize the mutex queue: */ + TAILQ_INIT(&new_thread->mutexq); + /* Initialise hooks in the thread structure: */ new_thread->specific_data = NULL; new_thread->cleanup = NULL; @@ -173,6 +178,27 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr, /* Unlock the thread list: */ _unlock_thread_list(); + /* + * Guard against preemption by a scheduling signal. + * A change of thread state modifies the waiting + * and priority queues. + */ + _thread_kern_sched_defer(); + + if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) { + new_thread->state = PS_SUSPENDED; + PTHREAD_WAITQ_INSERT(new_thread); + } else { + new_thread->state = PS_RUNNING; + PTHREAD_PRIOQ_INSERT_TAIL(new_thread); + } + + /* + * Reenable preemption and yield if a scheduling + * signal occurred while in the critical region. + */ + _thread_kern_sched_undefer(); + /* Return a pointer to the thread structure: */ if (thread != NULL) (*thread) = new_thread; diff --git a/lib/libpthread/uthread/uthread_detach.c b/lib/libpthread/uthread/uthread_detach.c index d4fa639fef8..cb51ff0e43d 100644 --- a/lib/libpthread/uthread/uthread_detach.c +++ b/lib/libpthread/uthread/uthread_detach.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_detach.c,v 1.3 1999/01/06 05:29:23 d Exp $ + * $OpenBSD: uthread_detach.c,v 1.4 1999/05/26 00:18:23 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE @@ -52,11 +52,24 @@ pthread_detach(pthread_t pthread) /* Flag the thread as detached: */ pthread->attr.flags |= PTHREAD_DETACHED; + /* + * Guard against preemption by a scheduling signal. 
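/*
 * Editor's illustrative sketch, not part of the patch: the deferral
 * bracket that this sync wraps around every waiting/priority queue
 * update, as used in pthread_create() above and pthread_detach() below.
 * "wake_one" is a hypothetical helper; it assumes the library's private
 * header (pthread_private.h) for PTHREAD_NEW_STATE and PS_RUNNING, plus
 * the _thread_kern_sched_defer()/_thread_kern_sched_undefer() pair
 * introduced by this patch.
 */
static void
wake_one(pthread_t waiter)
{
	/* Keep a scheduling signal from running the scheduler mid-update: */
	_thread_kern_sched_defer();

	/* The state/queue change is now atomic with respect to preemption: */
	PTHREAD_NEW_STATE(waiter, PS_RUNNING);

	/* Re-enable preemption; this may yield if a reschedule was deferred: */
	_thread_kern_sched_undefer();
}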
+ * A change of thread state modifies the waiting + * and priority queues. + */ + _thread_kern_sched_defer(); + /* Enter a loop to bring all threads off the join queue: */ while ((next_thread = _thread_queue_deq(&pthread->join_queue)) != NULL) { /* Make the thread run: */ PTHREAD_NEW_STATE(next_thread,PS_RUNNING); } + + /* + * Reenable preemption and yield if a scheduling signal + * occurred while in the critical region. + */ + _thread_kern_sched_undefer(); } else /* Return an error: */ rval = EINVAL; diff --git a/lib/libpthread/uthread/uthread_execve.c b/lib/libpthread/uthread/uthread_execve.c index a223527d635..ba87ccf8d98 100644 --- a/lib/libpthread/uthread/uthread_execve.c +++ b/lib/libpthread/uthread/uthread_execve.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_execve.c,v 1.3 1999/01/06 05:29:23 d Exp $ + * $OpenBSD: uthread_execve.c,v 1.4 1999/05/26 00:18:23 d Exp $ */ #include <errno.h> #include <fcntl.h> @@ -53,7 +53,7 @@ execve(const char *name, char *const * argv, char *const * envp) itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; - setitimer(ITIMER_VIRTUAL, &itimer, NULL); + setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ _thread_sys_close(_thread_kern_pipe[0]); diff --git a/lib/libpthread/uthread/uthread_exit.c b/lib/libpthread/uthread/uthread_exit.c index 780e99cb4e1..fdd0bdd4d7c 100644 --- a/lib/libpthread/uthread/uthread_exit.c +++ b/lib/libpthread/uthread/uthread_exit.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_exit.c,v 1.7 1999/01/06 05:29:23 d Exp $ + * $OpenBSD: uthread_exit.c,v 1.8 1999/05/26 00:18:23 d Exp $ */ #include <errno.h> #include <unistd.h> @@ -52,7 +52,7 @@ void _exit(int status) itimer.it_interval.tv_usec = 0; itimer.it_value.tv_sec = 0; itimer.it_value.tv_usec = 0; - setitimer(ITIMER_VIRTUAL, &itimer, NULL); + setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL); /* Close the pthread kernel pipe: */ _thread_sys_close(_thread_kern_pipe[0]); @@ -126,8 +126,8 @@ _thread_exit(const char *fname, int lineno, const char *string) /* Write a dump of the current thread status: */ _thread_dump_info(); - /* Force this process to exit: */ - _exit(1); + /* Try to dump a core file: */ + abort(); } void @@ -160,6 +160,13 @@ pthread_exit(void *status) /* Run the thread-specific data destructors: */ _thread_cleanupspecific(); } + + /* + * Guard against preemption by a scheduling signal. A change of + * thread state modifies the waiting and priority queues. + */ + _thread_kern_sched_defer(); + /* Check if there are any threads joined to this one: */ while ((pthread = _thread_queue_deq(&(_thread_run->join_queue))) != NULL) { /* Wake the joined thread and let it detach this thread: */ @@ -167,6 +174,12 @@ pthread_exit(void *status) } /* + * Reenable preemption and yield if a scheduling signal + * occurred while in the critical region. + */ + _thread_kern_sched_undefer(); + + /* * Lock the garbage collector mutex to ensure that the garbage * collector is not using the dead thread list. */ @@ -184,12 +197,18 @@ pthread_exit(void *status) if (pthread_cond_signal(&_gc_cond) != 0) PANIC("Cannot signal gc cond"); + /* + * Mark the thread as dead so it will not return if it + * gets context switched out when the mutex is unlocked. 
+ */ + PTHREAD_SET_STATE(_thread_run, PS_DEAD); + /* Unlock the garbage collector mutex: */ if (pthread_mutex_unlock(&_gc_mutex) != 0) PANIC("Cannot lock gc mutex"); - /* This thread will never be re-scheduled. */ - _thread_kern_sched_state(PS_DEAD, __FILE__, __LINE__); + /* This this thread will never be re-scheduled. */ + _thread_kern_sched(NULL); /* This point should not be reached. */ PANIC("Dead thread has resumed"); diff --git a/lib/libpthread/uthread/uthread_fd.c b/lib/libpthread/uthread/uthread_fd.c index c46615b0270..0b51f75b737 100644 --- a/lib/libpthread/uthread/uthread_fd.c +++ b/lib/libpthread/uthread/uthread_fd.c @@ -29,8 +29,8 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $FreeBSD: uthread_fd.c,v 1.9 1998/09/13 15:33:42 dt Exp $ - * $OpenBSD: uthread_fd.c,v 1.4 1999/01/10 23:09:36 d Exp $ + * $FreeBSD: uthread_fd.c,v 1.10 1999/03/23 05:07:55 jb Exp $ + * $OpenBSD: uthread_fd.c,v 1.5 1999/05/26 00:18:23 d Exp $ * */ #include <errno.h> @@ -200,7 +200,7 @@ _thread_fd_unlock(int fd, int lock_type) } else { /* * Set the state of the new owner of - * the thread to running: + * the thread to running: */ PTHREAD_NEW_STATE(_thread_fd_table[fd]->r_owner,PS_RUNNING); diff --git a/lib/libpthread/uthread/uthread_fork.c b/lib/libpthread/uthread/uthread_fork.c index 913e79e0bdd..9dc90825233 100644 --- a/lib/libpthread/uthread/uthread_fork.c +++ b/lib/libpthread/uthread/uthread_fork.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_fork.c,v 1.4 1999/01/17 23:46:26 d Exp $ + * $OpenBSD: uthread_fork.c,v 1.5 1999/05/26 00:18:23 d Exp $ */ #include <errno.h> #include <string.h> @@ -43,7 +43,7 @@ pid_t fork(void) { - int flags; + int i, flags; pid_t ret; pthread_t pthread; pthread_t pthread_next; @@ -95,6 +95,11 @@ fork(void) else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ abort(); + /* Initialize the ready queue: */ + } else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY, + PTHREAD_MAX_PRIORITY) != 0) { + /* Abort this application: */ + PANIC("Cannot allocate priority ready queue."); } else { /* Point to the first thread in the list: */ pthread = _thread_link_list; @@ -126,6 +131,34 @@ fork(void) /* Point to the next thread: */ pthread = pthread_next; } + + /* Re-init the waiting queues. */ + TAILQ_INIT(&_waitingq); + + /* Initialize the scheduling switch hook routine: */ + _sched_switch_hook = NULL; + + /* Clear out any locks in the file descriptor table: */ + for (i = 0; i < _thread_dtablesize; i++) { + if (_thread_fd_table[i] != NULL) { + /* Initialise the file locks: */ + _SPINUNLOCK(&_thread_fd_table[i]->lock); + _thread_fd_table[i]->r_owner = NULL; + _thread_fd_table[i]->w_owner = NULL; + _thread_fd_table[i]->r_fname = NULL; + _thread_fd_table[i]->w_fname = NULL; + _thread_fd_table[i]->r_lineno = 0;; + _thread_fd_table[i]->w_lineno = 0;; + _thread_fd_table[i]->r_lockcount = 0;; + _thread_fd_table[i]->w_lockcount = 0;; + + /* Initialise the read/write queues: */ + _thread_queue_init(&_thread_fd_table[i]->r_queue); + _thread_queue_init(&_thread_fd_table[i]->w_queue); + } + } + + /* Initialise the atfork handler: */ _thread_atfork(PTHREAD_ATFORK_CHILD); } } diff --git a/lib/libpthread/uthread/uthread_gc.c b/lib/libpthread/uthread/uthread_gc.c index 56fbbb79e12..1a2790c74bd 100644 --- a/lib/libpthread/uthread/uthread_gc.c +++ b/lib/libpthread/uthread/uthread_gc.c @@ -30,7 +30,7 @@ * SUCH DAMAGE. 
* * $FreeBSD: uthread_gc.c,v 1.2 1998/09/30 19:17:51 dt Exp $ - * $OpenBSD: uthread_gc.c,v 1.4 1999/02/01 08:24:42 d Exp $ + * $OpenBSD: uthread_gc.c,v 1.5 1999/05/26 00:18:24 d Exp $ * * Garbage collector thread. Frees memory allocated for dead threads. * @@ -52,18 +52,20 @@ _thread_gc(pthread_addr_t arg) int f_debug; int f_done = 0; int ret; + sigset_t mask; pthread_t pthread; pthread_t pthread_cln; pthread_t pthread_nxt; pthread_t pthread_prv; struct timespec abstime; void *p_stack; - sigset_t ss; - /* Don't handle signals in this thread */ - sigfillset(&ss); - if (ret = pthread_sigmask(SIG_BLOCK, &ss, NULL)) - PANIC("Can't block signals in GC thread"); + /* Block all signals */ + sigfillset (&mask); + sigprocmask (SIG_BLOCK, &mask, NULL); + + /* Mark this thread as a library thread (not a user thread). */ + _thread_run->flags |= PTHREAD_FLAGS_PRIVATE; /* Set a debug flag based on an environment variable. */ f_debug = (getenv("LIBC_R_DEBUG") != NULL); diff --git a/lib/libpthread/uthread/uthread_getprio.c b/lib/libpthread/uthread/uthread_getprio.c index b304013efa8..074bff3e514 100644 --- a/lib/libpthread/uthread/uthread_getprio.c +++ b/lib/libpthread/uthread/uthread_getprio.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_getprio.c,v 1.3 1999/05/26 00:18:24 d Exp $ */ /* * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,7 +30,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_getprio.c,v 1.2 1999/01/06 05:29:24 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE @@ -39,12 +39,11 @@ int pthread_getprio(pthread_t pthread) { - int ret; + int policy, ret; + struct sched_param param; - /* Find the thread in the list of active threads: */ - if ((ret = _find_thread(pthread)) == 0) - /* Get the thread priority: */ - ret = pthread->pthread_priority; + if ((ret = pthread_getschedparam(pthread, &policy, ¶m)) == 0) + ret = param.sched_priority; else { /* Invalid thread: */ errno = ret; diff --git a/lib/libpthread/uthread/uthread_getschedparam.c b/lib/libpthread/uthread/uthread_getschedparam.c new file mode 100644 index 00000000000..7905c1960f4 --- /dev/null +++ b/lib/libpthread/uthread/uthread_getschedparam.c @@ -0,0 +1,57 @@ +/* $OpenBSD: uthread_getschedparam.c,v 1.1 1999/05/26 00:18:24 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_getschedparam(pthread_t pthread, int *policy, struct sched_param *param) +{ + int ret; + + if ((param == NULL) || (policy == NULL)) + /* Return an invalid argument error: */ + ret = EINVAL; + + /* Find the thread in the list of active threads: */ + else if ((ret = _find_thread(pthread)) == 0) { + /* Return the threads base priority and scheduling policy: */ + param->sched_priority = pthread->base_priority; + *policy = pthread->attr.sched_policy; + } + + return(ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_info.c b/lib/libpthread/uthread/uthread_info.c index 3e682752474..870a283701d 100644 --- a/lib/libpthread/uthread/uthread_info.c +++ b/lib/libpthread/uthread/uthread_info.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_info.c,v 1.7 1999/02/01 08:23:46 d Exp $ + * $OpenBSD: uthread_info.c,v 1.8 1999/05/26 00:18:24 d Exp $ */ #include <stdio.h> #include <fcntl.h> @@ -64,9 +64,11 @@ static const struct s_thread_info thread_info[] = { {PS_WAIT_WAIT , "wait_wait"}, {PS_SIGSUSPEND , "sigsuspend"}, {PS_SIGWAIT , "sigwait"}, + {PS_SPINBLOCK , "spinblock"}, {PS_JOIN , "join"}, {PS_SUSPENDED , "suspended"}, {PS_DEAD , "dead"}, + {PS_DEADLOCK , "deadlock"}, {PS_STATE_MAX , "xxx"} }; @@ -151,7 +153,7 @@ _thread_dump_info(void) (void *)pthread, (pthread == _thread_run) ? '*' : ' ', state, - pthread->pthread_priority, + pthread->base_priority, (pthread->flags & PTHREAD_EXITING) ? 'E' : (pthread->flags & PTHREAD_CANCELLING) ? 'C' : (pthread->flags & PTHREAD_AT_CANCEL_POINT) ? 'c' : ' ', @@ -207,7 +209,7 @@ _thread_dump_info(void) snprintf(s, sizeof(s), "%s owner %p\n", info_lead, - (*pthread->data.mutex)->m_owner); + NULL /* (*pthread->data.mutex)->m_owner*/); _thread_sys_write(fd, s, strlen(s)); } break; diff --git a/lib/libpthread/uthread/uthread_init.c b/lib/libpthread/uthread/uthread_init.c index 0cd5bd376c4..281808e75f4 100644 --- a/lib/libpthread/uthread/uthread_init.c +++ b/lib/libpthread/uthread/uthread_init.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_init.c,v 1.9 1999/05/26 00:18:24 d Exp $ */ /* * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au> * All rights reserved. 
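/*
 * Editor's illustrative sketch, not part of the patch: how an application
 * would call the new pthread_getschedparam() added above; the rewritten
 * pthread_getprio() now goes through the same path.  "show_my_priority"
 * is an invented example function.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

void
show_my_priority(void)
{
	struct sched_param param;
	int policy;

	if (pthread_getschedparam(pthread_self(), &policy, &param) == 0)
		printf("policy %d, priority %d\n", policy,
		    param.sched_priority);
}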
@@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,7 +30,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_init.c,v 1.8 1999/01/17 23:57:27 d Exp $ */ #include <errno.h> @@ -54,6 +54,7 @@ static struct pthread kern_thread; struct pthread * volatile _thread_kern_threadp = &kern_thread; struct pthread * volatile _thread_run = &kern_thread; +struct pthread * volatile _last_user_thread = &kern_thread; struct pthread * volatile _thread_single = NULL; struct pthread * volatile _thread_link_list = NULL; int _thread_kern_pipe[2] = { -1, -1 }; @@ -63,7 +64,9 @@ struct timeval kern_inc_prio_time = { 0, 0 }; struct pthread * volatile _thread_dead = NULL; struct pthread * _thread_initial = NULL; struct pthread_attr pthread_attr_default = { - SCHED_RR, /* schedparam_policy */ + SCHED_RR, /* sched_policy */ + 0, /* sched_inherit */ + TIMESLICE_USEC, /* sched_interval */ PTHREAD_DEFAULT_PRIORITY, /* prio */ PTHREAD_CREATE_RUNNING, /* suspend */ PTHREAD_CREATE_JOINABLE, /* flags */ @@ -73,7 +76,9 @@ struct pthread_attr pthread_attr_default = { PTHREAD_STACK_DEFAULT /* stacksize_attr */ }; struct pthread_mutex_attr pthread_mutexattr_default = { - MUTEX_TYPE_FAST, /* m_type */ + PTHREAD_MUTEX_DEFAULT, /* m_type */ + PTHREAD_PRIO_NONE, /* m_protocol */ + 0, /* m_ceiling */ 0 /* m_flags */ }; struct pthread_cond_attr pthread_condattr_default = { @@ -87,6 +92,12 @@ pthread_mutex_t _gc_mutex = NULL; pthread_cond_t _gc_cond = NULL; struct sigaction _thread_sigact[NSIG]; +const int dtablecount = 4096/sizeof(struct fd_table_entry); +pq_queue_t _readyq; +_waitingq_t _waitingq; +volatile int _waitingq_check_reqd = 0; +pthread_switch_routine_t _sched_switch_hook = NULL; + /* Automatic init module. 
*/ extern int _thread_autoinit_dummy_decl; @@ -189,7 +200,13 @@ _thread_init(void) /* Make the write pipe non-blocking: */ else if (_thread_sys_fcntl(_thread_kern_pipe[1], F_SETFL, flags | O_NONBLOCK) == -1) { /* Abort this application: */ - PANIC("Cannot make kernel write pipe non-blocking"); + PANIC("Cannot get kernel write pipe flags"); + } + /* Initialize the ready queue: */ + else if (_pq_init(&_readyq, PTHREAD_MIN_PRIORITY, PTHREAD_MAX_PRIORITY) +!= 0) { + /* Abort this application: */ + PANIC("Cannot allocate priority ready queue."); } /* Allocate memory for the thread structure of the initial thread: */ else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) { @@ -202,13 +219,32 @@ _thread_init(void) /* Zero the global kernel thread structure: */ memset(_thread_kern_threadp, 0, sizeof(struct pthread)); _thread_kern_threadp->magic = PTHREAD_MAGIC; + + /* Set the kernel's name for the debugger: */ pthread_set_name_np(_thread_kern_threadp, "kern"); + /* The kernel thread is a library thread: */ + _thread_kern_threadp->flags = PTHREAD_FLAGS_PRIVATE; + + /* Initialize the waiting queue: */ + TAILQ_INIT(&_waitingq); + + /* Initialize the scheduling switch hook routine: */ + _sched_switch_hook = NULL; + /* Zero the initial thread: */ memset(_thread_initial, 0, sizeof(struct pthread)); + /* + * Write a magic value to the thread structure + * to help identify valid ones: + */ + _thread_initial->magic = PTHREAD_MAGIC; + /* Default the priority of the initial thread: */ - _thread_initial->pthread_priority = PTHREAD_DEFAULT_PRIORITY; + _thread_initial->base_priority = PTHREAD_DEFAULT_PRIORITY; + _thread_initial->active_priority = PTHREAD_DEFAULT_PRIORITY; + _thread_initial->inherited_priority = 0; /* Initialise the state of the initial thread: */ _thread_initial->state = PS_RUNNING; @@ -216,7 +252,13 @@ _thread_init(void) /* Initialise the queue: */ _thread_queue_init(&(_thread_initial->join_queue)); + /* Initialize the owned mutex queue and count: */ + TAILQ_INIT(&(_thread_initial->mutexq)); + _thread_initial->priority_mutex_count = 0; + /* Initialise the rest of the fields: */ + _thread_initial->sched_defer_count = 0; + _thread_initial->yield_on_sched_undefer = 0; _thread_initial->specific_data = NULL; _thread_initial->cleanup = NULL; _thread_initial->queue = NULL; @@ -226,7 +268,6 @@ _thread_init(void) _thread_initial->error = 0; _thread_initial->cancelstate = PTHREAD_CANCEL_ENABLE; _thread_initial->canceltype = PTHREAD_CANCEL_DEFERRED; - _thread_initial->magic = PTHREAD_MAGIC; pthread_set_name_np(_thread_initial, "init"); _SPINUNLOCK(&_thread_initial->lock); _thread_link_list = _thread_initial; @@ -259,9 +300,9 @@ _thread_init(void) * signals that the user-thread kernel needs. Actually * SIGINFO isn't really needed, but it is nice to have. */ - if (_thread_sys_sigaction(SIGVTALRM, &act, NULL) != 0 || - _thread_sys_sigaction(SIGINFO , &act, NULL) != 0 || - _thread_sys_sigaction(SIGCHLD , &act, NULL) != 0) { + if (_thread_sys_sigaction(_SCHED_SIGNAL, &act, NULL) != 0 || + _thread_sys_sigaction(SIGINFO, &act, NULL) != 0 || + _thread_sys_sigaction(SIGCHLD, &act, NULL) != 0) { /* * Abort this process if signal initialisation fails: */ @@ -309,6 +350,8 @@ _thread_init(void) pthread_cond_init(&_gc_cond,NULL) != 0) PANIC("Failed to initialise garbage collector mutex or condvar"); + gettimeofday(&kern_inc_prio_time, NULL); + /* Pull in automatic thread unit. 
*/ _thread_autoinit_dummy_decl = 1; diff --git a/lib/libpthread/uthread/uthread_join.c b/lib/libpthread/uthread/uthread_join.c index 52baee31c33..ab7b7860ecc 100644 --- a/lib/libpthread/uthread/uthread_join.c +++ b/lib/libpthread/uthread/uthread_join.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_join.c,v 1.3 1999/01/17 23:57:27 d Exp $ + * $OpenBSD: uthread_join.c,v 1.4 1999/05/26 00:18:24 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE @@ -42,6 +42,7 @@ pthread_join(pthread_t pthread, void **thread_return) int ret = 0; pthread_t pthread1 = NULL; + /* This operation is a cancel point: */ _thread_enter_cancellation_point(); /* Check if the caller has specified an invalid thread: */ @@ -99,6 +100,7 @@ pthread_join(pthread_t pthread, void **thread_return) /* Return the thread's return value: */ *thread_return = pthread->ret; + /* This operation was a cancel point: */ _thread_leave_cancellation_point(); /* Return the completion status: */ diff --git a/lib/libpthread/uthread/uthread_kern.c b/lib/libpthread/uthread/uthread_kern.c index 88fa55e156c..78223f4ad10 100644 --- a/lib/libpthread/uthread/uthread_kern.c +++ b/lib/libpthread/uthread/uthread_kern.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_kern.c,v 1.8 1999/05/26 00:18:24 d Exp $ */ /* * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au> * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,8 +30,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $FreeBSD: uthread_kern.c,v 1.15 1998/11/15 09:58:26 jb Exp $ - * $OpenBSD: uthread_kern.c,v 1.7 1999/02/01 08:23:46 d Exp $ + * $FreeBSD: uthread_kern.c,v 1.18 1999/05/08 07:50:05 jasone Exp $ * */ #include <errno.h> @@ -43,7 +43,9 @@ #include <sys/stat.h> #include <sys/time.h> #include <sys/socket.h> +#ifdef _THREAD_RUSAGE #include <sys/resource.h> +#endif #include <sys/uio.h> #include <sys/syscall.h> #include <fcntl.h> @@ -55,13 +57,15 @@ static void _thread_kern_select(int wait_reqd); +static inline void +thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in); + void _thread_kern_sched(struct sigcontext * scp) { - int prio = -1; pthread_t pthread; pthread_t pthread_h = NULL; - pthread_t pthread_s = NULL; + pthread_t last_thread = NULL; struct itimerval itimer; struct timespec ts; struct timespec ts1; @@ -103,6 +107,11 @@ _thread_kern_sched(struct sigcontext * scp) */ _thread_kern_in_sched = 0; + if (_sched_switch_hook != NULL) { + /* Run the installed switch hook: */ + thread_run_switch_hook(_last_user_thread, _thread_run); + } + if (!(_thread_run->flags & PTHREAD_AT_CANCEL_POINT) && (_thread_run->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS)) { /* @@ -122,11 +131,16 @@ _thread_kern_sched(struct sigcontext * scp) * dispatch any that aren't blocked: */ _dispatch_signals(); + return; } else /* Flag the jump buffer was the last state saved: */ _thread_run->sig_saved = 0; + /* If the currently running thread is a user thread, save it: */ + if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0) + _last_user_thread = _thread_run; + /* Save errno. */ _thread_run->error = errno; @@ -143,7 +157,7 @@ _thread_kern_sched(struct sigcontext * scp) #endif /* _THREAD_RUSAGE */ /* - * Enter a the scheduling loop that finds the next thread that is + * Enter a scheduling loop that finds the next thread that is * ready to run. This loop completes when there are no more threads * in the global list or when a thread has its state restored by * either a sigreturn (if the state was saved as a sigcontext) or a @@ -161,12 +175,48 @@ _thread_kern_sched(struct sigcontext * scp) _thread_kern_select(0); /* - * Enter a loop to look for sleeping threads that are ready: + * Define the maximum time before a scheduling signal + * is required: + */ + itimer.it_value.tv_sec = 0; + itimer.it_value.tv_usec = TIMESLICE_USEC; + + /* + * The interval timer is not reloaded when it + * times out. The interval time needs to be + * calculated every time. + */ + itimer.it_interval.tv_sec = 0; + itimer.it_interval.tv_usec = 0; + + /* + * Enter a loop to look for sleeping threads that are ready + * or timedout. While we're at it, also find the smallest + * timeout value for threads waiting for a time. */ - for (pthread = _thread_link_list; pthread != NULL; - pthread = pthread->nxt) { + _waitingq_check_reqd = 0; /* reset flag before loop */ + TAILQ_FOREACH(pthread, &_waitingq, pqe) { + /* Check if this thread is ready: */ + if (pthread->state == PS_RUNNING) { + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); + } + + /* + * Check if this thread is blocked by an + * atomic lock: + */ + else if (pthread->state == PS_SPINBLOCK) { + /* + * If the lock is available, let + * the thread run. 
+ */ + if (pthread->data.spinlock->access_lock == 0) { + PTHREAD_NEW_STATE(pthread,PS_RUNNING); + } + /* Check if this thread is to timeout: */ - if (pthread->state == PS_COND_WAIT || + } else if (pthread->state == PS_COND_WAIT || pthread->state == PS_SLEEP_WAIT || pthread->state == PS_FDR_WAIT || pthread->state == PS_FDW_WAIT || @@ -190,9 +240,9 @@ _thread_kern_sched(struct sigcontext * scp) */ if (pthread->state == PS_SELECT_WAIT) { /* - * The select has timed out, - * so zero the file - * descriptor sets: + * The select has timed out, so + * zero the file descriptor + * sets: */ FD_ZERO(&pthread->data.select_data->readfds); FD_ZERO(&pthread->data.select_data->writefds); @@ -216,13 +266,72 @@ _thread_kern_sched(struct sigcontext * scp) * it to be restarted: */ PTHREAD_NEW_STATE(pthread,PS_RUNNING); + } else { + /* + * Calculate the time until this thread + * is ready, allowing for the clock + * resolution: + */ + ts1.tv_sec = pthread->wakeup_time.tv_sec + - ts.tv_sec; + ts1.tv_nsec = pthread->wakeup_time.tv_nsec + - ts.tv_nsec + CLOCK_RES_NSEC; + + /* + * Check for underflow of the + * nanosecond field: + */ + if (ts1.tv_nsec < 0) { + /* + * Allow for the underflow + * of the nanosecond field: + */ + ts1.tv_sec--; + ts1.tv_nsec += 1000000000; + } + /* + * Check for overflow of the nanosecond + * field: + */ + if (ts1.tv_nsec >= 1000000000) { + /* + * Allow for the overflow of + * the nanosecond field: + */ + ts1.tv_sec++; + ts1.tv_nsec -= 1000000000; + } + /* + * Convert the timespec structure + * to a timeval structure: + */ + TIMESPEC_TO_TIMEVAL(&tv1, &ts1); + + /* + * Check if the thread will be ready + * sooner than the earliest ones found + * so far: + */ + if (timercmp(&tv1, &itimer.it_value, <)) { + /* + * Update the time value: + */ + itimer.it_value.tv_sec = tv1.tv_sec; + itimer.it_value.tv_usec = tv1.tv_usec; + } } + } } /* Check if there is a current thread: */ if (_thread_run != _thread_kern_threadp) { /* + * This thread no longer needs to yield the CPU. 
+ */ + _thread_run->yield_on_sched_undefer = 0; + + /* * Save the current time as the time that the thread * became inactive: */ @@ -231,202 +340,64 @@ _thread_kern_sched(struct sigcontext * scp) /* * Accumulate the number of microseconds that this - * thread has run for: + * thread has run for: */ - if (_thread_run->slice_usec != -1) { - if (timerisset(&_thread_run->last_active)) { - struct timeval s; - - timersub(&_thread_run->last_inactive, - &_thread_run->last_active, - &s); - _thread_run->slice_usec = - s.tv_usec + 1000000 * s.tv_sec; - if (_thread_run->slice_usec < 0) - PANIC("slice_usec"); - } else + if ((_thread_run->slice_usec != -1) && + (_thread_run->attr.sched_policy != SCHED_FIFO)) { + _thread_run->slice_usec += + (_thread_run->last_inactive.tv_sec - + _thread_run->last_active.tv_sec) * 1000000 + + _thread_run->last_inactive.tv_usec - + _thread_run->last_active.tv_usec; + + /* Check for time quantum exceeded: */ + if (_thread_run->slice_usec > TIMESLICE_USEC) _thread_run->slice_usec = -1; - } - - /* - * Check if this thread has reached its allocated - * time slice period: - */ - if (_thread_run->slice_usec > TIMESLICE_USEC) { - /* - * Flag the allocated time slice period as - * up: - */ - _thread_run->slice_usec = -1; } - } - /* Check if an incremental priority update is required: */ - if (((tv.tv_sec - kern_inc_prio_time.tv_sec) * 1000000 + - tv.tv_usec - kern_inc_prio_time.tv_usec) > INC_PRIO_USEC) { - /* - * Enter a loop to look for run-enabled threads that - * have not run since the last time that an - * incremental priority update was performed: - */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { - /* Check if this thread is unable to run: */ - if (pthread->state != PS_RUNNING) { - } - /* - * Check if the last time that this thread - * was run (as indicated by the last time it - * became inactive) is before the time that - * the last incremental priority check was - * made: - */ - else if (timercmp(&pthread->last_inactive, &kern_inc_prio_time, <)) { + if (_thread_run->state == PS_RUNNING) { + if (_thread_run->slice_usec == -1) { /* - * Increment the incremental priority - * for this thread in the hope that - * it will eventually get a chance to - * run: + * The thread exceeded its time + * quantum or it yielded the CPU; + * place it at the tail of the + * queue for its priority. */ - (pthread->inc_prio)++; + PTHREAD_PRIOQ_INSERT_TAIL(_thread_run); + } else { + /* + * The thread hasn't exceeded its + * interval. Place it at the head + * of the queue for its priority. + */ + PTHREAD_PRIOQ_INSERT_HEAD(_thread_run); } } - - /* Save the new incremental priority update time: */ - kern_inc_prio_time.tv_sec = tv.tv_sec; - kern_inc_prio_time.tv_usec = tv.tv_usec; - } - /* - * Enter a loop to look for the first thread of the highest - * priority that is ready to run: - */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { - /* Check if the current thread is unable to run: */ - if (pthread->state != PS_RUNNING) { - } - /* - * Check if no run-enabled thread has been seen or if - * the current thread has a priority higher than the - * highest seen so far: - */ - else if (pthread_h == NULL || (pthread->pthread_priority + pthread->inc_prio) > prio) { - /* - * Save this thread as the highest priority - * thread seen so far: - */ - pthread_h = pthread; - prio = pthread->pthread_priority + pthread->inc_prio; - } - } - - /* - * Enter a loop to look for a thread that: 1. Is run-enabled. - * 2. Has the required agregate priority. 3. 
Has not been - * allocated its allocated time slice. 4. Became inactive - * least recently. - */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { - /* Check if the current thread is unable to run: */ - if (pthread->state != PS_RUNNING) { - /* Ignore threads that are not ready to run. */ - } - - /* - * Check if the current thread as an agregate - * priority not equal to the highest priority found - * above: - */ - else if ((pthread->pthread_priority + pthread->inc_prio) != prio) { + else if (_thread_run->state == PS_DEAD) { /* - * Ignore threads which have lower agregate - * priority. + * Don't add dead threads to the waiting + * queue, because when they're reaped, it + * will corrupt the queue. */ } - - /* - * Check if the current thread reached its time slice - * allocation last time it ran (or if it has not run - * yet): - */ - else if (pthread->slice_usec == -1) { - } - - /* - * Check if an eligible thread has not been found - * yet, or if the current thread has an inactive time - * earlier than the last one seen: - */ - else if (pthread_s == NULL || timercmp(&pthread->last_inactive, &tv1, <)) { + else { /* - * Save the pointer to the current thread as - * the most eligible thread seen so far: + * This thread has changed state and needs + * to be placed in the waiting queue. */ - pthread_s = pthread; + PTHREAD_WAITQ_INSERT(_thread_run); - /* - * Save the time that the selected thread - * became inactive: - */ - tv1.tv_sec = pthread->last_inactive.tv_sec; - tv1.tv_usec = pthread->last_inactive.tv_usec; + /* Restart the time slice: */ + _thread_run->slice_usec = -1; } } /* - * Check if no thread was selected according to incomplete - * time slice allocation: + * Get the highest priority thread in the ready queue. */ - if (pthread_s == NULL) { - /* - * Enter a loop to look for any other thread that: 1. - * Is run-enabled. 2. Has the required agregate - * priority. 3. Became inactive least recently. - */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { - /* - * Check if the current thread is unable to - * run: - */ - if (pthread->state != PS_RUNNING) { - /* - * Ignore threads that are not ready - * to run. - */ - } - /* - * Check if the current thread as an agregate - * priority not equal to the highest priority - * found above: - */ - else if ((pthread->pthread_priority + pthread->inc_prio) != prio) { - /* - * Ignore threads which have lower - * agregate priority. - */ - } - /* - * Check if an eligible thread has not been - * found yet, or if the current thread has an - * inactive time earlier than the last one - * seen: - */ - else if (pthread_s == NULL || timercmp(&pthread->last_inactive, &tv1, <)) { - /* - * Save the pointer to the current - * thread as the most eligible thread - * seen so far: - */ - pthread_s = pthread; + pthread_h = PTHREAD_PRIOQ_FIRST; - /* - * Save the time that the selected - * thread became inactive: - */ - tv1.tv_sec = pthread->last_inactive.tv_sec; - tv1.tv_usec = pthread->last_inactive.tv_usec; - } - } - } /* Check if there are no threads ready to run: */ - if (pthread_s == NULL) { + if (pthread_h == NULL) { /* * Lock the pthread kernel by changing the pointer to * the running thread to point to the global kernel @@ -441,7 +412,10 @@ _thread_kern_sched(struct sigcontext * scp) _thread_kern_select(1); } else { /* Make the selected thread the current thread: */ - _thread_run = pthread_s; + _thread_run = pthread_h; + + /* Remove the thread from the ready queue. 
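/*
 * Editor's note -- a hypothetical sketch, not the patch's actual
 * implementation (the priority-queue source is not shown in these hunks):
 * the PTHREAD_PRIOQ_* macros used above behave like an array of FIFO
 * lists indexed by priority, where "first" means the head of the highest
 * non-empty list.  The "sketch_" names are invented; the queue is assumed
 * zero-initialized and priorities assumed in [0, SKETCH_MAX_PRIO].
 */
#include <stddef.h>

#define SKETCH_MAX_PRIO	31

struct sketch_thread {
	int			 active_priority;
	struct sketch_thread	*next;
};

struct sketch_pq {
	struct sketch_thread	*head[SKETCH_MAX_PRIO + 1];
	struct sketch_thread	*tail[SKETCH_MAX_PRIO + 1];
};

/* Append a ready thread to the FIFO list for its priority. */
static void
sketch_pq_insert_tail(struct sketch_pq *pq, struct sketch_thread *t)
{
	int i = t->active_priority;

	t->next = NULL;
	if (pq->tail[i] == NULL)
		pq->head[i] = t;
	else
		pq->tail[i]->next = t;
	pq->tail[i] = t;
}

/* "PRIOQ_FIRST": the head of the highest non-empty priority list. */
static struct sketch_thread *
sketch_pq_first(const struct sketch_pq *pq)
{
	int i;

	for (i = SKETCH_MAX_PRIO; i >= 0; i--)
		if (pq->head[i] != NULL)
			return (pq->head[i]);
	return (NULL);
}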
*/ + PTHREAD_PRIOQ_REMOVE(_thread_run); /* * Save the current time as the time that the thread @@ -459,116 +433,19 @@ _thread_kern_sched(struct sigcontext * scp) /* Reset the accumulated time slice period: */ _thread_run->slice_usec = 0; } - /* - * Reset the incremental priority now that this - * thread has been given the chance to run: - */ - _thread_run->inc_prio = 0; /* Check if there is more than one thread: */ if (_thread_run != _thread_link_list || _thread_run->nxt != NULL) { /* - * Define the maximum time before a SIGVTALRM - * is required: - */ - itimer.it_value.tv_sec = 0; - itimer.it_value.tv_usec = TIMESLICE_USEC; - - /* - * The interval timer is not reloaded when it - * times out. The interval time needs to be - * calculated every time. - */ - timerclear(&itimer.it_interval); - - /* - * Enter a loop to look for threads waiting - * for a time: - */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { - /* - * Check if this thread is to - * timeout: - */ - if (pthread->state == PS_COND_WAIT || - pthread->state == PS_SLEEP_WAIT || - pthread->state == PS_FDR_WAIT || - pthread->state == PS_FDW_WAIT || - pthread->state == PS_SELECT_WAIT) { - /* - * Check if this thread is to - * wait forever: - */ - if (pthread->wakeup_time.tv_sec == -1) { - } - /* - * Check if this thread is to - * wakeup immediately: - */ - else if (pthread->wakeup_time.tv_sec == 0 && - pthread->wakeup_time.tv_nsec == 0) { - } - /* - * Check if the current time - * is after the wakeup time: - */ - else if (timespeccmp(&ts, - &pthread->wakeup_time, > )){ - } else { - /* - * Calculate the time - * until this thread - * is ready, allowing - * for the clock - * resolution: - */ - struct timespec - clock_res - = {0,CLOCK_RES_NSEC}; - timespecsub( - &pthread->wakeup_time, - &ts, &ts1); - timespecadd( - &ts1, &clock_res, - &ts1); - /* - * Convert the - * timespec structure - * to a timeval - * structure: - */ - TIMESPEC_TO_TIMEVAL(&tv, &ts1); - - /* - * Check if the - * thread will be - * ready sooner than - * the earliest one - * found so far: - */ - if (timercmp(&tv, &itimer.it_value, <)) { - /* - * Update the - * time - * value: - */ - itimer.it_value.tv_sec = tv.tv_sec; - itimer.it_value.tv_usec = tv.tv_usec; - } - } - } - } - - /* * Start the interval timer for the * calculated time interval: */ - if (setitimer(ITIMER_VIRTUAL, &itimer, NULL) != 0) { + if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) { /* * Cannot initialise the timer, so * abort this process: */ - PANIC("Cannot set virtual timer"); + PANIC("Cannot set scheduling timer"); } } @@ -585,7 +462,17 @@ _thread_kern_sched(struct sigcontext * scp) * Do a sigreturn to restart the thread that * was interrupted by a signal: */ - _thread_kern_in_sched = 0; + _thread_kern_in_sched = 0; + + /* + * If we had a context switch, run any + * installed switch hooks. 
+ */ + if ((_sched_switch_hook != NULL) && + (_last_user_thread != _thread_run)) { + thread_run_switch_hook(_last_user_thread, + _thread_run); + } _thread_sys_sigreturn(&_thread_run->saved_sigcontext); } else /* @@ -680,7 +567,8 @@ _thread_kern_select(int wait_reqd) * Enter a loop to process threads waiting on either file descriptors * or times: */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { + _waitingq_check_reqd = 0; /* reset flag before loop */ + TAILQ_FOREACH (pthread, &_waitingq, pqe) { /* Assume that this state does not time out: */ settimeout = 0; @@ -691,12 +579,12 @@ _thread_kern_select(int wait_reqd) * operations or timeouts: */ case PS_DEAD: + case PS_DEADLOCK: case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: case PS_JOIN: case PS_MUTEX_WAIT: - case PS_RUNNING: case PS_SIGTHREAD: case PS_SIGWAIT: case PS_STATE_MAX: @@ -706,6 +594,16 @@ _thread_kern_select(int wait_reqd) /* Nothing to do here. */ break; + case PS_RUNNING: + /* + * A signal occurred and made this thread ready + * while in the scheduler or while the scheduling + * queues were protected. + */ + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); + break; + /* File descriptor read wait: */ case PS_FDR_WAIT: /* Add the file descriptor to the read set: */ @@ -1012,16 +910,16 @@ _thread_kern_select(int wait_reqd) * descriptors that are flagged as available by the * _select syscall: */ - for (pthread = _thread_link_list; pthread != NULL; pthread = pthread->nxt) { + TAILQ_FOREACH (pthread, &_waitingq, pqe) { /* Process according to thread state: */ switch (pthread->state) { /* * States which do not depend on file * descriptor I/O operations: */ - case PS_RUNNING: case PS_COND_WAIT: case PS_DEAD: + case PS_DEADLOCK: case PS_FDLR_WAIT: case PS_FDLW_WAIT: case PS_FILE_WAIT: @@ -1037,6 +935,15 @@ _thread_kern_select(int wait_reqd) /* Nothing to do here. */ break; + case PS_RUNNING: + /* + * A signal occurred and made this thread + * ready while in the scheduler. + */ + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); + break; + /* File descriptor read wait: */ case PS_FDR_WAIT: /* @@ -1050,6 +957,13 @@ _thread_kern_select(int wait_reqd) * is scheduled next: */ pthread->state = PS_RUNNING; + + /* + * Remove it from the waiting queue + * and add it to the ready queue: + */ + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); } break; @@ -1066,6 +980,13 @@ _thread_kern_select(int wait_reqd) * scheduled next: */ pthread->state = PS_RUNNING; + + /* + * Remove it from the waiting queue + * and add it to the ready queue: + */ + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); } break; @@ -1272,6 +1193,13 @@ _thread_kern_select(int wait_reqd) * thread to run: */ pthread->state = PS_RUNNING; + + /* + * Remove it from the waiting queue + * and add it to the ready queue: + */ + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); } break; } @@ -1323,4 +1251,80 @@ _thread_kern_set_timeout(struct timespec * timeout) } return; } + +void +_thread_kern_sched_defer(void) +{ + /* Allow scheduling deferral to be recursive. */ + _thread_run->sched_defer_count++; +} + +void +_thread_kern_sched_undefer(void) +{ + pthread_t pthread; + int need_resched = 0; + + /* + * Perform checks to yield only if we are about to undefer + * scheduling. 
+ */ + if (_thread_run->sched_defer_count == 1) { + /* + * Check if the waiting queue needs to be examined for + * threads that are now ready: + */ + while (_waitingq_check_reqd != 0) { + /* Clear the flag before checking the waiting queue: */ + _waitingq_check_reqd = 0; + + TAILQ_FOREACH(pthread, &_waitingq, pqe) { + if (pthread->state == PS_RUNNING) { + PTHREAD_WAITQ_REMOVE(pthread); + PTHREAD_PRIOQ_INSERT_TAIL(pthread); + } + } + } + + /* + * We need to yield if a thread change of state caused a + * higher priority thread to become ready, or if a + * scheduling signal occurred while preemption was disabled. + */ + if ((((pthread = PTHREAD_PRIOQ_FIRST) != NULL) && + (pthread->active_priority > _thread_run->active_priority)) || + (_thread_run->yield_on_sched_undefer != 0)) { + _thread_run->yield_on_sched_undefer = 0; + need_resched = 1; + } + } + + if (_thread_run->sched_defer_count > 0) { + /* Decrement the scheduling deferral count. */ + _thread_run->sched_defer_count--; + + /* Yield the CPU if necessary: */ + if (need_resched) + _thread_kern_sched(NULL); + } +} + +static inline void +thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in) +{ + pthread_t tid_out = thread_out; + pthread_t tid_in = thread_in; + + if ((tid_out != NULL) && + (tid_out->flags & PTHREAD_FLAGS_PRIVATE != 0)) + tid_out = NULL; + if ((tid_in != NULL) && + (tid_in->flags & PTHREAD_FLAGS_PRIVATE != 0)) + tid_in = NULL; + + if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) { + /* Run the scheduler switch hook: */ + _sched_switch_hook(tid_out, tid_in); + } +} #endif diff --git a/lib/libpthread/uthread/uthread_kill.c b/lib/libpthread/uthread/uthread_kill.c index 132dea74464..dc698ff1660 100644 --- a/lib/libpthread/uthread/uthread_kill.c +++ b/lib/libpthread/uthread/uthread_kill.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_kill.c,v 1.6 1999/05/26 00:18:24 d Exp $ */ /* * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -29,7 +30,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_kill.c,v 1.5 1999/01/06 05:29:24 d Exp $ */ #include <errno.h> #include <signal.h> @@ -53,6 +53,13 @@ pthread_kill(pthread_t pthread, int sig) /* Find the thread in the list of active threads: */ else if ((ret = _find_thread(pthread)) == 0) { + /* + * Guard against preemption by a scheduling signal. + * A change of thread state modifies the waiting + * and priority queues. + */ + _thread_kern_sched_defer(); + switch (pthread->state) { case PS_SIGSUSPEND: /* @@ -109,6 +116,12 @@ pthread_kill(pthread_t pthread, int sig) sigaddset(&pthread->sigpend,sig); break; } + + /* + * Reenable preemption and yield if a scheduling signal + * occurred while in the critical region. + */ + _thread_kern_sched_undefer(); } /* Return the completion status: */ diff --git a/lib/libpthread/uthread/uthread_mattr_init.c b/lib/libpthread/uthread/uthread_mattr_init.c index d24958b111e..65b79e9539f 100644 --- a/lib/libpthread/uthread/uthread_mattr_init.c +++ b/lib/libpthread/uthread/uthread_mattr_init.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_mattr_init.c,v 1.3 1999/05/26 00:18:24 d Exp $ */ /* * Copyright (c) 1996 Jeffrey Hsu <hsu@freebsd.org>. * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,7 +30,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_mattr_init.c,v 1.2 1999/01/06 05:29:24 d Exp $ */ #include <string.h> #include <stdlib.h> diff --git a/lib/libpthread/uthread/uthread_mattr_kind_np.c b/lib/libpthread/uthread/uthread_mattr_kind_np.c index 76311dfec2e..5f5d1b3a992 100644 --- a/lib/libpthread/uthread/uthread_mattr_kind_np.c +++ b/lib/libpthread/uthread/uthread_mattr_kind_np.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_mattr_kind_np.c,v 1.3 1999/01/06 05:29:24 d Exp $ + * $OpenBSD: uthread_mattr_kind_np.c,v 1.4 1999/05/26 00:18:25 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE @@ -68,8 +68,7 @@ pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type) { int ret; if (attr == NULL || *attr == NULL || type >= MUTEX_TYPE_MAX) { - errno = EINVAL; - ret = -1; + return EINVAL; } else { (*attr)->m_type = type; ret = 0; diff --git a/lib/libpthread/uthread/uthread_mutex.c b/lib/libpthread/uthread/uthread_mutex.c index 74127fde790..1968c953a27 100644 --- a/lib/libpthread/uthread/uthread_mutex.c +++ b/lib/libpthread/uthread/uthread_mutex.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_mutex.c,v 1.7 1999/05/26 00:18:25 d Exp $ */ /* * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,83 +30,120 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * $OpenBSD: uthread_mutex.c,v 1.6 1999/01/06 05:29:25 d Exp $ - * */ #include <stdlib.h> #include <errno.h> #include <string.h> +#include <sys/param.h> +#include <sys/queue.h> #ifdef _THREAD_SAFE #include <pthread.h> #include "pthread_private.h" + +/* + * Prototypes + */ +static inline int mutex_self_trylock(pthread_mutex_t); +static inline int mutex_self_lock(pthread_mutex_t); +static inline int mutex_unlock_common(pthread_mutex_t *, int); +static void mutex_priority_adjust(pthread_mutex_t); +static void mutex_rescan_owned (pthread_t, pthread_mutex_t); +static inline pthread_t mutex_queue_deq(pthread_mutex_t); +static inline void mutex_queue_remove(pthread_mutex_t, pthread_t); +static inline void mutex_queue_enq(pthread_mutex_t, pthread_t); + + static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER; + int pthread_mutex_init(pthread_mutex_t * mutex, const pthread_mutexattr_t * mutex_attr) { - enum pthread_mutextype type; + enum pthread_mutextype type; + int protocol; + int ceiling; pthread_mutex_t pmutex; int ret = 0; - if (mutex == NULL) { + if (mutex == NULL) ret = EINVAL; - } else { - /* Check if default mutex attributes: */ - if (mutex_attr == NULL || *mutex_attr == NULL) - /* Default to a fast mutex: */ - type = PTHREAD_MUTEX_DEFAULT; - else if ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX) - /* Return an invalid argument error: */ - ret = EINVAL; - else - /* Use the requested mutex type: */ - type = (*mutex_attr)->m_type; - - /* Check no errors so far: */ - if (ret == 0) { - if ((pmutex = (pthread_mutex_t) - malloc(sizeof(struct pthread_mutex))) == NULL) - ret = ENOMEM; - else { - /* Reset the mutex flags: */ - pmutex->m_flags = 0; - - /* Process according to mutex type: */ - switch (type) { - /* Fast mutex: */ - case PTHREAD_MUTEX_DEFAULT: - case PTHREAD_MUTEX_NORMAL: - case PTHREAD_MUTEX_ERRORCHECK: - /* Nothing to do here. 
*/ - break; - - /* Counting mutex: */ - case PTHREAD_MUTEX_RECURSIVE: - /* Reset the mutex count: */ - pmutex->m_data.m_count = 0; - break; - - /* Trap invalid mutex types: */ - default: - /* Return an invalid argument error: */ - ret = EINVAL; - break; - } - if (ret == 0) { - /* Initialise the rest of the mutex: */ - _thread_queue_init(&pmutex->m_queue); - pmutex->m_flags |= MUTEX_FLAGS_INITED; - pmutex->m_owner = NULL; - pmutex->m_type = type; - _SPINUNLOCK(&pmutex->lock); - *mutex = pmutex; - } else { - free(pmutex); - *mutex = NULL; - } + /* Check if default mutex attributes: */ + else if (mutex_attr == NULL || *mutex_attr == NULL) { + /* Default to a (error checking) POSIX mutex: */ + type = PTHREAD_MUTEX_ERRORCHECK; + protocol = PTHREAD_PRIO_NONE; + ceiling = PTHREAD_MAX_PRIORITY; + } + + /* Check mutex type: */ + else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) || + ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX)) + /* Return an invalid argument error: */ + ret = EINVAL; + + /* Check mutex protocol: */ + else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) || + ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE)) + /* Return an invalid argument error: */ + ret = EINVAL; + + else { + /* Use the requested mutex type and protocol: */ + type = (*mutex_attr)->m_type; + protocol = (*mutex_attr)->m_protocol; + ceiling = (*mutex_attr)->m_ceiling; + } + + /* Check no errors so far: */ + if (ret == 0) { + if ((pmutex = (pthread_mutex_t) + malloc(sizeof(struct pthread_mutex))) == NULL) + ret = ENOMEM; + else { + /* Reset the mutex flags: */ + pmutex->m_flags = 0; + + /* Process according to mutex type: */ + switch (type) { + /* case PTHREAD_MUTEX_DEFAULT: */ + case PTHREAD_MUTEX_ERRORCHECK: + case PTHREAD_MUTEX_NORMAL: + /* Nothing to do here. */ + break; + + /* Single UNIX Spec 2 recursive mutex: */ + case PTHREAD_MUTEX_RECURSIVE: + /* Reset the mutex count: */ + pmutex->m_data.m_count = 0; + break; + + /* Trap invalid mutex types: */ + default: + /* Return an invalid argument error: */ + ret = EINVAL; + break; + } + if (ret == 0) { + /* Initialise the rest of the mutex: */ + TAILQ_INIT(&pmutex->m_queue); + pmutex->m_flags |= MUTEX_FLAGS_INITED; + pmutex->m_owner = NULL; + pmutex->m_type = type; + pmutex->m_protocol = protocol; + pmutex->m_refcount = 0; + if (protocol == PTHREAD_PRIO_PROTECT) + pmutex->m_prio = ceiling; + else + pmutex->m_prio = 0; + pmutex->m_saved_prio = 0; + _SPINUNLOCK(&pmutex->lock); + *mutex = pmutex; + } else { + free(pmutex); + *mutex = NULL; } } } @@ -125,16 +163,29 @@ pthread_mutex_destroy(pthread_mutex_t * mutex) _SPINLOCK(&(*mutex)->lock); /* - * Free the memory allocated for the mutex - * structure: + * Check to see if this mutex is in use: */ - free(*mutex); + if (((*mutex)->m_owner != NULL) || + (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) || + ((*mutex)->m_refcount != 0)) { + ret = EBUSY; - /* - * Leave the caller's pointer NULL now that - * the mutex has been destroyed: - */ - *mutex = NULL; + /* Unlock the mutex structure: */ + _SPINUNLOCK(&(*mutex)->lock); + } + else { + /* + * Free the memory allocated for the mutex + * structure: + */ + free(*mutex); + + /* + * Leave the caller's pointer NULL now that + * the mutex has been destroyed: + */ + *mutex = NULL; + } } /* Return the completion status: */ @@ -171,44 +222,100 @@ pthread_mutex_trylock(pthread_mutex_t * mutex) * initialization: */ else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { + /* + * Guard against being preempted by a scheduling signal. 
+ * To support priority inheritence mutexes, we need to + * maintain lists of mutex ownerships for each thread as + * well as lists of waiting threads for each mutex. In + * order to propagate priorities we need to atomically + * walk these lists and cannot rely on a single mutex + * lock to provide protection against modification. + */ + _thread_kern_sched_defer(); + /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ - switch ((*mutex)->m_type) { - /* Fast mutex: */ - case PTHREAD_MUTEX_NORMAL: - case PTHREAD_MUTEX_DEFAULT: - case PTHREAD_MUTEX_ERRORCHECK: + switch ((*mutex)->m_protocol) { + /* Default POSIX mutex: */ + case PTHREAD_PRIO_NONE: /* Check if this mutex is not locked: */ if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; - } else { + + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_trylock(*mutex); + else /* Return a busy error: */ ret = EBUSY; - } break; - /* Counting mutex: */ - case PTHREAD_MUTEX_RECURSIVE: - /* Check if this mutex is locked: */ - if ((*mutex)->m_owner != NULL) { + /* POSIX priority inheritence mutex: */ + case PTHREAD_PRIO_INHERIT: + /* Check if this mutex is not locked: */ + if ((*mutex)->m_owner == NULL) { + /* Lock the mutex for the running thread: */ + (*mutex)->m_owner = _thread_run; + + /* Track number of priority mutexes owned: */ + _thread_run->priority_mutex_count++; + /* - * Check if the mutex is locked by the running - * thread: + * The mutex takes on the attributes of the + * running thread when there are no waiters. */ - if ((*mutex)->m_owner == _thread_run) { - /* Increment the lock count: */ - (*mutex)->m_data.m_count++; - } else { - /* Return a busy error: */ - ret = EBUSY; - } - } else { + (*mutex)->m_prio = _thread_run->active_priority; + (*mutex)->m_saved_prio = + _thread_run->inherited_priority; + + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_trylock(*mutex); + else + /* Return a busy error: */ + ret = EBUSY; + break; + + /* POSIX priority protection mutex: */ + case PTHREAD_PRIO_PROTECT: + /* Check for a priority ceiling violation: */ + if (_thread_run->active_priority > (*mutex)->m_prio) + ret = EINVAL; + + /* Check if this mutex is not locked: */ + else if ((*mutex)->m_owner == NULL) { /* Lock the mutex for the running thread: */ (*mutex)->m_owner = _thread_run; - } + + /* Track number of priority mutexes owned: */ + _thread_run->priority_mutex_count++; + + /* + * The running thread inherits the ceiling + * priority of the mutex and executes at that + * priority. 
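/*
 * Editor's illustrative sketch, not part of the patch: creating a
 * priority-ceiling (PTHREAD_PRIO_PROTECT) mutex from application code.
 * This assumes the standard POSIX pthread_mutexattr_setprotocol() and
 * pthread_mutexattr_setprioceiling() entry points are supplied elsewhere
 * in this sync; only the protocol constants and the attribute fields are
 * visible in the hunks shown here.  "make_ceiling_mutex" is invented.
 */
#include <pthread.h>

int
make_ceiling_mutex(pthread_mutex_t *m, int ceiling)
{
	pthread_mutexattr_t attr;
	int ret;

	if ((ret = pthread_mutexattr_init(&attr)) != 0)
		return (ret);
	if ((ret = pthread_mutexattr_setprotocol(&attr,
	    PTHREAD_PRIO_PROTECT)) == 0 &&
	    (ret = pthread_mutexattr_setprioceiling(&attr, ceiling)) == 0)
		ret = pthread_mutex_init(m, &attr);
	pthread_mutexattr_destroy(&attr);
	return (ret);
}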
+ */ + _thread_run->active_priority = (*mutex)->m_prio; + (*mutex)->m_saved_prio = + _thread_run->inherited_priority; + _thread_run->inherited_priority = + (*mutex)->m_prio; + + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_trylock(*mutex); + else + /* Return a busy error: */ + ret = EBUSY; break; /* Trap invalid mutex types: */ @@ -220,6 +327,12 @@ pthread_mutex_trylock(pthread_mutex_t * mutex) /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); + + /* + * Renable preemption and yield if a scheduling signal + * arrived while in the critical region: + */ + _thread_kern_sched_undefer(); } /* Return the completion status: */ @@ -239,94 +352,200 @@ pthread_mutex_lock(pthread_mutex_t * mutex) * initialization: */ else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { + /* + * Guard against being preempted by a scheduling signal. + * To support priority inheritence mutexes, we need to + * maintain lists of mutex ownerships for each thread as + * well as lists of waiting threads for each mutex. In + * order to propagate priorities we need to atomically + * walk these lists and cannot rely on a single mutex + * lock to provide protection against modification. + */ + _thread_kern_sched_defer(); + /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ - switch ((*mutex)->m_type) { - /* What SS2 define as a 'normal' mutex. This has to deadlock - on attempts to get a lock you already own. */ - case PTHREAD_MUTEX_NORMAL: - if ((*mutex)->m_owner == _thread_run) { - /* Intentionally deadlock: */ - _thread_run->data.mutex = mutex; - for (;;) - _thread_kern_sched_state(PS_MUTEX_WAIT, __FILE__, __LINE__); - } - goto COMMON_LOCK; - - /* Return error (not OK) on attempting to re-lock */ - case PTHREAD_MUTEX_ERRORCHECK: - if ((*mutex)->m_owner == _thread_run) { - ret = EDEADLK; - break; - } - - /* Fast mutexes do not check for any error conditions: */ - case PTHREAD_MUTEX_DEFAULT: - COMMON_LOCK: - /* - * Enter a loop to wait for the mutex to be locked by the - * current thread: - */ - while ((*mutex)->m_owner != _thread_run) { - /* Check if the mutex is not locked: */ - if ((*mutex)->m_owner == NULL) { - /* Lock the mutex for this thread: */ - (*mutex)->m_owner = _thread_run; - } else { - /* - * Join the queue of threads waiting to lock - * the mutex: - */ - _thread_queue_enq(&(*mutex)->m_queue, _thread_run); - _thread_run->data.mutex = mutex; + switch ((*mutex)->m_protocol) { + /* Default POSIX mutex: */ + case PTHREAD_PRIO_NONE: + if ((*mutex)->m_owner == NULL) { + /* Lock the mutex for this thread: */ + (*mutex)->m_owner = _thread_run; - /* Wait for the mutex: */ - _thread_kern_sched_state_unlock( - PS_MUTEX_WAIT, &(*mutex)->lock, - __FILE__, __LINE__); + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); - /* Lock the mutex again: */ - _SPINLOCK(&(*mutex)->lock); - } + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_lock(*mutex); + else { + /* + * Join the queue of threads waiting to lock + * the mutex: + */ + mutex_queue_enq(*mutex, _thread_run); + + /* + * Keep a pointer to the mutex this thread + * is waiting on: + */ + _thread_run->data.mutex = *mutex; + + /* + * Unlock the mutex structure and schedule the + * next thread: + */ + _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, + &(*mutex)->lock, __FILE__, __LINE__); + + /* Lock the mutex structure again: */ 
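The blocking path above queues the caller on the mutex in priority order and parks it in PS_MUTEX_WAIT until the owner unlocks. A minimal, self-contained illustration from the application side (example code only; the sleep() is just a crude way to let the worker reach the lock first):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *
worker(void *arg)
{
	pthread_mutex_lock(&lock);	/* blocks until main() unlocks */
	printf("worker got the lock\n");
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);
	pthread_create(&t, NULL, worker, NULL);
	sleep(1);			/* let the worker block on the lock */
	pthread_mutex_unlock(&lock);	/* wakes the queued worker */
	pthread_join(t, NULL);
	return (0);
}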
+ _SPINLOCK(&(*mutex)->lock); + + /* + * This thread is no longer waiting for + * the mutex: + */ + _thread_run->data.mutex = NULL; } break; - /* Counting mutex: */ - case PTHREAD_MUTEX_RECURSIVE: - /* - * Enter a loop to wait for the mutex to be locked by the - * current thread: - */ - while ((*mutex)->m_owner != _thread_run) { - /* Check if the mutex is not locked: */ - if ((*mutex)->m_owner == NULL) { - /* Lock the mutex for this thread: */ - (*mutex)->m_owner = _thread_run; - - /* Reset the lock count for this mutex: */ - (*mutex)->m_data.m_count = 0; - } else { - /* - * Join the queue of threads waiting to lock - * the mutex: - */ - _thread_queue_enq(&(*mutex)->m_queue, _thread_run); - _thread_run->data.mutex = mutex; + /* POSIX priority inheritence mutex: */ + case PTHREAD_PRIO_INHERIT: + /* Check if this mutex is not locked: */ + if ((*mutex)->m_owner == NULL) { + /* Lock the mutex for this thread: */ + (*mutex)->m_owner = _thread_run; - /* Wait for the mutex: */ - _thread_kern_sched_state_unlock( - PS_MUTEX_WAIT, &(*mutex)->lock, - __FILE__, __LINE__); + /* Track number of priority mutexes owned: */ + _thread_run->priority_mutex_count++; - /* Lock the mutex again: */ - _SPINLOCK(&(*mutex)->lock); - } + /* + * The mutex takes on attributes of the + * running thread when there are no waiters. + */ + (*mutex)->m_prio = _thread_run->active_priority; + (*mutex)->m_saved_prio = + _thread_run->inherited_priority; + _thread_run->inherited_priority = + (*mutex)->m_prio; + + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); + + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_lock(*mutex); + else { + /* + * Join the queue of threads waiting to lock + * the mutex: + */ + mutex_queue_enq(*mutex, _thread_run); + + /* + * Keep a pointer to the mutex this thread + * is waiting on: + */ + _thread_run->data.mutex = *mutex; + + if (_thread_run->active_priority > + (*mutex)->m_prio) + /* Adjust priorities: */ + mutex_priority_adjust(*mutex); + + /* + * Unlock the mutex structure and schedule the + * next thread: + */ + _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, + &(*mutex)->lock, __FILE__, __LINE__); + + /* Lock the mutex structure again: */ + _SPINLOCK(&(*mutex)->lock); + + /* + * This thread is no longer waiting for + * the mutex: + */ + _thread_run->data.mutex = NULL; } + break; + + /* POSIX priority protection mutex: */ + case PTHREAD_PRIO_PROTECT: + /* Check for a priority ceiling violation: */ + if (_thread_run->active_priority > (*mutex)->m_prio) + ret = EINVAL; + + /* Check if this mutex is not locked: */ + else if ((*mutex)->m_owner == NULL) { + /* + * Lock the mutex for the running + * thread: + */ + (*mutex)->m_owner = _thread_run; + + /* Track number of priority mutexes owned: */ + _thread_run->priority_mutex_count++; - /* Increment the lock count for this mutex: */ - (*mutex)->m_data.m_count++; + /* + * The running thread inherits the ceiling + * priority of the mutex and executes at that + * priority: + */ + _thread_run->active_priority = (*mutex)->m_prio; + (*mutex)->m_saved_prio = + _thread_run->inherited_priority; + _thread_run->inherited_priority = + (*mutex)->m_prio; + + /* Add to the list of owned mutexes: */ + TAILQ_INSERT_TAIL(&_thread_run->mutexq, + (*mutex), m_qe); + } else if ((*mutex)->m_owner == _thread_run) + ret = mutex_self_lock(*mutex); + else { + /* + * Join the queue of threads waiting to lock + * the mutex: + */ + mutex_queue_enq(*mutex, _thread_run); + + /* + * Keep a pointer to the mutex this 
thread + * is waiting on: + */ + _thread_run->data.mutex = *mutex; + + /* Clear any previous error: */ + _thread_run->error = 0; + + /* + * Unlock the mutex structure and schedule the + * next thread: + */ + _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, + &(*mutex)->lock, __FILE__, __LINE__); + + /* Lock the mutex structure again: */ + _SPINLOCK(&(*mutex)->lock); + + /* + * The threads priority may have changed while + * waiting for the mutex causing a ceiling + * violation. + */ + ret = _thread_run->error; + _thread_run->error = 0; + + /* + * This thread is no longer waiting for + * the mutex: + */ + _thread_run->data.mutex = NULL; + } break; /* Trap invalid mutex types: */ @@ -338,6 +557,12 @@ pthread_mutex_lock(pthread_mutex_t * mutex) /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); + + /* + * Renable preemption and yield if a scheduling signal + * arrived while in the critical region: + */ + _thread_kern_sched_undefer(); } /* Return the completion status: */ @@ -347,56 +572,375 @@ pthread_mutex_lock(pthread_mutex_t * mutex) int pthread_mutex_unlock(pthread_mutex_t * mutex) { - int ret = 0; + return (mutex_unlock_common(mutex, /* add reference */ 0)); +} + +int +_mutex_cv_unlock(pthread_mutex_t * mutex) +{ + return (mutex_unlock_common(mutex, /* add reference */ 1)); +} + +int +_mutex_cv_lock(pthread_mutex_t * mutex) +{ + int ret; + if ((ret = pthread_mutex_lock(mutex)) == 0) + (*mutex)->m_refcount--; + return (ret); +} + +static inline int +mutex_self_trylock(pthread_mutex_t mutex) +{ + int ret = 0; + + switch (mutex->m_type) { + + /* case PTHREAD_MUTEX_DEFAULT: */ + case PTHREAD_MUTEX_ERRORCHECK: + case PTHREAD_MUTEX_NORMAL: + /* + * POSIX specifies that mutexes should return EDEADLK if a + * recursive lock is detected. + */ + ret = EBUSY; + break; + + case PTHREAD_MUTEX_RECURSIVE: + /* Increment the lock count: */ + mutex->m_data.m_count++; + break; + + default: + /* Trap invalid mutex types; */ + ret = EINVAL; + } + + return(ret); +} + +static inline int +mutex_self_lock(pthread_mutex_t mutex) +{ + int ret = 0; + + switch (mutex->m_type) { + /* case PTHREAD_MUTEX_DEFAULT: */ + case PTHREAD_MUTEX_ERRORCHECK: + /* + * POSIX specifies that mutexes should return EDEADLK if a + * recursive lock is detected. + */ + ret = EDEADLK; + break; + + case PTHREAD_MUTEX_NORMAL: + /* + * What SS2 define as a 'normal' mutex. Intentionally + * deadlock on attempts to get a lock you already own. + */ + _thread_kern_sched_state_unlock(PS_DEADLOCK, + &mutex->lock, __FILE__, __LINE__); + break; + + case PTHREAD_MUTEX_RECURSIVE: + /* Increment the lock count: */ + mutex->m_data.m_count++; + break; + + default: + /* Trap invalid mutex types; */ + ret = EINVAL; + } + + return(ret); +} + +static inline int +mutex_unlock_common(pthread_mutex_t * mutex, int add_reference) +{ + int ret = 0; if (mutex == NULL || *mutex == NULL) { ret = EINVAL; } else { + /* + * Guard against being preempted by a scheduling signal. + * To support priority inheritence mutexes, we need to + * maintain lists of mutex ownerships for each thread as + * well as lists of waiting threads for each mutex. In + * order to propagate priorities we need to atomically + * walk these lists and cannot rely on a single mutex + * lock to provide protection against modification. 
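mutex_self_lock() above gives each type its own relock behaviour: the default error-checking mutex fails with EDEADLK, a normal mutex deliberately deadlocks in PS_DEADLOCK, and a recursive mutex just bumps its count. A small caller-side check of the default case, for illustration:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_mutex_t m;

	pthread_mutex_init(&m, NULL);		/* default: error checking */
	pthread_mutex_lock(&m);
	if (pthread_mutex_lock(&m) == EDEADLK)
		printf("relock refused, as expected\n");
	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	return (0);
}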
+ */ + _thread_kern_sched_defer(); + /* Lock the mutex structure: */ _SPINLOCK(&(*mutex)->lock); /* Process according to mutex type: */ - switch ((*mutex)->m_type) { - /* Default & normal mutexes do not really need to check for - any error conditions: */ - case PTHREAD_MUTEX_NORMAL: - case PTHREAD_MUTEX_DEFAULT: - case PTHREAD_MUTEX_ERRORCHECK: - /* Check if the running thread is not the owner of the mutex: */ + switch ((*mutex)->m_protocol) { + /* Default POSIX mutex: */ + case PTHREAD_PRIO_NONE: + /* + * Check if the running thread is not the owner of the + * mutex: + */ if ((*mutex)->m_owner != _thread_run) { - /* This thread doesn't have permission: */ + /* + * Return a permission error when the thread + * doesn't own the lock: + */ ret = EPERM; } + else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && + ((*mutex)->m_data.m_count > 1)) { + /* Decrement the count: */ + (*mutex)->m_data.m_count--; + } else { + /* + * Clear the count in case this is recursive + * mutex. + */ + (*mutex)->m_data.m_count = 0; + + /* Remove the mutex from the threads queue. */ + TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, + (*mutex), m_qe); + + /* + * Get the next thread from the queue of + * threads waiting on the mutex: + */ + if (((*mutex)->m_owner = + mutex_queue_deq(*mutex)) != NULL) { + /* + * Allow the new owner of the mutex to + * run: + */ + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); + } + } + break; + + /* POSIX priority inheritence mutex: */ + case PTHREAD_PRIO_INHERIT: /* - * Get the next thread from the queue of threads waiting on - * the mutex: + * Check if the running thread is not the owner of the + * mutex: */ - else if (((*mutex)->m_owner = _thread_queue_deq(&(*mutex)->m_queue)) != NULL) { - /* Allow the new owner of the mutex to run: */ - PTHREAD_NEW_STATE((*mutex)->m_owner,PS_RUNNING); + if ((*mutex)->m_owner != _thread_run) { + /* + * Return a permission error when the thread + * doesn't own the lock: + */ + ret = EPERM; + } + else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && + ((*mutex)->m_data.m_count > 1)) { + /* Decrement the count: */ + (*mutex)->m_data.m_count--; + } else { + /* + * Clear the count in case this is recursive + * mutex. + */ + (*mutex)->m_data.m_count = 0; + + /* + * Restore the threads inherited priority and + * recompute the active priority (being careful + * not to override changes in the threads base + * priority subsequent to locking the mutex). + */ + _thread_run->inherited_priority = + (*mutex)->m_saved_prio; + _thread_run->active_priority = + MAX(_thread_run->inherited_priority, + _thread_run->base_priority); + + /* + * This thread now owns one less priority mutex. + */ + _thread_run->priority_mutex_count--; + + /* Remove the mutex from the threads queue. */ + TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, + (*mutex), m_qe); + + /* + * Get the next thread from the queue of threads + * waiting on the mutex: + */ + if (((*mutex)->m_owner = + mutex_queue_deq(*mutex)) == NULL) + /* This mutex has no priority. */ + (*mutex)->m_prio = 0; + else { + /* + * Track number of priority mutexes owned: + */ + (*mutex)->m_owner->priority_mutex_count++; + + /* + * Add the mutex to the threads list + * of owned mutexes: + */ + TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, + (*mutex), m_qe); + + /* + * The owner is no longer waiting for + * this mutex: + */ + (*mutex)->m_owner->data.mutex = NULL; + + /* + * Set the priority of the mutex. 
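The PTHREAD_PRIO_INHERIT unlock hand-off in this path can be summarised with a toy model (plain ints and invented names, not the library's structures): the departing owner falls back to MAX(saved inherited priority, base priority), while the mutex and its new owner take on the dequeued waiter's active priority.

#include <stdio.h>

#define MAX(a, b)	(((a) > (b)) ? (a) : (b))

struct toy_thread { int base, inherited, active; };
struct toy_mutex  { int prio, saved_prio; };

static void
toy_pi_unlock(struct toy_mutex *m, struct toy_thread *owner,
    struct toy_thread *waiter)
{
	/* Owner restores the priority it had before taking the mutex: */
	owner->inherited = m->saved_prio;
	owner->active = MAX(owner->inherited, owner->base);

	/* Mutex and new owner take on the waiter's active priority: */
	m->prio = waiter->active;
	m->saved_prio = waiter->inherited;
	waiter->inherited = m->prio;
	waiter->active = MAX(waiter->inherited, waiter->base);
}

int
main(void)
{
	struct toy_thread low = { 5, 15, 15 };	/* boosted while owning */
	struct toy_thread high = { 15, 0, 15 };	/* the waiter */
	struct toy_mutex m = { 15, 0 };		/* priority from waiter */

	toy_pi_unlock(&m, &low, &high);
	printf("low: active %d (back to base), high: active %d\n",
	    low.active, high.active);
	return (0);
}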
Since + * our waiting threads are in descending + * priority order, the priority of the + * mutex becomes the active priority of + * the thread we just dequeued. + */ + (*mutex)->m_prio = + (*mutex)->m_owner->active_priority; + + /* + * Save the owning threads inherited + * priority: + */ + (*mutex)->m_saved_prio = + (*mutex)->m_owner->inherited_priority; + + /* + * The owning threads inherited priority + * now becomes his active priority (the + * priority of the mutex). + */ + (*mutex)->m_owner->inherited_priority = + (*mutex)->m_prio; + + /* + * Allow the new owner of the mutex to + * run: + */ + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); + } } break; - /* Counting mutex: */ - case PTHREAD_MUTEX_RECURSIVE: - /* Check if the running thread is not the owner of the mutex: */ + /* POSIX priority ceiling mutex: */ + case PTHREAD_PRIO_PROTECT: + /* + * Check if the running thread is not the owner of the + * mutex: + */ if ((*mutex)->m_owner != _thread_run) { - /* Return an invalid argument error: */ - ret = EINVAL; + /* + * Return a permission error when the thread + * doesn't own the lock: + */ + ret = EPERM; } - /* Check if there are still counts: */ - else if ((*mutex)->m_data.m_count > 1) { + else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && + ((*mutex)->m_data.m_count > 1)) { /* Decrement the count: */ (*mutex)->m_data.m_count--; } else { + /* + * Clear the count in case this is recursive + * mutex. + */ (*mutex)->m_data.m_count = 0; + /* - * Get the next thread from the queue of threads waiting on - * the mutex: + * Restore the threads inherited priority and + * recompute the active priority (being careful + * not to override changes in the threads base + * priority subsequent to locking the mutex). + */ + _thread_run->inherited_priority = + (*mutex)->m_saved_prio; + _thread_run->active_priority = + MAX(_thread_run->inherited_priority, + _thread_run->base_priority); + + /* + * This thread now owns one less priority mutex. + */ + _thread_run->priority_mutex_count--; + + /* Remove the mutex from the threads queue. */ + TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, + (*mutex), m_qe); + + /* + * Enter a loop to find a waiting thread whose + * active priority will not cause a ceiling + * violation: */ - if (((*mutex)->m_owner = _thread_queue_deq(&(*mutex)->m_queue)) != NULL) { - /* Allow the new owner of the mutex to run: */ - PTHREAD_NEW_STATE((*mutex)->m_owner,PS_RUNNING); + while ((((*mutex)->m_owner = + mutex_queue_deq(*mutex)) != NULL) && + ((*mutex)->m_owner->active_priority > + (*mutex)->m_prio)) { + /* + * Either the mutex ceiling priority + * been lowered and/or this threads + * priority has been raised subsequent + * to this thread being queued on the + * waiting list. 
+ */ + (*mutex)->m_owner->error = EINVAL; + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); + } + + /* Check for a new owner: */ + if ((*mutex)->m_owner != NULL) { + /* + * Track number of priority mutexes owned: + */ + (*mutex)->m_owner->priority_mutex_count++; + + /* + * Add the mutex to the threads list + * of owned mutexes: + */ + TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, + (*mutex), m_qe); + + /* + * The owner is no longer waiting for + * this mutex: + */ + (*mutex)->m_owner->data.mutex = NULL; + + /* + * Save the owning threads inherited + * priority: + */ + (*mutex)->m_saved_prio = + (*mutex)->m_owner->inherited_priority; + + /* + * The owning thread inherits the + * ceiling priority of the mutex and + * executes at that priority: + */ + (*mutex)->m_owner->inherited_priority = + (*mutex)->m_prio; + (*mutex)->m_owner->active_priority = + (*mutex)->m_prio; + + /* + * Allow the new owner of the mutex to + * run: + */ + PTHREAD_NEW_STATE((*mutex)->m_owner, + PS_RUNNING); } } break; @@ -408,11 +952,348 @@ pthread_mutex_unlock(pthread_mutex_t * mutex) break; } + if ((ret == 0) && (add_reference != 0)) { + /* Increment the reference count: */ + (*mutex)->m_refcount++; + } + /* Unlock the mutex structure: */ _SPINUNLOCK(&(*mutex)->lock); + + /* + * Renable preemption and yield if a scheduling signal + * arrived while in the critical region: + */ + _thread_kern_sched_undefer(); } /* Return the completion status: */ return (ret); } + + +/* + * This function is called when a change in base priority occurs + * for a thread that is thread holding, or waiting for, a priority + * protection or inheritence mutex. A change in a threads base + * priority can effect changes to active priorities of other threads + * and to the ordering of mutex locking by waiting threads. + * + * This must be called while thread scheduling is deferred. + */ +void +_mutex_notify_priochange(pthread_t pthread) +{ + /* Adjust the priorites of any owned priority mutexes: */ + if (pthread->priority_mutex_count > 0) { + /* + * Rescan the mutexes owned by this thread and correct + * their priorities to account for this threads change + * in priority. This has the side effect of changing + * the threads active priority. + */ + mutex_rescan_owned(pthread, /* rescan all owned */ NULL); + } + + /* + * If this thread is waiting on a priority inheritence mutex, + * check for priority adjustments. A change in priority can + * also effect a ceiling violation(*) for a thread waiting on + * a priority protection mutex; we don't perform the check here + * as it is done in pthread_mutex_unlock. + * + * (*) It should be noted that a priority change to a thread + * _after_ taking and owning a priority ceiling mutex + * does not affect ownership of that mutex; the ceiling + * priority is only checked before mutex ownership occurs. + */ + if (pthread->state == PS_MUTEX_WAIT) { + /* Lock the mutex structure: */ + _SPINLOCK(&pthread->data.mutex->lock); + + /* + * Check to make sure this thread is still in the same state + * (the spinlock above can yield the CPU to another thread): + */ + if (pthread->state == PS_MUTEX_WAIT) { + /* + * Remove and reinsert this thread into the list of + * waiting threads to preserve decreasing priority + * order. 
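The requeue above relies on the waiting queues being kept in decreasing priority order. A self-contained sketch of that TAILQ idiom (struct and helper names are invented; the library's own queueing helpers appear at the end of this file):

#include <sys/queue.h>
#include <stdio.h>

struct waiter {
	int			prio;
	TAILQ_ENTRY(waiter)	link;
};
TAILQ_HEAD(waitq, waiter);

static void
waitq_enq(struct waitq *q, struct waiter *w)
{
	struct waiter *last = TAILQ_LAST(q, waitq);

	/* Common case: equal or lower priority goes at the tail. */
	if (last == NULL || w->prio <= last->prio)
		TAILQ_INSERT_TAIL(q, w, link);
	else {
		struct waiter *it = TAILQ_FIRST(q);

		while (w->prio <= it->prio)
			it = TAILQ_NEXT(it, link);
		TAILQ_INSERT_BEFORE(it, w, link);
	}
}

int
main(void)
{
	struct waitq q;
	struct waiter a = { 5 }, b = { 15 }, c = { 10 }, *w;

	TAILQ_INIT(&q);
	waitq_enq(&q, &a);
	waitq_enq(&q, &b);
	waitq_enq(&q, &c);
	TAILQ_FOREACH(w, &q, link)
		printf("%d ", w->prio);	/* prints: 15 10 5 */
	printf("\n");
	return (0);
}

Checking the tail first keeps the common case (all threads at equal priority) cheap, which is the same trade-off the commit's mutex_queue_enq() makes.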
+ */ + mutex_queue_remove(pthread->data.mutex, pthread); + mutex_queue_enq(pthread->data.mutex, pthread); + + if (pthread->data.mutex->m_protocol == + PTHREAD_PRIO_INHERIT) { + /* Adjust priorities: */ + mutex_priority_adjust(pthread->data.mutex); + } + } + + /* Unlock the mutex structure: */ + _SPINUNLOCK(&pthread->data.mutex->lock); + } +} + +/* + * Called when a new thread is added to the mutex waiting queue or + * when a threads priority changes that is already in the mutex + * waiting queue. + */ +static void +mutex_priority_adjust(pthread_mutex_t mutex) +{ + pthread_t pthread_next, pthread = mutex->m_owner; + int temp_prio; + pthread_mutex_t m = mutex; + + /* + * Calculate the mutex priority as the maximum of the highest + * active priority of any waiting threads and the owning threads + * active priority(*). + * + * (*) Because the owning threads current active priority may + * reflect priority inherited from this mutex (and the mutex + * priority may have changed) we must recalculate the active + * priority based on the threads saved inherited priority + * and its base priority. + */ + pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */ + temp_prio = MAX(pthread_next->active_priority, + MAX(m->m_saved_prio, pthread->base_priority)); + + /* See if this mutex really needs adjusting: */ + if (temp_prio == m->m_prio) + /* No need to propagate the priority: */ + return; + + /* Set new priority of the mutex: */ + m->m_prio = temp_prio; + + while (m != NULL) { + /* + * Save the threads priority before rescanning the + * owned mutexes: + */ + temp_prio = pthread->active_priority; + + /* + * Fix the priorities for all the mutexes this thread has + * locked since taking this mutex. This also has a + * potential side-effect of changing the threads priority. + */ + mutex_rescan_owned(pthread, m); + + /* + * If the thread is currently waiting on a mutex, check + * to see if the threads new priority has affected the + * priority of the mutex. + */ + if ((temp_prio != pthread->active_priority) && + (pthread->state == PS_MUTEX_WAIT) && + (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) { + /* Grab the mutex this thread is waiting on: */ + m = pthread->data.mutex; + + /* + * The priority for this thread has changed. Remove + * and reinsert this thread into the list of waiting + * threads to preserve decreasing priority order. + */ + mutex_queue_remove(m, pthread); + mutex_queue_enq(m, pthread); + + /* Grab the waiting thread with highest priority: */ + pthread_next = TAILQ_FIRST(&m->m_queue); + + /* + * Calculate the mutex priority as the maximum of the + * highest active priority of any waiting threads and + * the owning threads active priority. + */ + temp_prio = MAX(pthread_next->active_priority, + MAX(m->m_saved_prio, m->m_owner->base_priority)); + + if (temp_prio != m->m_prio) { + /* + * The priority needs to be propagated to the + * mutex this thread is waiting on and up to + * the owner of that mutex. + */ + m->m_prio = temp_prio; + pthread = m->m_owner; + } + else + /* We're done: */ + m = NULL; + + } + else + /* We're done: */ + m = NULL; + } +} + +static void +mutex_rescan_owned (pthread_t pthread, pthread_mutex_t mutex) +{ + int active_prio, inherited_prio; + pthread_mutex_t m; + pthread_t pthread_next; + + /* + * Start walking the mutexes the thread has taken since + * taking this mutex. + */ + if (mutex == NULL) { + /* + * A null mutex means start at the beginning of the owned + * mutex list. 
+ */ + m = TAILQ_FIRST(&pthread->mutexq); + + /* There is no inherited priority yet. */ + inherited_prio = 0; + } + else { + /* + * The caller wants to start after a specific mutex. It + * is assumed that this mutex is a priority inheritence + * mutex and that its priority has been correctly + * calculated. + */ + m = TAILQ_NEXT(mutex, m_qe); + + /* Start inheriting priority from the specified mutex. */ + inherited_prio = mutex->m_prio; + } + active_prio = MAX(inherited_prio, pthread->base_priority); + + while (m != NULL) { + /* + * We only want to deal with priority inheritence + * mutexes. This might be optimized by only placing + * priority inheritence mutexes into the owned mutex + * list, but it may prove to be useful having all + * owned mutexes in this list. Consider a thread + * exiting while holding mutexes... + */ + if (m->m_protocol == PTHREAD_PRIO_INHERIT) { + /* + * Fix the owners saved (inherited) priority to + * reflect the priority of the previous mutex. + */ + m->m_saved_prio = inherited_prio; + + if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL) + /* Recalculate the priority of the mutex: */ + m->m_prio = MAX(active_prio, + pthread_next->active_priority); + else + m->m_prio = active_prio; + + /* Recalculate new inherited and active priorities: */ + inherited_prio = m->m_prio; + active_prio = MAX(m->m_prio, pthread->base_priority); + } + + /* Advance to the next mutex owned by this thread: */ + m = TAILQ_NEXT(m, m_qe); + } + + /* + * Fix the threads inherited priority and recalculate its + * active priority. + */ + pthread->inherited_priority = inherited_prio; + active_prio = MAX(inherited_prio, pthread->base_priority); + + if (active_prio != pthread->active_priority) { + /* + * If this thread is in the priority queue, it must be + * removed and reinserted for its new priority. + */ + if ((pthread != _thread_run) && + (pthread->state == PS_RUNNING)) { + /* + * Remove the thread from the priority queue + * before changing its priority: + */ + PTHREAD_PRIOQ_REMOVE(pthread); + + /* + * POSIX states that if the priority is being + * lowered, the thread must be inserted at the + * head of the queue for its priority if it owns + * any priority protection or inheritence mutexes. + */ + if ((active_prio < pthread->active_priority) && + (pthread->priority_mutex_count > 0)) { + /* Set the new active priority. */ + pthread->active_priority = active_prio; + + PTHREAD_PRIOQ_INSERT_HEAD(pthread); + } + else { + /* Set the new active priority. */ + pthread->active_priority = active_prio; + + PTHREAD_PRIOQ_INSERT_TAIL(pthread); + } + } + else { + /* Set the new active priority. */ + pthread->active_priority = active_prio; + } + } +} + +/* + * Dequeue a waiting thread from the head of a mutex queue in descending + * priority order. + */ +static inline pthread_t +mutex_queue_deq(pthread_mutex_t mutex) +{ + pthread_t pthread; + + if ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) + TAILQ_REMOVE(&mutex->m_queue, pthread, qe); + + return(pthread); +} + +/* + * Remove a waiting thread from a mutex queue in descending priority order. + */ +static inline void +mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) +{ + TAILQ_REMOVE(&mutex->m_queue, pthread, qe); +} + +/* + * Enqueue a waiting thread to a queue in descending priority order. 
+ */ +static inline void +mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) +{ + pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head); + + /* + * For the common case of all threads having equal priority, + * we perform a quick check against the priority of the thread + * at the tail of the queue. + */ + if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) + TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe); + else { + tid = TAILQ_FIRST(&mutex->m_queue); + while (pthread->active_priority <= tid->active_priority) + tid = TAILQ_NEXT(tid, qe); + TAILQ_INSERT_BEFORE(tid, pthread, qe); + } +} + #endif diff --git a/lib/libpthread/uthread/uthread_mutex_prioceiling.c b/lib/libpthread/uthread/uthread_mutex_prioceiling.c new file mode 100644 index 00000000000..779c238cfe7 --- /dev/null +++ b/lib/libpthread/uthread/uthread_mutex_prioceiling.c @@ -0,0 +1,110 @@ +/* $OpenBSD: uthread_mutex_prioceiling.c,v 1.1 1999/05/26 00:18:25 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ +#include <string.h> +#include <stdlib.h> +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_mutexattr_getprioceiling(pthread_mutexattr_t *mattr, int *prioceiling) +{ + int ret = 0; + + if ((mattr == NULL) || (*mattr == NULL)) + ret = EINVAL; + else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT) + ret = EINVAL; + else + *prioceiling = (*mattr)->m_ceiling; + + return(ret); +} + +int +pthread_mutexattr_setprioceiling(pthread_mutexattr_t *mattr, int prioceiling) +{ + int ret = 0; + + if ((mattr == NULL) || (*mattr == NULL)) + ret = EINVAL; + else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT) + ret = EINVAL; + else + (*mattr)->m_ceiling = prioceiling; + + return(ret); +} + +int +pthread_mutex_getprioceiling(pthread_mutex_t *mutex, + int *prioceiling) +{ + int ret; + + if ((mutex == NULL) || (*mutex == NULL)) + ret = EINVAL; + else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT) + ret = EINVAL; + else + ret = (*mutex)->m_prio; + + return(ret); +} + +int +pthread_mutex_setprioceiling(pthread_mutex_t *mutex, + int prioceiling, int *old_ceiling) +{ + int ret = 0; + + if ((mutex == NULL) || (*mutex == NULL)) + ret = EINVAL; + else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT) + ret = EINVAL; + else { + /* Lock the mutex: */ + if ((ret = pthread_mutex_lock(mutex)) == 0) { + /* Return the old ceiling and set the new ceiling: */ + *old_ceiling = (*mutex)->m_prio; + (*mutex)->m_prio = prioceiling; + + /* Unlock the mutex: */ + ret = pthread_mutex_unlock(mutex); + } + } + return(ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_mutex_protocol.c b/lib/libpthread/uthread/uthread_mutex_protocol.c new file mode 100644 index 00000000000..fa0b9804d57 --- /dev/null +++ b/lib/libpthread/uthread/uthread_mutex_protocol.c @@ -0,0 +1,69 @@ +/* $OpenBSD: uthread_mutex_protocol.c,v 1.1 1999/05/26 00:18:25 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
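A sketch of the new ceiling interfaces from the caller's side (raise_ceiling is a made-up helper): pthread_mutex_setprioceiling() locks the mutex, swaps the ceiling and unlocks it again, returning the previous value through old_ceiling.

#include <pthread.h>
#include <stdio.h>

static int
raise_ceiling(pthread_mutex_t *m, int new_ceiling)
{
	int old_ceiling, ret;

	ret = pthread_mutex_setprioceiling(m, new_ceiling, &old_ceiling);
	if (ret == 0)
		printf("ceiling raised from %d to %d\n",
		    old_ceiling, new_ceiling);
	return (ret);
}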
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <string.h> +#include <stdlib.h> +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_mutexattr_getprotocol(pthread_mutexattr_t *mattr, int *protocol) +{ + int ret = 0; + + if ((mattr == NULL) || (*mattr == NULL)) + ret = EINVAL; + else + *protocol = (*mattr)->m_protocol; + + return(ret); +} + +int +pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol) +{ + int ret = 0; + + if ((mattr == NULL) || (*mattr == NULL) || + (protocol < PTHREAD_PRIO_NONE) || (protocol > PTHREAD_PRIO_PROTECT)) + ret = EINVAL; + else { + (*mattr)->m_protocol = protocol; + (*mattr)->m_ceiling = PTHREAD_MAX_PRIORITY; + } + return(ret); +} + +#endif diff --git a/lib/libpthread/uthread/uthread_priority_queue.c b/lib/libpthread/uthread/uthread_priority_queue.c new file mode 100644 index 00000000000..a5f45ef7064 --- /dev/null +++ b/lib/libpthread/uthread/uthread_priority_queue.c @@ -0,0 +1,156 @@ +/* $OpenBSD: uthread_priority_queue.c,v 1.1 1999/05/26 00:18:25 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ +#include <stdlib.h> +#include <sys/queue.h> +#include <string.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +/* Prototypes: */ +static void pq_insert_prio_list(pq_queue_t *pq, int prio); + + +int +_pq_init(pq_queue_t *pq, int minprio, int maxprio) +{ + int i, ret = 0; + int prioslots = maxprio - minprio + 1; + + if (pq == NULL) + ret = -1; + + /* Create the priority queue with (maxprio - minprio + 1) slots: */ + else if ((pq->pq_lists = + (pq_list_t *) malloc(sizeof(pq_list_t) * prioslots)) == NULL) + ret = -1; + + else { + /* Initialize the queue for each priority slot: */ + for (i = 0; i < prioslots; i++) { + TAILQ_INIT(&pq->pq_lists[i].pl_head); + pq->pq_lists[i].pl_prio = i; + pq->pq_lists[i].pl_queued = 0; + } + + /* Initialize the priority queue: */ + TAILQ_INIT(&pq->pq_queue); + + /* Remember the queue size: */ + pq->pq_size = prioslots; + } + return (ret); +} + +void +_pq_remove(pq_queue_t *pq, pthread_t pthread) +{ + int prio = pthread->active_priority; + + TAILQ_REMOVE(&pq->pq_lists[prio].pl_head, pthread, pqe); +} + + +void +_pq_insert_head(pq_queue_t *pq, pthread_t pthread) +{ + int prio = pthread->active_priority; + + TAILQ_INSERT_HEAD(&pq->pq_lists[prio].pl_head, pthread, pqe); + if (pq->pq_lists[prio].pl_queued == 0) + /* Insert the list into the priority queue: */ + pq_insert_prio_list(pq, prio); +} + + +void +_pq_insert_tail(pq_queue_t *pq, pthread_t pthread) +{ + int prio = pthread->active_priority; + + TAILQ_INSERT_TAIL(&pq->pq_lists[prio].pl_head, pthread, pqe); + if (pq->pq_lists[prio].pl_queued == 0) + /* Insert the list into the priority queue: */ + pq_insert_prio_list(pq, prio); +} + + +pthread_t +_pq_first(pq_queue_t *pq) +{ + pq_list_t *pql; + pthread_t pthread = NULL; + + while (((pql = TAILQ_FIRST(&pq->pq_queue)) != NULL) && + (pthread == NULL)) { + if ((pthread = TAILQ_FIRST(&pql->pl_head)) == NULL) { + /* + * The priority list is empty; remove the list + * from the queue. + */ + TAILQ_REMOVE(&pq->pq_queue, pql, pl_link); + + /* Mark the list as not being in the queue: */ + pql->pl_queued = 0; + } + } + return (pthread); +} + + +static void +pq_insert_prio_list(pq_queue_t *pq, int prio) +{ + pq_list_t *pql; + + /* + * The priority queue is in descending priority order. Start at + * the beginning of the queue and find the list before which the + * new list should to be inserted. + */ + pql = TAILQ_FIRST(&pq->pq_queue); + while ((pql != NULL) && (pql->pl_prio > prio)) + pql = TAILQ_NEXT(pql, pl_link); + + /* Insert the list: */ + if (pql == NULL) + TAILQ_INSERT_TAIL(&pq->pq_queue, &pq->pq_lists[prio], pl_link); + else + TAILQ_INSERT_BEFORE(pql, &pq->pq_lists[prio], pl_link); + + /* Mark this list as being in the queue: */ + pq->pq_lists[prio].pl_queued = 1; +} + +#endif diff --git a/lib/libpthread/uthread/uthread_resume_np.c b/lib/libpthread/uthread/uthread_resume_np.c index 6e211f34ece..e4be286bde6 100644 --- a/lib/libpthread/uthread/uthread_resume_np.c +++ b/lib/libpthread/uthread/uthread_resume_np.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_resume_np.c,v 1.2 1999/01/06 05:29:26 d Exp $ + * $OpenBSD: uthread_resume_np.c,v 1.3 1999/05/26 00:18:25 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE @@ -46,8 +46,21 @@ pthread_resume_np(pthread_t thread) if ((ret = _find_thread(thread)) == 0) { /* The thread exists. Is it suspended? */ if (thread->state != PS_SUSPENDED) { + /* + * Guard against preemption by a scheduling signal. 
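For context, a sketch of how the non-portable suspend/resume pair being hardened here is used (example code only; the prototypes are assumed to come from <pthread_np.h>, and pthread_suspend_np itself is touched further down in this commit):

#include <pthread.h>
#include <pthread_np.h>
#include <unistd.h>

static void
pause_worker(pthread_t worker)
{
	pthread_suspend_np(worker);	/* thread moves to PS_SUSPENDED */
	sleep(1);			/* ...do other work... */
	pthread_resume_np(worker);	/* back to PS_RUNNING */
}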
+ * A change of thread state modifies the waiting + * and priority queues. + */ + _thread_kern_sched_defer(); + /* Allow the thread to run. */ PTHREAD_NEW_STATE(thread,PS_RUNNING); + + /* + * Reenable preemption and yield if a scheduling + * signal occurred while in the critical region. + */ + _thread_kern_sched_undefer(); } } return(ret); diff --git a/lib/libpthread/uthread/uthread_select.c b/lib/libpthread/uthread/uthread_select.c index 7793633fd75..dd9714e7b3e 100644 --- a/lib/libpthread/uthread/uthread_select.c +++ b/lib/libpthread/uthread/uthread_select.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_select.c,v 1.2 1999/01/06 05:29:26 d Exp $ + * $OpenBSD: uthread_select.c,v 1.3 1999/05/26 00:18:25 d Exp $ */ #include <unistd.h> #include <errno.h> @@ -48,6 +48,7 @@ select(int numfds, fd_set * readfds, fd_set * writefds, struct timespec ts; struct timeval zero_timeout = {0, 0}; int i, ret = 0, got_all_locks = 1; + int f_wait = 1; struct pthread_select_data data; if (numfds > _thread_dtablesize) { @@ -60,6 +61,8 @@ select(int numfds, fd_set * readfds, fd_set * writefds, /* Set the wake up time: */ _thread_kern_set_timeout(&ts); + if (ts.tv_sec == 0 && ts.tv_nsec == 0) + f_wait = 0; } else { /* Wait for ever: */ _thread_kern_set_timeout(NULL); @@ -111,7 +114,7 @@ select(int numfds, fd_set * readfds, fd_set * writefds, if (exceptfds != NULL) { memcpy(&data.exceptfds, exceptfds, sizeof(data.exceptfds)); } - if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0) { + if ((ret = _thread_sys_select(data.nfds, &data.readfds, &data.writefds, &data.exceptfds, &zero_timeout)) == 0 && f_wait) { data.nfds = numfds; FD_ZERO(&data.readfds); FD_ZERO(&data.writefds); diff --git a/lib/libpthread/uthread/uthread_setprio.c b/lib/libpthread/uthread/uthread_setprio.c index 7de34d3c7c1..575eb62da2c 100644 --- a/lib/libpthread/uthread/uthread_setprio.c +++ b/lib/libpthread/uthread/uthread_setprio.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_setprio.c,v 1.4 1999/05/26 00:18:25 d Exp $ */ /* * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>. * All rights reserved. @@ -20,7 +21,7 @@ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -29,60 +30,24 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
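The select() hunk above makes a zero-valued timeout behave as a pure poll: with f_wait cleared the thread no longer sleeps when nothing is ready. Typical caller-side use of that pattern, for illustration (fd_is_readable is a made-up helper):

#include <sys/types.h>
#include <sys/time.h>
#include <unistd.h>

static int
fd_is_readable(int fd)
{
	fd_set rfds;
	struct timeval tv = { 0, 0 };	/* poll, do not block */

	FD_ZERO(&rfds);
	FD_SET(fd, &rfds);
	return (select(fd + 1, &rfds, NULL, NULL, &tv) > 0);
}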
* - * $OpenBSD: uthread_setprio.c,v 1.3 1999/01/17 23:57:16 d Exp $ */ #include <errno.h> #ifdef _THREAD_SAFE #include <pthread.h> -#include <sched.h> #include "pthread_private.h" int pthread_setprio(pthread_t pthread, int prio) { - int ret; + int ret, policy; + struct sched_param param; - /* Check if the priority is invalid: */ - if (prio < PTHREAD_MIN_PRIORITY || prio > PTHREAD_MAX_PRIORITY) - /* Return an invalid argument error: */ - ret = EINVAL; - - /* Find the thread in the list of active threads: */ - else if ((ret = _find_thread(pthread)) == 0) - /* Set the thread priority: */ - pthread->pthread_priority = prio; + if ((ret = pthread_getschedparam(pthread, &policy, ¶m)) == 0) { + param.sched_priority = prio; + ret = pthread_setschedparam(pthread, policy, ¶m); + } /* Return the error status: */ return (ret); } - -int -pthread_getschedparam(thread, policy, param) - pthread_t thread; - int *policy; - struct sched_param *param; -{ - int ret = 0; - - if ((ret = _find_thread(thread)) == 0) { - if (policy) - *policy = SCHED_RR; - if (param) - param->sched_priority = thread->pthread_priority; - } - return (ret); -} - -int -pthread_setschedparam(thread, policy, param) - pthread_t thread; - int policy; - const struct sched_param *param; -{ - - if (policy == SCHED_RR) - return pthread_setprio(thread, param->sched_priority); - else - return (EINVAL); -} #endif diff --git a/lib/libpthread/uthread/uthread_setschedparam.c b/lib/libpthread/uthread/uthread_setschedparam.c new file mode 100644 index 00000000000..0024460c003 --- /dev/null +++ b/lib/libpthread/uthread/uthread_setschedparam.c @@ -0,0 +1,114 @@ +/* $OpenBSD: uthread_setschedparam.c,v 1.1 1999/05/26 00:18:25 d Exp $ */ +/* + * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Daniel Eischen. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + */ +#include <errno.h> +#include <sys/param.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +pthread_setschedparam(pthread_t pthread, int policy, const struct sched_param *param) +{ + int old_prio, in_readyq = 0, ret = 0; + + if ((param == NULL) || (param->sched_priority < PTHREAD_MIN_PRIORITY) || + (param->sched_priority > PTHREAD_MAX_PRIORITY) || + (policy < SCHED_FIFO) || (policy > SCHED_RR)) + /* Return an invalid argument error: */ + ret = EINVAL; + + /* Find the thread in the list of active threads: */ + else if ((ret = _find_thread(pthread)) == 0) { + /* + * Guard against being preempted by a scheduling + * signal: + */ + _thread_kern_sched_defer(); + + if (param->sched_priority != pthread->base_priority) { + /* + * Remove the thread from its current priority + * queue before any adjustments are made to its + * active priority: + */ + if ((pthread != _thread_run) && + (pthread->state == PS_RUNNING)) { + in_readyq = 1; + old_prio = pthread->active_priority; + PTHREAD_PRIOQ_REMOVE(pthread); + } + + /* Set the thread base priority: */ + pthread->base_priority = param->sched_priority; + + /* Recalculate the active priority: */ + pthread->active_priority = MAX(pthread->base_priority, + pthread->inherited_priority); + + if (in_readyq) { + if ((pthread->priority_mutex_count > 0) && + (old_prio > pthread->active_priority)) { + /* + * POSIX states that if the priority is + * being lowered, the thread must be + * inserted at the head of the queue for + * its priority if it owns any priority + * protection or inheritence mutexes. + */ + PTHREAD_PRIOQ_INSERT_HEAD(pthread); + } + else + PTHREAD_PRIOQ_INSERT_TAIL(pthread); + } + + /* + * Check for any mutex priority adjustments. This + * includes checking for a priority mutex on which + * this thread is waiting. + */ + _mutex_notify_priochange(pthread); + } + + /* Set the scheduling policy: */ + pthread->attr.sched_policy = policy; + + /* + * Renable preemption and yield if a scheduling signal + * arrived while in the critical region: + */ + _thread_kern_sched_undefer(); + } + return(ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_sig.c b/lib/libpthread/uthread/uthread_sig.c index 350ce71c6cf..1d32d376fe0 100644 --- a/lib/libpthread/uthread/uthread_sig.c +++ b/lib/libpthread/uthread/uthread_sig.c @@ -1,3 +1,4 @@ +/* $OpenBSD: uthread_sig.c,v 1.5 1999/05/26 00:18:26 d Exp $ */ /* * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au> * All rights reserved. @@ -29,7 +30,6 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
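Caller-side sketch of the new pthread_setschedparam() (boost_thread is a made-up helper): priorities outside PTHREAD_MIN_PRIORITY..PTHREAD_MAX_PRIORITY or an out-of-range policy are rejected with EINVAL, and lowering the priority of a thread that owns priority mutexes re-queues it at the head of its new priority level, as the code above shows.

#include <pthread.h>
#include <sched.h>

/* Hypothetical helper: move a thread to round-robin scheduling at prio. */
static int
boost_thread(pthread_t tid, int prio)
{
	struct sched_param param;

	param.sched_priority = prio;
	return (pthread_setschedparam(tid, SCHED_RR, &param));
}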
* - * $OpenBSD: uthread_sig.c,v 1.4 1999/01/06 05:29:27 d Exp $ */ #include <signal.h> #include <fcntl.h> @@ -39,6 +39,19 @@ #include <pthread.h> #include "pthread_private.h" +/* + * State change macro for signal handler: + */ +#define PTHREAD_SIG_NEW_STATE(thrd, newstate) { \ + if ((_thread_run->sched_defer_count == 0) && \ + (_thread_kern_in_sched == 0)) { \ + PTHREAD_NEW_STATE(thrd, newstate); \ + } else { \ + _waitingq_check_reqd = 1; \ + PTHREAD_SET_STATE(thrd, newstate); \ + } \ +} + /* Static variables: */ static int volatile yield_on_unlock_thread = 0; static spinlock_t thread_link_list_lock = _SPINLOCK_INITIALIZER; @@ -109,7 +122,7 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp) _thread_dump_info(); /* Check if an interval timer signal: */ - else if (sig == SIGVTALRM) { + else if (sig == _SCHED_SIGNAL) { /* Check if the scheduler interrupt has come at an * unfortunate time which one of the threads is * modifying the thread list: @@ -123,6 +136,14 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp) yield_on_unlock_thread = 1; /* + * Check if the scheduler interrupt has come when + * the currently running thread has deferred thread + * scheduling. + */ + else if (_thread_run->sched_defer_count) + _thread_run->yield_on_sched_undefer = 1; + + /* * Check if the kernel has not been interrupted while * executing scheduler code: */ @@ -178,18 +199,17 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp) } /* - * Enter a loop to process each thread in the linked + * Enter a loop to process each thread in the waiting * list that is sigwait-ing on a signal. Since POSIX * doesn't specify which thread will get the signal * if there are multiple waiters, we'll give it to the * first one we find. */ - for (pthread = _thread_link_list; pthread != NULL; - pthread = pthread->nxt) { + TAILQ_FOREACH(pthread, &_waitingq, pqe) { if ((pthread->state == PS_SIGWAIT) && sigismember(pthread->data.sigwait, sig)) { /* Change the state of the thread to run: */ - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; @@ -209,11 +229,18 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp) * list: */ for (pthread = _thread_link_list; pthread != NULL; - pthread = pthread->nxt) - _thread_signal(pthread,sig); + pthread = pthread->nxt) { + pthread_t pthread_saved = _thread_run; - /* Dispatch pending signals to the running thread: */ - _dispatch_signals(); + _thread_run = pthread; + _thread_signal(pthread,sig); + /* + * Dispatch pending signals to the + * running thread: + */ + _dispatch_signals(); + _thread_run = pthread_saved; + } } /* Returns nothing. 
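The waiting-queue scan above services the standard sigwait() pattern; POSIX leaves unspecified which of several waiters receives the signal, and this implementation simply wakes the first match it finds on the waiting queue. A minimal waiter thread, for illustration:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *
sig_thread(void *arg)
{
	sigset_t set;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	if (sigwait(&set, &sig) == 0)
		printf("received signal %d\n", sig);
	return (NULL);
}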
*/ @@ -265,7 +292,7 @@ _thread_signal(pthread_t pthread, int sig) pthread->interrupted = 1; /* Change the state of the thread to run: */ - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; @@ -285,7 +312,7 @@ _thread_signal(pthread_t pthread, int sig) pthread->interrupted = 1; /* Change the state of the thread to run: */ - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; @@ -300,7 +327,7 @@ _thread_signal(pthread_t pthread, int sig) if (!sigismember(&pthread->sigmask, sig) && _thread_sigact[sig - 1].sa_handler != SIG_DFL) { /* Change the state of the thread to run: */ - PTHREAD_NEW_STATE(pthread,PS_RUNNING); + PTHREAD_SIG_NEW_STATE(pthread,PS_RUNNING); /* Return the signal number: */ pthread->signo = sig; diff --git a/lib/libpthread/uthread/uthread_sigaction.c b/lib/libpthread/uthread/uthread_sigaction.c index 156d60a6583..6d709a8803f 100644 --- a/lib/libpthread/uthread/uthread_sigaction.c +++ b/lib/libpthread/uthread/uthread_sigaction.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_sigaction.c,v 1.3 1999/01/06 05:29:27 d Exp $ + * $OpenBSD: uthread_sigaction.c,v 1.4 1999/05/26 00:18:26 d Exp $ */ #include <signal.h> #include <errno.h> @@ -72,7 +72,7 @@ sigaction(int sig, const struct sigaction * act, struct sigaction * oact) * Check if the kernel needs to be advised of a change * in signal action: */ - if (act != NULL && sig != SIGVTALRM && sig != SIGCHLD && + if (act != NULL && sig != _SCHED_SIGNAL && sig != SIGCHLD && sig != SIGINFO) { /* Initialise the global signal action structure: */ gact.sa_mask = act->sa_mask; diff --git a/lib/libpthread/uthread/uthread_sigpending.c b/lib/libpthread/uthread/uthread_sigpending.c new file mode 100644 index 00000000000..2daf3159b59 --- /dev/null +++ b/lib/libpthread/uthread/uthread_sigpending.c @@ -0,0 +1,56 @@ +/* $OpenBSD: uthread_sigpending.c,v 1.1 1999/05/26 00:18:26 d Exp $ */ +/* + * Copyright (c) 1999 Daniel Eischen <eischen@vigrid.com>. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by John Birrell. + * 4. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ +#include <signal.h> +#include <errno.h> +#ifdef _THREAD_SAFE +#include <pthread.h> +#include "pthread_private.h" + +int +sigpending(sigset_t * set) +{ + int ret = 0; + + /* Check for a null signal set pointer: */ + if (set == NULL) { + /* Return an invalid argument: */ + ret = EINVAL; + } + else { + *set = _thread_run->sigpend; + } + /* Return the completion status: */ + return (ret); +} +#endif diff --git a/lib/libpthread/uthread/uthread_sigwait.c b/lib/libpthread/uthread/uthread_sigwait.c index 3593b72853f..b4277133310 100644 --- a/lib/libpthread/uthread/uthread_sigwait.c +++ b/lib/libpthread/uthread/uthread_sigwait.c @@ -29,7 +29,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $OpenBSD: uthread_sigwait.c,v 1.4 1999/01/17 23:57:27 d Exp $ + * $OpenBSD: uthread_sigwait.c,v 1.5 1999/05/26 00:18:26 d Exp $ */ #include <signal.h> #include <errno.h> @@ -58,7 +58,7 @@ sigwait(const sigset_t * set, int *sig) */ sigdelset(&act.sa_mask, SIGKILL); sigdelset(&act.sa_mask, SIGSTOP); - sigdelset(&act.sa_mask, SIGVTALRM); + sigdelset(&act.sa_mask, _SCHED_SIGNAL); sigdelset(&act.sa_mask, SIGCHLD); sigdelset(&act.sa_mask, SIGINFO); diff --git a/lib/libpthread/uthread/uthread_spinlock.c b/lib/libpthread/uthread/uthread_spinlock.c index a7284cd0139..3ea96013bc6 100644 --- a/lib/libpthread/uthread/uthread_spinlock.c +++ b/lib/libpthread/uthread/uthread_spinlock.c @@ -29,8 +29,8 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $FreeBSD: uthread_spinlock.c,v 1.4 1998/06/09 23:13:10 jb Exp $ - * $OpenBSD: uthread_spinlock.c,v 1.4 1999/01/10 23:13:24 d Exp $ + * $FreeBSD: uthread_spinlock.c,v 1.5 1999/03/23 05:07:56 jb Exp $ + * $OpenBSD: uthread_spinlock.c,v 1.5 1999/05/26 00:18:26 d Exp $ * */ @@ -57,12 +57,9 @@ _spinlock(spinlock_t *lck) * it before we do. */ while(_atomic_lock(&lck->access_lock)) { - /* Give up the time slice: */ - sched_yield(); - - /* Check if already locked by the running thread: */ - if (lck->lock_owner == _thread_run) - return; + /* Block the thread until the lock. */ + _thread_run->data.spinlock = lck; + _thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__); } /* The running thread now owns the lock: */ @@ -82,24 +79,25 @@ _spinlock(spinlock_t *lck) void _spinlock_debug(spinlock_t *lck, const char *fname, int lineno) { + int cnt = 0; + /* * Try to grab the lock and loop if another thread grabs * it before we do. 
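A short exercise of the new per-thread sigpending(), assuming (as in this library) that a signal blocked by the calling thread is recorded in that thread's pending set:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &block, NULL);

	kill(getpid(), SIGUSR1);	/* delivery is deferred */

	if (sigpending(&pending) == 0 && sigismember(&pending, SIGUSR1))
		printf("SIGUSR1 is pending\n");
	return (0);
}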
diff --git a/lib/libpthread/uthread/uthread_spinlock.c b/lib/libpthread/uthread/uthread_spinlock.c
index a7284cd0139..3ea96013bc6 100644
--- a/lib/libpthread/uthread/uthread_spinlock.c
+++ b/lib/libpthread/uthread/uthread_spinlock.c
@@ -29,8 +29,8 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: uthread_spinlock.c,v 1.4 1998/06/09 23:13:10 jb Exp $
- * $OpenBSD: uthread_spinlock.c,v 1.4 1999/01/10 23:13:24 d Exp $
+ * $FreeBSD: uthread_spinlock.c,v 1.5 1999/03/23 05:07:56 jb Exp $
+ * $OpenBSD: uthread_spinlock.c,v 1.5 1999/05/26 00:18:26 d Exp $
  *
  */
 
@@ -57,12 +57,9 @@ _spinlock(spinlock_t *lck)
 	 * it before we do.
 	 */
 	while(_atomic_lock(&lck->access_lock)) {
-		/* Give up the time slice: */
-		sched_yield();
-
-		/* Check if already locked by the running thread: */
-		if (lck->lock_owner == _thread_run)
-			return;
+		/* Block the thread until the lock. */
+		_thread_run->data.spinlock = lck;
+		_thread_kern_sched_state(PS_SPINBLOCK, __FILE__, __LINE__);
 	}
 
 	/* The running thread now owns the lock: */
@@ -82,24 +79,25 @@ _spinlock(spinlock_t *lck)
 void
 _spinlock_debug(spinlock_t *lck, const char *fname, int lineno)
 {
+	int cnt = 0;
+
 	/*
 	 * Try to grab the lock and loop if another thread grabs
 	 * it before we do.
 	 */
 	while(_atomic_lock(&lck->access_lock)) {
-		/* Give up the time slice: */
-		sched_yield();
-
-		/* Check if already locked by the running thread: */
-		if (lck->lock_owner == _thread_run) {
+		cnt++;
+		if (cnt > 100) {
 			char str[256];
-			snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) which it had already locked in %s (%d)\n", __progname, _thread_run, lck, fname, lineno, lck->fname, lck->lineno);
+			snprintf(str, sizeof(str), "%s - Warning: Thread %p attempted to lock %p from %s (%d) was left locked from %s (%d)\n", __progname, _thread_run, lck, fname, lineno, lck->fname, lck->lineno);
 			_thread_sys_write(2,str,strlen(str));
-
-			/* Create a thread dump to help debug this problem: */
-			_thread_dump_info();
-			return;
+			sleep(1);
+			cnt = 0;
 		}
+
+		/* Block the thread until the lock. */
+		_thread_run->data.spinlock = lck;
+		_thread_kern_sched_state(PS_SPINBLOCK, fname, lineno);
 	}
 
 	/* The running thread now owns the lock: */
diff --git a/lib/libpthread/uthread/uthread_suspend_np.c b/lib/libpthread/uthread/uthread_suspend_np.c
index 632dbf5169b..daeb60a661a 100644
--- a/lib/libpthread/uthread/uthread_suspend_np.c
+++ b/lib/libpthread/uthread/uthread_suspend_np.c
@@ -29,7 +29,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $OpenBSD: uthread_suspend_np.c,v 1.2 1999/01/06 05:29:29 d Exp $
+ * $OpenBSD: uthread_suspend_np.c,v 1.3 1999/05/26 00:18:26 d Exp $
  */
 #include <errno.h>
 #ifdef _THREAD_SAFE
@@ -52,8 +52,21 @@ pthread_suspend_np(pthread_t thread)
 			thread->interrupted = 1;
 		}
 
+		/*
+		 * Guard against preemption by a scheduling signal.
+		 * A change of thread state modifies the waiting
+		 * and priority queues.
+		 */
+		_thread_kern_sched_defer();
+
 		/* Suspend the thread. */
 		PTHREAD_NEW_STATE(thread,PS_SUSPENDED);
+
+		/*
+		 * Reenable preemption and yield if a scheduling signal
+		 * occurred while in the critical region.
+		 */
+		_thread_kern_sched_undefer();
 	}
 	return(ret);
 }
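Illustration (not part of this commit): the defer/undefer calls above only change
how pthread_suspend_np() protects its own manipulation of the waiting and priority
queues; callers use it exactly as before. A minimal sketch of the non-portable
suspend/resume pair, assuming the matching pthread_resume_np() from <pthread_np.h>:

/*
 * Illustrative sketch only -- not part of this commit.
 * Suspend a busy worker thread with pthread_suspend_np() and later
 * resume it; pthread_resume_np() is assumed from <pthread_np.h>.
 */
#include <pthread.h>
#include <pthread_np.h>
#include <unistd.h>

static void *
worker(void *arg)
{
	volatile int *counter = arg;

	/* Spin forever; the library preempts via the scheduling signal. */
	for (;;)
		(*counter)++;
}

int
main(void)
{
	pthread_t tid;
	volatile int counter = 0;
	int before, after;

	if (pthread_create(&tid, NULL, worker, (void *)&counter) != 0)
		return (1);

	sleep(1);
	if (pthread_suspend_np(tid) != 0)	/* worker stops running */
		return (1);

	before = counter;
	sleep(1);
	after = counter;			/* unchanged while suspended */

	pthread_resume_np(tid);			/* let the worker run again */

	return (before == after ? 0 : 1);
}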
diff --git a/lib/libpthread/uthread/uthread_switch_np.c b/lib/libpthread/uthread/uthread_switch_np.c
new file mode 100644
index 00000000000..598edacd2bb
--- /dev/null
+++ b/lib/libpthread/uthread/uthread_switch_np.c
@@ -0,0 +1,70 @@
+/* $OpenBSD: uthread_switch_np.c,v 1.1 1999/05/26 00:18:26 d Exp $ */
+/*
+ * Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by Daniel Eischen.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+#include <errno.h>
+#ifdef _THREAD_SAFE
+#include <pthread.h>
+#include <pthread_np.h>
+#include "pthread_private.h"
+
+
+int
+pthread_switch_add_np(pthread_switch_routine_t routine)
+{
+	int ret = 0;
+
+	if (routine == NULL)
+		/* Return an invalid argument error: */
+		ret = EINVAL;
+	else
+		/* Shouldn't need a lock to protect this assigment. */
+		_sched_switch_hook = routine;
+
+	return(ret);
+}
+
+int
+pthread_switch_delete_np(pthread_switch_routine_t routine)
+{
+	int ret = 0;
+
+	if (routine != _sched_switch_hook)
+		/* Return an invalid argument error: */
+		ret = EINVAL;
+	else
+		/* Shouldn't need a lock to protect this assigment. */
+		_sched_switch_hook = NULL;
+
+	return(ret);
+}
+#endif
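Illustration (not part of this commit): the new pthread_switch_add_np() and
pthread_switch_delete_np() entry points let an application install a single
context-switch hook, stored in _sched_switch_hook. A minimal sketch of registering
and removing such a hook; the callback signature (outgoing thread, incoming thread)
is an assumption taken from the pthread_switch_routine_t typedef in <pthread_np.h>.

/*
 * Illustrative sketch only -- not part of this commit.
 * Register a context-switch hook and remove it again.  The hook
 * signature (outgoing thread, incoming thread) is assumed from the
 * pthread_switch_routine_t typedef in <pthread_np.h>.
 */
#include <pthread.h>
#include <pthread_np.h>

static volatile unsigned long switches;

/* Called by the scheduler on every thread switch; keep it trivial. */
static void
switch_hook(pthread_t out, pthread_t in)
{
	(void)out;
	(void)in;
	switches++;
}

int
main(void)
{
	if (pthread_switch_add_np(switch_hook) != 0)
		return (1);

	/* ... run threaded work here; 'switches' counts context switches ... */

	if (pthread_switch_delete_np(switch_hook) != 0)
		return (1);
	return (0);
}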