author     Federico G. Schwindt <fgsch@cvs.openbsd.org>   2001-12-31 18:23:16 +0000
committer  Federico G. Schwindt <fgsch@cvs.openbsd.org>   2001-12-31 18:23:16 +0000
commit     0d227c4e86fd246a7da6e9edb97e6bef77779729 (patch)
tree       a8bb77358150faa0d0372965e313bd2ff914d54c /lib/libpthread/uthread
parent     c46895d8d7573ae0e7da97af7986b5630e355e97 (diff)
More changes from FreeBSD, including:
o Only poll file descriptors when needed.
o Change the way timing is achieved, counting scheduling ticks instead of
  calculating the elapsed time via gettimeofday().
o Prevent an overflow when polling.
o Use curthread instead of _thread_run.
o Remove extra spaces; indent.
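
The timing change replaces gettimeofday()-based accounting with a scheduling
tick counter: the scheduling signal handler bumps a global tick count, and the
scheduler charges the elapsed ticks (times the clock resolution in
microseconds) against a thread's time slice. Below is a minimal standalone
sketch of that accounting, not the library code itself: the surrounding state
(fake_thread, charge_slice, sched_ticks as a plain global) is made up for the
example, and tick rollover is handled with unsigned wraparound rather than the
diff's explicit UINT_MAX adjustment.

#include <stdio.h>

#define CLOCK_RES_USEC  10000   /* microseconds charged per scheduling tick */
#define TIMESLICE_USEC  20000   /* time quantum before a thread is requeued */

static volatile unsigned int sched_ticks;  /* bumped by each scheduling signal */

struct fake_thread {
	unsigned int last_active;  /* tick count when the thread last got the CPU */
	long slice_usec;           /* accumulated run time; -1 means quantum spent */
};

/* Charge the ticks a thread has just run against its time slice. */
static void
charge_slice(struct fake_thread *t)
{
	unsigned int elapsed = sched_ticks - t->last_active;  /* wraps safely */

	t->slice_usec += (long)elapsed * CLOCK_RES_USEC;
	if (t->slice_usec > TIMESLICE_USEC)
		t->slice_usec = -1;  /* quantum exceeded: requeue at the tail */
}

int
main(void)
{
	struct fake_thread t = { 0, 0 };

	sched_ticks = 3;  /* pretend three scheduling signals have arrived */
	charge_slice(&t);
	printf("slice_usec = %ld\n", t.slice_usec);  /* -1, since 30000 > 20000 */
	return 0;
}

Compiled on its own, the example prints -1: three simulated ticks at 10 ms
resolution (30000 us) exceed the 20 ms quantum, so the thread would go to the
tail of its priority queue.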
Diffstat (limited to 'lib/libpthread/uthread')
-rw-r--r--  lib/libpthread/uthread/pthread_private.h   |  29
-rw-r--r--  lib/libpthread/uthread/uthread_create.c    |  63
-rw-r--r--  lib/libpthread/uthread/uthread_init.c      |  30
-rw-r--r--  lib/libpthread/uthread/uthread_kern.c      | 454
-rw-r--r--  lib/libpthread/uthread/uthread_nanosleep.c |  18
-rw-r--r--  lib/libpthread/uthread/uthread_sig.c       |  12
6 files changed, 296 insertions(+), 310 deletions(-)
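
The "prevent an overflow when polling" item shows up in the uthread_kern.c
hunks below: the poll() timeout in milliseconds is computed from the next
thread's wakeup time, and far-off wakeups are clamped so the multiplication by
1000 cannot overflow an int. The following is a rough standalone sketch with
invented names (poll_timeout_ms, MAX_TIMEOUT_MS), not the library routine;
INFTIM is taken from <poll.h>, with a fallback define in case a platform
lacks it.

#include <poll.h>
#include <stdio.h>
#include <time.h>

#ifndef INFTIM
#define INFTIM (-1)             /* fallback if <poll.h> does not define it */
#endif

#define MAX_TIMEOUT_MS  60000   /* cap far-off wakeups so the math cannot overflow */

/* Milliseconds from `now` until `wakeup`; INFTIM if nothing can time out. */
static int
poll_timeout_ms(const struct timespec *wakeup, const struct timespec *now)
{
	long ms;

	if (wakeup == NULL || wakeup->tv_sec == -1)
		return INFTIM;
	if (wakeup->tv_sec - now->tv_sec > MAX_TIMEOUT_MS)
		return MAX_TIMEOUT_MS;  /* avoid overflowing the * 1000 below */

	ms = (wakeup->tv_sec - now->tv_sec) * 1000 +
	    (wakeup->tv_nsec - now->tv_nsec) / 1000000;
	return ms < 0 ? 0 : (int)ms;    /* never hand poll() a negative timeout */
}

int
main(void)
{
	struct timespec now  = { .tv_sec = 1000, .tv_nsec = 0 };
	struct timespec soon = { .tv_sec = 1001, .tv_nsec = 500000000 };  /* 1.5 s away */
	struct timespec far  = { .tv_sec = 1000 + 100000, .tv_nsec = 0 }; /* clamped */

	printf("%d\n", poll_timeout_ms(&soon, &now));    /* 1500 */
	printf("%d\n", poll_timeout_ms(&far, &now));     /* 60000 */
	return 0;
}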
diff --git a/lib/libpthread/uthread/pthread_private.h b/lib/libpthread/uthread/pthread_private.h
index af234ac6c7d..8c76cef2ce9 100644
--- a/lib/libpthread/uthread/pthread_private.h
+++ b/lib/libpthread/uthread/pthread_private.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pthread_private.h,v 1.32 2001/12/19 02:02:52 fgsch Exp $ */
+/* $OpenBSD: pthread_private.h,v 1.33 2001/12/31 18:23:15 fgsch Exp $ */
/*
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
@@ -255,14 +255,14 @@ struct pthread_mutex {
};
/*
- * Flags for mutexes.
+ * Flags for mutexes.
*/
#define MUTEX_FLAGS_PRIVATE 0x01
#define MUTEX_FLAGS_INITED 0x02
#define MUTEX_FLAGS_BUSY 0x04
/*
- * Static mutex initialization values.
+ * Static mutex initialization values.
*/
#define PTHREAD_MUTEX_STATIC_INITIALIZER \
{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, TAILQ_INITIALIZER, \
@@ -314,7 +314,7 @@ struct pthread_cond_attr {
#define COND_FLAGS_BUSY 0x04
/*
- * Static cond initialization values.
+ * Static cond initialization values.
*/
#define PTHREAD_COND_STATIC_INITIALIZER \
{ COND_TYPE_FAST, TAILQ_INITIALIZER, NULL, NULL, \
@@ -412,14 +412,15 @@ enum pthread_susp {
#define PTHREAD_BASE_PRIORITY(prio) ((prio) & PTHREAD_MAX_PRIORITY)
/*
- * Clock resolution in nanoseconds.
+ * Clock resolution in microseconds.
*/
-#define CLOCK_RES_NSEC 10000000
+#define CLOCK_RES_USEC 10000
+#define CLOCK_RES_USEC_MIN 1000
/*
* Time slice period in microseconds.
*/
-#define TIMESLICE_USEC 100000
+#define TIMESLICE_USEC 20000
/*
* Define a thread-safe macro to get the current time of day
@@ -627,11 +628,11 @@ struct pthread {
/* Thread state: */
enum pthread_state state;
- /* Time that this thread was last made active. */
- struct timeval last_active;
+ /* Scheduling clock when this thread was last made active. */
+ long last_active;
- /* Time that this thread was last made inactive. */
- struct timeval last_inactive;
+ /* Scheduling clock when this thread was last made inactive. */
+ long last_inactive;
/*
* Number of microseconds accumulated by this thread when
@@ -650,7 +651,7 @@ struct pthread {
/*
* Error variable used instead of errno. The function __error()
- * returns a pointer to this.
+ * returns a pointer to this.
*/
int error;
@@ -959,9 +960,9 @@ SCLASS int _thread_dtablesize /* Descriptor table size. */
;
#endif
-SCLASS int _clock_res_nsec /* Clock resolution in nsec. */
+SCLASS int _clock_res_usec /* Clock resolution in usec. */
#ifdef GLOBAL_PTHREAD_PRIVATE
-= CLOCK_RES_NSEC;
+= CLOCK_RES_USEC;
#else
;
#endif
diff --git a/lib/libpthread/uthread/uthread_create.c b/lib/libpthread/uthread/uthread_create.c
index de96d4440de..c61c1b9d515 100644
--- a/lib/libpthread/uthread/uthread_create.c
+++ b/lib/libpthread/uthread/uthread_create.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uthread_create.c,v 1.17 2001/12/11 00:19:47 fgsch Exp $ */
+/* $OpenBSD: uthread_create.c,v 1.18 2001/12/31 18:23:15 fgsch Exp $ */
/*
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
@@ -50,6 +50,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
void *(*start_routine) (void *), void *arg)
{
struct pthread *curthread = _get_curthread();
+ struct itimerval itimer;
int f_gc = 0;
int ret = 0;
pthread_t gc_thread;
@@ -75,11 +76,17 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
} else {
pattr = *attr;
}
- /* Create a stack from the specified attributes: */
- if ((stack = _thread_stack_alloc(pattr->stackaddr_attr,
- pattr->stacksize_attr)) == NULL) {
- ret = EAGAIN;
- free(new_thread);
+ /* Check if a stack was specified in the thread attributes: */
+ if ((stack = pattr->stackaddr_attr) != NULL) {
+ }
+ /* Allocate a stack: */
+ else {
+ stack = _thread_stack_alloc(pattr->stackaddr_attr,
+ pattr->stacksize_attr);
+ if (stack == NULL) {
+ ret = EAGAIN;
+ free(new_thread);
+ }
}
/* Check for errors: */
@@ -93,6 +100,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->stack = stack;
new_thread->start_routine = start_routine;
new_thread->arg = arg;
+
new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;
@@ -104,6 +112,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialise the thread for signals: */
new_thread->sigmask = curthread->sigmask;
+ new_thread->sigmask_seqno = 0;
/*
* Set up new stack frame so that it 'returns' to
@@ -118,24 +127,26 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/*
* Check if this thread is to inherit the scheduling
- * attributes from its parent:
+ * attributes from its parent:
*/
if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
/* Copy the scheduling attributes: */
- new_thread->base_priority
- = curthread->base_priority;
- new_thread->attr.prio
- = curthread->base_priority;
- new_thread->attr.sched_policy
- = curthread->attr.sched_policy;
+ new_thread->base_priority =
+ curthread->base_priority &
+ ~PTHREAD_SIGNAL_PRIORITY;
+ new_thread->attr.prio =
+ curthread->base_priority &
+ ~PTHREAD_SIGNAL_PRIORITY;
+ new_thread->attr.sched_policy =
+ curthread->attr.sched_policy;
} else {
/*
* Use just the thread priority, leaving the
* other scheduling attributes as their
- * default values:
+ * default values:
*/
- new_thread->base_priority
- = new_thread->attr.prio;
+ new_thread->base_priority =
+ new_thread->attr.prio;
}
new_thread->active_priority = new_thread->base_priority;
new_thread->inherited_priority = 0;
@@ -169,11 +180,10 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Add the thread to the linked list of all threads: */
TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);
- if (pattr->suspend == PTHREAD_CREATE_SUSPENDED) {
- PTHREAD_SET_STATE(new_thread, PS_SUSPENDED);
- PTHREAD_WAITQ_INSERT(new_thread);
- } else {
- PTHREAD_SET_STATE(new_thread, PS_RUNNING);
+ if (pattr->suspend == PTHREAD_CREATE_SUSPENDED)
+ new_thread->state = PS_SUSPENDED;
+ else {
+ new_thread->state = PS_RUNNING;
PTHREAD_PRIOQ_INSERT_TAIL(new_thread);
}
@@ -187,6 +197,16 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
if (thread != NULL)
(*thread) = new_thread;
+ if (f_gc != 0) {
+ /* Install the scheduling timer: */
+ itimer.it_interval.tv_sec = 0;
+ itimer.it_interval.tv_usec = _clock_res_usec;
+ itimer.it_value = itimer.it_interval;
+ if (setitimer(_ITIMER_SCHED_TIMER, &itimer,
+ NULL) != 0)
+ PANIC("Cannot set interval timer");
+ }
+
/* Schedule the new user thread: */
_thread_kern_sched(NULL);
@@ -197,6 +217,7 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
if (f_gc && pthread_create(&gc_thread,NULL,
_thread_gc,NULL) != 0)
PANIC("Can't create gc thread");
+
}
}
diff --git a/lib/libpthread/uthread/uthread_init.c b/lib/libpthread/uthread/uthread_init.c
index 0c952b4130e..a5a8e76cc8b 100644
--- a/lib/libpthread/uthread/uthread_init.c
+++ b/lib/libpthread/uthread/uthread_init.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uthread_init.c,v 1.18 2001/12/11 00:19:47 fgsch Exp $ */
+/* $OpenBSD: uthread_init.c,v 1.19 2001/12/31 18:23:15 fgsch Exp $ */
/*
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
@@ -114,7 +114,7 @@ _thread_init(void)
/*
* Create a pipe that is written to by the signal handler to prevent
- * signals being missed in calls to _select:
+ * signals being missed in calls to _select:
*/
if (_thread_sys_pipe(_thread_kern_pipe) != 0) {
/* Cannot create pipe, so abort: */
@@ -149,7 +149,7 @@ _thread_init(void)
else if ((_thread_initial = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
/*
* Insufficient memory to initialise this application, so
- * abort:
+ * abort:
*/
PANIC("Cannot allocate memory for initial thread");
} else {
@@ -193,6 +193,13 @@ _thread_init(void)
TAILQ_INIT(&(_thread_initial->mutexq));
_thread_initial->priority_mutex_count = 0;
+ /* Initialize the global scheduling time: */
+ _sched_ticks = 0;
+ gettimeofday((struct timeval *) &_sched_tod, NULL);
+
+ /* Initialize last active: */
+ _thread_initial->last_active = (long) _sched_ticks;
+
/* Give it a useful name */
pthread_set_name_np(_thread_initial, "main");
@@ -231,7 +238,7 @@ _thread_init(void)
&_thread_sigact[i - 1]) != 0) {
/*
* Abort this process if signal
- * initialisation fails:
+ * initialisation fails:
*/
PANIC("Cannot read signal handler info");
}
@@ -249,9 +256,9 @@ _thread_init(void)
_thread_sys_sigaction(SIGINFO, &act, NULL) != 0 ||
_thread_sys_sigaction(SIGCHLD, &act, NULL) != 0) {
/*
- * Abort this process if signal initialisation fails:
+ * Abort this process if signal initialization fails:
*/
- PANIC("Cannot initialise signal handler");
+ PANIC("Cannot initialize signal handler");
}
/* Get the kernel clockrate: */
@@ -259,13 +266,14 @@ _thread_init(void)
mib[1] = KERN_CLOCKRATE;
len = sizeof (struct clockinfo);
if (sysctl(mib, 2, &clockinfo, &len, NULL, 0) == 0)
- _clock_res_nsec = clockinfo.tick * 1000;
+ _clock_res_usec = clockinfo.tick > CLOCK_RES_USEC_MIN ?
+ clockinfo.tick : CLOCK_RES_USEC_MIN;
/* Get the table size: */
if ((_thread_dtablesize = getdtablesize()) < 0) {
/*
* Cannot get the system defined table size, so abort
- * this process.
+ * this process.
*/
PANIC("Cannot get dtablesize");
}
@@ -276,7 +284,7 @@ _thread_init(void)
/*
* Cannot allocate memory for the file descriptor
- * table, so abort this process.
+ * table, so abort this process.
*/
PANIC("Cannot allocate memory for file descriptor table");
}
@@ -284,13 +292,13 @@ _thread_init(void)
if ((_thread_pfd_table = (struct pollfd *) malloc(sizeof(struct pollfd) * _thread_dtablesize)) == NULL) {
/*
* Cannot allocate memory for the file descriptor
- * table, so abort this process.
+ * table, so abort this process.
*/
PANIC("Cannot allocate memory for pollfd table");
} else {
/*
* Enter a loop to initialise the file descriptor
- * table:
+ * table:
*/
for (i = 0; i < _thread_dtablesize; i++) {
/* Initialise the file descriptor table: */
diff --git a/lib/libpthread/uthread/uthread_kern.c b/lib/libpthread/uthread/uthread_kern.c
index c055b61cb39..d783396bccb 100644
--- a/lib/libpthread/uthread/uthread_kern.c
+++ b/lib/libpthread/uthread/uthread_kern.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uthread_kern.c,v 1.18 2001/12/18 03:47:52 marc Exp $ */
+/* $OpenBSD: uthread_kern.c,v 1.19 2001/12/31 18:23:15 fgsch Exp $ */
/*
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
@@ -34,10 +34,10 @@
*
*/
#include <errno.h>
+#include <poll.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
-#include <poll.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -51,8 +51,8 @@
#include "pthread_private.h"
/* Static function prototype definitions: */
-static void
-_thread_kern_poll(int wait_reqd);
+static void
+thread_kern_poll(int wait_reqd);
static void
dequeue_signals(void);
@@ -60,16 +60,19 @@ dequeue_signals(void);
static inline void
thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in);
+/* Static variables: */
+static int last_tick = 0;
+
void
_thread_kern_sched(struct sigcontext * scp)
{
+ struct timespec ts;
+ struct timeval tv;
struct pthread *curthread = _get_curthread();
- pthread_t pthread, pthread_h = NULL;
+ pthread_t pthread, pthread_h;
+ unsigned int current_tick;
+ int add_to_prioq;
pthread_t old_thread_run;
- struct itimerval itimer;
- struct timespec ts, ts1;
- struct timeval tv, tv1;
- int set_timer = 0;
/*
* Flag the pthread kernel as executing scheduler code
@@ -82,30 +85,31 @@ _thread_kern_sched(struct sigcontext * scp)
if (scp != NULL) {
/*
* Copy the signal context to the current thread's jump
- * buffer:
+ * buffer:
*/
- memcpy(&_thread_run->saved_sigcontext, scp, sizeof(_thread_run->saved_sigcontext));
+ memcpy(&curthread->saved_sigcontext, scp,
+ sizeof(curthread->saved_sigcontext));
/*
* Save floating point state.
*/
- _thread_machdep_save_float_state(&_thread_run->_machdep);
+ _thread_machdep_save_float_state(&curthread->_machdep);
/* Flag the signal context as the last state saved: */
- _thread_run->sig_saved = 1;
+ curthread->sig_saved = 1;
} else
/* Flag the jump buffer was the last state saved: */
- _thread_run->sig_saved = 0;
+ curthread->sig_saved = 0;
/* If the currently running thread is a user thread, save it: */
- if ((_thread_run->flags & PTHREAD_FLAGS_PRIVATE) == 0)
- _last_user_thread = _thread_run;
+ if ((curthread->flags & PTHREAD_FLAGS_PRIVATE) == 0)
+ _last_user_thread = curthread;
/* Save errno. */
- _thread_run->error = errno;
+ curthread->error = errno;
/* Save the current thread to switch from */
- old_thread_run = _thread_run;
+ old_thread_run = curthread;
/*
* Enter a scheduling loop that finds the next thread that is
@@ -116,37 +120,48 @@ _thread_kern_sched(struct sigcontext * scp)
*/
while (!(TAILQ_EMPTY(&_thread_list))) {
/* Get the current time of day: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
+ current_tick = _sched_ticks;
/*
* Protect the scheduling queues from access by the signal
* handler.
*/
_queue_signals = 1;
+ add_to_prioq = 0;
- if (_thread_run != &_thread_kern_thread) {
-
+ if (curthread != &_thread_kern_thread) {
/*
* This thread no longer needs to yield the CPU.
*/
- _thread_run->yield_on_sig_undefer = 0;
-
- /*
- * Save the current time as the time that the thread
- * became inactive:
- */
- _thread_run->last_inactive.tv_sec = tv.tv_sec;
- _thread_run->last_inactive.tv_usec = tv.tv_usec;
+ curthread->yield_on_sig_undefer = 0;
+ if (curthread->state != PS_RUNNING) {
+ /*
+ * Save the current time as the time that the
+ * thread became inactive:
+ */
+ curthread->last_inactive = (long)current_tick;
+ if (curthread->last_inactive <
+ curthread->last_active) {
+ /* Account for a rollover: */
+ curthread->last_inactive =+
+ UINT_MAX + 1;
+ }
+ }
+
/*
* Place the currently running thread into the
* appropriate queue(s).
*/
- switch (_thread_run->state) {
+ switch (curthread->state) {
case PS_DEAD:
+ case PS_STATE_MAX: /* to silence -Wall */
+ case PS_SUSPENDED:
/*
- * Dead threads are not placed in any queue:
+ * Dead and suspended threads are not placed
+ * in any queue:
*/
break;
@@ -157,27 +172,12 @@ _thread_kern_sched(struct sigcontext * scp)
* are polled (to preserve round-robin
* scheduling).
*/
- if ((_thread_run->slice_usec != -1) &&
- (_thread_run->attr.sched_policy != SCHED_FIFO)) {
- /*
- * Accumulate the number of microseconds that
- * this thread has run for:
- */
- _thread_run->slice_usec +=
- (_thread_run->last_inactive.tv_sec -
- _thread_run->last_active.tv_sec) * 1000000 +
- _thread_run->last_inactive.tv_usec -
- _thread_run->last_active.tv_usec;
-
- /* Check for time quantum exceeded: */
- if (_thread_run->slice_usec > TIMESLICE_USEC)
- _thread_run->slice_usec = -1;
- }
+ add_to_prioq = 1;
break;
/*
* States which do not depend on file descriptor I/O
- * operations or timeouts:
+ * operations or timeouts:
*/
case PS_DEADLOCK:
case PS_FDLR_WAIT:
@@ -188,65 +188,95 @@ _thread_kern_sched(struct sigcontext * scp)
case PS_SIGSUSPEND:
case PS_SIGTHREAD:
case PS_SIGWAIT:
- case PS_SUSPENDED:
case PS_WAIT_WAIT:
/* No timeouts for these states: */
- _thread_run->wakeup_time.tv_sec = -1;
- _thread_run->wakeup_time.tv_nsec = -1;
+ curthread->wakeup_time.tv_sec = -1;
+ curthread->wakeup_time.tv_nsec = -1;
/* Restart the time slice: */
- _thread_run->slice_usec = -1;
+ curthread->slice_usec = -1;
/* Insert into the waiting queue: */
- PTHREAD_WAITQ_INSERT(_thread_run);
+ PTHREAD_WAITQ_INSERT(curthread);
break;
/* States which can timeout: */
case PS_COND_WAIT:
case PS_SLEEP_WAIT:
/* Restart the time slice: */
- _thread_run->slice_usec = -1;
+ curthread->slice_usec = -1;
/* Insert into the waiting queue: */
- PTHREAD_WAITQ_INSERT(_thread_run);
+ PTHREAD_WAITQ_INSERT(curthread);
break;
/* States that require periodic work: */
case PS_SPINBLOCK:
/* No timeouts for this state: */
- _thread_run->wakeup_time.tv_sec = -1;
- _thread_run->wakeup_time.tv_nsec = -1;
+ curthread->wakeup_time.tv_sec = -1;
+ curthread->wakeup_time.tv_nsec = -1;
/* Increment spinblock count: */
_spinblock_count++;
- /* fall through */
+ /* FALLTHROUGH */
case PS_FDR_WAIT:
case PS_FDW_WAIT:
case PS_POLL_WAIT:
case PS_SELECT_WAIT:
/* Restart the time slice: */
- _thread_run->slice_usec = -1;
+ curthread->slice_usec = -1;
/* Insert into the waiting queue: */
- PTHREAD_WAITQ_INSERT(_thread_run);
+ PTHREAD_WAITQ_INSERT(curthread);
/* Insert into the work queue: */
- PTHREAD_WORKQ_INSERT(_thread_run);
+ PTHREAD_WORKQ_INSERT(curthread);
+ break;
}
- }
- /* Unprotect the scheduling queues: */
- _queue_signals = 0;
+ /*
+ * Are there pending signals for this thread?
+ *
+ * This check has to be performed after the thread
+ * has been placed in the queue(s) appropriate for
+ * its state. The process of adding pending signals
+ * can change a threads state, which in turn will
+ * attempt to add or remove the thread from any
+ * scheduling queue to which it belongs.
+ */
+ if (curthread->check_pending != 0) {
+ curthread->check_pending = 0;
+ _thread_sig_check_pending(curthread);
+ }
+ }
/*
- * Poll file descriptors to update the state of threads
- * waiting on file I/O where data may be available:
+ * Avoid polling file descriptors if there are none
+ * waiting:
*/
- _thread_kern_poll(0);
+ if (TAILQ_EMPTY(&_workq) != 0) {
+ }
+ /*
+ * Poll file descriptors only if a new scheduling signal
+ * has occurred or if we have no more runnable threads.
+ */
+ else if (((current_tick = _sched_ticks) != last_tick) ||
+ ((curthread->state != PS_RUNNING) &&
+ (PTHREAD_PRIOQ_FIRST() == NULL))) {
+ /* Unprotect the scheduling queues: */
+ _queue_signals = 0;
- /* Protect the scheduling queues: */
- _queue_signals = 1;
+ /*
+ * Poll file descriptors to update the state of threads
+ * waiting on file I/O where data may be available:
+ */
+ thread_kern_poll(0);
+
+ /* Protect the scheduling queues: */
+ _queue_signals = 1;
+ }
+ last_tick = current_tick;
/*
* Wake up threads that have timedout. This has to be
@@ -288,27 +318,52 @@ _thread_kern_sched(struct sigcontext * scp)
PTHREAD_WAITQ_CLEARACTIVE();
/*
- * Check if there is a current runnable thread that isn't
- * already in the ready queue:
+ * Check to see if the current thread needs to be added
+ * to the priority queue:
*/
- if ((_thread_run != &_thread_kern_thread) &&
- (_thread_run->state == PS_RUNNING) &&
- ((_thread_run->flags & PTHREAD_FLAGS_IN_PRIOQ) == 0)) {
- if (_thread_run->slice_usec == -1) {
+ if (add_to_prioq != 0) {
+ /*
+ * Save the current time as the time that the
+ * thread became inactive:
+ */
+ current_tick = _sched_ticks;
+ curthread->last_inactive = (long)current_tick;
+ if (curthread->last_inactive <
+ curthread->last_active) {
+ /* Account for a rollover: */
+ curthread->last_inactive =+ UINT_MAX + 1;
+ }
+
+ if ((curthread->slice_usec != -1) &&
+ (curthread->attr.sched_policy != SCHED_FIFO)) {
+ /*
+ * Accumulate the number of microseconds for
+ * which the current thread has run:
+ */
+ curthread->slice_usec +=
+ (curthread->last_inactive -
+ curthread->last_active) *
+ (long)_clock_res_usec;
+ /* Check for time quantum exceeded: */
+ if (curthread->slice_usec > TIMESLICE_USEC)
+ curthread->slice_usec = -1;
+ }
+
+ if (curthread->slice_usec == -1) {
/*
* The thread exceeded its time
* quantum or it yielded the CPU;
* place it at the tail of the
* queue for its priority.
*/
- PTHREAD_PRIOQ_INSERT_TAIL(_thread_run);
+ PTHREAD_PRIOQ_INSERT_TAIL(curthread);
} else {
/*
* The thread hasn't exceeded its
* interval. Place it at the head
* of the queue for its priority.
*/
- PTHREAD_PRIOQ_INSERT_HEAD(_thread_run);
+ PTHREAD_PRIOQ_INSERT_HEAD(curthread);
}
}
@@ -322,7 +377,7 @@ _thread_kern_sched(struct sigcontext * scp)
/*
* Lock the pthread kernel by changing the pointer to
* the running thread to point to the global kernel
- * thread structure:
+ * thread structure:
*/
_set_curthread(&_thread_kern_thread);
curthread = &_thread_kern_thread;
@@ -332,24 +387,30 @@ _thread_kern_sched(struct sigcontext * scp)
/*
* There are no threads ready to run, so wait until
- * something happens that changes this condition:
+ * something happens that changes this condition:
*/
- _thread_kern_poll(1);
+ thread_kern_poll(1);
+
+ /*
+ * This process' usage will likely be very small
+ * while waiting in a poll. Since the scheduling
+ * clock is based on the profiling timer, it is
+ * unlikely that the profiling timer will fire
+ * and update the time of day. To account for this,
+ * get the time of day after polling with a timeout.
+ */
+ gettimeofday((struct timeval *) &_sched_tod, NULL);
+
+ /* Check once more for a runnable thread: */
+ _queue_signals = 1;
+ pthread_h = PTHREAD_PRIOQ_FIRST();
+ _queue_signals = 0;
}
- else {
+
+ if (pthread_h != NULL) {
/* Remove the thread from the ready queue: */
PTHREAD_PRIOQ_REMOVE(pthread_h);
- /* Get first thread on the waiting list: */
- pthread = TAILQ_FIRST(&_waitingq);
-
- /* Check to see if there is more than one thread: */
- if (pthread_h != TAILQ_FIRST(&_thread_list) ||
- TAILQ_NEXT(pthread_h, tle) != NULL)
- set_timer = 1;
- else
- set_timer = 0;
-
/* Unprotect the scheduling queues: */
_queue_signals = 0;
@@ -372,32 +433,19 @@ _thread_kern_sched(struct sigcontext * scp)
*/
if (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
(pthread->active_priority > pthread_h->active_priority)) {
+ /* Remove the thread from the ready queue: */
+ PTHREAD_PRIOQ_REMOVE(pthread);
+
/*
* Insert the lower priority thread
* at the head of its priority list:
*/
PTHREAD_PRIOQ_INSERT_HEAD(pthread_h);
- /* Remove the thread from the ready queue: */
- PTHREAD_PRIOQ_REMOVE(pthread);
-
/* There's a new thread in town: */
pthread_h = pthread;
}
- /* Get first thread on the waiting list: */
- pthread = TAILQ_FIRST(&_waitingq);
-
- /*
- * Check to see if there is more than one
- * thread:
- */
- if (pthread_h != TAILQ_FIRST(&_thread_list) ||
- TAILQ_NEXT(pthread_h, tle) != NULL)
- set_timer = 1;
- else
- set_timer = 0;
-
/* Unprotect the scheduling queues: */
_queue_signals = 0;
}
@@ -408,125 +456,40 @@ _thread_kern_sched(struct sigcontext * scp)
/*
* Save the current time as the time that the thread
- * became active:
- */
- curthread->last_active.tv_sec = tv.tv_sec;
- curthread->last_active.tv_usec = tv.tv_usec;
-
- /*
- * Define the maximum time before a scheduling signal
- * is required:
- */
- itimer.it_value.tv_sec = 0;
- itimer.it_value.tv_usec = TIMESLICE_USEC;
-
- /*
- * The interval timer is not reloaded when it
- * times out. The interval time needs to be
- * calculated every time.
+ * became active:
*/
- itimer.it_interval.tv_sec = 0;
- itimer.it_interval.tv_usec = 0;
-
- /* Get first thread on the waiting list: */
- if ((pthread != NULL) &&
- (pthread->wakeup_time.tv_sec != -1)) {
- /*
- * Calculate the time until this thread
- * is ready, allowing for the clock
- * resolution:
- */
- ts1.tv_sec = pthread->wakeup_time.tv_sec
- - ts.tv_sec;
- ts1.tv_nsec = pthread->wakeup_time.tv_nsec
- - ts.tv_nsec + _clock_res_nsec;
-
- /*
- * Check for underflow of the nanosecond field:
- */
- while (ts1.tv_nsec < 0) {
- /*
- * Allow for the underflow of the
- * nanosecond field:
- */
- ts1.tv_sec--;
- ts1.tv_nsec += 1000000000;
- }
- /*
- * Check for overflow of the nanosecond field:
- */
- while (ts1.tv_nsec >= 1000000000) {
- /*
- * Allow for the overflow of the
- * nanosecond field:
- */
- ts1.tv_sec++;
- ts1.tv_nsec -= 1000000000;
- }
- /*
- * Convert the timespec structure to a
- * timeval structure:
- */
- TIMESPEC_TO_TIMEVAL(&tv1, &ts1);
-
- /*
- * Check if the thread will be ready
- * sooner than the earliest ones found
- * so far:
- */
- if (timercmp(&tv1, &itimer.it_value, <)) {
- /*
- * Update the time value:
- */
- itimer.it_value.tv_sec = tv1.tv_sec;
- itimer.it_value.tv_usec = tv1.tv_usec;
- }
- }
+ current_tick = _sched_ticks;
+ curthread->last_active = (long) current_tick;
/*
* Check if this thread is running for the first time
* or running again after using its full time slice
- * allocation:
+ * allocation:
*/
- if (_thread_run->slice_usec == -1) {
+ if (curthread->slice_usec == -1) {
/* Reset the accumulated time slice period: */
- _thread_run->slice_usec = 0;
- }
-
- /* Check if there is more than one thread: */
- if (set_timer != 0) {
- /*
- * Start the interval timer for the
- * calculated time interval:
- */
- if (setitimer(_ITIMER_SCHED_TIMER, &itimer, NULL) != 0) {
- /*
- * Cannot initialise the timer, so
- * abort this process:
- */
- PANIC("Cannot set scheduling timer");
- }
+ curthread->slice_usec = 0;
}
/* Restore errno. */
- errno = _thread_run->error;
+ errno = curthread->error;
/*
* Restore the new thread, saving current.
*/
- _thread_machdep_switch(&_thread_run->_machdep,
+ _thread_machdep_switch(&curthread->_machdep,
&old_thread_run->_machdep);
/* Check if a signal context was saved: */
- if (_thread_run->sig_saved == 1) {
+ if (curthread->sig_saved == 1) {
/*
* Restore floating point state.
*/
- _thread_machdep_restore_float_state(&_thread_run->_machdep);
+ _thread_machdep_restore_float_state(&curthread->_machdep);
/*
* Do a sigreturn to restart the thread that
- * was interrupted by a signal:
+ * was interrupted by a signal:
*/
_thread_kern_in_sched = 0;
@@ -535,9 +498,9 @@ _thread_kern_sched(struct sigcontext * scp)
* installed switch hooks.
*/
if ((_sched_switch_hook != NULL) &&
- (_last_user_thread != _thread_run)) {
+ (_last_user_thread != curthread)) {
thread_run_switch_hook(_last_user_thread,
- _thread_run);
+ curthread);
}
if (((curthread->cancelflags &
@@ -548,7 +511,7 @@ _thread_kern_sched(struct sigcontext * scp)
/* return to signal handler. This code
should be:
- _thread_sys_sigreturn(&_thread_run->saved_sigcontext);
+ _thread_sys_sigreturn(&curthread->saved_sigcontext);
but that doesn't currently work on the
sparc */
return;
@@ -561,7 +524,7 @@ _thread_kern_sched(struct sigcontext * scp)
if (_sched_switch_hook != NULL) {
/* Run the installed switch hook: */
thread_run_switch_hook(_last_user_thread,
- _thread_run);
+ curthread);
}
if (((curthread->cancelflags &
@@ -640,7 +603,7 @@ _thread_kern_sched_state_unlock(enum pthread_state state,
}
static void
-_thread_kern_poll(int wait_reqd)
+thread_kern_poll(int wait_reqd)
{
int count = 0;
int i, found;
@@ -657,7 +620,7 @@ _thread_kern_poll(int wait_reqd)
}
else {
/* Get the current time of day: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &ts);
_queue_signals = 1;
@@ -669,16 +632,19 @@ _thread_kern_poll(int wait_reqd)
* Either there are no threads in the waiting queue,
* or there are no threads that can timeout.
*/
- timeout_ms = -1;
+ timeout_ms = INFTIM;
}
+ else if (pthread->wakeup_time.tv_sec - ts.tv_sec > 60000)
+ /* Limit maximum timeout to prevent rollover. */
+ timeout_ms = 60000;
else {
/*
* Calculate the time left for the next thread to
- * timeout allowing for the clock resolution:
+ * timeout:
*/
timeout_ms = ((pthread->wakeup_time.tv_sec - ts.tv_sec) *
- 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec +
- _clock_res_nsec) / 1000000);
+ 1000) + ((pthread->wakeup_time.tv_nsec - ts.tv_nsec) /
+ 1000000);
/*
* Don't allow negative timeouts:
*/
@@ -792,14 +758,14 @@ _thread_kern_poll(int wait_reqd)
/*
* Wait for a file descriptor to be ready for read, write, or
- * an exception, or a timeout to occur:
+ * an exception, or a timeout to occur:
*/
count = _thread_sys_poll(_thread_pfd_table, nfds, timeout_ms);
if (kern_pipe_added != 0)
/*
* Remove the pthread kernel pipe file descriptor
- * from the pollfd table:
+ * from the pollfd table:
*/
nfds = 1;
else
@@ -813,7 +779,7 @@ _thread_kern_poll(int wait_reqd)
(_thread_pfd_table[0].revents & POLLRDNORM))) {
/*
* If the kernel read pipe was included in the
- * count:
+ * count:
*/
if (count > 0) {
/* Decrement the count of file descriptors: */
@@ -835,7 +801,7 @@ _thread_kern_poll(int wait_reqd)
/*
* Enter a loop to look for threads waiting on file
* descriptors that are flagged as available by the
- * _poll syscall:
+ * _poll syscall:
*/
PTHREAD_WAITQ_SETACTIVE();
TAILQ_FOREACH(pthread, &_workq, qe) {
@@ -963,9 +929,6 @@ _thread_kern_poll(int wait_reqd)
/* Unprotect the scheduling queues: */
_queue_signals = 0;
}
-
- /* Nothing to return. */
- return;
}
void
@@ -982,7 +945,7 @@ _thread_kern_set_timeout(const struct timespec * timeout)
if (timeout == NULL) {
/*
* Set the wakeup time to something that can be recognised as
- * different to an actual time of day:
+ * different to an actual time of day:
*/
curthread->wakeup_time.tv_sec = -1;
curthread->wakeup_time.tv_nsec = -1;
@@ -994,7 +957,7 @@ _thread_kern_set_timeout(const struct timespec * timeout)
curthread->wakeup_time.tv_nsec = 0;
} else {
/* Get the current time: */
- gettimeofday(&tv, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &current_time);
/* Calculate the time for the current thread to wake up: */
@@ -1008,7 +971,6 @@ _thread_kern_set_timeout(const struct timespec * timeout)
curthread->wakeup_time.tv_nsec -= 1000000000;
}
}
- return;
}
void
@@ -1023,9 +985,7 @@ _thread_kern_sig_defer(void)
void
_thread_kern_sig_undefer(void)
{
- struct pthread *curthread = _get_curthread();
- pthread_t pthread;
- int need_resched = 0;
+ struct pthread *curthread = _get_curthread();
/*
* Perform checks to yield only if we are about to undefer
@@ -1042,30 +1002,8 @@ _thread_kern_sig_undefer(void)
/*
* Check if there are queued signals:
*/
- while (_sigq_check_reqd != 0) {
- /* Defer scheduling while we process queued signals: */
- curthread->sig_defer_count = 1;
-
- /* Clear the flag before checking the signal queue: */
- _sigq_check_reqd = 0;
-
- /* Dequeue and handle signals: */
- dequeue_signals();
-
- /*
- * Avoiding an unnecessary check to reschedule, check
- * to see if signal handling caused a higher priority
- * thread to become ready.
- */
- if ((need_resched == 0) &&
- (((pthread = PTHREAD_PRIOQ_FIRST()) != NULL) &&
- (pthread->active_priority > curthread->active_priority))) {
- need_resched = 1;
- }
-
- /* Reenable signals: */
- curthread->sig_defer_count = 0;
- }
+ if (_sigq_check_reqd != 0)
+ _thread_kern_sched(NULL);
/*
* Check for asynchronous cancellation before delivering any
@@ -1075,8 +1013,14 @@ _thread_kern_sig_undefer(void)
((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
pthread_testcancel();
- /* Yield the CPU if necessary: */
- if (need_resched || curthread->yield_on_sig_undefer != 0) {
+ /*
+ * If there are pending signals or this thread has
+ * to yield the CPU, call the kernel scheduler:
+ *
+ * XXX - Come back and revisit the pending signal problem
+ */
+ if ((curthread->yield_on_sig_undefer != 0) ||
+ curthread->sigpend != 0) {
curthread->yield_on_sig_undefer = 0;
_thread_kern_sched(NULL);
}
@@ -1091,7 +1035,7 @@ dequeue_signals(void)
/*
* Enter a loop to read and handle queued signals from the
- * pthread kernel pipe:
+ * pthread kernel pipe:
*/
while (((num = _thread_sys_read(_thread_kern_pipe[0], bufr,
sizeof(bufr))) > 0) || (num == -1 && errno == EINTR)) {
@@ -1128,10 +1072,10 @@ thread_run_switch_hook(pthread_t thread_out, pthread_t thread_in)
pthread_t tid_in = thread_in;
if ((tid_out != NULL) &&
- ((tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0))
+ (tid_out->flags & PTHREAD_FLAGS_PRIVATE) != 0)
tid_out = NULL;
if ((tid_in != NULL) &&
- ((tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0))
+ (tid_in->flags & PTHREAD_FLAGS_PRIVATE) != 0)
tid_in = NULL;
if ((_sched_switch_hook != NULL) && (tid_out != tid_in)) {
diff --git a/lib/libpthread/uthread/uthread_nanosleep.c b/lib/libpthread/uthread/uthread_nanosleep.c
index 4518a13e91c..26a26c1c678 100644
--- a/lib/libpthread/uthread/uthread_nanosleep.c
+++ b/lib/libpthread/uthread/uthread_nanosleep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uthread_nanosleep.c,v 1.6 2001/08/21 19:24:53 fgsch Exp $ */
+/* $OpenBSD: uthread_nanosleep.c,v 1.7 2001/12/31 18:23:15 fgsch Exp $ */
/*
* Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
@@ -59,8 +59,12 @@ nanosleep(const struct timespec * time_to_sleep,
errno = EINVAL;
ret = -1;
} else {
- /* Get the current time: */
- gettimeofday(&tv, NULL);
+ /*
+ * As long as we're going to get the time of day, we
+ * might as well store it in the global time of day:
+ */
+ gettimeofday((struct timeval *) &_sched_tod, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &current_time);
/* Calculate the time for the current thread to wake up: */
@@ -78,8 +82,12 @@ nanosleep(const struct timespec * time_to_sleep,
/* Reschedule the current thread to sleep: */
_thread_kern_sched_state(PS_SLEEP_WAIT, __FILE__, __LINE__);
- /* Get the current time: */
- gettimeofday(&tv, NULL);
+ /*
+ * As long as we're going to get the time of day, we
+ * might as well store it in the global time of day:
+ */
+ gettimeofday((struct timeval *) &_sched_tod, NULL);
+ GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &current_time1);
/* Calculate the remaining time to sleep: */
diff --git a/lib/libpthread/uthread/uthread_sig.c b/lib/libpthread/uthread/uthread_sig.c
index 2755ba4747a..fd49684c955 100644
--- a/lib/libpthread/uthread/uthread_sig.c
+++ b/lib/libpthread/uthread/uthread_sig.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uthread_sig.c,v 1.11 2001/12/18 03:47:52 marc Exp $ */
+/* $OpenBSD: uthread_sig.c,v 1.12 2001/12/31 18:23:15 fgsch Exp $ */
/*
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
@@ -71,6 +71,10 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
/* Check if an interval timer signal: */
if (sig == _SCHED_SIGNAL) {
+ /* Update the scheduling clock: */
+ gettimeofday((struct timeval *)&_sched_tod, NULL);
+ _sched_ticks++;
+
if (_thread_kern_in_sched != 0) {
/*
* The scheduler is already running; ignore this
@@ -89,7 +93,7 @@ _thread_sig_handler(int sig, int code, struct sigcontext * scp)
/*
* Schedule the next thread. This function is not
* expected to return because it will do a longjmp
- * instead.
+ * instead.
*/
_thread_kern_sched(scp);
@@ -243,7 +247,7 @@ _thread_sig_handle(int sig, struct sigcontext * scp)
if (_thread_sigact[sig - 1].sa_handler != SIG_IGN)
/*
* Enter a loop to process each thread in the linked
- * list:
+ * list:
*/
TAILQ_FOREACH(pthread, &_thread_list, tle) {
pthread_t pthread_saved = curthread;
@@ -330,7 +334,7 @@ _thread_signal(pthread_t pthread, int sig)
/*
* States that are interrupted by the occurrence of a signal
- * other than the scheduling alarm:
+ * other than the scheduling alarm:
*/
case PS_FDR_WAIT:
case PS_FDW_WAIT: