author    Peter Galbavy <peter@cvs.openbsd.org>    1998-07-21 19:48:10 +0000
committer Peter Galbavy <peter@cvs.openbsd.org>    1998-07-21 19:48:10 +0000
commit    57c8ec29e7edfecff534599d5b59c7ab27a2ca03 (patch)
tree      019102de27932c5251f0c0d0d7e8008213395080 /lib/libpthread
parent    85951770ba93251f4179cb19db5aea7acd61a541 (diff)
this will now compile on i386 if you move arch/i386/machdep.h to
arch/i386/pthread/machdep.h - not an ideal solution. Correct fix is welcome. I am quitting work on this for today, so other hackers are welcome to take it up for the rest of the day/night. More from me tomorrow.
Diffstat (limited to 'lib/libpthread')
-rw-r--r--  lib/libpthread/Makefile               |   18
-rw-r--r--  lib/libpthread/gen/ctime.c            |    1
-rw-r--r--  lib/libpthread/gen/ttyname.c          |    2
-rw-r--r--  lib/libpthread/pthreads/Makefile.inc  |   17
-rw-r--r--  lib/libpthread/pthreads/fd.c          |  953
-rw-r--r--  lib/libpthread/pthreads/fd_kern.c     | 1947
-rw-r--r--  lib/libpthread/pthreads/signal.c      |  603
-rw-r--r--  lib/libpthread/stdio/xprintf.c        |   14
-rw-r--r--  lib/libpthread/stdlib/getopt.c        |    1
-rw-r--r--  lib/libpthread/tests/Makefile         |   10
10 files changed, 2776 insertions, 790 deletions
diff --git a/lib/libpthread/Makefile b/lib/libpthread/Makefile
index 866c4456a5c..6bd1d145d68 100644
--- a/lib/libpthread/Makefile
+++ b/lib/libpthread/Makefile
@@ -5,16 +5,16 @@
.include <bsd.own.mk>
LIB=pthread
-NOPIC= no
-CPPFLAGS+= -I. -I${.CURDIR}/include -I${.CURDIR}/arch/${MACHINE_ARCH}
+NOPIC=no
+CPPFLAGS+=-DPTHREAD_KERNEL -I. -I${.CURDIR}/include -I${.CURDIR}/arch/${MACHINE_ARCH}
+.include "${.CURDIR}/include/Makefile.inc"
.include "${.CURDIR}/arch/${MACHINE}/Makefile.inc"
-#.include "${.CURDIR}/pthreads/Makefile.inc"
-#.include "${.CURDIR}/stdlib/Makefile.inc"
-#.include "${.CURDIR}/stdio/Makefile.inc"
-#.include "${.CURDIR}/string/Makefile.inc"
-#.include "${.CURDIR}/gen/Makefile.inc"
-#.include "${.CURDIR}/net/Makefile.inc"
-#.include "${.CURDIR}/scripts/Makefile.inc"
+.include "${.CURDIR}/pthreads/Makefile.inc"
+.include "${.CURDIR}/stdlib/Makefile.inc"
+.include "${.CURDIR}/stdio/Makefile.inc"
+.include "${.CURDIR}/string/Makefile.inc"
+.include "${.CURDIR}/gen/Makefile.inc"
+.include "${.CURDIR}/net/Makefile.inc"
.include <bsd.lib.mk>
diff --git a/lib/libpthread/gen/ctime.c b/lib/libpthread/gen/ctime.c
index e7980296e50..c496e9dfb23 100644
--- a/lib/libpthread/gen/ctime.c
+++ b/lib/libpthread/gen/ctime.c
@@ -46,7 +46,6 @@ static char sccsid[] = "@(#)ctime.c 5.26 (Berkeley) 2/23/91";
*/
/*LINTLIBRARY*/
-#include "config.h"
#include <pthread.h>
#include <sys/param.h>
#include <fcntl.h>
diff --git a/lib/libpthread/gen/ttyname.c b/lib/libpthread/gen/ttyname.c
index 0998cad1f78..0fe6abe4386 100644
--- a/lib/libpthread/gen/ttyname.c
+++ b/lib/libpthread/gen/ttyname.c
@@ -43,7 +43,7 @@ static char sccsid[] = "@(#)ttyname.c 5.10 (Berkeley) 5/6/91";
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
-#include "config.h"
+#include <sys/__path.h>
static pthread_mutex_t ttyname_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_key_t ttyname_key;
diff --git a/lib/libpthread/pthreads/Makefile.inc b/lib/libpthread/pthreads/Makefile.inc
index 98ac0b520cc..4e3f7d41df6 100644
--- a/lib/libpthread/pthreads/Makefile.inc
+++ b/lib/libpthread/pthreads/Makefile.inc
@@ -11,28 +11,33 @@ SRCS+= cleanup.c cond.c fd.c fd_kern.c fd_pipe.c file.c globals.c malloc.c \
OBJS+= syscalls.o
+AVAILABLE_SYSCALLS = open write read creat close fcntl lseek dup pipe fchmod fchown execve fstat lstat link unlink chdir chown chmod stat rename ioctl fstatfs sigsuspend sigaction sigprocmask getdents waitsys poll putmsg getmsg pgrpsys exit readv writev fork
+
+SYSCALL_TEMPLATE = ${.CURDIR}/arch/${MACHINE}/syscall-template.S
+SYSCALL_S = ${.CURDIR}/arch/${MACHINE}/syscall.S
+
.if !defined(NOPIC)
SOBJS+= syscalls.so
-SYSCALL_PIC_COMPILE= $(CC) $(CFLAGS) -DSYSCALL_NAME=$$syscall -DPIC -c ${.CURDIR}/syscall-template.S -o ${.OBJDIR}/syscalls/S$$syscall.so
+SYSCALL_PIC_COMPILE= $(CC) $(CFLAGS) -DSYSCALL_NAME=$$syscall -DPIC -c ${SYSCALL_TEMPLATE} -o ${.OBJDIR}/syscalls/S$$syscall.so
.else
SYSCALL_PIC_COMPILE= true
.endif
.if !defined(NOPROFILE)
POBJS+= syscalls.po
-SYSCALL_PROF_COMPILE= $(CC) $(CFLAGS) -DSYSCALL_NAME=$$syscall -pg -c ${.CURDIR}/syscall-template.S -o ${.OBJDIR}/syscalls/S$$syscall.po
+SYSCALL_PROF_COMPILE= $(CC) $(CFLAGS) -DSYSCALL_NAME=$$syscall -pg -c ${SYSCALL_TEMPLATE} -o ${.OBJDIR}/syscalls/S$$syscall.po
.else
SYSCALL_PROF_COMPILE= true
.endif
-syscalls.o syscalls.so syscalls.po : syscall-template.S
+syscalls.o syscalls.so syscalls.po : ${SYSCALL_TEMPLATE}
-rm -rf ${.OBJDIR}/syscalls
mkdir ${.OBJDIR}/syscalls
for syscall in $(AVAILABLE_SYSCALLS) ; do \
case " $(SYSCALL_EXCEPTIONS) " in \
*" "$$syscall" "*) ;; \
*) echo $$syscall ; \
- $(CC) $(CFLAGS) -DSYSCALL_NAME=$$syscall -c ${.CURDIR}/syscall-template.S -o ${.OBJDIR}/syscalls/S$$syscall.o ; \
+ $(CC) $(CFLAGS) -DSYSCALL_NAME=$$syscall -c ${SYSCALL_TEMPLATE} -o ${.OBJDIR}/syscalls/S$$syscall.o ; \
$(SYSCALL_PIC_COMPILE) ; \
$(SYSCALL_PROF_COMPILE) ;; \
esac ; \
@@ -48,13 +53,13 @@ syscalls.o syscalls.so syscalls.po : syscall-template.S
rm -r ${.OBJDIR}/syscalls
syscall.o: syscall.S
- cpp ${CPPFLAGS} ${.CURDIR}/syscall.S > syscall.i
+ cpp ${CPPFLAGS} ${SYSCALL_S} > syscall.i
as syscall.i
rm syscall.i
mv a.out syscall.o
syscall.po: syscall.S
- cpp ${CPPFLAGS} ${.CURDIR}/syscall.S > syscall.i
+ cpp ${CPPFLAGS} ${SYSCALL_S} > syscall.i
as syscall.i
rm syscall.i
mv a.out syscall.po
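
The loop above compiles the same assembly template once per name in
AVAILABLE_SYSCALLS, with -DSYSCALL_NAME selecting which symbol to emit. As a
purely illustrative C sketch of that build trick (this is not the contents of
syscall-template.S; the stub name and three-argument shape are invented for
the example), token pasting turns the single -D value into a distinctly named
function per compile:

#include <unistd.h>
#include <sys/syscall.h>

#define PASTE2(a, b)	a##b
#define PASTE(a, b)	PASTE2(a, b)	/* expand args before pasting */

/* cc -DSYSCALL_NAME=open -c stub.c defines machdep_sys_open(), etc. */
long PASTE(machdep_sys_, SYSCALL_NAME)(long a1, long a2, long a3)
{
	/* PASTE(SYS_, SYSCALL_NAME) expands to SYS_open, SYS_close, ... */
	return syscall(PASTE(SYS_, SYSCALL_NAME), a1, a2, a3);
}
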
diff --git a/lib/libpthread/pthreads/fd.c b/lib/libpthread/pthreads/fd.c
index 2302f1d2068..e603c0da0f9 100644
--- a/lib/libpthread/pthreads/fd.c
+++ b/lib/libpthread/pthreads/fd.c
@@ -39,13 +39,24 @@
*/
#ifndef lint
-static const char rcsid[] = "$Id: fd.c,v 1.1 1995/10/18 08:43:04 deraadt Exp $ $provenid: fd.c,v 1.16 1994/02/07 02:18:39 proven Exp $";
+static const char rcsid[] = "$Id: fd.c,v 1.2 1998/07/21 19:48:00 peter Exp $";
#endif
#include <pthread.h>
+#include <stdlib.h>
+#include <unistd.h>
#include <sys/types.h>
+#include <sys/stat.h>
#include <sys/uio.h>
+#include <sys/ioctl.h>
+#ifdef HAVE_SYS_FILIO_H
+#include <sys/filio.h> /* For ioctl */
+#endif
+#if __STDC__
#include <stdarg.h>
+#else
+#include <varargs.h>
+#endif
#include <fcntl.h>
#include <errno.h>
#include <pthread/posix.h>
@@ -55,8 +66,77 @@ static const char rcsid[] = "$Id: fd.c,v 1.1 1995/10/18 08:43:04 deraadt Exp $ $
*
* I really should dynamically figure out what the table size is.
*/
-int dtablesize = 64;
-static struct fd_table_entry fd_entry[64];
+static pthread_mutex_t fd_table_mutex = PTHREAD_MUTEX_INITIALIZER;
+static const int dtablecount = 4096/sizeof(struct fd_table_entry);
+int dtablesize;
+
+static int fd_get_pthread_fd_from_kernel_fd( int );
+
+/* ==========================================================================
+ * Allocate dtablecount entries at once and populate the fd_table.
+ *
+ * fd_init_entry()
+ */
+int fd_init_entry(int entry)
+{
+ struct fd_table_entry *fd_entry;
+ int i, round;
+
+ if (fd_table[entry] == NULL) {
+ round = entry - entry % dtablecount;
+
+ if ((fd_entry = (struct fd_table_entry *)malloc(
+ sizeof(struct fd_table_entry) * dtablecount)) == NULL) {
+ return(NOTOK);
+ }
+
+ for (i = 0; i < dtablecount && round+i < dtablesize; i++) {
+ fd_table[round + i] = &fd_entry[i];
+
+ fd_table[round + i]->ops = NULL;
+ fd_table[round + i]->type = FD_NT;
+ fd_table[round + i]->fd.i = NOTOK;
+ fd_table[round + i]->flags = 0;
+ fd_table[round + i]->count = 0;
+
+ pthread_mutex_init(&(fd_table[round + i]->mutex), NULL);
+ pthread_queue_init(&(fd_table[round + i]->r_queue));
+ pthread_queue_init(&(fd_table[round + i]->w_queue));
+ fd_table[round + i]->r_owner = NULL;
+ fd_table[round + i]->w_owner = NULL;
+ fd_table[round + i]->r_lockcount= 0;
+ fd_table[round + i]->w_lockcount= 0;
+
+ fd_table[round + i]->next = NULL;
+ }
+ }
+ return(OK);
+}
+
+/* ==========================================================================
+ * fd_check_entry()
+ */
+int fd_check_entry(unsigned int entry)
+{
+ int ret = OK;
+
+ pthread_mutex_lock(&fd_table_mutex);
+
+ if (entry < dtablesize) {
+ if (fd_table[entry] == NULL) {
+ if (fd_init_entry(entry)) {
+ SET_ERRNO(EBADF);
+ ret = -EBADF;
+ }
+ }
+ } else {
+ SET_ERRNO(EBADF);
+ ret = -EBADF;
+ }
+
+ pthread_mutex_unlock(&fd_table_mutex);
+ return(ret);
+}
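
The two routines above grow the descriptor table lazily: fd_check_entry()
takes the table mutex and, on first touch of a slot, fd_init_entry() mallocs
a whole chunk of dtablecount entries and wires up every slot in that chunk.
A tiny self-contained illustration of the chunk arithmetic (the 64-byte entry
size is a made-up stand-in, since sizeof(struct fd_table_entry) is
platform-dependent):

#include <stdio.h>

int main(void)
{
	int dtablecount = 4096 / 64;	/* entries per malloc'd chunk */
	int entry = 100;
	/* first slot of the chunk that contains `entry' */
	int round = entry - entry % dtablecount;

	printf("slot %d lives in the chunk starting at slot %d\n",
	    entry, round);	/* slot 100 -> chunk starting at 64 */
	return 0;
}
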
/* ==========================================================================
* fd_init()
@@ -65,30 +145,32 @@ void fd_init(void)
{
int i;
- for (i = 0; i < dtablesize; i++) {
- fd_table[i] = &fd_entry[i];
-
- fd_table[i]->ops = NULL;
- fd_table[i]->type = FD_NT;
- fd_table[i]->fd.i = NOTOK;
- fd_table[i]->flags = 0;
- fd_table[i]->count = 0;
-
- pthread_queue_init(&(fd_table[i]->r_queue));
- pthread_queue_init(&(fd_table[i]->w_queue));
-
- fd_table[i]->r_owner = NULL;
- fd_table[i]->w_owner = NULL;
- fd_table[i]->lock = SEMAPHORE_CLEAR;
- fd_table[i]->next = NULL;
- fd_table[i]->lockcount = 0;
+ if ((dtablesize = machdep_sys_getdtablesize()) < 0) {
+ /* Can't figure out the table size. */
+ PANIC();
}
- /* Currently only initialize first 3 fds. */
- fd_kern_init(0);
- fd_kern_init(1);
- fd_kern_init(2);
- printf ("Warning: threaded process may have changed open file descriptors\n");
+ /* select() can only handle FD_SETSIZE descriptors, so our inner loop will
+ * break if dtablesize is higher than that. This should be removed if and
+ * when the inner loop is rewritten to use poll(). */
+ if (dtablesize > FD_SETSIZE) {
+ dtablesize = FD_SETSIZE;
+ }
+
+ if (fd_table = (struct fd_table_entry **)malloc(
+ sizeof(struct fd_table_entry) * dtablesize)) {
+ memset(fd_table, 0, sizeof(struct fd_table_entry) * dtablesize);
+ if (fd_check_entry(0) == OK) {
+ return;
+ }
+ }
+
+ /*
+ * There isn't enough memory to allocate a fd table at init time.
+ * This is a problem.
+ */
+ PANIC();
+
}
/* ==========================================================================
@@ -96,116 +178,143 @@ void fd_init(void)
*/
int fd_allocate()
{
- semaphore *lock;
+ pthread_mutex_t * mutex;
int i;
for (i = 0; i < dtablesize; i++) {
- lock = &(fd_table[i]->lock);
- while (SEMAPHORE_TEST_AND_SET(lock)) {
- continue;
- }
- if (fd_table[i]->count || fd_table[i]->r_owner
- || fd_table[i]->w_owner) {
- SEMAPHORE_RESET(lock);
- continue;
- }
- if (fd_table[i]->type == FD_NT) {
- /* Test to see if the kernel version is in use */
- /* If so continue; */
+ if (fd_check_entry(i) == OK) {
+ mutex = &(fd_table[i]->mutex);
+ if (pthread_mutex_trylock(mutex)) {
+ continue;
+ }
+ if (fd_table[i]->count || fd_table[i]->r_owner
+ || fd_table[i]->w_owner) {
+ pthread_mutex_unlock(mutex);
+ continue;
+ }
+ if (fd_table[i]->type == FD_NT) {
+ /* Test to see if the kernel version is in use */
+ if ((machdep_sys_fcntl(i, F_GETFL, NULL)) >= OK) {
+ /* If so continue; */
+ pthread_mutex_unlock(mutex);
+ continue;
+ }
+ }
+ fd_table[i]->count++;
+ pthread_mutex_unlock(mutex);
+ return(i);
}
- fd_table[i]->count++;
- SEMAPHORE_RESET(lock);
- return(i);
}
- pthread_run->error = ENFILE;
+ SET_ERRNO(ENFILE);
return(NOTOK);
}
-/* ==========================================================================
- * fd_free()
- *
- * Assumes fd is locked and owner by pthread_run
- * Don't clear the queues, fd_unlock will do that.
- */
-int fd_free(int fd)
+/*----------------------------------------------------------------------
+ * Function: fd_get_pthread_fd_from_kernel_fd
+ * Purpose: get the fd_table index of a kernel fd
+ * Args: fd = kernel fd to convert
+ * Returns: fd_table index, -1 if not found
+ * Notes:
+ *----------------------------------------------------------------------*/
+static int
+fd_get_pthread_fd_from_kernel_fd( int kfd )
{
- struct fd_table_entry *fd_valid;
- int ret;
+ int j;
- if (ret = --fd_table[fd]->count) {
- /* Separate pthread queue into two distinct queues. */
- fd_valid = fd_table[fd];
- fd_table[fd] = fd_table[fd]->next;
- fd_valid->next = fd_table[fd]->next;
+ /* This is *SICK*, but unless there is a faster way to
+ * turn a kernel fd into an fd_table index, this has to do.
+ */
+ for( j=0; j < dtablesize; j++ ) {
+ if( fd_table[j] &&
+ fd_table[j]->type != FD_NT &&
+ fd_table[j]->type != FD_NIU &&
+ fd_table[j]->fd.i == kfd ) {
+ return j;
+ }
}
- fd_table[fd]->type = FD_NIU;
- fd_table[fd]->fd.i = NOTOK;
- fd_table[fd]->next = NULL;
- fd_table[fd]->flags = 0;
- fd_table[fd]->count = 0;
- return(ret);
+ /* Not listed by fd, check for kernel fd == pthread fd */
+ if( fd_table[kfd] == NULL || fd_table[kfd]->type == FD_NT ) {
+ /* Assume that the kernel fd is the same */
+ return kfd;
+ }
+
+ return NOTOK; /* Not found */
}
/* ==========================================================================
- * fd_basic_unlock()
- *
+ * fd_basic_basic_unlock()
+ *
* The real work of unlock without the locking of fd_table[fd].lock.
*/
-void fd_basic_unlock(int fd, int lock_type)
+void fd_basic_basic_unlock(struct fd_table_entry * entry, int lock_type)
{
struct pthread *pthread;
- semaphore *plock;
- if (fd_table[fd]->r_owner == pthread_run) {
- if (pthread = pthread_queue_get(&fd_table[fd]->r_queue)) {
-
- plock = &(pthread->lock);
- while (SEMAPHORE_TEST_AND_SET(plock)) {
- pthread_yield();
- }
- pthread_queue_deq(&fd_table[fd]->r_queue);
- fd_table[fd]->r_owner = pthread;
- pthread->state = PS_RUNNING;
- SEMAPHORE_RESET(plock);
- } else {
- fd_table[fd]->r_owner = NULL;
- }
+ if (entry->r_owner == pthread_run) {
+ if ((entry->type == FD_HALF_DUPLEX) ||
+ (entry->type == FD_TEST_HALF_DUPLEX) ||
+ (lock_type == FD_READ) || (lock_type == FD_RDWR)) {
+ if (entry->r_lockcount == 0) {
+ if (pthread = pthread_queue_deq(&entry->r_queue)) {
+ pthread_sched_prevent();
+ entry->r_owner = pthread;
+ if ((SET_PF_DONE_EVENT(pthread)) == OK) {
+ pthread_sched_other_resume(pthread);
+ } else {
+ pthread_sched_resume();
+ }
+ } else {
+ entry->r_owner = NULL;
+ }
+ } else {
+ entry->r_lockcount--;
+ }
+ }
}
- if (fd_table[fd]->w_owner == pthread_run) {
- if (pthread = pthread_queue_get(&fd_table[fd]->w_queue)) {
- plock = &(pthread->lock);
- while (SEMAPHORE_TEST_AND_SET(plock)) {
- pthread_yield();
- }
- pthread_queue_deq(&fd_table[fd]->r_queue);
- fd_table[fd]->w_owner = pthread;
- pthread->state = PS_RUNNING;
- SEMAPHORE_RESET(plock);
- } else {
- fd_table[fd]->w_owner = NULL;
- }
+ if (entry->w_owner == pthread_run) {
+ if ((entry->type != FD_HALF_DUPLEX) &&
+ (entry->type != FD_TEST_HALF_DUPLEX) &&
+ ((lock_type == FD_WRITE) || (lock_type == FD_RDWR))) {
+ if (entry->w_lockcount == 0) {
+ if (pthread = pthread_queue_deq(&entry->w_queue)) {
+ pthread_sched_prevent();
+ entry->w_owner = pthread;
+ if ((SET_PF_DONE_EVENT(pthread)) == OK) {
+ pthread_sched_other_resume(pthread);
+ } else {
+ pthread_sched_resume();
+ }
+ } else {
+ entry->w_owner = NULL;
+ }
+ } else {
+ entry->w_lockcount--;
+ }
+ }
}
}
/* ==========================================================================
+ * fd_basic_unlock()
+ */
+void fd_basic_unlock(int fd, int lock_type)
+{
+ fd_basic_basic_unlock(fd_table[fd], lock_type);
+}
+
+/* ==========================================================================
* fd_unlock()
- * If there is a lock count then the function fileunlock will do
- * the unlocking, just return.
*/
void fd_unlock(int fd, int lock_type)
{
- semaphore *lock;
+ pthread_mutex_t *mutex;
- if (!(fd_table[fd]->lockcount)) {
- lock = &(fd_table[fd]->lock);
- while (SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
- }
- fd_basic_unlock(fd, lock_type);
- SEMAPHORE_RESET(lock);
- }
+ mutex = &(fd_table[fd]->mutex);
+ pthread_mutex_lock(mutex);
+ fd_basic_basic_unlock(fd_table[fd], lock_type);
+ pthread_mutex_unlock(mutex);
}
/* ==========================================================================
@@ -214,64 +323,126 @@ void fd_unlock(int fd, int lock_type)
* The real work of lock without the locking of fd_table[fd].lock.
* Be sure to leave the lock the same way you found it. i.e. locked.
*/
-int fd_basic_lock(unsigned int fd, int lock_type, semaphore * lock)
+int fd_basic_lock(unsigned int fd, int lock_type, pthread_mutex_t * mutex,
+ struct timespec * timeout)
{
semaphore *plock;
- /* If not in use return EBADF error */
- if (fd_table[fd]->type == FD_NIU) {
+ switch (fd_table[fd]->type) {
+ case FD_NIU:
+ /* If not in use return EBADF error */
+ SET_ERRNO(EBADF);
return(NOTOK);
- }
-
- /* If not tested, test it and see if it is valid */
- if (fd_table[fd]->type == FD_NT) {
- /* If not ok return EBADF error */
- if (fd_kern_init(fd) != OK) {
+ break;
+ case FD_NT:
+ /*
+ * If not tested, test it and see if it is valid
+ * If not ok return EBADF error
+ */
+ fd_kern_init(fd);
+ if (fd_table[fd]->type == FD_NIU) {
+ SET_ERRNO(EBADF);
return(NOTOK);
}
+ break;
+ case FD_TEST_HALF_DUPLEX:
+ case FD_TEST_FULL_DUPLEX:
+ /* If a parent process reset the fd to its proper state */
+ if (!fork_lock) {
+ /* It had better be a kernel fd */
+ fd_kern_reset(fd);
+ }
+ break;
+ default:
+ break;
}
+
if ((fd_table[fd]->type == FD_HALF_DUPLEX) ||
- (lock_type & FD_READ)) {
+ (fd_table[fd]->type == FD_TEST_HALF_DUPLEX) ||
+ (lock_type == FD_READ) || (lock_type == FD_RDWR)) {
if (fd_table[fd]->r_owner) {
if (fd_table[fd]->r_owner != pthread_run) {
- plock = &(pthread_run->lock);
- while (SEMAPHORE_TEST_AND_SET(plock)) {
- pthread_yield();
- }
+ pthread_sched_prevent();
pthread_queue_enq(&fd_table[fd]->r_queue, pthread_run);
- SEMAPHORE_RESET(lock);
-
- /* Reschedule will unlock pthread_run */
- reschedule(PS_FDLR_WAIT);
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_mutex_unlock(mutex);
- while(SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ /* Reschedule will unlock pthread_run */
+ pthread_run->data.fd.fd = fd;
+ pthread_run->data.fd.branch = __LINE__;
+ pthread_resched_resume(PS_FDLR_WAIT);
+ pthread_mutex_lock(mutex);
+
+ /* If we're the owner then we have to cancel the sleep */
+ if (fd_table[fd]->r_owner != pthread_run) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ SET_ERRNO(ETIMEDOUT);
+ return(NOTOK);
+ }
+ sleep_cancel(pthread_run);
+ } else {
+ /* Reschedule will unlock pthread_run */
+ pthread_run->data.fd.fd = fd;
+ pthread_run->data.fd.branch = __LINE__;
+ pthread_resched_resume(PS_FDLR_WAIT);
+ pthread_mutex_lock(mutex);
}
+ CLEAR_PF_DONE_EVENT(pthread_run);
} else {
- if (!fd_table[fd]->lockcount) {
- PANIC();
- }
+ fd_table[fd]->r_lockcount++;
}
}
fd_table[fd]->r_owner = pthread_run;
}
if ((fd_table[fd]->type != FD_HALF_DUPLEX) &&
- (lock_type & FD_WRITE)) {
+ (fd_table[fd]->type != FD_TEST_HALF_DUPLEX) &&
+ ((lock_type == FD_WRITE) || (lock_type == FD_RDWR))) {
if (fd_table[fd]->w_owner) {
if (fd_table[fd]->w_owner != pthread_run) {
- plock = &(pthread_run->lock);
- while (SEMAPHORE_TEST_AND_SET(plock)) {
- pthread_yield();
- }
+ pthread_sched_prevent();
pthread_queue_enq(&fd_table[fd]->w_queue, pthread_run);
- SEMAPHORE_RESET(lock);
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_mutex_unlock(mutex);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
- /* Reschedule will unlock pthread_run */
- reschedule(PS_FDLW_WAIT);
+ /* Reschedule will unlock pthread_run */
+ pthread_run->data.fd.fd = fd;
+ pthread_run->data.fd.branch = __LINE__;
+ pthread_resched_resume(PS_FDLR_WAIT);
+ pthread_mutex_lock(mutex);
- while(SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
+ /* If we're the owner then we have to cancel the sleep */
+ if (fd_table[fd]->w_owner != pthread_run) {
+ if (lock_type == FD_RDWR) {
+ /* Unlock current thread */
+ fd_basic_unlock(fd, FD_READ);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ SET_ERRNO(ETIMEDOUT);
+ return(NOTOK);
+ }
+ sleep_cancel(pthread_run);
+ } else {
+ /* Reschedule will unlock pthread_run */
+ pthread_run->data.fd.fd = fd;
+ pthread_run->data.fd.branch = __LINE__;
+ pthread_resched_resume(PS_FDLR_WAIT);
+ pthread_mutex_lock(mutex);
}
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ } else {
+ fd_table[fd]->w_lockcount++;
}
}
fd_table[fd]->w_owner = pthread_run;
@@ -283,24 +454,144 @@ int fd_basic_lock(unsigned int fd, int lock_type, semaphore * lock)
return(OK);
}
+/*----------------------------------------------------------------------
+ * Function: fd_unlock_for_cancel
+ * Purpose: Unlock all fd locks held prior to being cancelled
+ * Args: void
+ * Returns:
+ * OK or NOTOK
+ * Notes:
+ * Assumes the kernel is locked on entry
+ *----------------------------------------------------------------------*/
+int
+fd_unlock_for_cancel( void )
+{
+ int i, fd;
+ struct pthread_select_data *data;
+ int rdlk, wrlk, lktype;
+ int found;
+
+ /* What we do depends on the previous state of the thread */
+ switch( pthread_run->old_state ) {
+ case PS_RUNNING:
+ case PS_JOIN:
+ case PS_SLEEP_WAIT:
+ case PS_WAIT_WAIT:
+ case PS_SIGWAIT:
+ case PS_FDLR_WAIT:
+ case PS_FDLW_WAIT:
+ case PS_DEAD:
+ case PS_UNALLOCED:
+ break; /* Nothing to do */
+
+ case PS_COND_WAIT:
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP );
+ /* Must reacquire the mutex according to the standard */
+ if( pthread_run->data.mutex == NULL ) {
+ PANIC();
+ }
+ pthread_mutex_lock( pthread_run->data.mutex );
+ break;
+
+ case PS_FDR_WAIT:
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP);
+ /* Free the lock on the fd being used */
+ fd = fd_get_pthread_fd_from_kernel_fd( pthread_run->data.fd.fd );
+ if( fd == NOTOK ) {
+ PANIC(); /* Can't find fd */
+ }
+ fd_unlock( fd, FD_READ );
+ break;
+
+ case PS_FDW_WAIT: /* Waiting on i/o */
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP);
+ /* Free the lock on the fd being used */
+ fd = fd_get_pthread_fd_from_kernel_fd( pthread_run->data.fd.fd );
+ if( fd == NOTOK ) {
+ PANIC(); /* Can't find fd */
+ }
+ fd_unlock( fd, FD_WRITE );
+ break;
+
+ case PS_SELECT_WAIT:
+ data = pthread_run->data.select_data;
+
+ CLEAR_PF_GROUP( pthread_run, PF_EVENT_GROUP);
+
+ for( i = 0; i < data->nfds; i++) {
+ rdlk =(FD_ISSET(i,&data->readfds)
+ || FD_ISSET(i,&data->exceptfds));
+ wrlk = FD_ISSET(i, &data->writefds);
+ lktype = rdlk ? (wrlk ? FD_RDWR : FD_READ) : FD_WRITE;
+
+ if( ! (rdlk || wrlk) )
+ continue; /* No locks, no unlock */
+
+ if( (fd = fd_get_pthread_fd_from_kernel_fd( i )) == NOTOK ) {
+ PANIC(); /* Can't find fd */
+ }
+
+ fd_unlock( fd, lktype );
+ }
+ break;
+
+ case PS_MUTEX_WAIT:
+ PANIC(); /* Should never cancel a mutex wait */
+
+ default:
+ PANIC(); /* Unknown thread status */
+ }
+}
+
/* ==========================================================================
* fd_lock()
*/
-int fd_lock(unsigned int fd, int lock_type)
+#define pthread_mutex_lock_timedwait(a, b) pthread_mutex_lock(a)
+
+int fd_lock(unsigned int fd, int lock_type, struct timespec * timeout)
{
- semaphore *lock;
+ struct timespec current_time;
+ pthread_mutex_t *mutex;
int error;
- if (fd < dtablesize) {
- lock = &(fd_table[fd]->lock);
- while (SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
+ if ((error = fd_check_entry(fd)) == OK) {
+ mutex = &(fd_table[fd]->mutex);
+ if (pthread_mutex_lock_timedwait(mutex, timeout)) {
+ SET_ERRNO(ETIMEDOUT);
+ return(-ETIMEDOUT);
}
- error = fd_basic_lock(fd, lock_type, lock);
- SEMAPHORE_RESET(lock);
- return(error);
+ error = fd_basic_lock(fd, lock_type, mutex, timeout);
+ pthread_mutex_unlock(mutex);
}
- return(NOTOK);
+ return(error);
+}
+
+/* ==========================================================================
+ * fd_free()
+ *
+ * Assumes fd is locked and owned by pthread_run
+ * Don't clear the queues, fd_unlock will do that.
+ */
+struct fd_table_entry * fd_free(int fd)
+{
+ struct fd_table_entry *fd_valid;
+
+ fd_valid = NULL;
+ fd_table[fd]->r_lockcount = 0;
+ fd_table[fd]->w_lockcount = 0;
+ if (--fd_table[fd]->count) {
+ fd_valid = fd_table[fd];
+ fd_table[fd] = fd_table[fd]->next;
+ fd_valid->next = fd_table[fd]->next;
+ /* Don't touch queues of fd_valid */
+ }
+
+ fd_table[fd]->type = FD_NIU;
+ fd_table[fd]->fd.i = NOTOK;
+ fd_table[fd]->next = NULL;
+ fd_table[fd]->flags = 0;
+ fd_table[fd]->count = 0;
+ return(fd_valid);
}
@@ -308,73 +599,111 @@ int fd_lock(unsigned int fd, int lock_type)
* ======================================================================= */
/* ==========================================================================
- * read()
+ * read_timedwait()
*/
-ssize_t read(int fd, void *buf, size_t nbytes)
+ssize_t read_timedwait(int fd, void *buf, size_t nbytes,
+ struct timespec * timeout)
{
int ret;
- if ((ret = fd_lock(fd, FD_READ)) == OK) {
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
ret = fd_table[fd]->ops->read(fd_table[fd]->fd,
- fd_table[fd]->flags, buf, nbytes);
+ fd_table[fd]->flags, buf, nbytes, timeout);
fd_unlock(fd, FD_READ);
}
return(ret);
}
/* ==========================================================================
- * readv()
+ * read()
*/
-int readv(int fd, const struct iovec *iov, int iovcnt)
+ssize_t read(int fd, void *buf, size_t nbytes)
+{
+ return(read_timedwait(fd, buf, nbytes, NULL));
+}
+
+/* ==========================================================================
+ * readv_timedwait()
+ */
+int readv_timedwait(int fd, const struct iovec *iov, int iovcnt,
+ struct timespec * timeout)
{
int ret;
- if ((ret = fd_lock(fd, FD_READ)) == OK) {
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
ret = fd_table[fd]->ops->readv(fd_table[fd]->fd,
- fd_table[fd]->flags, iov, iovcnt);
+ fd_table[fd]->flags, iov, iovcnt, timeout);
fd_unlock(fd, FD_READ);
}
return(ret);
}
/* ==========================================================================
+ * readv()
+ */
+ssize_t readv(int fd, const struct iovec *iov, int iovcnt)
+{
+ return(readv_timedwait(fd, iov, iovcnt, NULL));
+}
+
+/* ==========================================================================
* write()
*/
-ssize_t write(int fd, const void *buf, size_t nbytes)
+ssize_t write_timedwait(int fd, const void *buf, size_t nbytes,
+ struct timespec * timeout)
{
- int ret;
+ int ret;
- if ((ret = fd_lock(fd, FD_WRITE)) == OK) {
- ret = fd_table[fd]->ops->write(fd_table[fd]->fd,
- fd_table[fd]->flags, buf, nbytes);
- fd_unlock(fd, FD_WRITE);
- }
- return(ret);
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK)
+ {
+ ret = fd_table[fd]->ops->write(fd_table[fd]->fd,
+ fd_table[fd]->flags, buf, nbytes,
+ timeout);
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
}
/* ==========================================================================
- * writev()
+ * write()
+ */
+ssize_t write(int fd, const void * buf, size_t nbytes)
+{
+ return(write_timedwait(fd, buf, nbytes, NULL));
+}
+
+/* ==========================================================================
+ * writev_timedwait()
*/
-int writev(int fd, const struct iovec *iov, int iovcnt)
+int writev_timedwait(int fd, const struct iovec *iov, int iovcnt,
+ struct timespec * timeout)
{
int ret;
- if ((ret = fd_lock(fd, FD_WRITE)) == OK) {
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK) {
ret = fd_table[fd]->ops->writev(fd_table[fd]->fd,
- fd_table[fd]->flags, iov, iovcnt);
+ fd_table[fd]->flags, iov, iovcnt, timeout);
fd_unlock(fd, FD_WRITE);
}
return(ret);
}
/* ==========================================================================
+ * writev()
+ */
+ssize_t writev(int fd, const struct iovec *iov, int iovcnt)
+{
+ return(writev_timedwait(fd, iov, iovcnt, NULL));
+}
+
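
The wrappers above split each I/O call into a plain POSIX entry point and a
*_timedwait() variant that threads a timeout down through fd_lock() to the
per-type ops. A hypothetical caller sketch (the five-second timespec, the
buffer, and the helper name are invented; the timeout appears to be relative,
since fd_basic_lock() pairs it with the current machdep_gettimeofday()
reading, and on expiry the wrappers set errno to ETIMEDOUT):

#include <errno.h>
#include <time.h>
#include <sys/types.h>

/* prototype from the code above */
extern ssize_t read_timedwait(int, void *, size_t, struct timespec *);

ssize_t read_with_deadline(int fd)
{
	struct timespec limit = { 5, 0 };	/* five seconds */
	char buf[512];
	ssize_t n = read_timedwait(fd, buf, sizeof(buf), &limit);

	if (n < 0 && errno == ETIMEDOUT)
		;	/* nothing arrived within the window */
	return n;
}
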
+/* ==========================================================================
* lseek()
*/
off_t lseek(int fd, off_t offset, int whence)
{
int ret;
- if ((ret = fd_lock(fd, FD_RDWR)) == OK) {
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
ret = fd_table[fd]->ops->seek(fd_table[fd]->fd,
fd_table[fd]->flags, offset, whence);
fd_unlock(fd, FD_RDWR);
@@ -393,66 +722,178 @@ off_t lseek(int fd, off_t offset, int whence)
* to the fd_table[fd] queue, and the count is set to zero, (BUT THE LOCK IS NOT
* RELEASED). close() then calls fd_unlock which gives the fd to the next queued
* element which determines that the fd is closed and then calls fd_unlock etc...
+ *
+ * XXX close() is even uglier now. You may assume that the kernel fd is the
+ * same as fd if fd_table[fd] == NULL or if fd_table[fd]->type == FD_NT.
+ * This is true because before any fd_table[fd] is allocated the corresponding
+ * kernel fd must be checked to see if it's valid.
*/
int close(int fd)
{
- union fd_data realfd;
- int ret, flags;
+ struct fd_table_entry * entry;
+ pthread_mutex_t *mutex;
+ union fd_data realfd;
+ int ret, flags;
- if ((ret = fd_lock(fd, FD_RDWR)) == OK) {
- flags = fd_table[fd]->flags;
- realfd = fd_table[fd]->fd;
- if (fd_free(fd) == OK) {
- ret = fd_table[fd]->ops->close(realfd, flags);
- }
- fd_unlock(fd, FD_RDWR);
+ if(fd < 0 || fd >= dtablesize)
+ {
+ SET_ERRNO(EBADF);
+ return -1;
+ }
+ /* Need to lock the newfd by hand */
+ pthread_mutex_lock(&fd_table_mutex);
+ if (fd_table[fd]) {
+ pthread_mutex_unlock(&fd_table_mutex);
+ mutex = &(fd_table[fd]->mutex);
+ pthread_mutex_lock(mutex);
+
+ /*
+ * XXX Gross hack ... because of fork(), any fd closed by the
+ * parent should not change the fd of the child, unless it owns it.
+ */
+ switch(fd_table[fd]->type) {
+ case FD_NIU:
+ pthread_mutex_unlock(mutex);
+ ret = -EBADF;
+ break;
+ case FD_NT:
+ /*
+ * If it's not tested then the only valid possibility is it's
+ * kernel fd.
+ */
+ ret = machdep_sys_close(fd);
+ fd_table[fd]->type = FD_NIU;
+ pthread_mutex_unlock(mutex);
+ break;
+ case FD_TEST_FULL_DUPLEX:
+ case FD_TEST_HALF_DUPLEX:
+ realfd = fd_table[fd]->fd;
+ flags = fd_table[fd]->flags;
+ if ((entry = fd_free(fd)) == NULL) {
+ ret = fd_table[fd]->ops->close(realfd, flags);
+ } else {
+ /* There can't be any others waiting for fd. */
+ pthread_mutex_unlock(&entry->mutex);
+ /* Note: entry->mutex = mutex */
+ mutex = &(fd_table[fd]->mutex);
+ }
+ pthread_mutex_unlock(mutex);
+ break;
+ default:
+ ret = fd_basic_lock(fd, FD_RDWR, mutex, NULL);
+ if (ret == OK) {
+ realfd = fd_table[fd]->fd;
+ flags = fd_table[fd]->flags;
+ pthread_mutex_unlock(mutex);
+ if ((entry = fd_free(fd)) == NULL) {
+ ret = fd_table[fd]->ops->close(realfd, flags);
+ } else {
+ fd_basic_basic_unlock(entry, FD_RDWR);
+ pthread_mutex_unlock(&entry->mutex);
+ /* Note: entry->mutex = mutex */
}
- return(ret);
+ fd_unlock(fd, FD_RDWR);
+ } else {
+ pthread_mutex_unlock(mutex);
+ }
+ break;
+ }
+ } else {
+ /* Don't bother creating a table entry */
+ pthread_mutex_unlock(&fd_table_mutex);
+ ret = machdep_sys_close(fd);
+ }
+ if( ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return(ret);
}
/* ==========================================================================
* fd_basic_dup()
*
- * Might need to do more than just what's below.
+ *
+ * This is a MAJOR guess!! I don't know if the mutex unlock is valid
+ * in the BIG picture. But it seems to be needed to avoid deadlocking
+ * with ourselves when we try to close the duped file descriptor.
*/
static inline void fd_basic_dup(int fd, int newfd)
{
fd_table[newfd]->next = fd_table[fd]->next;
fd_table[fd]->next = fd_table[newfd];
+ fd_table[newfd] = fd_table[fd];
fd_table[fd]->count++;
+ pthread_mutex_unlock(&fd_table[newfd]->next->mutex);
+
}
/* ==========================================================================
* dup2()
*
- * Always lock the lower number fd first to avoid deadlocks.
- * newfd must be locked by hand so it can be closed if it is open,
- * or it won't be opened while dup is in progress.
+ * Note: Always lock the lower number fd first to avoid deadlocks.
+ * Note: Leave the newfd locked. It will be unlocked at close() time.
+ * Note: newfd must be locked by hand so it can be closed if it is open,
+ * or it won't be opened while dup is in progress.
*/
int dup2(fd, newfd)
{
+ struct fd_table_entry * entry;
+ pthread_mutex_t *mutex;
union fd_data realfd;
- semaphore *lock;
int ret, flags;
+ if ((ret = fd_check_entry(newfd)) != OK)
+ return ret;
+
if (newfd < dtablesize) {
if (fd < newfd) {
- if ((ret = fd_lock(fd, FD_RDWR)) == OK) {
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
/* Need to lock the newfd by hand */
- lock = &(fd_table[newfd]->lock);
- while(SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
- }
+ mutex = &(fd_table[newfd]->mutex);
+ pthread_mutex_lock(mutex);
/* Is it inuse */
- if (fd_basic_lock(newfd, FD_RDWR, lock) == OK) {
+ if (fd_basic_lock(newfd, FD_RDWR, mutex, NULL) == OK) {
+ realfd = fd_table[newfd]->fd;
+ flags = fd_table[newfd]->flags;
/* free it and check close status */
- flags = fd_table[fd]->flags;
- realfd = fd_table[fd]->fd;
- if (fd_free(fd) == OK) {
- ret = fd_table[fd]->ops->close(realfd, flags);
+ if ((entry = fd_free(newfd)) == NULL) {
+ entry = fd_table[newfd];
+ entry->ops->close(realfd, flags);
+ if (entry->r_queue.q_next) {
+ if (fd_table[fd]->next) {
+ fd_table[fd]->r_queue.q_last->next =
+ entry->r_queue.q_next;
+ } else {
+ fd_table[fd]->r_queue.q_next =
+ entry->r_queue.q_next;
+ }
+ fd_table[fd]->r_queue.q_last =
+ entry->r_queue.q_last;
+ }
+ if (entry->w_queue.q_next) {
+ if (fd_table[fd]->next) {
+ fd_table[fd]->w_queue.q_last->next =
+ entry->w_queue.q_next;
+ } else {
+ fd_table[fd]->w_queue.q_next =
+ entry->w_queue.q_next;
+ }
+ fd_table[fd]->w_queue.q_last =
+ entry->w_queue.q_last;
+ }
+ entry->r_queue.q_next = NULL;
+ entry->w_queue.q_next = NULL;
+ entry->r_queue.q_last = NULL;
+ entry->w_queue.q_last = NULL;
+ entry->r_owner = NULL;
+ entry->w_owner = NULL;
+ ret = OK;
} else {
- /* Lots of work to do */
+ fd_basic_basic_unlock(entry, FD_RDWR);
+ pthread_mutex_unlock(&entry->mutex);
+ /* Note: entry->mutex = mutex */
}
}
fd_basic_dup(fd, newfd);
@@ -460,27 +901,56 @@ int dup2(fd, newfd)
fd_unlock(fd, FD_RDWR);
} else {
/* Need to lock the newfd by hand */
- lock = &(fd_table[newfd]->lock);
- while(SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
- }
- if ((ret = fd_lock(fd, FD_RDWR)) == OK) {
- }
- /* Is it inuse */
- if ((ret = fd_basic_lock(newfd, FD_RDWR, lock)) == OK) {
- /* free it and check close status */
- flags = fd_table[fd]->flags;
- realfd = fd_table[fd]->fd;
- if (fd_free(fd) == OK) {
- ret = fd_table[fd]->ops->close(realfd, flags);
- } else {
- /* Lots of work to do */
- }
+ mutex = &(fd_table[newfd]->mutex);
+ pthread_mutex_lock(mutex);
- fd_basic_dup(fd, newfd);
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ /* Is newfd inuse */
+ if ((ret = fd_basic_lock(newfd, FD_RDWR, mutex, NULL)) == OK) {
+ realfd = fd_table[newfd]->fd;
+ flags = fd_table[newfd]->flags;
+ /* free it and check close status */
+ if ((entry = fd_free(newfd)) == NULL) {
+ entry = fd_table[newfd];
+ entry->ops->close(realfd, flags);
+ if (entry->r_queue.q_next) {
+ if (fd_table[fd]->next) {
+ fd_table[fd]->r_queue.q_last->next =
+ entry->r_queue.q_next;
+ } else {
+ fd_table[fd]->r_queue.q_next =
+ entry->r_queue.q_next;
+ }
+ fd_table[fd]->r_queue.q_last =
+ entry->r_queue.q_last;
+ }
+ if (entry->w_queue.q_next) {
+ if (fd_table[fd]->next) {
+ fd_table[fd]->w_queue.q_last->next =
+ entry->w_queue.q_next;
+ } else {
+ fd_table[fd]->w_queue.q_next =
+ entry->w_queue.q_next;
+ }
+ fd_table[fd]->w_queue.q_last =
+ entry->w_queue.q_last;
+ }
+ entry->r_queue.q_next = NULL;
+ entry->w_queue.q_next = NULL;
+ entry->r_queue.q_last = NULL;
+ entry->w_queue.q_last = NULL;
+ entry->r_owner = NULL;
+ entry->w_owner = NULL;
+ ret = OK;
+ } else {
+ fd_basic_basic_unlock(entry, FD_RDWR);
+ pthread_mutex_unlock(&entry->mutex);
+ /* Note: entry->mutex = mutex */
+ }
+ fd_basic_dup(fd, newfd);
+ }
fd_unlock(fd, FD_RDWR);
}
- SEMAPHORE_RESET(lock);
}
} else {
ret = NOTOK;
@@ -496,7 +966,7 @@ int dup(int fd)
{
int ret;
- if ((ret = fd_lock(fd, FD_RDWR)) == OK) {
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
ret = fd_allocate();
fd_basic_dup(fd, ret);
fd_unlock(fd, FD_RDWR);
@@ -515,7 +985,7 @@ int fcntl(int fd, int cmd, ...)
va_list ap;
flags = 0;
- if ((ret = fd_lock(fd, FD_RDWR)) == OK) {
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
va_start(ap, cmd);
switch(cmd) {
case F_DUPFD:
@@ -523,10 +993,8 @@ int fcntl(int fd, int cmd, ...)
fd_basic_dup(va_arg(ap, int), ret);
break;
case F_SETFD:
- flags = va_arg(ap, int);
+ break;
case F_GETFD:
- ret = fd_table[fd]->ops->fcntl(fd_table[fd]->fd,
- fd_table[fd]->flags, cmd, flags | __FD_NONBLOCK);
break;
case F_GETFL:
ret = fd_table[fd]->flags;
@@ -560,3 +1028,54 @@ int fcntl(int fd, int cmd, ...)
}
return(ret);
}
+
+/* ==========================================================================
+ * getdtablesize()
+ */
+int getdtablesize()
+{
+ return dtablesize;
+}
+
+/* ==========================================================================
+ * ioctl()
+ *
+ * Really want to do a real implementation of this that parses the args ala
+ * fcntl(), above, but it will have to be a totally platform-specific,
+ * nightmare-on-elm-st-style sort of thing. Might even deserve its own file
+ * ala select()... --SNL
+ */
+#ifndef ioctl_request_type
+#define ioctl_request_type unsigned long /* Dummy patch by Monty */
+#endif
+
+int
+ioctl(int fd, ioctl_request_type request, ...)
+{
+ int ret;
+ pthread_va_list ap;
+ caddr_t arg;
+
+ va_start( ap, request ); /* Get the arg */
+ arg = va_arg(ap,caddr_t);
+ va_end( ap );
+
+ if (fd < 0 || fd >= dtablesize)
+ ret = NOTOK;
+ else if (fd_table[fd]->fd.i == NOTOK)
+ ret = machdep_sys_ioctl(fd, request, arg);
+ else if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ ret = machdep_sys_ioctl(fd_table[fd]->fd.i, request, arg);
+ if( ret == 0 && request == FIONBIO ) {
+ /* Properly set NONBLOCK flag */
+ int v = *(int *)arg;
+ if( v )
+ fd_table[fd]->flags |= __FD_NONBLOCK;
+ else
+ fd_table[fd]->flags &= ~__FD_NONBLOCK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return ret;
+}
+
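
The FIONBIO branch above is what keeps the library's __FD_NONBLOCK flag in
step with the kernel's notion of the descriptor. For reference, the standard
way a caller flips that switch (a hypothetical helper; FIONBIO takes a
pointer to an int, nonzero meaning non-blocking):

#include <sys/ioctl.h>

int set_nonblocking(int fd, int on)
{
	/* returns 0 on success, -1 with errno set on failure */
	return ioctl(fd, FIONBIO, &on);
}
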
diff --git a/lib/libpthread/pthreads/fd_kern.c b/lib/libpthread/pthreads/fd_kern.c
index ed4f5d008fb..be2fc5ec90f 100644
--- a/lib/libpthread/pthreads/fd_kern.c
+++ b/lib/libpthread/pthreads/fd_kern.c
@@ -39,10 +39,12 @@
*/
#ifndef lint
-static const char rcsid[] = "$Id: fd_kern.c,v 1.1 1995/10/18 08:43:04 deraadt Exp $ $provenid: fd_kern.c,v 1.7 1994/02/07 02:18:49 proven Exp $";
+static const char rcsid[] = "$Id: fd_kern.c,v 1.2 1998/07/21 19:48:02 peter Exp $";
#endif
#include <pthread.h>
+#include <unistd.h>
+#include <sys/compat.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/stat.h>
@@ -53,12 +55,227 @@ static const char rcsid[] = "$Id: fd_kern.c,v 1.1 1995/10/18 08:43:04 deraadt Ex
#include <fcntl.h>
#include <errno.h>
#include <pthread/posix.h>
+#include <string.h>
+
+#if defined (HAVE_SYSCALL_SENDTO) && !defined (HAVE_SYSCALL_SEND)
+
+pthread_ssize_t machdep_sys_send (int fd, const void *msg, size_t len,
+ int flags)
+{
+ return machdep_sys_sendto (fd, msg, len, flags,
+ (const struct sockaddr *) 0, 0);
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_RECVFROM) && !defined (HAVE_SYSCALL_RECV)
+
+pthread_ssize_t machdep_sys_recv (int fd, void *buf, size_t len, int flags)
+{
+ return machdep_sys_recvfrom (fd, buf, len, flags,
+ (struct sockaddr *) 0, (int *) 0);
+}
+
+#endif
+
+/* ==========================================================================
+ * Check if there is any signal which must be handled. Added by Monty
+ * This could be somewhat system dependent but it should work.
+ */
+
+static int fd_check_if_pending_signal(struct pthread *pthread)
+{
+ int i;
+ unsigned long *pending,*mask;
+ if (!pthread->sigcount)
+ return 0;
+ pending= (unsigned long*) &pthread->sigpending;
+ mask= (unsigned long*) &pthread->sigmask;
+
+ for (i=0 ; i < sizeof(pthread->sigpending)/sizeof(unsigned long); i++)
+ {
+ if (*pending && (*mask ^ (unsigned) ~0L))
+ return 1;
+ pending++;
+ mask++;
+ }
+ return 0;
+}
/* ==========================================================================
* Variables used by both fd_kern_poll and fd_kern_wait
*/
-static struct pthread *fd_wait_read, *fd_wait_write;
-static fd_set fd_set_read, fd_set_write;
+struct pthread_queue fd_wait_read = PTHREAD_QUEUE_INITIALIZER;
+struct pthread_queue fd_wait_write = PTHREAD_QUEUE_INITIALIZER;
+struct pthread_queue fd_wait_select = PTHREAD_QUEUE_INITIALIZER;
+
+static struct timeval __fd_kern_poll_timeout = { 0, 0 }; /* Moved by monty */
+extern struct timeval __fd_kern_wait_timeout;
+extern volatile sig_atomic_t sig_to_process;
+
+/*
+ * ==========================================================================
+ * Do a select if there is something to wait for.
+ * This is a combination of the old fd_kern_poll() and fd_kern_wait().
+ * Return 1 if nothing to do.
+ */
+
+static int fd_kern_select(struct timeval *timeout)
+{
+ fd_set fd_set_read, fd_set_write, fd_set_except;
+ struct pthread *pthread, *deq;
+ int count, i;
+
+ if (!fd_wait_read.q_next && !fd_wait_write.q_next && !fd_wait_select.q_next)
+ return 1; /* Nothing to do */
+
+ FD_ZERO(&fd_set_read);
+ FD_ZERO(&fd_set_write);
+ FD_ZERO(&fd_set_except);
+ for (pthread = fd_wait_read.q_next; pthread; pthread = pthread->next)
+ FD_SET(pthread->data.fd.fd, &fd_set_read);
+ for (pthread = fd_wait_write.q_next; pthread; pthread = pthread->next)
+ FD_SET(pthread->data.fd.fd, &fd_set_write);
+ for (pthread = fd_wait_select.q_next; pthread; pthread = pthread->next)
+ {
+ for (i = 0; i < pthread->data.select_data->nfds; i++) {
+ if (FD_ISSET(i, &pthread->data.select_data->exceptfds))
+ FD_SET(i, &fd_set_except);
+ if (FD_ISSET(i, &pthread->data.select_data->writefds))
+ FD_SET(i, &fd_set_write);
+ if (FD_ISSET(i, &pthread->data.select_data->readfds))
+ FD_SET(i, &fd_set_read);
+ }
+ }
+
+ /* Turn off interrupts for real while we set the timer. */
+
+ if (timeout == &__fd_kern_wait_timeout)
+ { /* from fd_kern_wait() */
+ sigset_t sig_to_block, oset;
+ sigfillset(&sig_to_block);
+ machdep_sys_sigprocmask(SIG_BLOCK, &sig_to_block, &oset);
+
+ machdep_unset_thread_timer(NULL);
+ __fd_kern_wait_timeout.tv_usec = 0;
+ __fd_kern_wait_timeout.tv_sec = (sig_to_process) ? 0 : 3600;
+
+ machdep_sys_sigprocmask(SIG_UNBLOCK, &sig_to_block, &oset);
+ }
+ /*
+ * There is a small but finite chance that an interrupt will
+ * occure between the unblock and the select. Because of this
+ * sig_handler_real() sets the value of __fd_kern_wait_timeout
+ * to zero causing the select to do a poll instead of a wait.
+ */
+
+ while ((count = machdep_sys_select(dtablesize, &fd_set_read,
+ &fd_set_write, &fd_set_except,
+ timeout)) < OK)
+ {
+ if (count == -EINTR)
+ return 0;
+ PANIC();
+ }
+
+ for (pthread = fd_wait_read.q_next; pthread; ) {
+ if (count && FD_ISSET(pthread->data.fd.fd, &fd_set_read) ||
+ fd_check_if_pending_signal(pthread))
+ {
+ if (FD_ISSET(pthread->data.fd.fd, &fd_set_read))
+ count--;
+ deq = pthread;
+ pthread = pthread->next;
+ pthread_queue_remove(&fd_wait_read, deq);
+ if (SET_PF_DONE_EVENT(deq) == OK) {
+ pthread_prio_queue_enq(pthread_current_prio_queue, deq);
+ deq->state = PS_RUNNING;
+ }
+ continue;
+ }
+ pthread = pthread->next;
+ }
+
+ for (pthread = fd_wait_write.q_next; pthread; ) {
+ if (count && FD_ISSET(pthread->data.fd.fd, &fd_set_write) ||
+ fd_check_if_pending_signal(pthread))
+ {
+ if (FD_ISSET(pthread->data.fd.fd, &fd_set_read))
+ count--;
+ deq = pthread;
+ pthread = pthread->next;
+ pthread_queue_remove(&fd_wait_write, deq);
+ if (SET_PF_DONE_EVENT(deq) == OK) {
+ pthread_prio_queue_enq(pthread_current_prio_queue, deq);
+ deq->state = PS_RUNNING;
+ }
+ continue;
+ }
+ pthread = pthread->next;
+ }
+
+ for (pthread = fd_wait_select.q_next; pthread; )
+ {
+ int found_one=0; /* Loop fixed by monty */
+ if (count)
+ {
+ fd_set tmp_readfds, tmp_writefds, tmp_exceptfds;
+ memcpy(&tmp_readfds, &pthread->data.select_data->readfds,
+ sizeof(fd_set));
+ memcpy(&tmp_writefds, &pthread->data.select_data->writefds,
+ sizeof(fd_set));
+ memcpy(&tmp_exceptfds, &pthread->data.select_data->exceptfds,
+ sizeof(fd_set));
+
+ for (i = 0; i < pthread->data.select_data->nfds; i++) {
+ if (FD_ISSET(i, &tmp_exceptfds))
+ {
+ if (! FD_ISSET(i, &fd_set_except))
+ FD_CLR(i, &tmp_exceptfds);
+ else
+ found_one=1;
+ }
+ if (FD_ISSET(i, &tmp_writefds))
+ {
+ if (! FD_ISSET(i, &fd_set_write))
+ FD_CLR(i, &tmp_writefds);
+ else
+ found_one=1;
+ }
+ if (FD_ISSET(i, &tmp_readfds))
+ {
+ if (! FD_ISSET(i, &fd_set_read))
+ FD_CLR(i, &tmp_readfds);
+ else
+ found_one=1;
+ }
+ }
+ if (found_one)
+ {
+ memcpy(&pthread->data.select_data->readfds, &tmp_readfds,
+ sizeof(fd_set));
+ memcpy(&pthread->data.select_data->writefds, &tmp_writefds,
+ sizeof(fd_set));
+ memcpy(&pthread->data.select_data->exceptfds, &tmp_exceptfds,
+ sizeof(fd_set));
+ }
+ }
+ if (found_one || fd_check_if_pending_signal(pthread))
+ {
+ deq = pthread;
+ pthread = pthread->next;
+ pthread_queue_remove(&fd_wait_select, deq);
+ if (SET_PF_DONE_EVENT(deq) == OK) {
+ pthread_prio_queue_enq(pthread_current_prio_queue, deq);
+ deq->state = PS_RUNNING;
+ }
+ } else {
+ pthread = pthread->next;
+ }
+ }
+ return 0;
+}
+
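
fd_kern_select() now backs both entry points below: fd_kern_poll() hands it
a zeroed timeval, so the select() degenerates into a non-blocking poll, while
fd_kern_wait() arms a long timeout and really sleeps. A stripped-down sketch
of that zero-timeout idiom with plain libc select() (the helper name is
invented):

#include <sys/select.h>
#include <sys/time.h>

int fd_is_readable(int fd)
{
	fd_set rfds;
	struct timeval tv = { 0, 0 };	/* zero timeout: poll, don't block */

	FD_ZERO(&rfds);
	FD_SET(fd, &rfds);
	/* select() returns the count of ready descriptors, 0 on timeout */
	return select(fd + 1, &rfds, NULL, NULL, &tv) > 0;
}
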
/* ==========================================================================
* fd_kern_poll()
@@ -67,152 +284,27 @@ static fd_set fd_set_read, fd_set_write;
*
* This function uses a linked list of waiting pthreads, NOT a queue.
*/
-static semaphore fd_wait_lock = SEMAPHORE_CLEAR;
void fd_kern_poll()
{
- struct timeval __fd_kern_poll_timeout = { 0, 0 };
- struct pthread **pthread;
- semaphore *lock;
- int count;
-
- /* If someone has the lock then they are in RUNNING state, just return */
- lock = &fd_wait_lock;
- if (SEMAPHORE_TEST_AND_SET(lock)) {
- return;
- }
- if (fd_wait_read || fd_wait_write) {
- for (pthread = &fd_wait_read; *pthread; pthread = &((*pthread)->next)) {
- FD_SET((*pthread)->fd, &fd_set_read);
- }
- for (pthread = &fd_wait_write; *pthread; pthread = &((*pthread)->next)) {
- FD_SET((*pthread)->fd, &fd_set_write);
- }
-
- if ((count = machdep_sys_select(dtablesize, &fd_set_read,
- &fd_set_write, NULL, &__fd_kern_poll_timeout)) < OK) {
- if (count == -EINTR) {
- SEMAPHORE_RESET(lock);
- return;
- }
- PANIC();
- }
-
- for (pthread = &fd_wait_read; count && *pthread; ) {
- if (FD_ISSET((*pthread)->fd, &fd_set_read)) {
- /* Get lock on thread */
-
- (*pthread)->state = PS_RUNNING;
- *pthread = (*pthread)->next;
- count--;
- continue;
- }
- pthread = &((*pthread)->next);
- }
-
- for (pthread = &fd_wait_write; count && *pthread; ) {
- if (FD_ISSET((*pthread)->fd, &fd_set_write)) {
- semaphore *plock;
-
- /* Get lock on thread */
- plock = &(*pthread)->lock;
- if (!(SEMAPHORE_TEST_AND_SET(plock))) {
- /* Thread locked, skip it. */
- (*pthread)->state = PS_RUNNING;
- *pthread = (*pthread)->next;
- SEMAPHORE_RESET(plock);
- }
- count--;
- continue;
- }
- pthread = &((*pthread)->next);
- }
- }
- SEMAPHORE_RESET(lock);
+ fd_kern_select(&__fd_kern_poll_timeout);
}
+
/* ==========================================================================
* fd_kern_wait()
*
* Called when there is no active thread to run.
*/
-extern struct timeval __fd_kern_wait_timeout;
void fd_kern_wait()
{
- struct pthread **pthread;
- sigset_t sig_to_block;
- int count;
-
- if (fd_wait_read || fd_wait_write) {
- for (pthread = &fd_wait_read; *pthread; pthread = &((*pthread)->next)) {
- FD_SET((*pthread)->fd, &fd_set_read);
- }
- for (pthread = &fd_wait_write; *pthread; pthread = &((*pthread)->next)) {
- FD_SET((*pthread)->fd, &fd_set_write);
- }
-
- /* Turn off interrupts for real while we set the timer. */
-
- sigfillset(&sig_to_block);
- sigprocmask(SIG_BLOCK, &sig_to_block, NULL);
-
- machdep_unset_thread_timer();
- __fd_kern_wait_timeout.tv_usec = 0;
- __fd_kern_wait_timeout.tv_sec = 3600;
-
- sigprocmask(SIG_UNBLOCK, &sig_to_block, NULL);
-
- /*
- * There is a small but finite chance that an interrupt will
- * occure between the unblock and the select. Because of this
- * sig_handler_real() sets the value of __fd_kern_wait_timeout
- * to zero causing the select to do a poll instead of a wait.
- */
-
- while ((count = machdep_sys_select(dtablesize, &fd_set_read,
- &fd_set_write, NULL, &__fd_kern_wait_timeout)) < OK) {
- if (count == -EINTR) {
- return;
- }
- PANIC();
- }
-
- for (pthread = &fd_wait_read; count && *pthread; ) {
- if (FD_ISSET((*pthread)->fd, &fd_set_read)) {
- /* Get lock on thread */
-
- (*pthread)->state = PS_RUNNING;
- *pthread = (*pthread)->next;
- count--;
- continue;
- }
- pthread = &((*pthread)->next);
- }
-
- for (pthread = &fd_wait_write; count && *pthread; ) {
- if (FD_ISSET((*pthread)->fd, &fd_set_write)) {
- semaphore *plock;
-
- /* Get lock on thread */
- plock = &(*pthread)->lock;
- if (!(SEMAPHORE_TEST_AND_SET(plock))) {
- /* Thread locked, skip it. */
- (*pthread)->state = PS_RUNNING;
- *pthread = (*pthread)->next;
- SEMAPHORE_RESET(plock);
- }
- count--;
- continue;
- }
- pthread = &((*pthread)->next);
- }
- } else {
- /* No threads, waiting on I/O, do a sigsuspend */
- sig_handler_pause();
- }
+ if (fd_kern_select(&__fd_kern_wait_timeout))
+ /* No threads, waiting on I/O, do a sigsuspend */
+ sig_handler_pause();
}
+
/* ==========================================================================
* Special Note: All operations return the errno as a negative of the errno
* listed in errno.h
@@ -221,143 +313,231 @@ void fd_kern_wait()
/* ==========================================================================
* read()
*/
-ssize_t __fd_kern_read(int fd, int flags, void *buf, size_t nbytes)
+pthread_ssize_t __fd_kern_read(union fd_data fd_data, int flags, void *buf,
+ size_t nbytes, struct timespec * timeout)
{
- semaphore *lock, *plock;
- int ret;
-
- while ((ret = machdep_sys_read(fd, buf, nbytes)) < OK) {
- if (ret == -EWOULDBLOCK) {
- /* Lock queue */
- lock = &fd_wait_lock;
- while (SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
- }
-
- /* Lock pthread */
- plock = &(pthread_run->lock);
- while (SEMAPHORE_TEST_AND_SET(plock)) {
- pthread_yield();
- }
-
- /* queue pthread for a FDR_WAIT */
- pthread_run->next = fd_wait_read;
- fd_wait_read = pthread_run;
- pthread_run->fd = fd;
- SEMAPHORE_RESET(lock);
- reschedule(PS_FDR_WAIT);
- } else {
- pthread_run->error = -ret;
- ret = NOTOK;
- break;
- }
+ int fd = fd_data.i;
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ while ((ret = machdep_sys_read(fd, buf, nbytes)) < OK) {
+ if (!(flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd;
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ SET_PF_AT_CANCEL_POINT(pthread_run);
+ pthread_resched_resume(PS_FDR_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret= NOTOK;
+ break;
}
- return(ret);
+ pthread_sched_resume();
+ } else {
+ SET_PF_AT_CANCEL_POINT(pthread_run);
+ pthread_resched_resume(PS_FDR_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
+ } else {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ break;
+ }
+ }
+ return(ret);
}
/* ==========================================================================
* readv()
*/
-int __fd_kern_readv(int fd, int flags, struct iovec *iov, int iovcnt)
+int __fd_kern_readv(union fd_data fd_data, int flags, const struct iovec *iov,
+ int iovcnt, struct timespec * timeout)
{
- semaphore *lock, *plock;
- int ret;
-
- while ((ret = machdep_sys_readv(fd, iov, iovcnt)) < OK) {
- if (ret == -EWOULDBLOCK) {
- /* Lock queue */
- lock = &fd_wait_lock;
- while (SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
- }
-
- /* Lock pthread */
- plock = &(pthread_run->lock);
- while (SEMAPHORE_TEST_AND_SET(plock)) {
- pthread_yield();
- }
-
- /* queue pthread for a FDR_WAIT */
- pthread_run->next = fd_wait_read;
- fd_wait_read = pthread_run;
- pthread_run->fd = fd;
- SEMAPHORE_RESET(lock);
- reschedule(PS_FDR_WAIT);
- } else {
- pthread_run->error = -ret;
- ret = NOTOK;
- break;
- }
+ int fd = fd_data.i;
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ while ((ret = machdep_sys_readv(fd, iov, iovcnt)) < OK) {
+ if (!(flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ pthread_run->data.fd.fd = fd;
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ SET_PF_AT_CANCEL_POINT(pthread_run);
+ pthread_resched_resume(PS_FDW_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = NOTOK;
+ break;
}
- return(ret);
+ pthread_sched_resume();
+ } else {
+ SET_PF_AT_CANCEL_POINT(pthread_run);
+ pthread_resched_resume(PS_FDW_WAIT);
+ CLEAR_PF_AT_CANCEL_POINT(pthread_run);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
+ } else {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ break;
+ }
+ }
+ return(ret);
}
/* ==========================================================================
* write()
*/
-ssize_t __fd_kern_write(int fd, int flags, const void *buf, size_t nbytes)
+pthread_ssize_t __fd_kern_write(union fd_data fd_data, int flags,
+ const void *buf, size_t nbytes, struct timespec * timeout)
{
- semaphore *lock, *plock;
- int ret;
-
- while ((ret = machdep_sys_write(fd, buf, nbytes)) < OK) {
- if (pthread_run->error == -EWOULDBLOCK) {
- /* Lock queue */
- lock = &fd_wait_lock;
- while (SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
- }
-
- /* Lock pthread */
- plock = &(pthread_run->lock);
- while (SEMAPHORE_TEST_AND_SET(plock)) {
- pthread_yield();
- }
-
- /* queue pthread for a FDW_WAIT */
- pthread_run->next = fd_wait_write;
- fd_wait_write = pthread_run;
- pthread_run->fd = fd;
- SEMAPHORE_RESET(lock);
- reschedule(PS_FDW_WAIT);
- } else {
- pthread_run->error = ret;
- break;
- }
+ int fd = fd_data.i;
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ while ((ret = machdep_sys_write(fd, buf, nbytes)) < OK) {
+ if (!(flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ pthread_run->data.fd.fd = fd;
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = NOTOK;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDW_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
+ } else {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ break;
}
- return(ret);
+ }
+ return(ret);
}
/* ==========================================================================
* writev()
*/
-int __fd_kern_writev(int fd, int flags, struct iovec *iov, int iovcnt)
+int __fd_kern_writev(union fd_data fd_data, int flags, const struct iovec *iov,
+ int iovcnt, struct timespec * timeout)
{
- semaphore *lock, *plock;
+ int fd = fd_data.i;
int ret;
+ pthread_run->sighandled=0; /* Added by monty */
while ((ret = machdep_sys_writev(fd, iov, iovcnt)) < OK) {
- if (pthread_run->error == -EWOULDBLOCK) {
- /* Lock queue */
- lock = &fd_wait_lock;
- while (SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
- }
-
- /* Lock pthread */
- plock = &(pthread_run->lock);
- while (SEMAPHORE_TEST_AND_SET(plock)) {
- pthread_yield();
- }
+ if (!(flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
/* queue pthread for a FDW_WAIT */
- pthread_run->next = fd_wait_write;
- fd_wait_write = pthread_run;
- pthread_run->fd = fd;
- SEMAPHORE_RESET(lock);
- reschedule(PS_FDW_WAIT);
+ pthread_run->data.fd.fd = fd;
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = NOTOK;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDW_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
} else {
- pthread_run->error = ret;
break;
}
}
@@ -368,25 +548,38 @@ int __fd_kern_writev(int fd, int flags, struct iovec *iov, int iovcnt)
* For blocking version we really should set an interrupt
* fcntl()
*/
-int __fd_kern_fcntl(int fd, int flags, int cmd, int arg)
+int __fd_kern_fcntl(union fd_data fd_data, int flags, int cmd, int arg)
{
+ int fd = fd_data.i;
+
return(machdep_sys_fcntl(fd, cmd, arg));
}
/* ==========================================================================
* close()
*/
-int __fd_kern_close(int fd, int flags)
+int __fd_kern_close(union fd_data fd_data, int flags)
{
+ int fd = fd_data.i;
+
return(machdep_sys_close(fd));
}
/* ==========================================================================
* lseek()
+ * Assume that the error number is in the range 0-255, to get a bigger
+ * range of seek. ; Monty
*/
-int __fd_kern_lseek(int fd, int flags, off_t offset, int whence)
+off_t __fd_kern_lseek(union fd_data fd_data, int f, off_t offset, int whence)
{
- return(machdep_sys_lseek(fd, offset, whence));
+ int fd = fd_data.i;
+ off_t ret=machdep_sys_lseek(fd, offset, whence);
+ if ((long) ret < 0L && (long) ret >= -255L)
+ {
+ SET_ERRNO(-ret);
+ ret= NOTOK;
+ }
+ return ret;
}
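
The comment above documents a convention worth making explicit: machdep_sys_lseek() returns the kernel's value untranslated, and only returns in [-255, -1] are treated as errno codes, so very large offsets whose bit patterns look negative are not misread as failures. A hypothetical decoder under that same assumption:

    #include <errno.h>
    #include <sys/types.h>

    /* Hypothetical helper: split a raw kernel lseek() return into
     * (offset, errno) under the 0-255 error-range convention above. */
    static off_t decode_lseek_result(off_t raw)
    {
        if ((long)raw < 0L && (long)raw >= -255L) {
            errno = (int)-(long)raw;   /* e.g. -EINVAL becomes EINVAL */
            return (off_t)-1;
        }
        return raw;                    /* a real, possibly huge, offset */
    }
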
/*
@@ -397,7 +590,7 @@ extern machdep_sys_close();
/* Normal file operations */
static struct fd_ops __fd_kern_ops = {
__fd_kern_write, __fd_kern_read, __fd_kern_close, __fd_kern_fcntl,
- __fd_kern_readv, __fd_kern_writev, __fd_kern_lseek
+ __fd_kern_writev, __fd_kern_readv, __fd_kern_lseek, 1
};
/* NFS file operations */
@@ -416,7 +609,7 @@ static struct fd_ops __fd_kern_ops = {
*
* This is not done yet
*
- * A reqular file on the local system needs no special treatment.
+ * A regular file on the local system needs no special treatment.
*/
int open(const char *path, int flags, ...)
{
@@ -440,8 +633,8 @@ int open(const char *path, int flags, ...)
if (!((fd_kern = machdep_sys_open(path, flags, mode)) < OK)) {
/* fstat the file to determine what type it is */
- if (fstat(fd_kern, &stat_buf)) {
-printf("error %d stating new fd %d\n", errno, fd);
+ if (machdep_sys_fstat(fd_kern, &stat_buf)) {
+ PANIC();
}
if (S_ISREG(stat_buf.st_mode)) {
fd_table[fd]->ops = &(__fd_kern_ops);
@@ -450,41 +643,421 @@ printf("error %d stating new fd %d\n", errno, fd);
fd_table[fd]->ops = &(__fd_kern_ops);
fd_table[fd]->type = FD_FULL_DUPLEX;
}
- fd_table[fd]->fd = fd_kern;
+ fd_table[fd]->fd.i = fd_kern;
return(fd);
}
- pthread_run->error = - fd_kern;
fd_table[fd]->count = 0;
+ SET_ERRNO(-fd_kern);
}
return(NOTOK);
}
/* ==========================================================================
+ * create()
+ */
+int create(const char *path, mode_t mode)
+{
+ return creat (path, mode);
+}
+
+/* ==========================================================================
+ * creat()
+ */
+#undef creat
+
+int creat(const char *path, mode_t mode)
+{
+ return open (path, O_CREAT | O_TRUNC | O_WRONLY, mode);
+}
+
+/* ==========================================================================
+ * fchown()
+ */
+int fchown(int fd, uid_t owner, gid_t group)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK) {
+ if ((ret = machdep_sys_fchown(fd_table[fd]->fd.i, owner, group)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * fchmod()
+ */
+int fchmod(int fd, mode_t mode)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK) {
+ if ((ret = machdep_sys_fchmod(fd_table[fd]->fd.i, mode)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * ftruncate()
+ */
+int ftruncate(int fd, off_t length)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK) {
+ if ((ret = machdep_sys_ftruncate(fd_table[fd]->fd.i, length)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
+}
+
+#if defined (HAVE_SYSCALL_FLOCK)
+/* ==========================================================================
+ * flock()
+ *
+ * Added (mevans)
+ */
+int flock(int fd, int operation)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_flock(fd_table[fd]->fd.i,
+ operation)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
+#endif
+
+/* ==========================================================================
+ * pipe()
+ */
+int pipe(int fds[2])
+{
+ int kfds[2];
+ int ret;
+
+ if ((fds[0] = fd_allocate()) >= OK) {
+ if ((fds[1] = fd_allocate()) >= OK) {
+ if ((ret = machdep_sys_pipe(kfds)) >= OK) {
+ fd_table[fds[0]]->flags = machdep_sys_fcntl(kfds[0], F_GETFL, NULL);
+ machdep_sys_fcntl(kfds[0], F_SETFL, fd_table[fds[0]]->flags | __FD_NONBLOCK);
+ fd_table[fds[1]]->flags = machdep_sys_fcntl(kfds[1], F_GETFL, NULL);
+ machdep_sys_fcntl(kfds[1], F_SETFL, fd_table[fds[1]]->flags | __FD_NONBLOCK);
+
+ fd_table[fds[0]]->ops = &(__fd_kern_ops);
+ fd_table[fds[1]]->ops = &(__fd_kern_ops);
+
+ /* Not really full duplex but ... */
+ fd_table[fds[0]]->type = FD_FULL_DUPLEX;
+ fd_table[fds[1]]->type = FD_FULL_DUPLEX;
+
+ fd_table[fds[0]]->fd.i = kfds[0];
+ fd_table[fds[1]]->fd.i = kfds[1];
+
+ return(OK);
+ } else {
+ SET_ERRNO(-ret);
+ }
+ fd_table[fds[1]]->count = 0;
+ }
+ fd_table[fds[0]]->count = 0;
+ }
+ return(NOTOK);
+}
+
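Both pipe ends get the same treatment as any other kernel fd: the original fcntl flags are saved in the table entry and nonblocking mode is added on top, which is what lets fd_kern_reset() and fd_kern_fork() restore the outside-world view later. A standalone sketch of just that flag dance, using the portable O_NONBLOCK where the library uses its internal __FD_NONBLOCK:

    #include <fcntl.h>
    #include <unistd.h>

    /* Sketch: create a pipe and flip both ends to nonblocking,
     * remembering the original flags so they can be restored later. */
    static int nonblocking_pipe(int kfds[2], int saved_flags[2])
    {
        if (pipe(kfds) < 0)
            return -1;
        for (int i = 0; i < 2; i++) {
            saved_flags[i] = fcntl(kfds[i], F_GETFL, 0);
            if (saved_flags[i] < 0 ||
                fcntl(kfds[i], F_SETFL, saved_flags[i] | O_NONBLOCK) < 0)
                return -1;
        }
        return 0;
    }
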
+/* ==========================================================================
+ * fd_kern_reset()
+ * Change the fcntl blocking flag back to NONBLOCKING. This should only
+ * be called after a fork.
+ */
+void fd_kern_reset(int fd)
+{
+ switch (fd_table[fd]->type) {
+ case FD_TEST_HALF_DUPLEX:
+ machdep_sys_fcntl(fd_table[fd]->fd.i, F_SETFL,
+ fd_table[fd]->flags | __FD_NONBLOCK);
+ fd_table[fd]->type = FD_HALF_DUPLEX;
+ break;
+ case FD_TEST_FULL_DUPLEX:
+ machdep_sys_fcntl(fd_table[fd]->fd.i, F_SETFL,
+ fd_table[fd]->flags | __FD_NONBLOCK);
+ fd_table[fd]->type = FD_FULL_DUPLEX;
+ break;
+ default:
+ break;
+ }
+}
+
+/* ==========================================================================
* fd_kern_init()
*
* Assume the entry is locked before routine is invoked
*
* This may change. The problem is setting the fd to nonblocking changes
* the parents fd too, which may not be the desired result.
+ *
+ * Newly added feature: if the fd in question is a tty then we open it again
+ * and close the original, this way we don't have to worry about the
+ * fd being NONBLOCKING to the outside world.
*/
-static fd_kern_init_called = 0;
void fd_kern_init(int fd)
{
if ((fd_table[fd]->flags = machdep_sys_fcntl(fd, F_GETFL, NULL)) >= OK) {
+ if (isatty_basic(fd)) {
+ int new_fd;
+
+ if ((new_fd = machdep_sys_open(__ttyname_basic(fd), O_RDWR)) >= OK){
+ if (machdep_sys_dup2(new_fd, fd) == OK) {
+ /* Should print a warning */
+
+ /* Should also set the flags to that of opened outside of
+ process */
+ }
+ machdep_sys_close(new_fd);
+ }
+ }
+ /* We do these things regardless of the above results */
machdep_sys_fcntl(fd, F_SETFL, fd_table[fd]->flags | __FD_NONBLOCK);
fd_table[fd]->ops = &(__fd_kern_ops);
fd_table[fd]->type = FD_HALF_DUPLEX;
- fd_table[fd]->fd = fd;
+ fd_table[fd]->fd.i = fd;
fd_table[fd]->count = 1;
}
}
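
The tty reopen works because file status flags such as nonblocking mode live in the open-file description, which is shared with whichever process handed us the descriptor; a private open() of the same terminal gives this process its own description to flip. A hypothetical standalone rendering (the library's isatty_basic() and __ttyname_basic() are replaced with their libc equivalents):

    #include <fcntl.h>
    #include <unistd.h>

    /* Sketch: give fd a private open-file description if it is a
     * tty, then set nonblocking mode without affecting other
     * processes that share the original description. */
    static void privatize_and_unblock(int fd)
    {
        if (isatty(fd)) {
            char *name = ttyname(fd);
            int new_fd;
            if (name != NULL && (new_fd = open(name, O_RDWR)) >= 0) {
                dup2(new_fd, fd);      /* fd now refers to our copy */
                close(new_fd);
            }
        }
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
    }
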
/* ==========================================================================
+ * fd_kern_gettableentry()
+ *
+ * Remember: only return a file descriptor that I will modify later.
+ * Don't return file descriptors that aren't owned by the child, or don't
+ * have kernel operations.
+ */
+static int fd_kern_gettableentry(const int child, int fd)
+{
+ int i;
+
+ for (i = 0; i < dtablesize; i++) {
+ if (fd_table[i]) {
+ if (fd_table[i]->fd.i == fd) {
+ if (child) {
+ if ((fd_table[i]->type != FD_TEST_HALF_DUPLEX) &&
+ (fd_table[i]->type != FD_TEST_FULL_DUPLEX)) {
+ continue;
+ }
+ } else {
+ if ((fd_table[i]->type == FD_NT) ||
+ (fd_table[i]->type == FD_NIU)) {
+ continue;
+ }
+ }
+ /* Is it a kernel fd ? */
+ if ((!fd_table[i]->ops) ||
+ (fd_table[i]->ops->use_kfds != 1)) {
+ continue;
+ }
+ return(i);
+ }
+ }
+ }
+ return(NOTOK);
+}
+
+/* ==========================================================================
+ * fd_kern_exec()
+ *
+ * Fix up the fd_table such that (fd == fd_table[fd]->fd.i); this way
+ * the new image will be OK.
+ *
+ * Only touch those that won't be used by the parent if we're in a child
+ * otherwise fixup all.
+ *
+ * Returns:
+ * 0 no fixup necessary
+ * 1 fixup without problems
+ * 2 failed fixup on some descriptors, and clobbered them.
+ */
+int fd_kern_exec(const int child)
+{
+ int ret = 0;
+ int fd, i;
+
+ for (fd = 0; fd < dtablesize; fd++) {
+ if (fd_table[fd] == NULL) {
+ continue;
+ }
+ /* Is the fd already in use ? */
+ if (child) {
+ if ((fd_table[fd]->type != FD_TEST_HALF_DUPLEX) &&
+ (fd_table[fd]->type != FD_TEST_FULL_DUPLEX)) {
+ continue;
+ }
+ } else {
+ if ((fd_table[fd]->type == FD_NT) ||
+ (fd_table[fd]->type == FD_NIU)) {
+ continue;
+ }
+ }
+ /* Is it a kernel fd ? */
+ if ((!fd_table[fd]->ops) ||
+ (fd_table[fd]->ops->use_kfds != 1)) {
+ continue;
+ }
+ /* Does it match ? */
+ if (fd_table[fd]->fd.i == fd) {
+ continue;
+ }
+ /* OK, fixup entry: Read comments before changing. This isn't obvious */
+
+ /* i is the real file descriptor fd currently represents */
+ if (((i = fd_table[fd]->fd.i) >= dtablesize) || (i < 0)) {
+ /* This should never happen */
+ PANIC();
+ }
+
+ /*
+ * if the real file descriptor with the same number as the fake file
+ * descriptor number fd is actually in use by the program, we have
+ * to move it out of the way
+ */
+ if ((machdep_sys_fcntl(fd, F_GETFL, NULL)) >= OK) {
+ /* fd is busy */
+ int j;
+
+ /*
+ * j is the fake file descriptor that represents the real file
+ * descriptor that we want to move. This way the fake file
+ * descriptor fd can move its real file descriptor i such that
+ * fd == i.
+ */
+ if ((j = fd_kern_gettableentry(child, fd)) >= OK) {
+
+ /*
+ * Since both j and fd represent fake file descriptors, if j < fd
+ * then a previous pass should have set fd_table[j]->fd.i == j.
+ */
+ if (fd < j) {
+ if ((fd_table[j]->fd.i = machdep_sys_dup(fd)) < OK) {
+ /* Close j, there is nothing else we can do */
+ fd_table[j]->type = FD_NIU;
+ ret = 2;
+ }
+ } else {
+ /* This implies fd_table[j]->fd.i != j */
+ PANIC();
+ }
+ }
+ }
+
+ /*
+ * Here the real file descriptor i is set to equal the fake file
+ * descriptor fd.
+ */
+ machdep_sys_dup2(i, fd);
+
+ /*
+ * Now comes the really complicated part: UNDERSTAND before changing
+ *
+ * Here are the things this routine wants to do ...
+ *
+ * Case 1. The real file descriptor has only one fake file descriptor
+ * representing it.
+ * fd -> i, fd != i ===> fd -> fd, close(i)
+ * Example fd = 4, i = 2: then close(2), set fd -> i = 4
+ *
+ * Case 2. The real file descriptor has more than one fake file
+ * descriptor representing it, and this is the first fake file
+ * descriptor representing the real file descriptor
+ * fd -> i, fd' -> i, fd != i ===> fd -> fd, fd' -> fd, close(i)
+ *
+ * The problem is that achieving the above is very messy and difficult,
+ * but I should be able to take a short cut. If fd > i then there
+ * will be no need to ever move i. This is because the fake file
+ * descriptor foo that we would have wanted to represent the real
+ * file descriptor i has already been processed. If fd < i then by
+ * moving i to fd all subsequent fake file descriptors fd' should fall
+ * into the previous case and won't need additional adjusting.
+ *
+ * Does this break the above fd < j check? It shouldn't, because j
+ * is a fake file descriptor, and if j < fd then j has already moved
+ * its real file descriptor foo such that foo <= j, therefore foo < fd
+ * and not foo == fd; therefore j cannot represent the real file
+ * descriptor that fd wants to move to and be less than fd.
+ */
+ if (fd < i) {
+ fd_table[fd]->fd.i = fd;
+ machdep_sys_close(i);
+ }
+ if (ret < 1) {
+ ret = 1;
+ }
+ }
+ return(ret);
+}
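
The shuffle is easier to follow on a bare array. The sketch below is a deliberately simplified model of the invariant fd_kern_exec() establishes (slot number == kernel fd); it ignores the parent/child filtering, the fcntl busy-probe, and the dup() failure handling of the real routine:

    #include <unistd.h>

    /* table[fd] is the kernel fd backing slot fd, or -1 if unused.
     * After the loop every live slot satisfies table[fd] == fd,
     * mirroring what fd_kern_exec() arranges before execve(). */
    static void remap_for_exec(int table[], int n)
    {
        for (int fd = 0; fd < n; fd++) {
            int i = table[fd];              /* real fd behind slot fd */
            if (i < 0 || i == fd)
                continue;
            /* If kernel fd `fd' is busy backing a later slot j, move
             * that slot's descriptor out of the way first (Case 2). */
            for (int j = fd + 1; j < n; j++)
                if (table[j] == fd)
                    table[j] = dup(fd);
            dup2(i, fd);                    /* make real fd == slot fd */
            table[fd] = fd;
            if (fd < i)
                close(i);   /* when fd > i, kernel fd i may still back
                             * slot i (shared), so it stays open, as in
                             * the real routine */
        }
    }
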
+
+/* ==========================================================================
+ * fd_kern_fork()
+ */
+void fd_kern_fork()
+{
+ pthread_mutex_t *mutex;
+ int fd;
+
+ for (fd = 0; fd < dtablesize; fd++) {
+ if (fd_table[fd] == NULL) {
+ continue;
+ }
+ mutex = & (fd_table[fd]->mutex);
+ if (pthread_mutex_trylock(mutex)) {
+ continue;
+ }
+ if ((fd_table[fd]->r_owner) || (fd_table[fd]->w_owner)) {
+ pthread_mutex_unlock(mutex);
+ continue;
+ }
+ /* Is it a kernel fd ? */
+ if ((!fd_table[fd]->ops) || (fd_table[fd]->ops->use_kfds != 1)) {
+ pthread_mutex_unlock(mutex);
+ continue;
+ }
+ switch (fd_table[fd]->type) {
+ case FD_HALF_DUPLEX:
+ machdep_sys_fcntl(fd_table[fd]->fd.i, F_SETFL, fd_table[fd]->flags);
+ fd_table[fd]->type = FD_TEST_HALF_DUPLEX;
+ break;
+ case FD_FULL_DUPLEX:
+ machdep_sys_fcntl(fd_table[fd]->fd.i, F_SETFL, fd_table[fd]->flags);
+ fd_table[fd]->type = FD_TEST_FULL_DUPLEX;
+ break;
+ default:
+ break;
+ }
+ pthread_mutex_unlock(mutex);
+ }
+}
+
+/* ==========================================================================
* Here are the berkeley socket functions. These are not POSIX.
* ======================================================================= */
+#if defined (HAVE_SYSCALL_SOCKET) || defined (HAVE_SYSCALL_SOCKETCALL)
+
/* ==========================================================================
* socket()
*/
@@ -495,153 +1068,843 @@ int socket(int af, int type, int protocol)
if (!((fd = fd_allocate()) < OK)) {
if (!((fd_kern = machdep_sys_socket(af, type, protocol)) < OK)) {
- machdep_sys_fcntl(fd_kern, F_SETFL, __FD_NONBLOCK);
+ int tmp_flags;
+
+ tmp_flags = machdep_sys_fcntl(fd_kern, F_GETFL, 0);
+ machdep_sys_fcntl(fd_kern, F_SETFL, tmp_flags | __FD_NONBLOCK);
/* Should fstat the file to determine what type it is */
fd_table[fd]->ops = & __fd_kern_ops;
fd_table[fd]->type = FD_FULL_DUPLEX;
- fd_table[fd]->fd = fd_kern;
- fd_table[fd]->flags = 0;
+ fd_table[fd]->fd.i = fd_kern;
+ fd_table[fd]->flags = tmp_flags;
return(fd);
}
- pthread_run->error = - fd_kern;
fd_table[fd]->count = 0;
+ SET_ERRNO(-fd_kern);
}
return(NOTOK);
}
+#endif
+
+#if defined (HAVE_SYSCALL_BIND) || defined (HAVE_SYSCALL_SOCKETCALL)
+
/* ==========================================================================
* bind()
*/
int bind(int fd, const struct sockaddr *name, int namelen)
{
/* Not much to do in bind */
- semaphore *plock;
int ret;
- if ((ret = fd_lock(fd, FD_RDWR)) == OK) {
- if ((ret = machdep_sys_bind(fd_table[fd]->fd, name, namelen)) < OK) {
- pthread_run->error = - ret;
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_bind(fd_table[fd]->fd.i, name, namelen)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
}
fd_unlock(fd, FD_RDWR);
}
return(ret);
}
+#endif
+
+#if defined (HAVE_SYSCALL_CONNECT) || defined (HAVE_SYSCALL_SOCKETCALL)
+
/* ==========================================================================
* connect()
*/
int connect(int fd, const struct sockaddr *name, int namelen)
{
- semaphore *lock, *plock;
- struct sockaddr tmpname;
- int ret, tmpnamelen;
-
- if ((ret = fd_lock(fd, FD_RDWR)) == OK) {
- if ((ret = machdep_sys_connect(fd_table[fd]->fd, name, namelen)) < OK) {
- if ((ret == -EWOULDBLOCK) || (ret == -EINPROGRESS) ||
- (ret == -EALREADY)) {
- /* Lock queue */
- lock = &fd_wait_lock;
- while (SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
- }
+ struct sockaddr tmpname;
+ int ret, tmpnamelen;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_connect(fd_table[fd]->fd.i, name, namelen)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EINPROGRESS) ||
+ (ret == -EALREADY) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+ CLEAR_PF_DONE_EVENT(pthread_run);
+
+ tmpnamelen = sizeof(tmpname);
+ /* OK now lets see if it really worked */
+ if (((ret = machdep_sys_getpeername(fd_table[fd]->fd.i,
+ &tmpname, &tmpnamelen)) < OK) &&
+ (ret == -ENOTCONN))
+ {
+ /* Get the error, this function should not fail */
+ machdep_sys_getsockopt(fd_table[fd]->fd.i, SOL_SOCKET,
+ SO_ERROR, &ret, &tmpnamelen);
+ SET_ERRNO(ret); /* ret is already positive (mevans) */
+ ret = NOTOK;
+ }
+ } else {
+ if (ret < 0)
+ {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ }
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
- /* Lock pthread */
- plock = &(pthread_run->lock);
- while (SEMAPHORE_TEST_AND_SET(plock)) {
- pthread_yield();
- }
+#endif
+
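This is the classic nonblocking-connect sequence: an EINPROGRESS-class return means the connect is in flight, the thread waits for writability, and the real outcome is then recovered via getpeername() with getsockopt(SO_ERROR) supplying the error code. A hypothetical select()-based equivalent of the same sequence:

    #include <errno.h>
    #include <sys/select.h>
    #include <sys/socket.h>

    /* Start a nonblocking connect, wait for writability, then read
     * the final status out of SO_ERROR.  Returns 0 or -1/errno. */
    static int connect_nb(int fd, const struct sockaddr *name,
                          socklen_t namelen)
    {
        if (connect(fd, name, namelen) == 0)
            return 0;
        if (errno != EINPROGRESS && errno != EWOULDBLOCK &&
            errno != EAGAIN)
            return -1;

        fd_set wfds;
        FD_ZERO(&wfds);
        FD_SET(fd, &wfds);
        if (select(fd + 1, NULL, &wfds, NULL, NULL) < 0)
            return -1;

        int err = 0;
        socklen_t len = sizeof(err);
        if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
            return -1;
        if (err) {
            errno = err;        /* already positive, as noted above */
            return -1;
        }
        return 0;
    }
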
+#if defined (HAVE_SYSCALL_ACCEPT) || defined (HAVE_SYSCALL_SOCKETCALL)
- /* queue pthread for a FDW_WAIT */
- pthread_run->fd = fd_table[fd]->fd.i;
- pthread_run->next = fd_wait_write;
- fd_wait_write = pthread_run;
- SEMAPHORE_RESET(lock);
- reschedule(PS_FDW_WAIT);
+/* ==========================================================================
+ * accept()
+ */
+int accept(int fd, struct sockaddr *name, int *namelen)
+{
+ int ret, fd_kern;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ while ((fd_kern = machdep_sys_accept(fd_table[fd]->fd.i, name, namelen)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((fd_kern == -EWOULDBLOCK) || (fd_kern == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ pthread_resched_resume(PS_FDR_WAIT);
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ } else {
+ fd_unlock(fd, FD_RDWR);
+ SET_ERRNO(-fd_kern);
+ return(NOTOK);
+ }
+ }
+ fd_unlock(fd, FD_RDWR);
- /* OK now lets see if it really worked */
- if (((ret = machdep_sys_getpeername(fd_table[fd]->fd,
- &tmpname, &tmpnamelen)) < OK) && (ret == -ENOTCONN)) {
+ if (!((ret = fd_allocate()) < OK)) {
- /* Get the error, this function should not fail */
- machdep_sys_getsockopt(fd_table[fd]->fd, SOL_SOCKET,
- SO_ERROR, &pthread_run->error, &tmpnamelen);
- }
- } else {
- pthread_run->error = -ret;
+ /* This may be unnecessary */
+ machdep_sys_fcntl(fd_kern, F_SETFL, __FD_NONBLOCK);
+
+ /* Should fstat the file to determine what type it is */
+ fd_table[ret]->ops = & __fd_kern_ops;
+ fd_table[ret]->type = FD_FULL_DUPLEX;
+ fd_table[ret]->fd.i = fd_kern;
+
+ /* XXX Flags should be the same as those on the listening fd */
+ fd_table[ret]->flags = fd_table[fd]->flags;
+ }
+ }
+ return(ret);
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_LISTEN) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * listen()
+ */
+int listen(int fd, int backlog)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_listen(fd_table[fd]->fd.i, backlog)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return(ret);
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_SEND) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * send_timedwait()
+ */
+ssize_t send_timedwait(int fd, const void * msg, size_t len, int flags,
+ struct timespec * timeout)
+{
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_WRITE, timeout)) == OK) {
+ while ((ret = machdep_sys_send(fd_table[fd]->fd.i,
+ msg, len, flags)) < OK)
+ {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN)))
+ {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ ret = -ETIMEDOUT;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDW_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ ret= -EINTR;
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ if (ret < 0)
+ {
+ SET_ERRNO(-ret);
+ return(NOTOK);
+ }
+ return ret;
+}
+
+/* ==========================================================================
+ * send()
+ */
+ssize_t send(int fd, const void * msg, size_t len, int flags)
+{
+ return(send_timedwait(fd, msg, len, flags, NULL));
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_SENDTO) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * sendto_timedwait()
+ */
+ssize_t sendto_timedwait(int fd, const void * msg, size_t len,
+ int flags, const struct sockaddr *to, int to_len,
+ struct timespec * timeout)
+{
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_WRITE, timeout)) == OK) {
+ while ((ret = machdep_sys_sendto(fd_table[fd]->fd.i,
+ msg, len, flags, to, to_len)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ ret= -ETIMEDOUT;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDW_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ ret= -EINTR;
+ break;
+ }
+ }
+ else
+ break; /* ret contains the error code */
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ if (ret < 0)
+ {
+ SET_ERRNO(-ret);
+ return(NOTOK);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * sendto()
+ */
+ssize_t sendto(int fd, const void * msg, size_t len, int flags,
+ const struct sockaddr *to, int to_len)
+{
+ return(sendto_timedwait(fd, msg, len, flags, to, to_len, NULL));
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_SENDMSG) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * sendmsg_timedwait()
+ */
+ssize_t sendmsg_timedwait(int fd, const struct msghdr *msg, int flags,
+ struct timespec * timeout)
+{
+ int passed_fd, ret, i;
+
+ /* Handle getting the real file descriptor */
+ for(i = 0; i < (((struct omsghdr *)msg)->msg_accrightslen/sizeof(i)); i++) {
+ passed_fd = *(((int *)((struct omsghdr *)msg)->msg_accrights) + i);
+ if ((ret = fd_lock(passed_fd, FD_RDWR, NULL)) == OK) {
+ *(((int *)((struct omsghdr *)msg)->msg_accrights) + i)
+ = fd_table[passed_fd]->fd.i;
+ machdep_sys_fcntl(fd_table[passed_fd]->fd.i, F_SETFL,
+ fd_table[passed_fd]->flags);
+ switch(fd_table[passed_fd]->type) {
+ case FD_TEST_FULL_DUPLEX:
+ case FD_TEST_HALF_DUPLEX:
+ break;
+ case FD_FULL_DUPLEX:
+ fd_table[passed_fd]->type = FD_TEST_FULL_DUPLEX;
+ break;
+ case FD_HALF_DUPLEX:
+ fd_table[passed_fd]->type = FD_TEST_HALF_DUPLEX;
+ break;
+ default:
+ PANIC();
+ }
+ } else {
+ SET_ERRNO(EBADF);
+ return(NOTOK);
+ }
+ fd_unlock(passed_fd, FD_RDWR);
+ }
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_WRITE, timeout)) == OK) {
+ while((ret = machdep_sys_sendmsg(fd_table[fd]->fd.i, msg, flags)) < OK){
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDW_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_write, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDW_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = NOTOK;
+ break;
+ }
+ pthread_sched_resume();
+
+ } else {
+ pthread_resched_resume(PS_FDW_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
+
+ } else {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ break;
+ }
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * sendmsg()
+ */
+ssize_t sendmsg(int fd, const struct msghdr *msg, int flags)
+{
+ return(sendmsg_timedwait(fd, msg, flags, NULL));
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_RECV) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * recv_timedwait()
+ */
+ssize_t recv_timedwait(int fd, void * buf, size_t len, int flags,
+ struct timespec * timeout)
+{
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_READ, timeout)) == OK) {
+ while ((ret = machdep_sys_recv(fd_table[fd]->fd.i,
+ buf, len, flags)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDR_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ ret = -ETIMEDOUT;
+ break;
+ }
+ pthread_sched_resume();
+ } else {
+ pthread_resched_resume(PS_FDR_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ ret= -EINTR;
+ break;
+ }
+
+ } else {
+ break;
+ }
+ }
+ fd_unlock(fd, FD_READ);
+ }
+ if (ret < 0)
+ {
+ SET_ERRNO(-ret);
+ return(NOTOK);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * recv()
+ */
+ssize_t recv(int fd, void * buf, size_t len, int flags)
+{
+ return(recv_timedwait(fd, buf, len, flags, NULL));
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_RECVFROM) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * recvfrom_timedwait()
+ */
+ssize_t recvfrom_timedwait(int fd, void * buf, size_t len, int flags,
+ struct sockaddr * from, int * from_len,
+ struct timespec * timeout)
+{
+ int ret;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_READ, timeout)) == OK) {
+ while ((ret = machdep_sys_recvfrom(fd_table[fd]->fd.i,
+ buf, len, flags, from, from_len)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDR_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ ret= -ETIMEDOUT;
+ break;
+ }
+ pthread_sched_resume();
+
+ } else {
+ pthread_resched_resume(PS_FDR_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ ret= -EINTR;
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ fd_unlock(fd, FD_READ);
+ }
+ if (ret < 0)
+ {
+ SET_ERRNO(-ret);
+ return(NOTOK);
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * recvfrom()
+ */
+ssize_t recvfrom(int fd, void * buf, size_t len, int flags,
+ struct sockaddr * from, int * from_len)
+{
+ return(recvfrom_timedwait(fd, buf, len, flags, from, from_len, NULL));
+}
+
+#endif
+
+#if defined (HAVE_SYSCALL_RECVMSG) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * recvmsg_timedwait()
+ */
+ssize_t recvmsg_timedwait(int fd, struct msghdr *msg, int flags,
+ struct timespec * timeout)
+{
+ struct stat stat_buf;
+ int passed_fd, ret, i;
+
+ pthread_run->sighandled=0; /* Added by monty */
+ if ((ret = fd_lock(fd, FD_READ, timeout)) == OK) {
+ while ((ret = machdep_sys_recvmsg(fd_table[fd]->fd.i, msg, flags)) < OK) {
+ if (!(fd_table[fd]->flags & __FD_NONBLOCK) &&
+ ((ret == -EWOULDBLOCK) || (ret == -EAGAIN))) {
+ pthread_sched_prevent();
+
+ /* queue pthread for a FDR_WAIT */
+ SET_PF_WAIT_EVENT(pthread_run);
+ pthread_run->data.fd.fd = fd_table[fd]->fd.i;
+ pthread_queue_enq(&fd_wait_read, pthread_run);
+
+ if (timeout) {
+ /* get current time */
+ struct timespec current_time;
+ machdep_gettimeofday(&current_time);
+ sleep_schedule(&current_time, timeout);
+
+ pthread_resched_resume(PS_FDR_WAIT);
+
+ /* We're awake */
+ pthread_sched_prevent();
+ if (sleep_cancel(pthread_run) == NOTOK) {
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ pthread_sched_resume();
+ SET_ERRNO(ETIMEDOUT);
+ ret = NOTOK;
+ break;
+ }
+ pthread_sched_resume();
+
+ } else {
+ pthread_resched_resume(PS_FDR_WAIT);
+ }
+ CLEAR_PF_DONE_EVENT(pthread_run);
+ if (pthread_run->sighandled) /* Added by monty */
+ { /* We were aborted */
+ SET_ERRNO(EINTR);
+ ret= NOTOK;
+ break;
+ }
+ } else {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ break;
+ }
+ }
+ fd_unlock(fd, FD_READ);
+
+ /* Handle getting the real file descriptor */
+ for (i = 0; i < (((struct omsghdr *)msg)->msg_accrightslen / sizeof(i));
+ i++) {
+ passed_fd = *(((int *)((struct omsghdr *)msg)->msg_accrights) + i);
+ if (!((fd = fd_allocate()) < OK)) {
+ fd_table[fd]->flags = machdep_sys_fcntl(passed_fd, F_GETFL);
+
+ if (!( fd_table[fd]->flags & __FD_NONBLOCK)) {
+ machdep_sys_fcntl(passed_fd, F_SETFL,
+ fd_table[fd]->flags | __FD_NONBLOCK);
+ }
+
+ /* fstat the file to determine what type it is */
+ machdep_sys_fstat(passed_fd, &stat_buf);
+ if (S_ISREG(stat_buf.st_mode)) {
+ fd_table[fd]->type = FD_HALF_DUPLEX;
+ } else {
+ fd_table[fd]->type = FD_FULL_DUPLEX;
+ }
+ *(((int *)((struct omsghdr *)msg)->msg_accrights) + i) = fd;
+ fd_table[fd]->ops = &(__fd_kern_ops);
+ fd_table[fd]->fd.i = passed_fd;
+ } else {
+ SET_ERRNO(EBADF);
+ return(NOTOK);
+ }
+ }
+ }
+ return(ret);
+}
+
+/* ==========================================================================
+ * recvmsg()
+ */
+ssize_t recvmsg(int fd, struct msghdr *msg, int flags)
+{
+ return(recvmsg_timedwait(fd, msg, flags, NULL));
+}
+
+#endif
+
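The msg_accrights casts above use the old 4.3BSD descriptor-passing interface (hence struct omsghdr); its modern descendant carries descriptors in an SCM_RIGHTS control message instead. For comparison, a minimal hypothetical receiver for a single descriptor over a Unix-domain socket:

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Receive one file descriptor via the SCM_RIGHTS control-message
     * interface; returns the new fd or -1. */
    static int recv_one_fd(int sock)
    {
        char data;
        struct iovec iov = { &data, sizeof(data) };
        union {
            struct cmsghdr hdr;
            char buf[CMSG_SPACE(sizeof(int))];
        } u;
        struct msghdr msg;
        struct cmsghdr *cm;
        int fd;

        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = u.buf;
        msg.msg_controllen = sizeof(u.buf);
        if (recvmsg(sock, &msg, 0) < 0)
            return -1;
        cm = CMSG_FIRSTHDR(&msg);
        if (cm == NULL || cm->cmsg_level != SOL_SOCKET ||
            cm->cmsg_type != SCM_RIGHTS)
            return -1;
        memcpy(&fd, CMSG_DATA(cm), sizeof(fd));
        return fd;
    }
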
+#if defined (HAVE_SYSCALL_SHUTDOWN) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * shutdown()
+ */
+int shutdown(int fd, int how)
+{
+ int ret;
+
+ switch(how) {
+ case 0: /* Read */
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ if ((ret = machdep_sys_shutdown(fd_table[fd]->fd.i, how)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
}
+ fd_unlock(fd, FD_READ);
}
- fd_unlock(fd, FD_RDWR);
+ break;
+ case 1: /* Write */
+ if ((ret = fd_lock(fd, FD_WRITE, NULL)) == OK) {
+ if ((ret = machdep_sys_shutdown(fd_table[fd]->fd.i, how)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_WRITE);
+ }
+ break;
+ case 2: /* Read-Write */
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_shutdown(fd_table[fd]->fd.i, how)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ break;
+ default:
+ SET_ERRNO(EBADF);
+ ret = NOTOK;
+ break;
}
return(ret);
}
+#endif
+
+#if defined (HAVE_SYSCALL_SETSOCKOPT) || defined (HAVE_SYSCALL_SOCKETCALL)
+
/* ==========================================================================
- * accept()
+ * setsockopt()
*/
-int accept(int fd, struct sockaddr *name, int *namelen)
+int setsockopt(int fd, int level, int optname, const void * optval, int optlen)
{
- semaphore *lock, *plock;
- int ret, fd_kern;
+ int ret;
+ if ((ret = fd_lock(fd, FD_RDWR, NULL)) == OK) {
+ if ((ret = machdep_sys_setsockopt(fd_table[fd]->fd.i, level,
+ optname, optval, optlen)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_RDWR);
+ }
+ return ret;
+}
+#endif
- if ((ret = fd_lock(fd, FD_RDWR)) == OK) {
- while ((fd_kern = machdep_sys_accept(fd_table[fd]->fd, name, namelen)) < OK) {
- if (fd_kern == -EWOULDBLOCK) {
- /* Lock queue */
- lock = &fd_wait_lock;
- while (SEMAPHORE_TEST_AND_SET(lock)) {
- pthread_yield();
- }
+#if defined (HAVE_SYSCALL_GETSOCKOPT) || defined (HAVE_SYSCALL_SOCKETCALL)
- /* Lock pthread */
- plock = &(pthread_run->lock);
- while (SEMAPHORE_TEST_AND_SET(plock)) {
- pthread_yield();
- }
+/* ==========================================================================
+ * getsockopt()
+ */
+int getsockopt(int fd, int level, int optname, void * optval, int * optlen)
+{
+ int ret;
- /* queue pthread for a FDR_WAIT */
- pthread_run->fd = fd_table[fd]->fd.i;
- pthread_run->next = fd_wait_write;
- pthread_run->next = fd_wait_read;
- fd_wait_read = pthread_run;
- SEMAPHORE_RESET(lock);
- reschedule(PS_FDR_WAIT);
- } else {
- fd_unlock(fd, FD_RDWR);
- return(fd_kern);
- }
- }
- fd_unlock(fd, FD_RDWR);
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ if ((ret = machdep_sys_getsockopt(fd_table[fd]->fd.i, level,
+ optname, optval, optlen)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_READ);
+ }
+ return ret;
+}
- if (!((ret = fd_allocate()) < OK)) {
+#endif
- /* This may be unnecessary */
- machdep_sys_fcntl(fd_kern, F_SETFL, __FD_NONBLOCK);
+#if defined (HAVE_SYSCALL_GETSOCKOPT) || defined (HAVE_SYSCALL_SOCKETCALL)
- /* Should fstat the file to determine what type it is */
- fd_table[ret]->ops = & __fd_kern_ops;
- fd_table[ret]->type = FD_FULL_DUPLEX;
- fd_table[ret]->fd = fd_kern;
- fd_table[ret]->flags = 0;
+/* ==========================================================================
+ * getsockname()
+ */
+int getsockname(int fd, struct sockaddr * name, int * naddrlen)
+{
+ int ret;
+
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ if ((ret = machdep_sys_getsockname(fd_table[fd]->fd.i,
+ name, naddrlen)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
}
+ fd_unlock(fd, FD_READ);
}
- return(ret);
+ return ret;
}
+#endif
+
+#if defined (HAVE_SYSCALL_GETPEERNAME) || defined (HAVE_SYSCALL_SOCKETCALL)
+
/* ==========================================================================
- * listen()
+ * getpeername()
*/
-int listen(int fd, int backlog)
+int getpeername(int fd, struct sockaddr * peer, int * paddrlen)
{
int ret;
- if ((ret = fd_lock(fd, FD_RDWR)) == OK) {
- ret = machdep_sys_listen(fd_table[fd]->fd, backlog);
- fd_unlock(fd, FD_RDWR);
+ if ((ret = fd_lock(fd, FD_READ, NULL)) == OK) {
+ if ((ret = machdep_sys_getpeername(fd_table[fd]->fd.i,
+ peer, paddrlen)) < OK) {
+ SET_ERRNO(-ret);
+ ret = NOTOK;
+ }
+ fd_unlock(fd, FD_READ);
}
- return(ret);
+ return ret;
}
+
+#endif
+
+#if defined (HAVE_SYSCALL_SOCKETPAIR) || defined (HAVE_SYSCALL_SOCKETCALL)
+
+/* ==========================================================================
+ * socketpair()
+ */
+int socketpair(int af, int type, int protocol, int pair[2])
+{
+ int ret, fd[2];
+
+ if (!((pair[0] = fd_allocate()) < OK)) {
+ if (!((pair[1] = fd_allocate()) < OK)) {
+ if (!((ret = machdep_sys_socketpair(af, type, protocol, fd)) < OK)){
+ int tmp_flags;
+
+ tmp_flags = machdep_sys_fcntl(fd[0], F_GETFL, 0);
+ machdep_sys_fcntl(fd[0], F_SETFL, tmp_flags | __FD_NONBLOCK);
+ fd_table[pair[0]]->ops = & __fd_kern_ops;
+ fd_table[pair[0]]->type = FD_FULL_DUPLEX;
+ fd_table[pair[0]]->flags = tmp_flags;
+ fd_table[pair[0]]->fd.i = fd[0];
+
+ tmp_flags = machdep_sys_fcntl(fd[1], F_GETFL, 0);
+ machdep_sys_fcntl(fd[1], F_SETFL, tmp_flags | __FD_NONBLOCK);
+ fd_table[pair[1]]->ops = & __fd_kern_ops;
+ fd_table[pair[1]]->type = FD_FULL_DUPLEX;
+ fd_table[pair[1]]->flags = tmp_flags;
+ fd_table[pair[1]]->fd.i = fd[1];
+
+ return(ret);
+ }
+ fd_table[pair[1]]->count = 0;
+ }
+ fd_table[pair[0]]->count = 0;
+ SET_ERRNO(-ret);
+ }
+ return(NOTOK);
+}
+
+#endif
diff --git a/lib/libpthread/pthreads/signal.c b/lib/libpthread/pthreads/signal.c
index 5bdbe6892d7..b96bee7e38f 100644
--- a/lib/libpthread/pthreads/signal.c
+++ b/lib/libpthread/pthreads/signal.c
@@ -36,12 +36,20 @@
*/
#ifndef lint
-static const char rcsid[] = "$Id: signal.c,v 1.1 1995/10/18 08:43:05 deraadt Exp $ $provenid: signal.c,v 1.18 1994/02/07 02:19:28 proven Exp $";
+static const char rcsid[] = "$Id: signal.c,v 1.2 1998/07/21 19:48:04 peter Exp $";
#endif
#include <pthread.h>
#include <signal.h>
+/* This will force init.o to get dragged in; if you've got support for
+ C++ initialization, that'll cause pthread_init to be called at
+ program startup automatically, so the application won't need to
+ call it explicitly. */
+
+extern char __pthread_init_hack;
+char *__pthread_init_hack_2 = &__pthread_init_hack;
+
/*
* Time which select in fd_kern_wait() will sleep.
* If there are no threads to run we sleep for an hour or until
@@ -54,13 +62,16 @@ struct timeval __fd_kern_wait_timeout = { 0, 0 };
/*
* Global for user-kernel lock, and blocked signals
*/
-static volatile sigset_t sig_to_tryagain;
-static volatile sigset_t sig_to_process;
-static volatile int kernel_lock = 0;
+
+static sig_atomic_t signum_to_process[SIGMAX + 1] = { 0, };
+volatile sig_atomic_t sig_to_process = 0;
+
+/* static volatile sigset_t sig_to_process; */
static volatile int sig_count = 0;
static void sig_handler(int signal);
static void set_thread_timer();
+static void __cleanup_after_resume( void );
void sig_prevent(void);
void sig_resume(void);
@@ -75,99 +86,101 @@ void sig_resume(void);
*/
static void context_switch()
{
- struct pthread **current, *next, *last;
- semaphore *lock;
- int count;
+ struct pthread **current, *next, *last, **dead;
+
+ if (pthread_run->state == PS_RUNNING) {
+ /* Put current thread back on the queue */
+ pthread_prio_queue_enq(pthread_current_prio_queue, pthread_run);
+ }
+ /* save floating point registers if necessary */
+ if (!(pthread_run->attr.flags & PTHREAD_NOFLOAT)) {
+ machdep_save_float_state(pthread_run);
+ }
/* save state of current thread */
if (machdep_save_state()) {
return;
}
last = pthread_run;
- if (pthread_run = pthread_queue_deq(&pthread_current_queue)) {
- /* restore state of new current thread */
- machdep_restore_state();
- return;
- }
- /* Poll all the kernel fds */
+ /* Poll all fds */
fd_kern_poll();
context_switch_reschedule:;
- /*
- * Go through the reschedule list once, this is the only place
- * that goes through the queue without using the queue routines.
- *
- * But first delete the current queue.
- */
- pthread_queue_init(&pthread_current_queue);
- current = &(pthread_link_list);
- count = 0;
-
- while (*current) {
- switch((*current)->state) {
- case PS_RUNNING:
- pthread_queue_enq(&pthread_current_queue, *current);
- current = &((*current)->pll);
- count++;
- break;
- case PS_DEAD:
- /* Cleanup thread, unless we're using the stack */
- if (((*current)->flags & PF_DETACHED) && (*current != last)) {
- next = (*current)->pll;
- lock = &((*current)->lock);
- if (SEMAPHORE_TEST_AND_SET(lock)) {
- /* Couldn't cleanup this time, try again later */
- current = &((*current)->pll);
- } else {
- if (!((*current)->attr.stackaddr_attr)) {
- free (machdep_pthread_cleanup(&((*current)->machdep_data)));
- }
- free (*current);
- *current = next;
- }
- } else {
- current = &((*current)->pll);
- }
- break;
- default:
- /* Should be on a different queue. Ignore. */
- current = &((*current)->pll);
- count++;
- break;
- }
- }
-
/* Are there any threads to run */
- if (pthread_run = pthread_queue_deq(&pthread_current_queue)) {
- /* restore state of new current thread */
+ if (pthread_run = pthread_prio_queue_deq(pthread_current_prio_queue)) {
+ /* restore floating point registers if necessary */
+ if (!(pthread_run->attr.flags & PTHREAD_NOFLOAT)) {
+ machdep_restore_float_state();
+ }
+ uthread_sigmask = &(pthread_run->sigmask);
+ /* restore state of new current thread */
machdep_restore_state();
- return;
- }
+ return;
+ }
/* Are there any threads at all */
- if (count) {
- /*
- * Do a wait, timeout is set to a hour unless we get an interrupt
- * before the select in wich case it polls and returns.
- */
- fd_kern_wait();
-
- /* Check for interrupts, but ignore SIGVTALR */
- sigdelset(&sig_to_process, SIGVTALRM);
-
- if (sig_to_process) {
- /* Process interrupts */
- sig_handler(0);
+ for (next = pthread_link_list; next; next = next->pll) {
+ if ((next->state != PS_UNALLOCED) && (next->state != PS_DEAD)) {
+ sigset_t sig_to_block, oset;
+
+ sigfillset(&sig_to_block);
+
+ /*
+ * Check sig_to_process before calling fd_kern_wait, to handle
+ * things like zero timeouts to select() which would register
+ * a signal with the sig_handler_fake() call.
+ *
+ * This case should ignore SIGVTALRM
+ */
+ machdep_sys_sigprocmask(SIG_BLOCK, &sig_to_block, &oset);
+ signum_to_process[SIGVTALRM] = 0;
+ if (sig_to_process) {
+ /* Process interrupts */
+ /*
+ * XXX pthread_run should not be set!
+ * Places where it dumps core should be fixed to
+ * check for the existence of pthread_run --proven
+ */
+ sig_handler(0);
+ } else {
+ machdep_sys_sigprocmask(SIG_UNBLOCK, &sig_to_block, &oset);
+ /*
+ * Do a wait, timeout is set to a hour unless we get an
+ * intr. before the select, in which case it polls.
+ */
+ fd_kern_wait();
+ machdep_sys_sigprocmask(SIG_BLOCK, &sig_to_block, &oset);
+ /* Check for interrupts, but ignore SIGVTALR */
+ signum_to_process[SIGVTALRM] = 0;
+ if (sig_to_process) {
+ /* Process interrupts */
+ sig_handler(0);
+ }
+ }
+ machdep_sys_sigprocmask(SIG_UNBLOCK, &sig_to_block, &oset);
+ goto context_switch_reschedule;
}
-
- goto context_switch_reschedule;
-
}
+
+ /* There are no threads alive. */
+ pthread_run = last;
exit(0);
}
+#if !defined(HAVE_SYSCALL_SIGSUSPEND) && defined(HAVE_SYSCALL_SIGPAUSE)
+
+/* ==========================================================================
+ * machdep_sys_sigsuspend()
+ */
+int machdep_sys_sigsuspend(sigset_t * set)
+{
+ return(machdep_sys_sigpause(* set));
+}
+
+#endif
+
/* ==========================================================================
* sig_handler_pause()
*
@@ -175,15 +188,16 @@ context_switch_reschedule:;
*/
void sig_handler_pause()
{
- sigset_t sig_to_block, sig_to_pause;
+ sigset_t sig_to_block, sig_to_pause, oset;
sigfillset(&sig_to_block);
sigemptyset(&sig_to_pause);
- sigprocmask(SIG_BLOCK, &sig_to_block, NULL);
+ machdep_sys_sigprocmask(SIG_BLOCK, &sig_to_block, &oset);
+/* if (!(SIG_ANY(sig_to_process))) { */
if (!sig_to_process) {
- sigsuspend(&sig_to_pause);
+ machdep_sys_sigsuspend(&sig_to_pause);
}
- sigprocmask(SIG_UNBLOCK, &sig_to_block, NULL);
+ machdep_sys_sigprocmask(SIG_UNBLOCK, &sig_to_block, &oset);
}
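
sig_handler_pause() is the textbook sigsuspend() pattern: the pending flag is tested only while every signal is blocked, and sigsuspend() swaps in the empty mask and sleeps in one atomic step, so no wakeup can slip between the test and the pause. A standalone sketch of the same idea with hypothetical names:

    #include <signal.h>

    static volatile sig_atomic_t work_pending;   /* set by a handler */

    /* Race-free wait: the flag is tested only while all signals are
     * blocked; sigsuspend() unblocks and sleeps atomically. */
    static void wait_for_work(void)
    {
        sigset_t all, none, old;

        sigfillset(&all);
        sigemptyset(&none);
        sigprocmask(SIG_BLOCK, &all, &old);
        while (!work_pending)
            sigsuspend(&none);       /* returns once a handler ran */
        work_pending = 0;
        sigprocmask(SIG_SETMASK, &old, NULL);
    }
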
/* ==========================================================================
@@ -198,7 +212,8 @@ void sig_handler_pause()
*/
void context_switch_done()
{
- sigdelset(&sig_to_process, SIGVTALRM);
+ /* sigdelset((sigset_t *)&sig_to_process, SIGVTALRM); */
+ signum_to_process[SIGVTALRM] = 0;
set_thread_timer();
}
@@ -211,13 +226,13 @@ static void set_thread_timer()
{
static int last_sched_attr = SCHED_RR;
- switch (pthread_run->attr.sched_attr) {
+ switch (pthread_run->attr.schedparam_policy) {
case SCHED_RR:
machdep_set_thread_timer(&(pthread_run->machdep_data));
break;
case SCHED_FIFO:
if (last_sched_attr != SCHED_FIFO) {
- machdep_unset_thread_timer();
+ machdep_unset_thread_timer(NULL);
}
break;
case SCHED_IO:
@@ -229,90 +244,158 @@ static void set_thread_timer()
machdep_set_thread_timer(&(pthread_run->machdep_data));
break;
}
- last_sched_attr = pthread_run->attr.sched_attr;
+ last_sched_attr = pthread_run->attr.schedparam_policy;
}
/* ==========================================================================
- * sig_handler()
- *
- * Assumes the kernel is locked.
+ * sigvtalrm()
*/
-static void sig_handler(int sig)
+static inline void sigvtalrm()
{
+ if (sig_count) {
+ sigset_t sigall, oset;
- /*
- * First check for old signals, do one pass through and don't
- * check any twice.
- */
- if (sig_to_tryagain) {
- if (sigismember(&sig_to_tryagain, SIGALRM)) {
- switch (sleep_wakeup()) {
- case 1:
- /* Do the default action, no threads were sleeping */
- case OK:
- /* Woke up a sleeping thread */
- sigdelset(&sig_to_tryagain, SIGALRM);
+ sig_count = 0;
+
+ /* Unblock all signals */
+ sigemptyset(&sigall);
+ machdep_sys_sigprocmask(SIG_SETMASK, &sigall, &oset);
+ }
+ context_switch();
+ context_switch_done();
+}
+
+/* ==========================================================================
+ * sigdefault()
+ */
+static inline void sigdefault(int sig)
+{
+ int ret;
+
+ ret = pthread_sig_register(sig);
+ if (pthread_run && (ret > pthread_run->pthread_priority)) {
+ sigvtalrm();
+ }
+}
+
+/* ==========================================================================
+ * sig_handler_switch()
+ */
+static inline void sig_handler_switch(int sig)
+{
+ int ret;
+
+ switch(sig) {
+ case 0:
break;
- case NOTOK:
- /* Couldn't get appropriate locks, try again later */
+ case SIGVTALRM:
+ sigvtalrm();
break;
- }
- } else {
- PANIC();
+ case SIGALRM:
+/* sigdelset((sigset_t *)&sig_to_process, SIGALRM); */
+ signum_to_process[SIGALRM] = 0;
+ switch (ret = sleep_wakeup()) {
+ default:
+ if (pthread_run && (ret > pthread_run->pthread_priority)) {
+ sigvtalrm();
+ }
+ case 0:
+ break;
+ case NOTOK:
+ /* Do the registered action, no threads were sleeping */
+ /* There is a timing window that gets
+ * here when no threads are on the
+ * sleep queue. This is a quick fix.
+ * The real problem is possibly related
+ * to heavy use of condition variables
+ * with timeouts.
+ * (mevans)
+ *sigdefault(sig);
+ */
+ break;
+ }
+ break;
+ case SIGCHLD:
+/* sigdelset((sigset_t *)&sig_to_process, SIGCHLD); */
+ signum_to_process[SIGCHLD] = 0;
+ switch (ret = wait_wakeup()) {
+ default:
+ if (pthread_run && (ret > pthread_run->pthread_priority)) {
+ sigvtalrm();
+ }
+ case 0:
+ break;
+ case NOTOK:
+ /* Do the registered action, no threads were waiting */
+ sigdefault(sig);
+ break;
+ }
+ break;
+
+#ifdef SIGINFO
+ case SIGINFO:
+ pthread_dump_info ();
+ /* Then fall through, invoking the application's
+ signal handler after printing our info out.
+
+ I'm not convinced that this is right, but I'm not
+ 100% convinced that it is wrong, and this is how
+ Chris wants it done... */
+#endif
+
+ default:
+ /* Do the registered action */
+ if (!sigismember(uthread_sigmask, sig)) {
+ /*
+ * If the signal isn't masked by the last running thread and
+ * the signal behavior is default or ignore then we can
+ * execute it immediately. --proven
+ */
+ pthread_sig_default(sig);
}
+ signum_to_process[sig] = 0;
+ sigdefault(sig);
+ break;
}
-
- /*
- * NOW, process signal that just came in, plus any pending on the
- * signal mask. All of these must be resolved.
- */
-sig_handler_top:;
+}
- switch(sig) {
- case 0:
- break;
- case SIGVTALRM:
- if (sig_count) {
- sigset_t sigall;
+/* ==========================================================================
+ * sig_handler()
+ *
+ * Process signal that just came in, plus any pending on the signal mask.
+ * All of these must be resolved.
+ *
+ * Assumes the kernel is locked.
+ */
+static void sig_handler(int sig)
+{
+ if (pthread_kernel_lock != 1) {
+ PANIC();
+ }
- sig_count = 0;
+ if (sig) {
+ sig_handler_switch(sig);
+ }
- /* Unblock all signals */
- sigemptyset(&sigall);
- sigprocmask(SIG_SETMASK, &sigall, NULL);
+ while (sig_to_process) {
+ for (sig_to_process = 0, sig = 1; sig <= SIGMAX; sig++) {
+ if (signum_to_process[sig]) {
+ sig_handler_switch(sig);
+ }
}
- context_switch();
- context_switch_done();
- break;
- case SIGALRM:
- sigdelset(&sig_to_process, SIGALRM);
- switch (sleep_wakeup()) {
- case 1:
- /* Do the default action, no threads were sleeping */
- case OK:
- /* Woke up a sleeping thread */
- break;
- case NOTOK:
- /* Couldn't get appropriate locks, try again later */
- sigaddset(&sig_to_tryagain, SIGALRM);
- break;
- }
- break;
- default:
- PANIC();
}
- /* Determine if there are any other signals */
- if (sig_to_process) {
- for (sig = 1; sig <= SIGMAX; sig++) {
- if (sigismember(&sig_to_process, sig)) {
- /* goto sig_handler_top */
+/*
+ if (SIG_ANY(sig_to_process)) {
+ for (sig = 1; sig <= SIGMAX; sig++) {
+ if (sigismember((sigset_t *)&sig_to_process, sig)) {
goto sig_handler_top;
}
}
}
+*/
}
/* ==========================================================================
@@ -323,15 +406,37 @@ sig_handler_top:;
*/
void sig_handler_real(int sig)
{
- if (kernel_lock) {
+ /*
+ * Get around systems with BROKEN signal handlers.
+ *
+ * Some systems will reissue SIGCHLD if the handler explicitly
+ * clear the signal pending by either doing a wait() or
+ * ignoring the signal.
+ */
+#if defined BROKEN_SIGNALS
+ if (sig == SIGCHLD) {
+ sigignore(SIGCHLD);
+ signal(SIGCHLD, sig_handler_real);
+ }
+#endif
+
+ if (pthread_kernel_lock) {
+ /* sigaddset((sigset_t *)&sig_to_process, sig); */
__fd_kern_wait_timeout.tv_sec = 0;
- sigaddset(&sig_to_process, sig);
+ signum_to_process[sig] = 1;
+ sig_to_process = 1;
return;
}
- sig_prevent();
+ pthread_kernel_lock++;
+
sig_count++;
sig_handler(sig);
- sig_resume();
+
+ /* Handle any signals the current thread might have just gotten */
+ if (pthread_run && pthread_run->sigcount) {
+ pthread_sig_process();
+ }
+ pthread_kernel_lock--;
}
/* ==========================================================================
@@ -339,73 +444,144 @@ void sig_handler_real(int sig)
*/
void sig_handler_fake(int sig)
{
- if (kernel_lock) {
- /* Currently this should be impossible */
- PANIC();
+ if (pthread_kernel_lock) {
+ /* sigaddset((sigset_t *)&sig_to_process, sig); */
+ signum_to_process[sig] = 1;
+ sig_to_process = 1;
+ return;
}
- sig_prevent();
+ pthread_kernel_lock++;
sig_handler(sig);
- sig_resume();
+ while (!(--pthread_kernel_lock)) {
+ if (sig_to_process) {
+ /* if (SIG_ANY(sig_to_process)) { */
+ pthread_kernel_lock++;
+ sig_handler(0);
+ } else {
+ break;
+ }
+ }
}
/* ==========================================================================
- * reschedule()
+ * __pthread_signal_delete(int sig)
*
- * This routine assumes that the caller is the current pthread, pthread_run
- * and that it has a lock on itself and that it wants to reschedule itself.
+ * Assumes the kernel is locked.
*/
-void reschedule(enum pthread_state state)
+void __pthread_signal_delete(int sig)
{
- semaphore *plock;
-
- if (kernel_lock) {
- /* Currently this should be impossible */
- PANIC();
- }
- sig_prevent();
- pthread_run->state = state;
- SEMAPHORE_RESET((plock = &(pthread_run->lock)));
- sig_handler(SIGVTALRM);
- sig_resume();
+ signum_to_process[sig] = 0;
}
/* ==========================================================================
- * sig_prevent()
+ * pthread_sched_other_resume()
+ *
+ * Check if thread to be resumed is of higher priority and if so
+ * stop current thread and start new thread.
*/
-void sig_prevent(void)
+void pthread_sched_other_resume(struct pthread * pthread)
{
- kernel_lock++;
+ pthread->state = PS_RUNNING;
+ pthread_prio_queue_enq(pthread_current_prio_queue, pthread);
+
+ if (pthread->pthread_priority > pthread_run->pthread_priority) {
+ if (pthread_kernel_lock == 1) {
+ sig_handler(SIGVTALRM);
+ }
+ }
+
+ __cleanup_after_resume();
}
/* ==========================================================================
- * sig_resume()
+ * pthread_resched_resume()
+ *
+ * This routine assumes that the caller is the current pthread, pthread_run
+ * and that it holds a lock on the kernel thread and wants to reschedule itself.
*/
-void sig_resume()
+void pthread_resched_resume(enum pthread_state state)
{
- kernel_lock--;
+ pthread_run->state = state;
+
+ /* Since we are about to block this thread, let's see if we are
+ * at a cancel point and if we've been cancelled.
+ * Avoid cancelling dead or unalloced threads.
+ */
+ if( ! TEST_PF_RUNNING_TO_CANCEL(pthread_run) &&
+ TEST_PTHREAD_IS_CANCELLABLE(pthread_run) &&
+ state != PS_DEAD && state != PS_UNALLOCED ) {
+
+ /* Set this flag to avoid recursively calling pthread_exit */
+ /* We have to set this flag here because we will unlock the
+ * kernel prior to calling pthread_cancel_internal.
+ */
+ SET_PF_RUNNING_TO_CANCEL(pthread_run);
+
+ pthread_run->old_state = state; /* unlock needs this data */
+ pthread_sched_resume(); /* Unlock kernel before cancel */
+ pthread_cancel_internal( 1 ); /* free locks and exit */
+ }
+
+ sig_handler(SIGVTALRM);
+
+ __cleanup_after_resume();
}
/* ==========================================================================
- * sig_check_and_resume()
+ * pthread_sched_resume()
*/
-void sig_check_and_resume()
+void pthread_sched_resume()
{
- /* Some routine name that is yet to be determined. */
-
- /* Only bother if we are truely unlocking the kernel */
- while (!(--kernel_lock)) {
+ __cleanup_after_resume();
+}
- /* Assume sigset_t is not a struct or union */
+/*----------------------------------------------------------------------
+ * Function: __cleanup_after_resume
+ * Purpose: cleanup kernel locks after a resume
+ * Args: void
+ * Returns: void
+ * Notes:
+ *----------------------------------------------------------------------*/
+static void
+__cleanup_after_resume( void )
+{
+ /* Only bother if we are truly unlocking the kernel */
+ while (!(--pthread_kernel_lock)) {
+ /* if (SIG_ANY(sig_to_process)) { */
if (sig_to_process) {
- kernel_lock++;
+ pthread_kernel_lock++;
sig_handler(0);
- } else {
- break;
+ continue;
+ }
+ if (pthread_run && pthread_run->sigcount) {
+ pthread_kernel_lock++;
+ pthread_sig_process();
+ continue;
}
+ break;
+ }
+
+ if( pthread_run == NULL )
+ return; /* Must be during init processing */
+
+ /* Test for cancel that should be handled now */
+
+ if( ! TEST_PF_RUNNING_TO_CANCEL(pthread_run) &&
+ TEST_PTHREAD_IS_CANCELLABLE(pthread_run) ) {
+ /* Kernel is already unlocked */
+ pthread_cancel_internal( 1 ); /* free locks and exit */
}
}
/* ==========================================================================
+ * pthread_sched_prevent()
+ */
+void pthread_sched_prevent(void)
+{
+ pthread_kernel_lock++;
+}
+
+/* ==========================================================================
* sig_init()
*
* SIGVTALRM (NOT POSIX) needed for thread timeslice timeouts.
@@ -414,32 +590,63 @@ void sig_check_and_resume()
* SIGALRM (IS POSIX) so some special handling will be
* necessary to fake SIGALRM signals
*/
+#ifndef SIGINFO
+#define SIGINFO 0
+#endif
void sig_init(void)
{
- int sig_to_init[] = { SIGVTALRM, SIGALRM, 0 };
+ static const int signum_to_initialize[] =
+ { SIGCHLD, SIGALRM, SIGVTALRM, SIGINFO, 0 };
+ static const int signum_to_ignore[] = { SIGKILL, SIGSTOP, 0 };
+ int i, j;
-#if defined(SA_RESTART)
+#if defined(HAVE_SYSCALL_SIGACTION) || defined(HAVE_SYSCALL_KSIGACTION)
struct sigaction act;
-#endif
-
- int i;
-#if defined(SA_RESTART)
act.sa_handler = sig_handler_real;
sigemptyset(&(act.sa_mask));
- act.sa_flags = SA_RESTART;
+ act.sa_flags = 0;
#endif
- /* Initialize only the necessary signals */
- for (i = 0; sig_to_init[i]; i++) {
+ /* Initialize the important signals */
+ for (i = 0; signum_to_initialize[i]; i++) {
-#if defined(SA_RESTART)
- if (sigaction(sig_to_init[i], &act, NULL)) {
+#if defined(HAVE_SYSCALL_SIGACTION) || defined(HAVE_SYSCALL_KSIGACTION)
+ if (sigaction(signum_to_initialize[i], &act, NULL)) {
#else
- if (signal(sig_to_init[i], sig_handler_real)) {
+ if (signal(signum_to_initialize[i], sig_handler_real)) {
#endif
PANIC();
}
}
+
+ /* Initialize the rest of the signals */
+ for (j = 1; j < SIGMAX; j++) {
+ for (i = 0; signum_to_initialize[i]; i++) {
+ if (signum_to_initialize[i] == j) {
+ goto sig_next;
+ }
+ }
+ /* Because Solaris 2.4 can't deal -- proven */
+ for (i = 0; signum_to_ignore[i]; i++) {
+ if (signum_to_ignore[i] == j) {
+ goto sig_next;
+ }
+ }
+ pthread_signal(j, SIG_DFL);
+
+#if defined(HAVE_SYSCALL_SIGACTION) || defined(HAVE_SYSCALL_KSIGACTION)
+ sigaction(j, &act, NULL);
+#else
+ signal(j, sig_handler_real);
+#endif
+
+ sig_next:;
+ }
+
+#if defined BROKEN_SIGNALS
+ signal(SIGCHLD, sig_handler_real);
+#endif
+
}
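After installing handlers for the important signals, the new second pass in sig_init() walks every signal number from 1 to SIGMAX - 1 and resets the rest to the default disposition, using goto sig_next to skip both the already-installed list and the untouchable SIGKILL/SIGSTOP. The skip test is just list membership; an equivalent sketch, where sig_in_list() is a hypothetical helper rather than anything in this patch:

/* Hypothetical helper: true if sig occurs in a zero-terminated list. */
static int
sig_in_list(const int *list, int sig)
{
	int i;

	for (i = 0; list[i]; i++)
		if (list[i] == sig)
			return (1);
	return (0);
}

/* The loop body in sig_init() would then read:
 *	if (sig_in_list(signum_to_initialize, j) ||
 *	    sig_in_list(signum_to_ignore, j))
 *		continue;
 */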
diff --git a/lib/libpthread/stdio/xprintf.c b/lib/libpthread/stdio/xprintf.c
index a05de561e72..11c4860ac0b 100644
--- a/lib/libpthread/stdio/xprintf.c
+++ b/lib/libpthread/stdio/xprintf.c
@@ -6,8 +6,8 @@
*/
/*
** NAME: $Source: /cvs/OpenBSD/src/lib/libpthread/stdio/Attic/xprintf.c,v $
-** VERSION: $Revision: 1.1 $
-** DATE: $Date: 1998/07/21 13:22:19 $
+** VERSION: $Revision: 1.2 $
+** DATE: $Date: 1998/07/21 19:48:06 $
**
** ONELINER: A replacement for formatted printing programs.
**
@@ -725,7 +725,7 @@ int sprintf(char *buf, const char *fmt, ...){
rc = vxprintf(sout,&arg,fmt,ap);
va_end(ap);
}
-int vsprintf(char *buf,const char *fmt,va_list ap){
+int vsprintf(char *buf,const char *fmt,pthread_va_list ap){
struct s_strargument arg;
arg.next = buf;
arg.last = 0;
@@ -744,7 +744,7 @@ int snprintf(char *buf, size_t n, const char *fmt, ...){
rc = vxprintf(sout,&arg,fmt,ap);
va_end(ap);
}
-int vsnprintf(char *buf, size_t n, const char *fmt, va_list ap){
+int vsnprintf(char *buf, size_t n, const char *fmt, pthread_va_list ap){
struct s_strargument arg;
arg.next = buf;
arg.last = &buf[n-1];
@@ -825,7 +825,7 @@ char *mprintf(const char *zFormat, ...){
** The name is changed to TclVMPrintf() to conform with Tcl naming
** conventions.
*/
-char *vmprintf(const char *zFormat,va_list ap){
+char *vmprintf(const char *zFormat,pthread_va_list ap){
struct sgMprintf sMprintf;
char zBuf[200];
sMprintf.nChar = 0;
@@ -866,7 +866,7 @@ int fprintf(FILE *pOut, const char *zFormat, ...){
va_end(ap);
return retc;
}
-int vfprintf(FILE *pOut, const char *zFormat, va_list ap){
+int vfprintf(FILE *pOut, const char *zFormat, pthread_va_list ap){
return vxprintf(fout,pOut,zFormat,ap);
}
int printf(const char *zFormat, ...){
@@ -878,6 +878,6 @@ int printf(const char *zFormat, ...){
va_end(ap);
return retc;
}
-int vprintf(const char *zFormat, va_list ap){
+int vprintf(const char *zFormat, pthread_va_list ap){
return vxprintf(fout,stdout,zFormat,ap);
}
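All of the v*printf entry points above now take pthread_va_list instead of va_list, presumably a typedef supplied by the pthread headers now that config.h is gone (an assumption; the typedef itself is not visible in this diff). Callers are unaffected either way and keep building the argument list with va_start; a hypothetical example:

#include <stdarg.h>
#include <stdio.h>

/* log_msg() is illustrative only; it forwards its varargs to the
 * vsnprintf() defined above. */
int
log_msg(char *buf, size_t n, const char *fmt, ...)
{
	va_list ap;
	int rc;

	va_start(ap, fmt);
	rc = vsnprintf(buf, n, fmt, ap);
	va_end(ap);
	return (rc);
}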
diff --git a/lib/libpthread/stdlib/getopt.c b/lib/libpthread/stdlib/getopt.c
index 88164ef34a1..20d2e7b8ea3 100644
--- a/lib/libpthread/stdlib/getopt.c
+++ b/lib/libpthread/stdlib/getopt.c
@@ -39,7 +39,6 @@ static char sccsid[] = "@(#)getopt.c 4.13 (Berkeley) 2/23/91";
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <config.h>
/*
* get option letter from argument vector
diff --git a/lib/libpthread/tests/Makefile b/lib/libpthread/tests/Makefile
index 5ba9f9a3927..626cc39e93f 100644
--- a/lib/libpthread/tests/Makefile
+++ b/lib/libpthread/tests/Makefile
@@ -8,17 +8,11 @@
# -Initial cut for pthreads.
#
-CC = ../pgcc -notinstalled
-CPP = gcc -E
-srctop = /dr1/my/masters/mysql/mit-pthreads
-srcdir = /dr1/my/masters/mysql/mit-pthreads/tests
-VPATH = /dr1/my/masters/mysql/mit-pthreads/tests
CDEBUGFLAGS = -g -O2 -Werror
-INCLUDES= -I../include -I.. -I$(srctop)/include
+INCLUDES= -I../arch/${MACHINE} -I../include -I.. -I$(srctop)/include
CFLAGS = $(CDEBUGFLAGS) $(INCLUDES) $(ADDL_CFLAGS) -DSRCDIR=\"$(srcdir)\"
-LIBS = -lm -lgcc -L../obj/ -lpthread
-#LIBS = -static
+LIBS = -static -lm -lgcc -L../obj.${MACHINE}/ -lpthread
# This list used to include test_select, but that test doesn't terminate.
TESTS = test_create test_pthread_join test_switch test_sleep test_readdir \