author	Philip Guenther <guenther@cvs.openbsd.org>	2012-02-18 21:12:10 +0000
committer	Philip Guenther <guenther@cvs.openbsd.org>	2012-02-18 21:12:10 +0000
commit	128b34bebb37c17bd4b6dd8cc00ef84cb1987a41 (patch)
tree	bc1b64782554f53cfca95cf9d76af3b0ffac6b72 /lib/librthread
parent	837e4ba0ab76477cbdf508b2293676a2e3e8cf5f (diff)
Fix the handling of the stackaddr, stacksize, and guardsize attributes:
don't try to merge values, round the sizes separately, and don't try to
unmap application-supplied stacks.  Copy from uthread the caching of
default-sized stacks.  Have pthread_attr_init() and pthread_create() get
the default attributes from a statically allocated pthread_attr_t.  Cache
the pagesize in _rthread_init() and provide a macro for rounding to it.

based on suggestions from kettenis@ and tedu@, ok kettenis@
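
For illustration only (not part of the commit): a minimal sketch of the user-visible effect. Previously the guard size was subtracted from the requested stack size, so a value set with pthread_attr_setstacksize() could read back smaller; with the attributes kept separate it now round-trips unchanged. The 256 KB figure is arbitrary.

#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_attr_t attr;
	size_t size = 0;

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, 256 * 1024);
	pthread_attr_getstacksize(&attr, &size);
	printf("stack size: %zu\n", size);	/* 262144; no guard folded in */
	pthread_attr_destroy(&attr);
	return (0);
}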
Diffstat (limited to 'lib/librthread')
-rw-r--r--	lib/librthread/rthread.c	26
-rw-r--r--	lib/librthread/rthread.h	18
-rw-r--r--	lib/librthread/rthread_attr.c	39
-rw-r--r--	lib/librthread/rthread_np.c	17
-rw-r--r--	lib/librthread/rthread_stack.c	142
5 files changed, 141 insertions(+), 101 deletions(-)
diff --git a/lib/librthread/rthread.c b/lib/librthread/rthread.c
index eeae528d57f..a7d29402220 100644
--- a/lib/librthread/rthread.c
+++ b/lib/librthread/rthread.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread.c,v 1.51 2012/02/16 20:55:09 kettenis Exp $ */
+/* $OpenBSD: rthread.c,v 1.52 2012/02/18 21:12:09 guenther Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -39,6 +39,7 @@
static int concurrency_level; /* not used */
int _threads_ready;
+size_t _thread_pagesize;
struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
_spinlock_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
static struct pthread_queue _thread_gc_list
@@ -49,6 +50,18 @@ struct thread_control_block _initial_thread_tcb;
int __tfork_thread(const struct __tfork *, void *, void (*)(void *), void *);
+struct pthread_attr _rthread_attr_default = {
+ .stack_addr = NULL,
+ .stack_size = RTHREAD_STACK_SIZE_DEF,
+/* .guard_size set in _rthread_init */
+ .detach_state = PTHREAD_CREATE_JOINABLE,
+ .contention_scope = PTHREAD_SCOPE_SYSTEM,
+ .sched_policy = SCHED_OTHER,
+ .sched_param.sched_priority = 0,
+ .sched_inherit = PTHREAD_INHERIT_SCHED,
+ .create_suspended = 0,
+};
+
/*
* internal support functions
*/
@@ -145,6 +158,9 @@ _rthread_init(void)
LIST_INSERT_HEAD(&_thread_list, thread, threads);
_rthread_debug_init();
+ _thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
+ _rthread_attr_default.guard_size = _thread_pagesize;
+
_threads_ready = 1;
_rthread_debug(1, "rthread init\n");
@@ -372,13 +388,7 @@ pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
thread->arg = arg;
thread->tid = -1;
- if (attr)
- thread->attr = *(*attr);
- else {
- thread->attr.stack_size = RTHREAD_STACK_SIZE_DEF;
- thread->attr.guard_size = sysconf(_SC_PAGESIZE);
- thread->attr.stack_size -= thread->attr.guard_size;
- }
+ thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
thread->flags |= THREAD_DETACHED;
thread->flags |= THREAD_CANCEL_ENABLE|THREAD_CANCEL_DEFERRED;
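
A small usage sketch (again not part of the diff) of what the statically allocated defaults mean in practice: pthread_attr_init() now copies _rthread_attr_default, so a fresh attribute object reports the full default stack size, a one-page guard, and the joinable detach state.

#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_attr_t attr;
	size_t stacksize, guardsize;
	int detachstate;

	pthread_attr_init(&attr);
	pthread_attr_getstacksize(&attr, &stacksize);
	pthread_attr_getguardsize(&attr, &guardsize);
	pthread_attr_getdetachstate(&attr, &detachstate);
	printf("stack %zu, guard %zu, joinable %d\n", stacksize, guardsize,
	    detachstate == PTHREAD_CREATE_JOINABLE);
	pthread_attr_destroy(&attr);
	return (0);
}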
diff --git a/lib/librthread/rthread.h b/lib/librthread/rthread.h
index cf7ee4eb9f0..d410b200212 100644
--- a/lib/librthread/rthread.h
+++ b/lib/librthread/rthread.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread.h,v 1.32 2012/02/15 04:58:42 guenther Exp $ */
+/* $OpenBSD: rthread.h,v 1.33 2012/02/18 21:12:09 guenther Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -37,11 +37,12 @@
#endif
struct stack {
- void *sp;
- void *base;
- void *guard;
- size_t guardsize;
- size_t len;
+ SLIST_ENTRY(stack) link; /* link for free default stacks */
+ void *sp; /* machine stack pointer */
+ void *base; /* bottom of allocated area */
+ size_t guardsize; /* size of PROT_NONE zone or */
+ /* ==1 if application alloced */
+ size_t len; /* total size of allocated stack */
};
struct sem {
@@ -163,9 +164,14 @@ struct pthread {
extern int _threads_ready;
+extern size_t _thread_pagesize;
extern LIST_HEAD(listhead, pthread) _thread_list;
extern struct pthread _initial_thread;
extern _spinlock_lock_t _thread_lock;
+extern struct pthread_attr _rthread_attr_default;
+
+#define ROUND_TO_PAGE(size) \
+ (((size) + (_thread_pagesize - 1)) & ~(_thread_pagesize - 1))
void _spinlock(_spinlock_lock_t *);
void _spinunlock(_spinlock_lock_t *);
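
To show how the new ROUND_TO_PAGE() macro behaves, here is a standalone sketch that hard-codes a 4096-byte page; the library itself caches the real value from sysconf(_SC_PAGESIZE) in _rthread_init().

#include <stdio.h>

/* assumption for the sketch: 4 KB pages; librthread uses the cached value */
static size_t _thread_pagesize = 4096;

#define ROUND_TO_PAGE(size) \
	(((size) + (_thread_pagesize - 1)) & ~(_thread_pagesize - 1))

int
main(void)
{
	/* prints: 4096 4096 8192 */
	printf("%zu %zu %zu\n", ROUND_TO_PAGE((size_t)1),
	    ROUND_TO_PAGE((size_t)4096), ROUND_TO_PAGE((size_t)4097));
	/* near SIZE_MAX the macro wraps to a smaller result, which is the
	 * overflow case the new pthread_attr_setstacksize() check rejects */
	return (0);
}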
diff --git a/lib/librthread/rthread_attr.c b/lib/librthread/rthread_attr.c
index 9ba7b0ec240..dbef6b5acf1 100644
--- a/lib/librthread/rthread_attr.c
+++ b/lib/librthread/rthread_attr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_attr.c,v 1.11 2012/02/18 07:44:28 guenther Exp $ */
+/* $OpenBSD: rthread_attr.c,v 1.12 2012/02/18 21:12:09 guenther Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -54,13 +54,7 @@ pthread_attr_init(pthread_attr_t *attrp)
attr = calloc(1, sizeof(*attr));
if (!attr)
return (errno);
- attr->stack_size = RTHREAD_STACK_SIZE_DEF;
- attr->guard_size = sysconf(_SC_PAGESIZE);
- attr->stack_size -= attr->guard_size;
- attr->detach_state = PTHREAD_CREATE_JOINABLE;
- attr->contention_scope = PTHREAD_SCOPE_SYSTEM;
- attr->sched_policy = SCHED_OTHER;
- attr->sched_inherit = PTHREAD_INHERIT_SCHED;
+ *attr = _rthread_attr_default;
*attrp = attr;
return (0);
@@ -86,11 +80,7 @@ pthread_attr_getguardsize(const pthread_attr_t *attrp, size_t *guardsize)
int
pthread_attr_setguardsize(pthread_attr_t *attrp, size_t guardsize)
{
- if ((*attrp)->guard_size != guardsize) {
- (*attrp)->stack_size += (*attrp)->guard_size;
- (*attrp)->guard_size = guardsize;
- (*attrp)->stack_size -= (*attrp)->guard_size;
- }
+ (*attrp)->guard_size = guardsize;
return 0;
}
@@ -121,7 +111,7 @@ pthread_attr_getstack(const pthread_attr_t *attrp, void **stackaddr,
size_t *stacksize)
{
*stackaddr = (*attrp)->stack_addr;
-	*stacksize = (*attrp)->stack_size + (*attrp)->guard_size;
+	*stacksize = (*attrp)->stack_size;
return (0);
}
@@ -131,10 +120,15 @@ pthread_attr_setstack(pthread_attr_t *attrp, void *stackaddr, size_t stacksize)
{
int n;
+ /*
+ * XXX Add an alignment test, on stackaddr for stack-grows-up
+ * archs or on stackaddr+stacksize for stack-grows-down archs
+ */
+ if (stacksize < PTHREAD_STACK_MIN)
+ return (EINVAL);
if ((n = pthread_attr_setstackaddr(attrp, stackaddr)))
return (n);
(*attrp)->stack_size = stacksize;
- (*attrp)->stack_size -= (*attrp)->guard_size;
return (0);
}
@@ -142,7 +136,7 @@ pthread_attr_setstack(pthread_attr_t *attrp, void *stackaddr, size_t stacksize)
int
pthread_attr_getstacksize(const pthread_attr_t *attrp, size_t *stacksize)
{
- *stacksize = (*attrp)->stack_size + (*attrp)->guard_size;
+ *stacksize = (*attrp)->stack_size;
return (0);
}
@@ -150,11 +144,10 @@ pthread_attr_getstacksize(const pthread_attr_t *attrp, size_t *stacksize)
int
pthread_attr_setstacksize(pthread_attr_t *attrp, size_t stacksize)
{
+ if (stacksize < PTHREAD_STACK_MIN ||
+ stacksize > ROUND_TO_PAGE(stacksize))
+ return (EINVAL);
(*attrp)->stack_size = stacksize;
- if ((*attrp)->stack_size > (*attrp)->guard_size)
- (*attrp)->stack_size -= (*attrp)->guard_size;
- else
- (*attrp)->stack_size = 0;
return (0);
}
@@ -170,11 +163,7 @@ pthread_attr_getstackaddr(const pthread_attr_t *attrp, void **stackaddr)
int
pthread_attr_setstackaddr(pthread_attr_t *attrp, void *stackaddr)
{
- size_t pgsz = sysconf(_SC_PAGESIZE);
-
- if (pgsz == (size_t)-1)
- return EINVAL;
- if ((uintptr_t)stackaddr & (pgsz - 1))
+ if (stackaddr == NULL || (uintptr_t)stackaddr & (_thread_pagesize - 1))
return EINVAL;
(*attrp)->stack_addr = stackaddr;
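
A hedged usage sketch of the tightened pthread_attr_setstack() rules (the 512 KB size and the mmap()-based allocation are just choices for the example): the address must be page aligned, the size must be at least PTHREAD_STACK_MIN, and since the library marks such stacks with guardsize == 1 it never unmaps them, so the application releases the region itself after joining.

#include <sys/mman.h>

#include <pthread.h>

static void *
work(void *arg)
{
	return (arg);
}

int
main(void)
{
	pthread_attr_t attr;
	pthread_t t;
	size_t size = 512 * 1024;		/* assumed >= PTHREAD_STACK_MIN */
	void *base;

	base = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);	/* page aligned by mmap() */
	if (base == MAP_FAILED)
		return (1);
	pthread_attr_init(&attr);
	if (pthread_attr_setstack(&attr, base, size) != 0)
		return (1);
	if (pthread_create(&t, &attr, work, NULL) != 0)
		return (1);
	pthread_join(t, NULL);
	munmap(base, size);			/* application-owned, ours to free */
	return (0);
}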
diff --git a/lib/librthread/rthread_np.c b/lib/librthread/rthread_np.c
index 69acbd458e6..728d0fc73b9 100644
--- a/lib/librthread/rthread_np.c
+++ b/lib/librthread/rthread_np.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_np.c,v 1.6 2011/12/28 04:59:31 guenther Exp $ */
+/* $OpenBSD: rthread_np.c,v 1.7 2012/02/18 21:12:09 guenther Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* Copyright (c) 2005 Otto Moerbeek <otto@openbsd.org>
@@ -50,7 +50,9 @@ pthread_main_np(void)
/*
* Return stack info from the given thread. Based upon the solaris
- * thr_stksegment function.
+ * thr_stksegment function. Note that the returned ss_sp member is the
+ * *top* of the allocated stack area, unlike in sigaltstack() where
+ * it's the bottom. You'll have to ask Sun what they were thinking...
*
* This function taken from the uthread library, with the following
* license:
@@ -59,7 +61,6 @@ int
pthread_stackseg_np(pthread_t thread, stack_t *sinfo)
{
char *base;
- size_t pgsz;
int ret;
struct rlimit rl;
@@ -75,18 +76,14 @@ pthread_stackseg_np(pthread_t thread, stack_t *sinfo)
} else if (thread == &_initial_thread) {
if (getrlimit(RLIMIT_STACK, &rl) != 0)
return (EAGAIN);
- pgsz = (size_t)sysconf(_SC_PAGESIZE);
- if (pgsz == (size_t)-1)
- return (EAGAIN);
+
/*
* round_page() stack rlim_cur and
* trunc_page() USRSTACK to be consistent with
* the way the kernel sets up the stack.
*/
- sinfo->ss_size = (size_t)rl.rlim_cur;
- sinfo->ss_size += (pgsz - 1);
- sinfo->ss_size &= ~(pgsz - 1);
- sinfo->ss_sp = (caddr_t) (USRSTACK & ~(pgsz - 1));
+ sinfo->ss_size = ROUND_TO_PAGE((size_t)rl.rlim_cur);
+ sinfo->ss_sp = (caddr_t) (USRSTACK & ~(_thread_pagesize - 1));
sinfo->ss_flags = 0;
ret = 0;
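
A short sketch of the interface this hunk documents, mainly to underline the comment: ss_sp is the top of the allocated stack, so on a stack-grows-down machine the usable range is [ss_sp - ss_size, ss_sp).

#include <signal.h>	/* stack_t */
#include <pthread.h>
#include <pthread_np.h>
#include <stdio.h>

int
main(void)
{
	stack_t ss;

	if (pthread_stackseg_np(pthread_self(), &ss) != 0)
		return (1);
	printf("top %p  size %zu  bottom %p\n", ss.ss_sp, ss.ss_size,
	    (void *)((char *)ss.ss_sp - ss.ss_size));
	return (0);
}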
diff --git a/lib/librthread/rthread_stack.c b/lib/librthread/rthread_stack.c
index 3ffae44c21d..e3c8ee3b6c0 100644
--- a/lib/librthread/rthread_stack.c
+++ b/lib/librthread/rthread_stack.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_stack.c,v 1.5 2011/11/06 11:48:59 guenther Exp $ */
+/* $OpenBSD: rthread_stack.c,v 1.6 2012/02/18 21:12:09 guenther Exp $ */
/* $snafu: rthread_stack.c,v 1.12 2005/01/11 02:45:28 marc Exp $ */
/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
@@ -10,80 +10,118 @@
#include <errno.h>
#include <pthread.h>
+#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include "rthread.h"
+/*
+ * Follow uthread's example and keep around stacks that have default
+ * attributes for possible reuse.
+ */
+static SLIST_HEAD(, stack) def_stacks = SLIST_HEAD_INITIALIZER(head);
+static _spinlock_lock_t def_stacks_lock = _SPINLOCK_UNLOCKED;
+
struct stack *
_rthread_alloc_stack(pthread_t thread)
{
struct stack *stack;
caddr_t base;
caddr_t guard;
- caddr_t start = NULL;
- size_t pgsz;
size_t size;
+ size_t guardsize;
+
+ /* if the request uses the defaults, try to reuse one */
+	if (thread->attr.stack_addr == NULL &&
+	    thread->attr.stack_size == RTHREAD_STACK_SIZE_DEF &&
+	    thread->attr.guard_size == _rthread_attr_default.guard_size) {
+		_spinlock(&def_stacks_lock);
+		stack = SLIST_FIRST(&def_stacks);
+		if (stack != NULL) {
+			SLIST_REMOVE_HEAD(&def_stacks, link);
+			_spinunlock(&def_stacks_lock);
+			return (stack);
+		}
+		_spinunlock(&def_stacks_lock);
+	}
- /* guard pages are forced to a multiple of the page size */
- pgsz = sysconf(_SC_PAGESIZE);
- if (pgsz == (size_t)-1)
- return NULL;
-
- /* figure out the actual requested size, including guard size */
- size = thread->attr.stack_size + thread->attr.guard_size;
- size += pgsz - 1;
- size &= ~(pgsz - 1);
-
- /*
- * Allocate some stack space unless an address was provided.
- * A provided address is ASSUMED to be correct with respect to
- * alignment constraints.
- */
- if (size > thread->attr.guard_size) {
- if (thread->attr.stack_addr)
- base = thread->attr.stack_addr;
- else {
- base = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_ANON, -1, 0);
- if (base == MAP_FAILED)
- return (NULL);
- }
- /* memory protect the guard region */
+ /* allocate the stack struct that we'll return */
+ stack = malloc(sizeof(*stack));
+ if (stack == NULL)
+ return (NULL);
+ /* If a stack address was provided, just fill in the details */
+ if (thread->attr.stack_addr != NULL) {
+ stack->base = thread->attr.stack_addr;
+ stack->len = thread->attr.stack_size;
#ifdef MACHINE_STACK_GROWS_UP
- guard = base + size - thread->attr.guard_size;
- start = base;
+ stack->sp = thread->attr.stack_addr;
#else
- guard = base;
- start = base + size;
+ stack->sp = thread->attr.stack_addr + thread->attr.stack_size;
#endif
- if (mprotect(guard, thread->attr.guard_size, PROT_NONE) == -1) {
- munmap(base, size);
- return (NULL);
- }
-
- /* wrap up the info in a struct stack and return it */
- stack = malloc(sizeof(*stack));
- if (!stack) {
- munmap(base, size);
- return (NULL);
- }
- stack->sp = start;
- stack->base = base;
- stack->guard = guard;
- stack->guardsize = thread->attr.guard_size;
- stack->len = size;
+ /*
+ * This impossible guardsize marks this stack as
+ * application allocated so it won't be freed or
+ * cached by _rthread_free_stack()
+ */
+ stack->guardsize = 1;
return (stack);
}
- errno = EINVAL;
- return (NULL);
+
+	/* round the requested sizes up to full pages */
+ size = ROUND_TO_PAGE(thread->attr.stack_size);
+ guardsize = ROUND_TO_PAGE(thread->attr.guard_size);
+
+ /* check for overflow */
+ if (size < thread->attr.stack_size ||
+ guardsize < thread->attr.guard_size ||
+ SIZE_MAX - size < guardsize) {
+ free(stack);
+ errno = EINVAL;
+ return (NULL);
+ }
+ size += guardsize;
+
+ /* actually allocate the real stack */
+ base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_ANON, -1, 0);
+ if (base == MAP_FAILED) {
+ free(stack);
+ return (NULL);
+ }
+
+#ifdef MACHINE_STACK_GROWS_UP
+ guard = base + size - guardsize;
+ stack->sp = base;
+#else
+ guard = base;
+ stack->sp = base + size;
+#endif
+
+ /* memory protect the guard region */
+ if (guardsize != 0 && mprotect(guard, guardsize, PROT_NONE) == -1) {
+ munmap(base, size);
+ free(stack);
+ return (NULL);
+ }
+
+ stack->base = base;
+ stack->guardsize = guardsize;
+ stack->len = size;
+ return (stack);
}
void
_rthread_free_stack(struct stack *stack)
{
- munmap(stack->base, stack->len);
- free(stack);
+	if (stack->len == RTHREAD_STACK_SIZE_DEF + stack->guardsize &&
+	    stack->guardsize == _rthread_attr_default.guard_size) {
+ _spinlock(&def_stacks_lock);
+ SLIST_INSERT_HEAD(&def_stacks, stack, link);
+ _spinunlock(&def_stacks_lock);
+ } else {
+ /* unmap the storage unless it was application allocated */
+ if (stack->guardsize != 1)
+ munmap(stack->base, stack->len);
+ free(stack);
+ }
}
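
Finally, a trivial driver (a sketch, not from the diff) that exercises the new cache: with default attributes every join returns the default-sized stack to def_stacks and the next create pops it again, so repeated create/join cycles avoid a fresh mmap()/mprotect()/munmap() round trip per thread. The iteration count is arbitrary.

#include <pthread.h>

static void *
noop(void *arg)
{
	return (arg);
}

int
main(void)
{
	pthread_t t;
	int i;

	for (i = 0; i < 1000; i++) {
		if (pthread_create(&t, NULL, noop, NULL) != 0)
			return (1);
		pthread_join(t, NULL);
	}
	return (0);
}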