/* $OpenBSD: uthread_stack.c,v 1.11 2008/12/18 09:30:32 guenther Exp $ */
/*
* Copyright 1999, David Leonard. All rights reserved.
* <insert BSD-style license&disclaimer>
*/
/*
* Thread stack allocation.
*
* If stack pointers grow down, towards the beginning of stack storage,
* the first page of the storage is protected using mprotect() so as
* to generate a SIGSEGV if a thread overflows its stack. Similarly,
* for stacks that grow up, the last page of the storage is protected.
*/
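/*
 * Illustrative sketch (editorial, not part of the original source): for a
 * stack that grows down, the mmap()ed storage described above is laid out as
 *
 *	storage:  [ red zone (guardsize) | usable stack (size) ]
 *	          ^redzone               ^base
 *
 * and the red zone is armed roughly like
 *
 *	stor = mmap(NULL, size + guardsize, PROT_READ|PROT_WRITE,
 *	    MAP_ANON|MAP_PRIVATE, -1, 0);
 *	mprotect(stor, guardsize, PROT_NONE);
 *
 * so that the first access below the usable stack delivers SIGSEGV.  For a
 * stack that grows up, the guard page(s) sit at the end of the storage
 * instead: mprotect(stor + size, guardsize, PROT_NONE).  The name "stor"
 * is illustrative only.
 */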
#include <stddef.h>
#include <stdint.h>	/* SIZE_MAX, used in the overflow check below */
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/user.h>
#include <sys/mman.h>
#include <pthread.h>
#include <pthread_np.h>
#include "pthread_private.h"
struct stack *
_thread_stack_alloc(void *base, size_t size, size_t guardsize)
{
	struct stack *stack;
	size_t nbpg = (size_t)getpagesize();

	/* Maintain a stack of default-sized stacks that we can re-use. */
	if (base == NULL && size == PTHREAD_STACK_DEFAULT
	    && guardsize == pthread_attr_default.guardsize_attr) {
		if (pthread_mutex_lock(&_gc_mutex) != 0)
			PANIC("Cannot lock gc mutex");
		if ((stack = SLIST_FIRST(&_stackq)) != NULL) {
			SLIST_REMOVE_HEAD(&_stackq, qe);
			if (pthread_mutex_unlock(&_gc_mutex) != 0)
				PANIC("Cannot unlock gc mutex");
			return stack;
		}
		if (pthread_mutex_unlock(&_gc_mutex) != 0)
			PANIC("Cannot unlock gc mutex");
	}
	/* Allocate some storage to hold information about the stack: */
	stack = (struct stack *)malloc(sizeof (struct stack));
	if (stack == NULL)
		return NULL;

	if (base != NULL) {
		/* Use the user's storage */
		stack->base = base;
		stack->size = size;
		stack->guardsize = 0;
		stack->redzone = NULL;
		stack->storage = NULL;
		return stack;
	}
	/* Round sizes up to the closest page boundary. */
	size = ((size + (nbpg - 1)) / nbpg) * nbpg;
	guardsize = ((guardsize + (nbpg - 1)) / nbpg) * nbpg;
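	/*
	 * (Editorial example, values illustrative only: with nbpg == 4096,
	 * a requested size of 5000 bytes rounds up to 8192, and a guardsize
	 * of 1 byte rounds up to a full 4096-byte page.)
	 */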
	/* Reject overflow of size + guardsize: */
	if (SIZE_MAX - size < guardsize) {
		free(stack);
		return NULL;
	}
	/* mmap storage for the stack, possibly with page(s) for redzone */
	stack->storage = mmap(NULL, size + guardsize, PROT_READ|PROT_WRITE,
	    MAP_ANON|MAP_PRIVATE, -1, 0);
	if (stack->storage == MAP_FAILED) {
		free(stack);
		return NULL;
	}
	/*
	 * Compute the location of the red zone.
	 */
#if defined(MACHINE_STACK_GROWS_UP)
	/* Red zone is the last page of the storage: */
	stack->redzone = (caddr_t)stack->storage + (ptrdiff_t)size;
	stack->base = stack->storage;
	stack->size = size;
	stack->guardsize = guardsize;
#else
	/* Red zone is the first page of the storage: */
	stack->redzone = stack->storage;
	stack->base = (caddr_t)stack->redzone + (ptrdiff_t)guardsize;
	stack->size = size;
	stack->guardsize = guardsize;
#endif

	if (!guardsize)
		stack->redzone = NULL;
	else if (mprotect(stack->redzone, guardsize, PROT_NONE) == -1)
		PANIC("Cannot protect stack red zone");

	return stack;
}
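
/*
 * Usage sketch (editorial, not part of the original source): a caller in
 * the thread-creation path would typically pair the two functions roughly
 * like
 *
 *	struct stack *st;
 *
 *	st = _thread_stack_alloc(attr->stackaddr_attr,
 *	    attr->stacksize_attr, attr->guardsize_attr);
 *	if (st == NULL)
 *		return (EAGAIN);
 *	...
 *	_thread_stack_free(st);		(once the thread has exited)
 *
 * The attribute field names above are assumptions modeled on the
 * pthread_attr_default.guardsize_attr field referenced in this file.
 */
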
void
_thread_stack_free(struct stack *stack)
{
	/* Cache allocated stacks of default size: */
	if (stack->storage != NULL && stack->size == PTHREAD_STACK_DEFAULT
	    && stack->guardsize == pthread_attr_default.guardsize_attr)
		SLIST_INSERT_HEAD(&_stackq, stack, qe);
	else {
		/* unmap storage: */
		if (stack->storage)
			munmap(stack->storage, stack->size + stack->guardsize);

		/* Free stack information storage: */
		free(stack);
	}
}