1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
|
/* $OpenBSD: thread_private.h,v 1.29 2016/10/15 18:24:40 guenther Exp $ */
/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
#ifndef _THREAD_PRIVATE_H_
#define _THREAD_PRIVATE_H_
#include <stdio.h> /* for FILE and __isthreaded */
/* number of locks striping the malloc subsystem; see _MALLOC_LOCK(n) below */
#define _MALLOC_MUTEXES 4
/* set up malloc's locking state; NOTE(review): the meaning of the int
 * argument is defined by the malloc implementation, not visible here */
void _malloc_init(int);
#ifdef __LIBC__
/* libc-internal symbol-visibility annotation for _malloc_init */
PROTO_NORMAL(_malloc_init);
#endif /* __LIBC__ */
/*
* The callbacks needed by libc to handle the threaded case.
* NOTE: Bump the version when you change the struct contents!
*
* tc_canceled:
* If not NULL, what to do when canceled (otherwise _exit(0))
*
* tc_flockfile, tc_ftrylockfile, and tc_funlockfile:
* If not NULL, these implement the flockfile() family.
* XXX In theory, you should be able to lock a FILE before
* XXX loading libpthread and have that be a real lock on it,
* XXX but that doesn't work without the libc base version
* XXX tracking the recursion count.
*
* tc_malloc_lock and tc_malloc_unlock:
* tc_atexit_lock and tc_atexit_unlock:
* tc_atfork_lock and tc_atfork_unlock:
* tc_arc4_lock and tc_arc4_unlock:
* The locks used by the malloc, atexit, atfork, and arc4 subsystems.
* These have to be ordered specially in the fork/vfork wrappers
* and may be implemented differently than the general mutexes
* in the callbacks below.
*
* tc_mutex_lock and tc_mutex_unlock:
* Lock and unlock the given mutex. If the given mutex is NULL
* a mutex is allocated and initialized automatically.
*
* tc_mutex_destroy:
* Destroy/deallocate the given mutex.
*
* tc_tag_lock and tc_tag_unlock:
* Lock and unlock the mutex associated with the given tag.
* If the given tag is NULL a tag is allocated and initialized
* automatically.
*
* tc_tag_storage:
* Returns a pointer to per-thread instance of data associated
* with the given tag. If the given tag is NULL a tag is
* allocated and initialized automatically.
*
* tc_fork, tc_vfork:
* If not NULL, they are called instead of the syscall stub, so that
* the thread library can do necessary locking and reinitialization.
*
*
* If <machine/tcb.h> doesn't define TCB_GET(), then locating the TCB in a
* threaded process requires a syscall (__get_tcb(2)) which is too much
* overhead for single-threaded processes. For those archs, there are two
* additional callbacks, though they are placed first in the struct for
* convenience in ASM:
*
* tc_errnoptr:
* Returns the address of the thread's errno.
*
* tc_tcb:
* Returns the address of the thread's TCB.
*/
/*
 * Callback vector installed by the thread library; full per-member
 * semantics are in the block comment above.  Member order is ABI:
 * the leading members are indexed directly from ASM on archs without
 * TCB_GET(), so do not reorder fields (and bump the version on any
 * contents change, per the note above).
 */
struct thread_callbacks {
int *(*tc_errnoptr)(void); /* MUST BE FIRST */
void *(*tc_tcb)(void); /* address of the thread's TCB */
__dead void (*tc_canceled)(void); /* action when canceled; NULL => _exit(0) */
void (*tc_flockfile)(FILE *); /* flockfile() family, or NULL */
int (*tc_ftrylockfile)(FILE *);
void (*tc_funlockfile)(FILE *);
void (*tc_malloc_lock)(int); /* malloc subsystem locks (indexed) */
void (*tc_malloc_unlock)(int);
void (*tc_atexit_lock)(void); /* atexit subsystem lock */
void (*tc_atexit_unlock)(void);
void (*tc_atfork_lock)(void); /* atfork subsystem lock */
void (*tc_atfork_unlock)(void);
void (*tc_arc4_lock)(void); /* arc4random subsystem lock */
void (*tc_arc4_unlock)(void);
void (*tc_mutex_lock)(void **); /* NULL mutex is allocated on first use */
void (*tc_mutex_unlock)(void **);
void (*tc_mutex_destroy)(void **);
void (*tc_tag_lock)(void **); /* NULL tag is allocated on first use */
void (*tc_tag_unlock)(void **);
void *(*tc_tag_storage)(void **, void *, size_t, void *); /* per-thread data */
__pid_t (*tc_fork)(void); /* replace the fork/vfork syscall stubs if set */
__pid_t (*tc_vfork)(void);
};
__BEGIN_PUBLIC_DECLS
/*
 * Install the callback set used by libc.  _len is the caller's
 * sizeof(struct thread_callbacks) -- presumably so libc can reject a
 * mismatched struct version (see the "bump the version" note above);
 * confirm against the implementation.
 */
void _thread_set_callbacks(const struct thread_callbacks *_cb, size_t _len);
__END_PUBLIC_DECLS
#ifdef __LIBC__
__BEGIN_HIDDEN_DECLS
/* the currently installed callback set (members NULL until installed) */
extern struct thread_callbacks _thread_cb;
__END_HIDDEN_DECLS
#endif /* __LIBC__ */
/*
 * Helper macro to make unique names in the thread namespace:
 * __THREAD_NAME(foo) expands to the identifier _thread_tagname_foo.
 */
#define __THREAD_NAME(name) __CONCAT(_thread_tagname_,name)
/*
 * Resolver code is special cased in that it uses global keys.
 */
extern void *__THREAD_NAME(_res);
extern void *__THREAD_NAME(_res_ext);
extern void *__THREAD_NAME(serv_mutex);
/*
 * Macros used in libc to access thread mutex, keys, and per thread storage.
 * _THREAD_PRIVATE_KEY and _THREAD_PRIVATE_MUTEX are different macros for
 * historical reasons. They do the same thing, define a static variable
 * keyed by 'name' that identifies a mutex and a key to identify per thread
 * data.  The tag starts out NULL; the tc_tag_*() callbacks allocate and
 * initialize it on first use (see the struct comment above).
 */
#define _THREAD_PRIVATE_KEY(name) \
static void *__THREAD_NAME(name)
#define _THREAD_PRIVATE_MUTEX(name) \
static void *__THREAD_NAME(name)
#ifndef __LIBC__ /* building something other than libc: locking is a no-op */
#define _THREAD_PRIVATE_MUTEX_LOCK(name) do {} while (0)
#define _THREAD_PRIVATE_MUTEX_UNLOCK(name) do {} while (0)
/* per-thread storage degenerates to the single static instance */
#define _THREAD_PRIVATE(keyname, storage, error) &(storage)
#define _MUTEX_LOCK(mutex) do {} while (0)
#define _MUTEX_UNLOCK(mutex) do {} while (0)
#define _MUTEX_DESTROY(mutex) do {} while (0)
#define _MALLOC_LOCK(n) do {} while (0)
#define _MALLOC_UNLOCK(n) do {} while (0)
#define _ATEXIT_LOCK() do {} while (0)
#define _ATEXIT_UNLOCK() do {} while (0)
#define _ATFORK_LOCK() do {} while (0)
#define _ATFORK_UNLOCK() do {} while (0)
#define _ARC4_LOCK() do {} while (0)
#define _ARC4_UNLOCK() do {} while (0)
#else /* building libc */
/*
 * Lock/unlock the tag-identified mutex for 'name'.  These gate on the
 * callback pointer itself rather than __isthreaded (unlike _MUTEX_LOCK
 * below); NOTE(review): presumably so they remain correct while the
 * thread library is only partially initialized -- confirm.
 */
#define _THREAD_PRIVATE_MUTEX_LOCK(name) \
do { \
if (_thread_cb.tc_tag_lock != NULL) \
_thread_cb.tc_tag_lock(&(__THREAD_NAME(name))); \
} while (0)
#define _THREAD_PRIVATE_MUTEX_UNLOCK(name) \
do { \
if (_thread_cb.tc_tag_unlock != NULL) \
_thread_cb.tc_tag_unlock(&(__THREAD_NAME(name))); \
} while (0)
/*
 * Per-thread storage for 'keyname': yields &storage when no thread
 * library is loaded, otherwise this thread's instance via tc_tag_storage
 * (which copies from 'storage' and returns 'error' semantics per the
 * callback contract above).
 */
#define _THREAD_PRIVATE(keyname, storage, error) \
(_thread_cb.tc_tag_storage == NULL ? &(storage) : \
_thread_cb.tc_tag_storage(&(__THREAD_NAME(keyname)), \
&(storage), sizeof(storage), error))
/*
 * Macros used in libc to access mutexes.  All are no-ops until the
 * process goes threaded (__isthreaded set); a NULL *mutex is allocated
 * and initialized by the callback on first lock (see above).
 */
#define _MUTEX_LOCK(mutex) \
do { \
if (__isthreaded) \
_thread_cb.tc_mutex_lock(mutex); \
} while (0)
#define _MUTEX_UNLOCK(mutex) \
do { \
if (__isthreaded) \
_thread_cb.tc_mutex_unlock(mutex); \
} while (0)
#define _MUTEX_DESTROY(mutex) \
do { \
if (__isthreaded) \
_thread_cb.tc_mutex_destroy(mutex); \
} while (0)
/*
 * malloc lock/unlock; 'n' selects one of the _MALLOC_MUTEXES locks.
 * No-ops in single-threaded processes.
 */
#define _MALLOC_LOCK(n) \
do { \
if (__isthreaded) \
_thread_cb.tc_malloc_lock(n); \
} while (0)
#define _MALLOC_UNLOCK(n) \
do { \
if (__isthreaded) \
_thread_cb.tc_malloc_unlock(n); \
} while (0)
/*
 * Subsystem locks for atexit, atfork, and arc4random.  No-ops in
 * single-threaded processes; ordering relative to each other matters
 * in the fork/vfork wrappers (see the struct comment above).
 */
#define _ATEXIT_LOCK() \
do { \
if (__isthreaded) \
_thread_cb.tc_atexit_lock(); \
} while (0)
#define _ATEXIT_UNLOCK() \
do { \
if (__isthreaded) \
_thread_cb.tc_atexit_unlock(); \
} while (0)
#define _ATFORK_LOCK() \
do { \
if (__isthreaded) \
_thread_cb.tc_atfork_lock(); \
} while (0)
#define _ATFORK_UNLOCK() \
do { \
if (__isthreaded) \
_thread_cb.tc_atfork_unlock(); \
} while (0)
#define _ARC4_LOCK() \
do { \
if (__isthreaded) \
_thread_cb.tc_arc4_lock(); \
} while (0)
#define _ARC4_UNLOCK() \
do { \
if (__isthreaded) \
_thread_cb.tc_arc4_unlock(); \
} while (0)
#endif /* __LIBC__ */
#endif /* _THREAD_PRIVATE_H_ */
|