author		Ted Unangst <tedu@cvs.openbsd.org>	2014-08-18 14:34:59 +0000
committer	Ted Unangst <tedu@cvs.openbsd.org>	2014-08-18 14:34:59 +0000
commit		f848bb6835c092c147bb0cf88c0a4095fdcfe841 (patch)
tree		98b9f282f05b2ce323323c7923642433fe61aaca /lib/libc/stdlib/malloc.c
parent		2b22d5967f83c65ba139f8ab889009b06d22ded0 (diff)
a small tweak to improve malloc in multithreaded programs. we don't need to hold the malloc lock across mmap syscalls in all cases. dropping it allows another thread to access the existing chunk cache if necessary. could be improved to be a bit more aggressive, but i've been testing this simple diff for some time now with good results.
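
For readers skimming the diff below: the new KERNENTER()/KERNEXIT() macros bracket the mmap(2) calls so the allocator releases its lock for the duration of the syscall, letting other threads enter malloc and use the region cache meanwhile. Here is a minimal standalone sketch of that pattern; the mutex, counter, and function names are illustrative stand-ins for the libc internals (_MALLOC_LOCK/_MALLOC_UNLOCK, malloc_active), not the actual implementation.

/*
 * Illustrative sketch only -- not the libc implementation.
 */
#include <pthread.h>
#include <stddef.h>
#include <sys/mman.h>

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
static int alloc_active;	/* nonzero while a thread is inside the allocator */

static void *
alloc_pages(size_t sz)
{
	void *p;

	pthread_mutex_lock(&alloc_lock);
	alloc_active++;

	/* ... try to satisfy the request from the region cache ... */

	/* Drop the lock across the (possibly slow) syscall. */
	alloc_active--;
	pthread_mutex_unlock(&alloc_lock);

	p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	/* Retake the lock before touching shared allocator state. */
	pthread_mutex_lock(&alloc_lock);
	alloc_active++;

	/* ... update accounting under the lock ... */

	alloc_active--;
	pthread_mutex_unlock(&alloc_lock);
	return (p == MAP_FAILED) ? NULL : p;
}

Nothing in the unlocked window touches shared allocator state; the syscall's result is only folded back into the bookkeeping after the lock is retaken.
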
Diffstat (limited to 'lib/libc/stdlib/malloc.c')
-rw-r--r--	lib/libc/stdlib/malloc.c	28
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/lib/libc/stdlib/malloc.c b/lib/libc/stdlib/malloc.c
index a800ea17bef..165ad700317 100644
--- a/lib/libc/stdlib/malloc.c
+++ b/lib/libc/stdlib/malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: malloc.c,v 1.170 2014/07/09 19:11:00 tedu Exp $ */
+/* $OpenBSD: malloc.c,v 1.171 2014/08/18 14:34:58 tedu Exp $ */
 /*
  * Copyright (c) 2008, 2010, 2011 Otto Moerbeek <otto@drijf.net>
  * Copyright (c) 2012 Matthew Dempsky <matthew@openbsd.org>
@@ -93,6 +93,15 @@
 #define MQUERY(a, sz) mquery((a), (size_t)(sz), PROT_READ | PROT_WRITE, \
     MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, (off_t)0)
 
+#define KERNENTER() if (__isthreaded) do { \
+	malloc_active--; \
+	_MALLOC_UNLOCK(); \
+} while (0)
+#define KERNEXIT() if (__isthreaded) do { \
+	_MALLOC_LOCK(); \
+	malloc_active++; \
+} while (0)
+
 struct region_info {
 	void *p;		/* page; low bits used to mark chunks */
 	uintptr_t size;		/* size for pages, or chunk_info pointer */
@@ -312,7 +321,8 @@ unmap(struct dir_info *d, void *p, size_t sz)
 	}
 
 	if (psz > mopts.malloc_cache) {
-		if (munmap(p, sz))
+		i = munmap(p, sz);
+		if (i)
 			wrterror("munmap", p);
 		STATS_SUB(d->malloc_used, sz);
 		return;
@@ -396,7 +406,9 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 		return MAP_FAILED;
 	}
 	if (psz > d->free_regions_size) {
+		KERNENTER();
 		p = MMAP(sz);
+		KERNEXIT();
 		if (p != MAP_FAILED)
 			STATS_ADD(d->malloc_used, sz);
 		/* zero fill not needed */
@@ -408,13 +420,13 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 		if (r->p != NULL) {
 			if (r->size == psz) {
 				p = r->p;
+				r->p = NULL;
+				r->size = 0;
+				d->free_regions_size -= psz;
 				if (mopts.malloc_freeunmap)
 					mprotect(p, sz, PROT_READ | PROT_WRITE);
 				if (mopts.malloc_hint)
 					madvise(p, sz, MADV_NORMAL);
-				r->p = NULL;
-				r->size = 0;
-				d->free_regions_size -= psz;
 				if (zero_fill)
 					memset(p, 0, sz);
 				else if (mopts.malloc_junk == 2 &&
@@ -440,11 +452,13 @@ map(struct dir_info *d, size_t sz, int zero_fill)
 			memset(p, SOME_FREEJUNK, sz);
 		return p;
 	}
+	if (d->free_regions_size > mopts.malloc_cache)
+		wrterror("malloc cache", NULL);
+	KERNENTER();
 	p = MMAP(sz);
+	KERNEXIT();
 	if (p != MAP_FAILED)
 		STATS_ADD(d->malloc_used, sz);
-	if (d->free_regions_size > mopts.malloc_cache)
-		wrterror("malloc cache", NULL);
 	/* zero fill not needed */
 	return p;
 }
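
A closing observation on the last hunk: the wrterror("malloc cache") sanity check moves above the MMAP() call because KERNENTER() releases the lock, after which another thread may validly grow or shrink the region cache; d->free_regions_size is only meaningful to test while the lock is still held. A reduced illustration of that ordering, with simplified stand-ins (names and types invented here) for the real macros and state:

/* Reduced illustration; names are stand-ins, not libc's. */
#include <pthread.h>
#include <stddef.h>
#include <sys/mman.h>

extern pthread_mutex_t lock;		/* stands in for _MALLOC_LOCK/UNLOCK */
extern size_t cached, cache_limit;	/* stands in for d->free_regions_size */

void *
grab_pages(size_t sz)
{
	void *p;

	/*
	 * Checked while the lock is held: no other thread can be
	 * mutating the cache accounting yet.
	 */
	if (cached > cache_limit)
		return NULL;			/* real code calls wrterror() */

	pthread_mutex_unlock(&lock);		/* like KERNENTER() */
	p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	pthread_mutex_lock(&lock);		/* like KERNEXIT() */

	/*
	 * Checking `cached` down here would race: another thread may
	 * have legitimately changed the cache during the mmap().
	 */
	return (p == MAP_FAILED) ? NULL : p;
}
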