author     Kurt Miller <kurt@cvs.openbsd.org>    2019-01-25 18:13:14 +0000
committer  Kurt Miller <kurt@cvs.openbsd.org>    2019-01-25 18:13:14 +0000
commit     4d6ab29a228719c3bca357312d4cad15631e2a5d (patch)
tree       481376c5bbaad4c4ad1561460b34f6b3caaa7062 /libexec
parent     6f6f926dc1b333c4059f8f121c82e917c3488453 (diff)
On i386, ensure that the first PT_LOAD segment is placed below the W^X line unless it is writable. lld places read-only sections below the gap, so this is needed to retain W^X with lld. Note, however, that the read-only sections below the W^X line are now executable on pre-NX machines and are a possible source of gadgets. This is a change from GNU ld, where read-only sections were ordered above the W^X line and not executable.

okay drahn@ kettenis@ deraadt@
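The core of the change is the protection passed to the placement query for the lowest segment. Below is a minimal stand-alone sketch of that hint; the public mquery(2) system call stands in for ld.so's internal _dl_mquery(), and the segment size and protection are made-up values for illustration, not taken from the commit.

/*
 * Sketch only: the segment size and protection below are made up, and the
 * public mquery(2) syscall stands in for ld.so's internal _dl_mquery().
 */
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        size_t size = 4 * (size_t)sysconf(_SC_PAGESIZE);
        int prot = PROT_READ;   /* hypothetical non-writable first PT_LOAD */
        int exec;
        void *res;

        /*
         * As in the patch: add PROT_EXEC to the placement query unless the
         * segment is writable, so the proposed address falls below the
         * i386 W^X line.
         */
        exec = (prot & PROT_WRITE) ? 0 : PROT_EXEC;

        res = mquery(NULL, size, prot | exec, MAP_PRIVATE | MAP_ANON, -1, 0);
        if (res == MAP_FAILED) {
                perror("mquery");
                return 1;
        }
        printf("proposed base for the first segment: %p\n", res);
        return 0;
}

Since every other segment is then mapped at a fixed offset from this base, keeping the base below the line keeps the executable text below it as well.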
Diffstat (limited to 'libexec')
-rw-r--r--   libexec/ld.so/library_mquery.c   51
1 file changed, 20 insertions(+), 31 deletions(-)
diff --git a/libexec/ld.so/library_mquery.c b/libexec/ld.so/library_mquery.c
index a01d43af83e..2eca7e76246 100644
--- a/libexec/ld.so/library_mquery.c
+++ b/libexec/ld.so/library_mquery.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: library_mquery.c,v 1.58 2017/12/08 05:25:20 deraadt Exp $ */
+/* $OpenBSD: library_mquery.c,v 1.59 2019/01/25 18:13:13 kurt Exp $ */
/*
* Copyright (c) 2002 Dale Rahn
@@ -235,8 +235,6 @@ retry:
void *res;
flags = MAP_PRIVATE;
- if (LOFF + ld->moff != 0)
- flags |= MAP_FIXED | __MAP_NOREPLACE;
if (ld->foff < 0) {
fd = -1;
@@ -247,42 +245,33 @@ retry:
foff = ld->foff;
}
- res = _dl_mmap((void *)(LOFF + ld->moff), ROUND_PG(ld->size),
- ld->prot, flags, fd, foff);
- if (_dl_mmap_error(res)) {
+ if (ld == lowld) {
/*
- * The mapping we wanted isn't free, so we do an
- * mquery without MAP_FIXED to get the next free
- * mapping, adjust the base mapping address to match
- * this free mapping and restart the process again.
- *
- * XXX - we need some kind of boundary condition
- * here, or fix mquery to not run into the stack
+ * Add PROT_EXEC to force the first allocation in
+ * EXEC region unless it is writable.
*/
+ int exec = (ld->prot & PROT_WRITE) ? 0 : PROT_EXEC;
res = _dl_mquery((void *)(LOFF + ld->moff),
- ROUND_PG(ld->size), ld->prot,
- flags & ~(MAP_FIXED | __MAP_NOREPLACE), fd, foff);
+ ROUND_PG(ld->size), ld->prot | exec, flags,
+ fd, foff);
+ if (_dl_mmap_error(res))
+ goto fail;
+ lowld->start = res;
+ }
- /*
- * If ld == lowld, then ld->start is just a hint and
- * thus shouldn't be unmapped.
- */
- ld->start = NULL;
+ res = _dl_mmap((void *)(LOFF + ld->moff), ROUND_PG(ld->size),
+ ld->prot, flags | MAP_FIXED | __MAP_NOREPLACE, fd, foff);
+
+ if (_dl_mmap_error(res)) {
+ struct load_list *ll;
/* Unmap any mappings that we did get in. */
- for (ld = lowld; ld != NULL; ld = ld->next) {
- if (ld->start == NULL)
- break;
- _dl_munmap(ld->start, ROUND_PG(ld->size));
- ld->start = NULL;
+ for (ll = lowld; ll != NULL && ll != ld;
+ ll = ll->next) {
+ _dl_munmap(ll->start, ROUND_PG(ll->size));
}
- /* if the mquery failed, give up */
- if (_dl_mmap_error(res))
- goto fail;
-
- /* otherwise, reset the start of the base mapping */
- lowld->start = res - ld->moff + lowld->moff;
+ lowld->start += ROUND_PG(ld->size);
goto retry;
}
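Read as a whole, the patched loop now works like this: query a placement for the lowest segment (with the PROT_EXEC hint when it is not writable), map every segment at its fixed offset from that base with MAP_FIXED | __MAP_NOREPLACE, and on a collision unmap whatever was already mapped, bump the base past the failed segment and retry. The following self-contained sketch simulates that strategy using the public mmap(2)/mquery(2)/munmap(2) interfaces instead of the _dl_* wrappers, with two invented anonymous segments in place of a real load list, and assumes the __MAP_NOREPLACE flag ld.so uses is visible from <sys/mman.h>; it illustrates the approach, not the dynamic linker's actual code.

#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

struct seg {
        size_t  off;            /* offset from the image base */
        size_t  size;
        int     prot;
};

int
main(void)
{
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        /* hypothetical load list: read-only text, then writable data */
        struct seg segs[2] = {
                { 0,  pg, PROT_READ | PROT_EXEC },
                { pg, pg, PROT_READ | PROT_WRITE },
        };
        char *base = NULL;
        void *res;
        int exec, i, j;

retry:
        /*
         * Placement query for the lowest segment.  A non-writable first
         * segment is queried with PROT_EXEC so the proposal lands below
         * the i386 W^X line, which is the point of the commit.
         */
        exec = (segs[0].prot & PROT_WRITE) ? 0 : PROT_EXEC;
        res = mquery(base, segs[0].size, segs[0].prot | exec,
            MAP_PRIVATE | MAP_ANON, -1, 0);
        if (res == MAP_FAILED) {
                perror("mquery");
                return 1;
        }
        base = res;

        for (i = 0; i < 2; i++) {
                res = mmap(base + segs[i].off, segs[i].size, segs[i].prot,
                    MAP_PRIVATE | MAP_ANON | MAP_FIXED | __MAP_NOREPLACE,
                    -1, 0);
                if (res == MAP_FAILED) {
                        /*
                         * Undo the mappings we did get, slide the base up
                         * past the failed segment and try again.
                         */
                        for (j = 0; j < i; j++)
                                munmap(base + segs[j].off, segs[j].size);
                        base += segs[i].size;
                        goto retry;
                }
        }

        printf("segments mapped at %p\n", (void *)base);
        return 0;
}

Compared with the previous logic, the retry no longer tracks per-segment start pointers: whatever was mapped before the collision is torn down, the base is slid up by the page-rounded size of the segment that failed, and the placement query is redone from there.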