author		Theo de Raadt <deraadt@cvs.openbsd.org>	2019-11-29 06:34:47 +0000
committer	Theo de Raadt <deraadt@cvs.openbsd.org>	2019-11-29 06:34:47 +0000
commit		f67b268725d23fd3229f73b136ba575514edf1a1
tree		707fd46fa1309120b07da0ff3342c12297961096 /usr.sbin/procmap
parent		6760a6095c934a222278e6d1c4e2209b9b96f736
Repurpose the "syscalls must be on a writeable page" mechanism to
enforce a new policy: system calls must be in pre-registered regions.
We have discussed more strict checks than this, but none satisfy the
cost/benefit based upon our understanding of attack methods; anyway,
let's see what the next iteration looks like.

This is intended to harden (translation: attackers must put extra effort
into attacking) against a mixture of W^X failures and JIT bugs which
allow syscall misinterpretation, especially in environments with
polymorphic/variable-sized instructions. It fits in a bit with the
libc/libcrypto/ld.so random relink on boot and the no-restart-at-crash
behaviour, particularly for remote problems; it is less effective once
on-host, since the libraries can be read.

For static executables, the kernel registers the main program's
PIE-mapped exec section as valid, as well as the randomly-placed
sigtramp page. For dynamic executables, the ELF ld.so's exec segment is
also labelled valid; ld.so then has enough information to register
libc's exec section as valid via the call-once msyscall(2).

For dynamic binaries, we continue to permit the main program's exec
segment because "go" (and potentially a few other applications) have
embedded system calls in the main program. Hopefully at least go gets
fixed soon. We declare the concept of embedded syscalls a bad idea for
numerous reasons: we notice the ecosystem has many
static-syscall-in-main-binary programs which are dynamically linked
against libraries which in turn use libc, which contains another set of
syscall stubs. We've been concerned about adding even one additional
syscall entry point... but go's approach tends to double the
entry-point attack surface.

This was started at a nano-hackathon in Bob Beck's basement 2 weeks ago
during a long discussion with mortimer, trying to hide from the SSL
scream-conversations, and finished in more comfortable circumstances
next to a wood-stove at Elk Lakes cabin with UVM scream-conversations.

ok guenther kettenis mortimer; lots of feedback from others;
conversations about go with jsing tb sthen
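To illustrate the registration step described above, here is a minimal
sketch of how a dynamic linker could mark libc's exec segment as a valid
syscall region. The msyscall() prototype (address/length, call-once) is
taken from the description above; the explicit declaration and the
find_libc_text() helper are assumptions for illustration only, not the
actual ld.so code.

#include <stddef.h>

/*
 * Assumed prototype of the call-once registration system call
 * described in the commit message above.
 */
int	msyscall(void *addr, size_t len);

/*
 * Hypothetical helper: locate the start and length of libc's
 * executable (text) segment, e.g. by walking its program headers.
 */
int	find_libc_text(void **addrp, size_t *lenp);

/*
 * Register libc's exec segment as a region allowed to issue system
 * calls.  After this single call, only pre-registered regions (plus
 * the kernel-marked main program, ld.so and sigtramp mappings) may
 * make syscalls; a second registration attempt would fail.
 */
static int
register_libc_syscall_region(void)
{
	void	*addr;
	size_t	 len;

	if (find_libc_text(&addr, &len) == -1)
		return (-1);

	return (msyscall(addr, len));
}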
Diffstat (limited to 'usr.sbin/procmap')
-rw-r--r--	usr.sbin/procmap/procmap.c	28
1 file changed, 19 insertions, 9 deletions
diff --git a/usr.sbin/procmap/procmap.c b/usr.sbin/procmap/procmap.c
index aa6954e918c..02dcabc561e 100644
--- a/usr.sbin/procmap/procmap.c
+++ b/usr.sbin/procmap/procmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: procmap.c,v 1.65 2019/02/05 02:17:32 deraadt Exp $ */
+/* $OpenBSD: procmap.c,v 1.66 2019/11/29 06:34:46 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.1 2002/09/01 20:32:44 atatat Exp $ */
/*
@@ -483,11 +483,11 @@ process_map(kvm_t *kd, pid_t pid, struct kinfo_proc *proc, struct sum *sum)
/* headers */
#ifdef DISABLED_HEADERS
if (print_map)
- printf("%-*s %-*s rwx RWX CPY NCP I W A\n",
+ printf("%-*s %-*s rwxSe RWX CPY NCP I W A\n",
(int)sizeof(long) * 2 + 2, "Start",
(int)sizeof(long) * 2 + 2, "End");
if (print_maps)
- printf("%-*s %-*s rwxp %-*s Dev Inode File\n",
+ printf("%-*s %-*s rwxSep %-*s Dev Inode File\n",
(int)sizeof(long) * 2 + 0, "Start",
(int)sizeof(long) * 2 + 0, "End",
(int)sizeof(long) * 2 + 0, "Offset");
@@ -497,7 +497,7 @@ process_map(kvm_t *kd, pid_t pid, struct kinfo_proc *proc, struct sum *sum)
(int)sizeof(int) * 2 - 1, "Size ");
#endif
if (print_all)
- printf("%-*s %-*s %*s %-*s rwxpc RWX I/W/A Dev %*s - File\n",
+ printf("%-*s %-*s %*s %-*s rwxpcSe RWX I/W/A Dev %*s - File\n",
(int)sizeof(long) * 2, "Start",
(int)sizeof(long) * 2, "End",
(int)sizeof(int) * 2, "Size ",
@@ -719,11 +719,14 @@ dump_vm_map_entry(kvm_t *kd, struct kbit *vmspace,
name = findname(kd, vmspace, vme, vp, vfs, uvm_obj);
if (print_map) {
- printf("0x%lx 0x%lx %c%c%c %c%c%c %s %s %d %d %d",
- vme->start, vme->end,
+ printf("0x%-*lx 0x%-*lx %c%c%c%c%c %c%c%c %s %s %d %d %d",
+ (int)sizeof(long) * 2 + 0, vme->start,
+ (int)sizeof(long) * 2 + 0, vme->end,
(vme->protection & PROT_READ) ? 'r' : '-',
(vme->protection & PROT_WRITE) ? 'w' : '-',
(vme->protection & PROT_EXEC) ? 'x' : '-',
+ (vme->etype & UVM_ET_STACK) ? 'S' : '-',
+ (vme->etype & UVM_ET_SYSCALL) ? 'e' : '-',
(vme->max_protection & PROT_READ) ? 'r' : '-',
(vme->max_protection & PROT_WRITE) ? 'w' : '-',
(vme->max_protection & PROT_EXEC) ? 'x' : '-',
@@ -743,12 +746,14 @@ dump_vm_map_entry(kvm_t *kd, struct kbit *vmspace,
}
if (print_maps)
- printf("%0*lx-%0*lx %c%c%c%c %0*lx %02x:%02x %llu %s\n",
+ printf("0x%-*lx 0x%-*lx %c%c%c%c%c%c %0*lx %02x:%02x %llu %s\n",
(int)sizeof(void *) * 2, vme->start,
(int)sizeof(void *) * 2, vme->end,
(vme->protection & PROT_READ) ? 'r' : '-',
(vme->protection & PROT_WRITE) ? 'w' : '-',
(vme->protection & PROT_EXEC) ? 'x' : '-',
+ (vme->etype & UVM_ET_STACK) ? 'S' : '-',
+ (vme->etype & UVM_ET_SYSCALL) ? 'e' : '-',
(vme->etype & UVM_ET_COPYONWRITE) ? 'p' : 's',
(int)sizeof(void *) * 2,
(unsigned long)vme->offset,
@@ -761,11 +766,14 @@ dump_vm_map_entry(kvm_t *kd, struct kbit *vmspace,
vme->start, vme->end,
vme->object.uvm_obj, (unsigned long)vme->offset,
vme->aref.ar_amap, vme->aref.ar_pageoff);
- printf("\tsubmap=%c, cow=%c, nc=%c, prot(max)=%d/%d, inh=%d, "
+ printf("\tsubmap=%c, cow=%c, nc=%c, stack=%c, "
+ "syscall=%c, prot(max)=%d/%d, inh=%d, "
"wc=%d, adv=%d\n",
(vme->etype & UVM_ET_SUBMAP) ? 'T' : 'F',
(vme->etype & UVM_ET_COPYONWRITE) ? 'T' : 'F',
(vme->etype & UVM_ET_NEEDSCOPY) ? 'T' : 'F',
+ (vme->etype & UVM_ET_STACK) ? 'T' : 'F',
+ (vme->etype & UVM_ET_SYSCALL) ? 'T' : 'F',
vme->protection, vme->max_protection,
vme->inheritance, vme->wired_count, vme->advice);
if (inode && verbose)
@@ -805,13 +813,15 @@ dump_vm_map_entry(kvm_t *kd, struct kbit *vmspace,
}
sz = (size_t)((vme->end - vme->start) / 1024);
- printf("%0*lx-%0*lx %7luk %0*lx %c%c%c%c%c (%c%c%c) %d/%d/%d %02u:%02u %7llu - %s",
+ printf("%0*lx-%0*lx %7luk %0*lx %c%c%c%c%c%c%c (%c%c%c) %d/%d/%d %02u:%02u %7llu - %s",
(int)sizeof(void *) * 2, vme->start, (int)sizeof(void *) * 2,
vme->end - (vme->start != vme->end ? 1 : 0), (unsigned long)sz,
(int)sizeof(void *) * 2, (unsigned long)vme->offset,
(vme->protection & PROT_READ) ? 'r' : '-',
(vme->protection & PROT_WRITE) ? 'w' : '-',
(vme->protection & PROT_EXEC) ? 'x' : '-',
+ (vme->etype & UVM_ET_STACK) ? 'S' : '-',
+ (vme->etype & UVM_ET_SYSCALL) ? 'e' : '-',
(vme->etype & UVM_ET_COPYONWRITE) ? 'p' : 's',
(vme->etype & UVM_ET_NEEDSCOPY) ? '+' : '-',
(vme->max_protection & PROT_READ) ? 'r' : '-',