author	Theo de Raadt <deraadt@cvs.openbsd.org>	2022-10-21 18:10:57 +0000
committer	Theo de Raadt <deraadt@cvs.openbsd.org>	2022-10-21 18:10:57 +0000
commit	04edc6b37a6f09654692fdf3019a9c44f17f5d4b (patch)
tree	7b0035e3592481a53e753c9dfc487f3dc5012c12 /sys/kern
parent	c7dc0358a85f8024ae81bed8fdd3657084a13140 (diff)
automatically mark immutable certain regions in program & ld.so LOADs.
The large commented block in elf_load_psection explains the situation. ok kettenis.
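
For context, the userland face of this machinery is mimmutable(2): once a
region is marked immutable, later attempts to change its permissions or to
replace the mapping fail with EPERM. A minimal sketch, not part of this
commit, assuming 4K pages:

	#include <sys/mman.h>
	#include <err.h>
	#include <stdio.h>

	int
	main(void)
	{
		size_t len = 4096;	/* one page, assuming 4K pages */
		void *p = mmap(NULL, len, PROT_READ,
		    MAP_ANON | MAP_PRIVATE, -1, 0);

		if (p == MAP_FAILED)
			err(1, "mmap");
		if (mimmutable(p, len) == -1)	/* lock prot and mapping */
			err(1, "mimmutable");
		/* expected to fail with EPERM from here on */
		if (mprotect(p, len, PROT_READ | PROT_WRITE) == -1)
			warn("mprotect on immutable region");
		return 0;
	}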
Diffstat (limited to 'sys/kern')
-rw-r--r--	sys/kern/exec_elf.c	21
-rw-r--r--	sys/kern/exec_subr.c	62
2 files changed, 70 insertions(+), 13 deletions(-)
diff --git a/sys/kern/exec_elf.c b/sys/kern/exec_elf.c
index a27dac2e817..8b5a67baea6 100644
--- a/sys/kern/exec_elf.c
+++ b/sys/kern/exec_elf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: exec_elf.c,v 1.168 2022/08/29 16:53:46 deraadt Exp $ */
+/* $OpenBSD: exec_elf.c,v 1.169 2022/10/21 18:10:56 deraadt Exp $ */
/*
* Copyright (c) 1996 Per Fogelstrom
@@ -189,11 +189,18 @@ elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
* initially. The dynamic linker will make these read-only
* and add back X permission after relocation processing.
* Static executables with W|X segments will probably crash.
+ * Apply immutability as much as possible, but not for RELRO
+ * or PT_OPENBSD_MUTABLE sections, or LOADS marked
+ * PF_OPENBSD_MUTABLE, or LOADS which violate W^X. Userland
+ * (meaning crt0 or ld.so) will repair those regions.
*/
*prot |= (ph->p_flags & PF_R) ? PROT_READ : 0;
*prot |= (ph->p_flags & PF_W) ? PROT_WRITE : 0;
if ((ph->p_flags & PF_W) == 0)
*prot |= (ph->p_flags & PF_X) ? PROT_EXEC : 0;
+ if ((ph->p_flags & (PF_X | PF_W)) != (PF_X | PF_W) &&
+ (ph->p_flags & PF_OPENBSD_MUTABLE) == 0)
+ flags |= VMCMD_IMMUTABLE;
msize = ph->p_memsz + diff;
offset = ph->p_offset - bdiff;
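
The repair path the comment describes can be pictured as follows. This is a
schematic of what crt0/ld.so conceptually does after relocation processing,
not ld.so's actual code; relro_base and relro_size are hypothetical names
for the page-aligned PT_GNU_RELRO bounds:

	#include <sys/mman.h>
	#include <stddef.h>
	#include <err.h>

	/*
	 * Schematic userland repair of a RELRO region the kernel left
	 * mutable (per the hunk above): drop write permission, then
	 * apply the immutability the kernel skipped.
	 */
	static void
	repair_relro(void *relro_base, size_t relro_size)
	{
		if (mprotect(relro_base, relro_size, PROT_READ) == -1)
			err(1, "mprotect");
		if (mimmutable(relro_base, relro_size) == -1)
			err(1, "mimmutable");
	}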
@@ -432,6 +439,12 @@ elf_load_file(struct proc *p, char *path, struct exec_package *epp,
ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
break;
+ case PT_GNU_RELRO:
+ case PT_OPENBSD_MUTABLE:
+ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
+ ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
+ break;
+
default:
break;
}
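
For completeness, a hedged example of how a program can opt data out of the
automatic immutability this hunk implements. The assumption here is that the
toolchain gathers sections named ".openbsd.mutable" into a PT_OPENBSD_MUTABLE
program header, which the code above then leaves mutable:

	/*
	 * Assumed toolchain behavior: ".openbsd.mutable" sections land
	 * in a PT_OPENBSD_MUTABLE segment, so this buffer stays
	 * mprotect-able at runtime.
	 */
	__attribute__((section(".openbsd.mutable")))
	static char scratch_area[4096];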
@@ -655,6 +668,12 @@ exec_elf_makecmds(struct proc *p, struct exec_package *epp)
ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
break;
+ case PT_GNU_RELRO:
+ case PT_OPENBSD_MUTABLE:
+ NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
+ ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
+ break;
+
default:
/*
* Not fatal, we don't need to understand everything
diff --git a/sys/kern/exec_subr.c b/sys/kern/exec_subr.c
index 8a949bebd28..dc4053a5f89 100644
--- a/sys/kern/exec_subr.c
+++ b/sys/kern/exec_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: exec_subr.c,v 1.58 2022/10/07 14:59:39 deraadt Exp $ */
+/* $OpenBSD: exec_subr.c,v 1.59 2022/10/21 18:10:56 deraadt Exp $ */
/* $NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $ */
/*
@@ -167,7 +167,7 @@ vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
* call this routine.
*/
struct uvm_object *uobj;
- unsigned int syscalls = 0;
+ unsigned int flags = UVM_FLAG_COPYONW | UVM_FLAG_FIXED;
int error;
/*
@@ -195,12 +195,12 @@ vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
* do the map
*/
if ((cmd->ev_flags & VMCMD_SYSCALL) && (cmd->ev_prot & PROT_EXEC))
- syscalls |= UVM_FLAG_SYSCALL;
+ flags |= UVM_FLAG_SYSCALL;
error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
uobj, cmd->ev_offset, 0,
UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
- MADV_NORMAL, UVM_FLAG_COPYONW | UVM_FLAG_FIXED | syscalls));
+ MADV_NORMAL, flags));
/*
* check for error
@@ -211,6 +211,11 @@ vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
* error: detach from object
*/
uobj->pgops->pgo_detach(uobj);
+ } else {
+ if (cmd->ev_flags & VMCMD_IMMUTABLE)
+ uvm_map_immutable(&p->p_vmspace->vm_map,
+ cmd->ev_addr, round_page(cmd->ev_addr + cmd->ev_len),
+ 1, "pagedvn");
}
return (error);
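
The uvm_map_immutable() prototype is not shown in this diff; reconstructed
from the call sites here and in vmcmd_mutable() below (a plausible inference,
not the committed declaration), it would look like:

	/*
	 * Inferred from the call sites: mark [start, end) immutable
	 * (imut=1) or mutable again (imut=0); "name" labels the caller
	 * for diagnostics.
	 */
	int	uvm_map_immutable(struct vm_map *map, vaddr_t start,
		    vaddr_t end, int imut, const char *name);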
@@ -234,7 +239,7 @@ vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
prot = cmd->ev_prot;
- cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
+ KASSERT((cmd->ev_addr & PAGE_MASK) == 0);
error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(prot | PROT_WRITE, PROT_MASK, MAP_INHERIT_COPY,
@@ -256,12 +261,19 @@ vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
* it mapped read-only, so now we are going to have to call
* uvm_map_protect() to fix up the protection. ICK.
*/
- return (uvm_map_protect(&p->p_vmspace->vm_map,
- trunc_page(cmd->ev_addr),
- round_page(cmd->ev_addr + cmd->ev_len),
+ error = (uvm_map_protect(&p->p_vmspace->vm_map,
+ cmd->ev_addr, round_page(cmd->ev_addr + cmd->ev_len),
prot, FALSE, TRUE));
}
- return (0);
+ if (error == 0) {
+ if (cmd->ev_flags & VMCMD_IMMUTABLE) {
+ //printf("imut readvn\n");
+ uvm_map_immutable(&p->p_vmspace->vm_map,
+ cmd->ev_addr, round_page(cmd->ev_addr + cmd->ev_len),
+ 1, "readvn");
+ }
+ }
+ return (error);
}
/*
@@ -272,15 +284,41 @@ vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
int
vmcmd_map_zero(struct proc *p, struct exec_vmcmd *cmd)
{
+ int error;
+
if (cmd->ev_len == 0)
return (0);
- cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
- return (uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
+ KASSERT((cmd->ev_addr & PAGE_MASK) == 0);
+ error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, MAP_INHERIT_COPY,
MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW |
- (cmd->ev_flags & VMCMD_STACK ? UVM_FLAG_STACK : 0))));
+ (cmd->ev_flags & VMCMD_STACK ? UVM_FLAG_STACK : 0)));
+ if (cmd->ev_flags & VMCMD_IMMUTABLE) {
+ //printf("imut zero\n");
+ uvm_map_immutable(&p->p_vmspace->vm_map,
+ cmd->ev_addr, round_page(cmd->ev_addr + cmd->ev_len),
+ 1, "zero");
+ }
+ return error;
+}
+
+/*
+ * vmcmd_mutable():
+ * handle vmcmd which changes an address space region back to mutable
+ */
+
+int
+vmcmd_mutable(struct proc *p, struct exec_vmcmd *cmd)
+{
+ if (cmd->ev_len == 0)
+ return (0);
+
+ /* ev_addr, ev_len may be misaligned, so maximize the region */
+ uvm_map_immutable(&p->p_vmspace->vm_map, trunc_page(cmd->ev_addr),
+ round_page(cmd->ev_addr + cmd->ev_len), 0, "mutable");
+ return 0;
}
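
The alignment asymmetry is deliberate: LOAD vmcmds arrive page-aligned (hence
the KASSERTs above), while RELRO and PT_OPENBSD_MUTABLE bounds come straight
from the program header and may not be. A small sketch of the widening
arithmetic, assuming 4K pages and the usual page-macro definitions:

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(PAGE_SIZE - 1)
	#define trunc_page(x)	((x) & ~PAGE_MASK)
	#define round_page(x)	(((x) + PAGE_MASK) & ~PAGE_MASK)

	/*
	 * E.g. ev_addr = 0x1234, ev_len = 0x2000:
	 *   trunc_page(0x1234)          == 0x1000  (start rounded down)
	 *   round_page(0x1234 + 0x2000) == 0x4000  (end rounded up)
	 * so the whole containing page range [0x1000, 0x4000) is made
	 * mutable again.
	 */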
/*