author	Theo de Raadt <deraadt@cvs.openbsd.org>	2022-10-07 14:59:40 +0000
committer	Theo de Raadt <deraadt@cvs.openbsd.org>	2022-10-07 14:59:40 +0000
commit	d5dc31a79f667c4cedbee1fd924cce6958a38693 (patch)
tree	5833d6d11a134abb7146ebe6af3404f801cdbda2 /sys/uvm
parent	f06f83d9171fd070aa1dfb4cadffa776031c8a0a (diff)
Add mimmutable(2) system call which locks the permissions (PROT_*) of
memory mappings so they cannot be changed by a later mmap(), mprotect(),
or munmap(), which will error with EPERM instead.
ok kettenis
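For context, a minimal userland sketch of the new semantics (not part of
this commit): it assumes the prototype int mimmutable(void *addr, size_t len)
exposed via <sys/mman.h>, matching the addr/len arguments of sys_mimmutable
in the diff below. Once a mapping is marked immutable, later mprotect() or
munmap() calls on it fail with EPERM.

/*
 * Hypothetical userland demo of mimmutable(2); assumes the
 * int mimmutable(void *addr, size_t len) prototype from <sys/mman.h>.
 */
#include <sys/mman.h>
#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t len = getpagesize();
	void *p;

	/* Map one anonymous read/write page. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");

	/* Lock the mapping's permissions. */
	if (mimmutable(p, len) == -1)
		err(1, "mimmutable");

	/* Both of these should now fail with EPERM. */
	if (mprotect(p, len, PROT_READ) == -1)
		printf("mprotect: %s\n", strerror(errno));
	if (munmap(p, len) == -1)
		printf("munmap: %s\n", strerror(errno));

	return 0;
}

Note that, as sys_mimmutable below shows, the kernel page-aligns addr and
adjusts len via ALIGN_ADDR before applying the immutable marking.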
Diffstat (limited to 'sys/uvm')
-rw-r--r--	sys/uvm/uvm_extern.h	4
-rw-r--r--	sys/uvm/uvm_io.c	4
-rw-r--r--	sys/uvm/uvm_map.c	93
-rw-r--r--	sys/uvm/uvm_map.h	7
-rw-r--r--	sys/uvm/uvm_mmap.c	41
5 files changed, 124 insertions(+), 25 deletions(-)
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 31b35d83e3b..cfdfe884a36 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.163 2022/08/15 03:21:04 jsg Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.164 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -399,7 +399,7 @@ int uvm_map_pageable_all(vm_map_t, int, vsize_t);
boolean_t uvm_map_checkprot(vm_map_t, vaddr_t,
vaddr_t, vm_prot_t);
int uvm_map_protect(vm_map_t, vaddr_t,
- vaddr_t, vm_prot_t, boolean_t);
+ vaddr_t, vm_prot_t, boolean_t, boolean_t);
struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t,
boolean_t, boolean_t);
void uvmspace_init(struct vmspace *, struct pmap *,
diff --git a/sys/uvm/uvm_io.c b/sys/uvm/uvm_io.c
index b3d4b876de7..3bd0b4e249a 100644
--- a/sys/uvm/uvm_io.c
+++ b/sys/uvm/uvm_io.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_io.c,v 1.29 2022/03/12 08:11:07 mpi Exp $ */
+/* $OpenBSD: uvm_io.c,v 1.30 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: uvm_io.c,v 1.12 2000/06/27 17:29:23 mrg Exp $ */
/*
@@ -127,7 +127,7 @@ uvm_io(vm_map_t map, struct uio *uio, int flags)
vm_map_lock(kernel_map);
TAILQ_INIT(&dead_entries);
uvm_unmap_remove(kernel_map, kva, kva+chunksz,
- &dead_entries, FALSE, TRUE);
+ &dead_entries, FALSE, TRUE, FALSE);
vm_map_unlock(kernel_map);
uvm_unmap_detach(&dead_entries, AMAP_REFALL);
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 6338052d0b6..4fe00dbdab2 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.294 2022/08/15 15:53:45 jsg Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.295 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -797,7 +797,11 @@ uvm_mapanon(struct vm_map *map, vaddr_t *addr, vsize_t sz,
error = EINVAL;
goto unlock;
}
- uvm_unmap_remove(map, *addr, *addr + sz, &dead, FALSE, TRUE);
+ if (uvm_unmap_remove(map, *addr, *addr + sz, &dead,
+ FALSE, TRUE, TRUE) != 0) {
+ error = EPERM; /* immutable entries found */
+ goto unlock;
+ }
}
if (!uvm_map_isavail(map, NULL, &first, &last, *addr, sz)) {
error = ENOMEM;
@@ -1038,8 +1042,13 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
}
/* Check that the space is available. */
- if (flags & UVM_FLAG_UNMAP)
- uvm_unmap_remove(map, *addr, *addr + sz, &dead, FALSE, TRUE);
+ if (flags & UVM_FLAG_UNMAP) {
+ if (uvm_unmap_remove(map, *addr, *addr + sz, &dead,
+ FALSE, TRUE, TRUE) != 0) {
+ error = EPERM; /* immutable entries found */
+ goto unlock;
+ }
+ }
if (!uvm_map_isavail(map, NULL, &first, &last, *addr, sz)) {
error = ENOMEM;
goto unlock;
@@ -1817,7 +1826,7 @@ uvm_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
(end & (vaddr_t)PAGE_MASK) == 0);
TAILQ_INIT(&dead);
vm_map_lock(map);
- uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE);
+ uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE, FALSE);
vm_map_unlock(map);
if (map->flags & VM_MAP_INTRSAFE)
@@ -1959,17 +1968,17 @@ uvm_unmap_kill_entry(struct vm_map *map, struct vm_map_entry *entry)
* If markfree, entry will be properly marked free, otherwise, no replacement
* entry will be put in the tree (corrupting the tree).
*/
-void
+int
uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
struct uvm_map_deadq *dead, boolean_t remove_holes,
- boolean_t markfree)
+ boolean_t markfree, boolean_t checkimmutable)
{
struct vm_map_entry *prev_hint, *next, *entry;
start = MAX(start, map->min_offset);
end = MIN(end, map->max_offset);
if (start >= end)
- return;
+ return 0;
if ((map->flags & VM_MAP_INTRSAFE) == 0)
splassert(IPL_NONE);
@@ -1979,6 +1988,19 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
/* Find first affected entry. */
entry = uvm_map_entrybyaddr(&map->addr, start);
KDASSERT(entry != NULL && entry->start <= start);
+
+ if (checkimmutable) {
+ struct vm_map_entry *entry1 = entry;
+
+ /* Refuse to unmap if any entries are immutable */
+ for (; entry1 != NULL && entry1->start < end; entry1 = next) {
+ KDASSERT(entry1->start >= start);
+ next = RBT_NEXT(uvm_map_addr, entry1);
+ if (entry1->etype & UVM_ET_IMMUTABLE)
+ return EPERM;
+ }
+ }
+
if (entry->end <= start && markfree)
entry = RBT_NEXT(uvm_map_addr, entry);
else
@@ -2043,6 +2065,7 @@ uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
KDASSERT(uvm_map_entrybyaddr(&map->addr, a) == NULL);
}
#endif
+ return 0;
}
/*
@@ -3063,7 +3086,7 @@ uvm_page_printit(struct vm_page *pg, boolean_t full,
*/
int
uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
- vm_prot_t new_prot, boolean_t set_max)
+ vm_prot_t new_prot, boolean_t set_max, boolean_t checkimmutable)
{
struct vm_map_entry *first, *iter;
vm_prot_t old_prot;
@@ -3098,6 +3121,11 @@ uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
if (iter->start == iter->end || UVM_ET_ISHOLE(iter))
continue;
+ if (checkimmutable &&
+ (iter->etype & UVM_ET_IMMUTABLE)) {
+ error = EPERM;
+ goto out;
+ }
old_prot = iter->protection;
if (old_prot == PROT_NONE && new_prot != old_prot) {
dused += uvmspace_dused(
@@ -3356,7 +3384,7 @@ uvmspace_exec(struct proc *p, vaddr_t start, vaddr_t end)
* (as in, not replace them with free-memory entries).
*/
uvm_unmap_remove(map, map->min_offset, map->max_offset,
- &dead_entries, TRUE, FALSE);
+ &dead_entries, TRUE, FALSE, FALSE);
KDASSERT(RBT_EMPTY(uvm_map_addr, &map->addr));
@@ -3529,7 +3557,7 @@ uvm_share(struct vm_map *dstmap, vaddr_t dstaddr, vm_prot_t prot,
}
ret = EINVAL;
- uvm_unmap_remove(dstmap, dstaddr, unmap_end, &dead, FALSE, TRUE);
+ uvm_unmap_remove(dstmap, dstaddr, unmap_end, &dead, FALSE, TRUE, FALSE);
exit_unlock:
vm_map_unlock_read(srcmap);
@@ -4088,7 +4116,7 @@ uvm_map_deallocate(vm_map_t map)
TAILQ_INIT(&dead);
uvm_tree_sanity(map, __FILE__, __LINE__);
uvm_unmap_remove(map, map->min_offset, map->max_offset, &dead,
- TRUE, FALSE);
+ TRUE, FALSE, FALSE);
pmap_destroy(map->pmap);
KASSERT(RBT_EMPTY(uvm_map_addr, &map->addr));
free(map, M_VMMAP, sizeof *map);
@@ -4183,6 +4211,45 @@ uvm_map_syscall(struct vm_map *map, vaddr_t start, vaddr_t end)
return (0);
}
+/*
+ * uvm_map_immutable: block mapping/mprotect for range of addrs in map.
+ *
+ * => map must be unlocked
+ */
+int
+uvm_map_immutable(struct vm_map *map, vaddr_t start, vaddr_t end, int imut, char *name)
+{
+ struct vm_map_entry *entry;
+
+ if (start > end)
+ return EINVAL;
+ start = MAX(start, map->min_offset);
+ end = MIN(end, map->max_offset);
+ if (start >= end)
+ return 0;
+
+ vm_map_lock(map);
+
+ entry = uvm_map_entrybyaddr(&map->addr, start);
+ if (entry->end > start)
+ UVM_MAP_CLIP_START(map, entry, start);
+ else
+ entry = RBT_NEXT(uvm_map_addr, entry);
+
+ while (entry != NULL && entry->start < end) {
+ UVM_MAP_CLIP_END(map, entry, end);
+ if (imut)
+ entry->etype |= UVM_ET_IMMUTABLE;
+ else
+ entry->etype &= ~UVM_ET_IMMUTABLE;
+ entry = RBT_NEXT(uvm_map_addr, entry);
+ }
+
+ map->wserial++;
+ vm_map_unlock(map);
+ return (0);
+}
+
/*
* uvm_map_advice: set advice code for range of addrs in map.
*
@@ -4367,7 +4434,7 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
fail2_unmap:
if (error) {
uvm_unmap_remove(kernel_map, dstaddr, dstaddr + len, &dead,
- FALSE, TRUE);
+ FALSE, TRUE, FALSE);
}
/* Release maps, release dead entries. */
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index d3482f377c6..566fd983c78 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.h,v 1.75 2022/03/12 08:11:07 mpi Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.76 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -350,6 +350,7 @@ struct vm_map * uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
vaddr_t uvm_map_pie(vaddr_t);
vaddr_t uvm_map_hint(struct vmspace *, vm_prot_t, vaddr_t, vaddr_t);
int uvm_map_syscall(struct vm_map *, vaddr_t, vaddr_t);
+int uvm_map_immutable(struct vm_map *, vaddr_t, vaddr_t, int, char *);
int uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t);
int uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
void uvm_map_init(void);
@@ -365,8 +366,8 @@ int uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
struct vm_map *);
void uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
void uvm_unmap_detach(struct uvm_map_deadq *, int);
-void uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
- struct uvm_map_deadq *, boolean_t, boolean_t);
+int uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
+ struct uvm_map_deadq *, boolean_t, boolean_t, boolean_t);
void uvm_map_set_uaddr(struct vm_map*, struct uvm_addr_state**,
struct uvm_addr_state*);
int uvm_map_mquery(struct vm_map*, vaddr_t*, vsize_t, voff_t, int);
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index 6f17349f9cd..f5aba6a7d40 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_mmap.c,v 1.172 2022/08/01 14:56:59 deraadt Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.173 2022/10/07 14:59:39 deraadt Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -569,7 +569,11 @@ sys_munmap(struct proc *p, void *v, register_t *retval)
}
TAILQ_INIT(&dead_entries);
- uvm_unmap_remove(map, addr, addr + size, &dead_entries, FALSE, TRUE);
+ if (uvm_unmap_remove(map, addr, addr + size, &dead_entries,
+ FALSE, TRUE, TRUE) != 0) {
+ vm_map_unlock(map);
+ return EPERM; /* immutable entries found */
+ }
vm_map_unlock(map); /* and unlock */
uvm_unmap_detach(&dead_entries, 0);
@@ -619,7 +623,7 @@ sys_mprotect(struct proc *p, void *v, register_t *retval)
return EINVAL; /* disallow wrap-around. */
return (uvm_map_protect(&p->p_vmspace->vm_map, addr, addr+size,
- prot, FALSE));
+ prot, FALSE, TRUE));
}
/*
@@ -649,6 +653,32 @@ sys_msyscall(struct proc *p, void *v, register_t *retval)
}
/*
+ * sys_mimmutable: the mimmutable system call
+ */
+int
+sys_mimmutable(struct proc *p, void *v, register_t *retval)
+{
+ struct sys_mimmutable_args /* {
+ immutablearg(void *) addr;
+ immutablearg(size_t) len;
+ } */ *uap = v;
+ vaddr_t addr;
+ vsize_t size, pageoff;
+
+ addr = (vaddr_t)SCARG(uap, addr);
+ size = (vsize_t)SCARG(uap, len);
+
+ /*
+ * align the address to a page boundary, and adjust the size accordingly
+ */
+ ALIGN_ADDR(addr, size, pageoff);
+ if (addr > SIZE_MAX - size)
+ return EINVAL; /* disallow wrap-around. */
+
+ return uvm_map_immutable(&p->p_vmspace->vm_map, addr, addr+size, 1, "sys");
+}
+
+/*
* sys_minherit: the minherit system call
*/
int
@@ -1228,7 +1258,8 @@ redo:
if (kva != 0) {
vm_map_lock(kernel_map);
uvm_unmap_remove(kernel_map, kva,
- kva+PAGE_SIZE, &dead_entries, FALSE, TRUE);
+ kva+PAGE_SIZE, &dead_entries,
+ FALSE, TRUE, FALSE); /* XXX */
vm_map_unlock(kernel_map);
kva = 0;
}
@@ -1255,7 +1286,7 @@ redo:
if (kva != 0) {
vm_map_lock(kernel_map);
uvm_unmap_remove(kernel_map, kva, kva+PAGE_SIZE,
- &dead_entries, FALSE, TRUE);
+ &dead_entries, FALSE, TRUE, FALSE); /* XXX */
vm_map_unlock(kernel_map);
}
uvm_unmap_detach(&dead_entries, AMAP_REFALL);