Diffstat (limited to 'sys/uvm/uvm_map.h')
-rw-r--r--	sys/uvm/uvm_map.h	307
1 file changed, 184 insertions, 123 deletions
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index c416cc51d23..e0e21267e31 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,7 +1,22 @@
-/* $OpenBSD: uvm_map.h,v 1.46 2011/06/06 17:10:23 ariane Exp $ */
+/* $OpenBSD: uvm_map.h,v 1.47 2012/03/09 13:01:29 ariane Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
-/*
+/*
+ * Copyright (c) 2011 Ariane van der Steldt <ariane@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ *
* Copyright (c) 1997 Charles D. Cranor and Washington University.
* Copyright (c) 1991, 1993, The Regents of the University of California.
*
@@ -75,14 +90,28 @@
#ifdef _KERNEL
/*
+ * Internal functions.
+ *
+ * Required by clipping macros.
+ */
+void uvm_map_clip_end(struct vm_map*, struct vm_map_entry*,
+ vaddr_t);
+void uvm_map_clip_start(struct vm_map*,
+ struct vm_map_entry*, vaddr_t);
+
+/*
* UVM_MAP_CLIP_START: ensure that the entry begins at or after
* the starting address, if it doesn't we split the entry.
*
* => map must be locked by caller
*/
-#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
- if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA); }
+#define UVM_MAP_CLIP_START(_map, _entry, _addr) \
+ do { \
+ KASSERT((_entry)->end + (_entry)->fspace > (_addr)); \
+ if ((_entry)->start < (_addr)) \
+ uvm_map_clip_start((_map), (_entry), (_addr)); \
+ } while (0)
/*
* UVM_MAP_CLIP_END: ensure that the entry ends at or before
@@ -91,15 +120,16 @@
* => map must be locked by caller
*/
-#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
- if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA); }
+#define UVM_MAP_CLIP_END(_map, _entry, _addr) \
+ do { \
+ KASSERT((_entry)->start < (_addr)); \
+ if ((_entry)->end > (_addr)) \
+ uvm_map_clip_end((_map), (_entry), (_addr)); \
+ } while (0)
/*
* extract flags
*/
-#define UVM_EXTRACT_REMOVE 0x1 /* remove mapping from old map */
-#define UVM_EXTRACT_CONTIG 0x2 /* try to keep it contig */
-#define UVM_EXTRACT_QREF 0x4 /* use quick refs */
#define UVM_EXTRACT_FIXPROT 0x8 /* set prot to maxprot as we go */
#endif /* _KERNEL */
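The clipping macros above are what callers use to split an entry at a range
boundary before operating on only part of it.  Below is a minimal sketch of the
usual pattern, assuming start < end and the usual kernel includes; the function
name is illustrative and is not part of this header.

/*
 * Sketch: restrict an operation to [start, end) by clipping the entry
 * that contains `start'.  The map must be write-locked by the caller,
 * as the macros require.
 */
void
example_op_range(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_map_entry *entry;

	vm_map_lock(map);
	if (uvm_map_lookup_entry(map, start, &entry)) {
		/* Split the entry so it begins exactly at `start'. */
		UVM_MAP_CLIP_START(map, entry, start);
		/* Split it so it ends at `end' (no-op if it already does). */
		UVM_MAP_CLIP_END(map, entry, end);
		/*
		 * ... operate on the clipped entry; a range spanning
		 * several entries would walk forward from here and clip
		 * only the last one ...
		 */
	}
	vm_map_unlock(map);
}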
@@ -133,21 +163,34 @@ union vm_map_object {
* Also included is control information for virtual copy operations.
*/
struct vm_map_entry {
- RB_ENTRY(vm_map_entry) rb_entry; /* tree information */
- vaddr_t ownspace; /* free space after */
- vaddr_t space; /* space in subtree */
- struct vm_map_entry *prev; /* previous entry */
- struct vm_map_entry *next; /* next entry */
+ union {
+ RB_ENTRY(vm_map_entry) addr_entry; /* address tree */
+ } daddrs;
+
+ union {
+ RB_ENTRY(vm_map_entry) rbtree; /* Link freespace tree. */
+ TAILQ_ENTRY(vm_map_entry) tailq;/* Link freespace queue. */
+ TAILQ_ENTRY(vm_map_entry) deadq;/* dead entry queue */
+ } dfree;
+
+#define uvm_map_entry_start_copy start
vaddr_t start; /* start address */
vaddr_t end; /* end address */
+
+ vsize_t guard; /* bytes in guard */
+ vsize_t fspace; /* free space */
+
union vm_map_object object; /* object I point to */
voff_t offset; /* offset into object */
+ struct vm_aref aref; /* anonymous overlay */
+
int etype; /* entry type */
+
vm_prot_t protection; /* protection code */
vm_prot_t max_protection; /* maximum protection */
vm_inherit_t inheritance; /* inheritance */
+
int wired_count; /* can be paged if == 0 */
- struct vm_aref aref; /* anonymous overlay */
int advice; /* madvise advice */
#define uvm_map_entry_stop_copy flags
u_int8_t flags; /* flags */
@@ -156,18 +199,19 @@ struct vm_map_entry {
#define UVM_MAP_KMEM 0x02 /* from kmem entry pool */
};
-/*
- * Marks the map entry as a guard page, using vm_map_entry.etype.
- */
-#define MAP_ET_KVAGUARD 0x10 /* guard entry */
-
#define VM_MAPENT_ISWIRED(entry) ((entry)->wired_count != 0)
+TAILQ_HEAD(uvm_map_deadq, vm_map_entry); /* dead entry queue */
+RB_HEAD(uvm_map_addr, vm_map_entry);
+RB_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
+ uvm_mapentry_addrcmp);
+
/*
- * Maps are doubly-linked lists of map entries, kept sorted
- * by address. A single hint is provided to start
- * searches again from the last successful search,
- * insertion, or removal.
+ * A map is an rbtree of map entries, kept sorted by address.
+ * In addition, free space entries are also kept in an rbtree,
+ * indexed by free size.
+ *
+ *
*
* LOCKING PROTOCOL NOTES:
* -----------------------
@@ -214,23 +258,80 @@ struct vm_map_entry {
* is busy, and thread is attempting
* to write-lock. must be tested
* while `flags_lock' is asserted.
+ *
+ * VM_MAP_GUARDPAGES r/o; must be specified at map
+ * initialization time.
+ * If set, guards will appear between
+ * automatic allocations.
+ * No locking required.
+ *
+ * VM_MAP_ISVMSPACE r/o; set by uvmspace_alloc.
+ * Signifies that this map is a vmspace.
+ * (The implementation treats all maps
+ * without this bit as kernel maps.)
+ * No locking required.
+ *
+ *
+ * All automatic allocations (uvm_map without MAP_FIXED) will allocate
+ * from vm_map.free.
+ * If that allocation fails:
+ * - vmspace maps will spill over into vm_map.bfree,
+ * - all other maps will call uvm_map_kmem_grow() to increase the arena.
+ *
+ * vmspace maps have their data, brk() and stack arenas automatically
+ * updated when uvm_map() is invoked without MAP_FIXED.
+ * The spill over arena (vm_map.bfree) will contain the space in the brk()
+ * and stack ranges.
+ * Kernel maps never have a bfree arena and this tree will always be empty.
+ *
+ *
+ * read_locks and write_locks are used in lock debugging code.
*/
struct vm_map {
struct pmap * pmap; /* Physical map */
struct rwlock lock; /* Lock for map data */
- RB_HEAD(uvm_tree, vm_map_entry) rbhead; /* Tree for entries */
- struct vm_map_entry header; /* List of entries */
- int nentries; /* Number of entries */
+
+ struct uvm_map_addr addr; /* Entry tree, by addr */
+
vsize_t size; /* virtual size */
int ref_count; /* Reference count */
simple_lock_data_t ref_lock; /* Lock for ref_count field */
- vm_map_entry_t hint; /* hint for quick lookups */
- simple_lock_data_t hint_lock; /* lock for hint storage */
- vm_map_entry_t first_free; /* First free space hint */
int flags; /* flags */
unsigned int timestamp; /* Version number */
-#define min_offset header.start
-#define max_offset header.end
+
+ vaddr_t min_offset; /* First address in map. */
+ vaddr_t max_offset; /* Last address in map. */
+
+ /*
+ * Allocation overflow regions.
+ */
+ vaddr_t b_start; /* Start for brk() alloc. */
+ vaddr_t b_end; /* End for brk() alloc. */
+ vaddr_t s_start; /* Start for stack alloc. */
+ vaddr_t s_end; /* End for stack alloc. */
+
+ /*
+ * Special address selectors.
+ *
+ * The uaddr_exe mapping is used if:
+ * - protX is selected
+ * - the pointer is not NULL
+ *
+ * If uaddr_exe is not used, the other mappings are checked in
+ * order of appearance.
+ * If a hint is given, the selection will only be used if the hint
+ * falls in the range described by the mapping.
+ *
+ * The states are pointers because:
+ * - they may not all be in use
+ * - the struct size for different schemes is variable
+ *
+ * The uaddr_brk_stack selector will select addresses that are in
+ * the brk/stack area of the map.
+ */
+ struct uvm_addr_state *uaddr_exe; /* Executable selector. */
+ struct uvm_addr_state *uaddr_any[4]; /* More selectors. */
+ struct uvm_addr_state *uaddr_brk_stack; /* Brk/stack selector. */
};
/* vm_map flags */
@@ -239,11 +340,13 @@ struct vm_map {
#define VM_MAP_WIREFUTURE 0x04 /* rw: wire future mappings */
#define VM_MAP_BUSY 0x08 /* rw: map is busy */
#define VM_MAP_WANTLOCK 0x10 /* rw: want to write-lock */
+#define VM_MAP_GUARDPAGES 0x20 /* rw: add guard pgs to map */
+#define VM_MAP_ISVMSPACE 0x40 /* ro: map is a vmspace */
/* XXX: number of kernel maps and entries to statically allocate */
#if !defined(MAX_KMAPENT)
-#define MAX_KMAPENT 1024 /* XXXCDC: no crash */
+#define MAX_KMAPENT 1024 /* Sufficient to make it to the scheduler. */
#endif /* !defined MAX_KMAPENT */
#ifdef _KERNEL
@@ -268,9 +371,7 @@ struct vm_map_intrsafe {
#ifdef _KERNEL
-#ifdef PMAP_GROWKERNEL
extern vaddr_t uvm_maxkaddr;
-#endif
/*
* protos: the following prototypes define the interface to vm_map
@@ -279,32 +380,29 @@ extern vaddr_t uvm_maxkaddr;
void uvm_map_deallocate(vm_map_t);
int uvm_map_clean(vm_map_t, vaddr_t, vaddr_t, int);
-void uvm_map_clip_start(vm_map_t, vm_map_entry_t, vaddr_t);
-void uvm_map_clip_end(vm_map_t, vm_map_entry_t, vaddr_t);
vm_map_t uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
-int uvm_map_extract(vm_map_t, vaddr_t, vsize_t,
- vm_map_t, vaddr_t *, int);
-vm_map_entry_t uvm_map_findspace(vm_map_t, vaddr_t, vsize_t, vaddr_t *,
- struct uvm_object *, voff_t, vsize_t, int);
+int uvm_map_extract(struct vm_map*, vaddr_t, vsize_t, vaddr_t*,
+ int);
vaddr_t uvm_map_pie(vaddr_t);
-#define uvm_map_hint(p, prot) uvm_map_hint1(p, prot, 1)
-vaddr_t uvm_map_hint1(struct proc *, vm_prot_t, int);
+vaddr_t uvm_map_hint(struct vmspace *, vm_prot_t);
int uvm_map_inherit(vm_map_t, vaddr_t, vaddr_t, vm_inherit_t);
int uvm_map_advice(vm_map_t, vaddr_t, vaddr_t, int);
void uvm_map_init(void);
boolean_t uvm_map_lookup_entry(vm_map_t, vaddr_t, vm_map_entry_t *);
-void uvm_map_reference(vm_map_t);
int uvm_map_replace(vm_map_t, vaddr_t, vaddr_t,
vm_map_entry_t, int);
int uvm_map_reserve(vm_map_t, vsize_t, vaddr_t, vsize_t,
vaddr_t *);
void uvm_map_setup(vm_map_t, vaddr_t, vaddr_t, int);
int uvm_map_submap(vm_map_t, vaddr_t, vaddr_t, vm_map_t);
-#define uvm_unmap(_m, _s, _e) uvm_unmap_p(_m, _s, _e, 0)
-void uvm_unmap_p(vm_map_t, vaddr_t, vaddr_t, struct proc *);
-void uvm_unmap_detach(vm_map_entry_t,int);
-void uvm_unmap_remove(vm_map_t, vaddr_t, vaddr_t, vm_map_entry_t *,
- struct proc *, boolean_t);
+void uvm_unmap(vm_map_t, vaddr_t, vaddr_t);
+void uvm_map_set_uaddr(struct vm_map*, struct uvm_addr_state**,
+ struct uvm_addr_state*);
+int uvm_map_mquery(struct vm_map*, vaddr_t*, vsize_t, voff_t, int);
+
+void uvm_unmap_detach(struct uvm_map_deadq*, int);
+void uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
+ struct uvm_map_deadq*, boolean_t, boolean_t);
#endif /* _KERNEL */
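The most visible interface change in this block is that unmapping no longer
hands removed entries back through a vm_map_entry_t pointer: uvm_unmap_remove()
now collects them on a struct uvm_map_deadq while the map is locked, and
uvm_unmap_detach() frees them afterwards.  A sketch of the pattern, essentially
what a simplified uvm_unmap() wrapper has to do; the FALSE/TRUE values are
placeholders for the two boolean_t parameters, whose exact meaning is defined
in uvm_map.c.

void
example_unmap(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct uvm_map_deadq dead;

	TAILQ_INIT(&dead);

	/* Gather the entries covering [start, end) under the map lock. */
	vm_map_lock(map);
	uvm_unmap_remove(map, start, end, &dead, FALSE, TRUE);
	vm_map_unlock(map);

	/* Tear the collected entries down without holding the map lock. */
	uvm_unmap_detach(&dead, 0);
}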
@@ -337,82 +435,45 @@ void uvm_unmap_remove(vm_map_t, vaddr_t, vaddr_t, vm_map_entry_t *,
*/
#ifdef _KERNEL
-/* XXX: clean up later */
+/*
+ * XXX: clean up later
+ * Half the kernel seems to depend on them being included here.
+ */
#include <sys/time.h>
-#include <sys/systm.h> /* for panic() */
-
-static __inline boolean_t vm_map_lock_try(vm_map_t);
-static __inline void vm_map_lock(vm_map_t);
-extern const char vmmapbsy[];
-
-static __inline boolean_t
-vm_map_lock_try(struct vm_map *map)
-{
- boolean_t rv;
-
- if (map->flags & VM_MAP_INTRSAFE) {
- rv = TRUE;
- } else {
- if (map->flags & VM_MAP_BUSY) {
- return (FALSE);
- }
- rv = (rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP) == 0);
- }
-
- if (rv)
- map->timestamp++;
-
- return (rv);
-}
-
-static __inline void
-vm_map_lock(struct vm_map *map)
-{
- if (map->flags & VM_MAP_INTRSAFE)
- return;
-
- do {
- while (map->flags & VM_MAP_BUSY) {
- map->flags |= VM_MAP_WANTLOCK;
- tsleep(&map->flags, PVM, (char *)vmmapbsy, 0);
- }
- } while (rw_enter(&map->lock, RW_WRITE|RW_SLEEPFAIL) != 0);
-
- map->timestamp++;
-}
-
-#define vm_map_lock_read(map) rw_enter_read(&(map)->lock)
-
-#define vm_map_unlock(map) \
-do { \
- if (((map)->flags & VM_MAP_INTRSAFE) == 0) \
- rw_exit(&(map)->lock); \
-} while (0)
-
-#define vm_map_unlock_read(map) rw_exit_read(&(map)->lock)
-
-#define vm_map_downgrade(map) rw_enter(&(map)->lock, RW_DOWNGRADE)
-
-#define vm_map_upgrade(map) \
-do { \
- rw_exit_read(&(map)->lock); \
- rw_enter_write(&(map)->lock); \
-} while (0)
-
-#define vm_map_busy(map) \
-do { \
- (map)->flags |= VM_MAP_BUSY; \
-} while (0)
+#include <sys/systm.h> /* for panic() */
+
+boolean_t vm_map_lock_try_ln(struct vm_map*, char*, int);
+void vm_map_lock_ln(struct vm_map*, char*, int);
+void vm_map_lock_read_ln(struct vm_map*, char*, int);
+void vm_map_unlock_ln(struct vm_map*, char*, int);
+void vm_map_unlock_read_ln(struct vm_map*, char*, int);
+void vm_map_downgrade_ln(struct vm_map*, char*, int);
+void vm_map_upgrade_ln(struct vm_map*, char*, int);
+void vm_map_busy_ln(struct vm_map*, char*, int);
+void vm_map_unbusy_ln(struct vm_map*, char*, int);
+
+#ifdef DIAGNOSTIC
+#define vm_map_lock_try(map) vm_map_lock_try_ln(map, __FILE__, __LINE__)
+#define vm_map_lock(map) vm_map_lock_ln(map, __FILE__, __LINE__)
+#define vm_map_lock_read(map) vm_map_lock_read_ln(map, __FILE__, __LINE__)
+#define vm_map_unlock(map) vm_map_unlock_ln(map, __FILE__, __LINE__)
+#define vm_map_unlock_read(map) vm_map_unlock_read_ln(map, __FILE__, __LINE__)
+#define vm_map_downgrade(map) vm_map_downgrade_ln(map, __FILE__, __LINE__)
+#define vm_map_upgrade(map) vm_map_upgrade_ln(map, __FILE__, __LINE__)
+#define vm_map_busy(map) vm_map_busy_ln(map, __FILE__, __LINE__)
+#define vm_map_unbusy(map) vm_map_unbusy_ln(map, __FILE__, __LINE__)
+#else
+#define vm_map_lock_try(map) vm_map_lock_try_ln(map, NULL, 0)
+#define vm_map_lock(map) vm_map_lock_ln(map, NULL, 0)
+#define vm_map_lock_read(map) vm_map_lock_read_ln(map, NULL, 0)
+#define vm_map_unlock(map) vm_map_unlock_ln(map, NULL, 0)
+#define vm_map_unlock_read(map) vm_map_unlock_read_ln(map, NULL, 0)
+#define vm_map_downgrade(map) vm_map_downgrade_ln(map, NULL, 0)
+#define vm_map_upgrade(map) vm_map_upgrade_ln(map, NULL, 0)
+#define vm_map_busy(map) vm_map_busy_ln(map, NULL, 0)
+#define vm_map_unbusy(map) vm_map_unbusy_ln(map, NULL, 0)
+#endif
-#define vm_map_unbusy(map) \
-do { \
- int oflags; \
- \
- oflags = (map)->flags; \
- (map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK); \
- if (oflags & VM_MAP_WANTLOCK) \
- wakeup(&(map)->flags); \
-} while (0)
#endif /* _KERNEL */
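The old inline lock helpers are gone; call sites keep using the same macro
names, which now expand to out-of-line *_ln functions.  Under DIAGNOSTIC the
expansion passes __FILE__/__LINE__ so lock debugging can report where the map
was (un)locked.  A typical caller is unchanged, e.g. (assuming `map' points to
an initialized map):

	vm_map_lock(map);
	/* ... modify the entry tree ... */
	vm_map_unlock(map);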
/*