| author | Martin Pieuchot <mpi@cvs.openbsd.org> | 2019-12-12 11:12:38 +0000 |
|---|---|---|
| committer | Martin Pieuchot <mpi@cvs.openbsd.org> | 2019-12-12 11:12:38 +0000 |
| commit | 7ff24f14fc5aa2876f20cae2c52edf2cbb12951a (patch) | |
| tree | 26b51a5489ec9e4318f1108d84feec49e8966e90 /sys | |
| parent | 4e842f628b0ac59e7f59b9539cc562e8c01e81c4 (diff) | |
Header cleanup.

- reduces gratuitous differences with NetBSD,
- merges multiple '#ifdef _KERNEL' blocks,
- kills the unused 'struct vm_map_intrsafe',
- turns 'union vm_map_object' into an anonymous union (following NetBSD; why this is safe is sketched after this message),
- moves the questionable vm_map_modflags() into uvm/uvm_map.c (usage sketched after the diff),
- removes the guards around MAX_KMAPENT, since it is defined and used only once,
- documents lock differences,
- fixes tab vs space inconsistencies.
ok mlarkin@, visa@
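The union change is the only one that touches a type visible outside the header, and it is worth spelling out why it is safe: the union loses its tag but keeps its member name, so every existing `entry->object.uvm_obj` access path compiles unchanged. A self-contained sketch follows; the `_old`/`_new` struct names and the accessor are illustrative, not identifiers from the tree:

```c
struct uvm_object;	/* opaque here; real definition in uvm/uvm_object.h */
struct vm_map;		/* opaque here; real definition in uvm/uvm_map.h */

/* Before: a separately declared, named union type. */
union vm_map_object {
	struct uvm_object	*uvm_obj;	/* UVM object */
	struct vm_map		*sub_map;	/* belongs to another map */
};

struct vm_map_entry_old {
	union vm_map_object	object;		/* object I point to */
};

/* After: the same union declared in place, with no tag to export. */
struct vm_map_entry_new {
	union {
		struct uvm_object	*uvm_obj;
		struct vm_map		*sub_map;
	} object;				/* object I point to */
};

/* Call sites are untouched: the member path is identical either way. */
struct uvm_object *
entry_get_uvm_obj(struct vm_map_entry_new *entry)
{
	return (entry->object.uvm_obj);
}
```

Because the tag disappears, `union vm_map_object` and its `vm_map_object_t` typedef no longer need forward declarations in uvm_extern.h, which is exactly what the first hunk below removes.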
Diffstat (limited to 'sys')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | sys/uvm/uvm_extern.h | 5 |
| -rw-r--r-- | sys/uvm/uvm_map.c | 11 |
| -rw-r--r-- | sys/uvm/uvm_map.h | 124 |
3 files changed, 47 insertions, 93 deletions
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 779f7654d9d..7bc4417d9e6 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_extern.h,v 1.151 2019/11/29 06:34:45 deraadt Exp $	*/
+/*	$OpenBSD: uvm_extern.h,v 1.152 2019/12/12 11:12:36 mpi Exp $	*/
 /*	$NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $	*/
 
 /*
@@ -65,9 +65,6 @@ typedef int		vm_fault_t;
 typedef int		vm_inherit_t;	/* XXX: inheritance codes */
 typedef off_t		voff_t;		/* XXX: offset within a uvm_object */
 
-union vm_map_object;
-typedef union vm_map_object	vm_map_object_t;
-
 struct vm_map_entry;
 typedef struct vm_map_entry	*vm_map_entry_t;
 
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 2b327a87c95..5b35f603be8 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_map.c,v 1.258 2019/12/09 17:37:59 deraadt Exp $	*/
+/*	$OpenBSD: uvm_map.c,v 1.259 2019/12/12 11:12:36 mpi Exp $	*/
 /*	$NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $	*/
 
 /*
@@ -230,7 +230,6 @@ void			 vmspace_validate(struct vm_map*);
 #define PMAP_PREFER(addr, off)	(addr)
 #endif
 
-
 /*
  * The kernel map will initially be VM_MAP_KSIZE_INIT bytes.
  * Every time that gets cramped, we grow by at least VM_MAP_KSIZE_DELTA bytes.
@@ -335,6 +334,14 @@ vaddr_t uvm_maxkaddr;
 	}							\
 } while (0)
 
+#define vm_map_modflags(map, set, clear)				\
+	do {								\
+		mtx_enter(&(map)->flags_lock);				\
+		(map)->flags = ((map)->flags | (set)) & ~(clear);	\
+		mtx_leave(&(map)->flags_lock);				\
+	} while (0)
+
+
 /*
  * Tree describing entries by address.
  *
diff --git a/sys/uvm/uvm_map.h b/sys/uvm/uvm_map.h
index 7ee39f50e81..4dc26a0eb3b 100644
--- a/sys/uvm/uvm_map.h
+++ b/sys/uvm/uvm_map.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_map.h,v 1.65 2019/11/29 06:34:46 deraadt Exp $	*/
+/*	$OpenBSD: uvm_map.h,v 1.66 2019/12/12 11:12:37 mpi Exp $	*/
 /*	$NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $	*/
 
 /*
@@ -86,16 +86,6 @@
 #ifdef _KERNEL
 
 /*
- * Internal functions.
- *
- * Required by clipping macros.
- */
-void		uvm_map_clip_end(struct vm_map*, struct vm_map_entry*,
-		    vaddr_t);
-void		uvm_map_clip_start(struct vm_map*,
-		    struct vm_map_entry*, vaddr_t);
-
-/*
  * UVM_MAP_CLIP_START: ensure that the entry begins at or after
  * the starting address, if it doesn't we split the entry.
  *
@@ -133,26 +123,6 @@
 #include <uvm/uvm_anon.h>
 
 /*
- * types defined:
- *
- *	vm_map_t		the high-level address map data structure.
- *	vm_map_entry_t		an entry in an address map.
- *	vm_map_version_t	a timestamp of a map, for use with vm_map_lookup
- */
-
-/*
- * Objects which live in maps may be either VM objects, or another map
- * (called a "sharing map") which denotes read-write sharing with other maps.
- *
- * XXXCDC: private pager data goes here now
- */
-
-union vm_map_object {
-	struct uvm_object	*uvm_obj;	/* UVM OBJECT */
-	struct vm_map		*sub_map;	/* belongs to another map */
-};
-
-/*
  * Address map entries consist of start and end addresses,
  * a VM object (or sharing map) and offset into that object,
  * and user-exported inheritance and protection information.
@@ -177,23 +147,23 @@ struct vm_map_entry {
 	vsize_t			guard;		/* bytes in guard */
 	vsize_t			fspace;		/* free space */
 
-	union vm_map_object	object;		/* object I point to */
+	union {
+		struct uvm_object	*uvm_obj;	/* uvm object */
+		struct vm_map		*sub_map;	/* belongs to another map */
+	} object;				/* object I point to */
 	voff_t			offset;		/* offset into object */
 	struct vm_aref		aref;		/* anonymous overlay */
-
 	int			etype;		/* entry type */
-
 	vm_prot_t		protection;	/* protection code */
 	vm_prot_t		max_protection;	/* maximum protection */
 	vm_inherit_t		inheritance;	/* inheritance */
-
 	int			wired_count;	/* can be paged if == 0 */
 	int			advice;		/* madvise advice */
 
 #define uvm_map_entry_stop_copy flags
 	u_int8_t		flags;		/* flags */
-#define	UVM_MAP_STATIC		0x01		/* static map entry */
-#define	UVM_MAP_KMEM		0x02		/* from kmem entry pool */
+#define UVM_MAP_STATIC		0x01		/* static map entry */
+#define UVM_MAP_KMEM		0x02		/* from kmem entry pool */
 
 	vsize_t			fspace_augment;	/* max(fspace) in subtree */
 };
@@ -278,7 +248,7 @@ RBT_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
  * If that allocation fails:
  * - vmspace maps will spill over into vm_map.bfree,
  * - all other maps will call uvm_map_kmem_grow() to increase the arena.
- * 
+ *
  * vmspace maps have their data, brk() and stack arenas automatically
  * updated when uvm_map() is invoked without MAP_FIXED.
 * The spill over arena (vm_map.bfree) will contain the space in the brk()
@@ -294,8 +264,8 @@ RBT_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
  */
 struct vm_map {
 	struct pmap		*pmap;		/* [I] Physical map */
-	struct rwlock		lock;		/* Lock for map data */
-	struct mutex		mtx;
+	struct rwlock		lock;		/* Non-intrsafe lock */
+	struct mutex		mtx;		/* Intrsafe lock */
 
 	u_long			sserial;	/* [v] # stack changes */
 	u_long			wserial;	/* [v] # PROT_WRITE increases */
@@ -348,75 +318,58 @@ struct vm_map {
 #define VM_MAP_WIREFUTURE	0x04	/* rw: wire future mappings */
 #define VM_MAP_BUSY		0x08	/* rw: map is busy */
 #define VM_MAP_WANTLOCK		0x10	/* rw: want to write-lock */
-#define VM_MAP_GUARDPAGES 0x20	/* rw: add guard pgs to map */
-#define VM_MAP_ISVMSPACE 0x40	/* ro: map is a vmspace */
-#define VM_MAP_SYSCALL_ONCE 0x80	/* rw: libc syscall registered */
+#define VM_MAP_GUARDPAGES	0x20	/* rw: add guard pgs to map */
+#define VM_MAP_ISVMSPACE	0x40	/* ro: map is a vmspace */
+#define VM_MAP_SYSCALL_ONCE	0x80	/* rw: libc syscall registered */
 
-/* XXX: number of kernel maps and entries to statically allocate */
-
-#if !defined(MAX_KMAPENT)
+/* Number of kernel maps and entries to statically allocate */
 #define	MAX_KMAPENT	1024	/* Sufficient to make it to the scheduler. */
-#endif	/* !defined MAX_KMAPENT */
 
 #ifdef _KERNEL
-#define	vm_map_modflags(map, set, clear)				\
-do {									\
-	mtx_enter(&(map)->flags_lock);					\
-	(map)->flags = ((map)->flags | (set)) & ~(clear);		\
-	mtx_leave(&(map)->flags_lock);					\
-} while (0)
-#endif	/* _KERNEL */
-
-/*
- * Interrupt-safe maps must also be kept on a special list,
- * to assist uvm_fault() in avoiding locking problems.
- */
-struct vm_map_intrsafe {
-	struct vm_map		vmi_map;
-	LIST_ENTRY(vm_map_intrsafe)	vmi_list;
-};
-
 /*
  * globals:
  */
 
-#ifdef _KERNEL
-
 extern vaddr_t	uvm_maxkaddr;
 
 /*
  * protos: the following prototypes define the interface to vm_map
  */
 
-void		uvm_map_deallocate(vm_map_t);
+void		uvm_map_deallocate(struct vm_map *);
 
-int		uvm_map_clean(vm_map_t, vaddr_t, vaddr_t, int);
-vm_map_t	uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
-int		uvm_map_extract(struct vm_map*, vaddr_t, vsize_t, vaddr_t*,
-		    int);
+int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
+void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
+		    vaddr_t);
+void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
+		    vaddr_t);
+int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
+		    vaddr_t *, int);
+struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
 vaddr_t		uvm_map_pie(vaddr_t);
 vaddr_t		uvm_map_hint(struct vmspace *, vm_prot_t, vaddr_t, vaddr_t);
-int		uvm_map_syscall(vm_map_t, vaddr_t, vaddr_t);
-int		uvm_map_inherit(vm_map_t, vaddr_t, vaddr_t, vm_inherit_t);
-int		uvm_map_advice(vm_map_t, vaddr_t, vaddr_t, int);
+int		uvm_map_syscall(struct vm_map *, vaddr_t, vaddr_t);
+int		uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t);
+int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
 void		uvm_map_init(void);
-boolean_t	uvm_map_lookup_entry(vm_map_t, vaddr_t, vm_map_entry_t *);
-boolean_t	uvm_map_is_stack_remappable(vm_map_t, vaddr_t, vsize_t);
+boolean_t	uvm_map_lookup_entry(struct vm_map *, vaddr_t, vm_map_entry_t *);
+boolean_t	uvm_map_is_stack_remappable(struct vm_map *, vaddr_t, vsize_t);
 int		uvm_map_remap_as_stack(struct proc *, vaddr_t, vsize_t);
-int		uvm_map_replace(vm_map_t, vaddr_t, vaddr_t,
+int		uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t,
 		    vm_map_entry_t, int);
-int		uvm_map_reserve(vm_map_t, vsize_t, vaddr_t, vsize_t,
+int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
 		    vaddr_t *);
-void		uvm_map_setup(vm_map_t, vaddr_t, vaddr_t, int);
-int		uvm_map_submap(vm_map_t, vaddr_t, vaddr_t, vm_map_t);
-void		uvm_unmap(vm_map_t, vaddr_t, vaddr_t);
+void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
+int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
+		    struct vm_map *);
+void		uvm_unmap(struct vm_map *, vaddr_t, vaddr_t);
+void		uvm_unmap_detach(struct uvm_map_deadq *, int);
+void		uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
+		    struct uvm_map_deadq *, boolean_t, boolean_t);
 void		uvm_map_set_uaddr(struct vm_map*, struct uvm_addr_state**,
 		    struct uvm_addr_state*);
 int		uvm_map_mquery(struct vm_map*, vaddr_t*, vsize_t, voff_t, int);
 
-void		uvm_unmap_detach(struct uvm_map_deadq*, int);
-void		uvm_unmap_remove(struct vm_map*, vaddr_t, vaddr_t,
-		    struct uvm_map_deadq*, boolean_t, boolean_t);
 
 
 struct p_inentry;
@@ -430,8 +383,6 @@ struct kinfo_vmentry;
 int		uvm_map_fill_vmmap(struct vm_map *, struct kinfo_vmentry *,
 		    size_t *);
 
-#endif /* _KERNEL */
-
 /*
  * VM map locking operations:
  *
@@ -458,7 +409,6 @@ int		uvm_map_fill_vmmap(struct vm_map *, struct kinfo_vmentry *,
  *
  */
 
-#ifdef _KERNEL
 /*
  * XXX: clean up later
  * Half the kernel seems to depend on them being included here.
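vm_map_modflags() was header-visible but only ever used by uvm_map.c, so moving it out of uvm_map.h shrinks the header's exported surface without changing behaviour. A minimal usage sketch, assuming kernel context; the wrapper function below is hypothetical, not code from the tree:

```c
/*
 * Hypothetical wrapper: mark a map so that future mappings get wired.
 * vm_map_modflags() expands to a read-modify-write of map->flags with
 * flags_lock held, so concurrent flag updates cannot lose bits.
 */
static void
example_mark_wirefuture(struct vm_map *map)
{
	vm_map_modflags(map, VM_MAP_WIREFUTURE, 0);	/* set one bit, clear none */
}
```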