path: root/sys/uvm/uvm_extern.h
author     Artur Grabowski <art@cvs.openbsd.org>    2001-12-19 08:58:08 +0000
committer  Artur Grabowski <art@cvs.openbsd.org>    2001-12-19 08:58:08 +0000
commit     d8afae924f4db99650aa0df115c6ae66ed02b950 (patch)
tree       a2cbc045f0eb624a394f8e45fea1a58ecf1faaf0 /sys/uvm/uvm_extern.h
parent     4a6c79ff4f05aaae32458104529a9a9e0d3d208b (diff)
UBC was a disaster. It worked very well when it worked, but on some
machines, some configurations, or in some phase of the moon (we actually
don't know when or why) files disappeared. Since we've not been able to
track down the problem in two weeks of intense debugging and we need
-current to be stable, back out everything to the state it had before UBC.
We apologise for the inconvenience.
Diffstat (limited to 'sys/uvm/uvm_extern.h')
-rw-r--r--  sys/uvm/uvm_extern.h  152
1 file changed, 69 insertions, 83 deletions
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index ac984530ff3..39d6fcb6767 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_extern.h,v 1.39 2001/12/06 23:01:07 niklas Exp $ */
-/* $NetBSD: uvm_extern.h,v 1.66 2001/08/16 01:37:50 chs Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.40 2001/12/19 08:58:07 art Exp $ */
+/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
*
@@ -88,12 +88,24 @@
* typedefs, necessary for standard UVM headers.
*/
-typedef unsigned int uvm_flag_t;
+typedef unsigned int uvm_flag_t;
typedef int vm_fault_t;
typedef int vm_inherit_t; /* XXX: inheritance codes */
typedef off_t voff_t; /* XXX: offset within a uvm_object */
+union vm_map_object;
+typedef union vm_map_object vm_map_object_t;
+
+struct vm_map_entry;
+typedef struct vm_map_entry *vm_map_entry_t;
+
+struct vm_map;
+typedef struct vm_map *vm_map_t;
+
+struct vm_page;
+typedef struct vm_page *vm_page_t;
+
/*
* defines
*/
@@ -211,21 +223,6 @@ typedef int vm_prot_t;
#define UVM_PGA_ZERO 0x0002 /* returned page must be zero'd */
/*
- * the following defines are for ubc_alloc's flags
- */
-#define UBC_READ 0
-#define UBC_WRITE 1
-
-/*
- * flags for uvn_findpages().
- */
-#define UFP_ALL 0x0
-#define UFP_NOWAIT 0x1
-#define UFP_NOALLOC 0x2
-#define UFP_NOCACHE 0x4
-#define UFP_NORDONLY 0x8
-
-/*
* lockflags that control the locking behavior of various functions.
*/
#define UVM_LK_ENTER 0x00000001 /* map locked on entry */
@@ -251,9 +248,6 @@ struct pmap;
struct vnode;
struct pool;
struct simplelock;
-struct vm_map_entry;
-struct vm_map;
-struct vm_page;
extern struct pool *uvm_aiobuf_pool;
@@ -276,9 +270,6 @@ struct uvmexp {
int paging; /* number of pages in the process of being paged out */
int wired; /* number of wired pages */
- int ncolors; /* number of page color buckets: must be p-o-2 */
- int colormask; /* color bucket mask */
-
int zeropages; /* number of zero'd pages */
int reserve_pagedaemon; /* number of pages reserved for pagedaemon */
int reserve_kernel; /* number of pages reserved for kernel */
@@ -328,9 +319,8 @@ struct uvmexp {
was available */
int pga_zeromiss; /* pagealloc where zero wanted and zero
not available */
- int zeroaborts; /* number of times page zeroing was aborted */
- int colorhit; /* pagealloc where we got optimal color */
- int colormiss; /* pagealloc where we didn't */
+ int zeroaborts; /* number of times page zeroing was
+ aborted */
/* fault subcounters */
int fltnoram; /* number of times fault was out of ram */
@@ -402,7 +392,7 @@ struct vmspace {
caddr_t vm_shm; /* SYS5 shared memory private data XXX */
/* we copy from vm_startcopy to the end of the structure on fork */
#define vm_startcopy vm_rssize
- segsz_t vm_rssize; /* current resident set size in pages */
+ segsz_t vm_rssize; /* current resident set size in pages */
segsz_t vm_swrss; /* resident set size before last swap */
segsz_t vm_tsize; /* text size (pages) XXX */
segsz_t vm_dsize; /* data size (pages) XXX */
@@ -424,6 +414,7 @@ extern struct vm_map *kmem_map;
extern struct vm_map *mb_map;
extern struct vm_map *phys_map;
+
/*
* macros
*/
@@ -434,7 +425,11 @@ extern struct vm_map *phys_map;
#endif /* _KERNEL */
+#ifdef pmap_resident_count
#define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))
+#else
+#define vm_resident_count(vm) ((vm)->vm_rssize)
+#endif
/* XXX clean up later */
struct buf;
@@ -469,16 +464,9 @@ void uao_detach_locked __P((struct uvm_object *));
void uao_reference __P((struct uvm_object *));
void uao_reference_locked __P((struct uvm_object *));
-/* uvm_bio.c */
-void ubc_init __P((void));
-void * ubc_alloc __P((struct uvm_object *, voff_t, vsize_t *,
- int));
-void ubc_release __P((void *, vsize_t));
-void ubc_flush __P((struct uvm_object *, voff_t, voff_t));
-
/* uvm_fault.c */
-int uvm_fault __P((struct vm_map *, vaddr_t, vm_fault_t,
- vm_prot_t));
+int uvm_fault __P((vm_map_t, vaddr_t,
+ vm_fault_t, vm_prot_t));
/* handle a page fault */
/* uvm_glue.c */
@@ -499,53 +487,50 @@ void uvm_vsunlock __P((struct proc *, caddr_t, size_t));
/* uvm_init.c */
-void uvm_init __P((void));
+void uvm_init __P((void));
/* init the uvm system */
/* uvm_io.c */
-int uvm_io __P((struct vm_map *, struct uio *));
+int uvm_io __P((vm_map_t, struct uio *));
/* uvm_km.c */
-vaddr_t uvm_km_alloc1 __P((struct vm_map *, vsize_t,
- boolean_t));
-void uvm_km_free __P((struct vm_map *, vaddr_t, vsize_t));
-void uvm_km_free_wakeup __P((struct vm_map *, vaddr_t,
- vsize_t));
-vaddr_t uvm_km_kmemalloc __P((struct vm_map *, struct
- uvm_object *, vsize_t, int));
-struct vm_map *uvm_km_suballoc __P((struct vm_map *, vaddr_t *,
- vaddr_t *, vsize_t, int, boolean_t,
- struct vm_map *));
-vaddr_t uvm_km_valloc __P((struct vm_map *, vsize_t));
-vaddr_t uvm_km_valloc_align __P((struct vm_map *, vsize_t,
- vsize_t));
-vaddr_t uvm_km_valloc_wait __P((struct vm_map *, vsize_t));
-vaddr_t uvm_km_valloc_prefer_wait __P((struct vm_map *, vsize_t,
- voff_t));
-vaddr_t uvm_km_alloc_poolpage1 __P((struct vm_map *,
- struct uvm_object *, boolean_t));
-void uvm_km_free_poolpage1 __P((struct vm_map *, vaddr_t));
-
-#define uvm_km_alloc_poolpage(waitok) \
- uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object, (waitok))
-#define uvm_km_free_poolpage(addr) \
- uvm_km_free_poolpage1(kmem_map, (addr))
+vaddr_t uvm_km_alloc1 __P((vm_map_t, vsize_t, boolean_t));
+void uvm_km_free __P((vm_map_t, vaddr_t, vsize_t));
+void uvm_km_free_wakeup __P((vm_map_t, vaddr_t,
+ vsize_t));
+vaddr_t uvm_km_kmemalloc __P((vm_map_t, struct uvm_object *,
+ vsize_t, int));
+struct vm_map *uvm_km_suballoc __P((vm_map_t, vaddr_t *,
+ vaddr_t *, vsize_t, int,
+ boolean_t, vm_map_t));
+vaddr_t uvm_km_valloc __P((vm_map_t, vsize_t));
+vaddr_t uvm_km_valloc_align __P((vm_map_t, vsize_t, vsize_t));
+vaddr_t uvm_km_valloc_wait __P((vm_map_t, vsize_t));
+vaddr_t uvm_km_valloc_prefer_wait __P((vm_map_t, vsize_t,
+ voff_t));
+vaddr_t uvm_km_alloc_poolpage1 __P((vm_map_t,
+ struct uvm_object *, boolean_t));
+void uvm_km_free_poolpage1 __P((vm_map_t, vaddr_t));
+
+#define uvm_km_alloc_poolpage(waitok) uvm_km_alloc_poolpage1(kmem_map, \
+ uvmexp.kmem_object, (waitok))
+#define uvm_km_free_poolpage(addr) uvm_km_free_poolpage1(kmem_map, (addr))
/* uvm_map.c */
-int uvm_map __P((struct vm_map *, vaddr_t *, vsize_t,
+int uvm_map __P((vm_map_t, vaddr_t *, vsize_t,
struct uvm_object *, voff_t, vsize_t,
uvm_flag_t));
-int uvm_map_pageable __P((struct vm_map *, vaddr_t,
+int uvm_map_pageable __P((vm_map_t, vaddr_t,
vaddr_t, boolean_t, int));
-int uvm_map_pageable_all __P((struct vm_map *, int,
- vsize_t));
-boolean_t uvm_map_checkprot __P((struct vm_map *, vaddr_t,
+int uvm_map_pageable_all __P((vm_map_t, int, vsize_t));
+boolean_t uvm_map_checkprot __P((vm_map_t, vaddr_t,
vaddr_t, vm_prot_t));
-int uvm_map_protect __P((struct vm_map *, vaddr_t,
+int uvm_map_protect __P((vm_map_t, vaddr_t,
vaddr_t, vm_prot_t, boolean_t));
-struct vmspace *uvmspace_alloc __P((vaddr_t, vaddr_t));
+struct vmspace *uvmspace_alloc __P((vaddr_t, vaddr_t,
+ boolean_t));
void uvmspace_init __P((struct vmspace *, struct pmap *,
- vaddr_t, vaddr_t));
+ vaddr_t, vaddr_t, boolean_t));
void uvmspace_exec __P((struct proc *, vaddr_t, vaddr_t));
struct vmspace *uvmspace_fork __P((struct vmspace *));
void uvmspace_free __P((struct vmspace *));
@@ -555,14 +540,14 @@ void uvmspace_unshare __P((struct proc *));
/* uvm_meter.c */
void uvm_meter __P((void));
-int uvm_sysctl __P((int *, u_int, void *, size_t *,
+int uvm_sysctl __P((int *, u_int, void *, size_t *,
void *, size_t, struct proc *));
void uvm_total __P((struct vmtotal *));
/* uvm_mmap.c */
-int uvm_mmap __P((struct vm_map *, vaddr_t *, vsize_t,
- vm_prot_t, vm_prot_t, int,
- void *, voff_t, vsize_t));
+int uvm_mmap __P((vm_map_t, vaddr_t *, vsize_t,
+ vm_prot_t, vm_prot_t, int,
+ caddr_t, voff_t, vsize_t));
/* uvm_page.c */
struct vm_page *uvm_pagealloc_strat __P((struct uvm_object *,
@@ -570,7 +555,9 @@ struct vm_page *uvm_pagealloc_strat __P((struct uvm_object *,
#define uvm_pagealloc(obj, off, anon, flags) \
uvm_pagealloc_strat((obj), (off), (anon), (flags), \
UVM_PGA_STRAT_NORMAL, 0)
-void uvm_pagerealloc __P((struct vm_page *,
+vaddr_t uvm_pagealloc_contig __P((vaddr_t, vaddr_t,
+ vaddr_t, vaddr_t));
+void uvm_pagerealloc __P((struct vm_page *,
struct uvm_object *, voff_t));
/* Actually, uvm_page_physload takes PF#s which need their own type */
void uvm_page_physload __P((paddr_t, paddr_t,
@@ -589,28 +576,27 @@ void uvm_aiodone_daemon __P((void *));
/* uvm_pglist.c */
int uvm_pglistalloc __P((psize_t, paddr_t,
paddr_t, paddr_t, paddr_t,
- struct pglist *, int, int));
+ struct pglist *, int, int));
void uvm_pglistfree __P((struct pglist *));
/* uvm_swap.c */
void uvm_swap_init __P((void));
/* uvm_unix.c */
-int uvm_coredump __P((struct proc *, struct vnode *,
+int uvm_coredump __P((struct proc *, struct vnode *,
struct ucred *, struct core *));
int uvm_grow __P((struct proc *, vaddr_t));
/* uvm_user.c */
-void uvm_deallocate __P((struct vm_map *, vaddr_t, vsize_t));
+int uvm_deallocate __P((vm_map_t, vaddr_t, vsize_t));
/* uvm_vnode.c */
void uvm_vnp_setsize __P((struct vnode *, voff_t));
void uvm_vnp_sync __P((struct mount *));
+void uvm_vnp_terminate __P((struct vnode *));
+ /* terminate a uvm/uvn object */
+boolean_t uvm_vnp_uncache __P((struct vnode *));
struct uvm_object *uvn_attach __P((void *, vm_prot_t));
-void uvn_findpages __P((struct uvm_object *, voff_t,
- int *, struct vm_page **, int));
-void uvm_vnp_zerorange __P((struct vnode *, off_t, size_t));
-void uvm_vnp_asyncget __P((struct vnode *, off_t, size_t));
/* kern_malloc.c */
void kmeminit_nkmempages __P((void));