summaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorNiklas Hallqvist <niklas@cvs.openbsd.org>1996-08-02 00:06:07 +0000
committerNiklas Hallqvist <niklas@cvs.openbsd.org>1996-08-02 00:06:07 +0000
commitb2a7bcf1c6ab2f0dfb98b56de2bb2512f3f0d33a (patch)
tree976938a589b4628bb54e5ba46f0cf3d912b08489 /sys
parent6b0d7d475099e45ef06df97446491a48ba27be1b (diff)
Fix long-standing swap-leak. Add OpenBSD tags. Optimize thread_wakeup.
Diffstat (limited to 'sys')
-rw-r--r--sys/vm/device_pager.c2
-rw-r--r--sys/vm/device_pager.h1
-rw-r--r--sys/vm/kern_lock.c1
-rw-r--r--sys/vm/lock.h1
-rw-r--r--sys/vm/pmap.h1
-rw-r--r--sys/vm/swap_pager.c255
-rw-r--r--sys/vm/swap_pager.h2
-rw-r--r--sys/vm/vm.h1
-rw-r--r--sys/vm/vm_extern.h150
-rw-r--r--sys/vm/vm_fault.c14
-rw-r--r--sys/vm/vm_glue.c12
-rw-r--r--sys/vm/vm_inherit.h1
-rw-r--r--sys/vm/vm_init.c1
-rw-r--r--sys/vm/vm_kern.c2
-rw-r--r--sys/vm/vm_kern.h1
-rw-r--r--sys/vm/vm_map.h1
-rw-r--r--sys/vm/vm_object.c697
-rw-r--r--sys/vm/vm_object.h5
-rw-r--r--sys/vm/vm_page.c3
-rw-r--r--sys/vm/vm_page.h1
-rw-r--r--sys/vm/vm_pageout.c11
-rw-r--r--sys/vm/vm_pageout.h1
-rw-r--r--sys/vm/vm_pager.c32
-rw-r--r--sys/vm/vm_pager.h54
-rw-r--r--sys/vm/vm_prot.h1
-rw-r--r--sys/vm/vnode_pager.h1
26 files changed, 937 insertions, 315 deletions
diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c
index c62bf53cc20..36d10e39c6d 100644
--- a/sys/vm/device_pager.c
+++ b/sys/vm/device_pager.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: device_pager.c,v 1.4 1996/08/02 00:05:54 niklas Exp $ */
/* $NetBSD: device_pager.c,v 1.21 1996/03/16 23:15:18 christos Exp $ */
/*
@@ -49,6 +50,7 @@
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/malloc.h>
+#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
diff --git a/sys/vm/device_pager.h b/sys/vm/device_pager.h
index 6411b824a82..59b6d993ac9 100644
--- a/sys/vm/device_pager.h
+++ b/sys/vm/device_pager.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: device_pager.h,v 1.2 1996/08/02 00:05:55 niklas Exp $ */
/* $NetBSD: device_pager.h,v 1.9 1994/06/29 06:47:41 cgd Exp $ */
/*
diff --git a/sys/vm/kern_lock.c b/sys/vm/kern_lock.c
index 2d61239ee37..2d03a11c25a 100644
--- a/sys/vm/kern_lock.c
+++ b/sys/vm/kern_lock.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: kern_lock.c,v 1.4 1996/08/02 00:05:56 niklas Exp $ */
/* $NetBSD: kern_lock.c,v 1.10 1994/10/30 19:11:09 cgd Exp $ */
/*
diff --git a/sys/vm/lock.h b/sys/vm/lock.h
index 1157b83f356..4d64105395e 100644
--- a/sys/vm/lock.h
+++ b/sys/vm/lock.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: lock.h,v 1.4 1996/08/02 00:05:56 niklas Exp $ */
/* $NetBSD: lock.h,v 1.8 1994/10/30 19:11:11 cgd Exp $ */
/*
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 5aba64b55d5..07053e3047a 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: pmap.h,v 1.4 1996/08/02 00:05:56 niklas Exp $ */
/* $NetBSD: pmap.h,v 1.16 1996/03/31 22:15:32 pk Exp $ */
/*
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index e0cbe6bb79a..84a1b37126e 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: swap_pager.c,v 1.5 1996/08/02 00:05:57 niklas Exp $ */
/* $NetBSD: swap_pager.c,v 1.27 1996/03/16 23:15:20 christos Exp $ */
/*
@@ -137,6 +138,10 @@ static void swap_pager_cluster
__P((vm_pager_t, vm_offset_t,
vm_offset_t *, vm_offset_t *));
static void swap_pager_dealloc __P((vm_pager_t));
+static int swap_pager_remove
+ __P((vm_pager_t, vm_offset_t, vm_offset_t));
+static vm_offset_t swap_pager_next __P((vm_pager_t, vm_offset_t));
+static int swap_pager_count __P((vm_pager_t));
static int swap_pager_getpage
__P((vm_pager_t, vm_page_t *, int, boolean_t));
static boolean_t swap_pager_haspage __P((vm_pager_t, vm_offset_t));
@@ -144,6 +149,7 @@ static int swap_pager_io __P((sw_pager_t, vm_page_t *, int, int));
static void swap_pager_iodone __P((struct buf *));
static int swap_pager_putpage
__P((vm_pager_t, vm_page_t *, int, boolean_t));
+static int count_bits __P((u_int));
struct pagerops swappagerops = {
swap_pager_init,
@@ -152,7 +158,10 @@ struct pagerops swappagerops = {
swap_pager_getpage,
swap_pager_putpage,
swap_pager_haspage,
- swap_pager_cluster
+ swap_pager_cluster,
+ swap_pager_remove,
+ swap_pager_next,
+ swap_pager_count
};
static void
@@ -310,7 +319,7 @@ swap_pager_alloc(handle, size, prot, foff)
}
bzero((caddr_t)swp->sw_blocks,
swp->sw_nblocks * sizeof(*swp->sw_blocks));
- swp->sw_poip = 0;
+ swp->sw_poip = swp->sw_cnt = 0;
if (handle) {
vm_object_t object;
@@ -560,7 +569,8 @@ swap_pager_io(swp, mlist, npages, flags)
register struct buf *bp;
register sw_blk_t swb;
register int s;
- int ix, mask;
+ int ix;
+ u_int mask;
boolean_t rv;
vm_offset_t kva, off;
swp_clean_t spc;
@@ -732,6 +742,7 @@ swap_pager_io(swp, mlist, npages, flags)
printf("swpg_io: off=%lx, npg=%x, mask=%x, bmask=%x\n",
off, npages, mask, swb->swb_mask);
#endif
+ swp->sw_cnt += count_bits(mask & ~swb->swb_mask);
swb->swb_mask |= mask;
}
/*
@@ -1030,3 +1041,241 @@ swap_pager_iodone(bp)
wakeup(&vm_pages_needed);
splx(s);
}
+
+/*
+ * swap_pager_remove:
+ *
+ * This is called via the vm_pager_remove path and
+ * will remove any pages inside the range [from, to)
+ * backed by us. It is assumed that both addresses
+ * are multiples of PAGE_SIZE. The special case
+ * where TO is zero means: remove to end of object.
+ */
+static int
+swap_pager_remove(pager, from, to)
+ vm_pager_t pager;
+ vm_offset_t from, to;
+{
+ sw_pager_t swp;
+ sw_blk_t swb;
+ int bsize, blk, bit, to_blk, to_bit, mask, cnt = 0;
+
+#ifdef DEBUG
+ if (swpagerdebug & SDB_FOLLOW)
+ printf("swpg_remove()\n");
+#endif
+
+ /* Special case stupid ranges. */
+ if (to > 0 && from >= to)
+ return(0);
+
+ swp = (sw_pager_t)pager->pg_data;
+
+ /*
+ * If we back no pages, just return. XXX Can this
+ * ever be the case? At least all remove calls should
+ * be through vm_object_remove_from_pager which also
+ * deallocates the pager when it no longer backs any
+ * pages. Left is the initial case: can a swap-pager
+ * be created without any pages put into it?
+ */
+ if (swp->sw_cnt == 0)
+ return(0);
+
+ bsize = dbtob(swp->sw_bsize);
+ blk = from / bsize;
+
+ /* Another fast one.. no blocks in range. */
+ if (blk >= swp->sw_nblocks)
+ return(0);
+ bit = atop(from % bsize);
+
+ /*
+ * Deal with the special case with TO == 0.
+ * XXX Perhaps the code might be improved if we
+ * made to_blk & to_bit signify the inclusive end
+ * of range instead (i.e. to - 1).
+ */
+ if (to) {
+ to_blk = to / bsize;
+ if (to_blk > swp->sw_nblocks) {
+ to_blk = swp->sw_nblocks;
+ to_bit = 0;
+ } else
+ to_bit = atop(to % bsize);
+ } else {
+ to_blk = swp->sw_nblocks;
+ to_bit = 0;
+ }
+
+ /*
+ * Loop over the range, remove pages as we find them.
+ * If all pages in a block get freed, deallocate the
+ * swap block as well.
+ */
+ for (swb = &swp->sw_blocks[blk], mask = (1 << bit) - 1;
+ blk < to_blk || to_bit;
+ blk++, swb++, mask = 0) {
+
+ /* Don't bother if the block is already cleared. */
+ if (swb->swb_block == 0)
+ continue;
+
+ /*
+ * When coming to the end-block we need to
+	 * adjust the mask in the other end, as well as
+ * ensuring this will be the last iteration.
+ */
+ if (blk == to_blk) {
+ mask |= ~((1 << to_bit) - 1);
+ to_bit = 0;
+ }
+
+ /* Count pages that will be removed. */
+ cnt += count_bits(swb->swb_mask & ~mask);
+
+ /*
+ * Remove pages by applying our mask, and if this
+ * means no pages are left in the block, free it.
+ */
+ if ((swb->swb_mask &= mask) == 0) {
+ rmfree(swapmap, swp->sw_bsize, swb->swb_block);
+ swb->swb_block = 0;
+ }
+ }
+
+ /* Adjust the page count and return the removed count. */
+ swp->sw_cnt -= cnt;
+#ifdef DEBUG
+ if (swp->sw_cnt < 0)
+ panic("swap_pager_remove: sw_cnt < 0");
+#endif
+ return(cnt);
+}
+
+/*
+ * swap_pager_next:
+ *
+ * This is called via the vm_pager_next path and
+ * will return the offset of the next page (addresswise)
+ * which this pager is backing. If there are no more
+ * pages we will return the size of the pager's managed
+ * space (which by definition is larger than any page's
+ * offset).
+ */
+static vm_offset_t
+swap_pager_next(pager, offset)
+ vm_pager_t pager;
+ vm_offset_t offset;
+{
+ sw_pager_t swp;
+ sw_blk_t swb;
+ int bsize, blk, bit, to_blk, to_bit, mask;
+
+#ifdef DEBUG
+ if (swpagerdebug & SDB_FOLLOW)
+ printf("swpg_next()\n");
+#endif
+
+ swp = (sw_pager_t)pager->pg_data;
+
+ /*
+ * If we back no pages, just return our size. XXX Can
+ * this ever be the case? At least all remove calls
+ * should be through vm_object_remove_from_pager which
+ * also deallocates the pager when it no longer backs any
+ * pages. Left is the initial case: can a swap-pager
+ * be created without any pages put into it?
+ */
+ if (swp->sw_cnt == 0)
+ return(swp->sw_osize);
+
+ bsize = dbtob(swp->sw_bsize);
+ blk = offset / bsize;
+
+ /* Another fast one.. no blocks in range. */
+ if (blk >= swp->sw_nblocks)
+ return(swp->sw_osize);
+ bit = atop(offset % bsize);
+ to_blk = swp->sw_osize / bsize;
+ to_bit = atop(swp->sw_osize % bsize);
+
+ /*
+ * Loop over the remaining blocks, returning as soon
+ * as we find a page.
+ */
+ swb = &swp->sw_blocks[blk];
+ mask = ~((1 << bit) - 1);
+ for (;;) {
+ if (blk == to_blk) {
+ /* Nothing to be done in this end-block? */
+ if (to_bit == 0)
+ break;
+ mask &= (1 << to_bit) - 1;
+ }
+
+ /*
+ * Check this block for a backed page and return
+ * its offset if there.
+ */
+ mask &= swb->swb_mask;
+ if (mask)
+ return(blk * bsize + (ffs (mask) - 1) * PAGE_SIZE);
+
+ /*
+ * If we handled the end of range now, this
+ * means we are ready.
+ */
+ if (blk == to_blk)
+ break;
+
+ /* Get on with the next block. */
+ blk++;
+ swb++;
+ mask = ~0;
+ }
+ return swp->sw_osize;
+}
+
+/*
+ * swap_pager_count:
+ *
+ * Just returns the count of pages backed by this pager.
+ */
+int
+swap_pager_count(pager)
+ vm_pager_t pager;
+{
+#ifndef notyet
+ return((sw_pager_t)pager->pg_data)->sw_cnt;
+#else
+ sw_pager_t swp;
+ sw_blk_t swb;
+ int i, cnt = 0;
+
+ swp = (sw_pager_t)pager->pg_data;
+ if (swp->sw_blocks == NULL)
+ return 0;
+ for (i = 0; i < swp->sw_nblocks; i++)
+ cnt += count_bits(swp->sw_blocks[i].swb_mask);
+ return cnt;
+#endif
+}
+
+/*
+ * count_bits:
+ *
+ * Counts the number of set bits in a word.
+ */
+static int
+count_bits(x)
+ u_int x;
+{
+ int cnt = 0;
+
+ while (x) {
+ cnt += x & 1;
+ x >>= 1;
+ }
+ return(cnt);
+}
diff --git a/sys/vm/swap_pager.h b/sys/vm/swap_pager.h
index 07b25ecbe8b..c55afc97e82 100644
--- a/sys/vm/swap_pager.h
+++ b/sys/vm/swap_pager.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: swap_pager.h,v 1.2 1996/08/02 00:05:57 niklas Exp $ */
/* $NetBSD: swap_pager.h,v 1.6 1994/06/29 06:47:49 cgd Exp $ */
/*
@@ -83,6 +84,7 @@ struct swpager {
sw_blk_t sw_blocks; /* pointer to list of swap blocks */
short sw_flags; /* flags */
short sw_poip; /* pageouts in progress */
+ int sw_cnt; /* count of pages in pager */
};
typedef struct swpager *sw_pager_t;
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index c23a14e1d16..688c7987e8c 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: vm.h,v 1.2 1996/08/02 00:05:58 niklas Exp $ */
/* $NetBSD: vm.h,v 1.13 1994/06/29 06:47:52 cgd Exp $ */
/*
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index dc52e27d09c..cf93f186b1b 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_extern.h,v 1.7 1996/07/23 23:54:21 deraadt Exp $ */
+/* $OpenBSD: vm_extern.h,v 1.8 1996/08/02 00:05:58 niklas Exp $ */
/* $NetBSD: vm_extern.h,v 1.20 1996/04/23 12:25:23 christos Exp $ */
/*-
@@ -46,97 +46,101 @@ struct vnode;
struct core;
#ifdef KGDB
-void chgkprot __P((caddr_t, int, int));
+void chgkprot __P((caddr_t, int, int));
#endif
#ifdef _KERNEL
#ifdef TYPEDEF_FOR_UAP
-int compat_43_getpagesize __P((struct proc *p, void *, int *));
-int madvise __P((struct proc *, void *, int *));
-int mincore __P((struct proc *, void *, int *));
-int mprotect __P((struct proc *, void *, int *));
-int minherit __P((struct proc *, void *, int *));
-int msync __P((struct proc *, void *, int *));
-int munmap __P((struct proc *, void *, int *));
-int obreak __P((struct proc *, void *, int *));
-int sbrk __P((struct proc *, void *, int *));
-int smmap __P((struct proc *, void *, int *));
-int sstk __P((struct proc *, void *, int *));
+int compat_43_getpagesize __P((struct proc *p, void *, int *));
+int madvise __P((struct proc *, void *, int *));
+int mincore __P((struct proc *, void *, int *));
+int mprotect __P((struct proc *, void *, int *));
+int minherit __P((struct proc *, void *, int *));
+int msync __P((struct proc *, void *, int *));
+int munmap __P((struct proc *, void *, int *));
+int obreak __P((struct proc *, void *, int *));
+int sbrk __P((struct proc *, void *, int *));
+int smmap __P((struct proc *, void *, int *));
+int sstk __P((struct proc *, void *, int *));
#endif
-void assert_wait __P((void *, boolean_t));
-int grow __P((struct proc *, vm_offset_t));
-void iprintf __P((int (*)(const char *, ...), const char *, ...));
-int kernacc __P((caddr_t, int, int));
-int kinfo_loadavg __P((int, char *, int *, int, int *));
-int kinfo_meter __P((int, caddr_t, int *, int, int *));
-vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
-vm_offset_t kmem_alloc_pageable __P((vm_map_t, vm_size_t));
-vm_offset_t kmem_alloc_wait __P((vm_map_t, vm_size_t));
-void kmem_free __P((vm_map_t, vm_offset_t, vm_size_t));
-void kmem_free_wakeup __P((vm_map_t, vm_offset_t, vm_size_t));
-void kmem_init __P((vm_offset_t, vm_offset_t));
-vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
-vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *,
- vm_size_t, boolean_t));
-void loadav __P((struct loadavg *));
-void munmapfd __P((struct proc *, int));
-int pager_cache __P((vm_object_t, boolean_t));
-void sched __P((void));
+void assert_wait __P((void *, boolean_t));
+int grow __P((struct proc *, vm_offset_t));
+void iprintf __P((int (*)(const char *, ...), const char *, ...));
+int kernacc __P((caddr_t, int, int));
+int kinfo_loadavg __P((int, char *, int *, int, int *));
+int kinfo_meter __P((int, caddr_t, int *, int, int *));
+vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
+vm_offset_t kmem_alloc_pageable __P((vm_map_t, vm_size_t));
+vm_offset_t kmem_alloc_wait __P((vm_map_t, vm_size_t));
+void kmem_free __P((vm_map_t, vm_offset_t, vm_size_t));
+void kmem_free_wakeup __P((vm_map_t, vm_offset_t, vm_size_t));
+void kmem_init __P((vm_offset_t, vm_offset_t));
+vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
+vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *,
+ vm_size_t, boolean_t));
+void loadav __P((struct loadavg *));
+void munmapfd __P((struct proc *, int));
+int pager_cache __P((vm_object_t, boolean_t));
+void sched __P((void));
#ifdef __GNUC__
-void scheduler __P((void)) __attribute ((noreturn));
+void scheduler __P((void)) __attribute ((noreturn));
#else
-void scheduler __P((void));
+void scheduler __P((void));
#endif
-int svm_allocate __P((struct proc *, void *, int *));
-int svm_deallocate __P((struct proc *, void *, int *));
-int svm_inherit __P((struct proc *, void *, int *));
-int svm_protect __P((struct proc *, void *, int *));
-void swapinit __P((void));
-void swapout __P((struct proc *));
-void swapout_threads __P((void));
-int swfree __P((struct proc *, int));
-void swstrategy __P((struct buf *));
-void thread_block __P((void));
-void thread_sleep __P((void *, simple_lock_t, boolean_t));
-void thread_wakeup __P((void *));
-int useracc __P((caddr_t, int, int));
-int vm_allocate __P((vm_map_t, vm_offset_t *, vm_size_t,
- boolean_t));
-int vm_allocate_with_pager __P((vm_map_t, vm_offset_t *,
+int svm_allocate __P((struct proc *, void *, int *));
+int svm_deallocate __P((struct proc *, void *, int *));
+int svm_inherit __P((struct proc *, void *, int *));
+int svm_protect __P((struct proc *, void *, int *));
+void swapinit __P((void));
+void swapout __P((struct proc *));
+void swapout_threads __P((void));
+int swfree __P((struct proc *, int));
+void swstrategy __P((struct buf *));
+void thread_block __P((void));
+void thread_sleep __P((void *, simple_lock_t, boolean_t));
+/*
+ * This define replaces a thread_wakeup prototype, as thread_wakeup
+ * was solely a wrapper around wakeup.
+ */
+#define thread_wakeup wakeup
+int useracc __P((caddr_t, int, int));
+int vm_allocate __P((vm_map_t, vm_offset_t *, vm_size_t,
+ boolean_t));
+int vm_allocate_with_pager __P((vm_map_t, vm_offset_t *,
vm_size_t, boolean_t, vm_pager_t, vm_offset_t, boolean_t));
-int vm_coredump __P((struct proc *, struct vnode *, struct ucred *,
- struct core *));
-int vm_deallocate __P((vm_map_t, vm_offset_t, vm_size_t));
-int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
-void vm_fault_copy_entry __P((vm_map_t,
+int vm_coredump __P((struct proc *, struct vnode *, struct ucred *,
+ struct core *));
+int vm_deallocate __P((vm_map_t, vm_offset_t, vm_size_t));
+int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
+void vm_fault_copy_entry __P((vm_map_t,
vm_map_t, vm_map_entry_t, vm_map_entry_t));
-void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
-int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
+void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
+int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
#ifdef __FORK_BRAINDAMAGE
-int vm_fork __P((struct proc *, struct proc *));
+int vm_fork __P((struct proc *, struct proc *));
#else
-void vm_fork __P((struct proc *, struct proc *));
+void vm_fork __P((struct proc *, struct proc *));
#endif
-int vm_inherit __P((vm_map_t,
+int vm_inherit __P((vm_map_t,
vm_offset_t, vm_size_t, vm_inherit_t));
-void vm_init_limits __P((struct proc *));
-void vm_mem_init __P((void));
-int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t,
+void vm_init_limits __P((struct proc *));
+void vm_mem_init __P((void));
+int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t,
vm_prot_t, vm_prot_t, int, caddr_t, vm_offset_t));
-int vm_protect __P((vm_map_t,
+int vm_protect __P((vm_map_t,
vm_offset_t, vm_size_t, boolean_t, vm_prot_t));
-void vm_set_page_size __P((void));
-void vmmeter __P((void));
+void vm_set_page_size __P((void));
+void vmmeter __P((void));
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
struct vmspace *vmspace_fork __P((struct vmspace *));
-void vmspace_free __P((struct vmspace *));
-void vmtotal __P((struct vmtotal *));
-void vnode_pager_setsize __P((struct vnode *, u_long));
-void vnode_pager_umount __P((struct mount *));
-boolean_t vnode_pager_uncache __P((struct vnode *));
-void vslock __P((caddr_t, u_int));
-void vsunlock __P((caddr_t, u_int));
+void vmspace_free __P((struct vmspace *));
+void vmtotal __P((struct vmtotal *));
+void vnode_pager_setsize __P((struct vnode *, u_long));
+void vnode_pager_umount __P((struct mount *));
+boolean_t vnode_pager_uncache __P((struct vnode *));
+void vslock __P((caddr_t, u_int));
+void vsunlock __P((caddr_t, u_int));
/* Machine dependent portion */
void vmapbuf __P((struct buf *, vm_size_t));
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index dcde5af75b2..915004c1fec 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_fault.c,v 1.3 1996/05/23 08:34:51 deraadt Exp $ */
+/* $OpenBSD: vm_fault.c,v 1.4 1996/08/02 00:05:59 niklas Exp $ */
/* $NetBSD: vm_fault.c,v 1.18 1996/05/20 17:40:02 mrg Exp $ */
/*
@@ -191,6 +191,10 @@ vm_fault(map, vaddr, fault_type, change_wiring)
vm_object_lock(first_object);
first_object->ref_count++;
+#ifdef DIAGNOSTIC
+ if (first_object->paging_in_progress == 0xdead)
+ panic("vm_fault: first_object deallocated");
+#endif
first_object->paging_in_progress++;
/*
@@ -425,6 +429,10 @@ vm_fault(map, vaddr, fault_type, change_wiring)
object->paging_in_progress--;
vm_object_unlock(object);
object = next_object;
+#ifdef DIAGNOSTIC
+ if (object->paging_in_progress == 0xdead)
+ panic("vm_fault: object deallocated (1)");
+#endif
object->paging_in_progress++;
}
}
@@ -524,6 +532,10 @@ vm_fault(map, vaddr, fault_type, change_wiring)
*/
object->paging_in_progress--;
vm_object_collapse(object);
+#ifdef DIAGNOSTIC
+ if (object->paging_in_progress == 0xdead)
+ panic("vm_fault: object deallocated (2)");
+#endif
object->paging_in_progress++;
}
else {
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 2bbde4ea4f8..47c723dcd33 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_glue.c,v 1.19 1996/07/23 23:54:22 deraadt Exp $ */
+/* $OpenBSD: vm_glue.c,v 1.20 1996/08/02 00:05:59 niklas Exp $ */
/* $NetBSD: vm_glue.c,v 1.55.4.1 1996/06/13 17:25:45 cgd Exp $ */
/*
@@ -592,16 +592,6 @@ thread_sleep(event, lock, ruptible)
splx(s);
}
-void
-thread_wakeup(event)
- void *event;
-{
- int s = splhigh();
-
- wakeup(event);
- splx(s);
-}
-
/*
* DEBUG stuff
*/
diff --git a/sys/vm/vm_inherit.h b/sys/vm/vm_inherit.h
index 23f21699763..ef1cb4a7fb1 100644
--- a/sys/vm/vm_inherit.h
+++ b/sys/vm/vm_inherit.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: vm_inherit.h,v 1.2 1996/08/02 00:06:00 niklas Exp $ */
/* $NetBSD: vm_inherit.h,v 1.7 1994/06/29 06:47:58 cgd Exp $ */
/*
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index 14a539cee37..8f353eba930 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: vm_init.c,v 1.2 1996/08/02 00:06:00 niklas Exp $ */
/* $NetBSD: vm_init.c,v 1.9 1994/06/29 06:48:00 cgd Exp $ */
/*
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index c5475f0d1f8..4ebf2bbbd3b 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: vm_kern.c,v 1.3 1996/08/02 00:06:01 niklas Exp $ */
/* $NetBSD: vm_kern.c,v 1.17.6.1 1996/06/13 17:21:28 cgd Exp $ */
/*
@@ -70,6 +71,7 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
diff --git a/sys/vm/vm_kern.h b/sys/vm/vm_kern.h
index b4bf3cba04c..e46ee18b458 100644
--- a/sys/vm/vm_kern.h
+++ b/sys/vm/vm_kern.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: vm_kern.h,v 1.2 1996/08/02 00:06:01 niklas Exp $ */
/* $NetBSD: vm_kern.h,v 1.9 1994/06/29 06:48:03 cgd Exp $ */
/*
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index cd3e305710f..d67ca25e845 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: vm_map.h,v 1.3 1996/08/02 00:06:01 niklas Exp $ */
/* $NetBSD: vm_map.h,v 1.11 1995/03/26 20:39:10 jtc Exp $ */
/*
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index c4d3f285422..d6d93b62e75 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_object.c,v 1.5 1996/07/23 23:54:25 deraadt Exp $ */
+/* $OpenBSD: vm_object.c,v 1.6 1996/08/02 00:06:02 niklas Exp $ */
/* $NetBSD: vm_object.c,v 1.34 1996/02/28 22:35:35 gwr Exp $ */
/*
@@ -72,9 +72,11 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
+#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
/*
* Virtual memory objects maintain the actual data
@@ -112,8 +114,26 @@ struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];
long object_collapses = 0;
long object_bypasses = 0;
+boolean_t vm_object_collapse_allowed = TRUE;
+
+#ifndef VMDEBUG
+#define VMDEBUG 0
+#endif
+
+#ifdef DEBUG
+#define VMDEBUG_SHADOW 0x1
+#define VMDEBUG_SHADOW_VERBOSE 0x2
+#define VMDEBUG_COLLAPSE 0x4
+#define VMDEBUG_COLLAPSE_PAGEIN 0x8
+int vmdebug = VMDEBUG;
+#endif
static void _vm_object_allocate __P((vm_size_t, vm_object_t));
+static int vm_object_collapse_aux __P((vm_object_t));
+static int vm_object_bypass __P((vm_object_t));
+static void vm_object_set_shadow __P((vm_object_t, vm_object_t));
+static int vm_object_remove_from_pager
+ __P((vm_object_t, vm_offset_t, vm_offset_t));
/*
* vm_object_init:
@@ -183,6 +203,7 @@ _vm_object_allocate(size, object)
object->paging_offset = 0;
object->shadow = NULL;
object->shadow_offset = (vm_offset_t) 0;
+ LIST_INIT(&object->shadowers);
simple_lock(&vm_object_list_lock);
TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
@@ -223,7 +244,12 @@ void
vm_object_deallocate(object)
register vm_object_t object;
{
- vm_object_t temp;
+ /*
+ * While "temp" is used for other things as well, we
+ * initialize it to NULL here for being able to check
+ * if we are in the first revolution of the loop.
+ */
+ vm_object_t temp = NULL;
while (object != NULL) {
@@ -240,12 +266,26 @@ vm_object_deallocate(object)
*/
vm_object_lock(object);
if (--(object->ref_count) != 0) {
+ /*
+ * If this is a deallocation of a shadow
+ * reference (which it is unless it's the
+ * first time round) and this operation made
+ * us singly-shadowed, try to collapse us
+ * with our shadower.
+ */
+ vm_object_unlock(object);
+ if (temp != NULL &&
+ (temp = object->shadowers.lh_first) != NULL &&
+ temp->shadowers_list.le_next == NULL) {
+ vm_object_lock(temp);
+ vm_object_collapse(temp);
+ vm_object_unlock(temp);
+ }
/*
* If there are still references, then
* we are done.
*/
- vm_object_unlock(object);
vm_object_cache_unlock();
return;
}
@@ -303,6 +343,7 @@ vm_object_terminate(object)
*/
if ((shadow_object = object->shadow) != NULL) {
vm_object_lock(shadow_object);
+ vm_object_set_shadow(object, NULL);
if (shadow_object->copy == object)
shadow_object->copy = NULL;
#if 0
@@ -469,6 +510,11 @@ again:
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
if (!(p->flags & PG_CLEAN)) {
p->flags |= PG_BUSY;
+#ifdef DIAGNOSTIC
+ if (object->paging_in_progress == 0xdead)
+ panic("vm_object_page_clean: "
+ "object deallocated");
+#endif
object->paging_in_progress++;
vm_object_unlock(object);
/*
@@ -761,11 +807,11 @@ vm_object_copy(src_object, src_offset, size,
/*
* Make the old copy-object shadow the new one.
* It will receive no more pages from the original
- * object.
+ * object. Locking of new_copy not needed. We
+ * have the only pointer.
*/
-
src_object->ref_count--; /* remove ref. from old_copy */
- old_copy->shadow = new_copy;
+ vm_object_set_shadow(old_copy, new_copy);
new_copy->ref_count++; /* locking not needed - we
have the only pointer */
vm_object_unlock(old_copy); /* done with old_copy */
@@ -778,7 +824,7 @@ vm_object_copy(src_object, src_offset, size,
* Point the new copy at the existing object.
*/
- new_copy->shadow = src_object;
+ vm_object_set_shadow(new_copy, src_object);
new_copy->shadow_offset = new_start;
src_object->ref_count++;
src_object->copy = new_copy;
@@ -807,6 +853,8 @@ vm_object_copy(src_object, src_offset, size,
*
* The new object and offset into that object
* are returned in the source parameters.
+ *
+ * The old object should not be locked.
*/
void
vm_object_shadow(object, offset, length)
@@ -822,7 +870,6 @@ vm_object_shadow(object, offset, length)
/*
* Allocate a new object with the given length
*/
-
if ((result = vm_object_allocate(length)) == NULL)
panic("vm_object_shadow: no object for shadowing");
@@ -833,19 +880,19 @@ vm_object_shadow(object, offset, length)
* the source object. Net result: no change of reference
* count.
*/
- result->shadow = source;
+ vm_object_lock(source);
+ vm_object_set_shadow(result, source);
+ vm_object_unlock(source);
/*
* Store the offset into the source object,
* and fix up the offset into the new object.
*/
-
result->shadow_offset = *offset;
/*
* Return the new things
*/
-
*offset = 0;
*object = result;
}
@@ -854,8 +901,7 @@ vm_object_shadow(object, offset, length)
* Set the specified object's pager to the specified pager.
*/
void
-vm_object_setpager(object, pager, paging_offset,
- read_only)
+vm_object_setpager(object, pager, paging_offset, read_only)
vm_object_t object;
vm_pager_t pager;
vm_offset_t paging_offset;
@@ -1009,7 +1055,375 @@ vm_object_cache_clear()
vm_object_cache_unlock();
}
-boolean_t vm_object_collapse_allowed = TRUE;
+/*
+ * vm_object_remove_from_pager:
+ *
+ * Tell object's pager that it needn't back the page
+ * anymore. If the pager ends up empty, deallocate it.
+ * Assume object->pager is non-NULL.
+ */
+static int
+vm_object_remove_from_pager(object, from, to)
+ vm_object_t object;
+ vm_offset_t from, to;
+{
+ vm_pager_t pager = object->pager;
+ int cnt = 0;
+
+ cnt = vm_pager_remove(pager, from, to);
+
+ /* If pager became empty, remove it. */
+ if (cnt > 0 && vm_pager_count(pager) == 0) {
+ vm_pager_deallocate(pager);
+ object->pager = NULL;
+ }
+ return(cnt);
+}
+
+/*
+ * vm_object_collapse_aux:
+ *
+ * Internal function to vm_object_collapse called when
+ * it has been shown that a collapse operation is likely
+ * to succeed. We know that the backing object is only
+ * referenced by me and that paging is not in progress.
+ */
+static int
+vm_object_collapse_aux(object)
+ vm_object_t object;
+{
+ vm_object_t backing_object = object->shadow;
+ vm_offset_t backing_offset = object->shadow_offset;
+ vm_size_t size = object->size;
+ vm_offset_t offset;
+ vm_page_t backing_page, page = NULL;
+
+#ifdef DEBUG
+ if (vmdebug & VMDEBUG_COLLAPSE)
+ printf("vm_object_collapse_aux(0x%x)\n", object);
+#endif
+
+ /*
+ * First of all get rid of resident pages in the
+ * backing object. We can guarantee to remove
+ * every page thus we can write the while-test
+ * like this.
+ */
+ while ((backing_page = backing_object->memq.tqh_first) != NULL) {
+ /*
+ * If the page is outside the shadowing object's
+ * range or if the page is shadowed (either by a
+ * resident "non-fake" page or a paged out one) we
+ * can discard it right away. Otherwise we need to
+ * move the page to the shadowing object, perhaps
+ * waking up waiters for "fake" pages first.
+ *
+ * XXX There is a condition I'm unsure about, both
+ * if it exists and if I handle it right. The
+ * case that worries me is if an object can hold
+ * "fake" pages at the same time a real one is
+ * paged out. To me it sounds as this condition
+ * can't exist. Does anyone know? The way the
+ * condition below is done, "fake" pages are
+ * handled suboptimally if pagers are guaranteed
+ * not to have such pages in store.
+ */
+ if (backing_page->offset < backing_offset ||
+ (offset = backing_page->offset - backing_offset) >= size ||
+ ((page = vm_page_lookup(object, offset)) != NULL &&
+ !(page->flags & PG_FAKE)) ||
+ (object->pager &&
+ vm_pager_has_page(object->pager, offset))) {
+
+			/* Just discard the page, no one needs it. */
+ vm_page_lock_queues();
+ vm_page_free(backing_page);
+ vm_page_unlock_queues();
+ } else {
+ /*
+ * If a "fake" page was found, someone may
+ * be waiting for it. Wake her up and then
+ * remove the page.
+ */
+ if (page) {
+ PAGE_WAKEUP(page);
+ vm_page_lock_queues();
+ vm_page_free(page);
+ vm_page_unlock_queues();
+ }
+
+ /* Just move the page up front. */
+ vm_page_rename(backing_page, object, offset);
+ }
+ }
+
+ /*
+	 * If not both objects have pagers we are essentially
+ * ready. Just see to that any existing pager is
+ * attached to the shadowing object and then get rid
+ * of the backing object (which at that time should
+ * have neither resident nor paged out pages left).
+ */
+ if (object->pager == NULL || backing_object->pager == NULL) {
+ /*
+		 * If the shadowing object doesn't have a pager the
+ * easiest thing to do now is to just move the
+ * potential backing pager up front.
+ */
+ if (object->pager == NULL) {
+ object->pager = backing_object->pager;
+ object->paging_offset = backing_object->paging_offset +
+ backing_offset;
+ backing_object->pager = NULL;
+ }
+ } else {
+ /*
+ * OK we know both objects have pagers so now we need to
+		 * check if the backing object's paged out pages can be
+ * discarded or needs to be moved.
+ *
+ * As a first measure we know we can discard everything
+ * that the shadowing object doesn't shadow.
+ */
+ if (backing_offset > 0)
+ vm_object_remove_from_pager(backing_object, 0,
+ backing_offset);
+ if (backing_offset + size < backing_object->size)
+ vm_object_remove_from_pager(backing_object,
+ backing_offset + size, backing_object->size);
+
+ /*
+ * What's left to do is to find all paged out pages
+ * in the backing pager and either discard or move
+ * it to the front object.
+ */
+ offset = 0;
+ while (backing_object->pager &&
+ (offset = vm_pager_next(backing_object->pager, offset)) <
+ backing_object->size) {
+
+ /*
+ * If the shadowing object has this page, get
+ * rid of it from the backing pager. Trust
+ * the loop condition to get us out of here
+ * quickly if we remove the last paged out page.
+ *
+ * XXX Should pages found paged out in the backing
+ * object be marked for pageout in the shadowing
+ * object?
+ *
+ * XXX Would clustering several pages at a time
+ * be a win in this situation?
+ *
+ * XXX "fake" page handling???
+ */
+ if (vm_page_lookup(object, offset - backing_offset) ==
+ NULL && !vm_pager_has_page(object->pager,
+ offset - backing_offset)) {
+ /*
+ * Suck the page from the pager and give it
+ * to the shadowing object.
+ */
+#ifdef DEBUG
+ if (vmdebug & VMDEBUG_COLLAPSE_PAGEIN)
+ printf("vm_object_collapse_aux: "
+ "pagein needed\n");
+#endif
+
+ /* First allocate a page. */
+ for (;;) {
+ backing_page =
+ vm_page_alloc(backing_object,
+ offset);
+ if (backing_page)
+ break;
+ VM_WAIT;
+ }
+
+ /* Second, start paging it in. */
+ backing_object->paging_in_progress++;
+ if (vm_pager_get_pages(backing_object->pager,
+ &backing_page, 1, TRUE) != VM_PAGER_OK) {
+ panic("vm_object_collapse_aux: "
+ "could not get paged out page");
+#if 0
+ return KERN_FAILURE;
+#endif
+ }
+ if (--backing_object->paging_in_progress == 0)
+ thread_wakeup(backing_object);
+
+ /*
+ * Third, relookup in case pager changed
+ * page. Pager is responsible for
+ * disposition of old page if moved.
+ */
+ backing_page = vm_page_lookup(backing_object,
+ offset);
+
+ cnt.v_pgpgin++;
+ backing_page->flags &= ~PG_FAKE;
+ backing_page->flags |= PG_CLEAN;
+ pmap_clear_modify(VM_PAGE_TO_PHYS(
+ backing_page));
+
+ /*
+ * Fourth, move it up front, and wake up
+ * potential waiters.
+ */
+ vm_page_rename(backing_page, object,
+ offset - backing_offset);
+ PAGE_WAKEUP(backing_page);
+ }
+ vm_object_remove_from_pager(backing_object, offset,
+ offset + PAGE_SIZE);
+ offset += PAGE_SIZE;
+ }
+ }
+
+ /*
+ * I've seen this condition once in an out of VM situation.
+ * For the moment I don't know why it occurred, although I suspect
+ * vm_object_page_clean can create a pager even if it won't use
+ * it.
+ */
+ if (backing_object->pager &&
+ vm_pager_count(backing_object->pager) == 0) {
+ vm_pager_deallocate(backing_object->pager);
+ backing_object->pager = NULL;
+ }
+
+#ifdef DIAGNOSTIC
+ if (backing_object->pager)
+ panic("vm_object_collapse_aux: backing_object->pager remains");
+#endif
+
+ /*
+ * Object now shadows whatever backing_object did.
+ * Note that the reference to backing_object->shadow
+ * moves from within backing_object to within object.
+ */
+ if(backing_object->shadow)
+ vm_object_lock(backing_object->shadow);
+ vm_object_set_shadow(object, backing_object->shadow);
+ if(backing_object->shadow) {
+ vm_object_set_shadow(backing_object, NULL);
+ vm_object_unlock(backing_object->shadow);
+ }
+ object->shadow_offset += backing_object->shadow_offset;
+ if (object->shadow != NULL && object->shadow->copy != NULL)
+ panic("vm_object_collapse_aux: we collapsed a copy-object!");
+
+ /* Fast cleanup is the only thing left now. */
+ vm_object_unlock(backing_object);
+
+ simple_lock(&vm_object_list_lock);
+ TAILQ_REMOVE(&vm_object_list, backing_object, object_list);
+ vm_object_count--;
+ simple_unlock(&vm_object_list_lock);
+
+ free((caddr_t)backing_object, M_VMOBJ);
+
+ object_collapses++;
+ return KERN_SUCCESS;
+}
+
+/*
+ * vm_object_bypass:
+ *
+ * Internal function to vm_object_collapse called when collapsing
+ * the object with its backing one is not allowed but there may
+ * be an opportunity to bypass the backing object and shadow the
+ * next object in the chain instead.
+ */
+static int
+vm_object_bypass(object)
+ vm_object_t object;
+{
+ register vm_object_t backing_object = object->shadow;
+ register vm_offset_t backing_offset = object->shadow_offset;
+ register vm_offset_t new_offset;
+ register vm_page_t p, pp;
+
+ /*
+ * If all of the pages in the backing object are
+ * shadowed by the parent object, the parent
+ * object no longer has to shadow the backing
+ * object; it can shadow the next one in the
+ * chain.
+ *
+ * The backing object must not be paged out - we'd
+ * have to check all of the paged-out pages, as
+ * well.
+ */
+
+ if (backing_object->pager != NULL)
+ return KERN_FAILURE;
+
+ /*
+ * Should have a check for a 'small' number
+ * of pages here.
+ */
+
+ for (p = backing_object->memq.tqh_first; p != NULL;
+ p = p->listq.tqe_next) {
+ new_offset = p->offset - backing_offset;
+
+ /*
+ * If the parent has a page here, or if
+ * this page falls outside the parent,
+ * keep going.
+ *
+ * Otherwise, the backing_object must be
+ * left in the chain.
+ */
+
+ if (p->offset >= backing_offset && new_offset < object->size &&
+ ((pp = vm_page_lookup(object, new_offset)) == NULL ||
+ (pp->flags & PG_FAKE))) {
+ /*
+ * Page still needed. Can't go any further.
+ */
+ return KERN_FAILURE;
+ }
+ }
+
+ /*
+ * Make the parent shadow the next object
+ * in the chain. Deallocating backing_object
+ * will not remove it, since its reference
+ * count is at least 2.
+ */
+
+ vm_object_lock(object->shadow);
+ if (backing_object->shadow)
+ vm_object_lock(backing_object->shadow);
+ vm_object_set_shadow(object, backing_object->shadow);
+ if (backing_object->shadow)
+ vm_object_unlock(backing_object->shadow);
+ vm_object_reference(object->shadow);
+ vm_object_unlock(object->shadow);
+ object->shadow_offset += backing_object->shadow_offset;
+
+ /*
+ * Backing object might have had a copy pointer
+ * to us. If it did, clear it.
+ */
+
+ if (backing_object->copy == object)
+ backing_object->copy = NULL;
+
+ /* Drop the reference count on backing_object.
+ * Since its ref_count was at least 2, it
+ * will not vanish; so we don't need to call
+ * vm_object_deallocate.
+ */
+ backing_object->ref_count--;
+ vm_object_unlock(backing_object);
+ object_bypasses++;
+ return KERN_SUCCESS;
+}
+
/*
* vm_object_collapse:
*
@@ -1027,10 +1441,6 @@ vm_object_collapse(object)
{
register vm_object_t backing_object;
- register vm_offset_t backing_offset;
- register vm_size_t size;
- register vm_offset_t new_offset;
- register vm_page_t p, pp;
if (!vm_object_collapse_allowed)
return;
@@ -1040,11 +1450,9 @@ vm_object_collapse(object)
* Verify that the conditions are right for collapse:
*
* The object exists and no pages in it are currently
- * being paged out (or have ever been paged out).
+ * being paged out.
*/
- if (object == NULL ||
- object->paging_in_progress != 0 ||
- object->pager != NULL)
+ if (object == NULL || object->paging_in_progress)
return;
/*
@@ -1086,195 +1494,21 @@ vm_object_collapse(object)
}
/*
- * We know that we can either collapse the backing
- * object (if the parent is the only reference to
- * it) or (perhaps) remove the parent's reference
- * to it.
- */
-
- backing_offset = object->shadow_offset;
- size = object->size;
-
- /*
* If there is exactly one reference to the backing
- * object, we can collapse it into the parent.
+ * object, we can collapse it into the parent,
+ * otherwise we might be able to bypass it completely.
*/
if (backing_object->ref_count == 1) {
-
- /*
- * We can collapse the backing object.
- *
- * Move all in-memory pages from backing_object
- * to the parent. Pages that have been paged out
- * will be overwritten by any of the parent's
- * pages that shadow them.
- */
-
- while ((p = backing_object->memq.tqh_first) != NULL) {
- new_offset = (p->offset - backing_offset);
-
- /*
- * If the parent has a page here, or if
- * this page falls outside the parent,
- * dispose of it.
- *
- * Otherwise, move it as planned.
- */
-
- if (p->offset < backing_offset ||
- new_offset >= size) {
- vm_page_lock_queues();
- vm_page_free(p);
- vm_page_unlock_queues();
- } else {
- pp = vm_page_lookup(object, new_offset);
- if (pp != NULL && !(pp->flags & PG_FAKE)) {
- vm_page_lock_queues();
- vm_page_free(p);
- vm_page_unlock_queues();
- }
- else {
- if (pp) {
- /* may be someone waiting for it */
- PAGE_WAKEUP(pp);
- vm_page_lock_queues();
- vm_page_free(pp);
- vm_page_unlock_queues();
- }
- vm_page_rename(p, object, new_offset);
- }
- }
- }
-
- /*
- * Move the pager from backing_object to object.
- *
- * XXX We're only using part of the paging space
- * for keeps now... we ought to discard the
- * unused portion.
- */
-
- if (backing_object->pager) {
- object->pager = backing_object->pager;
- object->paging_offset = backing_offset +
- backing_object->paging_offset;
- backing_object->pager = NULL;
- }
-
- /*
- * Object now shadows whatever backing_object did.
- * Note that the reference to backing_object->shadow
- * moves from within backing_object to within object.
- */
-
- object->shadow = backing_object->shadow;
- object->shadow_offset += backing_object->shadow_offset;
- if (object->shadow != NULL &&
- object->shadow->copy != NULL) {
- panic("vm_object_collapse: we collapsed a copy-object!");
- }
- /*
- * Discard backing_object.
- *
- * Since the backing object has no pages, no
- * pager left, and no object references within it,
- * all that is necessary is to dispose of it.
- */
-
- vm_object_unlock(backing_object);
-
- simple_lock(&vm_object_list_lock);
- TAILQ_REMOVE(&vm_object_list, backing_object,
- object_list);
- vm_object_count--;
- simple_unlock(&vm_object_list_lock);
-
- free((caddr_t)backing_object, M_VMOBJ);
-
- object_collapses++;
- }
- else {
- /*
- * If all of the pages in the backing object are
- * shadowed by the parent object, the parent
- * object no longer has to shadow the backing
- * object; it can shadow the next one in the
- * chain.
- *
- * The backing object must not be paged out - we'd
- * have to check all of the paged-out pages, as
- * well.
- */
-
- if (backing_object->pager != NULL) {
+ if (vm_object_collapse_aux(object) != KERN_SUCCESS) {
vm_object_unlock(backing_object);
return;
}
-
- /*
- * Should have a check for a 'small' number
- * of pages here.
- */
-
- for (p = backing_object->memq.tqh_first;
- p != NULL;
- p = p->listq.tqe_next) {
- new_offset = (p->offset - backing_offset);
-
- /*
- * If the parent has a page here, or if
- * this page falls outside the parent,
- * keep going.
- *
- * Otherwise, the backing_object must be
- * left in the chain.
- */
-
- if (p->offset >= backing_offset &&
- new_offset < size &&
- ((pp = vm_page_lookup(object, new_offset))
- == NULL ||
- (pp->flags & PG_FAKE))) {
- /*
- * Page still needed.
- * Can't go any further.
- */
- vm_object_unlock(backing_object);
- return;
- }
- }
-
- /*
- * Make the parent shadow the next object
- * in the chain. Deallocating backing_object
- * will not remove it, since its reference
- * count is at least 2.
- */
-
- object->shadow = backing_object->shadow;
- vm_object_reference(object->shadow);
- object->shadow_offset += backing_object->shadow_offset;
-
- /*
- * Backing object might have had a copy pointer
- * to us. If it did, clear it.
- */
- if (backing_object->copy == object) {
- backing_object->copy = NULL;
+ } else
+ if (vm_object_bypass(object) != KERN_SUCCESS) {
+ vm_object_unlock(backing_object);
+ return;
}
-
- /* Drop the reference count on backing_object.
- * Since its ref_count was at least 2, it
- * will not vanish; so we don't need to call
- * vm_object_deallocate.
- */
- backing_object->ref_count--;
- vm_object_unlock(backing_object);
-
- object_bypasses ++;
-
- }
/*
* Try again with this object's new backing object.
@@ -1421,9 +1655,10 @@ _vm_object_print(object, full, pr)
int (*pr) __P((const char *, ...));
{
register vm_page_t p;
- extern indent;
-
- register int count;
+ char *delim;
+ vm_object_t o;
+ register int count;
+ extern int indent;
if (object == NULL)
return;
@@ -1434,6 +1669,13 @@ _vm_object_print(object, full, pr)
(*pr)("pager=0x%lx+0x%lx, shadow=(0x%lx)+0x%lx\n",
(long) object->pager, (long) object->paging_offset,
(long) object->shadow, (long) object->shadow_offset);
+ (*pr)("shadowers=(");
+ delim = "";
+ for (o = object->shadowers.lh_first; o; o = o->shadowers_list.le_next) {
+ (*pr)("%s0x%x", delim, o);
+ delim = ", ";
+ };
+ (*pr)(")\n");
(*pr)("cache: next=0x%lx, prev=0x%lx\n",
(long)object->cached_list.tqe_next,
(long)object->cached_list.tqe_prev);
@@ -1460,3 +1702,46 @@ _vm_object_print(object, full, pr)
(*pr)("\n");
indent -= 2;
}
+
+/*
+ * vm_object_set_shadow:
+ *
+ * Maintain the shadow graph so that back-link consistency is
+ * always kept.
+ *
+ * Assumes both objects as well as the old shadow to be locked
+ * (unless NULL of course).
+ */
+static void
+vm_object_set_shadow(object, shadow)
+ vm_object_t object, shadow;
+{
+ vm_object_t old_shadow = object->shadow;
+
+#ifdef DEBUG
+ if (vmdebug & VMDEBUG_SHADOW)
+ printf("vm_object_set_shadow(object=0x%x, shadow=0x%x) "
+ "old_shadow=0x%x\n", object, shadow, old_shadow);
+ if (vmdebug & VMDEBUG_SHADOW_VERBOSE) {
+ vm_object_print(object, 0);
+ vm_object_print(old_shadow, 0);
+ vm_object_print(shadow, 0);
+ }
+#endif
+ if (old_shadow == shadow)
+ return;
+ if (old_shadow) {
+ LIST_REMOVE(object, shadowers_list);
+ }
+ if (shadow) {
+ LIST_INSERT_HEAD(&shadow->shadowers, object, shadowers_list);
+ }
+ object->shadow = shadow;
+#ifdef DEBUG
+ if (vmdebug & VMDEBUG_SHADOW_VERBOSE) {
+ vm_object_print(object, 0);
+ vm_object_print(old_shadow, 0);
+ vm_object_print(shadow, 0);
+ }
+#endif
+}
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 3ec9af99a45..46a7939ac68 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: vm_object.h,v 1.3 1996/08/02 00:06:02 niklas Exp $ */
/* $NetBSD: vm_object.h,v 1.16 1995/03/29 22:10:28 briggs Exp $ */
/*
@@ -98,7 +99,11 @@ struct vm_object {
struct vm_object *shadow; /* My shadow */
vm_offset_t shadow_offset; /* Offset in shadow */
TAILQ_ENTRY(vm_object) cached_list; /* for persistence */
+ LIST_HEAD(, vm_object) shadowers; /* set of shadowers */
+ LIST_ENTRY(vm_object) shadowers_list; /* link to next shadower of
+ this object's shadow */
};
+
/*
* Flags
*/
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 7bb8ac4f5b5..1d14a50ff2e 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_page.c,v 1.2 1996/03/03 17:45:35 niklas Exp $ */
+/* $OpenBSD: vm_page.c,v 1.3 1996/08/02 00:06:03 niklas Exp $ */
/* $NetBSD: vm_page.c,v 1.28 1996/02/05 01:54:05 christos Exp $ */
/*
@@ -71,6 +71,7 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/proc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index d4bdd132440..584d7365f58 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: vm_page.h,v 1.2 1996/08/02 00:06:03 niklas Exp $ */
/* $NetBSD: vm_page.h,v 1.18 1995/03/26 20:39:13 jtc Exp $ */
/*
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index f6abe2f1313..fb0c99ba52b 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_pageout.c,v 1.2 1996/03/03 17:45:36 niklas Exp $ */
+/* $OpenBSD: vm_pageout.c,v 1.3 1996/08/02 00:06:04 niklas Exp $ */
/* $NetBSD: vm_pageout.c,v 1.23 1996/02/05 01:54:07 christos Exp $ */
/*
@@ -71,6 +71,7 @@
#include <sys/param.h>
#include <sys/proc.h>
+#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -276,6 +277,10 @@ vm_pageout_page(m, object)
if (object->pager == NULL)
vm_object_collapse(object);
+#ifdef DIAGNOSTIC
+ if (object->paging_in_progress == 0xdead)
+ panic("vm_pageout_page: object deallocated");
+#endif
object->paging_in_progress++;
vm_object_unlock(object);
@@ -443,6 +448,10 @@ vm_pageout_cluster(m, object)
* in case it blocks.
*/
vm_page_unlock_queues();
+#ifdef DIAGNOSTIC
+ if (object->paging_in_progress == 0xdead)
+ panic("vm_pageout_cluster: object deallocated");
+#endif
object->paging_in_progress++;
vm_object_unlock(object);
again:
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index 649f2852b7f..f07c6fbcf85 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: vm_pageout.h,v 1.2 1996/08/02 00:06:04 niklas Exp $ */
/* $NetBSD: vm_pageout.h,v 1.11 1995/03/26 20:39:14 jtc Exp $ */
/*
diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c
index 6305c7049bf..dbf95c3e984 100644
--- a/sys/vm/vm_pager.c
+++ b/sys/vm/vm_pager.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_pager.c,v 1.3 1996/04/21 22:33:16 deraadt Exp $ */
+/* $OpenBSD: vm_pager.c,v 1.4 1996/08/02 00:06:05 niklas Exp $ */
/* $NetBSD: vm_pager.c,v 1.21 1996/03/16 23:15:25 christos Exp $ */
/*
@@ -173,11 +173,39 @@ vm_pager_deallocate(pager)
{
if (pager == NULL)
panic("vm_pager_deallocate: null pager");
-
(*pager->pg_ops->pgo_dealloc)(pager);
}
int
+vm_pager_remove(pager, from, to)
+ vm_pager_t pager;
+ vm_offset_t from, to;
+{
+ if (pager == NULL)
+ panic("vm_pager_remove: null pager");
+ return (*pager->pg_ops->pgo_remove)(pager, from, to);
+}
+
+vm_offset_t
+vm_pager_next(pager, offset)
+ vm_pager_t pager;
+ vm_offset_t offset;
+{
+ if (pager == NULL)
+ panic("vm_pager_next: null pager");
+ return (*pager->pg_ops->pgo_next)(pager, offset);
+}
+
+int
+vm_pager_count(pager)
+ vm_pager_t pager;
+{
+ if (pager == NULL)
+ panic("vm_pager_count: null pager");
+ return (*pager->pg_ops->pgo_count)(pager);
+}
+
+int
vm_pager_get_pages(pager, mlist, npages, sync)
vm_pager_t pager;
vm_page_t *mlist;
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index 49f5c0782a3..81e7a361ca2 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: vm_pager.h,v 1.2 1996/08/02 00:06:05 niklas Exp $ */
/* $NetBSD: vm_pager.h,v 1.10 1995/03/26 20:39:15 jtc Exp $ */
/*
@@ -76,15 +77,30 @@ struct pagerops {
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
void (*pgo_dealloc) /* Disassociate. */
__P((vm_pager_t));
- int (*pgo_getpages) /* Get (read) page. */
+ int (*pgo_getpages) /* Get (read) pages. */
__P((vm_pager_t, vm_page_t *, int, boolean_t));
- int (*pgo_putpages) /* Put (write) page. */
+ int (*pgo_putpages) /* Put (write) pages. */
__P((vm_pager_t, vm_page_t *, int, boolean_t));
boolean_t (*pgo_haspage) /* Does pager have page? */
__P((vm_pager_t, vm_offset_t));
void (*pgo_cluster) /* Return range of cluster. */
__P((vm_pager_t, vm_offset_t,
vm_offset_t *, vm_offset_t *));
+ /*
+ * The following are an extension to the original Mach pager
+ * interface first seen in BSD/OS 2.1 (at least as far as I am
+ * aware). As compatibility is a good thing (tm) I choose to
+ * use that interface extension instead of coming up with one
+ * of my own (the interface must be extended to make the
+	 * object collapse operation work in the presence of pagers).
+ * -- Niklas Hallqvist (niklas@appli.se).
+ */
+ int (*pgo_remove) /* Don't manage range anymore */
+ __P((vm_pager_t, vm_offset_t, vm_offset_t));
+ vm_offset_t (*pgo_next) /* Find next page in pager. */
+ __P((vm_pager_t, vm_offset_t));
+ int (*pgo_count) /* How many pages in pager? */
+ __P((vm_pager_t));
};
/*
@@ -106,34 +122,38 @@ struct pagerops {
#ifdef _KERNEL
extern struct pagerops *dfltpagerops;
-vm_pager_t vm_pager_allocate
+vm_pager_t vm_pager_allocate
__P((int, caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
-vm_page_t vm_pager_atop __P((vm_offset_t));
-void vm_pager_cluster
+vm_page_t vm_pager_atop __P((vm_offset_t));
+void vm_pager_cluster
__P((vm_pager_t, vm_offset_t,
vm_offset_t *, vm_offset_t *));
-void vm_pager_clusternull
+void vm_pager_clusternull
__P((vm_pager_t, vm_offset_t,
vm_offset_t *, vm_offset_t *));
-void vm_pager_deallocate __P((vm_pager_t));
-int vm_pager_get_pages
+void vm_pager_deallocate __P((vm_pager_t));
+int vm_pager_remove
+ __P((vm_pager_t, vm_offset_t, vm_offset_t));
+int vm_pager_count __P((vm_pager_t));
+vm_offset_t vm_pager_next __P((vm_pager_t, vm_offset_t));
+int vm_pager_get_pages
__P((vm_pager_t, vm_page_t *, int, boolean_t));
-boolean_t vm_pager_has_page __P((vm_pager_t, vm_offset_t));
-void vm_pager_init __P((void));
-vm_pager_t vm_pager_lookup __P((struct pagerlst *, caddr_t));
-vm_offset_t vm_pager_map_pages __P((vm_page_t *, int, boolean_t));
-int vm_pager_put_pages
+boolean_t vm_pager_has_page __P((vm_pager_t, vm_offset_t));
+void vm_pager_init __P((void));
+vm_pager_t vm_pager_lookup __P((struct pagerlst *, caddr_t));
+vm_offset_t vm_pager_map_pages __P((vm_page_t *, int, boolean_t));
+int vm_pager_put_pages
__P((vm_pager_t, vm_page_t *, int, boolean_t));
-void vm_pager_sync __P((void));
-void vm_pager_unmap_pages __P((vm_offset_t, int));
+void vm_pager_sync __P((void));
+void vm_pager_unmap_pages __P((vm_offset_t, int));
#define vm_pager_cancluster(p, b) ((p)->pg_flags & (b))
/*
* XXX compat with old interface
*/
-int vm_pager_get __P((vm_pager_t, vm_page_t, boolean_t));
-int vm_pager_put __P((vm_pager_t, vm_page_t, boolean_t));
+int vm_pager_get __P((vm_pager_t, vm_page_t, boolean_t));
+int vm_pager_put __P((vm_pager_t, vm_page_t, boolean_t));
#endif
#endif /* _VM_PAGER_ */
diff --git a/sys/vm/vm_prot.h b/sys/vm/vm_prot.h
index 60734f1a618..3fc95e54a2c 100644
--- a/sys/vm/vm_prot.h
+++ b/sys/vm/vm_prot.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: vm_prot.h,v 1.2 1996/08/02 00:06:05 niklas Exp $ */
/* $NetBSD: vm_prot.h,v 1.6 1994/06/29 06:48:42 cgd Exp $ */
/*
diff --git a/sys/vm/vnode_pager.h b/sys/vm/vnode_pager.h
index 4f903f4db82..871d8e3a283 100644
--- a/sys/vm/vnode_pager.h
+++ b/sys/vm/vnode_pager.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: vnode_pager.h,v 1.2 1996/08/02 00:06:06 niklas Exp $ */
/* $NetBSD: vnode_pager.h,v 1.6 1994/06/29 06:48:49 cgd Exp $ */
/*