author    Niklas Hallqvist <niklas@cvs.openbsd.org>    1998-03-01 00:38:26 +0000
committer Niklas Hallqvist <niklas@cvs.openbsd.org>    1998-03-01 00:38:26 +0000
commit    b92b419a6a8ef401c8a4e022115bf3e18426eea0 (patch)
tree      ff214e6b334202d15c2b303427a2a5d2f16af4f0
parent    4f215f167e35940141001f7f31bfce350266d153 (diff)
Merge of MACHINE_NEW_NONCONTIG (aka MNN) code from Chuck Cranor,
<chuck@openbsd.org>. This code is as yet disabled on all platforms, and
actually not yet supported on more than mvme68k, although other platforms
are expected soon, as code is already available. This code makes handling
of multiple physical memory regions consistent over all platforms, as well
as keeping the performance of maintaining a single contiguous memory chunk.
It is also a requirement for the upcoming UVM replacement VM system.
What I did in this merge: just declared the pmap_map function in an MD
include file per port that needs it. It's not an exported pmap interface,
says Chuck. It ended up in different include files on different ports, as
I tried to follow the current policy on a per-arch basis.
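For orientation, the sketch below condenses the per-port obligations this merge introduces, modeled on the mvme68k changes further down in the diff. It is illustrative only, not part of the commit: it assumes a single physical memory segment, K&R-style prototypes, and the pre-UVM vm_page_physload() interface (a UVM build would call uvm_page_physload() instead), and it omits the rest of pmap_bootstrap().

/*
 * Sketch (assumptions above): what a MACHINE_NEW_NONCONTIG port supplies.
 * Each port also declares the now machine-dependent pmap_map() in its own
 * MD header, e.g. in <machine/cpu.h>:
 *
 *	vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
 */
#include <vm/vm.h>
#include <vm/vm_page.h>

extern vm_offset_t avail_start, avail_end;	/* set up by pmap_bootstrap() */
extern vm_offset_t virtual_avail, virtual_end;

/*
 * Called early (from locore, before main()): hand each physical memory
 * segment to the VM system.  With one segment this is a single call.
 */
void
mvme68k_init()
{
#if defined(MACHINE_NEW_NONCONTIG)
	vm_page_physload(atop(avail_start), atop(avail_end),
	    atop(avail_start), atop(avail_end));
#endif
}

/* Report the available kernel virtual address range during VM bootstrap. */
void
pmap_virtual_space(vstartp, vendp)
	vm_offset_t *vstartp, *vendp;
{
	*vstartp = virtual_avail;
	*vendp = virtual_end;
}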
-rw-r--r--  sys/arch/alpha/alpha/pmap.old.c | 4
-rw-r--r--  sys/arch/amiga/include/cpu.h | 3
-rw-r--r--  sys/arch/arc/arc/pmap.c | 14
-rw-r--r--  sys/arch/arc/arc/vm_machdep.c | 6
-rw-r--r--  sys/arch/arm32/arm32/machdep.c | 4
-rw-r--r--  sys/arch/atari/include/cpu.h | 11
-rw-r--r--  sys/arch/hp300/include/cpu.h | 7
-rw-r--r--  sys/arch/i386/include/cpu.h | 6
-rw-r--r--  sys/arch/kbus/include/cpu.h | 4
-rw-r--r--  sys/arch/mac68k/include/cpu.h | 5
-rw-r--r--  sys/arch/mvme68k/include/pmap.h | 3
-rw-r--r--  sys/arch/mvme68k/include/vmparam.h | 28
-rw-r--r--  sys/arch/mvme68k/mvme68k/locore.s | 5
-rw-r--r--  sys/arch/mvme68k/mvme68k/machdep.c | 22
-rw-r--r--  sys/arch/mvme68k/mvme68k/pmap.c | 217
-rw-r--r--  sys/arch/pc532/include/cpu.h | 11
-rw-r--r--  sys/arch/sun3/include/machdep.h | 4
-rw-r--r--  sys/arch/vax/include/pmap.h | 3
-rw-r--r--  sys/arch/wgrisc/wgrisc/pmap.c | 14
-rw-r--r--  sys/conf/files | 3
-rw-r--r--  sys/kern/init_main.c | 5
-rw-r--r--  sys/kern/kern_fthread.c | 167
-rw-r--r--  sys/sys/malloc.h | 6
-rw-r--r--  sys/vm/pglist.h | 8
-rw-r--r--  sys/vm/pmap.h | 80
-rw-r--r--  sys/vm/swap_pager.c | 4
-rw-r--r--  sys/vm/vm.h | 6
-rw-r--r--  sys/vm/vm_extern.h | 31
-rw-r--r--  sys/vm/vm_fault.c | 18
-rw-r--r--  sys/vm/vm_glue.c | 79
-rw-r--r--  sys/vm/vm_init.c | 18
-rw-r--r--  sys/vm/vm_kern.c | 8
-rw-r--r--  sys/vm/vm_kern.h | 13
-rw-r--r--  sys/vm/vm_map.c | 39
-rw-r--r--  sys/vm/vm_map.h | 41
-rw-r--r--  sys/vm/vm_meter.c | 4
-rw-r--r--  sys/vm/vm_object.c | 10
-rw-r--r--  sys/vm/vm_object.h | 8
-rw-r--r--  sys/vm/vm_page.c | 1279
-rw-r--r--  sys/vm/vm_page.h | 464
-rw-r--r--  sys/vm/vm_pageout.c | 47
-rw-r--r--  sys/vm/vm_pageout.h | 16
-rw-r--r--  sys/vm/vm_pager.h | 31
-rw-r--r--  sys/vm/vm_param.h | 28
44 files changed, 2123 insertions, 661 deletions
diff --git a/sys/arch/alpha/alpha/pmap.old.c b/sys/arch/alpha/alpha/pmap.old.c
index 239297c18fe..fbba3be8acc 100644
--- a/sys/arch/alpha/alpha/pmap.old.c
+++ b/sys/arch/alpha/alpha/pmap.old.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.old.c,v 1.2 1997/01/24 19:56:41 niklas Exp $ */
+/* $OpenBSD: pmap.old.c,v 1.3 1998/03/01 00:37:20 niklas Exp $ */
/* $NetBSD: pmap.old.c,v 1.14 1996/11/13 21:13:10 cgd Exp $ */
/*
@@ -542,6 +542,7 @@ pmap_init(phys_start, phys_end)
pmap_initialized = TRUE;
}
+#if 0
/*
* Used to map a range of physical addresses into kernel
* virtual address space.
@@ -567,6 +568,7 @@ pmap_map(virt, start, end, prot)
}
return(virt);
}
+#endif
/*
* Create and return a physical map.
diff --git a/sys/arch/amiga/include/cpu.h b/sys/arch/amiga/include/cpu.h
index 25b9113c9eb..c44d8bcd522 100644
--- a/sys/arch/amiga/include/cpu.h
+++ b/sys/arch/amiga/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.11 1997/09/19 17:16:18 niklas Exp $ */
+/* $OpenBSD: cpu.h,v 1.12 1998/03/01 00:37:22 niklas Exp $ */
/* $NetBSD: cpu.h,v 1.36 1996/09/11 00:11:42 thorpej Exp $ */
/*
@@ -251,6 +251,7 @@ void setconf __P((void));
* Prototypes from pmap.c:
*/
void pmap_bootstrap __P((vm_offset_t, vm_offset_t));
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
#endif /* _KERNEL */
diff --git a/sys/arch/arc/arc/pmap.c b/sys/arch/arc/arc/pmap.c
index c83071a9350..139ca419e5d 100644
--- a/sys/arch/arc/arc/pmap.c
+++ b/sys/arch/arc/arc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.11 1997/08/01 23:33:05 deraadt Exp $ */
+/* $OpenBSD: pmap.c,v 1.12 1998/03/01 00:37:24 niklas Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 8.4 (Berkeley) 1/26/94
- * $Id: pmap.c,v 1.11 1997/08/01 23:33:05 deraadt Exp $
+ * $Id: pmap.c,v 1.12 1998/03/01 00:37:24 niklas Exp $
*/
/*
@@ -487,8 +487,9 @@ pmap_pinit(pmap)
do {
mem = vm_page_alloc1();
if (mem == NULL) {
- VM_WAIT; /* XXX What else can we do */
- } /* XXX Deadlock situations? */
+ /* XXX What else can we do? Deadlocks? */
+ vm_wait("ppinit");
+ }
} while (mem == NULL);
/* Do zero via cached if No L2 or Snooping L2 */
@@ -1213,8 +1214,9 @@ pmap_enter(pmap, va, pa, prot, wired)
do {
mem = vm_page_alloc1();
if (mem == NULL) {
- VM_WAIT; /* XXX What else can we do */
- } /* XXX Deadlock situations? */
+ /* XXX What else can we do? Deadlocks? */
+ vm_wait("penter");
+ }
} while (mem == NULL);
/* Do zero via cached if No L2 or Snooping L2 */
diff --git a/sys/arch/arc/arc/vm_machdep.c b/sys/arch/arc/arc/vm_machdep.c
index 247d27630a7..b3db107794a 100644
--- a/sys/arch/arc/arc/vm_machdep.c
+++ b/sys/arch/arc/arc/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.7 1998/01/28 13:46:00 pefo Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.8 1998/03/01 00:37:25 niklas Exp $ */
/*
* Copyright (c) 1988 University of Utah.
* Copyright (c) 1992, 1993
@@ -39,7 +39,7 @@
* from: Utah Hdr: vm_machdep.c 1.21 91/04/06
*
* from: @(#)vm_machdep.c 8.3 (Berkeley) 1/4/94
- * $Id: vm_machdep.c,v 1.7 1998/01/28 13:46:00 pefo Exp $
+ * $Id: vm_machdep.c,v 1.8 1998/03/01 00:37:25 niklas Exp $
*/
#include <sys/param.h>
@@ -509,7 +509,7 @@ kmem_alloc_wait_align(map, size, align)
}
assert_wait(map, TRUE);
vm_map_unlock(map);
- thread_block();
+ thread_block("kmawa");
}
vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
vm_map_unlock(map);
diff --git a/sys/arch/arm32/arm32/machdep.c b/sys/arch/arm32/arm32/machdep.c
index a962a32215d..ad2be7c616d 100644
--- a/sys/arch/arm32/arm32/machdep.c
+++ b/sys/arch/arm32/arm32/machdep.c
@@ -1,4 +1,5 @@
-/* $NetBSD: machdep.c,v 1.6 1996/03/13 21:32:39 mark Exp $ */
+/* $OpenBSD: machdep.c,v 1.3 1998/03/01 00:37:27 niklas Exp $ */
+/* $NetBSD: machdep.c,v 1.6 1996/03/13 21:32:39 mark Exp $ */
/*
* Copyright (c) 1994-1996 Mark Brinicombe.
@@ -215,7 +216,6 @@ void map_pagetable __P((vm_offset_t, vm_offset_t, vm_offset_t));
void map_entry __P((vm_offset_t, vm_offset_t va, vm_offset_t));
void map_entry_ro __P((vm_offset_t, vm_offset_t, vm_offset_t));
-void pmap_bootstrap __P((vm_offset_t /*kernel_l1pt*/));
void process_kernel_args __P((void));
u_long strtoul __P((const char */*s*/, char **/*ptr*/, int /*base*/));
caddr_t allocsys __P((caddr_t /*v*/));
diff --git a/sys/arch/atari/include/cpu.h b/sys/arch/atari/include/cpu.h
index 2efe4402c86..b887861929f 100644
--- a/sys/arch/atari/include/cpu.h
+++ b/sys/arch/atari/include/cpu.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: cpu.h,v 1.4 1998/03/01 00:37:29 niklas Exp $ */
/* $NetBSD: cpu.h,v 1.10 1996/01/19 13:46:56 leo Exp $ */
/*
@@ -200,4 +201,14 @@ extern int machineid, mmutype, cpu040, fputype;
{ "console_device", CTLTYPE_STRUCT }, \
}
+#ifdef _KERNEL
+
+/*
+ * Prototypes from pmap.c:
+ */
+void pmap_bootstrap __P((vm_offset_t));
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+
+#endif /* _KERNEL */
+
#endif /* !_MACHINE_CPU_H_ */
diff --git a/sys/arch/hp300/include/cpu.h b/sys/arch/hp300/include/cpu.h
index ba48571cc47..56740ac457d 100644
--- a/sys/arch/hp300/include/cpu.h
+++ b/sys/arch/hp300/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.8 1997/07/06 08:02:11 downsj Exp $ */
+/* $OpenBSD: cpu.h,v 1.9 1998/03/01 00:37:31 niklas Exp $ */
/* $NetBSD: cpu.h,v 1.25 1997/04/27 20:37:07 thorpej Exp $ */
/*
@@ -180,6 +180,9 @@ int badbaddr __P((caddr_t));
void regdump __P((struct frame *, int));
void dumpconf __P((void));
+/* pmap.c functions */
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+
/* sys_machdep.c functions */
int cachectl __P((int, caddr_t, int));
@@ -191,7 +194,7 @@ int kvtop __P((caddr_t));
/* what is this supposed to do? i.e. how is it different than startrtclock? */
#define enablertclock()
-#endif
+#endif /* _KERNEL */
/* physical memory sections */
#define ROMBASE (0x00000000)
diff --git a/sys/arch/i386/include/cpu.h b/sys/arch/i386/include/cpu.h
index fed648fadf4..69c9518a90f 100644
--- a/sys/arch/i386/include/cpu.h
+++ b/sys/arch/i386/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.20 1998/02/17 23:49:31 matthieu Exp $ */
+/* $OpenBSD: cpu.h,v 1.21 1998/03/01 00:37:33 niklas Exp $ */
/* $NetBSD: cpu.h,v 1.35 1996/05/05 19:29:26 christos Exp $ */
/*-
@@ -209,6 +209,10 @@ int i386_set_ldt __P((struct proc *, char *, register_t *));
void isa_defaultirq __P((void));
int isa_nmi __P((void));
+/* pmap.c */
+void pmap_bootstrap __P((vm_offset_t));
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+
/* vm_machdep.c */
int kvtop __P((caddr_t));
diff --git a/sys/arch/kbus/include/cpu.h b/sys/arch/kbus/include/cpu.h
index a11c7c54633..b1de5bf7b5a 100644
--- a/sys/arch/kbus/include/cpu.h
+++ b/sys/arch/kbus/include/cpu.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: cpu.h,v 1.2 1998/03/01 00:37:34 niklas Exp $ */
/* $NetBSD: cpu.h,v 1.12 1995/06/28 02:55:56 cgd Exp $ */
/*-
@@ -238,6 +239,9 @@ void kgdb_attach __P((int (*)(void *), void (*)(void *, int), void *));
void kgdb_connect __P((int));
void kgdb_panic __P((void));
#endif
+/* pmap.c */
+void pmap_bootstrap __P((vm_offset_t));
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
/* vm_machdep.c */
void cpu_set_kpc __P((struct proc *, void (*)(struct proc *)));
/* iommu.c */
diff --git a/sys/arch/mac68k/include/cpu.h b/sys/arch/mac68k/include/cpu.h
index 9dfb8e7b094..a467e5f16d4 100644
--- a/sys/arch/mac68k/include/cpu.h
+++ b/sys/arch/mac68k/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.17 1997/11/30 06:12:24 gene Exp $ */
+/* $OpenBSD: cpu.h,v 1.18 1998/03/01 00:37:36 niklas Exp $ */
/* $NetBSD: cpu.h,v 1.45 1997/02/10 22:13:40 scottr Exp $ */
/*
@@ -199,6 +199,9 @@ int suline __P((caddr_t, caddr_t));
void savectx __P((struct pcb *));
void proc_trampoline __P((void));
+/* pmap.c */
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+
/* trap.c */
void child_return __P((struct proc *, struct frame));
diff --git a/sys/arch/mvme68k/include/pmap.h b/sys/arch/mvme68k/include/pmap.h
index 905b3c577e8..d3f7a6ae388 100644
--- a/sys/arch/mvme68k/include/pmap.h
+++ b/sys/arch/mvme68k/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.4 1997/03/31 00:24:05 downsj Exp $ */
+/* $OpenBSD: pmap.h,v 1.5 1998/03/01 00:37:38 niklas Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@@ -146,7 +146,6 @@ extern struct pmap kernel_pmap_store;
extern struct pv_entry *pv_table; /* array of entries, one per page */
#define pmap_page_index(pa) atop(pa - vm_first_phys)
-#define pa_to_pvh(pa) (&pv_table[pmap_page_index(pa)])
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
diff --git a/sys/arch/mvme68k/include/vmparam.h b/sys/arch/mvme68k/include/vmparam.h
index 82ca7b4d107..c921a36633d 100644
--- a/sys/arch/mvme68k/include/vmparam.h
+++ b/sys/arch/mvme68k/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.3 1996/04/28 10:56:37 deraadt Exp $ */
+/* $OpenBSD: vmparam.h,v 1.4 1998/03/01 00:37:39 niklas Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -42,6 +42,9 @@
* @(#)vmparam.h 8.2 (Berkeley) 4/19/94
*/
+#ifndef _MVME68K_VMPARAM_H_
+#define _MVME68K_VMPARAM_H_
+
/*
* Machine dependent constants for MVME68K
*/
@@ -244,3 +247,26 @@
/* pcb base */
#define pcbb(p) ((u_int)(p)->p_addr)
+
+/* Use new VM page bootstrap interface. */
+#define MACHINE_NEW_NONCONTIG
+
+#if defined(MACHINE_NEW_NONCONTIG)
+/*
+ * Constants which control the way the VM system deals with memory segments.
+ * The hp300 only has one physical memory segment.
+ */
+#define VM_PHYSSEG_MAX 1
+#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
+#define VM_PHYSSEG_NOADD
+
+/*
+ * pmap-specific data stored in the vm_physmem[] array.
+ */
+struct pmap_physseg {
+ struct pv_entry *pvent; /* pv table for this seg */
+ char *attrs; /* page attributes for this seg */
+};
+#endif /* MACHINE_NEW_NONCONTIG */
+
+#endif /* _MVME68K_VMPARAM_H_ */
diff --git a/sys/arch/mvme68k/mvme68k/locore.s b/sys/arch/mvme68k/mvme68k/locore.s
index cfda7713a8a..85170e0e7c4 100644
--- a/sys/arch/mvme68k/mvme68k/locore.s
+++ b/sys/arch/mvme68k/mvme68k/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.17 1997/07/27 09:10:55 deraadt Exp $ */
+/* $OpenBSD: locore.s,v 1.18 1998/03/01 00:37:41 niklas Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -512,6 +512,9 @@ Lnocache0:
movl d7, _boothowto | save boothowto
/* d3-d7 now free */
+/* Final setup for call to main(). */
+ jbsr _C_LABEL(mvme68k_init)
+
/*
* Create a fake exception frame so that cpu_fork() can copy it.
* main() nevers returns; we exit to user mode from a forked process
diff --git a/sys/arch/mvme68k/mvme68k/machdep.c b/sys/arch/mvme68k/mvme68k/machdep.c
index 1d1063e6b0d..f3cc2011518 100644
--- a/sys/arch/mvme68k/mvme68k/machdep.c
+++ b/sys/arch/mvme68k/mvme68k/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.22 1997/07/23 06:58:21 denny Exp $ */
+/* $OpenBSD: machdep.c,v 1.23 1998/03/01 00:37:42 niklas Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -155,6 +155,26 @@ extern struct emul emul_hpux;
extern struct emul emul_sunos;
#endif
+void
+mvme68k_init()
+{
+#if defined(MACHINE_NEW_NONCONTIG)
+ extern vm_offset_t avail_start, avail_end;
+
+ /*
+ * Tell the VM system about available physical memory. The
+ * hp300 only has one segment.
+ */
+#if defined(UVM)
+ uvm_page_physload(atop(avail_start), atop(avail_end),
+ atop(avail_start), atop(avail_end));
+#else
+ vm_page_physload(atop(avail_start), atop(avail_end),
+ atop(avail_start), atop(avail_end));
+#endif /* UVM */
+#endif /* MACHINE_NEW_NONCONTIG */
+}
+
/*
* Console initialization: called early on from main,
* before vm init or startup. Do enough configuration
diff --git a/sys/arch/mvme68k/mvme68k/pmap.c b/sys/arch/mvme68k/mvme68k/pmap.c
index f3df1b600e8..4ca075b67fa 100644
--- a/sys/arch/mvme68k/mvme68k/pmap.c
+++ b/sys/arch/mvme68k/mvme68k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.6 1997/02/10 11:39:26 downsj Exp $ */
+/* $OpenBSD: pmap.c,v 1.7 1998/03/01 00:37:44 niklas Exp $ */
/*
* Copyright (c) 1995 Theo de Raadt
@@ -291,9 +291,7 @@ vm_offset_t avail_end; /* PA of last available physical page */
vm_size_t mem_size; /* memory size in bytes */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
-vm_offset_t vm_first_phys; /* PA of first managed page */
-vm_offset_t vm_last_phys; /* PA just past last managed page */
-int npages;
+int page_cnt; /* number of pages managed by VM system */
boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
struct pv_entry *pv_table;
@@ -312,6 +310,8 @@ void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int));
boolean_t pmap_testbit __P((vm_offset_t, int));
void pmap_changebit __P((vm_offset_t, int, boolean_t));
void pmap_enter_ptpage __P((pmap_t, vm_offset_t));
+void pmap_collect1 __P((pmap_t, vm_offset_t, vm_offset_t));
+
#ifdef DEBUG
void pmap_pvdump __P((vm_offset_t));
void pmap_check_wiring __P((char *, vm_offset_t));
@@ -321,6 +321,37 @@ void pmap_check_wiring __P((char *, vm_offset_t));
#define PRM_TFLUSH 1
#define PRM_CFLUSH 2
+#if !defined(MACHINE_NEW_NONCONTIG)
+vm_offset_t vm_first_phys; /* PA of first managed page */
+vm_offset_t vm_last_phys; /* PA just past last managed page */
+
+#define PAGE_IS_MANAGED(pa) (pmap_initialized && \
+ (pa) >= vm_first_phys && (pa) < vm_last_phys)
+
+#define pa_to_pvh(pa) (&pv_table[pmap_page_index((pa))])
+#define pa_to_attribute(pa) (&pmap_attributes[pmap_page_index((pa))])
+#else
+#define PAGE_IS_MANAGED(pa) (pmap_initialized && \
+ vm_physseg_find(atop((pa)), NULL) != -1)
+
+#define pa_to_pvh(pa) \
+({ \
+ int bank_, pg_; \
+ \
+ bank_ = vm_physseg_find(atop((pa)), &pg_); \
+ &vm_physmem[bank_].pmseg.pvent[pg_]; \
+})
+
+#define pa_to_attribute(pa) \
+({ \
+ int bank_, pg_; \
+ \
+ bank_ = vm_physseg_find(atop((pa)), &pg_); \
+ &vm_physmem[bank_].pmseg.attrs[pg_]; \
+})
+#endif /* MACHINE_NEW_NONCONTIG */
+
+#if !defined(MACHINE_NEW_NONCONTIG)
/*
* Bootstrap memory allocator. This function allows for early dynamic
* memory allocation until the virtual memory system has been bootstrapped.
@@ -351,27 +382,66 @@ pmap_bootstrap_alloc(size)
bzero((void *)val, size);
return ((void *) val);
}
+#endif /* ! MACHINE_NEW_NONCONTIG */
+#if defined(MACHINE_NEW_NONCONTIG)
/*
- * Initialize the pmap module.
- * Called by vm_init, to initialize any structures that the pmap
- * system needs to map virtual memory.
+ * Routine: pmap_virtual_space
+ *
+ * Function:
+ * Report the range of available kernel virtual address
+ * space to the VM system during bootstrap. Called by
+ * vm_bootstrap_steal_memory().
+ */
+void
+pmap_virtual_space(vstartp, vendp)
+ vm_offset_t *vstartp, *vendp;
+{
+
+ *vstartp = virtual_avail;
+ *vendp = virtual_end;
+}
+#endif /* MACHINE_NEW_NONCONTIG */
+
+/*
+ * Routine: pmap_init
+ *
+ * Function:
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
*/
+#if defined(MACHINE_NEW_NONCONTIG)
+void
+pmap_init()
+#else
void
pmap_init(phys_start, phys_end)
vm_offset_t phys_start, phys_end;
+#endif
{
vm_offset_t addr, addr2;
vm_size_t s;
int rv;
+ int npages;
+#if defined(MACHINE_NEW_NONCONTIG)
+ struct pv_entry *pv;
+ char *attr;
+ int bank;
+#endif
+
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
+#if defined(MACHINE_NEW_NONCONTIG)
+ printf("pmap_init()\n");
+#else
printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
+#endif
/*
* Now that kernel map has been allocated, we can mark as
- * unavailable regions which we have mapped in locore.
+ * unavailable regions which we have mapped in pmap_bootstrap().
*/
addr = (vm_offset_t) intiobase;
(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
@@ -385,7 +455,7 @@ pmap_init(phys_start, phys_end)
/*
* If this fails it is probably because the static portion of
* the kernel page table isn't big enough and we overran the
- * page table map. Need to adjust pmap_size() in m68k_init.c.
+ * page table map.
*/
if (addr != (vm_offset_t)Sysmap)
bogons:
@@ -404,23 +474,51 @@ bogons:
* Allocate memory for random pmap data structures. Includes the
* initial segment table, pv_head_table and pmap_attributes.
*/
- npages = atop(phys_end - phys_start);
- s = (vm_size_t) (M68K_STSIZE + sizeof(struct pv_entry) * npages + npages);
+#if defined(MACHINE_NEW_NONCONTIG)
+ for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
+ page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
+#else
+ page_cnt = atop(phys_end - phys_start);
+#endif
+ s = M68K_STSIZE; /* Segtabzero */
+ s += page_cnt * sizeof(struct pv_entry); /* pv table */
+ s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
+
Segtabzero = (st_entry_t *) addr;
Segtabzeropa = (st_entry_t *) pmap_extract(pmap_kernel(), addr);
addr += M68K_STSIZE;
+
pv_table = (struct pv_entry *) addr;
- addr += sizeof(struct pv_entry) * npages;
+ addr += page_cnt * sizeof(struct pv_entry);
+
pmap_attributes = (char *) addr;
+
#ifdef DEBUG
if (pmapdebug & PDB_INIT)
- printf("pmap_init: %x bytes: npages %x s0 %x(%x) tbl %x atr %x\n",
- s, npages, Segtabzero, Segtabzeropa,
- pv_table, pmap_attributes);
+ printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
+ "tbl %p atr %p\n",
+ s, page_cnt, Segtabzero, Segtabzeropa,
+ pv_table, pmap_attributes);
#endif
+#if defined(MACHINE_NEW_NONCONTIG)
+ /*
+ * Now that the pv and attribute tables have been allocated,
+ * assign them to the memory segments.
+ */
+ pv = pv_table;
+ attr = pmap_attributes;
+ for (bank = 0; bank < vm_nphysseg; bank++) {
+ npages = vm_physmem[bank].end - vm_physmem[bank].start;
+ vm_physmem[bank].pmseg.pvent = pv;
+ vm_physmem[bank].pmseg.attrs = attr;
+ pv += npages;
+ attr += npages;
+ }
+#endif
+
/*
* Allocate physical memory for kernel PT pages and their management.
* We need 1 PT page per possible task plus some slop.
@@ -512,8 +610,10 @@ bogons:
/*
* Now it is safe to enable pv_table recording.
*/
+#if !defined(MACHINE_NEW_NONCONTIG)
vm_first_phys = phys_start;
vm_last_phys = phys_end;
+#endif
pmap_initialized = TRUE;
}
@@ -600,7 +700,7 @@ pmap_collect_pv()
if (pv_page_collectlist.tqh_first == 0)
return;
- for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
+ for (ph = &pv_table[page_cnt - 1]; ph >= &pv_table[0]; ph--) {
if (ph->pv_pmap == 0)
continue;
s = splimp();
@@ -912,7 +1012,7 @@ pmap_page_protect(pa, prot)
prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
- if (pa < vm_first_phys || pa >= vm_last_phys)
+ if (PAGE_IS_MANAGED(pa) == 0)
return;
switch (prot) {
@@ -1175,7 +1275,7 @@ pmap_enter(pmap, va, pa, prot, wired)
* Note that we raise IPL while manipulating pv_table
* since pmap_enter can be called at interrupt time.
*/
- if (pa >= vm_first_phys && pa < vm_last_phys) {
+ if (PAGE_IS_MANAGED(pa)) {
register struct pv_entry *pv, *npv;
int s;
@@ -1433,29 +1533,61 @@ void
pmap_collect(pmap)
pmap_t pmap;
{
- register vm_offset_t pa;
- register struct pv_entry *pv;
- register pt_entry_t *pte;
- vm_offset_t kpa;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int bank, s;
+#else
int s;
+#endif /* MACHINE_NEW_NONCONTIG */
-#ifdef DEBUG
- st_entry_t *ste;
- int opmapdebug;
-#endif
if (pmap != pmap_kernel())
return;
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
- printf("pmap_collect(%x)\n", pmap);
+ printf("pmap_collect(%p)\n", pmap);
#endif
#ifdef PMAPSTATS
kpt_stats.collectscans++;
#endif
s = splimp();
- for (pa = vm_first_phys; pa < vm_last_phys; pa += NBPG) {
- register struct kpt_page *kpt, **pkpt;
+#if defined(MACHINE_NEW_NONCONTIG)
+ for (bank = 0; bank < vm_nphysseg; bank++)
+ pmap_collect1(pmap, ptoa(vm_physmem[bank].start),
+ ptoa(vm_physmem[bank].end));
+#else
+ pmap_collect1(pmap, vm_first_phys, vm_last_phys);
+#endif /* MACHINE_NEW_NONCONTIG */
+ splx(s);
+
+#ifdef notyet
+ /* Go compact and garbage-collect the pv_table. */
+ pmap_collect_pv();
+#endif
+}
+
+/*
+ * Routine: pmap_collect1()
+ *
+ * Function:
+ * Helper function for pmap_collect(). Do the actual
+ * garbage-collection of range of physical addresses.
+ */
+void
+pmap_collect1(pmap, startpa, endpa)
+ pmap_t pmap;
+ vm_offset_t startpa, endpa;
+{
+ vm_offset_t pa;
+ struct pv_entry *pv;
+ pt_entry_t *pte;
+ vm_offset_t kpa;
+#ifdef DEBUG
+ st_entry_t *ste;
+ int opmapdebug = 0 /* XXX initialize to quiet gcc -Wall */;
+#endif
+
+ for (pa = startpa; pa < endpa; pa += NBPG) {
+ struct kpt_page *kpt, **pkpt;
/*
* Locate physical pages which are being used as kernel
@@ -1467,12 +1599,12 @@ pmap_collect(pmap)
do {
if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
break;
- } while (pv = pv->pv_next);
+ } while ((pv = pv->pv_next));
if (pv == NULL)
continue;
#ifdef DEBUG
if (pv->pv_va < (vm_offset_t)Sysmap ||
- pv->pv_va >= (vm_offset_t)Sysmap + M68K_MAX_PTSIZE)
+ pv->pv_va >= (vm_offset_t)Sysmap + HP_MAX_PTSIZE)
printf("collect: kernel PT VA out of range\n");
else
goto ok;
@@ -1488,7 +1620,7 @@ ok:
#ifdef DEBUG
if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
- printf("collect: freeing KPT page at %x (ste %x@%x)\n",
+ printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
opmapdebug = pmapdebug;
pmapdebug |= PDB_PTPAGE;
@@ -1518,7 +1650,7 @@ ok:
if (kpt == (struct kpt_page *)0)
panic("pmap_collect: lost a KPT page");
if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
- printf("collect: %x (%x) to free list\n",
+ printf("collect: %lx (%lx) to free list\n",
kpt->kpt_va, kpa);
#endif
*pkpt = kpt->kpt_next;
@@ -1533,15 +1665,14 @@ ok:
pmapdebug = opmapdebug;
if (*ste != SG_NV)
- printf("collect: kernel STE at %x still valid (%x)\n",
+ printf("collect: kernel STE at %p still valid (%x)\n",
ste, *ste);
ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
if (*ste != SG_NV)
- printf("collect: kernel PTmap at %x still valid (%x)\n",
+ printf("collect: kernel PTmap at %p still valid (%x)\n",
ste, *ste);
#endif
}
- splx(s);
}
/*
@@ -1652,7 +1783,7 @@ pmap_pageable(pmap, sva, eva, pageable)
if (!pmap_ste_v(pmap, sva))
return;
pa = pmap_pte_pa(pmap_pte(pmap, sva));
- if (pa < vm_first_phys || pa >= vm_last_phys)
+ if (PAGE_IS_MANAGED(pa) == 0)
return;
pv = pa_to_pvh(pa);
if (pv->pv_ptste == NULL)
@@ -1880,7 +2011,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
/*
* If this isn't a managed page, we are all done.
*/
- if (pa < vm_first_phys || pa >= vm_last_phys)
+ if (PAGE_IS_MANAGED(pa) == 0)
return;
/*
* Otherwise remove it from the PV table
@@ -2013,7 +2144,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
/*
* Update saved attributes for managed page
*/
- pmap_attributes[pmap_page_index(pa)] |= bits;
+ *pa_to_attribute(pa) |= bits;
splx(s);
}
@@ -2027,7 +2158,7 @@ pmap_testbit(pa, bit)
register pt_entry_t *pte;
int s;
- if (pa < vm_first_phys || pa >= vm_last_phys)
+ if (PAGE_IS_MANAGED(pa) == 0)
return(FALSE);
pv = pa_to_pvh(pa);
@@ -2035,7 +2166,7 @@ pmap_testbit(pa, bit)
/*
* Check saved info first
*/
- if (pmap_attributes[pmap_page_index(pa)] & bit) {
+ if (*pa_to_attribute(pa) & bit) {
splx(s);
return(TRUE);
}
@@ -2077,7 +2208,7 @@ pmap_changebit(pa, bit, setem)
printf("pmap_changebit(%x, %x, %s)\n",
pa, bit, setem ? "set" : "clear");
#endif
- if (pa < vm_first_phys || pa >= vm_last_phys)
+ if (PAGE_IS_MANAGED(pa) == 0)
return;
#ifdef PMAPSTATS
@@ -2093,7 +2224,7 @@ pmap_changebit(pa, bit, setem)
* Clear saved attributes (modify, reference)
*/
if (!setem)
- pmap_attributes[pmap_page_index(pa)] &= ~bit;
+ *pa_to_attribute(pa) &= ~bit;
/*
* Loop over all current mappings setting/clearing as appropos
* If setting RO do we need to clear the VAC?
diff --git a/sys/arch/pc532/include/cpu.h b/sys/arch/pc532/include/cpu.h
index 454bca5c00e..1c4199407bd 100644
--- a/sys/arch/pc532/include/cpu.h
+++ b/sys/arch/pc532/include/cpu.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: cpu.h,v 1.2 1998/03/01 00:37:46 niklas Exp $ */
/* $NetBSD: cpu.h,v 1.12 1995/06/28 02:55:56 cgd Exp $ */
/*-
@@ -116,4 +117,14 @@ int want_resched; /* resched() was called */
{ "console_device", CTLTYPE_STRUCT }, \
}
+#ifdef _KERNEL
+
+/*
+ * Prototypes from pmap.c:
+ */
+void pmap_bootstrap __P((vm_offset_t, vm_offset_t));
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+
+#endif /* _KERNEL */
+
#endif
diff --git a/sys/arch/sun3/include/machdep.h b/sys/arch/sun3/include/machdep.h
index 40798fad64a..228e049aac5 100644
--- a/sys/arch/sun3/include/machdep.h
+++ b/sys/arch/sun3/include/machdep.h
@@ -1,4 +1,5 @@
-/* $OpenBSD: machdep.h,v 1.7 1997/04/05 20:22:02 kstailey Exp $ */
+/* $OpenBSD: machdep.h,v 1.8 1998/03/01 00:37:48 niklas Exp $ */
+
/*
* Copyright (c) 1994 Gordon W. Ross
* Copyright (c) 1993 Adam Glass
@@ -124,6 +125,7 @@ void proc_do_uret __P((void));
void proc_trampoline __P((void));
void pmap_bootstrap __P((void));
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
int pmap_fault_reload __P((struct pmap *, vm_offset_t, int));
void pmap_get_ksegmap __P((u_char *));
void pmap_get_pagemap __P((int *pt, int off));
diff --git a/sys/arch/vax/include/pmap.h b/sys/arch/vax/include/pmap.h
index 16daa47f1e4..0f6bfc040a3 100644
--- a/sys/arch/vax/include/pmap.h
+++ b/sys/arch/vax/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.6 1997/09/12 09:21:22 maja Exp $ */
+/* $OpenBSD: pmap.h,v 1.7 1998/03/01 00:37:49 niklas Exp $ */
/* $NetBSD: pmap.h,v 1.19 1997/07/06 22:38:29 ragge Exp $ */
/*
@@ -121,6 +121,7 @@ extern struct pmap kernel_pmap_store;
/* Prototypes */
void pmap_bootstrap __P((void));
+vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
void pmap_expandp0 __P((struct pmap *, int));
void pmap_expandp1 __P((struct pmap *));
#endif PMAP_H
diff --git a/sys/arch/wgrisc/wgrisc/pmap.c b/sys/arch/wgrisc/wgrisc/pmap.c
index 0abc9c6bdaf..7adde022f35 100644
--- a/sys/arch/wgrisc/wgrisc/pmap.c
+++ b/sys/arch/wgrisc/wgrisc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.1 1997/02/06 16:02:46 pefo Exp $ */
+/* $OpenBSD: pmap.c,v 1.2 1998/03/01 00:37:51 niklas Exp $ */
/*
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 8.4 (Berkeley) 1/26/94
- * $Id: pmap.c,v 1.1 1997/02/06 16:02:46 pefo Exp $
+ * $Id: pmap.c,v 1.2 1998/03/01 00:37:51 niklas Exp $
*/
/*
@@ -474,8 +474,9 @@ pmap_pinit(pmap)
do {
mem = vm_page_alloc1();
if (mem == NULL) {
- VM_WAIT; /* XXX What else can we do */
- } /* XXX Deadlock situations? */
+ /* XXX What else can we do? Deadlocks? */
+ vm_wait("ppinit");
+ }
} while (mem == NULL);
pmap_zero_page(VM_PAGE_TO_PHYS(mem));
@@ -1216,8 +1217,9 @@ pmap_enter(pmap, va, pa, prot, wired)
do {
mem = vm_page_alloc1();
if (mem == NULL) {
- VM_WAIT; /* XXX What else can we do */
- } /* XXX Deadlock situations? */
+ /* XXX What else can we do? Deadlocks? */
+ vm_wait("penter");
+ }
} while (mem == NULL);
pmap_zero_page(VM_PAGE_TO_PHYS(mem));
diff --git a/sys/conf/files b/sys/conf/files
index ba6d5d2bc14..5ba2ffa5307 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1,4 +1,4 @@
-# $OpenBSD: files,v 1.74 1998/01/28 14:19:39 niklas Exp $
+# $OpenBSD: files,v 1.75 1998/03/01 00:37:53 niklas Exp $
# $NetBSD: files,v 1.87 1996/05/19 17:17:50 jonathan Exp $
# @(#)files.newconf 7.5 (Berkeley) 5/10/93
@@ -155,6 +155,7 @@ file kern/kern_descrip.c
file kern/kern_exec.c
file kern/kern_exit.c
file kern/kern_fork.c
+file kern/kern_fthread.c
file kern/kern_ktrace.c ktrace
file kern/kern_lock.c
file kern/kern_lkm.c lkm
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 97a32834ff0..665be14f636 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: init_main.c,v 1.30 1998/02/20 13:41:33 niklas Exp $ */
+/* $OpenBSD: init_main.c,v 1.31 1998/03/01 00:37:54 niklas Exp $ */
/* $NetBSD: init_main.c,v 1.84.4.1 1996/06/02 09:08:06 mrg Exp $ */
/*
@@ -196,6 +196,9 @@ main(framep)
vm_mem_init();
kmeminit();
+#if defined(MACHINE_NEW_NONCONTIG)
+ vm_page_physrehash();
+#endif
disk_init(); /* must come before autoconfiguration */
tty_init(); /* initialise tty's */
cpu_startup();
diff --git a/sys/kern/kern_fthread.c b/sys/kern/kern_fthread.c
new file mode 100644
index 00000000000..ae02fa972ea
--- /dev/null
+++ b/sys/kern/kern_fthread.c
@@ -0,0 +1,167 @@
+/* $OpenBSD: kern_fthread.c,v 1.1 1998/03/01 00:37:56 niklas Exp $ */
+/* $NetBSD: kern_fthread.c,v 1.3 1998/02/07 16:23:35 chs Exp $ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * The Mach Operating System project at Carnegie-Mellon University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kern_lock.c 8.1 (Berkeley) 6/11/93
+ *
+ *
+ * Copyright (c) 1987, 1990 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Authors: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * Locking primitives implementation
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <vm/vm.h>
+
+/*
+ * these routines fake thread handling
+ */
+
+#if !defined(UVM)
+
+void
+assert_wait(event, ruptible)
+ void *event;
+ boolean_t ruptible;
+{
+#ifdef lint
+ ruptible++;
+#endif
+ curproc->p_thread = event;
+}
+
+void
+thread_block(msg)
+char *msg;
+{
+ int s = splhigh();
+
+ if (curproc->p_thread)
+ tsleep(curproc->p_thread, PVM, msg, 0);
+ splx(s);
+}
+
+#endif
+
+void
+thread_sleep_msg(event, lock, ruptible, msg, timo)
+ void *event;
+ simple_lock_t lock;
+ boolean_t ruptible;
+ char *msg;
+ int timo;
+{
+ int s = splhigh();
+
+#ifdef lint
+ ruptible++;
+#endif
+ curproc->p_thread = event;
+ simple_unlock(lock);
+ if (curproc->p_thread)
+ tsleep(event, PVM, msg, timo);
+ splx(s);
+}
+
+/*
+ * DEBUG stuff
+ */
+
+int indent = 0;
+
+/*
+ * Note that stdarg.h and the ANSI style va_start macro is used for both
+ * ANSI and traditional C compilers. (Same as subr_prf.c does.)
+ * XXX: This requires that stdarg.h defines: va_alist, va_dcl
+ */
+#include <machine/stdarg.h>
+
+/*ARGSUSED2*/
+void
+#ifdef __STDC__
+iprintf(int (*pr)(const char *, ...), const char *fmt, ...)
+#else
+iprintf(pr, fmt, va_alist)
+ void (*pr)();
+ const char *fmt;
+ va_dcl
+#endif
+{
+ register int i;
+ va_list ap;
+
+ va_start(ap, fmt);
+ for (i = indent; i >= 8; i -= 8)
+ (*pr)("\t");
+ while (--i >= 0)
+ (*pr)(" ");
+#ifdef __powerpc__ /* XXX */
+ if (pr != printf) /* XXX */
+ panic("iprintf"); /* XXX */
+ vprintf(fmt, ap); /* XXX */
+#else /* XXX */
+ (*pr)("%:", fmt, ap); /* XXX */
+#endif /* __powerpc__ */ /* XXX */
+ va_end(ap);
+}
diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h
index a7b5558c54f..2aaf3150152 100644
--- a/sys/sys/malloc.h
+++ b/sys/sys/malloc.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: malloc.h,v 1.13 1997/11/06 05:59:08 csapuntz Exp $ */
+/* $OpenBSD: malloc.h,v 1.14 1998/03/01 00:37:56 niklas Exp $ */
/* $NetBSD: malloc.h,v 1.23 1996/04/05 04:52:52 mhitch Exp $ */
/*
@@ -144,6 +144,8 @@
#define M_MKDIR 89 /* New directory */
#define M_DIRREM 90 /* Directory entry deleted */
+#define M_VMPBUCKET 91 /* VM page buckets */
+
#define M_TEMP 127 /* misc temporary data buffers */
#define M_LAST 128 /* Must be last type + 1 */
@@ -240,6 +242,7 @@
"diradd", /* 88 M_DIRADD */ \
"mkdir", /* 89 M_MKDIR */ \
"dirrem", /* 90 M_DIRREM */ \
+ "VM page bucket", /* 91 M_VMPBUCKET */ \
NULL, NULL, NULL, NULL, NULL, \
NULL, NULL, NULL, NULL, NULL, \
NULL, NULL, NULL, NULL, NULL, \
@@ -247,7 +250,6 @@
NULL, NULL, NULL, NULL, NULL, \
NULL, NULL, NULL, NULL, NULL, \
NULL, NULL, NULL, NULL, NULL, \
- NULL, \
"temp", /* 127 M_TEMP */ \
}
diff --git a/sys/vm/pglist.h b/sys/vm/pglist.h
new file mode 100644
index 00000000000..2e17c434f92
--- /dev/null
+++ b/sys/vm/pglist.h
@@ -0,0 +1,8 @@
+/* $OpenBSD: pglist.h,v 1.1 1998/03/01 00:37:58 niklas Exp $ */
+
+#ifndef _PGLIST_H_
+#define _PGLIST_H_
+
+TAILQ_HEAD(pglist, vm_page);
+
+#endif
diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h
index 07053e3047a..8edd35b77a8 100644
--- a/sys/vm/pmap.h
+++ b/sys/vm/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.4 1996/08/02 00:05:56 niklas Exp $ */
+/* $OpenBSD: pmap.h,v 1.5 1998/03/01 00:37:58 niklas Exp $ */
/* $NetBSD: pmap.h,v 1.16 1996/03/31 22:15:32 pk Exp $ */
/*
@@ -87,6 +87,26 @@ typedef struct pmap_statistics *pmap_statistics_t;
#include <machine/pmap.h>
+/*
+ * PMAP_PGARG hack
+ *
+ * operations that take place on managed pages used to take PAs.
+ * this caused us to translate the PA back to a page (or pv_head).
+ * PMAP_NEW avoids this by passing the vm_page in (pv_head should be
+ * pointed to by vm_page (or be a part of it)).
+ *
+ * applies to: pmap_page_protect, pmap_is_referenced, pmap_is_modified,
+ * pmap_clear_reference, pmap_clear_modify.
+ *
+ * the latter two functions are boolean_t in PMAP_NEW. they return
+ * TRUE if something was cleared.
+ */
+#if defined(PMAP_NEW)
+#define PMAP_PGARG(PG) (PG)
+#else
+#define PMAP_PGARG(PG) (VM_PAGE_TO_PHYS(PG))
+#endif
+
#ifndef PMAP_EXCLUDE_DECLS /* Used in Sparc port to virtualize pmap mod */
#ifdef _KERNEL
__BEGIN_DECLS
@@ -96,33 +116,74 @@ void *pmap_bootstrap_alloc __P((int));
void pmap_bootstrap( /* machine dependent */ );
#endif
void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t));
+
+#if defined(PMAP_NEW)
+#if !defined(pmap_clear_modify)
+boolean_t pmap_clear_modify __P((struct vm_page *));
+#endif
+#if !defined(pmap_clear_reference)
+boolean_t pmap_clear_reference __P((struct vm_page *));
+#endif
+#else /* PMAP_NEW */
void pmap_clear_modify __P((vm_offset_t pa));
void pmap_clear_reference __P((vm_offset_t pa));
+#endif /* PMAP_NEW */
+
void pmap_collect __P((pmap_t));
void pmap_copy __P((pmap_t,
pmap_t, vm_offset_t, vm_size_t, vm_offset_t));
void pmap_copy_page __P((vm_offset_t, vm_offset_t));
+#if defined(PMAP_NEW)
+struct pmap *pmap_create __P((void));
+#else
pmap_t pmap_create __P((vm_size_t));
+#endif
void pmap_destroy __P((pmap_t));
void pmap_enter __P((pmap_t,
vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
-#ifndef pmap_page_index
-int pmap_page_index __P((vm_offset_t));
+#if defined(PMAP_NEW) && defined(PMAP_GROWKERNEL)
+void pmap_growkernel __P((vm_offset_t));
#endif
-#ifndef MACHINE_NONCONTIG
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
void pmap_init __P((vm_offset_t, vm_offset_t));
#else
void pmap_init __P((void));
#endif
+
+#if defined(PMAP_NEW)
+void pmap_kenter_pa __P((vm_offset_t, vm_offset_t, vm_prot_t));
+void pmap_kenter_pgs __P((vm_offset_t, struct vm_page **, int));
+void pmap_kremove __P((vm_offset_t, vm_size_t));
+#if !defined(pmap_is_modified)
+boolean_t pmap_is_modified __P((struct vm_page *));
+#endif
+#if !defined(pmap_is_referenced)
+boolean_t pmap_is_referenced __P((struct vm_page *));
+#endif
+#else /* PMAP_NEW */
boolean_t pmap_is_modified __P((vm_offset_t pa));
boolean_t pmap_is_referenced __P((vm_offset_t pa));
-vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
+#endif /* PMAP_NEW */
+
+#if !defined(MACHINE_NEW_NONCONTIG)
+#ifndef pmap_page_index
+int pmap_page_index __P((vm_offset_t));
+#endif
+#endif /* ! MACHINE_NEW_NONCONTIG */
+
+#if defined(PMAP_NEW)
+void pmap_page_protect __P((struct vm_page *, vm_prot_t));
+#else
void pmap_page_protect __P((vm_offset_t, vm_prot_t));
+#endif
+
void pmap_pageable __P((pmap_t,
vm_offset_t, vm_offset_t, boolean_t));
+#if !defined(pmap_phys_address)
vm_offset_t pmap_phys_address __P((int));
+#endif
void pmap_pinit __P((pmap_t));
void pmap_protect __P((pmap_t,
vm_offset_t, vm_offset_t, vm_prot_t));
@@ -135,10 +196,15 @@ void pmap_zero_page __P((vm_offset_t));
#ifdef MACHINE_NONCONTIG
u_int pmap_free_pages __P((void));
boolean_t pmap_next_page __P((vm_offset_t *));
-void pmap_startup __P((vm_offset_t *, vm_offset_t *));
-vm_offset_t pmap_steal_memory __P((vm_size_t));
+#endif
+#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
+#if defined(PMAP_STEAL_MEMORY)
+vm_offset_t pmap_steal_memory __P((vm_size_t, vm_offset_t *,
+ vm_offset_t *));
+#else
void pmap_virtual_space __P((vm_offset_t *, vm_offset_t *));
#endif
+#endif
__END_DECLS
#endif /* kernel*/
#endif /* PMAP_EXCLUDE_DECLS */
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 65ad9d9a921..8ffef6d549e 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: swap_pager.c,v 1.12 1997/12/02 16:55:51 csapuntz Exp $ */
+/* $OpenBSD: swap_pager.c,v 1.13 1998/03/01 00:38:00 niklas Exp $ */
/* $NetBSD: swap_pager.c,v 1.27 1996/03/16 23:15:20 christos Exp $ */
/*
@@ -127,6 +127,8 @@ struct swpclean swap_pager_inuse; /* list of pending page cleans */
struct swpclean swap_pager_free; /* list of free pager clean structs */
struct pagerlst swap_pager_list; /* list of "named" anon regions */
+extern struct buf bswlist; /* import from vm_swap.c */
+
static void swap_pager_init __P((void));
static vm_pager_t swap_pager_alloc
__P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 1f63279b69d..07c188f3043 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm.h,v 1.5 1997/11/06 05:59:31 csapuntz Exp $ */
+/* $OpenBSD: vm.h,v 1.6 1998/03/01 00:38:01 niklas Exp $ */
/* $NetBSD: vm.h,v 1.13 1994/06/29 06:47:52 cgd Exp $ */
/*
@@ -39,6 +39,8 @@
#ifndef VM_H
#define VM_H
+/* XXX remove this later when the simple locks are not here! */
+
typedef int vm_inherit_t; /* XXX: inheritance codes */
union vm_map_object;
@@ -62,10 +64,12 @@ typedef struct pager_struct *vm_pager_t;
/*
* MACH VM locking type mappings to kernel types
*/
+#if !defined(UVM)
typedef struct simplelock simple_lock_data_t;
typedef struct simplelock *simple_lock_t;
typedef struct lock lock_data_t;
typedef struct lock *lock_t;
+#endif
#include <sys/vmmeter.h>
#include <sys/queue.h>
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 86462654a49..d6265da7915 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_extern.h,v 1.14 1997/11/06 05:59:31 csapuntz Exp $ */
+/* $OpenBSD: vm_extern.h,v 1.15 1998/03/01 00:38:02 niklas Exp $ */
/* $NetBSD: vm_extern.h,v 1.20 1996/04/23 12:25:23 christos Exp $ */
/*-
@@ -45,7 +45,7 @@ struct mount;
struct vnode;
struct core;
-#ifdef KGDB
+#if defined(KGDB) && !defined(UVM)
void chgkprot __P((caddr_t, int, int));
#endif
@@ -65,9 +65,13 @@ int sstk __P((struct proc *, void *, int *));
#endif
void assert_wait __P((void *, boolean_t));
+#if !defined(UVM)
int grow __P((struct proc *, vm_offset_t));
+#endif
void iprintf __P((int (*)(const char *, ...), const char *, ...));
+#if !defined(UVM)
int kernacc __P((caddr_t, int, int));
+#endif
int kinfo_loadavg __P((int, char *, int *, int, int *));
int kinfo_meter __P((int, caddr_t, int *, int, int *));
vm_offset_t kmem_alloc __P((vm_map_t, vm_size_t));
@@ -80,26 +84,33 @@ vm_offset_t kmem_malloc __P((vm_map_t, vm_size_t, boolean_t));
vm_map_t kmem_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *,
vm_size_t, boolean_t));
void loadav __P((struct loadavg *));
+#if !defined(UVM)
void munmapfd __P((struct proc *, int));
+#endif
int pager_cache __P((vm_object_t, boolean_t));
void sched __P((void));
+#if !defined(UVM)
#ifdef __GNUC__
void scheduler __P((void)) __attribute ((noreturn));
#else
void scheduler __P((void));
#endif
+#endif
int svm_allocate __P((struct proc *, void *, int *));
int svm_deallocate __P((struct proc *, void *, int *));
int svm_inherit __P((struct proc *, void *, int *));
int svm_protect __P((struct proc *, void *, int *));
void swapinit __P((void));
+#if !defined(UVM)
void swapout __P((struct proc *));
void swapout_threads __P((void));
+#endif
int swfree __P((struct proc *, int));
void swstrategy __P((struct buf *));
-void thread_block __P((void));
+void thread_block __P((char *));
void thread_sleep_msg __P((void *, simple_lock_t,
- boolean_t, char *));
+ boolean_t, char *, int));
+
/* backwards compatibility */
#define thread_sleep(event, lock, ruptible) \
@@ -110,6 +121,7 @@ void thread_sleep_msg __P((void *, simple_lock_t,
* was solely a wrapper around wakeup.
*/
#define thread_wakeup wakeup
+#if !defined(UVM)
int useracc __P((caddr_t, int, int));
int vm_allocate __P((vm_map_t, vm_offset_t *, vm_size_t,
boolean_t));
@@ -118,35 +130,46 @@ int vm_allocate_with_pager __P((vm_map_t, vm_offset_t *,
int vm_coredump __P((struct proc *, struct vnode *, struct ucred *,
struct core *));
int vm_deallocate __P((vm_map_t, vm_offset_t, vm_size_t));
+#endif
int vm_fault __P((vm_map_t, vm_offset_t, vm_prot_t, boolean_t));
void vm_fault_copy_entry __P((vm_map_t,
vm_map_t, vm_map_entry_t, vm_map_entry_t));
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
+#if !defined(UVM)
#ifdef __FORK_BRAINDAMAGE
int vm_fork __P((struct proc *, struct proc *));
#else
void vm_fork __P((struct proc *, struct proc *));
#endif
+#endif
int vm_inherit __P((vm_map_t,
vm_offset_t, vm_size_t, vm_inherit_t));
+#if !defined(UVM)
void vm_init_limits __P((struct proc *));
+#endif
void vm_mem_init __P((void));
+#if !defined(UVM)
int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t,
vm_prot_t, vm_prot_t, int, caddr_t, vm_offset_t));
+#endif
int vm_protect __P((vm_map_t,
vm_offset_t, vm_size_t, boolean_t, vm_prot_t));
void vm_set_page_size __P((void));
void vmmeter __P((void));
+#if !defined(UVM)
struct vmspace *vmspace_alloc __P((vm_offset_t, vm_offset_t, int));
struct vmspace *vmspace_fork __P((struct vmspace *));
void vmspace_free __P((struct vmspace *));
+#endif
void vmtotal __P((struct vmtotal *));
void vnode_pager_setsize __P((struct vnode *, u_long));
void vnode_pager_umount __P((struct mount *));
boolean_t vnode_pager_uncache __P((struct vnode *));
+#if !defined(UVM)
int vslock __P((caddr_t, u_int));
int vsunlock __P((caddr_t, u_int));
+#endif
/* Machine dependent portion */
void vmapbuf __P((struct buf *, vm_size_t));
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 0e2a13c6291..cbf765aab9e 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_fault.c,v 1.13 1997/11/06 05:59:32 csapuntz Exp $ */
-/* $NetBSD: vm_fault.c,v 1.20 1997/02/18 13:39:33 mrg Exp $ */
+/* $OpenBSD: vm_fault.c,v 1.14 1998/03/01 00:38:04 niklas Exp $ */
+/* $NetBSD: vm_fault.c,v 1.21 1998/01/31 04:02:39 ross Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -245,7 +245,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
PAGE_ASSERT_WAIT(m, !change_wiring);
UNLOCK_THINGS;
- thread_block();
+ thread_block("mFltbsy");
wait_result = current_thread()->wait_result;
vm_object_deallocate(first_object);
if (wait_result != THREAD_AWAKENED)
@@ -255,7 +255,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
PAGE_ASSERT_WAIT(m, !change_wiring);
UNLOCK_THINGS;
cnt.v_intrans++;
- thread_block();
+ thread_block("mFltbsy2");
vm_object_deallocate(first_object);
goto RetryFault;
#endif
@@ -300,7 +300,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
if (m == NULL) {
UNLOCK_AND_DEALLOCATE;
- VM_WAIT;
+ vm_wait("fVfault1");
goto RetryFault;
}
}
@@ -574,7 +574,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
copy_object->ref_count--;
vm_object_unlock(copy_object);
UNLOCK_THINGS;
- thread_block();
+ thread_block("mCpybsy");
wait_result =
current_thread()->wait_result;
vm_object_deallocate(first_object);
@@ -592,7 +592,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
copy_object->ref_count--;
vm_object_unlock(copy_object);
UNLOCK_THINGS;
- thread_block();
+ thread_block("mCpybsy2");
vm_object_deallocate(first_object);
goto RetryFault;
#endif
@@ -627,7 +627,7 @@ vm_fault(map, vaddr, fault_type, change_wiring)
copy_object->ref_count--;
vm_object_unlock(copy_object);
UNLOCK_AND_DEALLOCATE;
- VM_WAIT;
+ vm_wait("fCopy");
goto RetryFault;
}
@@ -986,7 +986,7 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
dst_m = vm_page_alloc(dst_object, dst_offset);
if (dst_m == NULL) {
vm_object_unlock(dst_object);
- VM_WAIT;
+ vm_wait("fVm_copy");
vm_object_lock(dst_object);
}
} while (dst_m == NULL);
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 2542fa235fe..88c3322a6d7 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_glue.c,v 1.28 1998/02/23 20:15:54 niklas Exp $ */
+/* $OpenBSD: vm_glue.c,v 1.29 1998/03/01 00:38:05 niklas Exp $ */
/* $NetBSD: vm_glue.c,v 1.55.4.1 1996/06/13 17:25:45 cgd Exp $ */
/*
@@ -441,7 +441,7 @@ loop:
p->p_pid, p->p_comm, cnt.v_free_count);
#endif
(void)splhigh();
- VM_WAIT;
+ vm_wait("fLowmem");
(void)spl0();
#ifdef DEBUG
if (swapdebug & SDB_FOLLOW)
@@ -557,78 +557,3 @@ swapout(p)
p->p_swtime = 0;
++cnt.v_swpout;
}
-
-/*
- * The rest of these routines fake thread handling
- */
-
-void
-assert_wait(event, ruptible)
- void *event;
- boolean_t ruptible;
-{
-#ifdef lint
- ruptible++;
-#endif
- curproc->p_thread = event;
-}
-
-void
-thread_block()
-{
- int s = splhigh();
-
- if (curproc->p_thread)
- tsleep(curproc->p_thread, PVM, "thrd_block", 0);
- splx(s);
-}
-
-void
-thread_sleep_msg(event, lock, ruptible, msg)
- void *event;
- simple_lock_t lock;
- boolean_t ruptible;
- char *msg;
-{
- int s = splhigh();
-
-#ifdef lint
- ruptible++;
-#endif
- curproc->p_thread = event;
- simple_unlock(lock);
- if (curproc->p_thread)
- tsleep(event, PVM, msg, 0);
- splx(s);
-}
-
-/*
- * DEBUG stuff
- */
-
-int indent = 0;
-
-#include <machine/stdarg.h> /* see subr_prf.c */
-
-/*ARGSUSED2*/
-void
-#if __STDC__
-iprintf(int (*pr)(const char *, ...), const char *fmt, ...)
-#else
-iprintf(pr, fmt /* , va_alist */)
- void (*pr)();
- char *fmt;
- /* va_dcl */
-#endif
-{
- register int i;
- va_list ap;
-
- for (i = indent; i >= 8; i -= 8)
- (*pr)("\t");
- while (--i >= 0)
- (*pr)(" ");
- va_start(ap, fmt);
- (*pr)("%:", fmt, ap);
- va_end(ap);
-}
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index 8f353eba930..6b46acbf01d 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_init.c,v 1.2 1996/08/02 00:06:00 niklas Exp $ */
-/* $NetBSD: vm_init.c,v 1.9 1994/06/29 06:48:00 cgd Exp $ */
+/* $OpenBSD: vm_init.c,v 1.3 1998/03/01 00:38:06 niklas Exp $ */
+/* $NetBSD: vm_init.c,v 1.11 1998/01/09 06:00:50 thorpej Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -70,6 +70,7 @@
*/
#include <sys/param.h>
+#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -84,7 +85,7 @@
void vm_mem_init()
{
-#ifndef MACHINE_NONCONTIG
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
extern vm_offset_t avail_start, avail_end;
extern vm_offset_t virtual_avail, virtual_end;
#else
@@ -96,8 +97,11 @@ void vm_mem_init()
* From here on, all physical memory is accounted for,
* and we use only virtual addresses.
*/
- vm_set_page_size();
-#ifndef MACHINE_NONCONTIG
+ if (page_shift == 0) {
+ printf("vm_mem_init: WARN: MD code did not set page size\n");
+ vm_set_page_size();
+ }
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
vm_page_startup(&avail_start, &avail_end);
#else
vm_page_bootstrap(&start, &end);
@@ -106,13 +110,13 @@ void vm_mem_init()
/*
* Initialize other VM packages
*/
-#ifndef MACHINE_NONCONTIG
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
vm_object_init(virtual_end - VM_MIN_KERNEL_ADDRESS);
#else
vm_object_init(end - VM_MIN_KERNEL_ADDRESS);
#endif
vm_map_startup();
-#ifndef MACHINE_NONCONTIG
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
kmem_init(virtual_avail, virtual_end);
pmap_init(avail_start, avail_end);
#else
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 51be546be26..ea3953c457d 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_kern.c,v 1.9 1998/02/23 20:22:17 niklas Exp $ */
+/* $OpenBSD: vm_kern.c,v 1.10 1998/03/01 00:38:08 niklas Exp $ */
/* $NetBSD: vm_kern.c,v 1.17.6.1 1996/06/13 17:21:28 cgd Exp $ */
/*
@@ -177,7 +177,7 @@ kmem_alloc(map, size)
while ((mem = vm_page_alloc(kernel_object, offset + i)) ==
NULL) {
vm_object_unlock(kernel_object);
- VM_WAIT;
+ vm_wait("fKmwire");
vm_object_lock(kernel_object);
}
vm_page_zero_fill(mem);
@@ -241,7 +241,7 @@ kmem_suballoc(parent, min, max, size, pageable)
size = round_page(size);
- *min = (vm_offset_t) vm_map_min(parent);
+ *min = (vm_offset_t)vm_map_min(parent);
ret = vm_map_find(parent, NULL, (vm_offset_t)0, min, size, TRUE);
if (ret != KERN_SUCCESS) {
printf("kmem_suballoc: bad status return of %d.\n", ret);
@@ -417,7 +417,7 @@ kmem_alloc_wait(map, size)
}
assert_wait(map, TRUE);
vm_map_unlock(map);
- thread_block();
+ thread_block("mKmwait");
}
vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
vm_map_unlock(map);
diff --git a/sys/vm/vm_kern.h b/sys/vm/vm_kern.h
index e46ee18b458..37b47261a57 100644
--- a/sys/vm/vm_kern.h
+++ b/sys/vm/vm_kern.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_kern.h,v 1.2 1996/08/02 00:06:01 niklas Exp $ */
-/* $NetBSD: vm_kern.h,v 1.9 1994/06/29 06:48:03 cgd Exp $ */
+/* $OpenBSD: vm_kern.h,v 1.3 1998/03/01 00:38:09 niklas Exp $ */
+/* $NetBSD: vm_kern.h,v 1.11 1998/02/10 14:08:58 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -67,9 +67,18 @@
/* Kernel memory management definitions. */
+#if defined(UVM)
+extern vm_map_t buffer_map;
+extern vm_map_t exec_map;
+extern vm_map_t kernel_map;
+extern vm_map_t kmem_map;
+extern vm_map_t mb_map;
+extern vm_map_t phys_map;
+#else
vm_map_t buffer_map;
vm_map_t exec_map;
vm_map_t kernel_map;
vm_map_t kmem_map;
vm_map_t mb_map;
vm_map_t phys_map;
+#endif
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 9c9aca5d679..ebe5fa82592 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_map.c,v 1.12 1998/02/03 01:27:09 millert Exp $ */
+/* $OpenBSD: vm_map.c,v 1.13 1998/03/01 00:38:11 niklas Exp $ */
/* $NetBSD: vm_map.c,v 1.23 1996/02/10 00:08:08 christos Exp $ */
/*
@@ -136,8 +136,16 @@
* maps and requires map entries.
*/
+#if defined(MACHINE_NEW_NONCONTIG)
+u_int8_t kentry_data_store[MAX_KMAP*sizeof(struct vm_map) +
+ MAX_KMAPENT*sizeof(struct vm_map_entry)];
+vm_offset_t kentry_data = (vm_offset_t) kentry_data_store;
+vm_size_t kentry_data_size = sizeof(kentry_data_store);
+#else
+/* NUKE NUKE NUKE */
vm_offset_t kentry_data;
vm_size_t kentry_data_size;
+#endif
vm_map_entry_t kentry_free;
vm_map_t kmap_free;
@@ -160,6 +168,12 @@ vm_map_startup()
vm_map_t mp;
/*
+ * zero kentry area
+ * XXX necessary?
+ */
+ bzero((caddr_t)kentry_data, kentry_data_size);
+
+ /*
* Static map structures for allocation before initialization of
* kernel map or kmem map. vm_map_create knows how to deal with them.
*/
@@ -197,11 +211,24 @@ vmspace_alloc(min, max, pageable)
register struct vmspace *vm;
if (mapvmpgcnt == 0 && mapvm == 0) {
-#ifndef MACHINE_NONCONTIG
- mapvmpgcnt = ((last_page-first_page) * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
-#else
- mapvmpgcnt = (vm_page_count * sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
-#endif
+#if defined(MACHINE_NEW_NONCONTIG)
+ int vm_page_count = 0;
+ int lcv;
+
+ for (lcv = 0; lcv < vm_nphysseg; lcv++)
+ vm_page_count += (vm_physmem[lcv].end -
+ vm_physmem[lcv].start);
+
+ mapvmpgcnt = (vm_page_count *
+ sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
+
+#elif defined(MACHINE_NONCONTIG)
+ mapvmpgcnt = (vm_page_count *
+ sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
+#else /* must be contig */
+ mapvmpgcnt = ((last_page-first_page) *
+ sizeof(struct vm_map_entry) + PAGE_SIZE - 1) / PAGE_SIZE;
+#endif /* contig */
mapvm_start = mapvm = kmem_alloc_pageable(kernel_map,
mapvmpgcnt * PAGE_SIZE);
mapvmmax = mapvm_start + mapvmpgcnt * PAGE_SIZE;
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 7140ad1be98..63ca52ac0db 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_map.h,v 1.7 1997/11/11 20:16:41 millert Exp $ */
+/* $OpenBSD: vm_map.h,v 1.8 1998/03/01 00:38:12 niklas Exp $ */
/* $NetBSD: vm_map.h,v 1.11 1995/03/26 20:39:10 jtc Exp $ */
/*
@@ -72,6 +72,10 @@
#ifndef _VM_MAP_
#define _VM_MAP_
+#ifdef UVM
+#include <uvm/uvm_anon.h>
+#endif
+
/*
* Types defined:
*
@@ -84,12 +88,17 @@
* Objects which live in maps may be either VM objects, or
* another map (called a "sharing map") which denotes read-write
* sharing with other maps.
+ *
+ * XXXCDC: private pager data goes here now
*/
union vm_map_object {
struct vm_object *vm_object; /* object object */
struct vm_map *share_map; /* share map */
struct vm_map *sub_map; /* belongs to another map */
+#ifdef UVM
+ struct uvm_object *uvm_obj; /* UVM OBJECT */
+#endif /* UVM */
};
/*
@@ -105,16 +114,30 @@ struct vm_map_entry {
vm_offset_t end; /* end address */
union vm_map_object object; /* object I point to */
vm_offset_t offset; /* offset into object */
+#if defined(UVM)
+ /* etype is a bitmap that replaces the following 4 items */
+ int etype; /* entry type */
+#else
boolean_t is_a_map; /* Is "object" a map? */
boolean_t is_sub_map; /* Is "object" a submap? */
/* Only in sharing maps: */
boolean_t copy_on_write; /* is data copy-on-write */
boolean_t needs_copy; /* does object need to be copied */
+#endif
/* Only in task maps: */
vm_prot_t protection; /* protection code */
vm_prot_t max_protection; /* maximum protection */
vm_inherit_t inheritance; /* inheritance */
int wired_count; /* can be paged if = 0 */
+#ifdef UVM
+ struct vm_aref aref; /* anonymous overlay */
+ int advice; /* madvise advice */
+#define uvm_map_entry_stop_copy flags
+ u_int8_t flags; /* flags */
+
+#define UVM_MAP_STATIC 0x01 /* static map entry */
+
+#endif /* UVM */
};
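The etype bitmap above folds the four old boolean_t fields into a single int. A rough sketch of what that buys; the flag names and values here are invented for illustration only, the real UVM_ET_* definitions live in uvm_map.h and are not part of this diff:

/*
 * Illustration only: hypothetical etype bits standing in for the old
 * boolean_t fields of struct vm_map_entry.
 */
#define ET_SUBMAP	0x01	/* "object" is a submap */
#define ET_COPYONWRITE	0x02	/* data is copy-on-write */
#define ET_NEEDSCOPY	0x04	/* object still needs to be copied */

struct demo_entry {
	int etype;
};

static int
demo_entry_is_cow(const struct demo_entry *e)
{
	/* a one-word bit test replaces reading a separate boolean_t field */
	return (e->etype & ET_COPYONWRITE) != 0;
}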
/*
@@ -198,6 +221,22 @@ typedef struct {
(map)->lk_flags &= ~LK_CANRECURSE; \
simple_unlock(&(map)->lk_interlock); \
}
+#if defined(UVM) && defined(_KERNEL)
+/* XXX: clean up later */
+static boolean_t vm_map_lock_try __P((vm_map_t));
+
+static __inline boolean_t
+vm_map_lock_try(map)
+	vm_map_t map;
+{
+ if (lockmgr(&(map)->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curproc) != 0)
+ return(FALSE);
+ map->timestamp++;
+ return(TRUE);
+}
+#endif
+
/*
* Functions implemented as macros
*/
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index 0d63c2bc9cf..0364c169ea9 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_meter.c,v 1.7 1997/11/06 05:59:35 csapuntz Exp $ */
+/* $OpenBSD: vm_meter.c,v 1.8 1998/03/01 00:38:14 niklas Exp $ */
/* $NetBSD: vm_meter.c,v 1.18 1996/02/05 01:53:59 christos Exp $ */
/*
@@ -47,7 +47,7 @@
struct loadavg averunnable; /* load average, of runnable procs */
int maxslp = MAXSLP;
-#ifndef MACHINE_NONCONTIG
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
int saferss = SAFERSS;
#endif /* MACHINE_NONCONTIG */
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index b8b6d2e4196..9b96a03f4a2 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_object.c,v 1.20 1997/11/06 05:59:35 csapuntz Exp $ */
+/* $OpenBSD: vm_object.c,v 1.21 1998/03/01 00:38:15 niklas Exp $ */
/* $NetBSD: vm_object.c,v 1.46 1997/03/30 20:56:12 mycroft Exp $ */
/*-
@@ -391,7 +391,7 @@ vm_object_terminate(object)
* Wait until the pageout daemon is through with the object or a
* potential collapse operation is finished.
*/
- vm_object_paging_wait(object);
+ vm_object_paging_wait(object,"vmterm");
/*
* Detach the object from its shadow if we are the shadow's
@@ -507,7 +507,7 @@ again:
/*
* Wait until the pageout daemon is through with the object.
*/
- vm_object_paging_wait(object);
+ vm_object_paging_wait(object,"vclean");
/*
* Loop through the object page list cleaning as necessary.
@@ -1201,7 +1201,7 @@ vm_object_overlay(object)
vm_object_unlock(object);
retry:
- vm_object_paging_wait(backing_object);
+ vm_object_paging_wait(backing_object,"vpagew");
/*
* While we were asleep, the parent object might have been deleted. If
@@ -1318,7 +1318,7 @@ retry:
paged_offset);
if (backing_page == NULL) {
vm_object_unlock(backing_object);
- VM_WAIT;
+ vm_wait("fVmcollapse");
vm_object_lock(backing_object);
goto retry;
}
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 7c4522a0740..53114a4e8c1 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_object.h,v 1.8 1997/11/06 05:59:36 csapuntz Exp $ */
+/* $OpenBSD: vm_object.h,v 1.9 1998/03/01 00:38:17 niklas Exp $ */
/* $NetBSD: vm_object.h,v 1.16 1995/03/29 22:10:28 briggs Exp $ */
/*
@@ -150,7 +150,7 @@ vm_object_t kmem_object;
do { \
(object)->flags |= OBJ_WAITING; \
thread_sleep_msg((event), &(object)->Lock, \
- (interruptible), (where)); \
+ (interruptible), (where), 0); \
} while (0)
#define vm_object_wakeup(object) \
@@ -184,11 +184,11 @@ vm_object_t kmem_object;
vm_object_wakeup((object)); \
} while (0)
-#define vm_object_paging_wait(object) \
+#define vm_object_paging_wait(object,msg) \
do { \
while (vm_object_paging((object))) { \
vm_object_sleep((object), (object), FALSE, \
- "vospgw"); \
+ (msg)); \
vm_object_lock((object)); \
} \
} while (0)
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 5e68e78d814..365acb01bdb 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_page.c,v 1.10 1998/02/06 08:32:47 niklas Exp $ */
-/* $NetBSD: vm_page.c,v 1.31 1997/06/06 23:10:23 thorpej Exp $ */
+/* $OpenBSD: vm_page.c,v 1.11 1998/03/01 00:38:18 niklas Exp $ */
+/* $NetBSD: vm_page.c,v 1.41 1998/02/08 18:24:52 thorpej Exp $ */
#define VM_PAGE_ALLOC_MEMORY_STATS
@@ -75,7 +75,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
+ * @(#)vm_page.c 8.3 (Berkeley) 3/21/94
*
*
* Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -105,12 +105,13 @@
*/
/*
- * Resident memory management module.
+ * Resident memory management module.
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
+#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@@ -119,25 +120,49 @@
#include <machine/cpu.h>
-#ifdef MACHINE_NONCONTIG
+#define VERY_LOW_MEM() (cnt.v_free_count <= vm_page_free_reserved)
+#define KERN_OBJ(object) ((object) == kernel_object || (object) == kmem_object)
+
+int vm_page_free_reserved = 10;
+
+#if defined(MACHINE_NEW_NONCONTIG)
+
/*
- * These variables record the values returned by vm_page_bootstrap,
- * for debugging purposes. The implementation of pmap_steal_memory
- * and pmap_startup here also uses them internally.
+ * physical memory config is stored in vm_physmem.
*/
-vm_offset_t virtual_space_start;
-vm_offset_t virtual_space_end;
-#endif /* MACHINE_NONCONTIG */
+struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
+int vm_nphysseg = 0;
+static int vm_page_lost_count = 0; /* XXXCDC: DEBUG DEBUG */
+
+#endif
+
+#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
/*
- * Associated with page of user-allocatable memory is a
- * page structure.
+ * These variables record the values returned by vm_page_bootstrap,
+ * for debugging purposes.
+ *
+ * The implementation of vm_bootstrap_steal_memory here also uses
+ * them internally.
+ */
+static vm_offset_t virtual_space_start;
+static vm_offset_t virtual_space_end;
+
+vm_offset_t vm_bootstrap_steal_memory __P((vm_size_t));
+#endif
+
+/*
+ * Associated with page of user-allocatable memory is a
+ * page structure.
*/
struct pglist *vm_page_buckets; /* Array of buckets */
int vm_page_bucket_count = 0; /* How big is array? */
int vm_page_hash_mask; /* Mask for hash function */
simple_lock_data_t bucket_lock; /* lock for all buckets XXX */
+#if defined(MACHINE_NEW_NONCONTIG)
+struct pglist vm_page_bootbucket; /* bootstrap bucket */
+#endif
struct pglist vm_page_queue_free;
struct pglist vm_page_queue_active;
@@ -149,26 +174,55 @@ simple_lock_data_t vm_page_queue_free_lock;
boolean_t vm_page_startup_initialized;
vm_page_t vm_page_array;
+#if defined(MACHINE_NEW_NONCONTIG)
+ /* NOTHING NEEDED HERE */
+#elif defined(MACHINE_NONCONTIG)
+/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+u_long first_page;
int vm_page_count;
-#ifndef MACHINE_NONCONTIG
+#else
+/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
long first_page;
long last_page;
vm_offset_t first_phys_addr;
vm_offset_t last_phys_addr;
-#else
-u_long first_page;
-#endif /* MACHINE_NONCONTIG */
+int vm_page_count;
+#endif
vm_size_t page_mask;
int page_shift;
+#if defined(MACHINE_NEW_NONCONTIG)
+/*
+ * local prototypes
+ */
+
+#if !defined(PMAP_STEAL_MEMORY)
+static boolean_t vm_page_physget __P((vm_offset_t *));
+#endif
+#endif
+
+/*
+ * macros
+ */
+
/*
- * vm_set_page_size:
+ * vm_page_hash:
*
- * Sets the page size, perhaps based upon the memory
- * size. Must be called before any use of page-size
- * dependent functions.
+ * Distributes the object/offset key pair among hash buckets.
*
- * Sets page_shift and page_mask from cnt.v_page_size.
+ * NOTE: This macro depends on vm_page_bucket_count being a power of 2.
+ */
+#define vm_page_hash(object, offset) \
+ (((unsigned long)object+(unsigned long)atop(offset))&vm_page_hash_mask)
+
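Because vm_page_hash_mask is always bucket_count - 1 with bucket_count a power of two, the AND in vm_page_hash() is equivalent to a modulo without a division. A standalone sketch with made-up inputs:

/*
 * Standalone demo of the power-of-two masking used by vm_page_hash():
 * with bucket_count a power of 2, (key & (bucket_count - 1)) equals
 * key % bucket_count.  The values below are arbitrary.
 */
#include <stdio.h>

int
main(void)
{
	unsigned long object = 0xf0123400UL;	/* fake object pointer */
	unsigned long pageno = 37;		/* fake atop(offset) */
	unsigned long bucket_count = 1024;	/* must be a power of 2 */
	unsigned long mask = bucket_count - 1;
	unsigned long key = object + pageno;

	printf("mask: %lu, mod: %lu\n", key & mask, key % bucket_count);
	return 0;
}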
+/*
+ * vm_set_page_size:
+ *
+ * Sets the page size, perhaps based upon the memory
+ * size. Must be called before any use of page-size
+ * dependent functions.
+ *
+ * Sets page_shift and page_mask from cnt.v_page_size.
*/
void
vm_set_page_size()
@@ -184,73 +238,611 @@ vm_set_page_size()
break;
}
+#if defined(MACHINE_NEW_NONCONTIG)
+/*
+ * vm_page_bootstrap: initialize the resident memory module (called
+ * from vm_mem_init()).
+ *
+ * - startp and endp are out params which return the boundaries of the
+ * free part of the kernel's virtual address space.
+ */
+void
+vm_page_bootstrap(startp, endp)
+ vm_offset_t *startp, *endp; /* OUT, OUT */
+{
+ vm_offset_t paddr;
+ vm_page_t pagearray;
+ int lcv, freepages, pagecount, n, i;
+
+ /*
+ * first init all the locks and queues.
+ */
+ simple_lock_init(&vm_page_queue_free_lock);
+ simple_lock_init(&vm_page_queue_lock);
+ TAILQ_INIT(&vm_page_queue_free);
+ TAILQ_INIT(&vm_page_queue_active);
+ TAILQ_INIT(&vm_page_queue_inactive);
+
+ /*
+ * init the <OBJ,OFFSET> => <PAGE> hash table buckets. for now
+ * we just have one bucket (the bootstrap bucket). later on we
+ * will malloc() new buckets as we dynamically resize the hash table.
+ */
+ vm_page_bucket_count = 1;
+ vm_page_hash_mask = 0;
+ vm_page_buckets = &vm_page_bootbucket;
+ TAILQ_INIT(vm_page_buckets);
+ simple_lock_init(&bucket_lock);
+
+ /*
+ * before calling this function the MD code is expected to register
+ * some free RAM with the vm_page_physload() function. our job
+ * now is to allocate vm_page structures for this preloaded memory.
+ */
+ if (vm_nphysseg == 0)
+ panic("vm_page_bootstrap: no memory pre-allocated");
+
+ /*
+ * first calculate the number of free pages... note that start/end
+ * are inclusive so you have to add one to get the number of pages.
+ *
+ * note that we use start/end rather than avail_start/avail_end.
+ * this allows us to allocate extra vm_page structures in case we
+ * want to return some memory to the pool after booting.
+ */
+ freepages = 0;
+ for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+ freepages = freepages +
+ (vm_physmem[lcv].end - vm_physmem[lcv].start);
+ }
+
+ /*
+ * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
+ * use. for each page of memory we use we need a vm_page structure.
+ * thus, the total number of pages we can use is the total size of
+ * the memory divided by the PAGE_SIZE plus the size of the vm_page
+ * structure. we add one to freepages as a fudge factor to avoid
+ * truncation errors (since we can only allocate in terms of whole
+ * pages).
+ */
+ pagecount = (PAGE_SIZE * (freepages + 1)) /
+ (PAGE_SIZE + sizeof(struct vm_page));
+ pagearray = (vm_page_t)
+ vm_bootstrap_steal_memory(pagecount * sizeof(struct vm_page));
+ bzero(pagearray, pagecount * sizeof(struct vm_page));
+
+ /*
+ * now init the page frames
+ */
+ for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+
+ n = vm_physmem[lcv].end - vm_physmem[lcv].start;
+ if (n > pagecount) {
+			printf("vm_page_bootstrap: lost %d page(s) in init\n",
+ n - pagecount);
+ vm_page_lost_count += (n - pagecount);
+ n = pagecount;
+ }
+
+ /* set up page array pointers */
+ vm_physmem[lcv].pgs = pagearray;
+ pagearray += n;
+ pagecount -= n;
+ vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
+
+ /* init and free vm_pages (we've already bzero'd them) */
+ paddr = ptoa(vm_physmem[lcv].start);
+ for (i = 0; i < n; i++, paddr += PAGE_SIZE) {
+ vm_physmem[lcv].pgs[i].phys_addr = paddr;
+ if (atop(paddr) >= vm_physmem[lcv].avail_start &&
+ atop(paddr) <= vm_physmem[lcv].avail_end)
+ vm_page_free(&vm_physmem[lcv].pgs[i]);
+ }
+ }
+
+ /*
+ * pass up the values of virtual_space_start and virtual_space_end
+ * (obtained by vm_bootstrap_steal_memory) to the upper layers of
+ * the VM.
+ */
+ *startp = round_page(virtual_space_start);
+ *endp = trunc_page(virtual_space_end);
+
+ /*
+ * init pagedaemon lock
+ */
+ simple_lock_init(&vm_pages_needed_lock);
+}
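The pagecount formula in vm_page_bootstrap() trades vm_page structures against usable pages: each managed page costs one page of RAM plus one struct vm_page. A standalone numeric sketch; the page size, structure size and free-page count are stand-in values:

/*
 * Standalone sketch of the vm_page_bootstrap() sizing step:
 *	pagecount = (PAGE_SIZE * (freepages + 1)) /
 *	    (PAGE_SIZE + sizeof(struct vm_page))
 * All numbers below are hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	unsigned long page_size = 4096;
	unsigned long vm_page_size = 64;	/* stand-in for sizeof(struct vm_page) */
	unsigned long freepages = 8192;		/* hypothetical free RAM, in pages */
	unsigned long pagecount;

	pagecount = (page_size * (freepages + 1)) / (page_size + vm_page_size);
	printf("%lu free pages -> %lu managed pages\n", freepages, pagecount);
	return 0;
}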
+
+/*
+ * vm_bootstrap_steal_memory: steal memory from physmem for bootstrapping
+ */
+vm_offset_t
+vm_bootstrap_steal_memory(size)
+ vm_size_t size;
+{
+#if defined(PMAP_STEAL_MEMORY)
+ vm_offset_t addr;
+
+ /*
+ * Defer this to machine-dependent code; we may need to allocate
+ * from a direct-mapped segment.
+ */
+ addr = pmap_steal_memory(size, &virtual_space_start,
+ &virtual_space_end);
+
+ /* round it the way we like it */
+ virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
+
+ return (addr);
+#else /* ! PMAP_STEAL_MEMORY */
+ vm_offset_t addr, vaddr, paddr;
+
+ /* round to page size */
+ size = round_page(size);
+
+ /*
+ * on first call to this function init ourselves. we detect this
+ * by checking virtual_space_start/end which are in the zero'd BSS
+ * area.
+ */
+ if (virtual_space_start == virtual_space_end) {
+ pmap_virtual_space(&virtual_space_start, &virtual_space_end);
+
+ /* round it the way we like it */
+ virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
+ }
+
+ /*
+ * allocate virtual memory for this request
+ */
+ addr = virtual_space_start;
+ virtual_space_start += size;
+
+ /*
+ * allocate and mapin physical pages to back new virtual pages
+ */
+ for (vaddr = round_page(addr); vaddr < addr + size;
+ vaddr += PAGE_SIZE) {
+ if (!vm_page_physget(&paddr))
+ panic("vm_bootstrap_steal_memory: out of memory");
+
+ /* XXX: should be wired, but some pmaps don't like that ... */
+ pmap_enter(pmap_kernel(), vaddr, paddr,
+ VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ }
+ return(addr);
+#endif /* PMAP_STEAL_MEMORY */
+}
+
+#if !defined(PMAP_STEAL_MEMORY)
+/*
+ * vm_page_physget: "steal" one page from the vm_physmem structure.
+ *
+ * - attempt to allocate it off the end of a segment in which the "avail"
+ * values match the start/end values. if we can't do that, then we
+ * will advance both values (making them equal, and removing some
+ * vm_page structures from the non-avail area).
+ * - return false if out of memory.
+ */
+static boolean_t
+vm_page_physget(paddrp)
+ vm_offset_t *paddrp;
+
+{
+ int lcv, x;
+
+ /* pass 1: try allocating from a matching end */
+#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
+ for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
+#else
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+#endif
+ {
+ if (vm_physmem[lcv].pgs)
+ panic("vm_page_physget: called _after_ bootstrap");
+
+ /* try from front */
+ if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
+ vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
+ *paddrp = ptoa(vm_physmem[lcv].avail_start);
+ vm_physmem[lcv].avail_start++;
+ vm_physmem[lcv].start++;
+
+ /* nothing left? nuke it */
+ if (vm_physmem[lcv].avail_start ==
+ vm_physmem[lcv].end) {
+ if (vm_nphysseg == 1)
+ panic("vm_page_physget: out of memory!");
+ vm_nphysseg--;
+ for (x = lcv; x < vm_nphysseg; x++)
+ /* structure copy */
+ vm_physmem[x] = vm_physmem[x+1];
+ }
+ return(TRUE);
+ }
+
+ /* try from rear */
+ if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
+ vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
+ *paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
+ vm_physmem[lcv].avail_end--;
+ vm_physmem[lcv].end--;
+
+ /* nothing left? nuke it */
+ if (vm_physmem[lcv].avail_end ==
+ vm_physmem[lcv].start) {
+ if (vm_nphysseg == 1)
+ panic("vm_page_physget: out of memory!");
+ vm_nphysseg--;
+ for (x = lcv; x < vm_nphysseg; x++)
+ /* structure copy */
+ vm_physmem[x] = vm_physmem[x+1];
+ }
+ return(TRUE);
+ }
+ }
+
+ /* pass2: forget about matching ends, just allocate something */
+#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
+ for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
+#else
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+#endif
+ {
+ /* any room in this bank? */
+ if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
+ continue; /* nope */
+
+ *paddrp = ptoa(vm_physmem[lcv].avail_start);
+ vm_physmem[lcv].avail_start++;
+ vm_physmem[lcv].start = vm_physmem[lcv].avail_start; /* truncate! */
+
+ /* nothing left? nuke it */
+ if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
+ if (vm_nphysseg == 1)
+ panic("vm_page_physget: out of memory!");
+ vm_nphysseg--;
+ for (x = lcv; x < vm_nphysseg; x++)
+ vm_physmem[x] = vm_physmem[x+1]; /* structure copy */
+ }
+ return(TRUE);
+ }
+
+ return(FALSE); /* whoops! */
+}
+#endif /* ! PMAP_STEAL_MEMORY */
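A small standalone illustration of the "matching end" case that vm_page_physget() prefers: when avail_start still equals start, stealing a page just advances both, so no vm_page structure is later wasted on a non-avail area. Segment boundaries here are invented:

/*
 * Standalone toy model of vm_page_physget()'s pass-1 front trim.
 * The boundaries are page frame numbers and purely hypothetical.
 */
#include <stdio.h>

struct toy_seg {
	unsigned long start, end;		/* whole segment */
	unsigned long avail_start, avail_end;	/* free part */
};

int
main(void)
{
	struct toy_seg seg = { 0x100, 0x200, 0x100, 0x200 };
	unsigned long stolen_pfn;

	/* front matches: steal the first page and shrink the segment */
	if (seg.avail_start == seg.start && seg.avail_start < seg.avail_end) {
		stolen_pfn = seg.avail_start;
		seg.avail_start++;
		seg.start++;
		printf("stole pfn 0x%lx, segment now 0x%lx-0x%lx\n",
		    stolen_pfn, seg.start, seg.end);
	}
	return 0;
}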
+
+/*
+ * vm_page_physload: load physical memory into VM system
+ *
+ * - all args are PFs
+ * - all pages in start/end get vm_page structures
+ * - areas marked by avail_start/avail_end get added to the free page pool
+ * - we are limited to VM_PHYSSEG_MAX physical memory segments
+ */
+void
+vm_page_physload(start, end, avail_start, avail_end)
+ vm_offset_t start, end, avail_start, avail_end;
+{
+ struct vm_page *pgs;
+ struct vm_physseg *ps;
+ int preload, lcv, npages, x;
+
+ if (page_shift == 0)
+ panic("vm_page_physload: page size not set!");
+
+ /*
+ * do we have room?
+ */
+ if (vm_nphysseg == VM_PHYSSEG_MAX) {
+ printf("vm_page_physload: unable to load physical memory segment\n");
+ printf("\t%d segments allocated, ignoring 0x%lx -> 0x%lx\n",
+ VM_PHYSSEG_MAX, start, end);
+ return;
+ }
+
+ /*
+ * check to see if this is a "preload" (i.e. vm_mem_init hasn't been
+ * called yet, so malloc is not available).
+ */
+ for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+ if (vm_physmem[lcv].pgs)
+ break;
+ }
+ preload = (lcv == vm_nphysseg);
+
+ /*
+ * if VM is already running, attempt to malloc() vm_page structures
+ */
+ if (!preload) {
+#if defined(VM_PHYSSEG_NOADD)
+ panic("vm_page_physload: tried to add RAM after vm_mem_init");
+#else
+/* XXXCDC: need some sort of lockout for this case */
+ vm_offset_t paddr;
+
+ /* # of pages */
+ npages = end - start;
+ MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
+ M_VMPAGE, M_NOWAIT);
+ if (pgs == NULL) {
+ printf("vm_page_physload: can not malloc vm_page structs for segment\n");
+ printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
+ return;
+ }
+ /* zero data, init phys_addr, and free pages */
+ bzero(pgs, sizeof(struct vm_page) * npages);
+ for (lcv = 0, paddr = ptoa(start); lcv < npages;
+ lcv++, paddr += PAGE_SIZE) {
+ pgs[lcv].phys_addr = paddr;
+ if (atop(paddr) >= avail_start &&
+ atop(paddr) <= avail_end)
+				vm_page_free(&pgs[lcv]);
+ }
+/* XXXCDC: incomplete: need to update v_free_count, what else? */
+/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
+#endif
+ } else {
+ /* XXX/gcc complains if these don't get init'd */
+ pgs = NULL;
+ npages = 0;
+ }
+
+ /*
+ * now insert us in the proper place in vm_physmem[]
+ */
+#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
+ /* random: put it at the end (easy!) */
+ ps = &vm_physmem[vm_nphysseg];
+
+#else
+#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
+
+ /* sort by address for binary search */
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+ if (start < vm_physmem[lcv].start)
+ break;
+ ps = &vm_physmem[lcv];
+
+ /* move back other entries, if necessary ... */
+ for (x = vm_nphysseg ; x > lcv ; x--)
+ /* structure copy */
+ vm_physmem[x] = vm_physmem[x - 1];
+
+#else
+#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
+
+ /* sort by largest segment first */
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+ if ((end - start) >
+ (vm_physmem[lcv].end - vm_physmem[lcv].start))
+ break;
+ ps = &vm_physmem[lcv];
+
+ /* move back other entries, if necessary ... */
+ for (x = vm_nphysseg ; x > lcv ; x--)
+ /* structure copy */
+ vm_physmem[x] = vm_physmem[x - 1];
+
+#else
+
+ panic("vm_page_physload: unknown physseg strategy selected!");
+
+#endif
+#endif
+#endif
+
+ ps->start = start;
+ ps->end = end;
+ ps->avail_start = avail_start;
+ ps->avail_end = avail_end;
+ if (preload) {
+ ps->pgs = NULL;
+ } else {
+ ps->pgs = pgs;
+ ps->lastpg = pgs + npages - 1;
+ }
+ vm_nphysseg++;
+
+ /*
+ * done!
+ */
+ return;
+}
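The expected call pattern for vm_page_physload() is that machine-dependent bootstrap code (typically a port's pmap_bootstrap()) registers its RAM before vm_mem_init() runs. A sketch only; the argument names below are placeholders, not symbols this diff defines:

/*
 * Sketch of MD code feeding physical memory to the new VM code.
 * avail_start/avail_end/physmem_end are placeholder byte addresses;
 * vm_page_physload() itself wants page frame numbers, hence atop().
 */
void
example_md_register_memory(vm_offset_t avail_start, vm_offset_t avail_end,
    vm_offset_t physmem_end)
{
	/*
	 * Register pages 0 up to the end of RAM, but mark only
	 * [avail_start, avail_end) as actually free (the rest holds
	 * the kernel, message buffer, ...).
	 */
	vm_page_physload(atop(0), atop(physmem_end),
	    atop(avail_start), atop(avail_end));
}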
+
+/*
+ * vm_page_physrehash: reallocate hash table based on number of
+ * free pages.
+ */
+void
+vm_page_physrehash()
+{
+ struct pglist *newbuckets, *oldbuckets;
+ struct vm_page *pg;
+ int freepages, lcv, bucketcount, s, oldcount;
+
+ /*
+ * compute number of pages that can go in the free pool
+ */
+ freepages = 0;
+ for (lcv = 0; lcv < vm_nphysseg; lcv++)
+ freepages = freepages + (vm_physmem[lcv].avail_end -
+ vm_physmem[lcv].avail_start);
+
+ /*
+ * compute number of buckets needed for this number of pages
+ */
+ bucketcount = 1;
+ while (bucketcount < freepages)
+ bucketcount = bucketcount * 2;
+
+ /*
+ * malloc new buckets
+ */
+ MALLOC(newbuckets, struct pglist *, sizeof(struct pglist) * bucketcount,
+ M_VMPBUCKET, M_NOWAIT);
+ if (newbuckets == NULL) {
+ printf("vm_page_physrehash: WARNING: could not grow page hash table\n");
+ return;
+ }
+ for (lcv = 0; lcv < bucketcount; lcv++)
+ TAILQ_INIT(&newbuckets[lcv]);
+
+ /*
+ * now replace the old buckets with the new ones and rehash everything
+ */
+ s = splimp();
+ simple_lock(&bucket_lock);
+ /* swap old for new ... */
+ oldbuckets = vm_page_buckets;
+ oldcount = vm_page_bucket_count;
+ vm_page_buckets = newbuckets;
+ vm_page_bucket_count = bucketcount;
+ vm_page_hash_mask = bucketcount - 1; /* power of 2 */
+
+ /* ... and rehash */
+ for (lcv = 0 ; lcv < oldcount ; lcv++) {
+ while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
+ TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
+ TAILQ_INSERT_TAIL(&vm_page_buckets[
+ vm_page_hash(pg->object, pg->offset)], pg, hashq);
+ }
+ }
+ simple_unlock(&bucket_lock);
+ splx(s);
+
+ /*
+ * free old bucket array if we malloc'd it previously
+ */
+ if (oldbuckets != &vm_page_bootbucket)
+ FREE(oldbuckets, M_VMPBUCKET);
+
+ /*
+ * done
+ */
+ return;
+}
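The bucket sizing loop in vm_page_physrehash() simply finds the first power of two that covers the free page count, which keeps vm_page_hash_mask = bucketcount - 1 a valid bit mask. A standalone sketch with a made-up page count:

/*
 * Standalone sketch of the vm_page_physrehash() bucket sizing.
 * The free page count is hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	int freepages = 3000;	/* hypothetical */
	int bucketcount = 1;

	while (bucketcount < freepages)
		bucketcount *= 2;

	printf("%d free pages -> %d buckets, mask 0x%x\n",
	    freepages, bucketcount, bucketcount - 1);
	return 0;
}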
+
+#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */
+
+void vm_page_physdump __P((void)); /* SHUT UP GCC */
+
+/* call from DDB */
+void
+vm_page_physdump()
+{
+ int lcv;
+
+	printf("physdump: physical memory config [segs=%d of %d]:\n",
+ vm_nphysseg, VM_PHYSSEG_MAX);
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
+ printf("0x%lx->0x%lx [0x%lx->0x%lx]\n", vm_physmem[lcv].start,
+ vm_physmem[lcv].end, vm_physmem[lcv].avail_start,
+ vm_physmem[lcv].avail_end);
+ printf("STRATEGY = ");
+
+ switch (VM_PHYSSEG_STRAT) {
+ case VM_PSTRAT_RANDOM:
+ printf("RANDOM\n");
+ break;
+
+ case VM_PSTRAT_BSEARCH:
+ printf("BSEARCH\n");
+ break;
+
+ case VM_PSTRAT_BIGFIRST:
+ printf("BIGFIRST\n");
+ break;
+
+ default:
+ printf("<<UNKNOWN>>!!!!\n");
+ }
+ printf("number of buckets = %d\n", vm_page_bucket_count);
+ printf("number of lost pages = %d\n", vm_page_lost_count);
+}
+#endif
+
+#elif defined(MACHINE_NONCONTIG)
+/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+
+/*
+ * We implement vm_page_bootstrap and vm_bootstrap_steal_memory with
+ * the help of two simpler functions:
+ *
+ * pmap_virtual_space and pmap_next_page
+ */
-#ifdef MACHINE_NONCONTIG
/*
- * vm_page_bootstrap:
+ * vm_page_bootstrap:
*
- * Initializes the resident memory module.
+ * Initializes the resident memory module.
*
- * Allocates memory for the page cells, and
- * for the object/offset-to-page hash table headers.
- * Each page cell is initialized and placed on the free list.
- * Returns the range of available kernel virtual memory.
+ * Allocates memory for the page cells, and
+ * for the object/offset-to-page hash table headers.
+ * Each page cell is initialized and placed on the free list.
+ * Returns the range of available kernel virtual memory.
*/
void
vm_page_bootstrap(startp, endp)
vm_offset_t *startp;
vm_offset_t *endp;
{
- int i;
+ unsigned int i, freepages;
register struct pglist *bucket;
-
+ vm_offset_t paddr;
+
extern vm_offset_t kentry_data;
extern vm_size_t kentry_data_size;
+
/*
- * Initialize the locks
+ * Initialize the locks
*/
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
/*
- * Initialize the queue headers for the free queue,
- * the active queue and the inactive queue.
+ * Initialize the queue headers for the free queue,
+ * the active queue and the inactive queue.
*/
TAILQ_INIT(&vm_page_queue_free);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
/*
- * Pre-allocate maps and map entries that cannot be dynamically
- * allocated via malloc(). The maps include the kernel_map and
- * kmem_map which must be initialized before malloc() will
- * work (obviously). Also could include pager maps which would
- * be allocated before kmeminit.
+ * Pre-allocate maps and map entries that cannot be dynamically
+ * allocated via malloc(). The maps include the kernel_map and
+ * kmem_map which must be initialized before malloc() will
+ * work (obviously). Also could include pager maps which would
+ * be allocated before kmeminit.
*
- * Allow some kernel map entries... this should be plenty
- * since people shouldn't be cluttering up the kernel
- * map (they should use their own maps).
+ * Allow some kernel map entries... this should be plenty
+ * since people shouldn't be cluttering up the kernel
+ * map (they should use their own maps).
*/
+
kentry_data_size = round_page(MAX_KMAP*sizeof(struct vm_map) +
MAX_KMAPENT*sizeof(struct vm_map_entry));
- kentry_data = (vm_offset_t)pmap_steal_memory(kentry_data_size);
+ kentry_data = vm_bootstrap_steal_memory(kentry_data_size);
/*
- * Validate these zone addresses.
+ * Validate these zone addresses.
*/
- bzero((caddr_t)kentry_data, kentry_data_size);
+ bzero((caddr_t) kentry_data, kentry_data_size);
/*
- * Allocate (and initialize) the virtual-to-physical
- * table hash buckets.
+ * Allocate (and initialize) the virtual-to-physical
+ * table hash buckets.
*
- * The number of buckets MUST BE a power of 2, and
- * the actual value is the next power of 2 greater
- * than the number of physical pages in the system.
+ * The number of buckets MUST BE a power of 2, and
+ * the actual value is the next power of 2 greater
+ * than the number of physical pages in the system.
*
- * Note:
- * This computation can be tweaked if desired.
+ * Note:
+ * This computation can be tweaked if desired.
*/
if (vm_page_bucket_count == 0) {
unsigned int npages = pmap_free_pages();
@@ -263,9 +855,10 @@ vm_page_bootstrap(startp, endp)
vm_page_hash_mask = vm_page_bucket_count - 1;
vm_page_buckets = (struct pglist *)
- pmap_steal_memory(vm_page_bucket_count * sizeof(*vm_page_buckets));
- bucket = vm_page_buckets;
-
+ vm_bootstrap_steal_memory(vm_page_bucket_count *
+ sizeof(*vm_page_buckets));
+ bucket = vm_page_buckets;
+
for (i = vm_page_bucket_count; i--;) {
TAILQ_INIT(bucket);
bucket++;
@@ -274,13 +867,83 @@ vm_page_bootstrap(startp, endp)
simple_lock_init(&bucket_lock);
/*
- * Machine-dependent code allocates the resident page table.
- * It uses VM_PAGE_INIT to initialize the page frames.
- * The code also returns to us the virtual space available
- * to the kernel. We don't trust the pmap module
- * to get the alignment right.
+ * We calculate how many page frames we will have and
+ * then allocate the page structures in one chunk.
+ * The calculation is non-trivial. We want:
+ *
+ * vmpages > (freepages - (vmpages / sizeof(vm_page_t)))
+ *
+ * ...which, with some algebra, becomes:
+ *
+ * vmpages > (freepages * sizeof(...) / (1 + sizeof(...)))
+ *
+ * The value of vm_page_count need not be exact, but must
+ * be large enough so vm_page_array handles the index range.
+ */
+
+ freepages = pmap_free_pages();
+ /* Fudge slightly to deal with truncation error. */
+ freepages += 1; /* fudge */
+
+ vm_page_count = (PAGE_SIZE * freepages) /
+ (PAGE_SIZE + sizeof(*vm_page_array));
+
+ vm_page_array = (vm_page_t)
+ vm_bootstrap_steal_memory(vm_page_count * sizeof(*vm_page_array));
+ bzero(vm_page_array, vm_page_count * sizeof(*vm_page_array));
+
+#ifdef DIAGNOSTIC
+ /*
+ * Initialize everything in case the holes are stepped in,
+ * and set PA to something that will cause a panic...
+ */
+ for (i = 0; i < vm_page_count; i++)
+ vm_page_array[i].phys_addr = 0xdeadbeef;
+#endif
+
+ /*
+ * Initialize the page frames. Note that some page
+ * indices may not be usable when pmap_free_pages()
+ * counts pages in a hole.
+ */
+
+ if (!pmap_next_page(&paddr))
+ panic("vm_page_bootstrap: can't get first page");
+
+ first_page = pmap_page_index(paddr);
+ for (i = 0;;) {
+ /*
+ * Initialize a page array element.
+ */
+
+ VM_PAGE_INIT(&vm_page_array[i], NULL, NULL);
+ vm_page_array[i].phys_addr = paddr;
+ vm_page_free(&vm_page_array[i]);
+
+ /*
+ * Are there any more physical pages?
+ */
+
+ if (!pmap_next_page(&paddr))
+ break;
+ i = pmap_page_index(paddr) - first_page;
+
+ /*
+ * Don't trust pmap_page_index()...
+ */
+
+ if (
+#if 0
+ i < 0 || /* can't happen, i is unsigned */
+#endif
+ i >= vm_page_count)
+ panic("vm_page_bootstrap: bad i = 0x%x", i);
+ }
+
+ /*
+ * Make sure we have nice, round values.
*/
- pmap_startup(&virtual_space_start, &virtual_space_end);
+
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
@@ -290,16 +953,75 @@ vm_page_bootstrap(startp, endp)
simple_lock_init(&vm_pages_needed_lock);
}
+vm_offset_t
+vm_bootstrap_steal_memory(size)
+ vm_size_t size;
+{
+ vm_offset_t addr, vaddr, paddr;
+
+ /*
+ * We round to page size.
+ */
+
+ size = round_page(size);
+
+ /*
+ * If this is the first call to vm_bootstrap_steal_memory,
+	 * we have to initialize ourselves.
+ */
+
+ if (virtual_space_start == virtual_space_end) {
+ pmap_virtual_space(&virtual_space_start, &virtual_space_end);
+
+ /*
+ * The initial values must be aligned properly, and
+ * we don't trust the pmap module to do it right.
+ */
+
+ virtual_space_start = round_page(virtual_space_start);
+ virtual_space_end = trunc_page(virtual_space_end);
+ }
+
+ /*
+ * Allocate virtual memory for this request.
+ */
+
+ addr = virtual_space_start;
+ virtual_space_start += size;
+
+ /*
+ * Allocate and map physical pages to back new virtual pages.
+ */
+
+ for (vaddr = round_page(addr);
+ vaddr < addr + size;
+ vaddr += PAGE_SIZE) {
+ if (!pmap_next_page(&paddr))
+ panic("vm_bootstrap_steal_memory");
+
+ /*
+ * XXX Logically, these mappings should be wired,
+ * but some pmap modules barf if they are.
+ */
+
+ pmap_enter(pmap_kernel(), vaddr, paddr,
+ VM_PROT_READ|VM_PROT_WRITE, FALSE);
+ }
+
+ return addr;
+}
+
#else /* MACHINE_NONCONTIG */
+/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
/*
- * vm_page_startup:
+ * vm_page_startup:
*
- * Initializes the resident memory module.
+ * Initializes the resident memory module.
*
- * Allocates memory for the page cells, and
- * for the object/offset-to-page hash table headers.
- * Each page cell is initialized and placed on the free list.
+ * Allocates memory for the page cells, and
+ * for the object/offset-to-page hash table headers.
+ * Each page cell is initialized and placed on the free list.
*/
void
vm_page_startup(start, end)
@@ -314,29 +1036,30 @@ vm_page_startup(start, end)
extern vm_offset_t kentry_data;
extern vm_size_t kentry_data_size;
+
/*
- * Initialize the locks
+ * Initialize the locks
*/
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
/*
- * Initialize the queue headers for the free queue,
- * the active queue and the inactive queue.
+ * Initialize the queue headers for the free queue,
+ * the active queue and the inactive queue.
*/
TAILQ_INIT(&vm_page_queue_free);
TAILQ_INIT(&vm_page_queue_active);
TAILQ_INIT(&vm_page_queue_inactive);
/*
- * Calculate the number of hash table buckets.
+ * Calculate the number of hash table buckets.
*
- * The number of buckets MUST BE a power of 2, and
- * the actual value is the next power of 2 greater
- * than the number of physical pages in the system.
+ * The number of buckets MUST BE a power of 2, and
+ * the actual value is the next power of 2 greater
+ * than the number of physical pages in the system.
*
- * Note:
- * This computation can be tweaked if desired.
+ * Note:
+ * This computation can be tweaked if desired.
*/
if (vm_page_bucket_count == 0) {
vm_page_bucket_count = 1;
@@ -347,7 +1070,7 @@ vm_page_startup(start, end)
vm_page_hash_mask = vm_page_bucket_count - 1;
/*
- * Allocate (and initialize) the hash table buckets.
+ * Allocate (and initialize) the hash table buckets.
*/
vm_page_buckets = (struct pglist *)
pmap_bootstrap_alloc(vm_page_bucket_count * sizeof(struct pglist));
@@ -361,55 +1084,56 @@ vm_page_startup(start, end)
simple_lock_init(&bucket_lock);
/*
- * Truncate the remainder of physical memory to our page size.
+ * Truncate the remainder of physical memory to our page size.
*/
*end = trunc_page(*end);
/*
- * Pre-allocate maps and map entries that cannot be dynamically
- * allocated via malloc(). The maps include the kernel_map and
- * kmem_map which must be initialized before malloc() will
- * work (obviously). Also could include pager maps which would
- * be allocated before kmeminit.
+ * Pre-allocate maps and map entries that cannot be dynamically
+ * allocated via malloc(). The maps include the kernel_map and
+ * kmem_map which must be initialized before malloc() will
+ * work (obviously). Also could include pager maps which would
+ * be allocated before kmeminit.
*
- * Allow some kernel map entries... this should be plenty
- * since people shouldn't be cluttering up the kernel
- * map (they should use their own maps).
+ * Allow some kernel map entries... this should be plenty
+ * since people shouldn't be cluttering up the kernel
+ * map (they should use their own maps).
*/
kentry_data_size = round_page(MAX_KMAP*sizeof(struct vm_map) +
- MAX_KMAPENT*sizeof(struct vm_map_entry));
+ MAX_KMAPENT*sizeof(struct vm_map_entry));
kentry_data = (vm_offset_t) pmap_bootstrap_alloc(kentry_data_size);
/*
- * Compute the number of pages of memory that will be
- * available for use (taking into account the overhead
- * of a page structure per page).
+ * Compute the number of pages of memory that will be
+ * available for use (taking into account the overhead
+ * of a page structure per page).
*/
- cnt.v_free_count = vm_page_count = (*end - *start +
- sizeof(struct vm_page)) / (PAGE_SIZE + sizeof(struct vm_page));
+ cnt.v_free_count = vm_page_count =
+ (*end - *start + sizeof(struct vm_page)) /
+ (PAGE_SIZE + sizeof(struct vm_page));
/*
- * Record the extent of physical memory that the
- * virtual memory system manages.
+ * Record the extent of physical memory that the
+ * virtual memory system manages.
*/
first_page = *start;
first_page += vm_page_count * sizeof(struct vm_page);
first_page = atop(round_page(first_page));
- last_page = first_page + vm_page_count - 1;
+ last_page = first_page + vm_page_count - 1;
first_phys_addr = ptoa(first_page);
- last_phys_addr = ptoa(last_page) + PAGE_MASK;
+ last_phys_addr = ptoa(last_page) + PAGE_MASK;
/*
- * Allocate and clear the mem entry structures.
+ * Allocate and clear the mem entry structures.
*/
m = vm_page_array = (vm_page_t)
- pmap_bootstrap_alloc(vm_page_count * sizeof(struct vm_page));
+ pmap_bootstrap_alloc(vm_page_count * sizeof(struct vm_page));
bzero(vm_page_array, vm_page_count * sizeof(struct vm_page));
/*
- * Initialize the mem entry structures now, and
- * put them in the free queue.
+ * Initialize the mem entry structures now, and
+ * put them in the free queue.
*/
pa = first_phys_addr;
npages = vm_page_count;
@@ -423,8 +1147,8 @@ vm_page_startup(start, end)
}
/*
- * Initialize vm_pages_needed lock here - don't wait for pageout
- * daemon XXX
+ * Initialize vm_pages_needed lock here - don't wait for pageout
+ * daemon XXX
*/
simple_lock_init(&vm_pages_needed_lock);
@@ -433,161 +1157,13 @@ vm_page_startup(start, end)
}
#endif /* MACHINE_NONCONTIG */
-#if defined(MACHINE_NONCONTIG) && !defined(MACHINE_PAGES)
/*
- * We implement pmap_steal_memory and pmap_startup with the help
- * of two simpler functions, pmap_virtual_space and pmap_next_page.
- */
-vm_offset_t
-pmap_steal_memory(size)
- vm_size_t size;
-{
- vm_offset_t addr, vaddr, paddr;
-
-#ifdef i386 /* XXX i386 calls pmap_steal_memory before vm_mem_init() */
- if (cnt.v_page_size == 0) /* XXX */
- vm_set_page_size();
-#endif
-
- /*
- * We round the size to an integer multiple.
- */
- size = (size + 3) &~ 3; /* XXX */
-
- /*
- * If this is the first call to pmap_steal_memory,
- * we have to initialize ourself.
- */
- if (virtual_space_start == virtual_space_end) {
- pmap_virtual_space(&virtual_space_start, &virtual_space_end);
-
- /*
- * The initial values must be aligned properly, and
- * we don't trust the pmap module to do it right.
- */
- virtual_space_start = round_page(virtual_space_start);
- virtual_space_end = trunc_page(virtual_space_end);
- }
-
- /*
- * Allocate virtual memory for this request.
- */
- addr = virtual_space_start;
- virtual_space_start += size;
-
- /*
- * Allocate and map physical pages to back new virtual pages.
- */
- for (vaddr = round_page(addr); vaddr < addr + size;
- vaddr += PAGE_SIZE) {
- if (!pmap_next_page(&paddr))
- panic("pmap_steal_memory");
-
- /*
- * XXX Logically, these mappings should be wired,
- * but some pmap modules barf if they are.
- */
- pmap_enter(pmap_kernel(), vaddr, paddr,
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
- }
-
- return addr;
-}
-
-void
-pmap_startup(startp, endp)
- vm_offset_t *startp;
- vm_offset_t *endp;
-{
- unsigned int i, freepages;
- vm_offset_t paddr;
-
- /*
- * We calculate how many page frames we will have
- * and then allocate the page structures in one chunk.
- * The calculation is non-trivial. We want:
- *
- * vmpages > (freepages - (vmpages / sizeof(vm_page_t)))
- *
- * which, with some algebra, becomes:
- *
- * vmpages > (freepages * sizeof(...) / (1 + sizeof(...)))
- *
- * The value of vm_page_count need not be exact, but must be
- * large enough so vm_page_array handles the index range.
- */
- freepages = pmap_free_pages();
- /* Fudge slightly to deal with truncation error. */
- freepages += 1; /* fudge */
-
- vm_page_count = (PAGE_SIZE * freepages) /
- (PAGE_SIZE + sizeof(*vm_page_array));
-
- vm_page_array = (vm_page_t)
- pmap_steal_memory(vm_page_count * sizeof(*vm_page_array));
- bzero(vm_page_array, vm_page_count * sizeof(*vm_page_array));
-
-#ifdef DIAGNOSTIC
- /*
- * Initialize everyting in case the holes are stepped in,
- * and set PA to something that will cause a panic...
- */
- for (i = 0; i < vm_page_count; i++)
- vm_page_array[i].phys_addr = 0xdeadbeef;
-#endif
-
- /*
- * Initialize the page frames.
- * Note that some page indices may not be usable
- * when pmap_free_pages() counts pages in a hole.
- */
- if (!pmap_next_page(&paddr))
- panic("pmap_startup: can't get first page");
- first_page = pmap_page_index(paddr);
- i = 0;
- for (;;) {
- /* Initialize a page array element. */
- VM_PAGE_INIT(&vm_page_array[i], NULL, 0);
- vm_page_array[i].phys_addr = paddr;
- vm_page_free(&vm_page_array[i]);
-
- /* Are there more physical pages? */
- if (!pmap_next_page(&paddr))
- break;
- i = pmap_page_index(paddr) - first_page;
-
- /* Don't trust pmap_page_index()... */
- if (
-#if 0
- /* Cannot happen; i is unsigned */
- i < 0 ||
-#endif
- i >= vm_page_count)
- panic("pmap_startup: bad i=0x%x", i);
- }
-
- *startp = virtual_space_start;
- *endp = virtual_space_end;
-}
-#endif /* MACHINE_NONCONTIG && !MACHINE_PAGES */
-
-/*
- * vm_page_hash:
- *
- * Distributes the object/offset key pair among hash buckets.
- *
- * NOTE: This macro depends on vm_page_bucket_count being a power of 2.
- */
-#define vm_page_hash(object, offset) \
- (((unsigned long)object+(unsigned long)atop(offset))&vm_page_hash_mask)
-
-/*
- * vm_page_insert: [ internal use only ]
+ * vm_page_insert: [ internal use only ]
*
- * Inserts the given mem entry into the object/object-page
- * table and object list.
+ * Inserts the given mem entry into the object/object-page
+ * table and object list.
*
- * The object and page must be locked.
+ * The object and page must be locked.
*/
void
vm_page_insert(mem, object, offset)
@@ -604,42 +1180,46 @@ vm_page_insert(mem, object, offset)
panic("vm_page_insert: already inserted");
/*
- * Record the object/offset pair in this page
+ * Record the object/offset pair in this page
*/
+
mem->object = object;
mem->offset = offset;
/*
- * Insert it into the object_object/offset hash table
+ * Insert it into the object_object/offset hash table
*/
+
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
spl = splimp();
simple_lock(&bucket_lock);
TAILQ_INSERT_TAIL(bucket, mem, hashq);
simple_unlock(&bucket_lock);
- (void)splx(spl);
+ (void) splx(spl);
/*
- * Now link into the object's list of backed pages.
+ * Now link into the object's list of backed pages.
*/
+
TAILQ_INSERT_TAIL(&object->memq, mem, listq);
mem->flags |= PG_TABLED;
/*
- * And show that the object has one more resident
- * page.
+ * And show that the object has one more resident
+ * page.
*/
+
object->resident_page_count++;
}
/*
- * vm_page_remove: [ internal use only ]
+ * vm_page_remove: [ internal use only ]
* XXX: used by device pager as well
*
- * Removes the given mem entry from the object/offset-page
- * table and the object page list.
+ * Removes the given mem entry from the object/offset-page
+ * table and the object page list.
*
- * The object and page must be locked.
+ * The object and page must be locked.
*/
void
vm_page_remove(mem)
@@ -659,8 +1239,9 @@ vm_page_remove(mem)
return;
/*
- * Remove from the object_object/offset hash table
+ * Remove from the object_object/offset hash table
*/
+
bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
spl = splimp();
simple_lock(&bucket_lock);
@@ -669,26 +1250,28 @@ vm_page_remove(mem)
(void) splx(spl);
/*
- * Now remove from the object's list of backed pages.
+ * Now remove from the object's list of backed pages.
*/
+
TAILQ_REMOVE(&mem->object->memq, mem, listq);
/*
- * And show that the object has one fewer resident
- * page.
+ * And show that the object has one fewer resident
+ * page.
*/
+
mem->object->resident_page_count--;
mem->flags &= ~PG_TABLED;
}
/*
- * vm_page_lookup:
+ * vm_page_lookup:
*
- * Returns the page associated with the object/offset
- * pair specified; if none is found, NULL is returned.
+ * Returns the page associated with the object/offset
+ * pair specified; if none is found, NULL is returned.
*
- * The object must be locked. No side effects.
+ * The object must be locked. No side effects.
*/
vm_page_t
vm_page_lookup(object, offset)
@@ -700,8 +1283,9 @@ vm_page_lookup(object, offset)
int spl;
/*
- * Search the hash table for this object/offset pair
+ * Search the hash table for this object/offset pair
*/
+
bucket = &vm_page_buckets[vm_page_hash(object, offset)];
spl = splimp();
@@ -721,12 +1305,12 @@ vm_page_lookup(object, offset)
}
/*
- * vm_page_rename:
+ * vm_page_rename:
*
- * Move the given memory entry from its
- * current object to the specified target object/offset.
+ * Move the given memory entry from its
+ * current object to the specified target object/offset.
*
- * The object must be locked.
+ * The object must be locked.
*/
void
vm_page_rename(mem, new_object, new_offset)
@@ -734,25 +1318,26 @@ vm_page_rename(mem, new_object, new_offset)
register vm_object_t new_object;
vm_offset_t new_offset;
{
+
if (mem->object == new_object)
return;
- /* Keep page from moving out from under pageout daemon */
- vm_page_lock_queues();
-
- vm_page_remove(mem);
+ vm_page_lock_queues(); /* keep page from moving out from
+ under pageout daemon */
+ vm_page_remove(mem);
vm_page_insert(mem, new_object, new_offset);
vm_page_unlock_queues();
}
/*
- * vm_page_alloc:
+ * vm_page_alloc:
*
- * Allocate and return a memory cell associated
- * with this VM object/offset pair.
+ * Allocate and return a memory cell associated
+ * with this VM object/offset pair.
*
- * Object must be locked.
+ * Object must be locked.
*/
+
vm_page_t
vm_page_alloc(object, offset)
vm_object_t object;
@@ -763,13 +1348,20 @@ vm_page_alloc(object, offset)
spl = splimp(); /* XXX */
simple_lock(&vm_page_queue_free_lock);
- if (vm_page_queue_free.tqh_first == NULL) {
- simple_unlock(&vm_page_queue_free_lock);
- splx(spl);
- return(NULL);
- }
-
mem = vm_page_queue_free.tqh_first;
+
+ if (VERY_LOW_MEM()) {
+ if ((!KERN_OBJ(object) && curproc != pageout_daemon)
+ || mem == NULL) {
+ simple_unlock(&vm_page_queue_free_lock);
+ splx(spl);
+ return(NULL);
+ }
+ }
+#ifdef DIAGNOSTIC
+ if (mem == NULL) /* because we now depend on VERY_LOW_MEM() */
+ panic("vm_page_alloc");
+#endif
TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
cnt.v_free_count--;
@@ -779,34 +1371,36 @@ vm_page_alloc(object, offset)
VM_PAGE_INIT(mem, object, offset);
/*
- * Decide if we should poke the pageout daemon.
- * We do this if the free count is less than the low
- * water mark, or if the free count is less than the high
- * water mark (but above the low water mark) and the inactive
- * count is less than its target.
+ * Decide if we should poke the pageout daemon.
+ * We do this if the free count is less than the low
+ * water mark, or if the free count is less than the high
+ * water mark (but above the low water mark) and the inactive
+ * count is less than its target.
*
- * We don't have the counts locked ... if they change a little,
- * it doesn't really matter.
+ * We don't have the counts locked ... if they change a little,
+ * it doesn't really matter.
*/
+
if (cnt.v_free_count < cnt.v_free_min ||
(cnt.v_free_count < cnt.v_free_target &&
- cnt.v_inactive_count < cnt.v_inactive_target))
+ cnt.v_inactive_count < cnt.v_inactive_target))
thread_wakeup(&vm_pages_needed);
return (mem);
}
/*
- * vm_page_free:
+ * vm_page_free:
*
- * Returns the given page to the free list,
- * disassociating it with any VM object.
+ * Returns the given page to the free list,
+ * disassociating it with any VM object.
*
- * Object and page must be locked prior to entry.
+ * Object and page must be locked prior to entry.
*/
void
vm_page_free(mem)
register vm_page_t mem;
{
+
vm_page_remove(mem);
if (mem->flags & PG_ACTIVE) {
TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
@@ -835,18 +1429,19 @@ vm_page_free(mem)
}
/*
- * vm_page_wire:
+ * vm_page_wire:
*
- * Mark this page as wired down by yet
- * another map, removing it from paging queues
- * as necessary.
+ * Mark this page as wired down by yet
+ * another map, removing it from paging queues
+ * as necessary.
*
- * The page queues must be locked.
+ * The page queues must be locked.
*/
void
vm_page_wire(mem)
register vm_page_t mem;
{
+
VM_PAGE_CHECK(mem);
if (mem->wire_count == 0) {
@@ -866,17 +1461,18 @@ vm_page_wire(mem)
}
/*
- * vm_page_unwire:
+ * vm_page_unwire:
*
- * Release one wiring of this page, potentially
- * enabling it to be paged again.
+ * Release one wiring of this page, potentially
+ * enabling it to be paged again.
*
- * The page queues must be locked.
+ * The page queues must be locked.
*/
void
vm_page_unwire(mem)
register vm_page_t mem;
{
+
VM_PAGE_CHECK(mem);
mem->wire_count--;
@@ -889,24 +1485,26 @@ vm_page_unwire(mem)
}
/*
- * vm_page_deactivate:
+ * vm_page_deactivate:
*
- * Returns the given page to the inactive list,
- * indicating that no physical maps have access
- * to this page. [Used by the physical mapping system.]
+ * Returns the given page to the inactive list,
+ * indicating that no physical maps have access
+ * to this page. [Used by the physical mapping system.]
*
- * The page queues must be locked.
+ * The page queues must be locked.
*/
void
vm_page_deactivate(m)
register vm_page_t m;
{
+
VM_PAGE_CHECK(m);
/*
- * Only move active pages -- ignore locked or already
- * inactive ones.
+ * Only move active pages -- ignore locked or already
+ * inactive ones.
*/
+
if (m->flags & PG_ACTIVE) {
TAILQ_REMOVE(&vm_page_queue_active, m, pageq);
m->flags &= ~PG_ACTIVE;
@@ -929,16 +1527,17 @@ vm_page_deactivate(m)
}
/*
- * vm_page_activate:
+ * vm_page_activate:
*
- * Put the specified page on the active list (if appropriate).
+ * Put the specified page on the active list (if appropriate).
*
- * The page queues must be locked.
+ * The page queues must be locked.
*/
void
vm_page_activate(m)
register vm_page_t m;
{
+
VM_PAGE_CHECK(m);
if (m->flags & PG_INACTIVE) {
@@ -957,16 +1556,17 @@ vm_page_activate(m)
}
/*
- * vm_page_zero_fill:
+ * vm_page_zero_fill:
*
- * Zero-fill the specified page.
- * Written as a standard pagein routine, to
- * be used by the zero-fill object.
+ * Zero-fill the specified page.
+ * Written as a standard pagein routine, to
+ * be used by the zero-fill object.
*/
boolean_t
vm_page_zero_fill(m)
vm_page_t m;
{
+
VM_PAGE_CHECK(m);
m->flags &= ~PG_CLEAN;
@@ -975,15 +1575,16 @@ vm_page_zero_fill(m)
}
/*
- * vm_page_copy:
+ * vm_page_copy:
*
- * Copy one page to another
+ * Copy one page to another
*/
void
vm_page_copy(src_m, dest_m)
vm_page_t src_m;
vm_page_t dest_m;
{
+
VM_PAGE_CHECK(src_m);
VM_PAGE_CHECK(dest_m);
@@ -1039,14 +1640,18 @@ u_long vm_page_alloc_memory_npages;
* XXX allocates a single segment.
*/
int
-vm_page_alloc_memory(size, low, high, alignment, boundary, rlist, nsegs,
- waitok)
+vm_page_alloc_memory(size, low, high, alignment, boundary,
+ rlist, nsegs, waitok)
vm_size_t size;
vm_offset_t low, high, alignment, boundary;
struct pglist *rlist;
int nsegs, waitok;
{
vm_offset_t try, idxpa, lastidxpa;
+#if defined(MACHINE_NEW_NONCONTIG)
+ int psi;
+ struct vm_page *vm_page_array;
+#endif
int s, tryidx, idx, end, error;
vm_page_t m;
u_long pagemask;
@@ -1101,6 +1706,19 @@ vm_page_alloc_memory(size, low, high, alignment, boundary, rlist, nsegs,
/*
* Make sure this is a managed physical page.
*/
+#if defined(MACHINE_NEW_NONCONTIG)
+
+ if ((psi = vm_physseg_find(atop(try), &idx)) == -1)
+ continue; /* managed? */
+ if (vm_physseg_find(atop(try + size), NULL) != psi)
+ continue; /* end must be in this segment */
+
+ tryidx = idx;
+ end = idx + (size / PAGE_SIZE);
+ vm_page_array = vm_physmem[psi].pgs;
+ /* XXX: emulates old global vm_page_array */
+
+#else
if (IS_VM_PHYSADDR(try) == 0)
continue;
@@ -1112,6 +1730,7 @@ vm_page_alloc_memory(size, low, high, alignment, boundary, rlist, nsegs,
*/
goto out;
}
+#endif
/*
* Found a suitable starting page. See of the range
@@ -1127,6 +1746,7 @@ vm_page_alloc_memory(size, low, high, alignment, boundary, rlist, nsegs,
idxpa = VM_PAGE_TO_PHYS(&vm_page_array[idx]);
+#if !defined(MACHINE_NEW_NONCONTIG)
/*
* Make sure this is a managed physical page.
* XXX Necessary? I guess only if there
@@ -1134,6 +1754,7 @@ vm_page_alloc_memory(size, low, high, alignment, boundary, rlist, nsegs,
*/
if (IS_VM_PHYSADDR(idxpa) == 0)
break;
+#endif
if (idx > tryidx) {
lastidxpa =
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index e4780cdbb8b..ab48d7dffd6 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_page.h,v 1.4 1997/09/22 20:44:53 niklas Exp $ */
-/* $NetBSD: vm_page.h,v 1.20 1997/06/06 23:10:25 thorpej Exp $ */
+/* $OpenBSD: vm_page.h,v 1.5 1998/03/01 00:38:20 niklas Exp $ */
+/* $NetBSD: vm_page.h,v 1.24 1998/02/10 14:09:03 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -68,7 +68,6 @@
/*
* Resident memory system definitions.
*/
-
#ifndef _VM_PAGE_
#define _VM_PAGE_
@@ -94,24 +93,60 @@
*
* Fields in this structure are locked either by the lock on the
* object that the page belongs to (O) or by the lock on the page
- * queues (P).
+ * queues (P) [or both].
+ */
+
+#if defined(UVM)
+/*
+ * locking note: the mach version of this data structure had bit
+ * fields for the flags, and the bit fields were divided into two
+ * items (depending on who locked what). some time, in BSD, the bit
+ * fields were dumped and all the flags were lumped into one short.
+ * that is fine for a single threaded uniprocessor OS, but bad if you
+ * want to actual make use of locking (simple_lock's). so, we've
+ * seperated things back out again.
+ *
+ * note the page structure has no lock of its own.
*/
+#include <uvm/uvm_extern.h>
+#include <vm/pglist.h>
+#else
TAILQ_HEAD(pglist, vm_page);
+#endif /* UVM */
struct vm_page {
- TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO
- * queue or free list (P) */
- TAILQ_ENTRY(vm_page) hashq; /* hash table links (O)*/
- TAILQ_ENTRY(vm_page) listq; /* pages in same object (O)*/
+ TAILQ_ENTRY(vm_page) pageq; /* queue info for FIFO
+ * queue or free list (P) */
+ TAILQ_ENTRY(vm_page) hashq; /* hash table links (O)*/
+ TAILQ_ENTRY(vm_page) listq; /* pages in same object (O)*/
- vm_object_t object; /* which object am I in (O,P)*/
- vm_offset_t offset; /* offset into object (O,P) */
-
- u_short wire_count; /* wired down maps refs (P) */
- u_short flags; /* see below */
+#if !defined(UVM) /* uvm uses obju */
+ vm_object_t object; /* which object am I in (O,P)*/
+#endif
+ vm_offset_t offset; /* offset into object (O,P) */
+
+#if defined(UVM)
+ struct uvm_object *uobject; /* object (O,P) */
+ struct vm_anon *uanon; /* anon (O,P) */
+ u_short flags; /* object flags [O] */
+ u_short version; /* version count [O] */
+ u_short wire_count; /* wired down map refs [P] */
+ u_short pqflags; /* page queue flags [P] */
+ u_int loan_count; /* number of active loans
+ * to read: [O or P]
+ * to modify: [O _and_ P] */
+#else
+ u_short wire_count; /* wired down maps refs (P) */
+ u_short flags; /* see below */
+#endif
- vm_offset_t phys_addr; /* physical address of page */
+ vm_offset_t phys_addr; /* physical address of page */
+#if defined(UVM) && defined(UVM_PAGE_TRKOWN)
+ /* debugging fields to track page ownership */
+ pid_t owner; /* proc that set PG_BUSY */
+ char *owner_tag; /* why it was set busy */
+#endif
};
/*
@@ -119,6 +154,38 @@ struct vm_page {
*
* Note: PG_FILLED and PG_DIRTY are added for the filesystems.
*/
+#if defined(UVM)
+
+/*
+ * locking rules:
+ * PG_ ==> locked by object lock
+ * PQ_ ==> lock by page queue lock
+ * PQ_FREE is locked by free queue lock and is mutex with all other PQs
+ *
+ * possible deadwood: PG_FAULTING, PQ_LAUNDRY
+ */
+#define PG_CLEAN 0x0008 /* page has not been modified */
+#define PG_BUSY 0x0010 /* page is in transit */
+#define PG_WANTED 0x0020 /* someone is waiting for page */
+#define PG_TABLED 0x0040 /* page is in VP table */
+#define PG_FAKE 0x0200 /* page is placeholder for pagein */
+#define PG_FILLED 0x0400 /* client flag to set when filled */
+#define PG_DIRTY 0x0800 /* client flag to set when dirty */
+#define PG_RELEASED 0x1000 /* page released while paging */
+#define PG_FAULTING 0x2000 /* page is being faulted in */
+#define PG_CLEANCHK 0x4000 /* clean bit has been checked */
+
+#define PQ_FREE 0x0001 /* page is on free list */
+#define PQ_INACTIVE 0x0002 /* page is in inactive list */
+#define PQ_ACTIVE 0x0004 /* page is in active list */
+#define PQ_LAUNDRY 0x0008 /* page is being cleaned now */
+#define PQ_ANON 0x0010 /* page is part of an anon, rather
+ than an uvm_object */
+#define PQ_AOBJ 0x0020 /* page is part of an anonymous
+ uvm_object */
+#define PQ_SWAPBACKED (PQ_ANON|PQ_AOBJ)
+
+#else
#define PG_INACTIVE 0x0001 /* page is in inactive list (P) */
#define PG_ACTIVE 0x0002 /* page is in active list (P) */
#define PG_LAUNDRY 0x0004 /* page is being cleaned now (P) */
@@ -144,32 +211,44 @@ struct vm_page {
#define PG_FAULTING 0x2000 /* page is being faulted in */
#define PG_PAGEROWNED 0x4000 /* DEBUG: async paging op in progress */
#define PG_PTPAGE 0x8000 /* DEBUG: is a user page table page */
+#endif
-#if VM_PAGE_DEBUG
-#ifndef MACHINE_NONCONTIG
-#define VM_PAGE_CHECK(mem) { \
- if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
- (((unsigned int) mem) > \
- ((unsigned int) &vm_page_array[last_page-first_page])) || \
- ((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
- (PG_ACTIVE | PG_INACTIVE))) \
- panic("vm_page_check: not valid!"); \
-}
-#else /* MACHINE_NONCONTIG */
-#define VM_PAGE_CHECK(mem) { \
- if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
- (((unsigned int) mem) > \
- ((unsigned int) &vm_page_array[vm_page_count])) || \
- ((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
- (PG_ACTIVE | PG_INACTIVE))) \
- panic("vm_page_check: not valid!"); \
-}
-#endif /* MACHINE_NONCONTIG */
-#else /* VM_PAGE_DEBUG */
-#define VM_PAGE_CHECK(mem)
-#endif /* VM_PAGE_DEBUG */
+#if defined(MACHINE_NEW_NONCONTIG)
+/*
+ * physical memory layout structure
+ *
+ * MD vmparam.h must #define:
+ *	VM_PHYSSEG_MAX = max number of physical memory segments we support
+ * (if this is "1" then we revert to a "contig" case)
+ *	VM_PHYSSEG_STRAT: memory sort/search options (for VM_PHYSSEG_MAX > 1)
+ * - VM_PSTRAT_RANDOM: linear search (random order)
+ * - VM_PSTRAT_BSEARCH: binary search (sorted by address)
+ * - VM_PSTRAT_BIGFIRST: linear search (sorted by largest segment first)
+ * - others?
+ * XXXCDC: eventually we should remove contig and old non-contig cases
+ * and purge all left-over global variables...
+ */
+#define VM_PSTRAT_RANDOM 1
+#define VM_PSTRAT_BSEARCH 2
+#define VM_PSTRAT_BIGFIRST 3
+
+/*
+ * vm_physseg: describes one segment of physical memory
+ */
+struct vm_physseg {
+ vm_offset_t start; /* PF# of first page in segment */
+ vm_offset_t end; /* (PF# of last page in segment) + 1 */
+ vm_offset_t avail_start; /* PF# of first free page in segment */
+ vm_offset_t avail_end; /* (PF# of last free page in segment) +1 */
+ struct vm_page *pgs; /* vm_page structures (from start) */
+ struct vm_page *lastpg; /* vm_page structure for end */
+ struct pmap_physseg pmseg; /* pmap specific (MD) data */
+};
+
+#endif /* MACHINE_NEW_NONCONTIG */
+
+#if defined(_KERNEL)
-#ifdef _KERNEL
/*
* Each pageable resident page falls into one of three lists:
*
@@ -193,51 +272,226 @@ struct pglist vm_page_queue_active; /* active memory queue */
extern
struct pglist vm_page_queue_inactive; /* inactive memory queue */
+
+#if defined(MACHINE_NEW_NONCONTIG)
+
+/*
+ * physical memory config is stored in vm_physmem.
+ */
+
+extern struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];
+extern int vm_nphysseg;
+
+#else
+#if defined(MACHINE_NONCONTIG)
+/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+extern
+u_long first_page; /* first physical page number */
+extern
+int vm_page_count; /* How many pages do we manage? */
extern
vm_page_t vm_page_array; /* First resident page in table */
-#ifndef MACHINE_NONCONTIG
+#define VM_PAGE_INDEX(pa) \
+ (pmap_page_index((pa)) - first_page)
+#else
+/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
extern
-long first_page; /* first physical page number */
+long first_page; /* first physical page number */
/* ... represented in vm_page_array */
extern
-long last_page; /* last physical page number */
+long last_page; /* last physical page number */
/* ... represented in vm_page_array */
/* [INCLUSIVE] */
extern
-vm_offset_t first_phys_addr; /* physical address for first_page */
+vm_offset_t first_phys_addr; /* physical address for first_page */
extern
-vm_offset_t last_phys_addr; /* physical address for last_page */
-#else /* MACHINE_NONCONTIG */
-extern
-u_long first_page; /* first physical page number */
+vm_offset_t last_phys_addr; /* physical address for last_page */
extern
-int vm_page_count; /* How many pages do we manage? */
+vm_page_t vm_page_array; /* First resident page in table */
+
+#define VM_PAGE_INDEX(pa) \
+ (atop((pa)) - first_page)
+
#endif /* MACHINE_NONCONTIG */
+#endif /* MACHINE_NEW_NONCONTIG */
+
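With vm_physmem[] and vm_nphysseg exported as above, machine-independent code can walk the segment table directly. A small illustrative helper (the name is invented; "available" here means the boot-time avail_start/avail_end window, not pages that are currently free):

#if defined(MACHINE_NEW_NONCONTIG)
/* Sketch: total managed pages and boot-time available pages. */
static __inline void
vm_physmem_totals(managedp, availp)
	int *managedp, *availp;
{
	int lcv;

	*managedp = *availp = 0;
	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		*managedp += vm_physmem[lcv].end - vm_physmem[lcv].start;
		*availp += vm_physmem[lcv].avail_end -
		    vm_physmem[lcv].avail_start;
	}
}
#endif
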
+/*
+ * prototypes
+ */
+
+#if defined(MACHINE_NEW_NONCONTIG)
+static struct vm_page *PHYS_TO_VM_PAGE __P((vm_offset_t));
+static int vm_physseg_find __P((vm_offset_t, int *));
+#endif
+void vm_page_activate __P((vm_page_t));
+vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t));
+int vm_page_alloc_memory __P((vm_size_t size, vm_offset_t low,
+ vm_offset_t high, vm_offset_t alignment, vm_offset_t boundary,
+ struct pglist *rlist, int nsegs, int waitok));
+void vm_page_free_memory __P((struct pglist *list));
+#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG)
+void vm_page_bootstrap __P((vm_offset_t *, vm_offset_t *));
+#endif
+void vm_page_copy __P((vm_page_t, vm_page_t));
+void vm_page_deactivate __P((vm_page_t));
+void vm_page_free __P((vm_page_t));
+void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
+vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
+#if defined(MACHINE_NEW_NONCONTIG)
+void vm_page_physload __P((vm_offset_t, vm_offset_t,
+ vm_offset_t, vm_offset_t));
+void vm_page_physrehash __P((void));
+#endif
+void vm_page_remove __P((vm_page_t));
+void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
+void vm_page_startup __P((vm_offset_t *, vm_offset_t *));
+#endif
+void vm_page_unwire __P((vm_page_t));
+void vm_page_wire __P((vm_page_t));
+boolean_t vm_page_zero_fill __P((vm_page_t));
+
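The vm_page_physload() prototype above takes the segment bounds as page frame numbers, matching the PF# fields of struct vm_physseg. A hedged sketch of how an MD pmap bootstrap might register two RAM banks; the addresses are invented and the exact point in bootstrap at which a port makes these calls is machine-dependent:

/* Sketch: an MD bootstrap registering two hypothetical RAM banks. */
static void
example_physload()
{
	/* 8MB onboard RAM; first 1MB already taken by the kernel */
	vm_page_physload(atop(0x00000000), atop(0x00800000),
	    atop(0x00100000), atop(0x00800000));
	/* 4MB expansion board, entirely available */
	vm_page_physload(atop(0x08000000), atop(0x08400000),
	    atop(0x08000000), atop(0x08400000));
}
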
+/*
+ * macros and inlines
+ */
#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
-#ifndef MACHINE_NONCONTIG
+#if defined(MACHINE_NEW_NONCONTIG)
+
+/*
+ * when VM_PHYSSEG_MAX is 1, we can simplify these functions
+ */
+
+/*
+ * vm_physseg_find: find vm_physseg structure that belongs to a PA
+ */
+static __inline int
+vm_physseg_find(pframe, offp)
+ vm_offset_t pframe;
+ int *offp;
+{
+#if VM_PHYSSEG_MAX == 1
+
+ /* 'contig' case */
+ if (pframe >= vm_physmem[0].start && pframe < vm_physmem[0].end) {
+ if (offp)
+ *offp = pframe - vm_physmem[0].start;
+ return(0);
+ }
+ return(-1);
+
+#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
+ /* binary search for it */
+ int start, len, try;
+
+ /*
+	 * if try is too large (thus target is less than try) we reduce
+ * the length to trunc(len/2) [i.e. everything smaller than "try"]
+ *
+ * if the try is too small (thus target is greater than try) then
+ * we set the new start to be (try + 1). this means we need to
+ * reduce the length to (round(len/2) - 1).
+ *
+ * note "adjust" below which takes advantage of the fact that
+ * (round(len/2) - 1) == trunc((len - 1) / 2)
+ * for any value of len we may have
+ */
+
+ for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
+ try = start + (len / 2); /* try in the middle */
+
+ /* start past our try? */
+ if (pframe >= vm_physmem[try].start) {
+ /* was try correct? */
+ if (pframe < vm_physmem[try].end) {
+ if (offp)
+ *offp = pframe - vm_physmem[try].start;
+ return(try); /* got it */
+ }
+ start = try + 1; /* next time, start here */
+ len--; /* "adjust" */
+ } else {
+ /*
+ * pframe before try, just reduce length of
+ * region, done in "for" loop
+ */
+ }
+ }
+ return(-1);
+
+#else
+ /* linear search for it */
+ int lcv;
+
+ for (lcv = 0; lcv < vm_nphysseg; lcv++) {
+ if (pframe >= vm_physmem[lcv].start &&
+ pframe < vm_physmem[lcv].end) {
+ if (offp)
+ *offp = pframe - vm_physmem[lcv].start;
+ return(lcv); /* got it */
+ }
+ }
+ return(-1);
+
+#endif
+}
+
+
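A concrete illustration of the search above, under a hypothetical two-segment layout (page frames [0x100,0x200) and [0x800,0xa00) loaded in that order):

/* Worked example: expected results for the hypothetical layout above. */
static void
example_physseg_lookups()
{
	int off;

	(void)vm_physseg_find(0x850, &off);	/* returns 1, off == 0x50 */
	(void)vm_physseg_find(0x100, &off);	/* returns 0, off == 0 */
	(void)vm_physseg_find(0x400, &off);	/* returns -1: falls in a hole */
}
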
+/*
+ * IS_VM_PHYSADDR: only used by mips/pmax/pica trap/pmap code.
+ */
+
+#define IS_VM_PHYSADDR(PA) (vm_physseg_find(atop(PA), NULL) != -1)
+
+/*
+ * PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
+ * back from an I/O mapping (ugh!). used in some MD code as well.
+ */
+static __inline struct vm_page *
+PHYS_TO_VM_PAGE(pa)
+ vm_offset_t pa;
+{
+ vm_offset_t pf = atop(pa);
+ int off;
+ int psi;
+
+ psi = vm_physseg_find(pf, &off);
+ if (psi != -1)
+ return(&vm_physmem[psi].pgs[off]);
+ return(NULL);
+}
+
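Typical use of the two lookups just defined, as a sketch (the helper name is invented; pa is any physical address handed back by an MD pmap or an I/O mapping):

/* Sketch: translate a managed physical address into its vm_page. */
static __inline struct vm_page *
example_phys_lookup(pa)
	vm_offset_t pa;
{
	if (!IS_VM_PHYSADDR(pa))
		return (NULL);		/* pa is not in any vm_physmem segment */
	return (PHYS_TO_VM_PAGE(pa));	/* VM_PAGE_TO_PHYS() inverts this */
}
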
+#elif defined(MACHINE_NONCONTIG)
+
+/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
#define IS_VM_PHYSADDR(pa) \
- ((pa) >= first_phys_addr && (pa) <= last_phys_addr)
+ (pmap_page_index(pa) >= 0)
+
+#define PHYS_TO_VM_PAGE(pa) \
+ (&vm_page_array[pmap_page_index(pa) - first_page])
-#define VM_PAGE_INDEX(pa) \
- (atop((pa)) - first_page)
#else
-#define IS_VM_PHYSADDR(pa) \
-({ \
- int __pmapidx = pmap_page_index(pa); \
- (__pmapidx >= 0 && __pmapidx >= first_page); \
-})
-#define VM_PAGE_INDEX(pa) \
- (pmap_page_index((pa)) - first_page)
-#endif /* MACHINE_NONCONTIG */
+/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+#define IS_VM_PHYSADDR(pa) \
+ ((pa) >= first_phys_addr && (pa) <= last_phys_addr)
+
+#define PHYS_TO_VM_PAGE(pa) \
+ (&vm_page_array[atop(pa) - first_page ])
+
+#endif /* (OLD) MACHINE_NONCONTIG */
+
+#if defined(UVM)
+
+#define VM_PAGE_IS_FREE(entry) ((entry)->pqflags & PQ_FREE)
+
+#else /* UVM */
-#define PHYS_TO_VM_PAGE(pa) \
- (&vm_page_array[VM_PAGE_INDEX((pa))])
+#define VM_PAGE_IS_FREE(entry) ((entry)->flags & PG_FREE)
-#define VM_PAGE_IS_FREE(entry) ((entry)->flags & PG_FREE)
+#endif /* UVM */
extern
simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive
@@ -245,10 +499,6 @@ simple_lock_data_t vm_page_queue_lock; /* lock on active and inactive
extern /* lock on free page queue */
simple_lock_data_t vm_page_queue_free_lock;
-/*
- * Functions implemented as macros
- */
-
#define PAGE_ASSERT_WAIT(m, interruptible) { \
(m)->flags |= PG_WANTED; \
assert_wait((m), (interruptible)); \
@@ -267,7 +517,10 @@ simple_lock_data_t vm_page_queue_free_lock;
#define vm_page_set_modified(m) { (m)->flags &= ~PG_CLEAN; }
-#ifndef MACHINE_NONCONTIG
+/*
+ * XXXCDC: different versions of this should die
+ */
+#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG)
#define VM_PAGE_INIT(mem, obj, offset) { \
(mem)->flags = PG_BUSY | PG_CLEAN | PG_FAKE; \
vm_page_insert((mem), (obj), (offset)); \
@@ -284,37 +537,58 @@ simple_lock_data_t vm_page_queue_free_lock;
}
#endif /* MACHINE_NONCONTIG */
-/* XXX what is this here for? */
-void vm_set_page_size __P((void));
+#if VM_PAGE_DEBUG
+#if defined(MACHINE_NEW_NONCONTIG)
-/* XXX probably should be elsewhere. */
-#ifdef MACHINE_NONCONTIG
-vm_offset_t pmap_steal_memory __P((vm_size_t));
-void pmap_startup __P((vm_offset_t *, vm_offset_t *));
-#endif
+/*
+ * VM_PAGE_CHECK: debugging check of a vm_page structure
+ */
+static __inline void
+VM_PAGE_CHECK(mem)
+ struct vm_page *mem;
+{
+ int lcv;
+
+ for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
+ if ((unsigned int) mem >= (unsigned int) vm_physmem[lcv].pgs &&
+ (unsigned int) mem <= (unsigned int) vm_physmem[lcv].lastpg)
+ break;
+ }
+ if (lcv == vm_nphysseg ||
+ (mem->flags & (PG_ACTIVE|PG_INACTIVE)) == (PG_ACTIVE|PG_INACTIVE))
+ panic("vm_page_check: not valid!");
+ return;
+}
+
+#elif defined(MACHINE_NONCONTIG)
+
+/* OLD NONCONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+#define VM_PAGE_CHECK(mem) { \
+ if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
+ (((unsigned int) mem) > \
+ ((unsigned int) &vm_page_array[vm_page_count])) || \
+ ((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
+ (PG_ACTIVE | PG_INACTIVE))) \
+ panic("vm_page_check: not valid!"); \
+}
+
+#else
+
+/* OLD CONTIG CODE: NUKE NUKE NUKE ONCE CONVERTED */
+#define VM_PAGE_CHECK(mem) { \
+ if ((((unsigned int) mem) < ((unsigned int) &vm_page_array[0])) || \
+ (((unsigned int) mem) > \
+ ((unsigned int) &vm_page_array[last_page-first_page])) || \
+ ((mem->flags & (PG_ACTIVE | PG_INACTIVE)) == \
+ (PG_ACTIVE | PG_INACTIVE))) \
+ panic("vm_page_check: not valid!"); \
+}
-void vm_page_activate __P((vm_page_t));
-vm_page_t vm_page_alloc __P((vm_object_t, vm_offset_t));
-int vm_page_alloc_memory __P((vm_size_t, vm_offset_t,
- vm_offset_t, vm_offset_t, vm_offset_t,
- struct pglist *, int, int));
-void vm_page_free_memory __P((struct pglist *));
-#ifdef MACHINE_NONCONTIG
-void vm_page_bootstrap __P((vm_offset_t *, vm_offset_t *));
-#endif
-void vm_page_copy __P((vm_page_t, vm_page_t));
-void vm_page_deactivate __P((vm_page_t));
-void vm_page_free __P((vm_page_t));
-void vm_page_insert __P((vm_page_t, vm_object_t, vm_offset_t));
-vm_page_t vm_page_lookup __P((vm_object_t, vm_offset_t));
-void vm_page_remove __P((vm_page_t));
-void vm_page_rename __P((vm_page_t, vm_object_t, vm_offset_t));
-#ifndef MACHINE_NONCONTIG
-void vm_page_startup __P((vm_offset_t *, vm_offset_t *));
#endif
-void vm_page_unwire __P((vm_page_t));
-void vm_page_wire __P((vm_page_t));
-boolean_t vm_page_zero_fill __P((vm_page_t));
+
+#else /* VM_PAGE_DEBUG */
+#define VM_PAGE_CHECK(mem)
+#endif /* VM_PAGE_DEBUG */
#endif /* _KERNEL */
#endif /* !_VM_PAGE_ */
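
One consumer of the vm_page_alloc_memory()/vm_page_free_memory() pair declared in this header is contiguous allocation for address-limited DMA. The sketch below only illustrates the prototype: the parameter meanings are inferred from the argument names, the 0-on-success return convention is assumed, and whether the caller must initialize the pglist is not shown in this diff (the TAILQ_INIT is a conservative guess).

#include <sys/param.h>
#include <sys/queue.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/* Sketch: grab npages of physically contiguous memory below 16MB. */
int
example_contig_alloc(npages)
	int npages;
{
	struct pglist mlist;
	int error;

	TAILQ_INIT(&mlist);
	error = vm_page_alloc_memory(npages * PAGE_SIZE,	/* size */
	    0, 0x1000000,		/* low, high physical bounds */
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    &mlist, 1, 1);		/* one segment, ok to wait */
	if (error)
		return (error);
	/* ... map and use the pages queued on mlist ... */
	vm_page_free_memory(&mlist);
	return (0);
}
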
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index b1b41394611..e6a9fef00bb 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_pageout.c,v 1.8 1997/11/06 05:59:36 csapuntz Exp $ */
+/* $OpenBSD: vm_pageout.c,v 1.9 1998/03/01 00:38:21 niklas Exp $ */
/* $NetBSD: vm_pageout.c,v 1.23 1996/02/05 01:54:07 christos Exp $ */
/*
@@ -99,6 +99,33 @@ int doclustered_pageout = 1;
#endif
/*
+ * Activate the pageout daemon and sleep awaiting more free memory
+ */
+void vm_wait(msg)
+ char *msg;
+{
+ int timo = 0;
+
+	if (curproc == pageout_daemon) {
+ /*
+ * We might be toast here, but IF some paging operations
+ * are pending then pages will magically appear. We
+ * usually can't return an error because callers of
+ * malloc who can wait generally don't check for
+ * failure.
+ *
+ * Only the pageout_daemon wakes up this channel!
+ */
+ printf("pageout daemon has stalled\n");
+ timo = hz >> 3;
+ }
+ simple_lock(&vm_pages_needed_lock);
+ thread_wakeup(&vm_pages_needed);
+ thread_sleep_msg(&cnt.v_free_count, &vm_pages_needed_lock, FALSE, msg,
+ timo);
+}
+
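Callers that used the old VM_WAIT macro (removed from vm_pageout.h further down) now call vm_wait() with a wait-channel message. A minimal sketch of the retry pattern, with an invented function name and locking elided:

/* Sketch: retry an allocation, sleeping in vm_wait() while memory is short. */
static vm_page_t
example_alloc_retry(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	vm_page_t pg;

	while ((pg = vm_page_alloc(object, offset)) == NULL)
		vm_wait("exalloc");
	return (pg);
}
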
+/*
* vm_pageout_scan does the dirty work for the pageout daemon.
*/
void
@@ -202,7 +229,6 @@ vm_pageout_scan()
object = m->object;
if (!vm_object_lock_try(object))
continue;
- cnt.v_pageouts++;
#ifdef CLUSTERED_PAGEOUT
if (object->pager &&
vm_pager_cancluster(object->pager, PG_CLUSTERPUT))
@@ -294,9 +320,10 @@ vm_pageout_page(m, object)
vm_object_unlock(object);
/*
- * Do a wakeup here in case the following operations block.
+ * We _used_ to wakeup page consumers here, "in case the following
+ * operations block". That leads to livelock if the pageout fails,
+ * which is actually quite a common thing for NFS paging.
*/
- thread_wakeup(&cnt.v_free_count);
/*
* If there is no pager for the page, use the default pager.
@@ -317,6 +344,9 @@ vm_pageout_page(m, object)
switch (pageout_status) {
case VM_PAGER_OK:
case VM_PAGER_PEND:
+ /* hmm, don't wakeup if memory is _very_ low? */
+ thread_wakeup(&cnt.v_free_count);
+ cnt.v_pageouts++;
cnt.v_pgpgout++;
m->flags &= ~PG_LAUNDRY;
break;
@@ -340,7 +370,7 @@ vm_pageout_page(m, object)
* XXX could get stuck here.
*/
(void)tsleep((caddr_t)&vm_pages_needed, PZERO|PCATCH,
- "pageout", hz);
+ "pageout", hz>>3);
break;
}
case VM_PAGER_FAIL:
@@ -391,6 +421,7 @@ vm_pageout_cluster(m, object)
vm_page_t plist[MAXPOCLUSTER], *plistp, p;
int postatus, ix, count;
+ cnt.v_pageouts++;
/*
* Determine the range of pages that can be part of a cluster
* for this object/offset. If it is only our single page, just
@@ -524,7 +555,8 @@ again:
void
vm_pageout()
{
- (void)spl0();
+ pageout_daemon = curproc;
+ (void) spl0();
/*
* Initialize some paging parameters.
@@ -557,7 +589,8 @@ vm_pageout()
simple_lock(&vm_pages_needed_lock);
while (TRUE) {
- thread_sleep(&vm_pages_needed, &vm_pages_needed_lock, FALSE);
+ thread_sleep_msg(&vm_pages_needed, &vm_pages_needed_lock,
+ FALSE, "paged", 0);
/*
* Compute the inactive target for this scan.
* We need to keep a reasonable amount of memory in the
diff --git a/sys/vm/vm_pageout.h b/sys/vm/vm_pageout.h
index 566be52e9eb..7451f5f75b0 100644
--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -1,5 +1,5 @@
-/* $OpenBSD: vm_pageout.h,v 1.5 1997/11/06 05:59:37 csapuntz Exp $ */
-/* $NetBSD: vm_pageout.h,v 1.11 1995/03/26 20:39:14 jtc Exp $ */
+/* $OpenBSD: vm_pageout.h,v 1.6 1998/03/01 00:38:22 niklas Exp $ */
+/* $NetBSD: vm_pageout.h,v 1.14 1998/02/10 14:09:04 mrg Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -76,6 +76,9 @@
extern int vm_pages_needed; /* should be some "event" structure */
simple_lock_data_t vm_pages_needed_lock;
+struct proc *pageout_daemon; /* watch for this in vm_fault()!! */
+u_int32_t vm_pages_reserved; /* i.e., reserved for pageout_daemon */
+
/*
* Exported routines.
@@ -85,15 +88,12 @@ simple_lock_data_t vm_pages_needed_lock;
* Signal pageout-daemon and wait for it.
*/
-#define VM_WAIT { \
- simple_lock(&vm_pages_needed_lock); \
- thread_wakeup(&vm_pages_needed); \
- thread_sleep(&cnt.v_free_count, \
- &vm_pages_needed_lock, FALSE); \
- }
+#if !defined(UVM)
#ifdef _KERNEL
+void vm_wait __P((char *));
void vm_pageout __P((void));
void vm_pageout_scan __P((void));
void vm_pageout_page __P((vm_page_t, vm_object_t));
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
#endif
+#endif
diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h
index 6b3ab9059c8..a612d9f2cee 100644
--- a/sys/vm/vm_pager.h
+++ b/sys/vm/vm_pager.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_pager.h,v 1.5 1997/11/06 05:59:38 csapuntz Exp $ */
+/* $OpenBSD: vm_pager.h,v 1.6 1998/03/01 00:38:24 niklas Exp $ */
/* $NetBSD: vm_pager.h,v 1.10 1995/03/26 20:39:15 jtc Exp $ */
/*
@@ -105,19 +105,24 @@ struct pagerops {
/*
* get/put return values
- * OK operation was successful
- * BAD specified data was out of the accepted range
- * FAIL specified data was in range, but doesn't exist
- * PEND operations was initiated but not completed
- * ERROR error while accessing data that is in range and exists
- * AGAIN temporary resource shortage prevented operation from happening
+ * OK operation was successful
+ * BAD specified data was out of the accepted range
+ * FAIL specified data was in range, but doesn't exist
+ *	PEND		operation was initiated but not completed
+ * ERROR error while accessing data that is in range and exists
+ * AGAIN temporary resource shortage prevented operation from happening
+ * UNLOCK unlock the map and try again
+ * REFAULT [uvm_fault internal use only!] unable to relock data structures,
+ *		thus the mapping needs to be reverified before we can proceed
*/
-#define VM_PAGER_OK 0
-#define VM_PAGER_BAD 1
-#define VM_PAGER_FAIL 2
-#define VM_PAGER_PEND 3
-#define VM_PAGER_ERROR 4
-#define VM_PAGER_AGAIN 5
+#define VM_PAGER_OK 0
+#define VM_PAGER_BAD 1
+#define VM_PAGER_FAIL 2
+#define VM_PAGER_PEND 3
+#define VM_PAGER_ERROR 4
+#define VM_PAGER_AGAIN 5
+#define VM_PAGER_UNLOCK 6
+#define VM_PAGER_REFAULT 7
#ifdef _KERNEL
extern struct pagerops *dfltpagerops;
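
A hedged sketch of how a caller might classify the extended status set; the grouping is only illustrative (the comment above describes UNLOCK and REFAULT as fault-path signals, and this change does not show their actual handling):

/* Sketch: split pager get/put status codes into "done" and "retry". */
static __inline int
example_pager_retry(rv)
	int rv;
{
	switch (rv) {
	case VM_PAGER_OK:
	case VM_PAGER_PEND:
		return (0);	/* finished, or completion will be signalled */
	case VM_PAGER_AGAIN:
	case VM_PAGER_UNLOCK:
	case VM_PAGER_REFAULT:
		return (1);	/* caller should revalidate and try again */
	default:
		return (0);	/* BAD, FAIL, ERROR: hard failure */
	}
}
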
diff --git a/sys/vm/vm_param.h b/sys/vm/vm_param.h
index 74124a226cf..97eeaaf24b1 100644
--- a/sys/vm/vm_param.h
+++ b/sys/vm/vm_param.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_param.h,v 1.13 1997/12/12 08:46:00 deraadt Exp $ */
+/* $OpenBSD: vm_param.h,v 1.14 1998/03/01 00:38:25 niklas Exp $ */
/* $NetBSD: vm_param.h,v 1.12 1995/03/26 20:39:16 jtc Exp $ */
/*
@@ -96,10 +96,17 @@ typedef int boolean_t;
* or PAGE_SHIFT. The fact they are variables is hidden here so that
* we can easily make them constant if we so desire.
*/
+#if defined(UVM)
+#define PAGE_SIZE uvmexp.pagesize /* size of page */
+#define PAGE_MASK uvmexp.pagemask /* size of page - 1 */
+#define PAGE_SHIFT uvmexp.pageshift /* bits to shift for pages */
+#else
#define PAGE_SIZE cnt.v_page_size /* size of page */
#define PAGE_MASK page_mask /* size of page - 1 */
#define PAGE_SHIFT page_shift /* bits to shift for pages */
-#ifdef _KERNEL
+#endif
+
+#if defined(_KERNEL) && !defined(UVM)
extern vm_size_t page_mask;
extern int page_shift;
#endif
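
Either way the macros keep the usual page arithmetic unchanged; for instance, rounding a byte count up to a whole number of pages (the same idiom the existing round_page()-style macros use):

/* Sketch: round a byte count up to a page multiple. */
static __inline vm_size_t
example_round_page(bytes)
	vm_size_t bytes;
{
	return ((bytes + PAGE_MASK) & ~(vm_size_t)PAGE_MASK);
}
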
@@ -110,6 +117,7 @@ extern int page_shift;
#define VM_METER 1 /* struct vmmeter */
#define VM_LOADAVG 2 /* struct loadavg */
#define VM_PSSTRINGS 3 /* PSSTRINGS */
+#if !defined(UVM)
#define VM_MAXID 4 /* number of valid vm ids */
#define CTL_VM_NAMES { \
@@ -119,6 +127,22 @@ extern int page_shift;
{ "psstrings", CTLTYPE_STRUCT }, \
}
+#else
+
+#define VM_UVMEXP 4 /* struct uvmexp */
+#define VM_MAXID 5 /* number of valid vm ids */
+
+#define CTL_VM_NAMES { \
+ { 0, 0 }, \
+ { "vmmeter", CTLTYPE_STRUCT }, \
+ { "loadavg", CTLTYPE_STRUCT }, \
+ { "psstrings", CTLTYPE_STRUCT }, \
+ { "uvmexp", CTLTYPE_STRUCT }, \
+}
+
+#endif
+
+
struct _ps_strings {
void *val;
};
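
Once the UVM branch of CTL_VM_NAMES is in effect, the new vm.uvmexp node can be read with sysctl(3). A userland sketch; struct uvmexp is defined by UVM headers that are not part of this diff, so the example only sizes and fetches the raw buffer:

#include <sys/param.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int mib[2] = { CTL_VM, VM_UVMEXP };
	size_t len;
	void *buf;

	/* probe the size, then fetch the structure */
	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)
		err(1, "sysctl size");
	if ((buf = malloc(len)) == NULL)
		err(1, "malloc");
	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
		err(1, "sysctl fetch");
	printf("vm.uvmexp: %lu bytes\n", (unsigned long)len);
	free(buf);
	return (0);
}
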