author    Theo de Raadt <deraadt@cvs.openbsd.org>    1995-12-14 05:16:11 +0000
committer Theo de Raadt <deraadt@cvs.openbsd.org>    1995-12-14 05:16:11 +0000
commit    94e7bd1c1ec76c7e08243e8e674c3ce5395b1de8 (patch)
tree      3662a7841f4506f9682e1bd03f118823bcdc9e05 /sys/vm
parent    8bd27a21125fec410de2fd9d839ea136711aaa0f (diff)
from netbsd:
Extend use of vm_object_prefer() to vm_allocate_with_pager(). Make
vm_object_prefer() call the MD aligner for "pageless" objects too, so we
can have more control over the virtual address to be used. The
implementation could be simpler if we bypassed the object to be mapped,
but we'd lose the ability to adapt the alignment to objects that were
previously mmap'ed with MAP_FIXED. Only expect vm_fork() to return if
__FORK_BRAINDAMAGE is defined. Eliminate the unused third argument to
vm_fork().
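For readers unfamiliar with the "MD aligner" the log refers to: PMAP_PREFER
exists so that machines with virtually-indexed caches can steer mappings of
the same object offset onto compatible cache "colors". The sketch below is a
hypothetical userland illustration of that idea, not the real MD macro;
CACHE_SIZE and pmap_prefer() are invented names for the demo.

#include <stdio.h>

/*
 * Hypothetical illustration of a PMAP_PREFER-style MD aligner on a
 * machine with a virtually-indexed cache: keep every mapping of an
 * object offset on the same cache color, so aliases of the same data
 * land on the same cache lines.  CACHE_SIZE and pmap_prefer() are
 * invented for this demo, not the real MD interface.
 */
#define CACHE_SIZE	(64 * 1024UL)	/* assumed cache alias span */

static unsigned long
pmap_prefer(unsigned long foff, unsigned long va)
{
	/* Round va up until it shares color bits with foff. */
	return (va + ((foff - va) & (CACHE_SIZE - 1)));
}

int
main(void)
{
	/* 0x40001000 is bumped to 0x40002000 to match offset 0x12000. */
	printf("0x%lx\n", pmap_prefer(0x12000UL, 0x40001000UL));
	return (0);
}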
Diffstat (limited to 'sys/vm')
-rw-r--r--   sys/vm/vm_extern.h    8
-rw-r--r--   sys/vm/vm_glue.c     32
-rw-r--r--   sys/vm/vm_mmap.c     45
-rw-r--r--   sys/vm/vm_object.c   21
-rw-r--r--   sys/vm/vm_user.c     23
5 files changed, 87 insertions, 42 deletions
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 91e0417c78f..cca6f755a64 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -1,4 +1,4 @@
-/* $NetBSD: vm_extern.h,v 1.14 1995/09/27 20:30:17 thorpej Exp $ */
+/* $NetBSD: vm_extern.h,v 1.15 1995/12/09 04:28:16 mycroft Exp $ */
/*-
* Copyright (c) 1992, 1993
@@ -103,7 +103,11 @@ void vm_fault_copy_entry __P((vm_map_t,
vm_map_t, vm_map_entry_t, vm_map_entry_t));
void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t));
int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t));
-int vm_fork __P((struct proc *, struct proc *, int));
+#ifdef __FORK_BRAINDAMAGE
+int vm_fork __P((struct proc *, struct proc *));
+#else
+void vm_fork __P((struct proc *, struct proc *));
+#endif
int vm_inherit __P((vm_map_t,
vm_offset_t, vm_size_t, vm_inherit_t));
void vm_init_limits __P((struct proc *));
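The conditional prototype above changes the contract for callers: under
__FORK_BRAINDAMAGE, vm_fork() returns in both processes and its result must
be checked, while without it the child never returns through vm_fork() at
all, so the function can be void. A userland illustration of the two shapes
(vm_fork_demo() is an invented stand-in, not kernel code):

#include <stdio.h>

#define __FORK_BRAINDAMAGE	/* comment out to see the other shape */

#ifdef __FORK_BRAINDAMAGE
static int
vm_fork_demo(void)
{
	return (0);		/* 0 = parent; nonzero would be child */
}
#else
static void
vm_fork_demo(void)
{
	/* child resumes directly in user mode; nothing to return */
}
#endif

int
main(void)
{
#ifdef __FORK_BRAINDAMAGE
	if (vm_fork_demo() != 0) {
		printf("child path\n");
		return (0);
	}
#else
	vm_fork_demo();
#endif
	printf("parent path\n");
	return (0);
}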
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index e8a668ec61f..08927c41330 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -1,4 +1,4 @@
-/* $NetBSD: vm_glue.c,v 1.46 1995/05/05 03:35:39 cgd Exp $ */
+/* $NetBSD: vm_glue.c,v 1.48 1995/12/09 04:28:19 mycroft Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -202,10 +202,13 @@ vsunlock(addr, len, dirtied)
* after cpu_fork returns in the child process. We do nothing here
* after cpu_fork returns.
*/
+#ifdef __FORK_BRAINDAMAGE
int
-vm_fork(p1, p2, isvfork)
+#else
+void
+#endif
+vm_fork(p1, p2)
register struct proc *p1, *p2;
- int isvfork;
{
register struct user *up;
vm_offset_t addr;
@@ -222,18 +225,14 @@ vm_fork(p1, p2, isvfork)
#ifdef SYSVSHM
if (p1->p_vmspace->vm_shm)
- shmfork(p1, p2, isvfork);
+ shmfork(p1, p2);
#endif
#if !defined(i386) && !defined(pc532)
/*
* Allocate a wired-down (for now) pcb and kernel stack for the process
*/
-#ifdef pica
- addr = kmem_alloc_upage(kernel_map, USPACE);
-#else
addr = kmem_alloc_pageable(kernel_map, USPACE);
-#endif
if (addr == 0)
panic("vm_fork: no more kernel virtual memory");
vm_map_pageable(kernel_map, addr, addr + USPACE, FALSE);
@@ -276,6 +275,8 @@ vm_fork(p1, p2, isvfork)
(void)vm_map_inherit(vp, addr, VM_MAX_ADDRESS, VM_INHERIT_NONE);
}
#endif
+
+#ifdef __FORK_BRAINDAMAGE
/*
* cpu_fork will copy and update the kernel stack and pcb,
* and make the child ready to run. It marks the child
@@ -284,6 +285,15 @@ vm_fork(p1, p2, isvfork)
* once in the child.
*/
return (cpu_fork(p1, p2));
+#else
+ /*
+ * cpu_fork will copy and update the kernel stack and pcb,
+ * and make the child ready to run. The child will exit
+ * directly to user mode on its first time slice, and will
+ * not return here.
+ */
+ cpu_fork(p1, p2);
+#endif
}
/*
@@ -339,7 +349,7 @@ swapin(p)
cpu_swapin(p);
s = splstatclock();
if (p->p_stat == SRUN)
- setrunqueue(p);
+ setrunqueue(p);
p->p_flag |= P_INMEM;
splx(s);
p->p_swtime = 0;
@@ -400,10 +410,6 @@ loop:
p->p_pid, p->p_comm, p->p_addr,
ppri, cnt.v_free_count);
#endif
-#ifdef pica
- vm_map_pageable(kernel_map, (vm_offset_t)p->p_addr,
- (vm_offset_t)p->p_addr + atop(USPACE), FALSE);
-#endif
swapin(p);
goto loop;
}
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index ea205439963..016dbc9565e 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -1,4 +1,4 @@
-/* $NetBSD: vm_mmap.c,v 1.42 1995/10/10 01:27:11 mycroft Exp $ */
+/* $NetBSD: vm_mmap.c,v 1.43 1995/12/05 22:54:42 pk Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -827,15 +827,13 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
vm_offset_t off;
/* locate and allocate the target address space */
+ vm_map_lock(map);
if (fitit) {
/*
- * We cannot call vm_map_find() because
- * a proposed address may be vetoed by
- * the pmap module.
- * So we look for space ourselves, validate
- * it and insert it into the map.
+ * Find space in the map at a location
+ * that is compatible with the object/offset
+ * we're going to attach there.
*/
- vm_map_lock(map);
again:
if (vm_map_findspace(map, *addr, size,
addr) == 1) {
@@ -843,33 +841,40 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
} else {
vm_object_prefer(object, foff, addr);
rv = vm_map_insert(map, NULL,
- (vm_offset_t)0,
- *addr, *addr+size);
+ (vm_offset_t)0,
+ *addr, *addr+size);
+ /*
+ * vm_map_insert() may fail if
+ * vm_object_prefer() has altered
+ * the initial address.
+ * If so, we start again.
+ */
if (rv == KERN_NO_SPACE)
- /*
- * Modified address didn't fit
- * after all, the gap must
- * have been too small.
- */
goto again;
}
- vm_map_unlock(map);
} else {
- rv = vm_map_find(map, NULL, (vm_offset_t)0,
- addr, size, 0);
+ rv = vm_map_insert(map, NULL, (vm_offset_t)0,
+ *addr, *addr + size);
+#ifdef DEBUG
/*
* Check against PMAP preferred address. If
* there's a mismatch, these pages should not
* be shared with others. <howto?>
*/
- if (rv == KERN_SUCCESS) {
+ if (rv == KERN_SUCCESS &&
+ (mmapdebug & MDB_MAPIT)) {
vm_offset_t paddr = *addr;
vm_object_prefer(object, foff, &paddr);
if (paddr != *addr)
- printf("vm_mmap: pmap botch!\n");
+ printf(
+ "vm_mmap: pmap botch! "
+ "[foff %x, addr %x, paddr %x]\n",
+ foff, *addr, paddr);
}
+#endif
}
+ vm_map_unlock(map);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
@@ -879,7 +884,7 @@ vm_mmap(map, addr, size, prot, maxprot, flags, handle, foff)
VM_MIN_ADDRESS+size, TRUE);
off = VM_MIN_ADDRESS;
rv = vm_allocate_with_pager(tmap, &off, size,
- TRUE, pager,
+ FALSE, pager,
foff, FALSE);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
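The fitit branch above is easier to follow as a loop over free gaps: find
space, let vm_object_prefer() nudge the start address, and search again past
that point if the nudged range no longer fits the gap. Below is a toy
single-file model of that loop; the gap table, findspace(), insert(), and
the 16 KB alignment are all invented for the demo and stand in for
vm_map_findspace(), vm_map_insert(), and the MD preference.

#include <stdio.h>

static const struct gap {
	unsigned long start, end;
} gaps[] = {
	{ 0x11000, 0x15000 },	/* too small once aligned */
	{ 0x20000, 0x30000 },
};
#define NGAPS	(sizeof(gaps) / sizeof(gaps[0]))
#define ALIGN	0x4000UL	/* assumed preferred alignment */

static int
findspace(unsigned long hint, unsigned long size, unsigned long *addr)
{
	unsigned long i, start;

	for (i = 0; i < NGAPS; i++) {
		start = hint > gaps[i].start ? hint : gaps[i].start;
		if (start + size <= gaps[i].end) {
			*addr = start;
			return (0);
		}
	}
	return (1);			/* no space at all */
}

static int
insert(unsigned long addr, unsigned long size)
{
	unsigned long i;

	for (i = 0; i < NGAPS; i++)
		if (addr >= gaps[i].start && addr + size <= gaps[i].end)
			return (0);	/* KERN_SUCCESS */
	return (1);			/* KERN_NO_SPACE */
}

int
main(void)
{
	unsigned long addr = 0, size = 0x3000;

again:
	if (findspace(addr, size, &addr))
		return (1);
	addr = (addr + ALIGN - 1) & ~(ALIGN - 1);	/* "prefer" step */
	if (insert(addr, size))
		goto again;		/* aligned range overran the gap */
	printf("mapped at 0x%lx\n", addr);
	return (0);
}

Run, the demo first tries the small gap, loses the front of it to alignment,
and retries into the second gap, printing "mapped at 0x20000".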
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 2dc50bb47c8..81bb806cfa8 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1,4 +1,4 @@
-/* $NetBSD: vm_object.c,v 1.29 1995/07/13 12:35:29 pk Exp $ */
+/* $NetBSD: vm_object.c,v 1.31 1995/12/06 00:38:11 pk Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -1418,17 +1418,17 @@ vm_object_prefer(object, offset, addr)
register vm_page_t p;
register vm_offset_t paddr;
+#ifdef PMAP_PREFER
if (object == NULL)
- return;
+ goto first_map;
-#ifdef PMAP_PREFER
- vm_object_lock(object);
/*
* Look for the first page that the pmap layer has something
* to say about. Since an object maps a contiguous range of
* virtual addresses, this will determine the preferred origin
* of the proposed mapping.
*/
+ vm_object_lock(object);
for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
if (p->flags & (PG_FAKE | PG_FICTITIOUS))
continue;
@@ -1436,9 +1436,20 @@ vm_object_prefer(object, offset, addr)
if (paddr == (vm_offset_t)-1)
continue;
*addr = paddr - (p->offset - offset);
- break;
+ vm_object_unlock(object);
+ return;
}
vm_object_unlock(object);
+
+first_map:
+ /*
+ * No physical page attached; ask for a preferred address based
+ * only on the given virtual address.
+ */
+ paddr = PMAP_PREFER((vm_offset_t)-1, *addr);
+ if (paddr != (vm_offset_t)-1)
+ *addr = paddr;
+
#endif
}
/*
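The restructured vm_object_prefer() above has two exits: a resident page
with pmap information decides the address, and everything else, including a
NULL object, now falls through to first_map so the MD aligner still gets a
say. A minimal mirror of that control flow, assuming an invented
pmap_prefer_stub() in place of the real machine-dependent PMAP_PREFER macro:

#include <stdio.h>

#define NOVA	((unsigned long)-1)

/* Invented stand-in for the MD PMAP_PREFER macro: 8 KB rounding. */
static unsigned long
pmap_prefer_stub(unsigned long foff, unsigned long va)
{
	(void)foff;		/* a real aligner would use the offset */
	return ((va + 0x1fffUL) & ~0x1fffUL);
}

/*
 * Mirror of the new control flow (names invented): a resident page
 * with pmap information fixes the address; otherwise fall through to
 * the "first_map" case and ask the MD layer with only the proposed
 * virtual address in hand.
 */
static void
object_prefer(int have_page, unsigned long page_va, unsigned long *addr)
{
	unsigned long pref;

	if (have_page) {
		*addr = page_va;	/* align to the existing mapping */
		return;
	}
	/* first_map: no object or no resident page to consult */
	pref = pmap_prefer_stub(NOVA, *addr);
	if (pref != NOVA)
		*addr = pref;
}

int
main(void)
{
	unsigned long addr = 0x40001234UL;

	object_prefer(0, 0, &addr);
	printf("0x%lx\n", addr);	/* prints 0x40002000 */
	return (0);
}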
diff --git a/sys/vm/vm_user.c b/sys/vm/vm_user.c
index 26d8730445e..e0569fe2eff 100644
--- a/sys/vm/vm_user.c
+++ b/sys/vm/vm_user.c
@@ -1,4 +1,4 @@
-/* $NetBSD: vm_user.c,v 1.11 1994/10/20 04:27:34 cgd Exp $ */
+/* $NetBSD: vm_user.c,v 1.12 1995/12/05 22:54:39 pk Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -276,6 +276,7 @@ vm_allocate_with_pager(map, addr, size, anywhere, pager, poffset, internal)
{
register vm_object_t object;
register int result;
+ vm_offset_t start;
if (map == NULL)
return(KERN_INVALID_ARGUMENT);
@@ -309,7 +310,25 @@ vm_allocate_with_pager(map, addr, size, anywhere, pager, poffset, internal)
cnt.v_nzfod -= atop(size);
}
- result = vm_map_find(map, object, poffset, addr, size, anywhere);
+ start = *addr;
+ vm_map_lock(map);
+ if (anywhere) {
+ again:
+ if (vm_map_findspace(map, start, size, addr))
+ result = KERN_NO_SPACE;
+ else {
+ vm_object_prefer(object, poffset, addr);
+ start = *addr;
+ result = vm_map_insert(map, object, poffset,
+ start, start + size);
+ if (result == KERN_NO_SPACE)
+ goto again;
+ }
+ } else
+ result = vm_map_insert(map, object, poffset,
+ start, start + size);
+ vm_map_unlock(map);
+
if (result != KERN_SUCCESS)
vm_object_deallocate(object);
else if (pager != NULL)
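vm_allocate_with_pager() now does by hand what vm_map_find() used to do for
it: with anywhere set it runs the same findspace/prefer/insert retry shown
for vm_mmap() above, and with anywhere clear it inserts at the caller's
address untouched. That split is presumably why the vm_mmap() temporary-map
call earlier in this patch flips its anywhere argument from TRUE to FALSE:
off must stay pinned at VM_MIN_ADDRESS in the empty temporary map. A compact
model of the two paths (invented names, one free gap, retry elided since the
vm_mmap() model above shows it):

#include <stdio.h>

#define GAP_START	0x10000UL
#define GAP_END		0x20000UL
#define PREF_ALIGN	0x4000UL	/* assumed MD preference */

static int
map_insert(unsigned long start, unsigned long size)
{
	/* nonzero result plays the role of KERN_NO_SPACE */
	return ((start >= GAP_START && start + size <= GAP_END) ? 0 : 1);
}

static int
allocate(unsigned long *addr, unsigned long size, int anywhere)
{
	unsigned long start = *addr;

	if (!anywhere)
		return (map_insert(start, size));	/* fixed: no prefer */
	if (start < GAP_START)
		start = GAP_START;			/* findspace */
	start = (start + PREF_ALIGN - 1) & ~(PREF_ALIGN - 1); /* prefer */
	if (map_insert(start, size))
		return (1);		/* retry omitted; see above */
	*addr = start;
	return (0);
}

int
main(void)
{
	unsigned long addr = 0;

	if (allocate(&addr, 0x2000, 1) == 0)
		printf("anywhere: 0x%lx\n", addr);	/* 0x10000 */
	addr = 0x18000;
	if (allocate(&addr, 0x2000, 0) == 0)
		printf("fixed:    0x%lx\n", addr);	/* 0x18000 */
	return (0);
}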