summaryrefslogtreecommitdiff
path: root/sys/kern/exec_subr.c
diff options
context:
space:
mode:
authorArtur Grabowski <art@cvs.openbsd.org>1999-02-26 05:14:28 +0000
committerArtur Grabowski <art@cvs.openbsd.org>1999-02-26 05:14:28 +0000
commitedaa202c93517022a2282c5bbef59c502898d0ec (patch)
treeb97c8d54ec043e3d8e58a0182c3eaf91e8ec44c5 /sys/kern/exec_subr.c
parent0bcdaa68dd16b1ab659a27d91f6181029d41f99a (diff)
a new way of mapping in exec for uvm
Diffstat (limited to 'sys/kern/exec_subr.c')
-rw-r--r--  sys/kern/exec_subr.c  101
1 files changed, 100 insertions, 1 deletions
diff --git a/sys/kern/exec_subr.c b/sys/kern/exec_subr.c
index 32b93934f81..054fae50571 100644
--- a/sys/kern/exec_subr.c
+++ b/sys/kern/exec_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: exec_subr.c,v 1.5 1998/07/28 00:13:01 millert Exp $ */
+/* $OpenBSD: exec_subr.c,v 1.6 1999/02/26 05:14:27 art Exp $ */
/* $NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $ */
/*
@@ -43,6 +43,10 @@
#include <vm/vm.h>
+#if defined(UVM)
+#include <uvm/uvm.h>
+#endif
+
#ifdef DEBUG
/*
* new_vmcmd():
@@ -142,9 +146,57 @@ vmcmd_map_pagedvn(p, cmd)
* VTEXT. that's handled in the routine which sets up the vmcmd to
* call this routine.
*/
+#if defined(UVM)
+ struct uvm_object *uobj;
+ int retval;
+
+ /*
+ * map the vnode in using uvm_map.
+ */
+
+ /* checks imported from uvm_mmap, needed? */
+ if (cmd->ev_len == 0)
+ return(0);
+ if (cmd->ev_offset & PAGE_MASK)
+ return(EINVAL);
+ if (cmd->ev_addr & PAGE_MASK)
+ return(EINVAL);
+
+ /*
+ * first, attach to the object
+ */
+
+ uobj = uvn_attach((void *) cmd->ev_vp, VM_PROT_READ|VM_PROT_EXECUTE);
+ if (uobj == NULL)
+ return(ENOMEM);
+
+ /*
+ * do the map
+ */
+
+ retval = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
+ uobj, cmd->ev_offset,
+ UVM_MAPFLAG(cmd->ev_prot, VM_PROT_ALL, UVM_INH_COPY,
+ UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
+
+ /*
+ * check for error
+ */
+
+ if (retval == KERN_SUCCESS)
+ return(0);
+
+ /*
+ * error: detach from object
+ */
+
+ uobj->pgops->pgo_detach(uobj);
+ return(EINVAL);
+#else
return vm_mmap(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
cmd->ev_prot, VM_PROT_ALL, MAP_FIXED|MAP_COPY, (caddr_t)cmd->ev_vp,
cmd->ev_offset);
+#endif
}
/*
@@ -160,8 +212,21 @@ vmcmd_map_readvn(p, cmd)
{
int error;
+#if defined(UVM)
+ if (cmd->ev_len == 0)
+ return(KERN_SUCCESS); /* XXXCDC: should it happen? */
+
+ cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
+ error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
+ round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_COPY,
+ UVM_ADV_NORMAL,
+ UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));
+
+#else
error = vm_allocate(&p->p_vmspace->vm_map, &cmd->ev_addr,
cmd->ev_len, 0);
+#endif
if (error)
return error;
@@ -171,8 +236,25 @@ vmcmd_map_readvn(p, cmd)
if (error)
return error;
+#if defined(UVM)
+ if (cmd->ev_prot != (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) {
+ /*
+ * we had to map in the area at PROT_ALL so that vn_rdwr()
+ * could write to it. however, the caller seems to want
+ * it mapped read-only, so now we are going to have to call
+ * uvm_map_protect() to fix up the protection. ICK.
+ */
+ return(uvm_map_protect(&p->p_vmspace->vm_map,
+ trunc_page(cmd->ev_addr),
+ round_page(cmd->ev_addr + cmd->ev_len),
+ cmd->ev_prot, FALSE));
+ } else {
+ return(KERN_SUCCESS);
+ }
+#else
return vm_map_protect(&p->p_vmspace->vm_map, trunc_page(cmd->ev_addr),
round_page(cmd->ev_addr + cmd->ev_len), cmd->ev_prot, FALSE);
+#endif
}
/*
@@ -188,13 +270,30 @@ vmcmd_map_zero(p, cmd)
{
int error;
+#if defined(UVM)
+ if (cmd->ev_len == 0)
+ return(KERN_SUCCESS); /* XXXCDC: should it happen? */
+
+ cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
+ error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
+ round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET,
+ UVM_MAPFLAG(cmd->ev_prot, UVM_PROT_ALL, UVM_INH_COPY,
+ UVM_ADV_NORMAL,
+ UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
+
+#else
error = vm_allocate(&p->p_vmspace->vm_map, &cmd->ev_addr,
cmd->ev_len, 0);
+#endif
if (error)
return error;
+#if !defined(UVM)
return vm_map_protect(&p->p_vmspace->vm_map, trunc_page(cmd->ev_addr),
round_page(cmd->ev_addr + cmd->ev_len), cmd->ev_prot, FALSE);
+#else
+ return(KERN_SUCCESS);
+#endif
}
/*