Diffstat (limited to 'sys/kern/exec_subr.c')
-rw-r--r--	sys/kern/exec_subr.c	| 43
1 file changed, 14 insertions(+), 29 deletions(-)
diff --git a/sys/kern/exec_subr.c b/sys/kern/exec_subr.c
index e79db64dcae..1d816ded073 100644
--- a/sys/kern/exec_subr.c
+++ b/sys/kern/exec_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: exec_subr.c,v 1.15 2001/11/27 05:27:11 art Exp $ */
+/* $OpenBSD: exec_subr.c,v 1.16 2001/11/28 13:47:39 art Exp $ */
/* $NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $ */
/*
@@ -138,14 +138,8 @@ vmcmd_map_pagedvn(p, cmd)
struct proc *p;
struct exec_vmcmd *cmd;
{
- /*
- * note that if you're going to map part of an process as being
- * paged from a vnode, that vnode had damn well better be marked as
- * VTEXT. that's handled in the routine which sets up the vmcmd to
- * call this routine.
- */
struct uvm_object *uobj;
- int retval;
+ int error;
/*
* map the vnode in using uvm_map.
@@ -173,24 +167,16 @@ vmcmd_map_pagedvn(p, cmd)
* do the map
*/
- retval = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
+ error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
uobj, cmd->ev_offset, 0,
UVM_MAPFLAG(cmd->ev_prot, VM_PROT_ALL, UVM_INH_COPY,
UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
- /*
- * check for error
- */
-
- if (retval == KERN_SUCCESS)
- return(0);
-
- /*
- * error: detach from object
- */
+ if (error) {
+ uobj->pgops->pgo_detach(uobj);
+ }
- uobj->pgops->pgo_detach(uobj);
- return(EINVAL);
+ return(error);
}
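
The hunk above drops the old KERN_SUCCESS/EINVAL translation: the errno-style return from uvm_map() is now propagated as-is, and the object reference is detached only when the mapping failed. A minimal standalone sketch of that shape, using map_object() and detach_object() as hypothetical stand-ins for uvm_map() and uobj->pgops->pgo_detach():

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for uvm_map(): returns 0 or an errno. */
static int
map_object(void *obj)
{
	return (obj == NULL ? EINVAL : 0);
}

/* Hypothetical stand-in for pgo_detach(): would drop the reference taken earlier. */
static void
detach_object(void *obj)
{
	(void)obj;
}

/*
 * Same shape as the patched vmcmd_map_pagedvn(): pass the error from
 * the mapping step straight through, detaching the object only when
 * that step failed.
 */
static int
map_paged(void *obj)
{
	int error;

	error = map_object(obj);
	if (error)
		detach_object(obj);
	return (error);
}

int
main(void)
{
	printf("map_paged(NULL) -> %d\n", map_paged(NULL));
	return (0);
}

Returning the uvm_map() error unchanged also means callers see the real failure code (ENOMEM and friends) instead of a blanket EINVAL.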
/*
@@ -207,7 +193,7 @@ vmcmd_map_readvn(p, cmd)
int error;
if (cmd->ev_len == 0)
- return(KERN_SUCCESS); /* XXXCDC: should it happen? */
+ return (0);
cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
@@ -217,13 +203,13 @@ vmcmd_map_readvn(p, cmd)
UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));
if (error)
- return error;
+ return (error);
error = vn_rdwr(UIO_READ, cmd->ev_vp, (caddr_t)cmd->ev_addr,
cmd->ev_len, cmd->ev_offset, UIO_USERSPACE, IO_UNIT|IO_NODELOCKED,
p->p_ucred, NULL, p);
if (error)
- return error;
+ return (error);
if (cmd->ev_prot != (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) {
/*
@@ -232,13 +218,12 @@ vmcmd_map_readvn(p, cmd)
* it mapped read-only, so now we are going to have to call
* uvm_map_protect() to fix up the protection. ICK.
*/
- return(uvm_map_protect(&p->p_vmspace->vm_map,
+ return (uvm_map_protect(&p->p_vmspace->vm_map,
trunc_page(cmd->ev_addr),
round_page(cmd->ev_addr + cmd->ev_len),
cmd->ev_prot, FALSE));
- } else {
- return(KERN_SUCCESS);
}
+ return (0);
}
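
vmcmd_map_readvn() keeps its map/read/protect sequence; the patch only makes every exit report 0 or an errno. A rough userland sketch of that flow, assuming hypothetical helpers map_writable(), read_into() and protect() in place of uvm_map(), vn_rdwr() and uvm_map_protect():

#include <errno.h>
#include <string.h>

/* Hypothetical stand-in for uvm_map(): pretend mapping, fully writable. */
static int
map_writable(char *dst, size_t len)
{
	memset(dst, 0, len);
	return (0);
}

/* Hypothetical stand-in for vn_rdwr(): pretend vnode read into the mapping. */
static int
read_into(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);
	return (0);
}

/* Hypothetical stand-in for uvm_map_protect(): pretend protection change. */
static int
protect(char *dst, size_t len, int prot)
{
	(void)dst; (void)len; (void)prot;
	return (0);
}

#define PROT_ALL	7	/* illustrative stand-in for VM_PROT_READ|WRITE|EXECUTE */

/*
 * Same shape as the patched vmcmd_map_readvn(): map writable, read the
 * data in, then tighten the protection if less than everything was
 * requested; every exit returns 0 or an errno.
 */
static int
map_read(char *dst, const char *src, size_t len, int prot)
{
	int error;

	error = map_writable(dst, len);
	if (error)
		return (error);
	error = read_into(dst, src, len);
	if (error)
		return (error);
	if (prot != PROT_ALL)
		return (protect(dst, len, prot));
	return (0);
}

int
main(void)
{
	char buf[16];

	return (map_read(buf, "hello", 6, 5));	/* 5 ~ read|execute */
}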
/*
@@ -255,7 +240,7 @@ vmcmd_map_zero(p, cmd)
int error;
if (cmd->ev_len == 0)
- return(KERN_SUCCESS); /* XXXCDC: should it happen? */
+ return (0);
cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
@@ -266,7 +251,7 @@ vmcmd_map_zero(p, cmd)
if (error)
return error;
- return(KERN_SUCCESS);
+ return (0);
}
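
With all three handlers now returning 0 or an errno, a caller can walk a command list and stop at the first failure without translating Mach VM codes. A hypothetical sketch of such a loop; struct vmcmd, run_vmcmds() and the sample handlers are illustrative only, not the kernel's exec_vmcmd machinery:

#include <errno.h>
#include <stddef.h>

/* Illustrative command record; not the kernel's struct exec_vmcmd. */
struct vmcmd {
	int	(*ev_proc)(void *);	/* handler returning 0 or an errno */
	void	*ev_arg;
};

static int
zero_cmd(void *arg)
{
	(void)arg;
	return (0);
}

static int
failing_cmd(void *arg)
{
	(void)arg;
	return (ENOMEM);
}

/*
 * Run each command in order; the first non-zero (errno) return stops
 * the walk and is handed back to the caller unchanged.
 */
static int
run_vmcmds(struct vmcmd *cmds, size_t ncmds)
{
	size_t i;
	int error;

	for (i = 0; i < ncmds; i++) {
		error = cmds[i].ev_proc(cmds[i].ev_arg);
		if (error)
			return (error);
	}
	return (0);
}

int
main(void)
{
	struct vmcmd cmds[] = {
		{ zero_cmd, NULL },
		{ failing_cmd, NULL },
	};

	return (run_vmcmds(cmds, 2) == ENOMEM ? 0 : 1);
}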
/*