author    Martin Pieuchot <mpi@cvs.openbsd.org>    2021-03-26 13:40:06 +0000
committer Martin Pieuchot <mpi@cvs.openbsd.org>    2021-03-26 13:40:06 +0000
commit    add89e71d9c5838dfefc6777e4811780334de97d (patch)
tree      75fa848c0df1e6d346a74623681e86d6d30f1ee2 /sys
parent    b5e05aaf3d967427bc39a710364710a4d5e75add (diff)
Remove parentheses around return values to reduce the diff with NetBSD.
No functional change. ok mlarkin@
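
The change below is purely syntactic: in C, return is a statement rather
than a function call, so the parentheses around its operand are redundant
and the generated code is identical either way. A minimal sketch, using a
hypothetical function that is not part of this commit:

	#include <errno.h>	/* EINVAL */

	/*
	 * Illustrative only -- not from sys/uvm. Both return statements
	 * compile to the same code; the second matches the NetBSD style
	 * this commit converges on.
	 */
	int
	example_check(int val)
	{
		if (val < 0)
			return (EINVAL);	/* old style: parenthesized */
		return val;			/* new style: bare expression */
	}

Note that only single-line return statements are converted; returns whose
expression spans multiple lines, such as the uvm_map_protect() and
uvm_map_inherit() calls in uvm_mmap.c below, keep their parentheses.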
Diffstat (limited to 'sys')
-rw-r--r--  sys/uvm/uvm_amap.c  |  14
-rw-r--r--  sys/uvm/uvm_anon.c  |   4
-rw-r--r--  sys/uvm/uvm_aobj.c  |  42
-rw-r--r--  sys/uvm/uvm_fault.c |  12
-rw-r--r--  sys/uvm/uvm_glue.c  |   4
-rw-r--r--  sys/uvm/uvm_km.c    |  24
-rw-r--r--  sys/uvm/uvm_map.c   |  10
-rw-r--r--  sys/uvm/uvm_mmap.c  | 148
-rw-r--r--  sys/uvm/uvm_page.c  |  34
-rw-r--r--  sys/uvm/uvm_page.h  |   6
-rw-r--r--  sys/uvm/uvm_pager.c |   8
-rw-r--r--  sys/uvm/uvm_swap.c  |   4
-rw-r--r--  sys/uvm/uvm_vnode.c |  42
13 files changed, 176 insertions, 176 deletions
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index 52b2f3998c8..f5076a5c350 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.88 2021/03/20 10:24:21 mpi Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.89 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -342,7 +342,7 @@ amap_alloc1(int slots, int waitf, int lazyalloc)
amap = pool_get(&uvm_small_amap_pool[slots - 1],
pwaitf | PR_ZERO);
if (amap == NULL)
- return(NULL);
+ return NULL;
amap->am_lock = NULL;
amap->am_ref = 1;
@@ -355,7 +355,7 @@ amap_alloc1(int slots, int waitf, int lazyalloc)
if (UVM_AMAP_SMALL(amap)) {
amap->am_small.ac_nslot = slots;
- return (amap);
+ return amap;
}
amap->am_ncused = 0;
@@ -392,14 +392,14 @@ amap_alloc1(int slots, int waitf, int lazyalloc)
}
}
- return(amap);
+ return amap;
fail1:
free(amap->am_buckets, M_UVMAMAP, buckets * sizeof(*amap->am_buckets));
TAILQ_FOREACH_SAFE(chunk, &amap->am_chunks, ac_list, tmp)
pool_put(&uvm_amap_chunk_pool, chunk);
pool_put(&uvm_amap_pool, amap);
- return (NULL);
+ return NULL;
}
static void
@@ -423,7 +423,7 @@ amap_alloc(vaddr_t sz, int waitf, int lazyalloc)
AMAP_B2SLOT(slots, sz); /* load slots */
if (slots > INT_MAX)
- return (NULL);
+ return NULL;
amap = amap_alloc1(slots, waitf, lazyalloc);
if (amap != NULL) {
@@ -431,7 +431,7 @@ amap_alloc(vaddr_t sz, int waitf, int lazyalloc)
amap_list_insert(amap);
}
- return(amap);
+ return amap;
}
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index e07838ea57d..496df417e07 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_anon.c,v 1.53 2021/03/20 10:24:21 mpi Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.54 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -67,7 +67,7 @@ uvm_analloc(void)
anon->an_page = NULL;
anon->an_swslot = 0;
}
- return(anon);
+ return anon;
}
/*
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index 9c694abc9f2..db74848643e 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_aobj.c,v 1.92 2021/03/20 10:24:21 mpi Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.93 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -211,7 +211,7 @@ uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, boolean_t create)
*/
LIST_FOREACH(elt, swhash, list) {
if (elt->tag == page_tag)
- return(elt);
+ return elt;
}
if (!create)
@@ -234,7 +234,7 @@ uao_find_swhash_elt(struct uvm_aobj *aobj, int pageidx, boolean_t create)
LIST_INSERT_HEAD(swhash, elt, list);
elt->tag = page_tag;
- return(elt);
+ return elt;
}
/*
@@ -248,7 +248,7 @@ uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
* if noswap flag is set, then we never return a slot
*/
if (aobj->u_flags & UAO_FLAG_NOSWAP)
- return(0);
+ return 0;
/*
* if hashing, look in hash table.
@@ -258,15 +258,15 @@ uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
uao_find_swhash_elt(aobj, pageidx, FALSE);
if (elt)
- return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
+ return UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
else
- return(0);
+ return 0;
}
/*
* otherwise, look in the array
*/
- return(aobj->u_swslots[pageidx]);
+ return aobj->u_swslots[pageidx];
}
/*
@@ -289,7 +289,7 @@ uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
*/
if (aobj->u_flags & UAO_FLAG_NOSWAP) {
if (slot == 0)
- return(0); /* a clear is ok */
+ return 0; /* a clear is ok */
/* but a set is not */
printf("uao_set_swslot: uobj = %p\n", uobj);
@@ -309,7 +309,7 @@ uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
if (elt == NULL) {
KASSERT(slot == 0);
- return (0);
+ return 0;
}
oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
@@ -336,7 +336,7 @@ uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
oldslot = aobj->u_swslots[pageidx];
aobj->u_swslots[pageidx] = slot;
}
- return (oldslot);
+ return oldslot;
}
/*
* end of hash/array functions
@@ -749,7 +749,7 @@ uao_create(vsize_t size, int flags)
if (aobj->u_swhash == NULL) {
if (flags & UAO_FLAG_CANFAIL) {
pool_put(&uvm_aobj_pool, aobj);
- return (NULL);
+ return NULL;
}
panic("uao_create: hashinit swhash failed");
}
@@ -759,7 +759,7 @@ uao_create(vsize_t size, int flags)
if (aobj->u_swslots == NULL) {
if (flags & UAO_FLAG_CANFAIL) {
pool_put(&uvm_aobj_pool, aobj);
- return (NULL);
+ return NULL;
}
panic("uao_create: malloc swslots failed");
}
@@ -767,7 +767,7 @@ uao_create(vsize_t size, int flags)
if (flags & UAO_FLAG_KERNSWAP) {
aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
- return(&aobj->u_obj);
+ return &aobj->u_obj;
/* done! */
}
}
@@ -784,7 +784,7 @@ uao_create(vsize_t size, int flags)
LIST_INSERT_HEAD(&uao_list, aobj, u_list);
mtx_leave(&uao_list_lock);
- return(&aobj->u_obj);
+ return &aobj->u_obj;
}
@@ -940,7 +940,7 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
* or deactivating pages.
*/
if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
- return (TRUE);
+ return TRUE;
curoff = start;
for (;;) {
@@ -1016,7 +1016,7 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
}
}
- return (TRUE);
+ return TRUE;
}
/*
@@ -1118,10 +1118,10 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
*npagesp = gotpages;
if (done)
/* bingo! */
- return(VM_PAGER_OK);
+ return VM_PAGER_OK;
else
/* EEK! Need to unlock and I/O */
- return(VM_PAGER_UNLOCK);
+ return VM_PAGER_UNLOCK;
}
/*
@@ -1249,7 +1249,7 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
uvm_pagefree(ptmp);
uvm_unlock_pageq();
- return (rv);
+ return rv;
}
}
@@ -1269,7 +1269,7 @@ uao_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
} /* lcv loop */
- return(VM_PAGER_OK);
+ return VM_PAGER_OK;
}
/*
@@ -1284,7 +1284,7 @@ uao_dropswap(struct uvm_object *uobj, int pageidx)
if (slot) {
uvm_swap_free(slot, 1);
}
- return (slot);
+ return slot;
}
/*
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index a4d0337d469..6660d05b855 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault.c,v 1.119 2021/03/20 10:24:21 mpi Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.120 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */
/*
@@ -1688,7 +1688,7 @@ uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
while (1) {
if (ufi->orig_rvaddr < ufi->map->min_offset ||
ufi->orig_rvaddr >= ufi->map->max_offset)
- return(FALSE);
+ return FALSE;
/* lock map */
if (write_lock) {
@@ -1701,7 +1701,7 @@ uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
&ufi->entry)) {
uvmfault_unlockmaps(ufi, write_lock);
- return(FALSE);
+ return FALSE;
}
/* reduce size if necessary */
@@ -1723,7 +1723,7 @@ uvmfault_lookup(struct uvm_faultinfo *ufi, boolean_t write_lock)
* got it!
*/
ufi->mapv = ufi->map->timestamp;
- return(TRUE);
+ return TRUE;
} /* while loop */
@@ -1756,9 +1756,9 @@ uvmfault_relock(struct uvm_faultinfo *ufi)
vm_map_lock_read(ufi->map);
if (ufi->mapv != ufi->map->timestamp) {
vm_map_unlock_read(ufi->map);
- return(FALSE);
+ return FALSE;
}
counters_inc(uvmexp_counters, flt_relckok);
- return(TRUE); /* got it! */
+ return TRUE; /* got it! */
}
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index ee1bca5eac4..a8818a65ec5 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_glue.c,v 1.79 2021/03/12 14:15:49 jsg Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.80 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $ */
/*
@@ -97,7 +97,7 @@ uvm_kernacc(caddr_t addr, size_t len, int rw)
rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
vm_map_unlock_read(kernel_map);
- return(rv);
+ return rv;
}
/*
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index af6e17963a5..a30c0211860 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.142 2021/03/20 10:24:21 mpi Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.143 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
@@ -343,12 +343,12 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
valign, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
MAP_INHERIT_NONE, MADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
- return(0);
+ return 0;
}
/* if all we wanted was VA, return now */
if (flags & UVM_KMF_VALLOC) {
- return(kva);
+ return kva;
}
/* recover object offset from virtual address */
@@ -405,7 +405,7 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
KASSERT(TAILQ_EMPTY(&pgl));
pmap_update(pmap_kernel());
- return(kva);
+ return kva;
}
/*
@@ -461,7 +461,7 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
UVM_MAPFLAG(PROT_READ | PROT_WRITE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_INHERIT_NONE, MADV_RANDOM, 0)) != 0)) {
- return(0);
+ return 0;
}
/* recover object offset from virtual address */
@@ -512,7 +512,7 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
if (zeroit)
memset((caddr_t)kva, 0, loopva - kva);
- return(kva);
+ return kva;
}
/*
@@ -524,13 +524,13 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
vaddr_t
uvm_km_valloc(struct vm_map *map, vsize_t size)
{
- return(uvm_km_valloc_align(map, size, 0, 0));
+ return uvm_km_valloc_align(map, size, 0, 0);
}
vaddr_t
uvm_km_valloc_try(struct vm_map *map, vsize_t size)
{
- return(uvm_km_valloc_align(map, size, 0, UVM_FLAG_TRYLOCK));
+ return uvm_km_valloc_align(map, size, 0, UVM_FLAG_TRYLOCK);
}
vaddr_t
@@ -550,10 +550,10 @@ uvm_km_valloc_align(struct vm_map *map, vsize_t size, vsize_t align, int flags)
UVM_UNKNOWN_OFFSET, align,
UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
MAP_INHERIT_NONE, MADV_RANDOM, flags)) != 0)) {
- return(0);
+ return 0;
}
- return(kva);
+ return kva;
}
/*
@@ -572,7 +572,7 @@ uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer)
size = round_page(size);
if (size > vm_map_max(map) - vm_map_min(map))
- return(0);
+ return 0;
while (1) {
kva = vm_map_min(map); /* hint */
@@ -585,7 +585,7 @@ uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer)
prefer, 0,
UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
MAP_INHERIT_NONE, MADV_RANDOM, 0)) == 0)) {
- return(kva);
+ return kva;
}
/* failed. sleep for a while (on map) */
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index d62981f3c74..c3d9190ca78 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.273 2021/03/12 14:15:49 jsg Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.274 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -1000,7 +1000,7 @@ uvm_mapanon(struct vm_map *map, vaddr_t *addr, vsize_t sz,
*/
new = uvm_mapent_alloc(map, flags);
if (new == NULL)
- return(ENOMEM);
+ return ENOMEM;
vm_map_lock(map);
first = last = NULL;
@@ -1229,7 +1229,7 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
*/
new = uvm_mapent_alloc(map, flags);
if (new == NULL)
- return(ENOMEM);
+ return ENOMEM;
if (flags & UVM_FLAG_TRYLOCK) {
if (vm_map_lock_try(map) == FALSE) {
@@ -1759,7 +1759,7 @@ uvm_mapent_alloc(struct vm_map *map, int flags)
RBT_POISON(uvm_map_addr, me, UVMMAP_DEADBEEF);
out:
- return(me);
+ return me;
}
/*
@@ -4229,7 +4229,7 @@ uvm_map_submap(struct vm_map *map, vaddr_t start, vaddr_t end,
result = EINVAL;
vm_map_unlock(map);
- return(result);
+ return result;
}
/*
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index dd78690e878..4b969cb0fc6 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_mmap.c,v 1.163 2020/10/07 12:26:20 mpi Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.164 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -92,14 +92,14 @@ int uvm_mmapfile(vm_map_t, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int,
pageoff = (addr & PAGE_MASK); \
if (pageoff != 0) { \
if (size > SIZE_MAX - pageoff) \
- return (EINVAL); /* wraparound */ \
+ return EINVAL; /* wraparound */ \
addr -= pageoff; \
size += pageoff; \
} \
if (size != 0) { \
size = (vsize_t)round_page(size); \
if (size == 0) \
- return (EINVAL); /* wraparound */ \
+ return EINVAL; /* wraparound */ \
} \
} while (0)
@@ -140,14 +140,14 @@ sys_mquery(struct proc *p, void *v, register_t *retval)
fd = SCARG(uap, fd);
if ((prot & PROT_MASK) != prot)
- return (EINVAL);
+ return EINVAL;
if (SCARG(uap, flags) & MAP_FIXED)
flags |= UVM_FLAG_FIXED;
if (fd >= 0) {
if ((error = getvnode(p, fd, &fp)) != 0)
- return (error);
+ return error;
uoff = SCARG(uap, pos);
} else {
fp = NULL;
@@ -165,7 +165,7 @@ sys_mquery(struct proc *p, void *v, register_t *retval)
if (fp != NULL)
FRELE(fp, p);
- return (error);
+ return error;
}
int uvm_wxabort;
@@ -181,7 +181,7 @@ uvm_wxcheck(struct proc *p, char *call)
(pr->ps_textvp->v_mount->mnt_flag & MNT_WXALLOWED));
if (wxallowed && (pr->ps_flags & PS_WXNEEDED))
- return (0);
+ return 0;
if (uvm_wxabort) {
/* Report W^X failures */
@@ -192,7 +192,7 @@ uvm_wxcheck(struct proc *p, char *call)
sigexit(p, SIGABRT);
}
- return (ENOTSUP);
+ return ENOTSUP;
}
/*
@@ -239,33 +239,33 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
* Validate the flags.
*/
if ((prot & PROT_MASK) != prot)
- return (EINVAL);
+ return EINVAL;
if ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC) &&
(error = uvm_wxcheck(p, "mmap")))
- return (error);
+ return error;
if ((flags & MAP_FLAGMASK) != flags)
- return (EINVAL);
+ return EINVAL;
if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
- return (EINVAL);
+ return EINVAL;
if ((flags & (MAP_FIXED|__MAP_NOREPLACE)) == __MAP_NOREPLACE)
- return (EINVAL);
+ return EINVAL;
if (flags & MAP_STACK) {
if ((flags & (MAP_ANON|MAP_PRIVATE)) != (MAP_ANON|MAP_PRIVATE))
- return (EINVAL);
+ return EINVAL;
if (flags & ~(MAP_STACK|MAP_FIXED|MAP_ANON|MAP_PRIVATE))
- return (EINVAL);
+ return EINVAL;
if (pos != 0)
- return (EINVAL);
+ return EINVAL;
if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE))
- return (EINVAL);
+ return EINVAL;
}
if (size == 0)
- return (EINVAL);
+ return EINVAL;
error = pledge_protexec(p, prot);
if (error)
- return (error);
+ return error;
/* align file position and save offset. adjust size. */
ALIGN_ADDR(pos, size, pageoff);
@@ -275,15 +275,15 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
/* adjust address by the same amount as we did the offset */
addr -= pageoff;
if (addr & PAGE_MASK)
- return (EINVAL); /* not page aligned */
+ return EINVAL; /* not page aligned */
if (addr > SIZE_MAX - size)
- return (EINVAL); /* no wrapping! */
+ return EINVAL; /* no wrapping! */
if (VM_MAXUSER_ADDRESS > 0 &&
(addr + size) > VM_MAXUSER_ADDRESS)
- return (EINVAL);
+ return EINVAL;
if (vm_min_address > 0 && addr < vm_min_address)
- return (EINVAL);
+ return EINVAL;
}
/* check for file mappings (i.e. not anonymous) and verify file. */
@@ -430,13 +430,13 @@ is_anon: /* label for SunOS style /dev/zero */
/* remember to add offset */
*retval = (register_t)(addr + pageoff);
- return (error);
+ return error;
out:
KERNEL_UNLOCK();
if (fp)
FRELE(fp, p);
- return (error);
+ return error;
}
/*
@@ -465,14 +465,14 @@ sys_msync(struct proc *p, void *v, register_t *retval)
if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
(flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
(flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
- return (EINVAL);
+ return EINVAL;
if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
flags |= MS_SYNC;
/* align the address to a page boundary, and adjust the size accordingly */
ALIGN_ADDR(addr, size, pageoff);
if (addr > SIZE_MAX - size)
- return (EINVAL); /* disallow wrap-around. */
+ return EINVAL; /* disallow wrap-around. */
/* get map */
map = &p->p_vmspace->vm_map;
@@ -486,7 +486,7 @@ sys_msync(struct proc *p, void *v, register_t *retval)
else
uvmflags |= PGO_SYNCIO; /* XXXCDC: force sync for now! */
- return (uvm_map_clean(map, addr, addr+size, uvmflags));
+ return uvm_map_clean(map, addr, addr+size, uvmflags);
}
/*
@@ -517,11 +517,11 @@ sys_munmap(struct proc *p, void *v, register_t *retval)
* Note that VM_*_ADDRESS are not constants due to casts (argh).
*/
if (addr > SIZE_MAX - size)
- return (EINVAL);
+ return EINVAL;
if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
- return (EINVAL);
+ return EINVAL;
if (vm_min_address > 0 && addr < vm_min_address)
- return (EINVAL);
+ return EINVAL;
map = &p->p_vmspace->vm_map;
@@ -533,7 +533,7 @@ sys_munmap(struct proc *p, void *v, register_t *retval)
*/
if (!uvm_map_checkprot(map, addr, addr + size, PROT_NONE)) {
vm_map_unlock(map);
- return (EINVAL);
+ return EINVAL;
}
TAILQ_INIT(&dead_entries);
@@ -542,7 +542,7 @@ sys_munmap(struct proc *p, void *v, register_t *retval)
uvm_unmap_detach(&dead_entries, 0);
- return (0);
+ return 0;
}
/*
@@ -570,21 +570,21 @@ sys_mprotect(struct proc *p, void *v, register_t *retval)
prot = SCARG(uap, prot);
if ((prot & PROT_MASK) != prot)
- return (EINVAL);
+ return EINVAL;
if ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC) &&
(error = uvm_wxcheck(p, "mprotect")))
- return (error);
+ return error;
error = pledge_protexec(p, prot);
if (error)
- return (error);
+ return error;
/*
* align the address to a page boundary, and adjust the size accordingly
*/
ALIGN_ADDR(addr, size, pageoff);
if (addr > SIZE_MAX - size)
- return (EINVAL); /* disallow wrap-around. */
+ return EINVAL; /* disallow wrap-around. */
return (uvm_map_protect(&p->p_vmspace->vm_map, addr, addr+size,
prot, FALSE));
@@ -611,9 +611,9 @@ sys_msyscall(struct proc *p, void *v, register_t *retval)
*/
ALIGN_ADDR(addr, size, pageoff);
if (addr > SIZE_MAX - size)
- return (EINVAL); /* disallow wrap-around. */
+ return EINVAL; /* disallow wrap-around. */
- return (uvm_map_syscall(&p->p_vmspace->vm_map, addr, addr+size));
+ return uvm_map_syscall(&p->p_vmspace->vm_map, addr, addr+size);
}
/*
@@ -640,7 +640,7 @@ sys_minherit(struct proc *p, void *v, register_t *retval)
*/
ALIGN_ADDR(addr, size, pageoff);
if (addr > SIZE_MAX - size)
- return (EINVAL); /* disallow wrap-around. */
+ return EINVAL; /* disallow wrap-around. */
return (uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr+size,
inherit));
@@ -671,7 +671,7 @@ sys_madvise(struct proc *p, void *v, register_t *retval)
*/
ALIGN_ADDR(addr, size, pageoff);
if (addr > SIZE_MAX - size)
- return (EINVAL); /* disallow wrap-around. */
+ return EINVAL; /* disallow wrap-around. */
switch (advice) {
case MADV_NORMAL:
@@ -691,7 +691,7 @@ sys_madvise(struct proc *p, void *v, register_t *retval)
* Should invent a "weak" mode for uvm_fault()
* which would only do the PGO_LOCKED pgo_get().
*/
- return (0);
+ return 0;
case MADV_DONTNEED:
/*
@@ -724,13 +724,13 @@ sys_madvise(struct proc *p, void *v, register_t *retval)
* as it will free swap space allocated to pages in core.
* There's also what to do for device/file/anonymous memory.
*/
- return (EINVAL);
+ return EINVAL;
default:
- return (EINVAL);
+ return EINVAL;
}
- return (error);
+ return error;
}
/*
@@ -755,23 +755,23 @@ sys_mlock(struct proc *p, void *v, register_t *retval)
/* align address to a page boundary and adjust size accordingly */
ALIGN_ADDR(addr, size, pageoff);
if (addr > SIZE_MAX - size)
- return (EINVAL); /* disallow wrap-around. */
+ return EINVAL; /* disallow wrap-around. */
if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
- return (EAGAIN);
+ return EAGAIN;
#ifdef pmap_wired_count
if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
lim_cur(RLIMIT_MEMLOCK))
- return (EAGAIN);
+ return EAGAIN;
#else
if ((error = suser(p)) != 0)
- return (error);
+ return error;
#endif
error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
0);
- return (error == 0 ? 0 : ENOMEM);
+ return error == 0 ? 0 : ENOMEM;
}
/*
@@ -796,16 +796,16 @@ sys_munlock(struct proc *p, void *v, register_t *retval)
/* align address to a page boundary, and adjust size accordingly */
ALIGN_ADDR(addr, size, pageoff);
if (addr > SIZE_MAX - size)
- return (EINVAL); /* disallow wrap-around. */
+ return EINVAL; /* disallow wrap-around. */
#ifndef pmap_wired_count
if ((error = suser(p)) != 0)
- return (error);
+ return error;
#endif
error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
0);
- return (error == 0 ? 0 : ENOMEM);
+ return error == 0 ? 0 : ENOMEM;
}
/*
@@ -823,18 +823,18 @@ sys_mlockall(struct proc *p, void *v, register_t *retval)
if (flags == 0 ||
(flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
- return (EINVAL);
+ return EINVAL;
#ifndef pmap_wired_count
if ((error = suser(p)) != 0)
- return (error);
+ return error;
#endif
error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
lim_cur(RLIMIT_MEMLOCK));
if (error != 0 && error != ENOMEM)
- return (EAGAIN);
- return (error);
+ return EAGAIN;
+ return error;
}
/*
@@ -845,7 +845,7 @@ sys_munlockall(struct proc *p, void *v, register_t *retval)
{
(void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
- return (0);
+ return 0;
}
/*
@@ -865,7 +865,7 @@ uvm_mmaplock(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
/*
* No more work to do in this case.
*/
- return (0);
+ return 0;
}
vm_map_lock(map);
@@ -883,7 +883,7 @@ uvm_mmaplock(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
/* unmap the region! */
uvm_unmap(map, *addr, *addr + size);
KERNEL_UNLOCK();
- return (error);
+ return error;
}
/*
* uvm_map_pageable() always returns the map
@@ -895,13 +895,13 @@ uvm_mmaplock(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
/* unmap the region! */
uvm_unmap(map, *addr, *addr + size);
KERNEL_UNLOCK();
- return (error);
+ return error;
}
KERNEL_UNLOCK();
- return (0);
+ return 0;
}
vm_map_unlock(map);
- return (0);
+ return 0;
}
/*
@@ -926,7 +926,7 @@ uvm_mmapanon(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
*addr = round_page(*addr); /* round */
} else {
if (*addr & PAGE_MASK)
- return(EINVAL);
+ return EINVAL;
uvmflag |= UVM_FLAG_FIXED;
if ((flags & __MAP_NOREPLACE) == 0)
@@ -983,7 +983,7 @@ uvm_mmapfile(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
*addr = round_page(*addr); /* round */
} else {
if (*addr & PAGE_MASK)
- return(EINVAL);
+ return EINVAL;
uvmflag |= UVM_FLAG_FIXED;
if ((flags & __MAP_NOREPLACE) == 0)
@@ -1045,7 +1045,7 @@ uvm_mmapfile(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
}
if (uobj == NULL)
- return((vp->v_type == VREG) ? ENOMEM : EINVAL);
+ return vp->v_type == VREG ? ENOMEM : EINVAL;
if ((flags & MAP_SHARED) == 0)
uvmflag |= UVM_FLAG_COPYONW;
@@ -1070,7 +1070,7 @@ uvm_mmapfile(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
if (uobj)
uobj->pgops->pgo_detach(uobj);
- return (error);
+ return error;
}
/* an address that can't be in userspace or kernelspace */
@@ -1106,7 +1106,7 @@ sys_kbind(struct proc *p, void *v, register_t *retval)
/* a NULL paramp disables the syscall for the process */
if (paramp == NULL) {
pr->ps_kbind_addr = BOGO_PC;
- return (0);
+ return 0;
}
/* security checks */
@@ -1119,9 +1119,9 @@ sys_kbind(struct proc *p, void *v, register_t *retval)
else if (pr->ps_kbind_cookie != SCARG(uap, proc_cookie))
sigexit(p, SIGILL);
if (psize < sizeof(struct __kbind) || psize > sizeof(param))
- return (EINVAL);
+ return EINVAL;
if ((error = copyin(paramp, &param, psize)))
- return (error);
+ return error;
/*
* The param argument points to an array of __kbind structures
@@ -1133,7 +1133,7 @@ sys_kbind(struct proc *p, void *v, register_t *retval)
s = psize;
for (count = 0; s > 0 && count < KBIND_BLOCK_MAX; count++) {
if (s < sizeof(*paramp))
- return (EINVAL);
+ return EINVAL;
s -= sizeof(*paramp);
baseva = (vaddr_t)paramp[count].kb_addr;
@@ -1145,12 +1145,12 @@ sys_kbind(struct proc *p, void *v, register_t *retval)
endva >= VM_MAXUSER_ADDRESS ||
trunc_page(baseva) != trunc_page(endva) ||
s < paramp[count].kb_size)
- return (EINVAL);
+ return EINVAL;
s -= paramp[count].kb_size;
}
if (s > 0)
- return (EINVAL);
+ return EINVAL;
data = (const char *)&paramp[count];
/* all looks good, so do the bindings */
@@ -1192,5 +1192,5 @@ sys_kbind(struct proc *p, void *v, register_t *retval)
}
uvm_unmap_detach(&dead_entries, AMAP_REFALL);
- return (error);
+ return error;
}
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index 10e8fd6b32c..4a9563c570f 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.155 2021/01/19 13:21:36 mpi Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.156 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -84,7 +84,7 @@ RBT_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);
int
uvm_pagecmp(const struct vm_page *a, const struct vm_page *b)
{
- return (a->offset < b->offset ? -1 : a->offset > b->offset);
+ return a->offset < b->offset ? -1 : a->offset > b->offset;
}
/*
@@ -324,7 +324,7 @@ uvm_pageboot_alloc(vsize_t size)
addr = pmap_steal_memory(size, &virtual_space_start,
&virtual_space_end);
- return(addr);
+ return addr;
#else /* !PMAP_STEAL_MEMORY */
@@ -380,7 +380,7 @@ uvm_pageboot_alloc(vsize_t size)
pmap_kenter_pa(vaddr, paddr, PROT_READ | PROT_WRITE);
}
pmap_update(pmap_kernel());
- return(addr);
+ return addr;
#endif /* PMAP_STEAL_MEMORY */
}
@@ -428,7 +428,7 @@ uvm_page_physget(paddr_t *paddrp)
/* structure copy */
seg[0] = seg[1];
}
- return (TRUE);
+ return TRUE;
}
/* try from rear */
@@ -446,7 +446,7 @@ uvm_page_physget(paddr_t *paddrp)
/* structure copy */
seg[0] = seg[1];
}
- return (TRUE);
+ return TRUE;
}
}
@@ -478,10 +478,10 @@ uvm_page_physget(paddr_t *paddrp)
/* structure copy */
seg[0] = seg[1];
}
- return (TRUE);
+ return TRUE;
}
- return (FALSE); /* whoops! */
+ return FALSE; /* whoops! */
}
#endif /* PMAP_STEAL_MEMORY */
@@ -729,7 +729,7 @@ uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));
if (size == 0)
- return (EINVAL);
+ return EINVAL;
size = atop(round_page(size));
/*
@@ -896,10 +896,10 @@ uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
else
atomic_setbits_int(&pg->pg_flags, PG_CLEAN);
- return(pg);
+ return pg;
fail:
- return (NULL);
+ return NULL;
}
/*
@@ -1136,7 +1136,7 @@ vm_physseg_find(paddr_t pframe, int *offp)
if (pframe < seg->end) {
if (offp)
*offp = pframe - seg->start;
- return(try); /* got it */
+ return try; /* got it */
}
start = try + 1; /* next time, start here */
len--; /* "adjust" */
@@ -1147,7 +1147,7 @@ vm_physseg_find(paddr_t pframe, int *offp)
*/
}
}
- return(-1);
+ return -1;
#else
/* linear search for it */
@@ -1157,10 +1157,10 @@ vm_physseg_find(paddr_t pframe, int *offp)
if (pframe >= seg->start && pframe < seg->end) {
if (offp)
*offp = pframe - seg->start;
- return(lcv); /* got it */
+ return lcv; /* got it */
}
}
- return(-1);
+ return -1;
#endif
}
@@ -1178,7 +1178,7 @@ PHYS_TO_VM_PAGE(paddr_t pa)
psi = vm_physseg_find(pf, &off);
- return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
+ return (psi == -1) ? NULL : &vm_physmem[psi].pgs[off];
}
#endif /* VM_PHYSSEG_MAX > 1 */
@@ -1192,7 +1192,7 @@ uvm_pagelookup(struct uvm_object *obj, voff_t off)
struct vm_page pg;
pg.offset = off;
- return (RBT_FIND(uvm_objtree, &obj->memt, &pg));
+ return RBT_FIND(uvm_objtree, &obj->memt, &pg);
}
/*
diff --git a/sys/uvm/uvm_page.h b/sys/uvm/uvm_page.h
index 6cf40f30e2b..e29a872bf39 100644
--- a/sys/uvm/uvm_page.h
+++ b/sys/uvm/uvm_page.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.h,v 1.65 2020/09/22 14:31:08 mpi Exp $ */
+/* $OpenBSD: uvm_page.h,v 1.66 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_page.h,v 1.19 2000/12/28 08:24:55 chs Exp $ */
/*
@@ -256,9 +256,9 @@ vm_physseg_find(paddr_t pframe, int *offp)
if (pframe >= vm_physmem[0].start && pframe < vm_physmem[0].end) {
if (offp)
*offp = pframe - vm_physmem[0].start;
- return(0);
+ return 0;
}
- return(-1);
+ return -1;
}
/*
diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c
index c5195224087..286e7c2a025 100644
--- a/sys/uvm/uvm_pager.c
+++ b/sys/uvm/uvm_pager.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pager.c,v 1.75 2021/03/12 14:15:49 jsg Exp $ */
+/* $OpenBSD: uvm_pager.c,v 1.76 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $ */
/*
@@ -326,7 +326,7 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
pps[0] = center;
*npages = 1;
- return(pps);
+ return pps;
}
/* now determine the center and attempt to cluster around the edges */
@@ -410,7 +410,7 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
/*
* done! return the cluster array to the caller!!!
*/
- return(ppsp);
+ return ppsp;
}
/*
@@ -586,7 +586,7 @@ ReTry:
* to worry about.
*/
- return(result);
+ return result;
}
/*
diff --git a/sys/uvm/uvm_swap.c b/sys/uvm/uvm_swap.c
index cc9786e482e..d333e547e06 100644
--- a/sys/uvm/uvm_swap.c
+++ b/sys/uvm/uvm_swap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_swap.c,v 1.149 2021/03/04 09:00:03 mpi Exp $ */
+/* $OpenBSD: uvm_swap.c,v 1.150 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_swap.c,v 1.40 2000/11/17 11:39:39 mrg Exp $ */
/*
@@ -1426,7 +1426,7 @@ ReTry: /* XXXMRG */
sdp->swd_npginuse += *nslots;
uvmexp.swpginuse += *nslots;
/* done! return drum slot number */
- return(result + sdp->swd_drumoffset);
+ return result + sdp->swd_drumoffset;
}
}
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index 55d98192462..c787d903f83 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_vnode.c,v 1.112 2021/03/12 14:15:49 jsg Exp $ */
+/* $OpenBSD: uvm_vnode.c,v 1.113 2021/03/26 13:40:05 mpi Exp $ */
/* $NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $ */
/*
@@ -155,7 +155,7 @@ uvn_attach(struct vnode *vp, vm_prot_t accessprot)
/* if we're mapping a BLK device, make sure it is a disk. */
if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
- return(NULL);
+ return NULL;
}
/*
@@ -219,7 +219,7 @@ uvn_attach(struct vnode *vp, vm_prot_t accessprot)
if (uvn->u_flags & UVM_VNODE_WANTED)
wakeup(uvn);
uvn->u_flags = 0;
- return(NULL);
+ return NULL;
}
/*
@@ -253,7 +253,7 @@ uvn_attach(struct vnode *vp, vm_prot_t accessprot)
if (oldflags & UVM_VNODE_WANTED)
wakeup(uvn);
- return(&uvn->u_obj);
+ return &uvn->u_obj;
}
@@ -835,7 +835,7 @@ ReTry:
uvm_pglistfree(&dead);
- return(retval);
+ return retval;
}
/*
@@ -885,11 +885,11 @@ uvn_put(struct uvm_object *uobj, struct vm_page **pps, int npages, int flags)
retval = uvm_vnode_lock(uvn);
if (retval)
- return(retval);
+ return retval;
retval = uvn_io(uvn, pps, npages, flags, UIO_WRITE);
uvm_vnode_unlock(uvn);
- return(retval);
+ return retval;
}
/*
@@ -977,9 +977,9 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
*npagesp = gotpages; /* let caller know */
if (done)
- return(VM_PAGER_OK); /* bingo! */
+ return VM_PAGER_OK; /* bingo! */
else
- return(VM_PAGER_UNLOCK);
+ return VM_PAGER_UNLOCK;
}
/*
@@ -992,7 +992,7 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
*/
retval = uvm_vnode_lock(uvn);
if (retval)
- return(retval);
+ return retval;
/*
* step 2: get non-resident or busy pages.
@@ -1098,7 +1098,7 @@ uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps,
uvm_lock_pageq();
uvm_pagefree(ptmp);
uvm_unlock_pageq();
- return(result);
+ return result;
}
/*
@@ -1154,7 +1154,7 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
/* check for sync'ing I/O. */
while (uvn->u_flags & UVM_VNODE_IOSYNC) {
if (waitf == M_NOWAIT) {
- return(VM_PAGER_AGAIN);
+ return VM_PAGER_AGAIN;
}
uvn->u_flags |= UVM_VNODE_IOSYNCWANTED;
tsleep_nsec(&uvn->u_flags, PVM, "uvn_iosync", INFSLP);
@@ -1162,7 +1162,7 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
/* check size */
if (file_offset >= uvn->u_size) {
- return(VM_PAGER_BAD);
+ return VM_PAGER_BAD;
}
/* first try and map the pages in (without waiting) */
@@ -1171,7 +1171,7 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
kva = uvm_pagermapin(pps, npages, mapinflags);
if (kva == 0 && waitf == M_NOWAIT) {
- return(VM_PAGER_AGAIN);
+ return VM_PAGER_AGAIN;
}
/*
@@ -1245,13 +1245,13 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t *pps, int npages, int flags, int rw)
}
if (result == 0)
- return(VM_PAGER_OK);
+ return VM_PAGER_OK;
if (result == EIO) {
/* Signal back to uvm_vnode_unlock(). */
uvn->u_flags |= UVM_VNODE_IOERROR;
}
- return(VM_PAGER_ERROR);
+ return VM_PAGER_ERROR;
}
/*
@@ -1301,7 +1301,7 @@ uvm_vnp_uncache(struct vnode *vp)
if ((uvn->u_flags & UVM_VNODE_VALID) == 0 ||
(uvn->u_flags & UVM_VNODE_BLOCKED) != 0) {
- return(TRUE);
+ return TRUE;
}
/*
@@ -1310,7 +1310,7 @@ uvm_vnp_uncache(struct vnode *vp)
*/
uvn->u_flags &= ~UVM_VNODE_CANPERSIST;
if (uvn->u_obj.uo_refs) {
- return(FALSE);
+ return FALSE;
}
/*
@@ -1343,7 +1343,7 @@ uvm_vnp_uncache(struct vnode *vp)
uvn_detach(&uvn->u_obj);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
- return(TRUE);
+ return TRUE;
}
/*
@@ -1476,7 +1476,7 @@ uvm_vnode_lock(struct uvm_vnode *uvn)
int netunlocked = 0;
if (uvn->u_flags & UVM_VNODE_VNISLOCKED)
- return(VM_PAGER_OK);
+ return VM_PAGER_OK;
/*
* This thread may already have the net lock, if we faulted in copyin()
@@ -1499,7 +1499,7 @@ uvm_vnode_lock(struct uvm_vnode *uvn)
error = vn_lock(uvn->u_vnode, LK_EXCLUSIVE | LK_RECURSEFAIL);
if (netunlocked)
NET_LOCK();
- return(error ? VM_PAGER_ERROR : VM_PAGER_OK);
+ return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}
void