Diffstat (limited to 'sys')
-rw-r--r--  sys/arch/alpha/alpha/machdep.c | 10
-rw-r--r--  sys/arch/alpha/alpha/pmap.c | 34
-rw-r--r--  sys/arch/alpha/alpha/trap.c | 10
-rw-r--r--  sys/arch/alpha/alpha/vm_machdep.c | 4
-rw-r--r--  sys/arch/alpha/dev/bus_dma.c | 6
-rw-r--r--  sys/arch/amd64/amd64/acpi_machdep.c | 4
-rw-r--r--  sys/arch/amd64/amd64/bios.c | 4
-rw-r--r--  sys/arch/amd64/amd64/bus_dma.c | 6
-rw-r--r--  sys/arch/amd64/amd64/bus_space.c | 5
-rw-r--r--  sys/arch/amd64/amd64/cpu.c | 9
-rw-r--r--  sys/arch/amd64/amd64/gdt.c | 5
-rw-r--r--  sys/arch/amd64/amd64/hibernate_machdep.c | 26
-rw-r--r--  sys/arch/amd64/amd64/machdep.c | 12
-rw-r--r--  sys/arch/amd64/amd64/mpbios.c | 4
-rw-r--r--  sys/arch/amd64/amd64/pmap.c | 31
-rw-r--r--  sys/arch/amd64/amd64/trap.c | 8
-rw-r--r--  sys/arch/amd64/amd64/vm_machdep.c | 4
-rw-r--r--  sys/arch/amd64/include/pmap.h | 11
-rw-r--r--  sys/arch/amd64/pci/vga_post.c | 8
-rw-r--r--  sys/arch/arm/arm/arm32_machdep.c | 6
-rw-r--r--  sys/arch/arm/arm/bus_dma.c | 4
-rw-r--r--  sys/arch/arm/arm/cpu.c | 4
-rw-r--r--  sys/arch/arm/arm/db_interface.c | 6
-rw-r--r--  sys/arch/arm/arm/fault.c | 16
-rw-r--r--  sys/arch/arm/arm/fiq.c | 6
-rw-r--r--  sys/arch/arm/arm/mem.c | 6
-rw-r--r--  sys/arch/arm/arm/pmap.c | 56
-rw-r--r--  sys/arch/arm/arm/pmap7.c | 60
-rw-r--r--  sys/arch/arm/arm/stubs.c | 4
-rw-r--r--  sys/arch/arm/arm/vm_machdep.c | 5
-rw-r--r--  sys/arch/arm/armv7/armv7_space.c | 4
-rw-r--r--  sys/arch/arm/include/pmap.h | 22
-rw-r--r--  sys/arch/arm/xscale/i80321_space.c | 6
-rw-r--r--  sys/arch/arm/xscale/pxa2x0_space.c | 4
-rw-r--r--  sys/arch/armish/armish/armish_machdep.c | 24
-rw-r--r--  sys/arch/armish/dev/obio_space.c | 4
-rw-r--r--  sys/arch/armv7/armv7/armv7_machdep.c | 22
-rw-r--r--  sys/arch/aviion/aviion/av400_machdep.c | 6
-rw-r--r--  sys/arch/aviion/aviion/av530_machdep.c | 6
-rw-r--r--  sys/arch/aviion/aviion/bus_dma.c | 6
-rw-r--r--  sys/arch/aviion/dev/if_le_syscon.c | 5
-rw-r--r--  sys/arch/aviion/dev/vme.c | 7
-rw-r--r--  sys/arch/hppa/dev/astro.c | 4
-rw-r--r--  sys/arch/hppa/hppa/mainbus.c | 6
-rw-r--r--  sys/arch/hppa/hppa/pmap.c | 28
-rw-r--r--  sys/arch/hppa/hppa/trap.c | 12
-rw-r--r--  sys/arch/hppa/hppa/vm_machdep.c | 4
-rw-r--r--  sys/arch/hppa/include/pmap.h | 10
-rw-r--r--  sys/arch/hppa64/dev/astro.c | 4
-rw-r--r--  sys/arch/hppa64/hppa64/mainbus.c | 4
-rw-r--r--  sys/arch/hppa64/hppa64/pmap.c | 19
-rw-r--r--  sys/arch/hppa64/hppa64/trap.c | 12
-rw-r--r--  sys/arch/hppa64/hppa64/vm_machdep.c | 4
-rw-r--r--  sys/arch/hppa64/include/pmap.h | 10
-rw-r--r--  sys/arch/i386/i386/acpi_machdep.c | 4
-rw-r--r--  sys/arch/i386/i386/bios.c | 12
-rw-r--r--  sys/arch/i386/i386/bus_dma.c | 6
-rw-r--r--  sys/arch/i386/i386/cpu.c | 9
-rw-r--r--  sys/arch/i386/i386/gdt.c | 6
-rw-r--r--  sys/arch/i386/i386/hibernate_machdep.c | 8
-rw-r--r--  sys/arch/i386/i386/kvm86.c | 4
-rw-r--r--  sys/arch/i386/i386/machdep.c | 16
-rw-r--r--  sys/arch/i386/i386/mem.c | 4
-rw-r--r--  sys/arch/i386/i386/mpbios.c | 4
-rw-r--r--  sys/arch/i386/i386/pmap.c | 28
-rw-r--r--  sys/arch/i386/i386/trap.c | 8
-rw-r--r--  sys/arch/i386/i386/vm_machdep.c | 4
-rw-r--r--  sys/arch/i386/include/pmap.h | 11
-rw-r--r--  sys/arch/i386/pci/agp_machdep.c | 4
-rw-r--r--  sys/arch/i386/pci/vga_post.c | 8
-rw-r--r--  sys/arch/landisk/dev/obio.c | 4
-rw-r--r--  sys/arch/landisk/landisk/bus_dma.c | 4
-rw-r--r--  sys/arch/loongson/loongson/bus_dma.c | 6
-rw-r--r--  sys/arch/luna88k/luna88k/pmap_table.c | 6
-rw-r--r--  sys/arch/m88k/m88k/pmap.c | 23
-rw-r--r--  sys/arch/m88k/m88k/trap.c | 38
-rw-r--r--  sys/arch/m88k/m88k/vm_machdep.c | 6
-rw-r--r--  sys/arch/macppc/macppc/dma.c | 6
-rw-r--r--  sys/arch/macppc/macppc/machdep.c | 8
-rw-r--r--  sys/arch/mips64/mips64/mips64_machdep.c | 4
-rw-r--r--  sys/arch/mips64/mips64/pmap.c | 30
-rw-r--r--  sys/arch/mips64/mips64/r4000_errata.c | 4
-rw-r--r--  sys/arch/mips64/mips64/trap.c | 26
-rw-r--r--  sys/arch/mips64/mips64/vm_machdep.c | 6
-rw-r--r--  sys/arch/octeon/octeon/bus_dma.c | 6
-rw-r--r--  sys/arch/powerpc/powerpc/pmap.c | 40
-rw-r--r--  sys/arch/powerpc/powerpc/trap.c | 18
-rw-r--r--  sys/arch/powerpc/powerpc/vm_machdep.c | 4
-rw-r--r--  sys/arch/sgi/sgi/bus_dma.c | 6
-rw-r--r--  sys/arch/sh/sh/pmap.c | 42
-rw-r--r--  sys/arch/sh/sh/trap.c | 10
-rw-r--r--  sys/arch/sh/sh/vm_machdep.c | 4
-rw-r--r--  sys/arch/socppc/socppc/dma.c | 6
-rw-r--r--  sys/arch/socppc/socppc/machdep.c | 6
-rw-r--r--  sys/arch/solbourne/solbourne/machdep.c | 10
-rw-r--r--  sys/arch/solbourne/solbourne/mem.c | 4
-rw-r--r--  sys/arch/solbourne/solbourne/pmap.c | 16
-rw-r--r--  sys/arch/solbourne/solbourne/trap.c | 8
-rw-r--r--  sys/arch/sparc/dev/if_ie.c | 4
-rw-r--r--  sys/arch/sparc/sparc/autoconf.c | 4
-rw-r--r--  sys/arch/sparc/sparc/clock.c | 12
-rw-r--r--  sys/arch/sparc/sparc/iommu.c | 6
-rw-r--r--  sys/arch/sparc/sparc/machdep.c | 8
-rw-r--r--  sys/arch/sparc/sparc/mem.c | 4
-rw-r--r--  sys/arch/sparc/sparc/pmap.c | 58
-rw-r--r--  sys/arch/sparc/sparc/trap.c | 20
-rw-r--r--  sys/arch/sparc/sparc/vm_machdep.c | 6
-rw-r--r--  sys/arch/sparc64/dev/iommu.c | 6
-rw-r--r--  sys/arch/sparc64/sparc64/clock.c | 6
-rw-r--r--  sys/arch/sparc64/sparc64/cpu.c | 4
-rw-r--r--  sys/arch/sparc64/sparc64/emul.c | 4
-rw-r--r--  sys/arch/sparc64/sparc64/machdep.c | 14
-rw-r--r--  sys/arch/sparc64/sparc64/mdesc.c | 7
-rw-r--r--  sys/arch/sparc64/sparc64/mem.c | 6
-rw-r--r--  sys/arch/sparc64/sparc64/pmap.c | 88
-rw-r--r--  sys/arch/sparc64/sparc64/trap.c | 14
-rw-r--r--  sys/arch/sparc64/sparc64/vm_machdep.c | 8
-rw-r--r--  sys/arch/vax/vax/bus_dma.c | 8
-rw-r--r--  sys/arch/vax/vax/pmap.c | 32
-rw-r--r--  sys/arch/vax/vax/trap.c | 6
-rw-r--r--  sys/arch/vax/vax/vm_machdep.c | 5
-rw-r--r--  sys/arch/zaurus/zaurus/zaurus_machdep.c | 26
-rw-r--r--  sys/compat/linux/linux_misc.c | 6
-rw-r--r--  sys/ddb/db_watch.c | 4
-rw-r--r--  sys/dev/audio.c | 16
-rw-r--r--  sys/dev/ic/sti.c | 4
-rw-r--r--  sys/dev/isa/aha.c | 8
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem.c | 14
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem_tiling.c | 4
-rw-r--r--  sys/dev/pci/drm/i915/intel_ringbuffer.c | 10
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo.c | 6
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_util.c | 8
-rw-r--r--  sys/kern/exec_elf.c | 20
-rw-r--r--  sys/kern/exec_subr.c | 26
-rw-r--r--  sys/kern/init_main.c | 6
-rw-r--r--  sys/kern/kern_exec.c | 10
-rw-r--r--  sys/kern/kern_malloc.c | 4
-rw-r--r--  sys/kern/kern_malloc_debug.c | 6
-rw-r--r--  sys/kern/kern_physio.c | 4
-rw-r--r--  sys/kern/kern_resource.c | 6
-rw-r--r--  sys/kern/kern_sysctl.c | 4
-rw-r--r--  sys/kern/subr_hibernate.c | 14
-rw-r--r--  sys/kern/sysv_shm.c | 8
-rw-r--r--  sys/kern/vfs_biomem.c | 8
-rw-r--r--  sys/tmpfs/tmpfs_subr.c | 14
-rw-r--r--  sys/uvm/uvm_amap.c | 4
-rw-r--r--  sys/uvm/uvm_anon.c | 6
-rw-r--r--  sys/uvm/uvm_aobj.c | 10
-rw-r--r--  sys/uvm/uvm_device.c | 3
-rw-r--r--  sys/uvm/uvm_extern.h | 57
-rw-r--r--  sys/uvm/uvm_fault.c | 47
-rw-r--r--  sys/uvm/uvm_glue.c | 9
-rw-r--r--  sys/uvm/uvm_init.c | 10
-rw-r--r--  sys/uvm/uvm_km.c | 58
-rw-r--r--  sys/uvm/uvm_map.c | 41
-rw-r--r--  sys/uvm/uvm_mmap.c | 40
-rw-r--r--  sys/uvm/uvm_object.c | 5
-rw-r--r--  sys/uvm/uvm_page.c | 6
-rw-r--r--  sys/uvm/uvm_pager.c | 8
-rw-r--r--  sys/uvm/uvm_pdaemon.c | 12
-rw-r--r--  sys/uvm/uvm_pmap.h | 4
-rw-r--r--  sys/uvm/uvm_unix.c | 10
-rw-r--r--  sys/uvm/uvm_vnode.c | 18
163 files changed, 985 insertions, 1023 deletions
diff --git a/sys/arch/alpha/alpha/machdep.c b/sys/arch/alpha/alpha/machdep.c
index 050601817e6..2a81b749f3c 100644
--- a/sys/arch/alpha/alpha/machdep.c
+++ b/sys/arch/alpha/alpha/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.165 2014/09/20 09:28:24 kettenis Exp $ */
+/* $OpenBSD: machdep.c,v 1.166 2014/11/16 12:30:52 deraadt Exp $ */
/* $NetBSD: machdep.c,v 1.210 2000/06/01 17:12:38 thorpej Exp $ */
/*-
@@ -485,10 +485,10 @@ nobootinfo:
memc->mddt_usage & MDDT_NONVOLATILE || /* XXX */
memc->mddt_usage & MDDT_PALCODE)
mem_clusters[mem_cluster_cnt].size |=
- VM_PROT_READ;
+ PROT_READ;
else
mem_clusters[mem_cluster_cnt].size |=
- VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
+ PROT_READ | PROT_WRITE | PROT_EXEC;
mem_cluster_cnt++;
} /* XXX else print something! */
@@ -1905,9 +1905,9 @@ alpha_pa_access(pa)
* access. Otherwise, grant read/write.
*/
if (securelevel > 0)
- return (VM_PROT_NONE);
+ return (PROT_NONE);
else
- return (VM_PROT_READ | VM_PROT_WRITE);
+ return (PROT_READ | PROT_WRITE);
}
/* XXX XXX BEGIN XXX XXX */
diff --git a/sys/arch/alpha/alpha/pmap.c b/sys/arch/alpha/alpha/pmap.c
index fe40fbaa9e8..465cdbc0012 100644
--- a/sys/arch/alpha/alpha/pmap.c
+++ b/sys/arch/alpha/alpha/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.72 2014/03/29 18:09:28 guenther Exp $ */
+/* $OpenBSD: pmap.c,v 1.73 2014/11/16 12:30:52 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.154 2000/12/07 22:18:55 thorpej Exp $ */
/*-
@@ -1382,18 +1382,18 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
#ifdef DEBUG
if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
- (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
+ (prot == PROT_NONE && (pmapdebug & PDB_REMOVE)))
printf("pmap_page_protect(%p, %x)\n", pg, prot);
#endif
switch (prot) {
- case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
- case VM_PROT_READ|VM_PROT_WRITE:
+ case PROT_READ | PROT_WRITE | PROT_EXEC:
+ case PROT_READ | PROT_WRITE:
return;
/* copy_on_write */
- case VM_PROT_READ|VM_PROT_EXECUTE:
- case VM_PROT_READ:
+ case PROT_READ | PROT_EXEC:
+ case PROT_READ:
PMAP_HEAD_TO_MAP_LOCK();
for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
PMAP_LOCK(pv->pv_pmap);
@@ -1465,7 +1465,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
pmap, sva, eva, prot);
#endif
- if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ if ((prot & PROT_READ) == PROT_NONE) {
pmap_remove(pmap, sva, eva);
return;
}
@@ -1507,7 +1507,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
PMAP_TLB_SHOOTNOW();
- if (prot & VM_PROT_EXECUTE)
+ if (prot & PROT_EXEC)
PMAP_SYNC_ISTREAM(pmap);
PMAP_UNLOCK(pmap);
@@ -1554,11 +1554,11 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
/*
* Determine what we need to do about the I-stream. If
- * VM_PROT_EXECUTE is set, we mark a user pmap as needing
+ * PROT_EXEC is set, we mark a user pmap as needing
* an I-sync on the way back out to userspace. We always
* need an immediate I-sync for the kernel pmap.
*/
- if (prot & VM_PROT_EXECUTE) {
+ if (prot & PROT_EXEC) {
if (pmap == pmap_kernel())
needisync = TRUE;
else {
@@ -1756,12 +1756,12 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
int attrs;
#ifdef DIAGNOSTIC
- if ((flags & VM_PROT_ALL) & ~prot)
+ if ((flags & PROT_MASK) & ~prot)
panic("pmap_enter: access type exceeds prot");
#endif
- if (flags & VM_PROT_WRITE)
+ if (flags & PROT_WRITE)
pg->mdpage.pvh_attrs |= (PGA_REFERENCED|PGA_MODIFIED);
- else if (flags & VM_PROT_ALL)
+ else if (flags & PROT_MASK)
pg->mdpage.pvh_attrs |= PGA_REFERENCED;
attrs = pg->mdpage.pvh_attrs;
@@ -1856,7 +1856,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
if (pmap_pte_w(pte) == 0)
PMAP_STAT_DECR(pmap->pm_stats.wired_count, 1);
- if ((prot & VM_PROT_EXECUTE) != 0 || pmap_pte_exec(pte))
+ if ((prot & PROT_EXEC) != 0 || pmap_pte_exec(pte))
needisync = TRUE;
/*
@@ -2359,15 +2359,15 @@ alpha_protection_init(void)
kp[prot] = PG_ASM;
up[prot] = 0;
- if (prot & VM_PROT_READ) {
+ if (prot & PROT_READ) {
kp[prot] |= PG_KRE;
up[prot] |= PG_KRE | PG_URE;
}
- if (prot & VM_PROT_WRITE) {
+ if (prot & PROT_WRITE) {
kp[prot] |= PG_KWE;
up[prot] |= PG_KWE | PG_UWE;
}
- if (prot & VM_PROT_EXECUTE) {
+ if (prot & PROT_EXEC) {
kp[prot] |= PG_EXEC | PG_KRE;
up[prot] |= PG_EXEC | PG_KRE | PG_URE;
} else {
diff --git a/sys/arch/alpha/alpha/trap.c b/sys/arch/alpha/alpha/trap.c
index db7a0adae05..7c322071c69 100644
--- a/sys/arch/alpha/alpha/trap.c
+++ b/sys/arch/alpha/alpha/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.75 2014/07/02 18:37:33 miod Exp $ */
+/* $OpenBSD: trap.c,v 1.76 2014/11/16 12:30:52 deraadt Exp $ */
/* $NetBSD: trap.c,v 1.52 2000/05/24 16:48:33 thorpej Exp $ */
/*-
@@ -375,7 +375,7 @@ trap(a0, a1, a2, entry, framep)
case ALPHA_MMCSR_FOW:
KERNEL_LOCK();
if (pmap_emulate_reference(p, a0, user, a1)) {
- ftype = VM_PROT_EXECUTE;
+ ftype = PROT_EXEC;
goto do_fault;
}
KERNEL_UNLOCK();
@@ -392,13 +392,13 @@ trap(a0, a1, a2, entry, framep)
switch (a2) {
case -1: /* instruction fetch fault */
- ftype = VM_PROT_EXECUTE;
+ ftype = PROT_EXEC;
break;
case 0: /* load instruction */
- ftype = VM_PROT_READ;
+ ftype = PROT_READ;
break;
case 1: /* store instruction */
- ftype = VM_PROT_READ|VM_PROT_WRITE;
+ ftype = PROT_READ | PROT_WRITE;
break;
}
diff --git a/sys/arch/alpha/alpha/vm_machdep.c b/sys/arch/alpha/alpha/vm_machdep.c
index ed5bf574260..ef712b325b6 100644
--- a/sys/arch/alpha/alpha/vm_machdep.c
+++ b/sys/arch/alpha/alpha/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.41 2014/02/01 21:19:35 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.42 2014/11/16 12:30:52 deraadt Exp $ */
/* $NetBSD: vm_machdep.c,v 1.55 2000/03/29 03:49:48 simonb Exp $ */
/*
@@ -262,7 +262,7 @@ vmapbuf(bp, len)
faddr, &pa) == FALSE)
panic("vmapbuf: null page frame");
pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
- VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
+ PROT_READ | PROT_WRITE, PMAP_WIRED);
faddr += PAGE_SIZE;
taddr += PAGE_SIZE;
}
diff --git a/sys/arch/alpha/dev/bus_dma.c b/sys/arch/alpha/dev/bus_dma.c
index 08e55b2831b..b9dd135c75c 100644
--- a/sys/arch/alpha/dev/bus_dma.c
+++ b/sys/arch/alpha/dev/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.34 2014/09/13 16:06:36 doug Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.35 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: bus_dma.c,v 1.40 2000/07/17 04:47:56 thorpej Exp $ */
/*-
@@ -611,8 +611,8 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
if (size == 0)
panic("_bus_dmamem_map: size botch");
error = pmap_enter(pmap_kernel(), va, addr,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
- VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
if (error) {
pmap_update(pmap_kernel());
km_free((void *)sva, ssize, &kv_any, &kp_none);
diff --git a/sys/arch/amd64/amd64/acpi_machdep.c b/sys/arch/amd64/amd64/acpi_machdep.c
index 2e686a2b546..7ecc744bbf8 100644
--- a/sys/arch/amd64/amd64/acpi_machdep.c
+++ b/sys/arch/amd64/amd64/acpi_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: acpi_machdep.c,v 1.63 2014/09/19 20:02:25 kettenis Exp $ */
+/* $OpenBSD: acpi_machdep.c,v 1.64 2014/11/16 12:30:56 deraadt Exp $ */
/*
* Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
*
@@ -82,7 +82,7 @@ acpi_map(paddr_t pa, size_t len, struct acpi_mem_map *handle)
handle->pa = pa;
do {
- pmap_kenter_pa(va, pgpa, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pgpa, PROT_READ | PROT_WRITE);
va += NBPG;
pgpa += NBPG;
} while (pgpa < endpa);
diff --git a/sys/arch/amd64/amd64/bios.c b/sys/arch/amd64/amd64/bios.c
index 659affed2c3..577c8b49758 100644
--- a/sys/arch/amd64/amd64/bios.c
+++ b/sys/arch/amd64/amd64/bios.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bios.c,v 1.28 2014/09/14 14:17:23 jsg Exp $ */
+/* $OpenBSD: bios.c,v 1.29 2014/11/16 12:30:56 deraadt Exp $ */
/*
* Copyright (c) 2006 Gordon Willem Klok <gklok@cogeco.ca>
*
@@ -133,7 +133,7 @@ bios_attach(struct device *parent, struct device *self, void *aux)
smbios_entry.count = hdr->count;
for (; pa < end; pa+= NBPG, va+= NBPG)
- pmap_kenter_pa(va, pa, VM_PROT_READ);
+ pmap_kenter_pa(va, pa, PROT_READ);
printf(": SMBIOS rev. %d.%d @ 0x%x (%d entries)",
hdr->majrev, hdr->minrev, hdr->addr, hdr->count);
diff --git a/sys/arch/amd64/amd64/bus_dma.c b/sys/arch/amd64/amd64/bus_dma.c
index 053a0305eeb..9f86bdaa4ec 100644
--- a/sys/arch/amd64/amd64/bus_dma.c
+++ b/sys/arch/amd64/amd64/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.45 2014/07/12 18:44:41 tedu Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.46 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: bus_dma.c,v 1.3 2003/05/07 21:33:58 fvdl Exp $ */
/*-
@@ -494,8 +494,8 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
if (size == 0)
panic("_bus_dmamem_map: size botch");
error = pmap_enter(pmap_kernel(), va, addr | pmapflags,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
- VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
if (error) {
pmap_update(pmap_kernel());
km_free((void *)sva, ssize, &kv_any, &kp_none);
diff --git a/sys/arch/amd64/amd64/bus_space.c b/sys/arch/amd64/amd64/bus_space.c
index 3b8a345d427..5cf9a745314 100644
--- a/sys/arch/amd64/amd64/bus_space.c
+++ b/sys/arch/amd64/amd64/bus_space.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_space.c,v 1.24 2014/10/25 16:57:58 kettenis Exp $ */
+/* $OpenBSD: bus_space.c,v 1.25 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: bus_space.c,v 1.2 2003/03/14 18:47:53 christos Exp $ */
/*-
@@ -508,8 +508,7 @@ x86_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
for (; map_size > 0;
pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE)
- pmap_kenter_pa(va, pa | pmap_flags,
- VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa | pmap_flags, PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
return 0;
diff --git a/sys/arch/amd64/amd64/cpu.c b/sys/arch/amd64/amd64/cpu.c
index c3b596c7d3d..209fff74ea4 100644
--- a/sys/arch/amd64/amd64/cpu.c
+++ b/sys/arch/amd64/amd64/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.66 2014/10/09 03:59:58 tedu Exp $ */
+/* $OpenBSD: cpu.c,v 1.67 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*-
@@ -170,9 +170,8 @@ replacesmap(void)
pmap_extract(pmap_kernel(), kva, &pa1);
pmap_extract(pmap_kernel(), kva + PAGE_SIZE, &pa2);
- pmap_kenter_pa(nva, pa1, VM_PROT_READ | VM_PROT_WRITE);
- pmap_kenter_pa(nva + PAGE_SIZE, pa2, VM_PROT_READ |
- VM_PROT_WRITE);
+ pmap_kenter_pa(nva, pa1, PROT_READ | PROT_WRITE);
+ pmap_kenter_pa(nva + PAGE_SIZE, pa2, PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
/* replace 3 byte nops with stac/clac instructions */
@@ -833,7 +832,7 @@ mp_cpu_start(struct cpu_info *ci)
dwordptr[0] = 0;
dwordptr[1] = MP_TRAMPOLINE >> 4;
- pmap_kenter_pa(0, 0, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(0, 0, PROT_READ | PROT_WRITE);
memcpy((u_int8_t *) 0x467, dwordptr, 4);
pmap_kremove(0, PAGE_SIZE);
diff --git a/sys/arch/amd64/amd64/gdt.c b/sys/arch/amd64/amd64/gdt.c
index 6f822cf69f1..50588686167 100644
--- a/sys/arch/amd64/amd64/gdt.c
+++ b/sys/arch/amd64/amd64/gdt.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: gdt.c,v 1.20 2014/09/14 14:17:23 jsg Exp $ */
+/* $OpenBSD: gdt.c,v 1.21 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: gdt.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $ */
/*-
@@ -65,8 +65,7 @@ gdt_alloc_cpu(struct cpu_info *ci)
pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
if (pg == NULL)
panic("gdt_init: no pages");
- pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
- VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
}
bcopy(gdtstore, ci->ci_gdt, GDT_SIZE);
bzero(ci->ci_tss, sizeof(*ci->ci_tss));
diff --git a/sys/arch/amd64/amd64/hibernate_machdep.c b/sys/arch/amd64/amd64/hibernate_machdep.c
index 42d22593d27..265ecb758d2 100644
--- a/sys/arch/amd64/amd64/hibernate_machdep.c
+++ b/sys/arch/amd64/amd64/hibernate_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: hibernate_machdep.c,v 1.30 2014/11/08 08:18:37 mlarkin Exp $ */
+/* $OpenBSD: hibernate_machdep.c,v 1.31 2014/11/16 12:30:56 deraadt Exp $ */
/*
* Copyright (c) 2012 Mike Larkin <mlarkin@openbsd.org>
@@ -272,22 +272,22 @@ hibernate_populate_resume_pt(union hibernate_info *hib_info,
pt_entry_t *pde, npde;
/* Identity map MMU pages */
- pmap_kenter_pa(HIBERNATE_PML4T, HIBERNATE_PML4T, VM_PROT_ALL);
- pmap_kenter_pa(HIBERNATE_PDPT_LOW, HIBERNATE_PDPT_LOW, VM_PROT_ALL);
- pmap_kenter_pa(HIBERNATE_PDPT_HI, HIBERNATE_PDPT_HI, VM_PROT_ALL);
- pmap_kenter_pa(HIBERNATE_PD_LOW, HIBERNATE_PD_LOW, VM_PROT_ALL);
- pmap_kenter_pa(HIBERNATE_PD_LOW2, HIBERNATE_PD_LOW2, VM_PROT_ALL);
- pmap_kenter_pa(HIBERNATE_PD_HI, HIBERNATE_PD_HI, VM_PROT_ALL);
- pmap_kenter_pa(HIBERNATE_PT_LOW, HIBERNATE_PT_LOW, VM_PROT_ALL);
- pmap_kenter_pa(HIBERNATE_PT_LOW2, HIBERNATE_PT_LOW2, VM_PROT_ALL);
- pmap_kenter_pa(HIBERNATE_PT_HI, HIBERNATE_PT_HI, VM_PROT_ALL);
+ pmap_kenter_pa(HIBERNATE_PML4T, HIBERNATE_PML4T, PROT_MASK);
+ pmap_kenter_pa(HIBERNATE_PDPT_LOW, HIBERNATE_PDPT_LOW, PROT_MASK);
+ pmap_kenter_pa(HIBERNATE_PDPT_HI, HIBERNATE_PDPT_HI, PROT_MASK);
+ pmap_kenter_pa(HIBERNATE_PD_LOW, HIBERNATE_PD_LOW, PROT_MASK);
+ pmap_kenter_pa(HIBERNATE_PD_LOW2, HIBERNATE_PD_LOW2, PROT_MASK);
+ pmap_kenter_pa(HIBERNATE_PD_HI, HIBERNATE_PD_HI, PROT_MASK);
+ pmap_kenter_pa(HIBERNATE_PT_LOW, HIBERNATE_PT_LOW, PROT_MASK);
+ pmap_kenter_pa(HIBERNATE_PT_LOW2, HIBERNATE_PT_LOW2, PROT_MASK);
+ pmap_kenter_pa(HIBERNATE_PT_HI, HIBERNATE_PT_HI, PROT_MASK);
/* Identity map 3 pages for stack */
- pmap_kenter_pa(HIBERNATE_STACK_PAGE, HIBERNATE_STACK_PAGE, VM_PROT_ALL);
+ pmap_kenter_pa(HIBERNATE_STACK_PAGE, HIBERNATE_STACK_PAGE, PROT_MASK);
pmap_kenter_pa(HIBERNATE_STACK_PAGE - PAGE_SIZE,
- HIBERNATE_STACK_PAGE - PAGE_SIZE, VM_PROT_ALL);
+ HIBERNATE_STACK_PAGE - PAGE_SIZE, PROT_MASK);
pmap_kenter_pa(HIBERNATE_STACK_PAGE - 2*PAGE_SIZE,
- HIBERNATE_STACK_PAGE - 2*PAGE_SIZE, VM_PROT_ALL);
+ HIBERNATE_STACK_PAGE - 2*PAGE_SIZE, PROT_MASK);
pmap_activate(curproc);
bzero((caddr_t)HIBERNATE_PML4T, PAGE_SIZE);
diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c
index 5f5c32e02df..157587e4707 100644
--- a/sys/arch/amd64/amd64/machdep.c
+++ b/sys/arch/amd64/amd64/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.194 2014/11/08 03:31:58 guenther Exp $ */
+/* $OpenBSD: machdep.c,v 1.195 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: machdep.c,v 1.3 2003/05/07 22:58:18 fvdl Exp $ */
/*-
@@ -1179,7 +1179,7 @@ void
map_tramps(void) {
struct pmap *kmp = pmap_kernel();
- pmap_kenter_pa(lo32_vaddr, lo32_paddr, VM_PROT_ALL);
+ pmap_kenter_pa(lo32_vaddr, lo32_paddr, PROT_READ | PROT_WRITE | PROT_EXEC);
/*
* The initial PML4 pointer must be below 4G, so if the
@@ -1195,12 +1195,12 @@ map_tramps(void) {
#ifdef MULTIPROCESSOR
pmap_kenter_pa((vaddr_t)MP_TRAMPOLINE, /* virtual */
(paddr_t)MP_TRAMPOLINE, /* physical */
- VM_PROT_ALL); /* protection */
+ PROT_MASK); /* protection */
#endif /* MULTIPROCESSOR */
pmap_kenter_pa((vaddr_t)ACPI_TRAMPOLINE, /* virtual */
(paddr_t)ACPI_TRAMPOLINE, /* physical */
- VM_PROT_ALL); /* protection */
+ PROT_MASK); /* protection */
}
#endif
@@ -1507,9 +1507,9 @@ init_x86_64(paddr_t first_avail)
pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 32 * 1024 * 1024);
- pmap_kenter_pa(idt_vaddr, idt_paddr, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(idt_vaddr, idt_paddr, PROT_READ | PROT_WRITE);
pmap_kenter_pa(idt_vaddr + PAGE_SIZE, idt_paddr + PAGE_SIZE,
- VM_PROT_READ|VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
#if defined(MULTIPROCESSOR) || \
(NACPI > 0 && !defined(SMALL_KERNEL))
diff --git a/sys/arch/amd64/amd64/mpbios.c b/sys/arch/amd64/amd64/mpbios.c
index aef32f6ffa1..614dbbec79e 100644
--- a/sys/arch/amd64/amd64/mpbios.c
+++ b/sys/arch/amd64/amd64/mpbios.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mpbios.c,v 1.22 2014/05/26 19:03:28 kettenis Exp $ */
+/* $OpenBSD: mpbios.c,v 1.23 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: mpbios.c,v 1.7 2003/05/15 16:32:50 fvdl Exp $ */
/*-
@@ -259,7 +259,7 @@ mpbios_map(paddr_t pa, int len, struct mp_map *handle)
handle->vsize = endpa - pgpa;
do {
- pmap_kenter_pa(va, pgpa, VM_PROT_READ);
+ pmap_kenter_pa(va, pgpa, PROT_READ);
va += PAGE_SIZE;
pgpa += PAGE_SIZE;
} while (pgpa < endpa);
diff --git a/sys/arch/amd64/amd64/pmap.c b/sys/arch/amd64/amd64/pmap.c
index cc726f0fbd2..6ed7fefb44d 100644
--- a/sys/arch/amd64/amd64/pmap.c
+++ b/sys/arch/amd64/amd64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.77 2014/11/07 03:20:02 mlarkin Exp $ */
+/* $OpenBSD: pmap.c,v 1.78 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.3 2003/05/08 18:13:13 thorpej Exp $ */
/*
@@ -296,7 +296,7 @@ static const struct nx_range_description nx_ranges[] = {
/*
* List of ranges to map as NX (non-execute) if the processor supports
* NX. Each range consists of a start vaddr and size (in bytes), and a
- * protection value (eg, VM_PROT_READ or VM_PROT_READ | VM_PROT_WRITE).
+ * protection value (eg, PROT_READ or PROT_READ | PROT_WRITE).
*
* The list also includes an 'is_ptr' field in each element to denote
* if the 'start' value is a constant (is_ptr == 0) or should be
@@ -308,13 +308,13 @@ static const struct nx_range_description nx_ranges[] = {
{ /* .rodata range */
(vaddr_t)&__rodata_start,
(size_t)&__rodata_size,
- VM_PROT_READ,
+ PROT_READ,
0
},
{ /* ISA hole */
(vaddr_t)&atdevbase,
IOM_SIZE,
- VM_PROT_READ | VM_PROT_WRITE,
+ PROT_READ | PROT_WRITE,
1
}
};
@@ -493,7 +493,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
pte = kvtopte(va);
- npte = (pa & PMAP_PA_MASK) | ((prot & VM_PROT_WRITE) ? PG_RW : PG_RO) |
+ npte = (pa & PMAP_PA_MASK) | ((prot & PROT_WRITE) ? PG_RW : PG_RO) |
((pa & PMAP_NOCACHE) ? PG_N : 0) |
((pa & PMAP_WC) ? pmap_pg_wc : 0) | PG_V;
@@ -501,7 +501,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
if (va >= (vaddr_t)NBPD_L2)
npte |= PG_G;
- if ((cpu_feature & CPUID_NXE) && !(prot & VM_PROT_EXECUTE))
+ if ((cpu_feature & CPUID_NXE) && !(prot & PROT_EXEC))
npte |= PG_NX;
opte = pmap_pte_set(pte, npte);
#ifdef LARGEPAGES
@@ -593,15 +593,14 @@ pmap_bootstrap(paddr_t first_avail, paddr_t max_pa)
* we can jam into a i386 PTE.
*/
- protection_codes[VM_PROT_NONE] = pg_nx; /* --- */
- protection_codes[VM_PROT_EXECUTE] = PG_RO; /* --x */
- protection_codes[VM_PROT_READ] = PG_RO | pg_nx; /* -r- */
- protection_codes[VM_PROT_READ|VM_PROT_EXECUTE] = PG_RO; /* -rx */
- protection_codes[VM_PROT_WRITE] = PG_RW | pg_nx; /* w-- */
- protection_codes[VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;/* w-x */
- protection_codes[VM_PROT_WRITE|VM_PROT_READ] = PG_RW | pg_nx;
- /* wr- */
- protection_codes[VM_PROT_ALL] = PG_RW; /* wrx */
+ protection_codes[PROT_NONE] = pg_nx; /* --- */
+ protection_codes[PROT_EXEC] = PG_RO; /* --x */
+ protection_codes[PROT_READ] = PG_RO | pg_nx; /* -r- */
+ protection_codes[PROT_READ | PROT_EXEC] = PG_RO; /* -rx */
+ protection_codes[PROT_WRITE] = PG_RW | pg_nx; /* w-- */
+ protection_codes[PROT_WRITE | PROT_EXEC] = PG_RW; /* w-x */
+ protection_codes[PROT_WRITE | PROT_READ] = PG_RW | pg_nx; /* wr- */
+ protection_codes[PROT_READ | PROT_WRITE | PROT_EXEC] = PG_RW; /* wrx */
/*
* now we init the kernel's pmap
@@ -1850,7 +1849,7 @@ pmap_write_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
eva &= PG_FRAME;
nx = 0;
- if ((cpu_feature & CPUID_NXE) && !(prot & VM_PROT_EXECUTE))
+ if ((cpu_feature & CPUID_NXE) && !(prot & PROT_EXEC))
nx = PG_NX;
if ((eva - sva > 32 * PAGE_SIZE) && pmap != pmap_kernel())
diff --git a/sys/arch/amd64/amd64/trap.c b/sys/arch/amd64/amd64/trap.c
index 8ec6571fd0e..59775682c6a 100644
--- a/sys/arch/amd64/amd64/trap.c
+++ b/sys/arch/amd64/amd64/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.42 2014/07/13 12:11:01 jasper Exp $ */
+/* $OpenBSD: trap.c,v 1.43 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: trap.c,v 1.2 2003/05/04 23:51:56 fvdl Exp $ */
/*-
@@ -353,11 +353,11 @@ faultcommon:
else
map = &vm->vm_map;
if (frame->tf_err & PGEX_W)
- ftype = VM_PROT_WRITE;
+ ftype = PROT_WRITE;
else if (frame->tf_err & PGEX_I)
- ftype = VM_PROT_EXECUTE;
+ ftype = PROT_EXEC;
else
- ftype = VM_PROT_READ;
+ ftype = PROT_READ;
#ifdef DIAGNOSTIC
if (map == kernel_map && va == 0) {
diff --git a/sys/arch/amd64/amd64/vm_machdep.c b/sys/arch/amd64/amd64/vm_machdep.c
index 6da163abcf2..f1c135e03cf 100644
--- a/sys/arch/amd64/amd64/vm_machdep.c
+++ b/sys/arch/amd64/amd64/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.28 2013/06/02 16:38:05 guenther Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.29 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: vm_machdep.c,v 1.1 2003/04/26 18:39:33 fvdl Exp $ */
/*-
@@ -249,7 +249,7 @@ vmapbuf(struct buf *bp, vsize_t len)
while (len) {
(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
faddr, &fpa);
- pmap_kenter_pa(taddr, fpa, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(taddr, fpa, PROT_READ | PROT_WRITE);
faddr += PAGE_SIZE;
taddr += PAGE_SIZE;
len -= PAGE_SIZE;
diff --git a/sys/arch/amd64/include/pmap.h b/sys/arch/amd64/include/pmap.h
index 0aef748ad37..5c3a22b832a 100644
--- a/sys/arch/amd64/include/pmap.h
+++ b/sys/arch/amd64/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.47 2014/10/06 20:34:58 sf Exp $ */
+/* $OpenBSD: pmap.h,v 1.48 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: pmap.h,v 1.1 2003/04/26 18:39:46 fvdl Exp $ */
/*
@@ -70,6 +70,7 @@
#ifndef _LOCORE
#ifdef _KERNEL
+#include <sys/mman.h>
#include <machine/cpufunc.h>
#include <machine/segments.h>
#endif /* _KERNEL */
@@ -464,8 +465,8 @@ pmap_update_2pg(vaddr_t va, vaddr_t vb)
__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
- if ((prot & VM_PROT_WRITE) == 0) {
- if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
+ if ((prot & PROT_WRITE) == 0) {
+ if (prot & (PROT_READ | PROT_EXEC)) {
(void) pmap_clear_attrs(pg, PG_RW);
} else {
pmap_page_remove(pg);
@@ -484,8 +485,8 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
__inline static void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
- if ((prot & VM_PROT_WRITE) == 0) {
- if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
+ if ((prot & PROT_WRITE) == 0) {
+ if (prot & (PROT_READ| PROT_EXEC)) {
pmap_write_protect(pmap, sva, eva, prot);
} else {
pmap_remove(pmap, sva, eva);
diff --git a/sys/arch/amd64/pci/vga_post.c b/sys/arch/amd64/pci/vga_post.c
index ccef775ea7d..cdb04147cc2 100644
--- a/sys/arch/amd64/pci/vga_post.c
+++ b/sys/arch/amd64/pci/vga_post.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vga_post.c,v 1.7 2014/07/12 18:44:41 tedu Exp $ */
+/* $OpenBSD: vga_post.c,v 1.8 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: vga_post.c,v 1.12 2009/03/15 21:32:36 cegger Exp $ */
/*-
@@ -149,7 +149,7 @@ vga_post_init(int bus, int device, int function)
sc->sys_image = sys_image;
sc->emu.sys_private = sc;
- pmap_kenter_pa(sys_bios_data, 0, VM_PROT_READ);
+ pmap_kenter_pa(sys_bios_data, 0, PROT_READ);
pmap_update(pmap_kernel());
memcpy((void *)sc->bios_data, (void *)sys_bios_data, PAGE_SIZE);
pmap_kremove(sys_bios_data, PAGE_SIZE);
@@ -158,14 +158,14 @@ vga_post_init(int bus, int device, int function)
iter = 0;
TAILQ_FOREACH(pg, &sc->ram_backing, pageq) {
pmap_kenter_pa(sc->sys_image + iter, VM_PAGE_TO_PHYS(pg),
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
iter += PAGE_SIZE;
}
KASSERT(iter == BASE_MEMORY);
for (iter = 640 * 1024; iter < 1024 * 1024; iter += PAGE_SIZE)
pmap_kenter_pa(sc->sys_image + iter, iter,
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
memset(&sc->emu, 0, sizeof(sc->emu));
diff --git a/sys/arch/arm/arm/arm32_machdep.c b/sys/arch/arm/arm/arm32_machdep.c
index b937f517c7c..7a9d9dcd29f 100644
--- a/sys/arch/arm/arm/arm32_machdep.c
+++ b/sys/arch/arm/arm/arm32_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: arm32_machdep.c,v 1.45 2014/07/10 19:44:35 uebayasi Exp $ */
+/* $OpenBSD: arm32_machdep.c,v 1.46 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: arm32_machdep.c,v 1.42 2003/12/30 12:33:15 pk Exp $ */
/*
@@ -244,7 +244,7 @@ cpu_startup()
cpu_setup();
/* Lock down zero page */
- vector_page_setprot(VM_PROT_READ|VM_PROT_EXECUTE);
+ vector_page_setprot(PROT_READ | PROT_EXEC);
/*
* Give pmap a chance to set up a few more things now the vm
@@ -264,7 +264,7 @@ cpu_startup()
/* msgbufphys was setup during the secondary boot strap */
for (loop = 0; loop < atop(MSGBUFSIZE); ++loop)
pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
- msgbufphys + loop * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
+ msgbufphys + loop * PAGE_SIZE, PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
diff --git a/sys/arch/arm/arm/bus_dma.c b/sys/arch/arm/arm/bus_dma.c
index 5e27c0ac5ba..64a3b5954e7 100644
--- a/sys/arch/arm/arm/bus_dma.c
+++ b/sys/arch/arm/arm/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.27 2014/09/13 16:06:36 doug Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.28 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: bus_dma.c,v 1.38 2003/10/30 08:44:13 scw Exp $ */
/*-
@@ -765,7 +765,7 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
if (size == 0)
panic("_bus_dmamem_map: size botch");
pmap_kenter_cache(va, addr,
- VM_PROT_READ | VM_PROT_WRITE,
+ PROT_READ | PROT_WRITE,
!(flags & BUS_DMA_COHERENT));
#ifdef DEBUG_DMA
diff --git a/sys/arch/arm/arm/cpu.c b/sys/arch/arm/arm/cpu.c
index 0502476e232..9be602dee35 100644
--- a/sys/arch/arm/arm/cpu.c
+++ b/sys/arch/arm/arm/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.19 2014/11/14 09:56:06 dlg Exp $ */
+/* $OpenBSD: cpu.c,v 1.20 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: cpu.c,v 1.56 2004/04/14 04:01:49 bsh Exp $ */
@@ -530,7 +530,7 @@ cpu_alloc_idlepcb(struct cpu_info *ci)
*/
if (uvm_uarea_alloc(&uaddr)) {
error = uvm_fault_wire(kernel_map, uaddr, uaddr + USPACE,
- VM_FAULT_WIRE, VM_PROT_READ | VM_PROT_WRITE);
+ VM_FAULT_WIRE, PROT_READ | PROT_WRITE);
if (error)
return error;
}
diff --git a/sys/arch/arm/arm/db_interface.c b/sys/arch/arm/arm/db_interface.c
index c934a3796cc..82af3014759 100644
--- a/sys/arch/arm/arm/db_interface.c
+++ b/sys/arch/arm/arm/db_interface.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: db_interface.c,v 1.8 2014/07/13 12:11:01 jasper Exp $ */
+/* $OpenBSD: db_interface.c,v 1.9 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: db_interface.c,v 1.34 2003/10/26 23:11:15 chris Exp $ */
/*
@@ -245,7 +245,7 @@ db_write_text(vaddr_t addr, size_t size, char *data)
pgva = (vaddr_t)dst & L1_S_FRAME;
limit = L1_S_SIZE - ((vaddr_t)dst & L1_S_OFFSET);
- tmppde = oldpde | L1_S_PROT(PTE_KERNEL, VM_PROT_WRITE);
+ tmppde = oldpde | L1_S_PROT(PTE_KERNEL, PROT_WRITE);
*pde = tmppde;
PTE_SYNC(pde);
break;
@@ -257,7 +257,7 @@ db_write_text(vaddr_t addr, size_t size, char *data)
if (pte == NULL)
goto no_mapping;
oldpte = *pte;
- tmppte = oldpte | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE);
+ tmppte = oldpte | L2_S_PROT(PTE_KERNEL, PROT_WRITE);
*pte = tmppte;
PTE_SYNC(pte);
break;
diff --git a/sys/arch/arm/arm/fault.c b/sys/arch/arm/arm/fault.c
index c3802ec5cf1..dad72fba6fe 100644
--- a/sys/arch/arm/arm/fault.c
+++ b/sys/arch/arm/arm/fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: fault.c,v 1.17 2014/05/08 21:17:00 miod Exp $ */
+/* $OpenBSD: fault.c,v 1.18 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: fault.c,v 1.46 2004/01/21 15:39:21 skrll Exp $ */
/*
@@ -343,22 +343,22 @@ data_abort_handler(trapframe_t *tf)
* responsible to determine if it was a write.
*/
if (IS_PERMISSION_FAULT(fsr))
- ftype = VM_PROT_WRITE;
+ ftype = PROT_WRITE;
else {
u_int insn = *(u_int *)tf->tf_pc;
if (((insn & 0x0c100000) == 0x04000000) || /* STR/STRB */
((insn & 0x0e1000b0) == 0x000000b0) || /* STRH/STRD */
((insn & 0x0a100000) == 0x08000000)) /* STM/CDT */
- ftype = VM_PROT_WRITE;
+ ftype = PROT_WRITE;
else
if ((insn & 0x0fb00ff0) == 0x01000090) /* SWP */
- ftype = VM_PROT_READ | VM_PROT_WRITE;
+ ftype = PROT_READ | PROT_WRITE;
else
- ftype = VM_PROT_READ;
+ ftype = PROT_READ;
}
#else
- ftype = fsr & FAULT_WNR ? VM_PROT_WRITE : VM_PROT_READ;
+ ftype = fsr & FAULT_WNR ? PROT_WRITE : PROT_READ;
#endif
/*
@@ -689,7 +689,7 @@ prefetch_abort_handler(trapframe_t *tf)
#ifdef DEBUG
last_fault_code = -1;
#endif
- if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ|VM_PROT_EXECUTE, 1))
+ if (pmap_fault_fixup(map->pmap, va, PROT_READ | PROT_EXEC, 1))
goto out;
#ifdef DIAGNOSTIC
@@ -699,7 +699,7 @@ prefetch_abort_handler(trapframe_t *tf)
}
#endif
- error = uvm_fault(map, va, 0, VM_PROT_READ|VM_PROT_EXECUTE);
+ error = uvm_fault(map, va, 0, PROT_READ | PROT_EXEC);
if (__predict_true(error == 0))
goto out;
diff --git a/sys/arch/arm/arm/fiq.c b/sys/arch/arm/arm/fiq.c
index ac7af088409..d0ee0182aee 100644
--- a/sys/arch/arm/arm/fiq.c
+++ b/sys/arch/arm/arm/fiq.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: fiq.c,v 1.5 2014/04/03 10:17:34 mpi Exp $ */
+/* $OpenBSD: fiq.c,v 1.6 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: fiq.c,v 1.5 2002/04/03 23:33:27 thorpej Exp $ */
/*
@@ -65,13 +65,13 @@ static void
fiq_installhandler(void *func, size_t size)
{
#if !defined(__ARM_FIQ_INDIRECT)
- vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
+ vector_page_setprot(PROT_READ | PROT_WRITE | PROT_EXEC);
#endif
memcpy(fiqvector, func, size);
#if !defined(__ARM_FIQ_INDIRECT)
- vector_page_setprot(VM_PROT_READ|VM_PROT_EXECUTE);
+ vector_page_setprot(PROT_READ | PROT_EXEC);
#endif
cpu_icache_sync_range((vaddr_t) fiqvector, size);
}
diff --git a/sys/arch/arm/arm/mem.c b/sys/arch/arm/arm/mem.c
index f6c9f756eee..56c6b25e153 100644
--- a/sys/arch/arm/arm/mem.c
+++ b/sys/arch/arm/arm/mem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mem.c,v 1.10 2010/12/26 15:40:59 miod Exp $ */
+/* $OpenBSD: mem.c,v 1.11 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: mem.c,v 1.11 2003/10/16 12:02:58 jdolecek Exp $ */
/*
@@ -183,8 +183,8 @@ mmrw(dev, uio, flags)
case DEV_MEM:
v = uio->uio_offset;
- prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
- VM_PROT_WRITE;
+ prot = uio->uio_rw == UIO_READ ? PROT_READ :
+ PROT_WRITE;
pmap_enter(pmap_kernel(), (vaddr_t)memhook,
trunc_page(v), prot, prot|PMAP_WIRED);
pmap_update(pmap_kernel());
diff --git a/sys/arch/arm/arm/pmap.c b/sys/arch/arm/arm/pmap.c
index 42d46b406b8..27f486cfb7a 100644
--- a/sys/arch/arm/arm/pmap.c
+++ b/sys/arch/arm/arm/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.47 2014/10/07 07:14:55 jsg Exp $ */
+/* $OpenBSD: pmap.c,v 1.48 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
@@ -1871,7 +1871,7 @@ pmap_create(void)
* Map the vector page.
*/
pmap_enter(pm, vector_page, systempage.pv_pa,
- VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
+ PROT_READ, PROT_READ | PMAP_WIRED);
pmap_update(pm);
}
@@ -1902,7 +1902,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags));
- KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0);
+ KDASSERT((flags & PMAP_WIRED) == 0 || (flags & PROT_MASK) != 0);
KDASSERT(((va | pa) & PGOFSET) == 0);
/*
@@ -1912,9 +1912,9 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
nflags = 0;
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
nflags |= PVF_WRITE;
- if (prot & VM_PROT_EXECUTE)
+ if (prot & PROT_EXEC)
nflags |= PVF_EXEC;
if (flags & PMAP_WIRED)
nflags |= PVF_WIRED;
@@ -1960,7 +1960,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
/*
* This is to be a managed mapping.
*/
- if ((flags & VM_PROT_ALL) ||
+ if ((flags & PROT_MASK) ||
(pg->mdpage.pvh_attrs & PVF_REF)) {
/*
* - The access type indicates that we don't need
@@ -1973,8 +1973,8 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
nflags |= PVF_REF;
- if ((prot & VM_PROT_WRITE) != 0 &&
- ((flags & VM_PROT_WRITE) != 0 ||
+ if ((prot & PROT_WRITE) != 0 &&
+ ((flags & PROT_WRITE) != 0 ||
(pg->mdpage.pvh_attrs & PVF_MOD) != 0)) {
/*
* This is a writable mapping, and the
@@ -2011,7 +2011,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
if (pm->pm_cstate.cs_cache_d &&
(oflags & PVF_NC) == 0 &&
(opte & L2_S_PROT_KW) != 0 &&
- (prot & VM_PROT_WRITE) == 0)
+ (prot & PROT_WRITE) == 0)
cpu_dcache_wb_range(va, PAGE_SIZE);
} else {
/*
@@ -2071,7 +2071,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
* the get go as we don't need to track ref/mod status.
*/
npte |= L2_S_PROTO;
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
npte |= L2_S_PROT_KW;
/*
@@ -2552,12 +2552,12 @@ pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n",
pm, sva, eva, prot));
- if ((prot & VM_PROT_READ) == 0) {
+ if ((prot & PROT_READ) == 0) {
pmap_remove(pm, sva, eva);
return;
}
- if (prot & VM_PROT_WRITE) {
+ if (prot & PROT_WRITE) {
/*
* If this is a read->write transition, just ignore it and let
* uvm_fault() take care of it later.
@@ -2648,12 +2648,12 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
pg, pg->phys_addr, prot));
switch(prot) {
- case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
- case VM_PROT_READ|VM_PROT_WRITE:
+ case PROT_READ | PROT_WRITE | PROT_EXEC:
+ case PROT_READ | PROT_WRITE:
return;
- case VM_PROT_READ:
- case VM_PROT_READ|VM_PROT_EXECUTE:
+ case PROT_READ:
+ case PROT_READ | PROT_EXEC:
pmap_clearbit(pg, PVF_WRITE);
break;
@@ -2765,7 +2765,7 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
pa = l2pte_pa(pte);
- if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_KW) == 0) {
+ if ((ftype & PROT_WRITE) && (pte & L2_S_PROT_KW) == 0) {
/*
* This looks like a good candidate for "page modified"
* emulation...
@@ -3226,7 +3226,7 @@ pmap_zero_page_generic(struct vm_page *pg)
* zeroed page. Invalidate the TLB as needed.
*/
*cdst_pte = L2_S_PROTO | phys |
- L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ L2_S_PROT(PTE_KERNEL, PROT_WRITE) | pte_l2_s_cache_mode;
PTE_SYNC(cdst_pte);
cpu_tlb_flushD_SE(cdstp);
cpu_cpwait();
@@ -3252,7 +3252,7 @@ pmap_zero_page_xscale(struct vm_page *pg)
* zeroed page. Invalidate the TLB as needed.
*/
*cdst_pte = L2_S_PROTO | phys |
- L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
+ L2_S_PROT(PTE_KERNEL, PROT_WRITE) |
L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
PTE_SYNC(cdst_pte);
cpu_tlb_flushD_SE(cdstp);
@@ -3287,7 +3287,7 @@ pmap_pageidlezero(struct vm_page *pg)
* zeroed page. Invalidate the TLB as needed.
*/
*cdst_pte = L2_S_PROTO | phys |
- L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ L2_S_PROT(PTE_KERNEL, PROT_WRITE) | pte_l2_s_cache_mode;
PTE_SYNC(cdst_pte);
cpu_tlb_flushD_SE(cdstp);
cpu_cpwait();
@@ -3352,10 +3352,10 @@ pmap_copy_page_generic(struct vm_page *src_pg, struct vm_page *dst_pg)
* as required.
*/
*csrc_pte = L2_S_PROTO | src |
- L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
+ L2_S_PROT(PTE_KERNEL, PROT_READ) | pte_l2_s_cache_mode;
PTE_SYNC(csrc_pte);
*cdst_pte = L2_S_PROTO | dst |
- L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ L2_S_PROT(PTE_KERNEL, PROT_WRITE) | pte_l2_s_cache_mode;
PTE_SYNC(cdst_pte);
cpu_tlb_flushD_SE(csrcp);
cpu_tlb_flushD_SE(cdstp);
@@ -3395,11 +3395,11 @@ pmap_copy_page_xscale(struct vm_page *src_pg, struct vm_page *dst_pg)
* as required.
*/
*csrc_pte = L2_S_PROTO | src |
- L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
+ L2_S_PROT(PTE_KERNEL, PROT_READ) |
L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
PTE_SYNC(csrc_pte);
*cdst_pte = L2_S_PROTO | dst |
- L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
+ L2_S_PROT(PTE_KERNEL, PROT_WRITE) |
L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
PTE_SYNC(cdst_pte);
cpu_tlb_flushD_SE(csrcp);
@@ -3443,7 +3443,7 @@ pmap_copy_page_v7(struct vm_page *src_pg, struct vm_page *dst_pg)
L2_V7_AP(0x5) | pte_l2_s_cache_mode;
PTE_SYNC(csrc_pte);
*cdst_pte = L2_S_PROTO | dst |
- L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ L2_S_PROT(PTE_KERNEL, PROT_WRITE) | pte_l2_s_cache_mode;
PTE_SYNC(cdst_pte);
cpu_tlb_flushD_SE(csrcp);
cpu_tlb_flushD_SE(cdstp);
@@ -3498,7 +3498,7 @@ pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap)
ptep = &l2b->l2b_kva[l2pte_index(va)];
*ptep = L2_S_PROTO | pa | cache_mode |
- L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
+ L2_S_PROT(PTE_KERNEL, PROT_READ | PROT_WRITE);
PTE_SYNC(ptep);
memset((void *)va, 0, PAGE_SIZE);
return (0);
@@ -4157,7 +4157,7 @@ pmap_postinit(void)
paddr_t pa = VM_PAGE_TO_PHYS(m);
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
/*
* Make sure the L1 descriptor table is mapped
@@ -4973,7 +4973,7 @@ xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
#else
pte[l2pte_index(va)] =
#endif
- L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
+ L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, PROT_READ) |
L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
}
diff --git a/sys/arch/arm/arm/pmap7.c b/sys/arch/arm/arm/pmap7.c
index 926420640b7..786a530c769 100644
--- a/sys/arch/arm/arm/pmap7.c
+++ b/sys/arch/arm/arm/pmap7.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap7.c,v 1.17 2014/10/27 00:49:05 jsg Exp $ */
+/* $OpenBSD: pmap7.c,v 1.18 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.147 2004/01/18 13:03:50 scw Exp $ */
/*
@@ -691,7 +691,7 @@ printf("%s: %d %d\n", __func__, domain, ++nl1);
for (eva = va + L1_TABLE_SIZE; va < eva; va += PAGE_SIZE) {
paddr_t pa = VM_PAGE_TO_PHYS(m);
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
/*
* Make sure the L1 descriptor table is mapped
* with the cache-mode set to write-through, or
@@ -1052,7 +1052,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
/* make the pte read only */
npte = (npte & ~L2_S_PROT_MASK) |
L2_S_PROT(pm == pmap_kernel() ? PTE_KERNEL : PTE_USER,
- npte & L2_V7_S_XN ? VM_PROT_READ : VM_PROT_READ | VM_PROT_EXECUTE);
+ npte & L2_V7_S_XN ? PROT_READ : PROT_READ | PROT_EXEC);
}
if (maskbits & PVF_REF) {
@@ -1154,7 +1154,7 @@ pmap_clean_page(struct vm_page *pg, int isync)
*/
if (!wb) {
*cwb_pte = L2_S_PROTO | VM_PAGE_TO_PHYS(pg) |
- L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ L2_S_PROT(PTE_KERNEL, PROT_WRITE) | pte_l2_s_cache_mode;
PTE_SYNC(cwb_pte);
cpu_tlb_flushD_SE(cwbp);
cpu_cpwait();
@@ -1311,7 +1311,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags));
- KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0);
+ KDASSERT((flags & PMAP_WIRED) == 0 || (flags & PROT_MASK) != 0);
KDASSERT(((va | pa) & PGOFSET) == 0);
/*
@@ -1321,9 +1321,9 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
nflags = 0;
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
nflags |= PVF_WRITE;
- if (prot & VM_PROT_EXECUTE)
+ if (prot & PROT_EXEC)
nflags |= PVF_EXEC;
if (flags & PMAP_WIRED)
nflags |= PVF_WIRED;
@@ -1369,7 +1369,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
/*
* This has to be a managed mapping.
*/
- if ((flags & VM_PROT_ALL) ||
+ if ((flags & PROT_MASK) ||
(pg->mdpage.pvh_attrs & PVF_REF)) {
/*
* - The access type indicates that we don't need
@@ -1382,8 +1382,8 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
nflags |= PVF_REF;
- if ((prot & VM_PROT_WRITE) != 0 &&
- ((flags & VM_PROT_WRITE) != 0 ||
+ if ((prot & PROT_WRITE) != 0 &&
+ ((flags & PROT_WRITE) != 0 ||
(pg->mdpage.pvh_attrs & PVF_MOD) != 0)) {
/*
* This is a writable mapping, and the
@@ -1399,7 +1399,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
*/
npte &= ~L2_TYPE_MASK;
npte |= L2_TYPE_INV;
- prot &= ~VM_PROT_WRITE;
+ prot &= ~PROT_WRITE;
mapped = 0;
}
@@ -1419,7 +1419,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
*/
if ((oflags & PVF_NC) == 0 &&
l2pte_is_writeable(opte, pm) &&
- (prot & VM_PROT_WRITE) == 0) {
+ (prot & PROT_WRITE) == 0) {
cpu_dcache_wb_range(va, PAGE_SIZE);
cpu_sdcache_wb_range(va, opte & L2_S_FRAME,
PAGE_SIZE);
@@ -1477,7 +1477,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
* Make sure userland mappings get the right permissions
*/
npte |= L2_S_PROT(pm == pmap_kernel() ? PTE_KERNEL : PTE_USER,
- prot & ~VM_PROT_WRITE);
+ prot & ~PROT_WRITE);
/*
* Keep the stats up to date
@@ -1530,7 +1530,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
* Make sure executable pages do not have stale data in I$,
* which is VIPT.
*/
- if (mapped && (prot & VM_PROT_EXECUTE) != 0 && pmap_is_current(pm))
+ if (mapped && (prot & PROT_EXEC) != 0 && pmap_is_current(pm))
cpu_icache_sync_range(va, PAGE_SIZE);
pmap_release_pmap_lock(pm);
@@ -1822,13 +1822,13 @@ pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x",
pm, sva, eva, prot));
- if ((prot & VM_PROT_READ) == 0) {
+ if ((prot & PROT_READ) == 0) {
NPDEBUG(PDB_PROTECT, printf("\n"));
pmap_remove(pm, sva, eva);
return;
}
- if (prot & VM_PROT_WRITE) {
+ if (prot & PROT_WRITE) {
/*
* If this is a read->write transition, just ignore it and let
* uvm_fault() take care of it later.
@@ -1878,7 +1878,7 @@ NPDEBUG(PDB_PROTECT, printf("\n"));
pmap_clean_page(pg, FALSE);
pte = (pte & ~L2_S_PROT_MASK) |
L2_S_PROT(pm == pmap_kernel() ? PTE_KERNEL : PTE_USER,
- pte & L2_V7_S_XN ? VM_PROT_READ : VM_PROT_READ | VM_PROT_EXECUTE);
+ pte & L2_V7_S_XN ? PROT_READ : PROT_READ | PROT_EXEC);
*ptep = pte;
PTE_SYNC(ptep);
@@ -1926,12 +1926,12 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
pg, pg->phys_addr, prot));
switch(prot) {
- case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
- case VM_PROT_READ|VM_PROT_WRITE:
+ case PROT_READ | PROT_WRITE | PROT_EXEC:
+ case PROT_READ | PROT_WRITE:
return;
- case VM_PROT_READ:
- case VM_PROT_READ|VM_PROT_EXECUTE:
+ case PROT_READ:
+ case PROT_READ | PROT_EXEC:
pmap_clearbit(pg, PVF_WRITE);
break;
@@ -2041,14 +2041,14 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
*/
if (user) {
/* XXX use of L2_V7_S_XN */
- if ((pte & L2_S_PROT_MASK & ~L2_V7_S_XN) != L2_S_PROT(PTE_USER, VM_PROT_READ) &&
- (pte & L2_S_PROT_MASK & ~L2_V7_S_XN) != L2_S_PROT(PTE_USER, VM_PROT_WRITE))
+ if ((pte & L2_S_PROT_MASK & ~L2_V7_S_XN) != L2_S_PROT(PTE_USER, PROT_READ) &&
+ (pte & L2_S_PROT_MASK & ~L2_V7_S_XN) != L2_S_PROT(PTE_USER, PROT_WRITE))
goto out;
}
pa = l2pte_pa(pte);
- if ((ftype & VM_PROT_EXECUTE) && (pte & L2_V7_S_XN)) {
+ if ((ftype & PROT_EXEC) && (pte & L2_V7_S_XN)) {
printf("%s: va %08lx ftype %x %c pte %08x\n", __func__, va, ftype, user ? 'u' : 's', pte);
printf("fault on exec\n");
#ifdef DDB
@@ -2057,7 +2057,7 @@ Debugger();
/* XXX FIX THIS */
goto out;
}
- if ((ftype & VM_PROT_WRITE) && !l2pte_is_writeable(pte, pm)) {
+ if ((ftype & PROT_WRITE) && !l2pte_is_writeable(pte, pm)) {
/*
* This looks like a good candidate for "page modified"
* emulation...
@@ -2098,7 +2098,7 @@ Debugger();
*/
*ptep = (pte & ~(L2_TYPE_MASK|L2_S_PROT_MASK)) | L2_S_PROTO |
L2_S_PROT(pm == pmap_kernel() ? PTE_KERNEL : PTE_USER,
- pte & L2_V7_S_XN ? VM_PROT_WRITE : VM_PROT_WRITE | VM_PROT_EXECUTE);
+ pte & L2_V7_S_XN ? PROT_WRITE : PROT_WRITE | PROT_EXEC);
PTE_SYNC(ptep);
rv = 1;
} else
@@ -2361,7 +2361,7 @@ pmap_zero_page_generic(struct vm_page *pg)
* zeroed page. Invalidate the TLB as needed.
*/
*cdst_pte = L2_S_PROTO | phys |
- L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ L2_S_PROT(PTE_KERNEL, PROT_WRITE) | pte_l2_s_cache_mode;
PTE_SYNC(cdst_pte);
cpu_tlb_flushD_SE(cdstp);
cpu_cpwait();
@@ -2391,10 +2391,10 @@ pmap_copy_page_generic(struct vm_page *src_pg, struct vm_page *dst_pg)
* as required.
*/
*csrc_pte = L2_S_PROTO | src |
- L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
+ L2_S_PROT(PTE_KERNEL, PROT_READ) | pte_l2_s_cache_mode;
PTE_SYNC(csrc_pte);
*cdst_pte = L2_S_PROTO | dst |
- L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
+ L2_S_PROT(PTE_KERNEL, PROT_WRITE) | pte_l2_s_cache_mode;
PTE_SYNC(cdst_pte);
cpu_tlb_flushD_SE(csrcp);
cpu_tlb_flushD_SE(cdstp);
@@ -2445,7 +2445,7 @@ pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap)
ptep = &l2b->l2b_kva[l2pte_index(va)];
*ptep = L2_S_PROTO | pa | cache_mode |
- L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
+ L2_S_PROT(PTE_KERNEL, PROT_READ | PROT_WRITE);
PTE_SYNC(ptep);
cpu_tlb_flushD_SE(va);
diff --git a/sys/arch/arm/arm/stubs.c b/sys/arch/arm/arm/stubs.c
index 30bac6d3cd5..0fb521d6ce0 100644
--- a/sys/arch/arm/arm/stubs.c
+++ b/sys/arch/arm/arm/stubs.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: stubs.c,v 1.8 2013/06/11 16:42:07 deraadt Exp $ */
+/* $OpenBSD: stubs.c,v 1.9 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: stubs.c,v 1.14 2003/07/15 00:24:42 lukem Exp $ */
/*
@@ -196,7 +196,7 @@ dumpsys()
addr += PAGE_SIZE) {
if ((len % (1024*1024)) == 0)
printf("%d ", len / (1024*1024));
- pmap_kenter_pa(dumpspace, addr, VM_PROT_READ);
+ pmap_kenter_pa(dumpspace, addr, PROT_READ);
pmap_update(pmap_kernel());
error = (*bdev->d_dump)(dumpdev,
diff --git a/sys/arch/arm/arm/vm_machdep.c b/sys/arch/arm/arm/vm_machdep.c
index 3d5fd35cc8b..54b6cb22ff3 100644
--- a/sys/arch/arm/arm/vm_machdep.c
+++ b/sys/arch/arm/arm/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.13 2013/01/16 19:04:43 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.14 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: vm_machdep.c,v 1.31 2004/01/04 11:33:29 jdolecek Exp $ */
/*
@@ -262,7 +262,8 @@ vmapbuf(bp, len)
(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
faddr, &fpa);
pmap_enter(pmap_kernel(), taddr, fpa,
- VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
faddr += PAGE_SIZE;
taddr += PAGE_SIZE;
len -= PAGE_SIZE;
diff --git a/sys/arch/arm/armv7/armv7_space.c b/sys/arch/arm/armv7/armv7_space.c
index 8cad1a4c67e..243e2dbbd50 100644
--- a/sys/arch/arm/armv7/armv7_space.c
+++ b/sys/arch/arm/armv7/armv7_space.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: armv7_space.c,v 1.5 2013/07/11 16:16:38 rapha Exp $ */
+/* $OpenBSD: armv7_space.c,v 1.6 2014/11/16 12:30:56 deraadt Exp $ */
/*
* Copyright (c) 2001, 2002 Wasabi Systems, Inc.
@@ -192,7 +192,7 @@ armv7_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
*bshp = (bus_space_handle_t)(va + (bpa - startpa));
for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
pte = vtopte(va);
*pte &= ~L2_S_CACHE_MASK;
diff --git a/sys/arch/arm/include/pmap.h b/sys/arch/arm/include/pmap.h
index af4280bb6bd..da4bd58da87 100644
--- a/sys/arch/arm/include/pmap.h
+++ b/sys/arch/arm/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.27 2014/10/07 10:10:58 jsg Exp $ */
+/* $OpenBSD: pmap.h,v 1.28 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $ */
/*
@@ -682,15 +682,15 @@ L1_S_PROT(int ku, vm_prot_t pr)
pt_entry_t pte;
if (ku == PTE_USER)
- pte = (pr & VM_PROT_WRITE) ? L1_S_PROT_UW : L1_S_PROT_UR;
+ pte = (pr & PROT_WRITE) ? L1_S_PROT_UW : L1_S_PROT_UR;
else
- pte = (pr & VM_PROT_WRITE) ? L1_S_PROT_KW : L1_S_PROT_KR;
+ pte = (pr & PROT_WRITE) ? L1_S_PROT_KW : L1_S_PROT_KR;
/*
* If we set the XN bit, the abort handlers or the vector page
* might be marked as such. Needs Debugging.
*/
/*
- if ((pr & VM_PROT_EXECUTE) == 0)
+ if ((pr & PROT_EXEC) == 0)
pte |= L1_S_V7_XN;
*/
@@ -702,15 +702,15 @@ L2_L_PROT(int ku, vm_prot_t pr)
pt_entry_t pte;
if (ku == PTE_USER)
- pte = (pr & VM_PROT_WRITE) ? L2_L_PROT_UW : L2_L_PROT_UR;
+ pte = (pr & PROT_WRITE) ? L2_L_PROT_UW : L2_L_PROT_UR;
else
- pte = (pr & VM_PROT_WRITE) ? L2_L_PROT_KW : L2_L_PROT_KR;
+ pte = (pr & PROT_WRITE) ? L2_L_PROT_KW : L2_L_PROT_KR;
/*
* If we set the XN bit, the abort handlers or the vector page
* might be marked as such. Needs Debugging.
*/
/*
- if ((pr & VM_PROT_EXECUTE) == 0)
+ if ((pr & PROT_EXEC) == 0)
pte |= L2_V7_L_XN;
*/
@@ -722,15 +722,15 @@ L2_S_PROT(int ku, vm_prot_t pr)
pt_entry_t pte;
if (ku == PTE_USER)
- pte = (pr & VM_PROT_WRITE) ? L2_S_PROT_UW : L2_S_PROT_UR;
+ pte = (pr & PROT_WRITE) ? L2_S_PROT_UW : L2_S_PROT_UR;
else
- pte = (pr & VM_PROT_WRITE) ? L2_S_PROT_KW : L2_S_PROT_KR;
+ pte = (pr & PROT_WRITE) ? L2_S_PROT_KW : L2_S_PROT_KR;
/*
* If we set the XN bit, the abort handlers or the vector page
* might be marked as such. Needs Debugging.
*/
/*
- if ((pr & VM_PROT_EXECUTE) == 0)
+ if ((pr & PROT_EXEC) == 0)
pte |= L2_V7_S_XN;
*/
@@ -743,7 +743,7 @@ l2pte_is_writeable(pt_entry_t pte, struct pmap *pm)
/* XXX use of L2_V7_S_XN */
return (pte & L2_S_PROT_MASK & ~L2_V7_S_XN) ==
L2_S_PROT(pm == pmap_kernel() ? PTE_KERNEL : PTE_USER,
- VM_PROT_WRITE);
+ PROT_WRITE);
}
#endif
diff --git a/sys/arch/arm/xscale/i80321_space.c b/sys/arch/arm/xscale/i80321_space.c
index 3bedbf7e033..f6b9ed1ca0b 100644
--- a/sys/arch/arm/xscale/i80321_space.c
+++ b/sys/arch/arm/xscale/i80321_space.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i80321_space.c,v 1.4 2006/06/01 03:46:01 drahn Exp $ */
+/* $OpenBSD: i80321_space.c,v 1.5 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: i80321_space.c,v 1.9 2005/11/24 13:08:32 yamt Exp $ */
/*
@@ -211,7 +211,7 @@ printf("i80321_bs_map va %x pa %x, endpa %x, sz %x\n", va, startpa,
for (pa = startpa; pagecnt > 0;
pa += PAGE_SIZE, va += PAGE_SIZE, pagecnt -= PAGE_SIZE) {
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
pte = vtopte(va);
*pte &= ~L2_S_CACHE_MASK;
@@ -385,7 +385,7 @@ printf("i80321_bs_map va %x pa %x, endpa %x, sz %x\n", va, pa,
*bshp = va + (bpa & PAGE_MASK);
for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
pte = vtopte(va);
*pte &= ~L2_S_CACHE_MASK;
diff --git a/sys/arch/arm/xscale/pxa2x0_space.c b/sys/arch/arm/xscale/pxa2x0_space.c
index a10916df2df..f7c3aae7ad3 100644
--- a/sys/arch/arm/xscale/pxa2x0_space.c
+++ b/sys/arch/arm/xscale/pxa2x0_space.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pxa2x0_space.c,v 1.4 2008/05/15 22:17:08 brad Exp $ */
+/* $OpenBSD: pxa2x0_space.c,v 1.5 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: pxa2x0_space.c,v 1.5 2004/06/07 19:45:22 nathanw Exp $ */
/*
@@ -194,7 +194,7 @@ pxa2x0_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
*bshp = (bus_space_handle_t)(va + (bpa - startpa));
for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
pte = vtopte(va);
*pte &= ~L2_S_CACHE_MASK;
diff --git a/sys/arch/armish/armish/armish_machdep.c b/sys/arch/armish/armish/armish_machdep.c
index d0a566282af..cf595130b0b 100644
--- a/sys/arch/armish/armish/armish_machdep.c
+++ b/sys/arch/armish/armish/armish_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: armish_machdep.c,v 1.32 2014/09/20 09:28:24 kettenis Exp $ */
+/* $OpenBSD: armish_machdep.c,v 1.33 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: lubbock_machdep.c,v 1.2 2003/07/15 00:25:06 lukem Exp $ */
/*
@@ -316,7 +316,7 @@ const struct pmap_devmap iq80321_devmap[] = {
IQ80321_OBIO_BASE,
IQ80321_OBIO_BASE,
0x00100000 /* IQ80321_OBIO_SIZE, */,
- VM_PROT_READ|VM_PROT_WRITE,
+ PROT_READ | PROT_WRITE,
PTE_NOCACHE,
},
{0, 0, 0, 0, 0}
@@ -600,10 +600,10 @@ initarm(void *arg0, void *arg1, void *arg2)
logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
physical_start + logical, textsize,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
physical_start + logical, totalsize - textsize,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ PROT_READ | PROT_WRITE, PTE_CACHE);
}
#ifdef VERBOSE_INIT_ARM
@@ -612,21 +612,21 @@ initarm(void *arg0, void *arg1, void *arg2)
/* Map the stack pages */
pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
- IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ IRQ_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
- ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ ABT_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
- UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ UND_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
- UPAGES * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
+ UPAGES * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
- L1_TABLE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);
+ L1_TABLE_SIZE, PROT_READ | PROT_WRITE, PTE_PAGETABLE);
for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
- VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
+ PROT_READ | PROT_WRITE, PTE_PAGETABLE);
}
/* Map the Mini-Data cache clean area. */
@@ -636,10 +636,10 @@ initarm(void *arg0, void *arg1, void *arg2)
/* Map the vector page. */
#ifdef HIGH_VECT
pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ PROT_READ | PROT_WRITE, PTE_CACHE);
#else
pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ PROT_READ | PROT_WRITE, PTE_CACHE);
#endif
pmap_devmap_bootstrap(l1pagetable, iq80321_devmap);
diff --git a/sys/arch/armish/dev/obio_space.c b/sys/arch/armish/dev/obio_space.c
index b4f8c2fea06..8b5f5541516 100644
--- a/sys/arch/armish/dev/obio_space.c
+++ b/sys/arch/armish/dev/obio_space.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: obio_space.c,v 1.2 2006/05/29 17:30:26 drahn Exp $ */
+/* $OpenBSD: obio_space.c,v 1.3 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: obio_space.c,v 1.9 2005/11/24 13:08:33 yamt Exp $ */
@@ -160,7 +160,7 @@ obio_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flag,
*bshp = (bus_space_handle_t)(va + (bpa - startpa));
for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
pte = vtopte(va);
*pte &= ~L2_S_CACHE_MASK;
diff --git a/sys/arch/armv7/armv7/armv7_machdep.c b/sys/arch/armv7/armv7/armv7_machdep.c
index 994ae3d0bb1..ac0cd6eb6bb 100644
--- a/sys/arch/armv7/armv7/armv7_machdep.c
+++ b/sys/arch/armv7/armv7/armv7_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: armv7_machdep.c,v 1.15 2014/09/20 09:28:24 kettenis Exp $ */
+/* $OpenBSD: armv7_machdep.c,v 1.16 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: lubbock_machdep.c,v 1.2 2003/07/15 00:25:06 lukem Exp $ */
/*
@@ -355,7 +355,7 @@ bootstrap_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
for (pa = startpa; pa < endpa; pa += L1_S_SIZE, va += L1_S_SIZE)
pmap_map_section((vaddr_t)pagedir, va, pa,
- VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
+ PROT_READ | PROT_WRITE, PTE_NOCACHE);
cpu_tlb_flushD();
@@ -626,10 +626,10 @@ initarm(void *arg0, void *arg1, void *arg2)
logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
physical_start + logical, textsize,
- VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);
+ PROT_READ | PROT_WRITE | PROT_EXEC, PTE_CACHE);
logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
physical_start + logical, totalsize - textsize,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ PROT_READ | PROT_WRITE, PTE_CACHE);
}
#ifdef VERBOSE_INIT_ARM
@@ -638,28 +638,28 @@ initarm(void *arg0, void *arg1, void *arg2)
/* Map the stack pages */
pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
- IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ IRQ_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
- ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ ABT_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
- UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ UND_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
- UPAGES * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
+ UPAGES * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
- L1_TABLE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);
+ L1_TABLE_SIZE, PROT_READ | PROT_WRITE, PTE_PAGETABLE);
for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
- VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
+ PROT_READ | PROT_WRITE, PTE_PAGETABLE);
}
/* Map the Mini-Data cache clean area. */
/* Map the vector page. */
pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
- VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);
+ PROT_READ | PROT_WRITE | PROT_EXEC, PTE_CACHE);
/*
* map integrated peripherals at same address in l1pagetable
diff --git a/sys/arch/aviion/aviion/av400_machdep.c b/sys/arch/aviion/aviion/av400_machdep.c
index 65c495d170b..10a8702d763 100644
--- a/sys/arch/aviion/aviion/av400_machdep.c
+++ b/sys/arch/aviion/aviion/av400_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: av400_machdep.c,v 1.26 2013/10/23 10:07:14 miod Exp $ */
+/* $OpenBSD: av400_machdep.c,v 1.27 2014/11/16 12:30:56 deraadt Exp $ */
/*
* Copyright (c) 2006, 2007, Miodrag Vallat.
*
@@ -170,9 +170,9 @@ void av400_ipi_handler(struct trapframe *);
const struct pmap_table
av400_ptable[] = {
- { AV400_PROM, AV400_PROM_SIZE, UVM_PROT_RW, CACHE_INH },
+ { AV400_PROM, AV400_PROM_SIZE, PROT_READ | PROT_WRITE, CACHE_INH },
#if 0 /* mapped by the hardcoded BATC entries */
- { AV400_UTILITY,AV400_UTILITY_SIZE, UVM_PROT_RW, CACHE_INH },
+ { AV400_UTILITY,AV400_UTILITY_SIZE, PROT_READ | PROT_WRITE, CACHE_INH },
#endif
{ 0, (vsize_t)-1, 0, 0 }
};
diff --git a/sys/arch/aviion/aviion/av530_machdep.c b/sys/arch/aviion/aviion/av530_machdep.c
index 503d4ab9ecf..381728da556 100644
--- a/sys/arch/aviion/aviion/av530_machdep.c
+++ b/sys/arch/aviion/aviion/av530_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: av530_machdep.c,v 1.12 2013/10/23 10:07:14 miod Exp $ */
+/* $OpenBSD: av530_machdep.c,v 1.13 2014/11/16 12:30:56 deraadt Exp $ */
/*
* Copyright (c) 2006, 2007, 2010 Miodrag Vallat.
*
@@ -59,9 +59,9 @@ void av530_ipi_handler(struct trapframe *);
const struct pmap_table
av530_ptable[] = {
- { AV530_PROM, AV530_PROM_SIZE, UVM_PROT_RW, CACHE_INH },
+ { AV530_PROM, AV530_PROM_SIZE, PROT_READ | PROT_WRITE, CACHE_INH },
#if 0 /* mapped by the hardcoded BATC entries */
- { AV530_UTILITY,AV530_UTILITY_SIZE, UVM_PROT_RW, CACHE_INH },
+ { AV530_UTILITY,AV530_UTILITY_SIZE, PROT_READ | PROT_WRITE, CACHE_INH },
#endif
{ 0, (vsize_t)-1, 0, 0 }
};
diff --git a/sys/arch/aviion/aviion/bus_dma.c b/sys/arch/aviion/aviion/bus_dma.c
index 7b7d0c4d9e2..b19db9be109 100644
--- a/sys/arch/aviion/aviion/bus_dma.c
+++ b/sys/arch/aviion/aviion/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.8 2014/09/13 16:06:36 doug Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.9 2014/11/16 12:30:56 deraadt Exp $ */
/* $NetBSD: bus_dma.c,v 1.2 2001/06/10 02:31:25 briggs Exp $ */
/*-
@@ -541,8 +541,8 @@ bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
if (size == 0)
panic("bus_dmamem_map: size botch");
error = pmap_enter(pmap_kernel(), va, addr,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
- VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
if (error) {
pmap_update(pmap_kernel());
km_free((void *)sva, ssize, &kv_any, &kp_none);
diff --git a/sys/arch/aviion/dev/if_le_syscon.c b/sys/arch/aviion/dev/if_le_syscon.c
index 1c66dc91636..a6636023c43 100644
--- a/sys/arch/aviion/dev/if_le_syscon.c
+++ b/sys/arch/aviion/dev/if_le_syscon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_le_syscon.c,v 1.15 2013/10/23 22:14:22 miod Exp $ */
+/* $OpenBSD: if_le_syscon.c,v 1.16 2014/11/16 12:30:56 deraadt Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@@ -237,7 +237,8 @@ le_syscon_attach(struct device *parent, struct device *self, void *aux)
TAILQ_FOREACH(pg, &pglist, pageq) {
pmap_enter(pmap_kernel(), va, pa,
- UVM_PROT_RW, UVM_PROT_RW | PMAP_WIRED);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
va += PAGE_SIZE;
pa += PAGE_SIZE;
}
diff --git a/sys/arch/aviion/dev/vme.c b/sys/arch/aviion/dev/vme.c
index 51af775adbc..90fc6b86e6f 100644
--- a/sys/arch/aviion/dev/vme.c
+++ b/sys/arch/aviion/dev/vme.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vme.c,v 1.14 2014/07/12 18:44:41 tedu Exp $ */
+/* $OpenBSD: vme.c,v 1.15 2014/11/16 12:30:56 deraadt Exp $ */
/*
* Copyright (c) 2006, 2007, 2010 Miodrag Vallat.
*
@@ -477,7 +477,7 @@ vme_map(struct vme_softc *sc, struct extent *ext, u_int awidth,
/*
* Allocate virtual memory for the range and map it.
*/
- rc = vme_map_r(r, pa, len, flags, UVM_PROT_RW, rva);
+ rc = vme_map_r(r, pa, len, flags, PROT_READ | PROT_WRITE, rva);
if (rc != 0) {
if (ext != NULL)
(void)extent_free(ext, atop(pa), atop(len),
@@ -948,7 +948,8 @@ vmerw(struct vme_softc *sc, int awidth, int dwidth, struct uio *uio, int flags)
/* len = min(len, (off_t)r->vr_end - uio->uio_offset); */
rc = vme_map_r(r, trunc_page(uio->uio_offset), PAGE_SIZE, 0,
- uio->uio_rw == UIO_READ ? UVM_PROT_R : UVM_PROT_RW, &vmepg);
+ uio->uio_rw == UIO_READ ? PROT_READ : PROT_READ | PROT_WRITE,
+ &vmepg);
if (rc != 0)
break;
diff --git a/sys/arch/hppa/dev/astro.c b/sys/arch/hppa/dev/astro.c
index 9d851d5bc5a..1aa2db44506 100644
--- a/sys/arch/hppa/dev/astro.c
+++ b/sys/arch/hppa/dev/astro.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: astro.c,v 1.16 2014/10/25 10:19:20 kettenis Exp $ */
+/* $OpenBSD: astro.c,v 1.17 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2007 Mark Kettenis
@@ -292,7 +292,7 @@ astro_attach(struct device *parent, struct device *self, void *aux)
for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
pa = VM_PAGE_TO_PHYS(m);
pmap_enter(pmap_kernel(), va, pa,
- VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
+ PROT_READ | PROT_WRITE, PMAP_WIRED);
va += PAGE_SIZE;
}
pmap_update(pmap_kernel());
diff --git a/sys/arch/hppa/hppa/mainbus.c b/sys/arch/hppa/hppa/mainbus.c
index 3fa43c00216..c8095013a01 100644
--- a/sys/arch/hppa/hppa/mainbus.c
+++ b/sys/arch/hppa/hppa/mainbus.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mainbus.c,v 1.85 2014/07/12 18:44:41 tedu Exp $ */
+/* $OpenBSD: mainbus.c,v 1.86 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 1998-2004 Michael Shalayeff
@@ -220,7 +220,7 @@ mbus_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
if (btlb_insert(HPPA_SID_KERNEL, spa, spa, &len,
pmap_sid2pid(HPPA_SID_KERNEL) |
- pmap_prot(pmap_kernel(), UVM_PROT_RW))
+ pmap_prot(pmap_kernel(), PROT_READ | PROT_WRITE))
>= 0) {
pa = spa + len; /* may wrap to 0... */
#ifdef BTLBDEBUG
@@ -247,7 +247,7 @@ mbus_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
#endif
for (; spa != epa; spa += PAGE_SIZE)
pmap_kenter_pa(spa, spa,
- UVM_PROT_RW);
+ PROT_READ | PROT_WRITE);
}
#ifdef BTLBDEBUG
printf("\n");
diff --git a/sys/arch/hppa/hppa/pmap.c b/sys/arch/hppa/hppa/pmap.c
index 0a19aa376b9..db7a1370722 100644
--- a/sys/arch/hppa/hppa/pmap.c
+++ b/sys/arch/hppa/hppa/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.162 2014/05/12 14:35:56 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.163 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 1998-2004 Michael Shalayeff
@@ -463,14 +463,14 @@ pmap_bootstrap(vaddr_t vstart)
uvm_setpagesize();
- hppa_prot[UVM_PROT_NONE] = TLB_AR_NA;
- hppa_prot[UVM_PROT_READ] = TLB_AR_R;
- hppa_prot[UVM_PROT_WRITE] = TLB_AR_RW;
- hppa_prot[UVM_PROT_RW] = TLB_AR_RW;
- hppa_prot[UVM_PROT_EXEC] = TLB_AR_RX;
- hppa_prot[UVM_PROT_RX] = TLB_AR_RX;
- hppa_prot[UVM_PROT_WX] = TLB_AR_RWX;
- hppa_prot[UVM_PROT_RWX] = TLB_AR_RWX;
+ hppa_prot[PROT_NONE] = TLB_AR_NA;
+ hppa_prot[PROT_READ] = TLB_AR_R;
+ hppa_prot[PROT_WRITE] = TLB_AR_RW;
+ hppa_prot[PROT_READ | PROT_WRITE] = TLB_AR_RW;
+ hppa_prot[PROT_EXEC] = TLB_AR_RX;
+ hppa_prot[PROT_READ | PROT_EXEC] = TLB_AR_RX;
+ hppa_prot[PROT_WRITE | PROT_EXEC] = TLB_AR_RWX;
+ hppa_prot[PROT_READ | PROT_WRITE | PROT_EXEC] = TLB_AR_RWX;
/*
* Initialize kernel pmap
@@ -547,7 +547,7 @@ pmap_bootstrap(vaddr_t vstart)
if (btlb_insert(HPPA_SID_KERNEL, va, va, &size,
pmap_sid2pid(HPPA_SID_KERNEL) |
- pmap_prot(pmap_kernel(), UVM_PROT_RX)) < 0) {
+ pmap_prot(pmap_kernel(), PROT_READ | PROT_EXEC)) < 0) {
printf("WARNING: cannot block map kernel text\n");
break;
}
@@ -592,14 +592,14 @@ pmap_bootstrap(vaddr_t vstart)
/* TODO optimize/inline the kenter */
for (va = 0; va < ptoa(physmem); va += PAGE_SIZE) {
extern struct user *proc0paddr;
- vm_prot_t prot = UVM_PROT_RW;
+ vm_prot_t prot = PROT_READ | PROT_WRITE;
if (va < (vaddr_t)&etext)
- prot = UVM_PROT_RX;
+ prot = PROT_READ | PROT_EXEC;
else if (va < (vaddr_t)&__rodata_end)
- prot = UVM_PROT_READ;
+ prot = PROT_READ;
else if (va == (vaddr_t)proc0paddr + USPACE)
- prot = UVM_PROT_NONE;
+ prot = PROT_NONE;
pmap_kenter_pa(va, va, prot);
}
diff --git a/sys/arch/hppa/hppa/trap.c b/sys/arch/hppa/hppa/trap.c
index 70128433ddf..900f6c69542 100644
--- a/sys/arch/hppa/hppa/trap.c
+++ b/sys/arch/hppa/hppa/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.137 2014/10/08 22:23:57 deraadt Exp $ */
+/* $OpenBSD: trap.c,v 1.138 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 1998-2004 Michael Shalayeff
@@ -170,16 +170,16 @@ trap(int type, struct trapframe *frame)
trapnum == T_IDEBUG || trapnum == T_PERFMON) {
va = frame->tf_iioq_head;
space = frame->tf_iisq_head;
- vftype = UVM_PROT_EXEC;
+ vftype = PROT_EXEC;
} else {
va = frame->tf_ior;
space = frame->tf_isr;
if (va == frame->tf_iioq_head)
- vftype = UVM_PROT_EXEC;
+ vftype = PROT_EXEC;
else if (inst_store(opcode))
- vftype = UVM_PROT_WRITE;
+ vftype = PROT_WRITE;
else
- vftype = UVM_PROT_READ;
+ vftype = PROT_READ;
}
if (frame->tf_flags & TFF_LAST)
@@ -447,7 +447,7 @@ trap(int type, struct trapframe *frame)
(frame->tf_iioq_head & 3) != pl ||
(type & T_USER && va >= VM_MAXUSER_ADDRESS) ||
uvm_fault(map, trunc_page(va), fault,
- opcode & 0x40? UVM_PROT_WRITE : UVM_PROT_READ)) {
+ opcode & 0x40? PROT_WRITE : PROT_READ)) {
frame_regmap(frame, opcode & 0x1f) = 0;
frame->tf_ipsw |= PSL_N;
}
diff --git a/sys/arch/hppa/hppa/vm_machdep.c b/sys/arch/hppa/hppa/vm_machdep.c
index 2b49999e610..b7a7c6fd93c 100644
--- a/sys/arch/hppa/hppa/vm_machdep.c
+++ b/sys/arch/hppa/hppa/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.78 2014/04/08 09:34:23 mpi Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.79 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 1999-2004 Michael Shalayeff
@@ -203,7 +203,7 @@ vmapbuf(struct buf *bp, vsize_t len)
if (pmap_extract(pm, uva, &pa) == FALSE)
panic("vmapbuf: null page frame");
else
- pmap_kenter_pa(kva, pa, UVM_PROT_RW);
+ pmap_kenter_pa(kva, pa, PROT_READ | PROT_WRITE);
uva += PAGE_SIZE;
kva += PAGE_SIZE;
size -= PAGE_SIZE;
diff --git a/sys/arch/hppa/include/pmap.h b/sys/arch/hppa/include/pmap.h
index 43135a46ef0..66158967ce1 100644
--- a/sys/arch/hppa/include/pmap.h
+++ b/sys/arch/hppa/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.45 2014/01/30 18:16:41 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.46 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2002-2004 Michael Shalayeff
@@ -136,8 +136,8 @@ pmap_prot(struct pmap *pmap, int prot)
static __inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
- if ((prot & UVM_PROT_WRITE) == 0) {
- if (prot & (UVM_PROT_RX))
+ if ((prot & PROT_WRITE) == 0) {
+ if (prot & (PROT_READ | PROT_EXEC))
pmap_changebit(pg, 0, PTE_PROT(TLB_WRITE));
else
pmap_page_remove(pg);
@@ -147,8 +147,8 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
static __inline void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
- if ((prot & UVM_PROT_WRITE) == 0) {
- if (prot & (UVM_PROT_RX))
+ if ((prot & PROT_WRITE) == 0) {
+ if (prot & (PROT_READ | PROT_EXEC))
pmap_write_protect(pmap, sva, eva, prot);
else
pmap_remove(pmap, sva, eva);
diff --git a/sys/arch/hppa64/dev/astro.c b/sys/arch/hppa64/dev/astro.c
index 22aaa26b46e..7ba5a7d2ebf 100644
--- a/sys/arch/hppa64/dev/astro.c
+++ b/sys/arch/hppa64/dev/astro.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: astro.c,v 1.7 2014/10/25 10:19:20 kettenis Exp $ */
+/* $OpenBSD: astro.c,v 1.8 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2007 Mark Kettenis
@@ -289,7 +289,7 @@ astro_attach(struct device *parent, struct device *self, void *aux)
for (; m != NULL; m = TAILQ_NEXT(m, pageq)) {
pa = VM_PAGE_TO_PHYS(m);
pmap_enter(pmap_kernel(), va, pa,
- VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
+ PROT_READ | PROT_WRITE, PMAP_WIRED);
va += PAGE_SIZE;
}
pmap_update(pmap_kernel());
diff --git a/sys/arch/hppa64/hppa64/mainbus.c b/sys/arch/hppa64/hppa64/mainbus.c
index e0995ff262c..e7862f54c67 100644
--- a/sys/arch/hppa64/hppa64/mainbus.c
+++ b/sys/arch/hppa64/hppa64/mainbus.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mainbus.c,v 1.15 2014/07/12 18:44:41 tedu Exp $ */
+/* $OpenBSD: mainbus.c,v 1.16 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2005 Michael Shalayeff
@@ -184,7 +184,7 @@ mbus_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
for (spa = trunc_page(bpa), epa = bpa + size;
spa < epa; spa += PAGE_SIZE)
- pmap_kenter_pa(spa, spa, UVM_PROT_RW);
+ pmap_kenter_pa(spa, spa, PROT_READ | PROT_WRITE);
*bshp = bpa;
return (0);
diff --git a/sys/arch/hppa64/hppa64/pmap.c b/sys/arch/hppa64/hppa64/pmap.c
index 0b066cd6439..5c174061dcd 100644
--- a/sys/arch/hppa64/hppa64/pmap.c
+++ b/sys/arch/hppa64/hppa64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.24 2014/10/12 20:39:46 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.25 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2005 Michael Shalayeff
@@ -485,14 +485,15 @@ pmap_bootstrap(vaddr_t vstart)
uvmexp.pagesize = PAGE_SIZE;
uvm_setpagesize();
- hppa_prot[UVM_PROT_NONE] = PTE_ORDER|PTE_ACC_NONE;
- hppa_prot[UVM_PROT_READ] = PTE_ORDER|PTE_READ;
- hppa_prot[UVM_PROT_WRITE] = PTE_ORDER|PTE_WRITE;
- hppa_prot[UVM_PROT_RW] = PTE_ORDER|PTE_READ|PTE_WRITE;
- hppa_prot[UVM_PROT_EXEC] = PTE_ORDER|PTE_EXEC;
- hppa_prot[UVM_PROT_RX] = PTE_ORDER|PTE_READ|PTE_EXEC;
- hppa_prot[UVM_PROT_WX] = PTE_ORDER|PTE_WRITE|PTE_EXEC;
- hppa_prot[UVM_PROT_RWX] = PTE_ORDER|PTE_READ|PTE_WRITE|PTE_EXEC;
+ hppa_prot[PROT_NONE] = PTE_ORDER|PTE_ACC_NONE;
+ hppa_prot[PROT_READ] = PTE_ORDER|PTE_READ;
+ hppa_prot[PROT_WRITE] = PTE_ORDER|PTE_WRITE;
+ hppa_prot[PROT_READ | PROT_WRITE] = PTE_ORDER|PTE_READ|PTE_WRITE;
+ hppa_prot[PROT_EXEC] = PTE_ORDER|PTE_EXEC;
+ hppa_prot[PROT_READ | PROT_EXEC] = PTE_ORDER|PTE_READ|PTE_EXEC;
+ hppa_prot[PROT_WRITE | PROT_EXEC] = PTE_ORDER|PTE_WRITE|PTE_EXEC;
+ hppa_prot[PROT_READ | PROT_WRITE | PROT_EXEC] =
+ PTE_ORDER|PTE_READ|PTE_WRITE|PTE_EXEC;
/*
* Initialize kernel pmap
diff --git a/sys/arch/hppa64/hppa64/trap.c b/sys/arch/hppa64/hppa64/trap.c
index afa646c7370..0d006a8ccd7 100644
--- a/sys/arch/hppa64/hppa64/trap.c
+++ b/sys/arch/hppa64/hppa64/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.40 2014/10/12 20:39:46 miod Exp $ */
+/* $OpenBSD: trap.c,v 1.41 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2005 Michael Shalayeff
@@ -164,16 +164,16 @@ trap(int type, struct trapframe *frame)
trapnum == T_IPROT) {
va = frame->tf_iioq[0];
space = frame->tf_iisq[0];
- vftype = UVM_PROT_EXEC;
+ vftype = PROT_EXEC;
} else {
va = frame->tf_ior;
space = frame->tf_isr;
if (va == frame->tf_iioq[0])
- vftype = UVM_PROT_EXEC;
+ vftype = PROT_EXEC;
else if (inst_store(opcode))
- vftype = UVM_PROT_WRITE;
+ vftype = PROT_WRITE;
else
- vftype = UVM_PROT_READ;
+ vftype = PROT_READ;
}
if (frame->tf_flags & TFF_LAST)
@@ -370,7 +370,7 @@ trap(int type, struct trapframe *frame)
(type & T_USER && !pl) ||
(type & T_USER && va >= VM_MAXUSER_ADDRESS) ||
uvm_fault(map, trunc_page(va), fault,
- opcode & 0x40? UVM_PROT_WRITE : UVM_PROT_READ)) {
+ opcode & 0x40? PROT_WRITE : PROT_READ)) {
frame_regmap(frame, opcode & 0x1f) = 0;
frame->tf_ipsw |= PSL_N;
}
diff --git a/sys/arch/hppa64/hppa64/vm_machdep.c b/sys/arch/hppa64/hppa64/vm_machdep.c
index 94bea14200d..f582b9167d0 100644
--- a/sys/arch/hppa64/hppa64/vm_machdep.c
+++ b/sys/arch/hppa64/hppa64/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.20 2014/04/08 09:34:23 mpi Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.21 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2005 Michael Shalayeff
@@ -193,7 +193,7 @@ vmapbuf(struct buf *bp, vsize_t len)
if (pmap_extract(pm, uva, &pa) == FALSE)
panic("vmapbuf: null page frame");
else
- pmap_kenter_pa(kva, pa, UVM_PROT_RW);
+ pmap_kenter_pa(kva, pa, PROT_READ | PROT_WRITE);
uva += PAGE_SIZE;
kva += PAGE_SIZE;
size -= PAGE_SIZE;
diff --git a/sys/arch/hppa64/include/pmap.h b/sys/arch/hppa64/include/pmap.h
index e00d1cd0b86..073df8ff66f 100644
--- a/sys/arch/hppa64/include/pmap.h
+++ b/sys/arch/hppa64/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.12 2014/05/08 21:31:56 miod Exp $ */
+/* $OpenBSD: pmap.h,v 1.13 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2005 Michael Shalayeff
@@ -118,8 +118,8 @@ void pmap_page_remove(struct vm_page *pg);
static __inline void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
- if ((prot & UVM_PROT_WRITE) == 0) {
- if (prot & (UVM_PROT_RX))
+ if ((prot & PROT_WRITE) == 0) {
+ if (prot & (PROT_READ | PROT_EXEC))
pmap_changebit(pg, 0, PTE_WRITE);
else
pmap_page_remove(pg);
@@ -129,8 +129,8 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
static __inline void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
- if ((prot & UVM_PROT_WRITE) == 0) {
- if (prot & (UVM_PROT_RX))
+ if ((prot & PROT_WRITE) == 0) {
+ if (prot & (PROT_READ | PROT_EXEC))
pmap_write_protect(pmap, sva, eva, prot);
else
pmap_remove(pmap, sva, eva);
diff --git a/sys/arch/i386/i386/acpi_machdep.c b/sys/arch/i386/i386/acpi_machdep.c
index 64f9d7cc90b..eba240ac41e 100644
--- a/sys/arch/i386/i386/acpi_machdep.c
+++ b/sys/arch/i386/i386/acpi_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: acpi_machdep.c,v 1.56 2014/09/19 20:02:25 kettenis Exp $ */
+/* $OpenBSD: acpi_machdep.c,v 1.57 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com>
*
@@ -88,7 +88,7 @@ acpi_map(paddr_t pa, size_t len, struct acpi_mem_map *handle)
handle->pa = pa;
do {
- pmap_kenter_pa(va, pgpa, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pgpa, PROT_READ | PROT_WRITE);
va += NBPG;
pgpa += NBPG;
} while (pgpa < endpa);
diff --git a/sys/arch/i386/i386/bios.c b/sys/arch/i386/i386/bios.c
index f9dec0a6415..fdd85216dc5 100644
--- a/sys/arch/i386/i386/bios.c
+++ b/sys/arch/i386/i386/bios.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bios.c,v 1.106 2014/10/17 20:34:23 kettenis Exp $ */
+/* $OpenBSD: bios.c,v 1.107 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 1997-2001 Michael Shalayeff
@@ -282,7 +282,7 @@ biosattach(struct device *parent, struct device *self, void *aux)
smbios_entry.count = sh->count;
for (; pa < end; pa+= NBPG, eva+= NBPG)
- pmap_kenter_pa(eva, pa, VM_PROT_READ);
+ pmap_kenter_pa(eva, pa, PROT_READ);
printf(", SMBIOS rev. %d.%d @ 0x%x (%hd entries)",
sh->majrev, sh->minrev, sh->addr, sh->count);
@@ -670,14 +670,14 @@ bios32_service(u_int32_t service, bios32_entry_t e, bios32_entry_info_t ei)
va += trunc_page(BIOS32_START);
pa < endpa; pa += NBPG, va += NBPG) {
pmap_enter(pmap_kernel(), va, pa,
- VM_PROT_READ | VM_PROT_WRITE,
- VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
/* for all you, broken hearted */
if (pa >= trunc_page(base)) {
pmap_enter(pmap_kernel(), sva, pa,
- VM_PROT_READ | VM_PROT_WRITE,
- VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
sva += NBPG;
}
}
diff --git a/sys/arch/i386/i386/bus_dma.c b/sys/arch/i386/i386/bus_dma.c
index 1358197cd6b..87c5825902b 100644
--- a/sys/arch/i386/i386/bus_dma.c
+++ b/sys/arch/i386/i386/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.32 2014/07/12 18:44:41 tedu Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.33 2014/11/16 12:30:57 deraadt Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -456,8 +456,8 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
* alloc
*/
ret = pmap_enter(pmap_kernel(), va, addr | pmapflags,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
- VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
if (ret) {
pmap_update(pmap_kernel());
km_free((void *)sva, ssize, &kv_any, &kp_none);
diff --git a/sys/arch/i386/i386/cpu.c b/sys/arch/i386/i386/cpu.c
index 8d94de66447..728fa81efb9 100644
--- a/sys/arch/i386/i386/cpu.c
+++ b/sys/arch/i386/i386/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.56 2014/09/14 14:17:23 jsg Exp $ */
+/* $OpenBSD: cpu.c,v 1.57 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: cpu.c,v 1.1.2.7 2000/06/26 02:04:05 sommerfeld Exp $ */
/*-
@@ -219,9 +219,8 @@ replacesmap(void)
pmap_extract(pmap_kernel(), kva, &pa1);
pmap_extract(pmap_kernel(), kva + PAGE_SIZE, &pa2);
- pmap_kenter_pa(nva, pa1, VM_PROT_READ | VM_PROT_WRITE);
- pmap_kenter_pa(nva + PAGE_SIZE, pa2, VM_PROT_READ |
- VM_PROT_WRITE);
+ pmap_kenter_pa(nva, pa1, PROT_READ | PROT_WRITE);
+ pmap_kenter_pa(nva + PAGE_SIZE, pa2, PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
/* replace 3 byte nops with stac/clac instructions */
@@ -724,7 +723,7 @@ mp_cpu_start(struct cpu_info *ci)
pmap_activate(curproc);
- pmap_kenter_pa(0, 0, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(0, 0, PROT_READ | PROT_WRITE);
memcpy((u_int8_t *)0x467, dwordptr, 4);
pmap_kremove(0, PAGE_SIZE);
diff --git a/sys/arch/i386/i386/gdt.c b/sys/arch/i386/i386/gdt.c
index 36738fd40f8..48e8cce9ff9 100644
--- a/sys/arch/i386/i386/gdt.c
+++ b/sys/arch/i386/i386/gdt.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: gdt.c,v 1.33 2014/09/14 14:17:23 jsg Exp $ */
+/* $OpenBSD: gdt.c,v 1.34 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: gdt.c,v 1.28 2002/12/14 09:38:50 junyoung Exp $ */
/*-
@@ -111,7 +111,7 @@ gdt_init()
if (pg == NULL)
panic("gdt_init: no pages");
pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
}
bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
ci->ci_gdt = gdt;
@@ -140,7 +140,7 @@ gdt_alloc_cpu(struct cpu_info *ci)
if (pg == NULL)
panic("gdt_init: no pages");
pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
}
bzero(ci->ci_gdt, MAXGDTSIZ);
bcopy(gdt, ci->ci_gdt, MAXGDTSIZ);
diff --git a/sys/arch/i386/i386/hibernate_machdep.c b/sys/arch/i386/i386/hibernate_machdep.c
index b91e618745a..7a23be27d8e 100644
--- a/sys/arch/i386/i386/hibernate_machdep.c
+++ b/sys/arch/i386/i386/hibernate_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: hibernate_machdep.c,v 1.40 2014/11/08 08:18:37 mlarkin Exp $ */
+/* $OpenBSD: hibernate_machdep.c,v 1.41 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2011 Mike Larkin <mlarkin@openbsd.org>
@@ -239,9 +239,9 @@ hibernate_populate_resume_pt(union hibernate_info *hib_info,
vaddr_t piglet_start_va, piglet_end_va;
/* Identity map PD, PT, and stack pages */
- pmap_kenter_pa(HIBERNATE_PT_PAGE, HIBERNATE_PT_PAGE, VM_PROT_ALL);
- pmap_kenter_pa(HIBERNATE_PD_PAGE, HIBERNATE_PD_PAGE, VM_PROT_ALL);
- pmap_kenter_pa(HIBERNATE_STACK_PAGE, HIBERNATE_STACK_PAGE, VM_PROT_ALL);
+ pmap_kenter_pa(HIBERNATE_PT_PAGE, HIBERNATE_PT_PAGE, PROT_MASK);
+ pmap_kenter_pa(HIBERNATE_PD_PAGE, HIBERNATE_PD_PAGE, PROT_MASK);
+ pmap_kenter_pa(HIBERNATE_STACK_PAGE, HIBERNATE_STACK_PAGE, PROT_MASK);
pmap_activate(curproc);
bzero((caddr_t)HIBERNATE_PT_PAGE, PAGE_SIZE);
diff --git a/sys/arch/i386/i386/kvm86.c b/sys/arch/i386/i386/kvm86.c
index ef8900dd647..12e5cd40c0d 100644
--- a/sys/arch/i386/i386/kvm86.c
+++ b/sys/arch/i386/i386/kvm86.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kvm86.c,v 1.7 2014/04/01 09:05:03 mpi Exp $ */
+/* $OpenBSD: kvm86.c,v 1.8 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: kvm86.c,v 1.10 2005/12/26 19:23:59 perry Exp $ */
/*
* Copyright (c) 2002
@@ -207,7 +207,7 @@ kvm86_bios_read(u_int32_t vmva, char *buf, size_t len)
if (!bioscallvmd->pgtbl[vmva >> 12])
break;
vmpa = bioscallvmd->pgtbl[vmva >> 12] & ~(PAGE_SIZE - 1);
- pmap_kenter_pa(bioscalltmpva, vmpa, VM_PROT_READ);
+ pmap_kenter_pa(bioscalltmpva, vmpa, PROT_READ);
pmap_update(pmap_kernel());
memcpy(buf, (void *)(bioscalltmpva + (vmva & (PAGE_SIZE - 1))),
diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c
index 0bea2b603e2..e0f274bc454 100644
--- a/sys/arch/i386/i386/machdep.c
+++ b/sys/arch/i386/i386/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.556 2014/10/25 16:57:58 kettenis Exp $ */
+/* $OpenBSD: machdep.c,v 1.557 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
/*-
@@ -392,7 +392,7 @@ cpu_startup()
pa = avail_end;
va = (vaddr_t)msgbufp;
for (i = 0; i < atop(MSGBUFSIZE); i++) {
- pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
va += PAGE_SIZE;
pa += PAGE_SIZE;
}
@@ -2794,7 +2794,7 @@ dumpsys()
printf("(%x %lld) ", maddr, (long long)blkno);
#endif
pmap_enter(pmap_kernel(), dumpspace, maddr,
- VM_PROT_READ, PMAP_WIRED);
+ PROT_READ, PMAP_WIRED);
if ((error = (*dump)(dumpdev, blkno,
(caddr_t)dumpspace, NBPG)))
break;
@@ -3148,8 +3148,8 @@ init386(paddr_t first_avail)
panic("cannot reserve /boot args memory");
pmap_enter(pmap_kernel(), (vaddr_t)bootargp, (paddr_t)bootargv,
- VM_PROT_READ|VM_PROT_WRITE,
- VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
bios_getopt();
@@ -3322,13 +3322,13 @@ init386(paddr_t first_avail)
#ifdef MULTIPROCESSOR
pmap_kenter_pa((vaddr_t)MP_TRAMPOLINE, /* virtual */
(paddr_t)MP_TRAMPOLINE, /* physical */
- VM_PROT_ALL); /* protection */
+ PROT_MASK); /* protection */
#endif
#if NACPI > 0 && !defined(SMALL_KERNEL)
pmap_kenter_pa((vaddr_t)ACPI_TRAMPOLINE,/* virtual */
(paddr_t)ACPI_TRAMPOLINE, /* physical */
- VM_PROT_ALL); /* protection */
+ PROT_MASK); /* protection */
#endif
tlbflush();
@@ -3740,7 +3740,7 @@ bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
for (; map_size > 0;
pa += PAGE_SIZE, va += PAGE_SIZE, map_size -= PAGE_SIZE)
pmap_kenter_pa(va, pa | pmap_flags,
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
return 0;
diff --git a/sys/arch/i386/i386/mem.c b/sys/arch/i386/i386/mem.c
index 93cfd9fd458..6ad77648966 100644
--- a/sys/arch/i386/i386/mem.c
+++ b/sys/arch/i386/i386/mem.c
@@ -1,5 +1,5 @@
/* $NetBSD: mem.c,v 1.31 1996/05/03 19:42:19 christos Exp $ */
-/* $OpenBSD: mem.c,v 1.40 2014/09/14 14:17:23 jsg Exp $ */
+/* $OpenBSD: mem.c,v 1.41 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 1988 University of Utah.
* Copyright (c) 1982, 1986, 1990, 1993
@@ -147,7 +147,7 @@ mmrw(dev_t dev, struct uio *uio, int flags)
v = uio->uio_offset;
pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
trunc_page(v), uio->uio_rw == UIO_READ ?
- VM_PROT_READ : VM_PROT_WRITE, PMAP_WIRED);
+ PROT_READ : PROT_WRITE, PMAP_WIRED);
pmap_update(pmap_kernel());
o = uio->uio_offset & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
diff --git a/sys/arch/i386/i386/mpbios.c b/sys/arch/i386/i386/mpbios.c
index bb3ab9be2ba..4347c96ceb5 100644
--- a/sys/arch/i386/i386/mpbios.c
+++ b/sys/arch/i386/i386/mpbios.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mpbios.c,v 1.35 2014/05/26 19:03:28 kettenis Exp $ */
+/* $OpenBSD: mpbios.c,v 1.36 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: mpbios.c,v 1.2 2002/10/01 12:56:57 fvdl Exp $ */
/*-
@@ -267,7 +267,7 @@ mpbios_map(paddr_t pa, int len, struct mp_map *handle)
handle->vsize = endpa - pgpa;
do {
- pmap_kenter_pa(va, pgpa, VM_PROT_READ);
+ pmap_kenter_pa(va, pgpa, PROT_READ);
va += PAGE_SIZE;
pgpa += PAGE_SIZE;
} while (pgpa < endpa);
diff --git a/sys/arch/i386/i386/pmap.c b/sys/arch/i386/i386/pmap.c
index f974f97a661..1d2295f4f46 100644
--- a/sys/arch/i386/i386/pmap.c
+++ b/sys/arch/i386/i386/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.160 2014/07/11 16:35:40 jsg Exp $ */
+/* $OpenBSD: pmap.c,v 1.161 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.91 2000/06/02 17:46:37 thorpej Exp $ */
/*
@@ -605,7 +605,7 @@ pmap_exec_fixup(struct vm_map *map, struct trapframe *tf, struct pcb *pcb)
vm_map_lock(map);
RB_FOREACH_REVERSE(ent, uvm_map_addr, &map->addr) {
- if (ent->protection & VM_PROT_EXECUTE)
+ if (ent->protection & PROT_EXEC)
break;
}
/*
@@ -696,7 +696,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
pt_entry_t *pte, opte, npte;
pte = vtopte(va);
- npte = (pa & PMAP_PA_MASK) | ((prot & VM_PROT_WRITE)? PG_RW : PG_RO) |
+ npte = (pa & PMAP_PA_MASK) | ((prot & PROT_WRITE)? PG_RW : PG_RO) |
PG_V | PG_U | PG_M | ((pa & PMAP_NOCACHE) ? PG_N : 0) |
((pa & PMAP_WC) ? pmap_pg_wc : 0);
@@ -795,14 +795,14 @@ pmap_bootstrap(vaddr_t kva_start)
* we can jam into a i386 PTE.
*/
- protection_codes[UVM_PROT_NONE] = 0; /* --- */
- protection_codes[UVM_PROT_EXEC] = PG_X; /* --x */
- protection_codes[UVM_PROT_READ] = PG_RO; /* -r- */
- protection_codes[UVM_PROT_RX] = PG_X; /* -rx */
- protection_codes[UVM_PROT_WRITE] = PG_RW; /* w-- */
- protection_codes[UVM_PROT_WX] = PG_RW|PG_X; /* w-x */
- protection_codes[UVM_PROT_RW] = PG_RW; /* wr- */
- protection_codes[UVM_PROT_RWX] = PG_RW|PG_X; /* wrx */
+ protection_codes[PROT_NONE] = 0; /* --- */
+ protection_codes[PROT_EXEC] = PG_X; /* --x */
+ protection_codes[PROT_READ] = PG_RO; /* -r- */
+ protection_codes[PROT_READ | PROT_EXEC] = PG_X; /* -rx */
+ protection_codes[PROT_WRITE] = PG_RW; /* w-- */
+ protection_codes[PROT_WRITE | PROT_EXEC] = PG_RW|PG_X; /* w-x */
+ protection_codes[PROT_READ | PROT_WRITE] = PG_RW; /* wr- */
+ protection_codes[PROT_READ | PROT_WRITE | PROT_EXEC] = PG_RW|PG_X; /* wrx */
/*
* now we init the kernel's pmap
@@ -1122,7 +1122,7 @@ pmap_alloc_pvpage(struct pmap *pmap, int mode)
*/
pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
- VM_PROT_READ|VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
pvpage = (struct pv_page *) pv_cachedva;
pv_cachedva = 0;
return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
@@ -2594,9 +2594,9 @@ enter_now:
npte |= PG_RW; /* XXXCDC: no longer needed? */
if (pmap == pmap_kernel())
npte |= pmap_pg_g;
- if (flags & VM_PROT_READ)
+ if (flags & PROT_READ)
npte |= PG_U;
- if (flags & VM_PROT_WRITE)
+ if (flags & PROT_WRITE)
npte |= PG_M;
if (pg) {
npte |= PG_PVLIST;
diff --git a/sys/arch/i386/i386/trap.c b/sys/arch/i386/i386/trap.c
index c291d221983..c3ac81d1343 100644
--- a/sys/arch/i386/i386/trap.c
+++ b/sys/arch/i386/i386/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.117 2014/07/09 07:29:00 guenther Exp $ */
+/* $OpenBSD: trap.c,v 1.118 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: trap.c,v 1.95 1996/05/05 06:50:02 mycroft Exp $ */
/*-
@@ -138,10 +138,10 @@ trap(struct trapframe *frame)
/* SIGSEGV and SIGBUS need this */
if (frame->tf_err & PGEX_W) {
- vftype = VM_PROT_WRITE;
- ftype = VM_PROT_READ | VM_PROT_WRITE;
+ vftype = PROT_WRITE;
+ ftype = PROT_READ | PROT_WRITE;
} else
- ftype = vftype = VM_PROT_READ;
+ ftype = vftype = PROT_READ;
#ifdef DEBUG
if (trapdebug) {
diff --git a/sys/arch/i386/i386/vm_machdep.c b/sys/arch/i386/i386/vm_machdep.c
index faec7aafc55..7fe6d681ae7 100644
--- a/sys/arch/i386/i386/vm_machdep.c
+++ b/sys/arch/i386/i386/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.60 2013/01/16 19:04:43 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.61 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: vm_machdep.c,v 1.61 1996/05/03 19:42:35 christos Exp $ */
/*-
@@ -232,7 +232,7 @@ vmapbuf(struct buf *bp, vsize_t len)
while (len) {
pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
faddr, &fpa);
- pmap_kenter_pa(taddr, fpa, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(taddr, fpa, PROT_READ | PROT_WRITE);
faddr += PAGE_SIZE;
taddr += PAGE_SIZE;
len -= PAGE_SIZE;
diff --git a/sys/arch/i386/include/pmap.h b/sys/arch/i386/include/pmap.h
index b73e8aff5cb..3ae1e9299e2 100644
--- a/sys/arch/i386/include/pmap.h
+++ b/sys/arch/i386/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.65 2014/07/11 16:35:40 jsg Exp $ */
+/* $OpenBSD: pmap.h,v 1.66 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $ */
/*
@@ -34,6 +34,7 @@
#define _MACHINE_PMAP_H_
#ifdef _KERNEL
+#include <sys/mman.h>
#include <machine/cpufunc.h>
#include <machine/segments.h>
#endif
@@ -435,8 +436,8 @@ boolean_t pmap_zero_page_uncached(paddr_t);
__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
- if ((prot & VM_PROT_WRITE) == 0) {
- if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
+ if ((prot & PROT_WRITE) == 0) {
+ if (prot & (PROT_READ | PROT_EXEC)) {
(void) pmap_clear_attrs(pg, PG_RW);
} else {
pmap_page_remove(pg);
@@ -455,8 +456,8 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
__inline static void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
- if ((prot & VM_PROT_WRITE) == 0) {
- if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
+ if ((prot & PROT_WRITE) == 0) {
+ if (prot & (PROT_READ | PROT_EXEC)) {
pmap_write_protect(pmap, sva, eva, prot);
} else {
pmap_remove(pmap, sva, eva);
diff --git a/sys/arch/i386/pci/agp_machdep.c b/sys/arch/i386/pci/agp_machdep.c
index c4e4ec37988..b144465e679 100644
--- a/sys/arch/i386/pci/agp_machdep.c
+++ b/sys/arch/i386/pci/agp_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: agp_machdep.c,v 1.19 2014/09/20 16:15:16 kettenis Exp $ */
+/* $OpenBSD: agp_machdep.c,v 1.20 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2008 - 2009 Owain G. Ainsworth <oga@openbsd.org>
@@ -157,7 +157,7 @@ agp_map_atomic(struct agp_map *map, bus_size_t offset,
pmap_flags = PMAP_WC;
pa = bus_space_mmap(map->bst, map->addr, offset, 0, 0);
- pmap_kenter_pa(map->va, pa | pmap_flags, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(map->va, pa | pmap_flags, PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
*bshp = (bus_space_handle_t)map->va;
diff --git a/sys/arch/i386/pci/vga_post.c b/sys/arch/i386/pci/vga_post.c
index 223de1f7f30..66e92fe9db6 100644
--- a/sys/arch/i386/pci/vga_post.c
+++ b/sys/arch/i386/pci/vga_post.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vga_post.c,v 1.7 2014/07/12 18:44:42 tedu Exp $ */
+/* $OpenBSD: vga_post.c,v 1.8 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: vga_post.c,v 1.12 2009/03/15 21:32:36 cegger Exp $ */
/*-
@@ -149,7 +149,7 @@ vga_post_init(int bus, int device, int function)
sc->sys_image = sys_image;
sc->emu.sys_private = sc;
- pmap_kenter_pa(sys_bios_data, 0, VM_PROT_READ);
+ pmap_kenter_pa(sys_bios_data, 0, PROT_READ);
pmap_update(pmap_kernel());
memcpy((void *)sc->bios_data, (void *)sys_bios_data, PAGE_SIZE);
pmap_kremove(sys_bios_data, PAGE_SIZE);
@@ -158,14 +158,14 @@ vga_post_init(int bus, int device, int function)
iter = 0;
TAILQ_FOREACH(pg, &sc->ram_backing, pageq) {
pmap_kenter_pa(sc->sys_image + iter, VM_PAGE_TO_PHYS(pg),
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
iter += PAGE_SIZE;
}
KASSERT(iter == BASE_MEMORY);
for (iter = 640 * 1024; iter < 1024 * 1024; iter += PAGE_SIZE)
pmap_kenter_pa(sc->sys_image + iter, iter,
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
memset(&sc->emu, 0, sizeof(sc->emu));
diff --git a/sys/arch/landisk/dev/obio.c b/sys/arch/landisk/dev/obio.c
index ba103d98042..b5681174e97 100644
--- a/sys/arch/landisk/dev/obio.c
+++ b/sys/arch/landisk/dev/obio.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: obio.c,v 1.8 2010/04/04 12:49:30 miod Exp $ */
+/* $OpenBSD: obio.c,v 1.9 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: obio.c,v 1.1 2006/09/01 21:26:18 uwe Exp $ */
/*-
@@ -269,7 +269,7 @@ obio_iomem_add_mapping(bus_addr_t bpa, bus_size_t size, int type,
#undef MODE
for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
pte = __pmap_kpte_lookup(va);
KDASSERT(pte);
*pte |= m; /* PTEA PCMCIA assistant bit */
diff --git a/sys/arch/landisk/landisk/bus_dma.c b/sys/arch/landisk/landisk/bus_dma.c
index 172a2fc40b2..9422561dffc 100644
--- a/sys/arch/landisk/landisk/bus_dma.c
+++ b/sys/arch/landisk/landisk/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.11 2014/07/12 18:44:42 tedu Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.12 2014/11/16 12:30:57 deraadt Exp $ */
/* $NetBSD: bus_dma.c,v 1.1 2006/09/01 21:26:18 uwe Exp $ */
/*
@@ -662,7 +662,7 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
if (size == 0)
panic("_bus_dmamem_map: size botch");
pmap_kenter_pa(va, addr,
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
}
}
pmap_update(pmap_kernel());
diff --git a/sys/arch/loongson/loongson/bus_dma.c b/sys/arch/loongson/loongson/bus_dma.c
index 80faf59c1ed..33b0a265ae1 100644
--- a/sys/arch/loongson/loongson/bus_dma.c
+++ b/sys/arch/loongson/loongson/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.17 2014/09/26 14:32:07 jsing Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.18 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -464,8 +464,8 @@ _dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
panic("_dmamem_map: size botch");
pa = (*t->_device_to_pa)(addr);
error = pmap_enter(pmap_kernel(), va, pa,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
- VM_PROT_WRITE | pmap_flags);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | pmap_flags);
if (error) {
pmap_update(pmap_kernel());
km_free((void *)sva, ssize, &kv_any, &kp_none);
diff --git a/sys/arch/luna88k/luna88k/pmap_table.c b/sys/arch/luna88k/luna88k/pmap_table.c
index 715421a4c7e..70d379e6783 100644
--- a/sys/arch/luna88k/luna88k/pmap_table.c
+++ b/sys/arch/luna88k/luna88k/pmap_table.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap_table.c,v 1.11 2014/04/16 12:01:33 aoyama Exp $ */
+/* $OpenBSD: pmap_table.c,v 1.12 2014/11/16 12:30:57 deraadt Exp $ */
/*
* Mach Operating System
@@ -34,8 +34,8 @@
#include <machine/board.h>
#include <machine/pmap_table.h>
-#define R VM_PROT_READ
-#define RW (VM_PROT_READ | VM_PROT_WRITE)
+#define R PROT_READ
+#define RW (PROT_READ | PROT_WRITE)
#define CW CACHE_WT
#define CI CACHE_INH
#define CG CACHE_GLOBAL
diff --git a/sys/arch/m88k/m88k/pmap.c b/sys/arch/m88k/m88k/pmap.c
index 2e0128b82a5..f11c8daa0e3 100644
--- a/sys/arch/m88k/m88k/pmap.c
+++ b/sys/arch/m88k/m88k/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.78 2014/06/09 14:33:20 miod Exp $ */
+/* $OpenBSD: pmap.c,v 1.79 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 2001-2004, 2010, Miodrag Vallat.
@@ -179,7 +179,7 @@ pg_to_pvh(struct vm_page *pg)
* PTE routines
*/
-#define m88k_protection(prot) ((prot) & VM_PROT_WRITE ? PG_RW : PG_RO)
+#define m88k_protection(prot) ((prot) & PROT_WRITE ? PG_RW : PG_RO)
#define pmap_pte_w(pte) (*(pte) & PG_W)
#define SDTENT(pm, va) ((pm)->pm_stab + SDTIDX(va))
@@ -617,7 +617,7 @@ pmap_map(paddr_t pa, psize_t sz, vm_prot_t prot, u_int cmode,
pa = trunc_batc(pa);
batc = BATC_SO | BATC_V;
- if ((prot & VM_PROT_WRITE) == 0)
+ if ((prot & PROT_WRITE) == 0)
batc |= BATC_PROT;
if (cmode & CACHE_INH)
batc |= BATC_INH;
@@ -791,7 +791,8 @@ pmap_bootstrap(paddr_t s_rom, paddr_t e_rom)
if (e_rom != s_rom) {
s_firmware = s_rom;
l_firmware = e_rom - s_rom;
- pmap_map(s_firmware, l_firmware, UVM_PROT_RW, CACHE_INH, FALSE);
+ pmap_map(s_firmware, l_firmware, PROT_READ | PROT_WRITE,
+ CACHE_INH, FALSE);
}
for (ptable = pmap_table_build(); ptable->size != (vsize_t)-1; ptable++)
@@ -1099,7 +1100,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
if (wired)
npte |= PG_W;
- if (prot & VM_PROT_WRITE) {
+ if (prot & PROT_WRITE) {
/*
* On 88110, do not mark writable mappings as dirty unless we
* know the page is dirty, or we are using the kernel pmap.
@@ -1109,7 +1110,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
npte |= PG_U;
else
npte |= PG_M_U;
- } else if (prot & VM_PROT_ALL)
+ } else if (prot & PROT_MASK)
npte |= PG_U;
/*
@@ -1133,12 +1134,12 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
* Cache attribute flags
*/
if (pvl != NULL) {
- if (flags & VM_PROT_WRITE) {
+ if (flags & PROT_WRITE) {
if (CPU_IS88110 && pmap != pmap_kernel())
pvl->pv_flags |= PG_U;
else
pvl->pv_flags |= PG_M_U;
- } else if (flags & VM_PROT_ALL)
+ } else if (flags & PROT_MASK)
pvl->pv_flags |= PG_U;
}
@@ -1439,7 +1440,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
pt_entry_t *pte, ap, opte, npte;
vaddr_t va, eseg;
- if ((prot & VM_PROT_READ) == 0) {
+ if ((prot & PROT_READ) == 0) {
pmap_remove(pmap, sva, eva);
return;
}
@@ -1783,9 +1784,9 @@ pmap_is_referenced(struct vm_page *pg)
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
- if ((prot & VM_PROT_READ) == VM_PROT_NONE)
+ if ((prot & PROT_READ) == PROT_NONE)
pmap_remove_page(pg);
- else if ((prot & VM_PROT_WRITE) == VM_PROT_NONE)
+ else if ((prot & PROT_WRITE) == PROT_NONE)
pmap_changebit(pg, PG_RO, ~0);
}
diff --git a/sys/arch/m88k/m88k/trap.c b/sys/arch/m88k/m88k/trap.c
index 2c3eb35013b..89d28383218 100644
--- a/sys/arch/m88k/m88k/trap.c
+++ b/sys/arch/m88k/m88k/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.100 2014/07/02 18:37:34 miod Exp $ */
+/* $OpenBSD: trap.c,v 1.101 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 2004, Miodrag Vallat.
* Copyright (c) 1998 Steve Murphree, Jr.
@@ -295,11 +295,11 @@ lose:
fault_addr = frame->tf_dma0;
if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
- ftype = VM_PROT_READ|VM_PROT_WRITE;
- fault_code = VM_PROT_WRITE;
+ ftype = PROT_READ | PROT_WRITE;
+ fault_code = PROT_WRITE;
} else {
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
+ ftype = PROT_READ;
+ fault_code = PROT_READ;
}
va = trunc_page((vaddr_t)fault_addr);
@@ -399,11 +399,11 @@ user_fault:
}
if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
- ftype = VM_PROT_READ | VM_PROT_WRITE;
- fault_code = VM_PROT_WRITE;
+ ftype = PROT_READ | PROT_WRITE;
+ fault_code = PROT_WRITE;
} else {
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
+ ftype = PROT_READ;
+ fault_code = PROT_READ;
}
va = trunc_page((vaddr_t)fault_addr);
@@ -813,11 +813,11 @@ lose:
fault_addr = frame->tf_dlar;
if (frame->tf_dsr & CMMU_DSR_RW) {
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
+ ftype = PROT_READ;
+ fault_code = PROT_READ;
} else {
- ftype = VM_PROT_READ|VM_PROT_WRITE;
- fault_code = VM_PROT_WRITE;
+ ftype = PROT_READ | PROT_WRITE;
+ fault_code = PROT_WRITE;
}
va = trunc_page((vaddr_t)fault_addr);
@@ -860,8 +860,8 @@ lose:
KERNEL_LOCK();
m88110_user_fault:
if (type == T_INSTFLT+T_USER) {
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
+ ftype = PROT_READ;
+ fault_code = PROT_READ;
#ifdef TRAPDEBUG
printf("User Instruction fault exip %x isr %x ilar %x\n",
frame->tf_exip, frame->tf_isr, frame->tf_ilar);
@@ -869,11 +869,11 @@ m88110_user_fault:
} else {
fault_addr = frame->tf_dlar;
if (frame->tf_dsr & CMMU_DSR_RW) {
- ftype = VM_PROT_READ;
- fault_code = VM_PROT_READ;
+ ftype = PROT_READ;
+ fault_code = PROT_READ;
} else {
- ftype = VM_PROT_READ|VM_PROT_WRITE;
- fault_code = VM_PROT_WRITE;
+ ftype = PROT_READ | PROT_WRITE;
+ fault_code = PROT_WRITE;
}
#ifdef TRAPDEBUG
printf("User Data access fault exip %x dsr %x dlar %x\n",
diff --git a/sys/arch/m88k/m88k/vm_machdep.c b/sys/arch/m88k/m88k/vm_machdep.c
index 38d7a078653..08a27322e43 100644
--- a/sys/arch/m88k/m88k/vm_machdep.c
+++ b/sys/arch/m88k/m88k/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.22 2013/01/16 19:04:43 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.23 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 1998 Steve Murphree, Jr.
@@ -205,8 +205,8 @@ vmapbuf(bp, len)
if (pmap_extract(pmap, (vaddr_t)addr, &pa) == FALSE)
panic("vmapbuf: null page frame");
pmap_enter(vm_map_pmap(phys_map), kva, pa,
- VM_PROT_READ | VM_PROT_WRITE,
- VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
addr += PAGE_SIZE;
kva += PAGE_SIZE;
}
diff --git a/sys/arch/macppc/macppc/dma.c b/sys/arch/macppc/macppc/dma.c
index 0c61b43fcec..9ca7d327faf 100644
--- a/sys/arch/macppc/macppc/dma.c
+++ b/sys/arch/macppc/macppc/dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: dma.c,v 1.40 2014/07/12 18:44:42 tedu Exp $ */
+/* $OpenBSD: dma.c,v 1.41 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
/*-
@@ -485,8 +485,8 @@ _dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
if (size == 0)
panic("_bus_dmamem_map: size botch");
error = pmap_enter(pmap_kernel(), va, addr | pmapflags,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
- VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
if (error) {
pmap_update(pmap_kernel());
km_free((void *)sva, ssize, &kv_any, &kp_none);
diff --git a/sys/arch/macppc/macppc/machdep.c b/sys/arch/macppc/macppc/machdep.c
index 69d0672e128..5bbfc22d38f 100644
--- a/sys/arch/macppc/macppc/machdep.c
+++ b/sys/arch/macppc/macppc/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.163 2014/10/27 21:56:57 kettenis Exp $ */
+/* $OpenBSD: machdep.c,v 1.164 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: machdep.c,v 1.4 1996/10/16 19:33:11 ws Exp $ */
/*
@@ -765,7 +765,7 @@ dumpsys()
(ptoa(dumpsize) - maddr) / (1024 * 1024));
pmap_enter(pmap_kernel(), dumpspace, maddr,
- VM_PROT_READ, PMAP_WIRED);
+ PROT_READ, PMAP_WIRED);
if ((error = (*dump)(dumpdev, blkno,
(caddr_t)dumpspace, PAGE_SIZE)) != 0)
break;
@@ -1085,7 +1085,7 @@ bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
bpa, size, *bshp, spa);
#endif
for (; len > 0; len -= PAGE_SIZE) {
- pmap_kenter_cache(vaddr, spa, VM_PROT_READ | VM_PROT_WRITE,
+ pmap_kenter_cache(vaddr, spa, PROT_READ | PROT_WRITE,
(flags & BUS_SPACE_MAP_CACHEABLE) ?
PMAP_CACHE_WT : PMAP_CACHE_CI);
spa += PAGE_SIZE;
@@ -1137,7 +1137,7 @@ mapiodev(paddr_t pa, psize_t len)
for (vaddr = va; size > 0; size -= PAGE_SIZE) {
pmap_kenter_cache(vaddr, spa,
- VM_PROT_READ | VM_PROT_WRITE, PMAP_CACHE_DEFAULT);
+ PROT_READ | PROT_WRITE, PMAP_CACHE_DEFAULT);
spa += PAGE_SIZE;
vaddr += PAGE_SIZE;
}
diff --git a/sys/arch/mips64/mips64/mips64_machdep.c b/sys/arch/mips64/mips64/mips64_machdep.c
index b0663d27ac5..a8e49bb9a0b 100644
--- a/sys/arch/mips64/mips64/mips64_machdep.c
+++ b/sys/arch/mips64/mips64/mips64_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mips64_machdep.c,v 1.16 2014/08/12 04:28:07 miod Exp $ */
+/* $OpenBSD: mips64_machdep.c,v 1.17 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 2009, 2010, 2012 Miodrag Vallat.
@@ -184,7 +184,7 @@ exec_md_map(struct proc *p, struct exec_package *pack)
va = 0;
rc = uvm_map(&p->p_vmspace->vm_map, &va, PAGE_SIZE, NULL,
UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_ALL, UVM_INH_COPY,
+ UVM_MAPFLAG(PROT_NONE, PROT_MASK, UVM_INH_COPY,
UVM_ADV_NORMAL, UVM_FLAG_COPYONW));
if (rc != 0)
return rc;
diff --git a/sys/arch/mips64/mips64/pmap.c b/sys/arch/mips64/mips64/pmap.c
index ccf92aacf75..97ee57ac8c8 100644
--- a/sys/arch/mips64/mips64/pmap.c
+++ b/sys/arch/mips64/mips64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.76 2014/09/30 06:51:58 jmatthew Exp $ */
+/* $OpenBSD: pmap.c,v 1.77 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -739,7 +739,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
vaddr_t va;
int s;
- if (prot == VM_PROT_NONE) {
+ if (prot == PROT_NONE) {
DPRINTF(PDB_REMOVE, ("pmap_page_protect(%p, 0x%x)\n", pg, prot));
} else {
DPRINTF(PDB_FOLLOW|PDB_PROTECT,
@@ -747,13 +747,13 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
}
switch (prot) {
- case VM_PROT_READ|VM_PROT_WRITE:
- case VM_PROT_ALL:
+ case PROT_READ | PROT_WRITE:
+ case PROT_MASK:
break;
/* copy_on_write */
- case VM_PROT_READ:
- case VM_PROT_READ|VM_PROT_EXECUTE:
+ case PROT_READ:
+ case PROT_READ | PROT_EXEC:
pv = pg_to_pvh(pg);
s = splvm();
/*
@@ -796,12 +796,12 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
("pmap_protect(%p, %p, %p, 0x%x)\n",
pmap, (void *)sva, (void *)eva, prot));
- if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ if ((prot & PROT_READ) == PROT_NONE) {
pmap_remove(pmap, sva, eva);
return;
}
- p = (prot & VM_PROT_WRITE) ? PG_M : PG_RO;
+ p = (prot & PROT_WRITE) ? PG_M : PG_RO;
if (pmap == pmap_kernel()) {
/*
@@ -909,7 +909,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
pg = PHYS_TO_VM_PAGE(pa);
if (pg != NULL) {
- if (!(prot & VM_PROT_WRITE)) {
+ if (!(prot & PROT_WRITE)) {
npte = PG_ROPAGE;
} else {
if (pmap == pmap_kernel()) {
@@ -932,10 +932,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
}
/* Set page referenced/modified status based on flags */
- if (flags & VM_PROT_WRITE)
+ if (flags & PROT_WRITE)
atomic_setbits_int(&pg->pg_flags,
PGF_ATTR_MOD | PGF_ATTR_REF);
- else if (flags & VM_PROT_ALL)
+ else if (flags & PROT_MASK)
atomic_setbits_int(&pg->pg_flags, PGF_ATTR_REF);
stat_count(enter_stats.managed);
@@ -945,7 +945,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
* then it must be device memory which may be volatile.
*/
stat_count(enter_stats.unmanaged);
- if (prot & VM_PROT_WRITE) {
+ if (prot & PROT_WRITE) {
npte = PG_IOPAGE & ~PG_G;
} else {
npte = (PG_IOPAGE | PG_RO) & ~(PG_G | PG_M);
@@ -1052,7 +1052,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
* flag it in the pte.
*/
if (r4000_errata != 0) {
- if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
+ if (pg != NULL && (prot & PROT_EXEC)) {
if ((pg->pg_flags & PGF_EOP_CHECKED) == 0)
atomic_setbits_int(&pg->pg_flags,
PGF_EOP_CHECKED |
@@ -1070,7 +1070,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
/*
* If mapping an executable page, invalidate ICache.
*/
- if (pg != NULL && (prot & VM_PROT_EXECUTE))
+ if (pg != NULL && (prot & PROT_EXEC))
Mips_InvalidateICache(ci, va, PAGE_SIZE);
return 0;
@@ -1091,7 +1091,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
#endif
npte = vad_to_pfn(pa) | PG_G | PG_WIRED;
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
npte |= PG_RWPAGE;
else
npte |= PG_ROPAGE;
diff --git a/sys/arch/mips64/mips64/r4000_errata.c b/sys/arch/mips64/mips64/r4000_errata.c
index f0e6af8e69d..7f55d011c90 100644
--- a/sys/arch/mips64/mips64/r4000_errata.c
+++ b/sys/arch/mips64/mips64/r4000_errata.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: r4000_errata.c,v 1.4 2014/04/04 20:52:05 miod Exp $ */
+/* $OpenBSD: r4000_errata.c,v 1.5 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 2014 Miodrag Vallat.
@@ -215,7 +215,7 @@ eop_tlb_miss_handler(struct trap_frame *trapframe, struct cpu_info *ci,
onfault = pcb->pcb_onfault;
pcb->pcb_onfault = 0;
KERNEL_LOCK();
- (void)uvm_fault(map, va, 0, VM_PROT_READ | VM_PROT_EXECUTE);
+ (void)uvm_fault(map, va, 0, PROT_READ | PROT_EXEC);
KERNEL_UNLOCK();
pcb->pcb_onfault = onfault;
}
diff --git a/sys/arch/mips64/mips64/trap.c b/sys/arch/mips64/mips64/trap.c
index 477e5b73d63..2810fb3c9d6 100644
--- a/sys/arch/mips64/mips64/trap.c
+++ b/sys/arch/mips64/mips64/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.101 2014/09/30 06:51:58 jmatthew Exp $ */
+/* $OpenBSD: trap.c,v 1.102 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -283,7 +283,7 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
if (pmap_is_page_ro(pmap_kernel(),
trunc_page(trapframe->badvaddr), entry)) {
/* write to read only page in the kernel */
- ftype = VM_PROT_WRITE;
+ ftype = PROT_WRITE;
pcb = &p->p_addr->u_pcb;
goto kernel_fault;
}
@@ -320,7 +320,7 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
if (pmap_is_page_ro(pmap,
trunc_page(trapframe->badvaddr), entry)) {
/* write to read only page */
- ftype = VM_PROT_WRITE;
+ ftype = PROT_WRITE;
pcb = &p->p_addr->u_pcb;
goto fault_common_no_miss;
}
@@ -340,7 +340,7 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
case T_TLB_LD_MISS:
case T_TLB_ST_MISS:
- ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
+ ftype = (type == T_TLB_ST_MISS) ? PROT_WRITE : PROT_READ;
pcb = &p->p_addr->u_pcb;
/* check for kernel address */
if (trapframe->badvaddr < 0) {
@@ -379,12 +379,12 @@ itsa(struct trap_frame *trapframe, struct cpu_info *ci, struct proc *p,
}
case T_TLB_LD_MISS+T_USER:
- ftype = VM_PROT_READ;
+ ftype = PROT_READ;
pcb = &p->p_addr->u_pcb;
goto fault_common;
case T_TLB_ST_MISS+T_USER:
- ftype = VM_PROT_WRITE;
+ ftype = PROT_WRITE;
pcb = &p->p_addr->u_pcb;
fault_common:
@@ -453,13 +453,13 @@ fault_common_no_miss:
case T_ADDR_ERR_LD+T_USER: /* misaligned or kseg access */
case T_ADDR_ERR_ST+T_USER: /* misaligned or kseg access */
- ucode = 0; /* XXX should be VM_PROT_something */
+ ucode = 0; /* XXX should be PROT_something */
i = SIGBUS;
typ = BUS_ADRALN;
break;
case T_BUS_ERR_IFETCH+T_USER: /* BERR asserted to cpu */
case T_BUS_ERR_LD_ST+T_USER: /* BERR asserted to cpu */
- ucode = 0; /* XXX should be VM_PROT_something */
+ ucode = 0; /* XXX should be PROT_something */
i = SIGBUS;
typ = BUS_OBJERR;
break;
@@ -659,7 +659,7 @@ fault_common_no_miss:
p->p_md.md_fppgva + PAGE_SIZE);
(void)uvm_map_protect(map, p->p_md.md_fppgva,
p->p_md.md_fppgva + PAGE_SIZE,
- UVM_PROT_NONE, FALSE);
+ PROT_NONE, FALSE);
return;
}
/* FALLTHROUGH */
@@ -1498,7 +1498,7 @@ fpe_branch_emulate(struct proc *p, struct trap_frame *tf, uint32_t insn,
*/
rc = uvm_map_protect(map, p->p_md.md_fppgva,
- p->p_md.md_fppgva + PAGE_SIZE, UVM_PROT_RWX, FALSE);
+ p->p_md.md_fppgva + PAGE_SIZE, PROT_MASK, FALSE);
if (rc != 0) {
#ifdef DEBUG
printf("%s: uvm_map_protect on %p failed: %d\n",
@@ -1507,7 +1507,7 @@ fpe_branch_emulate(struct proc *p, struct trap_frame *tf, uint32_t insn,
return rc;
}
rc = uvm_fault_wire(map, p->p_md.md_fppgva,
- p->p_md.md_fppgva + PAGE_SIZE, UVM_PROT_RWX);
+ p->p_md.md_fppgva + PAGE_SIZE, PROT_MASK);
if (rc != 0) {
#ifdef DEBUG
printf("%s: uvm_fault_wire on %p failed: %d\n",
@@ -1535,7 +1535,7 @@ fpe_branch_emulate(struct proc *p, struct trap_frame *tf, uint32_t insn,
}
(void)uvm_map_protect(map, p->p_md.md_fppgva,
- p->p_md.md_fppgva + PAGE_SIZE, UVM_PROT_RX, FALSE);
+ p->p_md.md_fppgva + PAGE_SIZE, PROT_READ | PROT_EXEC, FALSE);
p->p_md.md_fpbranchva = dest;
p->p_md.md_fpslotva = (vaddr_t)tf->pc + 4;
p->p_md.md_flags |= MDP_FPUSED;
@@ -1548,7 +1548,7 @@ err:
uvm_fault_unwire(map, p->p_md.md_fppgva, p->p_md.md_fppgva + PAGE_SIZE);
err2:
(void)uvm_map_protect(map, p->p_md.md_fppgva,
- p->p_md.md_fppgva + PAGE_SIZE, UVM_PROT_NONE, FALSE);
+ p->p_md.md_fppgva + PAGE_SIZE, PROT_NONE, FALSE);
return rc;
}
#endif
diff --git a/sys/arch/mips64/mips64/vm_machdep.c b/sys/arch/mips64/mips64/vm_machdep.c
index 88adc033d46..da6741cc0f7 100644
--- a/sys/arch/mips64/mips64/vm_machdep.c
+++ b/sys/arch/mips64/mips64/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.29 2014/05/10 22:25:16 jasper Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.30 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 1988 University of Utah.
* Copyright (c) 1992, 1993
@@ -245,8 +245,8 @@ vmapbuf(bp, len)
pmap, uva);
pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
- VM_PROT_READ | VM_PROT_WRITE,
- VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
uva += PAGE_SIZE;
kva += PAGE_SIZE;
sz -= PAGE_SIZE;
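The hunks above and below all perform the same mechanical substitution: the UVM-specific VM_PROT_* / UVM_PROT_* names are replaced by the mmap-style PROT_* constants from <sys/mman.h>, with PROT_MASK standing in for VM_PROT_ALL and UVM_PROT_RWX. A minimal sketch of the correspondence, as it can be read off these hunks (the numeric values are the usual <sys/mman.h> ones and are shown here only for illustration, not taken from this diff):

/* Correspondence implied by the substitutions in this diff (illustrative). */
#define PROT_NONE	0x00	/* replaces VM_PROT_NONE / UVM_PROT_NONE */
#define PROT_READ	0x01	/* replaces VM_PROT_READ */
#define PROT_WRITE	0x02	/* replaces VM_PROT_WRITE */
#define PROT_EXEC	0x04	/* replaces VM_PROT_EXECUTE */
/* PROT_MASK covers all three bits, replacing VM_PROT_ALL / UVM_PROT_RWX. */
#define PROT_MASK	(PROT_READ | PROT_WRITE | PROT_EXEC)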
diff --git a/sys/arch/octeon/octeon/bus_dma.c b/sys/arch/octeon/octeon/bus_dma.c
index 2086aa9cf81..09d8553bf59 100644
--- a/sys/arch/octeon/octeon/bus_dma.c
+++ b/sys/arch/octeon/octeon/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.13 2014/09/13 16:06:36 doug Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.14 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -457,8 +457,8 @@ _dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
panic("_dmamem_map: size botch");
pa = (*t->_device_to_pa)(addr);
error = pmap_enter(pmap_kernel(), va, pa,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
- VM_PROT_WRITE | pmap_flags);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | pmap_flags);
if (error) {
pmap_update(pmap_kernel());
km_free((void *)sva, ssize, &kv_any, &kp_none);
diff --git a/sys/arch/powerpc/powerpc/pmap.c b/sys/arch/powerpc/powerpc/pmap.c
index 965536022f4..953e5072c25 100644
--- a/sys/arch/powerpc/powerpc/pmap.c
+++ b/sys/arch/powerpc/powerpc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.131 2014/11/02 00:11:32 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.132 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 2001, 2002, 2007 Dale Rahn.
@@ -616,7 +616,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
else
pte_insert32(pted);
- if (prot & VM_PROT_EXECUTE) {
+ if (prot & PROT_EXEC) {
u_int sn = VP_SR(va);
pm->pm_exec[sn]++;
@@ -625,7 +625,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
if (pg != NULL) {
need_sync = ((pg->pg_flags & PG_PMAP_EXE) == 0);
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
atomic_clearbits_int(&pg->pg_flags,
PG_PMAP_EXE);
else
@@ -638,7 +638,7 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
* Should we be paranoid about writeable non-exec
* mappings ? if so, clear the exec tag
*/
- if ((prot & VM_PROT_WRITE) && (pg != NULL))
+ if ((prot & PROT_WRITE) && (pg != NULL))
atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
}
@@ -794,7 +794,7 @@ _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
pmap_kremove_pg(va); /* pted is reused */
pm->pm_stats.resident_count++;
- if (prot & VM_PROT_WRITE) {
+ if (prot & PROT_WRITE) {
pg = PHYS_TO_VM_PAGE(pa);
if (pg != NULL)
atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
@@ -832,7 +832,7 @@ _pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, int flags, int cache)
pted->pted_va |= PTED_VA_WIRED_M;
- if (prot & VM_PROT_EXECUTE) {
+ if (prot & PROT_EXEC) {
u_int sn = VP_SR(va);
pm->pm_exec[sn]++;
@@ -1030,14 +1030,14 @@ pmap_fill_pte64(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
else
pte64->pte_lo |= (PTE_M_64 | PTE_I_64 | PTE_G_64);
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
pte64->pte_lo |= PTE_RW_64;
else
pte64->pte_lo |= PTE_RO_64;
pted->pted_va = va & ~PAGE_MASK;
- if (prot & VM_PROT_EXECUTE)
+ if (prot & PROT_EXEC)
pted->pted_va |= PTED_VA_EXEC_M;
else
pte64->pte_lo |= PTE_N_64;
@@ -1068,7 +1068,7 @@ pmap_fill_pte32(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
else
pte32->pte_lo |= (PTE_M_32 | PTE_I_32 | PTE_G_32);
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
pte32->pte_lo |= PTE_RW_32;
else
pte32->pte_lo |= PTE_RO_32;
@@ -1076,7 +1076,7 @@ pmap_fill_pte32(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
pted->pted_va = va & ~PAGE_MASK;
/* XXX Per-page execution control. */
- if (prot & VM_PROT_EXECUTE)
+ if (prot & PROT_EXEC)
pted->pted_va |= PTED_VA_EXEC_M;
pted->pted_pmap = pm;
@@ -1227,7 +1227,7 @@ pmap_zero_page(struct vm_page *pg)
#endif
/* simple_lock(&pmap_zero_page_lock); */
- pmap_kenter_pa(zero_page, pa, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(zero_page, pa, PROT_READ | PROT_WRITE);
#ifdef USE_DCBZ
for (i = PAGE_SIZE/CACHELINESIZE; i>0; i--) {
__asm volatile ("dcbz 0,%0" :: "r"(addr));
@@ -1251,8 +1251,8 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg)
paddr_t dstpa = VM_PAGE_TO_PHYS(dstpg);
/* simple_lock(&pmap_copy_page_lock); */
- pmap_kenter_pa(copy_src_page, srcpa, VM_PROT_READ);
- pmap_kenter_pa(copy_dst_page, dstpa, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(copy_src_page, srcpa, PROT_READ);
+ pmap_kenter_pa(copy_dst_page, dstpa, PROT_READ | PROT_WRITE);
bcopy((void *)copy_src_page, (void *)copy_dst_page, PAGE_SIZE);
@@ -2054,7 +2054,7 @@ pmap_page_ro64(pmap_t pm, vaddr_t va, vm_prot_t prot)
pg = PHYS_TO_VM_PAGE(pted->p.pted_pte64.pte_lo & PTE_RPGN_64);
if (pg->pg_flags & PG_PMAP_EXE) {
- if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == VM_PROT_WRITE) {
+ if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_WRITE) {
atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
} else {
pmap_syncicache_user_virt(pm, va);
@@ -2064,7 +2064,7 @@ pmap_page_ro64(pmap_t pm, vaddr_t va, vm_prot_t prot)
pted->p.pted_pte64.pte_lo &= ~PTE_PP_64;
pted->p.pted_pte64.pte_lo |= PTE_RO_64;
- if ((prot & VM_PROT_EXECUTE) == 0)
+ if ((prot & PROT_EXEC) == 0)
pted->p.pted_pte64.pte_lo |= PTE_N_64;
sr = ptesr(pm->pm_sr, va);
@@ -2113,7 +2113,7 @@ pmap_page_ro32(pmap_t pm, vaddr_t va, vm_prot_t prot)
pg = PHYS_TO_VM_PAGE(pted->p.pted_pte32.pte_lo & PTE_RPGN_32);
if (pg->pg_flags & PG_PMAP_EXE) {
- if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == VM_PROT_WRITE) {
+ if ((prot & (PROT_WRITE | PROT_EXEC)) == PROT_WRITE) {
atomic_clearbits_int(&pg->pg_flags, PG_PMAP_EXE);
} else {
pmap_syncicache_user_virt(pm, va);
@@ -2170,7 +2170,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
/* need to lock for this pv */
s = splvm();
- if (prot == VM_PROT_NONE) {
+ if (prot == PROT_NONE) {
while (!LIST_EMPTY(&(pg->mdpage.pv_list))) {
pted = LIST_FIRST(&(pg->mdpage.pv_list));
pmap_remove_pg(pted->pted_pmap, pted->pted_va);
@@ -2194,7 +2194,7 @@ void
pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
int s;
- if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
+ if (prot & (PROT_READ | PROT_EXEC)) {
s = splvm();
if (ppc_proc_is_64b) {
while (sva < eva) {
@@ -2302,7 +2302,7 @@ pte_spill_r(u_int32_t va, u_int32_t msr, u_int32_t dsisr, int exec_fault)
/* 0 - physmaxaddr mapped 1-1 */
if (va < physmaxaddr) {
u_int32_t aligned_va;
- vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
+ vm_prot_t prot = PROT_READ | PROT_WRITE;
extern caddr_t kernel_text;
extern caddr_t etext;
@@ -2310,7 +2310,7 @@ pte_spill_r(u_int32_t va, u_int32_t msr, u_int32_t dsisr, int exec_fault)
if (va >= trunc_page((vaddr_t)&kernel_text) &&
va < round_page((vaddr_t)&etext)) {
- prot |= VM_PROT_EXECUTE;
+ prot |= PROT_EXEC;
}
aligned_va = trunc_page(va);
diff --git a/sys/arch/powerpc/powerpc/trap.c b/sys/arch/powerpc/powerpc/trap.c
index 6915c9c6121..5586f35ec73 100644
--- a/sys/arch/powerpc/powerpc/trap.c
+++ b/sys/arch/powerpc/powerpc/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.102 2014/09/06 09:42:23 mpi Exp $ */
+/* $OpenBSD: trap.c,v 1.103 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: trap.c,v 1.3 1996/10/13 03:31:37 christos Exp $ */
/*
@@ -289,9 +289,9 @@ trap(struct trapframe *frame)
return;
}
if (frame->dsisr & DSISR_STORE)
- ftype = VM_PROT_READ | VM_PROT_WRITE;
+ ftype = PROT_READ | PROT_WRITE;
else
- ftype = VM_PROT_READ;
+ ftype = PROT_READ;
KERNEL_LOCK();
if (uvm_fault(map, trunc_page(va), 0, ftype) == 0) {
KERNEL_UNLOCK();
@@ -323,10 +323,10 @@ printf("kern dsi on addr %x iar %x\n", frame->dar, frame->srr0);
KERNEL_LOCK();
if (frame->dsisr & DSISR_STORE) {
- ftype = VM_PROT_READ | VM_PROT_WRITE;
- vftype = VM_PROT_WRITE;
+ ftype = PROT_READ | PROT_WRITE;
+ vftype = PROT_WRITE;
} else
- vftype = ftype = VM_PROT_READ;
+ vftype = ftype = PROT_READ;
if (uvm_fault(&p->p_vmspace->vm_map,
trunc_page(frame->dar), 0, ftype) == 0) {
uvm_grow(p, trunc_page(frame->dar));
@@ -355,7 +355,7 @@ printf("dsi on addr %x iar %x lr %x\n", frame->dar, frame->srr0,frame->lr);
break;
KERNEL_LOCK();
- ftype = VM_PROT_READ | VM_PROT_EXECUTE;
+ ftype = PROT_READ | PROT_EXEC;
if (uvm_fault(&p->p_vmspace->vm_map,
trunc_page(frame->srr0), 0, ftype) == 0) {
uvm_grow(p, trunc_page(frame->srr0));
@@ -373,7 +373,7 @@ printf("isi iar %x lr %x\n", frame->srr0, frame->lr);
/* XXX Have to make sure that sigreturn does the right thing. */
sv.sival_int = frame->srr0;
KERNEL_LOCK();
- trapsignal(p, SIGSEGV, VM_PROT_EXECUTE, SEGV_MAPERR, sv);
+ trapsignal(p, SIGSEGV, PROT_EXEC, SEGV_MAPERR, sv);
KERNEL_UNLOCK();
break;
case EXC_SC|EXC_USER:
@@ -483,7 +483,7 @@ printf("isi iar %x lr %x\n", frame->srr0, frame->lr);
else {
sv.sival_int = frame->srr0;
KERNEL_LOCK();
- trapsignal(p, SIGBUS, VM_PROT_EXECUTE, BUS_ADRALN,
+ trapsignal(p, SIGBUS, PROT_EXEC, BUS_ADRALN,
sv);
KERNEL_UNLOCK();
}
diff --git a/sys/arch/powerpc/powerpc/vm_machdep.c b/sys/arch/powerpc/powerpc/vm_machdep.c
index e7cfa6db6ff..475fe57504c 100644
--- a/sys/arch/powerpc/powerpc/vm_machdep.c
+++ b/sys/arch/powerpc/powerpc/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.46 2013/01/16 19:04:43 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.47 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: vm_machdep.c,v 1.1 1996/09/30 16:34:57 ws Exp $ */
/*
@@ -217,7 +217,7 @@ vmapbuf(struct buf *bp, vsize_t len)
pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
faddr, &pa);
pmap_enter(vm_map_pmap(phys_map), taddr, pa,
- VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
+ PROT_READ | PROT_WRITE, PMAP_WIRED);
faddr += NBPG;
taddr += NBPG;
}
diff --git a/sys/arch/sgi/sgi/bus_dma.c b/sys/arch/sgi/sgi/bus_dma.c
index 7304c302bd1..c0d1519ee35 100644
--- a/sys/arch/sgi/sgi/bus_dma.c
+++ b/sys/arch/sgi/sgi/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.38 2014/09/13 16:06:37 doug Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.39 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 2003-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -505,8 +505,8 @@ _dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
#endif
pa = (*t->_device_to_pa)(addr);
error = pmap_enter(pmap_kernel(), va, pa,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
- VM_PROT_WRITE | pmap_flags);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | pmap_flags);
if (error) {
pmap_update(pmap_kernel());
km_free((void *)sva, ssize, &kv_any, &kp_none);
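In the _dmamem_map()-style hunks such as the one above, both protection arguments of pmap_enter() change together: the protection and the access-type/flags word are now both expressed with PROT_* bits, optionally OR'ed with pmap flags such as PMAP_WIRED or PMAP_CANFAIL. A hedged sketch of the before/after shape of such a call, with the names taken directly from the hunks in this diff (not a complete function):

/* Before: protection and access type spelled with the UVM names. */
error = pmap_enter(pmap_kernel(), va, pa,
    VM_PROT_READ | VM_PROT_WRITE,
    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);

/* After: the same call expressed with the mmap-style PROT_* bits. */
error = pmap_enter(pmap_kernel(), va, pa,
    PROT_READ | PROT_WRITE,
    PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);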
diff --git a/sys/arch/sh/sh/pmap.c b/sys/arch/sh/sh/pmap.c
index 085df11e5fc..fd895d0fee1 100644
--- a/sys/arch/sh/sh/pmap.c
+++ b/sys/arch/sh/sh/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.21 2014/01/20 21:19:28 guenther Exp $ */
+/* $OpenBSD: pmap.c,v 1.22 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.55 2006/08/07 23:19:36 tsutsui Exp $ */
/*-
@@ -296,7 +296,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
boolean_t kva = (pmap == pmap_kernel());
/* "flags" never exceed "prot" */
- KDASSERT(prot != 0 && ((flags & VM_PROT_ALL) & ~prot) == 0);
+ KDASSERT(prot != 0 && ((flags & PROT_MASK) & ~prot) == 0);
pg = PHYS_TO_VM_PAGE(pa);
entry = (pa & PG_PPN) | PG_4K;
@@ -308,16 +308,16 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
entry |= PG_C; /* always cached */
/* Modified/reference tracking */
- if (flags & VM_PROT_WRITE) {
+ if (flags & PROT_WRITE) {
entry |= PG_V | PG_D;
pvh->pvh_flags |= PVH_MODIFIED | PVH_REFERENCED;
- } else if (flags & VM_PROT_ALL) {
+ } else if (flags & PROT_MASK) {
entry |= PG_V;
pvh->pvh_flags |= PVH_REFERENCED;
}
/* Protection */
- if ((prot & VM_PROT_WRITE) && (pvh->pvh_flags & PVH_MODIFIED)) {
+ if ((prot & PROT_WRITE) && (pvh->pvh_flags & PVH_MODIFIED)) {
if (kva)
entry |= PG_PR_KRW | PG_SH;
else
@@ -343,11 +343,11 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
} else { /* bus-space (always uncached map) */
if (kva) {
entry |= PG_V | PG_SH |
- ((prot & VM_PROT_WRITE) ?
+ ((prot & PROT_WRITE) ?
(PG_PR_KRW | PG_D) : PG_PR_KRO);
} else {
entry |= PG_V |
- ((prot & VM_PROT_WRITE) ?
+ ((prot & PROT_WRITE) ?
(PG_PR_URW | PG_D) : PG_PR_URO);
}
}
@@ -370,7 +370,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
sh_tlb_update(pmap->pm_asid, va, entry);
if (!SH_HAS_UNIFIED_CACHE &&
- (prot == (VM_PROT_READ | VM_PROT_EXECUTE)))
+ (prot == (PROT_READ | PROT_EXEC)))
sh_icache_sync_range_index(va, PAGE_SIZE);
if (entry & _PG_WIRED)
@@ -444,11 +444,11 @@ __pmap_pv_enter(pmap_t pmap, struct vm_page *pg, vaddr_t va, vm_prot_t prot)
if (SH_HAS_VIRTUAL_ALIAS) {
/* Remove all other mapping on this physical page */
pvh = &pg->mdpage;
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
have_writeable = 1;
else {
SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
- if (pv->pv_prot & VM_PROT_WRITE) {
+ if (pv->pv_prot & PROT_WRITE) {
have_writeable = 1;
break;
}
@@ -557,7 +557,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
KDASSERT(va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS);
entry = (pa & PG_PPN) | PG_V | PG_SH | PG_4K;
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
entry |= (PG_PR_KRW | PG_D);
else
entry |= PG_PR_KRO;
@@ -632,7 +632,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
sva = trunc_page(sva);
- if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ if ((prot & PROT_READ) == PROT_NONE) {
pmap_remove(pmap, sva, eva);
return;
}
@@ -641,14 +641,14 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
default:
panic("pmap_protect: invalid protection mode %x", prot);
/* NOTREACHED */
- case VM_PROT_READ:
+ case PROT_READ:
/* FALLTHROUGH */
- case VM_PROT_READ | VM_PROT_EXECUTE:
+ case PROT_READ | PROT_EXEC:
protbits = kernel ? PG_PR_KRO : PG_PR_URO;
break;
- case VM_PROT_READ | VM_PROT_WRITE:
+ case PROT_READ | PROT_WRITE:
/* FALLTHROUGH */
- case VM_PROT_ALL:
+ case PROT_MASK:
protbits = kernel ? PG_PR_KRW : PG_PR_URW;
break;
}
@@ -660,7 +660,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
continue;
if (SH_HAS_VIRTUAL_ALIAS && (entry & PG_D)) {
- if (!SH_HAS_UNIFIED_CACHE && (prot & VM_PROT_EXECUTE))
+ if (!SH_HAS_UNIFIED_CACHE && (prot & PROT_EXEC))
sh_icache_sync_range_index(va, PAGE_SIZE);
else
sh_dcache_wbinv_range_index(va, PAGE_SIZE);
@@ -711,14 +711,14 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
int s;
switch (prot) {
- case VM_PROT_READ | VM_PROT_WRITE:
+ case PROT_READ | PROT_WRITE:
/* FALLTHROUGH */
- case VM_PROT_ALL:
+ case PROT_MASK:
break;
- case VM_PROT_READ:
+ case PROT_READ:
/* FALLTHROUGH */
- case VM_PROT_READ | VM_PROT_EXECUTE:
+ case PROT_READ | PROT_EXEC:
s = splvm();
SLIST_FOREACH(pv, &pvh->pvh_head, pv_link) {
pmap = pv->pv_pmap;
diff --git a/sys/arch/sh/sh/trap.c b/sys/arch/sh/sh/trap.c
index 7474f955124..a2b7a21987e 100644
--- a/sys/arch/sh/sh/trap.c
+++ b/sys/arch/sh/sh/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.31 2014/10/13 04:47:22 miod Exp $ */
+/* $OpenBSD: trap.c,v 1.32 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: exception.c,v 1.32 2006/09/04 23:57:52 uwe Exp $ */
/* $NetBSD: syscall.c,v 1.6 2006/03/07 07:21:50 thorpej Exp $ */
@@ -350,15 +350,15 @@ tlb_exception(struct proc *p, struct trapframe *tf, uint32_t va)
switch (tf->tf_expevt) {
case EXPEVT_TLB_MISS_LD:
track = PVH_REFERENCED;
- ftype = VM_PROT_READ;
+ ftype = PROT_READ;
break;
case EXPEVT_TLB_MISS_ST:
track = PVH_REFERENCED;
- ftype = VM_PROT_WRITE;
+ ftype = PROT_WRITE;
break;
case EXPEVT_TLB_MOD:
track = PVH_REFERENCED | PVH_MODIFIED;
- ftype = VM_PROT_WRITE;
+ ftype = PROT_WRITE;
break;
case EXPEVT_TLB_PROT_LD:
TLB_ASSERT((int)va > 0,
@@ -376,7 +376,7 @@ tlb_exception(struct proc *p, struct trapframe *tf, uint32_t va)
case EXPEVT_TLB_PROT_ST:
track = 0; /* call uvm_fault first. (COW) */
- ftype = VM_PROT_WRITE;
+ ftype = PROT_WRITE;
break;
default:
diff --git a/sys/arch/sh/sh/vm_machdep.c b/sys/arch/sh/sh/vm_machdep.c
index 163d312e346..b48aeba66d2 100644
--- a/sys/arch/sh/sh/vm_machdep.c
+++ b/sys/arch/sh/sh/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.11 2013/01/16 19:04:43 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.12 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: vm_machdep.c,v 1.53 2006/08/31 16:49:21 matt Exp $ */
/*
@@ -340,7 +340,7 @@ vmapbuf(struct buf *bp, vsize_t len)
while (len) {
pmap_extract(upmap, faddr, &fpa);
pmap_enter(kpmap, taddr, fpa,
- VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
+ PROT_READ | PROT_WRITE, PMAP_WIRED);
faddr += PAGE_SIZE;
taddr += PAGE_SIZE;
len -= PAGE_SIZE;
diff --git a/sys/arch/socppc/socppc/dma.c b/sys/arch/socppc/socppc/dma.c
index b19139179b1..b1dd82f5866 100644
--- a/sys/arch/socppc/socppc/dma.c
+++ b/sys/arch/socppc/socppc/dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: dma.c,v 1.12 2014/07/12 18:44:42 tedu Exp $ */
+/* $OpenBSD: dma.c,v 1.13 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
/*-
@@ -482,8 +482,8 @@ _dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size,
if (size == 0)
panic("_bus_dmamem_map: size botch");
error = pmap_enter(pmap_kernel(), va, addr,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
- VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
if (error) {
pmap_update(pmap_kernel());
km_free((void *)sva, ssize, &kv_any, &kp_none);
diff --git a/sys/arch/socppc/socppc/machdep.c b/sys/arch/socppc/socppc/machdep.c
index 08bccc29d09..0017be81f79 100644
--- a/sys/arch/socppc/socppc/machdep.c
+++ b/sys/arch/socppc/socppc/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.58 2014/10/27 21:56:57 kettenis Exp $ */
+/* $OpenBSD: machdep.c,v 1.59 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: machdep.c,v 1.4 1996/10/16 19:33:11 ws Exp $ */
/*
@@ -579,7 +579,7 @@ bus_mem_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
bpa, size, *bshp, spa);
#endif
for (; len > 0; len -= PAGE_SIZE) {
- pmap_kenter_cache(vaddr, spa, VM_PROT_READ | VM_PROT_WRITE,
+ pmap_kenter_cache(vaddr, spa, PROT_READ | PROT_WRITE,
(flags & BUS_SPACE_MAP_CACHEABLE) ?
PMAP_CACHE_WT : PMAP_CACHE_CI);
spa += PAGE_SIZE;
@@ -632,7 +632,7 @@ mapiodev(paddr_t pa, psize_t len)
for (vaddr = va; size > 0; size -= PAGE_SIZE) {
pmap_kenter_cache(vaddr, spa,
- VM_PROT_READ | VM_PROT_WRITE, PMAP_CACHE_DEFAULT);
+ PROT_READ | PROT_WRITE, PMAP_CACHE_DEFAULT);
spa += PAGE_SIZE;
vaddr += PAGE_SIZE;
}
diff --git a/sys/arch/solbourne/solbourne/machdep.c b/sys/arch/solbourne/solbourne/machdep.c
index 9e80bb88d12..416c76d0315 100644
--- a/sys/arch/solbourne/solbourne/machdep.c
+++ b/sys/arch/solbourne/solbourne/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.43 2014/09/20 09:28:24 kettenis Exp $ */
+/* $OpenBSD: machdep.c,v 1.44 2014/11/16 12:30:58 deraadt Exp $ */
/* OpenBSD: machdep.c,v 1.105 2005/04/11 15:13:01 deraadt Exp */
/*
@@ -130,7 +130,8 @@ cpu_startup()
/*
* fix message buffer mapping
*/
- pmap_map(MSGBUF_VA, MSGBUF_PA, MSGBUF_PA + MSGBUFSIZE, UVM_PROT_RW);
+ pmap_map(MSGBUF_VA, MSGBUF_PA, MSGBUF_PA + MSGBUFSIZE,
+ PROT_READ | PROT_WRITE);
initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)), MSGBUFSIZE);
proc0.p_addr = proc0paddr;
@@ -629,7 +630,8 @@ mapdev(phys, virt, offset, size)
pmtype = PMAP_IOENC(phys->rr_iospace);
do {
- pmap_kenter_pa(va, pa | pmtype | PMAP_NC, UVM_PROT_RW);
+ pmap_kenter_pa(va, pa | pmtype | PMAP_NC,
+ PROT_READ | PROT_WRITE);
va += PAGE_SIZE;
pa += PAGE_SIZE;
} while ((size -= PAGE_SIZE) > 0);
@@ -732,7 +734,7 @@ kap_maskcheck()
void (*test)(void);
pmap_enter(pmap_kernel(), TMPMAP_VA,
- trunc_page((vaddr_t)masktest) | PMAP_BWS, UVM_PROT_READ, 0);
+ trunc_page((vaddr_t)masktest) | PMAP_BWS, PROT_READ, 0);
test = (void (*)(void))(TMPMAP_VA + ((vaddr_t)masktest & PAGE_MASK));
cpcb->pcb_onfault = (caddr_t)kap_maskfault;
diff --git a/sys/arch/solbourne/solbourne/mem.c b/sys/arch/solbourne/solbourne/mem.c
index d8eb8658bfa..cda1296247b 100644
--- a/sys/arch/solbourne/solbourne/mem.c
+++ b/sys/arch/solbourne/solbourne/mem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mem.c,v 1.2 2007/09/22 16:21:32 krw Exp $ */
+/* $OpenBSD: mem.c,v 1.3 2014/11/16 12:30:58 deraadt Exp $ */
/* OpenBSD: mem.c,v 1.21 2003/06/02 23:27:55 millert Exp */
/*
@@ -140,7 +140,7 @@ mmrw(dev, uio, flags)
}
pmap_enter(pmap_kernel(), mem_page,
trunc_page(pa), uio->uio_rw == UIO_READ ?
- VM_PROT_READ : VM_PROT_WRITE, PMAP_WIRED);
+ PROT_READ : PROT_WRITE, PMAP_WIRED);
pmap_update(pmap_kernel());
o = uio->uio_offset & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
diff --git a/sys/arch/solbourne/solbourne/pmap.c b/sys/arch/solbourne/solbourne/pmap.c
index a75d209a338..58f85528376 100644
--- a/sys/arch/solbourne/solbourne/pmap.c
+++ b/sys/arch/solbourne/solbourne/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.6 2014/01/24 05:33:32 jsg Exp $ */
+/* $OpenBSD: pmap.c,v 1.7 2014/11/16 12:30:58 deraadt Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat
*
@@ -789,7 +789,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
struct pvlist *pvl;
int s;
- if ((prot & VM_PROT_READ) == VM_PROT_NONE) { /* remove all */
+ if ((prot & PROT_READ) == PROT_NONE) { /* remove all */
s = splvm();
pvl = pg_to_pvl(pg);
@@ -802,7 +802,7 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
}
splx(s);
- } else if ((prot & VM_PROT_WRITE) == VM_PROT_NONE) {
+ } else if ((prot & PROT_WRITE) == PROT_NONE) {
s = splvm();
pvl = pg_to_pvl(pg);
@@ -838,7 +838,7 @@ pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t e, vm_prot_t prot)
DPRINTF(PDB_PROTECT,
("pmap_protect(%p,%08x,%08x,%x)\n", pmap, sva, e, prot));
- if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ if ((prot & PROT_READ) == PROT_NONE) {
pmap_remove(pmap, sva, e);
splx(s);
return;
@@ -863,7 +863,7 @@ pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t e, vm_prot_t prot)
continue;
npte = (opte & ~PG_RO) |
- (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
+ (prot & PROT_WRITE) ? PG_RW : PG_RO;
if (opte != npte) {
*pte = npte;
tlb_flush(va);
@@ -944,7 +944,7 @@ pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
}
pa = trunc_page(pa);
- npte |= pa | PG_V | (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
+ npte |= pa | PG_V | (prot & PROT_WRITE ? PG_RW : PG_RO);
pg = PHYS_TO_VM_PAGE(pa);
if (pg != NULL) {
@@ -1064,7 +1064,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
}
pa = trunc_page(pa);
- npte |= pa | PG_V | PG_W | (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
+ npte |= pa | PG_V | PG_W | (prot & PROT_WRITE ? PG_RW : PG_RO);
if ((opte & PG_W) == 0)
pmap_kernel()->pm_stats.wired_count++;
@@ -1476,7 +1476,7 @@ pmap_changeprot(struct pmap *pmap, vaddr_t va, vm_prot_t prot, int wired)
s = splvm();
- npte = PG_S | (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
+ npte = PG_S | (prot & PROT_WRITE ? PG_RW : PG_RO);
pte = pmap_pte(pmap, va);
if ((*pte & PG_PROT) != npte) {
diff --git a/sys/arch/solbourne/solbourne/trap.c b/sys/arch/solbourne/solbourne/trap.c
index d50588dff5e..798773463aa 100644
--- a/sys/arch/solbourne/solbourne/trap.c
+++ b/sys/arch/solbourne/solbourne/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.21 2014/05/11 00:12:44 guenther Exp $ */
+/* $OpenBSD: trap.c,v 1.22 2014/11/16 12:30:58 deraadt Exp $ */
/* OpenBSD: trap.c,v 1.42 2004/12/06 20:12:25 miod Exp */
/*
@@ -651,7 +651,7 @@ mem_access_fault(type, ser, v, pc, psr, tf)
*/
if (type == T_TEXTFAULT)
v = pc;
- ftype = ser & FCR_RO ? VM_PROT_WRITE : VM_PROT_READ;
+ ftype = ser & FCR_RO ? PROT_WRITE : PROT_READ;
va = trunc_page(v);
if (psr & PSR_PS) {
if (type == T_TEXTFAULT) {
@@ -724,8 +724,8 @@ kfault:
}
sv.sival_int = v;
- trapsignal(p, SIGSEGV, (ser & FCR_RO) ? VM_PROT_WRITE :
- VM_PROT_READ, SEGV_MAPERR, sv);
+ trapsignal(p, SIGSEGV, (ser & FCR_RO) ? PROT_WRITE :
+ PROT_READ, SEGV_MAPERR, sv);
}
out:
if ((psr & PSR_PS) == 0) {
diff --git a/sys/arch/sparc/dev/if_ie.c b/sys/arch/sparc/dev/if_ie.c
index f07eab3c829..1dc5dad2d1f 100644
--- a/sys/arch/sparc/dev/if_ie.c
+++ b/sys/arch/sparc/dev/if_ie.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_ie.c,v 1.47 2014/07/22 10:35:35 mpi Exp $ */
+/* $OpenBSD: if_ie.c,v 1.48 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: if_ie.c,v 1.33 1997/07/29 17:55:38 fair Exp $ */
/*-
@@ -557,7 +557,7 @@ ieattach(parent, self, aux)
panic("ie pmap_extract");
pmap_enter(pmap_kernel(), trunc_page(IEOB_ADBASE+IE_SCP_ADDR),
(paddr_t)pa | PMAP_NC /*| PMAP_IOC*/,
- VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
+ PROT_READ | PROT_WRITE, PMAP_WIRED);
pmap_update(pmap_kernel());
sc->scp = (volatile struct ie_sys_conf_ptr *)
diff --git a/sys/arch/sparc/sparc/autoconf.c b/sys/arch/sparc/sparc/autoconf.c
index 5a6b80a7c0d..bb338bf3e7c 100644
--- a/sys/arch/sparc/sparc/autoconf.c
+++ b/sys/arch/sparc/sparc/autoconf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: autoconf.c,v 1.95 2014/07/12 18:44:43 tedu Exp $ */
+/* $OpenBSD: autoconf.c,v 1.96 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: autoconf.c,v 1.73 1997/07/29 09:41:53 fair Exp $ */
/*
@@ -357,7 +357,7 @@ bootstrap()
*/
pmap_kenter_pa(INTRREG_VA, PMAP_NC | PMAP_OBIO |
(CPU_ISSUN4E ? INT_ENABLE_REG_PHYSADR_4E :
- INT_ENABLE_REG_PHYSADR_44C), VM_PROT_READ | VM_PROT_WRITE);
+ INT_ENABLE_REG_PHYSADR_44C), PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
/* Disable all interrupts */
*((unsigned char *)INTRREG_VA) = 0;
diff --git a/sys/arch/sparc/sparc/clock.c b/sys/arch/sparc/sparc/clock.c
index befc549d698..d958101459f 100644
--- a/sys/arch/sparc/sparc/clock.c
+++ b/sys/arch/sparc/sparc/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.29 2014/07/12 18:44:43 tedu Exp $ */
+/* $OpenBSD: clock.c,v 1.30 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: clock.c,v 1.52 1997/05/24 20:16:05 pk Exp $ */
/*
@@ -438,9 +438,9 @@ clockattach(parent, self, aux)
* the MK48T08 is 8K
*/
cl = (struct clockreg *)mapiodev(ra->ra_reg, 0, 8192);
- pmap_changeprot(pmap_kernel(), (vaddr_t)cl, VM_PROT_READ, 1);
+ pmap_changeprot(pmap_kernel(), (vaddr_t)cl, PROT_READ, 1);
pmap_changeprot(pmap_kernel(), (vaddr_t)cl + 4096,
- VM_PROT_READ, 1);
+ PROT_READ, 1);
cl = (struct clockreg *)((int)cl + CLK_MK48T08_OFF);
} else {
/*
@@ -448,7 +448,7 @@ clockattach(parent, self, aux)
*/
cl = (struct clockreg *)mapiodev(ra->ra_reg, 0,
sizeof *clockreg);
- pmap_changeprot(pmap_kernel(), (vaddr_t)cl, VM_PROT_READ, 1);
+ pmap_changeprot(pmap_kernel(), (vaddr_t)cl, PROT_READ, 1);
}
idp = &cl->cl_idprom;
@@ -635,9 +635,9 @@ clk_wenable(onoff)
s = splhigh();
if (onoff)
- prot = writers++ == 0 ? VM_PROT_READ|VM_PROT_WRITE : 0;
+ prot = writers++ == 0 ? PROT_READ | PROT_WRITE : 0;
else
- prot = --writers == 0 ? VM_PROT_READ : 0;
+ prot = --writers == 0 ? PROT_READ : 0;
splx(s);
if (prot)
pmap_changeprot(pmap_kernel(), (vaddr_t)clockreg & ~(NBPG-1),
diff --git a/sys/arch/sparc/sparc/iommu.c b/sys/arch/sparc/sparc/iommu.c
index 90de900e83b..b93bcd48ab7 100644
--- a/sys/arch/sparc/sparc/iommu.c
+++ b/sys/arch/sparc/sparc/iommu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: iommu.c,v 1.27 2014/07/11 09:36:26 mpi Exp $ */
+/* $OpenBSD: iommu.c,v 1.28 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: iommu.c,v 1.13 1997/07/29 09:42:04 fair Exp $ */
/*
@@ -253,7 +253,7 @@ iommu_attach(parent, self, aux)
while (m) {
paddr_t pa = VM_PAGE_TO_PHYS(m);
- pmap_kenter_pa(va, pa | PMAP_NC, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa | PMAP_NC, PROT_READ | PROT_WRITE);
va += PAGE_SIZE;
m = TAILQ_NEXT(m, pageq);
}
@@ -730,7 +730,7 @@ iommu_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
panic("iommu_dmamem_map: size botch");
addr = VM_PAGE_TO_PHYS(m);
- pmap_kenter_pa(va, addr | cbit, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, addr | cbit, PROT_READ | PROT_WRITE);
#if 0
if (flags & BUS_DMA_COHERENT)
/* XXX */;
diff --git a/sys/arch/sparc/sparc/machdep.c b/sys/arch/sparc/sparc/machdep.c
index 997b3a76f60..40a19b746f7 100644
--- a/sys/arch/sparc/sparc/machdep.c
+++ b/sys/arch/sparc/sparc/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.165 2014/09/20 09:28:24 kettenis Exp $ */
+/* $OpenBSD: machdep.c,v 1.166 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: machdep.c,v 1.85 1997/09/12 08:55:02 pk Exp $ */
/*
@@ -163,7 +163,7 @@ cpu_startup()
/* Enter the new mapping */
pmap_map(MSGBUF_VA, msgbufpa, msgbufpa + PAGE_SIZE,
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
/* Re-initialize the message buffer. */
initmsgbuf((caddr_t)(MSGBUF_VA + (CPU_ISSUN4 ? 4096 : 0)), MSGBUFSIZE);
@@ -718,7 +718,7 @@ dumpsys()
printf("%d ", i / (1024*1024));
(void) pmap_map(dumpspace, maddr, maddr + n,
- VM_PROT_READ);
+ PROT_READ);
error = (*dump)(dumpdev, blkno,
(caddr_t)dumpspace, (int)n);
pmap_remove(pmap_kernel(), dumpspace, dumpspace + n);
@@ -823,7 +823,7 @@ mapdev(phys, virt, offset, size)
do {
pmap_kenter_pa(va, pa | pmtype | PMAP_NC,
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
va += PAGE_SIZE;
pa += PAGE_SIZE;
} while ((size -= PAGE_SIZE) > 0);
diff --git a/sys/arch/sparc/sparc/mem.c b/sys/arch/sparc/sparc/mem.c
index 1069bc191cb..a2b3b0c2b14 100644
--- a/sys/arch/sparc/sparc/mem.c
+++ b/sys/arch/sparc/sparc/mem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mem.c,v 1.22 2007/09/22 16:21:32 krw Exp $ */
+/* $OpenBSD: mem.c,v 1.23 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: mem.c,v 1.13 1996/03/30 21:12:16 christos Exp $ */
/*
@@ -141,7 +141,7 @@ mmrw(dev, uio, flags)
}
pmap_enter(pmap_kernel(), mem_page,
trunc_page(pa), uio->uio_rw == UIO_READ ?
- VM_PROT_READ : VM_PROT_WRITE, PMAP_WIRED);
+ PROT_READ : PROT_WRITE, PMAP_WIRED);
pmap_update(pmap_kernel());
o = uio->uio_offset & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
diff --git a/sys/arch/sparc/sparc/pmap.c b/sys/arch/sparc/sparc/pmap.c
index 9d35e0a6fae..d74376a44b9 100644
--- a/sys/arch/sparc/sparc/pmap.c
+++ b/sys/arch/sparc/sparc/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.165 2014/07/12 18:44:43 tedu Exp $ */
+/* $OpenBSD: pmap.c,v 1.166 2014/11/16 12:30:58 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.118 1998/05/19 19:00:18 thorpej Exp $ */
/*
@@ -668,35 +668,35 @@ sparc_protection_init4m(void)
for (prot = 0; prot < 8; prot++) {
switch (prot) {
- case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ case PROT_READ | PROT_WRITE | PROT_EXEC:
kp[prot] = PPROT_N_RWX;
up[prot] = PPROT_RWX_RWX;
break;
- case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
+ case PROT_READ | PROT_WRITE | PROT_NONE:
kp[prot] = PPROT_N_RWX;
up[prot] = PPROT_RW_RW;
break;
- case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
+ case PROT_READ | PROT_NONE | PROT_EXEC:
kp[prot] = PPROT_N_RX;
up[prot] = PPROT_RX_RX;
break;
- case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
+ case PROT_READ | PROT_NONE | PROT_NONE:
kp[prot] = PPROT_N_RX;
up[prot] = PPROT_R_R;
break;
- case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ case PROT_NONE | PROT_WRITE | PROT_EXEC:
kp[prot] = PPROT_N_RWX;
up[prot] = PPROT_RWX_RWX;
break;
- case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
+ case PROT_NONE | PROT_WRITE | PROT_NONE:
kp[prot] = PPROT_N_RWX;
up[prot] = PPROT_RW_RW;
break;
- case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
+ case PROT_NONE | PROT_NONE | PROT_EXEC:
kp[prot] = PPROT_N_RX;
up[prot] = PPROT_X_X;
break;
- case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
+ case PROT_NONE | PROT_NONE | PROT_NONE:
kp[prot] = PPROT_N_RX;
up[prot] = PPROT_R_R;
break;
@@ -1655,8 +1655,8 @@ mmu_pagein(pm, va, prot)
struct regmap *rp;
struct segmap *sp;
- if (prot != VM_PROT_NONE)
- bits = PG_V | ((prot & VM_PROT_WRITE) ? PG_W : 0);
+ if (prot != PROT_NONE)
+ bits = PG_V | ((prot & PROT_WRITE) ? PG_W : 0);
else
bits = 0;
@@ -4331,17 +4331,17 @@ pmap_page_protect4_4c(struct vm_page *pg, vm_prot_t prot)
#ifdef DEBUG
if ((pmapdebug & PDB_CHANGEPROT) ||
- (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
+ (pmapdebug & PDB_REMOVE && prot == PROT_NONE))
printf("pmap_page_protect(0x%lx, 0x%x)\n", pg, prot);
#endif
pv = &pg->mdpage.pv_head;
/*
* Skip operations that do not take away write permission.
*/
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
return;
write_user_windows(); /* paranoia */
- if (prot & VM_PROT_READ) {
+ if (prot & PROT_READ) {
pv_changepte4_4c(pv, 0, PG_W);
return;
}
@@ -4514,10 +4514,10 @@ pmap_protect4_4c(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
struct regmap *rp;
struct segmap *sp;
- if (pm == NULL || prot & VM_PROT_WRITE)
+ if (pm == NULL || prot & PROT_WRITE)
return;
- if ((prot & VM_PROT_READ) == 0) {
+ if ((prot & PROT_READ) == 0) {
pmap_remove(pm, sva, eva);
return;
}
@@ -4628,9 +4628,9 @@ pmap_changeprot4_4c(pm, va, prot, wired)
va = trunc_page(va);
if (pm == pmap_kernel())
- newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
+ newprot = prot & PROT_WRITE ? PG_S|PG_W : PG_S;
else
- newprot = prot & VM_PROT_WRITE ? PG_W : 0;
+ newprot = prot & PROT_WRITE ? PG_W : 0;
vr = VA_VREG(va);
vs = VA_VSEG(va);
s = splvm(); /* conservative */
@@ -4728,17 +4728,17 @@ pmap_page_protect4m(struct vm_page *pg, vm_prot_t prot)
#ifdef DEBUG
if ((pmapdebug & PDB_CHANGEPROT) ||
- (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
+ (pmapdebug & PDB_REMOVE && prot == PROT_NONE))
printf("pmap_page_protect(0x%lx, 0x%x)\n", pg, prot);
#endif
pv = &pg->mdpage.pv_head;
/*
* Skip operations that do not take away write permission.
*/
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
return;
write_user_windows(); /* paranoia */
- if (prot & VM_PROT_READ) {
+ if (prot & PROT_READ) {
pv_changepte4m(pv, 0, PPROT_WRITE);
return;
}
@@ -4841,7 +4841,7 @@ pmap_protect4m(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
struct segmap *sp;
int newprot;
- if ((prot & VM_PROT_READ) == 0) {
+ if ((prot & PROT_READ) == 0) {
pmap_remove(pm, sva, eva);
return;
}
@@ -4851,7 +4851,7 @@ pmap_protect4m(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
* or PROT_WRITE, we don't attempt to guess what to do, just lower
* to read-only and let the real protection be faulted in.
*/
- newprot = pte_prot4m(pm, VM_PROT_READ);
+ newprot = pte_prot4m(pm, PROT_READ);
write_user_windows();
ctx = getcontext4m();
@@ -5031,7 +5031,7 @@ pmap_enter4_4c(pm, va, pa, prot, flags)
pv = NULL;
pteproto |= atop(pa) & PG_PFNUM;
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
pteproto |= PG_W;
ctx = getcontext4();
@@ -5350,7 +5350,7 @@ pmap_kenter_pa4_4c(va, pa, prot)
int pteproto, ctx;
pteproto = PG_S | PG_V | PMAP_T2PTE_4(pa);
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
pteproto |= PG_W;
pa &= ~PMAP_TNC_4;
@@ -5440,9 +5440,9 @@ pmap_enter4m(pm, va, pa, prot, flags)
panic("pmap_enter4m: can't fail, but did");
#endif
if (pv) {
- if (flags & VM_PROT_WRITE)
+ if (flags & PROT_WRITE)
pv->pv_flags |= PV_MOD4M;
- if (flags & VM_PROT_READ)
+ if (flags & PROT_READ)
pv->pv_flags |= PV_REF4M;
}
setcontext4m(ctx);
@@ -5684,7 +5684,7 @@ pmap_kenter_pa4m(va, pa, prot)
pteproto = ((pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0) |
PMAP_T2PTE_SRMMU(pa) | SRMMU_TEPTE |
- ((prot & VM_PROT_WRITE) ? PPROT_N_RWX : PPROT_N_RX);
+ ((prot & PROT_WRITE) ? PPROT_N_RWX : PPROT_N_RX);
pa &= ~PMAP_TNC_SRMMU;
@@ -6274,7 +6274,7 @@ pmap_remove_holes(struct vm_map *map)
(void)uvm_map(map, &shole, ehole - shole, NULL,
UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_SHARE,
+ UVM_MAPFLAG(PROT_NONE, PROT_NONE, UVM_INH_SHARE,
UVM_ADV_RANDOM,
UVM_FLAG_NOMERGE | UVM_FLAG_HOLE | UVM_FLAG_FIXED));
}
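The sparc_protection_init4m() hunk earlier in this file enumerates all eight combinations of the three permission bits. Because PROT_NONE is zero, spellings such as PROT_READ | PROT_NONE | PROT_NONE reduce to PROT_READ; the three-term form is kept only to preserve the original table layout. A minimal sketch of the indexing that loop relies on (illustrative only, assuming the standard <sys/mman.h> bit values):

/* prot ranges over 0..7, i.e. every subset of the three permission bits. */
for (prot = 0; prot < 8; prot++) {
	int r = (prot & PROT_READ) != 0;
	int w = (prot & PROT_WRITE) != 0;
	int x = (prot & PROT_EXEC) != 0;
	/* kp[prot] / up[prot] are chosen from (r, w, x) in the real code. */
}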
diff --git a/sys/arch/sparc/sparc/trap.c b/sys/arch/sparc/sparc/trap.c
index dbbb324fa8e..6dd2af60776 100644
--- a/sys/arch/sparc/sparc/trap.c
+++ b/sys/arch/sparc/sparc/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.68 2014/05/11 00:12:44 guenther Exp $ */
+/* $OpenBSD: trap.c,v 1.69 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: trap.c,v 1.58 1997/09/12 08:55:01 pk Exp $ */
/*
@@ -604,7 +604,7 @@ mem_access_fault(type, ser, v, pc, psr, tf)
v = pc;
if (VA_INHOLE(v))
goto fault;
- ftype = ser & SER_WRITE ? VM_PROT_WRITE : VM_PROT_READ;
+ ftype = ser & SER_WRITE ? PROT_WRITE : PROT_READ;
va = trunc_page(v);
if (psr & PSR_PS) {
#if defined(SUN4)
@@ -648,7 +648,7 @@ mem_access_fault(type, ser, v, pc, psr, tf)
*/
vm = p->p_vmspace;
rv = mmu_pagein(vm->vm_map.pmap, va,
- ser & SER_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
+ ser & SER_WRITE ? PROT_WRITE : PROT_READ);
if (rv < 0)
goto fault;
if (rv > 0)
@@ -677,7 +677,7 @@ mem_access_fault(type, ser, v, pc, psr, tf)
* entries for `wired' pages). Instead, we call
* mmu_pagein here to make sure the new PTE gets installed.
*/
- (void) mmu_pagein(vm->vm_map.pmap, va, VM_PROT_NONE);
+ (void) mmu_pagein(vm->vm_map.pmap, va, PROT_NONE);
} else {
/*
* Pagein failed. If doing copyin/out, return to onfault
@@ -702,8 +702,8 @@ kfault:
}
sv.sival_int = v;
- trapsignal(p, SIGSEGV, (ser & SER_WRITE) ? VM_PROT_WRITE :
- VM_PROT_READ, SEGV_MAPERR, sv);
+ trapsignal(p, SIGSEGV, (ser & SER_WRITE) ? PROT_WRITE :
+ PROT_READ, SEGV_MAPERR, sv);
}
out:
if ((psr & PSR_PS) == 0) {
@@ -792,11 +792,11 @@ mem_access_fault4m(type, sfsr, sfva, tf)
if ((sfsr & SFSR_AT_STORE)) {
/* stores are never text faults. */
- ftype = VM_PROT_WRITE;
+ ftype = PROT_WRITE;
} else {
- ftype = VM_PROT_READ;
+ ftype = PROT_READ;
if ((sfsr & SFSR_AT_TEXT) || (type == T_TEXTFAULT)) {
- ftype |= VM_PROT_EXECUTE;
+ ftype |= PROT_EXEC;
}
}
@@ -842,7 +842,7 @@ mem_access_fault4m(type, sfsr, sfva, tf)
*/
if (mmumod == SUN4M_MMU_HS) { /* On HS, we have va for both */
if (vm_fault(kernel_map, trunc_page(pc),
- VM_PROT_READ, 0))
+ PROT_READ, 0))
#ifdef DEBUG
printf("mem_access_fault: "
"can't pagein 1st text fault.\n")
diff --git a/sys/arch/sparc/sparc/vm_machdep.c b/sys/arch/sparc/sparc/vm_machdep.c
index 9e933403d2c..afcab31befd 100644
--- a/sys/arch/sparc/sparc/vm_machdep.c
+++ b/sys/arch/sparc/sparc/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.57 2014/07/12 18:44:43 tedu Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.58 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: vm_machdep.c,v 1.30 1997/03/10 23:55:40 pk Exp $ */
/*
@@ -205,7 +205,7 @@ dvma_mapin_space(map, va, len, canwait, space)
#endif
#endif
pmap_kenter_pa(tva, pa | PMAP_NC,
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
}
tva += PAGE_SIZE;
@@ -302,7 +302,7 @@ vmapbuf(struct buf *bp, vsize_t sz)
pa |= PMAP_NC;
pmap_enter(pmap_kernel(), kva, pa,
- VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
+ PROT_READ | PROT_WRITE, PMAP_WIRED);
uva += PAGE_SIZE;
kva += PAGE_SIZE;
diff --git a/sys/arch/sparc64/dev/iommu.c b/sys/arch/sparc64/dev/iommu.c
index e789becd742..da63933f805 100644
--- a/sys/arch/sparc64/dev/iommu.c
+++ b/sys/arch/sparc64/dev/iommu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: iommu.c,v 1.70 2014/10/26 18:17:16 kettenis Exp $ */
+/* $OpenBSD: iommu.c,v 1.71 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: iommu.c,v 1.47 2002/02/08 20:03:45 eeh Exp $ */
/*
@@ -188,8 +188,8 @@ iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase)
for (; m != NULL; m = TAILQ_NEXT(m,pageq)) {
pa = VM_PAGE_TO_PHYS(m);
pmap_enter(pmap_kernel(), va, pa | PMAP_NVC,
- VM_PROT_READ|VM_PROT_WRITE,
- VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
va += PAGE_SIZE;
}
pmap_update(pmap_kernel());
diff --git a/sys/arch/sparc64/sparc64/clock.c b/sys/arch/sparc64/sparc64/clock.c
index 70209032749..43457cf709a 100644
--- a/sys/arch/sparc64/sparc64/clock.c
+++ b/sys/arch/sparc64/sparc64/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.54 2014/07/12 18:44:43 tedu Exp $ */
+/* $OpenBSD: clock.c,v 1.55 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: clock.c,v 1.41 2001/07/24 19:29:25 eeh Exp $ */
/*
@@ -308,10 +308,10 @@ clock_bus_wenable(handle, onoff)
s = splhigh();
if (onoff)
prot = writers++ == 0 ?
- VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED : 0;
+ PROT_READ | PROT_WRITE | PMAP_WIRED : 0;
else
prot = --writers == 0 ?
- VM_PROT_READ | PMAP_WIRED : 0;
+ PROT_READ | PMAP_WIRED : 0;
splx(s);
if (prot) {
diff --git a/sys/arch/sparc64/sparc64/cpu.c b/sys/arch/sparc64/sparc64/cpu.c
index 976e74bfbe2..15ae546e82b 100644
--- a/sys/arch/sparc64/sparc64/cpu.c
+++ b/sys/arch/sparc64/sparc64/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.62 2014/03/29 18:09:30 guenther Exp $ */
+/* $OpenBSD: cpu.c,v 1.63 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: cpu.c,v 1.13 2001/05/26 21:27:15 chs Exp $ */
/*
@@ -147,7 +147,7 @@ alloc_cpuinfo(struct mainbus_attach_args *ma)
cpu0paddr += sz;
for (pa = pa0; pa < cpu0paddr; pa += PAGE_SIZE, va += PAGE_SIZE)
- pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
+ pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
diff --git a/sys/arch/sparc64/sparc64/emul.c b/sys/arch/sparc64/sparc64/emul.c
index 3cd9f33faa2..ee3a8f20b0e 100644
--- a/sys/arch/sparc64/sparc64/emul.c
+++ b/sys/arch/sparc64/sparc64/emul.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: emul.c,v 1.22 2011/07/11 15:40:47 guenther Exp $ */
+/* $OpenBSD: emul.c,v 1.23 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: emul.c,v 1.8 2001/06/29 23:58:40 eeh Exp $ */
/*-
@@ -394,7 +394,7 @@ emul_qf(int32_t insv, struct proc *p, union sigval sv, struct trapframe *tf)
segv:
KERNEL_LOCK();
- trapsignal(p, SIGSEGV, isload ? VM_PROT_READ : VM_PROT_WRITE,
+ trapsignal(p, SIGSEGV, isload ? PROT_READ : PROT_WRITE,
SEGV_MAPERR, sv);
KERNEL_UNLOCK();
return (0);
diff --git a/sys/arch/sparc64/sparc64/machdep.c b/sys/arch/sparc64/sparc64/machdep.c
index bfa6dbec3f1..ba74694247a 100644
--- a/sys/arch/sparc64/sparc64/machdep.c
+++ b/sys/arch/sparc64/sparc64/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.170 2014/10/25 16:58:59 kettenis Exp $ */
+/* $OpenBSD: machdep.c,v 1.171 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: machdep.c,v 1.108 2001/07/24 19:30:14 eeh Exp $ */
/*-
@@ -825,7 +825,7 @@ printf("starting dump, blkno %lld\n", (long long)blkno);
if (i && (i % (1024*1024)) == 0)
printf("%lld ", i / (1024*1024));
(void) pmap_enter(pmap_kernel(), dumpspace, maddr,
- VM_PROT_READ, VM_PROT_READ|PMAP_WIRED);
+ PROT_READ, PROT_READ | PMAP_WIRED);
pmap_update(pmap_kernel());
error = (*dump)(dumpdev, blkno,
(caddr_t)dumpspace, (int)n);
@@ -1476,8 +1476,8 @@ _bus_dmamem_map(t, t0, segs, nsegs, size, kvap, flags)
#endif
addr = VM_PAGE_TO_PHYS(m);
error = pmap_enter(pmap_kernel(), va, addr | cbit,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
- VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
if (error) {
pmap_update(pmap_kernel());
km_free((void *)sva, ssize, &kv_any, &kp_none);
@@ -1596,7 +1596,7 @@ sparc_bus_map(bus_space_tag_t t, bus_space_tag_t t0, bus_addr_t addr,
vaddr_t va;
u_int64_t pa;
paddr_t pm_flags = 0;
- vm_prot_t pm_prot = VM_PROT_READ;
+ vm_prot_t pm_prot = PROT_READ;
if (flags & BUS_SPACE_MAP_PROMADDRESS) {
hp->bh_ptr = addr;
@@ -1657,7 +1657,7 @@ sparc_bus_map(bus_space_tag_t t, bus_space_tag_t t0, bus_addr_t addr,
pa = trunc_page(addr);
if ((flags & BUS_SPACE_MAP_READONLY) == 0)
- pm_prot |= VM_PROT_WRITE;
+ pm_prot |= PROT_WRITE;
#ifdef BUS_SPACE_DEBUG
{ /* scope */
@@ -1728,7 +1728,7 @@ sparc_bus_protect(bus_space_tag_t t, bus_space_tag_t t0, bus_space_handle_t h,
}
prot = (flags & BUS_SPACE_MAP_READONLY) ?
- VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE;
+ PROT_READ : PROT_READ | PROT_WRITE;
if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0)
pm_flags |= PMAP_NC;
diff --git a/sys/arch/sparc64/sparc64/mdesc.c b/sys/arch/sparc64/sparc64/mdesc.c
index 77ce1cb528c..2d905f4ef72 100644
--- a/sys/arch/sparc64/sparc64/mdesc.c
+++ b/sys/arch/sparc64/sparc64/mdesc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mdesc.c,v 1.5 2014/10/24 21:49:34 kettenis Exp $ */
+/* $OpenBSD: mdesc.c,v 1.6 2014/11/16 12:30:59 deraadt Exp $ */
/*
* Copyright (c) 2009 Mark Kettenis
*
@@ -69,8 +69,9 @@ again:
m = TAILQ_FIRST(&mlist);
for (; m != NULL; m = TAILQ_NEXT(m,pageq)) {
pa = VM_PAGE_TO_PHYS(m);
- pmap_enter(pmap_kernel(), va, pa, VM_PROT_READ|VM_PROT_WRITE,
- VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+ pmap_enter(pmap_kernel(), va, pa,
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
va += PAGE_SIZE;
}
pmap_update(pmap_kernel());
diff --git a/sys/arch/sparc64/sparc64/mem.c b/sys/arch/sparc64/sparc64/mem.c
index a38bec453f2..d85f651f6a2 100644
--- a/sys/arch/sparc64/sparc64/mem.c
+++ b/sys/arch/sparc64/sparc64/mem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mem.c,v 1.11 2007/11/14 20:43:12 kettenis Exp $ */
+/* $OpenBSD: mem.c,v 1.12 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: mem.c,v 1.18 2001/04/24 04:31:12 thorpej Exp $ */
/*
@@ -129,8 +129,8 @@ mmrw(dev, uio, flags)
error = EFAULT;
goto unlock;
}
- prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
- VM_PROT_WRITE;
+ prot = uio->uio_rw == UIO_READ ? PROT_READ :
+ PROT_WRITE;
pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
trunc_page(v), prot, prot|PMAP_WIRED);
pmap_update(pmap_kernel());
diff --git a/sys/arch/sparc64/sparc64/pmap.c b/sys/arch/sparc64/sparc64/pmap.c
index 5b495837eb3..d550d591a8a 100644
--- a/sys/arch/sparc64/sparc64/pmap.c
+++ b/sys/arch/sparc64/sparc64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.82 2014/07/08 17:19:25 deraadt Exp $ */
+/* $OpenBSD: pmap.c,v 1.83 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.107 2001/08/31 16:47:41 eeh Exp $ */
#undef NO_VCACHE /* Don't forget the locked TLB in dostart */
/*
@@ -1971,24 +1971,24 @@ pmap_kenter_pa(va, pa, prot)
tte.tag = TSB_TAG(0,pm->pm_ctx,va);
if (CPU_ISSUN4V) {
tte.data = SUN4V_TSB_DATA(0, PGSZ_8K, pa, 1 /* Privileged */,
- (VM_PROT_WRITE & prot), 1, 0, 1, 0);
+ (PROT_WRITE & prot), 1, 0, 1, 0);
/*
* We don't track modification on kenter mappings.
*/
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
tte.data |= SUN4V_TLB_REAL_W|SUN4V_TLB_W;
- if (prot & VM_PROT_EXECUTE)
+ if (prot & PROT_EXEC)
tte.data |= SUN4V_TLB_EXEC;
tte.data |= SUN4V_TLB_TSB_LOCK; /* wired */
} else {
tte.data = SUN4U_TSB_DATA(0, PGSZ_8K, pa, 1 /* Privileged */,
- (VM_PROT_WRITE & prot), 1, 0, 1, 0);
+ (PROT_WRITE & prot), 1, 0, 1, 0);
/*
* We don't track modification on kenter mappings.
*/
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
tte.data |= SUN4U_TLB_REAL_W|SUN4U_TLB_W;
- if (prot & VM_PROT_EXECUTE)
+ if (prot & PROT_EXEC)
tte.data |= SUN4U_TLB_EXEC;
tte.data |= SUN4U_TLB_TSB_LOCK; /* wired */
}
@@ -2131,13 +2131,13 @@ pmap_enter(pm, va, pa, prot, flags)
if (pv != NULL) {
aliased = (pv->pv_va&(PV_ALIAS|PV_NVC));
#ifdef DIAGNOSTIC
- if ((flags & VM_PROT_ALL) & ~prot)
+ if ((flags & PROT_MASK) & ~prot)
panic("pmap_enter: access_type exceeds prot");
#endif
/* If we don't have the traphandler do it, set the ref/mod bits now */
- if (flags & VM_PROT_ALL)
+ if (flags & PROT_MASK)
pv->pv_va |= PV_REF;
- if (flags & VM_PROT_WRITE)
+ if (flags & PROT_WRITE)
pv->pv_va |= PV_MOD;
pv->pv_va |= pmap_tte2flags(tte.data);
#ifdef DEBUG
@@ -2158,21 +2158,21 @@ pmap_enter(pm, va, pa, prot, flags)
#endif
if (CPU_ISSUN4V) {
tte.data = SUN4V_TSB_DATA(0, size, pa, pm == pmap_kernel(),
- (flags & VM_PROT_WRITE), (!(pa & PMAP_NC)),
+ (flags & PROT_WRITE), (!(pa & PMAP_NC)),
aliased, 1, (pa & PMAP_LITTLE));
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
tte.data |= SUN4V_TLB_REAL_W;
- if (prot & VM_PROT_EXECUTE)
+ if (prot & PROT_EXEC)
tte.data |= SUN4V_TLB_EXEC;
if (wired)
tte.data |= SUN4V_TLB_TSB_LOCK;
} else {
tte.data = SUN4U_TSB_DATA(0, size, pa, pm == pmap_kernel(),
- (flags & VM_PROT_WRITE), (!(pa & PMAP_NC)),
+ (flags & PROT_WRITE), (!(pa & PMAP_NC)),
aliased, 1, (pa & PMAP_LITTLE));
- if (prot & VM_PROT_WRITE)
+ if (prot & PROT_WRITE)
tte.data |= SUN4U_TLB_REAL_W;
- if (prot & VM_PROT_EXECUTE)
+ if (prot & PROT_EXEC)
tte.data |= SUN4U_TLB_EXEC;
if (wired)
tte.data |= SUN4U_TLB_TSB_LOCK;
@@ -2318,11 +2318,11 @@ pmap_protect(pm, sva, eva, prot)
KDASSERT(pm != pmap_kernel() || eva < INTSTACK || sva > EINTSTACK);
KDASSERT(pm != pmap_kernel() || eva < kdata || sva > ekdata);
- if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
- (VM_PROT_WRITE|VM_PROT_EXECUTE))
+ if ((prot & (PROT_WRITE | PROT_EXEC)) ==
+ (PROT_WRITE | PROT_EXEC))
return;
- if (prot == VM_PROT_NONE) {
+ if (prot == PROT_NONE) {
pmap_remove(pm, sva, eva);
return;
}
@@ -2362,14 +2362,14 @@ pmap_protect(pm, sva, eva, prot)
}
/* Just do the pmap and TSB, not the pv_list */
if (CPU_ISSUN4V) {
- if ((prot & VM_PROT_WRITE) == 0)
+ if ((prot & PROT_WRITE) == 0)
data &= ~(SUN4V_TLB_W|SUN4V_TLB_REAL_W);
- if ((prot & VM_PROT_EXECUTE) == 0)
+ if ((prot & PROT_EXEC) == 0)
data &= ~(SUN4V_TLB_EXEC);
} else {
- if ((prot & VM_PROT_WRITE) == 0)
+ if ((prot & PROT_WRITE) == 0)
data &= ~(SUN4U_TLB_W|SUN4U_TLB_REAL_W);
- if ((prot & VM_PROT_EXECUTE) == 0)
+ if ((prot & PROT_EXEC) == 0)
data &= ~(SUN4U_TLB_EXEC);
}
KDASSERT((data & TLB_NFO) == 0);
@@ -3018,28 +3018,28 @@ pmap_page_protect(pg, prot)
(unsigned long long)pa, prot);
#endif
- if (prot & VM_PROT_WRITE) {
+ if (prot & PROT_WRITE) {
pv_check();
return;
}
- if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
+ if (prot & (PROT_READ | PROT_EXEC)) {
/* copy_on_write */
set = TLB_V;
if (CPU_ISSUN4V) {
clear = SUN4V_TLB_REAL_W|SUN4V_TLB_W;
- if (VM_PROT_EXECUTE & prot)
+ if (PROT_EXEC & prot)
set |= SUN4V_TLB_EXEC;
else
clear |= SUN4V_TLB_EXEC;
} else {
clear = SUN4U_TLB_REAL_W|SUN4U_TLB_W;
- if (VM_PROT_EXECUTE & prot)
+ if (PROT_EXEC & prot)
set |= SUN4U_TLB_EXEC;
else
clear |= SUN4U_TLB_EXEC;
- if (VM_PROT_EXECUTE == prot)
+ if (PROT_EXEC == prot)
set |= SUN4U_TLB_EXEC_ONLY;
}
@@ -3639,8 +3639,8 @@ pmap_remove_holes(struct vm_map *map)
return;
(void)uvm_map(map, &shole, ehole - shole, NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_SHARE,
- UVM_ADV_RANDOM,
+ UVM_MAPFLAG(PROT_NONE, PROT_NONE, UVM_INH_SHARE,
+ POSIX_MADV_RANDOM,
UVM_FLAG_NOMERGE | UVM_FLAG_HOLE | UVM_FLAG_FIXED));
}
@@ -3692,8 +3692,8 @@ pmap_testout()
pmap_get_page(&pa, NULL, pmap_kernel());
pg = PHYS_TO_VM_PAGE(pa);
- pmap_enter(pmap_kernel(), va, pa, VM_PROT_READ|VM_PROT_WRITE,
- VM_PROT_READ|VM_PROT_WRITE);
+ pmap_enter(pmap_kernel(), va, pa, PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
/* Now clear reference and modify */
@@ -3754,11 +3754,11 @@ pmap_testout()
ref, mod);
/* Check pmap_protect() */
- pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
+ pmap_protect(pmap_kernel(), va, va+1, PROT_READ);
pmap_update(pmap_kernel());
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
- printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
+ printf("pmap_protect(PROT_READ): ref %d, mod %d\n",
ref, mod);
/* Now clear reference and modify */
@@ -3769,8 +3769,8 @@ pmap_testout()
ref, mod);
/* Modify page */
- pmap_enter(pmap_kernel(), va, pa, VM_PROT_READ|VM_PROT_WRITE,
- VM_PROT_READ|VM_PROT_WRITE);
+ pmap_enter(pmap_kernel(), va, pa, PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
*loc = 1;
@@ -3780,11 +3780,11 @@ pmap_testout()
ref, mod);
/* Check pmap_protect() */
- pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
+ pmap_protect(pmap_kernel(), va, va+1, PROT_NONE);
pmap_update(pmap_kernel());
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
- printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
+ printf("pmap_protect(PROT_READ): ref %d, mod %d\n",
ref, mod);
/* Now clear reference and modify */
@@ -3795,8 +3795,8 @@ pmap_testout()
ref, mod);
/* Modify page */
- pmap_enter(pmap_kernel(), va, pa, VM_PROT_READ|VM_PROT_WRITE,
- VM_PROT_READ|VM_PROT_WRITE);
+ pmap_enter(pmap_kernel(), va, pa, PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
*loc = 1;
@@ -3806,7 +3806,7 @@ pmap_testout()
ref, mod);
/* Check pmap_pag_protect() */
- pmap_page_protect(pg, VM_PROT_READ);
+ pmap_page_protect(pg, PROT_READ);
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("pmap_protect(): ref %d, mod %d\n",
@@ -3821,8 +3821,8 @@ pmap_testout()
/* Modify page */
- pmap_enter(pmap_kernel(), va, pa, VM_PROT_READ|VM_PROT_WRITE,
- VM_PROT_READ|VM_PROT_WRITE);
+ pmap_enter(pmap_kernel(), va, pa, PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
*loc = 1;
@@ -3832,7 +3832,7 @@ pmap_testout()
ref, mod);
/* Check pmap_pag_protect() */
- pmap_page_protect(pg, VM_PROT_NONE);
+ pmap_page_protect(pg, PROT_NONE);
ref = pmap_is_referenced(pg);
mod = pmap_is_modified(pg);
printf("pmap_protect(): ref %d, mod %d\n",
diff --git a/sys/arch/sparc64/sparc64/trap.c b/sys/arch/sparc64/sparc64/trap.c
index 70a3411b08e..f4c9067e423 100644
--- a/sys/arch/sparc64/sparc64/trap.c
+++ b/sys/arch/sparc64/sparc64/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.84 2014/05/11 00:12:44 guenther Exp $ */
+/* $OpenBSD: trap.c,v 1.85 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: trap.c,v 1.73 2001/08/09 01:03:01 eeh Exp $ */
/*
@@ -794,10 +794,10 @@ data_access_fault(tf, type, pc, addr, sfva, sfsr)
*/
if (type == T_FDMMU_MISS || (sfsr & SFSR_FV) == 0) {
/* Punt */
- access_type = VM_PROT_READ;
+ access_type = PROT_READ;
} else {
- access_type = (sfsr & SFSR_W) ? VM_PROT_READ|VM_PROT_WRITE
- : VM_PROT_READ;
+ access_type = (sfsr & SFSR_W) ? PROT_READ | PROT_WRITE
+ : PROT_READ;
}
if (tstate & TSTATE_PRIV) {
KERNEL_LOCK();
@@ -975,7 +975,7 @@ data_access_error(tf, type, afva, afsr, sfva, sfsr)
}
KERNEL_LOCK();
- trapsignal(p, SIGSEGV, VM_PROT_READ|VM_PROT_WRITE, SEGV_MAPERR, sv);
+ trapsignal(p, SIGSEGV, PROT_READ | PROT_WRITE, SEGV_MAPERR, sv);
KERNEL_UNLOCK();
out:
@@ -1016,7 +1016,7 @@ text_access_fault(tf, type, pc, sfsr)
/* Now munch on protections... */
- access_type = VM_PROT_EXECUTE;
+ access_type = PROT_EXEC;
if (tstate & TSTATE_PRIV) {
extern int trap_trace_dis;
trap_trace_dis = 1; /* Disable traptrace for printf */
@@ -1124,7 +1124,7 @@ text_access_error(tf, type, pc, sfsr, afva, afsr)
va = trunc_page(pc);
/* Now munch on protections... */
- access_type = VM_PROT_EXECUTE;
+ access_type = PROT_EXEC;
if (tstate & TSTATE_PRIV) {
extern int trap_trace_dis;
trap_trace_dis = 1; /* Disable traptrace for printf */
diff --git a/sys/arch/sparc64/sparc64/vm_machdep.c b/sys/arch/sparc64/sparc64/vm_machdep.c
index fde01c2ebb1..61494dd2856 100644
--- a/sys/arch/sparc64/sparc64/vm_machdep.c
+++ b/sys/arch/sparc64/sparc64/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.31 2014/10/24 20:26:58 kettenis Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.32 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: vm_machdep.c,v 1.38 2001/06/30 00:02:20 eeh Exp $ */
/*
@@ -104,9 +104,9 @@ vmapbuf(struct buf *bp, vsize_t len)
panic("vmapbuf: null page frame");
/* Now map the page into kernel space. */
pmap_enter(pmap_kernel(), kva,
- pa /* | PMAP_NC */,
- VM_PROT_READ|VM_PROT_WRITE,
- VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+ pa /* | PMAP_NC */,
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
uva += PAGE_SIZE;
kva += PAGE_SIZE;
diff --git a/sys/arch/vax/vax/bus_dma.c b/sys/arch/vax/vax/bus_dma.c
index 76f8c39873c..e6519b5d9d4 100644
--- a/sys/arch/vax/vax/bus_dma.c
+++ b/sys/arch/vax/vax/bus_dma.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: bus_dma.c,v 1.30 2014/09/13 16:06:37 doug Exp $ */
+/* $OpenBSD: bus_dma.c,v 1.31 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: bus_dma.c,v 1.5 1999/11/13 00:32:20 thorpej Exp $ */
/*-
@@ -444,7 +444,7 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
if (vax_boardtype == VAX_BTYP_43) {
pmap_map((vaddr_t)*kvap, segs[0].ds_addr|KA43_DIAGMEM,
(segs[0].ds_addr|KA43_DIAGMEM) + size,
- VM_PROT_READ|VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
}
return 0;
}
@@ -467,8 +467,8 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
if (vax_boardtype == VAX_BTYP_43)
addr |= KA43_DIAGMEM;
error = pmap_enter(pmap_kernel(), va, addr,
- VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ |
- VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
if (error) {
pmap_update(pmap_kernel());
km_free((void *)sva, ssize, &kv_any, &kp_none);
diff --git a/sys/arch/vax/vax/pmap.c b/sys/arch/vax/vax/pmap.c
index 4eb3160b1ce..bb9ceaa4323 100644
--- a/sys/arch/vax/vax/pmap.c
+++ b/sys/arch/vax/vax/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.69 2014/05/24 20:13:52 guenther Exp $ */
+/* $OpenBSD: pmap.c,v 1.70 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: pmap.c,v 1.74 1999/11/13 21:32:25 matt Exp $ */
/*
* Copyright (c) 1994, 1998, 1999, 2003 Ludd, University of Lule}, Sweden.
@@ -283,7 +283,7 @@ pmap_bootstrap()
* memory mapped in. This makes some mm routines both simpler
* and faster, but takes ~0.75% more memory.
*/
- pmap_map(KERNBASE, 0, avail_end, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_map(KERNBASE, 0, avail_end, PROT_READ | PROT_WRITE);
/*
* Kernel code is always readable for user, it must be because
* of the emulation code that is somewhere in there.
@@ -860,8 +860,8 @@ pmap_remove_holes(struct vm_map *map)
return;
(void)uvm_map(map, &shole, ehole - shole, NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_SHARE,
- UVM_ADV_RANDOM,
+ UVM_MAPFLAG(PROT_NONE, PROT_NONE, UVM_INH_SHARE,
+ POSIX_MADV_RANDOM,
UVM_FLAG_NOMERGE | UVM_FLAG_HOLE | UVM_FLAG_FIXED));
}
@@ -969,7 +969,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
pmap_kernel()->pm_stats.resident_count++;
pmap_kernel()->pm_stats.wired_count++;
}
- mapin8(ptp, PG_V | ((prot & VM_PROT_WRITE) ? PG_KW : PG_KR) |
+ mapin8(ptp, PG_V | ((prot & PROT_WRITE) ? PG_KW : PG_KR) |
PG_PFNUM(pa) | PG_W | PG_SREF);
if (opte & PG_V) {
mtpr(0, PR_TBIA);
@@ -1038,7 +1038,7 @@ pmap_enter(struct pmap *pmap, vaddr_t v, paddr_t p, vm_prot_t prot, int flags)
switch (SEGTYPE(v)) {
case SYSSEG:
pteptr = Sysmap + vax_btop(v - KERNBASE);
- newpte = prot & VM_PROT_WRITE ? PG_KW : PG_KR;
+ newpte = prot & PROT_WRITE ? PG_KW : PG_KR;
break;
case P0SEG:
pteidx = vax_btop(v);
@@ -1047,7 +1047,7 @@ pmap_enter(struct pmap *pmap, vaddr_t v, paddr_t p, vm_prot_t prot, int flags)
return ENOMEM;
}
pteptr = pmap->pm_p0br + pteidx;
- newpte = prot & VM_PROT_WRITE ? PG_RW : PG_RO;
+ newpte = prot & PROT_WRITE ? PG_RW : PG_RO;
break;
case P1SEG:
pteidx = vax_btop(v - 0x40000000);
@@ -1056,7 +1056,7 @@ pmap_enter(struct pmap *pmap, vaddr_t v, paddr_t p, vm_prot_t prot, int flags)
return ENOMEM;
}
pteptr = pmap->pm_p1br + pteidx;
- newpte = prot & VM_PROT_WRITE ? PG_RW : PG_RO;
+ newpte = prot & PROT_WRITE ? PG_RW : PG_RO;
break;
default:
panic("bad seg");
@@ -1152,11 +1152,11 @@ pmap_enter(struct pmap *pmap, vaddr_t v, paddr_t p, vm_prot_t prot, int flags)
pmap->pm_stats.wired_count++;
}
- if (flags & VM_PROT_READ) {
+ if (flags & PROT_READ) {
pg->mdpage.pv_attr |= PG_V;
newpte |= PG_V;
}
- if (flags & VM_PROT_WRITE)
+ if (flags & PROT_WRITE)
pg->mdpage.pv_attr |= PG_M;
if (flags & PMAP_WIRED)
@@ -1184,7 +1184,7 @@ pmap_map(vaddr_t va, paddr_t pstart, paddr_t pend, int prot)
pentry = Sysmap + vax_btop(va);
for (count = pstart; count < pend; count += VAX_NBPG) {
*pentry++ = vax_btop(count) | PG_V |
- (prot & VM_PROT_WRITE ? PG_KW : PG_KR);
+ (prot & PROT_WRITE ? PG_KW : PG_KR);
}
return va + (count - pstart) + KERNBASE;
}
@@ -1257,7 +1257,7 @@ pmap_protect(struct pmap *pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
#endif
start &= ~KERNBASE;
end &= ~KERNBASE;
- pr = (prot & VM_PROT_WRITE ? PG_KW : PG_KR);
+ pr = (prot & PROT_WRITE ? PG_KW : PG_KR);
break;
case P1SEG:
@@ -1270,7 +1270,7 @@ pmap_protect(struct pmap *pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
pt = pmap->pm_p1br;
start &= 0x3fffffff;
end = (end == KERNBASE ? 0x40000000 : end & 0x3fffffff);
- pr = (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
+ pr = (prot & PROT_WRITE ? PG_RW : PG_RO);
break;
case P0SEG:
@@ -1284,7 +1284,7 @@ pmap_protect(struct pmap *pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
if (vax_btop(end) > lr)
end = lr * VAX_NBPG;
pt = pmap->pm_p0br;
- pr = (prot & VM_PROT_WRITE ? PG_RW : PG_RO);
+ pr = (prot & PROT_WRITE ? PG_RW : PG_RO);
break;
default:
panic("unsupported segtype: %d", (int)SEGTYPE(start));
@@ -1301,7 +1301,7 @@ pmap_protect(struct pmap *pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
while (pts < ptd) {
if ((*kvtopte((vaddr_t)pts) & PG_FRAME) != 0 && *pts != PG_NV) {
- if (prot == VM_PROT_NONE) {
+ if (prot == PROT_NONE) {
pmap->pm_stats.resident_count--;
if ((*pts & PG_W))
pmap->pm_stats.wired_count--;
@@ -1519,11 +1519,11 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
if (pg->mdpage.pv_head == NULL)
return;
- if (prot == VM_PROT_ALL) /* 'cannot happen' */
+ if (prot == PROT_MASK) /* 'cannot happen' */
return;
RECURSESTART;
- if (prot == VM_PROT_NONE) {
+ if (prot == PROT_NONE) {
s = splvm();
npv = pg->mdpage.pv_head;
pg->mdpage.pv_head = NULL;
diff --git a/sys/arch/vax/vax/trap.c b/sys/arch/vax/vax/trap.c
index bc6f28b2f89..7260b4ec777 100644
--- a/sys/arch/vax/vax/trap.c
+++ b/sys/arch/vax/vax/trap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: trap.c,v 1.53 2014/05/11 00:12:44 guenther Exp $ */
+/* $OpenBSD: trap.c,v 1.54 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: trap.c,v 1.47 1999/08/21 19:26:20 matt Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@@ -187,9 +187,9 @@ if(faultdebug)printf("trap accflt type %lx, code %lx, pc %lx, psl %lx\n",
map = &p->p_vmspace->vm_map;
if (frame->trap & T_WRITE)
- ftype = VM_PROT_WRITE|VM_PROT_READ;
+ ftype = PROT_READ | PROT_WRITE;
else
- ftype = VM_PROT_READ;
+ ftype = PROT_READ;
addr = trunc_page((vaddr_t)frame->code);
rv = uvm_fault(map, addr, 0, ftype);
diff --git a/sys/arch/vax/vax/vm_machdep.c b/sys/arch/vax/vax/vm_machdep.c
index 851b05c3a9e..7ab91cfa425 100644
--- a/sys/arch/vax/vax/vm_machdep.c
+++ b/sys/arch/vax/vax/vm_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vm_machdep.c,v 1.41 2013/11/24 22:08:25 miod Exp $ */
+/* $OpenBSD: vm_machdep.c,v 1.42 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: vm_machdep.c,v 1.67 2000/06/29 07:14:34 mrg Exp $ */
/*
@@ -291,7 +291,8 @@ vmapbuf(bp, len)
&pa) == FALSE)
panic("vmapbuf: null page frame");
pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
- VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
faddr += PAGE_SIZE;
taddr += PAGE_SIZE;
}
diff --git a/sys/arch/zaurus/zaurus/zaurus_machdep.c b/sys/arch/zaurus/zaurus/zaurus_machdep.c
index 9a99de421e8..998e67be22e 100644
--- a/sys/arch/zaurus/zaurus/zaurus_machdep.c
+++ b/sys/arch/zaurus/zaurus/zaurus_machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: zaurus_machdep.c,v 1.52 2014/09/20 09:28:25 kettenis Exp $ */
+/* $OpenBSD: zaurus_machdep.c,v 1.53 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: lubbock_machdep.c,v 1.2 2003/07/15 00:25:06 lukem Exp $ */
/*
@@ -432,7 +432,7 @@ map_io_area(paddr_t pagedir)
for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_S_SIZE)
pmap_map_section(pagedir, l1_sec_table[loop].va + sz,
l1_sec_table[loop].pa + sz,
- VM_PROT_READ|VM_PROT_WRITE,
+ PROT_READ | PROT_WRITE,
l1_sec_table[loop].flags);
++loop;
}
@@ -459,7 +459,7 @@ bootstrap_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
startpa = trunc_page(bpa);
pmap_map_section((vaddr_t)pagedir, va, startpa,
- VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
+ PROT_READ | PROT_WRITE, PTE_NOCACHE);
cpu_tlb_flushD();
*bshp = (bus_space_handle_t)(va + (bpa - startpa));
@@ -939,10 +939,10 @@ initarm(void *arg0, void *arg1, void *arg2)
logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
physical_start + logical, textsize,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
physical_start + logical, totalsize - textsize,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ PROT_READ | PROT_WRITE, PTE_CACHE);
}
#ifdef VERBOSE_INIT_ARM
@@ -951,21 +951,21 @@ initarm(void *arg0, void *arg1, void *arg2)
/* Map the stack pages */
pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
- IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ IRQ_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
- ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ ABT_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
- UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ UND_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
- UPAGES * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
+ UPAGES * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
- L1_TABLE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);
+ L1_TABLE_SIZE, PROT_READ | PROT_WRITE, PTE_PAGETABLE);
for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
- VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
+ PROT_READ | PROT_WRITE, PTE_PAGETABLE);
}
/* Map the Mini-Data cache clean area. */
@@ -977,10 +977,10 @@ initarm(void *arg0, void *arg1, void *arg2)
/* MULTI-ICE requires that page 0 is NC/NB so that it can download the
* cache-clean code there. */
pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
- VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
+ PROT_READ | PROT_WRITE, PTE_NOCACHE);
#else
pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ PROT_READ | PROT_WRITE, PTE_CACHE);
#endif
/*
diff --git a/sys/compat/linux/linux_misc.c b/sys/compat/linux/linux_misc.c
index bf37e3d101f..357d8de61e7 100644
--- a/sys/compat/linux/linux_misc.c
+++ b/sys/compat/linux/linux_misc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: linux_misc.c,v 1.91 2014/09/08 01:47:06 guenther Exp $ */
+/* $OpenBSD: linux_misc.c,v 1.92 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: linux_misc.c,v 1.27 1996/05/20 01:59:21 fvdl Exp $ */
/*-
@@ -810,8 +810,8 @@ linux_to_bsd_mmap_args(cma, uap)
SCARG(cma, addr) = (void *)SCARG(uap, addr);
SCARG(cma, len) = SCARG(uap, len);
SCARG(cma, prot) = SCARG(uap, prot);
- if (SCARG(cma, prot) & VM_PROT_WRITE) /* XXX */
- SCARG(cma, prot) |= VM_PROT_READ;
+ if (SCARG(cma, prot) & PROT_WRITE) /* XXX */
+ SCARG(cma, prot) |= PROT_READ;
SCARG(cma, flags) = flags;
SCARG(cma, fd) = flags & MAP_ANON ? -1 : SCARG(uap, fd);
SCARG(cma, pad) = 0;
diff --git a/sys/ddb/db_watch.c b/sys/ddb/db_watch.c
index 5d777d60199..960a4af0aa4 100644
--- a/sys/ddb/db_watch.c
+++ b/sys/ddb/db_watch.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: db_watch.c,v 1.12 2014/09/14 14:17:24 jsg Exp $ */
+/* $OpenBSD: db_watch.c,v 1.13 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: db_watch.c,v 1.9 1996/03/30 22:30:12 christos Exp $ */
/*
@@ -188,7 +188,7 @@ db_set_watchpoints(void)
for (watch = db_watchpoint_list; watch != 0;
watch = watch->link)
pmap_protect(pmap_kernel(), trunc_page(watch->loaddr),
- round_page(watch->hiaddr), VM_PROT_READ);
+ round_page(watch->hiaddr), PROT_READ);
pmap_update(pmap_kernel());
db_watchpoints_inserted = TRUE;
}
diff --git a/sys/dev/audio.c b/sys/dev/audio.c
index d522504f859..a8aa7e11b6a 100644
--- a/sys/dev/audio.c
+++ b/sys/dev/audio.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: audio.c,v 1.123 2014/09/14 14:17:24 jsg Exp $ */
+/* $OpenBSD: audio.c,v 1.124 2014/11/16 12:30:59 deraadt Exp $ */
/* $NetBSD: audio.c,v 1.119 1999/11/09 16:50:47 augustss Exp $ */
/*
@@ -2028,18 +2028,18 @@ audio_mmap(dev_t dev, off_t off, int prot)
* The idea here was to use the protection to determine if
* we are mapping the read or write buffer, but it fails.
* The VM system is broken in (at least) two ways.
- * 1) If you map memory VM_PROT_WRITE you SIGSEGV
- * when writing to it, so VM_PROT_READ|VM_PROT_WRITE
+ * 1) If you map memory PROT_WRITE you SIGSEGV
+ * when writing to it, so PROT_READ|PROT_WRITE
* has to be used for mmapping the play buffer.
- * 2) Even if calling mmap() with VM_PROT_READ|VM_PROT_WRITE
- * audio_mmap will get called at some point with VM_PROT_READ
+ * 2) Even if calling mmap() with PROT_READ|PROT_WRITE
+ * audio_mmap will get called at some point with PROT_READ
* only.
* So, alas, we always map the play buffer for now.
*/
- if (prot == (VM_PROT_READ|VM_PROT_WRITE) ||
- prot == VM_PROT_WRITE)
+ if (prot == (PROT_READ | PROT_WRITE) ||
+ prot == PROT_WRITE)
cb = &sc->sc_pr;
- else if (prot == VM_PROT_READ)
+ else if (prot == PROT_READ)
cb = &sc->sc_rr;
else
return -1;
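The audio_mmap() comment above explains why only the play buffer is handed out, and why it has to be mapped both readable and writable. A minimal user-space sketch of the matching mmap(2) call, assuming a /dev/audio node and a 64 KB play buffer (both illustrative, not taken from this commit):

	#include <sys/mman.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		size_t len = 65536;			/* assumed play-buffer size */
		int fd = open("/dev/audio", O_RDWR);	/* assumed device node */
		void *buf;

		if (fd == -1) {
			perror("open");
			return (1);
		}
		/* PROT_WRITE alone would fault on write, per the comment above. */
		buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (buf == MAP_FAILED) {
			perror("mmap");
			close(fd);
			return (1);
		}
		printf("play buffer mapped at %p\n", buf);
		munmap(buf, len);
		close(fd);
		return (0);
	}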
diff --git a/sys/dev/ic/sti.c b/sys/dev/ic/sti.c
index c77411c9d94..36447c2f344 100644
--- a/sys/dev/ic/sti.c
+++ b/sys/dev/ic/sti.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sti.c,v 1.73 2014/08/30 14:42:05 miod Exp $ */
+/* $OpenBSD: sti.c,v 1.74 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright (c) 2000-2003 Michael Shalayeff
@@ -337,7 +337,7 @@ sti_rom_setup(struct sti_rom *rom, bus_space_tag_t iot, bus_space_tag_t memt,
STI_DISABLE_ROM(rom->rom_softc);
if ((error = uvm_map_protect(kernel_map, rom->rom_code,
- rom->rom_code + round_page(size), UVM_PROT_RX, FALSE))) {
+ rom->rom_code + round_page(size), PROT_READ | PROT_EXEC, FALSE))) {
printf(": uvm_map_protect failed (%d)\n", error);
uvm_km_free(kernel_map, rom->rom_code, round_page(size));
return (error);
diff --git a/sys/dev/isa/aha.c b/sys/dev/isa/aha.c
index 3b142cbaa37..a2e3e6f1e8a 100644
--- a/sys/dev/isa/aha.c
+++ b/sys/dev/isa/aha.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: aha.c,v 1.77 2014/09/14 14:17:25 jsg Exp $ */
+/* $OpenBSD: aha.c,v 1.78 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: aha.c,v 1.11 1996/05/12 23:51:23 mycroft Exp $ */
#undef AHADIAG
@@ -1079,15 +1079,15 @@ aha_init(sc)
TAILQ_INIT(&pglist);
if (uvm_pglistalloc(size, 0, 0xffffff, PAGE_SIZE, 0, &pglist, 1,
UVM_PLA_NOWAIT) || uvm_map(kernel_map, &va, size, NULL,
- UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
- UVM_INH_NONE, UVM_ADV_RANDOM, 0)))
+ UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(PROT_MASK, PROT_MASK,
+ UVM_INH_NONE, POSIX_MADV_RANDOM, 0)))
panic("aha_init: could not allocate mailbox");
wmbx = (struct aha_mbx *)va;
for (pg = TAILQ_FIRST(&pglist); pg != NULL;
pg = TAILQ_NEXT(pg, pageq)) {
pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
- VM_PROT_READ|VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
va += PAGE_SIZE;
}
pmap_update(pmap_kernel());
diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c
index 150fee67be2..64d9f0f0ca7 100644
--- a/sys/dev/pci/drm/i915/i915_gem.c
+++ b/sys/dev/pci/drm/i915/i915_gem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_gem.c,v 1.75 2014/09/20 21:17:43 kettenis Exp $ */
+/* $OpenBSD: i915_gem.c,v 1.76 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
@@ -306,7 +306,7 @@ kmap(struct vm_page *pg)
va = pmap_map_direct(pg);
#else
va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
- pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
#endif
return (void *)va;
@@ -1450,8 +1450,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
addr = 0;
ret = -uvm_map(&curproc->p_vmspace->vm_map, &addr, size,
- obj->uao, args->offset, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
- UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
+ obj->uao, args->offset, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE, UVM_INH_SHARE, POSIX_MADV_RANDOM, 0));
if (ret == 0)
uao_reference(obj->uao);
drm_gem_object_unreference_unlocked(obj);
@@ -1473,7 +1473,7 @@ i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
drm_i915_private_t *dev_priv = dev->dev_private;
paddr_t paddr;
int lcv, ret;
- int write = !!(access_type & VM_PROT_WRITE);
+ int write = !!(access_type & PROT_WRITE);
vm_prot_t mapprot;
boolean_t locked = TRUE;
@@ -1527,7 +1527,7 @@ i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
* it wrong, and makes us fully coherent with the gpu re mmap.
*/
if (write == 0)
- mapprot &= ~VM_PROT_WRITE;
+ mapprot &= ~PROT_WRITE;
/* XXX try and be more efficient when we do this */
for (lcv = 0 ; lcv < npages ; lcv++, offset += PAGE_SIZE,
vaddr += PAGE_SIZE) {
@@ -1622,7 +1622,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
for (pg = &dev_priv->pgs[atop(obj->gtt_offset)];
pg != &dev_priv->pgs[atop(obj->gtt_offset + obj->base.size)];
pg++)
- pmap_page_protect(pg, VM_PROT_NONE);
+ pmap_page_protect(pg, PROT_NONE);
obj->fault_mappable = false;
}
diff --git a/sys/dev/pci/drm/i915/i915_gem_tiling.c b/sys/dev/pci/drm/i915/i915_gem_tiling.c
index 569d310fbe2..bf1d2a1dcd0 100644
--- a/sys/dev/pci/drm/i915/i915_gem_tiling.c
+++ b/sys/dev/pci/drm/i915/i915_gem_tiling.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_gem_tiling.c,v 1.14 2014/01/30 15:10:48 kettenis Exp $ */
+/* $OpenBSD: i915_gem_tiling.c,v 1.15 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
@@ -483,7 +483,7 @@ i915_gem_swizzle_page(struct vm_page *pg)
va = pmap_map_direct(pg);
#else
va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
- pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
#endif
vaddr = (char *)va;
diff --git a/sys/dev/pci/drm/i915/intel_ringbuffer.c b/sys/dev/pci/drm/i915/intel_ringbuffer.c
index 57ab540dd18..4ca0ea6f64b 100644
--- a/sys/dev/pci/drm/i915/intel_ringbuffer.c
+++ b/sys/dev/pci/drm/i915/intel_ringbuffer.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_ringbuffer.c,v 1.20 2014/03/30 00:58:24 jsg Exp $ */
+/* $OpenBSD: intel_ringbuffer.c,v 1.21 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright © 2008-2010 Intel Corporation
*
@@ -475,8 +475,8 @@ init_pipe_control(struct intel_ring_buffer *ring)
pc->cpu_page = (volatile u_int32_t *)vm_map_min(kernel_map);
obj->base.uao->pgops->pgo_reference(obj->base.uao);
ret = uvm_map(kernel_map, (vaddr_t *)&pc->cpu_page,
- PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
- UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
+ PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE, UVM_INH_SHARE, POSIX_MADV_RANDOM, 0));
if (ret != 0) {
DRM_ERROR("Failed to map status page.\n");
obj->base.uao->pgops->pgo_detach(obj->base.uao);
@@ -1122,8 +1122,8 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.page_addr = (u_int32_t *)vm_map_min(kernel_map);
obj->base.uao->pgops->pgo_reference(obj->base.uao);
ret = uvm_map(kernel_map, (vaddr_t *)&ring->status_page.page_addr,
- PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
- UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
+ PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE, UVM_INH_SHARE, POSIX_MADV_RANDOM, 0));
if (ret != 0) {
obj->base.uao->pgops->pgo_detach(obj->base.uao);
ret = -ENOMEM;
diff --git a/sys/dev/pci/drm/ttm/ttm_bo.c b/sys/dev/pci/drm/ttm/ttm_bo.c
index 30e1eb9c20f..6b58efab9ce 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo.c,v 1.8 2014/04/12 06:08:22 jsg Exp $ */
+/* $OpenBSD: ttm_bo.c,v 1.9 2014/11/16 12:31:00 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -1649,14 +1649,14 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
page = PHYS_TO_VM_PAGE(paddr);
if (unlikely(page == NULL))
continue;
- pmap_page_protect(page, VM_PROT_NONE);
+ pmap_page_protect(page, PROT_NONE);
}
} else if (ttm) {
for (i = 0; i < ttm->num_pages; ++i) {
page = ttm->pages[i];
if (unlikely(page == NULL))
continue;
- pmap_page_protect(page, VM_PROT_NONE);
+ pmap_page_protect(page, PROT_NONE);
}
}
ttm_mem_io_free_vm(bo);
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_util.c b/sys/dev/pci/drm/ttm/ttm_bo_util.c
index 34cbd214769..9862576ef13 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_util.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_util.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo_util.c,v 1.5 2014/02/10 02:24:05 jsg Exp $ */
+/* $OpenBSD: ttm_bo_util.c,v 1.6 2014/11/16 12:31:00 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -531,7 +531,7 @@ kmap(struct vm_page *pg)
va = uvm_km_valloc(kernel_map, PAGE_SIZE);
if (va == 0)
return (NULL);
- pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
#endif
return (void *)va;
@@ -565,8 +565,8 @@ vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
for (i = 0; i < npages; i++) {
pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
- VM_PROT_READ | VM_PROT_WRITE,
- VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
pmap_update(pmap_kernel());
}
diff --git a/sys/kern/exec_elf.c b/sys/kern/exec_elf.c
index 9dad649fddc..e37402cf386 100644
--- a/sys/kern/exec_elf.c
+++ b/sys/kern/exec_elf.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: exec_elf.c,v 1.105 2014/11/14 23:26:48 tedu Exp $ */
+/* $OpenBSD: exec_elf.c,v 1.106 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright (c) 1996 Per Fogelstrom
@@ -242,9 +242,9 @@ ELFNAME(load_psection)(struct exec_vmcmd_set *vcset, struct vnode *vp,
diff = uaddr - *addr;
}
- *prot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0;
- *prot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0;
- *prot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0;
+ *prot |= (ph->p_flags & PF_R) ? PROT_READ : 0;
+ *prot |= (ph->p_flags & PF_W) ? PROT_WRITE : 0;
+ *prot |= (ph->p_flags & PF_X) ? PROT_EXEC : 0;
msize = ph->p_memsz + diff;
offset = ph->p_offset - bdiff;
@@ -379,7 +379,7 @@ ELFNAME(load_file)(struct proc *p, char *path, struct exec_package *epp,
* would (i.e. something safely out of the way).
*/
if (pos == ELFDEFNNAME(NO_ADDR)) {
- pos = uvm_map_hint(p->p_vmspace, VM_PROT_EXECUTE);
+ pos = uvm_map_hint(p->p_vmspace, PROT_EXEC);
}
pos = ELF_ROUND(pos, file_align);
@@ -642,7 +642,7 @@ ELFNAME2(exec,makecmds)(struct proc *p, struct exec_package *epp)
* Decide whether it's text or data by looking
* at the protection of the section
*/
- if (prot & VM_PROT_WRITE) {
+ if (prot & PROT_WRITE) {
/* data section */
if (epp->ep_dsize == ELFDEFNNAME(NO_ADDR)) {
epp->ep_daddr = addr;
@@ -658,7 +658,7 @@ ELFNAME2(exec,makecmds)(struct proc *p, struct exec_package *epp)
epp->ep_dsize = addr+size -
epp->ep_daddr;
}
- } else if (prot & VM_PROT_EXECUTE) {
+ } else if (prot & PROT_EXEC) {
/* text section */
if (epp->ep_tsize == ELFDEFNNAME(NO_ADDR)) {
epp->ep_taddr = addr;
@@ -1111,11 +1111,11 @@ ELFNAMEEND(coredump_writeseghdrs)(struct proc *p, void *iocookie,
phdr.p_filesz = realsize;
phdr.p_memsz = size;
phdr.p_flags = 0;
- if (us->prot & VM_PROT_READ)
+ if (us->prot & PROT_READ)
phdr.p_flags |= PF_R;
- if (us->prot & VM_PROT_WRITE)
+ if (us->prot & PROT_WRITE)
phdr.p_flags |= PF_W;
- if (us->prot & VM_PROT_EXECUTE)
+ if (us->prot & PROT_EXEC)
phdr.p_flags |= PF_X;
phdr.p_align = PAGE_SIZE;
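The exec_elf.c hunks above translate between ELF segment flags and mmap protection bits in both directions: PF_* to PROT_* when loading a program section, and PROT_* back to PF_* when writing core segment headers. A stand-alone sketch of that mapping; the helper names are illustrative and the PF_* values are the standard ELF ones:

	#include <sys/mman.h>
	#include <stdio.h>

	/* Standard ELF segment-flag values. */
	#define PF_X	0x1
	#define PF_W	0x2
	#define PF_R	0x4

	/* ELF p_flags -> PROT_* bits, as in load_psection() above. */
	static int
	pflags_to_prot(int pf)
	{
		int prot = PROT_NONE;

		if (pf & PF_R)
			prot |= PROT_READ;
		if (pf & PF_W)
			prot |= PROT_WRITE;
		if (pf & PF_X)
			prot |= PROT_EXEC;
		return (prot);
	}

	/* PROT_* bits -> ELF p_flags, as in coredump_writeseghdrs() above. */
	static int
	prot_to_pflags(int prot)
	{
		int pf = 0;

		if (prot & PROT_READ)
			pf |= PF_R;
		if (prot & PROT_WRITE)
			pf |= PF_W;
		if (prot & PROT_EXEC)
			pf |= PF_X;
		return (pf);
	}

	int
	main(void)
	{
		int prot = pflags_to_prot(PF_R | PF_X);	/* text segment */

		printf("prot %#x -> p_flags %#x\n", prot, prot_to_pflags(prot));
		return (0);
	}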
diff --git a/sys/kern/exec_subr.c b/sys/kern/exec_subr.c
index 4ea39a9f435..d00b0f78e5d 100644
--- a/sys/kern/exec_subr.c
+++ b/sys/kern/exec_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: exec_subr.c,v 1.39 2014/11/14 23:26:48 tedu Exp $ */
+/* $OpenBSD: exec_subr.c,v 1.40 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: exec_subr.c,v 1.9 1994/12/04 03:10:42 mycroft Exp $ */
/*
@@ -186,7 +186,7 @@ vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
* first, attach to the object
*/
- uobj = uvn_attach(cmd->ev_vp, VM_PROT_READ|VM_PROT_EXECUTE);
+ uobj = uvn_attach(cmd->ev_vp, PROT_READ | PROT_EXEC);
if (uobj == NULL)
return (ENOMEM);
@@ -196,8 +196,8 @@ vmcmd_map_pagedvn(struct proc *p, struct exec_vmcmd *cmd)
error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr, cmd->ev_len,
uobj, cmd->ev_offset, 0,
- UVM_MAPFLAG(cmd->ev_prot, VM_PROT_ALL, UVM_INH_COPY,
- UVM_ADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
+ UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, UVM_INH_COPY,
+ POSIX_MADV_NORMAL, UVM_FLAG_COPYONW|UVM_FLAG_FIXED));
/*
* check for error
@@ -234,8 +234,8 @@ vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(prot | UVM_PROT_WRITE, UVM_PROT_ALL, UVM_INH_COPY,
- UVM_ADV_NORMAL,
+ UVM_MAPFLAG(prot | PROT_WRITE, PROT_MASK, UVM_INH_COPY,
+ POSIX_MADV_NORMAL,
UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));
if (error)
@@ -247,7 +247,7 @@ vmcmd_map_readvn(struct proc *p, struct exec_vmcmd *cmd)
if (error)
return (error);
- if ((prot & VM_PROT_WRITE) == 0) {
+ if ((prot & PROT_WRITE) == 0) {
/*
* we had to map in the area at PROT_WRITE so that vn_rdwr()
* could write to it. however, the caller seems to want
@@ -279,8 +279,8 @@ vmcmd_map_zero(struct proc *p, struct exec_vmcmd *cmd)
cmd->ev_addr = trunc_page(cmd->ev_addr); /* required by uvm_map */
error = uvm_map(&p->p_vmspace->vm_map, &cmd->ev_addr,
round_page(cmd->ev_len), NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(cmd->ev_prot, UVM_PROT_ALL, UVM_INH_COPY,
- UVM_ADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
+ UVM_MAPFLAG(cmd->ev_prot, PROT_MASK, UVM_INH_COPY,
+ POSIX_MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_COPYONW));
if (error)
return error;
@@ -351,17 +351,17 @@ exec_setup_stack(struct proc *p, struct exec_package *epp)
#ifdef MACHINE_STACK_GROWS_UP
NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
- epp->ep_maxsaddr + epp->ep_ssize, NULLVP, 0, VM_PROT_NONE);
+ epp->ep_maxsaddr + epp->ep_ssize, NULLVP, 0, PROT_NONE);
NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
epp->ep_maxsaddr, NULLVP, 0,
- VM_PROT_READ|VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
#else
NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero,
((epp->ep_minsaddr - epp->ep_ssize) - epp->ep_maxsaddr),
- epp->ep_maxsaddr, NULLVP, 0, VM_PROT_NONE);
+ epp->ep_maxsaddr, NULLVP, 0, PROT_NONE);
NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, epp->ep_ssize,
(epp->ep_minsaddr - epp->ep_ssize), NULLVP, 0,
- VM_PROT_READ|VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
#endif
return (0);
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 34355c02ad8..77df73c6e96 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: init_main.c,v 1.223 2014/10/25 12:53:14 miod Exp $ */
+/* $OpenBSD: init_main.c,v 1.224 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: init_main.c,v 1.84.4.1 1996/06/02 09:08:06 mrg Exp $ */
/*
@@ -634,8 +634,8 @@ start_init(void *arg)
#endif
if (uvm_map(&p->p_vmspace->vm_map, &addr, PAGE_SIZE,
NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_ALL, UVM_INH_COPY,
- UVM_ADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW)))
+ UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_MASK, UVM_INH_COPY,
+ POSIX_MADV_NORMAL, UVM_FLAG_FIXED|UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW)))
panic("init: couldn't allocate argument space");
p->p_vmspace->vm_maxsaddr = (caddr_t)addr;
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 24ca23c37c6..910f9b9b004 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_exec.c,v 1.147 2014/10/18 15:20:32 kettenis Exp $ */
+/* $OpenBSD: kern_exec.c,v 1.148 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: kern_exec.c,v 1.75 1996/02/09 18:59:28 christos Exp $ */
/*-
@@ -835,8 +835,8 @@ exec_sigcode_map(struct process *pr, struct emul *e)
uao_reference(e->e_sigobject); /* permanent reference */
if ((r = uvm_map(kernel_map, &va, round_page(sz), e->e_sigobject,
- 0, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
- UVM_INH_SHARE, UVM_ADV_RANDOM, 0)))) {
+ 0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
+ UVM_INH_SHARE, POSIX_MADV_RANDOM, 0)))) {
uao_detach(e->e_sigobject);
return (ENOMEM);
}
@@ -847,8 +847,8 @@ exec_sigcode_map(struct process *pr, struct emul *e)
pr->ps_sigcode = 0; /* no hint */
uao_reference(e->e_sigobject);
if (uvm_map(&pr->ps_vmspace->vm_map, &pr->ps_sigcode, round_page(sz),
- e->e_sigobject, 0, 0, UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX,
- UVM_INH_SHARE, UVM_ADV_RANDOM, 0))) {
+ e->e_sigobject, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_EXEC,
+ PROT_READ | PROT_EXEC, UVM_INH_SHARE, POSIX_MADV_RANDOM, 0))) {
uao_detach(e->e_sigobject);
return (ENOMEM);
}
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index dc52bc330ff..a7d48d5c3b0 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_malloc.c,v 1.123 2014/11/06 17:29:23 tedu Exp $ */
+/* $OpenBSD: kern_malloc.c,v 1.124 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $ */
/*
@@ -294,7 +294,7 @@ malloc(size_t size, int type, int flags)
vm_map_lock(kmem_map);
rv = uvm_map_checkprot(kmem_map, addr,
- addr + sizeof(struct kmem_freelist), VM_PROT_WRITE);
+ addr + sizeof(struct kmem_freelist), PROT_WRITE);
vm_map_unlock(kmem_map);
if (!rv) {
diff --git a/sys/kern/kern_malloc_debug.c b/sys/kern/kern_malloc_debug.c
index 2bce1883c18..701aee09bae 100644
--- a/sys/kern/kern_malloc_debug.c
+++ b/sys/kern/kern_malloc_debug.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_malloc_debug.c,v 1.33 2014/03/28 17:57:11 mpi Exp $ */
+/* $OpenBSD: kern_malloc_debug.c,v 1.34 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
@@ -38,7 +38,7 @@
* This is really simple. Every malloc() allocates two virtual pages,
* the second page is left unmapped, and the value returned is aligned
* so that it ends at (or very close to) the page boundary to catch overflows.
- * Every free() changes the protection of the first page to VM_PROT_NONE so
+ * Every free() changes the protection of the first page to PROT_NONE so
* that we can catch any dangling writes to it.
* To minimize the risk of writes to recycled chunks we keep an LRU of latest
* freed chunks. The length of it is controlled by MALLOC_DEBUG_CHUNKS.
@@ -139,7 +139,7 @@ debug_malloc(unsigned long size, int type, int flags, void **addr)
debug_malloc_allocs++;
splx(s);
- pmap_kenter_pa(md->md_va, md->md_pa, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(md->md_va, md->md_pa, PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
md->md_size = size;
diff --git a/sys/kern/kern_physio.c b/sys/kern/kern_physio.c
index bcb84149439..338a4fba936 100644
--- a/sys/kern/kern_physio.c
+++ b/sys/kern/kern_physio.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_physio.c,v 1.41 2014/09/14 14:17:25 jsg Exp $ */
+/* $OpenBSD: kern_physio.c,v 1.42 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: kern_physio.c,v 1.28 1997/05/19 10:43:28 pk Exp $ */
/*-
@@ -139,7 +139,7 @@ physio(void (*strategy)(struct buf *), dev_t dev, int flags,
*/
error = uvm_vslock_device(p, iovp->iov_base, todo,
(flags & B_READ) ?
- VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ, &map);
+ PROT_READ | PROT_WRITE : PROT_READ, &map);
if (error)
goto done;
if (map) {
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index 133029ac393..813c421e1f4 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_resource.c,v 1.50 2014/03/30 21:54:48 guenther Exp $ */
+/* $OpenBSD: kern_resource.c,v 1.51 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: kern_resource.c,v 1.38 1996/10/23 07:19:38 matthias Exp $ */
/*-
@@ -281,7 +281,7 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
vm_prot_t prot;
if (limp->rlim_cur > alimp->rlim_cur) {
- prot = VM_PROT_READ|VM_PROT_WRITE;
+ prot = PROT_READ | PROT_WRITE;
size = limp->rlim_cur - alimp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
addr = USRSTACK + alimp->rlim_cur;
@@ -289,7 +289,7 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
addr = USRSTACK - limp->rlim_cur;
#endif
} else {
- prot = VM_PROT_NONE;
+ prot = PROT_NONE;
size = alimp->rlim_cur - limp->rlim_cur;
#ifdef MACHINE_STACK_GROWS_UP
addr = USRSTACK + limp->rlim_cur;
diff --git a/sys/kern/kern_sysctl.c b/sys/kern/kern_sysctl.c
index acb46adc2cc..3d43b2f944e 100644
--- a/sys/kern/kern_sysctl.c
+++ b/sys/kern/kern_sysctl.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_sysctl.c,v 1.269 2014/11/03 17:20:46 bluhm Exp $ */
+/* $OpenBSD: kern_sysctl.c,v 1.270 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: kern_sysctl.c,v 1.17 1996/05/20 17:49:05 mrg Exp $ */
/*-
@@ -214,7 +214,7 @@ sys___sysctl(struct proc *p, void *v, register_t *retval)
return (ENOMEM);
}
error = uvm_vslock(p, SCARG(uap, old), oldlen,
- VM_PROT_READ|VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
if (error) {
rw_exit_write(&sysctl_lock);
return (error);
diff --git a/sys/kern/subr_hibernate.c b/sys/kern/subr_hibernate.c
index d140bc0b76c..16303cd2e55 100644
--- a/sys/kern/subr_hibernate.c
+++ b/sys/kern/subr_hibernate.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_hibernate.c,v 1.108 2014/11/05 05:48:45 mlarkin Exp $ */
+/* $OpenBSD: subr_hibernate.c,v 1.109 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
@@ -1392,7 +1392,7 @@ hibernate_write_chunks(union hibernate_info *hib)
if (rle == 0) {
pmap_kenter_pa(hibernate_temp_page,
inaddr & PMAP_PA_MASK,
- VM_PROT_READ);
+ PROT_READ);
pmap_activate(curproc);
@@ -1570,7 +1570,7 @@ hibernate_read_image(union hibernate_info *hib)
/* Map chunktable pages */
for (i = 0; i < HIBERNATE_CHUNK_TABLE_SIZE; i += PAGE_SIZE)
pmap_kenter_pa(chunktable + i, piglet_chunktable + i,
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
/* Read the chunktable from disk into the piglet chunktable */
@@ -1665,7 +1665,7 @@ hibernate_read_chunks(union hibernate_info *hib, paddr_t pig_start,
for(i = 0; i < 24 ; i++)
pmap_kenter_pa(hibernate_fchunk_area + (i * PAGE_SIZE),
piglet_base + ((4 + i) * PAGE_SIZE),
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
nchunks = hib->chunk_ctr;
@@ -1725,8 +1725,8 @@ hibernate_read_chunks(union hibernate_info *hib, paddr_t pig_start,
/* Map pages for this read */
for (j = 0; j < num_io_pages; j ++)
pmap_kenter_pa(tempva + j * PAGE_SIZE,
- img_cur + j * PAGE_SIZE,
- VM_PROT_READ | VM_PROT_WRITE);
+ img_cur + j * PAGE_SIZE,
+ PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
@@ -1796,7 +1796,7 @@ hibernate_suspend(void)
hib.image_offset, ctod(end) - ctod(start));
pmap_kenter_pa(HIBERNATE_HIBALLOC_PAGE, HIBERNATE_HIBALLOC_PAGE,
- VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
pmap_activate(curproc);
DPRINTF("hibernate: writing chunks\n");
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index 39a21e6495e..8b5adf88c10 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: sysv_shm.c,v 1.58 2014/07/13 15:29:04 tedu Exp $ */
+/* $OpenBSD: sysv_shm.c,v 1.59 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: sysv_shm.c,v 1.50 1998/10/21 22:24:29 tron Exp $ */
/*
@@ -248,9 +248,9 @@ sys_shmat(struct proc *p, void *v, register_t *retval)
if (i >= shmmap_h->shmseg)
return (EMFILE);
size = round_page(shmseg->shm_segsz);
- prot = VM_PROT_READ;
+ prot = PROT_READ;
if ((SCARG(uap, shmflg) & SHM_RDONLY) == 0)
- prot |= VM_PROT_WRITE;
+ prot |= PROT_WRITE;
flags = MAP_ANON | MAP_SHARED;
if (SCARG(uap, shmaddr)) {
flags |= MAP_FIXED;
@@ -267,7 +267,7 @@ sys_shmat(struct proc *p, void *v, register_t *retval)
uao_reference(shm_handle->shm_object);
error = uvm_map(&p->p_vmspace->vm_map, &attach_va, size,
shm_handle->shm_object, 0, 0, UVM_MAPFLAG(prot, prot,
- UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
+ UVM_INH_SHARE, POSIX_MADV_RANDOM, 0));
if (error) {
uao_detach(shm_handle->shm_object);
return (error);
diff --git a/sys/kern/vfs_biomem.c b/sys/kern/vfs_biomem.c
index c2ccb1067c2..c822cfb7ca7 100644
--- a/sys/kern/vfs_biomem.c
+++ b/sys/kern/vfs_biomem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: vfs_biomem.c,v 1.30 2014/08/11 19:16:56 miod Exp $ */
+/* $OpenBSD: vfs_biomem.c,v 1.31 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright (c) 2007 Artur Grabowski <art@openbsd.org>
@@ -53,8 +53,8 @@ buf_mem_init(vsize_t size)
buf_kva_start = vm_map_min(kernel_map);
if (uvm_map(kernel_map, &buf_kva_start, size, NULL,
- UVM_UNKNOWN_OFFSET, PAGE_SIZE, UVM_MAPFLAG(UVM_PROT_NONE,
- UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_NORMAL, 0)))
+ UVM_UNKNOWN_OFFSET, PAGE_SIZE, UVM_MAPFLAG(PROT_NONE,
+ PROT_NONE, UVM_INH_NONE, POSIX_MADV_NORMAL, 0)))
panic("bufinit: can't reserve VM for buffers");
buf_kva_end = buf_kva_start + size;
@@ -138,7 +138,7 @@ buf_map(struct buf *bp)
KASSERT(pg != NULL);
pmap_kenter_pa(va + ptoa(i), VM_PAGE_TO_PHYS(pg),
- VM_PROT_READ|VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE);
}
pmap_update(pmap_kernel());
bp->b_data = (caddr_t)va;
diff --git a/sys/tmpfs/tmpfs_subr.c b/sys/tmpfs/tmpfs_subr.c
index 88a4717a12b..89026cda0b5 100644
--- a/sys/tmpfs/tmpfs_subr.c
+++ b/sys/tmpfs/tmpfs_subr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: tmpfs_subr.c,v 1.6 2014/03/28 17:57:11 mpi Exp $ */
+/* $OpenBSD: tmpfs_subr.c,v 1.7 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: tmpfs_subr.c,v 1.79 2012/03/13 18:40:50 elad Exp $ */
/*
@@ -1227,10 +1227,10 @@ tmpfs_uiomove(tmpfs_node_t *node, struct uio *uio, vsize_t len)
if (len >= TMPFS_UIO_MAXBYTES) {
sz = TMPFS_UIO_MAXBYTES;
- adv = UVM_ADV_NORMAL;
+ adv = POSIX_MADV_NORMAL;
} else {
sz = len;
- adv = UVM_ADV_SEQUENTIAL;
+ adv = POSIX_MADV_SEQUENTIAL;
}
if (tmpfs_uio_cached(node))
@@ -1239,8 +1239,8 @@ tmpfs_uiomove(tmpfs_node_t *node, struct uio *uio, vsize_t len)
uao_reference(node->tn_uobj);
error = uvm_map(kernel_map, &va, round_page(pgoff + sz), node->tn_uobj,
- trunc_page(uio->uio_offset), 0, UVM_MAPFLAG(UVM_PROT_RW,
- UVM_PROT_RW, UVM_INH_NONE, adv, 0));
+ trunc_page(uio->uio_offset), 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE, UVM_INH_NONE, adv, 0));
if (error) {
uao_detach(node->tn_uobj); /* Drop reference. */
return error;
@@ -1266,8 +1266,8 @@ tmpfs_zeropg(tmpfs_node_t *node, voff_t pgnum, vaddr_t pgoff)
uao_reference(node->tn_uobj);
error = uvm_map(kernel_map, &va, PAGE_SIZE, node->tn_uobj, pgnum, 0,
- UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE, UVM_ADV_NORMAL,
- 0));
+ UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
+ UVM_INH_NONE, POSIX_MADV_NORMAL, 0));
if (error) {
uao_detach(node->tn_uobj); /* Drop reference. */
return error;
diff --git a/sys/uvm/uvm_amap.c b/sys/uvm/uvm_amap.c
index f020dcb043a..1c258b40c65 100644
--- a/sys/uvm/uvm_amap.c
+++ b/sys/uvm/uvm_amap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_amap.c,v 1.54 2014/09/14 14:17:27 jsg Exp $ */
+/* $OpenBSD: uvm_amap.c,v 1.55 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_amap.c,v 1.27 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -1032,7 +1032,7 @@ amap_add(struct vm_aref *aref, vaddr_t offset, struct vm_anon *anon,
if (amap->am_anon[slot]->an_page != NULL &&
(amap->am_flags & AMAP_SHARED) != 0) {
pmap_page_protect(amap->am_anon[slot]->an_page,
- VM_PROT_NONE);
+ PROT_NONE);
/*
* XXX: suppose page is supposed to be wired somewhere?
*/
diff --git a/sys/uvm/uvm_anon.c b/sys/uvm/uvm_anon.c
index c1b979961b0..72823d7fe8d 100644
--- a/sys/uvm/uvm_anon.c
+++ b/sys/uvm/uvm_anon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_anon.c,v 1.40 2014/09/14 14:17:27 jsg Exp $ */
+/* $OpenBSD: uvm_anon.c,v 1.41 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -121,7 +121,7 @@ uvm_anfree(struct vm_anon *anon)
atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
return;
}
- pmap_page_protect(pg, VM_PROT_NONE);
+ pmap_page_protect(pg, PROT_NONE);
uvm_lock_pageq(); /* lock out pagedaemon */
uvm_pagefree(pg); /* bye bye */
uvm_unlock_pageq(); /* free the daemon */
@@ -250,7 +250,7 @@ uvm_anon_pagein(struct vm_anon *anon)
/* deactivate the page (to put it on a page queue) */
pmap_clear_reference(pg);
- pmap_page_protect(pg, VM_PROT_NONE);
+ pmap_page_protect(pg, PROT_NONE);
uvm_lock_pageq();
uvm_pagedeactivate(pg);
uvm_unlock_pageq();
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index 12c18b351e7..f4d9addc226 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_aobj.c,v 1.69 2014/09/14 14:17:27 jsg Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.70 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -878,7 +878,7 @@ uao_detach_locked(struct uvm_object *uobj)
uvm_lock_pageq();
continue;
}
- pmap_page_protect(pg, VM_PROT_NONE);
+ pmap_page_protect(pg, PROT_NONE);
uao_dropswap(&aobj->u_obj, pg->offset >> PAGE_SHIFT);
uvm_pagefree(pg);
}
@@ -970,7 +970,7 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
uvm_lock_pageq();
/* zap all mappings for the page. */
- pmap_page_protect(pp, VM_PROT_NONE);
+ pmap_page_protect(pp, PROT_NONE);
/* ...and deactivate the page. */
uvm_pagedeactivate(pp);
@@ -991,7 +991,7 @@ uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
continue;
/* zap all mappings for the page. */
- pmap_page_protect(pp, VM_PROT_NONE);
+ pmap_page_protect(pp, PROT_NONE);
uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);
uvm_lock_pageq();
@@ -1418,7 +1418,7 @@ uao_pagein_page(struct uvm_aobj *aobj, int pageidx)
pg = NULL;
npages = 1;
rv = uao_get(&aobj->u_obj, (voff_t)pageidx << PAGE_SHIFT,
- &pg, &npages, 0, VM_PROT_READ|VM_PROT_WRITE, 0, 0);
+ &pg, &npages, 0, PROT_READ | PROT_WRITE, 0, 0);
switch (rv) {
case VM_PAGER_OK:
diff --git a/sys/uvm/uvm_device.c b/sys/uvm/uvm_device.c
index b58f0b3053b..cfdd845079f 100644
--- a/sys/uvm/uvm_device.c
+++ b/sys/uvm/uvm_device.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_device.c,v 1.49 2014/09/14 14:17:27 jsg Exp $ */
+/* $OpenBSD: uvm_device.c,v 1.50 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_device.c,v 1.30 2000/11/25 06:27:59 chs Exp $ */
/*
@@ -118,7 +118,6 @@ udv_attach(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
* Check that the specified range of the device allows the
* desired protection.
*
- * XXX assumes VM_PROT_* == PROT_*
* XXX clobbers off and size, but nothing else here needs them.
*/
while (size != 0) {
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index d1c1e1bc97e..8780317d9bb 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_extern.h,v 1.122 2014/11/15 21:42:07 deraadt Exp $ */
+/* $OpenBSD: uvm_extern.h,v 1.123 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */
/*
@@ -81,22 +81,7 @@ struct vm_page;
typedef struct vm_page *vm_page_t;
/* protections bits */
-#define UVM_PROT_MASK 0x07 /* protection mask */
-#define UVM_PROT_NONE 0x00 /* protection none */
-#define UVM_PROT_ALL 0x07 /* everything */
-#define UVM_PROT_READ 0x01 /* read */
-#define UVM_PROT_WRITE 0x02 /* write */
-#define UVM_PROT_EXEC 0x04 /* exec */
-
-/* protection short codes */
-#define UVM_PROT_R 0x01 /* read */
-#define UVM_PROT_W 0x02 /* write */
-#define UVM_PROT_RW 0x03 /* read-write */
-#define UVM_PROT_X 0x04 /* exec */
-#define UVM_PROT_RX 0x05 /* read-exec */
-#define UVM_PROT_WX 0x06 /* write-exec */
-#define UVM_PROT_RWX 0x07 /* read-write-exec */
-
+#define PROT_MASK (PROT_READ | PROT_WRITE | PROT_EXEC)
/* 0x08: not used */
/* inherit codes */
@@ -107,44 +92,11 @@ typedef struct vm_page *vm_page_t;
#define UVM_INH_ZERO 0x30 /* "zero" */
/* 0x40, 0x80: not used */
-
/* bits 0x700: max protection, 0x800: not used */
-
/* bits 0x7000: advice, 0x8000: not used */
typedef int vm_prot_t;
-/*
- * Protection values, defined as bits within the vm_prot_t type
- *
- * These are funky definitions from old CMU VM and are kept
- * for compatibility reasons, one day they are going to die,
- * just like everybody else.
- */
-
-#define VM_PROT_NONE ((vm_prot_t) 0x00)
-
-#define VM_PROT_READ ((vm_prot_t) 0x01) /* read permission */
-#define VM_PROT_WRITE ((vm_prot_t) 0x02) /* write permission */
-#define VM_PROT_EXECUTE ((vm_prot_t) 0x04) /* execute permission */
-
-/*
- * The default protection for newly-created virtual memory
- */
-
-#define VM_PROT_DEFAULT (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
-
-/*
- * The maximum privileges possible, for parameter checking.
- */
-
-#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
-
-/* advice: matches MADV_* from sys/mman.h */
-#define UVM_ADV_NORMAL 0x0 /* 'normal' */
-#define UVM_ADV_RANDOM 0x1 /* 'random' */
-#define UVM_ADV_SEQUENTIAL 0x2 /* 'sequential' */
-/* 0x3: will need, 0x4: dontneed */
#define UVM_ADV_MASK 0x7 /* mask */
/* mapping flags */
@@ -159,9 +111,9 @@ typedef int vm_prot_t;
#define UVM_FLAG_NOFAULT 0x1000000 /* don't fault */
/* macros to extract info */
-#define UVM_PROTECTION(X) ((X) & UVM_PROT_MASK)
+#define UVM_PROTECTION(X) ((X) & PROT_MASK)
#define UVM_INHERIT(X) (((X) & UVM_INH_MASK) >> 4)
-#define UVM_MAXPROTECTION(X) (((X) >> 8) & UVM_PROT_MASK)
+#define UVM_MAXPROTECTION(X) (((X) >> 8) & PROT_MASK)
#define UVM_ADVICE(X) (((X) >> 12) & UVM_ADV_MASK)
#define UVM_MAPFLAG(prot, maxprot, inh, advice, flags) \
@@ -209,6 +161,7 @@ typedef int vm_prot_t;
#include <sys/queue.h>
#include <sys/tree.h>
#include <sys/lock.h>
+#include <sys/mman.h>
#ifdef _KERNEL
struct buf;
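With the UVM_PROT_* and VM_PROT_* aliases gone, map flags are built straight from PROT_* and POSIX_MADV_* values. A small sketch of packing and unpacking such a flags word, consistent with the UVM_PROTECTION/UVM_MAXPROTECTION/UVM_ADVICE accessors kept above; the local MAPFLAG macro and the inherit value 0 are illustrative stand-ins, not the kernel's UVM_MAPFLAG:

	#include <sys/mman.h>	/* PROT_*, POSIX_MADV_* */
	#include <stdio.h>

	#define PROT_ALL	(PROT_READ | PROT_WRITE | PROT_EXEC)	/* = PROT_MASK above */
	#define ADV_MASK	0x7

	/* Pack prot (bits 0-2), max prot (bits 8-10) and advice (bits 12-14). */
	#define MAPFLAG(prot, maxprot, inh, advice, flags) \
		((prot) | (inh) | ((maxprot) << 8) | ((advice) << 12) | (flags))

	int
	main(void)
	{
		int f = MAPFLAG(PROT_READ | PROT_WRITE, PROT_ALL, 0,
		    POSIX_MADV_RANDOM, 0);

		printf("prot %#x maxprot %#x advice %#x\n",
		    f & PROT_ALL, (f >> 8) & PROT_ALL, (f >> 12) & ADV_MASK);
		return (0);
	}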
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index b45b43c3024..4c7cf86f945 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_fault.c,v 1.78 2014/10/03 17:41:00 kettenis Exp $ */
+/* $OpenBSD: uvm_fault.c,v 1.79 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_fault.c,v 1.51 2000/08/06 00:22:53 thorpej Exp $ */
/*
@@ -184,7 +184,7 @@ uvmfault_anonflush(struct vm_anon **anons, int n)
if (pg && (pg->pg_flags & PG_BUSY) == 0 && pg->loan_count == 0) {
uvm_lock_pageq();
if (pg->wire_count == 0) {
- pmap_page_protect(pg, VM_PROT_NONE);
+ pmap_page_protect(pg, PROT_NONE);
uvm_pagedeactivate(pg);
}
uvm_unlock_pageq();
@@ -206,15 +206,15 @@ uvmfault_init()
npages = atop(16384);
if (npages > 0) {
KASSERT(npages <= UVM_MAXRANGE / 2);
- uvmadvice[UVM_ADV_NORMAL].nforw = npages;
- uvmadvice[UVM_ADV_NORMAL].nback = npages - 1;
+ uvmadvice[POSIX_MADV_NORMAL].nforw = npages;
+ uvmadvice[POSIX_MADV_NORMAL].nback = npages - 1;
}
npages = atop(32768);
if (npages > 0) {
KASSERT(npages <= UVM_MAXRANGE / 2);
- uvmadvice[UVM_ADV_SEQUENTIAL].nforw = npages - 1;
- uvmadvice[UVM_ADV_SEQUENTIAL].nback = npages;
+ uvmadvice[POSIX_MADV_SEQUENTIAL].nforw = npages - 1;
+ uvmadvice[POSIX_MADV_SEQUENTIAL].nback = npages;
}
}
@@ -380,7 +380,7 @@ uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
* anon and try again.
*/
if (pg->pg_flags & PG_RELEASED) {
- pmap_page_protect(pg, VM_PROT_NONE);
+ pmap_page_protect(pg, PROT_NONE);
uvm_anfree(anon); /* frees page for us */
if (locked)
uvmfault_unlockall(ufi, amap, NULL,
@@ -506,7 +506,7 @@ uvmfault_update_stats(struct uvm_faultinfo *ufi)
* the map locked off during I/O.
*/
#define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
- ~VM_PROT_WRITE : VM_PROT_ALL)
+ ~PROT_WRITE : PROT_MASK)
int
uvm_fault(vm_map_t orig_map, vaddr_t vaddr, vm_fault_t fault_type,
vm_prot_t access_type)
@@ -571,7 +571,7 @@ ReFault:
/* handle "needs_copy" case. */
if (UVM_ET_ISNEEDSCOPY(ufi.entry)) {
- if ((access_type & VM_PROT_WRITE) ||
+ if ((access_type & PROT_WRITE) ||
(ufi.entry->object.uvm_obj == NULL)) {
/* need to clear */
uvmfault_unlockmaps(&ufi, FALSE);
@@ -583,7 +583,7 @@ ReFault:
* ensure that we pmap_enter page R/O since
* needs_copy is still true
*/
- enter_prot &= ~VM_PROT_WRITE;
+ enter_prot &= ~PROT_WRITE;
}
}
@@ -710,7 +710,7 @@ ReFault:
*/
(void) pmap_enter(ufi.orig_map->pmap, currva,
VM_PAGE_TO_PHYS(anon->an_page),
- (anon->an_ref > 1) ? (enter_prot & ~VM_PROT_WRITE) :
+ (anon->an_ref > 1) ? (enter_prot & ~PROT_WRITE) :
enter_prot,
PMAP_CANFAIL |
(VM_MAPENT_ISWIRED(ufi.entry) ? PMAP_WIRED : 0));
@@ -887,12 +887,12 @@ ReFault:
/* special handling for loaned pages */
if (anon->an_page->loan_count) {
- if ((access_type & VM_PROT_WRITE) == 0) {
+ if ((access_type & PROT_WRITE) == 0) {
/*
* for read faults on loaned pages we just cap the
* protection at read-only.
*/
- enter_prot = enter_prot & ~VM_PROT_WRITE;
+ enter_prot = enter_prot & ~PROT_WRITE;
} else {
/*
* note that we can't allow writes into a loaned page!
@@ -923,8 +923,7 @@ ReFault:
uvm_pagecopy(anon->an_page, pg);
/* force reload */
- pmap_page_protect(anon->an_page,
- VM_PROT_NONE);
+ pmap_page_protect(anon->an_page, PROT_NONE);
uvm_lock_pageq(); /* KILL loan */
if (uobj)
/* if we were loaning */
@@ -963,7 +962,7 @@ ReFault:
* if we are out of anon VM we kill the process (XXX: could wait?).
*/
- if ((access_type & VM_PROT_WRITE) != 0 && anon->an_ref > 1) {
+ if ((access_type & PROT_WRITE) != 0 && anon->an_ref > 1) {
uvmexp.flt_acow++;
oanon = anon; /* oanon = old */
anon = uvm_analloc();
@@ -1008,7 +1007,7 @@ ReFault:
oanon = anon;
pg = anon->an_page;
if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
- enter_prot = enter_prot & ~VM_PROT_WRITE;
+ enter_prot = enter_prot & ~PROT_WRITE;
}
/*
@@ -1077,7 +1076,7 @@ Case2:
promote = TRUE; /* always need anon here */
} else {
KASSERT(uobjpage != PGO_DONTCARE);
- promote = (access_type & VM_PROT_WRITE) &&
+ promote = (access_type & PROT_WRITE) &&
UVM_ET_ISCOPYONWRITE(ufi.entry);
}
@@ -1172,7 +1171,7 @@ Case2:
*/
uvmexp.flt_obj++;
if (UVM_ET_ISCOPYONWRITE(ufi.entry))
- enter_prot &= ~VM_PROT_WRITE;
+ enter_prot &= ~PROT_WRITE;
pg = uobjpage; /* map in the actual object */
/* assert(uobjpage != PGO_DONTCARE) */
@@ -1183,10 +1182,10 @@ Case2:
*/
if (uobjpage->loan_count) {
- if ((access_type & VM_PROT_WRITE) == 0) {
+ if ((access_type & PROT_WRITE) == 0) {
/* read fault: cap the protection at readonly */
/* cap! */
- enter_prot = enter_prot & ~VM_PROT_WRITE;
+ enter_prot = enter_prot & ~PROT_WRITE;
} else {
/* write fault: must break the loan here */
/* alloc new un-owned page */
@@ -1227,7 +1226,7 @@ Case2:
uvm_pagecopy(uobjpage, pg); /* old -> new */
atomic_clearbits_int(&pg->pg_flags,
PG_FAKE|PG_CLEAN);
- pmap_page_protect(uobjpage, VM_PROT_NONE);
+ pmap_page_protect(uobjpage, PROT_NONE);
if (uobjpage->pg_flags & PG_WANTED)
wakeup(uobjpage);
atomic_clearbits_int(&uobjpage->pg_flags,
@@ -1320,7 +1319,7 @@ Case2:
* procs see it
*/
if ((amap_flags(amap) & AMAP_SHARED) != 0) {
- pmap_page_protect(uobjpage, VM_PROT_NONE);
+ pmap_page_protect(uobjpage, PROT_NONE);
}
/* dispose of uobjpage. drop handle to uobj as well. */
@@ -1427,7 +1426,7 @@ uvm_fault_wire(vm_map_t map, vaddr_t start, vaddr_t end, vm_prot_t access_type)
/*
* now fault it in a page at a time. if the fault fails then we have
- * to undo what we have done. note that in uvm_fault VM_PROT_NONE
+ * to undo what we have done. note that in uvm_fault PROT_NONE
* is replaced with the max protection if fault_type is VM_FAULT_WIRE.
*/
for (va = start ; va < end ; va += PAGE_SIZE) {
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index cd01ee5a67e..703cdd82b9e 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_glue.c,v 1.66 2014/07/11 16:35:40 jsg Exp $ */
+/* $OpenBSD: uvm_glue.c,v 1.67 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $ */
/*
@@ -89,7 +89,7 @@ uvm_kernacc(caddr_t addr, size_t len, int rw)
{
boolean_t rv;
vaddr_t saddr, eaddr;
- vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;
+ vm_prot_t prot = rw == B_READ ? PROT_READ : PROT_WRITE;
saddr = trunc_page((vaddr_t)addr);
eaddr = round_page((vaddr_t)addr + len);
@@ -120,7 +120,7 @@ uvm_chgkprot(caddr_t addr, size_t len, int rw)
paddr_t pa;
vaddr_t sva, eva;
- prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
+ prot = rw == B_READ ? PROT_READ : PROT_READ | PROT_WRITE;
eva = round_page((vaddr_t)addr + len);
for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
/*
@@ -240,8 +240,7 @@ uvm_vslock_device(struct proc *p, void *addr, size_t len,
while ((pg = TAILQ_FIRST(&pgl)) != NULL) {
TAILQ_REMOVE(&pgl, pg, pageq);
- pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
- VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
va += PAGE_SIZE;
}
pmap_update(pmap_kernel());
diff --git a/sys/uvm/uvm_init.c b/sys/uvm/uvm_init.c
index 980d1f2f71c..b6120802173 100644
--- a/sys/uvm/uvm_init.c
+++ b/sys/uvm/uvm_init.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_init.c,v 1.34 2014/09/14 14:17:27 jsg Exp $ */
+/* $OpenBSD: uvm_init.c,v 1.35 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_init.c,v 1.14 2000/06/27 17:29:23 mrg Exp $ */
/*
@@ -147,15 +147,15 @@ uvm_init(void)
#ifdef DEADBEEF0
kvm_start = trunc_page(DEADBEEF0) - PAGE_SIZE;
if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
- NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
- UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
+ NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(PROT_NONE,
+ PROT_NONE, UVM_INH_NONE, POSIX_MADV_RANDOM, UVM_FLAG_FIXED)))
panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF0);
#endif
#ifdef DEADBEEF1
kvm_start = trunc_page(DEADBEEF1) - PAGE_SIZE;
if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
- NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
- UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
+ NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(PROT_NONE,
+ PROT_NONE, UVM_INH_NONE, POSIX_MADV_RANDOM, UVM_FLAG_FIXED)))
panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF1);
#endif
/*
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c
index aa34283e9fd..21518979482 100644
--- a/sys/uvm/uvm_km.c
+++ b/sys/uvm/uvm_km.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_km.c,v 1.116 2014/11/13 00:47:44 tedu Exp $ */
+/* $OpenBSD: uvm_km.c,v 1.117 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */
/*
@@ -182,8 +182,8 @@ uvm_km_init(vaddr_t start, vaddr_t end)
);
kernel_map_store.pmap = pmap_kernel();
if (base != start && uvm_map(&kernel_map_store, &base, start - base,
- NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
- UVM_INH_NONE, UVM_ADV_RANDOM,UVM_FLAG_FIXED)) != 0)
+ NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(PROT_MASK, PROT_MASK,
+ UVM_INH_NONE, POSIX_MADV_RANDOM, UVM_FLAG_FIXED)) != 0)
panic("uvm_km_init: could not reserve space for kernel");
kernel_map = &kernel_map_store;
@@ -209,8 +209,8 @@ uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size,
/* first allocate a blank spot in the parent map */
if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
- UVM_ADV_RANDOM, mapflags)) != 0) {
+ UVM_MAPFLAG(PROT_MASK, PROT_MASK, UVM_INH_NONE,
+ POSIX_MADV_RANDOM, mapflags)) != 0) {
panic("uvm_km_suballoc: unable to allocate space in parent map");
}
@@ -338,8 +338,9 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
/* allocate some virtual space */
if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
- valign, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
- UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
+ valign, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE, UVM_INH_NONE,
+ POSIX_MADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
return(0);
}
@@ -390,11 +391,11 @@ uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
*/
if (obj == NULL) {
pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
- UVM_PROT_RW);
+ PROT_READ | PROT_WRITE);
} else {
pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
- UVM_PROT_RW,
- PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
}
loopva += PAGE_SIZE;
offset += PAGE_SIZE;
@@ -454,8 +455,8 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
/* allocate some virtual space */
if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
- UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
- UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) {
+ UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(PROT_MASK, PROT_MASK,
+ UVM_INH_NONE, POSIX_MADV_RANDOM, 0)) != 0)) {
return(0);
}
@@ -491,7 +492,9 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
* object, so we always use regular old pmap_enter().
*/
pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
- UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
+ /* XXX why is the above executable? */
loopva += PAGE_SIZE;
offset += PAGE_SIZE;
@@ -540,8 +543,8 @@ uvm_km_valloc_align(struct vm_map *map, vsize_t size, vsize_t align, int flags)
/* allocate some virtual space, demand filled by kernel_object. */
if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
- UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
- UVM_INH_NONE, UVM_ADV_RANDOM, flags)) != 0)) {
+ UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(PROT_MASK, PROT_MASK,
+ UVM_INH_NONE, POSIX_MADV_RANDOM, flags)) != 0)) {
return(0);
}
@@ -574,8 +577,8 @@ uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer)
* by kernel_object.
*/
if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
- prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL,
- UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0)) == 0)) {
+ prefer, 0, UVM_MAPFLAG(PROT_MASK,
+ PROT_MASK, UVM_INH_NONE, POSIX_MADV_RANDOM, 0)) == 0)) {
return(kva);
}
@@ -658,8 +661,9 @@ uvm_km_page_init(void)
addr = vm_map_min(kernel_map);
if (uvm_map(kernel_map, &addr, (vsize_t)bulk << PAGE_SHIFT,
NULL, UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
- UVM_ADV_RANDOM, UVM_KMF_TRYLOCK)) != 0) {
+ UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE, UVM_INH_NONE,
+ POSIX_MADV_RANDOM, UVM_KMF_TRYLOCK)) != 0) {
bulk /= 2;
continue;
}
@@ -721,8 +725,9 @@ uvm_km_thread(void *arg)
* So, only use UVM_KMF_TRYLOCK for the first page
* if fp != NULL
*/
- flags = UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
- UVM_INH_NONE, UVM_ADV_RANDOM,
+ flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE,
+ UVM_INH_NONE, POSIX_MADV_RANDOM,
fp != NULL ? UVM_KMF_TRYLOCK : 0);
memset(pg, 0, sizeof(pg));
for (i = 0; i < nitems(pg); i++) {
@@ -734,8 +739,9 @@ uvm_km_thread(void *arg)
}
/* made progress, so don't sleep for more */
- flags = UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
- UVM_INH_NONE, UVM_ADV_RANDOM,
+ flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE,
+ UVM_INH_NONE, POSIX_MADV_RANDOM,
UVM_KMF_TRYLOCK);
}
@@ -865,9 +871,9 @@ km_alloc(size_t sz, const struct kmem_va_mode *kv,
#endif
alloc_va:
if (kv->kv_executable) {
- prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
+ prot = PROT_READ | PROT_WRITE | PROT_EXEC;
} else {
- prot = VM_PROT_READ | VM_PROT_WRITE;
+ prot = PROT_READ | PROT_WRITE;
}
if (kp->kp_pageable) {
@@ -914,7 +920,7 @@ try_map:
va = vm_map_min(map);
if (uvm_map(map, &va, sz, uobj, kd->kd_prefer,
kv->kv_align, UVM_MAPFLAG(prot, prot, UVM_INH_NONE,
- UVM_ADV_RANDOM, mapflags))) {
+ POSIX_MADV_RANDOM, mapflags))) {
if (kv->kv_wait && kd->kd_waitok) {
tsleep(map, PVM, "km_allocva", 0);
goto try_map;
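
In km_alloc() the protection is now assembled directly from PROT_* bits depending
on whether the allocation type asks for executable memory. A standalone helper
mirroring that selection; the "executable" flag stands in for kv->kv_executable
and this is an illustration, not kernel code:

	#include <sys/mman.h>

	static int
	kmem_prot(int executable)
	{
		if (executable)
			return PROT_READ | PROT_WRITE | PROT_EXEC;
		return PROT_READ | PROT_WRITE;
	}

	int
	main(void)
	{
		return kmem_prot(0) == (PROT_READ | PROT_WRITE) ? 0 : 1;
	}
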
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index 1437471d631..ea6a385caba 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_map.c,v 1.177 2014/11/13 00:47:44 tedu Exp $ */
+/* $OpenBSD: uvm_map.c,v 1.178 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -1066,7 +1066,7 @@ uvm_map(struct vm_map *map, vaddr_t *addr, vsize_t sz,
* Note: we enforce the alignment restriction,
* but ignore pmap_prefer.
*/
- } else if ((maxprot & VM_PROT_EXECUTE) != 0 &&
+ } else if ((maxprot & PROT_EXEC) != 0 &&
map->uaddr_exe != NULL) {
/* Run selection algorithm for executables. */
error = uvm_addr_invoke(map, map->uaddr_exe, &first, &last,
@@ -1871,7 +1871,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first,
iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) {
KDASSERT(iter->start >= start_addr && iter->end <= end_addr);
if (UVM_ET_ISHOLE(iter) || iter->start == iter->end ||
- iter->protection == VM_PROT_NONE)
+ iter->protection == PROT_NONE)
continue;
/*
@@ -1882,7 +1882,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first,
*/
if (!VM_MAPENT_ISWIRED(iter) && !UVM_ET_ISSUBMAP(iter) &&
UVM_ET_ISNEEDSCOPY(iter) &&
- ((iter->protection & VM_PROT_WRITE) ||
+ ((iter->protection & PROT_WRITE) ||
iter->object.uvm_obj == NULL)) {
amap_copy(map, iter, M_WAITOK, TRUE,
iter->start, iter->end);
@@ -1903,7 +1903,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first,
for (iter = first; error == 0 && iter != end;
iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) {
if (UVM_ET_ISHOLE(iter) || iter->start == iter->end ||
- iter->protection == VM_PROT_NONE)
+ iter->protection == PROT_NONE)
continue;
error = uvm_fault_wire(map, iter->start, iter->end,
@@ -1931,7 +1931,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first,
first = RB_NEXT(uvm_map_addr, &map->addr, first)) {
if (UVM_ET_ISHOLE(first) ||
first->start == first->end ||
- first->protection == VM_PROT_NONE)
+ first->protection == PROT_NONE)
continue;
first->wired_count--;
@@ -1945,7 +1945,7 @@ uvm_map_pageable_wire(struct vm_map *map, struct vm_map_entry *first,
for (; iter != end;
iter = RB_NEXT(uvm_map_addr, &map->addr, iter)) {
if (UVM_ET_ISHOLE(iter) || iter->start == iter->end ||
- iter->protection == VM_PROT_NONE)
+ iter->protection == PROT_NONE)
continue;
iter->wired_count--;
@@ -2910,7 +2910,7 @@ uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
*/
if (iter->protection != old_prot) {
mask = UVM_ET_ISCOPYONWRITE(iter) ?
- ~VM_PROT_WRITE : VM_PROT_ALL;
+ ~PROT_WRITE : PROT_MASK;
/* update pmap */
if ((iter->protection & mask) == PROT_NONE &&
@@ -2935,13 +2935,13 @@ uvm_map_protect(struct vm_map *map, vaddr_t start, vaddr_t end,
/*
* If the map is configured to lock any future mappings,
- * wire this entry now if the old protection was VM_PROT_NONE
- * and the new protection is not VM_PROT_NONE.
+ * wire this entry now if the old protection was PROT_NONE
+ * and the new protection is not PROT_NONE.
*/
if ((map->flags & VM_MAP_WIREFUTURE) != 0 &&
VM_MAPENT_ISWIRED(iter) == 0 &&
- old_prot == VM_PROT_NONE &&
- new_prot != VM_PROT_NONE) {
+ old_prot == PROT_NONE &&
+ new_prot != PROT_NONE) {
if (uvm_map_pageable(map, iter->start, iter->end,
FALSE, UVM_LK_ENTER | UVM_LK_EXIT) != 0) {
/*
@@ -3347,13 +3347,12 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map,
* calling pmap_protect needlessly.
*/
if (!UVM_ET_ISNEEDSCOPY(old_entry)) {
- if (old_entry->max_protection &
- VM_PROT_WRITE) {
+ if (old_entry->max_protection & PROT_WRITE) {
pmap_protect(old_map->pmap,
old_entry->start,
old_entry->end,
old_entry->protection &
- ~VM_PROT_WRITE);
+ ~PROT_WRITE);
pmap_update(old_map->pmap);
}
old_entry->etype |= UVM_ET_NEEDSCOPY;
@@ -3366,7 +3365,7 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map,
* we only need to protect the child if the
* parent has write access.
*/
- if (old_entry->max_protection & VM_PROT_WRITE)
+ if (old_entry->max_protection & PROT_WRITE)
protect_child = TRUE;
else
protect_child = FALSE;
@@ -3386,7 +3385,7 @@ uvm_mapent_forkcopy(struct vmspace *new_vm, struct vm_map *new_map,
pmap_protect(new_map->pmap, new_entry->start,
new_entry->end,
new_entry->protection &
- ~VM_PROT_WRITE);
+ ~PROT_WRITE);
}
}
@@ -3535,7 +3534,7 @@ uvm_map_hint(struct vmspace *vm, vm_prot_t prot)
* If executable skip first two pages, otherwise start
* after data + heap region.
*/
- if ((prot & VM_PROT_EXECUTE) != 0 &&
+ if ((prot & PROT_EXEC) != 0 &&
(vaddr_t)vm->vm_daddr >= I386_MAX_EXE_ADDR) {
addr = (PAGE_SIZE*2) +
(arc4random() & (I386_MAX_EXE_ADDR / 2 - 1));
@@ -3878,7 +3877,7 @@ uvm_map_extract(struct vm_map *srcmap, vaddr_t start, vsize_t len,
if (uvm_map_findspace(kernel_map, &tmp1, &tmp2, &dstaddr, len,
MAX(PAGE_SIZE, PMAP_PREFER_ALIGN()), PMAP_PREFER_OFFSET(start),
- VM_PROT_NONE, 0) != 0) {
+ PROT_NONE, 0) != 0) {
error = ENOMEM;
goto fail2;
}
@@ -4065,7 +4064,7 @@ deactivate_it:
KASSERT(pg->uanon == anon);
/* zap all mappings for the page. */
- pmap_page_protect(pg, VM_PROT_NONE);
+ pmap_page_protect(pg, PROT_NONE);
/* ...and deactivate the page. */
uvm_pagedeactivate(pg);
@@ -4108,7 +4107,7 @@ flush_object:
*/
if (uobj != NULL &&
((flags & PGO_FREE) == 0 ||
- ((entry->max_protection & VM_PROT_WRITE) != 0 &&
+ ((entry->max_protection & PROT_WRITE) != 0 &&
(entry->etype & UVM_ET_COPYONWRITE) == 0))) {
rv = uobj->pgops->pgo_flush(uobj,
cp_start - entry->start + entry->offset,
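
uvm_map_protect() above clips what reaches the pmap with ~PROT_WRITE for
copy-on-write entries and PROT_MASK otherwise. A sketch of that masking, defining
PROT_MASK locally on the assumption that it is simply the three protection bits
ORed together, as introduced alongside this change:

	#include <sys/mman.h>

	#ifndef PROT_MASK
	#define PROT_MASK	(PROT_READ | PROT_WRITE | PROT_EXEC)	/* assumed */
	#endif

	/* What uvm_map_protect() pushes into the pmap; illustration only. */
	static int
	pmap_prot_for(int new_prot, int copy_on_write)
	{
		int mask = copy_on_write ? ~PROT_WRITE : PROT_MASK;

		return new_prot & mask;
	}

	int
	main(void)
	{
		/* A copy-on-write rw- entry ends up mapped read-only. */
		return pmap_prot_for(PROT_READ | PROT_WRITE, 1) == PROT_READ ? 0 : 1;
	}
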
diff --git a/sys/uvm/uvm_mmap.c b/sys/uvm/uvm_mmap.c
index 9f914fe2140..3ed329127b7 100644
--- a/sys/uvm/uvm_mmap.c
+++ b/sys/uvm/uvm_mmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_mmap.c,v 1.99 2014/10/03 17:41:00 kettenis Exp $ */
+/* $OpenBSD: uvm_mmap.c,v 1.100 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_mmap.c,v 1.49 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -127,7 +127,7 @@ sys_mquery(struct proc *p, void *v, register_t *retval)
size = (vsize_t) SCARG(uap, len);
fd = SCARG(uap, fd);
- if ((prot & VM_PROT_ALL) != prot)
+ if ((prot & PROT_MASK) != prot)
return (EINVAL);
if (SCARG(uap, flags) & MAP_FIXED)
@@ -210,7 +210,7 @@ sys_mincore(struct proc *p, void *v, register_t *retval)
* Lock down vec, so our returned status isn't outdated by
* storing the status byte for a page.
*/
- if ((error = uvm_vslock(p, vec, npgs, VM_PROT_WRITE)) != 0) {
+ if ((error = uvm_vslock(p, vec, npgs, PROT_WRITE)) != 0) {
free(pgs, M_TEMP, 0);
return (error);
}
@@ -341,7 +341,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
* Fixup the old deprecated MAP_COPY into MAP_PRIVATE, and
* validate the flags.
*/
- if ((prot & VM_PROT_ALL) != prot)
+ if ((prot & PROT_MASK) != prot)
return (EINVAL);
if ((flags & MAP_FLAGMASK) != flags)
return (EINVAL);
@@ -435,11 +435,11 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
}
/* now check protection */
- maxprot = VM_PROT_EXECUTE;
+ maxprot = PROT_EXEC;
/* check read access */
if (fp->f_flag & FREAD)
- maxprot |= VM_PROT_READ;
+ maxprot |= PROT_READ;
else if (prot & PROT_READ) {
error = EACCES;
goto out;
@@ -458,7 +458,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
VOP_GETATTR(vp, &va, p->p_ucred, p)))
goto out;
if ((va.va_flags & (IMMUTABLE|APPEND)) == 0)
- maxprot |= VM_PROT_WRITE;
+ maxprot |= PROT_WRITE;
else if (prot & PROT_WRITE) {
error = EPERM;
goto out;
@@ -469,7 +469,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
}
} else {
/* MAP_PRIVATE mappings can always write to */
- maxprot |= VM_PROT_WRITE;
+ maxprot |= PROT_WRITE;
}
/* set handle to vnode */
@@ -485,7 +485,7 @@ sys_mmap(struct proc *p, void *v, register_t *retval)
is_anon: /* label for SunOS style /dev/zero */
handle = NULL;
- maxprot = VM_PROT_ALL;
+ maxprot = PROT_MASK;
pos = 0;
}
@@ -604,7 +604,7 @@ sys_munmap(struct proc *p, void *v, register_t *retval)
* interesting system call semantic: make sure entire range is
* allocated before allowing an unmap.
*/
- if (!uvm_map_checkprot(map, addr, addr + size, VM_PROT_NONE)) {
+ if (!uvm_map_checkprot(map, addr, addr + size, PROT_NONE)) {
vm_map_unlock(map);
return (EINVAL);
}
@@ -642,7 +642,7 @@ sys_mprotect(struct proc *p, void *v, register_t *retval)
size = (vsize_t)SCARG(uap, len);
prot = SCARG(uap, prot);
- if ((prot & VM_PROT_ALL) != prot)
+ if ((prot & PROT_MASK) != prot)
return (EINVAL);
/*
@@ -904,7 +904,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
struct uvm_object *uobj;
struct vnode *vp;
int error;
- int advice = UVM_ADV_NORMAL;
+ int advice = POSIX_MADV_NORMAL;
uvm_flag_t uvmflag = 0;
vsize_t align = 0; /* userland page size */
@@ -950,7 +950,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
vp = (struct vnode *) handle; /* get vnode */
if (vp->v_type != VCHR) {
uobj = uvn_attach(vp, (flags & MAP_SHARED) ?
- maxprot : (maxprot & ~VM_PROT_WRITE));
+ maxprot : (maxprot & ~PROT_WRITE));
/*
* XXXCDC: hack from old code
@@ -976,27 +976,27 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
* the uncache to kill the uvn and trigger I/O.
*/
if (flags & MAP_SHARED) {
- if ((prot & VM_PROT_WRITE) ||
- (maxprot & VM_PROT_WRITE)) {
+ if ((prot & PROT_WRITE) ||
+ (maxprot & PROT_WRITE)) {
uvm_vnp_uncache(vp);
}
}
} else {
uobj = udv_attach(vp->v_rdev,
(flags & MAP_SHARED) ? maxprot :
- (maxprot & ~VM_PROT_WRITE), foff, size);
+ (maxprot & ~PROT_WRITE), foff, size);
/*
* XXX Some devices don't like to be mapped with
* XXX PROT_EXEC, but we don't really have a
* XXX better way of handling this, right now
*/
if (uobj == NULL && (prot & PROT_EXEC) == 0) {
- maxprot &= ~VM_PROT_EXECUTE;
+ maxprot &= ~PROT_EXEC;
uobj = udv_attach(vp->v_rdev,
(flags & MAP_SHARED) ? maxprot :
- (maxprot & ~VM_PROT_WRITE), foff, size);
+ (maxprot & ~PROT_WRITE), foff, size);
}
- advice = UVM_ADV_RANDOM;
+ advice = POSIX_MADV_RANDOM;
}
if (uobj == NULL)
@@ -1020,7 +1020,7 @@ uvm_mmap(vm_map_t map, vaddr_t *addr, vsize_t size, vm_prot_t prot,
* POSIX 1003.1b -- if our address space was configured
* to lock all future mappings, wire the one we just made.
*/
- if (prot == VM_PROT_NONE) {
+ if (prot == PROT_NONE) {
/*
* No more work to do in this case.
*/
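
sys_mmap() builds maxprot from PROT_EXEC plus whatever the file's open mode
allows, as the hunks above show. A compact restatement of that derivation, where
can_read and can_write stand in for the FREAD/FWRITE and immutability checks;
illustration only:

	#include <sys/mman.h>

	static int
	mmap_maxprot(int can_read, int can_write)
	{
		int maxprot = PROT_EXEC;

		if (can_read)
			maxprot |= PROT_READ;
		if (can_write)
			maxprot |= PROT_WRITE;
		return maxprot;
	}

	int
	main(void)
	{
		/* A read-only file descriptor yields r-x as its ceiling. */
		return mmap_maxprot(1, 0) == (PROT_EXEC | PROT_READ) ? 0 : 1;
	}
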
diff --git a/sys/uvm/uvm_object.c b/sys/uvm/uvm_object.c
index afe6bd5993a..0e8246998eb 100644
--- a/sys/uvm/uvm_object.c
+++ b/sys/uvm/uvm_object.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_object.c,v 1.9 2014/09/14 14:17:27 jsg Exp $ */
+/* $OpenBSD: uvm_object.c,v 1.10 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright (c) 2006 The NetBSD Foundation, Inc.
@@ -35,6 +35,7 @@
*/
#include <sys/param.h>
+#include <sys/mman.h>
#include <uvm/uvm.h>
@@ -79,7 +80,7 @@ uvm_objwire(struct uvm_object *uobj, voff_t start, voff_t end,
/* Get the pages */
memset(pgs, 0, sizeof(pgs));
error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
- VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
+ PROT_READ | PROT_WRITE, POSIX_MADV_SEQUENTIAL,
PGO_ALLPAGES | PGO_SYNCIO);
if (error)
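
uvm_object.c now needs <sys/mman.h> because the pgo_get call spells its advice as
POSIX_MADV_SEQUENTIAL rather than UVM_ADV_SEQUENTIAL. The rename is only a rename
if the numeric values agree; a small check, assuming the traditional 0/1/2
assignment of the advice constants:

	#include <sys/mman.h>
	#include <stdio.h>

	int
	main(void)
	{
		/* Assumed to match the old UVM_ADV_NORMAL/RANDOM/SEQUENTIAL. */
		printf("NORMAL=%d RANDOM=%d SEQUENTIAL=%d\n",
		    POSIX_MADV_NORMAL, POSIX_MADV_RANDOM, POSIX_MADV_SEQUENTIAL);
		return 0;
	}
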
diff --git a/sys/uvm/uvm_page.c b/sys/uvm/uvm_page.c
index cf9064ccc6a..3c93ee8907b 100644
--- a/sys/uvm/uvm_page.c
+++ b/sys/uvm/uvm_page.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_page.c,v 1.131 2014/07/11 16:35:40 jsg Exp $ */
+/* $OpenBSD: uvm_page.c,v 1.132 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $ */
/*
@@ -385,7 +385,7 @@ uvm_pageboot_alloc(vsize_t size)
* Note this memory is no longer managed, so using
* pmap_kenter is safe.
*/
- pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(vaddr, paddr, PROT_READ | PROT_WRITE);
}
pmap_update(pmap_kernel());
return(addr);
@@ -1097,7 +1097,7 @@ uvm_page_unbusy(struct vm_page **pgs, int npgs)
uobj = pg->uobject;
if (uobj != NULL) {
uvm_lock_pageq();
- pmap_page_protect(pg, VM_PROT_NONE);
+ pmap_page_protect(pg, PROT_NONE);
/* XXX won't happen right now */
if (pg->pg_flags & PQ_AOBJ)
uao_dropswap(uobj,
diff --git a/sys/uvm/uvm_pager.c b/sys/uvm/uvm_pager.c
index 784d81373e5..63b276f1a7f 100644
--- a/sys/uvm/uvm_pager.c
+++ b/sys/uvm/uvm_pager.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pager.c,v 1.69 2014/09/14 14:17:27 jsg Exp $ */
+/* $OpenBSD: uvm_pager.c,v 1.70 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_pager.c,v 1.36 2000/11/27 18:26:41 chs Exp $ */
/*
@@ -233,9 +233,9 @@ uvm_pagermapin(struct vm_page **pps, int npages, int flags)
vsize_t size;
struct vm_page *pp;
- prot = VM_PROT_READ;
+ prot = PROT_READ;
if (flags & UVMPAGER_MAPIN_READ)
- prot |= VM_PROT_WRITE;
+ prot |= PROT_WRITE;
size = ptoa(npages);
KASSERT(size <= MAXBSIZE);
@@ -395,7 +395,7 @@ uvm_mk_pcluster(struct uvm_object *uobj, struct vm_page **pps, int *npages,
PG_RELEASED);
/* XXX: protect wired page? see above comment. */
- pmap_page_protect(pclust, VM_PROT_READ);
+ pmap_page_protect(pclust, PROT_READ);
if (!forward) {
ppsp--; /* back up one page */
*ppsp = pclust;
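
uvm_pagermapin() starts from PROT_READ and adds PROT_WRITE when the mapping is
used to read data in, since the pager then writes into the pages. The same
decision as a standalone helper, with MAPIN_READ as a placeholder for
UVMPAGER_MAPIN_READ:

	#include <sys/mman.h>

	#define MAPIN_READ	0x01	/* placeholder for UVMPAGER_MAPIN_READ */

	static int
	pagermap_prot(int flags)
	{
		int prot = PROT_READ;

		/* Reading from backing store means writing into the pages. */
		if (flags & MAPIN_READ)
			prot |= PROT_WRITE;
		return prot;
	}

	int
	main(void)
	{
		return pagermap_prot(MAPIN_READ) == (PROT_READ | PROT_WRITE) ? 0 : 1;
	}
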
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index d69dbd053a1..32d7b40060a 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.73 2014/09/14 14:17:27 jsg Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.74 2014/11/16 12:31:00 deraadt Exp $ */
/* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */
/*
@@ -473,7 +473,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
}
/* zap all mappings with pmap_page_protect... */
- pmap_page_protect(p, VM_PROT_NONE);
+ pmap_page_protect(p, PROT_NONE);
uvm_pagefree(p);
uvmexp.pdfreed++;
@@ -546,7 +546,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
swap_backed = ((p->pg_flags & PQ_SWAPBACKED) != 0);
atomic_setbits_int(&p->pg_flags, PG_BUSY);
UVM_PAGE_OWN(p, "scan_inactive");
- pmap_page_protect(p, VM_PROT_READ);
+ pmap_page_protect(p, PROT_READ);
uvmexp.pgswapout++;
/*
@@ -741,7 +741,7 @@ uvmpd_scan_inactive(struct pglist *pglst)
p->uanon = NULL;
uvm_anfree(anon); /* kills anon */
- pmap_page_protect(p, VM_PROT_NONE);
+ pmap_page_protect(p, PROT_NONE);
anon = NULL;
uvm_lock_pageq();
nextpg = TAILQ_NEXT(p, pageq);
@@ -920,7 +920,7 @@ uvmpd_scan(void)
* inactive pages.
*/
if (inactive_shortage > 0) {
- pmap_page_protect(p, VM_PROT_NONE);
+ pmap_page_protect(p, PROT_NONE);
/* no need to check wire_count as pg is "active" */
uvm_pagedeactivate(p);
uvmexp.pddeact++;
@@ -961,7 +961,7 @@ uvmpd_drop(struct pglist *pglst)
}
/* zap all mappings with pmap_page_protect... */
- pmap_page_protect(p, VM_PROT_NONE);
+ pmap_page_protect(p, PROT_NONE);
uvm_pagefree(p);
}
}
diff --git a/sys/uvm/uvm_pmap.h b/sys/uvm/uvm_pmap.h
index ec9af64fcb0..85dcb0037e5 100644
--- a/sys/uvm/uvm_pmap.h
+++ b/sys/uvm/uvm_pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pmap.h,v 1.22 2010/12/26 15:41:00 miod Exp $ */
+/* $OpenBSD: uvm_pmap.h,v 1.23 2014/11/16 12:31:01 deraadt Exp $ */
/* $NetBSD: uvm_pmap.h,v 1.1 2000/06/27 09:00:14 mrg Exp $ */
/*
@@ -86,7 +86,7 @@ typedef struct pmap_statistics *pmap_statistics_t;
#include <machine/pmap.h>
/*
- * Flags passed to pmap_enter(). Note the bottom 3 bits are VM_PROT_*
+ * Flags passed to pmap_enter(). Note the bottom 3 bits are PROT_*
* bits, used to indicate the access type that was made (to seed modified
* and referenced information).
*/
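
The uvm_pmap.h comment now states that the bottom three bits of the pmap_enter()
flags argument are PROT_* bits carrying the access type that seeds the modified
and referenced state. A sketch of splitting such a flags word; the wired-bit value
here is a made-up placeholder, the real one comes from the pmap headers:

	#include <sys/mman.h>
	#include <stdio.h>

	#define EX_PMAP_WIRED	0x00000010	/* placeholder value, assumed */

	int
	main(void)
	{
		int flags = PROT_READ | PROT_WRITE | EX_PMAP_WIRED;
		int access_type = flags & (PROT_READ | PROT_WRITE | PROT_EXEC);

		printf("access_type=%#x wired=%d\n",
		    access_type, (flags & EX_PMAP_WIRED) != 0);
		return 0;
	}
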
diff --git a/sys/uvm/uvm_unix.c b/sys/uvm/uvm_unix.c
index 193fd99cf23..81c82bff826 100644
--- a/sys/uvm/uvm_unix.c
+++ b/sys/uvm/uvm_unix.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_unix.c,v 1.50 2014/07/11 16:35:40 jsg Exp $ */
+/* $OpenBSD: uvm_unix.c,v 1.51 2014/11/16 12:31:01 deraadt Exp $ */
/* $NetBSD: uvm_unix.c,v 1.18 2000/09/13 15:00:25 thorpej Exp $ */
/*
@@ -85,8 +85,8 @@ sys_obreak(struct proc *p, void *v, register_t *retval)
if (new > old) {
error = uvm_map(&vm->vm_map, &old, new - old, NULL,
UVM_UNKNOWN_OFFSET, 0,
- UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RWX, UVM_INH_COPY,
- UVM_ADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED|
+ UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_MASK, UVM_INH_COPY,
+ POSIX_MADV_NORMAL, UVM_FLAG_AMAPPAD|UVM_FLAG_FIXED|
UVM_FLAG_OVERLAY|UVM_FLAG_COPYONW));
if (error) {
uprintf("sbrk: grow %ld failed, error = %d\n",
@@ -159,7 +159,7 @@ uvm_coredump(struct proc *p, struct vnode *vp, struct ucred *cred,
panic("uvm_coredump: user process with submap?");
}
- if (!(entry->protection & VM_PROT_WRITE) &&
+ if (!(entry->protection & PROT_WRITE) &&
entry->start != p->p_p->ps_sigcode)
continue;
@@ -268,7 +268,7 @@ uvm_coredump_walkmap(struct proc *p, void *iocookie,
panic("uvm_coredump: user process with submap?");
}
- if (!(entry->protection & VM_PROT_WRITE) &&
+ if (!(entry->protection & PROT_WRITE) &&
entry->start != p->p_p->ps_sigcode)
continue;
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index 07411a7d6ca..43fda694ace 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_vnode.c,v 1.84 2014/07/11 16:35:40 jsg Exp $ */
+/* $OpenBSD: uvm_vnode.c,v 1.85 2014/11/16 12:31:01 deraadt Exp $ */
/* $NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $ */
/*
@@ -170,7 +170,7 @@ uvn_attach(struct vnode *vp, vm_prot_t accessprot)
uvn->u_obj.uo_refs++; /* bump uvn ref! */
/* check for new writeable uvn */
- if ((accessprot & VM_PROT_WRITE) != 0 &&
+ if ((accessprot & PROT_WRITE) != 0 &&
(uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) {
LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist);
/* we are now on wlist! */
@@ -236,7 +236,7 @@ uvn_attach(struct vnode *vp, vm_prot_t accessprot)
uvn->u_size = used_vnode_size;
/* if write access, we need to add it to the wlist */
- if (accessprot & VM_PROT_WRITE) {
+ if (accessprot & PROT_WRITE) {
LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist);
uvn->u_flags |= UVM_VNODE_WRITEABLE; /* we are on wlist! */
}
@@ -648,7 +648,7 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
if ((pp->pg_flags & PG_CLEAN) != 0 &&
(flags & PGO_FREE) != 0 &&
(pp->pg_flags & PQ_ACTIVE) != 0)
- pmap_page_protect(pp, VM_PROT_NONE);
+ pmap_page_protect(pp, PROT_NONE);
if ((pp->pg_flags & PG_CLEAN) != 0 &&
pmap_is_modified(pp))
atomic_clearbits_int(&pp->pg_flags, PG_CLEAN);
@@ -661,7 +661,7 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
if (!needs_clean) {
if (flags & PGO_DEACTIVATE) {
if (pp->wire_count == 0) {
- pmap_page_protect(pp, VM_PROT_NONE);
+ pmap_page_protect(pp, PROT_NONE);
uvm_pagedeactivate(pp);
}
} else if (flags & PGO_FREE) {
@@ -674,7 +674,7 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
curoff -= PAGE_SIZE;
continue;
} else {
- pmap_page_protect(pp, VM_PROT_NONE);
+ pmap_page_protect(pp, PROT_NONE);
/* removed page from object */
uvm_pagefree(pp);
}
@@ -692,7 +692,7 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
*/
atomic_setbits_int(&pp->pg_flags, PG_BUSY);
UVM_PAGE_OWN(pp, "uvn_flush");
- pmap_page_protect(pp, VM_PROT_READ);
+ pmap_page_protect(pp, PROT_READ);
/* if we're async, free the page in aiodoned */
if ((flags & (PGO_FREE|PGO_SYNCIO)) == PGO_FREE)
atomic_setbits_int(&pp->pg_flags, PG_RELEASED);
@@ -786,7 +786,7 @@ ReTry:
/* dispose of page */
if (flags & PGO_DEACTIVATE) {
if (ptmp->wire_count == 0) {
- pmap_page_protect(ptmp, VM_PROT_NONE);
+ pmap_page_protect(ptmp, PROT_NONE);
uvm_pagedeactivate(ptmp);
}
} else if (flags & PGO_FREE &&
@@ -802,7 +802,7 @@ ReTry:
"lost!\n");
retval = FALSE;
}
- pmap_page_protect(ptmp, VM_PROT_NONE);
+ pmap_page_protect(ptmp, PROT_NONE);
uvm_pagefree(ptmp);
}