author     Theo de Raadt <deraadt@cvs.openbsd.org>    2014-11-16 12:31:02 +0000
committer  Theo de Raadt <deraadt@cvs.openbsd.org>    2014-11-16 12:31:02 +0000
commit     84110ac1ecd0b90236884f584c62f0aba5630fc2 (patch)
tree       fad09b733c1e1122314b073b82f6d36a1024e050 /sys/dev/pci/drm
parent     cbbf1bf46622bbdb478ef4df4e4e027e0b83fc62 (diff)
Replace a plethora of historical protection options with just PROT_NONE,
PROT_READ, PROT_WRITE, and PROT_EXEC from mman.h.  PROT_MASK is introduced
as the one true way of extracting those bits.  Remove UVM_ADV_* wrapper,
using the standard names.

ok doug guenther kettenis
Diffstat (limited to 'sys/dev/pci/drm')
 -rw-r--r--  sys/dev/pci/drm/i915/i915_gem.c          14
 -rw-r--r--  sys/dev/pci/drm/i915/i915_gem_tiling.c    4
 -rw-r--r--  sys/dev/pci/drm/i915/intel_ringbuffer.c  10
 -rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo.c              6
 -rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_util.c         8
 5 files changed, 21 insertions, 21 deletions
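
The substitution in the hunks below is mechanical: UVM_PROT_RW and
VM_PROT_READ|VM_PROT_WRITE become PROT_READ | PROT_WRITE, VM_PROT_NONE
becomes PROT_NONE, and UVM_ADV_RANDOM becomes POSIX_MADV_RANDOM, all spelled
with the standard <sys/mman.h> names.  A minimal sketch of the new idiom
follows (not part of this commit; the prot_bits() helper is hypothetical,
and PROT_MASK is assumed to cover exactly the read/write/execute bits):

#include <sys/mman.h>

/*
 * Sketch only: with PROT_MASK from <sys/mman.h> (assumed to equal
 * PROT_READ | PROT_WRITE | PROT_EXEC), extracting the protection bits
 * from a flags word is a single mask operation.
 */
static inline int
prot_bits(int flags)
{
	return (flags & PROT_MASK);
}
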
diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c
index 150fee67be2..64d9f0f0ca7 100644
--- a/sys/dev/pci/drm/i915/i915_gem.c
+++ b/sys/dev/pci/drm/i915/i915_gem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_gem.c,v 1.75 2014/09/20 21:17:43 kettenis Exp $ */
+/* $OpenBSD: i915_gem.c,v 1.76 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
@@ -306,7 +306,7 @@ kmap(struct vm_page *pg)
va = pmap_map_direct(pg);
#else
va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
- pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
#endif
return (void *)va;
@@ -1450,8 +1450,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
addr = 0;
ret = -uvm_map(&curproc->p_vmspace->vm_map, &addr, size,
- obj->uao, args->offset, 0, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
- UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
+ obj->uao, args->offset, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE, UVM_INH_SHARE, POSIX_MADV_RANDOM, 0));
if (ret == 0)
uao_reference(obj->uao);
drm_gem_object_unreference_unlocked(obj);
@@ -1473,7 +1473,7 @@ i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
drm_i915_private_t *dev_priv = dev->dev_private;
paddr_t paddr;
int lcv, ret;
- int write = !!(access_type & VM_PROT_WRITE);
+ int write = !!(access_type & PROT_WRITE);
vm_prot_t mapprot;
boolean_t locked = TRUE;
@@ -1527,7 +1527,7 @@ i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
* it wrong, and makes us fully coherent with the gpu re mmap.
*/
if (write == 0)
- mapprot &= ~VM_PROT_WRITE;
+ mapprot &= ~PROT_WRITE;
/* XXX try and be more efficient when we do this */
for (lcv = 0 ; lcv < npages ; lcv++, offset += PAGE_SIZE,
vaddr += PAGE_SIZE) {
@@ -1622,7 +1622,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
for (pg = &dev_priv->pgs[atop(obj->gtt_offset)];
pg != &dev_priv->pgs[atop(obj->gtt_offset + obj->base.size)];
pg++)
- pmap_page_protect(pg, VM_PROT_NONE);
+ pmap_page_protect(pg, PROT_NONE);
obj->fault_mappable = false;
}
diff --git a/sys/dev/pci/drm/i915/i915_gem_tiling.c b/sys/dev/pci/drm/i915/i915_gem_tiling.c
index 569d310fbe2..bf1d2a1dcd0 100644
--- a/sys/dev/pci/drm/i915/i915_gem_tiling.c
+++ b/sys/dev/pci/drm/i915/i915_gem_tiling.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_gem_tiling.c,v 1.14 2014/01/30 15:10:48 kettenis Exp $ */
+/* $OpenBSD: i915_gem_tiling.c,v 1.15 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
@@ -483,7 +483,7 @@ i915_gem_swizzle_page(struct vm_page *pg)
va = pmap_map_direct(pg);
#else
va = uvm_km_valloc_wait(phys_map, PAGE_SIZE);
- pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
#endif
vaddr = (char *)va;
diff --git a/sys/dev/pci/drm/i915/intel_ringbuffer.c b/sys/dev/pci/drm/i915/intel_ringbuffer.c
index 57ab540dd18..4ca0ea6f64b 100644
--- a/sys/dev/pci/drm/i915/intel_ringbuffer.c
+++ b/sys/dev/pci/drm/i915/intel_ringbuffer.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: intel_ringbuffer.c,v 1.20 2014/03/30 00:58:24 jsg Exp $ */
+/* $OpenBSD: intel_ringbuffer.c,v 1.21 2014/11/16 12:31:00 deraadt Exp $ */
/*
* Copyright © 2008-2010 Intel Corporation
*
@@ -475,8 +475,8 @@ init_pipe_control(struct intel_ring_buffer *ring)
pc->cpu_page = (volatile u_int32_t *)vm_map_min(kernel_map);
obj->base.uao->pgops->pgo_reference(obj->base.uao);
ret = uvm_map(kernel_map, (vaddr_t *)&pc->cpu_page,
- PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
- UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
+ PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE, UVM_INH_SHARE, POSIX_MADV_RANDOM, 0));
if (ret != 0) {
DRM_ERROR("Failed to map status page.\n");
obj->base.uao->pgops->pgo_detach(obj->base.uao);
@@ -1122,8 +1122,8 @@ static int init_status_page(struct intel_ring_buffer *ring)
ring->status_page.page_addr = (u_int32_t *)vm_map_min(kernel_map);
obj->base.uao->pgops->pgo_reference(obj->base.uao);
ret = uvm_map(kernel_map, (vaddr_t *)&ring->status_page.page_addr,
- PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(UVM_PROT_RW,
- UVM_PROT_RW, UVM_INH_SHARE, UVM_ADV_RANDOM, 0));
+ PAGE_SIZE, obj->base.uao, 0, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE, UVM_INH_SHARE, POSIX_MADV_RANDOM, 0));
if (ret != 0) {
obj->base.uao->pgops->pgo_detach(obj->base.uao);
ret = -ENOMEM;
diff --git a/sys/dev/pci/drm/ttm/ttm_bo.c b/sys/dev/pci/drm/ttm/ttm_bo.c
index 30e1eb9c20f..6b58efab9ce 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo.c,v 1.8 2014/04/12 06:08:22 jsg Exp $ */
+/* $OpenBSD: ttm_bo.c,v 1.9 2014/11/16 12:31:00 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -1649,14 +1649,14 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
page = PHYS_TO_VM_PAGE(paddr);
if (unlikely(page == NULL))
continue;
- pmap_page_protect(page, VM_PROT_NONE);
+ pmap_page_protect(page, PROT_NONE);
}
} else if (ttm) {
for (i = 0; i < ttm->num_pages; ++i) {
page = ttm->pages[i];
if (unlikely(page == NULL))
continue;
- pmap_page_protect(page, VM_PROT_NONE);
+ pmap_page_protect(page, PROT_NONE);
}
}
ttm_mem_io_free_vm(bo);
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_util.c b/sys/dev/pci/drm/ttm/ttm_bo_util.c
index 34cbd214769..9862576ef13 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_util.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_util.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo_util.c,v 1.5 2014/02/10 02:24:05 jsg Exp $ */
+/* $OpenBSD: ttm_bo_util.c,v 1.6 2014/11/16 12:31:00 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -531,7 +531,7 @@ kmap(struct vm_page *pg)
va = uvm_km_valloc(kernel_map, PAGE_SIZE);
if (va == 0)
return (NULL);
- pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), UVM_PROT_RW);
+ pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), PROT_READ | PROT_WRITE);
pmap_update(pmap_kernel());
#endif
return (void *)va;
@@ -565,8 +565,8 @@ vmap(struct vm_page **pages, unsigned int npages, unsigned long flags,
for (i = 0; i < npages; i++) {
pa = VM_PAGE_TO_PHYS(pages[i]) | prot;
pmap_enter(pmap_kernel(), va + (i * PAGE_SIZE), pa,
- VM_PROT_READ | VM_PROT_WRITE,
- VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
+ PROT_READ | PROT_WRITE,
+ PROT_READ | PROT_WRITE | PMAP_WIRED);
pmap_update(pmap_kernel());
}