Diffstat (limited to 'lib/libdrm')
-rw-r--r--  lib/libdrm/Makefile                   |   5
-rw-r--r--  lib/libdrm/Makefile.inc               |   4
-rw-r--r--  lib/libdrm/intel/intel_bufmgr.c       | 107
-rw-r--r--  lib/libdrm/intel/intel_bufmgr.h       |   5
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_fake.c  |  31
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_gem.c   | 366
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_priv.h  |  12
-rw-r--r--  lib/libdrm/intel/intel_chipset.h      |  43
-rw-r--r--  lib/libdrm/intel/intel_decode.c       |  23
-rw-r--r--  lib/libdrm/intel/shlib_version        |   2
-rw-r--r--  lib/libdrm/libdrm.h                   |  89
-rw-r--r--  lib/libdrm/radeon/r600_pci_ids.h      |   7
-rw-r--r--  lib/libdrm/radeon/radeon_bo.c         |  46
-rw-r--r--  lib/libdrm/radeon/radeon_bo_gem.c     |  31
-rw-r--r--  lib/libdrm/radeon/radeon_cs.c         |  50
-rw-r--r--  lib/libdrm/radeon/radeon_cs_gem.c     |   9
-rw-r--r--  lib/libdrm/radeon/radeon_cs_space.c   |  18
-rw-r--r--  lib/libdrm/radeon/radeon_surface.c    |  31
-rw-r--r--  lib/libdrm/xf86atomic.h               |   9
-rw-r--r--  lib/libdrm/xf86drm.c                  |   8
-rw-r--r--  lib/libdrm/xf86drmMode.c              |   2
-rw-r--r--  lib/libdrm/xf86drmMode.h              |   9
22 files changed, 703 insertions, 204 deletions
diff --git a/lib/libdrm/Makefile b/lib/libdrm/Makefile
index 70be6d32f..b996193de 100644
--- a/lib/libdrm/Makefile
+++ b/lib/libdrm/Makefile
@@ -1,4 +1,4 @@
-# $OpenBSD: Makefile,v 1.10 2013/08/08 10:52:48 jsg Exp $
+# $OpenBSD: Makefile,v 1.11 2015/02/07 01:34:35 jsg Exp $
.include <bsd.xconf.mk>
.include "${.CURDIR}/Makefile.inc"
@@ -15,7 +15,8 @@ INCSDIR= ${X11BASE}/include/
CPPFLAGS+= -I${.CURDIR} \
-I${X11BASE}/include \
-I/usr/include/dev/pci/drm \
- -DX_PRIVSEP
+ -DX_PRIVSEP \
+ -DHAVE_VISIBILITY
INCS= xf86drm.h \
xf86drmMode.h
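
The new -DHAVE_VISIBILITY define is what arms the drm_public/drm_private macros introduced by the libdrm.h header further down in this diff. As a minimal sketch of the pattern, assuming a GCC/Clang toolchain (exported_entry_point and internal_helper are hypothetical names, not part of libdrm):

    #if defined(HAVE_VISIBILITY)
    # define drm_public  __attribute__((visibility("default")))  /* kept in the dynamic symbol table */
    # define drm_private __attribute__((visibility("hidden")))   /* library-internal only */
    #else
    # define drm_public
    # define drm_private
    #endif

    drm_public  int exported_entry_point(void);   /* part of the shared-library ABI */
    drm_private int internal_helper(void);        /* hidden from library users */
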
diff --git a/lib/libdrm/Makefile.inc b/lib/libdrm/Makefile.inc
index fb938a647..446b057a4 100644
--- a/lib/libdrm/Makefile.inc
+++ b/lib/libdrm/Makefile.inc
@@ -1,6 +1,6 @@
-# $OpenBSD: Makefile.inc,v 1.13 2014/08/14 04:00:28 jsg Exp $
+# $OpenBSD: Makefile.inc,v 1.14 2015/02/07 01:34:35 jsg Exp $
-PACKAGE_VERSION= 2.4.56
+PACKAGE_VERSION= 2.4.59
NOPROFILE=
diff --git a/lib/libdrm/intel/intel_bufmgr.c b/lib/libdrm/intel/intel_bufmgr.c
index 905556f64..234cd13e1 100644
--- a/lib/libdrm/intel/intel_bufmgr.c
+++ b/lib/libdrm/intel/intel_bufmgr.c
@@ -37,6 +37,7 @@
#include <drm.h>
#include <i915_drm.h>
#include <pciaccess.h>
+#include "libdrm.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "xf86drm.h"
@@ -46,21 +47,35 @@
* Convenience functions for buffer management methods.
*/
-drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
- unsigned long size, unsigned int alignment)
+drm_public drm_intel_bo *
+drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
-drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
- const char *name,
- unsigned long size,
- unsigned int alignment)
+drm_public drm_intel_bo *
+drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}
-drm_intel_bo *
+drm_public drm_intel_bo *
+drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name, void *addr,
+ uint32_t tiling_mode,
+ uint32_t stride,
+ unsigned long size,
+ unsigned long flags)
+{
+ if (bufmgr->bo_alloc_userptr)
+ return bufmgr->bo_alloc_userptr(bufmgr, name, addr, tiling_mode,
+ stride, size, flags);
+ return NULL;
+}
+
+drm_public drm_intel_bo *
drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
@@ -69,12 +84,14 @@ drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
tiling_mode, pitch, flags);
}
-void drm_intel_bo_reference(drm_intel_bo *bo)
+drm_public void
+drm_intel_bo_reference(drm_intel_bo *bo)
{
bo->bufmgr->bo_reference(bo);
}
-void drm_intel_bo_unreference(drm_intel_bo *bo)
+drm_public void
+drm_intel_bo_unreference(drm_intel_bo *bo)
{
if (bo == NULL)
return;
@@ -82,24 +99,26 @@ void drm_intel_bo_unreference(drm_intel_bo *bo)
bo->bufmgr->bo_unreference(bo);
}
-int drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
+drm_public int
+drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
{
return buf->bufmgr->bo_map(buf, write_enable);
}
-int drm_intel_bo_unmap(drm_intel_bo *buf)
+drm_public int
+drm_intel_bo_unmap(drm_intel_bo *buf)
{
return buf->bufmgr->bo_unmap(buf);
}
-int
+drm_public int
drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
return bo->bufmgr->bo_subdata(bo, offset, size, data);
}
-int
+drm_public int
drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
@@ -118,24 +137,26 @@ drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
return 0;
}
-void drm_intel_bo_wait_rendering(drm_intel_bo *bo)
+drm_public void
+drm_intel_bo_wait_rendering(drm_intel_bo *bo)
{
bo->bufmgr->bo_wait_rendering(bo);
}
-void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
+drm_public void
+drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
{
bufmgr->destroy(bufmgr);
}
-int
+drm_public int
drm_intel_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
}
-int
+drm_public int
drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
unsigned int rings)
@@ -155,17 +176,20 @@ drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
}
}
-void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
+drm_public void
+drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
{
bufmgr->debug = enable_debug;
}
-int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
+drm_public int
+drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
{
return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}
-int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
+drm_public int
+drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
if (bo->bufmgr->bo_flink)
return bo->bufmgr->bo_flink(bo, name);
@@ -173,7 +197,7 @@ int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
return -ENODEV;
}
-int
+drm_public int
drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
@@ -184,7 +208,7 @@ drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
}
/* For fence registers, not GL fences */
-int
+drm_public int
drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
@@ -195,7 +219,8 @@ drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
}
-int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
+drm_public int
+drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
if (bo->bufmgr->bo_pin)
return bo->bufmgr->bo_pin(bo, alignment);
@@ -203,7 +228,8 @@ int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
return -ENODEV;
}
-int drm_intel_bo_unpin(drm_intel_bo *bo)
+drm_public int
+drm_intel_bo_unpin(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_unpin)
return bo->bufmgr->bo_unpin(bo);
@@ -211,8 +237,9 @@ int drm_intel_bo_unpin(drm_intel_bo *bo)
return -ENODEV;
}
-int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
- uint32_t stride)
+drm_public int
+drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t stride)
{
if (bo->bufmgr->bo_set_tiling)
return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
@@ -221,8 +248,9 @@ int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
return 0;
}
-int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
- uint32_t * swizzle_mode)
+drm_public int
+drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+ uint32_t * swizzle_mode)
{
if (bo->bufmgr->bo_get_tiling)
return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
@@ -232,40 +260,46 @@ int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
return 0;
}
-int drm_intel_bo_disable_reuse(drm_intel_bo *bo)
+drm_public int
+drm_intel_bo_disable_reuse(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_disable_reuse)
return bo->bufmgr->bo_disable_reuse(bo);
return 0;
}
-int drm_intel_bo_is_reusable(drm_intel_bo *bo)
+drm_public int
+drm_intel_bo_is_reusable(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_is_reusable)
return bo->bufmgr->bo_is_reusable(bo);
return 0;
}
-int drm_intel_bo_busy(drm_intel_bo *bo)
+drm_public int
+drm_intel_bo_busy(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_busy)
return bo->bufmgr->bo_busy(bo);
return 0;
}
-int drm_intel_bo_madvise(drm_intel_bo *bo, int madv)
+drm_public int
+drm_intel_bo_madvise(drm_intel_bo *bo, int madv)
{
if (bo->bufmgr->bo_madvise)
return bo->bufmgr->bo_madvise(bo, madv);
return -1;
}
-int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
+drm_public int
+drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
return bo->bufmgr->bo_references(bo, target_bo);
}
-int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
+drm_public int
+drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
if (bufmgr->get_pipe_from_crtc_id)
return bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
@@ -298,9 +332,8 @@ err:
return size;
}
-int drm_intel_get_aperture_sizes(int fd,
- size_t *mappable,
- size_t *total)
+drm_public int
+drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total)
{
struct drm_i915_gem_get_aperture aperture;
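
The drm_intel_bo_alloc_userptr() wrapper added above deliberately returns NULL when the backing bufmgr provides no bo_alloc_userptr hook, so callers are expected to fall back to an ordinary allocation. A caller-side sketch of that fallback (alloc_upload_bo is a hypothetical helper, and addr is assumed to be page-aligned as the kernel's userptr ioctl requires):

    #include <i915_drm.h>
    #include "intel_bufmgr.h"

    static drm_intel_bo *
    alloc_upload_bo(drm_intel_bufmgr *bufmgr, void *addr, unsigned long size)
    {
            drm_intel_bo *bo;

            /* Tiled userptr buffers are refused, so only ask for linear ones. */
            bo = drm_intel_bo_alloc_userptr(bufmgr, "upload", addr,
                                            I915_TILING_NONE, 0, size, 0);
            if (bo == NULL)         /* no kernel/backend userptr support */
                    bo = drm_intel_bo_alloc(bufmgr, "upload", size, 4096);

            return bo;
    }
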
diff --git a/lib/libdrm/intel/intel_bufmgr.h b/lib/libdrm/intel/intel_bufmgr.h
index 9383c722e..be83a56a4 100644
--- a/lib/libdrm/intel/intel_bufmgr.h
+++ b/lib/libdrm/intel/intel_bufmgr.h
@@ -113,6 +113,11 @@ drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
+drm_intel_bo *drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ void *addr, uint32_t tiling_mode,
+ uint32_t stride, unsigned long size,
+ unsigned long flags);
drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
const char *name,
int x, int y, int cpp,
diff --git a/lib/libdrm/intel/intel_bufmgr_fake.c b/lib/libdrm/intel/intel_bufmgr_fake.c
index d63fc815c..c4828faa2 100644
--- a/lib/libdrm/intel/intel_bufmgr_fake.c
+++ b/lib/libdrm/intel/intel_bufmgr_fake.c
@@ -49,6 +49,7 @@
#include "drm.h"
#include "i915_drm.h"
#include "mm.h"
+#include "libdrm.h"
#include "libdrm_lists.h"
/* Support gcc's __FUNCTION__ for people using other compilers */
@@ -248,7 +249,7 @@ FENCE_LTE(unsigned a, unsigned b)
return 0;
}
-void
+drm_public void
drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
unsigned int (*emit) (void *priv),
void (*wait) (unsigned int fence,
@@ -771,7 +772,7 @@ drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
* -- just evict everything
* -- and wait for idle
*/
-void
+drm_public void
drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
@@ -867,7 +868,7 @@ drm_intel_fake_bo_alloc_tiled(drm_intel_bufmgr * bufmgr,
4096);
}
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long offset,
@@ -962,7 +963,7 @@ drm_intel_fake_bo_unreference(drm_intel_bo *bo)
* Set the buffer as not requiring backing store, and instead get the callback
* invoked whenever it would be set dirty.
*/
-void
+drm_public void
drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
void (*invalidate_cb) (drm_intel_bo *bo,
void *ptr),
@@ -1416,7 +1417,7 @@ drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
bo_fake->write_domain = 0;
}
-void
+drm_public void
drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
int (*exec) (drm_intel_bo *bo,
unsigned int used,
@@ -1539,7 +1540,8 @@ drm_intel_fake_check_aperture_space(drm_intel_bo ** bo_array, int count)
* Used by the X Server on LeaveVT, when the card memory is no longer our
* own.
*/
-void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
+drm_public void
+drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
struct block *block, *tmp;
@@ -1573,21 +1575,20 @@ void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
pthread_mutex_unlock(&bufmgr_fake->lock);
}
-void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
- volatile unsigned int
- *last_dispatch)
+drm_public void
+drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
+ volatile unsigned int
+ *last_dispatch)
{
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}
-drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
- unsigned long low_offset,
- void *low_virtual,
- unsigned long size,
- volatile unsigned int
- *last_dispatch)
+drm_public drm_intel_bufmgr *
+drm_intel_bufmgr_fake_init(int fd, unsigned long low_offset,
+ void *low_virtual, unsigned long size,
+ volatile unsigned int *last_dispatch)
{
drm_intel_bufmgr_fake *bufmgr_fake;
diff --git a/lib/libdrm/intel/intel_bufmgr_gem.c b/lib/libdrm/intel/intel_bufmgr_gem.c
index 2905bdbad..a15a4d16e 100644
--- a/lib/libdrm/intel/intel_bufmgr_gem.c
+++ b/lib/libdrm/intel/intel_bufmgr_gem.c
@@ -48,7 +48,6 @@
#include <assert.h>
#include <pthread.h>
#include <sys/ioctl.h>
-#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>
@@ -57,6 +56,7 @@
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
+#include "libdrm.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
@@ -93,6 +93,8 @@ struct drm_intel_gem_bo_bucket {
typedef struct _drm_intel_bufmgr_gem {
drm_intel_bufmgr bufmgr;
+ atomic_t refcount;
+
int fd;
int max_relocs;
@@ -112,6 +114,8 @@ typedef struct _drm_intel_bufmgr_gem {
int num_buckets;
time_t time;
+ drmMMListHead managers;
+
drmMMListHead named;
drmMMListHead vma_cache;
int vma_count, vma_open, vma_max;
@@ -184,6 +188,11 @@ struct _drm_intel_bo_gem {
void *mem_virtual;
/** GTT virtual address for the buffer, saved across map/unmap cycles */
void *gtt_virtual;
+ /**
+ * Virtual address of the buffer allocated by user, used for userptr
+ * objects only.
+ */
+ void *user_virtual;
int map_count;
drmMMListHead vma_list;
@@ -223,6 +232,11 @@ struct _drm_intel_bo_gem {
bool idle;
/**
+ * Boolean of whether this buffer was allocated with userptr
+ */
+ bool is_userptr;
+
+ /**
* Size in bytes of this buffer and its relocation descendents.
*
* Used to avoid costly tree walking in
@@ -749,15 +763,16 @@ retry:
bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
bo_gem->stride = 0;
+ /* drm_intel_gem_bo_free calls DRMLISTDEL() for an uninitialized
+ list (vma_list), so better set the list head here */
+ DRMINITLISTHEAD(&bo_gem->name_list);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
tiling_mode,
stride)) {
drm_intel_gem_bo_free(&bo_gem->bo);
return NULL;
}
-
- DRMINITLISTHEAD(&bo_gem->name_list);
- DRMINITLISTHEAD(&bo_gem->vma_list);
}
bo_gem->name = name;
@@ -851,13 +866,87 @@ drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
tiling, stride);
}
+static drm_intel_bo *
+drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ void *addr,
+ uint32_t tiling_mode,
+ uint32_t stride,
+ unsigned long size,
+ unsigned long flags)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ drm_intel_bo_gem *bo_gem;
+ int ret;
+ struct drm_i915_gem_userptr userptr;
+
+ /* Tiling with userptr surfaces is not supported
+ * on all hardware so refuse it for time being.
+ */
+ if (tiling_mode != I915_TILING_NONE)
+ return NULL;
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ bo_gem->bo.size = size;
+
+ VG_CLEAR(userptr);
+ userptr.user_ptr = (uint64_t)((unsigned long)addr);
+ userptr.user_size = size;
+ userptr.flags = flags;
+
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_USERPTR,
+ &userptr);
+ if (ret != 0) {
+ DBG("bo_create_userptr: "
+ "ioctl failed with user ptr %p size 0x%lx, "
+ "user flags 0x%lx\n", addr, size, flags);
+ free(bo_gem);
+ return NULL;
+ }
+
+ bo_gem->gem_handle = userptr.handle;
+ bo_gem->bo.handle = bo_gem->gem_handle;
+ bo_gem->bo.bufmgr = bufmgr;
+ bo_gem->is_userptr = true;
+ bo_gem->bo.virtual = addr;
+ /* Save the address provided by user */
+ bo_gem->user_virtual = addr;
+ bo_gem->tiling_mode = I915_TILING_NONE;
+ bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+ bo_gem->stride = 0;
+
+ DRMINITLISTHEAD(&bo_gem->name_list);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
+
+ bo_gem->name = name;
+ atomic_set(&bo_gem->refcount, 1);
+ bo_gem->validate_index = -1;
+ bo_gem->reloc_tree_fences = 0;
+ bo_gem->used_as_reloc_target = false;
+ bo_gem->has_error = false;
+ bo_gem->reusable = false;
+
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+
+ DBG("bo_create_userptr: "
+ "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
+ addr, bo_gem->gem_handle, bo_gem->name,
+ size, stride, tiling_mode);
+
+ return &bo_gem->bo;
+}
+
/**
* Returns a drm_intel_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
* to another.
*/
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned int handle)
@@ -875,12 +964,14 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
* alternating names for the front/back buffer a linear search
* provides a sufficiently fast match.
*/
+ pthread_mutex_lock(&bufmgr_gem->lock);
for (list = bufmgr_gem->named.next;
list != &bufmgr_gem->named;
list = list->next) {
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
if (bo_gem->global_name == handle) {
drm_intel_gem_bo_reference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return &bo_gem->bo;
}
}
@@ -893,6 +984,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
if (ret != 0) {
DBG("Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return NULL;
}
/* Now see if someone has used a prime handle to get this
@@ -905,13 +997,16 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
if (bo_gem->gem_handle == open_arg.handle) {
drm_intel_gem_bo_reference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return &bo_gem->bo;
}
}
bo_gem = calloc(1, sizeof(*bo_gem));
- if (!bo_gem)
+ if (!bo_gem) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return NULL;
+ }
bo_gem->bo.size = open_arg.size;
bo_gem->bo.offset = 0;
@@ -933,6 +1028,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
&get_tiling);
if (ret != 0) {
drm_intel_gem_bo_unreference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return NULL;
}
bo_gem->tiling_mode = get_tiling.tiling_mode;
@@ -942,6 +1038,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
DRMINITLISTHEAD(&bo_gem->vma_list);
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
return &bo_gem->bo;
@@ -958,11 +1055,11 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
DRMLISTDEL(&bo_gem->vma_list);
if (bo_gem->mem_virtual) {
VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
- munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
bufmgr_gem->vma_count--;
}
if (bo_gem->gtt_virtual) {
- munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+ drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
bufmgr_gem->vma_count--;
}
@@ -1047,12 +1144,12 @@ static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
DRMLISTDELINIT(&bo_gem->vma_list);
if (bo_gem->mem_virtual) {
- munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
bo_gem->mem_virtual = NULL;
bufmgr_gem->vma_count--;
}
if (bo_gem->gtt_virtual) {
- munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+ drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
bo_gem->gtt_virtual = NULL;
bufmgr_gem->vma_count--;
}
@@ -1156,7 +1253,8 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
assert(atomic_read(&bo_gem->refcount) > 0);
- if (atomic_dec_and_test(&bo_gem->refcount)) {
+
+ if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
drm_intel_bufmgr_gem *bufmgr_gem =
(drm_intel_bufmgr_gem *) bo->bufmgr;
struct timespec time;
@@ -1164,8 +1262,12 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
clock_gettime(CLOCK_MONOTONIC, &time);
pthread_mutex_lock(&bufmgr_gem->lock);
- drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
- drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+
+ if (atomic_dec_and_test(&bo_gem->refcount)) {
+ drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
+ drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+ }
+
pthread_mutex_unlock(&bufmgr_gem->lock);
}
}
@@ -1177,6 +1279,12 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
struct drm_i915_gem_set_domain set_domain;
int ret;
+ if (bo_gem->is_userptr) {
+ /* Return the same user ptr */
+ bo->virtual = bo_gem->user_virtual;
+ return 0;
+ }
+
pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->map_count++ == 0)
@@ -1245,6 +1353,9 @@ map_gtt(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret;
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
if (bo_gem->map_count++ == 0)
drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
@@ -1274,9 +1385,9 @@ map_gtt(drm_intel_bo *bo)
}
/* and mmap it */
- bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
- MAP_SHARED, bufmgr_gem->fd,
- mmap_arg.offset);
+ bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bufmgr_gem->fd,
+ mmap_arg.offset);
if (bo_gem->gtt_virtual == MAP_FAILED) {
bo_gem->gtt_virtual = NULL;
ret = -errno;
@@ -1298,7 +1409,8 @@ map_gtt(drm_intel_bo *bo)
return 0;
}
-int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+drm_public int
+drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@@ -1356,7 +1468,8 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
* undefined).
*/
-int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
+drm_public int
+drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
#ifdef HAVE_VALGRIND
@@ -1389,13 +1502,18 @@ int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bufmgr_gem *bufmgr_gem;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret = 0;
if (bo == NULL)
return 0;
+ if (bo_gem->is_userptr)
+ return 0;
+
+ bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+
pthread_mutex_lock(&bufmgr_gem->lock);
if (bo_gem->map_count <= 0) {
@@ -1439,7 +1557,8 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
return ret;
}
-int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+drm_public int
+drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
return drm_intel_gem_bo_unmap(bo);
}
@@ -1453,6 +1572,9 @@ drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
struct drm_i915_gem_pwrite pwrite;
int ret;
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
VG_CLEAR(pwrite);
pwrite.handle = bo_gem->gem_handle;
pwrite.offset = offset;
@@ -1505,6 +1627,9 @@ drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
struct drm_i915_gem_pread pread;
int ret;
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
VG_CLEAR(pread);
pread.handle = bo_gem->gem_handle;
pread.offset = offset;
@@ -1554,7 +1679,8 @@ drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
* handle. Userspace must make sure this race does not occur if such precision
* is important.
*/
-int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
+drm_public int
+drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@@ -1589,7 +1715,7 @@ int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
* In combination with drm_intel_gem_bo_pin() and manual fence management, we
* can do tiled pixmaps this way.
*/
-void
+drm_public void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
@@ -1692,6 +1818,14 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
assert(offset <= bo->size - 4);
assert((write_domain & (write_domain - 1)) == 0);
+ /* An object needing a fence is a tiled buffer, so it won't have
+ * relocs to other buffers.
+ */
+ if (need_fence) {
+ assert(target_bo_gem->reloc_count == 0);
+ target_bo_gem->reloc_tree_fences = 1;
+ }
+
/* Make sure that we're not adding a reloc to something whose size has
* already been accounted for.
*/
@@ -1699,13 +1833,8 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
if (target_bo_gem != bo_gem) {
target_bo_gem->used_as_reloc_target = true;
bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
+ bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
}
- /* An object needing a fence is a tiled buffer, so it won't have
- * relocs to other buffers.
- */
- if (need_fence)
- target_bo_gem->reloc_tree_fences = 1;
- bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
bo_gem->relocs[bo_gem->reloc_count].offset = offset;
bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
@@ -1751,7 +1880,7 @@ drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
read_domains, write_domain, true);
}
-int
+drm_public int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
@@ -1772,9 +1901,10 @@ drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
* Any further drm_intel_bufmgr_check_aperture_space() queries
* involving this buffer in the tree are undefined after this call.
*/
-void
+drm_public void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int i;
struct timespec time;
@@ -1782,7 +1912,10 @@ drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
clock_gettime(CLOCK_MONOTONIC, &time);
assert(bo_gem->reloc_count >= start);
+
/* Unreference the cleared target buffers */
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
for (i = start; i < bo_gem->reloc_count; i++) {
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
if (&target_bo_gem->bo != bo) {
@@ -1792,6 +1925,9 @@ drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
}
}
bo_gem->reloc_count = start;
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
}
#ifndef __OpenBSD__
@@ -2105,7 +2241,7 @@ aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
bufmgr_gem->aub_offset += 4096;
}
-void
+drm_public void
drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
int x1, int y1, int width, int height,
enum aub_dump_bmp_format format,
@@ -2380,7 +2516,7 @@ drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
flags);
}
-int
+drm_public int
drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
int used, unsigned int flags)
{
@@ -2474,6 +2610,12 @@ drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret;
+ /* Tiling with userptr surfaces is not supported
+ * on all hardware so refuse it for time being.
+ */
+ if (bo_gem->is_userptr)
+ return -EINVAL;
+
/* Linear buffers have no stride. By ensuring that we only ever use
* stride 0 with linear buffers, we simplify our code.
*/
@@ -2499,7 +2641,7 @@ drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
return 0;
}
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
@@ -2516,25 +2658,29 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
* for named buffers, we must not create two bo's pointing at the same
* kernel object
*/
+ pthread_mutex_lock(&bufmgr_gem->lock);
for (list = bufmgr_gem->named.next;
list != &bufmgr_gem->named;
list = list->next) {
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
if (bo_gem->gem_handle == handle) {
drm_intel_gem_bo_reference(&bo_gem->bo);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return &bo_gem->bo;
}
}
if (ret) {
fprintf(stderr,"ret is %d %d\n", ret, errno);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return NULL;
}
bo_gem = calloc(1, sizeof(*bo_gem));
- if (!bo_gem)
+ if (!bo_gem) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return NULL;
-
+ }
/* Determine size of bo. The fd-to-handle ioctl really should
* return the size, but it doesn't. If we have kernel 3.12 or
* later, we can lseek on the prime fd to get the size. Older
@@ -2562,6 +2708,7 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
DRMINITLISTHEAD(&bo_gem->vma_list);
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
VG_CLEAR(get_tiling);
get_tiling.handle = bo_gem->gem_handle;
@@ -2580,14 +2727,16 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
return &bo_gem->bo;
}
-int
+drm_public int
drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ pthread_mutex_lock(&bufmgr_gem->lock);
if (DRMLISTEMPTY(&bo_gem->name_list))
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
DRM_CLOEXEC, prime_fd) != 0)
@@ -2611,15 +2760,20 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
VG_CLEAR(flink);
flink.handle = bo_gem->gem_handle;
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
- if (ret != 0)
+ if (ret != 0) {
+ pthread_mutex_unlock(&bufmgr_gem->lock);
return -errno;
+ }
bo_gem->global_name = flink.name;
bo_gem->reusable = false;
if (DRMLISTEMPTY(&bo_gem->name_list))
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
}
*name = bo_gem->global_name;
@@ -2633,7 +2787,7 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
* size is only bounded by how many buffers of that size we've managed to have
* in flight at once.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
@@ -2648,7 +2802,7 @@ drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
* allocation. If this option is not enabled, all relocs will have fence
* register allocated.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@@ -2920,7 +3074,7 @@ init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
}
}
-void
+drm_public void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@@ -2962,7 +3116,7 @@ get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
return devid;
}
-int
+drm_public int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@@ -2976,7 +3130,7 @@ drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
* This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
* for it to have any effect.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
const char *filename)
{
@@ -2995,7 +3149,7 @@ drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
* You can set up a GTT and upload your objects into the referenced
* space, then send off batchbuffers and get BMPs out the other end.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@@ -3040,7 +3194,8 @@ drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
/* Set up the GTT. The max we can handle is 256M */
aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
- aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
+ /* Need to use GTT_ENTRY type for recent emulator */
+ aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_GTT_ENTRY | 0 | AUB_TRACE_OP_DATA_WRITE);
aub_out(bufmgr_gem, 0); /* subtype */
aub_out(bufmgr_gem, 0); /* offset */
aub_out(bufmgr_gem, gtt_size); /* size */
@@ -3051,7 +3206,7 @@ drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
}
}
-drm_intel_context *
+drm_public drm_intel_context *
drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
@@ -3078,7 +3233,7 @@ drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
return context;
}
-void
+drm_public void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
drm_intel_bufmgr_gem *bufmgr_gem;
@@ -3101,7 +3256,7 @@ drm_intel_gem_context_destroy(drm_intel_context *ctx)
free(ctx);
}
-int
+drm_public int
drm_intel_get_reset_stats(drm_intel_context *ctx,
uint32_t *reset_count,
uint32_t *active,
@@ -3135,7 +3290,7 @@ drm_intel_get_reset_stats(drm_intel_context *ctx,
return ret;
}
-int
+drm_public int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result)
@@ -3175,7 +3330,7 @@ drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
* default state (no annotations), call this function with a \c count
* of zero.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
drm_intel_aub_annotation *annotations,
unsigned count)
@@ -3195,13 +3350,94 @@ drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
bo_gem->aub_annotation_count = count;
}
+static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
+static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
+
+static drm_intel_bufmgr_gem *
+drm_intel_bufmgr_gem_find(int fd)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+
+ DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
+ if (bufmgr_gem->fd == fd) {
+ atomic_inc(&bufmgr_gem->refcount);
+ return bufmgr_gem;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+ if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
+ pthread_mutex_lock(&bufmgr_list_mutex);
+
+ if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
+ DRMLISTDEL(&bufmgr_gem->managers);
+ drm_intel_bufmgr_gem_destroy(bufmgr);
+ }
+
+ pthread_mutex_unlock(&bufmgr_list_mutex);
+ }
+}
+
+static bool
+has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int ret;
+ void *ptr;
+ long pgsz;
+ struct drm_i915_gem_userptr userptr;
+ struct drm_gem_close close_bo;
+
+ pgsz = sysconf(_SC_PAGESIZE);
+ assert(pgsz > 0);
+
+ ret = posix_memalign(&ptr, pgsz, pgsz);
+ if (ret) {
+ DBG("Failed to get a page (%ld) for userptr detection!\n",
+ pgsz);
+ return false;
+ }
+
+ memset(&userptr, 0, sizeof(userptr));
+ userptr.user_ptr = (uint64_t)(unsigned long)ptr;
+ userptr.user_size = pgsz;
+
+retry:
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
+ if (ret) {
+ if (errno == ENODEV && userptr.flags == 0) {
+ userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
+ goto retry;
+ }
+ free(ptr);
+ return false;
+ }
+
+ close_bo.handle = userptr.handle;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
+ free(ptr);
+ if (ret) {
+ fprintf(stderr, "Failed to release test userptr object! (%d) "
+ "i915 kernel driver may not be sane!\n", errno);
+ return false;
+ }
+
+ return true;
+}
+
/**
* Initializes the GEM buffer manager, which uses the kernel to allocate, map,
* and manage map buffer objections.
*
* \param fd File descriptor of the opened DRM device.
*/
-drm_intel_bufmgr *
+drm_public drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
drm_intel_bufmgr_gem *bufmgr_gem;
@@ -3212,15 +3448,23 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bool exec2 = false;
#endif
+ pthread_mutex_lock(&bufmgr_list_mutex);
+
+ bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
+ if (bufmgr_gem)
+ goto exit;
+
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
if (bufmgr_gem == NULL)
- return NULL;
+ goto exit;
bufmgr_gem->fd = fd;
+ atomic_set(&bufmgr_gem->refcount, 1);
if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
free(bufmgr_gem);
- return NULL;
+ bufmgr_gem = NULL;
+ goto exit;
}
ret = drmIoctl(bufmgr_gem->fd,
@@ -3255,9 +3499,12 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->gen = 7;
else if (IS_GEN8(bufmgr_gem->pci_device))
bufmgr_gem->gen = 8;
+ else if (IS_GEN9(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 9;
else {
free(bufmgr_gem);
- return NULL;
+ bufmgr_gem = NULL;
+ goto exit;
}
if (IS_GEN3(bufmgr_gem->pci_device) &&
@@ -3291,6 +3538,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_relaxed_fencing = ret == 0;
+ if (has_userptr(bufmgr_gem))
+ bufmgr_gem->bufmgr.bo_alloc_userptr =
+ drm_intel_gem_bo_alloc_userptr;
+
gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_wait_timeout = ret == 0;
@@ -3377,7 +3628,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
#endif
bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
- bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+ bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
bufmgr_gem->bufmgr.debug = 0;
bufmgr_gem->bufmgr.check_aperture_space =
drm_intel_gem_check_aperture_space;
@@ -3393,5 +3644,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
bufmgr_gem->vma_max = -1; /* unlimited by default */
- return &bufmgr_gem->bufmgr;
+ DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
+
+exit:
+ pthread_mutex_unlock(&bufmgr_list_mutex);
+
+ return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
}
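
Two of the changes above combine into a lock-free fast path for dropping references: as used here, atomic_add_unless(&refcount, -1, 1) decrements the counter while it is above 1 and only reports back once it has reached the final reference, at which point the mutex is taken and the count re-checked with atomic_dec_and_test() before the object is torn down. A stripped-down sketch of that two-stage pattern (struct object, object_list_mutex and object_destroy() are illustrative names, not libdrm API):

    #include <pthread.h>
    #include "xf86atomic.h"    /* atomic_t, atomic_add_unless, atomic_dec_and_test */
    #include "libdrm_lists.h"  /* drmMMListHead, DRMLISTDEL */

    struct object {                         /* stand-in for drm_intel_bufmgr_gem */
            atomic_t refcount;
            drmMMListHead link;
    };

    static pthread_mutex_t object_list_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void object_destroy(struct object *obj);    /* hypothetical */

    static void
    object_unref(struct object *obj)
    {
            /* Fast path: counts above 1 are decremented without the mutex. */
            if (atomic_add_unless(&obj->refcount, -1, 1)) {
                    pthread_mutex_lock(&object_list_mutex);

                    /* Re-check under the lock: a concurrent lookup may have
                     * re-referenced the object in the meantime. */
                    if (atomic_dec_and_test(&obj->refcount)) {
                            DRMLISTDEL(&obj->link);
                            object_destroy(obj);
                    }

                    pthread_mutex_unlock(&object_list_mutex);
            }
    }
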
diff --git a/lib/libdrm/intel/intel_bufmgr_priv.h b/lib/libdrm/intel/intel_bufmgr_priv.h
index 2592d42d5..59ebd1860 100644
--- a/lib/libdrm/intel/intel_bufmgr_priv.h
+++ b/lib/libdrm/intel/intel_bufmgr_priv.h
@@ -62,6 +62,18 @@ struct _drm_intel_bufmgr {
unsigned int alignment);
/**
+ * Allocate a buffer object from an existing user accessible
+ * address malloc'd with the provided size.
+ * Alignment is used when mapping to the gtt.
+ * Flags may be I915_VMAP_READ_ONLY or I915_USERPTR_UNSYNCHRONIZED
+ */
+ drm_intel_bo *(*bo_alloc_userptr)(drm_intel_bufmgr *bufmgr,
+ const char *name, void *addr,
+ uint32_t tiling_mode, uint32_t stride,
+ unsigned long size,
+ unsigned long flags);
+
+ /**
* Allocate a tiled buffer object.
*
* Alignment for tiled objects is set automatically; the 'flags'
diff --git a/lib/libdrm/intel/intel_chipset.h b/lib/libdrm/intel/intel_chipset.h
index 6f9bfad95..e22a86735 100644
--- a/lib/libdrm/intel/intel_chipset.h
+++ b/lib/libdrm/intel/intel_chipset.h
@@ -165,6 +165,22 @@
#define PCI_CHIP_CHERRYVIEW_2 0x22b2
#define PCI_CHIP_CHERRYVIEW_3 0x22b3
+#define PCI_CHIP_SKYLAKE_ULT_GT2 0x1916
+#define PCI_CHIP_SKYLAKE_ULT_GT1 0x1906
+#define PCI_CHIP_SKYLAKE_ULT_GT3 0x1926
+#define PCI_CHIP_SKYLAKE_ULT_GT2F 0x1921
+#define PCI_CHIP_SKYLAKE_ULX_GT1 0x190E
+#define PCI_CHIP_SKYLAKE_ULX_GT2 0x191E
+#define PCI_CHIP_SKYLAKE_DT_GT2 0x1912
+#define PCI_CHIP_SKYLAKE_DT_GT1 0x1902
+#define PCI_CHIP_SKYLAKE_HALO_GT2 0x191B
+#define PCI_CHIP_SKYLAKE_HALO_GT3 0x192B
+#define PCI_CHIP_SKYLAKE_HALO_GT1 0x190B
+#define PCI_CHIP_SKYLAKE_SRV_GT2 0x191A
+#define PCI_CHIP_SKYLAKE_SRV_GT3 0x192A
+#define PCI_CHIP_SKYLAKE_SRV_GT1 0x190A
+#define PCI_CHIP_SKYLAKE_WKS_GT2 0x191D
+
#define IS_MOBILE(devid) ((devid) == PCI_CHIP_I855_GM || \
(devid) == PCI_CHIP_I915_GM || \
(devid) == PCI_CHIP_I945_GM || \
@@ -324,12 +340,37 @@
#define IS_GEN8(devid) (IS_BROADWELL(devid) || \
IS_CHERRYVIEW(devid))
+#define IS_SKL_GT1(devid) ((devid) == PCI_CHIP_SKYLAKE_ULT_GT1 || \
+ (devid) == PCI_CHIP_SKYLAKE_ULX_GT1 || \
+ (devid) == PCI_CHIP_SKYLAKE_DT_GT1 || \
+ (devid) == PCI_CHIP_SKYLAKE_HALO_GT1 || \
+ (devid) == PCI_CHIP_SKYLAKE_SRV_GT1)
+
+#define IS_SKL_GT2(devid) ((devid) == PCI_CHIP_SKYLAKE_ULT_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_ULT_GT2F || \
+ (devid) == PCI_CHIP_SKYLAKE_ULX_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_DT_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_HALO_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_SRV_GT2 || \
+ (devid) == PCI_CHIP_SKYLAKE_WKS_GT2)
+
+#define IS_SKL_GT3(devid) ((devid) == PCI_CHIP_SKYLAKE_ULT_GT3 || \
+ (devid) == PCI_CHIP_SKYLAKE_HALO_GT3 || \
+ (devid) == PCI_CHIP_SKYLAKE_SRV_GT3)
+
+#define IS_SKYLAKE(devid) (IS_SKL_GT1(devid) || \
+ IS_SKL_GT2(devid) || \
+ IS_SKL_GT3(devid))
+
+#define IS_GEN9(devid) IS_SKYLAKE(devid)
+
#define IS_9XX(dev) (IS_GEN3(dev) || \
IS_GEN4(dev) || \
IS_GEN5(dev) || \
IS_GEN6(dev) || \
IS_GEN7(dev) || \
- IS_GEN8(dev))
+ IS_GEN8(dev) || \
+ IS_GEN9(dev))
#endif /* _INTEL_CHIPSET_H */
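
The new IS_SKL_GT*/IS_SKYLAKE/IS_GEN9 macros are consumed the same way as the existing generation checks, mapping a PCI device id to a generation number, as intel_decode.c and drm_intel_bufmgr_gem_init() do elsewhere in this diff. A hypothetical helper showing the same idiom:

    #include "intel_chipset.h"

    static int
    intel_gen_from_devid(int devid)
    {
            if (IS_GEN9(devid))     /* Skylake ids added above */
                    return 9;
            if (IS_GEN8(devid))     /* Broadwell / Cherryview */
                    return 8;
            return 0;               /* older parts handled elsewhere */
    }
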
diff --git a/lib/libdrm/intel/intel_decode.c b/lib/libdrm/intel/intel_decode.c
index 61239dd96..7d5cbe5a1 100644
--- a/lib/libdrm/intel/intel_decode.c
+++ b/lib/libdrm/intel/intel_decode.c
@@ -21,6 +21,10 @@
* IN THE SOFTWARE.
*/
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
@@ -29,6 +33,7 @@
#include <stdarg.h>
#include <string.h>
+#include "libdrm.h"
#include "xf86drm.h"
#include "intel_chipset.h"
#include "intel_bufmgr.h"
@@ -3812,7 +3817,7 @@ decode_3d_i830(struct drm_intel_decode *ctx)
return 1;
}
-struct drm_intel_decode *
+drm_public struct drm_intel_decode *
drm_intel_decode_context_alloc(uint32_t devid)
{
struct drm_intel_decode *ctx;
@@ -3824,7 +3829,9 @@ drm_intel_decode_context_alloc(uint32_t devid)
ctx->devid = devid;
ctx->out = stdout;
- if (IS_GEN8(devid))
+ if (IS_GEN9(devid))
+ ctx->gen = 9;
+ else if (IS_GEN8(devid))
ctx->gen = 8;
else if (IS_GEN7(devid))
ctx->gen = 7;
@@ -3844,20 +3851,20 @@ drm_intel_decode_context_alloc(uint32_t devid)
return ctx;
}
-void
+drm_public void
drm_intel_decode_context_free(struct drm_intel_decode *ctx)
{
free(ctx);
}
-void
+drm_public void
drm_intel_decode_set_dump_past_end(struct drm_intel_decode *ctx,
int dump_past_end)
{
ctx->dump_past_end = !!dump_past_end;
}
-void
+drm_public void
drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
void *data, uint32_t hw_offset, int count)
{
@@ -3866,7 +3873,7 @@ drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
ctx->base_count = count;
}
-void
+drm_public void
drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
uint32_t head, uint32_t tail)
{
@@ -3874,7 +3881,7 @@ drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
ctx->tail = tail;
}
-void
+drm_public void
drm_intel_decode_set_output_file(struct drm_intel_decode *ctx,
FILE *out)
{
@@ -3888,7 +3895,7 @@ drm_intel_decode_set_output_file(struct drm_intel_decode *ctx,
* \param count number of DWORDs to decode in the batch buffer
* \param hw_offset hardware address for the buffer
*/
-void
+drm_public void
drm_intel_decode(struct drm_intel_decode *ctx)
{
int ret;
diff --git a/lib/libdrm/intel/shlib_version b/lib/libdrm/intel/shlib_version
index 3f0196ebf..83a67c373 100644
--- a/lib/libdrm/intel/shlib_version
+++ b/lib/libdrm/intel/shlib_version
@@ -1,2 +1,2 @@
major=3
-minor=1
+minor=2
diff --git a/lib/libdrm/libdrm.h b/lib/libdrm/libdrm.h
new file mode 100644
index 000000000..acfada5ce
--- /dev/null
+++ b/lib/libdrm/libdrm.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright © 2014 NVIDIA Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef LIBDRM_LIBDRM_H
+#define LIBDRM_LIBDRM_H
+
+#if defined(HAVE_VISIBILITY)
+# define drm_private __attribute__((visibility("hidden")))
+# define drm_public __attribute__((visibility("default")))
+#else
+# define drm_private
+# define drm_public
+#endif
+
+
+/**
+ * Static (compile-time) assertion.
+ * Basically, use COND to dimension an array. If COND is false/zero the
+ * array size will be -1 and we'll get a compilation error.
+ */
+#define STATIC_ASSERT(COND) \
+ do { \
+ (void) sizeof(char [1 - 2*!(COND)]); \
+ } while (0)
+
+
+#include <sys/mman.h>
+
+#if defined(ANDROID)
+#include <errno.h> /* for EINVAL */
+
+extern void *__mmap2(void *, size_t, int, int, int, size_t);
+
+static inline void *drm_mmap(void *addr, size_t length, int prot, int flags,
+ int fd, loff_t offset)
+{
+ /* offset must be aligned to 4096 (not necessarily the page size) */
+ if (offset & 4095) {
+ errno = EINVAL;
+ return MAP_FAILED;
+ }
+
+ return __mmap2(addr, length, prot, flags, fd, (size_t) (offset >> 12));
+}
+
+# define drm_munmap(addr, length) \
+ munmap(addr, length)
+
+
+#else
+
+/* assume large file support exists */
+# define drm_mmap(addr, length, prot, flags, fd, offset) \
+ mmap(addr, length, prot, flags, fd, offset)
+
+
+static inline int drm_munmap(void *addr, size_t length)
+{
+ /* Copied from configure code generated by AC_SYS_LARGEFILE */
+#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + \
+ (((off_t) 1 << 31) << 31))
+ STATIC_ASSERT(LARGE_OFF_T % 2147483629 == 721 &&
+ LARGE_OFF_T % 2147483647 == 1);
+#undef LARGE_OFF_T
+
+ return munmap(addr, length);
+}
+#endif
+
+#endif
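
The drm_mmap()/drm_munmap() wrappers defined in this new header are what the rest of the diff switches to: plain mmap/munmap everywhere except Android, where __mmap2() keeps large GEM mmap offsets usable with bionic's 32-bit off_t. A short usage sketch (map_object is a hypothetical helper; the offset is assumed to come from a kernel mmap ioctl such as DRM_IOCTL_I915_GEM_MMAP_GTT):

    #include <stdint.h>
    #include <sys/mman.h>
    #include "libdrm.h"

    static void *
    map_object(int fd, uint64_t mmap_offset, size_t size)
    {
            void *ptr = drm_mmap(NULL, size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED, fd, mmap_offset);
            if (ptr == MAP_FAILED)
                    return NULL;

            /* ... access the mapping ... */

            return ptr;     /* release later with drm_munmap(ptr, size) */
    }
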
diff --git a/lib/libdrm/radeon/r600_pci_ids.h b/lib/libdrm/radeon/r600_pci_ids.h
index de25f1697..3e1136db2 100644
--- a/lib/libdrm/radeon/r600_pci_ids.h
+++ b/lib/libdrm/radeon/r600_pci_ids.h
@@ -366,6 +366,7 @@ CHIPSET(0x6828, VERDE_6828, VERDE)
CHIPSET(0x6829, VERDE_6829, VERDE)
CHIPSET(0x682A, VERDE_682A, VERDE)
CHIPSET(0x682B, VERDE_682B, VERDE)
+CHIPSET(0x682C, VERDE_682C, VERDE)
CHIPSET(0x682D, VERDE_682D, VERDE)
CHIPSET(0x682F, VERDE_682F, VERDE)
CHIPSET(0x6830, VERDE_6830, VERDE)
@@ -382,8 +383,11 @@ CHIPSET(0x6600, OLAND_6600, OLAND)
CHIPSET(0x6601, OLAND_6601, OLAND)
CHIPSET(0x6602, OLAND_6602, OLAND)
CHIPSET(0x6603, OLAND_6603, OLAND)
+CHIPSET(0x6604, OLAND_6604, OLAND)
+CHIPSET(0x6605, OLAND_6605, OLAND)
CHIPSET(0x6606, OLAND_6606, OLAND)
CHIPSET(0x6607, OLAND_6607, OLAND)
+CHIPSET(0x6608, OLAND_6608, OLAND)
CHIPSET(0x6610, OLAND_6610, OLAND)
CHIPSET(0x6611, OLAND_6611, OLAND)
CHIPSET(0x6613, OLAND_6613, OLAND)
@@ -401,6 +405,8 @@ CHIPSET(0x666F, HAINAN_666F, HAINAN)
CHIPSET(0x6640, BONAIRE_6640, BONAIRE)
CHIPSET(0x6641, BONAIRE_6641, BONAIRE)
+CHIPSET(0x6646, BONAIRE_6646, BONAIRE)
+CHIPSET(0x6647, BONAIRE_6647, BONAIRE)
CHIPSET(0x6649, BONAIRE_6649, BONAIRE)
CHIPSET(0x6650, BONAIRE_6650, BONAIRE)
CHIPSET(0x6651, BONAIRE_6651, BONAIRE)
@@ -460,6 +466,7 @@ CHIPSET(0x1313, KAVERI_1313, KAVERI)
CHIPSET(0x1315, KAVERI_1315, KAVERI)
CHIPSET(0x1316, KAVERI_1316, KAVERI)
CHIPSET(0x1317, KAVERI_1317, KAVERI)
+CHIPSET(0x1318, KAVERI_1318, KAVERI)
CHIPSET(0x131B, KAVERI_131B, KAVERI)
CHIPSET(0x131C, KAVERI_131C, KAVERI)
CHIPSET(0x131D, KAVERI_131D, KAVERI)
diff --git a/lib/libdrm/radeon/radeon_bo.c b/lib/libdrm/radeon/radeon_bo.c
index 6a0f8e792..865e3f7e0 100644
--- a/lib/libdrm/radeon/radeon_bo.c
+++ b/lib/libdrm/radeon/radeon_bo.c
@@ -29,10 +29,14 @@
* Dave Airlie
* Jérôme Glisse <glisse@freedesktop.org>
*/
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+#include <libdrm.h>
#include <radeon_bo.h>
#include <radeon_bo_int.h>
-void radeon_bo_debug(struct radeon_bo *bo, const char *op)
+drm_public void radeon_bo_debug(struct radeon_bo *bo, const char *op)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
@@ -40,26 +44,23 @@ void radeon_bo_debug(struct radeon_bo *bo, const char *op)
op, bo, bo->handle, boi->size, boi->cref);
}
-struct radeon_bo *radeon_bo_open(struct radeon_bo_manager *bom,
- uint32_t handle,
- uint32_t size,
- uint32_t alignment,
- uint32_t domains,
- uint32_t flags)
+drm_public struct radeon_bo *
+radeon_bo_open(struct radeon_bo_manager *bom, uint32_t handle, uint32_t size,
+ uint32_t alignment, uint32_t domains, uint32_t flags)
{
struct radeon_bo *bo;
bo = bom->funcs->bo_open(bom, handle, size, alignment, domains, flags);
return bo;
}
-void radeon_bo_ref(struct radeon_bo *bo)
+drm_public void radeon_bo_ref(struct radeon_bo *bo)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
boi->cref++;
boi->bom->funcs->bo_ref(boi);
}
-struct radeon_bo *radeon_bo_unref(struct radeon_bo *bo)
+drm_public struct radeon_bo *radeon_bo_unref(struct radeon_bo *bo)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
if (bo == NULL)
@@ -69,19 +70,19 @@ struct radeon_bo *radeon_bo_unref(struct radeon_bo *bo)
return boi->bom->funcs->bo_unref(boi);
}
-int radeon_bo_map(struct radeon_bo *bo, int write)
+drm_public int radeon_bo_map(struct radeon_bo *bo, int write)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
return boi->bom->funcs->bo_map(boi, write);
}
-int radeon_bo_unmap(struct radeon_bo *bo)
+drm_public int radeon_bo_unmap(struct radeon_bo *bo)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
return boi->bom->funcs->bo_unmap(boi);
}
-int radeon_bo_wait(struct radeon_bo *bo)
+drm_public int radeon_bo_wait(struct radeon_bo *bo)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
if (!boi->bom->funcs->bo_wait)
@@ -89,27 +90,29 @@ int radeon_bo_wait(struct radeon_bo *bo)
return boi->bom->funcs->bo_wait(boi);
}
-int radeon_bo_is_busy(struct radeon_bo *bo, uint32_t *domain)
+drm_public int radeon_bo_is_busy(struct radeon_bo *bo, uint32_t *domain)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
return boi->bom->funcs->bo_is_busy(boi, domain);
}
-int radeon_bo_set_tiling(struct radeon_bo *bo,
- uint32_t tiling_flags, uint32_t pitch)
+drm_public int
+radeon_bo_set_tiling(struct radeon_bo *bo,
+ uint32_t tiling_flags, uint32_t pitch)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
return boi->bom->funcs->bo_set_tiling(boi, tiling_flags, pitch);
}
-int radeon_bo_get_tiling(struct radeon_bo *bo,
- uint32_t *tiling_flags, uint32_t *pitch)
+drm_public int
+radeon_bo_get_tiling(struct radeon_bo *bo,
+ uint32_t *tiling_flags, uint32_t *pitch)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
return boi->bom->funcs->bo_get_tiling(boi, tiling_flags, pitch);
}
-int radeon_bo_is_static(struct radeon_bo *bo)
+drm_public int radeon_bo_is_static(struct radeon_bo *bo)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
if (boi->bom->funcs->bo_is_static)
@@ -117,18 +120,19 @@ int radeon_bo_is_static(struct radeon_bo *bo)
return 0;
}
-int radeon_bo_is_referenced_by_cs(struct radeon_bo *bo, struct radeon_cs *cs)
+drm_public int
+radeon_bo_is_referenced_by_cs(struct radeon_bo *bo, struct radeon_cs *cs)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
return boi->cref > 1;
}
-uint32_t radeon_bo_get_handle(struct radeon_bo *bo)
+drm_public uint32_t radeon_bo_get_handle(struct radeon_bo *bo)
{
return bo->handle;
}
-uint32_t radeon_bo_get_src_domain(struct radeon_bo *bo)
+drm_public uint32_t radeon_bo_get_src_domain(struct radeon_bo *bo)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
uint32_t src_domain;
diff --git a/lib/libdrm/radeon/radeon_bo_gem.c b/lib/libdrm/radeon/radeon_bo_gem.c
index ebfaebb3a..3bcbf40ed 100644
--- a/lib/libdrm/radeon/radeon_bo_gem.c
+++ b/lib/libdrm/radeon/radeon_bo_gem.c
@@ -36,8 +36,8 @@
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/mman.h>
#include <errno.h>
+#include "libdrm.h"
#include "xf86drm.h"
#include "xf86atomic.h"
#include "drm.h"
@@ -134,7 +134,7 @@ static struct radeon_bo *bo_unref(struct radeon_bo_int *boi)
return (struct radeon_bo *)boi;
}
if (bo_gem->priv_ptr) {
- munmap(bo_gem->priv_ptr, boi->size);
+ drm_munmap(bo_gem->priv_ptr, boi->size);
}
/* Zero out args to make valgrind happy */
@@ -178,7 +178,7 @@ static int bo_map(struct radeon_bo_int *boi, int write)
boi, boi->handle, r);
return r;
}
- ptr = mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED, boi->bom->fd, args.addr_ptr);
+ ptr = drm_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED, boi->bom->fd, args.addr_ptr);
if (ptr == MAP_FAILED)
return -errno;
bo_gem->priv_ptr = ptr;
@@ -197,7 +197,7 @@ static int bo_unmap(struct radeon_bo_int *boi)
if (--bo_gem->map_count > 0) {
return 0;
}
- //munmap(bo->ptr, bo->size);
+ //drm_munmap(bo->ptr, bo->size);
boi->ptr = NULL;
return 0;
}
@@ -283,7 +283,7 @@ static struct radeon_bo_funcs bo_gem_funcs = {
bo_is_busy,
};
-struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd)
+drm_public struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd)
{
struct bo_manager_gem *bomg;
@@ -296,7 +296,7 @@ struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd)
return (struct radeon_bo_manager*)bomg;
}
-void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom)
+drm_public void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom)
{
struct bo_manager_gem *bomg = (struct bo_manager_gem*)bom;
@@ -306,19 +306,22 @@ void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom)
free(bomg);
}
-uint32_t radeon_gem_name_bo(struct radeon_bo *bo)
+drm_public uint32_t
+radeon_gem_name_bo(struct radeon_bo *bo)
{
struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
return bo_gem->name;
}
-void *radeon_gem_get_reloc_in_cs(struct radeon_bo *bo)
+drm_public void *
+radeon_gem_get_reloc_in_cs(struct radeon_bo *bo)
{
struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
return &bo_gem->reloc_in_cs;
}
-int radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name)
+drm_public int
+radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name)
{
struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
@@ -339,7 +342,8 @@ int radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name)
return 0;
}
-int radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
+drm_public int
+radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
{
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
struct drm_radeon_gem_set_domain args;
@@ -356,7 +360,7 @@ int radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t
return r;
}
-int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle)
+drm_public int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle)
{
struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
int ret;
@@ -365,9 +369,8 @@ int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle)
return ret;
}
-struct radeon_bo *radeon_gem_bo_open_prime(struct radeon_bo_manager *bom,
- int fd_handle,
- uint32_t size)
+drm_public struct radeon_bo *
+radeon_gem_bo_open_prime(struct radeon_bo_manager *bom, int fd_handle, uint32_t size)
{
struct radeon_bo_gem *bo;
int r;
diff --git a/lib/libdrm/radeon/radeon_cs.c b/lib/libdrm/radeon/radeon_cs.c
index d0e922be0..fe5bbcec4 100644
--- a/lib/libdrm/radeon/radeon_cs.c
+++ b/lib/libdrm/radeon/radeon_cs.c
@@ -1,19 +1,22 @@
-
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+#include "libdrm.h"
#include <stdio.h>
#include "radeon_cs.h"
#include "radeon_cs_int.h"
-struct radeon_cs *radeon_cs_create(struct radeon_cs_manager *csm, uint32_t ndw)
+drm_public struct radeon_cs *
+radeon_cs_create(struct radeon_cs_manager *csm, uint32_t ndw)
{
struct radeon_cs_int *csi = csm->funcs->cs_create(csm, ndw);
return (struct radeon_cs *)csi;
}
-int radeon_cs_write_reloc(struct radeon_cs *cs,
- struct radeon_bo *bo,
- uint32_t read_domain,
- uint32_t write_domain,
- uint32_t flags)
+drm_public int
+radeon_cs_write_reloc(struct radeon_cs *cs, struct radeon_bo *bo,
+ uint32_t read_domain, uint32_t write_domain,
+ uint32_t flags)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
@@ -24,56 +27,54 @@ int radeon_cs_write_reloc(struct radeon_cs *cs,
flags);
}
-int radeon_cs_begin(struct radeon_cs *cs,
- uint32_t ndw,
- const char *file,
- const char *func,
- int line)
+drm_public int
+radeon_cs_begin(struct radeon_cs *cs, uint32_t ndw,
+ const char *file, const char *func, int line)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
return csi->csm->funcs->cs_begin(csi, ndw, file, func, line);
}
-int radeon_cs_end(struct radeon_cs *cs,
- const char *file,
- const char *func,
- int line)
+drm_public int
+radeon_cs_end(struct radeon_cs *cs,
+ const char *file, const char *func, int line)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
return csi->csm->funcs->cs_end(csi, file, func, line);
}
-int radeon_cs_emit(struct radeon_cs *cs)
+drm_public int radeon_cs_emit(struct radeon_cs *cs)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
return csi->csm->funcs->cs_emit(csi);
}
-int radeon_cs_destroy(struct radeon_cs *cs)
+drm_public int radeon_cs_destroy(struct radeon_cs *cs)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
return csi->csm->funcs->cs_destroy(csi);
}
-int radeon_cs_erase(struct radeon_cs *cs)
+drm_public int radeon_cs_erase(struct radeon_cs *cs)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
return csi->csm->funcs->cs_erase(csi);
}
-int radeon_cs_need_flush(struct radeon_cs *cs)
+drm_public int radeon_cs_need_flush(struct radeon_cs *cs)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
return csi->csm->funcs->cs_need_flush(csi);
}
-void radeon_cs_print(struct radeon_cs *cs, FILE *file)
+drm_public void radeon_cs_print(struct radeon_cs *cs, FILE *file)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
csi->csm->funcs->cs_print(csi, file);
}
-void radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit)
+drm_public void
+radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
if (domain == RADEON_GEM_DOMAIN_VRAM)
@@ -82,14 +83,15 @@ void radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit)
csi->csm->gart_limit = limit;
}
-void radeon_cs_space_set_flush(struct radeon_cs *cs, void (*fn)(void *), void *data)
+drm_public void radeon_cs_space_set_flush(struct radeon_cs *cs,
+ void (*fn)(void *), void *data)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
csi->space_flush_fn = fn;
csi->space_flush_data = data;
}
-uint32_t radeon_cs_get_id(struct radeon_cs *cs)
+drm_public uint32_t radeon_cs_get_id(struct radeon_cs *cs)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
return csi->id;
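
The radeon_cs.c changes are purely export annotations; every wrapper still forwards to the manager's function table. As a reminder of how these entry points fit together, here is a hedged usage sketch, not part of the patch: submit_example() and the packet contents are made up, and radeon_cs_write_dword() is assumed to be the usual static inline helper from radeon_cs.h.

#include <errno.h>
#include "radeon_cs.h"   /* assumed to pull in radeon_bo.h and the domain flags */

static int submit_example(struct radeon_cs_manager *csm, struct radeon_bo *bo)
{
    struct radeon_cs *cs = radeon_cs_create(csm, 1024 /* dwords */);
    if (!cs)
        return -ENOMEM;

    /* Reserve space, emit dwords and a relocation, then close the section. */
    radeon_cs_begin(cs, 4, __FILE__, __func__, __LINE__);
    radeon_cs_write_dword(cs, 0 /* packet header, elided */);
    radeon_cs_write_reloc(cs, bo, RADEON_GEM_DOMAIN_GTT, 0, 0);
    radeon_cs_end(cs, __FILE__, __func__, __LINE__);

    radeon_cs_emit(cs);     /* hand the stream to the kernel */
    radeon_cs_erase(cs);    /* recycle the buffer for the next submission */
    radeon_cs_destroy(cs);
    return 0;
}
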
diff --git a/lib/libdrm/radeon/radeon_cs_gem.c b/lib/libdrm/radeon/radeon_cs_gem.c
index b87c6b136..705ee0564 100644
--- a/lib/libdrm/radeon/radeon_cs_gem.c
+++ b/lib/libdrm/radeon/radeon_cs_gem.c
@@ -29,12 +29,14 @@
* Nicolai Haehnle <prefect_@gmx.net>
* Jérôme Glisse <glisse@freedesktop.org>
*/
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
-#include <sys/mman.h>
#include <sys/ioctl.h>
#include "radeon_cs.h"
#include "radeon_cs_int.h"
@@ -42,6 +44,7 @@
#include "radeon_cs_gem.h"
#include "radeon_bo_gem.h"
#include "drm.h"
+#include "libdrm.h"
#include "xf86drm.h"
#include "xf86atomic.h"
#include "radeon_drm.h"
@@ -533,7 +536,7 @@ static int radeon_get_device_id(int fd, uint32_t *device_id)
return r;
}
-struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
+drm_public struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
{
struct radeon_cs_manager_gem *csm;
@@ -547,7 +550,7 @@ struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
return &csm->base;
}
-void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)
+drm_public void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)
{
free(csm);
}
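
Beyond the config.h guard and the switch from <sys/mman.h> to "libdrm.h", only the GEM CS manager constructor and destructor gain drm_public here. A minimal, illustrative pairing of the two (the helper names are made up; drmOpen()/drmClose() are the stock xf86drm calls):

#include "xf86drm.h"
#include "radeon_cs.h"
#include "radeon_cs_gem.h"

static struct radeon_cs_manager *open_cs_manager(int *out_fd)
{
    int fd = drmOpen("radeon", NULL);   /* any open radeon DRM fd works */
    if (fd < 0)
        return NULL;
    *out_fd = fd;
    return radeon_cs_manager_gem_ctor(fd);
}

static void close_cs_manager(struct radeon_cs_manager *csm, int fd)
{
    radeon_cs_manager_gem_dtor(csm);
    drmClose(fd);
}
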
diff --git a/lib/libdrm/radeon/radeon_cs_space.c b/lib/libdrm/radeon/radeon_cs_space.c
index be047a7bf..cca650bf5 100644
--- a/lib/libdrm/radeon/radeon_cs_space.c
+++ b/lib/libdrm/radeon/radeon_cs_space.c
@@ -25,9 +25,13 @@
*/
/*
*/
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
+#include "libdrm.h"
#include "radeon_cs.h"
#include "radeon_bo_int.h"
#include "radeon_cs_int.h"
@@ -161,7 +165,9 @@ static int radeon_cs_do_space_check(struct radeon_cs_int *cs, struct radeon_cs_s
return RADEON_CS_SPACE_OK;
}
-void radeon_cs_space_add_persistent_bo(struct radeon_cs *cs, struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
+drm_public void
+radeon_cs_space_add_persistent_bo(struct radeon_cs *cs, struct radeon_bo *bo,
+ uint32_t read_domains, uint32_t write_domain)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
@@ -203,9 +209,9 @@ again:
return 0;
}
-int radeon_cs_space_check_with_bo(struct radeon_cs *cs,
- struct radeon_bo *bo,
- uint32_t read_domains, uint32_t write_domain)
+drm_public int
+radeon_cs_space_check_with_bo(struct radeon_cs *cs, struct radeon_bo *bo,
+ uint32_t read_domains, uint32_t write_domain)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
@@ -224,13 +230,13 @@ int radeon_cs_space_check_with_bo(struct radeon_cs *cs,
return ret;
}
-int radeon_cs_space_check(struct radeon_cs *cs)
+drm_public int radeon_cs_space_check(struct radeon_cs *cs)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
return radeon_cs_check_space_internal(csi, NULL);
}
-void radeon_cs_space_reset_bos(struct radeon_cs *cs)
+drm_public void radeon_cs_space_reset_bos(struct radeon_cs *cs)
{
struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
int i;
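
The space-checking entry points keep their behaviour and only gain drm_public. A hedged sketch of how they are typically driven (emit_with_bo() and the two buffer objects are illustrative; the domain flags come from radeon_drm.h):

#include "radeon_cs.h"

static int emit_with_bo(struct radeon_cs *cs,
                        struct radeon_bo *scanout, struct radeon_bo *vbo)
{
    /* BOs that stay referenced for the whole CS (e.g. the scanout buffer). */
    radeon_cs_space_add_persistent_bo(cs, scanout,
                                      0, RADEON_GEM_DOMAIN_VRAM);

    /* Per-operation check: make sure the CS plus this BO still fits the
     * VRAM/GTT limits before any relocation referencing it is written. */
    if (radeon_cs_space_check_with_bo(cs, vbo,
                                      RADEON_GEM_DOMAIN_GTT, 0) != 0)
        return -1;      /* caller flushes and retries */

    /* ... write packets and relocs referencing vbo here ... */
    return 0;
}
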
diff --git a/lib/libdrm/radeon/radeon_surface.c b/lib/libdrm/radeon/radeon_surface.c
index 8a1fe7df5..bd9ee6d11 100644
--- a/lib/libdrm/radeon/radeon_surface.c
+++ b/lib/libdrm/radeon/radeon_surface.c
@@ -26,15 +26,18 @@
* Authors:
* Jérôme Glisse <jglisse@redhat.com>
*/
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
#include <stdbool.h>
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/mman.h>
#include <sys/ioctl.h>
#include "drm.h"
+#include "libdrm.h"
#include "xf86drm.h"
#include "radeon_drm.h"
#include "radeon_surface.h"
@@ -363,6 +366,8 @@ static int r6_surface_init_2d(struct radeon_surface_manager *surf_man,
xalign = (surf_man->hw_info.group_bytes * surf_man->hw_info.num_banks) /
(tilew * surf->bpe * surf->nsamples);
xalign = MAX2(tilew * surf_man->hw_info.num_banks, xalign);
+ if (surf->flags & RADEON_SURF_FMASK)
+ xalign = MAX2(128, xalign);
yalign = tilew * surf_man->hw_info.num_pipes;
if (surf->flags & RADEON_SURF_SCANOUT) {
xalign = MAX2((surf->bpe == 1) ? 64 : 32, xalign);
@@ -592,7 +597,7 @@ static void eg_surf_minify(struct radeon_surface *surf,
mtile_ps = (mtile_pr * surflevel->nblk_y) / mtileh;
surflevel->offset = offset;
- surflevel->pitch_bytes = surflevel->nblk_x * bpe * slice_pt;
+ surflevel->pitch_bytes = surflevel->nblk_x * bpe * surf->nsamples;
surflevel->slice_size = mtile_ps * mtileb * slice_pt;
surf->bo_size = offset + surflevel->slice_size * surflevel->nblk_z * surf->array_size;
@@ -1307,7 +1312,7 @@ static int si_surface_sanity(struct radeon_surface_manager *surf_man,
/* default value */
surf->mtilea = 1;
surf->bankw = 1;
- surf->bankw = 1;
+ surf->bankh = 1;
surf->tile_split = 64;
surf->stencil_tile_split = 64;
}
@@ -1495,7 +1500,7 @@ static void si_surf_minify_2d(struct radeon_surface *surf,
/* macro tile per slice */
mtile_ps = (mtile_pr * surflevel->nblk_y) / yalign;
surflevel->offset = offset;
- surflevel->pitch_bytes = surflevel->nblk_x * bpe * slice_pt;
+ surflevel->pitch_bytes = surflevel->nblk_x * bpe * surf->nsamples;
surflevel->slice_size = mtile_ps * mtileb * slice_pt;
surf->bo_size = offset + surflevel->slice_size * surflevel->nblk_z * surf->array_size;
@@ -2134,7 +2139,7 @@ static int cik_surface_sanity(struct radeon_surface_manager *surf_man,
/* default value */
surf->mtilea = 1;
surf->bankw = 1;
- surf->bankw = 1;
+ surf->bankh = 1;
surf->tile_split = 64;
surf->stencil_tile_split = 64;
}
@@ -2395,7 +2400,8 @@ static int cik_surface_best(struct radeon_surface_manager *surf_man,
/* ===========================================================================
* public API
*/
-struct radeon_surface_manager *radeon_surface_manager_new(int fd)
+drm_public struct radeon_surface_manager *
+radeon_surface_manager_new(int fd)
{
struct radeon_surface_manager *surf_man;
@@ -2443,7 +2449,8 @@ out_err:
return NULL;
}
-void radeon_surface_manager_free(struct radeon_surface_manager *surf_man)
+drm_public void
+radeon_surface_manager_free(struct radeon_surface_manager *surf_man)
{
free(surf_man);
}
@@ -2515,8 +2522,9 @@ static int radeon_surface_sanity(struct radeon_surface_manager *surf_man,
return 0;
}
-int radeon_surface_init(struct radeon_surface_manager *surf_man,
- struct radeon_surface *surf)
+drm_public int
+radeon_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
{
unsigned mode, type;
int r;
@@ -2531,8 +2539,9 @@ int radeon_surface_init(struct radeon_surface_manager *surf_man,
return surf_man->surface_init(surf_man, surf);
}
-int radeon_surface_best(struct radeon_surface_manager *surf_man,
- struct radeon_surface *surf)
+drm_public int
+radeon_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
{
unsigned mode, type;
int r;
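
Besides the bankw/bankh typo fix and the FMASK alignment, the substantive radeon_surface.c change is that pitch_bytes now scales with the sample count rather than the slice_pt tiling factor. A small illustrative helper with made-up numbers, not part of the patch:

#include <stdint.h>

/* Hedged helper mirroring the updated formula; the names follow the diff,
 * the function itself is illustrative only. */
static inline uint64_t level_pitch_bytes(uint32_t nblk_x, uint32_t bpe,
                                         uint32_t nsamples)
{
    /* e.g. nblk_x = 256, bpe = 4, nsamples = 4  ->  256 * 4 * 4 = 4096 bytes */
    return (uint64_t)nblk_x * bpe * nsamples;
}
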
diff --git a/lib/libdrm/xf86atomic.h b/lib/libdrm/xf86atomic.h
index d0f4d66f2..3f1797953 100644
--- a/lib/libdrm/xf86atomic.h
+++ b/lib/libdrm/xf86atomic.h
@@ -97,4 +97,13 @@ typedef struct { uint_t atomic; } atomic_t;
#error libdrm requires atomic operations, please define them for your CPU/compiler.
#endif
+static inline int atomic_add_unless(atomic_t *v, int add, int unless)
+{
+ int c, old;
+ c = atomic_read(v);
+ while (c != unless && (old = atomic_cmpxchg(v, c, c + add)) != c)
+ c = old;
+ return c == unless;
+}
+
#endif
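
The new atomic_add_unless() is a compare-and-swap loop: it rereads the counter until the cmpxchg succeeds, and it returns nonzero only when the counter already equals unless, in which case nothing is added. A hedged usage sketch (struct bo and bo_free_locked() are hypothetical, not from this tree):

#include "xf86atomic.h"

struct bo { atomic_t refcount; /* ... */ };

static void bo_free_locked(struct bo *bo);   /* hypothetical slow path */

static void bo_unreference(struct bo *bo)
{
    /* atomic_add_unless() returns nonzero when the counter already equals
     * "unless" (here: 1) and leaves it untouched; otherwise it atomically
     * adds "add" and returns zero.  So only the last reference takes the
     * locked slow path. */
    if (atomic_add_unless(&bo->refcount, -1, 1))
        bo_free_locked(bo);
}
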
diff --git a/lib/libdrm/xf86drm.c b/lib/libdrm/xf86drm.c
index 5a377bca4..9a3c4f96f 100644
--- a/lib/libdrm/xf86drm.c
+++ b/lib/libdrm/xf86drm.c
@@ -48,7 +48,6 @@
#include <sys/stat.h>
#define stat_t struct stat
#include <sys/ioctl.h>
-#include <sys/mman.h>
#include <sys/time.h>
#include <stdarg.h>
@@ -58,6 +57,7 @@
#endif
#include "xf86drm.h"
+#include "libdrm.h"
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
#define DRM_MAJOR 145
@@ -1144,7 +1144,7 @@ int drmMap(int fd, drm_handle_t handle, drmSize size, drmAddressPtr address)
size = (size + pagesize_mask) & ~pagesize_mask;
- *address = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, handle);
+ *address = drm_mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, handle);
if (*address == MAP_FAILED)
return -errno;
return 0;
@@ -1164,7 +1164,7 @@ int drmMap(int fd, drm_handle_t handle, drmSize size, drmAddressPtr address)
*/
int drmUnmap(drmAddress address, drmSize size)
{
- return munmap(address, size);
+ return drm_munmap(address, size);
}
drmBufInfoPtr drmGetBufInfo(int fd)
@@ -1271,7 +1271,7 @@ int drmUnmapBufs(drmBufMapPtr bufs)
int i;
for (i = 0; i < bufs->count; i++) {
- munmap(bufs->list[i].address, bufs->list[i].total);
+ drm_munmap(bufs->list[i].address, bufs->list[i].total);
}
drmFree(bufs->list);
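
In xf86drm.c the mapping helpers themselves now go through drm_mmap()/drm_munmap(), but their external contract is unchanged. A hedged reminder of that contract (peek_map() is illustrative):

#include "xf86drm.h"

static int peek_map(int fd, drm_handle_t handle, drmSize size)
{
    drmAddress addr;

    if (drmMap(fd, handle, size, &addr) != 0)
        return -1;              /* drmMap() returns -errno on failure */
    /* ... read or write the mapping here ... */
    return drmUnmap(addr, size);
}
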
diff --git a/lib/libdrm/xf86drmMode.c b/lib/libdrm/xf86drmMode.c
index da65015fc..ca9177b62 100644
--- a/lib/libdrm/xf86drmMode.c
+++ b/lib/libdrm/xf86drmMode.c
@@ -806,6 +806,8 @@ int drmCheckModesettingSupported(const char *busid)
return -EINVAL;
return (modesetting ? 0 : -ENOSYS);
}
+#elif defined(__DragonFly__)
+ return 0;
#endif
#ifdef __OpenBSD__
int fd;
diff --git a/lib/libdrm/xf86drmMode.h b/lib/libdrm/xf86drmMode.h
index b260af7ca..856a6bb0f 100644
--- a/lib/libdrm/xf86drmMode.h
+++ b/lib/libdrm/xf86drmMode.h
@@ -240,6 +240,15 @@ typedef struct _drmModeProperty {
uint32_t *blob_ids; /* store the blob IDs */
} drmModePropertyRes, *drmModePropertyPtr;
+static inline int drm_property_type_is(drmModePropertyPtr property,
+ uint32_t type)
+{
+ /* instanceof for props.. handles extended type vs original types: */
+ if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
+ return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type;
+ return property->flags & type;
+}
+
typedef struct _drmModeCrtc {
uint32_t crtc_id;
uint32_t buffer_id; /**< FB id to connect to 0 = disconnect */
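
The new drm_property_type_is() helper distinguishes the original bitmask property types from the extended ones encoded in DRM_MODE_PROP_EXTENDED_TYPE. A hedged usage sketch (describe_property() is illustrative; the property comes from drmModeGetProperty()):

#include "xf86drm.h"
#include "xf86drmMode.h"

static void describe_property(drmModePropertyPtr prop)
{
    if (drm_property_type_is(prop, DRM_MODE_PROP_RANGE)) {
        /* prop->values[0] / prop->values[1] hold the min and max */
    } else if (drm_property_type_is(prop, DRM_MODE_PROP_ENUM)) {
        /* prop->enums[] lists the named values */
    } else if (drm_property_type_is(prop, DRM_MODE_PROP_OBJECT)) {
        /* extended type: the value refers to another KMS object id */
    }
}
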