author    Martin Pieuchot <mpi@cvs.openbsd.org>    2012-11-16 12:44:37 +0000
committer Matthieu Herrb <matthieu.herrb@laas.fr>  2012-11-16 13:52:38 +0000
commit    0780836a2ce276b8697d127ac8f5b8463bb7d6fe (patch)
tree      b9894d6a4b426f1355c3bed27d9577207dc4d87f
parent    8c0b9a29cc37b38638970b5187e048ebf931ea2b (diff)
Update to libdrm 2.4.31.
-rw-r--r--  lib/libdrm/intel/Makefile             |    2
-rw-r--r--  lib/libdrm/intel/intel_bufmgr.c       |   61
-rw-r--r--  lib/libdrm/intel/intel_bufmgr.h       |   20
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_fake.c  |   39
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_gem.c   |  645
-rw-r--r--  lib/libdrm/intel/intel_chipset.h      |  112
-rw-r--r--  lib/libdrm/intel/shlib_version        |    4
-rw-r--r--  lib/libdrm/radeon/Makefile            |    4
-rw-r--r--  lib/libdrm/radeon/r600_pci_ids.h      |  271
-rw-r--r--  lib/libdrm/radeon/radeon_surface.c    |  995
-rw-r--r--  lib/libdrm/radeon/radeon_surface.h    |  114
-rw-r--r--  lib/libdrm/radeon/shlib_version       |    2
-rw-r--r--  lib/libdrm/shlib_version              |    4
-rw-r--r--  lib/libdrm/xf86drm.c                  |  209
-rw-r--r--  lib/libdrm/xf86drm.h                  |   47
-rw-r--r--  lib/libdrm/xf86drmMode.c              |  381
-rw-r--r--  lib/libdrm/xf86drmMode.h              |   68
17 files changed, 2702 insertions(+), 276 deletions(-)
diff --git a/lib/libdrm/intel/Makefile b/lib/libdrm/intel/Makefile
index 0a942340d..5a15e7b89 100644
--- a/lib/libdrm/intel/Makefile
+++ b/lib/libdrm/intel/Makefile
@@ -19,7 +19,7 @@ SRCS= intel_bufmgr.c \
PKGCONFIG= libdrm_intel.pc
-LDADD+= -L${X11BASE}/lib -lX11
+LDADD+= -L${X11BASE}/lib -lX11 -lpciaccess
includes:
cd ${.CURDIR}; for i in ${INCS}; do \
diff --git a/lib/libdrm/intel/intel_bufmgr.c b/lib/libdrm/intel/intel_bufmgr.c
index e949ff2a0..905556f64 100644
--- a/lib/libdrm/intel/intel_bufmgr.c
+++ b/lib/libdrm/intel/intel_bufmgr.c
@@ -36,8 +36,10 @@
#include <errno.h>
#include <drm.h>
#include <i915_drm.h>
+#include <pciaccess.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
+#include "xf86drm.h"
/** @file intel_bufmgr.c
*
@@ -102,7 +104,7 @@ drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
int ret;
- if (bo->bufmgr->bo_subdata)
+ if (bo->bufmgr->bo_get_subdata)
return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
if (size == 0 || data == NULL)
@@ -143,11 +145,14 @@ drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
cliprects, num_cliprects, DR4,
rings);
- if (rings == 0)
+ switch (rings) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
return bo->bufmgr->bo_exec(bo, used,
cliprects, num_cliprects, DR4);
-
- return -ENODEV;
+ default:
+ return -ENODEV;
+ }
}
void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
@@ -266,3 +271,51 @@ int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
return bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
return -1;
}
+
+static size_t
+drm_intel_probe_agp_aperture_size(int fd)
+{
+ struct pci_device *pci_dev;
+ size_t size = 0;
+ int ret;
+
+ ret = pci_system_init();
+ if (ret)
+ goto err;
+
+ /* XXX handle multiple adaptors? */
+ pci_dev = pci_device_find_by_slot(0, 0, 2, 0);
+ if (pci_dev == NULL)
+ goto err;
+
+ ret = pci_device_probe(pci_dev);
+ if (ret)
+ goto err;
+
+ size = pci_dev->regions[2].size;
+err:
+	pci_system_cleanup();
+ return size;
+}
+
+int drm_intel_get_aperture_sizes(int fd,
+ size_t *mappable,
+ size_t *total)
+{
+
+ struct drm_i915_gem_get_aperture aperture;
+ int ret;
+
+ ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
+ if (ret)
+ return ret;
+
+ *mappable = 0;
+ /* XXX add a query for the kernel value? */
+ if (*mappable == 0)
+ *mappable = drm_intel_probe_agp_aperture_size(fd);
+ if (*mappable == 0)
+ *mappable = 64 * 1024 * 1024; /* minimum possible value */
+ *total = aperture.aper_size;
+ return 0;
+}
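
A minimal caller sketch for the new aperture query above; fd is assumed to
be an already-open DRM device file descriptor, and error handling is
reduced to the return-value check:

    #include <stdio.h>
    #include "intel_bufmgr.h"

    static void print_aperture(int fd)
    {
            size_t mappable, total;

            /* mappable = CPU-visible part, total = full GTT aperture */
            if (drm_intel_get_aperture_sizes(fd, &mappable, &total) == 0)
                    printf("aperture: %zu mappable / %zu total\n",
                           mappable, total);
    }
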
diff --git a/lib/libdrm/intel/intel_bufmgr.h b/lib/libdrm/intel/intel_bufmgr.h
index daa18b4cd..85da8b9aa 100644
--- a/lib/libdrm/intel/intel_bufmgr.h
+++ b/lib/libdrm/intel/intel_bufmgr.h
@@ -34,6 +34,7 @@
#ifndef INTEL_BUFMGR_H
#define INTEL_BUFMGR_H
+#include <stdio.h>
#include <stdint.h>
struct drm_clip_rect;
@@ -145,12 +146,18 @@ drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
unsigned int handle);
void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr,
+ int limit);
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
+int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo);
+void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start);
void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id);
+int drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total);
+
/* drm_intel_bufmgr_fake.c */
drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
unsigned long low_offset,
@@ -184,6 +191,19 @@ void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
+struct drm_intel_decode *drm_intel_decode_context_alloc(uint32_t devid);
+void drm_intel_decode_context_free(struct drm_intel_decode *ctx);
+void drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
+ void *data, uint32_t hw_offset,
+ int count);
+void drm_intel_decode_set_dump_past_end(struct drm_intel_decode *ctx,
+ int dump_past_end);
+void drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
+ uint32_t head, uint32_t tail);
+void drm_intel_decode_set_output_file(struct drm_intel_decode *ctx, FILE *out);
+void drm_intel_decode(struct drm_intel_decode *ctx);
+
+
/** @{ Compatibility defines to keep old code building despite the symbol rename
* from dri_* to drm_intel_*
*/
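
A sketch of how the decode entry points declared above fit together:
allocate a context for a chipset PCI id, aim it at a batch buffer, and
dump the disassembly. Here devid, batch and used are assumed caller
state (used being the batch length in bytes, so used / 4 dwords):

    struct drm_intel_decode *ctx;

    ctx = drm_intel_decode_context_alloc(devid);
    if (ctx != NULL) {
            drm_intel_decode_set_batch_pointer(ctx, batch, 0, used / 4);
            drm_intel_decode_set_output_file(ctx, stderr);
            drm_intel_decode(ctx);
            drm_intel_decode_context_free(ctx);
    }
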
diff --git a/lib/libdrm/intel/intel_bufmgr_fake.c b/lib/libdrm/intel/intel_bufmgr_fake.c
index bd4a59b82..d9b5cfdc8 100644
--- a/lib/libdrm/intel/intel_bufmgr_fake.c
+++ b/lib/libdrm/intel/intel_bufmgr_fake.c
@@ -1309,42 +1309,6 @@ drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
return 0;
}
-static int
-drm_intel_fake_pin(drm_intel_bo *bo, uint32_t alignment)
-{
- drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
- drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-
- assert(bo_fake->is_static == 0);
-
- bo_fake->alignment = alignment;
- if (drm_intel_fake_bo_validate(bo) == -1)
- return ENOMEM;
-
- bo_fake->is_static = 1;
- bo->virtual = bo_fake->block->virtual;
- /* we should be on the on_hardware list, take us off for now */
- DRMLISTDEL(bo_fake->block);
-
- return 0;
-}
-
-static int
-drm_intel_fake_unpin(drm_intel_bo *bo)
-{
- drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
- drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-
- assert(bo_fake->is_static);
-
- bo_fake->is_static = 0;
- bo->virtual = NULL;
- DRMLISTDEL(bo_fake->block);
- DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);
-
- return 0;
-}
-
/**
* Incorporates the validation flags associated with each relocation into
* the combined validation flags for the buffer on this batchbuffer submission.
@@ -1656,9 +1620,6 @@ drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
bufmgr_fake->bufmgr.bo_wait_rendering =
drm_intel_fake_bo_wait_rendering;
bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
- bufmgr_fake->bufmgr.bo_pin = drm_intel_fake_pin;
- bufmgr_fake->bufmgr.bo_unpin = drm_intel_fake_unpin;
-
bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
bufmgr_fake->bufmgr.check_aperture_space =
diff --git a/lib/libdrm/intel/intel_bufmgr_gem.c b/lib/libdrm/intel/intel_bufmgr_gem.c
index 265cde4f4..6f2ea53bd 100644
--- a/lib/libdrm/intel/intel_bufmgr_gem.c
+++ b/lib/libdrm/intel/intel_bufmgr_gem.c
@@ -39,6 +39,7 @@
#endif
#include <xf86drm.h>
+#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
@@ -50,12 +51,10 @@
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <stdbool.h>
#include "errno.h"
#include "libdrm_lists.h"
-#if 0
-#include "intel_atomic.h"
-#endif
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
@@ -86,6 +85,9 @@ typedef struct _drm_intel_bufmgr_gem {
pthread_mutex_t lock;
+#ifndef __OpenBSD__
+ struct drm_i915_gem_exec_object *exec_objects;
+#endif
struct drm_i915_gem_exec_object2 *exec2_objects;
drm_intel_bo **exec_bos;
int exec_size;
@@ -97,6 +99,8 @@ typedef struct _drm_intel_bufmgr_gem {
time_t time;
drmMMListHead named;
+ drmMMListHead vma_cache;
+ int vma_count, vma_open, vma_max;
uint64_t gtt_size;
int available_fences;
@@ -105,8 +109,9 @@ typedef struct _drm_intel_bufmgr_gem {
unsigned int has_bsd : 1;
unsigned int has_blt : 1;
unsigned int has_relaxed_fencing : 1;
+ unsigned int has_llc : 1;
unsigned int bo_reuse : 1;
- char fenced_relocs;
+ bool fenced_relocs;
} drm_intel_bufmgr_gem;
#define DRM_INTEL_RELOC_FENCE (1<<0)
@@ -119,7 +124,7 @@ typedef struct _drm_intel_reloc_target_info {
struct _drm_intel_bo_gem {
drm_intel_bo bo;
- int refcount;
+ atomic_t refcount;
uint32_t gem_handle;
const char *name;
@@ -152,6 +157,12 @@ struct _drm_intel_bo_gem {
drm_intel_reloc_target *reloc_target_info;
/** Number of entries in relocs */
int reloc_count;
+ /** Mapped address for the buffer, saved across map/unmap cycles */
+ void *mem_virtual;
+ /** GTT virtual address for the buffer, saved across map/unmap cycles */
+ void *gtt_virtual;
+ int map_count;
+ drmMMListHead vma_list;
/** BO cache list */
drmMMListHead head;
@@ -160,24 +171,24 @@ struct _drm_intel_bo_gem {
* Boolean of whether this BO and its children have been included in
* the current drm_intel_bufmgr_check_aperture_space() total.
*/
- char included_in_check_aperture;
+ bool included_in_check_aperture;
/**
* Boolean of whether this buffer has been used as a relocation
* target and had its size accounted for, and thus can't have any
* further relocations added to it.
*/
- char used_as_reloc_target;
+ bool used_as_reloc_target;
/**
* Boolean of whether we have encountered an error whilst building the relocation tree.
*/
- char has_error;
+ bool has_error;
/**
* Boolean of whether this buffer can be re-used
*/
- char reusable;
+ bool reusable;
/**
* Size in bytes of this buffer and its relocation descendents.
@@ -193,8 +204,8 @@ struct _drm_intel_bo_gem {
*/
int reloc_tree_fences;
- /** Mapped address for the buffer, saved across map/unmap cycles */
- void *saved_virtual;
+	/** Flags that we may need to do the SW_FINISH ioctl on unmap. */
+ bool mapped_cpu_write;
};
static unsigned int
@@ -275,7 +286,9 @@ drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
if (*tiling_mode == I915_TILING_NONE)
return ALIGN(pitch, 64);
- if (*tiling_mode == I915_TILING_X)
+ if (*tiling_mode == I915_TILING_X
+ || (IS_915(bufmgr_gem->pci_device)
+ && *tiling_mode == I915_TILING_Y))
tile_width = 512;
else
tile_width = 128;
@@ -352,15 +365,12 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- /* XXX atomics */
- pthread_mutex_lock(&bufmgr_gem->lock);
- bo_gem->refcount++;
- pthread_mutex_unlock(&bufmgr_gem->lock);
+ atomic_inc(&bo_gem->refcount);
}
+#ifndef __OpenBSD__
/**
* Adds the given buffer to the list of buffers to be validated (moved into the
* appropriate memory type) with the next batch submission.
@@ -370,6 +380,45 @@ drm_intel_gem_bo_reference(drm_intel_bo *bo)
* access flags.
*/
static void
+drm_intel_add_validate_buffer(drm_intel_bo *bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int index;
+
+ if (bo_gem->validate_index != -1)
+ return;
+
+ /* Extend the array of validation entries as necessary. */
+ if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
+ int new_size = bufmgr_gem->exec_size * 2;
+
+ if (new_size == 0)
+ new_size = 5;
+
+ bufmgr_gem->exec_objects =
+ realloc(bufmgr_gem->exec_objects,
+ sizeof(*bufmgr_gem->exec_objects) * new_size);
+ bufmgr_gem->exec_bos =
+ realloc(bufmgr_gem->exec_bos,
+ sizeof(*bufmgr_gem->exec_bos) * new_size);
+ bufmgr_gem->exec_size = new_size;
+ }
+
+ index = bufmgr_gem->exec_count;
+ bo_gem->validate_index = index;
+ /* Fill in array entry */
+ bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
+ bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
+ bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
+ bufmgr_gem->exec_objects[index].alignment = 0;
+ bufmgr_gem->exec_objects[index].offset = 0;
+ bufmgr_gem->exec_bos[index] = bo;
+ bufmgr_gem->exec_count++;
+}
+#endif
+
+static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
@@ -472,7 +521,7 @@ drm_intel_setup_reloc_list(drm_intel_bo *bo)
bo_gem->reloc_target_info = malloc(max_relocs *
sizeof(drm_intel_reloc_target));
if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
- bo_gem->has_error = 1;
+ bo_gem->has_error = true;
free (bo_gem->relocs);
bo_gem->relocs = NULL;
@@ -557,12 +606,12 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
unsigned int page_size = getpagesize();
int ret;
struct drm_intel_gem_bo_bucket *bucket;
- int alloc_from_cache;
+ bool alloc_from_cache;
unsigned long bo_size;
- int for_render = 0;
+ bool for_render = false;
if (flags & BO_ALLOC_FOR_RENDER)
- for_render = 1;
+ for_render = true;
/* Round the allocated size up to a power of two number of pages. */
bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
@@ -581,7 +630,7 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
pthread_mutex_lock(&bufmgr_gem->lock);
/* Get a buffer out of the cache if available */
retry:
- alloc_from_cache = 0;
+ alloc_from_cache = false;
if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
if (for_render) {
/* Allocate new render-target BOs from the tail (MRU)
@@ -591,7 +640,7 @@ retry:
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.prev, head);
DRMLISTDEL(&bo_gem->head);
- alloc_from_cache = 1;
+ alloc_from_cache = true;
} else {
/* For non-render-target BOs (where we're probably
* going to map it first thing in order to fill it
@@ -603,7 +652,7 @@ retry:
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.next, head);
if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
- alloc_from_cache = 1;
+ alloc_from_cache = true;
DRMLISTDEL(&bo_gem->head);
}
}
@@ -661,15 +710,16 @@ retry:
}
DRMINITLISTHEAD(&bo_gem->name_list);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
}
bo_gem->name = name;
- bo_gem->refcount = 1;
+ atomic_set(&bo_gem->refcount, 1);
bo_gem->validate_index = -1;
bo_gem->reloc_tree_fences = 0;
- bo_gem->used_as_reloc_target = 0;
- bo_gem->has_error = 0;
- bo_gem->reusable = 1;
+ bo_gem->used_as_reloc_target = false;
+ bo_gem->has_error = false;
+ bo_gem->reusable = true;
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
@@ -728,13 +778,14 @@ drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
aligned_y = y;
height_alignment = 2;
- if (tiling == I915_TILING_X)
+ if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
+ height_alignment = 16;
+ else if (tiling == I915_TILING_X
+ || (IS_915(bufmgr_gem->pci_device)
+ && tiling == I915_TILING_Y))
height_alignment = 8;
else if (tiling == I915_TILING_Y)
height_alignment = 32;
- /* i8xx has a interleaved 2-row tile layout */
- if (IS_GEN2(bufmgr_gem) && tiling != I915_TILING_NONE)
- height_alignment *= 2;
aligned_y = ALIGN(y, height_alignment);
stride = x * cpp;
@@ -805,12 +856,12 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
bo_gem->bo.virtual = NULL;
bo_gem->bo.bufmgr = bufmgr;
bo_gem->name = name;
- bo_gem->refcount = 1;
+ atomic_set(&bo_gem->refcount, 1);
bo_gem->validate_index = -1;
bo_gem->gem_handle = open_arg.handle;
bo_gem->bo.handle = open_arg.handle;
bo_gem->global_name = handle;
- bo_gem->reusable = 0;
+ bo_gem->reusable = false;
memset(&get_tiling, 0, sizeof(get_tiling));
get_tiling.handle = bo_gem->gem_handle;
@@ -826,6 +877,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
/* XXX stride is unknown */
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
@@ -840,10 +892,15 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
struct drm_gem_close close;
int ret;
- if (bo->virtual)
- munmap(bo->virtual, bo_gem->bo.size);
- else if (bo_gem->saved_virtual)
- munmap(bo_gem->saved_virtual, bo_gem->bo.size);
+ DRMLISTDEL(&bo_gem->vma_list);
+ if (bo_gem->mem_virtual) {
+ munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ bufmgr_gem->vma_count--;
+ }
+ if (bo_gem->gtt_virtual) {
+ munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+ bufmgr_gem->vma_count--;
+ }
/* Close this object */
memset(&close, 0, sizeof(close));
@@ -886,6 +943,67 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
bufmgr_gem->time = time;
}
+static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int limit;
+
+ DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
+ bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
+
+ if (bufmgr_gem->vma_max < 0)
+ return;
+
+ /* We may need to evict a few entries in order to create new mmaps */
+ limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
+ if (limit < 0)
+ limit = 0;
+
+ while (bufmgr_gem->vma_count > limit) {
+ drm_intel_bo_gem *bo_gem;
+
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+ bufmgr_gem->vma_cache.next,
+ vma_list);
+ assert(bo_gem->map_count == 0);
+ DRMLISTDELINIT(&bo_gem->vma_list);
+
+ if (bo_gem->mem_virtual) {
+ munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+ bo_gem->mem_virtual = NULL;
+ bufmgr_gem->vma_count--;
+ }
+ if (bo_gem->gtt_virtual) {
+ munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+ bo_gem->gtt_virtual = NULL;
+ bufmgr_gem->vma_count--;
+ }
+ }
+}
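
As a worked example of the eviction threshold above: with vma_max = 64
and vma_open = 10, limit = 64 - 2*10 = 44, so cached mappings are
munmap()ed until vma_count drops to 44. The 2x headroom leaves room for
every currently open buffer to hold both a CPU and a GTT mapping.
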
+
+static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem)
+{
+ bufmgr_gem->vma_open--;
+ DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
+ if (bo_gem->mem_virtual)
+ bufmgr_gem->vma_count++;
+ if (bo_gem->gtt_virtual)
+ bufmgr_gem->vma_count++;
+ drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
+
+static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
+ drm_intel_bo_gem *bo_gem)
+{
+ bufmgr_gem->vma_open++;
+ DRMLISTDEL(&bo_gem->vma_list);
+ if (bo_gem->mem_virtual)
+ bufmgr_gem->vma_count--;
+ if (bo_gem->gtt_virtual)
+ bufmgr_gem->vma_count--;
+ drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
+
static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
@@ -903,7 +1021,7 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
}
}
bo_gem->reloc_count = 0;
- bo_gem->used_as_reloc_target = 0;
+ bo_gem->used_as_reloc_target = false;
DBG("bo_unreference final: %d (%s)\n",
bo_gem->gem_handle, bo_gem->name);
@@ -918,6 +1036,13 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
bo_gem->relocs = NULL;
}
+ /* Clear any left-over mappings */
+ if (bo_gem->map_count) {
+ DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
+ bo_gem->map_count = 0;
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ }
+
DRMLISTDEL(&bo_gem->name_list);
bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
@@ -941,30 +1066,28 @@ static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- /* XXX atomics */
- assert(bo_gem->refcount > 0);
- if (--bo_gem->refcount == 0)
+ assert(atomic_read(&bo_gem->refcount) > 0);
+ if (atomic_dec_and_test(&bo_gem->refcount))
drm_intel_gem_bo_unreference_final(bo, time);
}
static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
- drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- pthread_mutex_lock(&bufmgr_gem->lock);
- assert(bo_gem->refcount > 0);
- if (--bo_gem->refcount == 0) {
+ assert(atomic_read(&bo_gem->refcount) > 0);
+ if (atomic_dec_and_test(&bo_gem->refcount)) {
drm_intel_bufmgr_gem *bufmgr_gem =
(drm_intel_bufmgr_gem *) bo->bufmgr;
struct timespec time;
clock_gettime(CLOCK_MONOTONIC, &time);
+ pthread_mutex_lock(&bufmgr_gem->lock);
drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
}
- pthread_mutex_unlock(&bufmgr_gem->lock);
}
/*
@@ -972,23 +1095,25 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
* faulted gtt memory, or the backing pages. This is due to cache coherency
* issues.
*
- * Therefore, bo_map_gtt calls bo_map, and bo_unmap_gtt calls bo_unmap.
+ * Therefore, bo_map_gtt calls bo_map.
*/
static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_set_domain set_domain;
- struct drm_i915_gem_mmap mmap_arg;
int ret;
pthread_mutex_lock(&bufmgr_gem->lock);
- /* Allow recursive mapping. Mesa may recursively map buffers with
- * nested display loops.
- */
- if (bo_gem->saved_virtual == NULL) {
- DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
+ if (bo_gem->map_count++ == 0)
+ drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+ if (!bo_gem->mem_virtual) {
+ struct drm_i915_gem_mmap mmap_arg;
+
+ DBG("bo_map: %d (%s), map_count=%d\n",
+ bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
memset(&mmap_arg, 0, sizeof(mmap_arg));
mmap_arg.handle = bo_gem->gem_handle;
@@ -1002,34 +1127,35 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle,
bo_gem->name, strerror(errno));
+ if (--bo_gem->map_count == 0)
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
pthread_mutex_unlock(&bufmgr_gem->lock);
return ret;
}
-
- bo_gem->saved_virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
+ bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
}
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
- bo_gem->saved_virtual);
- bo->virtual = bo_gem->saved_virtual;
+ bo_gem->mem_virtual);
+ bo->virtual = bo_gem->mem_virtual;
set_domain.handle = bo_gem->gem_handle;
- set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT /* XXX _CPU */;
if (write_enable)
- set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+ set_domain.write_domain = I915_GEM_DOMAIN_GTT /* XXX _CPU */;
else
set_domain.write_domain = 0;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_DOMAIN,
&set_domain);
if (ret != 0) {
- ret = -errno;
DBG("%s:%d: Error setting to CPU domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle,
strerror(errno));
- pthread_mutex_unlock(&bufmgr_gem->lock);
- return ret;
}
+ if (write_enable)
+ bo_gem->mapped_cpu_write = true;
+
pthread_mutex_unlock(&bufmgr_gem->lock);
return 0;
@@ -1038,21 +1164,133 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
return drm_intel_gem_bo_map(bo, 1);
+#ifndef __OpenBSD__
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_set_domain set_domain;
+ int ret;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+
+ if (bo_gem->map_count++ == 0)
+ drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
+
+ /* Get a mapping of the buffer if we haven't before. */
+ if (bo_gem->gtt_virtual == NULL) {
+ struct drm_i915_gem_mmap_gtt mmap_arg;
+
+ DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
+ bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
+
+ memset(&mmap_arg, 0, sizeof(mmap_arg));
+ mmap_arg.handle = bo_gem->gem_handle;
+
+ /* Get the fake offset back... */
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_MMAP_GTT,
+ &mmap_arg);
+ if (ret != 0) {
+ ret = -errno;
+ DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, bo_gem->name,
+ strerror(errno));
+ if (--bo_gem->map_count == 0)
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return ret;
+ }
+
+ /* and mmap it */
+ bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, bufmgr_gem->fd,
+ mmap_arg.offset);
+ if (bo_gem->gtt_virtual == MAP_FAILED) {
+ bo_gem->gtt_virtual = NULL;
+ ret = -errno;
+ DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__,
+ bo_gem->gem_handle, bo_gem->name,
+ strerror(errno));
+ if (--bo_gem->map_count == 0)
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ return ret;
+ }
+ }
+
+ bo->virtual = bo_gem->gtt_virtual;
+
+ DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+ bo_gem->gtt_virtual);
+
+ /* Now move it to the GTT domain so that the CPU caches are flushed */
+ set_domain.handle = bo_gem->gem_handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+ set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SET_DOMAIN,
+ &set_domain);
+ if (ret != 0) {
+ DBG("%s:%d: Error setting domain %d: %s\n",
+ __FILE__, __LINE__, bo_gem->gem_handle,
+ strerror(errno));
+ }
+
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return 0;
+#endif
}
-
+
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_sw_finish sw_finish;
+ int ret = 0;
if (bo == NULL)
return 0;
pthread_mutex_lock(&bufmgr_gem->lock);
- bo->virtual = NULL;
+
+ if (bo_gem->map_count <= 0) {
+ DBG("attempted to unmap an unmapped bo\n");
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+ /* Preserve the old behaviour of just treating this as a
+ * no-op rather than reporting the error.
+ */
+ return 0;
+ }
+
+ if (bo_gem->mapped_cpu_write) {
+#ifndef __OpenBSD__
+ /* Cause a flush to happen if the buffer's pinned for
+ * scanout, so the results show up in a timely manner.
+ * Unlike GTT set domains, this only does work if the
+ * buffer should be scanout-related.
+ */
+ sw_finish.handle = bo_gem->gem_handle;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_SW_FINISH,
+ &sw_finish);
+ ret = ret == -1 ? -errno : 0;
+#endif
+ bo_gem->mapped_cpu_write = false;
+ }
+
+	/* We need to unmap after every invocation as we cannot track
+	 * an open vma for every bo as that will exhaust the system
+ * limits and cause later failures.
+ */
+ if (--bo_gem->map_count == 0) {
+ drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
+ bo->virtual = NULL;
+ }
pthread_mutex_unlock(&bufmgr_gem->lock);
- return 0;
+ return ret;
}
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
@@ -1090,7 +1328,7 @@ drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
-#if 0
+#ifndef __OpenBSD__
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
int ret;
@@ -1185,6 +1423,9 @@ drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
int i;
free(bufmgr_gem->exec2_objects);
+#ifndef __OpenBSD__
+ free(bufmgr_gem->exec_objects);
+#endif
free(bufmgr_gem->exec_bos);
pthread_mutex_destroy(&bufmgr_gem->lock);
@@ -1220,28 +1461,28 @@ static int
do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain,
- int need_fence)
+ bool need_fence)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
- int fenced_command;
+ bool fenced_command;
if (bo_gem->has_error)
return -ENOMEM;
if (target_bo_gem->has_error) {
- bo_gem->has_error = 1;
+ bo_gem->has_error = true;
return -ENOMEM;
}
/* We never use HW fences for rendering on 965+ */
if (bufmgr_gem->gen >= 4)
- need_fence = 0;
+ need_fence = false;
fenced_command = need_fence;
if (target_bo_gem->tiling_mode == I915_TILING_NONE)
- need_fence = 0;
+ need_fence = false;
/* Create a new relocation list if needed */
if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
@@ -1259,7 +1500,7 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
*/
assert(!bo_gem->used_as_reloc_target);
if (target_bo_gem != bo_gem) {
- target_bo_gem->used_as_reloc_target = 1;
+ target_bo_gem->used_as_reloc_target = true;
bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
}
/* An object needing a fence is a tiled buffer, so it won't have
@@ -1310,15 +1551,82 @@ drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
uint32_t read_domains, uint32_t write_domain)
{
return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
- read_domains, write_domain, 1);
+ read_domains, write_domain, true);
+}
+
+int
+drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ return bo_gem->reloc_count;
+}
+
+/**
+ * Removes existing relocation entries in the BO after "start".
+ *
+ * This allows a user to avoid a two-step process for state setup with
+ * counting up all the buffer objects and doing a
+ * drm_intel_bufmgr_check_aperture_space() before emitting any of the
+ * relocations for the state setup. Instead, save the state of the
+ * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
+ * state, and then check if it still fits in the aperture.
+ *
+ * Any further drm_intel_bufmgr_check_aperture_space() queries
+ * involving this buffer in the tree are undefined after this call.
+ */
+void
+drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int i;
+ struct timespec time;
+
+ clock_gettime(CLOCK_MONOTONIC, &time);
+
+ assert(bo_gem->reloc_count >= start);
+ /* Unreference the cleared target buffers */
+ for (i = start; i < bo_gem->reloc_count; i++) {
+ if (bo_gem->reloc_target_info[i].bo != bo) {
+ drm_intel_gem_bo_unreference_locked_timed(bo_gem->
+ reloc_target_info[i].bo,
+ time.tv_sec);
+ }
+ }
+ bo_gem->reloc_count = start;
}
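
A sketch of the save/emit/roll-back pattern this enables, where
emit_state() stands in for a hypothetical caller-side helper that adds
relocations to batch_bo:

    int saved = drm_intel_gem_bo_get_reloc_count(batch_bo);

    emit_state(batch_bo);
    /* Roll the relocations back if the batch no longer fits. */
    if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0)
            drm_intel_gem_bo_clear_relocs(batch_bo, saved);
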
+#ifndef __OpenBSD__
/**
* Walk the tree of relocations rooted at BO and accumulate the list of
* validations to be performed and update the relocation buffers with
* index values into the validation list.
*/
static void
+drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ int i;
+
+ if (bo_gem->relocs == NULL)
+ return;
+
+ for (i = 0; i < bo_gem->reloc_count; i++) {
+ drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
+
+ if (target_bo == bo)
+ continue;
+
+ /* Continue walking the tree depth-first. */
+ drm_intel_gem_bo_process_reloc(target_bo);
+
+ /* Add the target to the validate list */
+ drm_intel_add_validate_buffer(target_bo);
+ }
+}
+#endif
+
+static void
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
@@ -1345,6 +1653,29 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
}
}
+
+#ifndef __OpenBSD__
+static void
+drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+ int i;
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ /* Update the buffer offset */
+ if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
+ DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
+ bo_gem->gem_handle, bo_gem->name, bo->offset,
+ (unsigned long long)bufmgr_gem->exec_objects[i].
+ offset);
+ bo->offset = bufmgr_gem->exec_objects[i].offset;
+ }
+ }
+}
+#endif
+
static void
drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
{
@@ -1364,6 +1695,74 @@ drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
}
}
+#ifndef __OpenBSD__
+static int
+drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
+ drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ struct drm_i915_gem_execbuffer execbuf;
+ int ret, i;
+
+ if (bo_gem->has_error)
+ return -ENOMEM;
+
+ pthread_mutex_lock(&bufmgr_gem->lock);
+ /* Update indices and set up the validate list. */
+ drm_intel_gem_bo_process_reloc(bo);
+
+ /* Add the batch buffer to the validation list. There are no
+ * relocations pointing to it.
+ */
+ drm_intel_add_validate_buffer(bo);
+
+ execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
+ execbuf.buffer_count = bufmgr_gem->exec_count;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = used;
+ execbuf.cliprects_ptr = (uintptr_t) cliprects;
+ execbuf.num_cliprects = num_cliprects;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = DR4;
+
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_EXECBUFFER,
+ &execbuf);
+ if (ret != 0) {
+ ret = -errno;
+ if (errno == ENOSPC) {
+ DBG("Execbuffer fails to pin. "
+ "Estimate: %u. Actual: %u. Available: %u\n",
+ drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
+ bufmgr_gem->
+ exec_count),
+ drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
+ bufmgr_gem->
+ exec_count),
+ (unsigned int)bufmgr_gem->gtt_size);
+ }
+ }
+ drm_intel_update_buffer_offsets(bufmgr_gem);
+
+ if (bufmgr_gem->bufmgr.debug)
+ drm_intel_gem_dump_validation_list(bufmgr_gem);
+
+ for (i = 0; i < bufmgr_gem->exec_count; i++) {
+ drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ /* Disconnect the buffer from the validate list */
+ bo_gem->validate_index = -1;
+ bufmgr_gem->exec_bos[i] = NULL;
+ }
+ bufmgr_gem->exec_count = 0;
+ pthread_mutex_unlock(&bufmgr_gem->lock);
+
+ return ret;
+}
+#endif
+
static int
drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
@@ -1402,6 +1801,12 @@ drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
execbuf.buffer_count = bufmgr_gem->exec_count;
execbuf.batch_start_offset = 0;
execbuf.batch_len = used;
+#ifndef __OpenBSD__
+ execbuf.cliprects_ptr = (uintptr_t)cliprects;
+ execbuf.num_cliprects = num_cliprects;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = DR4;
+#endif
execbuf.flags = flags;
execbuf.rsvd1 = 0;
execbuf.rsvd2 = 0;
@@ -1577,7 +1982,7 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
if (ret != 0)
return -errno;
bo_gem->global_name = flink.name;
- bo_gem->reusable = 0;
+ bo_gem->reusable = false;
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
}
@@ -1598,7 +2003,7 @@ drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
- bufmgr_gem->bo_reuse = 1;
+ bufmgr_gem->bo_reuse = true;
}
/**
@@ -1614,7 +2019,7 @@ drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
- bufmgr_gem->fenced_relocs = 1;
+ bufmgr_gem->fenced_relocs = true;
}
/**
@@ -1632,7 +2037,7 @@ drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
return 0;
total += bo->size;
- bo_gem->included_in_check_aperture = 1;
+ bo_gem->included_in_check_aperture = true;
for (i = 0; i < bo_gem->reloc_count; i++)
total +=
@@ -1680,7 +2085,7 @@ drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
if (bo == NULL || !bo_gem->included_in_check_aperture)
return;
- bo_gem->included_in_check_aperture = 0;
+ bo_gem->included_in_check_aperture = false;
for (i = 0; i < bo_gem->reloc_count; i++)
drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
@@ -1797,7 +2202,7 @@ drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- bo_gem->reusable = 0;
+ bo_gem->reusable = false;
return 0;
}
@@ -1880,6 +2285,16 @@ init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
}
}
+void
+drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+ bufmgr_gem->vma_max = limit;
+
+ drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
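
A usage sketch, assuming fd is an open DRM device; 4096 is a typical
batch size hint and 256 is an arbitrary example cap (the default set
below in drm_intel_bufmgr_gem_init() is -1, unlimited):

    drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);

    drm_intel_bufmgr_gem_enable_reuse(bufmgr);
    drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, 256);
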
+
/**
* Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
@@ -1892,7 +2307,10 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
drm_intel_bufmgr_gem *bufmgr_gem;
struct drm_i915_gem_get_aperture aperture;
drm_i915_getparam_t gp;
- int ret;
+ int ret, tmp;
+#ifndef __OpenBSD__
+ bool exec2 = false;
+#endif
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
if (bufmgr_gem == NULL)
@@ -1929,14 +2347,38 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
}
- if (IS_GEN2(bufmgr_gem))
+ if (IS_GEN2(bufmgr_gem->pci_device))
bufmgr_gem->gen = 2;
- else if (IS_GEN3(bufmgr_gem))
+ else if (IS_GEN3(bufmgr_gem->pci_device))
bufmgr_gem->gen = 3;
- else if (IS_GEN4(bufmgr_gem))
+ else if (IS_GEN4(bufmgr_gem->pci_device))
bufmgr_gem->gen = 4;
- else
+ else if (IS_GEN5(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 5;
+ else if (IS_GEN6(bufmgr_gem->pci_device))
bufmgr_gem->gen = 6;
+ else if (IS_GEN7(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 7;
+ else
+ assert(0);
+
+ if (IS_GEN3(bufmgr_gem->pci_device) &&
+ bufmgr_gem->gtt_size > 256*1024*1024) {
+ /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
+ * be used for tiled blits. To simplify the accounting, just
+	 * subtract the unmappable part (fixed to 256MB on all known
+ * gen3 devices) if the kernel advertises it. */
+ bufmgr_gem->gtt_size -= 256*1024*1024;
+ }
+
+ gp.value = &tmp;
+
+#ifndef __OpenBSD__
+ gp.param = I915_PARAM_HAS_EXECBUF2;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (!ret)
+ exec2 = true;
+#endif
gp.param = I915_PARAM_HAS_BSD;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
@@ -1950,6 +2392,21 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_relaxed_fencing = ret == 0;
+#ifndef __OpenBSD__
+ gp.param = I915_PARAM_HAS_LLC;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret == -EINVAL) {
+#endif
+	/* Kernel does not support the HAS_LLC query; fall back to GPU
+	 * generation detection and assume that we have LLC on GEN6/7
+ */
+ bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
+ IS_GEN7(bufmgr_gem->pci_device));
+#ifndef __OpenBSD__
+ } else
+ bufmgr_gem->has_llc = ret == 0;
+#endif
+
if (bufmgr_gem->gen < 4) {
gp.param = I915_PARAM_NUM_FENCES_AVAIL;
gp.value = &bufmgr_gem->available_fences;
@@ -2002,8 +2459,19 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
- bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
- bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
+#ifndef __OpenBSD__
+	/*
+	 * Use the new execbuffer2 interface if available; it is the
+	 * only interface available on OpenBSD.
+	 */
+ if (exec2) {
+#endif
+ bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
+ bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
+#ifndef __OpenBSD__
+ } else
+ bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
+#endif
bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
@@ -2019,5 +2487,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
DRMINITLISTHEAD(&bufmgr_gem->named);
init_cache_buckets(bufmgr_gem);
+ DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
+ bufmgr_gem->vma_max = -1; /* unlimited by default */
+
return &bufmgr_gem->bufmgr;
}
diff --git a/lib/libdrm/intel/intel_chipset.h b/lib/libdrm/intel/intel_chipset.h
index b4e0747e7..e3a30fc7d 100644
--- a/lib/libdrm/intel/intel_chipset.h
+++ b/lib/libdrm/intel/intel_chipset.h
@@ -28,22 +28,39 @@
#ifndef _INTEL_CHIPSET_H
#define _INTEL_CHIPSET_H
-#define IS_830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845(dev) ((dev)->pci_device == 0x2562)
-#define IS_85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_865(dev) ((dev)->pci_device == 0x2572)
+#define PCI_CHIP_ILD_G 0x0042
+#define PCI_CHIP_ILM_G 0x0046
+
+#define PCI_CHIP_SANDYBRIDGE_GT1 0x0102 /* desktop */
+#define PCI_CHIP_SANDYBRIDGE_GT2 0x0112
+#define PCI_CHIP_SANDYBRIDGE_GT2_PLUS 0x0122
+#define PCI_CHIP_SANDYBRIDGE_M_GT1 0x0106 /* mobile */
+#define PCI_CHIP_SANDYBRIDGE_M_GT2 0x0116
+#define PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS 0x0126
+#define PCI_CHIP_SANDYBRIDGE_S 0x010A /* server */
+
+#define PCI_CHIP_IVYBRIDGE_GT1 0x0152 /* desktop */
+#define PCI_CHIP_IVYBRIDGE_GT2 0x0162
+#define PCI_CHIP_IVYBRIDGE_M_GT1 0x0156 /* mobile */
+#define PCI_CHIP_IVYBRIDGE_M_GT2 0x0166
+#define PCI_CHIP_IVYBRIDGE_S 0x015a /* server */
+
+#define IS_830(dev) (dev == 0x3577)
+#define IS_845(dev) (dev == 0x2562)
+#define IS_85X(dev) (dev == 0x3582)
+#define IS_865(dev) (dev == 0x2572)
#define IS_GEN2(dev) (IS_830(dev) || \
IS_845(dev) || \
IS_85X(dev) || \
IS_865(dev))
-#define IS_915G(dev) ((dev)->pci_device == 0x2582 || \
- (dev)->pci_device == 0x258a)
-#define IS_915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_945GM(dev) ((dev)->pci_device == 0x27A2 || \
- (dev)->pci_device == 0x27AE)
+#define IS_915G(dev) (dev == 0x2582 || \
+ dev == 0x258a)
+#define IS_915GM(dev) (dev == 0x2592)
+#define IS_945G(dev) (dev == 0x2772)
+#define IS_945GM(dev) (dev == 0x27A2 || \
+ dev == 0x27AE)
#define IS_915(dev) (IS_915G(dev) || \
IS_915GM(dev))
@@ -53,49 +70,68 @@
IS_G33(dev) || \
IS_PINEVIEW(dev))
-#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
- (dev)->pci_device == 0x29B2 || \
- (dev)->pci_device == 0x29D2)
+#define IS_G33(dev) (dev == 0x29C2 || \
+ dev == 0x29B2 || \
+ dev == 0x29D2)
-#define IS_PINEVIEW(dev) ((dev)->pci_device == 0xa001 || \
- (dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (dev == 0xa001 || \
+ dev == 0xa011)
#define IS_GEN3(dev) (IS_915(dev) || \
IS_945(dev) || \
IS_G33(dev) || \
IS_PINEVIEW(dev))
-#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
-
-#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
- (dev)->pci_device == 0x2982 || \
- (dev)->pci_device == 0x2992 || \
- (dev)->pci_device == 0x29A2 || \
- (dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12 || \
- (dev)->pci_device == 0x2A42 || \
- (dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2E42 || \
- (dev)->pci_device == 0x0042 || \
- (dev)->pci_device == 0x0046 || \
+#define IS_I965GM(dev) (dev == 0x2A02)
+
+#define IS_GEN4(dev) (dev == 0x2972 || \
+ dev == 0x2982 || \
+ dev == 0x2992 || \
+ dev == 0x29A2 || \
+ dev == 0x2A02 || \
+ dev == 0x2A12 || \
+ dev == 0x2A42 || \
+ dev == 0x2E02 || \
+ dev == 0x2E12 || \
+ dev == 0x2E22 || \
+ dev == 0x2E32 || \
+ dev == 0x2E42 || \
+ dev == 0x0042 || \
+ dev == 0x0046 || \
IS_I965GM(dev) || \
IS_G4X(dev))
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_GM45(dev) (dev == 0x2A42)
+
+
+#define IS_GEN5(dev) (dev == PCI_CHIP_ILD_G || \
+ dev == PCI_CHIP_ILM_G)
+
+#define IS_GEN6(dev) (dev == PCI_CHIP_SANDYBRIDGE_GT1 || \
+ dev == PCI_CHIP_SANDYBRIDGE_GT2 || \
+ dev == PCI_CHIP_SANDYBRIDGE_GT2_PLUS || \
+ dev == PCI_CHIP_SANDYBRIDGE_M_GT1 || \
+ dev == PCI_CHIP_SANDYBRIDGE_M_GT2 || \
+ dev == PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS || \
+ dev == PCI_CHIP_SANDYBRIDGE_S)
+
+#define IS_GEN7(dev) (dev == PCI_CHIP_IVYBRIDGE_GT1 || \
+ dev == PCI_CHIP_IVYBRIDGE_GT2 || \
+ dev == PCI_CHIP_IVYBRIDGE_M_GT1 || \
+ dev == PCI_CHIP_IVYBRIDGE_M_GT2 || \
+ dev == PCI_CHIP_IVYBRIDGE_S)
-#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2E42 || \
+#define IS_G4X(dev) (dev == 0x2E02 || \
+ dev == 0x2E12 || \
+ dev == 0x2E22 || \
+ dev == 0x2E32 || \
+ dev == 0x2E42 || \
IS_GM45(dev))
#define IS_9XX(dev) (IS_GEN3(dev) || \
IS_GEN4(dev) || \
IS_GEN5(dev) || \
- IS_GEN6(dev))
+ IS_GEN6(dev) || \
+ IS_GEN7(dev))
#endif /* _INTEL_CHIPSET_H */
diff --git a/lib/libdrm/intel/shlib_version b/lib/libdrm/intel/shlib_version
index 79d3d3afd..b52599a16 100644
--- a/lib/libdrm/intel/shlib_version
+++ b/lib/libdrm/intel/shlib_version
@@ -1,2 +1,2 @@
-major=1
-minor=3
+major=2
+minor=0
diff --git a/lib/libdrm/radeon/Makefile b/lib/libdrm/radeon/Makefile
index b5d9a9048..a8df81dc1 100644
--- a/lib/libdrm/radeon/Makefile
+++ b/lib/libdrm/radeon/Makefile
@@ -15,13 +15,15 @@ INCS= radeon_bo.h \
radeon_bo_gem.h \
radeon_cs_gem.h \
radeon_bo_int.h \
- radeon_cs_int.h
+ radeon_cs_int.h \
+ r600_pci_ids.h
SRCS= radeon_bo_gem.c \
radeon_cs_gem.c \
radeon_cs_space.c \
radeon_bo.c \
radeon_cs.c \
+ radeon_surface.c \
bof.c
diff --git a/lib/libdrm/radeon/r600_pci_ids.h b/lib/libdrm/radeon/r600_pci_ids.h
new file mode 100644
index 000000000..0ffb741d7
--- /dev/null
+++ b/lib/libdrm/radeon/r600_pci_ids.h
@@ -0,0 +1,271 @@
+CHIPSET(0x9400, R600_9400, R600)
+CHIPSET(0x9401, R600_9401, R600)
+CHIPSET(0x9402, R600_9402, R600)
+CHIPSET(0x9403, R600_9403, R600)
+CHIPSET(0x9405, R600_9405, R600)
+CHIPSET(0x940A, R600_940A, R600)
+CHIPSET(0x940B, R600_940B, R600)
+CHIPSET(0x940F, R600_940F, R600)
+
+CHIPSET(0x94C0, RV610_94C0, RV610)
+CHIPSET(0x94C1, RV610_94C1, RV610)
+CHIPSET(0x94C3, RV610_94C3, RV610)
+CHIPSET(0x94C4, RV610_94C4, RV610)
+CHIPSET(0x94C5, RV610_94C5, RV610)
+CHIPSET(0x94C6, RV610_94C6, RV610)
+CHIPSET(0x94C7, RV610_94C7, RV610)
+CHIPSET(0x94C8, RV610_94C8, RV610)
+CHIPSET(0x94C9, RV610_94C9, RV610)
+CHIPSET(0x94CB, RV610_94CB, RV610)
+CHIPSET(0x94CC, RV610_94CC, RV610)
+CHIPSET(0x94CD, RV610_94CD, RV610)
+
+CHIPSET(0x9580, RV630_9580, RV630)
+CHIPSET(0x9581, RV630_9581, RV630)
+CHIPSET(0x9583, RV630_9583, RV630)
+CHIPSET(0x9586, RV630_9586, RV630)
+CHIPSET(0x9587, RV630_9587, RV630)
+CHIPSET(0x9588, RV630_9588, RV630)
+CHIPSET(0x9589, RV630_9589, RV630)
+CHIPSET(0x958A, RV630_958A, RV630)
+CHIPSET(0x958B, RV630_958B, RV630)
+CHIPSET(0x958C, RV630_958C, RV630)
+CHIPSET(0x958D, RV630_958D, RV630)
+CHIPSET(0x958E, RV630_958E, RV630)
+CHIPSET(0x958F, RV630_958F, RV630)
+
+CHIPSET(0x9500, RV670_9500, RV670)
+CHIPSET(0x9501, RV670_9501, RV670)
+CHIPSET(0x9504, RV670_9504, RV670)
+CHIPSET(0x9505, RV670_9505, RV670)
+CHIPSET(0x9506, RV670_9506, RV670)
+CHIPSET(0x9507, RV670_9507, RV670)
+CHIPSET(0x9508, RV670_9508, RV670)
+CHIPSET(0x9509, RV670_9509, RV670)
+CHIPSET(0x950F, RV670_950F, RV670)
+CHIPSET(0x9511, RV670_9511, RV670)
+CHIPSET(0x9515, RV670_9515, RV670)
+CHIPSET(0x9517, RV670_9517, RV670)
+CHIPSET(0x9519, RV670_9519, RV670)
+
+CHIPSET(0x95C0, RV620_95C0, RV620)
+CHIPSET(0x95C2, RV620_95C2, RV620)
+CHIPSET(0x95C4, RV620_95C4, RV620)
+CHIPSET(0x95C5, RV620_95C5, RV620)
+CHIPSET(0x95C6, RV620_95C6, RV620)
+CHIPSET(0x95C7, RV620_95C7, RV620)
+CHIPSET(0x95C9, RV620_95C9, RV620)
+CHIPSET(0x95CC, RV620_95CC, RV620)
+CHIPSET(0x95CD, RV620_95CD, RV620)
+CHIPSET(0x95CE, RV620_95CE, RV620)
+CHIPSET(0x95CF, RV620_95CF, RV620)
+
+CHIPSET(0x9590, RV635_9590, RV635)
+CHIPSET(0x9591, RV635_9591, RV635)
+CHIPSET(0x9593, RV635_9593, RV635)
+CHIPSET(0x9595, RV635_9595, RV635)
+CHIPSET(0x9596, RV635_9596, RV635)
+CHIPSET(0x9597, RV635_9597, RV635)
+CHIPSET(0x9598, RV635_9598, RV635)
+CHIPSET(0x9599, RV635_9599, RV635)
+CHIPSET(0x959B, RV635_959B, RV635)
+
+CHIPSET(0x9610, RS780_9610, RS780)
+CHIPSET(0x9611, RS780_9611, RS780)
+CHIPSET(0x9612, RS780_9612, RS780)
+CHIPSET(0x9613, RS780_9613, RS780)
+CHIPSET(0x9614, RS780_9614, RS780)
+CHIPSET(0x9615, RS780_9615, RS780)
+CHIPSET(0x9616, RS780_9616, RS780)
+
+CHIPSET(0x9710, RS880_9710, RS880)
+CHIPSET(0x9711, RS880_9711, RS880)
+CHIPSET(0x9712, RS880_9712, RS880)
+CHIPSET(0x9713, RS880_9713, RS880)
+CHIPSET(0x9714, RS880_9714, RS880)
+CHIPSET(0x9715, RS880_9715, RS880)
+
+CHIPSET(0x9440, RV770_9440, RV770)
+CHIPSET(0x9441, RV770_9441, RV770)
+CHIPSET(0x9442, RV770_9442, RV770)
+CHIPSET(0x9443, RV770_9443, RV770)
+CHIPSET(0x9444, RV770_9444, RV770)
+CHIPSET(0x9446, RV770_9446, RV770)
+CHIPSET(0x944A, RV770_944A, RV770)
+CHIPSET(0x944B, RV770_944B, RV770)
+CHIPSET(0x944C, RV770_944C, RV770)
+CHIPSET(0x944E, RV770_944E, RV770)
+CHIPSET(0x9450, RV770_9450, RV770)
+CHIPSET(0x9452, RV770_9452, RV770)
+CHIPSET(0x9456, RV770_9456, RV770)
+CHIPSET(0x945A, RV770_945A, RV770)
+CHIPSET(0x945B, RV770_945B, RV770)
+CHIPSET(0x945E, RV770_945E, RV770)
+CHIPSET(0x9460, RV790_9460, RV770)
+CHIPSET(0x9462, RV790_9462, RV770)
+CHIPSET(0x946A, RV770_946A, RV770)
+CHIPSET(0x946B, RV770_946B, RV770)
+CHIPSET(0x947A, RV770_947A, RV770)
+CHIPSET(0x947B, RV770_947B, RV770)
+
+CHIPSET(0x9480, RV730_9480, RV730)
+CHIPSET(0x9487, RV730_9487, RV730)
+CHIPSET(0x9488, RV730_9488, RV730)
+CHIPSET(0x9489, RV730_9489, RV730)
+CHIPSET(0x948A, RV730_948A, RV730)
+CHIPSET(0x948F, RV730_948F, RV730)
+CHIPSET(0x9490, RV730_9490, RV730)
+CHIPSET(0x9491, RV730_9491, RV730)
+CHIPSET(0x9495, RV730_9495, RV730)
+CHIPSET(0x9498, RV730_9498, RV730)
+CHIPSET(0x949C, RV730_949C, RV730)
+CHIPSET(0x949E, RV730_949E, RV730)
+CHIPSET(0x949F, RV730_949F, RV730)
+
+CHIPSET(0x9540, RV710_9540, RV710)
+CHIPSET(0x9541, RV710_9541, RV710)
+CHIPSET(0x9542, RV710_9542, RV710)
+CHIPSET(0x954E, RV710_954E, RV710)
+CHIPSET(0x954F, RV710_954F, RV710)
+CHIPSET(0x9552, RV710_9552, RV710)
+CHIPSET(0x9553, RV710_9553, RV710)
+CHIPSET(0x9555, RV710_9555, RV710)
+CHIPSET(0x9557, RV710_9557, RV710)
+CHIPSET(0x955F, RV710_955F, RV710)
+
+CHIPSET(0x94A0, RV740_94A0, RV740)
+CHIPSET(0x94A1, RV740_94A1, RV740)
+CHIPSET(0x94A3, RV740_94A3, RV740)
+CHIPSET(0x94B1, RV740_94B1, RV740)
+CHIPSET(0x94B3, RV740_94B3, RV740)
+CHIPSET(0x94B4, RV740_94B4, RV740)
+CHIPSET(0x94B5, RV740_94B5, RV740)
+CHIPSET(0x94B9, RV740_94B9, RV740)
+
+CHIPSET(0x68E0, CEDAR_68E0, CEDAR)
+CHIPSET(0x68E1, CEDAR_68E1, CEDAR)
+CHIPSET(0x68E4, CEDAR_68E4, CEDAR)
+CHIPSET(0x68E5, CEDAR_68E5, CEDAR)
+CHIPSET(0x68E8, CEDAR_68E8, CEDAR)
+CHIPSET(0x68E9, CEDAR_68E9, CEDAR)
+CHIPSET(0x68F1, CEDAR_68F1, CEDAR)
+CHIPSET(0x68F2, CEDAR_68F2, CEDAR)
+CHIPSET(0x68F8, CEDAR_68F8, CEDAR)
+CHIPSET(0x68F9, CEDAR_68F9, CEDAR)
+CHIPSET(0x68FE, CEDAR_68FE, CEDAR)
+
+CHIPSET(0x68C0, REDWOOD_68C0, REDWOOD)
+CHIPSET(0x68C1, REDWOOD_68C1, REDWOOD)
+CHIPSET(0x68C8, REDWOOD_68C8, REDWOOD)
+CHIPSET(0x68C9, REDWOOD_68C9, REDWOOD)
+CHIPSET(0x68D8, REDWOOD_68D8, REDWOOD)
+CHIPSET(0x68D9, REDWOOD_68D9, REDWOOD)
+CHIPSET(0x68DA, REDWOOD_68DA, REDWOOD)
+CHIPSET(0x68DE, REDWOOD_68DE, REDWOOD)
+
+CHIPSET(0x68A0, JUNIPER_68A0, JUNIPER)
+CHIPSET(0x68A1, JUNIPER_68A1, JUNIPER)
+CHIPSET(0x68A8, JUNIPER_68A8, JUNIPER)
+CHIPSET(0x68A9, JUNIPER_68A9, JUNIPER)
+CHIPSET(0x68B0, JUNIPER_68B0, JUNIPER)
+CHIPSET(0x68B8, JUNIPER_68B8, JUNIPER)
+CHIPSET(0x68B9, JUNIPER_68B9, JUNIPER)
+CHIPSET(0x68BA, JUNIPER_68BA, JUNIPER)
+CHIPSET(0x68BE, JUNIPER_68BE, JUNIPER)
+CHIPSET(0x68BF, JUNIPER_68BF, JUNIPER)
+
+CHIPSET(0x6880, CYPRESS_6880, CYPRESS)
+CHIPSET(0x6888, CYPRESS_6888, CYPRESS)
+CHIPSET(0x6889, CYPRESS_6889, CYPRESS)
+CHIPSET(0x688A, CYPRESS_688A, CYPRESS)
+CHIPSET(0x6898, CYPRESS_6898, CYPRESS)
+CHIPSET(0x6899, CYPRESS_6899, CYPRESS)
+CHIPSET(0x689B, CYPRESS_689B, CYPRESS)
+CHIPSET(0x689E, CYPRESS_689E, CYPRESS)
+
+CHIPSET(0x689C, HEMLOCK_689C, HEMLOCK)
+CHIPSET(0x689D, HEMLOCK_689D, HEMLOCK)
+
+CHIPSET(0x9802, PALM_9802, PALM)
+CHIPSET(0x9803, PALM_9803, PALM)
+CHIPSET(0x9804, PALM_9804, PALM)
+CHIPSET(0x9805, PALM_9805, PALM)
+CHIPSET(0x9806, PALM_9806, PALM)
+CHIPSET(0x9807, PALM_9807, PALM)
+
+CHIPSET(0x9640, SUMO_9640, SUMO)
+CHIPSET(0x9641, SUMO_9641, SUMO)
+CHIPSET(0x9642, SUMO2_9642, SUMO2)
+CHIPSET(0x9643, SUMO2_9643, SUMO2)
+CHIPSET(0x9644, SUMO2_9644, SUMO2)
+CHIPSET(0x9645, SUMO2_9645, SUMO2)
+CHIPSET(0x9647, SUMO_9647, SUMO)
+CHIPSET(0x9648, SUMO_9648, SUMO)
+CHIPSET(0x964a, SUMO_964A, SUMO)
+CHIPSET(0x964e, SUMO_964E, SUMO)
+CHIPSET(0x964f, SUMO_964F, SUMO)
+
+CHIPSET(0x6700, CAYMAN_6700, CAYMAN)
+CHIPSET(0x6701, CAYMAN_6701, CAYMAN)
+CHIPSET(0x6702, CAYMAN_6702, CAYMAN)
+CHIPSET(0x6703, CAYMAN_6703, CAYMAN)
+CHIPSET(0x6704, CAYMAN_6704, CAYMAN)
+CHIPSET(0x6705, CAYMAN_6705, CAYMAN)
+CHIPSET(0x6706, CAYMAN_6706, CAYMAN)
+CHIPSET(0x6707, CAYMAN_6707, CAYMAN)
+CHIPSET(0x6708, CAYMAN_6708, CAYMAN)
+CHIPSET(0x6709, CAYMAN_6709, CAYMAN)
+CHIPSET(0x6718, CAYMAN_6718, CAYMAN)
+CHIPSET(0x6719, CAYMAN_6719, CAYMAN)
+CHIPSET(0x671C, CAYMAN_671C, CAYMAN)
+CHIPSET(0x671D, CAYMAN_671D, CAYMAN)
+CHIPSET(0x671F, CAYMAN_671F, CAYMAN)
+
+CHIPSET(0x6720, BARTS_6720, BARTS)
+CHIPSET(0x6721, BARTS_6721, BARTS)
+CHIPSET(0x6722, BARTS_6722, BARTS)
+CHIPSET(0x6723, BARTS_6723, BARTS)
+CHIPSET(0x6724, BARTS_6724, BARTS)
+CHIPSET(0x6725, BARTS_6725, BARTS)
+CHIPSET(0x6726, BARTS_6726, BARTS)
+CHIPSET(0x6727, BARTS_6727, BARTS)
+CHIPSET(0x6728, BARTS_6728, BARTS)
+CHIPSET(0x6729, BARTS_6729, BARTS)
+CHIPSET(0x6738, BARTS_6738, BARTS)
+CHIPSET(0x6739, BARTS_6739, BARTS)
+CHIPSET(0x673E, BARTS_673E, BARTS)
+CHIPSET(0x6740, TURKS_6740, TURKS)
+CHIPSET(0x6741, TURKS_6741, TURKS)
+CHIPSET(0x6742, TURKS_6742, TURKS)
+CHIPSET(0x6743, TURKS_6743, TURKS)
+CHIPSET(0x6744, TURKS_6744, TURKS)
+CHIPSET(0x6745, TURKS_6745, TURKS)
+CHIPSET(0x6746, TURKS_6746, TURKS)
+CHIPSET(0x6747, TURKS_6747, TURKS)
+CHIPSET(0x6748, TURKS_6748, TURKS)
+CHIPSET(0x6749, TURKS_6749, TURKS)
+CHIPSET(0x6750, TURKS_6750, TURKS)
+CHIPSET(0x6758, TURKS_6758, TURKS)
+CHIPSET(0x6759, TURKS_6759, TURKS)
+CHIPSET(0x675F, TURKS_675F, TURKS)
+CHIPSET(0x6840, TURKS_6840, TURKS)
+CHIPSET(0x6841, TURKS_6841, TURKS)
+CHIPSET(0x6842, TURKS_6842, TURKS)
+CHIPSET(0x6843, TURKS_6843, TURKS)
+CHIPSET(0x6849, TURKS_6849, TURKS)
+CHIPSET(0x6850, TURKS_6850, TURKS)
+CHIPSET(0x6858, TURKS_6858, TURKS)
+CHIPSET(0x6859, TURKS_6859, TURKS)
+
+CHIPSET(0x6760, CAICOS_6760, CAICOS)
+CHIPSET(0x6761, CAICOS_6761, CAICOS)
+CHIPSET(0x6762, CAICOS_6762, CAICOS)
+CHIPSET(0x6763, CAICOS_6763, CAICOS)
+CHIPSET(0x6764, CAICOS_6764, CAICOS)
+CHIPSET(0x6765, CAICOS_6765, CAICOS)
+CHIPSET(0x6766, CAICOS_6766, CAICOS)
+CHIPSET(0x6767, CAICOS_6767, CAICOS)
+CHIPSET(0x6768, CAICOS_6768, CAICOS)
+CHIPSET(0x6770, CAICOS_6770, CAICOS)
+CHIPSET(0x6778, CAICOS_6778, CAICOS)
+CHIPSET(0x6779, CAICOS_6779, CAICOS)
diff --git a/lib/libdrm/radeon/radeon_surface.c b/lib/libdrm/radeon/radeon_surface.c
new file mode 100644
index 000000000..d7e918722
--- /dev/null
+++ b/lib/libdrm/radeon/radeon_surface.c
@@ -0,0 +1,995 @@
+/*
+ * Copyright © 2011 Red Hat All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <jglisse@redhat.com>
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include "drm.h"
+#include "xf86drm.h"
+#include "radeon_drm.h"
+#include "radeon_surface.h"
+
+#define ALIGN(value, alignment) (((value) + alignment - 1) & ~(alignment - 1))
+#define MAX2(A, B) ((A) > (B) ? (A) : (B))
+#define MIN2(A, B) ((A) < (B) ? (A) : (B))
+
+/* keep this private */
+enum radeon_family {
+ CHIP_UNKNOWN,
+ CHIP_R600,
+ CHIP_RV610,
+ CHIP_RV630,
+ CHIP_RV670,
+ CHIP_RV620,
+ CHIP_RV635,
+ CHIP_RS780,
+ CHIP_RS880,
+ CHIP_RV770,
+ CHIP_RV730,
+ CHIP_RV710,
+ CHIP_RV740,
+ CHIP_CEDAR,
+ CHIP_REDWOOD,
+ CHIP_JUNIPER,
+ CHIP_CYPRESS,
+ CHIP_HEMLOCK,
+ CHIP_PALM,
+ CHIP_SUMO,
+ CHIP_SUMO2,
+ CHIP_BARTS,
+ CHIP_TURKS,
+ CHIP_CAICOS,
+ CHIP_CAYMAN,
+ CHIP_LAST,
+};
+
+typedef int (*hw_init_surface_t)(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+typedef int (*hw_best_surface_t)(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+
+struct radeon_hw_info {
+	/* applies to r6xx and evergreen */
+ uint32_t group_bytes;
+ uint32_t num_banks;
+ uint32_t num_pipes;
+	/* applies to evergreen */
+ uint32_t row_size;
+ unsigned allow_2d;
+};
+
+struct radeon_surface_manager {
+ int fd;
+ uint32_t device_id;
+ struct radeon_hw_info hw_info;
+ unsigned family;
+ hw_init_surface_t surface_init;
+ hw_best_surface_t surface_best;
+};
+
+/* helper */
+static int radeon_get_value(int fd, unsigned req, uint32_t *value)
+{
+ struct drm_radeon_info info = {};
+ int r;
+
+ *value = 0;
+ info.request = req;
+ info.value = (uintptr_t)value;
+ r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info,
+ sizeof(struct drm_radeon_info));
+ return r;
+}
+
+static int radeon_get_family(struct radeon_surface_manager *surf_man)
+{
+ switch (surf_man->device_id) {
+#define CHIPSET(pci_id, name, fam) case pci_id: surf_man->family = CHIP_##fam; break;
+#include "r600_pci_ids.h"
+#undef CHIPSET
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
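+
+/* The CHIPSET() X-macro include above turns every r600_pci_ids.h entry
+ * into a switch case; for instance CHIPSET(0x6778, CAICOS_6778, CAICOS)
+ * expands to:
+ *
+ *     case 0x6778: surf_man->family = CHIP_CAICOS; break;
+ */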
+
+static unsigned next_power_of_two(unsigned x)
+{
+ if (x <= 1)
+ return 1;
+
+ return (1 << ((sizeof(unsigned) * 8) - __builtin_clz(x - 1)));
+}
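+
+/* For example, next_power_of_two(17) = 32 and next_power_of_two(32) = 32.
+ * __builtin_clz is a GCC/Clang builtin that is undefined for a zero
+ * argument, which the x <= 1 guard above avoids.
+ */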
+
+static unsigned mip_minify(unsigned size, unsigned level)
+{
+ unsigned val;
+
+ val = MAX2(1, size >> level);
+ if (level > 0)
+ val = next_power_of_two(val);
+ return val;
+}
+
+static void surf_minify(struct radeon_surface *surf,
+ unsigned level,
+ uint32_t xalign, uint32_t yalign, uint32_t zalign,
+ unsigned offset)
+{
+ surf->level[level].npix_x = mip_minify(surf->npix_x, level);
+ surf->level[level].npix_y = mip_minify(surf->npix_y, level);
+ surf->level[level].npix_z = mip_minify(surf->npix_z, level);
+ surf->level[level].nblk_x = (surf->level[level].npix_x + surf->blk_w - 1) / surf->blk_w;
+ surf->level[level].nblk_y = (surf->level[level].npix_y + surf->blk_h - 1) / surf->blk_h;
+ surf->level[level].nblk_z = (surf->level[level].npix_z + surf->blk_d - 1) / surf->blk_d;
+ if (surf->level[level].mode == RADEON_SURF_MODE_2D) {
+ if (surf->level[level].nblk_x < xalign || surf->level[level].nblk_y < yalign) {
+ surf->level[level].mode = RADEON_SURF_MODE_1D;
+ return;
+ }
+ }
+ surf->level[level].nblk_x = ALIGN(surf->level[level].nblk_x, xalign);
+ surf->level[level].nblk_y = ALIGN(surf->level[level].nblk_y, yalign);
+ surf->level[level].nblk_z = ALIGN(surf->level[level].nblk_z, zalign);
+
+ surf->level[level].offset = offset;
+ surf->level[level].pitch_bytes = surf->level[level].nblk_x * surf->bpe;
+ surf->level[level].slice_size = surf->level[level].pitch_bytes * surf->level[level].nblk_y;
+
+ surf->bo_size = offset + surf->level[level].slice_size * surf->level[level].nblk_z * surf->array_size;
+}
+
+/* ===========================================================================
+ * r600/r700 family
+ */
+static int r6_init_hw_info(struct radeon_surface_manager *surf_man)
+{
+ uint32_t tiling_config;
+ drmVersionPtr version;
+ int r;
+
+ r = radeon_get_value(surf_man->fd, RADEON_INFO_TILING_CONFIG,
+ &tiling_config);
+ if (r) {
+ return r;
+ }
+
+ surf_man->hw_info.allow_2d = 0;
+ version = drmGetVersion(surf_man->fd);
+ if (version && version->version_minor >= 14) {
+ surf_man->hw_info.allow_2d = 1;
+ }
+
+ switch ((tiling_config & 0xe) >> 1) {
+ case 0:
+ surf_man->hw_info.num_pipes = 1;
+ break;
+ case 1:
+ surf_man->hw_info.num_pipes = 2;
+ break;
+ case 2:
+ surf_man->hw_info.num_pipes = 4;
+ break;
+ case 3:
+ surf_man->hw_info.num_pipes = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch ((tiling_config & 0x30) >> 4) {
+ case 0:
+ surf_man->hw_info.num_banks = 4;
+ break;
+ case 1:
+ surf_man->hw_info.num_banks = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch ((tiling_config & 0xc0) >> 6) {
+ case 0:
+ surf_man->hw_info.group_bytes = 256;
+ break;
+ case 1:
+ surf_man->hw_info.group_bytes = 512;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
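+
+/* Worked example of the decode above, assuming the kernel reported
+ * tiling_config = 0x14:
+ *
+ *     (0x14 & 0x0e) >> 1 == 2  ->  num_pipes   = 4
+ *     (0x14 & 0x30) >> 4 == 1  ->  num_banks   = 8
+ *     (0x14 & 0xc0) >> 6 == 0  ->  group_bytes = 256
+ */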
+
+static int r6_surface_init_linear(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign;
+ unsigned i;
+
+ /* compute alignment */
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+	/* the 32 alignment is for scanout, cb or db; to allow a texture to
+	 * be easily bound as such, we force this alignment on all surfaces
+	 */
+ xalign = MAX2(32, surf_man->hw_info.group_bytes / surf->bpe);
+ yalign = 1;
+ zalign = 1;
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_LINEAR;
+ surf_minify(surf, i, xalign, yalign, zalign, offset);
+		/* level 0 and the first mipmap need to be aligned */
+ offset = surf->bo_size;
+		if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int r6_surface_init_linear_aligned(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign;
+ unsigned i;
+
+ /* compute alignment */
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+ xalign = MAX2(64, surf_man->hw_info.group_bytes / surf->bpe);
+ yalign = 1;
+ zalign = 1;
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
+ surf_minify(surf, i, xalign, yalign, zalign, offset);
+		/* level 0 and the first mipmap need to be aligned */
+ offset = surf->bo_size;
+		if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int r6_surface_init_1d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, tilew;
+ unsigned i;
+
+ /* compute alignment */
+ tilew = 8;
+ xalign = surf_man->hw_info.group_bytes / (tilew * surf->bpe * surf->nsamples);
+ xalign = MAX2(tilew, xalign);
+ yalign = tilew;
+ zalign = 1;
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_1D;
+ surf_minify(surf, i, xalign, yalign, zalign, offset);
+		/* level 0 and the first mipmap need to be aligned */
+ offset = surf->bo_size;
+		if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int r6_surface_init_2d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, tilew;
+ unsigned i;
+
+ /* compute alignment */
+ tilew = 8;
+ zalign = 1;
+ xalign = (surf_man->hw_info.group_bytes * surf_man->hw_info.num_banks) /
+ (tilew * surf->bpe * surf->nsamples);
+ xalign = MAX2(tilew * surf_man->hw_info.num_banks, xalign);
+ yalign = tilew * surf_man->hw_info.num_pipes;
+ if (!start_level) {
+ surf->bo_alignment =
+ MAX2(surf_man->hw_info.num_pipes *
+ surf_man->hw_info.num_banks *
+ surf->bpe * 64,
+ xalign * yalign * surf->nsamples * surf->bpe);
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_2D;
+ surf_minify(surf, i, xalign, yalign, zalign, offset);
+ if (surf->level[i].mode == RADEON_SURF_MODE_1D) {
+ return r6_surface_init_1d(surf_man, surf, offset, i);
+ }
+		/* level 0 and the first mipmap need to be aligned */
+ offset = surf->bo_size;
+		if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int r6_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode;
+ int r;
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ /* force 1d on kernel that can't do 2d */
+ if (!surf_man->hw_info.allow_2d && mode > RADEON_SURF_MODE_1D) {
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(mode, MODE);
+ }
+
+ /* check surface dimension */
+ if (surf->npix_x > 8192 || surf->npix_y > 8192 || surf->npix_z > 8192) {
+ return -EINVAL;
+ }
+
+ /* check mipmap last_level */
+ if (surf->last_level > 14) {
+ return -EINVAL;
+ }
+
+ /* check tiling mode */
+ switch (mode) {
+ case RADEON_SURF_MODE_LINEAR:
+ r = r6_surface_init_linear(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ r = r6_surface_init_linear_aligned(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_1D:
+ r = r6_surface_init_1d(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_2D:
+ r = r6_surface_init_2d(surf_man, surf, 0, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return r;
+}
+
+static int r6_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ /* no value to optimize for r6xx/r7xx */
+ return 0;
+}
+
+
+/* ===========================================================================
+ * evergreen family
+ */
+static int eg_init_hw_info(struct radeon_surface_manager *surf_man)
+{
+ uint32_t tiling_config;
+ drmVersionPtr version;
+ int r;
+
+ r = radeon_get_value(surf_man->fd, RADEON_INFO_TILING_CONFIG,
+ &tiling_config);
+ if (r) {
+ return r;
+ }
+
+ surf_man->hw_info.allow_2d = 0;
+ version = drmGetVersion(surf_man->fd);
+ if (version && version->version_minor >= 14) {
+ surf_man->hw_info.allow_2d = 1;
+ }
+
+ switch (tiling_config & 0xf) {
+ case 0:
+ surf_man->hw_info.num_pipes = 1;
+ break;
+ case 1:
+ surf_man->hw_info.num_pipes = 2;
+ break;
+ case 2:
+ surf_man->hw_info.num_pipes = 4;
+ break;
+ case 3:
+ surf_man->hw_info.num_pipes = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch ((tiling_config & 0xf0) >> 4) {
+ case 0:
+ surf_man->hw_info.num_banks = 4;
+ break;
+ case 1:
+ surf_man->hw_info.num_banks = 8;
+ break;
+ case 2:
+ surf_man->hw_info.num_banks = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch ((tiling_config & 0xf00) >> 8) {
+ case 0:
+ surf_man->hw_info.group_bytes = 256;
+ break;
+ case 1:
+ surf_man->hw_info.group_bytes = 512;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch ((tiling_config & 0xf000) >> 12) {
+ case 0:
+ surf_man->hw_info.row_size = 1024;
+ break;
+ case 1:
+ surf_man->hw_info.row_size = 2048;
+ break;
+ case 2:
+ surf_man->hw_info.row_size = 4096;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void eg_surf_minify(struct radeon_surface *surf,
+ unsigned level,
+ unsigned slice_pt,
+ unsigned mtilew,
+ unsigned mtileh,
+ unsigned mtileb,
+ unsigned offset)
+{
+ unsigned mtile_pr, mtile_ps;
+
+ surf->level[level].npix_x = mip_minify(surf->npix_x, level);
+ surf->level[level].npix_y = mip_minify(surf->npix_y, level);
+ surf->level[level].npix_z = mip_minify(surf->npix_z, level);
+ surf->level[level].nblk_x = (surf->level[level].npix_x + surf->blk_w - 1) / surf->blk_w;
+ surf->level[level].nblk_y = (surf->level[level].npix_y + surf->blk_h - 1) / surf->blk_h;
+ surf->level[level].nblk_z = (surf->level[level].npix_z + surf->blk_d - 1) / surf->blk_d;
+ if (surf->level[level].mode == RADEON_SURF_MODE_2D) {
+ if (surf->level[level].nblk_x < mtilew || surf->level[level].nblk_y < mtileh) {
+ surf->level[level].mode = RADEON_SURF_MODE_1D;
+ return;
+ }
+ }
+ surf->level[level].nblk_x = ALIGN(surf->level[level].nblk_x, mtilew);
+ surf->level[level].nblk_y = ALIGN(surf->level[level].nblk_y, mtileh);
+ surf->level[level].nblk_z = ALIGN(surf->level[level].nblk_z, 1);
+
+	/* macro tiles per row */
+ mtile_pr = surf->level[level].nblk_x / mtilew;
+	/* macro tiles per slice */
+ mtile_ps = (mtile_pr * surf->level[level].nblk_y) / mtileh;
+
+ surf->level[level].offset = offset;
+ surf->level[level].pitch_bytes = surf->level[level].nblk_x * surf->bpe * slice_pt;
+ surf->level[level].slice_size = mtile_ps * mtileb * slice_pt;
+
+ surf->bo_size = offset + surf->level[level].slice_size * surf->level[level].nblk_z * surf->array_size;
+}
+
+static int eg_surface_init_1d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, tilew;
+ unsigned i;
+
+ /* compute alignment */
+ tilew = 8;
+ xalign = surf_man->hw_info.group_bytes / (tilew * surf->bpe * surf->nsamples);
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ surf->stencil_offset = 0;
+ surf->stencil_tile_split = 0;
+ xalign = surf_man->hw_info.group_bytes / (tilew * surf->nsamples);
+ }
+ xalign = MAX2(tilew, xalign);
+ yalign = tilew;
+ zalign = 1;
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_1D;
+ surf_minify(surf, i, xalign, yalign, zalign, offset);
+		/* level 0 and the first mipmap need to be aligned */
+ offset = surf->bo_size;
+		if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ surf->stencil_offset = ALIGN(surf->bo_size, surf->bo_alignment);
+ surf->bo_size = surf->stencil_offset + surf->bo_size / 4;
+ }
+
+ return 0;
+}
+
+static int eg_surface_init_2d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ unsigned tilew, tileh, tileb;
+ unsigned mtilew, mtileh, mtileb;
+ unsigned slice_pt;
+ unsigned i;
+
+ surf->stencil_offset = 0;
+ /* compute tile values */
+ tilew = 8;
+ tileh = 8;
+ tileb = tilew * tileh * surf->bpe * surf->nsamples;
+ /* slices per tile */
+ slice_pt = 1;
+ if (tileb > surf->tile_split) {
+ slice_pt = tileb / surf->tile_split;
+ }
+ tileb = tileb / slice_pt;
+
+ /* macro tile width & height */
+ mtilew = (tilew * surf->bankw * surf_man->hw_info.num_pipes) * surf->mtilea;
+ mtileh = (tileh * surf->bankh * surf_man->hw_info.num_banks) / surf->mtilea;
+ /* macro tile bytes */
+ mtileb = (mtilew / tilew) * (mtileh / tileh) * tileb;
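+	/* For example, assuming bpe = 4, nsamples = 1, tile_split = 1024,
+	 * bankw = 1, bankh = 2, mtilea = 1, num_pipes = 2 and num_banks = 8:
+	 * tileb = 8 * 8 * 4 = 256 (no split since 256 <= 1024, slice_pt = 1),
+	 * mtilew = 8 * 1 * 2 * 1 = 16, mtileh = 8 * 2 * 8 / 1 = 128, and
+	 * mtileb = (16 / 8) * (128 / 8) * 256 = 8192 bytes per macro tile.
+	 */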
+
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, mtileb);
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_2D;
+ eg_surf_minify(surf, i, slice_pt, mtilew, mtileh, mtileb, offset);
+ if (surf->level[i].mode == RADEON_SURF_MODE_1D) {
+ return eg_surface_init_1d(surf_man, surf, offset, i);
+ }
+		/* level 0 and the first mipmap need to be aligned */
+ offset = surf->bo_size;
+		if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ surf->stencil_offset = ALIGN(surf->bo_size, surf->bo_alignment);
+ surf->bo_size = surf->stencil_offset + surf->bo_size / 4;
+ }
+
+ return 0;
+}
+
+static int eg_surface_sanity(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned mode)
+{
+ unsigned tileb;
+
+ /* check surface dimension */
+ if (surf->npix_x > 16384 || surf->npix_y > 16384 || surf->npix_z > 16384) {
+ return -EINVAL;
+ }
+
+ /* check mipmap last_level */
+ if (surf->last_level > 15) {
+ return -EINVAL;
+ }
+
+ /* force 1d on kernel that can't do 2d */
+ if (!surf_man->hw_info.allow_2d && mode > RADEON_SURF_MODE_1D) {
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(mode, MODE);
+ }
+
+ /* check tile split */
+ if (mode == RADEON_SURF_MODE_2D) {
+ switch (surf->tile_split) {
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (surf->mtilea) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* check aspect ratio */
+ if (surf_man->hw_info.num_banks < surf->mtilea) {
+ return -EINVAL;
+ }
+ /* check bank width */
+ switch (surf->bankw) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* check bank height */
+ switch (surf->bankh) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ tileb = MIN2(surf->tile_split, 64 * surf->bpe * surf->nsamples);
+ if ((tileb * surf->bankh * surf->bankw) < surf_man->hw_info.group_bytes) {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int eg_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode;
+ int r;
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+	/* for some reason evergreen needs room for stencil right after depth */
+ if (surf->flags & RADEON_SURF_ZBUFFER) {
+ surf->flags |= RADEON_SURF_SBUFFER;
+ }
+
+ r = eg_surface_sanity(surf_man, surf, mode);
+ if (r) {
+ return r;
+ }
+
+ /* check tiling mode */
+ switch (mode) {
+ case RADEON_SURF_MODE_LINEAR:
+ r = r6_surface_init_linear(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ r = r6_surface_init_linear_aligned(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_1D:
+ r = eg_surface_init_1d(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_2D:
+ r = eg_surface_init_2d(surf_man, surf, 0, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return r;
+}
+
+static unsigned log2_int(unsigned x)
+{
+ unsigned l;
+
+ if (x < 2) {
+ return 0;
+ }
+ for (l = 2; ; l++) {
+ if ((unsigned)(1 << l) > x) {
+ return l - 1;
+ }
+ }
+ return 0;
+}
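+
+/* log2_int() computes floor(log2(x)): log2_int(8) = 3 and log2_int(9) = 3.
+ * The loop only exits through the return inside it, so the trailing
+ * return 0 is unreachable and merely placates compilers.
+ */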
+
+/* compute best tile_split, bankw, bankh, mtilea
+ * depending on surface
+ */
+static int eg_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, tileb, h_over_w;
+ int r;
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+	/* for some reason evergreen needs room for stencil right after depth */
+ if (surf->flags & RADEON_SURF_ZBUFFER) {
+ surf->flags |= RADEON_SURF_SBUFFER;
+ }
+
+	/* set some default values to avoid the sanity check choking on them */
+ surf->tile_split = 1024;
+ surf->bankw = 1;
+ surf->bankh = 1;
+ surf->mtilea = surf_man->hw_info.num_banks;
+ tileb = MIN2(surf->tile_split, 64 * surf->bpe * surf->nsamples);
+ for (; surf->bankh <= 8; surf->bankh *= 2) {
+ if ((tileb * surf->bankh * surf->bankw) >= surf_man->hw_info.group_bytes) {
+ break;
+ }
+ }
+ if (surf->mtilea > 8) {
+ surf->mtilea = 8;
+ }
+
+ r = eg_surface_sanity(surf_man, surf, mode);
+ if (r) {
+ return r;
+ }
+
+ if (mode != RADEON_SURF_MODE_2D) {
+ /* nothing to do for non 2D tiled surface */
+ return 0;
+ }
+
+	/* set tile split to row size; optimize later for multi-sample surfaces.
+	 * tile split >= 256 for render buffer surfaces. Depth surfaces also
+	 * want a smaller value for optimal performance.
+	 */
+ surf->tile_split = surf_man->hw_info.row_size;
+ surf->stencil_tile_split = surf_man->hw_info.row_size / 2;
+
+	/* bankw or bankh greater than 1 increases the alignment requirement;
+	 * not sure if it's worth using smaller bankw & bankh to stick with 2D
+	 * tiling on small surfaces rather than falling back to 1D tiling.
+	 * Use the recommended value based on tile size for now.
+	 *
+	 * fmask buffers have different optimal values; figure them out once
+	 * we use them.
+	 */
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+		/* assume 1 byte for stencil; we optimize for stencil as stencil
+		 * and depth share surface values
+		 */
+ tileb = MIN2(surf->tile_split, 64 * surf->nsamples);
+ } else {
+ tileb = MIN2(surf->tile_split, 64 * surf->bpe * surf->nsamples);
+ }
+
+	/* use bankw of 1 to minimize width alignment; it might be interesting
+	 * to increase it for large surfaces
+	 */
+ surf->bankw = 1;
+ switch (tileb) {
+ case 64:
+ surf->bankh = 4;
+ break;
+ case 128:
+ case 256:
+ surf->bankh = 2;
+ break;
+ default:
+ surf->bankh = 1;
+ break;
+ }
+ /* double check the constraint */
+ for (; surf->bankh <= 8; surf->bankh *= 2) {
+ if ((tileb * surf->bankh * surf->bankw) >= surf_man->hw_info.group_bytes) {
+ break;
+ }
+ }
+
+ h_over_w = (((surf->bankh * surf_man->hw_info.num_banks) << 16) /
+ (surf->bankw * surf_man->hw_info.num_pipes)) >> 16;
+ surf->mtilea = 1 << (log2_int(h_over_w) >> 1);
+
+ return 0;
+}
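+
+/* Worked example of the aspect ratio pick above: with bankh = 2, bankw = 1,
+ * num_banks = 8 and num_pipes = 2, h_over_w = (2 * 8) / (1 * 2) = 8, so
+ * mtilea = 1 << (log2_int(8) >> 1) = 1 << 1 = 2.
+ */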
+
+
+/* ===========================================================================
+ * public API
+ */
+struct radeon_surface_manager *radeon_surface_manager_new(int fd)
+{
+ struct radeon_surface_manager *surf_man;
+
+ surf_man = calloc(1, sizeof(struct radeon_surface_manager));
+ if (surf_man == NULL) {
+ return NULL;
+ }
+ surf_man->fd = fd;
+ if (radeon_get_value(fd, RADEON_INFO_DEVICE_ID, &surf_man->device_id)) {
+ goto out_err;
+ }
+ if (radeon_get_family(surf_man)) {
+ goto out_err;
+ }
+
+ if (surf_man->family <= CHIP_RV740) {
+ if (r6_init_hw_info(surf_man)) {
+ goto out_err;
+ }
+ surf_man->surface_init = &r6_surface_init;
+ surf_man->surface_best = &r6_surface_best;
+ } else {
+ if (eg_init_hw_info(surf_man)) {
+ goto out_err;
+ }
+ surf_man->surface_init = &eg_surface_init;
+ surf_man->surface_best = &eg_surface_best;
+ }
+
+ return surf_man;
+out_err:
+ free(surf_man);
+ return NULL;
+}
+
+void radeon_surface_manager_free(struct radeon_surface_manager *surf_man)
+{
+ free(surf_man);
+}
+
+static int radeon_surface_sanity(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned type,
+ unsigned mode)
+{
+ if (surf_man == NULL || surf_man->surface_init == NULL || surf == NULL) {
+ return -EINVAL;
+ }
+
+	/* all dimensions must be at least 1 */
+ if (!surf->npix_x || !surf->npix_y || !surf->npix_z) {
+ return -EINVAL;
+ }
+ if (!surf->blk_w || !surf->blk_h || !surf->blk_d) {
+ return -EINVAL;
+ }
+ if (!surf->array_size) {
+ return -EINVAL;
+ }
+ /* array size must be a power of 2 */
+ surf->array_size = next_power_of_two(surf->array_size);
+
+ switch (surf->nsamples) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* check type */
+ switch (type) {
+ case RADEON_SURF_TYPE_1D:
+ if (surf->npix_y > 1) {
+		}
+		/* fall through: 1D also gets the 2D npix_z check */
+ }
+ case RADEON_SURF_TYPE_2D:
+ if (surf->npix_z > 1) {
+ return -EINVAL;
+ }
+ break;
+ case RADEON_SURF_TYPE_CUBEMAP:
+ if (surf->npix_z > 1) {
+ return -EINVAL;
+ }
+		/* treat cubemaps as if they were texture arrays */
+ if (surf_man->family >= CHIP_RV770) {
+ surf->array_size = 8;
+ } else {
+ surf->array_size = 6;
+ }
+ break;
+ case RADEON_SURF_TYPE_3D:
+ break;
+ case RADEON_SURF_TYPE_1D_ARRAY:
+ if (surf->npix_y > 1) {
+ return -EINVAL;
+		}
+		/* fall through */
+ case RADEON_SURF_TYPE_2D_ARRAY:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int radeon_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, type;
+ int r;
+
+ type = RADEON_SURF_GET(surf->flags, TYPE);
+ mode = RADEON_SURF_GET(surf->flags, MODE);
+
+ r = radeon_surface_sanity(surf_man, surf, type, mode);
+ if (r) {
+ return r;
+ }
+ return surf_man->surface_init(surf_man, surf);
+}
+
+int radeon_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, type;
+ int r;
+
+ type = RADEON_SURF_GET(surf->flags, TYPE);
+ mode = RADEON_SURF_GET(surf->flags, MODE);
+
+ r = radeon_surface_sanity(surf_man, surf, type, mode);
+ if (r) {
+ return r;
+ }
+ return surf_man->surface_best(surf_man, surf);
+}
diff --git a/lib/libdrm/radeon/radeon_surface.h b/lib/libdrm/radeon/radeon_surface.h
new file mode 100644
index 000000000..bfee8ab00
--- /dev/null
+++ b/lib/libdrm/radeon/radeon_surface.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright © 2011 Red Hat All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <jglisse@redhat.com>
+ */
+#ifndef RADEON_SURFACE_H
+#define RADEON_SURFACE_H
+
+/* Note:
+ *
+ * For texture arrays, the n layers are stored one after the other within
+ * each mipmap level. A value of 0 for any field that can be a hint is
+ * always valid.
+ */
+
+#define RADEON_SURF_MAX_LEVEL 32
+
+#define RADEON_SURF_TYPE_MASK 0xFF
+#define RADEON_SURF_TYPE_SHIFT 0
+#define RADEON_SURF_TYPE_1D 0
+#define RADEON_SURF_TYPE_2D 1
+#define RADEON_SURF_TYPE_3D 2
+#define RADEON_SURF_TYPE_CUBEMAP 3
+#define RADEON_SURF_TYPE_1D_ARRAY 4
+#define RADEON_SURF_TYPE_2D_ARRAY 5
+#define RADEON_SURF_MODE_MASK 0xFF
+#define RADEON_SURF_MODE_SHIFT 8
+#define RADEON_SURF_MODE_LINEAR 0
+#define RADEON_SURF_MODE_LINEAR_ALIGNED 1
+#define RADEON_SURF_MODE_1D 2
+#define RADEON_SURF_MODE_2D 3
+#define RADEON_SURF_SCANOUT (1 << 16)
+#define RADEON_SURF_ZBUFFER (1 << 17)
+#define RADEON_SURF_SBUFFER (1 << 18)
+
+#define RADEON_SURF_GET(v, field) (((v) >> RADEON_SURF_ ## field ## _SHIFT) & RADEON_SURF_ ## field ## _MASK)
+#define RADEON_SURF_SET(v, field) (((v) & RADEON_SURF_ ## field ## _MASK) << RADEON_SURF_ ## field ## _SHIFT)
+#define RADEON_SURF_CLR(v, field) ((v) & ~(RADEON_SURF_ ## field ## _MASK << RADEON_SURF_ ## field ## _SHIFT))
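+
+/* Sketch of composing and reading a flags word with the macros above:
+ *
+ *     uint32_t flags = RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE) |
+ *                      RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ *
+ * RADEON_SURF_GET(flags, MODE) then yields RADEON_SURF_MODE_2D, and
+ * RADEON_SURF_CLR(flags, MODE) returns flags with the mode bits zeroed.
+ */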
+
+/* the first fields up to mode need to match the r6 struct so that we can
+ * reuse the same function for linear & linear aligned
+ */
+struct radeon_surface_level {
+ uint64_t offset;
+ uint64_t slice_size;
+ uint32_t npix_x;
+ uint32_t npix_y;
+ uint32_t npix_z;
+ uint32_t nblk_x;
+ uint32_t nblk_y;
+ uint32_t nblk_z;
+ uint32_t pitch_bytes;
+ uint32_t mode;
+};
+
+struct radeon_surface {
+ uint32_t npix_x;
+ uint32_t npix_y;
+ uint32_t npix_z;
+ uint32_t blk_w;
+ uint32_t blk_h;
+ uint32_t blk_d;
+ uint32_t array_size;
+ uint32_t last_level;
+ uint32_t bpe;
+ uint32_t nsamples;
+ uint32_t flags;
+	/* The following is updated/filled by the allocator. It's allowed to
+	 * set some of the values, but they are used as hints and can be
+	 * overridden (things like bankw/bankh on evergreen, for
+	 * instance).
+	 */
+ uint64_t bo_size;
+ uint64_t bo_alignment;
+ /* apply to eg */
+ uint32_t bankw;
+ uint32_t bankh;
+ uint32_t mtilea;
+ uint32_t tile_split;
+ uint32_t stencil_tile_split;
+ uint64_t stencil_offset;
+ struct radeon_surface_level level[RADEON_SURF_MAX_LEVEL];
+};
+
+struct radeon_surface_manager *radeon_surface_manager_new(int fd);
+void radeon_surface_manager_free(struct radeon_surface_manager *surf_man);
+int radeon_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+int radeon_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+
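+/* A minimal usage sketch (error handling elided; fd is assumed to be an
+ * open DRM device node and the sizes are only an example):
+ *
+ *     struct radeon_surface_manager *sm = radeon_surface_manager_new(fd);
+ *     struct radeon_surface surf;
+ *
+ *     memset(&surf, 0, sizeof(surf));
+ *     surf.npix_x = 1024; surf.npix_y = 768; surf.npix_z = 1;
+ *     surf.blk_w = 1; surf.blk_h = 1; surf.blk_d = 1;
+ *     surf.array_size = 1; surf.last_level = 0;
+ *     surf.bpe = 4; surf.nsamples = 1;
+ *     surf.flags = RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE) |
+ *                  RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ *
+ *     radeon_surface_best(sm, &surf);
+ *     radeon_surface_init(sm, &surf);
+ *
+ * On success surf.bo_size and surf.bo_alignment describe the allocation
+ * and surf.level[] holds the per-mipmap layout; the manager is released
+ * with radeon_surface_manager_free(sm).
+ */
+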
+#endif
diff --git a/lib/libdrm/radeon/shlib_version b/lib/libdrm/radeon/shlib_version
index 1edea46de..893819d18 100644
--- a/lib/libdrm/radeon/shlib_version
+++ b/lib/libdrm/radeon/shlib_version
@@ -1,2 +1,2 @@
major=1
-minor=0
+minor=1
diff --git a/lib/libdrm/shlib_version b/lib/libdrm/shlib_version
index f7f3cf728..012c14171 100644
--- a/lib/libdrm/shlib_version
+++ b/lib/libdrm/shlib_version
@@ -1,2 +1,2 @@
-major=2
-minor=6
+major=3
+minor=0
diff --git a/lib/libdrm/xf86drm.c b/lib/libdrm/xf86drm.c
index 79313ed28..038b28c8c 100644
--- a/lib/libdrm/xf86drm.c
+++ b/lib/libdrm/xf86drm.c
@@ -31,10 +31,14 @@
* DEALINGS IN THE SOFTWARE.
*/
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
+#include <strings.h>
#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
@@ -55,8 +59,16 @@
#include "xf86drm.h"
-#ifndef DRM_MAX_MINOR
-#define DRM_MAX_MINOR 16
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
+#define DRM_MAJOR 145
+#endif
+
+#ifdef __NetBSD__
+#define DRM_MAJOR 34
+#endif
+
+#ifndef DRM_MAJOR
+#define DRM_MAJOR 226 /* Linux */
#endif
/*
@@ -139,23 +151,6 @@ void drmFree(void *pt)
free(pt);
}
-/* drmStrdup can't use strdup(3), since it doesn't call _DRM_MALLOC... */
-static char *drmStrdup(const char *s)
-{
- char *retval;
-
- if (!s)
- return NULL;
-
- retval = malloc(strlen(s)+1);
- if (!retval)
- return NULL;
-
- strcpy(retval, s);
-
- return retval;
-}
-
/**
 * Call ioctl, restarting if it is interrupted
*/
@@ -213,7 +208,7 @@ drmHashEntry *drmGetEntry(int fd)
* PCI:b:d:f format and the newer pci:oooo:bb:dd.f format. In the format, o is
* domain, b is bus, d is device, f is function.
*/
-static int drmMatchBusID(const char *id1, const char *id2)
+static int drmMatchBusID(const char *id1, const char *id2, int pci_domain_ok)
{
/* First, check if the IDs are exactly the same */
if (strcasecmp(id1, id2) == 0)
@@ -241,6 +236,13 @@ static int drmMatchBusID(const char *id1, const char *id2)
return 0;
}
+ /* If domains aren't properly supported by the kernel interface,
+ * just ignore them, which sucks less than picking a totally random
+ * card with "open by name"
+ */
+ if (!pci_domain_ok)
+ o1 = o2 = 0;
+
if ((o1 != o2) || (b1 != b2) || (d1 != d2) || (f1 != f2))
return 0;
else
@@ -250,6 +252,36 @@ static int drmMatchBusID(const char *id1, const char *id2)
}
/**
+ * Handles error checking for the chown call.
+ *
+ * \param path path to the file.
+ * \param owner id of the new owner.
+ * \param group id of the new group.
+ *
+ * \return zero on success or -1 on failure.
+ *
+ * \internal
+ * Checks for failure. If the failure was caused by a signal, chown is
+ * called again. On any other failure an error message is output via
+ * drmMsg().
+ */
+static int chown_check_return(const char *path, uid_t owner, gid_t group)
+{
+ int rv;
+
+ do {
+ rv = chown(path, owner, group);
+ } while (rv != 0 && errno == EINTR);
+
+ if (rv == 0)
+ return 0;
+
+ drmMsg("Failed to change owner or group for file %s! %d: %s\n",
+ path, errno, strerror(errno));
+ return -1;
+}
+
+/**
* Open the DRM device, creating it if necessary.
*
* \param dev major and minor numbers of the device.
@@ -282,6 +314,54 @@ static int drmOpenDevice(long dev, int minor, int type)
group = (serv_group >= 0) ? serv_group : DRM_DEV_GID;
}
+#ifndef __OpenBSD__
+#if !defined(UDEV)
+ if (stat(DRM_DIR_NAME, &st)) {
+ if (!isroot)
+ return DRM_ERR_NOT_ROOT;
+ mkdir(DRM_DIR_NAME, DRM_DEV_DIRMODE);
+ chown_check_return(DRM_DIR_NAME, 0, 0); /* root:root */
+ chmod(DRM_DIR_NAME, DRM_DEV_DIRMODE);
+ }
+
+ /* Check if the device node exists and create it if necessary. */
+ if (stat(buf, &st)) {
+ if (!isroot)
+ return DRM_ERR_NOT_ROOT;
+ remove(buf);
+ mknod(buf, S_IFCHR | devmode, dev);
+ }
+
+ if (drm_server_info) {
+ chown_check_return(buf, user, group);
+ chmod(buf, devmode);
+ }
+#else
+ /* if we modprobed then wait for udev */
+ {
+ int udev_count = 0;
+wait_for_udev:
+ if (stat(DRM_DIR_NAME, &st)) {
+ usleep(20);
+ udev_count++;
+
+ if (udev_count == 50)
+ return -1;
+ goto wait_for_udev;
+ }
+
+ if (stat(buf, &st)) {
+ usleep(20);
+ udev_count++;
+
+ if (udev_count == 50)
+ return -1;
+ goto wait_for_udev;
+ }
+ }
+#endif
+#endif /* __OpenBSD__ */
+
#ifndef X_PRIVSEP
fd = open(buf, O_RDWR, 0);
#else
@@ -292,8 +372,29 @@ static int drmOpenDevice(long dev, int minor, int type)
if (fd >= 0)
return fd;
+#if !defined(UDEV) && !defined(__OpenBSD__)
+ /* Check if the device node is not what we expect it to be, and recreate it
+ * and try again if so.
+ */
+ if (st.st_rdev != dev) {
+ if (!isroot)
+ return DRM_ERR_NOT_ROOT;
+ remove(buf);
+ mknod(buf, S_IFCHR | devmode, dev);
+ if (drm_server_info) {
+ chown_check_return(buf, user, group);
+ chmod(buf, devmode);
+ }
+ }
+ fd = open(buf, O_RDWR, 0);
+ drmMsg("drmOpenDevice: open result is %d, (%s)\n",
+ fd, fd < 0 ? strerror(errno) : "OK");
+ if (fd >= 0)
+ return fd;
+
drmMsg("drmOpenDevice: Open failed\n");
-
+ remove(buf);
+#endif
return -errno;
}
@@ -380,7 +481,7 @@ int drmAvailable(void)
*/
static int drmOpenByBusid(const char *busid)
{
- int i;
+ int i, pci_domain_ok = 1;
int fd;
const char *buf;
drmSetVersion sv;
@@ -390,14 +491,27 @@ static int drmOpenByBusid(const char *busid)
fd = drmOpenMinor(i, 1, DRM_NODE_RENDER);
drmMsg("drmOpenByBusid: drmOpenMinor returns %d\n", fd);
if (fd >= 0) {
+ /* We need to try for 1.4 first for proper PCI domain support
+ * and if that fails, we know the kernel is busted
+ */
sv.drm_di_major = 1;
- sv.drm_di_minor = 1;
+ sv.drm_di_minor = 4;
sv.drm_dd_major = -1; /* Don't care */
sv.drm_dd_minor = -1; /* Don't care */
- drmSetInterfaceVersion(fd, &sv);
+ if (drmSetInterfaceVersion(fd, &sv)) {
+#ifndef __alpha__
+ pci_domain_ok = 0;
+#endif
+ sv.drm_di_major = 1;
+ sv.drm_di_minor = 1;
+ sv.drm_dd_major = -1; /* Don't care */
+ sv.drm_dd_minor = -1; /* Don't care */
+	    drmMsg("drmOpenByBusid: Interface 1.4 failed, trying 1.1\n");
+ drmSetInterfaceVersion(fd, &sv);
+ }
buf = drmGetBusid(fd);
drmMsg("drmOpenByBusid: drmGetBusid reports %s\n", buf);
- if (buf && drmMatchBusID(buf, busid)) {
+ if (buf && drmMatchBusID(buf, busid, pci_domain_ok)) {
drmFreeBusid(buf);
return fd;
}
@@ -604,11 +718,11 @@ static void drmCopyVersion(drmVersionPtr d, const drm_version_t *s)
d->version_minor = s->version_minor;
d->version_patchlevel = s->version_patchlevel;
d->name_len = s->name_len;
- d->name = drmStrdup(s->name);
+ d->name = strdup(s->name);
d->date_len = s->date_len;
- d->date = drmStrdup(s->date);
+ d->date = strdup(s->date);
d->desc_len = s->desc_len;
- d->desc = drmStrdup(s->desc);
+ d->desc = strdup(s->desc);
}
@@ -703,6 +817,20 @@ drmVersionPtr drmGetLibVersion(int fd)
return (drmVersionPtr)version;
}
+int drmGetCap(int fd, uint64_t capability, uint64_t *value)
+{
+#ifndef __OpenBSD__
+ struct drm_get_cap cap = { capability, 0 };
+ int ret;
+
+ ret = drmIoctl(fd, DRM_IOCTL_GET_CAP, &cap);
+ if (ret)
+ return ret;
+
+ *value = cap.value;
+#endif
+ return 0;
+}
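+
+/* A minimal sketch of querying a capability, assuming the kernel headers
+ * define DRM_CAP_DUMB_BUFFER:
+ *
+ *     uint64_t has_dumb = 0;
+ *     int dumb_ok = (drmGetCap(fd, DRM_CAP_DUMB_BUFFER, &has_dumb) == 0)
+ *                   && has_dumb;
+ *
+ * Note the OpenBSD stub above returns 0 without touching *value, so
+ * callers should initialize it before calling.
+ */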
/**
* Free the bus ID information.
@@ -857,7 +985,7 @@ int drmAddMap(int fd, drm_handle_t offset, drmSize size, drmMapType type,
if (drmIoctl(fd, DRM_IOCTL_ADD_MAP, &map))
return -errno;
if (handle)
- *handle = (drm_handle_t)map.handle;
+ *handle = (drm_handle_t)(uintptr_t)map.handle;
return 0;
}
@@ -865,7 +993,7 @@ int drmRmMap(int fd, drm_handle_t handle)
{
drm_map_t map;
- map.handle = (void *)handle;
+ map.handle = (void *)(uintptr_t)handle;
if(drmIoctl(fd, DRM_IOCTL_RM_MAP, &map))
return -errno;
@@ -2001,7 +2129,7 @@ int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
drm_ctx_priv_map_t map;
map.ctx_id = ctx_id;
- map.handle = (void *)handle;
+ map.handle = (void *)(uintptr_t)handle;
if (drmIoctl(fd, DRM_IOCTL_SET_SAREA_CTX, &map))
return -errno;
@@ -2018,7 +2146,7 @@ int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
if (drmIoctl(fd, DRM_IOCTL_GET_SAREA_CTX, &map))
return -errno;
if (handle)
- *handle = (drm_handle_t)map.handle;
+ *handle = (drm_handle_t)(uintptr_t)map.handle;
return 0;
}
@@ -2390,23 +2518,16 @@ void drmCloseOnce(int fd)
int drmSetMaster(int fd)
{
-#ifdef NOTYET
- int ret;
-
- fprintf(stderr,"Setting master \n");
- ret = ioctl(fd, DRM_IOCTL_SET_MASTER, 0);
- return ret;
+#ifndef __OpenBSD__
+ return ioctl(fd, DRM_IOCTL_SET_MASTER, 0);
#endif
return 0;
}
int drmDropMaster(int fd)
{
-#ifdef NOTYET
- int ret;
- fprintf(stderr,"Dropping master \n");
- ret = ioctl(fd, DRM_IOCTL_DROP_MASTER, 0);
- return ret;
+#ifndef __OpenBSD__
+ return ioctl(fd, DRM_IOCTL_DROP_MASTER, 0);
#endif
return 0;
}
@@ -2434,7 +2555,7 @@ char *drmGetDeviceNameFromFd(int fd)
if (i == DRM_MAX_MINOR)
return NULL;
- return drmStrdup(name);
+ return strdup(name);
}
#ifdef X_PRIVSEP
diff --git a/lib/libdrm/xf86drm.h b/lib/libdrm/xf86drm.h
index 92c4aea9b..f87d7d96d 100644
--- a/lib/libdrm/xf86drm.h
+++ b/lib/libdrm/xf86drm.h
@@ -39,6 +39,35 @@
#include <stdint.h>
#include <drm.h>
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
+#ifndef DRM_MAX_MINOR
+#define DRM_MAX_MINOR 16
+#endif
+
+#if defined(__linux__)
+
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_IOC_VOID _IOC_NONE
+#define DRM_IOC_READ _IOC_READ
+#define DRM_IOC_WRITE _IOC_WRITE
+#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+
+#else /* One of the *BSDs */
+
+#include <sys/ioccom.h>
+#define DRM_IOCTL_NR(n) ((n) & 0xff)
+#define DRM_IOC_VOID IOC_VOID
+#define DRM_IOC_READ IOC_OUT
+#define DRM_IOC_WRITE IOC_IN
+#define DRM_IOC_READWRITE IOC_INOUT
+#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
+
+#endif
+
/* Defaults, if nothing set in xf86config */
#define DRM_DEV_UID 0
#define DRM_DEV_GID 0
@@ -47,7 +76,6 @@
(S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
-
#define DRM_DIR_NAME "/dev"
#define DRM_DEV_NAME "%s/drm%d"
#define DRM_CONTROL_DEV_NAME "%s/drmC%d"
@@ -272,12 +300,15 @@ typedef struct _drmTextureRegion {
typedef enum {
DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ /* bits 1-6 are reserved for high crtcs */
+ DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drmVBlankSeqType;
+#define DRM_VBLANK_HIGH_CRTC_SHIFT 1
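+
+/* A sketch of targeting a crtc beyond the first two with the mask and
+ * shift above (crtc is the zero-based crtc index; crtc 1 historically
+ * uses DRM_VBLANK_SECONDARY instead):
+ *
+ *     drmVBlank vbl;
+ *
+ *     vbl.request.type = DRM_VBLANK_RELATIVE |
+ *             ((crtc << DRM_VBLANK_HIGH_CRTC_SHIFT) &
+ *              DRM_VBLANK_HIGH_CRTC_MASK);
+ *     vbl.request.sequence = 1;
+ *     drmWaitVBlank(fd, &vbl);
+ */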
typedef struct _drmVBlankReq {
drmVBlankSeqType type;
@@ -518,6 +549,7 @@ extern int drmOpenControl(int minor);
extern int drmClose(int fd);
extern drmVersionPtr drmGetVersion(int fd);
extern drmVersionPtr drmGetLibVersion(int fd);
+extern int drmGetCap(int fd, uint64_t capability, uint64_t *value);
extern void drmFreeVersion(drmVersionPtr);
extern int drmGetMagic(int fd, drm_magic_t * magic);
extern char *drmGetBusid(int fd);
@@ -669,7 +701,7 @@ extern void drmMsg(const char *format, ...);
extern int drmSetMaster(int fd);
extern int drmDropMaster(int fd);
-#define DRM_EVENT_CONTEXT_VERSION 1
+#define DRM_EVENT_CONTEXT_VERSION 2
typedef struct _drmEventContext {
@@ -682,10 +714,21 @@ typedef struct _drmEventContext {
unsigned int tv_sec,
unsigned int tv_usec,
void *user_data);
+
+ void (*page_flip_handler)(int fd,
+ unsigned int sequence,
+ unsigned int tv_sec,
+ unsigned int tv_usec,
+ void *user_data);
+
} drmEventContext, *drmEventContextPtr;
extern int drmHandleEvent(int fd, drmEventContextPtr evctx);
extern char *drmGetDeviceNameFromFd(int fd);
+#if defined(__cplusplus) || defined(c_plusplus)
+}
+#endif
+
#endif
diff --git a/lib/libdrm/xf86drmMode.c b/lib/libdrm/xf86drmMode.c
index 221bf67cf..a2603a806 100644
--- a/lib/libdrm/xf86drmMode.c
+++ b/lib/libdrm/xf86drmMode.c
@@ -46,11 +46,18 @@
#include <drm.h>
#include <string.h>
#include <dirent.h>
+#include <unistd.h>
#include <errno.h>
#define U642VOID(x) ((void *)(unsigned long)(x))
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
+static inline int DRM_IOCTL(int fd, unsigned long cmd, void *arg)
+{
+ int ret = drmIoctl(fd, cmd, arg);
+ return ret < 0 ? -errno : ret;
+}
+
/*
* Util functions
*/
@@ -89,6 +96,10 @@ void drmModeFreeResources(drmModeResPtr ptr)
if (!ptr)
return;
+ drmFree(ptr->fbs);
+ drmFree(ptr->crtcs);
+ drmFree(ptr->connectors);
+ drmFree(ptr->encoders);
drmFree(ptr);
}
@@ -135,35 +146,62 @@ void drmModeFreeEncoder(drmModeEncoderPtr ptr)
drmModeResPtr drmModeGetResources(int fd)
{
- struct drm_mode_card_res res;
+ struct drm_mode_card_res res, counts;
drmModeResPtr r = 0;
+retry:
memset(&res, 0, sizeof(struct drm_mode_card_res));
-
if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
return 0;
- if (res.count_fbs)
+ counts = res;
+
+ if (res.count_fbs) {
res.fb_id_ptr = VOID2U64(drmMalloc(res.count_fbs*sizeof(uint32_t)));
- if (res.count_crtcs)
+ if (!res.fb_id_ptr)
+ goto err_allocs;
+ }
+ if (res.count_crtcs) {
res.crtc_id_ptr = VOID2U64(drmMalloc(res.count_crtcs*sizeof(uint32_t)));
- if (res.count_connectors)
+ if (!res.crtc_id_ptr)
+ goto err_allocs;
+ }
+ if (res.count_connectors) {
res.connector_id_ptr = VOID2U64(drmMalloc(res.count_connectors*sizeof(uint32_t)));
- if (res.count_encoders)
+ if (!res.connector_id_ptr)
+ goto err_allocs;
+ }
+ if (res.count_encoders) {
res.encoder_id_ptr = VOID2U64(drmMalloc(res.count_encoders*sizeof(uint32_t)));
+ if (!res.encoder_id_ptr)
+ goto err_allocs;
+ }
- if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res)) {
- r = NULL;
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
goto err_allocs;
+
+	/* The number of available connectors etc. may have changed with a
+	 * hotplug event in between the ioctls, in which case the field is
+	 * silently ignored by the kernel.
+	 */
+ if (counts.count_fbs < res.count_fbs ||
+ counts.count_crtcs < res.count_crtcs ||
+ counts.count_connectors < res.count_connectors ||
+ counts.count_encoders < res.count_encoders)
+ {
+ drmFree(U642VOID(res.fb_id_ptr));
+ drmFree(U642VOID(res.crtc_id_ptr));
+ drmFree(U642VOID(res.connector_id_ptr));
+ drmFree(U642VOID(res.encoder_id_ptr));
+
+ goto retry;
}
/*
* return
*/
-
-
if (!(r = drmMalloc(sizeof(*r))))
- return 0;
+ goto err_allocs;
r->min_width = res.min_width;
r->max_width = res.max_width;
@@ -173,11 +211,23 @@ drmModeResPtr drmModeGetResources(int fd)
r->count_crtcs = res.count_crtcs;
r->count_connectors = res.count_connectors;
r->count_encoders = res.count_encoders;
- /* TODO we realy should test if these allocs fails. */
- r->fbs = drmAllocCpy(U642VOID(res.fb_id_ptr), res.count_fbs, sizeof(uint32_t));
- r->crtcs = drmAllocCpy(U642VOID(res.crtc_id_ptr), res.count_crtcs, sizeof(uint32_t));
- r->connectors = drmAllocCpy(U642VOID(res.connector_id_ptr), res.count_connectors, sizeof(uint32_t));
- r->encoders = drmAllocCpy(U642VOID(res.encoder_id_ptr), res.count_encoders, sizeof(uint32_t));
+
+ r->fbs = drmAllocCpy(U642VOID(res.fb_id_ptr), res.count_fbs, sizeof(uint32_t));
+ r->crtcs = drmAllocCpy(U642VOID(res.crtc_id_ptr), res.count_crtcs, sizeof(uint32_t));
+ r->connectors = drmAllocCpy(U642VOID(res.connector_id_ptr), res.count_connectors, sizeof(uint32_t));
+ r->encoders = drmAllocCpy(U642VOID(res.encoder_id_ptr), res.count_encoders, sizeof(uint32_t));
+ if ((res.count_fbs && !r->fbs) ||
+ (res.count_crtcs && !r->crtcs) ||
+ (res.count_connectors && !r->connectors) ||
+ (res.count_encoders && !r->encoders))
+ {
+ drmFree(r->fbs);
+ drmFree(r->crtcs);
+ drmFree(r->connectors);
+ drmFree(r->encoders);
+ drmFree(r);
+ r = 0;
+ }
err_allocs:
drmFree(U642VOID(res.fb_id_ptr));
@@ -202,16 +252,41 @@ int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
f.depth = depth;
f.handle = bo_handle;
- if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_ADDFB, &f)))
+ if ((ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ADDFB, &f)))
+ return ret;
+
+ *buf_id = f.fb_id;
+ return 0;
+}
+
+#ifndef __OpenBSD__
+int drmModeAddFB2(int fd, uint32_t width, uint32_t height,
+ uint32_t pixel_format, uint32_t bo_handles[4],
+ uint32_t pitches[4], uint32_t offsets[4],
+ uint32_t *buf_id, uint32_t flags)
+{
+ struct drm_mode_fb_cmd2 f;
+ int ret;
+
+ f.width = width;
+ f.height = height;
+ f.pixel_format = pixel_format;
+ f.flags = flags;
+ memcpy(f.handles, bo_handles, 4 * sizeof(bo_handles[0]));
+ memcpy(f.pitches, pitches, 4 * sizeof(pitches[0]));
+ memcpy(f.offsets, offsets, 4 * sizeof(offsets[0]));
+
+ if ((ret = DRM_IOCTL(fd, DRM_IOCTL_MODE_ADDFB2, &f)))
return ret;
*buf_id = f.fb_id;
return 0;
}
+#endif
int drmModeRmFB(int fd, uint32_t bufferId)
{
- return drmIoctl(fd, DRM_IOCTL_MODE_RMFB, &bufferId);
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_RMFB, &bufferId);
}
@@ -240,6 +315,20 @@ drmModeFBPtr drmModeGetFB(int fd, uint32_t buf)
return r;
}
+#ifndef __OpenBSD__
+int drmModeDirtyFB(int fd, uint32_t bufferId,
+ drmModeClipPtr clips, uint32_t num_clips)
+{
+ struct drm_mode_fb_dirty_cmd dirty = { 0 };
+
+ dirty.fb_id = bufferId;
+ dirty.clips_ptr = VOID2U64(clips);
+ dirty.num_clips = num_clips;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);
+}
+#endif
+
/*
* Crtc functions
@@ -292,7 +381,7 @@ int drmModeSetCrtc(int fd, uint32_t crtcId, uint32_t bufferId,
} else
crtc.mode_valid = 0;
- return drmIoctl(fd, DRM_IOCTL_MODE_SETCRTC, &crtc);
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETCRTC, &crtc);
}
/*
@@ -309,7 +398,7 @@ int drmModeSetCursor(int fd, uint32_t crtcId, uint32_t bo_handle, uint32_t width
arg.height = height;
arg.handle = bo_handle;
- return drmIoctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_CURSOR, &arg);
}
int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y)
@@ -321,7 +410,7 @@ int drmModeMoveCursor(int fd, uint32_t crtcId, int x, int y)
arg.x = x;
arg.y = y;
- return drmIoctl(fd, DRM_IOCTL_MODE_CURSOR, &arg);
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_CURSOR, &arg);
}
/*
@@ -358,37 +447,57 @@ drmModeEncoderPtr drmModeGetEncoder(int fd, uint32_t encoder_id)
drmModeConnectorPtr drmModeGetConnector(int fd, uint32_t connector_id)
{
- struct drm_mode_get_connector conn;
+ struct drm_mode_get_connector conn, counts;
drmModeConnectorPtr r = NULL;
+retry:
+ memset(&conn, 0, sizeof(struct drm_mode_get_connector));
conn.connector_id = connector_id;
- conn.connector_type_id = 0;
- conn.connector_type = 0;
- conn.count_modes = 0;
- conn.modes_ptr = 0;
- conn.count_props = 0;
- conn.props_ptr = 0;
- conn.prop_values_ptr = 0;
- conn.count_encoders = 0;
- conn.encoders_ptr = 0;
if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
return 0;
+ counts = conn;
+
if (conn.count_props) {
conn.props_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint32_t)));
+ if (!conn.props_ptr)
+ goto err_allocs;
conn.prop_values_ptr = VOID2U64(drmMalloc(conn.count_props*sizeof(uint64_t)));
+ if (!conn.prop_values_ptr)
+ goto err_allocs;
}
- if (conn.count_modes)
+ if (conn.count_modes) {
conn.modes_ptr = VOID2U64(drmMalloc(conn.count_modes*sizeof(struct drm_mode_modeinfo)));
+ if (!conn.modes_ptr)
+ goto err_allocs;
+ }
- if (conn.count_encoders)
+ if (conn.count_encoders) {
conn.encoders_ptr = VOID2U64(drmMalloc(conn.count_encoders*sizeof(uint32_t)));
+ if (!conn.encoders_ptr)
+ goto err_allocs;
+ }
if (drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &conn))
goto err_allocs;
+	/* The number of available connectors etc. may have changed with a
+	 * hotplug event in between the ioctls, in which case the field is
+	 * silently ignored by the kernel.
+	 */
+ if (counts.count_props < conn.count_props ||
+ counts.count_modes < conn.count_modes ||
+ counts.count_encoders < conn.count_encoders) {
+ drmFree(U642VOID(conn.props_ptr));
+ drmFree(U642VOID(conn.prop_values_ptr));
+ drmFree(U642VOID(conn.modes_ptr));
+ drmFree(U642VOID(conn.encoders_ptr));
+
+ goto retry;
+ }
+
if(!(r = drmMalloc(sizeof(*r)))) {
goto err_allocs;
}
@@ -401,7 +510,6 @@ drmModeConnectorPtr drmModeGetConnector(int fd, uint32_t connector_id)
/* convert subpixel from kernel to userspace */
r->subpixel = conn.subpixel + 1;
r->count_modes = conn.count_modes;
- /* TODO we should test if these alloc & cpy fails. */
r->count_props = conn.count_props;
r->props = drmAllocCpy(U642VOID(conn.props_ptr), conn.count_props, sizeof(uint32_t));
r->prop_values = drmAllocCpy(U642VOID(conn.prop_values_ptr), conn.count_props, sizeof(uint64_t));
@@ -411,8 +519,17 @@ drmModeConnectorPtr drmModeGetConnector(int fd, uint32_t connector_id)
r->connector_type = conn.connector_type;
r->connector_type_id = conn.connector_type_id;
- if (!r->props || !r->prop_values || !r->modes || !r->encoders)
- goto err_allocs;
+ if ((r->count_props && !r->props) ||
+ (r->count_props && !r->prop_values) ||
+ (r->count_modes && !r->modes) ||
+ (r->count_encoders && !r->encoders)) {
+ drmFree(r->props);
+ drmFree(r->prop_values);
+ drmFree(r->modes);
+ drmFree(r->encoders);
+ drmFree(r);
+ r = 0;
+ }
err_allocs:
drmFree(U642VOID(conn.prop_values_ptr));
@@ -430,7 +547,7 @@ int drmModeAttachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_inf
memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
res.connector_id = connector_id;
- return drmIoctl(fd, DRM_IOCTL_MODE_ATTACHMODE, &res);
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_ATTACHMODE, &res);
}
int drmModeDetachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_info)
@@ -440,7 +557,7 @@ int drmModeDetachMode(int fd, uint32_t connector_id, drmModeModeInfoPtr mode_inf
memcpy(&res.mode, mode_info, sizeof(struct drm_mode_modeinfo));
res.connector_id = connector_id;
- return drmIoctl(fd, DRM_IOCTL_MODE_DETACHMODE, &res);
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_DETACHMODE, &res);
}
@@ -533,7 +650,7 @@ drmModePropertyBlobPtr drmModeGetPropertyBlob(int fd, uint32_t blob_id)
}
if (!(r = drmMalloc(sizeof(*r))))
- return NULL;
+ goto err_allocs;
r->id = blob.blob_id;
r->length = blob.length;
@@ -557,16 +674,12 @@ int drmModeConnectorSetProperty(int fd, uint32_t connector_id, uint32_t property
uint64_t value)
{
struct drm_mode_connector_set_property osp;
- int ret;
osp.connector_id = connector_id;
osp.prop_id = property_id;
osp.value = value;
- if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_SETPROPERTY, &osp)))
- return ret;
-
- return 0;
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETPROPERTY, &osp);
}
/*
@@ -635,7 +748,6 @@ int drmCheckModesettingSupported(const char *busid)
int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *red, uint16_t *green, uint16_t *blue)
{
- int ret;
struct drm_mode_crtc_lut l;
l.crtc_id = crtc_id;
@@ -644,16 +756,12 @@ int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
l.green = VOID2U64(green);
l.blue = VOID2U64(blue);
- if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_GETGAMMA, &l)))
- return ret;
-
- return 0;
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_GETGAMMA, &l);
}
int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *red, uint16_t *green, uint16_t *blue)
{
- int ret;
struct drm_mode_crtc_lut l;
l.crtc_id = crtc_id;
@@ -662,10 +770,7 @@ int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
l.green = VOID2U64(green);
l.blue = VOID2U64(blue);
- if ((ret = drmIoctl(fd, DRM_IOCTL_MODE_SETGAMMA, &l)))
- return ret;
-
- return 0;
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETGAMMA, &l);
}
int drmHandleEvent(int fd, drmEventContextPtr evctx)
@@ -699,6 +804,17 @@ int drmHandleEvent(int fd, drmEventContextPtr evctx)
vblank->tv_usec,
U642VOID (vblank->user_data));
break;
+ case DRM_EVENT_FLIP_COMPLETE:
+ if (evctx->version < 2 ||
+ evctx->page_flip_handler == NULL)
+ break;
+ vblank = (struct drm_event_vblank *) e;
+ evctx->page_flip_handler(fd,
+ vblank->sequence,
+ vblank->tv_sec,
+ vblank->tv_usec,
+ U642VOID (vblank->user_data));
+ break;
default:
break;
}
@@ -707,3 +823,160 @@ int drmHandleEvent(int fd, drmEventContextPtr evctx)
return 0;
}
+
+#ifndef __OpenBSD__
+int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id,
+ uint32_t flags, void *user_data)
+{
+ struct drm_mode_crtc_page_flip flip;
+
+ flip.fb_id = fb_id;
+ flip.crtc_id = crtc_id;
+ flip.user_data = VOID2U64(user_data);
+ flip.flags = flags;
+ flip.reserved = 0;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
+}
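+
+/* A minimal page flip sketch: schedule the flip with an event and collect
+ * the DRM_EVENT_FLIP_COMPLETE via drmHandleEvent(). fd, crtc_id, fb_id and
+ * the on_flip handler are assumed to exist on the caller's side:
+ *
+ *     drmEventContext evctx;
+ *
+ *     memset(&evctx, 0, sizeof(evctx));
+ *     evctx.version = DRM_EVENT_CONTEXT_VERSION;
+ *     evctx.page_flip_handler = on_flip;
+ *
+ *     drmModePageFlip(fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT, NULL);
+ *
+ * then, once poll() or select() reports fd readable:
+ *
+ *     drmHandleEvent(fd, &evctx);
+ */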
+
+int drmModeSetPlane(int fd, uint32_t plane_id, uint32_t crtc_id,
+ uint32_t fb_id, uint32_t flags,
+ uint32_t crtc_x, uint32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+
+{
+ struct drm_mode_set_plane s;
+
+ s.plane_id = plane_id;
+ s.crtc_id = crtc_id;
+ s.fb_id = fb_id;
+ s.flags = flags;
+ s.crtc_x = crtc_x;
+ s.crtc_y = crtc_y;
+ s.crtc_w = crtc_w;
+ s.crtc_h = crtc_h;
+ s.src_x = src_x;
+ s.src_y = src_y;
+ s.src_w = src_w;
+ s.src_h = src_h;
+
+ return DRM_IOCTL(fd, DRM_IOCTL_MODE_SETPLANE, &s);
+}
+
+
+drmModePlanePtr drmModeGetPlane(int fd, uint32_t plane_id)
+{
+ struct drm_mode_get_plane ovr, counts;
+ drmModePlanePtr r = 0;
+
+retry:
+ memset(&ovr, 0, sizeof(struct drm_mode_get_plane));
+ ovr.plane_id = plane_id;
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &ovr))
+ return 0;
+
+ counts = ovr;
+
+ if (ovr.count_format_types) {
+ ovr.format_type_ptr = VOID2U64(drmMalloc(ovr.count_format_types *
+ sizeof(uint32_t)));
+ if (!ovr.format_type_ptr)
+ goto err_allocs;
+ }
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANE, &ovr))
+ goto err_allocs;
+
+ if (counts.count_format_types < ovr.count_format_types) {
+ drmFree(U642VOID(ovr.format_type_ptr));
+ goto retry;
+ }
+
+ if (!(r = drmMalloc(sizeof(*r))))
+ goto err_allocs;
+
+ r->count_formats = ovr.count_format_types;
+ r->plane_id = ovr.plane_id;
+ r->crtc_id = ovr.crtc_id;
+ r->fb_id = ovr.fb_id;
+ r->possible_crtcs = ovr.possible_crtcs;
+ r->gamma_size = ovr.gamma_size;
+ r->formats = drmAllocCpy(U642VOID(ovr.format_type_ptr),
+ ovr.count_format_types, sizeof(uint32_t));
+ if (ovr.count_format_types && !r->formats) {
+ drmFree(r->formats);
+ drmFree(r);
+ r = 0;
+ }
+
+err_allocs:
+ drmFree(U642VOID(ovr.format_type_ptr));
+
+ return r;
+}
+
+void drmModeFreePlane(drmModePlanePtr ptr)
+{
+ if (!ptr)
+ return;
+
+ drmFree(ptr->formats);
+ drmFree(ptr);
+}
+
+drmModePlaneResPtr drmModeGetPlaneResources(int fd)
+{
+ struct drm_mode_get_plane_res res, counts;
+ drmModePlaneResPtr r = 0;
+
+retry:
+ memset(&res, 0, sizeof(struct drm_mode_get_plane_res));
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res))
+ return 0;
+
+ counts = res;
+
+ if (res.count_planes) {
+ res.plane_id_ptr = VOID2U64(drmMalloc(res.count_planes *
+ sizeof(uint32_t)));
+ if (!res.plane_id_ptr)
+ goto err_allocs;
+ }
+
+ if (drmIoctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res))
+ goto err_allocs;
+
+ if (counts.count_planes < res.count_planes) {
+ drmFree(U642VOID(res.plane_id_ptr));
+ goto retry;
+ }
+
+ if (!(r = drmMalloc(sizeof(*r))))
+ goto err_allocs;
+
+ r->count_planes = res.count_planes;
+ r->planes = drmAllocCpy(U642VOID(res.plane_id_ptr),
+ res.count_planes, sizeof(uint32_t));
+ if (res.count_planes && !r->planes) {
+ drmFree(r->planes);
+ drmFree(r);
+ r = 0;
+ }
+
+err_allocs:
+ drmFree(U642VOID(res.plane_id_ptr));
+
+ return r;
+}
+
+void drmModeFreePlaneResources(drmModePlaneResPtr ptr)
+{
+ if (!ptr)
+ return;
+
+ drmFree(ptr->planes);
+ drmFree(ptr);
+}
+#endif
diff --git a/lib/libdrm/xf86drmMode.h b/lib/libdrm/xf86drmMode.h
index 62304bb92..34f5fb14c 100644
--- a/lib/libdrm/xf86drmMode.h
+++ b/lib/libdrm/xf86drmMode.h
@@ -33,6 +33,10 @@
*
*/
+#if defined(__cplusplus) || defined(c_plusplus)
+extern "C" {
+#endif
+
#include <drm.h>
/*
@@ -133,6 +137,8 @@
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
+#define DRM_MODE_CONNECTOR_TV 13
+#define DRM_MODE_CONNECTOR_eDP 14
#define DRM_MODE_PROP_PENDING (1<<0)
#define DRM_MODE_PROP_RANGE (1<<1)
@@ -145,6 +151,17 @@
#endif /* _DRM_MODE_H */
+
+/*
+ * Feature defines
+ *
+ * Just because these are defined doesn't mean that the kernel
+ * can do that feature; it's just for new code vs old libdrm.
+ */
+#define DRM_MODE_FEATURE_KMS 1
+#define DRM_MODE_FEATURE_DIRTYFB 1
+
+
typedef struct _drmModeRes {
int count_fbs;
@@ -168,7 +185,7 @@ typedef struct _drmModeModeInfo {
uint16_t hdisplay, hsync_start, hsync_end, htotal, hskew;
uint16_t vdisplay, vsync_start, vsync_end, vtotal, vscan;
- uint32_t vrefresh; /* vertical refresh * 1000 */
+ uint32_t vrefresh;
uint32_t flags;
uint32_t type;
@@ -185,6 +202,8 @@ typedef struct _drmModeFB {
uint32_t handle;
} drmModeFB, *drmModeFBPtr;
+typedef struct drm_clip_rect drmModeClip, *drmModeClipPtr;
+
typedef struct _drmModePropertyBlob {
uint32_t id;
uint32_t length;
@@ -259,7 +278,25 @@ typedef struct _drmModeConnector {
uint32_t *encoders; /**< List of encoder ids */
} drmModeConnector, *drmModeConnectorPtr;
+typedef struct _drmModePlane {
+ uint32_t count_formats;
+ uint32_t *formats;
+ uint32_t plane_id;
+
+ uint32_t crtc_id;
+ uint32_t fb_id;
+
+ uint32_t crtc_x, crtc_y;
+ uint32_t x, y;
+
+ uint32_t possible_crtcs;
+ uint32_t gamma_size;
+} drmModePlane, *drmModePlanePtr;
+typedef struct _drmModePlaneRes {
+ uint32_t count_planes;
+ uint32_t *planes;
+} drmModePlaneRes, *drmModePlaneResPtr;
extern void drmModeFreeModeInfo( drmModeModeInfoPtr ptr );
extern void drmModeFreeResources( drmModeResPtr ptr );
@@ -267,6 +304,8 @@ extern void drmModeFreeFB( drmModeFBPtr ptr );
extern void drmModeFreeCrtc( drmModeCrtcPtr ptr );
extern void drmModeFreeConnector( drmModeConnectorPtr ptr );
extern void drmModeFreeEncoder( drmModeEncoderPtr ptr );
+extern void drmModeFreePlane( drmModePlanePtr ptr );
+extern void drmModeFreePlaneResources(drmModePlaneResPtr ptr);
/**
 * Retrieves all of the resources associated with a card.
@@ -288,11 +327,23 @@ extern drmModeFBPtr drmModeGetFB(int fd, uint32_t bufferId);
extern int drmModeAddFB(int fd, uint32_t width, uint32_t height, uint8_t depth,
uint8_t bpp, uint32_t pitch, uint32_t bo_handle,
uint32_t *buf_id);
+/* ...with a specific pixel format */
+extern int drmModeAddFB2(int fd, uint32_t width, uint32_t height,
+ uint32_t pixel_format, uint32_t bo_handles[4],
+ uint32_t pitches[4], uint32_t offsets[4],
+ uint32_t *buf_id, uint32_t flags);
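+
+/* A minimal sketch for a single-plane buffer, assuming a fourcc code such
+ * as DRM_FORMAT_XRGB8888 from the kernel's drm_fourcc.h and an existing
+ * bo_handle/pitch:
+ *
+ *     uint32_t fb_id;
+ *     uint32_t handles[4] = { bo_handle };
+ *     uint32_t pitches[4] = { pitch };
+ *     uint32_t offsets[4] = { 0 };
+ *
+ *     drmModeAddFB2(fd, width, height, DRM_FORMAT_XRGB8888,
+ *                   handles, pitches, offsets, &fb_id, 0);
+ */
+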
/**
 * Destroys the given framebuffer.
*/
extern int drmModeRmFB(int fd, uint32_t bufferId);
+/**
+ * Mark a region of a framebuffer as dirty.
+ */
+extern int drmModeDirtyFB(int fd, uint32_t bufferId,
+ drmModeClipPtr clips, uint32_t num_clips);
+
+
/*
* Crtc functions
*/
@@ -362,3 +413,18 @@ extern int drmModeCrtcSetGamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *red, uint16_t *green, uint16_t *blue);
extern int drmModeCrtcGetGamma(int fd, uint32_t crtc_id, uint32_t size,
uint16_t *red, uint16_t *green, uint16_t *blue);
+extern int drmModePageFlip(int fd, uint32_t crtc_id, uint32_t fb_id,
+ uint32_t flags, void *user_data);
+
+extern drmModePlaneResPtr drmModeGetPlaneResources(int fd);
+extern drmModePlanePtr drmModeGetPlane(int fd, uint32_t plane_id);
+extern int drmModeSetPlane(int fd, uint32_t plane_id, uint32_t crtc_id,
+ uint32_t fb_id, uint32_t flags,
+ uint32_t crtc_x, uint32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+
+#if defined(__cplusplus) || defined(c_plusplus)
+}
+#endif