Diffstat (limited to 'lib/libdrm/intel')
-rw-r--r--  lib/libdrm/intel/intel_bufmgr.h      |   5
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_gem.c  | 108
-rw-r--r--  lib/libdrm/intel/intel_chipset.h     |  22
-rw-r--r--  lib/libdrm/intel/intel_decode.c      |   6
-rw-r--r--  lib/libdrm/intel/shlib_version       |   2
5 files changed, 128 insertions(+), 15 deletions(-)
diff --git a/lib/libdrm/intel/intel_bufmgr.h b/lib/libdrm/intel/intel_bufmgr.h
index 15f818e75..2eb9742ba 100644
--- a/lib/libdrm/intel/intel_bufmgr.h
+++ b/lib/libdrm/intel/intel_bufmgr.h
@@ -248,6 +248,11 @@ int drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result);
+int drm_intel_get_reset_stats(drm_intel_context *ctx,
+ uint32_t *reset_count,
+ uint32_t *active,
+ uint32_t *pending);
+
/** @{ Compatibility defines to keep old code building despite the symbol rename
* from dri_* to drm_intel_*
*/
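A minimal usage sketch of the new entry point declared above, assuming an already-opened DRM device fd; the batch size and the lack of error handling are illustration-only simplifications, while all function names are existing libdrm_intel API:

/* Hedged sketch: poll the per-context reset statistics added above. */
#include <stdio.h>
#include <stdint.h>
#include "intel_bufmgr.h"

static void report_resets(int fd)
{
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
	uint32_t reset_count, active, pending;

	if (drm_intel_get_reset_stats(ctx, &reset_count, &active, &pending) == 0)
		printf("resets=%u active=%u pending=%u\n",
		       reset_count, active, pending);

	drm_intel_gem_context_destroy(ctx);
	drm_intel_bufmgr_destroy(bufmgr);
}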
diff --git a/lib/libdrm/intel/intel_bufmgr_gem.c b/lib/libdrm/intel/intel_bufmgr_gem.c
index fb1a44c25..cbdf779df 100644
--- a/lib/libdrm/intel/intel_bufmgr_gem.c
+++ b/lib/libdrm/intel/intel_bufmgr_gem.c
@@ -151,6 +151,8 @@ struct _drm_intel_bo_gem {
/**
 * Kernel-assigned global name for this object
+ *
+ * List contains both flink named and prime fd'd objects
*/
unsigned int global_name;
drmMMListHead name_list;
@@ -866,10 +868,6 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
}
}
- bo_gem = calloc(1, sizeof(*bo_gem));
- if (!bo_gem)
- return NULL;
-
VG_CLEAR(open_arg);
open_arg.name = handle;
ret = drmIoctl(bufmgr_gem->fd,
@@ -878,9 +876,26 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
if (ret != 0) {
DBG("Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
- free(bo_gem);
return NULL;
}
+ /* Now see if someone has used a prime handle to get this
+ * object from the kernel before by looking through the list
+ * again for a matching gem_handle
+ */
+ for (list = bufmgr_gem->named.next;
+ list != &bufmgr_gem->named;
+ list = list->next) {
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
+ if (bo_gem->gem_handle == open_arg.handle) {
+ drm_intel_gem_bo_reference(&bo_gem->bo);
+ return &bo_gem->bo;
+ }
+ }
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
bo_gem->bo.size = open_arg.size;
bo_gem->bo.offset = 0;
bo_gem->bo.virtual = NULL;
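The lookup added above extends the library's one-bo-per-kernel-object rule to buffers first reached through a prime fd. A hedged fragment showing that rule on the plain flink path (the bufmgr and the flink handle value `flink_name` are assumed to exist already; with this hunk the same assertion holds when the object was first imported via prime):

/* Two opens of the same flink name return one bo, reference-counted twice. */
drm_intel_bo *a = drm_intel_bo_gem_create_from_name(bufmgr, "shared", flink_name);
drm_intel_bo *b = drm_intel_bo_gem_create_from_name(bufmgr, "shared", flink_name);
assert(a == b);
drm_intel_bo_unreference(b);
drm_intel_bo_unreference(a);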
@@ -1954,12 +1969,14 @@ aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
aub_out(bufmgr_gem,
CMD_AUB_TRACE_HEADER_BLOCK |
- (5 - 2));
+ ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
aub_out(bufmgr_gem,
AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
aub_out(bufmgr_gem, subtype);
aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
aub_out(bufmgr_gem, size);
+ if (bufmgr_gem->gen >= 8)
+ aub_out(bufmgr_gem, 0);
aub_write_bo_data(bo, offset, size);
}
@@ -2036,20 +2053,28 @@ aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
/* Make a ring buffer to execute our batchbuffer. */
memset(ringbuffer, 0, sizeof(ringbuffer));
- ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
- ringbuffer[ring_count++] = batch_buffer;
+ if (bufmgr_gem->gen >= 8) {
+ ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START | (3 - 2);
+ ringbuffer[ring_count++] = batch_buffer;
+ ringbuffer[ring_count++] = 0;
+ } else {
+ ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
+ ringbuffer[ring_count++] = batch_buffer;
+ }
/* Write out the ring. This appears to trigger execution of
* the ring in the simulator.
*/
aub_out(bufmgr_gem,
CMD_AUB_TRACE_HEADER_BLOCK |
- (5 - 2));
+ ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
aub_out(bufmgr_gem,
AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
aub_out(bufmgr_gem, 0); /* general/surface subtype */
aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
aub_out(bufmgr_gem, ring_count * 4);
+ if (bufmgr_gem->gen >= 8)
+ aub_out(bufmgr_gem, 0);
/* FIXME: Need some flush operations here? */
aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
@@ -2455,8 +2480,25 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
uint32_t handle;
drm_intel_bo_gem *bo_gem;
struct drm_i915_gem_get_tiling get_tiling;
+ drmMMListHead *list;
ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
+
+ /*
+ * See if the kernel has already returned this buffer to us. Just as
+ * for named buffers, we must not create two bo's pointing at the same
+ * kernel object
+ */
+ for (list = bufmgr_gem->named.next;
+ list != &bufmgr_gem->named;
+ list = list->next) {
+ bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
+ if (bo_gem->gem_handle == handle) {
+ drm_intel_gem_bo_reference(&bo_gem->bo);
+ return &bo_gem->bo;
+ }
+ }
+
if (ret) {
fprintf(stderr,"ret is %d %d\n", ret, errno);
return NULL;
@@ -2491,8 +2533,8 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s
bo_gem->has_error = false;
bo_gem->reusable = false;
- DRMINITLISTHEAD(&bo_gem->name_list);
DRMINITLISTHEAD(&bo_gem->vma_list);
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
VG_CLEAR(get_tiling);
get_tiling.handle = bo_gem->gem_handle;
@@ -2517,6 +2559,9 @@ drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ if (DRMLISTEMPTY(&bo_gem->name_list))
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+
if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
DRM_CLOEXEC, prime_fd) != 0)
return -errno;
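A hedged sketch of the round trip the prime hunks above enable: exporting a bo to a dmabuf fd and importing that fd again on the same bufmgr hands back the very same bo, because export now places the bo on the name list and import searches that list by gem handle. The buffer name "scanout" and the sizes are arbitrary assumptions:

/* Hedged sketch of a prime export/import round trip on one bufmgr. */
#include <assert.h>
#include <unistd.h>
#include "intel_bufmgr.h"

static void prime_round_trip(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scanout", 4096, 4096);
	int prime_fd;

	if (drm_intel_bo_gem_export_to_prime(bo, &prime_fd) == 0) {
		drm_intel_bo *again =
		    drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, 4096);
		assert(again == bo);	/* de-duplicated, not a second bo */
		drm_intel_bo_unreference(again);
		close(prime_fd);
	}
	drm_intel_bo_unreference(bo);
}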
@@ -2546,7 +2591,8 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
bo_gem->global_name = flink.name;
bo_gem->reusable = false;
- DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
+ if (DRMLISTEMPTY(&bo_gem->name_list))
+ DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
}
*name = bo_gem->global_name;
@@ -2966,11 +3012,13 @@ drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
aub_out(bufmgr_gem, 0); /* comment len */
/* Set up the GTT. The max we can handle is 256M */
- aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
+ aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
aub_out(bufmgr_gem, 0); /* subtype */
aub_out(bufmgr_gem, 0); /* offset */
aub_out(bufmgr_gem, gtt_size); /* size */
+ if (bufmgr_gem->gen >= 8)
+ aub_out(bufmgr_gem, 0);
for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
aub_out(bufmgr_gem, entry);
}
@@ -3023,6 +3071,40 @@ drm_intel_gem_context_destroy(drm_intel_context *ctx)
}
int
+drm_intel_get_reset_stats(drm_intel_context *ctx,
+ uint32_t *reset_count,
+ uint32_t *active,
+ uint32_t *pending)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem;
+ struct drm_i915_reset_stats stats;
+ int ret;
+
+ if (ctx == NULL)
+ return -EINVAL;
+
+ memset(&stats, 0, sizeof(stats));
+
+ bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
+ stats.ctx_id = ctx->ctx_id;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GET_RESET_STATS,
+ &stats);
+ if (ret == 0) {
+ if (reset_count != NULL)
+ *reset_count = stats.reset_count;
+
+ if (active != NULL)
+ *active = stats.batch_active;
+
+ if (pending != NULL)
+ *pending = stats.batch_pending;
+ }
+
+ return ret;
+}
+
+int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result)
@@ -3140,6 +3222,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->gen = 6;
else if (IS_GEN7(bufmgr_gem->pci_device))
bufmgr_gem->gen = 7;
+ else if (IS_GEN8(bufmgr_gem->pci_device))
+ bufmgr_gem->gen = 8;
else {
free(bufmgr_gem);
return NULL;
diff --git a/lib/libdrm/intel/intel_chipset.h b/lib/libdrm/intel/intel_chipset.h
index aeb439eb4..e5589be8f 100644
--- a/lib/libdrm/intel/intel_chipset.h
+++ b/lib/libdrm/intel/intel_chipset.h
@@ -148,6 +148,12 @@
#define PCI_CHIP_HASWELL_CRW_E_GT1 0x0D0E /* Reserved */
#define PCI_CHIP_HASWELL_CRW_E_GT2 0x0D1E
#define PCI_CHIP_HASWELL_CRW_E_GT3 0x0D2E
+#define BDW_SPARE 0x2
+#define BDW_ULT 0x6
+#define BDW_SERVER 0xa
+#define BDW_IRIS 0xb
+#define BDW_WORKSTATION 0xd
+#define BDW_ULX 0xe
#define PCI_CHIP_VALLEYVIEW_PO 0x0f30 /* VLV PO board */
#define PCI_CHIP_VALLEYVIEW_1 0x0f31
@@ -296,10 +302,24 @@
IS_HSW_GT2(devid) || \
IS_HSW_GT3(devid))
+#define IS_BROADWELL(devid) (((devid & 0xff00) != 0x1600) ? 0 : \
+ (((devid & 0x00f0) >> 4) > 3) ? 0 : \
+ ((devid & 0x000f) == BDW_SPARE) ? 1 : \
+ ((devid & 0x000f) == BDW_ULT) ? 1 : \
+ ((devid & 0x000f) == BDW_IRIS) ? 1 : \
+ ((devid & 0x000f) == BDW_SERVER) ? 1 : \
+ ((devid & 0x000f) == BDW_WORKSTATION) ? 1 : \
+ ((devid & 0x000f) == BDW_ULX) ? 1 : 0)
+
+
+#define IS_GEN8(devid) IS_BROADWELL(devid)
+
#define IS_9XX(dev) (IS_GEN3(dev) || \
IS_GEN4(dev) || \
IS_GEN5(dev) || \
IS_GEN6(dev) || \
- IS_GEN7(dev))
+ IS_GEN7(dev) || \
+ IS_GEN8(dev))
+
#endif /* _INTEL_CHIPSET_H */
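A hedged worked example of the new macro's arithmetic, using 0x1602 as an assumed Broadwell GT1 device ID and 0x0412 as an assumed Haswell GT2 ID; intel_chipset.h is libdrm's private header, included here only for illustration:

/* 0x1602: (devid & 0xff00) == 0x1600, GT field ((devid & 0x00f0) >> 4) == 0
 * is <= 3, and the low nibble (devid & 0x000f) == 0x2 == BDW_SPARE, so
 * IS_BROADWELL() evaluates to 1.  0x0412 fails the 0x1600 test and gives 0. */
#include <assert.h>
#include "intel_chipset.h"

static void check_bdw_detect(void)
{
	assert(IS_BROADWELL(0x1602));
	assert(IS_GEN8(0x1602));
	assert(!IS_BROADWELL(0x0412));
}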
diff --git a/lib/libdrm/intel/intel_decode.c b/lib/libdrm/intel/intel_decode.c
index ff19f92e9..c0a0cafc9 100644
--- a/lib/libdrm/intel/intel_decode.c
+++ b/lib/libdrm/intel/intel_decode.c
@@ -257,6 +257,8 @@ decode_mi(struct drm_intel_decode *ctx)
{ 0x03, 0, 1, 1, "MI_WAIT_FOR_EVENT", decode_MI_WAIT_FOR_EVENT },
{ 0x16, 0x7f, 3, 3, "MI_SEMAPHORE_MBOX" },
{ 0x26, 0x1f, 3, 4, "MI_FLUSH_DW" },
+ { 0x28, 0x3f, 3, 3, "MI_REPORT_PERF_COUNT" },
+ { 0x29, 0xff, 3, 3, "MI_LOAD_REGISTER_MEM" },
{ 0x0b, 0, 1, 1, "MI_SUSPEND_FLUSH"},
}, *opcode_mi = NULL;
@@ -3825,7 +3827,9 @@ drm_intel_decode_context_alloc(uint32_t devid)
ctx->devid = devid;
ctx->out = stdout;
- if (IS_GEN7(devid))
+ if (IS_GEN8(devid))
+ ctx->gen = 8;
+ else if (IS_GEN7(devid))
ctx->gen = 7;
else if (IS_GEN6(devid))
ctx->gen = 6;
diff --git a/lib/libdrm/intel/shlib_version b/lib/libdrm/intel/shlib_version
index 012c14171..3f0196ebf 100644
--- a/lib/libdrm/intel/shlib_version
+++ b/lib/libdrm/intel/shlib_version
@@ -1,2 +1,2 @@
major=3
-minor=0
+minor=1