author     Jonathan Gray <jsg@cvs.openbsd.org>  2018-09-13 12:02:28 +0000
committer  Jonathan Gray <jsg@cvs.openbsd.org>  2018-09-13 12:02:28 +0000
commit     dbcf863bb4f327692c345f403dee758bfa99087d (patch)
tree       2ac5d64585881796395e46971c16095f0eca446a /lib/libdrm/include/drm/i915_drm.h
parent     20ef3ba50ee2d8fcfaece2fb71a1cf67a3b69221 (diff)
Merge libdrm 2.4.94
Diffstat (limited to 'lib/libdrm/include/drm/i915_drm.h')
-rw-r--r--  lib/libdrm/include/drm/i915_drm.h | 321
1 file changed, 307 insertions(+), 14 deletions(-)
diff --git a/lib/libdrm/include/drm/i915_drm.h b/lib/libdrm/include/drm/i915_drm.h
index 5ebe0462f..16e452aa1 100644
--- a/lib/libdrm/include/drm/i915_drm.h
+++ b/lib/libdrm/include/drm/i915_drm.h
@@ -86,6 +86,62 @@ enum i915_mocs_table_index {
I915_MOCS_CACHED,
};
+/*
+ * Different engines serve different roles, and there may be more than one
+ * engine serving each role. enum drm_i915_gem_engine_class provides a
+ * classification of the role of the engine, which may be used when requesting
+ * operations to be performed on a certain subset of engines, or for providing
+ * information about that group.
+ */
+enum drm_i915_gem_engine_class {
+ I915_ENGINE_CLASS_RENDER = 0,
+ I915_ENGINE_CLASS_COPY = 1,
+ I915_ENGINE_CLASS_VIDEO = 2,
+ I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
+
+ I915_ENGINE_CLASS_INVALID = -1
+};
+
+/**
+ * DOC: perf_events exposed by i915 through /sys/bus/event_source/devices/i915
+ *
+ */
+
+enum drm_i915_pmu_engine_sample {
+ I915_SAMPLE_BUSY = 0,
+ I915_SAMPLE_WAIT = 1,
+ I915_SAMPLE_SEMA = 2
+};
+
+#define I915_PMU_SAMPLE_BITS (4)
+#define I915_PMU_SAMPLE_MASK (0xf)
+#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
+#define I915_PMU_CLASS_SHIFT \
+ (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
+
+#define __I915_PMU_ENGINE(class, instance, sample) \
+ ((class) << I915_PMU_CLASS_SHIFT | \
+ (instance) << I915_PMU_SAMPLE_BITS | \
+ (sample))
+
+#define I915_PMU_ENGINE_BUSY(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
+
+#define I915_PMU_ENGINE_WAIT(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
+
+#define I915_PMU_ENGINE_SEMA(class, instance) \
+ __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
+
+#define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
+
+#define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
+#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
+#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
+#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
+
+#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
+
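
As a usage sketch (not part of the header): these macros build the perf_event config value for the i915 PMU on Linux. The code below assumes the standard perf_event_open(2) syscall and the sysfs "type" file that dynamically registered PMUs expose; the path and error handling are illustrative.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <drm/i915_drm.h>

/* Read the dynamically assigned PMU type id of the i915 event source. */
static int i915_pmu_type(void)
{
	FILE *f = fopen("/sys/bus/event_source/devices/i915/type", "r");
	int type = -1;

	if (f) {
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
	}
	return type;
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t busy;
	int type, fd;

	type = i915_pmu_type();
	if (type < 0)
		return 1;

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.size = sizeof(attr);
	/* Busy sample of render engine class, instance 0. */
	attr.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);

	/* i915 is an uncore PMU: open with pid = -1 on a single CPU. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;
	if (read(fd, &busy, sizeof(busy)) == sizeof(busy))
		printf("render busy: %llu\n", (unsigned long long)busy);
	close(fd);
	return 0;
}
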
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
@@ -260,6 +316,9 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
#define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
#define DRM_I915_PERF_OPEN 0x36
+#define DRM_I915_PERF_ADD_CONFIG 0x37
+#define DRM_I915_PERF_REMOVE_CONFIG 0x38
+#define DRM_I915_QUERY 0x39
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -315,6 +374,9 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
#define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
+#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
+#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
+#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -393,10 +455,20 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_MIN_EU_IN_POOL 39
#define I915_PARAM_MMAP_GTT_VERSION 40
-/* Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
* priorities and the driver will attempt to execute batches in priority order.
+ * The param returns a capability bitmask; a nonzero value implies that the
+ * scheduler is enabled, with different features present according to the mask.
+ *
+ * The initial priority for each batch is supplied by the context and is
+ * controlled via I915_CONTEXT_PARAM_PRIORITY.
*/
#define I915_PARAM_HAS_SCHEDULER 41
+#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
+#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
+#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
+
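
A minimal sketch of probing these capability bits through the existing GETPARAM ioctl, using libdrm's drmIoctl() wrapper (fd is assumed to be an open i915 render node):

#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Returns nonzero if this kernel's scheduler supports priorities. */
static int has_priority_scheduler(int fd)
{
	int caps = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_HAS_SCHEDULER,
		.value = &caps,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* old kernel: no scheduler caps */
	return caps & I915_SCHEDULER_CAP_PRIORITY;
}
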
#define I915_PARAM_HUC_STATUS 42
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -412,6 +484,51 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_HAS_EXEC_FENCE 44
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
+ * user specified buffers for post-mortem debugging of GPU hangs. See
+ * EXEC_OBJECT_CAPTURE.
+ */
+#define I915_PARAM_HAS_EXEC_CAPTURE 45
+
+#define I915_PARAM_SLICE_MASK 46
+
+/* Assuming it's uniform for each slice, this queries the mask of subslices
+ * per-slice for this system.
+ */
+#define I915_PARAM_SUBSLICE_MASK 47
+
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
+ * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
+ */
+#define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
+ * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
+ */
+#define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
+
+/*
+ * Query whether every context (both per-file default and user created) is
+ * isolated (insofar as HW supports). If this parameter is not true, then
+ * freshly created contexts may inherit values from an existing context,
+ * rather than default HW values. If true, it also ensures (insofar as HW
+ * supports) that all state set by this context will not leak to any other
+ * context.
+ *
+ * As not every engine across every gen supports contexts, the returned
+ * value reports the support of context isolation for individual engines by
+ * returning a bitmask with the bit for each engine class set if that class
+ * supports isolation.
+ */
+#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
+
+/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
+ * registers. This used to be fixed per platform, but from CNL onwards it
+ * might vary depending on the part.
+ */
+#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
+
typedef struct drm_i915_getparam {
__s32 param;
/*
@@ -666,6 +783,8 @@ struct drm_i915_gem_relocation_entry {
#define I915_GEM_DOMAIN_VERTEX 0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT 0x00000040
+/** WC domain - uncached access */
+#define I915_GEM_DOMAIN_WC 0x00000080
/** @} */
struct drm_i915_gem_exec_object {
@@ -773,8 +892,15 @@ struct drm_i915_gem_exec_object2 {
* I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
*/
#define EXEC_OBJECT_ASYNC (1<<6)
+/* Request that the contents of this execobject be copied into the error
+ * state upon a GPU hang involving this batch for post-mortem debugging.
+ * These buffers are recorded in no particular order as "user" in
+ * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
+ * if the kernel supports this flag.
+ */
+#define EXEC_OBJECT_CAPTURE (1<<7)
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
-#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_ASYNC<<1)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
__u64 flags;
union {
@@ -784,6 +910,18 @@ struct drm_i915_gem_exec_object2 {
__u64 rsvd2;
};
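
A short sketch of opting one execobject into hang capture (availability should first be confirmed via I915_PARAM_HAS_EXEC_CAPTURE; obj is an entry in the execbuf's object array):

#include <drm/i915_drm.h>

/* Record this BO in /sys/class/drm/cardN/error if a hang involves the batch. */
static void mark_for_capture(struct drm_i915_gem_exec_object2 *obj)
{
	obj->flags |= EXEC_OBJECT_CAPTURE;
}
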
+struct drm_i915_gem_exec_fence {
+ /**
+ * User's handle for a drm_syncobj to wait on or signal.
+ */
+ __u32 handle;
+
+#define I915_EXEC_FENCE_WAIT (1<<0)
+#define I915_EXEC_FENCE_SIGNAL (1<<1)
+#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
+ __u32 flags;
+};
+
struct drm_i915_gem_execbuffer2 {
/**
* List of gem_exec_object2 structs
@@ -798,7 +936,11 @@ struct drm_i915_gem_execbuffer2 {
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
- /** This is a struct drm_clip_rect *cliprects */
+ /**
+ * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
+ * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
+ * struct drm_i915_gem_exec_fence *fences.
+ */
__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (7<<0)
#define I915_EXEC_DEFAULT (0<<0)
@@ -889,7 +1031,24 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_FENCE_OUT (1<<17)
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_OUT<<1))
+/*
+ * Traditionally the execbuf ioctl has only considered the final element in
+ * the execobject[] to be the executable batch. Often though, the client
+ * will know the batch object prior to construction, and being able to place
+ * it into the execobject[] array first can simplify the relocation tracking.
+ * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
+ * execobject[] as the batch instead (the default is to use the last
+ * element).
+ */
+#define I915_EXEC_BATCH_FIRST (1<<18)
+
+/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
+ * define an array of drm_i915_gem_exec_fence structures which specify a set of
+ * dma fences to wait upon or signal.
+ */
+#define I915_EXEC_FENCE_ARRAY (1<<19)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
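
Putting I915_EXEC_BATCH_FIRST and I915_EXEC_FENCE_ARRAY together, a hedged sketch of a submission that waits on one drm_syncobj and signals another (object setup, syncobj creation and error handling are assumed to happen elsewhere):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int submit_with_fences(int fd, struct drm_i915_gem_exec_object2 *objs,
			      unsigned int nobjs, __u32 wait_syncobj,
			      __u32 signal_syncobj)
{
	struct drm_i915_gem_exec_fence fences[2] = {
		{ .handle = wait_syncobj,   .flags = I915_EXEC_FENCE_WAIT },
		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
	};
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;
	execbuf.buffer_count = nobjs;
	/* With I915_EXEC_FENCE_ARRAY set, cliprects_ptr carries the fences. */
	execbuf.cliprects_ptr = (uintptr_t)fences;
	execbuf.num_cliprects = 2;
	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_FENCE_ARRAY |
			I915_EXEC_BATCH_FIRST; /* objs[0] is the batch */

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}
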
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1201,7 +1360,9 @@ struct drm_intel_overlay_attrs {
* active on a given plane.
*/
-#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
+#define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
+ * flags==0 to disable colorkeying.
+ */
#define I915_SET_COLORKEY_DESTINATION (1<<1)
#define I915_SET_COLORKEY_SOURCE (1<<2)
struct drm_intel_sprite_colorkey {
@@ -1239,14 +1400,16 @@ struct drm_i915_reg_read {
* be specified
*/
__u64 offset;
+#define I915_REG_READ_8B_WA (1ul << 0)
+
__u64 val; /* Return value */
};
/* Known registers:
*
* Render engine timestamp - 0x2358 + 64bit - gen7+
* - Note this register returns an invalid value if using the default
- * single instruction 8byte read, in order to workaround that use
- * offset (0x2538 | 1) instead.
+ * single instruction 8byte read; in order to work around that, pass the
+ * flag I915_REG_READ_8B_WA in the offset field.
*
*/
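
For instance, a sketch of reading that timestamp register with the workaround flag (the offset value is taken from the comment above):

#include <xf86drm.h>
#include <drm/i915_drm.h>

static __u64 read_render_timestamp(int fd)
{
	struct drm_i915_reg_read rr = {
		/* Render engine timestamp, with the 8-byte-read workaround. */
		.offset = 0x2358 | I915_REG_READ_8B_WA,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr))
		return 0;
	return rr.val;
}
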
@@ -1289,17 +1452,26 @@ struct drm_i915_gem_context_param {
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
#define I915_CONTEXT_PARAM_BANNABLE 0x5
+#define I915_CONTEXT_PARAM_PRIORITY 0x6
+#define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
+#define I915_CONTEXT_DEFAULT_PRIORITY 0
+#define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
__u64 value;
};
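
A sketch of adjusting a context's priority with the new param (ctx_id is assumed to come from context creation; the kernel typically requires CAP_SYS_NICE to raise priority above the default):

#include <xf86drm.h>
#include <drm/i915_drm.h>

static int set_context_priority(int fd, __u32 ctx_id, int prio)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_PRIORITY,
		.value = prio,	/* [-1023, 1023], 0 is the default */
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}
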
enum drm_i915_oa_format {
- I915_OA_FORMAT_A13 = 1,
- I915_OA_FORMAT_A29,
- I915_OA_FORMAT_A13_B8_C8,
- I915_OA_FORMAT_B4_C8,
- I915_OA_FORMAT_A45_B8_C8,
- I915_OA_FORMAT_B4_C8_A16,
- I915_OA_FORMAT_C4_B8,
+ I915_OA_FORMAT_A13 = 1, /* HSW only */
+ I915_OA_FORMAT_A29, /* HSW only */
+ I915_OA_FORMAT_A13_B8_C8, /* HSW only */
+ I915_OA_FORMAT_B4_C8, /* HSW only */
+ I915_OA_FORMAT_A45_B8_C8, /* HSW only */
+ I915_OA_FORMAT_B4_C8_A16, /* HSW only */
+ I915_OA_FORMAT_C4_B8, /* HSW+ */
+
+ /* Gen8+ */
+ I915_OA_FORMAT_A12,
+ I915_OA_FORMAT_A12_B8_C8,
+ I915_OA_FORMAT_A32u40_A4u32_B8_C8,
I915_OA_FORMAT_MAX /* non-ABI */
};
@@ -1424,6 +1596,127 @@ enum drm_i915_perf_record_type {
DRM_I915_PERF_RECORD_MAX /* non-ABI */
};
+/**
+ * Structure to upload perf dynamic configuration into the kernel.
+ */
+struct drm_i915_perf_oa_config {
+ /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
+ char uuid[36];
+
+ __u32 n_mux_regs;
+ __u32 n_boolean_regs;
+ __u32 n_flex_regs;
+
+ /*
+ * These fields are pointers to tuples of u32 values (register address,
+ * value). For example the expected length of the buffer pointed by
+ * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ */
+ __u64 mux_regs_ptr;
+ __u64 boolean_regs_ptr;
+ __u64 flex_regs_ptr;
+};
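
A hedged sketch of uploading such a config (the UUID and the single (address, value) mux pair are placeholders rather than a real metric set; on success the ioctl returns the id of the new config, and the operation typically requires elevated privileges):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int add_oa_config(int fd)
{
	/* (address, value) pairs; these numbers are illustrative only. */
	uint32_t mux_regs[] = { 0x9888, 0x00000000 };
	struct drm_i915_perf_oa_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	/* Note: uuid is exactly 36 chars and not NUL-terminated. */
	memcpy(cfg.uuid, "00000000-0000-0000-0000-000000000000",
	       sizeof(cfg.uuid));
	cfg.n_mux_regs = 1;
	cfg.mux_regs_ptr = (uintptr_t)mux_regs;

	/* Returns the new config id (> 0) on success. */
	return drmIoctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
}
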
+
+struct drm_i915_query_item {
+ __u64 query_id;
+#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+
+ /*
+ * When set to zero by userspace, this is filled with the size of the
+ * data to be written at the data_ptr pointer. The kernel sets this
+ * value to a negative value to signal an error on a particular query
+ * item.
+ */
+ __s32 length;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * Data will be written at the location pointed by data_ptr when the
+ * value of length matches the length of the data to be written by the
+ * kernel.
+ */
+ __u64 data_ptr;
+};
+
+struct drm_i915_query {
+ __u32 num_items;
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * This points to an array of num_items drm_i915_query_item structures.
+ */
+ __u64 items_ptr;
+};
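
The length field implies the usual two-pass pattern; a sketch for the topology query (error handling abbreviated):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static struct drm_i915_query_topology_info *query_topology(int fd)
{
	struct drm_i915_query_item item;
	struct drm_i915_query q;
	struct drm_i915_query_topology_info *info;

	memset(&item, 0, sizeof(item));
	item.query_id = DRM_I915_QUERY_TOPOLOGY_INFO;

	memset(&q, 0, sizeof(q));
	q.num_items = 1;
	q.items_ptr = (uintptr_t)&item;

	/* Pass 1: length == 0 in, required size (or negative error) out. */
	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
		return NULL;

	info = calloc(1, item.length);
	item.data_ptr = (uintptr_t)info;

	/* Pass 2: length matches, so the kernel writes the data. */
	if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &q)) {
		free(info);
		return NULL;
	}
	return info;
}
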
+
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO:
+ *
+ * data: contains the 3 pieces of information:
+ *
+ * - the slice mask with one bit per slice telling whether a slice is
+ *   available. The availability of slice X can be queried with the following
+ *   formula:
+ *
+ *   (data[X / 8] >> (X % 8)) & 1
+ *
+ * - the subslice mask for each slice with one bit per subslice telling
+ *   whether a subslice is available. The availability of subslice Y in slice
+ *   X can be queried with the following formula:
+ *
+ *   (data[subslice_offset +
+ *         X * subslice_stride +
+ *         Y / 8] >> (Y % 8)) & 1
+ *
+ * - the EU mask for each subslice in each slice with one bit per EU telling
+ *   whether an EU is available. The availability of EU Z in subslice Y in
+ *   slice X can be queried with the following formula:
+ *
+ * (data[eu_offset +
+ * (X * max_subslices + Y) * eu_stride +
+ * Z / 8] >> (Z % 8)) & 1
+ */
+struct drm_i915_query_topology_info {
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u16 flags;
+
+ __u16 max_slices;
+ __u16 max_subslices;
+ __u16 max_eus_per_subslice;
+
+ /*
+ * Offset in data[] at which the subslice masks are stored.
+ */
+ __u16 subslice_offset;
+
+ /*
+ * Stride at which each of the subslice masks for each slice are
+ * stored.
+ */
+ __u16 subslice_stride;
+
+ /*
+ * Offset in data[] at which the EU masks are stored.
+ */
+ __u16 eu_offset;
+
+ /*
+ * Stride at which each of the EU masks for each subslice are stored.
+ */
+ __u16 eu_stride;
+
+ __u8 data[];
+};
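
Translated into code, the three formulas above become (a sketch, assuming info was returned by a successful DRM_I915_QUERY_TOPOLOGY_INFO query):

#include <drm/i915_drm.h>

static int slice_available(const struct drm_i915_query_topology_info *info,
			   int s)
{
	return (info->data[s / 8] >> (s % 8)) & 1;
}

static int subslice_available(const struct drm_i915_query_topology_info *info,
			      int s, int ss)
{
	return (info->data[info->subslice_offset + s * info->subslice_stride +
			   ss / 8] >> (ss % 8)) & 1;
}

static int eu_available(const struct drm_i915_query_topology_info *info,
			int s, int ss, int eu)
{
	return (info->data[info->eu_offset +
			   (s * info->max_subslices + ss) * info->eu_stride +
			   eu / 8] >> (eu % 8)) & 1;
}
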
+
#if defined(__cplusplus)
}
#endif