Diffstat (limited to 'lib/libdrm/intel')
-rw-r--r--  lib/libdrm/intel/intel_bufmgr.c      |  20
-rw-r--r--  lib/libdrm/intel/intel_bufmgr.h      |   2
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_fake.c |   1
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_gem.c  | 250
-rw-r--r--  lib/libdrm/intel/intel_bufmgr_priv.h |  21
-rw-r--r--  lib/libdrm/intel/intel_chipset.h     | 117
-rw-r--r--  lib/libdrm/intel/intel_decode.c      |   2
7 files changed, 352 insertions, 61 deletions
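The bulk of the diff below wires softpin (EXEC_OBJECT_PINNED) and 48-bit PPGTT placement (EXEC_OBJECT_SUPPORTS_48B_ADDRESS) through libdrm_intel, exposing two new public entry points, drm_intel_bo_set_softpin_offset() and drm_intel_bo_use_48b_address_range(). A minimal sketch of how a client might use them follows; the buffer name, size, and example GPU address are illustrative only, and both calls return -ENODEV when the bufmgr backend or kernel lacks support.

/* Hedged sketch: allocate a BO, opt it into the full 48-bit PPGTT range,
 * and pin it at a caller-chosen GPU virtual address.  The names and the
 * example offset are made up for illustration; error handling is minimal.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include "intel_bufmgr.h"

static int pin_example(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bo *bo;
	uint64_t gpu_addr = 0x100000000ull;	/* arbitrary example address above 4 GiB */
	int ret;

	bo = drm_intel_bo_alloc(bufmgr, "pinned scratch", 4096, 4096);
	if (!bo)
		return -1;

	/* Allow the kernel to place (and keep) this BO anywhere in the 48-bit range. */
	ret = drm_intel_bo_use_48b_address_range(bo, 1);
	if (ret == -ENODEV)
		fprintf(stderr, "full PPGTT unavailable, BO stays in the 32-bit range\n");

	/* Request EXEC_OBJECT_PINNED at gpu_addr on the next execbuffer. */
	ret = drm_intel_bo_set_softpin_offset(bo, gpu_addr);
	if (ret == -ENODEV)
		fprintf(stderr, "kernel does not support softpin\n");

	drm_intel_bo_unreference(bo);
	return ret;
}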
diff --git a/lib/libdrm/intel/intel_bufmgr.c b/lib/libdrm/intel/intel_bufmgr.c index 14ea9f9fa..a28534003 100644 --- a/lib/libdrm/intel/intel_bufmgr.c +++ b/lib/libdrm/intel/intel_bufmgr.c @@ -261,6 +261,15 @@ drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, } int +drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset) +{ + if (bo->bufmgr->bo_set_softpin_offset) + return bo->bufmgr->bo_set_softpin_offset(bo, offset); + + return -ENODEV; +} + +int drm_intel_bo_disable_reuse(drm_intel_bo *bo) { if (bo->bufmgr->bo_disable_reuse) @@ -293,6 +302,17 @@ drm_intel_bo_madvise(drm_intel_bo *bo, int madv) } int +drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable) +{ + if (bo->bufmgr->bo_use_48b_address_range) { + bo->bufmgr->bo_use_48b_address_range(bo, enable); + return 0; + } + + return -ENODEV; +} + +int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) { return bo->bufmgr->bo_references(bo, target_bo); diff --git a/lib/libdrm/intel/intel_bufmgr.h b/lib/libdrm/intel/intel_bufmgr.h index 95eecb800..a1abbcd2b 100644 --- a/lib/libdrm/intel/intel_bufmgr.h +++ b/lib/libdrm/intel/intel_bufmgr.h @@ -164,6 +164,8 @@ int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name); int drm_intel_bo_busy(drm_intel_bo *bo); int drm_intel_bo_madvise(drm_intel_bo *bo, int madv); +int drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable); +int drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset); int drm_intel_bo_disable_reuse(drm_intel_bo *bo); int drm_intel_bo_is_reusable(drm_intel_bo *bo); diff --git a/lib/libdrm/intel/intel_bufmgr_fake.c b/lib/libdrm/intel/intel_bufmgr_fake.c index 551e05dee..7f4c7b9fa 100644 --- a/lib/libdrm/intel/intel_bufmgr_fake.c +++ b/lib/libdrm/intel/intel_bufmgr_fake.c @@ -42,6 +42,7 @@ #include <string.h> #include <assert.h> #include <errno.h> +#include <strings.h> #include <xf86drm.h> #include <pthread.h> #include "intel_bufmgr.h" diff --git a/lib/libdrm/intel/intel_bufmgr_gem.c b/lib/libdrm/intel/intel_bufmgr_gem.c index ef437cbd2..93261ce97 100644 --- a/lib/libdrm/intel/intel_bufmgr_gem.c +++ b/lib/libdrm/intel/intel_bufmgr_gem.c @@ -83,6 +83,22 @@ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define MAX2(A, B) ((A) > (B) ? (A) : (B)) +/** + * upper_32_bits - return bits 32-63 of a number + * @n: the number we're accessing + * + * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress + * the "right shift count >= width of type" warning when that quantity is + * 32-bits. 
+ */ +#define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16)) + +/** + * lower_32_bits - return bits 0-31 of a number + * @n: the number we're accessing + */ +#define lower_32_bits(n) ((__u32)(n)) + typedef struct _drm_intel_bo_gem drm_intel_bo_gem; struct drm_intel_gem_bo_bucket { @@ -186,6 +202,13 @@ struct _drm_intel_bo_gem { drm_intel_reloc_target *reloc_target_info; /** Number of entries in relocs */ int reloc_count; + /** Array of BOs that are referenced by this buffer and will be softpinned */ + drm_intel_bo **softpin_target; + /** Number softpinned BOs that are referenced by this buffer */ + int softpin_target_count; + /** Maximum amount of softpinned BOs that are referenced by this buffer */ + int softpin_target_size; + /** Mapped address for the buffer, saved across map/unmap cycles */ void *mem_virtual; /** GTT virtual address for the buffer, saved across map/unmap cycles */ @@ -239,6 +262,20 @@ struct _drm_intel_bo_gem { bool is_userptr; /** + * Boolean of whether this buffer can be placed in the full 48-bit + * address range on gen8+. + * + * By default, buffers will be keep in a 32-bit range, unless this + * flag is explicitly set. + */ + bool use_48b_address_range; + + /** + * Whether this buffer is softpinned at offset specified by the user + */ + bool is_softpin; + + /** * Size in bytes of this buffer and its relocation descendents. * * Used to avoid costly tree walking in @@ -391,8 +428,9 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem) drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; - if (bo_gem->relocs == NULL) { - DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, + if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) { + DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle, + bo_gem->is_softpin ? "*" : "", bo_gem->name); continue; } @@ -402,16 +440,36 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem) drm_intel_bo_gem *target_gem = (drm_intel_bo_gem *) target_bo; - DBG("%2d: %d (%s)@0x%08llx -> " - "%d (%s)@0x%08lx + 0x%08x\n", + DBG("%2d: %d %s(%s)@0x%08x %08x -> " + "%d (%s)@0x%08x %08x + 0x%08x\n", i, - bo_gem->gem_handle, bo_gem->name, - (unsigned long long)bo_gem->relocs[j].offset, + bo_gem->gem_handle, + bo_gem->is_softpin ? "*" : "", + bo_gem->name, + upper_32_bits(bo_gem->relocs[j].offset), + lower_32_bits(bo_gem->relocs[j].offset), target_gem->gem_handle, target_gem->name, - target_bo->offset64, + upper_32_bits(target_bo->offset64), + lower_32_bits(target_bo->offset64), bo_gem->relocs[j].delta); } + + for (j = 0; j < bo_gem->softpin_target_count; j++) { + drm_intel_bo *target_bo = bo_gem->softpin_target[j]; + drm_intel_bo_gem *target_gem = + (drm_intel_bo_gem *) target_bo; + DBG("%2d: %d %s(%s) -> " + "%d *(%s)@0x%08x %08x\n", + i, + bo_gem->gem_handle, + bo_gem->is_softpin ? 
"*" : "", + bo_gem->name, + target_gem->gem_handle, + target_gem->name, + upper_32_bits(target_bo->offset64), + lower_32_bits(target_bo->offset64)); + } } } @@ -477,11 +535,17 @@ drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence) drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; int index; + int flags = 0; + + if (need_fence) + flags |= EXEC_OBJECT_NEEDS_FENCE; + if (bo_gem->use_48b_address_range) + flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS; + if (bo_gem->is_softpin) + flags |= EXEC_OBJECT_PINNED; if (bo_gem->validate_index != -1) { - if (need_fence) - bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= - EXEC_OBJECT_NEEDS_FENCE; + bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags; return; } @@ -508,15 +572,12 @@ drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence) bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count; bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs; bufmgr_gem->exec2_objects[index].alignment = bo->align; - bufmgr_gem->exec2_objects[index].offset = 0; + bufmgr_gem->exec2_objects[index].offset = bo_gem->is_softpin ? + bo->offset64 : 0; bufmgr_gem->exec_bos[index] = bo; - bufmgr_gem->exec2_objects[index].flags = 0; + bufmgr_gem->exec2_objects[index].flags = flags; bufmgr_gem->exec2_objects[index].rsvd1 = 0; bufmgr_gem->exec2_objects[index].rsvd2 = 0; - if (need_fence) { - bufmgr_gem->exec2_objects[index].flags |= - EXEC_OBJECT_NEEDS_FENCE; - } bufmgr_gem->exec_count++; } @@ -789,6 +850,7 @@ retry: bo_gem->used_as_reloc_target = false; bo_gem->has_error = false; bo_gem->reusable = true; + bo_gem->use_48b_address_range = false; drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment); @@ -935,6 +997,7 @@ drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr, bo_gem->used_as_reloc_target = false; bo_gem->has_error = false; bo_gem->reusable = false; + bo_gem->use_48b_address_range = false; drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0); @@ -1090,6 +1153,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, bo_gem->bo.handle = open_arg.handle; bo_gem->global_name = handle; bo_gem->reusable = false; + bo_gem->use_48b_address_range = false; memclear(get_tiling); get_tiling.handle = bo_gem->gem_handle; @@ -1265,8 +1329,12 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time) time); } } + for (i = 0; i < bo_gem->softpin_target_count; i++) + drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i], + time); bo_gem->reloc_count = 0; bo_gem->used_as_reloc_target = false; + bo_gem->softpin_target_count = 0; DBG("bo_unreference final: %d (%s)\n", bo_gem->gem_handle, bo_gem->name); @@ -1280,6 +1348,11 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time) free(bo_gem->relocs); bo_gem->relocs = NULL; } + if (bo_gem->softpin_target) { + free(bo_gem->softpin_target); + bo_gem->softpin_target = NULL; + bo_gem->softpin_target_size = 0; + } /* Clear any left-over mappings */ if (bo_gem->map_count) { @@ -1919,14 +1992,6 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences; } - bo_gem->relocs[bo_gem->reloc_count].offset = offset; - bo_gem->relocs[bo_gem->reloc_count].delta = target_offset; - bo_gem->relocs[bo_gem->reloc_count].target_handle = - target_bo_gem->gem_handle; - bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains; - bo_gem->relocs[bo_gem->reloc_count].write_domain = 
write_domain; - bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64; - bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo; if (target_bo != bo) drm_intel_gem_bo_reference(target_bo); @@ -1936,21 +2001,77 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, else bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0; + bo_gem->relocs[bo_gem->reloc_count].offset = offset; + bo_gem->relocs[bo_gem->reloc_count].delta = target_offset; + bo_gem->relocs[bo_gem->reloc_count].target_handle = + target_bo_gem->gem_handle; + bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains; + bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain; + bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64; bo_gem->reloc_count++; return 0; } +static void +drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable) +{ + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; + bo_gem->use_48b_address_range = enable; +} + +static int +drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo) +{ + drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; + drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo; + if (bo_gem->has_error) + return -ENOMEM; + + if (target_bo_gem->has_error) { + bo_gem->has_error = true; + return -ENOMEM; + } + + if (!target_bo_gem->is_softpin) + return -EINVAL; + if (target_bo_gem == bo_gem) + return -EINVAL; + + if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) { + int new_size = bo_gem->softpin_target_size * 2; + if (new_size == 0) + new_size = bufmgr_gem->max_relocs; + + bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size * + sizeof(drm_intel_bo *)); + if (!bo_gem->softpin_target) + return -ENOMEM; + + bo_gem->softpin_target_size = new_size; + } + bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo; + drm_intel_gem_bo_reference(target_bo); + bo_gem->softpin_target_count++; + + return 0; +} + static int drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, drm_intel_bo *target_bo, uint32_t target_offset, uint32_t read_domains, uint32_t write_domain) { drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; + drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo; - return do_bo_emit_reloc(bo, offset, target_bo, target_offset, - read_domains, write_domain, - !bufmgr_gem->fenced_relocs); + if (target_bo_gem->is_softpin) + return drm_intel_gem_bo_add_softpin_target(bo, target_bo); + else + return do_bo_emit_reloc(bo, offset, target_bo, target_offset, + read_domains, write_domain, + !bufmgr_gem->fenced_relocs); } static int @@ -1983,6 +2104,8 @@ drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo) * * Any further drm_intel_bufmgr_check_aperture_space() queries * involving this buffer in the tree are undefined after this call. + * + * This also removes all softpinned targets being referenced by the BO. 
*/ void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start) @@ -2009,6 +2132,12 @@ drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start) } bo_gem->reloc_count = start; + for (i = 0; i < bo_gem->softpin_target_count; i++) { + drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i]; + drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec); + } + bo_gem->softpin_target_count = 0; + pthread_mutex_unlock(&bufmgr_gem->lock); } @@ -2051,7 +2180,7 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo) drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; int i; - if (bo_gem->relocs == NULL) + if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) return; for (i = 0; i < bo_gem->reloc_count; i++) { @@ -2072,6 +2201,17 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo) /* Add the target to the validate list */ drm_intel_add_validate_buffer2(target_bo, need_fence); } + + for (i = 0; i < bo_gem->softpin_target_count; i++) { + drm_intel_bo *target_bo = bo_gem->softpin_target[i]; + + if (target_bo == bo) + continue; + + drm_intel_gem_bo_mark_mmaps_incoherent(bo); + drm_intel_gem_bo_process_reloc2(target_bo); + drm_intel_add_validate_buffer2(target_bo, false); + } } @@ -2087,10 +2227,12 @@ drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem) /* Update the buffer offset */ if (bufmgr_gem->exec_objects[i].offset != bo->offset64) { - DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n", - bo_gem->gem_handle, bo_gem->name, bo->offset64, - (unsigned long long)bufmgr_gem->exec_objects[i]. - offset); + DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n", + bo_gem->gem_handle, bo_gem->name, + upper_32_bits(bo->offset64), + lower_32_bits(bo->offset64), + upper_32_bits(bufmgr_gem->exec_objects[i].offset), + lower_32_bits(bufmgr_gem->exec_objects[i].offset)); bo->offset64 = bufmgr_gem->exec_objects[i].offset; bo->offset = bufmgr_gem->exec_objects[i].offset; } @@ -2109,9 +2251,16 @@ drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem) /* Update the buffer offset */ if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) { - DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n", - bo_gem->gem_handle, bo_gem->name, bo->offset64, - (unsigned long long)bufmgr_gem->exec2_objects[i].offset); + /* If we're seeing softpinned object here it means that the kernel + * has relocated our object... 
Indicating a programming error + */ + assert(!bo_gem->is_softpin); + DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n", + bo_gem->gem_handle, bo_gem->name, + upper_32_bits(bo->offset64), + lower_32_bits(bo->offset64), + upper_32_bits(bufmgr_gem->exec2_objects[i].offset), + lower_32_bits(bufmgr_gem->exec2_objects[i].offset)); bo->offset64 = bufmgr_gem->exec2_objects[i].offset; bo->offset = bufmgr_gem->exec2_objects[i].offset; } @@ -2437,6 +2586,17 @@ drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, return 0; } +static int +drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset) +{ + drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; + + bo_gem->is_softpin = true; + bo->offset64 = offset; + bo->offset = offset; + return 0; +} + drm_intel_bo * drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size) { @@ -2500,6 +2660,7 @@ drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int s bo_gem->used_as_reloc_target = false; bo_gem->has_error = false; bo_gem->reusable = false; + bo_gem->use_48b_address_range = false; DRMINITLISTHEAD(&bo_gem->vma_list); DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named); @@ -2815,6 +2976,13 @@ _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) return 1; } + for (i = 0; i< bo_gem->softpin_target_count; i++) { + if (bo_gem->softpin_target[i] == target_bo) + return 1; + if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo)) + return 1; + } + return 0; } @@ -3275,6 +3443,11 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size) ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); bufmgr_gem->has_vebox = (ret == 0) & (*gp.value > 0); + gp.param = I915_PARAM_HAS_EXEC_SOFTPIN; + ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); + if (ret == 0 && *gp.value > 0) + bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset; + if (bufmgr_gem->gen < 4) { gp.param = I915_PARAM_NUM_FENCES_AVAIL; gp.value = &bufmgr_gem->available_fences; @@ -3301,6 +3474,13 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size) } } + if (bufmgr_gem->gen >= 8) { + gp.param = I915_PARAM_HAS_ALIASING_PPGTT; + ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); + if (ret == 0 && *gp.value == 3) + bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range; + } + /* Let's go with one relocation per every 2 dwords (but round down a bit * since a power of two will mean an extra page allocation for the reloc * buffer). diff --git a/lib/libdrm/intel/intel_bufmgr_priv.h b/lib/libdrm/intel/intel_bufmgr_priv.h index 59ebd1860..7e360a0b2 100644 --- a/lib/libdrm/intel/intel_bufmgr_priv.h +++ b/lib/libdrm/intel/intel_bufmgr_priv.h @@ -152,6 +152,20 @@ struct _drm_intel_bufmgr { void (*destroy) (drm_intel_bufmgr *bufmgr); /** + * Indicate if the buffer can be placed anywhere in the full ppgtt + * address range (2^48). + * + * Any resource used with flat/heapless (0x00000000-0xfffff000) + * General State Heap (GSH) or Intructions State Heap (ISH) must + * be in a 32-bit range. 48-bit range will only be used when explicitly + * requested. + * + * \param bo Buffer to set the use_48b_address_range flag. + * \param enable The flag value. + */ + void (*bo_use_48b_address_range) (drm_intel_bo *bo, uint32_t enable); + + /** * Add relocation entry in reloc_buf, which will be updated with the * target buffer's real offset on on command submission. 
* @@ -227,6 +241,13 @@ struct _drm_intel_bufmgr { uint32_t * swizzle_mode); /** + * Set the offset at which this buffer will be softpinned + * \param bo Buffer to set the softpin offset for + * \param offset Softpin offset + */ + int (*bo_set_softpin_offset) (drm_intel_bo *bo, uint64_t offset); + + /** * Create a visible name for a buffer which can be used by other apps * * \param buf Buffer to create a name for diff --git a/lib/libdrm/intel/intel_chipset.h b/lib/libdrm/intel/intel_chipset.h index 253ea7105..35148e522 100644 --- a/lib/libdrm/intel/intel_chipset.h +++ b/lib/libdrm/intel/intel_chipset.h @@ -165,21 +165,50 @@ #define PCI_CHIP_CHERRYVIEW_2 0x22b2 #define PCI_CHIP_CHERRYVIEW_3 0x22b3 -#define PCI_CHIP_SKYLAKE_ULT_GT2 0x1916 +#define PCI_CHIP_SKYLAKE_DT_GT1 0x1902 #define PCI_CHIP_SKYLAKE_ULT_GT1 0x1906 -#define PCI_CHIP_SKYLAKE_ULT_GT3 0x1926 -#define PCI_CHIP_SKYLAKE_ULT_GT2F 0x1921 -#define PCI_CHIP_SKYLAKE_ULX_GT1 0x190E -#define PCI_CHIP_SKYLAKE_ULX_GT2 0x191E +#define PCI_CHIP_SKYLAKE_SRV_GT1 0x190A /* Reserved */ +#define PCI_CHIP_SKYLAKE_ULX_GT1 0x190E /* Reserved */ #define PCI_CHIP_SKYLAKE_DT_GT2 0x1912 -#define PCI_CHIP_SKYLAKE_DT_GT1 0x1902 +#define PCI_CHIP_SKYLAKE_FUSED0_GT2 0x1913 /* Reserved */ +#define PCI_CHIP_SKYLAKE_FUSED1_GT2 0x1915 /* Reserved */ +#define PCI_CHIP_SKYLAKE_ULT_GT2 0x1916 +#define PCI_CHIP_SKYLAKE_FUSED2_GT2 0x1917 /* Reserved */ +#define PCI_CHIP_SKYLAKE_SRV_GT2 0x191A /* Reserved */ #define PCI_CHIP_SKYLAKE_HALO_GT2 0x191B -#define PCI_CHIP_SKYLAKE_HALO_GT3 0x192B -#define PCI_CHIP_SKYLAKE_HALO_GT1 0x190B -#define PCI_CHIP_SKYLAKE_SRV_GT2 0x191A -#define PCI_CHIP_SKYLAKE_SRV_GT3 0x192A -#define PCI_CHIP_SKYLAKE_SRV_GT1 0x190A #define PCI_CHIP_SKYLAKE_WKS_GT2 0x191D +#define PCI_CHIP_SKYLAKE_ULX_GT2 0x191E +#define PCI_CHIP_SKYLAKE_MOBILE_GT2 0x1921 /* Reserved */ +#define PCI_CHIP_SKYLAKE_GT3 0x1926 +#define PCI_CHIP_SKYLAKE_HALO_GT3 0x192B /* Reserved */ +#define PCI_CHIP_SKYLAKE_SRV_GT4 0x192A +#define PCI_CHIP_SKYLAKE_DT_GT4 0x1932 +#define PCI_CHIP_SKYLAKE_SRV_GT4X 0x193A +#define PCI_CHIP_SKYLAKE_H_GT4 0x193B +#define PCI_CHIP_SKYLAKE_WKS_GT4 0x193D + +#define PCI_CHIP_KABYLAKE_ULT_GT2 0x5916 +#define PCI_CHIP_KABYLAKE_ULT_GT1_5 0x5913 +#define PCI_CHIP_KABYLAKE_ULT_GT1 0x5906 +#define PCI_CHIP_KABYLAKE_ULT_GT3 0x5926 +#define PCI_CHIP_KABYLAKE_ULT_GT2F 0x5921 +#define PCI_CHIP_KABYLAKE_ULX_GT1_5 0x5915 +#define PCI_CHIP_KABYLAKE_ULX_GT1 0x590E +#define PCI_CHIP_KABYLAKE_ULX_GT2 0x591E +#define PCI_CHIP_KABYLAKE_DT_GT2 0x5912 +#define PCI_CHIP_KABYLAKE_DT_GT1_5 0x5917 +#define PCI_CHIP_KABYLAKE_DT_GT1 0x5902 +#define PCI_CHIP_KABYLAKE_DT_GT4 0x5932 +#define PCI_CHIP_KABYLAKE_HALO_GT2 0x591B +#define PCI_CHIP_KABYLAKE_HALO_GT4 0x593B +#define PCI_CHIP_KABYLAKE_HALO_GT3 0x592B +#define PCI_CHIP_KABYLAKE_HALO_GT1 0x590B +#define PCI_CHIP_KABYLAKE_SRV_GT2 0x591A +#define PCI_CHIP_KABYLAKE_SRV_GT3 0x592A +#define PCI_CHIP_KABYLAKE_SRV_GT1 0x590A +#define PCI_CHIP_KABYLAKE_SRV_GT4 0x593A +#define PCI_CHIP_KABYLAKE_WKS_GT2 0x591D +#define PCI_CHIP_KABYLAKE_WKS_GT4 0x593D #define PCI_CHIP_BROXTON_0 0x0A84 #define PCI_CHIP_BROXTON_1 0x1A84 @@ -347,31 +376,71 @@ #define IS_SKL_GT1(devid) ((devid) == PCI_CHIP_SKYLAKE_ULT_GT1 || \ (devid) == PCI_CHIP_SKYLAKE_ULX_GT1 || \ (devid) == PCI_CHIP_SKYLAKE_DT_GT1 || \ - (devid) == PCI_CHIP_SKYLAKE_HALO_GT1 || \ (devid) == PCI_CHIP_SKYLAKE_SRV_GT1) -#define IS_SKL_GT2(devid) ((devid) == PCI_CHIP_SKYLAKE_ULT_GT2 || \ - (devid) == PCI_CHIP_SKYLAKE_ULT_GT2F || \ - (devid) == PCI_CHIP_SKYLAKE_ULX_GT2 || \ - 
(devid) == PCI_CHIP_SKYLAKE_DT_GT2 || \ - (devid) == PCI_CHIP_SKYLAKE_HALO_GT2 || \ +#define IS_SKL_GT2(devid) ((devid) == PCI_CHIP_SKYLAKE_DT_GT2 || \ + (devid) == PCI_CHIP_SKYLAKE_FUSED0_GT2 || \ + (devid) == PCI_CHIP_SKYLAKE_FUSED1_GT2 || \ + (devid) == PCI_CHIP_SKYLAKE_ULT_GT2 || \ + (devid) == PCI_CHIP_SKYLAKE_FUSED2_GT2 || \ (devid) == PCI_CHIP_SKYLAKE_SRV_GT2 || \ - (devid) == PCI_CHIP_SKYLAKE_WKS_GT2) - -#define IS_SKL_GT3(devid) ((devid) == PCI_CHIP_SKYLAKE_ULT_GT3 || \ - (devid) == PCI_CHIP_SKYLAKE_HALO_GT3 || \ - (devid) == PCI_CHIP_SKYLAKE_SRV_GT3) + (devid) == PCI_CHIP_SKYLAKE_HALO_GT2 || \ + (devid) == PCI_CHIP_SKYLAKE_WKS_GT2 || \ + (devid) == PCI_CHIP_SKYLAKE_ULX_GT2 || \ + (devid) == PCI_CHIP_SKYLAKE_MOBILE_GT2) + +#define IS_SKL_GT3(devid) ((devid) == PCI_CHIP_SKYLAKE_GT3 || \ + (devid) == PCI_CHIP_SKYLAKE_HALO_GT3) + +#define IS_SKL_GT4(devid) ((devid) == PCI_CHIP_SKYLAKE_SRV_GT4 || \ + (devid) == PCI_CHIP_SKYLAKE_DT_GT4 || \ + (devid) == PCI_CHIP_SKYLAKE_SRV_GT4X || \ + (devid) == PCI_CHIP_SKYLAKE_H_GT4 || \ + (devid) == PCI_CHIP_SKYLAKE_WKS_GT4) + +#define IS_KBL_GT1(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT1_5 || \ + (devid) == PCI_CHIP_KABYLAKE_ULX_GT1_5 || \ + (devid) == PCI_CHIP_KABYLAKE_DT_GT1_5 || \ + (devid) == PCI_CHIP_KABYLAKE_ULT_GT1 || \ + (devid) == PCI_CHIP_KABYLAKE_ULX_GT1 || \ + (devid) == PCI_CHIP_KABYLAKE_DT_GT1 || \ + (devid) == PCI_CHIP_KABYLAKE_HALO_GT1 || \ + (devid) == PCI_CHIP_KABYLAKE_SRV_GT1) + +#define IS_KBL_GT2(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT2 || \ + (devid) == PCI_CHIP_KABYLAKE_ULT_GT2F || \ + (devid) == PCI_CHIP_KABYLAKE_ULX_GT2 || \ + (devid) == PCI_CHIP_KABYLAKE_DT_GT2 || \ + (devid) == PCI_CHIP_KABYLAKE_HALO_GT2 || \ + (devid) == PCI_CHIP_KABYLAKE_SRV_GT2 || \ + (devid) == PCI_CHIP_KABYLAKE_WKS_GT2) + +#define IS_KBL_GT3(devid) ((devid) == PCI_CHIP_KABYLAKE_ULT_GT3 || \ + (devid) == PCI_CHIP_KABYLAKE_HALO_GT3 || \ + (devid) == PCI_CHIP_KABYLAKE_SRV_GT3) + +#define IS_KBL_GT4(devid) ((devid) == PCI_CHIP_KABYLAKE_DT_GT4 || \ + (devid) == PCI_CHIP_KABYLAKE_HALO_GT4 || \ + (devid) == PCI_CHIP_KABYLAKE_SRV_GT4 || \ + (devid) == PCI_CHIP_KABYLAKE_WKS_GT4) + +#define IS_KABYLAKE(devid) (IS_KBL_GT1(devid) || \ + IS_KBL_GT2(devid) || \ + IS_KBL_GT3(devid) || \ + IS_KBL_GT4(devid)) #define IS_SKYLAKE(devid) (IS_SKL_GT1(devid) || \ IS_SKL_GT2(devid) || \ - IS_SKL_GT3(devid)) + IS_SKL_GT3(devid) || \ + IS_SKL_GT4(devid)) #define IS_BROXTON(devid) ((devid) == PCI_CHIP_BROXTON_0 || \ (devid) == PCI_CHIP_BROXTON_1 || \ (devid) == PCI_CHIP_BROXTON_2) #define IS_GEN9(devid) (IS_SKYLAKE(devid) || \ - IS_BROXTON(devid)) + IS_BROXTON(devid) || \ + IS_KABYLAKE(devid)) #define IS_9XX(dev) (IS_GEN3(dev) || \ IS_GEN4(dev) || \ diff --git a/lib/libdrm/intel/intel_decode.c b/lib/libdrm/intel/intel_decode.c index e7aef742a..287c34277 100644 --- a/lib/libdrm/intel/intel_decode.c +++ b/lib/libdrm/intel/intel_decode.c @@ -38,8 +38,6 @@ #include "intel_chipset.h" #include "intel_bufmgr.h" -/* The compiler throws ~90 warnings. Do not spam the build, until we fix them. */ -#pragma GCC diagnostic ignored "-Wmissing-field-initializers" /* Struct for tracking drm_intel_decode state. */ struct drm_intel_decode { |
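The upper_32_bits() helper added above shifts in two 16-bit steps so the same macro compiles cleanly even when the quantity happens to be 32 bits wide; a single `>> 32` would trigger the "right shift count >= width of type" warning the comment mentions. A small, self-contained illustration of the split used by the new DBG() messages, with the macros adapted from the patch (uint32_t in place of __u32) and an arbitrary sample offset:

/* Splitting a 64-bit presumed offset into the two halves printed by the
 * "0x%08x %08x" DBG format strings in the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((uint32_t)(n))

int main(void)
{
	uint64_t offset64 = 0x00000001fffff000ull;	/* example 48-bit address */

	/* Prints "0x00000001 fffff000". */
	printf("0x%08x %08x\n", upper_32_bits(offset64), lower_32_bits(offset64));
	return 0;
}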
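Both new bufmgr hooks are only installed after probing the kernel in drm_intel_bufmgr_gem_init(): I915_PARAM_HAS_EXEC_SOFTPIN gates bo_set_softpin_offset, and on gen8+ an I915_PARAM_HAS_ALIASING_PPGTT value of 3 (full 48-bit PPGTT) gates bo_use_48b_address_range. A stand-alone sketch of the same GETPARAM probes is below; `fd` is assumed to be an already-open i915 DRM device node.

/* Hedged sketch of the feature probes performed by the patch. */
#include <stdbool.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

static void probe_softpin_and_48b(int fd, bool *has_softpin, bool *has_48b_ppgtt)
{
	struct drm_i915_getparam gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
	gp.value = &value;
	*has_softpin = (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && value > 0);

	value = 0;
	gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
	gp.value = &value;
	/* A reported value of 3 means full (48-bit) PPGTT. */
	*has_48b_ppgtt = (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && value == 3);
}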