Diffstat (limited to 'lib/mesa/src/gallium/drivers/iris')
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_batch.c              |  59
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_batch.h              |  19
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_blit.c               | 108
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_blorp.c              |   2
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_bufmgr.c             | 114
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_bufmgr.h             |  26
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_clear.c              |  49
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_context.c            |  15
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_context.h            | 108
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_disk_cache.c         |   2
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_draw.c               |  16
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_fence.c              |  18
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_fence.h              |   3
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_formats.c            | 302
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_monitor.c            |   9
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_perf.c               |   6
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_performance_query.c  |   7
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_pipe_control.c       |   4
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_program.c            |  44
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_program_cache.c      |  70
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_query.c              |  21
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_resolve.c            | 547
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_resource.c           |  76
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_resource.h           |   7
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_screen.c             |   9
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_screen.h             |  99
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/iris_state.c              | 186
-rw-r--r--  lib/mesa/src/gallium/drivers/iris/meson.build               |   2
28 files changed, 1135 insertions, 793 deletions
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_batch.c b/lib/mesa/src/gallium/drivers/iris/iris_batch.c
index ec12bfa3f..120aaf9cb 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_batch.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_batch.c
@@ -63,6 +63,12 @@
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
+/* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
+ * or 12 bytes for MI_BATCH_BUFFER_START (when chaining). Plus, we may
+ * need an extra 4 bytes to pad out to the nearest QWord. So reserve 16.
+ */
+#define BATCH_RESERVED 16
+
static void
iris_batch_reset(struct iris_batch *batch);
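
This hunk moves the BATCH_RESERVED definition into iris_batch.c; the matching iris_batch.h hunk below stops subtracting the reservation from BATCH_SZ. Either way, the accounting the comment describes is the same. A minimal standalone sketch of that space check (toy code, assuming 'used' is what iris_batch_bytes_used() reports):

   #include <stdbool.h>
   #include <stdint.h>

   #define BATCH_SZ       (64 * 1024)
   #define BATCH_RESERVED 16

   /* Keep BATCH_RESERVED bytes free so the 4-byte MI_BATCH_BUFFER_END or the
    * 12-byte chaining MI_BATCH_BUFFER_START, plus QWord padding, always fits. */
   static bool batch_has_space(uint32_t used, uint32_t bytes_needed)
   {
      return used + bytes_needed <= BATCH_SZ - BATCH_RESERVED;
   }
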
@@ -104,11 +110,11 @@ dump_validation_list(struct iris_batch *batch)
uint64_t flags = batch->validation_list[i].flags;
assert(batch->validation_list[i].handle ==
batch->exec_bos[i]->gem_handle);
- fprintf(stderr, "[%2d]: %2d %-14s @ 0x%"PRIx64" (%"PRIu64"B)\t %2d refs %s\n",
+ fprintf(stderr, "[%2d]: %2d %-14s @ 0x%016llx (%"PRIu64"B)\t %2d refs %s\n",
i,
batch->validation_list[i].handle,
batch->exec_bos[i]->name,
- (uint64_t)batch->validation_list[i].offset,
+ batch->validation_list[i].offset,
batch->exec_bos[i]->size,
batch->exec_bos[i]->refcount,
(flags & EXEC_OBJECT_WRITE) ? " (write)" : "");
@@ -169,6 +175,7 @@ decode_batch(struct iris_batch *batch)
void
iris_init_batch(struct iris_batch *batch,
struct iris_screen *screen,
+ struct iris_vtable *vtbl,
struct pipe_debug_callback *dbg,
struct pipe_device_reset_callback *reset,
struct hash_table_u64 *state_sizes,
@@ -177,6 +184,7 @@ iris_init_batch(struct iris_batch *batch,
int priority)
{
batch->screen = screen;
+ batch->vtbl = vtbl;
batch->dbg = dbg;
batch->reset = reset;
batch->state_sizes = state_sizes;
@@ -356,24 +364,6 @@ create_batch(struct iris_batch *batch)
}
static void
-iris_batch_maybe_noop(struct iris_batch *batch)
-{
- /* We only insert the NOOP at the beginning of the batch. */
- assert(iris_batch_bytes_used(batch) == 0);
-
- if (batch->noop_enabled) {
- /* Emit MI_BATCH_BUFFER_END to prevent any further command to be
- * executed.
- */
- uint32_t *map = batch->map_next;
-
- map[0] = (0xA << 23);
-
- batch->map_next += 4;
- }
-}
-
-static void
iris_batch_reset(struct iris_batch *batch)
{
struct iris_screen *screen = batch->screen;
@@ -392,8 +382,6 @@ iris_batch_reset(struct iris_batch *batch)
iris_syncpt_reference(screen, &syncpt, NULL);
iris_cache_sets_clear(batch);
-
- iris_batch_maybe_noop(batch);
}
void
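
The removed iris_batch_maybe_noop() implemented INTEL_BLACKHOLE_RENDER by writing MI_BATCH_BUFFER_END as the very first dword of the batch, so the GPU stops before executing any real commands. For reference, a small sketch of the MI encoding used by the magic (0xA << 23) constant above (MI commands carry their opcode in bits 28:23):

   #include <stdint.h>

   /* MI_BATCH_BUFFER_END is MI opcode 0x0A, so the whole dword is (0xA << 23). */
   #define MI_BATCH_BUFFER_END_DWORD (0xAu << 23)

   /* Sketch: terminate an already-mapped, still-empty batch so that nothing
    * appended later will ever execute. */
   static void emit_batch_end(uint32_t *map)
   {
      map[0] = MI_BATCH_BUFFER_END_DWORD;
   }
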
@@ -662,10 +650,6 @@ _iris_batch_flush(struct iris_batch *batch, const char *file, int line)
if (unlikely(INTEL_DEBUG &
(DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL))) {
- const char *basefile = strstr(file, "iris/");
- if (basefile)
- file = basefile + 5;
-
fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5db (%0.1f%%) "
"(cmds), %4d BOs (%0.1fMb aperture)\n",
file, line, batch_name_to_string(batch->name), batch->hw_ctx_id,
@@ -741,26 +725,3 @@ iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
return find_validation_entry(batch, bo) != NULL;
}
-
-/**
- * Updates the state of the noop feature.
- */
-uint64_t
-iris_batch_prepare_noop(struct iris_batch *batch, bool noop_enable, uint64_t dirty_flags)
-{
- if (batch->noop_enabled == noop_enable)
- return 0;
-
- batch->noop_enabled = noop_enable;
-
- iris_batch_flush(batch);
-
- /* If the batch was empty, flush had no effect, so insert our noop. */
- if (iris_batch_bytes_used(batch) == 0)
- iris_batch_maybe_noop(batch);
-
- /* We only need to update the entire state if we transition from noop ->
- * not-noop.
- */
- return !batch->noop_enabled ? dirty_flags : 0;
-}
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_batch.h b/lib/mesa/src/gallium/drivers/iris/iris_batch.h
index 19ad39597..42c0f3e78 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_batch.h
+++ b/lib/mesa/src/gallium/drivers/iris/iris_batch.h
@@ -38,14 +38,8 @@
/* The kernel assumes batchbuffers are smaller than 256kB. */
#define MAX_BATCH_SIZE (256 * 1024)
-/* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
- * or 12 bytes for MI_BATCH_BUFFER_START (when chaining). Plus, we may
- * need an extra 4 bytes to pad out to the nearest QWord. So reserve 16.
- */
-#define BATCH_RESERVED 16
-
/* Our target batch size - flush approximately at this point. */
-#define BATCH_SZ (64 * 1024 - BATCH_RESERVED)
+#define BATCH_SZ (64 * 1024)
enum iris_batch_name {
IRIS_BATCH_RENDER,
@@ -62,6 +56,7 @@ struct iris_address {
struct iris_batch {
struct iris_screen *screen;
+ struct iris_vtable *vtbl;
struct pipe_debug_callback *dbg;
struct pipe_device_reset_callback *reset;
@@ -90,11 +85,6 @@ struct iris_batch {
int exec_count;
int exec_array_size;
- /** Whether INTEL_BLACKHOLE_RENDER is enabled in the batch (aka first
- * instruction is a MI_BATCH_BUFFER_END).
- */
- bool noop_enabled;
-
/**
* A list of iris_syncpts associated with this batch.
*
@@ -143,6 +133,7 @@ struct iris_batch {
void iris_init_batch(struct iris_batch *batch,
struct iris_screen *screen,
+ struct iris_vtable *vtbl,
struct pipe_debug_callback *dbg,
struct pipe_device_reset_callback *reset,
struct hash_table_u64 *state_sizes,
@@ -158,10 +149,6 @@ void _iris_batch_flush(struct iris_batch *batch, const char *file, int line);
bool iris_batch_references(struct iris_batch *batch, struct iris_bo *bo);
-uint64_t iris_batch_prepare_noop(struct iris_batch *batch,
- bool noop_enable,
- uint64_t dirty_flags);
-
#define RELOC_WRITE EXEC_OBJECT_WRITE
void iris_use_pinned_bo(struct iris_batch *batch, struct iris_bo *bo,
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_blit.c b/lib/mesa/src/gallium/drivers/iris/iris_blit.c
index f0a43e959..4b43901d4 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_blit.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_blit.c
@@ -229,7 +229,8 @@ apply_blit_scissor(const struct pipe_scissor_state *scissor,
}
void
-iris_blorp_surf_for_resource(struct isl_device *isl_dev,
+iris_blorp_surf_for_resource(struct iris_vtable *vtbl,
+ struct isl_device *isl_dev,
struct blorp_surf *surf,
struct pipe_resource *p_res,
enum isl_aux_usage aux_usage,
@@ -318,21 +319,6 @@ tex_cache_flush_hack(struct iris_batch *batch,
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
}
-static enum isl_aux_usage
-iris_resource_blorp_write_aux_usage(struct iris_context *ice,
- struct iris_resource *res,
- enum isl_format render_format)
-{
- if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
- ISL_SURF_USAGE_STENCIL_BIT)) {
- assert(render_format == res->surf.format);
- return res->aux.usage;
- } else {
- return iris_resource_render_aux_usage(ice, res, render_format,
- false, false);
- }
-}
-
/**
* The pipe->blit() driver hook.
*
@@ -374,9 +360,9 @@ iris_blit(struct pipe_context *ctx, const struct pipe_blit_info *info)
iris_resource_texture_aux_usage(ice, src_res, src_fmt.fmt);
if (iris_resource_level_has_hiz(src_res, info->src.level))
- assert(src_res->surf.format == src_fmt.fmt);
+ src_aux_usage = ISL_AUX_USAGE_NONE;
- bool src_clear_supported = isl_aux_usage_has_fast_clears(src_aux_usage) &&
+ bool src_clear_supported = src_aux_usage != ISL_AUX_USAGE_NONE &&
src_res->surf.format == src_fmt.fmt;
iris_resource_prepare_access(ice, batch, src_res, info->src.level, 1,
@@ -387,14 +373,14 @@ iris_blit(struct pipe_context *ctx, const struct pipe_blit_info *info)
iris_format_for_usage(devinfo, info->dst.format,
ISL_SURF_USAGE_RENDER_TARGET_BIT);
enum isl_aux_usage dst_aux_usage =
- iris_resource_blorp_write_aux_usage(ice, dst_res, dst_fmt.fmt);
- bool dst_clear_supported = isl_aux_usage_has_fast_clears(dst_aux_usage);
+ iris_resource_render_aux_usage(ice, dst_res, dst_fmt.fmt, false, false);
+ bool dst_clear_supported = dst_aux_usage != ISL_AUX_USAGE_NONE;
struct blorp_surf src_surf, dst_surf;
- iris_blorp_surf_for_resource(&screen->isl_dev, &src_surf,
+ iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &src_surf,
info->src.resource, src_aux_usage,
info->src.level, false);
- iris_blorp_surf_for_resource(&screen->isl_dev, &dst_surf,
+ iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &dst_surf,
info->dst.resource, dst_aux_usage,
info->dst.level, true);
@@ -519,7 +505,23 @@ iris_blit(struct pipe_context *ctx, const struct pipe_blit_info *info)
iris_format_for_usage(devinfo, stc_dst->base.format,
ISL_SURF_USAGE_RENDER_TARGET_BIT);
stc_dst_aux_usage =
- iris_resource_blorp_write_aux_usage(ice, stc_dst, dst_fmt.fmt);
+ iris_resource_render_aux_usage(ice, stc_dst, dst_fmt.fmt, false, false);
+
+ /* Resolve destination surface before blit because :
+ * 1. when we try to blit from the same surface, we can't read and
+ * write to the same surfaces at the same time when we have
+ * compression enabled so it's safe to resolve surface first and then
+ * do blit.
+ * 2. While bliting from one surface to another surface, we might be
+ * mixing compression formats, Our experiments shows that if after
+ * blit if we set DepthStencilResource flag to 0, blit passes but
+ * clear fails.
+ *
+ * XXX: In second case by destructing the compression, we might lose
+ * some performance.
+ */
+ if (devinfo->gen >= 12)
+ stc_dst_aux_usage = ISL_AUX_USAGE_NONE;
iris_resource_prepare_access(ice, batch, src_res, info->src.level, 1,
info->src.box.z, info->src.box.depth,
@@ -527,10 +529,10 @@ iris_blit(struct pipe_context *ctx, const struct pipe_blit_info *info)
iris_resource_prepare_access(ice, batch, stc_dst, info->dst.level, 1,
info->dst.box.z, info->dst.box.depth,
stc_dst_aux_usage, false);
- iris_blorp_surf_for_resource(&screen->isl_dev, &src_surf,
+ iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &src_surf,
&src_res->base, stc_src_aux_usage,
info->src.level, false);
- iris_blorp_surf_for_resource(&screen->isl_dev, &dst_surf,
+ iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &dst_surf,
&stc_dst->base, stc_dst_aux_usage,
info->dst.level, true);
@@ -569,41 +571,41 @@ iris_blit(struct pipe_context *ctx, const struct pipe_blit_info *info)
}
static void
-get_copy_region_aux_settings(struct iris_context *ice,
+get_copy_region_aux_settings(const struct gen_device_info *devinfo,
struct iris_resource *res,
enum isl_aux_usage *out_aux_usage,
bool *out_clear_supported,
bool is_render_target)
{
- struct iris_screen *screen = (void *) ice->ctx.screen;
- const struct gen_device_info *devinfo = &screen->devinfo;
-
switch (res->aux.usage) {
case ISL_AUX_USAGE_HIZ:
- case ISL_AUX_USAGE_HIZ_CCS:
- case ISL_AUX_USAGE_HIZ_CCS_WT:
- if (is_render_target) {
- *out_aux_usage = res->aux.usage;
+ if (!is_render_target && iris_sample_with_depth_aux(devinfo, res)) {
+ *out_aux_usage = ISL_AUX_USAGE_HIZ;
+ *out_clear_supported = true;
} else {
- *out_aux_usage = iris_resource_texture_aux_usage(ice, res,
- res->surf.format);
+ *out_aux_usage = ISL_AUX_USAGE_NONE;
+ *out_clear_supported = false;
}
- *out_clear_supported = (*out_aux_usage != ISL_AUX_USAGE_NONE);
break;
case ISL_AUX_USAGE_MCS:
case ISL_AUX_USAGE_MCS_CCS:
case ISL_AUX_USAGE_CCS_E:
- *out_aux_usage = res->aux.usage;
- /* Prior to Gen9, fast-clear only supported 0/1 clear colors. Since
- * we're going to re-interpret the format as an integer format possibly
- * with a different number of components, we can't handle clear colors
- * until Gen9.
+ /* A stencil resolve operation must be performed prior to doing resource
+ * copies or used by CPU.
+ * (see HSD 1209978162)
*/
- *out_clear_supported = devinfo->gen >= 9;
- break;
- case ISL_AUX_USAGE_STC_CCS:
- *out_aux_usage = res->aux.usage;
- *out_clear_supported = false;
+ if (is_render_target && isl_surf_usage_is_stencil(res->surf.usage)) {
+ *out_aux_usage = ISL_AUX_USAGE_NONE;
+ *out_clear_supported = false;
+ } else {
+ *out_aux_usage = res->aux.usage;
+ /* Prior to Gen9, fast-clear only supported 0/1 clear colors. Since
+ * we're going to re-interpret the format as an integer format possibly
+ * with a different number of components, we can't handle clear colors
+ * until Gen9.
+ */
+ *out_clear_supported = devinfo->gen >= 9;
+ }
break;
default:
*out_aux_usage = ISL_AUX_USAGE_NONE;
@@ -633,14 +635,15 @@ iris_copy_region(struct blorp_context *blorp,
struct blorp_batch blorp_batch;
struct iris_context *ice = blorp->driver_ctx;
struct iris_screen *screen = (void *) ice->ctx.screen;
+ const struct gen_device_info *devinfo = &screen->devinfo;
struct iris_resource *src_res = (void *) src;
struct iris_resource *dst_res = (void *) dst;
enum isl_aux_usage src_aux_usage, dst_aux_usage;
bool src_clear_supported, dst_clear_supported;
- get_copy_region_aux_settings(ice, src_res, &src_aux_usage,
+ get_copy_region_aux_settings(devinfo, src_res, &src_aux_usage,
&src_clear_supported, false);
- get_copy_region_aux_settings(ice, dst_res, &dst_aux_usage,
+ get_copy_region_aux_settings(devinfo, dst_res, &dst_aux_usage,
&dst_clear_supported, true);
if (iris_batch_references(batch, src_res->bo))
@@ -667,9 +670,9 @@ iris_copy_region(struct blorp_context *blorp,
// XXX: what about one surface being a buffer and not the other?
struct blorp_surf src_surf, dst_surf;
- iris_blorp_surf_for_resource(&screen->isl_dev, &src_surf,
+ iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &src_surf,
src, src_aux_usage, src_level, false);
- iris_blorp_surf_for_resource(&screen->isl_dev, &dst_surf,
+ iris_blorp_surf_for_resource(&ice->vtbl, &screen->isl_dev, &dst_surf,
dst, dst_aux_usage, dst_level, true);
iris_resource_prepare_access(ice, batch, src_res, src_level, 1,
@@ -728,7 +731,6 @@ iris_resource_copy_region(struct pipe_context *ctx,
const struct pipe_box *src_box)
{
struct iris_context *ice = (void *) ctx;
- struct iris_screen *screen = (void *) ctx->screen;
struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
struct iris_resource *src = (void *) p_src;
struct iris_resource *dst = (void *) p_dst;
@@ -747,8 +749,8 @@ iris_resource_copy_region(struct pipe_context *ctx,
iris_emit_pipe_control_flush(batch,
"stall for MI_COPY_MEM_MEM copy_region",
PIPE_CONTROL_CS_STALL);
- screen->vtbl.copy_mem_mem(batch, dst_bo, dstx, iris_resource_bo(p_src),
- src_box->x, src_box->width);
+ ice->vtbl.copy_mem_mem(batch, dst_bo, dstx, iris_resource_bo(p_src),
+ src_box->x, src_box->width);
return;
}
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_blorp.c b/lib/mesa/src/gallium/drivers/iris/iris_blorp.c
index fdc4c49ea..3162571d8 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_blorp.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_blorp.c
@@ -164,7 +164,7 @@ blorp_alloc_binding_table(struct blorp_batch *blorp_batch,
iris_use_pinned_bo(batch, binder->bo, false);
- batch->screen->vtbl.update_surface_base_address(batch, binder);
+ ice->vtbl.update_surface_base_address(batch, binder);
}
static void *
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_bufmgr.c b/lib/mesa/src/gallium/drivers/iris/iris_bufmgr.c
index 3fada9710..3ed2204d4 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_bufmgr.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_bufmgr.c
@@ -177,7 +177,6 @@ struct iris_bufmgr {
struct util_vma_heap vma_allocator[IRIS_MEMZONE_COUNT];
bool has_llc:1;
- bool has_mmap_offset:1;
bool bo_reuse:1;
struct gen_aux_map_context *aux_map_ctx;
@@ -194,6 +193,10 @@ static int bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
static void bo_free(struct iris_bo *bo);
+static uint64_t vma_alloc(struct iris_bufmgr *bufmgr,
+ enum iris_memory_zone memzone,
+ uint64_t size, uint64_t alignment);
+
static struct iris_bo *
find_and_ref_external_bo(struct hash_table *ht, unsigned int key)
{
@@ -714,6 +717,7 @@ iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
p_atomic_set(&bo->refcount, 1);
bo->size = open_arg.size;
+ bo->gtt_offset = 0;
bo->bufmgr = bufmgr;
bo->gem_handle = open_arg.handle;
bo->name = name;
@@ -942,74 +946,11 @@ print_flags(unsigned flags)
}
static void *
-iris_bo_gem_mmap_legacy(struct pipe_debug_callback *dbg,
- struct iris_bo *bo, bool wc)
-{
- struct iris_bufmgr *bufmgr = bo->bufmgr;
-
- struct drm_i915_gem_mmap mmap_arg = {
- .handle = bo->gem_handle,
- .size = bo->size,
- .flags = wc ? I915_MMAP_WC : 0,
- };
-
- int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
- if (ret != 0) {
- DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
- __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
- return NULL;
- }
- void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
-
- return map;
-}
-
-static void *
-iris_bo_gem_mmap_offset(struct pipe_debug_callback *dbg, struct iris_bo *bo,
- bool wc)
-{
- struct iris_bufmgr *bufmgr = bo->bufmgr;
-
- struct drm_i915_gem_mmap_offset mmap_arg = {
- .handle = bo->gem_handle,
- .flags = wc ? I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
- };
-
- /* Get the fake offset back */
- int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &mmap_arg);
- if (ret != 0) {
- DBG("%s:%d: Error preparing buffer %d (%s): %s .\n",
- __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
- return NULL;
- }
-
- /* And map it */
- void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
- bufmgr->fd, mmap_arg.offset);
- if (map == MAP_FAILED) {
- DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
- __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
- return NULL;
- }
-
- return map;
-}
-
-static void *
-iris_bo_gem_mmap(struct pipe_debug_callback *dbg, struct iris_bo *bo, bool wc)
-{
- struct iris_bufmgr *bufmgr = bo->bufmgr;
-
- if (bufmgr->has_mmap_offset)
- return iris_bo_gem_mmap_offset(dbg, bo, wc);
- else
- return iris_bo_gem_mmap_legacy(dbg, bo, wc);
-}
-
-static void *
iris_bo_map_cpu(struct pipe_debug_callback *dbg,
struct iris_bo *bo, unsigned flags)
{
+ struct iris_bufmgr *bufmgr = bo->bufmgr;
+
/* We disallow CPU maps for writing to non-coherent buffers, as the
* CPU map can become invalidated when a batch is flushed out, which
* can happen at unpredictable times. You should use WC maps instead.
@@ -1018,11 +959,18 @@ iris_bo_map_cpu(struct pipe_debug_callback *dbg,
if (!bo->map_cpu) {
DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);
- void *map = iris_bo_gem_mmap(dbg, bo, false);
- if (!map) {
+
+ struct drm_i915_gem_mmap mmap_arg = {
+ .handle = bo->gem_handle,
+ .size = bo->size,
+ };
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ if (ret != 0) {
+ DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
return NULL;
}
-
+ void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
VG_DEFINED(map, bo->size);
if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
@@ -1067,13 +1015,24 @@ static void *
iris_bo_map_wc(struct pipe_debug_callback *dbg,
struct iris_bo *bo, unsigned flags)
{
+ struct iris_bufmgr *bufmgr = bo->bufmgr;
+
if (!bo->map_wc) {
DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);
- void *map = iris_bo_gem_mmap(dbg, bo, true);
- if (!map) {
+
+ struct drm_i915_gem_mmap mmap_arg = {
+ .handle = bo->gem_handle,
+ .size = bo->size,
+ .flags = I915_MMAP_WC,
+ };
+ int ret = gen_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ if (ret != 0) {
+ DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
+ __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
return NULL;
}
+ void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
VG_DEFINED(map, bo->size);
if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
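
Both map paths cache the mapping on the BO with an atomic compare-and-swap: whichever thread publishes first wins, and the loser unmaps its private copy. A standalone sketch of that idiom, modeling Mesa's p_atomic_cmpxchg with C11 atomics (toy names, not the driver's API):

   #include <stdatomic.h>
   #include <stddef.h>

   struct toy_bo {
      _Atomic(void *) map_cpu;
   };

   static void toy_unmap(void *map) { (void)map; /* would be munmap(map, size) */ }

   /* Return the one shared mapping, creating it on demand. */
   static void *toy_get_map(struct toy_bo *bo, void *(*do_mmap)(void))
   {
      void *map = atomic_load(&bo->map_cpu);
      if (map)
         return map;

      map = do_mmap();                          /* fresh private mapping */
      void *expected = NULL;
      if (!atomic_compare_exchange_strong(&bo->map_cpu, &expected, map)) {
         /* Another thread published a mapping first: drop ours, use theirs. */
         toy_unmap(map);
         map = expected;
      }
      return map;
   }
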
@@ -1780,18 +1739,6 @@ static struct gen_mapped_pinned_buffer_alloc aux_map_allocator = {
.free = gen_aux_map_buffer_free,
};
-static int
-gem_param(int fd, int name)
-{
- int v = -1; /* No param uses (yet) the sign bit, reserve it for errors */
-
- struct drm_i915_getparam gp = { .param = name, .value = &v };
- if (gen_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
- return -1;
-
- return v;
-}
-
/**
* Initializes the GEM buffer manager, which uses the kernel to allocate, map,
* and manage map buffer objections.
@@ -1832,7 +1779,6 @@ iris_bufmgr_create(struct gen_device_info *devinfo, int fd, bool bo_reuse)
bufmgr->has_llc = devinfo->has_llc;
bufmgr->bo_reuse = bo_reuse;
- bufmgr->has_mmap_offset = gem_param(fd, I915_PARAM_MMAP_GTT_VERSION) >= 4;
STATIC_ASSERT(IRIS_MEMZONE_SHADER_START == 0ull);
const uint64_t _4GB = 1ull << 32;
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_bufmgr.h b/lib/mesa/src/gallium/drivers/iris/iris_bufmgr.h
index 084de82c3..831f7adce 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_bufmgr.h
+++ b/lib/mesa/src/gallium/drivers/iris/iris_bufmgr.h
@@ -102,9 +102,6 @@ struct iris_bo {
/** Buffer manager context associated with this buffer object */
struct iris_bufmgr *bufmgr;
- /** Pre-computed hash using _mesa_hash_pointer for cache tracking sets */
- uint32_t hash;
-
/** The GEM handle for this buffer object. */
uint32_t gem_handle;
@@ -136,6 +133,15 @@ struct iris_bo {
*/
unsigned index;
+ /**
+ * Boolean of whether the GPU is definitely not accessing the buffer.
+ *
+ * This is only valid when reusable, since non-reusable
+ * buffers are those that have been shared with other
+ * processes, so we don't know their state.
+ */
+ bool idle;
+
int refcount;
const char *name;
@@ -171,15 +177,6 @@ struct iris_bo {
struct list_head exports;
/**
- * Boolean of whether the GPU is definitely not accessing the buffer.
- *
- * This is only valid when reusable, since non-reusable
- * buffers are those that have been shared with other
- * processes, so we don't know their state.
- */
- bool idle;
-
- /**
* Boolean of whether this buffer can be re-used
*/
bool reusable;
@@ -198,6 +195,9 @@ struct iris_bo {
* Boolean of whether this buffer points into user memory
*/
bool userptr;
+
+ /** Pre-computed hash using _mesa_hash_pointer for cache tracking sets */
+ uint32_t hash;
};
#define BO_ALLOC_ZEROED (1<<0)
@@ -259,7 +259,7 @@ void iris_bo_unreference(struct iris_bo *bo);
#define MAP_PERSISTENT PIPE_TRANSFER_PERSISTENT
#define MAP_COHERENT PIPE_TRANSFER_COHERENT
/* internal */
-#define MAP_INTERNAL_MASK (0xffu << 24)
+#define MAP_INTERNAL_MASK (0xff << 24)
#define MAP_RAW (0x01 << 24)
#define MAP_FLAGS (MAP_READ | MAP_WRITE | MAP_ASYNC | \
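
One subtle point in the MAP_INTERNAL_MASK hunk: 0xff << 24 shifts a signed int into the sign bit, which overflows INT_MAX and is undefined behavior in C; that is presumably why the other side of the diff carries the u suffix. A two-line illustration:

   /* 0xff is a signed int, so shifting it left by 24 overflows INT_MAX (UB).
    * The unsigned literal yields a well-defined 0xff000000u. */
   #define MASK_RISKY (0xff  << 24)
   #define MASK_OK    (0xffu << 24)
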
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_clear.c b/lib/mesa/src/gallium/drivers/iris/iris_clear.c
index 6f8636c0f..958a3d247 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_clear.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_clear.c
@@ -78,7 +78,7 @@ can_fast_clear_color(struct iris_context *ice,
if (INTEL_DEBUG & DEBUG_NO_FAST_CLEAR)
return false;
- if (!isl_aux_usage_has_fast_clears(res->aux.usage))
+ if (res->aux.usage == ISL_AUX_USAGE_NONE)
return false;
/* Check for partial clear */
@@ -219,7 +219,7 @@ fast_clear_color(struct iris_context *ice,
* is not something that should happen often, we stall on the CPU here
* to resolve the predication, and then proceed.
*/
- batch->screen->vtbl.resolve_conditional_render(ice);
+ ice->vtbl.resolve_conditional_render(ice);
if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
return;
@@ -305,7 +305,7 @@ fast_clear_color(struct iris_context *ice,
blorp_batch_init(&ice->blorp, &blorp_batch, batch, blorp_flags);
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
+ iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
p_res, res->aux.usage, level, true);
/* In newer gens (> 9), the hardware will do a linear -> sRGB conversion of
@@ -376,7 +376,7 @@ clear_color(struct iris_context *ice,
box->z, box->depth, aux_usage);
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
+ iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
p_res, aux_usage, level, true);
struct blorp_batch blorp_batch;
@@ -475,7 +475,7 @@ fast_clear_depth(struct iris_context *ice,
* even more complex, so the easiest thing to do when the fast clear
* depth is changing is to stall on the CPU and resolve the predication.
*/
- batch->screen->vtbl.resolve_conditional_render(ice);
+ ice->vtbl.resolve_conditional_render(ice);
if (ice->state.predicate == IRIS_PREDICATE_STATE_DONT_RENDER)
return;
@@ -582,13 +582,13 @@ clear_depth_stencil(struct iris_context *ice,
/* At this point, we might have fast cleared the depth buffer. So if there's
* no stencil clear pending, return early.
*/
- if (!(clear_depth || (clear_stencil && stencil_res))) {
+ if (!(clear_depth || clear_stencil)) {
return;
}
if (clear_depth && z_res) {
iris_resource_prepare_depth(ice, batch, z_res, level, box->z, box->depth);
- iris_blorp_surf_for_resource(&batch->screen->isl_dev,
+ iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev,
&z_surf, &z_res->base, z_res->aux.usage,
level, true);
}
@@ -600,7 +600,7 @@ clear_depth_stencil(struct iris_context *ice,
if (stencil_mask) {
iris_resource_prepare_access(ice, batch, stencil_res, level, 1, box->z,
box->depth, stencil_res->aux.usage, false);
- iris_blorp_surf_for_resource(&batch->screen->isl_dev,
+ iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev,
&stencil_surf, &stencil_res->base,
stencil_res->aux.usage, level, true);
}
@@ -636,7 +636,6 @@ clear_depth_stencil(struct iris_context *ice,
static void
iris_clear(struct pipe_context *ctx,
unsigned buffers,
- const struct pipe_scissor_state *scissor_state,
const union pipe_color_union *p_color,
double depth,
unsigned stencil)
@@ -646,23 +645,15 @@ iris_clear(struct pipe_context *ctx,
assert(buffers != 0);
- struct pipe_box box = {
- .width = cso_fb->width,
- .height = cso_fb->height,
- };
-
- if (scissor_state) {
- box.x = scissor_state->minx;
- box.y = scissor_state->miny;
- box.width = MIN2(box.width, scissor_state->maxx - scissor_state->minx);
- box.height = MIN2(box.height, scissor_state->maxy - scissor_state->miny);
- }
-
if (buffers & PIPE_CLEAR_DEPTHSTENCIL) {
struct pipe_surface *psurf = cso_fb->zsbuf;
+ struct pipe_box box = {
+ .width = cso_fb->width,
+ .height = cso_fb->height,
+ .depth = psurf->u.tex.last_layer - psurf->u.tex.first_layer + 1,
+ .z = psurf->u.tex.first_layer,
+ };
- box.depth = psurf->u.tex.last_layer - psurf->u.tex.first_layer + 1;
- box.z = psurf->u.tex.first_layer,
clear_depth_stencil(ice, psurf->texture, psurf->u.tex.level, &box, true,
buffers & PIPE_CLEAR_DEPTH,
buffers & PIPE_CLEAR_STENCIL,
@@ -677,8 +668,12 @@ iris_clear(struct pipe_context *ctx,
if (buffers & (PIPE_CLEAR_COLOR0 << i)) {
struct pipe_surface *psurf = cso_fb->cbufs[i];
struct iris_surface *isurf = (void *) psurf;
- box.depth = psurf->u.tex.last_layer - psurf->u.tex.first_layer + 1,
- box.z = psurf->u.tex.first_layer,
+ struct pipe_box box = {
+ .width = cso_fb->width,
+ .height = cso_fb->height,
+ .depth = psurf->u.tex.last_layer - psurf->u.tex.first_layer + 1,
+ .z = psurf->u.tex.first_layer,
+ };
clear_color(ice, psurf->texture, psurf->u.tex.level, &box,
true, isurf->view.format, isurf->view.swizzle,
@@ -716,10 +711,10 @@ iris_clear_texture(struct pipe_context *ctx,
uint8_t stencil = 0;
if (fmt_desc->unpack_z_float)
- util_format_unpack_z_float(p_res->format, &depth, data, 1);
+ fmt_desc->unpack_z_float(&depth, 0, data, 0, 1, 1);
if (fmt_desc->unpack_s_8uint)
- util_format_unpack_s_8uint(p_res->format, &stencil, data, 1);
+ fmt_desc->unpack_s_8uint(&stencil, 0, data, 0, 1, 1);
clear_depth_stencil(ice, p_res, level, box, true, true, true,
depth, stencil);
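
The clear_texture hunk switches between two generations of the util_format unpack API: the util_format_unpack_z_float()/util_format_unpack_s_8uint() wrappers versus calling the per-format function pointers on struct util_format_description directly. A hedged sketch of the pointer-based form kept here (header path and exact signatures vary between Mesa versions, so treat this as illustrative only):

   #include "util/u_format.h"   /* util_format_description(); path differs in newer trees */

   /* Illustrative: pull a depth float and a stencil byte out of one packed
    * texel via the per-format unpack hooks, as iris_clear_texture does. */
   static void unpack_depth_stencil(enum pipe_format pf, const void *data,
                                    float *depth, uint8_t *stencil)
   {
      const struct util_format_description *desc = util_format_description(pf);

      if (desc->unpack_z_float)
         desc->unpack_z_float(depth, 0, data, 0, 1, 1);   /* 1x1 texel, no stride */
      if (desc->unpack_s_8uint)
         desc->unpack_s_8uint(stencil, 0, data, 0, 1, 1);
   }
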
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_context.c b/lib/mesa/src/gallium/drivers/iris/iris_context.c
index 862105b84..dc2a09a57 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_context.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_context.c
@@ -84,12 +84,12 @@ iris_lost_context_state(struct iris_batch *batch)
ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
- batch->screen->vtbl.init_render_context(batch);
+ ice->vtbl.init_render_context(batch);
} else if (batch->name == IRIS_BATCH_COMPUTE) {
ice = container_of(batch, ice, batches[IRIS_BATCH_COMPUTE]);
assert(&ice->batches[IRIS_BATCH_COMPUTE] == batch);
- batch->screen->vtbl.init_compute_context(batch);
+ ice->vtbl.init_compute_context(batch);
} else {
unreachable("unhandled batch reset");
}
@@ -99,7 +99,7 @@ iris_lost_context_state(struct iris_batch *batch)
memset(ice->state.last_grid, 0, sizeof(ice->state.last_grid));
batch->last_surface_base_address = ~0ull;
batch->last_aux_map_state = 0;
- batch->screen->vtbl.lost_genx_state(ice, batch);
+ ice->vtbl.lost_genx_state(ice, batch);
}
static enum pipe_reset_status
@@ -190,12 +190,11 @@ static void
iris_destroy_context(struct pipe_context *ctx)
{
struct iris_context *ice = (struct iris_context *)ctx;
- struct iris_screen *screen = (struct iris_screen *)ctx->screen;
if (ctx->stream_uploader)
u_upload_destroy(ctx->stream_uploader);
- screen->vtbl.destroy_state(ice);
+ ice->vtbl.destroy_state(ice);
iris_destroy_program_cache(ice);
iris_destroy_border_color_pool(ice);
u_upload_destroy(ice->state.surface_uploader);
@@ -304,13 +303,13 @@ iris_create_context(struct pipe_screen *pscreen, void *priv, unsigned flags)
ice->state.sizes = _mesa_hash_table_u64_create(ice);
for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
- iris_init_batch(&ice->batches[i], screen, &ice->dbg,
+ iris_init_batch(&ice->batches[i], screen, &ice->vtbl, &ice->dbg,
&ice->reset, ice->state.sizes,
ice->batches, (enum iris_batch_name) i, priority);
}
- screen->vtbl.init_render_context(&ice->batches[IRIS_BATCH_RENDER]);
- screen->vtbl.init_compute_context(&ice->batches[IRIS_BATCH_COMPUTE]);
+ ice->vtbl.init_render_context(&ice->batches[IRIS_BATCH_RENDER]);
+ ice->vtbl.init_compute_context(&ice->batches[IRIS_BATCH_COMPUTE]);
return ctx;
}
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_context.h b/lib/mesa/src/gallium/drivers/iris/iris_context.h
index 1b4ceb810..48bc8696f 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_context.h
+++ b/lib/mesa/src/gallium/drivers/iris/iris_context.h
@@ -25,7 +25,6 @@
#include "pipe/p_context.h"
#include "pipe/p_state.h"
-#include "util/slab.h"
#include "util/u_debug.h"
#include "intel/blorp/blorp.h"
#include "intel/dev/gen_debug.h"
@@ -362,9 +361,6 @@ struct iris_uncompiled_shader {
bool needs_edge_flag;
- /* Whether shader uses atomic operations. */
- bool uses_atomic_load_store;
-
/** Constant data scraped from the shader by nir_opt_large_constants */
struct pipe_resource *const_data;
@@ -410,8 +406,6 @@ struct iris_binding_table {
* (iris_uncompiled_shader), due to state-based recompiles (brw_*_prog_key).
*/
struct iris_compiled_shader {
- struct list_head link;
-
/** Reference to the uploaded assembly. */
struct iris_state_ref assembly;
@@ -497,6 +491,90 @@ struct iris_stream_output_target {
};
/**
+ * Virtual table for generation-specific (genxml) function calls.
+ */
+struct iris_vtable {
+ void (*destroy_state)(struct iris_context *ice);
+ void (*init_render_context)(struct iris_batch *batch);
+ void (*init_compute_context)(struct iris_batch *batch);
+ void (*upload_render_state)(struct iris_context *ice,
+ struct iris_batch *batch,
+ const struct pipe_draw_info *draw);
+ void (*update_surface_base_address)(struct iris_batch *batch,
+ struct iris_binder *binder);
+ void (*upload_compute_state)(struct iris_context *ice,
+ struct iris_batch *batch,
+ const struct pipe_grid_info *grid);
+ void (*rebind_buffer)(struct iris_context *ice,
+ struct iris_resource *res);
+ void (*resolve_conditional_render)(struct iris_context *ice);
+ void (*load_register_reg32)(struct iris_batch *batch, uint32_t dst,
+ uint32_t src);
+ void (*load_register_reg64)(struct iris_batch *batch, uint32_t dst,
+ uint32_t src);
+ void (*load_register_imm32)(struct iris_batch *batch, uint32_t reg,
+ uint32_t val);
+ void (*load_register_imm64)(struct iris_batch *batch, uint32_t reg,
+ uint64_t val);
+ void (*load_register_mem32)(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset);
+ void (*load_register_mem64)(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset);
+ void (*store_register_mem32)(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset,
+ bool predicated);
+ void (*store_register_mem64)(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset,
+ bool predicated);
+ void (*store_data_imm32)(struct iris_batch *batch,
+ struct iris_bo *bo, uint32_t offset,
+ uint32_t value);
+ void (*store_data_imm64)(struct iris_batch *batch,
+ struct iris_bo *bo, uint32_t offset,
+ uint64_t value);
+ void (*copy_mem_mem)(struct iris_batch *batch,
+ struct iris_bo *dst_bo, uint32_t dst_offset,
+ struct iris_bo *src_bo, uint32_t src_offset,
+ unsigned bytes);
+ void (*emit_raw_pipe_control)(struct iris_batch *batch,
+ const char *reason, uint32_t flags,
+ struct iris_bo *bo, uint32_t offset,
+ uint64_t imm);
+
+ void (*emit_mi_report_perf_count)(struct iris_batch *batch,
+ struct iris_bo *bo,
+ uint32_t offset_in_bytes,
+ uint32_t report_id);
+
+ unsigned (*derived_program_state_size)(enum iris_program_cache_id id);
+ void (*store_derived_program_state)(struct iris_context *ice,
+ enum iris_program_cache_id cache_id,
+ struct iris_compiled_shader *shader);
+ uint32_t *(*create_so_decl_list)(const struct pipe_stream_output_info *sol,
+ const struct brw_vue_map *vue_map);
+ void (*populate_vs_key)(const struct iris_context *ice,
+ const struct shader_info *info,
+ gl_shader_stage last_stage,
+ struct iris_vs_prog_key *key);
+ void (*populate_tcs_key)(const struct iris_context *ice,
+ struct iris_tcs_prog_key *key);
+ void (*populate_tes_key)(const struct iris_context *ice,
+ const struct shader_info *info,
+ gl_shader_stage last_stage,
+ struct iris_tes_prog_key *key);
+ void (*populate_gs_key)(const struct iris_context *ice,
+ const struct shader_info *info,
+ gl_shader_stage last_stage,
+ struct iris_gs_prog_key *key);
+ void (*populate_fs_key)(const struct iris_context *ice,
+ const struct shader_info *info,
+ struct iris_fs_prog_key *key);
+ void (*populate_cs_key)(const struct iris_context *ice,
+ struct iris_cs_prog_key *key);
+ void (*lost_genx_state)(struct iris_context *ice, struct iris_batch *batch);
+};
+
+/**
* A pool containing SAMPLER_BORDER_COLOR_STATE entries.
*
* See iris_border_color.c for more information.
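
The iris_vtable block reintroduced above is how generation-independent code reaches the genxml-compiled, generation-specific implementations: each genX build fills in the function pointers once at context creation, and callers go through ice->vtbl (rather than screen->vtbl, as on the other side of this diff). A standalone sketch of the dispatch pattern, with purely hypothetical names rather than the actual genX symbols:

   #include <stdio.h>

   /* Toy vtable: generation-specific implementations are chosen once, then
    * every caller dispatches through the function pointer. */
   struct toy_vtbl {
      void (*init_render_context)(void);
   };

   static void gen9_init_render_context(void)  { puts("gen9 init");  }
   static void gen12_init_render_context(void) { puts("gen12 init"); }

   static void toy_init_vtbl(struct toy_vtbl *vtbl, int gen)
   {
      vtbl->init_render_context =
         gen >= 12 ? gen12_init_render_context : gen9_init_render_context;
   }

   int main(void)
   {
      struct toy_vtbl vtbl;
      toy_init_vtbl(&vtbl, 12);
      vtbl.init_render_context();   /* analogous to ice->vtbl.init_render_context(batch) */
      return 0;
   }
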
@@ -527,6 +605,8 @@ struct iris_context {
/** Slab allocator for iris_transfer_map objects. */
struct slab_child_pool transfer_pool;
+ struct iris_vtable vtbl;
+
struct blorp_context blorp;
struct iris_batch batches[IRIS_BATCH_COUNT];
@@ -584,9 +664,6 @@ struct iris_context {
struct iris_compiled_shader *prog[MESA_SHADER_STAGES];
struct brw_vue_map *last_vue_map;
- /** List of shader variants whose deletion has been deferred for now */
- struct list_head deleted_variants[MESA_SHADER_STAGES];
-
struct u_upload_mgr *uploader;
struct hash_table *cache;
@@ -609,9 +686,6 @@ struct iris_context {
struct gen_perf_context *perf_ctx;
- /** Frame number for debug prints */
- uint32_t frame;
-
struct {
uint64_t dirty;
uint64_t dirty_for_nos[IRIS_NOS_COUNT];
@@ -767,12 +841,12 @@ void iris_init_perfquery_functions(struct pipe_context *ctx);
void iris_update_compiled_shaders(struct iris_context *ice);
void iris_update_compiled_compute_shader(struct iris_context *ice);
void iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
- unsigned threads,
uint32_t *dst);
/* iris_blit.c */
-void iris_blorp_surf_for_resource(struct isl_device *isl_dev,
+void iris_blorp_surf_for_resource(struct iris_vtable *vtbl,
+ struct isl_device *isl_dev,
struct blorp_surf *surf,
struct pipe_resource *p_res,
enum isl_aux_usage aux_usage,
@@ -871,14 +945,12 @@ struct iris_compiled_shader *iris_upload_shader(struct iris_context *ice,
const void *iris_find_previous_compile(const struct iris_context *ice,
enum iris_program_cache_id cache_id,
unsigned program_string_id);
-void iris_delete_shader_variants(struct iris_context *ice,
- struct iris_uncompiled_shader *ish);
bool iris_blorp_lookup_shader(struct blorp_batch *blorp_batch,
const void *key,
uint32_t key_size,
uint32_t *kernel_out,
void *prog_data_out);
-bool iris_blorp_upload_shader(struct blorp_batch *blorp_batch, uint32_t stage,
+bool iris_blorp_upload_shader(struct blorp_batch *blorp_batch,
const void *key, uint32_t key_size,
const void *kernel, uint32_t kernel_size,
const struct brw_stage_prog_data *prog_data,
@@ -922,8 +994,6 @@ void gen9_toggle_preemption(struct iris_context *ice,
struct iris_batch *batch,
const struct pipe_draw_info *draw);
-
-
#ifdef genX
# include "iris_genx_protos.h"
#else
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_disk_cache.c b/lib/mesa/src/gallium/drivers/iris/iris_disk_cache.c
index f03860f26..2b5889c49 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_disk_cache.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_disk_cache.c
@@ -196,7 +196,7 @@ iris_disk_cache_retrieve(struct iris_context *ice,
stage == MESA_SHADER_TESS_EVAL ||
stage == MESA_SHADER_GEOMETRY) {
struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
- so_decls = screen->vtbl.create_so_decl_list(&ish->stream_output,
+ so_decls = ice->vtbl.create_so_decl_list(&ish->stream_output,
&vue_prog_data->vue_map);
}
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_draw.c b/lib/mesa/src/gallium/drivers/iris/iris_draw.c
index 76bf55ebe..08132a5ca 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_draw.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_draw.c
@@ -179,7 +179,7 @@ iris_indirect_draw_vbo(struct iris_context *ice,
if (info.indirect->indirect_draw_count &&
ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
/* Upload MI_PREDICATE_RESULT to GPR15.*/
- batch->screen->vtbl.load_register_reg64(batch, CS_GPR(15), MI_PREDICATE_RESULT);
+ ice->vtbl.load_register_reg64(batch, CS_GPR(15), MI_PREDICATE_RESULT);
}
uint64_t orig_dirty = ice->state.dirty;
@@ -191,7 +191,7 @@ iris_indirect_draw_vbo(struct iris_context *ice,
iris_update_draw_parameters(ice, &info);
- batch->screen->vtbl.upload_render_state(ice, batch, &info);
+ ice->vtbl.upload_render_state(ice, batch, &info);
ice->state.dirty &= ~IRIS_ALL_DIRTY_FOR_RENDER;
@@ -201,7 +201,7 @@ iris_indirect_draw_vbo(struct iris_context *ice,
if (info.indirect->indirect_draw_count &&
ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
/* Restore MI_PREDICATE_RESULT. */
- batch->screen->vtbl.load_register_reg64(batch, MI_PREDICATE_RESULT, CS_GPR(15));
+ ice->vtbl.load_register_reg64(batch, MI_PREDICATE_RESULT, CS_GPR(15));
}
/* Put this back for post-draw resolves, we'll clear it again after. */
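
The indirect-draw path parks MI_PREDICATE_RESULT in command-streamer GPR15 around the draw and restores it afterwards, since uploading the render state can clobber the predicate. load_register_reg64 is typically built from two 32-bit MI_LOAD_REGISTER_REG copies; a hedged sketch of that building block (the MMIO offsets are the commonly used values and should be treated as assumptions):

   #include <stdint.h>

   /* Assumed MMIO offsets for the predicate result and the CS GPR file. */
   #define MI_PREDICATE_RESULT 0x2418
   #define CS_GPR(n)           (0x2600 + (n) * 8)

   /* MI_LOAD_REGISTER_REG: MI opcode 0x2A, 3 dwords total (length field = 1). */
   #define MI_LOAD_REGISTER_REG ((0x2Au << 23) | (3 - 2))

   static uint32_t *emit_lrr32(uint32_t *cs, uint32_t dst, uint32_t src)
   {
      *cs++ = MI_LOAD_REGISTER_REG;
      *cs++ = src;   /* source register address  */
      *cs++ = dst;   /* destination register address */
      return cs;
   }

   /* Copy a 64-bit register as two 32-bit halves, the way load_register_reg64
    * is layered on load_register_reg32. */
   static uint32_t *emit_lrr64(uint32_t *cs, uint32_t dst, uint32_t src)
   {
      cs = emit_lrr32(cs, dst,     src);
      cs = emit_lrr32(cs, dst + 4, src + 4);
      return cs;
   }
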
@@ -218,7 +218,7 @@ iris_simple_draw_vbo(struct iris_context *ice,
iris_update_draw_parameters(ice, draw);
- batch->screen->vtbl.upload_render_state(ice, batch, draw);
+ ice->vtbl.upload_render_state(ice, batch, draw);
}
/**
@@ -260,7 +260,7 @@ iris_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
iris_binder_reserve_3d(ice);
- batch->screen->vtbl.update_surface_base_address(batch, &ice->state.binder);
+ ice->vtbl.update_surface_base_address(batch, &ice->state.binder);
iris_handle_always_flush_cache(batch);
@@ -358,17 +358,17 @@ iris_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info *grid)
iris_update_grid_size_resource(ice, grid);
iris_binder_reserve_compute(ice);
- batch->screen->vtbl.update_surface_base_address(batch, &ice->state.binder);
+ ice->vtbl.update_surface_base_address(batch, &ice->state.binder);
if (ice->state.compute_predicate) {
- batch->screen->vtbl.load_register_mem64(batch, MI_PREDICATE_RESULT,
+ ice->vtbl.load_register_mem64(batch, MI_PREDICATE_RESULT,
ice->state.compute_predicate, 0);
ice->state.compute_predicate = NULL;
}
iris_handle_always_flush_cache(batch);
- batch->screen->vtbl.upload_compute_state(ice, batch, grid);
+ ice->vtbl.upload_compute_state(ice, batch, grid);
iris_handle_always_flush_cache(batch);
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_fence.c b/lib/mesa/src/gallium/drivers/iris/iris_fence.c
index 0d3144d61..288c43fb2 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_fence.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_fence.c
@@ -133,8 +133,7 @@ iris_fence_reference(struct pipe_screen *p_screen,
struct pipe_fence_handle **dst,
struct pipe_fence_handle *src)
{
- if (pipe_reference(*dst ? &(*dst)->ref : NULL,
- src ? &src->ref : NULL))
+ if (pipe_reference(&(*dst)->ref, &src->ref))
iris_fence_destroy(p_screen, *dst);
*dst = src;
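
The fence and syncpt reference helpers differ only in whether they guard against NULL before forming &(*dst)->ref and &src->ref; pipe_reference() itself returns true when the outgoing object's refcount hits zero and must be destroyed. A small standalone sketch of the same reference-counting idiom (toy types, not the Gallium ones):

   #include <stdbool.h>
   #include <stddef.h>

   struct refcounted { int count; };
   struct toy_fence  { struct refcounted ref; };

   /* Simplified pipe_reference(): take the new ref, drop the old one, and
    * report whether the old object should now be destroyed. */
   static bool toy_reference(struct refcounted *old_ref, struct refcounted *new_ref)
   {
      if (new_ref)
         new_ref->count++;
      return old_ref && --old_ref->count == 0;
   }

   static void set_fence(struct toy_fence **dst, struct toy_fence *src,
                         void (*destroy)(struct toy_fence *))
   {
      /* The ternaries mirror the guarded form in the hunk above: never form a
       * member address through a NULL object pointer. */
      if (toy_reference(*dst ? &(*dst)->ref : NULL, src ? &src->ref : NULL))
         destroy(*dst);
      *dst = src;
   }
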
@@ -157,10 +156,6 @@ iris_wait_syncpt(struct pipe_screen *p_screen,
return gen_ioctl(screen->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}
-#define CSI "\e["
-#define BLUE_HEADER CSI "0;97;44m"
-#define NORMAL CSI "0m"
-
static void
iris_fence_flush(struct pipe_context *ctx,
struct pipe_fence_handle **out_fence,
@@ -169,17 +164,6 @@ iris_fence_flush(struct pipe_context *ctx,
struct iris_screen *screen = (void *) ctx->screen;
struct iris_context *ice = (struct iris_context *)ctx;
- if (flags & PIPE_FLUSH_END_OF_FRAME) {
- ice->frame++;
-
- if (INTEL_DEBUG & DEBUG_SUBMIT) {
- fprintf(stderr, "%s ::: FRAME %-10u (ctx %p)%-35c%s\n",
- (INTEL_DEBUG & DEBUG_COLOR) ? BLUE_HEADER : "",
- ice->frame, ctx, ' ',
- (INTEL_DEBUG & DEBUG_COLOR) ? NORMAL : "");
- }
- }
-
/* XXX PIPE_FLUSH_DEFERRED */
for (unsigned i = 0; i < IRIS_BATCH_COUNT; i++)
iris_batch_flush(&ice->batches[i]);
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_fence.h b/lib/mesa/src/gallium/drivers/iris/iris_fence.h
index 0ce5fa8eb..caf2ceeb3 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_fence.h
+++ b/lib/mesa/src/gallium/drivers/iris/iris_fence.h
@@ -51,8 +51,7 @@ iris_syncpt_reference(struct iris_screen *screen,
struct iris_syncpt **dst,
struct iris_syncpt *src)
{
- if (pipe_reference(*dst ? &(*dst)->ref : NULL,
- src ? &src->ref: NULL))
+ if (pipe_reference(&(*dst)->ref, &src->ref))
iris_syncpt_destroy(screen, *dst);
*dst = src;
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_formats.c b/lib/mesa/src/gallium/drivers/iris/iris_formats.c
index dc497c161..a35b663a5 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_formats.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_formats.c
@@ -34,12 +34,310 @@
#include "iris_resource.h"
#include "iris_screen.h"
+static enum isl_format
+iris_isl_format_for_pipe_format(enum pipe_format pf)
+{
+ static const enum isl_format table[PIPE_FORMAT_COUNT] = {
+ [0 ... PIPE_FORMAT_COUNT-1] = ISL_FORMAT_UNSUPPORTED,
+
+ [PIPE_FORMAT_B8G8R8A8_UNORM] = ISL_FORMAT_B8G8R8A8_UNORM,
+ [PIPE_FORMAT_B8G8R8X8_UNORM] = ISL_FORMAT_B8G8R8X8_UNORM,
+ [PIPE_FORMAT_B5G5R5A1_UNORM] = ISL_FORMAT_B5G5R5A1_UNORM,
+ [PIPE_FORMAT_B4G4R4A4_UNORM] = ISL_FORMAT_B4G4R4A4_UNORM,
+ [PIPE_FORMAT_B5G6R5_UNORM] = ISL_FORMAT_B5G6R5_UNORM,
+ [PIPE_FORMAT_R10G10B10A2_UNORM] = ISL_FORMAT_R10G10B10A2_UNORM,
+
+ [PIPE_FORMAT_Z16_UNORM] = ISL_FORMAT_R16_UNORM,
+ [PIPE_FORMAT_Z32_UNORM] = ISL_FORMAT_R32_UNORM,
+ [PIPE_FORMAT_Z32_FLOAT] = ISL_FORMAT_R32_FLOAT,
+
+ /* We translate the combined depth/stencil formats to depth only here */
+ [PIPE_FORMAT_Z24_UNORM_S8_UINT] = ISL_FORMAT_R24_UNORM_X8_TYPELESS,
+ [PIPE_FORMAT_Z24X8_UNORM] = ISL_FORMAT_R24_UNORM_X8_TYPELESS,
+ [PIPE_FORMAT_Z32_FLOAT_S8X24_UINT] = ISL_FORMAT_R32_FLOAT,
+
+ [PIPE_FORMAT_S8_UINT] = ISL_FORMAT_R8_UINT,
+ [PIPE_FORMAT_X24S8_UINT] = ISL_FORMAT_R8_UINT,
+ [PIPE_FORMAT_X32_S8X24_UINT] = ISL_FORMAT_R8_UINT,
+
+ [PIPE_FORMAT_R64_FLOAT] = ISL_FORMAT_R64_FLOAT,
+ [PIPE_FORMAT_R64G64_FLOAT] = ISL_FORMAT_R64G64_FLOAT,
+ [PIPE_FORMAT_R64G64B64_FLOAT] = ISL_FORMAT_R64G64B64_FLOAT,
+ [PIPE_FORMAT_R64G64B64A64_FLOAT] = ISL_FORMAT_R64G64B64A64_FLOAT,
+ [PIPE_FORMAT_R32_FLOAT] = ISL_FORMAT_R32_FLOAT,
+ [PIPE_FORMAT_R32G32_FLOAT] = ISL_FORMAT_R32G32_FLOAT,
+ [PIPE_FORMAT_R32G32B32_FLOAT] = ISL_FORMAT_R32G32B32_FLOAT,
+ [PIPE_FORMAT_R32G32B32A32_FLOAT] = ISL_FORMAT_R32G32B32A32_FLOAT,
+ [PIPE_FORMAT_R32_UNORM] = ISL_FORMAT_R32_UNORM,
+ [PIPE_FORMAT_R32G32_UNORM] = ISL_FORMAT_R32G32_UNORM,
+ [PIPE_FORMAT_R32G32B32_UNORM] = ISL_FORMAT_R32G32B32_UNORM,
+ [PIPE_FORMAT_R32G32B32A32_UNORM] = ISL_FORMAT_R32G32B32A32_UNORM,
+ [PIPE_FORMAT_R32_USCALED] = ISL_FORMAT_R32_USCALED,
+ [PIPE_FORMAT_R32G32_USCALED] = ISL_FORMAT_R32G32_USCALED,
+ [PIPE_FORMAT_R32G32B32_USCALED] = ISL_FORMAT_R32G32B32_USCALED,
+ [PIPE_FORMAT_R32G32B32A32_USCALED] = ISL_FORMAT_R32G32B32A32_USCALED,
+ [PIPE_FORMAT_R32_SNORM] = ISL_FORMAT_R32_SNORM,
+ [PIPE_FORMAT_R32G32_SNORM] = ISL_FORMAT_R32G32_SNORM,
+ [PIPE_FORMAT_R32G32B32_SNORM] = ISL_FORMAT_R32G32B32_SNORM,
+ [PIPE_FORMAT_R32G32B32A32_SNORM] = ISL_FORMAT_R32G32B32A32_SNORM,
+ [PIPE_FORMAT_R32_SSCALED] = ISL_FORMAT_R32_SSCALED,
+ [PIPE_FORMAT_R32G32_SSCALED] = ISL_FORMAT_R32G32_SSCALED,
+ [PIPE_FORMAT_R32G32B32_SSCALED] = ISL_FORMAT_R32G32B32_SSCALED,
+ [PIPE_FORMAT_R32G32B32A32_SSCALED] = ISL_FORMAT_R32G32B32A32_SSCALED,
+ [PIPE_FORMAT_R16_UNORM] = ISL_FORMAT_R16_UNORM,
+ [PIPE_FORMAT_R16G16_UNORM] = ISL_FORMAT_R16G16_UNORM,
+ [PIPE_FORMAT_R16G16B16_UNORM] = ISL_FORMAT_R16G16B16_UNORM,
+ [PIPE_FORMAT_R16G16B16A16_UNORM] = ISL_FORMAT_R16G16B16A16_UNORM,
+ [PIPE_FORMAT_R16_USCALED] = ISL_FORMAT_R16_USCALED,
+ [PIPE_FORMAT_R16G16_USCALED] = ISL_FORMAT_R16G16_USCALED,
+ [PIPE_FORMAT_R16G16B16_USCALED] = ISL_FORMAT_R16G16B16_USCALED,
+ [PIPE_FORMAT_R16G16B16A16_USCALED] = ISL_FORMAT_R16G16B16A16_USCALED,
+ [PIPE_FORMAT_R16_SNORM] = ISL_FORMAT_R16_SNORM,
+ [PIPE_FORMAT_R16G16_SNORM] = ISL_FORMAT_R16G16_SNORM,
+ [PIPE_FORMAT_R16G16B16_SNORM] = ISL_FORMAT_R16G16B16_SNORM,
+ [PIPE_FORMAT_R16G16B16A16_SNORM] = ISL_FORMAT_R16G16B16A16_SNORM,
+ [PIPE_FORMAT_R16_SSCALED] = ISL_FORMAT_R16_SSCALED,
+ [PIPE_FORMAT_R16G16_SSCALED] = ISL_FORMAT_R16G16_SSCALED,
+ [PIPE_FORMAT_R16G16B16_SSCALED] = ISL_FORMAT_R16G16B16_SSCALED,
+ [PIPE_FORMAT_R16G16B16A16_SSCALED] = ISL_FORMAT_R16G16B16A16_SSCALED,
+ [PIPE_FORMAT_R8_UNORM] = ISL_FORMAT_R8_UNORM,
+ [PIPE_FORMAT_R8G8_UNORM] = ISL_FORMAT_R8G8_UNORM,
+ [PIPE_FORMAT_R8G8B8_UNORM] = ISL_FORMAT_R8G8B8_UNORM,
+ [PIPE_FORMAT_R8G8B8A8_UNORM] = ISL_FORMAT_R8G8B8A8_UNORM,
+ [PIPE_FORMAT_R8_USCALED] = ISL_FORMAT_R8_USCALED,
+ [PIPE_FORMAT_R8G8_USCALED] = ISL_FORMAT_R8G8_USCALED,
+ [PIPE_FORMAT_R8G8B8_USCALED] = ISL_FORMAT_R8G8B8_USCALED,
+ [PIPE_FORMAT_R8G8B8A8_USCALED] = ISL_FORMAT_R8G8B8A8_USCALED,
+ [PIPE_FORMAT_R8_SNORM] = ISL_FORMAT_R8_SNORM,
+ [PIPE_FORMAT_R8G8_SNORM] = ISL_FORMAT_R8G8_SNORM,
+ [PIPE_FORMAT_R8G8B8_SNORM] = ISL_FORMAT_R8G8B8_SNORM,
+ [PIPE_FORMAT_R8G8B8A8_SNORM] = ISL_FORMAT_R8G8B8A8_SNORM,
+ [PIPE_FORMAT_R8_SSCALED] = ISL_FORMAT_R8_SSCALED,
+ [PIPE_FORMAT_R8G8_SSCALED] = ISL_FORMAT_R8G8_SSCALED,
+ [PIPE_FORMAT_R8G8B8_SSCALED] = ISL_FORMAT_R8G8B8_SSCALED,
+ [PIPE_FORMAT_R8G8B8A8_SSCALED] = ISL_FORMAT_R8G8B8A8_SSCALED,
+ [PIPE_FORMAT_R32_FIXED] = ISL_FORMAT_R32_SFIXED,
+ [PIPE_FORMAT_R32G32_FIXED] = ISL_FORMAT_R32G32_SFIXED,
+ [PIPE_FORMAT_R32G32B32_FIXED] = ISL_FORMAT_R32G32B32_SFIXED,
+ [PIPE_FORMAT_R32G32B32A32_FIXED] = ISL_FORMAT_R32G32B32A32_SFIXED,
+ [PIPE_FORMAT_R16_FLOAT] = ISL_FORMAT_R16_FLOAT,
+ [PIPE_FORMAT_R16G16_FLOAT] = ISL_FORMAT_R16G16_FLOAT,
+ [PIPE_FORMAT_R16G16B16_FLOAT] = ISL_FORMAT_R16G16B16_FLOAT,
+ [PIPE_FORMAT_R16G16B16A16_FLOAT] = ISL_FORMAT_R16G16B16A16_FLOAT,
+
+ [PIPE_FORMAT_R8G8B8_SRGB] = ISL_FORMAT_R8G8B8_UNORM_SRGB,
+ [PIPE_FORMAT_B8G8R8A8_SRGB] = ISL_FORMAT_B8G8R8A8_UNORM_SRGB,
+ [PIPE_FORMAT_B8G8R8X8_SRGB] = ISL_FORMAT_B8G8R8X8_UNORM_SRGB,
+ [PIPE_FORMAT_R8G8B8A8_SRGB] = ISL_FORMAT_R8G8B8A8_UNORM_SRGB,
+
+ [PIPE_FORMAT_DXT1_RGB] = ISL_FORMAT_BC1_UNORM,
+ [PIPE_FORMAT_DXT1_RGBA] = ISL_FORMAT_BC1_UNORM,
+ [PIPE_FORMAT_DXT3_RGBA] = ISL_FORMAT_BC2_UNORM,
+ [PIPE_FORMAT_DXT5_RGBA] = ISL_FORMAT_BC3_UNORM,
+
+ [PIPE_FORMAT_DXT1_SRGB] = ISL_FORMAT_BC1_UNORM_SRGB,
+ [PIPE_FORMAT_DXT1_SRGBA] = ISL_FORMAT_BC1_UNORM_SRGB,
+ [PIPE_FORMAT_DXT3_SRGBA] = ISL_FORMAT_BC2_UNORM_SRGB,
+ [PIPE_FORMAT_DXT5_SRGBA] = ISL_FORMAT_BC3_UNORM_SRGB,
+
+ [PIPE_FORMAT_RGTC1_UNORM] = ISL_FORMAT_BC4_UNORM,
+ [PIPE_FORMAT_RGTC1_SNORM] = ISL_FORMAT_BC4_SNORM,
+ [PIPE_FORMAT_RGTC2_UNORM] = ISL_FORMAT_BC5_UNORM,
+ [PIPE_FORMAT_RGTC2_SNORM] = ISL_FORMAT_BC5_SNORM,
+
+ [PIPE_FORMAT_R10G10B10A2_USCALED] = ISL_FORMAT_R10G10B10A2_USCALED,
+ [PIPE_FORMAT_R11G11B10_FLOAT] = ISL_FORMAT_R11G11B10_FLOAT,
+ [PIPE_FORMAT_R9G9B9E5_FLOAT] = ISL_FORMAT_R9G9B9E5_SHAREDEXP,
+ [PIPE_FORMAT_R1_UNORM] = ISL_FORMAT_R1_UNORM,
+ [PIPE_FORMAT_R10G10B10X2_USCALED] = ISL_FORMAT_R10G10B10X2_USCALED,
+ [PIPE_FORMAT_B10G10R10A2_UNORM] = ISL_FORMAT_B10G10R10A2_UNORM,
+ [PIPE_FORMAT_R8G8B8X8_UNORM] = ISL_FORMAT_R8G8B8X8_UNORM,
+
+ /* Just use red formats for these - they're actually renderable,
+ * and faster to sample than the legacy L/I/A/LA formats.
+ */
+ [PIPE_FORMAT_I8_UNORM] = ISL_FORMAT_R8_UNORM,
+ [PIPE_FORMAT_I8_UINT] = ISL_FORMAT_R8_UINT,
+ [PIPE_FORMAT_I8_SINT] = ISL_FORMAT_R8_SINT,
+ [PIPE_FORMAT_I8_SNORM] = ISL_FORMAT_R8_SNORM,
+ [PIPE_FORMAT_I16_UINT] = ISL_FORMAT_R16_UINT,
+ [PIPE_FORMAT_I16_UNORM] = ISL_FORMAT_R16_UNORM,
+ [PIPE_FORMAT_I16_SINT] = ISL_FORMAT_R16_SINT,
+ [PIPE_FORMAT_I16_SNORM] = ISL_FORMAT_R16_SNORM,
+ [PIPE_FORMAT_I16_FLOAT] = ISL_FORMAT_R16_FLOAT,
+ [PIPE_FORMAT_I32_UINT] = ISL_FORMAT_R32_UINT,
+ [PIPE_FORMAT_I32_SINT] = ISL_FORMAT_R32_SINT,
+ [PIPE_FORMAT_I32_FLOAT] = ISL_FORMAT_R32_FLOAT,
+
+ [PIPE_FORMAT_L8_UINT] = ISL_FORMAT_R8_UINT,
+ [PIPE_FORMAT_L8_UNORM] = ISL_FORMAT_R8_UNORM,
+ [PIPE_FORMAT_L8_SINT] = ISL_FORMAT_R8_SINT,
+ [PIPE_FORMAT_L8_SNORM] = ISL_FORMAT_R8_SNORM,
+ [PIPE_FORMAT_L16_UINT] = ISL_FORMAT_R16_UINT,
+ [PIPE_FORMAT_L16_UNORM] = ISL_FORMAT_R16_UNORM,
+ [PIPE_FORMAT_L16_SINT] = ISL_FORMAT_R16_SINT,
+ [PIPE_FORMAT_L16_SNORM] = ISL_FORMAT_R16_SNORM,
+ [PIPE_FORMAT_L16_FLOAT] = ISL_FORMAT_R16_FLOAT,
+ [PIPE_FORMAT_L32_UINT] = ISL_FORMAT_R32_UINT,
+ [PIPE_FORMAT_L32_SINT] = ISL_FORMAT_R32_SINT,
+ [PIPE_FORMAT_L32_FLOAT] = ISL_FORMAT_R32_FLOAT,
+
+ /* We also map alpha and luminance-alpha formats to red as well,
+ * though most of these (other than A8_UNORM) will be non-renderable.
+ */
+ [PIPE_FORMAT_A8_UINT] = ISL_FORMAT_R8_UINT,
+ [PIPE_FORMAT_A8_UNORM] = ISL_FORMAT_R8_UNORM,
+ [PIPE_FORMAT_A8_SINT] = ISL_FORMAT_R8_SINT,
+ [PIPE_FORMAT_A8_SNORM] = ISL_FORMAT_R8_SNORM,
+ [PIPE_FORMAT_A16_UINT] = ISL_FORMAT_R16_UINT,
+ [PIPE_FORMAT_A16_UNORM] = ISL_FORMAT_R16_UNORM,
+ [PIPE_FORMAT_A16_SINT] = ISL_FORMAT_R16_SINT,
+ [PIPE_FORMAT_A16_SNORM] = ISL_FORMAT_R16_SNORM,
+ [PIPE_FORMAT_A16_FLOAT] = ISL_FORMAT_R16_FLOAT,
+ [PIPE_FORMAT_A32_UINT] = ISL_FORMAT_R32_UINT,
+ [PIPE_FORMAT_A32_SINT] = ISL_FORMAT_R32_SINT,
+ [PIPE_FORMAT_A32_FLOAT] = ISL_FORMAT_R32_FLOAT,
+
+ [PIPE_FORMAT_L8A8_UINT] = ISL_FORMAT_R8G8_UINT,
+ [PIPE_FORMAT_L8A8_UNORM] = ISL_FORMAT_R8G8_UNORM,
+ [PIPE_FORMAT_L8A8_SINT] = ISL_FORMAT_R8G8_SINT,
+ [PIPE_FORMAT_L8A8_SNORM] = ISL_FORMAT_R8G8_SNORM,
+ [PIPE_FORMAT_L16A16_UINT] = ISL_FORMAT_R16G16_UINT,
+ [PIPE_FORMAT_L16A16_UNORM] = ISL_FORMAT_R16G16_UNORM,
+ [PIPE_FORMAT_L16A16_SINT] = ISL_FORMAT_R16G16_SINT,
+ [PIPE_FORMAT_L16A16_SNORM] = ISL_FORMAT_R16G16_SNORM,
+ [PIPE_FORMAT_L16A16_FLOAT] = ISL_FORMAT_R16G16_FLOAT,
+ [PIPE_FORMAT_L32A32_UINT] = ISL_FORMAT_R32G32_UINT,
+ [PIPE_FORMAT_L32A32_SINT] = ISL_FORMAT_R32G32_SINT,
+ [PIPE_FORMAT_L32A32_FLOAT] = ISL_FORMAT_R32G32_FLOAT,
+
+ /* Sadly, we have to use luminance[-alpha] formats for sRGB decoding. */
+ [PIPE_FORMAT_R8_SRGB] = ISL_FORMAT_L8_UNORM_SRGB,
+ [PIPE_FORMAT_L8_SRGB] = ISL_FORMAT_L8_UNORM_SRGB,
+ [PIPE_FORMAT_L8A8_SRGB] = ISL_FORMAT_L8A8_UNORM_SRGB,
+
+ [PIPE_FORMAT_R10G10B10A2_SSCALED] = ISL_FORMAT_R10G10B10A2_SSCALED,
+ [PIPE_FORMAT_R10G10B10A2_SNORM] = ISL_FORMAT_R10G10B10A2_SNORM,
+
+ [PIPE_FORMAT_B10G10R10A2_USCALED] = ISL_FORMAT_B10G10R10A2_USCALED,
+ [PIPE_FORMAT_B10G10R10A2_SSCALED] = ISL_FORMAT_B10G10R10A2_SSCALED,
+ [PIPE_FORMAT_B10G10R10A2_SNORM] = ISL_FORMAT_B10G10R10A2_SNORM,
+
+ [PIPE_FORMAT_R8_UINT] = ISL_FORMAT_R8_UINT,
+ [PIPE_FORMAT_R8G8_UINT] = ISL_FORMAT_R8G8_UINT,
+ [PIPE_FORMAT_R8G8B8_UINT] = ISL_FORMAT_R8G8B8_UINT,
+ [PIPE_FORMAT_R8G8B8A8_UINT] = ISL_FORMAT_R8G8B8A8_UINT,
+
+ [PIPE_FORMAT_R8_SINT] = ISL_FORMAT_R8_SINT,
+ [PIPE_FORMAT_R8G8_SINT] = ISL_FORMAT_R8G8_SINT,
+ [PIPE_FORMAT_R8G8B8_SINT] = ISL_FORMAT_R8G8B8_SINT,
+ [PIPE_FORMAT_R8G8B8A8_SINT] = ISL_FORMAT_R8G8B8A8_SINT,
+
+ [PIPE_FORMAT_R16_UINT] = ISL_FORMAT_R16_UINT,
+ [PIPE_FORMAT_R16G16_UINT] = ISL_FORMAT_R16G16_UINT,
+ [PIPE_FORMAT_R16G16B16_UINT] = ISL_FORMAT_R16G16B16_UINT,
+ [PIPE_FORMAT_R16G16B16A16_UINT] = ISL_FORMAT_R16G16B16A16_UINT,
+
+ [PIPE_FORMAT_R16_SINT] = ISL_FORMAT_R16_SINT,
+ [PIPE_FORMAT_R16G16_SINT] = ISL_FORMAT_R16G16_SINT,
+ [PIPE_FORMAT_R16G16B16_SINT] = ISL_FORMAT_R16G16B16_SINT,
+ [PIPE_FORMAT_R16G16B16A16_SINT] = ISL_FORMAT_R16G16B16A16_SINT,
+
+ [PIPE_FORMAT_R32_UINT] = ISL_FORMAT_R32_UINT,
+ [PIPE_FORMAT_R32G32_UINT] = ISL_FORMAT_R32G32_UINT,
+ [PIPE_FORMAT_R32G32B32_UINT] = ISL_FORMAT_R32G32B32_UINT,
+ [PIPE_FORMAT_R32G32B32A32_UINT] = ISL_FORMAT_R32G32B32A32_UINT,
+
+ [PIPE_FORMAT_R32_SINT] = ISL_FORMAT_R32_SINT,
+ [PIPE_FORMAT_R32G32_SINT] = ISL_FORMAT_R32G32_SINT,
+ [PIPE_FORMAT_R32G32B32_SINT] = ISL_FORMAT_R32G32B32_SINT,
+ [PIPE_FORMAT_R32G32B32A32_SINT] = ISL_FORMAT_R32G32B32A32_SINT,
+
+ [PIPE_FORMAT_B10G10R10A2_UINT] = ISL_FORMAT_B10G10R10A2_UINT,
+
+ [PIPE_FORMAT_ETC1_RGB8] = ISL_FORMAT_ETC1_RGB8,
+
+ [PIPE_FORMAT_R8G8B8X8_SRGB] = ISL_FORMAT_R8G8B8X8_UNORM_SRGB,
+ [PIPE_FORMAT_B10G10R10X2_UNORM] = ISL_FORMAT_B10G10R10X2_UNORM,
+ [PIPE_FORMAT_R16G16B16X16_UNORM] = ISL_FORMAT_R16G16B16X16_UNORM,
+ [PIPE_FORMAT_R16G16B16X16_FLOAT] = ISL_FORMAT_R16G16B16X16_FLOAT,
+ [PIPE_FORMAT_R32G32B32X32_FLOAT] = ISL_FORMAT_R32G32B32X32_FLOAT,
+
+ [PIPE_FORMAT_R10G10B10A2_UINT] = ISL_FORMAT_R10G10B10A2_UINT,
+
+ [PIPE_FORMAT_B5G6R5_SRGB] = ISL_FORMAT_B5G6R5_UNORM_SRGB,
+
+ [PIPE_FORMAT_BPTC_RGBA_UNORM] = ISL_FORMAT_BC7_UNORM,
+ [PIPE_FORMAT_BPTC_SRGBA] = ISL_FORMAT_BC7_UNORM_SRGB,
+ [PIPE_FORMAT_BPTC_RGB_FLOAT] = ISL_FORMAT_BC6H_SF16,
+ [PIPE_FORMAT_BPTC_RGB_UFLOAT] = ISL_FORMAT_BC6H_UF16,
+
+ [PIPE_FORMAT_ETC2_RGB8] = ISL_FORMAT_ETC2_RGB8,
+ [PIPE_FORMAT_ETC2_SRGB8] = ISL_FORMAT_ETC2_SRGB8,
+ [PIPE_FORMAT_ETC2_RGB8A1] = ISL_FORMAT_ETC2_RGB8_PTA,
+ [PIPE_FORMAT_ETC2_SRGB8A1] = ISL_FORMAT_ETC2_SRGB8_PTA,
+ [PIPE_FORMAT_ETC2_RGBA8] = ISL_FORMAT_ETC2_EAC_RGBA8,
+ [PIPE_FORMAT_ETC2_SRGBA8] = ISL_FORMAT_ETC2_EAC_SRGB8_A8,
+ [PIPE_FORMAT_ETC2_R11_UNORM] = ISL_FORMAT_EAC_R11,
+ [PIPE_FORMAT_ETC2_R11_SNORM] = ISL_FORMAT_EAC_SIGNED_R11,
+ [PIPE_FORMAT_ETC2_RG11_UNORM] = ISL_FORMAT_EAC_RG11,
+ [PIPE_FORMAT_ETC2_RG11_SNORM] = ISL_FORMAT_EAC_SIGNED_RG11,
+
+ [PIPE_FORMAT_FXT1_RGB] = ISL_FORMAT_FXT1,
+ [PIPE_FORMAT_FXT1_RGBA] = ISL_FORMAT_FXT1,
+
+ [PIPE_FORMAT_ASTC_4x4] = ISL_FORMAT_ASTC_LDR_2D_4X4_FLT16,
+ [PIPE_FORMAT_ASTC_5x4] = ISL_FORMAT_ASTC_LDR_2D_5X4_FLT16,
+ [PIPE_FORMAT_ASTC_5x5] = ISL_FORMAT_ASTC_LDR_2D_5X5_FLT16,
+ [PIPE_FORMAT_ASTC_6x5] = ISL_FORMAT_ASTC_LDR_2D_6X5_FLT16,
+ [PIPE_FORMAT_ASTC_6x6] = ISL_FORMAT_ASTC_LDR_2D_6X6_FLT16,
+ [PIPE_FORMAT_ASTC_8x5] = ISL_FORMAT_ASTC_LDR_2D_8X5_FLT16,
+ [PIPE_FORMAT_ASTC_8x6] = ISL_FORMAT_ASTC_LDR_2D_8X6_FLT16,
+ [PIPE_FORMAT_ASTC_8x8] = ISL_FORMAT_ASTC_LDR_2D_8X8_FLT16,
+ [PIPE_FORMAT_ASTC_10x5] = ISL_FORMAT_ASTC_LDR_2D_10X5_FLT16,
+ [PIPE_FORMAT_ASTC_10x6] = ISL_FORMAT_ASTC_LDR_2D_10X6_FLT16,
+ [PIPE_FORMAT_ASTC_10x8] = ISL_FORMAT_ASTC_LDR_2D_10X8_FLT16,
+ [PIPE_FORMAT_ASTC_10x10] = ISL_FORMAT_ASTC_LDR_2D_10X10_FLT16,
+ [PIPE_FORMAT_ASTC_12x10] = ISL_FORMAT_ASTC_LDR_2D_12X10_FLT16,
+ [PIPE_FORMAT_ASTC_12x12] = ISL_FORMAT_ASTC_LDR_2D_12X12_FLT16,
+
+ [PIPE_FORMAT_ASTC_4x4_SRGB] = ISL_FORMAT_ASTC_LDR_2D_4X4_U8SRGB,
+ [PIPE_FORMAT_ASTC_5x4_SRGB] = ISL_FORMAT_ASTC_LDR_2D_5X4_U8SRGB,
+ [PIPE_FORMAT_ASTC_5x5_SRGB] = ISL_FORMAT_ASTC_LDR_2D_5X5_U8SRGB,
+ [PIPE_FORMAT_ASTC_6x5_SRGB] = ISL_FORMAT_ASTC_LDR_2D_6X5_U8SRGB,
+ [PIPE_FORMAT_ASTC_6x6_SRGB] = ISL_FORMAT_ASTC_LDR_2D_6X6_U8SRGB,
+ [PIPE_FORMAT_ASTC_8x5_SRGB] = ISL_FORMAT_ASTC_LDR_2D_8X5_U8SRGB,
+ [PIPE_FORMAT_ASTC_8x6_SRGB] = ISL_FORMAT_ASTC_LDR_2D_8X6_U8SRGB,
+ [PIPE_FORMAT_ASTC_8x8_SRGB] = ISL_FORMAT_ASTC_LDR_2D_8X8_U8SRGB,
+ [PIPE_FORMAT_ASTC_10x5_SRGB] = ISL_FORMAT_ASTC_LDR_2D_10X5_U8SRGB,
+ [PIPE_FORMAT_ASTC_10x6_SRGB] = ISL_FORMAT_ASTC_LDR_2D_10X6_U8SRGB,
+ [PIPE_FORMAT_ASTC_10x8_SRGB] = ISL_FORMAT_ASTC_LDR_2D_10X8_U8SRGB,
+ [PIPE_FORMAT_ASTC_10x10_SRGB] = ISL_FORMAT_ASTC_LDR_2D_10X10_U8SRGB,
+ [PIPE_FORMAT_ASTC_12x10_SRGB] = ISL_FORMAT_ASTC_LDR_2D_12X10_U8SRGB,
+ [PIPE_FORMAT_ASTC_12x12_SRGB] = ISL_FORMAT_ASTC_LDR_2D_12X12_U8SRGB,
+
+ [PIPE_FORMAT_A1B5G5R5_UNORM] = ISL_FORMAT_A1B5G5R5_UNORM,
+
+ /* We support these so that we know the API expects no alpha channel.
+ * Otherwise, the state tracker would just give us a format with alpha
+ * and we wouldn't know to override the swizzle to 1.
+ */
+ [PIPE_FORMAT_R16G16B16X16_UINT] = ISL_FORMAT_R16G16B16A16_UINT,
+ [PIPE_FORMAT_R16G16B16X16_SINT] = ISL_FORMAT_R16G16B16A16_SINT,
+ [PIPE_FORMAT_R32G32B32X32_UINT] = ISL_FORMAT_R32G32B32A32_UINT,
+ [PIPE_FORMAT_R32G32B32X32_SINT] = ISL_FORMAT_R32G32B32A32_SINT,
+ [PIPE_FORMAT_R10G10B10X2_SNORM] = ISL_FORMAT_R10G10B10A2_SNORM,
+ };
+ assert(pf < PIPE_FORMAT_COUNT);
+ return table[pf];
+}
+
struct iris_format_info
iris_format_for_usage(const struct gen_device_info *devinfo,
enum pipe_format pformat,
isl_surf_usage_flags_t usage)
{
- enum isl_format format = isl_format_for_pipe_format(pformat);
+ enum isl_format format = iris_isl_format_for_pipe_format(pformat);
struct isl_swizzle swizzle = ISL_SWIZZLE_IDENTITY;
if (format == ISL_FORMAT_UNSUPPORTED)
@@ -121,7 +419,7 @@ iris_is_format_supported(struct pipe_screen *pscreen,
if (pformat == PIPE_FORMAT_NONE)
return true;
- enum isl_format format = isl_format_for_pipe_format(pformat);
+ enum isl_format format = iris_isl_format_for_pipe_format(pformat);
if (format == ISL_FORMAT_UNSUPPORTED)
return false;
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_monitor.c b/lib/mesa/src/gallium/drivers/iris/iris_monitor.c
index 730fae271..f2c0774b1 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_monitor.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_monitor.c
@@ -72,17 +72,16 @@ iris_get_monitor_info(struct pipe_screen *pscreen, unsigned index,
case GEN_PERF_COUNTER_DATA_TYPE_BOOL32:
case GEN_PERF_COUNTER_DATA_TYPE_UINT32:
info->type = PIPE_DRIVER_QUERY_TYPE_UINT;
- assert(counter->raw_max <= UINT32_MAX);
- info->max_value.u32 = (uint32_t)counter->raw_max;
+ info->max_value.u32 = 0;
break;
case GEN_PERF_COUNTER_DATA_TYPE_UINT64:
info->type = PIPE_DRIVER_QUERY_TYPE_UINT64;
- info->max_value.u64 = counter->raw_max;
+ info->max_value.u64 = 0;
break;
case GEN_PERF_COUNTER_DATA_TYPE_FLOAT:
case GEN_PERF_COUNTER_DATA_TYPE_DOUBLE:
info->type = PIPE_DRIVER_QUERY_TYPE_FLOAT;
- info->max_value.f = counter->raw_max;
+ info->max_value.u64 = -1;
break;
default:
assert(false);
@@ -355,7 +354,7 @@ iris_get_monitor_result(struct pipe_context *ctx,
assert(gen_perf_is_query_ready(perf_ctx, monitor->query, batch));
unsigned bytes_written;
- gen_perf_get_query_data(perf_ctx, monitor->query, batch,
+ gen_perf_get_query_data(perf_ctx, monitor->query,
monitor->result_size,
(unsigned*) monitor->result_buffer,
&bytes_written);
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_perf.c b/lib/mesa/src/gallium/drivers/iris/iris_perf.c
index f2fd49721..ab1d32c13 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_perf.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_perf.c
@@ -45,7 +45,7 @@ iris_perf_emit_mi_report_perf_count(void *c,
{
struct iris_context *ice = c;
struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
- batch->screen->vtbl.emit_mi_report_perf_count(batch, bo, offset_in_bytes, report_id);
+ ice->vtbl.emit_mi_report_perf_count(batch, bo, offset_in_bytes, report_id);
}
static void
@@ -63,10 +63,10 @@ iris_perf_store_register_mem(void *ctx, void *bo,
struct iris_context *ice = ctx;
struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];
if (reg_size == 8) {
- batch->screen->vtbl.store_register_mem64(batch, reg, bo, offset, false);
+ ice->vtbl.store_register_mem64(batch, reg, bo, offset, false);
} else {
assert(reg_size == 4);
- batch->screen->vtbl.store_register_mem32(batch, reg, bo, offset, false);
+ ice->vtbl.store_register_mem32(batch, reg, bo, offset, false);
}
}
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_performance_query.c b/lib/mesa/src/gallium/drivers/iris/iris_performance_query.c
index 73b250811..825f4c442 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_performance_query.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_performance_query.c
@@ -97,7 +97,7 @@ iris_new_perf_query_obj(struct pipe_context *pipe, unsigned query_index)
return (struct pipe_query *)&q->base;
}
-static bool
+static void
iris_begin_perf_query(struct pipe_context *pipe, struct pipe_query *q)
{
struct iris_context *ice = (void *) pipe;
@@ -105,7 +105,7 @@ iris_begin_perf_query(struct pipe_context *pipe, struct pipe_query *q)
struct gen_perf_query_object *obj = perf_query->query;
struct gen_perf_context *perf_ctx = ice->perf_ctx;
- return gen_perf_begin_query(perf_ctx, obj);
+ gen_perf_begin_query(perf_ctx, obj);
}
static void
@@ -214,8 +214,7 @@ iris_get_perf_query_data(struct pipe_context *pipe,
struct gen_perf_query_object *obj = perf_query->query;
struct gen_perf_context *perf_ctx = ice->perf_ctx;
- gen_perf_get_query_data(perf_ctx, obj, &ice->batches[IRIS_BATCH_RENDER],
- data_size, data, bytes_written);
+ gen_perf_get_query_data(perf_ctx, obj, data_size, data, bytes_written);
}
void
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_pipe_control.c b/lib/mesa/src/gallium/drivers/iris/iris_pipe_control.c
index 59005894b..75ec25a40 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_pipe_control.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_pipe_control.c
@@ -77,7 +77,7 @@ iris_emit_pipe_control_flush(struct iris_batch *batch,
flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
}
- batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
+ batch->vtbl->emit_raw_pipe_control(batch, reason, flags, NULL, 0, 0);
}
/**
@@ -94,7 +94,7 @@ iris_emit_pipe_control_write(struct iris_batch *batch,
struct iris_bo *bo, uint32_t offset,
uint64_t imm)
{
- batch->screen->vtbl.emit_raw_pipe_control(batch, reason, flags, bo, offset, imm);
+ batch->vtbl->emit_raw_pipe_control(batch, reason, flags, bo, offset, imm);
}
/*
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_program.c b/lib/mesa/src/gallium/drivers/iris/iris_program.c
index 05a093438..d7e470b42 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_program.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_program.c
@@ -432,9 +432,6 @@ iris_setup_uniforms(const struct brw_compiler *compiler,
load_ubo->num_components = intrin->num_components;
load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
load_ubo->src[1] = nir_src_for_ssa(offset);
- nir_intrinsic_set_align(load_ubo,
- nir_intrinsic_align_mul(intrin),
- nir_intrinsic_align_offset(intrin));
nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
intrin->dest.ssa.num_components,
intrin->dest.ssa.bit_size,
@@ -527,7 +524,6 @@ iris_setup_uniforms(const struct brw_compiler *compiler,
load->num_components = comps;
load->src[0] = nir_src_for_ssa(temp_ubo_name);
load->src[1] = nir_src_for_ssa(offset);
- nir_intrinsic_set_align(load, 4, 0);
nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
nir_builder_instr_insert(&b, &load->instr);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
@@ -1097,7 +1093,7 @@ iris_compile_vs(struct iris_context *ice,
brw_compute_vue_map(devinfo,
&vue_prog_data->vue_map, nir->info.outputs_written,
- nir->info.separate_shader, /* pos_slots */ 1);
+ nir->info.separate_shader);
struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);
@@ -1118,7 +1114,7 @@ iris_compile_vs(struct iris_context *ice,
}
uint32_t *so_decls =
- screen->vtbl.create_so_decl_list(&ish->stream_output,
+ ice->vtbl.create_so_decl_list(&ish->stream_output,
&vue_prog_data->vue_map);
struct iris_compiled_shader *shader =
@@ -1140,13 +1136,12 @@ iris_compile_vs(struct iris_context *ice,
static void
iris_update_compiled_vs(struct iris_context *ice)
{
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_VERTEX];
struct iris_vs_prog_key key = { KEY_ID(vue.base) };
- screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
+ ice->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
struct iris_compiled_shader *shader =
@@ -1367,7 +1362,7 @@ iris_update_compiled_tcs(struct iris_context *ice)
};
get_unified_tess_slots(ice, &key.outputs_written,
&key.patch_outputs_written);
- screen->vtbl.populate_tcs_key(ice, &key);
+ ice->vtbl.populate_tcs_key(ice, &key);
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
struct iris_compiled_shader *shader =
@@ -1452,7 +1447,7 @@ iris_compile_tes(struct iris_context *ice,
}
uint32_t *so_decls =
- screen->vtbl.create_so_decl_list(&ish->stream_output,
+ ice->vtbl.create_so_decl_list(&ish->stream_output,
&vue_prog_data->vue_map);
@@ -1475,14 +1470,13 @@ iris_compile_tes(struct iris_context *ice,
static void
iris_update_compiled_tes(struct iris_context *ice)
{
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
struct iris_tes_prog_key key = { KEY_ID(vue.base) };
get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
- screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
+ ice->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
struct iris_compiled_shader *shader =
@@ -1553,7 +1547,7 @@ iris_compile_gs(struct iris_context *ice,
brw_compute_vue_map(devinfo,
&vue_prog_data->vue_map, nir->info.outputs_written,
- nir->info.separate_shader, /* pos_slots */ 1);
+ nir->info.separate_shader);
struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);
@@ -1574,7 +1568,7 @@ iris_compile_gs(struct iris_context *ice,
}
uint32_t *so_decls =
- screen->vtbl.create_so_decl_list(&ish->stream_output,
+ ice->vtbl.create_so_decl_list(&ish->stream_output,
&vue_prog_data->vue_map);
struct iris_compiled_shader *shader =
@@ -1601,11 +1595,10 @@ iris_update_compiled_gs(struct iris_context *ice)
ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
struct iris_compiled_shader *shader = NULL;
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
if (ish) {
struct iris_gs_prog_key key = { KEY_ID(vue.base) };
- screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
+ ice->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
shader =
iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
@@ -1715,8 +1708,7 @@ iris_update_compiled_fs(struct iris_context *ice)
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
struct iris_fs_prog_key key = { KEY_ID(base) };
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
- screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
+ ice->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
@@ -1820,6 +1812,9 @@ get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
return (void *) ice->shaders.prog[stage]->prog_data;
}
+// XXX: iris_compiled_shaders are space-leaking :(
+// XXX: do remember to unbind them if deleting them.
+
/**
* Update the current shader variants for the given state.
*
@@ -1988,8 +1983,7 @@ iris_update_compiled_cs(struct iris_context *ice)
ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
struct iris_cs_prog_key key = { KEY_ID(base) };
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
- screen->vtbl.populate_cs_key(ice, &key);
+ ice->vtbl.populate_cs_key(ice, &key);
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
struct iris_compiled_shader *shader =
@@ -2022,14 +2016,13 @@ iris_update_compiled_compute_shader(struct iris_context *ice)
void
iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
- unsigned threads,
uint32_t *dst)
{
- assert(brw_cs_push_const_total_size(cs_prog_data, threads) > 0);
+ assert(cs_prog_data->push.total.size > 0);
assert(cs_prog_data->push.cross_thread.size == 0);
assert(cs_prog_data->push.per_thread.dwords == 1);
assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
- for (unsigned t = 0; t < threads; t++)
+ for (unsigned t = 0; t < cs_prog_data->threads; t++)
dst[8 * t] = t;
}
@@ -2138,8 +2131,7 @@ iris_create_uncompiled_shader(struct pipe_context *ctx,
brw_preprocess_nir(screen->compiler, nir, NULL);
- NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo,
- &ish->uses_atomic_load_store);
+ NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo);
NIR_PASS_V(nir, iris_lower_storage_image_derefs);
nir_sweep(nir);
@@ -2396,8 +2388,6 @@ iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage
pipe_resource_reference(&ish->const_data_state.res, NULL);
}
- iris_delete_shader_variants(ice, ish);
-
ralloc_free(ish->nir);
free(ish);
}
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_program_cache.c b/lib/mesa/src/gallium/drivers/iris/iris_program_cache.c
index 5870b9334..2c1224482 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_program_cache.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_program_cache.c
@@ -115,59 +115,6 @@ iris_find_previous_compile(const struct iris_context *ice,
return NULL;
}
-void
-iris_delete_shader_variants(struct iris_context *ice,
- struct iris_uncompiled_shader *ish)
-{
- struct hash_table *cache = ice->shaders.cache;
- gl_shader_stage stage = ish->nir->info.stage;
- enum iris_program_cache_id cache_id = stage;
-
- hash_table_foreach(cache, entry) {
- const struct keybox *keybox = entry->key;
- const struct brw_base_prog_key *key = (const void *)keybox->data;
-
- if (keybox->cache_id == cache_id &&
- key->program_string_id == ish->program_id) {
- struct iris_compiled_shader *shader = entry->data;
-
- _mesa_hash_table_remove(cache, entry);
-
- /* Shader variants may still be bound in the context even after
- * the API-facing shader has been deleted. In particular, a draw
- * may not have triggered iris_update_compiled_shaders() yet. In
- * that case, we may be referring to that shader's VUE map, stream
- * output settings, and so on. We also like to compare the old and
- * new shader programs when swapping them out to flag dirty state.
- *
- * So, it's hazardous to delete a bound shader variant. We avoid
- * doing so, choosing to instead move "deleted" shader variants to
- * a list, deferring the actual deletion until they're not bound.
- *
- * For simplicity, we always move deleted variants to the list,
- * even if we could delete them immediately. We'll then process
- * the list, catching both these variants and any others.
- */
- list_addtail(&shader->link, &ice->shaders.deleted_variants[stage]);
- }
- }
-
- /* Process any pending deferred variant deletions. */
- list_for_each_entry_safe(struct iris_compiled_shader, shader,
- &ice->shaders.deleted_variants[stage], link) {
- /* If the shader is still bound, defer deletion. */
- if (ice->shaders.prog[stage] == shader)
- continue;
-
- list_del(&shader->link);
-
- /* Actually delete the variant. */
- pipe_resource_reference(&shader->assembly.res, NULL);
- ralloc_free(shader);
- }
-}
-
-
/**
* Look for an existing entry in the cache that has identical assembly code.
*
@@ -203,10 +150,9 @@ iris_upload_shader(struct iris_context *ice,
const struct iris_binding_table *bt)
{
struct hash_table *cache = ice->shaders.cache;
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
struct iris_compiled_shader *shader =
rzalloc_size(cache, sizeof(struct iris_compiled_shader) +
- screen->vtbl.derived_program_state_size(cache_id));
+ ice->vtbl.derived_program_state_size(cache_id));
const struct iris_compiled_shader *existing =
find_existing_assembly(cache, assembly, prog_data->program_size);
@@ -228,8 +174,6 @@ iris_upload_shader(struct iris_context *ice,
memcpy(shader->map, assembly, prog_data->program_size);
}
- list_inithead(&shader->link);
-
shader->prog_data = prog_data;
shader->streamout = streamout;
shader->system_values = system_values;
@@ -244,7 +188,7 @@ iris_upload_shader(struct iris_context *ice,
ralloc_steal(shader, shader->system_values);
/* Store the 3DSTATE shader packets and other derived state. */
- screen->vtbl.store_derived_program_state(ice, cache_id, shader);
+ ice->vtbl.store_derived_program_state(ice, cache_id, shader);
struct keybox *keybox = make_keybox(shader, cache_id, key, key_size);
_mesa_hash_table_insert(ice->shaders.cache, keybox, shader);
@@ -277,7 +221,7 @@ iris_blorp_lookup_shader(struct blorp_batch *blorp_batch,
}
bool
-iris_blorp_upload_shader(struct blorp_batch *blorp_batch, uint32_t stage,
+iris_blorp_upload_shader(struct blorp_batch *blorp_batch,
const void *key, uint32_t key_size,
const void *kernel, UNUSED uint32_t kernel_size,
const struct brw_stage_prog_data *prog_data_templ,
@@ -317,9 +261,6 @@ iris_init_program_cache(struct iris_context *ice)
ice->shaders.uploader =
u_upload_create(&ice->ctx, 16384, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
IRIS_RESOURCE_FLAG_SHADER_MEMZONE);
-
- for (int i = 0; i < MESA_SHADER_STAGES; i++)
- list_inithead(&ice->shaders.deleted_variants[i]);
}
void
@@ -327,11 +268,6 @@ iris_destroy_program_cache(struct iris_context *ice)
{
for (int i = 0; i < MESA_SHADER_STAGES; i++) {
ice->shaders.prog[i] = NULL;
-
- list_for_each_entry_safe(struct iris_compiled_shader, shader,
- &ice->shaders.deleted_variants[i], link) {
- pipe_resource_reference(&shader->assembly.res, NULL);
- }
}
hash_table_foreach(ice->shaders.cache, entry) {
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_query.c b/lib/mesa/src/gallium/drivers/iris/iris_query.c
index e62669288..b6481200b 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_query.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_query.c
@@ -136,7 +136,7 @@ mark_available(struct iris_context *ice, struct iris_query *q)
offset += q->query_state_ref.offset;
if (!iris_is_query_pipelined(q)) {
- batch->screen->vtbl.store_data_imm64(batch, bo, offset, true);
+ ice->vtbl.store_data_imm64(batch, bo, offset, true);
} else {
/* Order available *after* the query results. */
flags |= PIPE_CONTROL_FLUSH_ENABLE;
@@ -205,14 +205,14 @@ write_value(struct iris_context *ice, struct iris_query *q, unsigned offset)
offset);
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
- batch->screen->vtbl.store_register_mem64(batch,
+ ice->vtbl.store_register_mem64(batch,
q->index == 0 ?
GENX(CL_INVOCATION_COUNT_num) :
SO_PRIM_STORAGE_NEEDED(q->index),
bo, offset, false);
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
- batch->screen->vtbl.store_register_mem64(batch,
+ ice->vtbl.store_register_mem64(batch,
SO_NUM_PRIMS_WRITTEN(q->index),
bo, offset, false);
break;
@@ -232,7 +232,7 @@ write_value(struct iris_context *ice, struct iris_query *q, unsigned offset)
};
const uint32_t reg = index_to_reg[q->index];
- batch->screen->vtbl.store_register_mem64(batch, reg, bo, offset, false);
+ ice->vtbl.store_register_mem64(batch, reg, bo, offset, false);
break;
}
default:
@@ -258,9 +258,9 @@ write_overflow_values(struct iris_context *ice, struct iris_query *q, bool end)
stream[s].num_prims[end]);
int w_idx = offset + offsetof(struct iris_query_so_overflow,
stream[s].prim_storage_needed[end]);
- batch->screen->vtbl.store_register_mem64(batch, SO_NUM_PRIMS_WRITTEN(s),
+ ice->vtbl.store_register_mem64(batch, SO_NUM_PRIMS_WRITTEN(s),
bo, g_idx, false);
- batch->screen->vtbl.store_register_mem64(batch, SO_PRIM_STORAGE_NEEDED(s),
+ ice->vtbl.store_register_mem64(batch, SO_PRIM_STORAGE_NEEDED(s),
bo, w_idx, false);
}
}
@@ -675,7 +675,7 @@ iris_get_query_result_resource(struct pipe_context *ctx,
if (q->syncpt == iris_batch_get_signal_syncpt(batch))
iris_batch_flush(batch);
- batch->screen->vtbl.copy_mem_mem(batch, dst_bo, offset,
+ ice->vtbl.copy_mem_mem(batch, dst_bo, offset,
query_bo, snapshots_landed_offset,
result_type <= PIPE_QUERY_TYPE_U32 ? 4 : 8);
return;
@@ -691,9 +691,9 @@ iris_get_query_result_resource(struct pipe_context *ctx,
if (q->ready) {
/* We happen to have the result on the CPU, so just copy it. */
if (result_type <= PIPE_QUERY_TYPE_U32) {
- batch->screen->vtbl.store_data_imm32(batch, dst_bo, offset, q->result);
+ ice->vtbl.store_data_imm32(batch, dst_bo, offset, q->result);
} else {
- batch->screen->vtbl.store_data_imm64(batch, dst_bo, offset, q->result);
+ ice->vtbl.store_data_imm64(batch, dst_bo, offset, q->result);
}
 /* Make sure the result lands before they use or bind the QBO elsewhere
@@ -865,7 +865,6 @@ void
genX(init_query)(struct iris_context *ice)
{
struct pipe_context *ctx = &ice->ctx;
- struct iris_screen *screen = (struct iris_screen *)ctx->screen;
ctx->create_query = iris_create_query;
ctx->create_batch_query = iris_create_batch_query;
@@ -877,5 +876,5 @@ genX(init_query)(struct iris_context *ice)
ctx->set_active_query_state = iris_set_active_query_state;
ctx->render_condition = iris_render_condition;
- screen->vtbl.resolve_conditional_render = iris_resolve_conditional_render;
+ ice->vtbl.resolve_conditional_render = iris_resolve_conditional_render;
}
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_resolve.c b/lib/mesa/src/gallium/drivers/iris/iris_resolve.c
index 01a2cce1a..eb9c0f0cf 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_resolve.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_resolve.c
@@ -116,11 +116,11 @@ static void
resolve_image_views(struct iris_context *ice,
struct iris_batch *batch,
struct iris_shader_state *shs,
- const struct shader_info *info,
bool *draw_aux_buffer_disabled,
bool consider_framebuffer)
{
- uint32_t views = info ? (shs->bound_image_views & info->images_used) : 0;
+ /* TODO: Consider images used by program */
+ uint32_t views = shs->bound_image_views;
while (views) {
const int i = u_bit_scan(&views);
@@ -137,13 +137,11 @@ resolve_image_views(struct iris_context *ice,
unsigned num_layers =
pview->u.tex.last_layer - pview->u.tex.first_layer + 1;
- enum isl_aux_usage aux_usage =
- iris_image_view_aux_usage(ice, pview, info);
-
+ /* The data port doesn't understand any compression */
iris_resource_prepare_access(ice, batch, res,
pview->u.tex.level, 1,
pview->u.tex.first_layer, num_layers,
- aux_usage, false);
+ ISL_AUX_USAGE_NONE, false);
}
iris_cache_flush_for_read(batch, res->bo);
@@ -173,7 +171,7 @@ iris_predraw_resolve_inputs(struct iris_context *ice,
if (ice->state.dirty & dirty) {
resolve_sampler_views(ice, batch, shs, info, draw_aux_buffer_disabled,
consider_framebuffer);
- resolve_image_views(ice, batch, shs, info, draw_aux_buffer_disabled,
+ resolve_image_views(ice, batch, shs, draw_aux_buffer_disabled,
consider_framebuffer);
}
}
@@ -476,7 +474,7 @@ iris_resolve_color(struct iris_context *ice,
//DBG("%s to mt %p level %u layer %u\n", __FUNCTION__, mt, level, layer);
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
+ iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
&res->base, res->aux.usage, level, true);
iris_batch_maybe_flush(batch, 1500);
@@ -501,7 +499,7 @@ iris_resolve_color(struct iris_context *ice,
/* On Gen >= 12, Stencil buffer with lossless compression needs to be
* resolve with WM_HZ_OP packet.
*/
- if (res->aux.usage == ISL_AUX_USAGE_STC_CCS) {
+ if (isl_surf_usage_is_stencil(res->surf.usage)) {
blorp_hiz_stencil_op(&blorp_batch, &surf, level, layer,
1, resolve_op);
} else {
@@ -529,7 +527,7 @@ iris_mcs_partial_resolve(struct iris_context *ice,
assert(isl_aux_usage_has_mcs(res->aux.usage));
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
+ iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
&res->base, res->aux.usage, 0, true);
struct blorp_batch blorp_batch;
@@ -571,9 +569,10 @@ iris_sample_with_depth_aux(const struct gen_device_info *devinfo,
break;
return false;
case ISL_AUX_USAGE_HIZ_CCS:
+ /* Write through mode must have been enabled for prior writes. */
+ if (isl_surf_supports_hiz_ccs_wt(devinfo, &res->surf, res->aux.usage))
+ break;
return false;
- case ISL_AUX_USAGE_HIZ_CCS_WT:
- break;
default:
return false;
}
@@ -679,7 +678,7 @@ iris_hiz_exec(struct iris_context *ice,
iris_batch_maybe_flush(batch, 1500);
struct blorp_surf surf;
- iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
+ iris_blorp_surf_for_resource(&ice->vtbl, &batch->screen->isl_dev, &surf,
&res->base, res->aux.usage, level, true);
struct blorp_batch blorp_batch;
@@ -711,14 +710,6 @@ iris_hiz_exec(struct iris_context *ice,
PIPE_CONTROL_DEPTH_STALL);
}
-static bool
-level_has_aux(const struct iris_resource *res, uint32_t level)
-{
- return isl_aux_usage_has_hiz(res->aux.usage) ?
- iris_resource_level_has_hiz(res, level) :
- res->aux.usage != ISL_AUX_USAGE_NONE;
-}
-
/**
* Does the resource's slice have hiz enabled?
*/
@@ -798,6 +789,364 @@ iris_has_color_unresolved(const struct iris_resource *res,
return false;
}
+static enum isl_aux_op
+get_ccs_d_resolve_op(enum isl_aux_state aux_state,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_CCS_D);
+
+ const bool ccs_supported =
+ (aux_usage == ISL_AUX_USAGE_CCS_D) && fast_clear_supported;
+
+ switch (aux_state) {
+ case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ if (!ccs_supported)
+ return ISL_AUX_OP_FULL_RESOLVE;
+ else
+ return ISL_AUX_OP_NONE;
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ return ISL_AUX_OP_NONE;
+
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_AUX_INVALID:
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ break;
+ }
+
+ unreachable("Invalid aux state for CCS_D");
+}
+
+static enum isl_aux_op
+get_ccs_e_resolve_op(enum isl_aux_state aux_state,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ /* CCS_E surfaces can be accessed as CCS_D if we're careful. */
+ assert(aux_usage == ISL_AUX_USAGE_NONE ||
+ aux_usage == ISL_AUX_USAGE_CCS_D ||
+ aux_usage == ISL_AUX_USAGE_CCS_E);
+
+ switch (aux_state) {
+ case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ if (fast_clear_supported)
+ return ISL_AUX_OP_NONE;
+ else if (aux_usage == ISL_AUX_USAGE_CCS_E)
+ return ISL_AUX_OP_PARTIAL_RESOLVE;
+ else
+ return ISL_AUX_OP_FULL_RESOLVE;
+
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ if (aux_usage != ISL_AUX_USAGE_CCS_E)
+ return ISL_AUX_OP_FULL_RESOLVE;
+ else if (!fast_clear_supported)
+ return ISL_AUX_OP_PARTIAL_RESOLVE;
+ else
+ return ISL_AUX_OP_NONE;
+
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ if (aux_usage != ISL_AUX_USAGE_CCS_E)
+ return ISL_AUX_OP_FULL_RESOLVE;
+ else
+ return ISL_AUX_OP_NONE;
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ return ISL_AUX_OP_NONE;
+
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_AUX_INVALID:
+ break;
+ }
+
+ unreachable("Invalid aux state for CCS_E");
+}
+
+static void
+iris_resource_prepare_ccs_access(struct iris_context *ice,
+ struct iris_batch *batch,
+ struct iris_resource *res,
+ uint32_t level, uint32_t layer,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ enum isl_aux_state aux_state = iris_resource_get_aux_state(res, level, layer);
+
+ enum isl_aux_op resolve_op;
+ if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
+ resolve_op = get_ccs_e_resolve_op(aux_state, aux_usage,
+ fast_clear_supported);
+ } else {
+ assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
+ resolve_op = get_ccs_d_resolve_op(aux_state, aux_usage,
+ fast_clear_supported);
+ }
+
+ if (resolve_op != ISL_AUX_OP_NONE) {
+ iris_resolve_color(ice, batch, res, level, layer, resolve_op);
+
+ switch (resolve_op) {
+ case ISL_AUX_OP_FULL_RESOLVE:
+ /* The CCS full resolve operation destroys the CCS and sets it to the
+ * pass-through state. (You can also think of this as being both a
+ * resolve and an ambiguate in one operation.)
+ */
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_PASS_THROUGH);
+ break;
+
+ case ISL_AUX_OP_PARTIAL_RESOLVE:
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+ break;
+
+ default:
+ unreachable("Invalid resolve op");
+ }
+ }
+}
+
+static void
+iris_resource_finish_ccs_write(struct iris_context *ice,
+ struct iris_resource *res,
+ uint32_t level, uint32_t layer,
+ enum isl_aux_usage aux_usage)
+{
+ assert(aux_usage == ISL_AUX_USAGE_NONE ||
+ aux_usage == ISL_AUX_USAGE_CCS_D ||
+ aux_usage == ISL_AUX_USAGE_CCS_E);
+
+ enum isl_aux_state aux_state =
+ iris_resource_get_aux_state(res, level, layer);
+
+ if (res->aux.usage == ISL_AUX_USAGE_CCS_E) {
+ switch (aux_state) {
+ case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_CCS_E ||
+ aux_usage == ISL_AUX_USAGE_CCS_D);
+
+ if (aux_usage == ISL_AUX_USAGE_CCS_E) {
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_CLEAR);
+ } else if (aux_state != ISL_AUX_STATE_PARTIAL_CLEAR) {
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_PARTIAL_CLEAR);
+ }
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_CCS_E);
+ break; /* Nothing to do */
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ if (aux_usage == ISL_AUX_USAGE_CCS_E) {
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+ } else {
+ /* Nothing to do */
+ }
+ break;
+
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_AUX_INVALID:
+ unreachable("Invalid aux state for CCS_E");
+ }
+ } else {
+ assert(res->aux.usage == ISL_AUX_USAGE_CCS_D);
+ /* CCS_D is a bit simpler */
+ switch (aux_state) {
+ case ISL_AUX_STATE_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_CCS_D);
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_PARTIAL_CLEAR);
+ break;
+
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ assert(aux_usage == ISL_AUX_USAGE_CCS_D);
+ break; /* Nothing to do */
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ /* Nothing to do */
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_AUX_INVALID:
+ unreachable("Invalid aux state for CCS_D");
+ }
+ }
+}
+
+static void
+iris_resource_prepare_mcs_access(struct iris_context *ice,
+ struct iris_batch *batch,
+ struct iris_resource *res,
+ uint32_t layer,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ assert(isl_aux_usage_has_mcs(aux_usage));
+
+ switch (iris_resource_get_aux_state(res, 0, layer)) {
+ case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ if (!fast_clear_supported) {
+ iris_mcs_partial_resolve(ice, batch, res, layer, 1);
+ iris_resource_set_aux_state(ice, res, 0, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+ }
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ break; /* Nothing to do */
+
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_PASS_THROUGH:
+ case ISL_AUX_STATE_AUX_INVALID:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ unreachable("Invalid aux state for MCS");
+ }
+}
+
+static void
+iris_resource_finish_mcs_write(struct iris_context *ice,
+ struct iris_resource *res,
+ uint32_t layer,
+ enum isl_aux_usage aux_usage)
+{
+ assert(isl_aux_usage_has_mcs(aux_usage));
+
+ switch (iris_resource_get_aux_state(res, 0, layer)) {
+ case ISL_AUX_STATE_CLEAR:
+ iris_resource_set_aux_state(ice, res, 0, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_CLEAR);
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ break; /* Nothing to do */
+
+ case ISL_AUX_STATE_RESOLVED:
+ case ISL_AUX_STATE_PASS_THROUGH:
+ case ISL_AUX_STATE_AUX_INVALID:
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ unreachable("Invalid aux state for MCS");
+ }
+}
+
+static void
+iris_resource_prepare_hiz_access(struct iris_context *ice,
+ struct iris_batch *batch,
+ struct iris_resource *res,
+ uint32_t level, uint32_t layer,
+ enum isl_aux_usage aux_usage,
+ bool fast_clear_supported)
+{
+ assert(aux_usage == ISL_AUX_USAGE_NONE ||
+ aux_usage == ISL_AUX_USAGE_HIZ ||
+ aux_usage == ISL_AUX_USAGE_HIZ_CCS ||
+ aux_usage == ISL_AUX_USAGE_CCS_E);
+
+ enum isl_aux_op hiz_op = ISL_AUX_OP_NONE;
+ switch (iris_resource_get_aux_state(res, level, layer)) {
+ case ISL_AUX_STATE_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ if (aux_usage == ISL_AUX_USAGE_NONE || !fast_clear_supported)
+ hiz_op = ISL_AUX_OP_FULL_RESOLVE;
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ if (aux_usage == ISL_AUX_USAGE_NONE)
+ hiz_op = ISL_AUX_OP_FULL_RESOLVE;
+ break;
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ case ISL_AUX_STATE_RESOLVED:
+ break;
+
+ case ISL_AUX_STATE_AUX_INVALID:
+ if (aux_usage != ISL_AUX_USAGE_NONE)
+ hiz_op = ISL_AUX_OP_AMBIGUATE;
+ break;
+
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ unreachable("Invalid HiZ state");
+ }
+
+ if (hiz_op != ISL_AUX_OP_NONE) {
+ iris_hiz_exec(ice, batch, res, level, layer, 1, hiz_op, false);
+
+ switch (hiz_op) {
+ case ISL_AUX_OP_FULL_RESOLVE:
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_RESOLVED);
+ break;
+
+ case ISL_AUX_OP_AMBIGUATE:
+ /* The HiZ resolve operation is actually an ambiguate */
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_PASS_THROUGH);
+ break;
+
+ default:
+ unreachable("Invalid HiZ op");
+ }
+ }
+}
+
+static void
+iris_resource_finish_hiz_write(struct iris_context *ice,
+ struct iris_resource *res,
+ uint32_t level, uint32_t layer,
+ enum isl_aux_usage aux_usage)
+{
+ assert(aux_usage == ISL_AUX_USAGE_NONE ||
+ isl_aux_usage_has_hiz(aux_usage));
+
+ switch (iris_resource_get_aux_state(res, level, layer)) {
+ case ISL_AUX_STATE_CLEAR:
+ assert(isl_aux_usage_has_hiz(aux_usage));
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_CLEAR);
+ break;
+
+ case ISL_AUX_STATE_COMPRESSED_NO_CLEAR:
+ case ISL_AUX_STATE_COMPRESSED_CLEAR:
+ assert(isl_aux_usage_has_hiz(aux_usage));
+ break; /* Nothing to do */
+
+ case ISL_AUX_STATE_RESOLVED:
+ if (isl_aux_usage_has_hiz(aux_usage)) {
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+ } else {
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_AUX_INVALID);
+ }
+ break;
+
+ case ISL_AUX_STATE_PASS_THROUGH:
+ if (isl_aux_usage_has_hiz(aux_usage)) {
+ iris_resource_set_aux_state(ice, res, level, layer, 1,
+ ISL_AUX_STATE_COMPRESSED_NO_CLEAR);
+ }
+ break;
+
+ case ISL_AUX_STATE_AUX_INVALID:
+ assert(!isl_aux_usage_has_hiz(aux_usage));
+ break;
+
+ case ISL_AUX_STATE_PARTIAL_CLEAR:
+ unreachable("Invalid HiZ state");
+ }
+}
+
void
iris_resource_prepare_access(struct iris_context *ice,
struct iris_batch *batch,
@@ -807,38 +1156,57 @@ iris_resource_prepare_access(struct iris_context *ice,
enum isl_aux_usage aux_usage,
bool fast_clear_supported)
{
- const uint32_t clamped_levels =
- miptree_level_range_length(res, start_level, num_levels);
- for (uint32_t l = 0; l < clamped_levels; l++) {
- const uint32_t level = start_level + l;
- if (!level_has_aux(res, level))
- continue;
+ num_levels = miptree_level_range_length(res, start_level, num_levels);
+ switch (res->aux.usage) {
+ case ISL_AUX_USAGE_NONE:
+ /* Nothing to do */
+ break;
+
+ case ISL_AUX_USAGE_MCS:
+ case ISL_AUX_USAGE_MCS_CCS:
+ assert(start_level == 0 && num_levels == 1);
const uint32_t level_layers =
- miptree_layer_range_length(res, level, start_layer, num_layers);
+ miptree_layer_range_length(res, 0, start_layer, num_layers);
for (uint32_t a = 0; a < level_layers; a++) {
- const uint32_t layer = start_layer + a;
- const enum isl_aux_state aux_state =
- iris_resource_get_aux_state(res, level, layer);
- const enum isl_aux_op aux_op =
- isl_aux_prepare_access(aux_state, aux_usage, fast_clear_supported);
-
- if (aux_op == ISL_AUX_OP_NONE) {
- /* Nothing to do here. */
- } else if (isl_aux_usage_has_mcs(res->aux.usage)) {
- assert(aux_op == ISL_AUX_OP_PARTIAL_RESOLVE);
- iris_mcs_partial_resolve(ice, batch, res, layer, 1);
- } else if (isl_aux_usage_has_hiz(res->aux.usage)) {
- iris_hiz_exec(ice, batch, res, level, layer, 1, aux_op, false);
- } else {
- assert(isl_aux_usage_has_ccs(res->aux.usage));
- iris_resolve_color(ice, batch, res, level, layer, aux_op);
+ iris_resource_prepare_mcs_access(ice, batch, res, start_layer + a,
+ aux_usage, fast_clear_supported);
+ }
+ break;
+
+ case ISL_AUX_USAGE_CCS_D:
+ case ISL_AUX_USAGE_CCS_E:
+ for (uint32_t l = 0; l < num_levels; l++) {
+ const uint32_t level = start_level + l;
+ const uint32_t level_layers =
+ miptree_layer_range_length(res, level, start_layer, num_layers);
+ for (uint32_t a = 0; a < level_layers; a++) {
+ iris_resource_prepare_ccs_access(ice, batch, res, level,
+ start_layer + a,
+ aux_usage, fast_clear_supported);
}
+ }
+ break;
+
+ case ISL_AUX_USAGE_HIZ:
+ case ISL_AUX_USAGE_HIZ_CCS:
+ for (uint32_t l = 0; l < num_levels; l++) {
+ const uint32_t level = start_level + l;
+ if (!iris_resource_level_has_hiz(res, level))
+ continue;
- const enum isl_aux_state new_state =
- isl_aux_state_transition_aux_op(aux_state, res->aux.usage, aux_op);
- iris_resource_set_aux_state(ice, res, level, layer, 1, new_state);
+ const uint32_t level_layers =
+ miptree_layer_range_length(res, level, start_layer, num_layers);
+ for (uint32_t a = 0; a < level_layers; a++) {
+ iris_resource_prepare_hiz_access(ice, batch, res, level,
+ start_layer + a, aux_usage,
+ fast_clear_supported);
+ }
}
+ break;
+
+ default:
+ unreachable("Invalid aux usage");
}
}
@@ -848,19 +1216,41 @@ iris_resource_finish_write(struct iris_context *ice,
uint32_t start_layer, uint32_t num_layers,
enum isl_aux_usage aux_usage)
{
- if (!level_has_aux(res, level))
- return;
-
- const uint32_t level_layers =
- miptree_layer_range_length(res, level, start_layer, num_layers);
-
- for (uint32_t a = 0; a < level_layers; a++) {
- const uint32_t layer = start_layer + a;
- const enum isl_aux_state aux_state =
- iris_resource_get_aux_state(res, level, layer);
- const enum isl_aux_state new_aux_state =
- isl_aux_state_transition_write(aux_state, aux_usage, false);
- iris_resource_set_aux_state(ice, res, level, layer, 1, new_aux_state);
+ num_layers = miptree_layer_range_length(res, level, start_layer, num_layers);
+
+ switch (res->aux.usage) {
+ case ISL_AUX_USAGE_NONE:
+ break;
+
+ case ISL_AUX_USAGE_MCS:
+ case ISL_AUX_USAGE_MCS_CCS:
+ for (uint32_t a = 0; a < num_layers; a++) {
+ iris_resource_finish_mcs_write(ice, res, start_layer + a,
+ aux_usage);
+ }
+ break;
+
+ case ISL_AUX_USAGE_CCS_D:
+ case ISL_AUX_USAGE_CCS_E:
+ for (uint32_t a = 0; a < num_layers; a++) {
+ iris_resource_finish_ccs_write(ice, res, level, start_layer + a,
+ aux_usage);
+ }
+ break;
+
+ case ISL_AUX_USAGE_HIZ:
+ case ISL_AUX_USAGE_HIZ_CCS:
+ if (!iris_resource_level_has_hiz(res, level))
+ return;
+
+ for (uint32_t a = 0; a < num_layers; a++) {
+ iris_resource_finish_hiz_write(ice, res, level, start_layer + a,
+ aux_usage);
+ }
+ break;
+
+ default:
+ unreachable("Invavlid aux usage");
}
}
@@ -954,17 +1344,12 @@ iris_resource_texture_aux_usage(struct iris_context *ice,
break;
case ISL_AUX_USAGE_HIZ_CCS:
- assert(!iris_sample_with_depth_aux(devinfo, res));
- return ISL_AUX_USAGE_NONE;
-
- case ISL_AUX_USAGE_HIZ_CCS_WT:
if (iris_sample_with_depth_aux(devinfo, res))
- return ISL_AUX_USAGE_HIZ_CCS_WT;
+ return ISL_AUX_USAGE_CCS_E;
break;
case ISL_AUX_USAGE_MCS:
case ISL_AUX_USAGE_MCS_CCS:
- case ISL_AUX_USAGE_STC_CCS:
return res->aux.usage;
case ISL_AUX_USAGE_CCS_D:
@@ -988,32 +1373,6 @@ iris_resource_texture_aux_usage(struct iris_context *ice,
return ISL_AUX_USAGE_NONE;
}
-enum isl_aux_usage
-iris_image_view_aux_usage(struct iris_context *ice,
- const struct pipe_image_view *pview,
- const struct shader_info *info)
-{
- if (!info)
- return ISL_AUX_USAGE_NONE;
-
- struct iris_screen *screen = (void *) ice->ctx.screen;
- const struct gen_device_info *devinfo = &screen->devinfo;
- struct iris_resource *res = (void *) pview->resource;
-
- enum isl_format view_format = iris_image_view_get_format(ice, pview);
- enum isl_aux_usage aux_usage =
- iris_resource_texture_aux_usage(ice, res, view_format);
-
- bool uses_atomic_load_store =
- ice->shaders.uncompiled[info->stage]->uses_atomic_load_store;
-
- if ((devinfo->gen == 12 && aux_usage == ISL_AUX_USAGE_CCS_E) &&
- !uses_atomic_load_store)
- return ISL_AUX_USAGE_CCS_E;
-
- return ISL_AUX_USAGE_NONE;
-}
-
static bool
isl_formats_are_fast_clear_compatible(enum isl_format a, enum isl_format b)
{
@@ -1042,7 +1401,7 @@ iris_resource_prepare_texture(struct iris_context *ice,
enum isl_aux_usage aux_usage =
iris_resource_texture_aux_usage(ice, res, view_format);
- bool clear_supported = isl_aux_usage_has_fast_clears(aux_usage);
+ bool clear_supported = aux_usage != ISL_AUX_USAGE_NONE;
/* Clear color is specified as ints or floats and the conversion is done by
* the sampler. If we have a texture view, we would have to perform the
@@ -1107,7 +1466,7 @@ iris_resource_prepare_render(struct iris_context *ice,
{
iris_resource_prepare_access(ice, batch, res, level, 1, start_layer,
layer_count, aux_usage,
- isl_aux_usage_has_fast_clears(aux_usage));
+ aux_usage != ISL_AUX_USAGE_NONE);
}
void
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_resource.c b/lib/mesa/src/gallium/drivers/iris/iris_resource.c
index 0f71c5a85..e6c049c1f 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_resource.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_resource.c
@@ -214,32 +214,6 @@ pipe_bind_to_isl_usage(unsigned bindings)
return usage;
}
-enum isl_format
-iris_image_view_get_format(struct iris_context *ice,
- const struct pipe_image_view *img)
-{
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
- const struct gen_device_info *devinfo = &screen->devinfo;
-
- isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
- enum isl_format isl_fmt =
- iris_format_for_usage(devinfo, img->format, usage).fmt;
-
- if (img->shader_access & PIPE_IMAGE_ACCESS_READ) {
- /* On Gen8, try to use typed surfaces reads (which support a
- * limited number of formats), and if not possible, fall back
- * to untyped reads.
- */
- if (devinfo->gen == 8 &&
- !isl_has_matching_typed_storage_image_format(devinfo, isl_fmt))
- return ISL_FORMAT_RAW;
- else
- return isl_lower_storage_image_format(devinfo, isl_fmt);
- }
-
- return isl_fmt;
-}
-
struct pipe_resource *
iris_resource_get_separate_stencil(struct pipe_resource *p_res)
{
@@ -508,20 +482,8 @@ iris_resource_configure_aux(struct iris_screen *screen,
res->aux.possible_usages |=
1 << (has_ccs ? ISL_AUX_USAGE_MCS_CCS : ISL_AUX_USAGE_MCS);
} else if (has_hiz) {
- if (!has_ccs) {
- res->aux.possible_usages |= 1 << ISL_AUX_USAGE_HIZ;
- } else if (res->surf.samples == 1 &&
- (res->surf.usage & ISL_SURF_USAGE_TEXTURE_BIT)) {
- /* If this resource is single-sampled and will be used as a texture,
- * put the HiZ surface in write-through mode so that we can sample
- * from it.
- */
- res->aux.possible_usages |= 1 << ISL_AUX_USAGE_HIZ_CCS_WT;
- } else {
- res->aux.possible_usages |= 1 << ISL_AUX_USAGE_HIZ_CCS;
- }
- } else if (has_ccs && isl_surf_usage_is_stencil(res->surf.usage)) {
- res->aux.possible_usages |= 1 << ISL_AUX_USAGE_STC_CCS;
+ res->aux.possible_usages |=
+ 1 << (has_ccs ? ISL_AUX_USAGE_HIZ_CCS : ISL_AUX_USAGE_HIZ);
} else if (has_ccs) {
if (want_ccs_e_for_format(devinfo, res->surf.format))
res->aux.possible_usages |= 1 << ISL_AUX_USAGE_CCS_E;
@@ -540,8 +502,11 @@ iris_resource_configure_aux(struct iris_screen *screen,
if (!devinfo->has_sample_with_hiz || res->surf.samples > 1)
res->aux.sampler_usages &= ~(1 << ISL_AUX_USAGE_HIZ);
- /* ISL_AUX_USAGE_HIZ_CCS doesn't support sampling at all */
+ /* We don't always support sampling with HIZ_CCS. But when we do, treat it
+ * as CCS_E. */
res->aux.sampler_usages &= ~(1 << ISL_AUX_USAGE_HIZ_CCS);
+ if (isl_surf_supports_hiz_ccs_wt(devinfo, &res->surf, res->aux.usage))
+ res->aux.sampler_usages |= 1 << ISL_AUX_USAGE_CCS_E;
enum isl_aux_state initial_state;
*aux_size_B = 0;
@@ -554,7 +519,6 @@ iris_resource_configure_aux(struct iris_screen *screen,
return !res->mod_info || res->mod_info->aux_usage == ISL_AUX_USAGE_NONE;
case ISL_AUX_USAGE_HIZ:
case ISL_AUX_USAGE_HIZ_CCS:
- case ISL_AUX_USAGE_HIZ_CCS_WT:
initial_state = ISL_AUX_STATE_AUX_INVALID;
break;
case ISL_AUX_USAGE_MCS:
@@ -572,7 +536,6 @@ iris_resource_configure_aux(struct iris_screen *screen,
break;
case ISL_AUX_USAGE_CCS_D:
case ISL_AUX_USAGE_CCS_E:
- case ISL_AUX_USAGE_STC_CCS:
/* When CCS_E is used, we need to ensure that the CCS starts off in
* a valid state. From the Sky Lake PRM, "MCS Buffer for Render
* Target(s)":
@@ -586,13 +549,11 @@ iris_resource_configure_aux(struct iris_screen *screen,
* For CCS_D, do the same thing. On Gen9+, this avoids having any
* undefined bits in the aux buffer.
*/
- if (imported) {
- assert(res->aux.usage != ISL_AUX_USAGE_STC_CCS);
+ if (imported)
initial_state =
isl_drm_modifier_get_default_aux_state(res->mod_info->modifier);
- } else {
+ else
initial_state = ISL_AUX_STATE_PASS_THROUGH;
- }
*alloc_flags |= BO_ALLOC_ZEROED;
break;
case ISL_AUX_USAGE_MC:
@@ -670,8 +631,20 @@ iris_resource_init_aux_buf(struct iris_resource *res, uint32_t alloc_flags,
res->aux.surf.size_B);
}
+ /* Bspec section titled: MCS/CCS Buffers for Render Target(s) states:
+ * - If Software wants to enable Color Compression without Fast clear,
+ * Software needs to initialize MCS with zeros.
+ * - Lossless compression and CCS initialized to all F (using HW Fast
+ * Clear or SW direct Clear)
+ *
+ * We think the first bullet point above refers to the CCS aux
+ * surface. Since we initialize the MCS in the clear state, we also
+ * initialize the CCS in the clear state (via SW direct clear) to keep
+ * the two in sync.
+ */
memset((char*)map + res->aux.extra_aux.offset,
- 0, res->aux.extra_aux.surf.size_B);
+ isl_aux_usage_has_mcs(res->aux.usage) ? 0xFF : 0,
+ res->aux.extra_aux.surf.size_B);
/* Zero the indirect clear color to match ::fast_clear_color. */
memset((char *)map + res->aux.clear_color_offset, 0,
@@ -1334,7 +1307,7 @@ iris_invalidate_resource(struct pipe_context *ctx,
/* Rebind the buffer, replacing any state referring to the old BO's
* address, and marking state dirty so it's reemitted.
*/
- screen->vtbl.rebind_buffer(ice, res);
+ ice->vtbl.rebind_buffer(ice, res);
util_range_set_empty(&res->valid_buffer_range);
@@ -1854,16 +1827,13 @@ iris_transfer_map(struct pipe_context *ctx,
if (resource->target != PIPE_BUFFER) {
bool need_hiz_resolve = iris_resource_level_has_hiz(res, level);
- bool need_stencil_resolve = res->aux.usage == ISL_AUX_USAGE_STC_CCS;
need_color_resolve =
(res->aux.usage == ISL_AUX_USAGE_CCS_D ||
res->aux.usage == ISL_AUX_USAGE_CCS_E) &&
iris_has_color_unresolved(res, level, 1, box->z, box->depth);
- need_resolve = need_color_resolve ||
- need_hiz_resolve ||
- need_stencil_resolve;
+ need_resolve = need_color_resolve || need_hiz_resolve;
}
bool map_would_stall = false;
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_resource.h b/lib/mesa/src/gallium/drivers/iris/iris_resource.h
index a8a7ab689..d3489aa36 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_resource.h
+++ b/lib/mesa/src/gallium/drivers/iris/iris_resource.h
@@ -31,7 +31,6 @@
struct iris_batch;
struct iris_context;
-struct shader_info;
#define IRIS_MAX_MIPLEVELS 15
@@ -464,12 +463,6 @@ void iris_resource_prepare_texture(struct iris_context *ice,
uint32_t start_level, uint32_t num_levels,
uint32_t start_layer, uint32_t num_layers);
-enum isl_aux_usage iris_image_view_aux_usage(struct iris_context *ice,
- const struct pipe_image_view *pview,
- const struct shader_info *info);
-enum isl_format iris_image_view_get_format(struct iris_context *ice,
- const struct pipe_image_view *img);
-
static inline bool
iris_resource_unfinished_aux_import(struct iris_resource *res)
{
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_screen.c b/lib/mesa/src/gallium/drivers/iris/iris_screen.c
index 154edec41..78d49848f 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_screen.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_screen.c
@@ -179,7 +179,6 @@ iris_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
case PIPE_CAP_TGSI_BALLOT:
case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
case PIPE_CAP_CLEAR_TEXTURE:
- case PIPE_CAP_CLEAR_SCISSORED:
case PIPE_CAP_TGSI_VOTE:
case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
case PIPE_CAP_TEXTURE_GATHER_SM5:
@@ -200,7 +199,6 @@ iris_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
case PIPE_CAP_GL_SPIRV_VARIABLE_POINTERS:
case PIPE_CAP_DEMOTE_TO_HELPER_INVOCATION:
case PIPE_CAP_NATIVE_FENCE_FD:
- case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
return true;
case PIPE_CAP_FBFETCH:
return BRW_MAX_DRAW_BUFFERS;
@@ -212,8 +210,6 @@ iris_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
case PIPE_CAP_FRAGMENT_SHADER_INTERLOCK:
case PIPE_CAP_ATOMIC_FLOAT_MINMAX:
return devinfo->gen >= 9;
- case PIPE_CAP_DEPTH_BOUNDS_TEST:
- return devinfo->gen >= 12;
case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
return 1;
case PIPE_CAP_MAX_RENDER_TARGETS:
@@ -319,9 +315,6 @@ iris_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
PIPE_CONTEXT_PRIORITY_MEDIUM |
PIPE_CONTEXT_PRIORITY_HIGH;
- case PIPE_CAP_FRONTEND_NOOP:
- return true;
-
// XXX: don't hardcode 00:00:02.0 PCI here
case PIPE_CAP_PCI_GROUP:
return 0;
@@ -648,7 +641,7 @@ iris_screen_create(int fd, const struct pipe_screen_config *config)
*
* Checking the last feature availability will include all previous ones.
*/
- if (iris_getparam_integer(fd, I915_PARAM_HAS_CONTEXT_ISOLATION) <= 0) {
+ if (!iris_getparam_integer(fd, I915_PARAM_HAS_CONTEXT_ISOLATION)) {
debug_error("Kernel is too old for Iris. Consider upgrading to kernel v4.16.\n");
return NULL;
}
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_screen.h b/lib/mesa/src/gallium/drivers/iris/iris_screen.h
index 3e564c53c..4144d48d2 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_screen.h
+++ b/lib/mesa/src/gallium/drivers/iris/iris_screen.h
@@ -31,19 +31,10 @@
#include "intel/dev/gen_device_info.h"
#include "intel/isl/isl.h"
#include "iris_bufmgr.h"
-#include "iris_binder.h"
-#include "iris_resource.h"
-struct gen_l3_config;
-struct brw_vue_map;
+struct iris_bo;
struct iris_monitor_config;
-struct iris_vs_prog_key;
-struct iris_tcs_prog_key;
-struct iris_tes_prog_key;
-struct iris_gs_prog_key;
-struct iris_fs_prog_key;
-struct iris_cs_prog_key;
-enum iris_program_cache_id;
+struct gen_l3_config;
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) *(volatile __typeof__(x) *)&(x) = (v)
@@ -52,90 +43,6 @@ enum iris_program_cache_id;
#define IRIS_MAX_SOL_BUFFERS 4
#define IRIS_MAP_BUFFER_ALIGNMENT 64
-/**
- * Virtual table for generation-specific (genxml) function calls.
- */
-struct iris_vtable {
- void (*destroy_state)(struct iris_context *ice);
- void (*init_render_context)(struct iris_batch *batch);
- void (*init_compute_context)(struct iris_batch *batch);
- void (*upload_render_state)(struct iris_context *ice,
- struct iris_batch *batch,
- const struct pipe_draw_info *draw);
- void (*update_surface_base_address)(struct iris_batch *batch,
- struct iris_binder *binder);
- void (*upload_compute_state)(struct iris_context *ice,
- struct iris_batch *batch,
- const struct pipe_grid_info *grid);
- void (*rebind_buffer)(struct iris_context *ice,
- struct iris_resource *res);
- void (*resolve_conditional_render)(struct iris_context *ice);
- void (*load_register_reg32)(struct iris_batch *batch, uint32_t dst,
- uint32_t src);
- void (*load_register_reg64)(struct iris_batch *batch, uint32_t dst,
- uint32_t src);
- void (*load_register_imm32)(struct iris_batch *batch, uint32_t reg,
- uint32_t val);
- void (*load_register_imm64)(struct iris_batch *batch, uint32_t reg,
- uint64_t val);
- void (*load_register_mem32)(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset);
- void (*load_register_mem64)(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset);
- void (*store_register_mem32)(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset,
- bool predicated);
- void (*store_register_mem64)(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset,
- bool predicated);
- void (*store_data_imm32)(struct iris_batch *batch,
- struct iris_bo *bo, uint32_t offset,
- uint32_t value);
- void (*store_data_imm64)(struct iris_batch *batch,
- struct iris_bo *bo, uint32_t offset,
- uint64_t value);
- void (*copy_mem_mem)(struct iris_batch *batch,
- struct iris_bo *dst_bo, uint32_t dst_offset,
- struct iris_bo *src_bo, uint32_t src_offset,
- unsigned bytes);
- void (*emit_raw_pipe_control)(struct iris_batch *batch,
- const char *reason, uint32_t flags,
- struct iris_bo *bo, uint32_t offset,
- uint64_t imm);
-
- void (*emit_mi_report_perf_count)(struct iris_batch *batch,
- struct iris_bo *bo,
- uint32_t offset_in_bytes,
- uint32_t report_id);
-
- unsigned (*derived_program_state_size)(enum iris_program_cache_id id);
- void (*store_derived_program_state)(struct iris_context *ice,
- enum iris_program_cache_id cache_id,
- struct iris_compiled_shader *shader);
- uint32_t *(*create_so_decl_list)(const struct pipe_stream_output_info *sol,
- const struct brw_vue_map *vue_map);
- void (*populate_vs_key)(const struct iris_context *ice,
- const struct shader_info *info,
- gl_shader_stage last_stage,
- struct iris_vs_prog_key *key);
- void (*populate_tcs_key)(const struct iris_context *ice,
- struct iris_tcs_prog_key *key);
- void (*populate_tes_key)(const struct iris_context *ice,
- const struct shader_info *info,
- gl_shader_stage last_stage,
- struct iris_tes_prog_key *key);
- void (*populate_gs_key)(const struct iris_context *ice,
- const struct shader_info *info,
- gl_shader_stage last_stage,
- struct iris_gs_prog_key *key);
- void (*populate_fs_key)(const struct iris_context *ice,
- const struct shader_info *info,
- struct iris_fs_prog_key *key);
- void (*populate_cs_key)(const struct iris_context *ice,
- struct iris_cs_prog_key *key);
- void (*lost_genx_state)(struct iris_context *ice, struct iris_batch *batch);
-};
-
struct iris_screen {
struct pipe_screen base;
@@ -158,8 +65,6 @@ struct iris_screen {
bool no_hw;
- struct iris_vtable vtbl;
-
/** Global program_string_id counter (see get_program_string_id()) */
unsigned program_id;
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_state.c b/lib/mesa/src/gallium/drivers/iris/iris_state.c
index a5b825f1b..7f4ec3d0b 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_state.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_state.c
@@ -95,7 +95,6 @@
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"
-#include "util/u_memory.h"
#include "drm-uapi/i915_drm.h"
#include "nir.h"
#include "intel/compiler/brw_compiler.h"
@@ -1057,8 +1056,7 @@ struct iris_depth_buffer_state {
uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
GENX(3DSTATE_STENCIL_BUFFER_length) +
GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
- GENX(3DSTATE_CLEAR_PARAMS_length) +
- GENX(MI_LOAD_REGISTER_IMM_length) * 2];
+ GENX(3DSTATE_CLEAR_PARAMS_length)];
};
/**
@@ -2681,6 +2679,7 @@ iris_set_shader_images(struct pipe_context *ctx,
{
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
+ const struct gen_device_info *devinfo = &screen->devinfo;
gl_shader_stage stage = stage_from_pipe(p_stage);
struct iris_shader_state *shs = &ice->state.shaders[stage];
#if GEN_GEN == 8
@@ -2704,13 +2703,27 @@ iris_set_shader_images(struct pipe_context *ctx,
res->bind_history |= PIPE_BIND_SHADER_IMAGE;
res->bind_stages |= 1 << stage;
- enum isl_format isl_fmt = iris_image_view_get_format(ice, img);
+ isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
+ enum isl_format isl_fmt =
+ iris_format_for_usage(devinfo, img->format, usage).fmt;
+
+ bool untyped_fallback = false;
+
+ if (img->shader_access & PIPE_IMAGE_ACCESS_READ) {
+ /* On Gen8, try to use typed surfaces reads (which support a
+ * limited number of formats), and if not possible, fall back
+ * to untyped reads.
+ */
+ untyped_fallback = GEN_GEN == 8 &&
+ !isl_has_matching_typed_storage_image_format(devinfo, isl_fmt);
- /* Render compression with images supported on gen12+ only. */
- unsigned aux_usages = GEN_GEN >= 12 ? res->aux.possible_usages :
- 1 << ISL_AUX_USAGE_NONE;
+ if (untyped_fallback)
+ isl_fmt = ISL_FORMAT_RAW;
+ else
+ isl_fmt = isl_lower_storage_image_format(devinfo, isl_fmt);
+ }
- alloc_surface_states(&iv->surface_state, aux_usages);
+ alloc_surface_states(&iv->surface_state, 1 << ISL_AUX_USAGE_NONE);
iv->surface_state.bo_address = res->bo->gtt_offset;
void *map = iv->surface_state.cpu;
@@ -2723,16 +2736,16 @@ iris_set_shader_images(struct pipe_context *ctx,
.base_array_layer = img->u.tex.first_layer,
.array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
.swizzle = ISL_SWIZZLE_IDENTITY,
- .usage = ISL_SURF_USAGE_STORAGE_BIT,
+ .usage = usage,
};
- /* If using untyped fallback. */
- if (isl_fmt == ISL_FORMAT_RAW) {
+ if (untyped_fallback) {
fill_buffer_surface_state(&screen->isl_dev, res, map,
isl_fmt, ISL_SWIZZLE_IDENTITY,
0, res->bo->size);
} else {
- unsigned aux_modes = aux_usages;
+ /* Images don't support compression */
+ unsigned aux_modes = 1 << ISL_AUX_USAGE_NONE;
while (aux_modes) {
enum isl_aux_usage usage = u_bit_scan(&aux_modes);
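
The image-binding hunk above boils down to a small format decision: start from the ISL format chosen for the gallium image format, and for read access on Gen8 check whether the hardware has a matching typed storage-image read format; if it does not, fall back to ISL_FORMAT_RAW and bind the resource as an untyped buffer instead. A minimal sketch of that selection, using only the ISL helpers already named in the hunk (pick_storage_image_format itself is a hypothetical wrapper, not a function from the patch):

static enum isl_format
pick_storage_image_format(const struct gen_device_info *devinfo,
                          enum isl_format fmt, bool read_access,
                          bool *untyped_fallback)
{
   /* Gen8 typed image reads only cover a short format list; anything
    * else has to be read through a raw (untyped) buffer view. */
   *untyped_fallback = read_access && GEN_GEN == 8 &&
      !isl_has_matching_typed_storage_image_format(devinfo, fmt);

   if (*untyped_fallback)
      return ISL_FORMAT_RAW;

   if (read_access)
      fmt = isl_lower_storage_image_format(devinfo, fmt);

   return fmt;
}

Write-only images skip both steps and keep the format returned by iris_format_for_usage() for the storage usage bit.
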
@@ -3858,8 +3871,7 @@ iris_emit_sbe_swiz(struct iris_batch *batch,
/* XXX: this should be generated when putting programs in place */
- for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
- const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[idx];
+ for (int fs_attr = 0; fs_attr < VARYING_SLOT_MAX; fs_attr++) {
const int input_index = wm_prog_data->urb_setup[fs_attr];
if (input_index < 0 || input_index >= 16)
continue;
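
For context, wm_prog_data->urb_setup[] maps each varying slot to the URB attribute index the fragment shader reads it from, with a negative entry meaning the slot is unused. The two loop shapes the hunk switches between are equivalent; the compact-list form just avoids scanning slots that are never read. A sketch of both, with the per-attribute body elided:

/* (a) dense scan over every varying slot (the form kept by this tree) */
for (int fs_attr = 0; fs_attr < VARYING_SLOT_MAX; fs_attr++) {
   const int input_index = wm_prog_data->urb_setup[fs_attr];
   if (input_index < 0 || input_index >= 16)
      continue;          /* unused, or beyond the 16 SBE swizzle slots */
   /* ... program SBE_SWIZ entry input_index for fs_attr ... */
}

/* (b) compact list of only the live slots (the form removed above) */
for (uint8_t i = 0; i < wm_prog_data->urb_setup_attribs_count; i++) {
   const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[i];
   /* same checks and body as in (a) */
}
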
@@ -4186,13 +4198,6 @@ iris_store_tcs_state(struct iris_context *ice,
hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
hs.IncludeVertexHandles = true;
-#if GEN_GEN == 12
- /* Patch Count threshold specifies the maximum number of patches that
- * will be accumulated before a thread dispatch is forced.
- */
- hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
-#endif
-
#if GEN_GEN >= 9
hs.DispatchMode = vue_prog_data->dispatch_mode;
hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
@@ -4368,6 +4373,7 @@ iris_store_cs_state(struct iris_context *ice,
iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
desc.KernelStartPointer = KSP(shader);
desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
+ desc.NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads;
desc.SharedLocalMemorySize =
encode_slm_size(GEN_GEN, prog_data->total_shared);
desc.BarrierEnable = cs_prog_data->uses_barrier;
@@ -4670,8 +4676,7 @@ use_ubo_ssbo(struct iris_batch *batch,
static uint32_t
use_image(struct iris_batch *batch, struct iris_context *ice,
- struct iris_shader_state *shs, const struct shader_info *info,
- int i)
+ struct iris_shader_state *shs, int i)
{
struct iris_image_view *iv = &shs->image[i];
struct iris_resource *res = (void *) iv->base.resource;
@@ -4687,11 +4692,7 @@ use_image(struct iris_batch *batch, struct iris_context *ice,
if (res->aux.bo)
iris_use_pinned_bo(batch, res->aux.bo, write);
- enum isl_aux_usage aux_usage =
- iris_image_view_aux_usage(ice, &iv->base, info);
-
- return iv->surface_state.ref.offset +
- surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
+ return iv->surface_state.ref.offset;
}
#define push_bt_entry(addr) \
@@ -4791,7 +4792,7 @@ iris_populate_binding_table(struct iris_context *ice,
}
foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
- uint32_t addr = use_image(batch, ice, shs, info, i);
+ uint32_t addr = use_image(batch, ice, shs, i);
push_bt_entry(addr);
}
@@ -5235,15 +5236,11 @@ emit_push_constant_packets(struct iris_context *ice,
int stage,
const struct push_bos *push_bos)
{
- UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
struct iris_compiled_shader *shader = ice->shaders.prog[stage];
struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
-#if GEN_GEN >= 12
- pkt.MOCS = isl_dev->mocs.internal;
-#endif
if (prog_data) {
/* The Skylake PRM contains the following restriction:
*
@@ -5275,8 +5272,6 @@ emit_push_constant_packet_all(struct iris_context *ice,
uint32_t shader_mask,
const struct push_bos *push_bos)
{
- struct isl_device *isl_dev = &batch->screen->isl_dev;
-
if (!push_bos) {
iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
pc.ShaderUpdateEnable = shader_mask;
@@ -5293,7 +5288,6 @@ emit_push_constant_packet_all(struct iris_context *ice,
assert(n <= max_pointers);
iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
all.DWordLength = num_dwords - 2;
- all.MOCS = isl_dev->mocs.internal;
all.ShaderUpdateEnable = shader_mask;
all.PointerBufferMask = (1 << n) - 1;
}
@@ -5913,22 +5907,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
* first.
*/
uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4;
- uint32_t cso_z_size = batch->screen->isl_dev.ds.size - clear_length;;
-
-#if GEN_GEN == 12
- /* GEN:BUG:14010455700
- *
- * ISL will change some CHICKEN registers depending on the depth surface
- * format, along with emitting the depth and stencil packets. In that
- * case, we want to do a depth flush and stall, so the pipeline is not
- * using these settings while we change the registers.
- */
- iris_emit_end_of_pipe_sync(batch,
- "Workaround: Stop pipeline for 14010455700",
- PIPE_CONTROL_DEPTH_STALL |
- PIPE_CONTROL_DEPTH_CACHE_FLUSH);
-#endif
-
+ uint32_t cso_z_size = sizeof(cso_z->packets) - clear_length;
iris_batch_emit(batch, cso_z->packets, cso_z_size);
if (GEN_GEN >= 12) {
/* GEN:BUG:1408224581
@@ -5990,7 +5969,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
int count = util_bitcount64(ice->state.bound_vertex_buffers);
- uint64_t dynamic_bound = ice->state.bound_vertex_buffers;
+ int dynamic_bound = ice->state.bound_vertex_buffers;
if (ice->state.vs_uses_draw_params) {
assert(ice->draw.draw_params.res);
@@ -6473,9 +6452,6 @@ iris_upload_compute_state(struct iris_context *ice,
struct brw_stage_prog_data *prog_data = shader->prog_data;
struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
- const uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
- const unsigned threads = DIV_ROUND_UP(group_size, cs_prog_data->simd_size);
-
/* Always pin the binder. If we're emitting new binding table pointers,
* we need it. If not, we're probably inheriting old tables via the
* context, and need it anyway. Since true zero-bindings cases are
@@ -6537,7 +6513,7 @@ iris_upload_compute_state(struct iris_context *ice,
vfe.URBEntryAllocationSize = 2;
vfe.CURBEAllocationSize =
- ALIGN(cs_prog_data->push.per_thread.regs * threads +
+ ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
cs_prog_data->push.cross_thread.regs, 2);
}
}
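
The CURBEAllocationSize computation above is in 256-bit (32-byte) register units: every thread in the group gets its own copy of the per-thread push constants, plus one shared cross-thread block, rounded up to an even register count. A small worked example with illustrative values (the real ones come from brw_cs_prog_data, as in the hunk):

#define ALIGN(v, a)  (((v) + (a) - 1) & ~((a) - 1))

unsigned per_thread_regs   = 1;   /* e.g. one register of subgroup-id data */
unsigned cross_thread_regs = 0;
unsigned threads           = 8;   /* threads launched per thread group */

unsigned curbe_regs =
   ALIGN(per_thread_regs * threads + cross_thread_regs, 2);   /* -> 8 */
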
@@ -6548,19 +6524,18 @@ iris_upload_compute_state(struct iris_context *ice,
assert(cs_prog_data->push.cross_thread.dwords == 0 &&
cs_prog_data->push.per_thread.dwords == 1 &&
cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
- const unsigned push_const_size =
- brw_cs_push_const_total_size(cs_prog_data, threads);
uint32_t *curbe_data_map =
stream_state(batch, ice->state.dynamic_uploader,
&ice->state.last_res.cs_thread_ids,
- ALIGN(push_const_size, 64), 64,
+ ALIGN(cs_prog_data->push.total.size, 64), 64,
&curbe_data_offset);
assert(curbe_data_map);
- memset(curbe_data_map, 0x5a, ALIGN(push_const_size, 64));
- iris_fill_cs_push_const_buffer(cs_prog_data, threads, curbe_data_map);
+ memset(curbe_data_map, 0x5a, ALIGN(cs_prog_data->push.total.size, 64));
+ iris_fill_cs_push_const_buffer(cs_prog_data, curbe_data_map);
iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
- curbe.CURBETotalDataLength = ALIGN(push_const_size, 64);
+ curbe.CURBETotalDataLength =
+ ALIGN(cs_prog_data->push.total.size, 64);
curbe.CURBEDataStartAddress = curbe_data_offset;
}
}
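
Given the assertion above (no cross-thread data, a single per-thread dword holding BRW_PARAM_BUILTIN_SUBGROUP_ID), the buffer that iris_fill_cs_push_const_buffer() produces is simple: one padded register block per thread whose first dword is that thread's index, with the 0x5a memset making any unread padding obvious in dumps. A sketch of the fill, assuming that layout:

/* Sketch: per-thread subgroup-id payload, one padded block per thread. */
const unsigned per_thread_bytes = cs_prog_data->push.per_thread.regs * 32;

for (unsigned t = 0; t < cs_prog_data->threads; t++) {
   uint32_t *dst =
      (uint32_t *)((char *)curbe_data_map + t * per_thread_bytes);
   dst[0] = t;               /* BRW_PARAM_BUILTIN_SUBGROUP_ID */
}
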
@@ -6574,7 +6549,6 @@ iris_upload_compute_state(struct iris_context *ice,
iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
idd.SamplerStatePointer = shs->sampler_table.offset;
idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
- idd.NumberofThreadsinGPGPUThreadGroup = threads;
}
for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
@@ -6589,6 +6563,7 @@ iris_upload_compute_state(struct iris_context *ice,
}
}
+ uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
uint32_t right_mask;
@@ -6623,7 +6598,7 @@ iris_upload_compute_state(struct iris_context *ice,
ggw.SIMDSize = cs_prog_data->simd_size / 16;
ggw.ThreadDepthCounterMaximum = 0;
ggw.ThreadHeightCounterMaximum = 0;
- ggw.ThreadWidthCounterMaximum = threads - 1;
+ ggw.ThreadWidthCounterMaximum = cs_prog_data->threads - 1;
ggw.ThreadGroupIDXDimension = grid->grid[0];
ggw.ThreadGroupIDYDimension = grid->grid[1];
ggw.ThreadGroupIDZDimension = grid->grid[2];
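
The walker programming above derives everything from the local workgroup size and the compiled SIMD width: the number of threads per group, and an execution mask for the last, partially filled thread (the right_mask declared earlier in the function). The mask computation itself falls outside the visible hunks; the sketch below shows the usual form, with illustrative numbers:

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

uint32_t block[3]  = { 13, 1, 1 };   /* local workgroup size (illustrative) */
uint32_t simd_size = 8;              /* cs_prog_data->simd_size: 8, 16 or 32 */

uint32_t group_size = block[0] * block[1] * block[2];        /* 13 */
uint32_t threads    = DIV_ROUND_UP(group_size, simd_size);   /*  2 */
uint32_t remainder  = group_size & (simd_size - 1);          /*  5 */

/* Enable only the channels that map to real invocations in the last
 * thread; a full thread keeps all simd_size channels enabled. */
uint32_t right_mask = remainder ? ~0u >> (32 - remainder)
                                : ~0u >> (32 - simd_size);
/* right_mask == 0x1f for the example values above */
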
@@ -6926,8 +6901,7 @@ iris_emit_raw_pipe_control(struct iris_batch *batch,
imm);
}
- if ((GEN_GEN == 9 || (GEN_GEN == 12 && devinfo->revision == 0 /* A0*/)) &&
- IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
+ if (GEN_GEN == 9 && IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
/* Project: SKL / Argument: LRI Post Sync Operation [23]
*
* "PIPECONTROL command with “Command Streamer Stall Enable” must be
@@ -6936,8 +6910,6 @@ iris_emit_raw_pipe_control(struct iris_batch *batch,
* PIPELINE_SELECT command is set to GPGPU mode of operation)."
*
* The same text exists a few rows below for Post Sync Op.
- *
- * On Gen12 this is GEN:BUG:1607156449.
*/
iris_emit_raw_pipe_control(batch,
"workaround: CS stall before gpgpu post-sync",
@@ -7477,19 +7449,6 @@ genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
#endif
}
-static void
-iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
-{
- struct iris_context *ice = (struct iris_context *) ctx;
-
- ice->state.dirty |= iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER],
- enable,
- IRIS_ALL_DIRTY_FOR_RENDER);
- ice->state.dirty |= iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE],
- enable,
- IRIS_ALL_DIRTY_FOR_COMPUTE);
-}
-
void
genX(init_state)(struct iris_context *ice)
{
@@ -7534,38 +7493,37 @@ genX(init_state)(struct iris_context *ice)
ctx->create_stream_output_target = iris_create_stream_output_target;
ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
ctx->set_stream_output_targets = iris_set_stream_output_targets;
- ctx->set_frontend_noop = iris_set_frontend_noop;
-
- screen->vtbl.destroy_state = iris_destroy_state;
- screen->vtbl.init_render_context = iris_init_render_context;
- screen->vtbl.init_compute_context = iris_init_compute_context;
- screen->vtbl.upload_render_state = iris_upload_render_state;
- screen->vtbl.update_surface_base_address = iris_update_surface_base_address;
- screen->vtbl.upload_compute_state = iris_upload_compute_state;
- screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
- screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
- screen->vtbl.rebind_buffer = iris_rebind_buffer;
- screen->vtbl.load_register_reg32 = iris_load_register_reg32;
- screen->vtbl.load_register_reg64 = iris_load_register_reg64;
- screen->vtbl.load_register_imm32 = iris_load_register_imm32;
- screen->vtbl.load_register_imm64 = iris_load_register_imm64;
- screen->vtbl.load_register_mem32 = iris_load_register_mem32;
- screen->vtbl.load_register_mem64 = iris_load_register_mem64;
- screen->vtbl.store_register_mem32 = iris_store_register_mem32;
- screen->vtbl.store_register_mem64 = iris_store_register_mem64;
- screen->vtbl.store_data_imm32 = iris_store_data_imm32;
- screen->vtbl.store_data_imm64 = iris_store_data_imm64;
- screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
- screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
- screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
- screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
- screen->vtbl.populate_vs_key = iris_populate_vs_key;
- screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
- screen->vtbl.populate_tes_key = iris_populate_tes_key;
- screen->vtbl.populate_gs_key = iris_populate_gs_key;
- screen->vtbl.populate_fs_key = iris_populate_fs_key;
- screen->vtbl.populate_cs_key = iris_populate_cs_key;
- screen->vtbl.lost_genx_state = iris_lost_genx_state;
+
+ ice->vtbl.destroy_state = iris_destroy_state;
+ ice->vtbl.init_render_context = iris_init_render_context;
+ ice->vtbl.init_compute_context = iris_init_compute_context;
+ ice->vtbl.upload_render_state = iris_upload_render_state;
+ ice->vtbl.update_surface_base_address = iris_update_surface_base_address;
+ ice->vtbl.upload_compute_state = iris_upload_compute_state;
+ ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
+ ice->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
+ ice->vtbl.rebind_buffer = iris_rebind_buffer;
+ ice->vtbl.load_register_reg32 = iris_load_register_reg32;
+ ice->vtbl.load_register_reg64 = iris_load_register_reg64;
+ ice->vtbl.load_register_imm32 = iris_load_register_imm32;
+ ice->vtbl.load_register_imm64 = iris_load_register_imm64;
+ ice->vtbl.load_register_mem32 = iris_load_register_mem32;
+ ice->vtbl.load_register_mem64 = iris_load_register_mem64;
+ ice->vtbl.store_register_mem32 = iris_store_register_mem32;
+ ice->vtbl.store_register_mem64 = iris_store_register_mem64;
+ ice->vtbl.store_data_imm32 = iris_store_data_imm32;
+ ice->vtbl.store_data_imm64 = iris_store_data_imm64;
+ ice->vtbl.copy_mem_mem = iris_copy_mem_mem;
+ ice->vtbl.derived_program_state_size = iris_derived_program_state_size;
+ ice->vtbl.store_derived_program_state = iris_store_derived_program_state;
+ ice->vtbl.create_so_decl_list = iris_create_so_decl_list;
+ ice->vtbl.populate_vs_key = iris_populate_vs_key;
+ ice->vtbl.populate_tcs_key = iris_populate_tcs_key;
+ ice->vtbl.populate_tes_key = iris_populate_tes_key;
+ ice->vtbl.populate_gs_key = iris_populate_gs_key;
+ ice->vtbl.populate_fs_key = iris_populate_fs_key;
+ ice->vtbl.populate_cs_key = iris_populate_cs_key;
+ ice->vtbl.lost_genx_state = iris_lost_genx_state;
ice->state.dirty = ~0ull;
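
The assignment block above installs the GENX()-compiled state and command-emission functions into the per-context vtable (this patch moves the table from iris_screen onto iris_context, matching the field removals at the top of this diff). Generation-independent driver code then dispatches through it; a minimal sketch of such a call site (flush_compute_work is hypothetical, not a function from the patch):

/* Sketch: gen-agnostic code calling into the gen-specific backend
 * through the vtable populated in genX(init_state)() above. */
static void
flush_compute_work(struct iris_context *ice, struct iris_batch *batch,
                   const struct pipe_grid_info *grid)
{
   /* Resolved at context creation to the GENX(...) variant that
    * matches the device generation. */
   ice->vtbl.upload_compute_state(ice, batch, grid);
}
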
diff --git a/lib/mesa/src/gallium/drivers/iris/meson.build b/lib/mesa/src/gallium/drivers/iris/meson.build
index 580391ac1..69db8050e 100644
--- a/lib/mesa/src/gallium/drivers/iris/meson.build
+++ b/lib/mesa/src/gallium/drivers/iris/meson.build
@@ -69,7 +69,7 @@ foreach v : ['80', '90', '100', '110', '120']
iris_gen_libs += static_library(
'iris_gen@0@'.format(v),
['iris_blorp.c', 'iris_query.c', 'iris_state.c', gen_xml_pack],
- include_directories : [inc_include, inc_src, inc_mapi, inc_mesa, inc_gallium, inc_gallium_aux, inc_intel],
+ include_directories : [inc_common, inc_intel],
c_args : [
c_vis_args, no_override_init_args, c_sse2_args,
'-DGEN_VERSIONx10=@0@'.format(v),