path: root/lib/mesa/src/gallium/drivers/iris/iris_state.c
author     Jonathan Gray <jsg@cvs.openbsd.org>   2020-08-26 05:30:39 +0000
committer  Jonathan Gray <jsg@cvs.openbsd.org>   2020-08-26 05:30:39 +0000
commit     27c93456b58343162f7c4ad20ca6bea0c9a91646 (patch)
tree       945c20b63e0b9975ee40f114c5312f8d8f1a2d0b /lib/mesa/src/gallium/drivers/iris/iris_state.c
parent     875b83a3ee95e248388fbf72271acc80f6f97987 (diff)
Import Mesa 20.1.6
Diffstat (limited to 'lib/mesa/src/gallium/drivers/iris/iris_state.c')
-rw-r--r--   lib/mesa/src/gallium/drivers/iris/iris_state.c   1862
1 file changed, 1273 insertions, 589 deletions
diff --git a/lib/mesa/src/gallium/drivers/iris/iris_state.c b/lib/mesa/src/gallium/drivers/iris/iris_state.c
index 7d945ecec..a5b825f1b 100644
--- a/lib/mesa/src/gallium/drivers/iris/iris_state.c
+++ b/lib/mesa/src/gallium/drivers/iris/iris_state.c
@@ -90,14 +90,16 @@
#include "pipe/p_screen.h"
#include "util/u_dual_blend.h"
#include "util/u_inlines.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_transfer.h"
#include "util/u_upload_mgr.h"
#include "util/u_viewport.h"
+#include "util/u_memory.h"
#include "drm-uapi/i915_drm.h"
#include "nir.h"
#include "intel/compiler/brw_compiler.h"
+#include "intel/common/gen_aux_map.h"
#include "intel/common/gen_l3_config.h"
#include "intel/common/gen_sample_positions.h"
#include "iris_batch.h"
@@ -109,20 +111,6 @@
#include "iris_genx_macros.h"
#include "intel/common/gen_guardband.h"
-#if GEN_GEN == 8
-#define MOCS_PTE 0x18
-#define MOCS_WB 0x78
-#else
-#define MOCS_PTE (1 << 1)
-#define MOCS_WB (2 << 1)
-#endif
-
-static uint32_t
-mocs(const struct iris_bo *bo)
-{
- return bo && bo->external ? MOCS_PTE : MOCS_WB;
-}
-
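The MOCS_PTE/MOCS_WB defines and the file-local mocs() helper go away because later hunks switch to MOCS values supplied by ISL (isl_dev.mocs.internal and iris_mocs(bo, isl_dev)). A minimal sketch of the shared helper this diff starts relying on, assuming the isl_device carries precomputed internal/external MOCS values as in upstream Mesa's iris_resource.h:

   /* Sketch only: externally shared (scanout/dma-buf) BOs get the PTE-based
    * "external" MOCS; everything else uses the write-back "internal" MOCS,
    * mirroring the MOCS_PTE/MOCS_WB choice the removed helper made.
    */
   static inline uint32_t
   iris_mocs(const struct iris_bo *bo, const struct isl_device *dev)
   {
      return bo && bo->external ? dev->mocs.external : dev->mocs.internal;
   }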
/**
* Statically assert that PIPE_* enums match the hardware packets.
* (As long as they match, we don't need to translate them.)
@@ -353,9 +341,10 @@ stream_state(struct iris_batch *batch,
struct iris_bo *bo = iris_resource_bo(*out_res);
iris_use_pinned_bo(batch, bo, false);
- *out_offset += iris_bo_offset_from_base_address(bo);
+ iris_record_state_size(batch->state_sizes,
+ bo->gtt_offset + *out_offset, size);
- iris_record_state_size(batch->state_sizes, *out_offset, size);
+ *out_offset += iris_bo_offset_from_base_address(bo);
return ptr;
}
@@ -391,8 +380,10 @@ emit_state(struct iris_batch *batch,
(!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
static void
-flush_for_state_base_change(struct iris_batch *batch)
+flush_before_state_base_change(struct iris_batch *batch)
{
+ const struct gen_device_info *devinfo = &batch->screen->devinfo;
+
/* Flush before emitting STATE_BASE_ADDRESS.
*
* This isn't documented anywhere in the PRM. However, it seems to be
@@ -415,10 +406,68 @@ flush_for_state_base_change(struct iris_batch *batch)
* rendering. It's a bit of a big hammer but it appears to work.
*/
iris_emit_end_of_pipe_sync(batch,
- "change STATE_BASE_ADDRESS",
+ "change STATE_BASE_ADDRESS (flushes)",
PIPE_CONTROL_RENDER_TARGET_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
- PIPE_CONTROL_DATA_CACHE_FLUSH);
+ PIPE_CONTROL_DATA_CACHE_FLUSH |
+ /* GEN:BUG:1606662791:
+ *
+ * Software must program PIPE_CONTROL command
+ * with "HDC Pipeline Flush" prior to
+ * programming of the below two non-pipeline
+ * state :
+ * * STATE_BASE_ADDRESS
+ * * 3DSTATE_BINDING_TABLE_POOL_ALLOC
+ */
+ ((GEN_GEN == 12 && devinfo->revision == 0 /* A0 */ ?
+ PIPE_CONTROL_FLUSH_HDC : 0)));
+}
+
+static void
+flush_after_state_base_change(struct iris_batch *batch)
+{
+ /* After re-setting the surface state base address, we have to do some
+ * cache flushing so that the sampler engine will pick up the new
+ * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
+ * Shared Function > 3D Sampler > State > State Caching (page 96):
+ *
+ * Coherency with system memory in the state cache, like the texture
+ * cache is handled partially by software. It is expected that the
+ * command stream or shader will issue Cache Flush operation or
+ * Cache_Flush sampler message to ensure that the L1 cache remains
+ * coherent with system memory.
+ *
+ * [...]
+ *
+ * Whenever the value of the Dynamic_State_Base_Addr,
+ * Surface_State_Base_Addr are altered, the L1 state cache must be
+ * invalidated to ensure the new surface or sampler state is fetched
+ * from system memory.
+ *
+ * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
+ * which, according to the PIPE_CONTROL instruction documentation in the
+ * Broadwell PRM:
+ *
+ * Setting this bit is independent of any other bit in this packet.
+ * This bit controls the invalidation of the L1 and L2 state caches
+ * at the top of the pipe i.e. at the parsing time.
+ *
+ * Unfortunately, experimentation seems to indicate that state cache
+ * invalidation through a PIPE_CONTROL does nothing whatsoever in
+ * regards to surface state and binding tables. Instead, it seems that
+ * invalidating the texture cache is what is actually needed.
+ *
+ * XXX: As far as we have been able to determine through
+ * experimentation, flushing the texture cache appears to be
+ * sufficient. The theory here is that all of the sampling/rendering
+ * units cache the binding table in the texture cache. However, we have
+ * yet to be able to actually confirm this.
+ */
+ iris_emit_end_of_pipe_sync(batch,
+ "change STATE_BASE_ADDRESS (invalidates)",
+ PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
+ PIPE_CONTROL_CONST_CACHE_INVALIDATE |
+ PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
static void
@@ -441,6 +490,128 @@ _iris_emit_lrr(struct iris_batch *batch, uint32_t dst, uint32_t src)
}
static void
+iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
+ uint32_t src)
+{
+ _iris_emit_lrr(batch, dst, src);
+}
+
+static void
+iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
+ uint32_t src)
+{
+ _iris_emit_lrr(batch, dst, src);
+ _iris_emit_lrr(batch, dst + 4, src + 4);
+}
+
+static void
+iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
+ uint32_t val)
+{
+ _iris_emit_lri(batch, reg, val);
+}
+
+static void
+iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
+ uint64_t val)
+{
+ _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
+ _iris_emit_lri(batch, reg + 4, val >> 32);
+}
+
+/**
+ * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
+ */
+static void
+iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset)
+{
+ iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
+ lrm.RegisterAddress = reg;
+ lrm.MemoryAddress = ro_bo(bo, offset);
+ }
+}
+
+/**
+ * Load a 64-bit value from a buffer into a MMIO register via
+ * two MI_LOAD_REGISTER_MEM commands.
+ */
+static void
+iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset)
+{
+ iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
+ iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
+}
+
+static void
+iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset,
+ bool predicated)
+{
+ iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
+ srm.RegisterAddress = reg;
+ srm.MemoryAddress = rw_bo(bo, offset);
+ srm.PredicateEnable = predicated;
+ }
+}
+
+static void
+iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
+ struct iris_bo *bo, uint32_t offset,
+ bool predicated)
+{
+ iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
+ iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
+}
+
+static void
+iris_store_data_imm32(struct iris_batch *batch,
+ struct iris_bo *bo, uint32_t offset,
+ uint32_t imm)
+{
+ iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
+ sdi.Address = rw_bo(bo, offset);
+ sdi.ImmediateData = imm;
+ }
+}
+
+static void
+iris_store_data_imm64(struct iris_batch *batch,
+ struct iris_bo *bo, uint32_t offset,
+ uint64_t imm)
+{
+ /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
+ * 2 in genxml but it's actually variable length and we need 5 DWords.
+ */
+ void *map = iris_get_command_space(batch, 4 * 5);
+ _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
+ sdi.DWordLength = 5 - 2;
+ sdi.Address = rw_bo(bo, offset);
+ sdi.ImmediateData = imm;
+ }
+}
+
+static void
+iris_copy_mem_mem(struct iris_batch *batch,
+ struct iris_bo *dst_bo, uint32_t dst_offset,
+ struct iris_bo *src_bo, uint32_t src_offset,
+ unsigned bytes)
+{
+ /* MI_COPY_MEM_MEM operates on DWords. */
+ assert(bytes % 4 == 0);
+ assert(dst_offset % 4 == 0);
+ assert(src_offset % 4 == 0);
+
+ for (unsigned i = 0; i < bytes; i += 4) {
+ iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
+ cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i);
+ cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
+ }
+ }
+}
+
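The block of MI helpers above gives the state code one consistent way to move data between MMIO registers, buffer objects, and immediates. A hedged usage sketch, not part of this diff, with an illustrative register offset and result layout:

   /* Illustrative only: snapshot a 64-bit pipeline statistic into a query
    * BO and mark the result available. 0x2338 (CL_INVOCATION_COUNT) and the
    * "+ 8" ready-flag layout are assumptions for this sketch.
    */
   static void
   snapshot_clipper_invocations(struct iris_batch *batch,
                                struct iris_bo *query_bo, uint32_t offset)
   {
      /* Two MI_STORE_REGISTER_MEMs, one per DWord of the counter. */
      iris_store_register_mem64(batch, 0x2338, query_bo, offset, false);

      /* A single MI_STORE_DATA_IMM flags the snapshot as ready. */
      iris_store_data_imm32(batch, query_bo, offset + 8, 1);
   }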
+static void
emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
{
#if GEN_GEN >= 8 && GEN_GEN < 10
@@ -513,7 +684,8 @@ init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
static void
init_state_base_address(struct iris_batch *batch)
{
- flush_for_state_base_change(batch);
+ uint32_t mocs = batch->screen->isl_dev.mocs.internal;
+ flush_before_state_base_change(batch);
/* We program most base addresses once at context initialization time.
* Each base address points at a 4GB memory zone, and never needs to
@@ -523,11 +695,12 @@ init_state_base_address(struct iris_batch *batch)
* updated occasionally. See iris_binder.c for the details there.
*/
iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
- sba.GeneralStateMOCS = MOCS_WB;
- sba.StatelessDataPortAccessMOCS = MOCS_WB;
- sba.DynamicStateMOCS = MOCS_WB;
- sba.IndirectObjectMOCS = MOCS_WB;
- sba.InstructionMOCS = MOCS_WB;
+ sba.GeneralStateMOCS = mocs;
+ sba.StatelessDataPortAccessMOCS = mocs;
+ sba.DynamicStateMOCS = mocs;
+ sba.IndirectObjectMOCS = mocs;
+ sba.InstructionMOCS = mocs;
+ sba.SurfaceStateMOCS = mocs;
sba.GeneralStateBaseAddressModifyEnable = true;
sba.DynamicStateBaseAddressModifyEnable = true;
@@ -537,7 +710,7 @@ init_state_base_address(struct iris_batch *batch)
sba.DynamicStateBufferSizeModifyEnable = true;
#if (GEN_GEN >= 9)
sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
- sba.BindlessSurfaceStateMOCS = MOCS_WB;
+ sba.BindlessSurfaceStateMOCS = mocs;
#endif
sba.IndirectObjectBufferSizeModifyEnable = true;
sba.InstructionBuffersizeModifyEnable = true;
@@ -550,15 +723,28 @@ init_state_base_address(struct iris_batch *batch)
sba.InstructionBufferSize = 0xfffff;
sba.DynamicStateBufferSize = 0xfffff;
}
+
+ flush_after_state_base_change(batch);
}
static void
-iris_emit_l3_config(struct iris_batch *batch, const struct gen_l3_config *cfg,
- bool has_slm, bool wants_dc_cache)
+iris_emit_l3_config(struct iris_batch *batch,
+ const struct gen_l3_config *cfg)
{
uint32_t reg_val;
- iris_pack_state(GENX(L3CNTLREG), &reg_val, reg) {
- reg.SLMEnable = has_slm;
+
+#if GEN_GEN >= 12
+#define L3_ALLOCATION_REG GENX(L3ALLOC)
+#define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
+#else
+#define L3_ALLOCATION_REG GENX(L3CNTLREG)
+#define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
+#endif
+
+ iris_pack_state(L3_ALLOCATION_REG, &reg_val, reg) {
+#if GEN_GEN < 11
+ reg.SLMEnable = cfg->n[GEN_L3P_SLM] > 0;
+#endif
#if GEN_GEN == 11
/* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
* in L3CNTLREG register. The default setting of the bit is not the
@@ -572,23 +758,10 @@ iris_emit_l3_config(struct iris_batch *batch, const struct gen_l3_config *cfg,
reg.DCAllocation = cfg->n[GEN_L3P_DC];
reg.AllAllocation = cfg->n[GEN_L3P_ALL];
}
- iris_emit_lri(batch, L3CNTLREG, reg_val);
+ _iris_emit_lri(batch, L3_ALLOCATION_REG_num, reg_val);
}
-static void
-iris_emit_default_l3_config(struct iris_batch *batch,
- const struct gen_device_info *devinfo,
- bool compute)
-{
- bool wants_dc_cache = true;
- bool has_slm = compute;
- const struct gen_l3_weights w =
- gen_get_default_l3_weights(devinfo, wants_dc_cache, has_slm);
- const struct gen_l3_config *cfg = gen_get_l3_config(devinfo, w);
- iris_emit_l3_config(batch, cfg, has_slm, wants_dc_cache);
-}
-
-#if GEN_GEN == 9 || GEN_GEN == 10
+#if GEN_GEN == 9
static void
iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
{
@@ -687,6 +860,32 @@ iris_upload_slice_hashing_state(struct iris_batch *batch)
}
#endif
+static void
+iris_alloc_push_constants(struct iris_batch *batch)
+{
+ /* For now, we set a static partitioning of the push constant area,
+ * assuming that all stages could be in use.
+ *
+ * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
+ * see if that improves performance by offering more space to
+ * the VS/FS when those aren't in use. Also, try dynamically
+ * enabling/disabling it like i965 does. This would be more
+ * stalls and may not actually help; we don't know yet.
+ */
+ for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
+ iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
+ alloc._3DCommandSubOpcode = 18 + i;
+ alloc.ConstantBufferOffset = 6 * i;
+ alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
+ }
+ }
+}
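With ConstantBufferOffset = 6 * i and a size of 6 (or 8 for the fragment shader), and assuming the fields are in 1 KB units against a 32 KB push constant pool (the sizes here sum to exactly that), the static partition works out to:

   VS:  0 KB .. 6 KB    HS:  6 KB .. 12 KB    DS: 12 KB .. 18 KB
   GS: 18 KB .. 24 KB   FS: 24 KB .. 32 KB    (4 * 6 + 8 = 32 KB total)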
+
+#if GEN_GEN >= 12
+static void
+init_aux_map_state(struct iris_batch *batch);
+#endif
+
/**
* Upload the initial GPU state for a render context.
*
@@ -694,17 +893,14 @@ iris_upload_slice_hashing_state(struct iris_batch *batch)
* way, but we never actually change.
*/
static void
-iris_init_render_context(struct iris_screen *screen,
- struct iris_batch *batch,
- struct iris_vtable *vtbl,
- struct pipe_debug_callback *dbg)
+iris_init_render_context(struct iris_batch *batch)
{
- UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
+ UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
uint32_t reg_val;
emit_pipeline_select(batch, _3D);
- iris_emit_default_l3_config(batch, devinfo, false);
+ iris_emit_l3_config(batch, batch->screen->l3_config_3d);
init_state_base_address(batch);
@@ -736,37 +932,39 @@ iris_init_render_context(struct iris_screen *screen,
#endif
#if GEN_GEN == 11
- iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
- reg.HeaderlessMessageforPreemptableContexts = 1;
- reg.HeaderlessMessageforPreemptableContextsMask = 1;
- }
- iris_emit_lri(batch, SAMPLER_MODE, reg_val);
+ iris_pack_state(GENX(TCCNTLREG), &reg_val, reg) {
+ reg.L3DataPartialWriteMergingEnable = true;
+ reg.ColorZPartialWriteMergingEnable = true;
+ reg.URBPartialWriteMergingEnable = true;
+ reg.TCDisable = true;
+ }
+ iris_emit_lri(batch, TCCNTLREG, reg_val);
- /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
- iris_pack_state(GENX(HALF_SLICE_CHICKEN7), &reg_val, reg) {
- reg.EnabledTexelOffsetPrecisionFix = 1;
- reg.EnabledTexelOffsetPrecisionFixMask = 1;
- }
- iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val);
+ iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
+ reg.HeaderlessMessageforPreemptableContexts = 1;
+ reg.HeaderlessMessageforPreemptableContextsMask = 1;
+ }
+ iris_emit_lri(batch, SAMPLER_MODE, reg_val);
- iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
- reg.StateCacheRedirectToCSSectionEnable = true;
- reg.StateCacheRedirectToCSSectionEnableMask = true;
- }
- iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
+ /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
+ iris_pack_state(GENX(HALF_SLICE_CHICKEN7), &reg_val, reg) {
+ reg.EnabledTexelOffsetPrecisionFix = 1;
+ reg.EnabledTexelOffsetPrecisionFixMask = 1;
+ }
+ iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val);
- /* Hardware specification recommends disabling repacking for the
- * compatibility with decompression mechanism in display controller.
- */
- if (devinfo->disable_ccs_repack) {
- iris_pack_state(GENX(CACHE_MODE_0), &reg_val, reg) {
- reg.DisableRepackingforCompression = true;
- reg.DisableRepackingforCompressionMask = true;
- }
- iris_emit_lri(batch, CACHE_MODE_0, reg_val);
+ /* Hardware specification recommends disabling repacking for the
+ * compatibility with decompression mechanism in display controller.
+ */
+ if (devinfo->disable_ccs_repack) {
+ iris_pack_state(GENX(CACHE_MODE_0), &reg_val, reg) {
+ reg.DisableRepackingforCompression = true;
+ reg.DisableRepackingforCompressionMask = true;
}
+ iris_emit_lri(batch, CACHE_MODE_0, reg_val);
+ }
- iris_upload_slice_hashing_state(batch);
+ iris_upload_slice_hashing_state(batch);
#endif
/* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
@@ -803,40 +1001,45 @@ iris_init_render_context(struct iris_screen *screen,
/* TODO: may need to set an offset for origin-UL framebuffers */
iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);
- /* Set a static partitioning of the push constant area. */
- /* TODO: this may be a bad idea...could starve the push ringbuffers... */
- for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
- iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
- alloc._3DCommandSubOpcode = 18 + i;
- alloc.ConstantBufferOffset = 6 * i;
- alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
- }
- }
+ iris_alloc_push_constants(batch);
-#if GEN_GEN == 10
- /* Gen11+ is enabled for us by the kernel. */
- iris_enable_obj_preemption(batch, true);
+#if GEN_GEN >= 12
+ init_aux_map_state(batch);
#endif
}
static void
-iris_init_compute_context(struct iris_screen *screen,
- struct iris_batch *batch,
- struct iris_vtable *vtbl,
- struct pipe_debug_callback *dbg)
+iris_init_compute_context(struct iris_batch *batch)
{
- UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
+ UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
+ /* GEN:BUG:1607854226:
+ *
+ * Start with pipeline in 3D mode to set the STATE_BASE_ADDRESS.
+ */
+#if GEN_GEN == 12
+ emit_pipeline_select(batch, _3D);
+#else
emit_pipeline_select(batch, GPGPU);
+#endif
- iris_emit_default_l3_config(batch, devinfo, true);
+ iris_emit_l3_config(batch, batch->screen->l3_config_cs);
init_state_base_address(batch);
+#if GEN_GEN == 12
+ emit_pipeline_select(batch, GPGPU);
+#endif
+
#if GEN_GEN == 9
if (devinfo->is_geminilake)
init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
#endif
+
+#if GEN_GEN >= 12
+ init_aux_map_state(batch);
+#endif
+
}
struct iris_vertex_buffer_state {
@@ -854,7 +1057,8 @@ struct iris_depth_buffer_state {
uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
GENX(3DSTATE_STENCIL_BUFFER_length) +
GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
- GENX(3DSTATE_CLEAR_PARAMS_length)];
+ GENX(3DSTATE_CLEAR_PARAMS_length) +
+ GENX(MI_LOAD_REGISTER_IMM_length) * 2];
};
/**
@@ -871,6 +1075,10 @@ struct iris_genx_state {
uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
+#if GEN_GEN == 8
+ bool pma_fix_enabled;
+#endif
+
#if GEN_GEN == 9
/* Is object level preemption enabled? */
bool object_preemption;
@@ -1058,6 +1266,9 @@ iris_bind_blend_state(struct pipe_context *ctx, void *state)
ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_BLEND];
+
+ if (GEN_GEN == 8)
+ ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}
/**
@@ -1086,12 +1297,19 @@ struct iris_depth_stencil_alpha_state {
/** Partial 3DSTATE_WM_DEPTH_STENCIL. */
uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
+#if GEN_GEN >= 12
+ uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
+#endif
+
/** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
struct pipe_alpha_state alpha;
/** Outbound to resolve and cache set tracking. */
bool depth_writes_enabled;
bool stencil_writes_enabled;
+
+ /** Outbound to Gen8-9 PMA stall equations */
+ bool depth_test_enabled;
};
/**
@@ -1111,6 +1329,7 @@ iris_create_zsa_state(struct pipe_context *ctx,
cso->alpha = state->alpha;
cso->depth_writes_enabled = state->depth.writemask;
+ cso->depth_test_enabled = state->depth.enabled;
cso->stencil_writes_enabled =
state->stencil[0].writemask != 0 ||
(two_sided_stencil && state->stencil[1].writemask != 0);
@@ -1144,6 +1363,16 @@ iris_create_zsa_state(struct pipe_context *ctx,
/* wmds.[Backface]StencilReferenceValue are merged later */
}
+#if GEN_GEN >= 12
+ iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
+ depth_bounds.DepthBoundsTestValueModifyDisable = false;
+ depth_bounds.DepthBoundsTestEnableModifyDisable = false;
+ depth_bounds.DepthBoundsTestEnable = state->depth.bounds_test;
+ depth_bounds.DepthBoundsTestMinValue = state->depth.bounds_min;
+ depth_bounds.DepthBoundsTestMaxValue = state->depth.bounds_max;
+ }
+#endif
+
return cso;
}
@@ -1174,12 +1403,192 @@ iris_bind_zsa_state(struct pipe_context *ctx, void *state)
ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;
+
+#if GEN_GEN >= 12
+ if (cso_changed(depth_bounds))
+ ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
+#endif
}
ice->state.cso_zsa = new_cso;
ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
+
+ if (GEN_GEN == 8)
+ ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
+}
+
+#if GEN_GEN == 8
+static bool
+want_pma_fix(struct iris_context *ice)
+{
+ UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
+ UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
+ const struct brw_wm_prog_data *wm_prog_data = (void *)
+ ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
+ const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
+ const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
+ const struct iris_blend_state *cso_blend = ice->state.cso_blend;
+
+ /* In very specific combinations of state, we can instruct Gen8-9 hardware
+ * to avoid stalling at the pixel mask array. The state equations are
+ * documented in these places:
+ *
+ * - Gen8 Depth PMA Fix: CACHE_MODE_1::NP_PMA_FIX_ENABLE
+ * - Gen9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
+ *
+ * Both equations share some common elements:
+ *
+ * no_hiz_op =
+ * !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
+ * 3DSTATE_WM_HZ_OP::DepthBufferResolve ||
+ * 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
+ * 3DSTATE_WM_HZ_OP::StencilBufferClear) &&
+ *
+ * killpixels =
+ * 3DSTATE_WM::ForceKillPix != ForceOff &&
+ * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
+ * 3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
+ * 3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
+ * 3DSTATE_PS_BLEND::AlphaTestEnable ||
+ * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
+ *
+ * (Technically the stencil PMA treats ForceKillPix differently,
+ * but I think this is a documentation oversight, and we don't
+ * ever use it in this way, so it doesn't matter).
+ *
+ * common_pma_fix =
+ * 3DSTATE_WM::ForceThreadDispatch != 1 &&
+ * 3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
+ * 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
+ * 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
+ * 3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
+ * 3DSTATE_PS_EXTRA::PixelShaderValid &&
+ * no_hiz_op
+ *
+ * These are always true:
+ *
+ * 3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
+ * 3DSTATE_PS_EXTRA::PixelShaderValid
+ *
+ * Also, we never use the normal drawing path for HiZ ops; these are true:
+ *
+ * !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
+ * 3DSTATE_WM_HZ_OP::DepthBufferResolve ||
+ * 3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
+ * 3DSTATE_WM_HZ_OP::StencilBufferClear)
+ *
+ * This happens sometimes:
+ *
+ * 3DSTATE_WM::ForceThreadDispatch != 1
+ *
+ * However, we choose to ignore it as it either agrees with the signal
+ * (dispatch was already enabled, so nothing out of the ordinary), or
+ * there are no framebuffer attachments (so no depth or HiZ anyway,
+ * meaning the PMA signal will already be disabled).
+ */
+
+ if (!cso_fb->zsbuf)
+ return false;
+
+ struct iris_resource *zres, *sres;
+ iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);
+
+ /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
+ * 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
+ */
+ if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
+ return false;
+
+ /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
+ if (wm_prog_data->early_fragment_tests)
+ return false;
+
+ /* 3DSTATE_WM::ForceKillPix != ForceOff &&
+ * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
+ * 3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
+ * 3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
+ * 3DSTATE_PS_BLEND::AlphaTestEnable ||
+ * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
+ */
+ bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
+ cso_blend->alpha_to_coverage || cso_zsa->alpha.enabled;
+
+ /* The Gen8 depth PMA equation becomes:
+ *
+ * depth_writes =
+ * 3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
+ * 3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
+ *
+ * stencil_writes =
+ * 3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
+ * 3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
+ * 3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
+ *
+ * Z_PMA_OPT =
+ * common_pma_fix &&
+ * 3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
+ * ((killpixels && (depth_writes || stencil_writes)) ||
+ * 3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
+ *
+ */
+ if (!cso_zsa->depth_test_enabled)
+ return false;
+
+ return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
+ (killpixels && (cso_zsa->depth_writes_enabled ||
+ (sres && cso_zsa->stencil_writes_enabled)));
+}
+#endif
+
+void
+genX(update_pma_fix)(struct iris_context *ice,
+ struct iris_batch *batch,
+ bool enable)
+{
+#if GEN_GEN == 8
+ struct iris_genx_state *genx = ice->state.genx;
+
+ if (genx->pma_fix_enabled == enable)
+ return;
+
+ genx->pma_fix_enabled = enable;
+
+ /* According to the Broadwell PIPE_CONTROL documentation, software should
+ * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
+ * prior to the LRI. If stencil buffer writes are enabled, then a Render
+ * Cache Flush is also necessary.
+ *
+ * The Gen9 docs say to use a depth stall rather than a command streamer
+ * stall. However, the hardware seems to violently disagree. A full
+ * command streamer stall seems to be needed in both cases.
+ */
+ iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_FLUSH);
+
+ uint32_t reg_val;
+ iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
+ reg.NPPMAFixEnable = enable;
+ reg.NPEarlyZFailsDisable = enable;
+ reg.NPPMAFixEnableMask = true;
+ reg.NPEarlyZFailsDisableMask = true;
+ }
+ iris_emit_lri(batch, CACHE_MODE_1, reg_val);
+
+ /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
+ * Flush bits is often necessary. We do it regardless because it's easier.
+ * The render cache flush is also necessary if stencil writes are enabled.
+ *
+ * Again, the Gen9 docs give a different set of flushes but the Broadwell
+ * flushes seem to work just as well.
+ */
+ iris_emit_pipe_control_flush(batch, "PMA fix change (2/2)",
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_RENDER_TARGET_FLUSH);
+#endif
}
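A hedged sketch of the expected call site, not shown in this hunk: the IRIS_DIRTY_PMA_FIX flag raised by the bind/framebuffer hooks above is assumed to be consumed during draw-time state upload, with want_pma_fix() re-evaluating the equations and genX(update_pma_fix) early-returning when nothing changed.

   /* Sketch only, Gen8 path: toggle the CACHE_MODE_1 bits only when a
    * dependency (blend, depth/stencil/alpha, framebuffer, FS) was flagged.
    */
   if (ice->state.dirty & IRIS_DIRTY_PMA_FIX)
      genX(update_pma_fix)(ice, batch, want_pma_fix(ice));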
/**
@@ -1366,9 +1775,11 @@ iris_create_rasterizer_state(struct pipe_context *ctx,
const unsigned line_stipple_factor = state->line_stipple_factor + 1;
iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
- line.LineStipplePattern = state->line_stipple_pattern;
- line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
- line.LineStippleRepeatCount = line_stipple_factor;
+ if (state->line_stipple_enable) {
+ line.LineStipplePattern = state->line_stipple_pattern;
+ line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
+ line.LineStippleRepeatCount = line_stipple_factor;
+ }
}
return cso;
@@ -1551,11 +1962,17 @@ iris_bind_sampler_states(struct pipe_context *ctx,
assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
+ bool dirty = false;
+
for (int i = 0; i < count; i++) {
- shs->samplers[start + i] = states[i];
+ if (shs->samplers[start + i] != states[i]) {
+ shs->samplers[start + i] = states[i];
+ dirty = true;
+ }
}
- ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
+ if (dirty)
+ ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
}
/**
@@ -1589,10 +2006,12 @@ iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
return;
struct pipe_resource *res = shs->sampler_table.res;
- shs->sampler_table.offset +=
- iris_bo_offset_from_base_address(iris_resource_bo(res));
+ struct iris_bo *bo = iris_resource_bo(res);
+
+ iris_record_state_size(ice->state.sizes,
+ bo->gtt_offset + shs->sampler_table.offset, size);
- iris_record_state_size(ice->state.sizes, shs->sampler_table.offset, size);
+ shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);
/* Make sure all land in the same BO */
iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
@@ -1707,18 +2126,18 @@ fill_buffer_surface_state(struct isl_device *isl_dev,
.format = format,
.swizzle = swizzle,
.stride_B = cpp,
- .mocs = mocs(res->bo));
+ .mocs = iris_mocs(res->bo, isl_dev));
}
#define SURFACE_STATE_ALIGNMENT 64
/**
* Allocate several contiguous SURFACE_STATE structures, one for each
- * supported auxiliary surface mode.
+ * supported auxiliary surface mode. This only allocates the CPU-side
+ * copy; the states still need to be uploaded later, after they're filled in.
*/
-static void *
-alloc_surface_states(struct u_upload_mgr *mgr,
- struct iris_state_ref *ref,
+static void
+alloc_surface_states(struct iris_surface_state *surf_state,
unsigned aux_usages)
{
const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
@@ -1728,13 +2147,68 @@ alloc_surface_states(struct u_upload_mgr *mgr,
assert(aux_usages != 0);
+ /* In case we're re-allocating them... */
+ free(surf_state->cpu);
+
+ surf_state->num_states = util_bitcount(aux_usages);
+ surf_state->cpu = calloc(surf_state->num_states, surf_size);
+ surf_state->ref.offset = 0;
+ pipe_resource_reference(&surf_state->ref.res, NULL);
+
+ assert(surf_state->cpu);
+}
+
+/**
+ * Upload the CPU side SURFACE_STATEs into a GPU buffer.
+ */
+static void
+upload_surface_states(struct u_upload_mgr *mgr,
+ struct iris_surface_state *surf_state)
+{
+ const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
+ const unsigned bytes = surf_state->num_states * surf_size;
+
void *map =
- upload_state(mgr, ref, util_bitcount(aux_usages) * surf_size,
- SURFACE_STATE_ALIGNMENT);
+ upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT);
- ref->offset += iris_bo_offset_from_base_address(iris_resource_bo(ref->res));
+ surf_state->ref.offset +=
+ iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res));
- return map;
+ if (map)
+ memcpy(map, surf_state->cpu, bytes);
+}
+
+/**
+ * Update resource addresses in a set of SURFACE_STATE descriptors,
+ * and re-upload them if necessary.
+ */
+static bool
+update_surface_state_addrs(struct u_upload_mgr *mgr,
+ struct iris_surface_state *surf_state,
+ struct iris_bo *bo)
+{
+ if (surf_state->bo_address == bo->gtt_offset)
+ return false;
+
+ STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
+ STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64);
+
+ uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32];
+
+ /* First, update the CPU copies. We assume no other fields exist in
+ * the QWord containing Surface Base Address.
+ */
+ for (unsigned i = 0; i < surf_state->num_states; i++) {
+ *ss_addr = *ss_addr - surf_state->bo_address + bo->gtt_offset;
+ ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
+ }
+
+ /* Next, upload the updated copies to a GPU buffer. */
+ upload_surface_states(mgr, surf_state);
+
+ surf_state->bo_address = bo->gtt_offset;
+
+ return true;
}
#if GEN_GEN == 8
@@ -1750,11 +2224,11 @@ get_rt_read_isl_surf(const struct gen_device_info *devinfo,
struct iris_resource *res,
enum pipe_texture_target target,
struct isl_view *view,
+ uint32_t *offset_to_tile,
uint32_t *tile_x_sa,
uint32_t *tile_y_sa,
struct isl_surf *surf)
{
-
*surf = res->surf;
const enum isl_dim_layout dim_layout =
@@ -1777,9 +2251,9 @@ get_rt_read_isl_surf(const struct gen_device_info *devinfo,
assert(view->levels == 1 && view->array_len == 1);
assert(*tile_x_sa == 0 && *tile_y_sa == 0);
- res->offset += iris_resource_get_tile_offsets(res, view->base_level,
- view->base_array_layer,
- tile_x_sa, tile_y_sa);
+ *offset_to_tile = iris_resource_get_tile_offsets(res, view->base_level,
+ view->base_array_layer,
+ tile_x_sa, tile_y_sa);
const unsigned l = view->base_level;
surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
@@ -1804,14 +2278,15 @@ fill_surface_state(struct isl_device *isl_dev,
struct isl_surf *surf,
struct isl_view *view,
unsigned aux_usage,
+ uint32_t extra_main_offset,
uint32_t tile_x_sa,
uint32_t tile_y_sa)
{
struct isl_surf_fill_state_info f = {
.surf = surf,
.view = view,
- .mocs = mocs(res->bo),
- .address = res->bo->gtt_offset + res->offset,
+ .mocs = iris_mocs(res->bo, isl_dev),
+ .address = res->bo->gtt_offset + res->offset + extra_main_offset,
.x_offset_sa = tile_x_sa,
.y_offset_sa = tile_y_sa,
};
@@ -1871,11 +2346,9 @@ iris_create_sampler_view(struct pipe_context *ctx,
isv->res = (struct iris_resource *) tex;
- void *map = alloc_surface_states(ice->state.surface_uploader,
- &isv->surface_state,
- isv->res->aux.sampler_usages);
- if (!unlikely(map))
- return NULL;
+ alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages);
+
+ isv->surface_state.bo_address = isv->res->bo->gtt_offset;
isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
@@ -1899,6 +2372,8 @@ iris_create_sampler_view(struct pipe_context *ctx,
.usage = usage,
};
+ void *map = isv->surface_state.cpu;
+
/* Fill out SURFACE_STATE for this view. */
if (tmpl->target != PIPE_BUFFER) {
isv->view.base_level = tmpl->u.tex.first_level;
@@ -1919,7 +2394,7 @@ iris_create_sampler_view(struct pipe_context *ctx,
* surface state with HiZ.
*/
fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf,
- &isv->view, aux_usage, 0, 0);
+ &isv->view, aux_usage, 0, 0, 0);
map += SURFACE_STATE_ALIGNMENT;
}
@@ -1929,6 +2404,8 @@ iris_create_sampler_view(struct pipe_context *ctx,
tmpl->u.buf.offset, tmpl->u.buf.size);
}
+ upload_surface_states(ice->state.surface_uploader, &isv->surface_state);
+
return &isv->base;
}
@@ -1938,7 +2415,8 @@ iris_sampler_view_destroy(struct pipe_context *ctx,
{
struct iris_sampler_view *isv = (void *) state;
pipe_resource_reference(&state->texture, NULL);
- pipe_resource_reference(&isv->surface_state.res, NULL);
+ pipe_resource_reference(&isv->surface_state.ref.res, NULL);
+ free(isv->surface_state.cpu);
free(isv);
}
@@ -2034,56 +2512,49 @@ iris_create_surface(struct pipe_context *ctx,
return psurf;
- void *map = alloc_surface_states(ice->state.surface_uploader,
- &surf->surface_state,
- res->aux.possible_usages);
- if (!unlikely(map)) {
- pipe_resource_reference(&surf->surface_state.res, NULL);
- return NULL;
- }
+ alloc_surface_states(&surf->surface_state, res->aux.possible_usages);
+ surf->surface_state.bo_address = res->bo->gtt_offset;
#if GEN_GEN == 8
- void *map_read = alloc_surface_states(ice->state.surface_uploader,
- &surf->surface_state_read,
- res->aux.possible_usages);
- if (!unlikely(map_read)) {
- pipe_resource_reference(&surf->surface_state_read.res, NULL);
- return NULL;
- }
+ alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages);
+ surf->surface_state_read.bo_address = res->bo->gtt_offset;
#endif
if (!isl_format_is_compressed(res->surf.format)) {
if (iris_resource_unfinished_aux_import(res))
iris_resource_finish_aux_import(&screen->base, res);
+ void *map = surf->surface_state.cpu;
+ UNUSED void *map_read = surf->surface_state_read.cpu;
+
/* This is a normal surface. Fill out a SURFACE_STATE for each possible
* auxiliary surface mode and return the pipe_surface.
*/
unsigned aux_modes = res->aux.possible_usages;
while (aux_modes) {
-#if GEN_GEN == 8
- uint32_t offset = res->offset;
-#endif
enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
fill_surface_state(&screen->isl_dev, map, res, &res->surf,
- view, aux_usage, 0, 0);
+ view, aux_usage, 0, 0, 0);
map += SURFACE_STATE_ALIGNMENT;
#if GEN_GEN == 8
struct isl_surf surf;
- uint32_t tile_x_sa = 0, tile_y_sa = 0;
+ uint32_t offset_to_tile = 0, tile_x_sa = 0, tile_y_sa = 0;
get_rt_read_isl_surf(devinfo, res, target, read_view,
- &tile_x_sa, &tile_y_sa, &surf);
+ &offset_to_tile, &tile_x_sa, &tile_y_sa, &surf);
fill_surface_state(&screen->isl_dev, map_read, res, &surf, read_view,
- aux_usage, tile_x_sa, tile_y_sa);
- /* Restore offset because we change offset in case of handling
- * non_coherent fb fetch
- */
- res->offset = offset;
+ aux_usage, offset_to_tile, tile_x_sa, tile_y_sa);
map_read += SURFACE_STATE_ALIGNMENT;
#endif
}
+ upload_surface_states(ice->state.surface_uploader, &surf->surface_state);
+
+#if GEN_GEN == 8
+ upload_surface_states(ice->state.surface_uploader,
+ &surf->surface_state_read);
+#endif
+
return psurf;
}
@@ -2156,13 +2627,16 @@ iris_create_surface(struct pipe_context *ctx,
struct isl_surf_fill_state_info f = {
.surf = &isl_surf,
.view = view,
- .mocs = mocs(res->bo),
+ .mocs = iris_mocs(res->bo, &screen->isl_dev),
.address = res->bo->gtt_offset + offset_B,
.x_offset_sa = tile_x_sa,
.y_offset_sa = tile_y_sa,
};
- isl_surf_fill_state_s(&screen->isl_dev, map, &f);
+ isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f);
+
+ upload_surface_states(ice->state.surface_uploader, &surf->surface_state);
+
return psurf;
}
@@ -2207,7 +2681,6 @@ iris_set_shader_images(struct pipe_context *ctx,
{
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
- const struct gen_device_info *devinfo = &screen->devinfo;
gl_shader_stage stage = stage_from_pipe(p_stage);
struct iris_shader_state *shs = &ice->state.shaders[stage];
#if GEN_GEN == 8
@@ -2224,37 +2697,23 @@ iris_set_shader_images(struct pipe_context *ctx,
const struct pipe_image_view *img = &p_images[i];
struct iris_resource *res = (void *) img->resource;
- void *map =
- alloc_surface_states(ice->state.surface_uploader,
- &iv->surface_state, 1 << ISL_AUX_USAGE_NONE);
- if (!unlikely(map))
- return;
-
util_copy_image_view(&iv->base, img);
shs->bound_image_views |= 1 << (start_slot + i);
res->bind_history |= PIPE_BIND_SHADER_IMAGE;
+ res->bind_stages |= 1 << stage;
- isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
- enum isl_format isl_fmt =
- iris_format_for_usage(devinfo, img->format, usage).fmt;
+ enum isl_format isl_fmt = iris_image_view_get_format(ice, img);
- bool untyped_fallback = false;
+ /* Render compression with images is supported on gen12+ only. */
+ unsigned aux_usages = GEN_GEN >= 12 ? res->aux.possible_usages :
+ 1 << ISL_AUX_USAGE_NONE;
- if (img->shader_access & PIPE_IMAGE_ACCESS_READ) {
- /* On Gen8, try to use typed surfaces reads (which support a
- * limited number of formats), and if not possible, fall back
- * to untyped reads.
- */
- untyped_fallback = GEN_GEN == 8 &&
- !isl_has_matching_typed_storage_image_format(devinfo, isl_fmt);
+ alloc_surface_states(&iv->surface_state, aux_usages);
+ iv->surface_state.bo_address = res->bo->gtt_offset;
- if (untyped_fallback)
- isl_fmt = ISL_FORMAT_RAW;
- else
- isl_fmt = isl_lower_storage_image_format(devinfo, isl_fmt);
- }
+ void *map = iv->surface_state.cpu;
if (res->base.target != PIPE_BUFFER) {
struct isl_view view = {
@@ -2264,21 +2723,21 @@ iris_set_shader_images(struct pipe_context *ctx,
.base_array_layer = img->u.tex.first_layer,
.array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
.swizzle = ISL_SWIZZLE_IDENTITY,
- .usage = usage,
+ .usage = ISL_SURF_USAGE_STORAGE_BIT,
};
- if (untyped_fallback) {
+ /* If using untyped fallback. */
+ if (isl_fmt == ISL_FORMAT_RAW) {
fill_buffer_surface_state(&screen->isl_dev, res, map,
isl_fmt, ISL_SWIZZLE_IDENTITY,
0, res->bo->size);
} else {
- /* Images don't support compression */
- unsigned aux_modes = 1 << ISL_AUX_USAGE_NONE;
+ unsigned aux_modes = aux_usages;
while (aux_modes) {
enum isl_aux_usage usage = u_bit_scan(&aux_modes);
fill_surface_state(&screen->isl_dev, map, res, &res->surf,
- &view, usage, 0, 0);
+ &view, usage, 0, 0, 0);
map += SURFACE_STATE_ALIGNMENT;
}
@@ -2288,7 +2747,7 @@ iris_set_shader_images(struct pipe_context *ctx,
&image_params[start_slot + i],
&res->surf, &view);
} else {
- util_range_add(&res->valid_buffer_range, img->u.buf.offset,
+ util_range_add(&res->base, &res->valid_buffer_range, img->u.buf.offset,
img->u.buf.offset + img->u.buf.size);
fill_buffer_surface_state(&screen->isl_dev, res, map,
@@ -2297,9 +2756,11 @@ iris_set_shader_images(struct pipe_context *ctx,
fill_buffer_image_param(&image_params[start_slot + i],
img->format, img->u.buf.size);
}
+
+ upload_surface_states(ice->state.surface_uploader, &iv->surface_state);
} else {
pipe_resource_reference(&iv->base.resource, NULL);
- pipe_resource_reference(&iv->surface_state.res, NULL);
+ pipe_resource_reference(&iv->surface_state.ref.res, NULL);
fill_default_image_param(&image_params[start_slot + i]);
}
}
@@ -2339,7 +2800,12 @@ iris_set_sampler_views(struct pipe_context *ctx,
struct iris_sampler_view *view = (void *) pview;
if (view) {
view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
+ view->res->bind_stages |= 1 << stage;
+
shs->bound_sampler_views |= 1 << (start + i);
+
+ update_surface_state_addrs(ice->state.surface_uploader,
+ &view->surface_state, view->res->bo);
}
}
@@ -2372,8 +2838,9 @@ iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
{
struct iris_surface *surf = (void *) p_surf;
pipe_resource_reference(&p_surf->texture, NULL);
- pipe_resource_reference(&surf->surface_state.res, NULL);
- pipe_resource_reference(&surf->surface_state_read.res, NULL);
+ pipe_resource_reference(&surf->surface_state.ref.res, NULL);
+ pipe_resource_reference(&surf->surface_state_read.ref.res, NULL);
+ free(surf->surface_state.cpu);
free(surf);
}
@@ -2579,12 +3046,12 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
info.depth_surf = &zres->surf;
info.depth_address = zres->bo->gtt_offset + zres->offset;
- info.mocs = mocs(zres->bo);
+ info.mocs = iris_mocs(zres->bo, isl_dev);
view.format = zres->surf.format;
if (iris_resource_level_has_hiz(zres, view.base_level)) {
- info.hiz_usage = ISL_AUX_USAGE_HIZ;
+ info.hiz_usage = zres->aux.usage;
info.hiz_surf = &zres->aux.surf;
info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset;
}
@@ -2592,11 +3059,12 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
if (stencil_res) {
view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
+ info.stencil_aux_usage = stencil_res->aux.usage;
info.stencil_surf = &stencil_res->surf;
info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset;
if (!zres) {
view.format = stencil_res->surf.format;
- info.mocs = mocs(stencil_res->bo);
+ info.mocs = iris_mocs(stencil_res->bo, isl_dev);
}
}
}
@@ -2617,28 +3085,14 @@ iris_set_framebuffer_state(struct pipe_context *ctx,
/* Render target change */
ice->state.dirty |= IRIS_DIRTY_BINDINGS_FS;
+ ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
+
ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
ice->state.dirty |= ice->state.dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
-#if GEN_GEN == 11
- // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
- // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
-
- /* The PIPE_CONTROL command description says:
- *
- * "Whenever a Binding Table Index (BTI) used by a Render Target Message
- * points to a different RENDER_SURFACE_STATE, SW must issue a Render
- * Target Cache Flush by enabling this bit. When render target flush
- * is set due to new association of BTI, PS Scoreboard Stall bit must
- * be set in this packet."
- */
- // XXX: does this need to happen at 3DSTATE_BTP_PS time?
- iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
- "workaround: RT BTI change [draw]",
- PIPE_CONTROL_RENDER_TARGET_FLUSH |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
-#endif
+ if (GEN_GEN == 8)
+ ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
}
/**
@@ -2657,6 +3111,9 @@ iris_set_constant_buffer(struct pipe_context *ctx,
struct iris_shader_state *shs = &ice->state.shaders[stage];
struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
+ /* TODO: Only do this if the buffer changes? */
+ pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
+
if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
shs->bound_cbufs |= 1u << index;
@@ -2686,21 +3143,13 @@ iris_set_constant_buffer(struct pipe_context *ctx,
struct iris_resource *res = (void *) cbuf->buffer;
res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
-
- iris_upload_ubo_ssbo_surf_state(ice, cbuf,
- &shs->constbuf_surf_state[index],
- false);
+ res->bind_stages |= 1 << stage;
} else {
shs->bound_cbufs &= ~(1u << index);
pipe_resource_reference(&cbuf->buffer, NULL);
- pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
}
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;
- // XXX: maybe not necessary all the time...?
- // XXX: we need 3DS_BTP to commit these changes, and if we fell back to
- // XXX: pull model we may need actual new bindings...
- ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
}
static void
@@ -2818,8 +3267,9 @@ iris_set_shader_buffers(struct pipe_context *ctx,
iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, true);
res->bind_history |= PIPE_BIND_SHADER_BUFFER;
+ res->bind_stages |= 1 << stage;
- util_range_add(&res->valid_buffer_range, ssbo->buffer_offset,
+ util_range_add(&res->base, &res->valid_buffer_range, ssbo->buffer_offset,
ssbo->buffer_offset + ssbo->buffer_size);
} else {
pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
@@ -2848,6 +3298,7 @@ iris_set_vertex_buffers(struct pipe_context *ctx,
const struct pipe_vertex_buffer *buffers)
{
struct iris_context *ice = (struct iris_context *) ctx;
+ struct iris_screen *screen = (struct iris_screen *)ctx->screen;
struct iris_genx_state *genx = ice->state.genx;
ice->state.bound_vertex_buffers &= ~u_bit_consecutive64(start_slot, count);
@@ -2880,10 +3331,10 @@ iris_set_vertex_buffers(struct pipe_context *ctx,
vb.AddressModifyEnable = true;
vb.BufferPitch = buffer->stride;
if (res) {
- vb.BufferSize = res->bo->size - (int) buffer->buffer_offset;
+ vb.BufferSize = res->base.width0 - (int) buffer->buffer_offset;
vb.BufferStartingAddress =
ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset);
- vb.MOCS = mocs(res->bo);
+ vb.MOCS = iris_mocs(res->bo, &screen->isl_dev);
} else {
vb.NullVertexBuffer = true;
}
@@ -3063,7 +3514,7 @@ iris_create_stream_output_target(struct pipe_context *ctx,
cso->base.buffer_size = buffer_size;
cso->base.context = ctx;
- util_range_add(&res->valid_buffer_range, buffer_offset,
+ util_range_add(&res->base, &res->valid_buffer_range, buffer_offset,
buffer_offset + buffer_size);
upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4);
@@ -3099,6 +3550,7 @@ iris_set_stream_output_targets(struct pipe_context *ctx,
struct iris_context *ice = (struct iris_context *) ctx;
struct iris_genx_state *genx = ice->state.genx;
uint32_t *so_buffers = genx->so_buffers;
+ struct iris_screen *screen = (struct iris_screen *)ctx->screen;
const bool active = num_targets > 0;
if (ice->state.streamout_active != active) {
@@ -3145,8 +3597,14 @@ iris_set_stream_output_targets(struct pipe_context *ctx,
unsigned offset = offsets[i];
if (!tgt) {
- iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob)
+ iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
+#if GEN_GEN < 12
sob.SOBufferIndex = i;
+#else
+ sob._3DCommandOpcode = 0;
+ sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
+#endif
+ }
continue;
}
@@ -3167,16 +3625,20 @@ iris_set_stream_output_targets(struct pipe_context *ctx,
offset = 0;
iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
+#if GEN_GEN < 12
+ sob.SOBufferIndex = i;
+#else
+ sob._3DCommandOpcode = 0;
+ sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
+#endif
sob.SurfaceBaseAddress =
rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset);
sob.SOBufferEnable = true;
sob.StreamOffsetWriteEnable = true;
sob.StreamOutputBufferOffsetAddressEnable = true;
- sob.MOCS = mocs(res->bo);
+ sob.MOCS = iris_mocs(res->bo, &screen->isl_dev);
sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
-
- sob.SOBufferIndex = i;
sob.StreamOffset = offset;
sob.StreamOutputBufferOffsetAddress =
rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
@@ -3396,7 +3858,8 @@ iris_emit_sbe_swiz(struct iris_batch *batch,
/* XXX: this should be generated when putting programs in place */
- for (int fs_attr = 0; fs_attr < VARYING_SLOT_MAX; fs_attr++) {
+ for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
+ const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[idx];
const int input_index = wm_prog_data->urb_setup[fs_attr];
if (input_index < 0 || input_index >= 16)
continue;
@@ -3548,14 +4011,14 @@ static void
iris_populate_vs_key(const struct iris_context *ice,
const struct shader_info *info,
gl_shader_stage last_stage,
- struct brw_vs_prog_key *key)
+ struct iris_vs_prog_key *key)
{
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
if (info->clip_distance_array_size == 0 &&
(info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
last_stage == MESA_SHADER_VERTEX)
- key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
+ key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
@@ -3563,7 +4026,7 @@ iris_populate_vs_key(const struct iris_context *ice,
*/
static void
iris_populate_tcs_key(const struct iris_context *ice,
- struct brw_tcs_prog_key *key)
+ struct iris_tcs_prog_key *key)
{
}
@@ -3574,14 +4037,14 @@ static void
iris_populate_tes_key(const struct iris_context *ice,
const struct shader_info *info,
gl_shader_stage last_stage,
- struct brw_tes_prog_key *key)
+ struct iris_tes_prog_key *key)
{
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
if (info->clip_distance_array_size == 0 &&
(info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
last_stage == MESA_SHADER_TESS_EVAL)
- key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
+ key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
@@ -3591,14 +4054,14 @@ static void
iris_populate_gs_key(const struct iris_context *ice,
const struct shader_info *info,
gl_shader_stage last_stage,
- struct brw_gs_prog_key *key)
+ struct iris_gs_prog_key *key)
{
const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
if (info->clip_distance_array_size == 0 &&
(info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
last_stage == MESA_SHADER_GEOMETRY)
- key->nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
+ key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
}
/**
@@ -3607,7 +4070,7 @@ iris_populate_gs_key(const struct iris_context *ice,
static void
iris_populate_fs_key(const struct iris_context *ice,
const struct shader_info *info,
- struct brw_wm_prog_key *key)
+ struct iris_fs_prog_key *key)
{
struct iris_screen *screen = (void *) ice->ctx.screen;
const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
@@ -3640,7 +4103,7 @@ iris_populate_fs_key(const struct iris_context *ice,
static void
iris_populate_cs_key(const struct iris_context *ice,
- struct brw_cs_prog_key *key)
+ struct iris_cs_prog_key *key)
{
}
@@ -3651,17 +4114,9 @@ KSP(const struct iris_compiled_shader *shader)
return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
}
-/* Gen11 workaround table #2056 WABTPPrefetchDisable suggests to disable
- * prefetching of binding tables in A0 and B0 steppings. XXX: Revisit
- * this WA on C0 stepping.
- *
- * TODO: Fill out SamplerCount for prefetching?
- */
-
#define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage) \
pkt.KernelStartPointer = KSP(shader); \
- pkt.BindingTableEntryCount = GEN_GEN == 11 ? 0 : \
- shader->bt.size_bytes / 4; \
+ pkt.BindingTableEntryCount = shader->bt.size_bytes / 4; \
pkt.FloatingPointMode = prog_data->use_alt_mode; \
\
pkt.DispatchGRFStartRegisterForURBData = \
@@ -3715,10 +4170,29 @@ iris_store_tcs_state(struct iris_context *ice,
iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
+#if GEN_GEN >= 12
+ /* GEN:BUG:1604578095:
+ *
+ * Hang occurs when the number of max threads is less than 2 times
+ * the number of instance count. The number of max threads must be
+ * more than 2 times the number of instance count.
+ */
+ assert((devinfo->max_tcs_threads / 2) > tcs_prog_data->instances);
+ hs.DispatchGRFStartRegisterForURBData = prog_data->dispatch_grf_start_reg & 0x1f;
+ hs.DispatchGRFStartRegisterForURBData5 = prog_data->dispatch_grf_start_reg >> 5;
+#endif
+
hs.InstanceCount = tcs_prog_data->instances - 1;
hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
hs.IncludeVertexHandles = true;
+#if GEN_GEN == 12
+ /* Patch Count threshold specifies the maximum number of patches that
+ * will be accumulated before a thread dispatch is forced.
+ */
+ hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
+#endif
+
#if GEN_GEN >= 9
hs.DispatchMode = vue_prog_data->dispatch_mode;
hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
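The Gen12 block above splits the GRF start across DispatchGRFStartRegisterForURBData and ...URBData5 because the value can now exceed the 5-bit field. As a worked example, assuming dispatch_grf_start_reg = 37: the low field gets 37 & 0x1f = 5, the high field gets 37 >> 5 = 1, and hardware reconstructs (1 << 5) + 5 = 37.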
@@ -3828,9 +4302,7 @@ iris_store_fs_state(struct iris_context *ice,
iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
ps.VectorMaskEnable = true;
- // XXX: WABTPPrefetchDisable, see above, drop at C0
- ps.BindingTableEntryCount = GEN_GEN == 11 ? 0 :
- shader->bt.size_bytes / 4;
+ ps.BindingTableEntryCount = shader->bt.size_bytes / 4;
ps.FloatingPointMode = prog_data->use_alt_mode;
ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
@@ -3896,12 +4368,23 @@ iris_store_cs_state(struct iris_context *ice,
iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
desc.KernelStartPointer = KSP(shader);
desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
- desc.NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads;
desc.SharedLocalMemorySize =
encode_slm_size(GEN_GEN, prog_data->total_shared);
desc.BarrierEnable = cs_prog_data->uses_barrier;
desc.CrossThreadConstantDataReadLength =
cs_prog_data->push.cross_thread.regs;
+#if GEN_GEN >= 12
+ /* TODO: Check if we are missing workarounds and enable mid-thread
+ * preemption.
+ *
+ * We still have issues with mid-thread preemption (it was already
+ * disabled by the kernel on gen11, due to missing workarounds). It's
+ * possible that we are just missing some workarounds, and could enable
+ * it later, but for now let's disable it to fix a GPU hang in compute in
+ * Car Chase (and possibly more).
+ */
+ desc.ThreadPreemptionDisable = true;
+#endif
}
}
@@ -4007,6 +4490,7 @@ surf_state_offset_for_aux(struct iris_resource *res,
util_bitcount(aux_modes & ((1 << aux_usage) - 1));
}
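For context, surf_state_offset_for_aux() (whose tail is visible above) picks one of the contiguous SURFACE_STATE copies allocated per aux mode. A worked example, assuming each copy is SURFACE_STATE_ALIGNMENT (64) bytes apart as in alloc_surface_states(): with aux modes NONE and CCS_E possible, asking for CCS_E counts one lower-numbered mode set, so the returned offset is 1 * 64 = 64 bytes past the first state.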
+#if GEN_GEN == 9
static void
surf_state_update_clear_value(struct iris_batch *batch,
struct iris_resource *res,
@@ -4047,12 +4531,13 @@ surf_state_update_clear_value(struct iris_batch *batch,
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_STATE_CACHE_INVALIDATE);
}
+#endif
static void
update_clear_value(struct iris_context *ice,
struct iris_batch *batch,
struct iris_resource *res,
- struct iris_state_ref *state,
+ struct iris_surface_state *surf_state,
unsigned all_aux_modes,
struct isl_view *view)
{
@@ -4069,19 +4554,23 @@ update_clear_value(struct iris_context *ice,
while (aux_modes) {
enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
- surf_state_update_clear_value(batch, res, state, all_aux_modes,
- aux_usage);
+ surf_state_update_clear_value(batch, res, &surf_state->ref,
+ all_aux_modes, aux_usage);
}
#elif GEN_GEN == 8
- pipe_resource_reference(&state->res, NULL);
+ /* TODO: Could update rather than re-filling */
+ alloc_surface_states(surf_state, all_aux_modes);
+
+ void *map = surf_state->cpu;
- void *map = alloc_surface_states(ice->state.surface_uploader,
- state, all_aux_modes);
while (aux_modes) {
enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
- fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage, 0, 0);
+ fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage,
+ 0, 0, 0);
map += SURFACE_STATE_ALIGNMENT;
}
+
+ upload_surface_states(ice->state.surface_uploader, surf_state);
#endif
}
@@ -4105,9 +4594,9 @@ use_surface(struct iris_context *ice,
iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture), writeable);
if (GEN_GEN == 8 && is_read_surface) {
- iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.res), false);
+ iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.ref.res), false);
} else {
- iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.res), false);
+ iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.ref.res), false);
}
if (res->aux.bo) {
@@ -4127,8 +4616,9 @@ use_surface(struct iris_context *ice,
}
}
- offset = (GEN_GEN == 8 && is_read_surface) ? surf->surface_state_read.offset
- : surf->surface_state.offset;
+ offset = (GEN_GEN == 8 && is_read_surface)
+ ? surf->surface_state_read.ref.offset
+ : surf->surface_state.ref.offset;
return offset +
surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
@@ -4139,12 +4629,11 @@ use_sampler_view(struct iris_context *ice,
struct iris_batch *batch,
struct iris_sampler_view *isv)
{
- // XXX: ASTC hacks
enum isl_aux_usage aux_usage =
- iris_resource_texture_aux_usage(ice, isv->res, isv->view.format, 0);
+ iris_resource_texture_aux_usage(ice, isv->res, isv->view.format);
iris_use_pinned_bo(batch, isv->res->bo, false);
- iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.res), false);
+ iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.ref.res), false);
if (isv->res->aux.bo) {
iris_use_pinned_bo(batch, isv->res->aux.bo, false);
@@ -4158,7 +4647,7 @@ use_sampler_view(struct iris_context *ice,
}
}
- return isv->surface_state.offset +
+ return isv->surface_state.ref.offset +
surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages,
aux_usage);
}
@@ -4170,7 +4659,7 @@ use_ubo_ssbo(struct iris_batch *batch,
struct iris_state_ref *surf_state,
bool writable)
{
- if (!buf->buffer)
+ if (!buf->buffer || !surf_state->res)
return use_null_surface(batch, ice);
iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable);
@@ -4181,7 +4670,8 @@ use_ubo_ssbo(struct iris_batch *batch,
static uint32_t
use_image(struct iris_batch *batch, struct iris_context *ice,
- struct iris_shader_state *shs, int i)
+ struct iris_shader_state *shs, const struct shader_info *info,
+ int i)
{
struct iris_image_view *iv = &shs->image[i];
struct iris_resource *res = (void *) iv->base.resource;
@@ -4192,12 +4682,16 @@ use_image(struct iris_batch *batch, struct iris_context *ice,
bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;
iris_use_pinned_bo(batch, res->bo, write);
- iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.res), false);
+ iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.ref.res), false);
if (res->aux.bo)
iris_use_pinned_bo(batch, res->aux.bo, write);
- return iv->surface_state.offset;
+ enum isl_aux_usage aux_usage =
+ iris_image_view_aux_usage(ice, &iv->base, info);
+
+ return iv->surface_state.ref.offset +
+ surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
}
#define push_bt_entry(addr) \
@@ -4267,7 +4761,7 @@ iris_populate_binding_table(struct iris_context *ice,
}
push_bt_entry(addr);
}
- } else {
+ } else if (GEN_GEN < 11) {
uint32_t addr = use_null_fb_surface(batch, ice);
push_bt_entry(addr);
}
@@ -4297,7 +4791,7 @@ iris_populate_binding_table(struct iris_context *ice,
}
foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
- uint32_t addr = use_image(batch, ice, shs, i);
+ uint32_t addr = use_image(batch, ice, shs, info, i);
push_bt_entry(addr);
}
@@ -4570,14 +5064,49 @@ iris_update_surface_base_address(struct iris_batch *batch,
if (batch->last_surface_base_address == binder->bo->gtt_offset)
return;
- flush_for_state_base_change(batch);
+ uint32_t mocs = batch->screen->isl_dev.mocs.internal;
+
+ flush_before_state_base_change(batch);
+
+#if GEN_GEN == 12
+ /* GEN:BUG:1607854226:
+ *
+ * Workaround the non-pipelined state not applying in MEDIA/GPGPU pipeline
+ * mode by putting the pipeline temporarily in 3D mode.
+ */
+ if (batch->name == IRIS_BATCH_COMPUTE)
+ emit_pipeline_select(batch, _3D);
+#endif
iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
- sba.SurfaceStateMOCS = MOCS_WB;
sba.SurfaceStateBaseAddressModifyEnable = true;
sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
+
+ /* The hardware appears to pay attention to the MOCS fields even
+ * if you don't set the "Address Modify Enable" bit for the base.
+ */
+ sba.GeneralStateMOCS = mocs;
+ sba.StatelessDataPortAccessMOCS = mocs;
+ sba.DynamicStateMOCS = mocs;
+ sba.IndirectObjectMOCS = mocs;
+ sba.InstructionMOCS = mocs;
+ sba.SurfaceStateMOCS = mocs;
+#if GEN_GEN >= 9
+ sba.BindlessSurfaceStateMOCS = mocs;
+#endif
}
+#if GEN_GEN == 12
+ /* GEN:BUG:1607854226:
+ *
+ * Put the pipeline back into compute mode.
+ */
+ if (batch->name == IRIS_BATCH_COMPUTE)
+ emit_pipeline_select(batch, GPGPU);
+#endif
+
+ flush_after_state_base_change(batch);
+
batch->last_surface_base_address = binder->bo->gtt_offset;
}
@@ -4593,6 +5122,194 @@ iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
}
+#if GEN_GEN >= 12
+void
+genX(invalidate_aux_map_state)(struct iris_batch *batch)
+{
+ struct iris_screen *screen = batch->screen;
+ void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
+ if (!aux_map_ctx)
+ return;
+ uint32_t aux_map_state_num = gen_aux_map_get_state_num(aux_map_ctx);
+ if (batch->last_aux_map_state != aux_map_state_num) {
+ /* HSD 1209978178: docs say that before programming the aux table:
+ *
+ * "Driver must ensure that the engine is IDLE but ensure it doesn't
+ * add extra flushes in the case it knows that the engine is already
+ * IDLE."
+ *
+ * An end of pipe sync is needed here, otherwise we see GPU hangs in
+ * dEQP-GLES31.functional.copy_image.* tests.
+ */
+ iris_emit_end_of_pipe_sync(batch, "Invalidate aux map table",
+ PIPE_CONTROL_CS_STALL);
+
+ /* If the aux-map state number increased, then we need to rewrite the
+ * register. Rewriting the register is used to both set the aux-map
+ * translation table address, and also to invalidate any previously
+ * cached translations.
+ */
+ iris_load_register_imm32(batch, GENX(GFX_CCS_AUX_INV_num), 1);
+ batch->last_aux_map_state = aux_map_state_num;
+ }
+}
+
+static void
+init_aux_map_state(struct iris_batch *batch)
+{
+ struct iris_screen *screen = batch->screen;
+ void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
+ if (!aux_map_ctx)
+ return;
+
+ uint64_t base_addr = gen_aux_map_get_base(aux_map_ctx);
+ assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
+ iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
+ base_addr);
+}
+#endif
+
+struct push_bos {
+ struct {
+ struct iris_address addr;
+ uint32_t length;
+ } buffers[4];
+ int buffer_count;
+ uint32_t max_length;
+};
+
+static void
+setup_constant_buffers(struct iris_context *ice,
+ struct iris_batch *batch,
+ int stage,
+ struct push_bos *push_bos)
+{
+ struct iris_shader_state *shs = &ice->state.shaders[stage];
+ struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+ struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
+
+ uint32_t push_range_sum = 0;
+
+ int n = 0;
+ for (int i = 0; i < 4; i++) {
+ const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
+
+ if (range->length == 0)
+ continue;
+
+ push_range_sum += range->length;
+
+ if (range->length > push_bos->max_length)
+ push_bos->max_length = range->length;
+
+ /* Range block is a binding table index, map back to UBO index. */
+ unsigned block_index = iris_bti_to_group_index(
+ &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
+ assert(block_index != IRIS_SURFACE_NOT_USED);
+
+ struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
+ struct iris_resource *res = (void *) cbuf->buffer;
+
+ assert(cbuf->buffer_offset % 32 == 0);
+
+ push_bos->buffers[n].length = range->length;
+ push_bos->buffers[n].addr =
+ res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
+ : ro_bo(batch->screen->workaround_bo, 0);
+ n++;
+ }
+
+ /* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes:
+ *
+ * "The sum of all four read length fields must be less than or
+ * equal to the size of 64."
+ */
+ assert(push_range_sum <= 64);
+
+ push_bos->buffer_count = n;
+}
+
+static void
+emit_push_constant_packets(struct iris_context *ice,
+ struct iris_batch *batch,
+ int stage,
+ const struct push_bos *push_bos)
+{
+ UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
+ struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+ struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
+
+ iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
+ pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
+#if GEN_GEN >= 12
+ pkt.MOCS = isl_dev->mocs.internal;
+#endif
+ if (prog_data) {
+ /* The Skylake PRM contains the following restriction:
+ *
+ * "The driver must ensure The following case does not occur
+ * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
+ * buffer 3 read length equal to zero committed followed by a
+ * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
+ * zero committed."
+ *
+ * To avoid this, we program the buffers in the highest slots.
+ * This way, slot 0 is only used if slot 3 is also used.
+ */
+ int n = push_bos->buffer_count;
+ assert(n <= 4);
+ const unsigned shift = 4 - n;
+ for (int i = 0; i < n; i++) {
+ pkt.ConstantBody.ReadLength[i + shift] =
+ push_bos->buffers[i].length;
+ pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr;
+ }
+ }
+ }
+}
+
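To make the slot-shifting above concrete, a small worked example (illustrative range lengths, not taken from this diff): a stage with two push ranges of lengths 4 and 2 gives buffer_count n = 2, so shift = 4 - 2 = 2 and the ranges land in ConstantBody slots 2 and 3:

    /* Illustration only (assumed range lengths):
     *   ranges       = { length 4, length 2 }  ->  n = 2, shift = 2
     *   ReadLength[] = { 0, 0, 4, 2 }          --  slots 3 and 2 carry data
     * Slot 0 is only ever non-zero when slot 3 is, which satisfies the
     * Skylake restriction quoted above.
     */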
+#if GEN_GEN >= 12
+static void
+emit_push_constant_packet_all(struct iris_context *ice,
+ struct iris_batch *batch,
+ uint32_t shader_mask,
+ const struct push_bos *push_bos)
+{
+ struct isl_device *isl_dev = &batch->screen->isl_dev;
+
+ if (!push_bos) {
+ iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
+ pc.ShaderUpdateEnable = shader_mask;
+ }
+ return;
+ }
+
+ const uint32_t n = push_bos->buffer_count;
+ const uint32_t max_pointers = 4;
+ const uint32_t num_dwords = 2 + 2 * n;
+ uint32_t const_all[2 + 2 * max_pointers];
+ uint32_t *dw = &const_all[0];
+
+ assert(n <= max_pointers);
+ iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
+ all.DWordLength = num_dwords - 2;
+ all.MOCS = isl_dev->mocs.internal;
+ all.ShaderUpdateEnable = shader_mask;
+ all.PointerBufferMask = (1 << n) - 1;
+ }
+ dw += 2;
+
+ for (int i = 0; i < n; i++) {
+ _iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA),
+ dw + i * 2, data) {
+ data.PointerToConstantBuffer = push_bos->buffers[i].addr;
+ data.ConstantBufferReadLength = push_bos->buffers[i].length;
+ }
+ }
+ iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords);
+}
+#endif
+
static void
iris_upload_dirty_render_state(struct iris_context *ice,
struct iris_batch *batch,
@@ -4703,9 +5420,22 @@ iris_upload_dirty_render_state(struct iris_context *ice,
assert(size[i] != 0);
}
- genX(emit_urb_setup)(ice, batch, size,
- ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
- ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL);
+ unsigned entries[4], start[4];
+ gen_get_urb_config(&batch->screen->devinfo,
+ batch->screen->l3_config_3d,
+ ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
+ ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
+ size, entries, start,
+ &ice->state.urb_deref_block_size);
+
+ for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
+ iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
+ urb._3DCommandSubOpcode += i;
+ urb.VSURBStartingAddress = start[i];
+ urb.VSURBEntryAllocationSize = size[i] - 1;
+ urb.VSNumberofURBEntries = entries[i];
+ }
+ }
}
if (dirty & IRIS_DIRTY_BLEND_STATE) {
@@ -4771,8 +5501,23 @@ iris_upload_dirty_render_state(struct iris_context *ice,
}
}
+ /* GEN:BUG:1604061319
+ *
+ * 3DSTATE_CONSTANT_* needs to be programmed before BTP_*
+ *
+ * Testing shows that all the 3DSTATE_CONSTANT_XS need to be emitted if
+ * any stage has a dirty binding table.
+ */
+ const bool emit_const_wa = GEN_GEN >= 11 &&
+ (dirty & IRIS_ALL_DIRTY_BINDINGS) != 0;
+
+#if GEN_GEN >= 12
+ uint32_t nobuffer_stages = 0;
+#endif
+
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage)))
+ if (!(dirty & (IRIS_DIRTY_CONSTANTS_VS << stage)) &&
+ !emit_const_wa)
continue;
struct iris_shader_state *shs = &ice->state.shaders[stage];
@@ -4784,52 +5529,42 @@ iris_upload_dirty_render_state(struct iris_context *ice,
if (shs->sysvals_need_upload)
upload_sysvals(ice, stage);
- struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
-
- iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
- pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
- if (prog_data) {
- /* The Skylake PRM contains the following restriction:
- *
- * "The driver must ensure The following case does not occur
- * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
- * buffer 3 read length equal to zero committed followed by a
- * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to
- * zero committed."
- *
- * To avoid this, we program the buffers in the highest slots.
- * This way, slot 0 is only used if slot 3 is also used.
- */
- int n = 3;
-
- for (int i = 3; i >= 0; i--) {
- const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
+ struct push_bos push_bos = {};
+ setup_constant_buffers(ice, batch, stage, &push_bos);
- if (range->length == 0)
- continue;
-
- /* Range block is a binding table index, map back to UBO index. */
- unsigned block_index = iris_bti_to_group_index(
- &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
- assert(block_index != IRIS_SURFACE_NOT_USED);
-
- struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
- struct iris_resource *res = (void *) cbuf->buffer;
-
- assert(cbuf->buffer_offset % 32 == 0);
+#if GEN_GEN >= 12
+ /* If this stage doesn't have any push constants, emit it later in a
+ * single CONSTANT_ALL packet with all the other stages.
+ */
+ if (push_bos.buffer_count == 0) {
+ nobuffer_stages |= 1 << stage;
+ continue;
+ }
- pkt.ConstantBody.ReadLength[n] = range->length;
- pkt.ConstantBody.Buffer[n] =
- res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
- : ro_bo(batch->screen->workaround_bo, 0);
- n--;
- }
- }
+ /* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
+ * contains only 5 bits, so we can only use it for buffers smaller than
+ * 32.
+ */
+ if (push_bos.max_length < 32) {
+ emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos);
+ continue;
}
+#endif
+ emit_push_constant_packets(ice, batch, stage, &push_bos);
}
+#if GEN_GEN >= 12
+ if (nobuffer_stages)
+ emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL);
+#endif
+
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
- if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
+ /* Gen9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
+ * in order to commit constants. TODO: Investigate "Disable Gather
+ * at Set Shader" to go back to legacy mode...
+ */
+ if (dirty & ((IRIS_DIRTY_BINDINGS_VS |
+ (GEN_GEN == 9 ? IRIS_DIRTY_CONSTANTS_VS : 0)) << stage)) {
iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
ptr._3DCommandSubOpcode = 38 + stage;
ptr.PointertoVSBindingTable = binder->bt_offset[stage];
@@ -4837,6 +5572,24 @@ iris_upload_dirty_render_state(struct iris_context *ice,
}
}
+ if (GEN_GEN >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) {
+ // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
+ // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
+
+ /* The PIPE_CONTROL command description says:
+ *
+ * "Whenever a Binding Table Index (BTI) used by a Render Target
+ * Message points to a different RENDER_SURFACE_STATE, SW must issue a
+ * Render Target Cache Flush by enabling this bit. When render target
+ * flush is set due to new association of BTI, PS Scoreboard Stall bit
+ * must be set in this packet."
+ */
+ // XXX: does this need to happen at 3DSTATE_BTP_PS time?
+ iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]",
+ PIPE_CONTROL_RENDER_TARGET_FLUSH |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD);
+ }
+
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
iris_populate_binding_table(ice, batch, stage, false);
@@ -5055,13 +5808,17 @@ iris_upload_dirty_render_state(struct iris_context *ice,
ARRAY_SIZE(cso_rast->clip));
}
- if (dirty & IRIS_DIRTY_RASTER) {
+ if (dirty & (IRIS_DIRTY_RASTER | IRIS_DIRTY_URB)) {
struct iris_rasterizer_state *cso = ice->state.cso_rast;
iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
sf.ViewportTransformEnable = !ice->state.window_space_position;
+
+#if GEN_GEN >= 12
+ sf.DerefBlockSize = ice->state.urb_deref_block_size;
+#endif
}
iris_emit_merge(batch, cso->sf, dynamic_sf,
ARRAY_SIZE(dynamic_sf));
@@ -5130,6 +5887,10 @@ iris_upload_dirty_render_state(struct iris_context *ice,
#else
iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
#endif
+
+#if GEN_GEN >= 12
+ iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
+#endif
}
if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
@@ -5152,8 +5913,35 @@ iris_upload_dirty_render_state(struct iris_context *ice,
* first.
*/
uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4;
- uint32_t cso_z_size = sizeof(cso_z->packets) - clear_length;
+ uint32_t cso_z_size = batch->screen->isl_dev.ds.size - clear_length;
+
+#if GEN_GEN == 12
+ /* GEN:BUG:14010455700
+ *
+ * ISL will change some CHICKEN registers depending on the depth surface
+ * format, along with emitting the depth and stencil packets. In that
+ * case, we want to do a depth flush and stall, so the pipeline is not
+ * using these settings while we change the registers.
+ */
+ iris_emit_end_of_pipe_sync(batch,
+ "Workaround: Stop pipeline for 14010455700",
+ PIPE_CONTROL_DEPTH_STALL |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH);
+#endif
+
iris_batch_emit(batch, cso_z->packets, cso_z_size);
+ if (GEN_GEN >= 12) {
+ /* GEN:BUG:1408224581
+ *
+ * Workaround (Gen12LP A-step only): an additional pipe control with a
+ * post-sync store dword operation would be required; the workaround is
+ * to emit an additional pipe control after the stencil state whenever
+ * the surface state bits of this state are changing.
+ */
+ iris_emit_pipe_control_write(batch, "WA for stencil state",
+ PIPE_CONTROL_WRITE_IMMEDIATE,
+ batch->screen->workaround_bo, 0, 0);
+ }
union isl_color_value clear_value = { .f32 = { 0, } };
@@ -5202,64 +5990,62 @@ iris_upload_dirty_render_state(struct iris_context *ice,
if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
int count = util_bitcount64(ice->state.bound_vertex_buffers);
- int dynamic_bound = ice->state.bound_vertex_buffers;
+ uint64_t dynamic_bound = ice->state.bound_vertex_buffers;
if (ice->state.vs_uses_draw_params) {
- if (ice->draw.draw_params_offset == 0) {
- u_upload_data(ice->ctx.stream_uploader, 0, sizeof(ice->draw.params),
- 4, &ice->draw.params, &ice->draw.draw_params_offset,
- &ice->draw.draw_params_res);
- }
- assert(ice->draw.draw_params_res);
+ assert(ice->draw.draw_params.res);
struct iris_vertex_buffer_state *state =
&(ice->state.genx->vertex_buffers[count]);
- pipe_resource_reference(&state->resource, ice->draw.draw_params_res);
+ pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
struct iris_resource *res = (void *) state->resource;
iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
vb.VertexBufferIndex = count;
vb.AddressModifyEnable = true;
vb.BufferPitch = 0;
- vb.BufferSize = res->bo->size - ice->draw.draw_params_offset;
+ vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
vb.BufferStartingAddress =
ro_bo(NULL, res->bo->gtt_offset +
- (int) ice->draw.draw_params_offset);
- vb.MOCS = mocs(res->bo);
+ (int) ice->draw.draw_params.offset);
+ vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev);
}
dynamic_bound |= 1ull << count;
count++;
}
if (ice->state.vs_uses_derived_draw_params) {
- u_upload_data(ice->ctx.stream_uploader, 0,
- sizeof(ice->draw.derived_params), 4,
- &ice->draw.derived_params,
- &ice->draw.derived_draw_params_offset,
- &ice->draw.derived_draw_params_res);
-
struct iris_vertex_buffer_state *state =
&(ice->state.genx->vertex_buffers[count]);
pipe_resource_reference(&state->resource,
- ice->draw.derived_draw_params_res);
- struct iris_resource *res = (void *) ice->draw.derived_draw_params_res;
+ ice->draw.derived_draw_params.res);
+ struct iris_resource *res = (void *) ice->draw.derived_draw_params.res;
iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
vb.VertexBufferIndex = count;
vb.AddressModifyEnable = true;
vb.BufferPitch = 0;
vb.BufferSize =
- res->bo->size - ice->draw.derived_draw_params_offset;
+ res->bo->size - ice->draw.derived_draw_params.offset;
vb.BufferStartingAddress =
ro_bo(NULL, res->bo->gtt_offset +
- (int) ice->draw.derived_draw_params_offset);
- vb.MOCS = mocs(res->bo);
+ (int) ice->draw.derived_draw_params.offset);
+ vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev);
}
dynamic_bound |= 1ull << count;
count++;
}
if (count) {
+#if GEN_GEN >= 11
+ /* Gen11+ doesn't need the cache workaround below */
+ uint64_t bound = dynamic_bound;
+ while (bound) {
+ const int i = u_bit_scan64(&bound);
+ iris_use_optional_res(batch, genx->vertex_buffers[i].resource,
+ false);
+ }
+#else
/* The VF cache designers cut corners, and made the cache key's
* <VertexBufferIndex, Memory Address> tuple only consider the bottom
* 32 bits of the address. If you have two vertex buffers which get
@@ -5295,6 +6081,7 @@ iris_upload_dirty_render_state(struct iris_context *ice,
"workaround: VF cache 32-bit key [VB]",
flush_flags);
}
+#endif
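The per-buffer check that the comment above describes falls between the two hunks; a rough sketch of its shape, with state field names such as last_vbo_high_bits assumed for illustration rather than taken from this diff:

    uint32_t flush_flags = 0;
    uint64_t bound = dynamic_bound;

    while (bound) {
       const int i = u_bit_scan64(&bound);
       struct iris_resource *res = (void *) genx->vertex_buffers[i].resource;
       uint16_t high_bits = res->bo->gtt_offset >> 32ull;

       /* If the upper 32 address bits changed, a stale cache entry keyed on
        * <VertexBufferIndex, low 32 address bits> could be reused, so ask
        * for a VF cache invalidate (plus CS stall) before drawing.
        */
       if (high_bits != ice->state.last_vbo_high_bits[i]) {
          flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
                         PIPE_CONTROL_CS_STALL;
          ice->state.last_vbo_high_bits[i] = high_bits;
       }
    }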
const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
@@ -5441,10 +6228,19 @@ iris_upload_dirty_render_state(struct iris_context *ice,
}
}
+#if GEN_GEN == 8
+ if (dirty & IRIS_DIRTY_PMA_FIX) {
+ bool enable = want_pma_fix(ice);
+ genX(update_pma_fix)(ice, batch, enable);
+ }
+#endif
+
if (ice->state.current_hash_scale != 1)
genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);
- /* TODO: Gen8 PMA fix */
+#if GEN_GEN >= 12
+ genX(invalidate_aux_map_state)(batch);
+#endif
}
static void
@@ -5490,7 +6286,7 @@ iris_upload_render_state(struct iris_context *ice,
uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
ib.IndexFormat = draw->index_size >> 1;
- ib.MOCS = mocs(bo);
+ ib.MOCS = iris_mocs(bo, &batch->screen->isl_dev);
ib.BufferSize = bo->size - offset;
ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset);
}
@@ -5501,6 +6297,7 @@ iris_upload_render_state(struct iris_context *ice,
iris_use_pinned_bo(batch, bo, false);
}
+#if GEN_GEN < 11
/* The VF cache key only uses 32-bits, see vertex buffer comment above */
uint16_t high_bits = bo->gtt_offset >> 32ull;
if (high_bits != ice->state.last_index_bo_high_bits) {
@@ -5510,6 +6307,7 @@ iris_upload_render_state(struct iris_context *ice,
PIPE_CONTROL_CS_STALL);
ice->state.last_index_bo_high_bits = high_bits;
}
+#endif
}
#define _3DPRIM_END_OFFSET 0x2420
@@ -5550,15 +6348,14 @@ iris_upload_render_state(struct iris_context *ice,
uint32_t mi_predicate;
/* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
- ice->vtbl.load_register_imm64(batch, MI_PREDICATE_SRC1,
- draw->drawid);
+ iris_load_register_imm64(batch, MI_PREDICATE_SRC1, draw->drawid);
/* Upload the current draw count from the draw parameters buffer
* to MI_PREDICATE_SRC0.
*/
- ice->vtbl.load_register_mem32(batch, MI_PREDICATE_SRC0,
- draw_count_bo, draw_count_offset);
+ iris_load_register_mem32(batch, MI_PREDICATE_SRC0,
+ draw_count_bo, draw_count_offset);
/* Zero the top 32-bits of MI_PREDICATE_SRC0 */
- ice->vtbl.load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);
+ iris_load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);
if (draw->drawid == 0) {
mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
@@ -5676,6 +6473,9 @@ iris_upload_compute_state(struct iris_context *ice,
struct brw_stage_prog_data *prog_data = shader->prog_data;
struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
+ const uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
+ const unsigned threads = DIV_ROUND_UP(group_size, cs_prog_data->simd_size);
+
/* Always pin the binder. If we're emitting new binding table pointers,
* we need it. If not, we're probably inheriting old tables via the
* context, and need it anyway. Since true zero-bindings cases are
@@ -5698,6 +6498,10 @@ iris_upload_compute_state(struct iris_context *ice,
if (ice->state.need_border_colors)
iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false);
+#if GEN_GEN >= 12
+ genX(invalidate_aux_map_state)(batch);
+#endif
+
if (dirty & IRIS_DIRTY_CS) {
/* The MEDIA_VFE_STATE documentation for Gen8+ says:
*
@@ -5733,7 +6537,7 @@ iris_upload_compute_state(struct iris_context *ice,
vfe.URBEntryAllocationSize = 2;
vfe.CURBEAllocationSize =
- ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
+ ALIGN(cs_prog_data->push.per_thread.regs * threads +
cs_prog_data->push.cross_thread.regs, 2);
}
}
@@ -5744,18 +6548,19 @@ iris_upload_compute_state(struct iris_context *ice,
assert(cs_prog_data->push.cross_thread.dwords == 0 &&
cs_prog_data->push.per_thread.dwords == 1 &&
cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
+ const unsigned push_const_size =
+ brw_cs_push_const_total_size(cs_prog_data, threads);
uint32_t *curbe_data_map =
stream_state(batch, ice->state.dynamic_uploader,
&ice->state.last_res.cs_thread_ids,
- ALIGN(cs_prog_data->push.total.size, 64), 64,
+ ALIGN(push_const_size, 64), 64,
&curbe_data_offset);
assert(curbe_data_map);
- memset(curbe_data_map, 0x5a, ALIGN(cs_prog_data->push.total.size, 64));
- iris_fill_cs_push_const_buffer(cs_prog_data, curbe_data_map);
+ memset(curbe_data_map, 0x5a, ALIGN(push_const_size, 64));
+ iris_fill_cs_push_const_buffer(cs_prog_data, threads, curbe_data_map);
iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
- curbe.CURBETotalDataLength =
- ALIGN(cs_prog_data->push.total.size, 64);
+ curbe.CURBETotalDataLength = ALIGN(push_const_size, 64);
curbe.CURBEDataStartAddress = curbe_data_offset;
}
}
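The CURBE sizing here relies on brw_cs_push_const_total_size(); a minimal sketch of what that amounts to, assuming 32-byte registers with cross-thread data uploaded once and per-thread data once per hardware thread (the real helper lives in the brw compiler and may differ in detail):

    /* Sketch only -- not the driver's implementation. */
    static unsigned
    sketch_cs_push_const_total_size(unsigned cross_thread_regs,
                                    unsigned per_thread_regs,
                                    unsigned threads)
    {
       /* Each GRF is 32 bytes; per-thread constants are replicated per thread. */
       return 32 * (cross_thread_regs + per_thread_regs * threads);
    }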
@@ -5769,6 +6574,7 @@ iris_upload_compute_state(struct iris_context *ice,
iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
idd.SamplerStatePointer = shs->sampler_table.offset;
idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
+ idd.NumberofThreadsinGPGPUThreadGroup = threads;
}
for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
@@ -5783,7 +6589,6 @@ iris_upload_compute_state(struct iris_context *ice,
}
}
- uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
uint32_t right_mask;
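The assignment of right_mask is cut off by the next hunk; as a rough sketch of the idea (not the driver's exact expression), the last thread of a workgroup only enables as many channels as there are leftover invocations:

    /* Sketch only: execution mask for the last, possibly partial, thread.
     * 'remainder' is group_size modulo the SIMD width, as computed above;
     * a remainder of 0 means the last thread runs a full SIMD group.
     */
    static uint32_t
    sketch_partial_thread_mask(uint32_t simd_size, uint32_t remainder)
    {
       uint32_t active = remainder ? remainder : simd_size;
       return ~0u >> (32 - active);   /* set the low 'active' bits */
    }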
@@ -5818,7 +6623,7 @@ iris_upload_compute_state(struct iris_context *ice,
ggw.SIMDSize = cs_prog_data->simd_size / 16;
ggw.ThreadDepthCounterMaximum = 0;
ggw.ThreadHeightCounterMaximum = 0;
- ggw.ThreadWidthCounterMaximum = cs_prog_data->threads - 1;
+ ggw.ThreadWidthCounterMaximum = threads - 1;
ggw.ThreadGroupIDXDimension = grid->grid[0];
ggw.ThreadGroupIDYDimension = grid->grid[1];
ggw.ThreadGroupIDZDimension = grid->grid[2];
@@ -5842,8 +6647,8 @@ iris_destroy_state(struct iris_context *ice)
{
struct iris_genx_state *genx = ice->state.genx;
- pipe_resource_reference(&ice->draw.draw_params_res, NULL);
- pipe_resource_reference(&ice->draw.derived_draw_params_res, NULL);
+ pipe_resource_reference(&ice->draw.draw_params.res, NULL);
+ pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);
/* Loop over all VBOs, including ones for draw parameters */
for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
@@ -5870,7 +6675,8 @@ iris_destroy_state(struct iris_context *ice)
}
for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
pipe_resource_reference(&shs->image[i].base.resource, NULL);
- pipe_resource_reference(&shs->image[i].surface_state.res, NULL);
+ pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL);
+ free(shs->image[i].surface_state.cpu);
}
for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
@@ -5902,11 +6708,9 @@ iris_destroy_state(struct iris_context *ice)
static void
iris_rebind_buffer(struct iris_context *ice,
- struct iris_resource *res,
- uint64_t old_address)
+ struct iris_resource *res)
{
struct pipe_context *ctx = &ice->ctx;
- struct iris_screen *screen = (void *) ctx->screen;
struct iris_genx_state *genx = ice->state.genx;
assert(res->base.target == PIPE_BUFFER);
@@ -5932,9 +6736,10 @@ iris_rebind_buffer(struct iris_context *ice,
STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
uint64_t *addr = (uint64_t *) &state->state[1];
+ struct iris_bo *bo = iris_resource_bo(state->resource);
- if (*addr == old_address + state->offset) {
- *addr = res->bo->gtt_offset + state->offset;
+ if (*addr != bo->gtt_offset + state->offset) {
+ *addr = bo->gtt_offset + state->offset;
ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
}
}
@@ -5957,6 +6762,9 @@ iris_rebind_buffer(struct iris_context *ice,
struct iris_shader_state *shs = &ice->state.shaders[s];
enum pipe_shader_type p_stage = stage_to_pipe(s);
+ if (!(res->bind_stages & (1 << s)))
+ continue;
+
if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
/* Skip constant buffer 0, it's for regular uniforms, not UBOs */
uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
@@ -5966,7 +6774,7 @@ iris_rebind_buffer(struct iris_context *ice,
struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
if (res->bo == iris_resource_bo(cbuf->buffer)) {
- iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
+ pipe_resource_reference(&surf_state->res, NULL);
ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << s;
}
}
@@ -5995,16 +6803,10 @@ iris_rebind_buffer(struct iris_context *ice,
while (bound_sampler_views) {
const int i = u_bit_scan(&bound_sampler_views);
struct iris_sampler_view *isv = shs->textures[i];
+ struct iris_bo *bo = isv->res->bo;
- if (res->bo == iris_resource_bo(isv->base.texture)) {
- void *map = alloc_surface_states(ice->state.surface_uploader,
- &isv->surface_state,
- isv->res->aux.sampler_usages);
- assert(map);
- fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
- isv->view.format, isv->view.swizzle,
- isv->base.u.buf.offset,
- isv->base.u.buf.size);
+ if (update_surface_state_addrs(ice->state.surface_uploader,
+ &isv->surface_state, bo)) {
ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s;
}
}
@@ -6015,9 +6817,11 @@ iris_rebind_buffer(struct iris_context *ice,
while (bound_image_views) {
const int i = u_bit_scan(&bound_image_views);
struct iris_image_view *iv = &shs->image[i];
+ struct iris_bo *bo = iris_resource_bo(iv->base.resource);
- if (res->bo == iris_resource_bo(iv->base.resource)) {
- iris_set_shader_images(ctx, p_stage, i, 1, &iv->base);
+ if (update_surface_state_addrs(ice->state.surface_uploader,
+ &iv->surface_state, bo)) {
+ ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << s;
}
}
}
@@ -6026,130 +6830,6 @@ iris_rebind_buffer(struct iris_context *ice,
/* ------------------------------------------------------------------- */
-static void
-iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
- uint32_t src)
-{
- _iris_emit_lrr(batch, dst, src);
-}
-
-static void
-iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
- uint32_t src)
-{
- _iris_emit_lrr(batch, dst, src);
- _iris_emit_lrr(batch, dst + 4, src + 4);
-}
-
-static void
-iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
- uint32_t val)
-{
- _iris_emit_lri(batch, reg, val);
-}
-
-static void
-iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
- uint64_t val)
-{
- _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
- _iris_emit_lri(batch, reg + 4, val >> 32);
-}
-
-/**
- * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
- */
-static void
-iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset)
-{
- iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
- lrm.RegisterAddress = reg;
- lrm.MemoryAddress = ro_bo(bo, offset);
- }
-}
-
-/**
- * Load a 64-bit value from a buffer into a MMIO register via
- * two MI_LOAD_REGISTER_MEM commands.
- */
-static void
-iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset)
-{
- iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
- iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
-}
-
-static void
-iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset,
- bool predicated)
-{
- iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
- srm.RegisterAddress = reg;
- srm.MemoryAddress = rw_bo(bo, offset);
- srm.PredicateEnable = predicated;
- }
-}
-
-static void
-iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
- struct iris_bo *bo, uint32_t offset,
- bool predicated)
-{
- iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
- iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
-}
-
-static void
-iris_store_data_imm32(struct iris_batch *batch,
- struct iris_bo *bo, uint32_t offset,
- uint32_t imm)
-{
- iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
- sdi.Address = rw_bo(bo, offset);
- sdi.ImmediateData = imm;
- }
-}
-
-static void
-iris_store_data_imm64(struct iris_batch *batch,
- struct iris_bo *bo, uint32_t offset,
- uint64_t imm)
-{
- /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
- * 2 in genxml but it's actually variable length and we need 5 DWords.
- */
- void *map = iris_get_command_space(batch, 4 * 5);
- _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
- sdi.DWordLength = 5 - 2;
- sdi.Address = rw_bo(bo, offset);
- sdi.ImmediateData = imm;
- }
-}
-
-static void
-iris_copy_mem_mem(struct iris_batch *batch,
- struct iris_bo *dst_bo, uint32_t dst_offset,
- struct iris_bo *src_bo, uint32_t src_offset,
- unsigned bytes)
-{
- /* MI_COPY_MEM_MEM operates on DWords. */
- assert(bytes % 4 == 0);
- assert(dst_offset % 4 == 0);
- assert(src_offset % 4 == 0);
-
- for (unsigned i = 0; i < bytes; i += 4) {
- iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
- cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i);
- cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
- }
- }
-}
-
-/* ------------------------------------------------------------------- */
-
static unsigned
flags_to_post_sync_op(uint32_t flags)
{
@@ -6234,7 +6914,20 @@ iris_emit_raw_pipe_control(struct iris_batch *batch,
0, NULL, 0, 0);
}
- if (GEN_GEN == 9 && IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
+ /* GEN:BUG:1409226450, Wait for EU to be idle before pipe control which
+ * invalidates the instruction cache
+ */
+ if (GEN_GEN == 12 && (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE)) {
+ iris_emit_raw_pipe_control(batch,
+ "workaround: CS stall before instruction "
+ "cache invalidate",
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD, bo, offset,
+ imm);
+ }
+
+ if ((GEN_GEN == 9 || (GEN_GEN == 12 && devinfo->revision == 0 /* A0 */)) &&
+ IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
/* Project: SKL / Argument: LRI Post Sync Operation [23]
*
* "PIPECONTROL command with “Command Streamer Stall Enable” must be
@@ -6243,23 +6936,14 @@ iris_emit_raw_pipe_control(struct iris_batch *batch,
* PIPELINE_SELECT command is set to GPGPU mode of operation)."
*
* The same text exists a few rows below for Post Sync Op.
+ *
+ * On Gen12 this is GEN:BUG:1607156449.
*/
iris_emit_raw_pipe_control(batch,
"workaround: CS stall before gpgpu post-sync",
PIPE_CONTROL_CS_STALL, bo, offset, imm);
}
- if (GEN_GEN == 10 && (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH)) {
- /* Cannonlake:
- * "Before sending a PIPE_CONTROL command with bit 12 set, SW must issue
- * another PIPE_CONTROL with Render Target Cache Flush Enable (bit 12)
- * = 0 and Pipe Control Flush Enable (bit 7) = 1"
- */
- iris_emit_raw_pipe_control(batch,
- "workaround: PC flush before RT flush",
- PIPE_CONTROL_FLUSH_ENABLE, bo, offset, imm);
- }
-
/* "Flush Types" workarounds ---------------------------------------------
* We do these now because they may add post-sync operations or CS stalls.
*/
@@ -6278,25 +6962,6 @@ iris_emit_raw_pipe_control(struct iris_batch *batch,
}
}
- /* #1130 from Gen10 workarounds page:
- *
- * "Enable Depth Stall on every Post Sync Op if Render target Cache
- * Flush is not enabled in same PIPE CONTROL and Enable Pixel score
- * board stall if Render target cache flush is enabled."
- *
- * Applicable to CNL B0 and C0 steppings only.
- *
- * The wording here is unclear, and this workaround doesn't look anything
- * like the internal bug report recommendations, but leave it be for now...
- */
- if (GEN_GEN == 10) {
- if (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) {
- flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
- } else if (flags & non_lri_post_sync_flags) {
- flags |= PIPE_CONTROL_DEPTH_STALL;
- }
- }
-
if (flags & PIPE_CONTROL_DEPTH_STALL) {
/* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
*
@@ -6444,6 +7109,23 @@ iris_emit_raw_pipe_control(struct iris_batch *batch,
flags |= PIPE_CONTROL_CS_STALL;
}
+ if (GEN_GEN >= 12 && ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ||
+ (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))) {
+ /* From the PIPE_CONTROL instruction table, bit 28 (Tile Cache Flush
+ * Enable):
+ *
+ * Unified Cache (Tile Cache Disabled):
+ *
+ * When the Color and Depth (Z) streams are enabled to be cached in
+ * the DC space of L2, Software must use "Render Target Cache Flush
+ * Enable" and "Depth Cache Flush Enable" along with "Tile Cache
+ * Flush" for getting the color and depth (Z) write data to be
+ * globally observable. In this mode of operation it is not required
+ * to set "CS Stall" upon setting "Tile Cache Flush" bit.
+ */
+ flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
+ }
+
if (GEN_GEN == 9 && devinfo->gt == 4) {
/* TODO: The big Skylake GT4 post sync op workaround */
}
@@ -6535,11 +7217,20 @@ iris_emit_raw_pipe_control(struct iris_batch *batch,
flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
}
+ if (GEN_GEN >= 12 && (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH)) {
+ /* GEN:BUG:1409600907:
+ *
+ * "PIPE_CONTROL with Depth Stall Enable bit must be set
+ * with any PIPE_CONTROL with Depth Flush Enable bit set.
+ */
+ flags |= PIPE_CONTROL_DEPTH_STALL;
+ }
+
/* Emit --------------------------------------------------------------- */
if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
fprintf(stderr,
- " PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
+ " PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
(flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
(flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
(flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
@@ -6562,10 +7253,17 @@ iris_emit_raw_pipe_control(struct iris_batch *batch,
(flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
(flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
(flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
+ (flags & PIPE_CONTROL_FLUSH_HDC) ? "HDC " : "",
imm, reason);
}
iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
+#if GEN_GEN >= 12
+ pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH;
+#endif
+#if GEN_GEN >= 11
+ pc.HDCPipelineFlushEnable = flags & PIPE_CONTROL_FLUSH_HDC;
+#endif
pc.LRIPostSyncOperation = NoLRIOperation;
pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
@@ -6598,34 +7296,6 @@ iris_emit_raw_pipe_control(struct iris_batch *batch,
}
}
-void
-genX(emit_urb_setup)(struct iris_context *ice,
- struct iris_batch *batch,
- const unsigned size[4],
- bool tess_present, bool gs_present)
-{
- const struct gen_device_info *devinfo = &batch->screen->devinfo;
- const unsigned push_size_kB = 32;
- unsigned entries[4];
- unsigned start[4];
-
- ice->shaders.last_vs_entry_size = size[MESA_SHADER_VERTEX];
-
- gen_get_urb_config(devinfo, 1024 * push_size_kB,
- 1024 * ice->shaders.urb_size,
- tess_present, gs_present,
- size, entries, start);
-
- for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
- iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
- urb._3DCommandSubOpcode += i;
- urb.VSURBStartingAddress = start[i];
- urb.VSURBEntryAllocationSize = size[i] - 1;
- urb.VSNumberofURBEntries = entries[i];
- }
- }
-}
-
#if GEN_GEN == 9
/**
* Preemption on Gen9 has to be enabled or disabled in various cases.
@@ -6807,6 +7477,19 @@ genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
#endif
}
+static void
+iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
+{
+ struct iris_context *ice = (struct iris_context *) ctx;
+
+ ice->state.dirty |= iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER],
+ enable,
+ IRIS_ALL_DIRTY_FOR_RENDER);
+ ice->state.dirty |= iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE],
+ enable,
+ IRIS_ALL_DIRTY_FOR_COMPUTE);
+}
+
void
genX(init_state)(struct iris_context *ice)
{
@@ -6851,38 +7534,38 @@ genX(init_state)(struct iris_context *ice)
ctx->create_stream_output_target = iris_create_stream_output_target;
ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
ctx->set_stream_output_targets = iris_set_stream_output_targets;
-
- ice->vtbl.destroy_state = iris_destroy_state;
- ice->vtbl.init_render_context = iris_init_render_context;
- ice->vtbl.init_compute_context = iris_init_compute_context;
- ice->vtbl.upload_render_state = iris_upload_render_state;
- ice->vtbl.update_surface_base_address = iris_update_surface_base_address;
- ice->vtbl.upload_compute_state = iris_upload_compute_state;
- ice->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
- ice->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
- ice->vtbl.rebind_buffer = iris_rebind_buffer;
- ice->vtbl.load_register_reg32 = iris_load_register_reg32;
- ice->vtbl.load_register_reg64 = iris_load_register_reg64;
- ice->vtbl.load_register_imm32 = iris_load_register_imm32;
- ice->vtbl.load_register_imm64 = iris_load_register_imm64;
- ice->vtbl.load_register_mem32 = iris_load_register_mem32;
- ice->vtbl.load_register_mem64 = iris_load_register_mem64;
- ice->vtbl.store_register_mem32 = iris_store_register_mem32;
- ice->vtbl.store_register_mem64 = iris_store_register_mem64;
- ice->vtbl.store_data_imm32 = iris_store_data_imm32;
- ice->vtbl.store_data_imm64 = iris_store_data_imm64;
- ice->vtbl.copy_mem_mem = iris_copy_mem_mem;
- ice->vtbl.derived_program_state_size = iris_derived_program_state_size;
- ice->vtbl.store_derived_program_state = iris_store_derived_program_state;
- ice->vtbl.create_so_decl_list = iris_create_so_decl_list;
- ice->vtbl.populate_vs_key = iris_populate_vs_key;
- ice->vtbl.populate_tcs_key = iris_populate_tcs_key;
- ice->vtbl.populate_tes_key = iris_populate_tes_key;
- ice->vtbl.populate_gs_key = iris_populate_gs_key;
- ice->vtbl.populate_fs_key = iris_populate_fs_key;
- ice->vtbl.populate_cs_key = iris_populate_cs_key;
- ice->vtbl.mocs = mocs;
- ice->vtbl.lost_genx_state = iris_lost_genx_state;
+ ctx->set_frontend_noop = iris_set_frontend_noop;
+
+ screen->vtbl.destroy_state = iris_destroy_state;
+ screen->vtbl.init_render_context = iris_init_render_context;
+ screen->vtbl.init_compute_context = iris_init_compute_context;
+ screen->vtbl.upload_render_state = iris_upload_render_state;
+ screen->vtbl.update_surface_base_address = iris_update_surface_base_address;
+ screen->vtbl.upload_compute_state = iris_upload_compute_state;
+ screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
+ screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
+ screen->vtbl.rebind_buffer = iris_rebind_buffer;
+ screen->vtbl.load_register_reg32 = iris_load_register_reg32;
+ screen->vtbl.load_register_reg64 = iris_load_register_reg64;
+ screen->vtbl.load_register_imm32 = iris_load_register_imm32;
+ screen->vtbl.load_register_imm64 = iris_load_register_imm64;
+ screen->vtbl.load_register_mem32 = iris_load_register_mem32;
+ screen->vtbl.load_register_mem64 = iris_load_register_mem64;
+ screen->vtbl.store_register_mem32 = iris_store_register_mem32;
+ screen->vtbl.store_register_mem64 = iris_store_register_mem64;
+ screen->vtbl.store_data_imm32 = iris_store_data_imm32;
+ screen->vtbl.store_data_imm64 = iris_store_data_imm64;
+ screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
+ screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
+ screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
+ screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
+ screen->vtbl.populate_vs_key = iris_populate_vs_key;
+ screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
+ screen->vtbl.populate_tes_key = iris_populate_tes_key;
+ screen->vtbl.populate_gs_key = iris_populate_gs_key;
+ screen->vtbl.populate_fs_key = iris_populate_fs_key;
+ screen->vtbl.populate_cs_key = iris_populate_cs_key;
+ screen->vtbl.lost_genx_state = iris_lost_genx_state;
ice->state.dirty = ~0ull;
@@ -6892,6 +7575,7 @@ genX(init_state)(struct iris_context *ice)
ice->state.num_viewports = 1;
ice->state.prim_mode = PIPE_PRIM_MAX;
ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
+ ice->draw.derived_params.drawid = -1;
/* Make a 1x1x1 null surface for unbound textures */
void *null_surf_map =