summary refs log tree commit diff
path: root/lib/mesa/src/gallium/drivers/freedreno/freedreno_resource.c
diff options
context:
space:
mode:
author    Jonathan Gray <jsg@cvs.openbsd.org>  2018-01-08 05:41:34 +0000
committer Jonathan Gray <jsg@cvs.openbsd.org>  2018-01-08 05:41:34 +0000
commit c00801de923e125863aaf8180439d59d610b2517 (patch)
tree   e2896aa2785f3cf2151aeeb3c95fb5cc09a2fe02 /lib/mesa/src/gallium/drivers/freedreno/freedreno_resource.c
parent be30e6efb92db21299b936c0e068e7088941e9c9 (diff)
Revert to Mesa 13.0.6 again.
Corruption has again been reported on Intel hardware running Xorg with the modesetting driver (which uses OpenGL based acceleration instead of SNA acceleration the intel driver defaults to). Reported in various forms on Sandy Bridge (X220), Ivy Bridge (X230) and Haswell (X240). Confirmed to not occur with the intel driver but the xserver was changed to default to the modesetting driver on >= gen4 hardware (except Ironlake). One means of triggering this is to open a large pdf with xpdf on an idle machine and highlight a section of the document. There have been reports of gpu hangs on gen4 intel hardware (T500 with GM45, X61 with 965GM) when starting Xorg as well.
Diffstat (limited to 'lib/mesa/src/gallium/drivers/freedreno/freedreno_resource.c')
-rw-r--r--  lib/mesa/src/gallium/drivers/freedreno/freedreno_resource.c  126
1 files changed, 38 insertions, 88 deletions
diff --git a/lib/mesa/src/gallium/drivers/freedreno/freedreno_resource.c b/lib/mesa/src/gallium/drivers/freedreno/freedreno_resource.c
index 5aa90ced6..5a21ca44e 100644
--- a/lib/mesa/src/gallium/drivers/freedreno/freedreno_resource.c
+++ b/lib/mesa/src/gallium/drivers/freedreno/freedreno_resource.c
@@ -51,46 +51,39 @@
static void
fd_invalidate_resource(struct fd_context *ctx, struct pipe_resource *prsc)
{
+ int i;
+
/* Go through the entire state and see if the resource is bound
* anywhere. If it is, mark the relevant state as dirty. This is called on
* realloc_bo.
*/
+ /* Constbufs */
+ for (i = 1; i < PIPE_MAX_CONSTANT_BUFFERS && !(ctx->dirty & FD_DIRTY_CONSTBUF); i++) {
+ if (ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer == prsc)
+ ctx->dirty |= FD_DIRTY_CONSTBUF;
+ if (ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer == prsc)
+ ctx->dirty |= FD_DIRTY_CONSTBUF;
+ }
+
/* VBOs */
- for (unsigned i = 0; i < ctx->vtx.vertexbuf.count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
- if (ctx->vtx.vertexbuf.vb[i].buffer.resource == prsc)
+ for (i = 0; i < ctx->vtx.vertexbuf.count && !(ctx->dirty & FD_DIRTY_VTXBUF); i++) {
+ if (ctx->vtx.vertexbuf.vb[i].buffer == prsc)
ctx->dirty |= FD_DIRTY_VTXBUF;
}
- /* per-shader-stage resources: */
- for (unsigned stage = 0; stage < PIPE_SHADER_TYPES; stage++) {
- /* Constbufs.. note that constbuf[0] is normal uniforms emitted in
- * cmdstream rather than by pointer..
- */
- const unsigned num_ubos = util_last_bit(ctx->constbuf[stage].enabled_mask);
- for (unsigned i = 1; i < num_ubos; i++) {
- if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_CONST)
- break;
- if (ctx->constbuf[stage].cb[i].buffer == prsc)
- ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_CONST;
- }
-
- /* Textures */
- for (unsigned i = 0; i < ctx->tex[stage].num_textures; i++) {
- if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_TEX)
- break;
- if (ctx->tex[stage].textures[i] && (ctx->tex[stage].textures[i]->texture == prsc))
- ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_TEX;
- }
+ /* Index buffer */
+ if (ctx->indexbuf.buffer == prsc)
+ ctx->dirty |= FD_DIRTY_INDEXBUF;
- /* SSBOs */
- const unsigned num_ssbos = util_last_bit(ctx->shaderbuf[stage].enabled_mask);
- for (unsigned i = 0; i < num_ssbos; i++) {
- if (ctx->dirty_shader[stage] & FD_DIRTY_SHADER_SSBO)
- break;
- if (ctx->shaderbuf[stage].sb[i].buffer == prsc)
- ctx->dirty_shader[stage] |= FD_DIRTY_SHADER_SSBO;
- }
+ /* Textures */
+ for (i = 0; i < ctx->verttex.num_textures && !(ctx->dirty & FD_DIRTY_VERTTEX); i++) {
+ if (ctx->verttex.textures[i] && (ctx->verttex.textures[i]->texture == prsc))
+ ctx->dirty |= FD_DIRTY_VERTTEX;
+ }
+ for (i = 0; i < ctx->fragtex.num_textures && !(ctx->dirty & FD_DIRTY_FRAGTEX); i++) {
+ if (ctx->fragtex.textures[i] && (ctx->fragtex.textures[i]->texture == prsc))
+ ctx->dirty |= FD_DIRTY_FRAGTEX;
}
}
@@ -109,6 +102,7 @@ realloc_bo(struct fd_resource *rsc, uint32_t size)
fd_bo_del(rsc->bo);
rsc->bo = fd_bo_new(screen->dev, size, flags);
+ rsc->timestamp = 0;
util_range_set_empty(&rsc->valid_buffer_range);
fd_bc_invalidate_resource(rsc, true);
}
@@ -185,7 +179,7 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
*/
fd_bc_invalidate_resource(rsc, false);
- mtx_lock(&ctx->screen->lock);
+ pipe_mutex_lock(ctx->screen->lock);
/* Swap the backing bo's, so shadow becomes the old buffer,
* blit from shadow to new buffer. From here on out, we
@@ -202,6 +196,7 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
/* TODO valid_buffer_range?? */
swap(rsc->bo, shadow->bo);
+ swap(rsc->timestamp, shadow->timestamp);
swap(rsc->write_batch, shadow->write_batch);
/* at this point, the newly created shadow buffer is not referenced
@@ -217,7 +212,7 @@ fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
}
swap(rsc->batch_mask, shadow->batch_mask);
- mtx_unlock(&ctx->screen->lock);
+ pipe_mutex_unlock(ctx->screen->lock);
struct pipe_blit_info blit = {0};
blit.dst.resource = prsc;
@@ -704,9 +699,7 @@ static uint32_t
setup_slices(struct fd_resource *rsc, uint32_t alignment, enum pipe_format format)
{
struct pipe_resource *prsc = &rsc->base.b;
- struct fd_screen *screen = fd_screen(prsc->screen);
enum util_format_layout layout = util_format_description(format)->layout;
- uint32_t pitchalign = screen->gmem_alignw;
uint32_t level, size = 0;
uint32_t width = prsc->width0;
uint32_t height = prsc->height0;
@@ -716,18 +709,15 @@ setup_slices(struct fd_resource *rsc, uint32_t alignment, enum pipe_format forma
*/
uint32_t layers_in_level = rsc->layer_first ? 1 : prsc->array_size;
- if (is_a5xx(screen) && (rsc->base.b.target >= PIPE_TEXTURE_2D))
- height = align(height, screen->gmem_alignh);
-
for (level = 0; level <= prsc->last_level; level++) {
struct fd_resource_slice *slice = fd_resource_slice(rsc, level);
uint32_t blocks;
if (layout == UTIL_FORMAT_LAYOUT_ASTC)
slice->pitch = width =
- util_align_npot(width, pitchalign * util_format_get_blockwidth(format));
+ util_align_npot(width, 32 * util_format_get_blockwidth(format));
else
- slice->pitch = width = align(width, pitchalign);
+ slice->pitch = width = align(width, 32);
slice->offset = size;
blocks = util_format_get_nblocks(format, width, height);
/* 1d array and 2d array textures must all have the same layer size
@@ -785,25 +775,6 @@ fd_resource_resize(struct pipe_resource *prsc, uint32_t sz)
realloc_bo(rsc, setup_slices(rsc, 1, prsc->format));
}
-// TODO common helper?
-static bool
-has_depth(enum pipe_format format)
-{
- switch (format) {
- case PIPE_FORMAT_Z16_UNORM:
- case PIPE_FORMAT_Z32_UNORM:
- case PIPE_FORMAT_Z32_FLOAT:
- case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
- case PIPE_FORMAT_Z24_UNORM_S8_UINT:
- case PIPE_FORMAT_S8_UINT_Z24_UNORM:
- case PIPE_FORMAT_Z24X8_UNORM:
- case PIPE_FORMAT_X8Z24_UNORM:
- return true;
- default:
- return false;
- }
-}
-
/**
* Create a new texture object, using the given template info.
*/
@@ -811,7 +782,6 @@ static struct pipe_resource *
fd_resource_create(struct pipe_screen *pscreen,
const struct pipe_resource *tmpl)
{
- struct fd_screen *screen = fd_screen(pscreen);
struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
struct pipe_resource *prsc = &rsc->base.b;
enum pipe_format format = tmpl->format;
@@ -839,7 +809,7 @@ fd_resource_create(struct pipe_screen *pscreen,
if (format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT)
format = PIPE_FORMAT_Z32_FLOAT;
- else if (screen->gpu_id < 400 &&
+ else if (fd_screen(pscreen)->gpu_id < 400 &&
util_format_description(format)->layout == UTIL_FORMAT_LAYOUT_RGTC)
format = PIPE_FORMAT_R8G8B8A8_UNORM;
rsc->internal_format = format;
@@ -847,24 +817,8 @@ fd_resource_create(struct pipe_screen *pscreen,
assert(rsc->cpp);
- // XXX probably need some extra work if we hit rsc shadowing path w/ lrz..
- if (is_a5xx(screen) && (fd_mesa_debug & FD_DBG_LRZ) && has_depth(format)) {
- const uint32_t flags = DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
- DRM_FREEDRENO_GEM_TYPE_KMEM; /* TODO */
- unsigned lrz_pitch = align(DIV_ROUND_UP(tmpl->width0, 8), 32);
- unsigned lrz_height = DIV_ROUND_UP(tmpl->height0, 8);
- unsigned size = lrz_pitch * lrz_height * 2;
-
- size += 0x1000; /* for GRAS_LRZ_FAST_CLEAR_BUFFER */
-
- rsc->lrz_height = lrz_height;
- rsc->lrz_width = lrz_pitch;
- rsc->lrz_pitch = lrz_pitch;
- rsc->lrz = fd_bo_new(screen->dev, size, flags);
- }
-
alignment = slice_alignment(pscreen, tmpl);
- if (is_a4xx(screen) || is_a5xx(screen)) {
+ if (is_a4xx(fd_screen(pscreen))) {
switch (tmpl->target) {
case PIPE_TEXTURE_3D:
rsc->layer_first = false;
@@ -928,7 +882,6 @@ fd_resource_from_handle(struct pipe_screen *pscreen,
struct fd_resource *rsc = CALLOC_STRUCT(fd_resource);
struct fd_resource_slice *slice = &rsc->slices[0];
struct pipe_resource *prsc = &rsc->base.b;
- uint32_t pitchalign = fd_screen(pscreen)->gmem_alignw;
DBG("target=%d, format=%s, %ux%ux%u, array_size=%u, last_level=%u, "
"nr_samples=%u, usage=%u, bind=%x, flags=%x",
@@ -956,10 +909,8 @@ fd_resource_from_handle(struct pipe_screen *pscreen,
rsc->cpp = util_format_get_blocksize(tmpl->format);
slice->pitch = handle->stride / rsc->cpp;
slice->offset = handle->offset;
- slice->size0 = handle->stride * prsc->height0;
- if ((slice->pitch < align(prsc->width0, pitchalign)) ||
- (slice->pitch & (pitchalign - 1)))
+ if ((slice->pitch < align(prsc->width0, 32)) || (slice->pitch % 32))
goto fail;
assert(rsc->cpp);
@@ -1124,17 +1075,16 @@ fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard,
util_blitter_save_framebuffer(ctx->blitter,
ctx->batch ? &ctx->batch->framebuffer : NULL);
util_blitter_save_fragment_sampler_states(ctx->blitter,
- ctx->tex[PIPE_SHADER_FRAGMENT].num_samplers,
- (void **)ctx->tex[PIPE_SHADER_FRAGMENT].samplers);
+ ctx->fragtex.num_samplers,
+ (void **)ctx->fragtex.samplers);
util_blitter_save_fragment_sampler_views(ctx->blitter,
- ctx->tex[PIPE_SHADER_FRAGMENT].num_textures,
- ctx->tex[PIPE_SHADER_FRAGMENT].textures);
+ ctx->fragtex.num_textures, ctx->fragtex.textures);
if (!render_cond)
util_blitter_save_render_condition(ctx->blitter,
ctx->cond_query, ctx->cond_cond, ctx->cond_mode);
if (ctx->batch)
- fd_batch_set_stage(ctx->batch, stage);
+ fd_hw_query_set_stage(ctx->batch, ctx->batch->draw, stage);
ctx->in_blit = discard;
}
@@ -1143,7 +1093,7 @@ void
fd_blitter_pipe_end(struct fd_context *ctx)
{
if (ctx->batch)
- fd_batch_set_stage(ctx->batch, FD_STAGE_NULL);
+ fd_hw_query_set_stage(ctx->batch, ctx->batch->draw, FD_STAGE_NULL);
ctx->in_blit = false;
}
@@ -1174,7 +1124,7 @@ fd_resource_context_init(struct pipe_context *pctx)
pctx->transfer_flush_region = u_transfer_flush_region_vtbl;
pctx->transfer_unmap = u_transfer_unmap_vtbl;
pctx->buffer_subdata = u_default_buffer_subdata;
- pctx->texture_subdata = u_default_texture_subdata;
+ pctx->texture_subdata = u_default_texture_subdata;
pctx->create_surface = fd_create_surface;
pctx->surface_destroy = fd_surface_destroy;
pctx->resource_copy_region = fd_resource_copy_region;