author    Jonathan Gray <jsg@cvs.openbsd.org>    2020-01-22 02:49:53 +0000
committer Jonathan Gray <jsg@cvs.openbsd.org>    2020-01-22 02:49:53 +0000
commit    d264279e28002d81821c883795911844a4c01a2c (patch)
tree      b0481616eda55b543a0dc1487d096c3239885c41    /lib/mesa/src/gallium/drivers/freedreno/freedreno_batch.c
parent    fdcc03929065b5bf5dd93553db219ea3e05c8c34 (diff)
Merge Mesa 19.2.8
Diffstat (limited to 'lib/mesa/src/gallium/drivers/freedreno/freedreno_batch.c')
-rw-r--r--    lib/mesa/src/gallium/drivers/freedreno/freedreno_batch.c    43
1 file changed, 23 insertions, 20 deletions
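
This file's share of the merge picks up several upstream API simplifications: fd_batch_flush() loses its force parameter, fd_fence_ref() loses its leading argument (previously passed as NULL here), a fb_read_patches dynarray is added alongside draw_patches, the flushed batch's fence is retained in ctx->last_fence, and the batch_depends_on() recursion is replaced by a recursive_dependents_mask() helper. The prototypes implied by the updated call sites look roughly as follows; parameter names and exact types are assumptions inferred from this patch, not taken from the headers.

/* Sketch of the simplified entry points, inferred from the call sites below.
 * The authoritative declarations live in the freedreno headers. */
void fd_batch_flush(struct fd_batch *batch, bool sync);    /* "force" flag dropped */
void fd_fence_ref(struct pipe_fence_handle **ptr,
                  struct pipe_fence_handle *fence);        /* leading argument dropped */
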
diff --git a/lib/mesa/src/gallium/drivers/freedreno/freedreno_batch.c b/lib/mesa/src/gallium/drivers/freedreno/freedreno_batch.c
index a1578506c..52870cd0a 100644
--- a/lib/mesa/src/gallium/drivers/freedreno/freedreno_batch.c
+++ b/lib/mesa/src/gallium/drivers/freedreno/freedreno_batch.c
@@ -89,6 +89,7 @@ batch_init(struct fd_batch *batch)
fd_reset_wfi(batch);
util_dynarray_init(&batch->draw_patches, NULL);
+ util_dynarray_init(&batch->fb_read_patches, NULL);
if (is_a2xx(ctx->screen)) {
util_dynarray_init(&batch->shader_patches, NULL);
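
The new fb_read_patches dynarray follows the same lifecycle as the existing draw_patches array: initialized here in batch_init(), appended to while the command stream is built, resolved when the batch is flushed, and released in batch_fini(). A minimal sketch of that util_dynarray pattern follows; the fd_cs_patch element layout and the resolution loop are illustrative assumptions, not copied from the driver.

/* Illustrative util_dynarray lifecycle for a patch list.  "struct fd_cs_patch"
 * and its "cs"/"val" fields are assumed names for a command-stream location
 * and the value to write there once the final state is known. */
struct fd_cs_patch { uint32_t *cs; uint32_t val; };

util_dynarray_init(&batch->fb_read_patches, NULL);                    /* batch_init() */

struct fd_cs_patch p = { .cs = cmdstream_dword, .val = 0 };           /* while emitting */
util_dynarray_append(&batch->fb_read_patches, struct fd_cs_patch, p);

util_dynarray_foreach(&batch->fb_read_patches, struct fd_cs_patch, patch)
        *patch->cs = patch->val;                                      /* resolve at flush */

util_dynarray_fini(&batch->fb_read_patches);                          /* batch_fini() */
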
@@ -138,7 +139,7 @@ batch_fini(struct fd_batch *batch)
/* in case batch wasn't flushed but fence was created: */
fd_fence_populate(batch->fence, 0, -1);
- fd_fence_ref(NULL, &batch->fence, NULL);
+ fd_fence_ref(&batch->fence, NULL);
fd_ringbuffer_del(batch->draw);
if (!batch->nondraw) {
@@ -168,6 +169,7 @@ batch_fini(struct fd_batch *batch)
fd_submit_del(batch->submit);
util_dynarray_fini(&batch->draw_patches);
+ util_dynarray_fini(&batch->fb_read_patches);
if (is_a2xx(batch->ctx->screen)) {
util_dynarray_fini(&batch->shader_patches);
@@ -196,7 +198,7 @@ batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
foreach_batch(dep, cache, batch->dependents_mask) {
if (flush)
- fd_batch_flush(dep, false, false);
+ fd_batch_flush(dep, false);
fd_batch_reference(&dep, NULL);
}
@@ -275,7 +277,7 @@ __fd_batch_destroy(struct fd_batch *batch)
void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
- util_sprintf(buf, "fd_batch<%u>", batch->seqno);
+ sprintf(buf, "fd_batch<%u>", batch->seqno);
}
void
@@ -305,7 +307,7 @@ batch_cleanup_func(void *job, int id)
}
static void
-batch_flush(struct fd_batch *batch, bool force)
+batch_flush(struct fd_batch *batch)
{
DBG("%p: needs_flush=%d", batch, batch->needs_flush);
@@ -323,6 +325,8 @@ batch_flush(struct fd_batch *batch, bool force)
batch->flushed = true;
+ fd_fence_ref(&batch->ctx->last_fence, batch->fence);
+
if (batch->ctx->screen->reorder) {
struct fd_batch *tmp = NULL;
fd_batch_reference(&tmp, batch);
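
batch_flush() now also parks the flushed batch's fence in ctx->last_fence, so the context still holds a fence for the most recent flush after the batch itself has been recycled. A hedged sketch of the kind of caller this helps; the function below is illustrative, not the driver's actual context flush path.

/* Illustrative only: with ctx->last_fence kept up to date by batch_flush(),
 * a flush request that arrives when no work is pending can still return a
 * valid fence for the caller to wait on. */
static void
example_flush(struct fd_context *ctx, struct pipe_fence_handle **fencep)
{
        if (ctx->batch)
                fd_batch_flush(ctx->batch, true);      /* updates ctx->last_fence */

        if (fencep)
                fd_fence_ref(fencep, ctx->last_fence); /* hand back the latest fence */
}
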
@@ -354,7 +358,7 @@ batch_flush(struct fd_batch *batch, bool force)
* a fence to sync on
*/
void
-fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
+fd_batch_flush(struct fd_batch *batch, bool sync)
{
struct fd_batch *tmp = NULL;
bool newbatch = false;
@@ -370,7 +374,7 @@ fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
newbatch = true;
}
- batch_flush(tmp, force);
+ batch_flush(tmp);
if (newbatch) {
struct fd_context *ctx = batch->ctx;
@@ -397,31 +401,30 @@ fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
fd_batch_reference(&tmp, NULL);
}
-/* does 'batch' depend directly or indirectly on 'other' ? */
-static bool
-batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
+/* find a batches dependents mask, including recursive dependencies: */
+static uint32_t
+recursive_dependents_mask(struct fd_batch *batch)
{
struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
struct fd_batch *dep;
-
- if (batch->dependents_mask & (1 << other->idx))
- return true;
+ uint32_t dependents_mask = batch->dependents_mask;
foreach_batch(dep, cache, batch->dependents_mask)
- if (batch_depends_on(batch, dep))
- return true;
+ dependents_mask |= recursive_dependents_mask(dep);
- return false;
+ return dependents_mask;
}
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
+ pipe_mutex_assert_locked(batch->ctx->screen->lock);
+
if (batch->dependents_mask & (1 << dep->idx))
return;
/* a loop should not be possible */
- debug_assert(!batch_depends_on(dep, batch));
+ debug_assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));
struct fd_batch *other = NULL;
fd_batch_reference_locked(&other, dep);
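
The old batch_depends_on() walk is replaced by recursive_dependents_mask(), which folds all transitive dependencies into a single bitmask, so fd_batch_add_dep() can reject a cycle with one bit test: (1 << batch->idx) & recursive_dependents_mask(dep). Below is a standalone sketch of that idea on a hypothetical dependency graph; the node type and array layout are illustrative, not driver code.

/* Illustrative recursive-dependents-mask check.  Nodes are indexed by their
 * bit position, mirroring how batches carry an idx into the batch cache. */
#include <stdbool.h>
#include <stdint.h>

struct node {
        unsigned idx;              /* bit position identifying this node */
        uint32_t dependents_mask;  /* direct dependencies only */
};

static uint32_t
recursive_mask(const struct node *nodes, const struct node *n)
{
        uint32_t mask = n->dependents_mask;
        for (unsigned i = 0; i < 32; i++)
                if (n->dependents_mask & (1u << i))
                        mask |= recursive_mask(nodes, &nodes[i]);
        return mask;
}

/* adding "dep" to "n" would create a loop iff n is already reachable from dep */
static bool
would_create_loop(const struct node *nodes, const struct node *n, const struct node *dep)
{
        return (1u << n->idx) & recursive_mask(nodes, dep);
}
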
@@ -433,10 +436,10 @@ static void
flush_write_batch(struct fd_resource *rsc)
{
struct fd_batch *b = NULL;
- fd_batch_reference(&b, rsc->write_batch);
+ fd_batch_reference_locked(&b, rsc->write_batch);
mtx_unlock(&b->ctx->screen->lock);
- fd_batch_flush(b, true, false);
+ fd_batch_flush(b, true);
mtx_lock(&b->ctx->screen->lock);
fd_bc_invalidate_batch(b, false);
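
flush_write_batch() runs with screen->lock already held, which is why the temporary reference is now taken with fd_batch_reference_locked(); the lock is dropped only around the fd_batch_flush() call itself. A generic sketch of that pattern follows; the helper name is illustrative.

/* Illustrative pattern: pin the batch with a reference while the lock is
 * held, drop the lock for the (potentially long) flush, then re-take it
 * before touching the batch cache again. */
static void
example_flush_under_lock(struct fd_context *ctx, struct fd_batch *batch)
{
        struct fd_batch *b = NULL;

        /* caller holds ctx->screen->lock */
        fd_batch_reference_locked(&b, batch);

        mtx_unlock(&ctx->screen->lock);
        fd_batch_flush(b, true);
        mtx_lock(&ctx->screen->lock);

        /* still under the lock, so use the _locked variant to drop the ref */
        fd_batch_reference_locked(&b, NULL);
}
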
@@ -510,7 +513,7 @@ fd_batch_check_size(struct fd_batch *batch)
debug_assert(!batch->flushed);
if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
- fd_batch_flush(batch, true, false);
+ fd_batch_flush(batch, true);
return;
}
@@ -519,7 +522,7 @@ fd_batch_check_size(struct fd_batch *batch)
struct fd_ringbuffer *ring = batch->draw;
if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
- fd_batch_flush(batch, true, false);
+ fd_batch_flush(batch, true);
}
/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already