-rw-r--r--  src/sna/gen2_render.c   2
-rw-r--r--  src/sna/gen3_render.c   3
-rw-r--r--  src/sna/gen4_render.c  18
-rw-r--r--  src/sna/gen5_render.c  17
-rw-r--r--  src/sna/gen6_render.c  14
-rw-r--r--  src/sna/gen7_render.c  17
-rw-r--r--  src/sna/kgem.c          5
-rw-r--r--  src/sna/kgem.h          6
8 files changed, 46 insertions, 36 deletions
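
Read together, the hunks below appear to make two related changes (the commit message itself is not preserved in this view). First, kgem_bo_mark_dirty() drops its struct kgem argument and queues the bo on its own request's buffer list, bo->rq->buffers, instead of reaching through kgem->next_request. Second, the gen4-gen7 surface-binding paths defer both the cache-domain selection and the dirty marking until after the binding-cache lookup: on a cache hit the relocation emitted by the original bind is reused, so a destination bo must be marked dirty explicitly, while on a miss kgem_add_reloc() marks it dirty as a side effect of the write domain. New asserts pin the supporting invariants: a bo must already be in the batch (bo->exec and bo->rq set) before it may be dirtied, a relocation may only target the current request, and kgem_commit() expects a retired request's buffer list to be empty.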
diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index fea1791d..9e51cb70 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -547,7 +547,7 @@ static void gen2_emit_target(struct sna *sna, const struct sna_composite_op *op)
assert(sna->render_state.gen2.vertex_offset == 0);
if (sna->render_state.gen2.target == op->dst.bo->unique_id) {
- kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
+ kgem_bo_mark_dirty(op->dst.bo);
return;
}
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 72a25759..81d2c95f 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1342,6 +1342,7 @@ static void gen3_emit_target(struct sna *sna,
struct gen3_render_state *state = &sna->render_state.gen3;
/* BUF_INFO is an implicit flush, so skip if the target is unchanged. */
+ assert(bo->unique_id != 0);
if (bo->unique_id != state->current_dst) {
uint32_t v;
@@ -1373,7 +1374,7 @@ static void gen3_emit_target(struct sna *sna,
state->current_dst = bo->unique_id;
}
- kgem_bo_mark_dirty(&sna->kgem, bo);
+ kgem_bo_mark_dirty(bo);
}
static void gen3_emit_composite_state(struct sna *sna,
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index d72a2fd7..632793f3 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -654,15 +654,12 @@ gen4_bind_bo(struct sna *sna,
assert(!kgem_bo_is_snoop(bo));
/* After the first bind, we manage the cache domains within the batch */
- if (is_dst) {
- domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
- kgem_bo_mark_dirty(&sna->kgem, bo);
- } else
- domains = I915_GEM_DOMAIN_SAMPLER << 16;
-
offset = kgem_bo_get_binding(bo, format);
- if (offset)
+ if (offset) {
+ if (is_dst)
+ kgem_bo_mark_dirty(bo);
return offset * sizeof(uint32_t);
+ }
offset = sna->kgem.surface -=
sizeof(struct gen4_surface_state_padded) / sizeof(uint32_t);
@@ -671,6 +668,11 @@ gen4_bind_bo(struct sna *sna,
ss->ss0.surface_type = GEN4_SURFACE_2D;
ss->ss0.surface_format = format;
+ if (is_dst)
+ domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
+ else
+ domains = I915_GEM_DOMAIN_SAMPLER << 16;
+
ss->ss0.data_return_format = GEN4_SURFACERETURNFORMAT_FLOAT32;
ss->ss0.color_blend = 1;
ss->ss1.base_addr =
@@ -1385,7 +1387,7 @@ gen4_emit_state(struct sna *sna,
kgem_bo_is_dirty(op->mask.bo)));
OUT_BATCH(MI_FLUSH);
kgem_clear_dirty(&sna->kgem);
- kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
+ kgem_bo_mark_dirty(op->dst.bo);
}
}
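
The reordering in gen4_bind_bo() above is repeated almost verbatim for gen5, gen6 and gen7 below. Stripped of the driver's machinery, the new control flow looks like the following self-contained sketch; the types, helper names and domain values here are simplified stand-ins, not the real kgem API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DOMAIN_RENDER  0x2	/* toy values, not the i915 domain bits */
#define DOMAIN_SAMPLER 0x10

struct toy_bo {
	uint32_t binding;	/* cached surface-state offset, 0 if unset */
	bool dirty;
};

static void mark_dirty(struct toy_bo *bo)
{
	bo->dirty = true;
}

/* A relocation with a write domain marks the bo dirty, which is why
 * the cache-hit path below has to do so explicitly. */
static void add_reloc(struct toy_bo *bo, uint32_t domains)
{
	if (domains >> 16)	/* write domain present */
		mark_dirty(bo);
}

static uint32_t bind_bo(struct toy_bo *bo, bool is_dst)
{
	uint32_t domains, offset;

	/* Fast path: binding cached, reloc already emitted earlier in
	 * the batch -- only the dirty flag needs refreshing. */
	offset = bo->binding;
	if (offset) {
		if (is_dst)
			mark_dirty(bo);
		return offset * sizeof(uint32_t);
	}

	/* Slow path: compute the domains only now, just before the
	 * relocation that consumes them. */
	if (is_dst)
		domains = DOMAIN_RENDER << 16 | DOMAIN_RENDER;
	else
		domains = DOMAIN_SAMPLER << 16;
	add_reloc(bo, domains);

	bo->binding = 4;	/* pretend surface state was allocated */
	return bo->binding * sizeof(uint32_t);
}

int main(void)
{
	struct toy_bo dst = { 0 };

	bind_bo(&dst, true);	/* miss: add_reloc() marks dirty */
	dst.dirty = false;
	bind_bo(&dst, true);	/* hit: marked dirty explicitly */
	printf("dst dirty after rebind: %d\n", dst.dirty);
	return 0;
}

Deferring the dirty marking to the point where the relocation, and hence bo->rq, is known is what lets the helper drop its struct kgem parameter in the kgem.h hunk at the end of the patch.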
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 9b976c82..2894c58d 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -639,16 +639,13 @@ gen5_bind_bo(struct sna *sna,
uint32_t *ss;
/* After the first bind, we manage the cache domains within the batch */
- if (is_dst) {
- domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
- kgem_bo_mark_dirty(&sna->kgem, bo);
- } else
- domains = I915_GEM_DOMAIN_SAMPLER << 16;
-
if (!DBG_NO_SURFACE_CACHE) {
offset = kgem_bo_get_binding(bo, format);
- if (offset)
+ if (offset) {
+ if (is_dst)
+ kgem_bo_mark_dirty(bo);
return offset * sizeof(uint32_t);
+ }
}
offset = sna->kgem.surface -=
@@ -659,6 +656,10 @@ gen5_bind_bo(struct sna *sna,
GEN5_SURFACE_BLEND_ENABLED |
format << GEN5_SURFACE_FORMAT_SHIFT);
+ if (is_dst)
+ domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
+ else
+ domains = I915_GEM_DOMAIN_SAMPLER << 16;
ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
ss[2] = ((width - 1) << GEN5_SURFACE_WIDTH_SHIFT |
@@ -1387,7 +1388,7 @@ gen5_emit_state(struct sna *sna,
if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
OUT_BATCH(MI_FLUSH);
kgem_clear_dirty(&sna->kgem);
- kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
+ kgem_bo_mark_dirty(op->dst.bo);
}
}
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 710a35e9..af8899eb 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -903,7 +903,7 @@ gen6_emit_state(struct sna *sna,
if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
gen6_emit_flush(sna);
kgem_clear_dirty(&sna->kgem);
- kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
+ kgem_bo_mark_dirty(op->dst.bo);
need_stall = false;
}
if (need_stall) {
@@ -1230,17 +1230,13 @@ gen6_bind_bo(struct sna *sna,
uint16_t offset;
/* After the first bind, we manage the cache domains within the batch */
- if (is_dst) {
- domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
- kgem_bo_mark_dirty(&sna->kgem, bo);
- } else
- domains = I915_GEM_DOMAIN_SAMPLER << 16;
-
offset = kgem_bo_get_binding(bo, format);
if (offset) {
DBG(("[%x] bo(handle=%d), format=%d, reuse %s binding\n",
offset, bo->handle, format,
domains & 0xffff ? "render" : "sampler"));
+ if (is_dst)
+ kgem_bo_mark_dirty(bo);
return offset * sizeof(uint32_t);
}
@@ -1250,6 +1246,10 @@ gen6_bind_bo(struct sna *sna,
ss[0] = (GEN6_SURFACE_2D << GEN6_SURFACE_TYPE_SHIFT |
GEN6_SURFACE_BLEND_ENABLED |
format << GEN6_SURFACE_FORMAT_SHIFT);
+ if (is_dst)
+ domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
+ else
+ domains = I915_GEM_DOMAIN_SAMPLER << 16;
ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
ss[2] = ((width - 1) << GEN6_SURFACE_WIDTH_SHIFT |
(height - 1) << GEN6_SURFACE_HEIGHT_SHIFT);
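
One thing to flag in the gen6 hunk above: the DBG() on the cache-hit path still prints domains & 0xffff, but domains is no longer assigned until the slow path below the lookup, so with DBG enabled it would read an uninitialized value. When DBG compiles to nothing this is inert, but it looks like an oversight left by the reorder.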
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 8a281e54..d34cdfa0 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1047,7 +1047,7 @@ gen7_emit_state(struct sna *sna,
need_stall = GEN7_BLEND(op->u.gen7.flags) != NO_BLEND;
gen7_emit_pipe_invalidate(sna, need_stall);
kgem_clear_dirty(&sna->kgem);
- kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
+ kgem_bo_mark_dirty(op->dst.bo);
need_stall = false;
}
if (need_stall)
@@ -1348,15 +1348,12 @@ gen7_bind_bo(struct sna *sna,
COMPILE_TIME_ASSERT(sizeof(struct gen7_surface_state) == 32);
/* After the first bind, we manage the cache domains within the batch */
- if (is_dst) {
- domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
- kgem_bo_mark_dirty(&sna->kgem, bo);
- } else
- domains = I915_GEM_DOMAIN_SAMPLER << 16;
-
offset = kgem_bo_get_binding(bo, format);
- if (offset)
+ if (offset) {
+ if (is_dst)
+ kgem_bo_mark_dirty(bo);
return offset * sizeof(uint32_t);
+ }
offset = sna->kgem.surface -=
sizeof(struct gen7_surface_state) / sizeof(uint32_t);
@@ -1364,6 +1361,10 @@ gen7_bind_bo(struct sna *sna,
ss[0] = (GEN7_SURFACE_2D << GEN7_SURFACE_TYPE_SHIFT |
gen7_tiling_bits(bo->tiling) |
format << GEN7_SURFACE_FORMAT_SHIFT);
+ if (is_dst)
+ domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
+ else
+ domains = I915_GEM_DOMAIN_SAMPLER << 16;
ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
ss[2] = ((width - 1) << GEN7_SURFACE_WIDTH_SHIFT |
(height - 1) << GEN7_SURFACE_HEIGHT_SHIFT);
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 11cd6a4e..c5b88ffe 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1811,6 +1811,8 @@ static void kgem_commit(struct kgem *kgem)
}
kgem_retire(kgem);
+ assert(list_is_empty(&rq->buffers));
+
gem_close(kgem->fd, rq->bo->handle);
} else {
list_add_tail(&rq->list, &kgem->requests);
@@ -3551,6 +3553,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
if (bo->exec == NULL)
_kgem_add_bo(kgem, bo);
+ assert(bo->rq == kgem->next_request);
if (kgem->gen < 40 && read_write_domain & KGEM_RELOC_FENCED) {
if (bo->tiling &&
@@ -3568,7 +3571,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
kgem->reloc[index].presumed_offset = bo->presumed_offset;
if (read_write_domain & 0x7ff)
- kgem_bo_mark_dirty(kgem, bo);
+ kgem_bo_mark_dirty(bo);
delta += bo->presumed_offset;
} else {
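
The assert added to kgem_add_reloc() above is what makes the signature change below safe: by the time a relocation is emitted the bo has been added to the batch and bo->rq is exactly kgem->next_request, so moving the bo onto bo->rq->buffers reaches the same list the old code found through the kgem pointer.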
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 8227538a..cf7cf70f 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -534,15 +534,17 @@ static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
return bo->dirty;
}
-static inline void kgem_bo_mark_dirty(struct kgem *kgem, struct kgem_bo *bo)
+static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
if (bo->dirty)
return;
DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
+ assert(bo->exec);
+ assert(bo->rq);
bo->needs_flush = bo->dirty = true;
- list_move(&bo->request, &kgem->next_request->buffers);
+ list_move(&bo->request, &bo->rq->buffers);
}
#define KGEM_BUFFER_WRITE 0x1
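
To see the rewritten helper's invariant in isolation, here is a minimal runnable model; struct list, struct request and struct bo are simplified stand-ins for the driver's types, and the driver's bo->exec assertion is elided since the toy has no execbuffer:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct list { struct list *prev, *next; };

static void list_init(struct list *l) { l->prev = l->next = l; }
static void list_del(struct list *l)
{
	l->prev->next = l->next;
	l->next->prev = l->prev;
}
static void list_add_tail(struct list *l, struct list *head)
{
	l->prev = head->prev;
	l->next = head;
	head->prev->next = l;
	head->prev = l;
}
static void list_move(struct list *l, struct list *head)
{
	list_del(l);
	list_add_tail(l, head);
}

struct request { struct list buffers; };

struct bo {
	struct list request;	/* link in rq->buffers */
	struct request *rq;	/* the request this bo belongs to */
	bool dirty, needs_flush;
};

/* Mirrors the new kgem_bo_mark_dirty(): no kgem needed, because the
 * bo itself records which request it belongs to. */
static void bo_mark_dirty(struct bo *bo)
{
	if (bo->dirty)
		return;
	assert(bo->rq);		/* must already be in a batch */
	bo->needs_flush = bo->dirty = true;
	list_move(&bo->request, &bo->rq->buffers);
}

int main(void)
{
	struct request rq;
	struct bo bo = { .rq = &rq };

	list_init(&rq.buffers);
	list_add_tail(&bo.request, &rq.buffers);

	bo_mark_dirty(&bo);
	bo_mark_dirty(&bo);	/* idempotent: early return the second time */
	printf("dirty=%d needs_flush=%d\n", bo.dirty, bo.needs_flush);
	return 0;
}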