summaryrefslogtreecommitdiff
path: root/src/sna/gen8_render.c
diff options
context:
space:
mode:
author: Chris Wilson <chris@chris-wilson.co.uk> 2019-07-18 23:32:38 +0100
committer: Chris Wilson <chris@chris-wilson.co.uk> 2019-07-19 08:53:57 +0100
commit: 95ea05645a3abe7fa69922fe40e0839940706f9d (patch)
tree: 0c4002191a2ccd82e9ea6a36a7f29476562721f4 /src/sna/gen8_render.c
parent: bd585f626cd7b9e614cfb00dba24ab686b94b1f0 (diff)
sna: Rebalance prefer_blt to weight I915_TILING_Y higher
Mixing I915_TILING_Y and the blitter is painfully slow, so we need to take the use of I915_TILING_Y into more prominent consideration and even force a ring switch in case it is being used. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'src/sna/gen8_render.c')
-rw-r--r-- src/sna/gen8_render.c | 43
1 files changed, 26 insertions, 17 deletions
diff --git a/src/sna/gen8_render.c b/src/sna/gen8_render.c
index 69617da5..5d09c2cc 100644
--- a/src/sna/gen8_render.c
+++ b/src/sna/gen8_render.c
@@ -2084,11 +2084,6 @@ try_blt(struct sna *sna,
{
struct kgem_bo *bo;
- if (sna->kgem.mode == KGEM_BLT) {
- DBG(("%s: already performing BLT\n", __FUNCTION__));
- goto execute;
- }
-
if (too_large(width, height)) {
DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
__FUNCTION__, width, height));
@@ -2129,7 +2124,7 @@ try_blt(struct sna *sna,
goto execute;
}
- if (sna->kgem.ring == KGEM_BLT) {
+ if (sna->kgem.mode == KGEM_BLT) {
DBG(("%s: already performing BLT\n", __FUNCTION__));
goto execute;
}
@@ -2796,40 +2791,53 @@ prefer_blt_copy(struct sna *sna,
struct kgem_bo *dst_bo,
unsigned flags)
{
- if (sna->kgem.mode == KGEM_BLT)
- return true;
-
assert((flags & COPY_SYNC) == 0);
if (untiled_tlb_miss(src_bo) ||
- untiled_tlb_miss(dst_bo))
+ untiled_tlb_miss(dst_bo)) {
+ DBG(("%s: TLB miss -> blt\n", __func__));
return true;
+ }
- if (flags & COPY_DRI && !sna->kgem.has_semaphores)
+ if (flags & COPY_DRI && !sna->kgem.has_semaphores) {
+ DBG(("%s: DRI -> render\n", __func__));
return false;
+ }
- if (force_blt_ring(sna, dst_bo))
+ if (force_blt_ring(sna, dst_bo, src_bo)) {
+ DBG(("%s: force BLT -> blt\n", __func__));
return true;
+ }
if ((flags & COPY_SMALL ||
(sna->render_state.gt < 3 && src_bo == dst_bo)) &&
- can_switch_to_blt(sna, dst_bo, flags))
+ can_switch_to_blt(sna, dst_bo, flags)) {
+ DBG(("%s: small/self copy -> blt\n", __func__));
return true;
+ }
if (kgem_bo_is_render(dst_bo) ||
- kgem_bo_is_render(src_bo))
+ kgem_bo_is_render(src_bo)) {
+ DBG(("%s: render bo -> render\n", __func__));
return false;
+ }
if (flags & COPY_LAST &&
sna->render_state.gt < 3 &&
- can_switch_to_blt(sna, dst_bo, flags))
+ can_switch_to_blt(sna, dst_bo, flags)) {
+ DBG(("%s: copy last -> blt\n", __func__));
return true;
+ }
- if (prefer_render_ring(sna, dst_bo))
+ if (prefer_render_ring(sna, dst_bo)) {
+ DBG(("%s: prefer render -> render\n", __func__));
return false;
+ }
- if (!prefer_blt_ring(sna, dst_bo, flags))
+ if (!prefer_blt_ring(sna, dst_bo, flags)) {
+ DBG(("%s: !prefer blt -> render\n", __func__));
return false;
+ }
return prefer_blt_bo(sna, src_bo, dst_bo);
}
@@ -3116,6 +3124,7 @@ gen8_render_copy(struct sna *sna, uint8_t alu,
unaligned(src_bo, src->drawable.bitsPerPixel) ||
unaligned(dst_bo, dst->drawable.bitsPerPixel)) {
fallback:
+ DBG(("%s: blt fallback\n", __func__));
if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
return false;