summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2012-03-07 15:52:41 +0000
committerChris Wilson <chris@chris-wilson.co.uk>2012-03-08 00:58:38 +0000
commit49a80ce1ff336fb2fa7d214bd3fddbce5a62b77a (patch)
tree0222c7266af49f66177a2696dbd30de30873ce08 /src
parent4899740f6f0f5b1a5b2b3490502ccdeb7b092877 (diff)
sna/gen2+: Prefer not to fallback if the source is busy
As if we try to perform the operation with outstanding operations on the source pixmaps, we will stall waiting for them to complete. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'src')
-rw-r--r--src/sna/gen2_render.c75
-rw-r--r--src/sna/gen3_render.c79
-rw-r--r--src/sna/gen4_render.c82
-rw-r--r--src/sna/gen5_render.c78
-rw-r--r--src/sna/gen6_render.c78
-rw-r--r--src/sna/gen7_render.c75
-rw-r--r--src/sna/kgem.h1
-rw-r--r--src/sna/sna_accel.c4
-rw-r--r--src/sna/sna_render.c4
-rw-r--r--src/sna/sna_render_inline.h4
10 files changed, 310 insertions, 170 deletions
diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 57bb835a..597d5f3d 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1512,16 +1512,37 @@ need_upload(PicturePtr p)
}
static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+ struct sna_pixmap *priv = sna_pixmap(pixmap);
+ if (priv == NULL)
+ return false;
+
+ if (priv->clear)
+ return false;
+
+ if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+ return true;
+
+ if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+ return true;
+
+ return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
{
if (sna_picture_is_solid(p, NULL))
return false;
- return (has_alphamap(p) ||
- is_unhandled_gradient(p) ||
- !gen2_check_filter(p) ||
- !gen2_check_repeat(p) ||
- need_upload(p));
+ if (is_unhandled_gradient(p) || !gen2_check_repeat(p))
+ return true;
+
+ if (pixmap && source_is_busy(pixmap))
+ return false;
+
+ return has_alphamap(p) || !gen2_check_filter(p) || need_upload(p);
}
static bool
@@ -1534,6 +1555,7 @@ gen2_composite_fallback(struct sna *sna,
PixmapPtr src_pixmap;
PixmapPtr mask_pixmap;
PixmapPtr dst_pixmap;
+ bool src_fallback, mask_fallback;
if (!gen2_check_dst_format(dst->format)) {
DBG(("%s: unknown destination format: %d\n",
@@ -1542,18 +1564,27 @@ gen2_composite_fallback(struct sna *sna,
}
dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
- mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ src_fallback = source_fallback(src, src_pixmap);
+
+ if (mask) {
+ mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ mask_fallback = source_fallback(mask, mask_pixmap);
+ } else {
+ mask_pixmap = NULL;
+ mask_fallback = false;
+ }
/* If we are using the destination as a source and need to
* readback in order to upload the source, do it all
* on the cpu.
*/
- if (src_pixmap == dst_pixmap && source_fallback(src)) {
+ if (src_pixmap == dst_pixmap && src_fallback) {
DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
- if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+ if (mask_pixmap == dst_pixmap && mask_fallback) {
DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
@@ -1566,34 +1597,28 @@ gen2_composite_fallback(struct sna *sna,
return FALSE;
}
- if (src_pixmap && !source_fallback(src)) {
- priv = sna_pixmap(src_pixmap);
- if (priv && priv->gpu_damage && !priv->cpu_damage) {
- DBG(("%s: src is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (src_pixmap && !src_fallback) {
+ DBG(("%s: src is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
- if (mask_pixmap && !source_fallback(mask)) {
- priv = sna_pixmap(mask_pixmap);
- if (priv && priv->gpu_damage && !priv->cpu_damage) {
- DBG(("%s: mask is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (mask_pixmap && !mask_fallback) {
+ DBG(("%s: mask is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
/* However if the dst is not on the GPU and we need to
* render one of the sources using the CPU, we may
* as well do the entire operation in place onthe CPU.
*/
- if (source_fallback(src)) {
+ if (src_fallback) {
DBG(("%s: dst is on the CPU and src will fallback\n",
__FUNCTION__));
return TRUE;
}
- if (mask && source_fallback(mask)) {
+ if (mask && mask_fallback) {
DBG(("%s: dst is on the CPU and mask will fallback\n",
__FUNCTION__));
return TRUE;
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 78c7ea0a..2a18631c 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2511,16 +2511,37 @@ need_upload(PicturePtr p)
}
static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+ struct sna_pixmap *priv = sna_pixmap(pixmap);
+ if (priv == NULL)
+ return false;
+
+ if (priv->clear)
+ return false;
+
+ if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+ return true;
+
+ if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+ return true;
+
+ return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
{
if (sna_picture_is_solid(p, NULL))
return false;
- return (has_alphamap(p) ||
- !gen3_check_xformat(p) ||
- !gen3_check_filter(p) ||
- !gen3_check_repeat(p) ||
- need_upload(p));
+ if (!gen3_check_xformat(p) || !gen3_check_repeat(p))
+ return true;
+
+ if (pixmap && source_is_busy(pixmap))
+ return false;
+
+ return has_alphamap(p) || !gen3_check_filter(p) || need_upload(p);
}
static bool
@@ -2534,6 +2555,7 @@ gen3_composite_fallback(struct sna *sna,
PixmapPtr src_pixmap;
PixmapPtr mask_pixmap;
PixmapPtr dst_pixmap;
+ bool src_fallback, mask_fallback;
if (!gen3_check_dst_format(dst->format)) {
DBG(("%s: unknown destination format: %d\n",
@@ -2542,18 +2564,27 @@ gen3_composite_fallback(struct sna *sna,
}
dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
- mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ src_fallback = source_fallback(src, src_pixmap);
+
+ if (mask) {
+ mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ mask_fallback = source_fallback(mask, mask_pixmap);
+ } else {
+ mask_pixmap = NULL;
+ mask_fallback = false;
+ }
/* If we are using the destination as a source and need to
* readback in order to upload the source, do it all
* on the cpu.
*/
- if (src_pixmap == dst_pixmap && source_fallback(src)) {
+ if (src_pixmap == dst_pixmap && src_fallback) {
DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
- if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+ if (mask_pixmap == dst_pixmap && mask_fallback) {
DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
@@ -2575,38 +2606,28 @@ gen3_composite_fallback(struct sna *sna,
return FALSE;
}
- if (src_pixmap && !source_fallback(src)) {
- priv = sna_pixmap(src_pixmap);
- if (priv &&
- ((priv->gpu_damage && !priv->cpu_damage) ||
- (priv->cpu_bo && priv->cpu_bo->domain != DOMAIN_CPU))) {
- DBG(("%s: src is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (src_pixmap && !src_fallback) {
+ DBG(("%s: src is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
- if (mask_pixmap && !source_fallback(mask)) {
- priv = sna_pixmap(mask_pixmap);
- if (priv &&
- ((priv->gpu_damage && !priv->cpu_damage) ||
- (priv->cpu_bo && priv->cpu_bo->domain != DOMAIN_CPU))) {
- DBG(("%s: mask is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (mask_pixmap && !mask_fallback) {
+ DBG(("%s: mask is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
/* However if the dst is not on the GPU and we need to
* render one of the sources using the CPU, we may
* as well do the entire operation in place onthe CPU.
*/
- if (source_fallback(src)) {
+ if (src_fallback) {
DBG(("%s: dst is on the CPU and src will fallback\n",
__FUNCTION__));
return TRUE;
}
- if (mask && source_fallback(mask)) {
+ if (mask && mask_fallback) {
DBG(("%s: dst is on the CPU and mask will fallback\n",
__FUNCTION__));
return TRUE;
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c3a82a33..02454b21 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2122,11 +2122,8 @@ try_blt(struct sna *sna,
}
static bool
-is_gradient(PicturePtr picture)
+check_gradient(PicturePtr picture)
{
- if (picture->pDrawable)
- return FALSE;
-
switch (picture->pSourcePict->type) {
case SourcePictTypeSolidFill:
case SourcePictTypeLinear:
@@ -2155,17 +2152,38 @@ need_upload(PicturePtr p)
}
static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+ struct sna_pixmap *priv = sna_pixmap(pixmap);
+ if (priv == NULL)
+ return false;
+
+ if (priv->clear)
+ return false;
+
+ if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+ return true;
+
+ return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
{
if (sna_picture_is_solid(p, NULL))
return false;
- return (has_alphamap(p) ||
- is_gradient(p) ||
- !gen4_check_filter(p) ||
- !gen4_check_repeat(p) ||
- !gen4_check_format(p->format) ||
- need_upload(p));
+ if (p->pSourcePict)
+ return check_gradient(p);
+
+ if (!gen4_check_repeat(p) || !gen4_check_format(p->format))
+ return true;
+
+ /* soft errors: prefer to upload/compute rather than readback */
+ if (pixmap && source_is_busy(pixmap))
+ return false;
+
+ return has_alphamap(p) || !gen4_check_filter(p) || need_upload(p);
}
static bool
@@ -2178,6 +2196,7 @@ gen4_composite_fallback(struct sna *sna,
PixmapPtr src_pixmap;
PixmapPtr mask_pixmap;
PixmapPtr dst_pixmap;
+ bool src_fallback, mask_fallback;
if (!gen4_check_dst_format(dst->format)) {
DBG(("%s: unknown destination format: %d\n",
@@ -2186,18 +2205,27 @@ gen4_composite_fallback(struct sna *sna,
}
dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
- mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ src_fallback = source_fallback(src, src_pixmap);
+
+ if (mask) {
+ mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ mask_fallback = source_fallback(mask, mask_pixmap);
+ } else {
+ mask_pixmap = NULL;
+ mask_fallback = false;
+ }
/* If we are using the destination as a source and need to
* readback in order to upload the source, do it all
* on the cpu.
*/
- if (src_pixmap == dst_pixmap && source_fallback(src)) {
+ if (src_pixmap == dst_pixmap && src_fallback) {
DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
- if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+ if (mask_pixmap == dst_pixmap && mask_fallback) {
DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
@@ -2210,34 +2238,28 @@ gen4_composite_fallback(struct sna *sna,
return FALSE;
}
- if (src_pixmap && !source_fallback(src)) {
- priv = sna_pixmap(src_pixmap);
- if (priv && priv->gpu_damage && !priv->cpu_damage) {
- DBG(("%s: src is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (!src_fallback) {
+ DBG(("%s: src is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
- if (mask_pixmap && !source_fallback(mask)) {
- priv = sna_pixmap(mask_pixmap);
- if (priv && priv->gpu_damage && !priv->cpu_damage) {
- DBG(("%s: mask is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (mask && !mask_fallback) {
+ DBG(("%s: mask is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
/* However if the dst is not on the GPU and we need to
* render one of the sources using the CPU, we may
* as well do the entire operation in place onthe CPU.
*/
- if (source_fallback(src)) {
+ if (src_fallback) {
DBG(("%s: dst is on the CPU and src will fallback\n",
__FUNCTION__));
return TRUE;
}
- if (mask && source_fallback(mask)) {
+ if (mask && mask_fallback) {
DBG(("%s: dst is on the CPU and mask will fallback\n",
__FUNCTION__));
return TRUE;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index bce5a3c0..6763edf4 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2198,17 +2198,39 @@ need_upload(PicturePtr p)
}
static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+ struct sna_pixmap *priv = sna_pixmap(pixmap);
+ if (priv == NULL)
+ return false;
+
+ if (priv->clear)
+ return false;
+
+ if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+ return true;
+
+ if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+ return true;
+
+ return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
{
if (sna_picture_is_solid(p, NULL))
return false;
- return (has_alphamap(p) ||
- is_gradient(p) ||
- !gen5_check_filter(p) ||
- !gen5_check_repeat(p) ||
- !gen5_check_format(p->format) ||
- need_upload(p));
+ if (is_gradient(p) ||
+ !gen5_check_repeat(p) ||
+ !gen5_check_format(p->format))
+ return true;
+
+ if (pixmap && source_is_busy(pixmap))
+ return false;
+
+ return has_alphamap(p) || !gen5_check_filter(p) || need_upload(p);
}
static bool
@@ -2221,6 +2243,7 @@ gen5_composite_fallback(struct sna *sna,
PixmapPtr src_pixmap;
PixmapPtr mask_pixmap;
PixmapPtr dst_pixmap;
+ bool src_fallback, mask_fallback;
if (!gen5_check_dst_format(dst->format)) {
DBG(("%s: unknown destination format: %d\n",
@@ -2229,18 +2252,27 @@ gen5_composite_fallback(struct sna *sna,
}
dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
- mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ src_fallback = source_fallback(src, src_pixmap);
+
+ if (mask) {
+ mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ mask_fallback = source_fallback(mask, mask_pixmap);
+ } else {
+ mask_pixmap = NULL;
+ mask_fallback = false;
+ }
/* If we are using the destination as a source and need to
* readback in order to upload the source, do it all
* on the cpu.
*/
- if (src_pixmap == dst_pixmap && source_fallback(src)) {
+ if (src_pixmap == dst_pixmap && src_fallback) {
DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
- if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+ if (mask_pixmap == dst_pixmap && mask_fallback) {
DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
@@ -2253,34 +2285,28 @@ gen5_composite_fallback(struct sna *sna,
return FALSE;
}
- if (src_pixmap && !source_fallback(src)) {
- priv = sna_pixmap(src_pixmap);
- if (priv && priv->gpu_damage && !priv->cpu_damage) {
- DBG(("%s: src is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (src_pixmap && !src_fallback) {
+ DBG(("%s: src is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
- if (mask_pixmap && !source_fallback(mask)) {
- priv = sna_pixmap(mask_pixmap);
- if (priv && priv->gpu_damage && !priv->cpu_damage) {
- DBG(("%s: mask is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (mask_pixmap && !mask_fallback) {
+ DBG(("%s: mask is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
/* However if the dst is not on the GPU and we need to
* render one of the sources using the CPU, we may
* as well do the entire operation in place onthe CPU.
*/
- if (source_fallback(src)) {
+ if (src_fallback) {
DBG(("%s: dst is on the CPU and src will fallback\n",
__FUNCTION__));
return TRUE;
}
- if (mask && source_fallback(mask)) {
+ if (mask && mask_fallback) {
DBG(("%s: dst is on the CPU and mask will fallback\n",
__FUNCTION__));
return TRUE;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 9eb4221e..390cb0a9 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2374,7 +2374,7 @@ try_blt(struct sna *sna,
}
static bool
-is_gradient(PicturePtr picture)
+check_gradient(PicturePtr picture)
{
if (picture->pDrawable)
return FALSE;
@@ -2407,17 +2407,37 @@ need_upload(PicturePtr p)
}
static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+ struct sna_pixmap *priv = sna_pixmap(pixmap);
+ if (priv == NULL || priv->clear)
+ return false;
+
+ if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+ return true;
+
+ if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+ return true;
+
+ return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
{
if (sna_picture_is_solid(p, NULL))
return false;
- return (has_alphamap(p) ||
- is_gradient(p) ||
- !gen6_check_filter(p) ||
- !gen6_check_repeat(p) ||
- !gen6_check_format(p->format) ||
- need_upload(p));
+ if (p->pSourcePict)
+ return check_gradient(p);
+
+ if (!gen6_check_repeat(p) || !gen6_check_format(p->format))
+ return true;
+
+ if (pixmap && source_is_busy(pixmap))
+ return false;
+
+ return has_alphamap(p) || !gen6_check_filter(p) || need_upload(p);
}
static bool
@@ -2430,6 +2450,7 @@ gen6_composite_fallback(struct sna *sna,
PixmapPtr src_pixmap;
PixmapPtr mask_pixmap;
PixmapPtr dst_pixmap;
+ bool src_fallback, mask_fallback;
if (!gen6_check_dst_format(dst->format)) {
DBG(("%s: unknown destination format: %d\n",
@@ -2438,18 +2459,27 @@ gen6_composite_fallback(struct sna *sna,
}
dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
- mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ src_fallback = source_fallback(src, src_pixmap);
+
+ if (mask) {
+ mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ mask_fallback = source_fallback(mask, mask_pixmap);
+ } else {
+ mask_pixmap = NULL;
+ mask_fallback = false;
+ }
/* If we are using the destination as a source and need to
* readback in order to upload the source, do it all
* on the cpu.
*/
- if (src_pixmap == dst_pixmap && source_fallback(src)) {
+ if (src_pixmap == dst_pixmap && src_fallback) {
DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
- if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+ if (mask_pixmap == dst_pixmap && mask_fallback) {
DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
@@ -2464,34 +2494,28 @@ gen6_composite_fallback(struct sna *sna,
return FALSE;
}
- if (src_pixmap && !source_fallback(src)) {
- priv = sna_pixmap(src_pixmap);
- if (priv && priv->gpu_damage && !priv->cpu_damage) {
- DBG(("%s: src is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (src_pixmap && !src_fallback) {
+ DBG(("%s: src is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
- if (mask_pixmap && !source_fallback(mask)) {
- priv = sna_pixmap(mask_pixmap);
- if (priv && priv->gpu_damage && !priv->cpu_damage) {
- DBG(("%s: mask is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (mask_pixmap && !mask_fallback) {
+ DBG(("%s: mask is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
/* However if the dst is not on the GPU and we need to
* render one of the sources using the CPU, we may
* as well do the entire operation in place onthe CPU.
*/
- if (source_fallback(src)) {
+ if (src_fallback) {
DBG(("%s: dst is on the CPU and src will fallback\n",
__FUNCTION__));
return TRUE;
}
- if (mask && source_fallback(mask)) {
+ if (mask && mask_fallback) {
DBG(("%s: dst is on the CPU and mask will fallback\n",
__FUNCTION__));
return TRUE;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 5829ae38..2b3f67b4 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2487,17 +2487,36 @@ need_upload(PicturePtr p)
}
static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+ struct sna_pixmap *priv = sna_pixmap(pixmap);
+ if (priv == NULL || priv->clear)
+ return false;
+
+ if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+ return true;
+
+ if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+ return true;
+
+ return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
{
if (sna_picture_is_solid(p, NULL))
return false;
- return (has_alphamap(p) ||
- is_gradient(p) ||
- !gen7_check_filter(p) ||
- !gen7_check_repeat(p) ||
- !gen7_check_format(p->format) ||
- need_upload(p));
+ if (is_gradient(p) ||
+ !gen7_check_repeat(p) ||
+ !gen7_check_format(p->format))
+ return true;
+
+ if (pixmap && source_is_busy(pixmap))
+ return false;
+
+ return has_alphamap(p) || !gen7_check_filter(p) || need_upload(p);
}
static bool
@@ -2510,6 +2529,7 @@ gen7_composite_fallback(struct sna *sna,
PixmapPtr src_pixmap;
PixmapPtr mask_pixmap;
PixmapPtr dst_pixmap;
+ bool src_fallback, mask_fallback;
if (!gen7_check_dst_format(dst->format)) {
DBG(("%s: unknown destination format: %d\n",
@@ -2518,18 +2538,27 @@ gen7_composite_fallback(struct sna *sna,
}
dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
- mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ src_fallback = source_fallback(src, src_pixmap);
+
+ if (mask) {
+ mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+ mask_fallback = source_fallback(mask, mask_pixmap);
+ } else {
+ mask_pixmap = NULL;
+ mask_fallback = false;
+ }
/* If we are using the destination as a source and need to
* readback in order to upload the source, do it all
* on the cpu.
*/
- if (src_pixmap == dst_pixmap && source_fallback(src)) {
+ if (src_pixmap == dst_pixmap && src_fallback) {
DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
- if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+ if (mask_pixmap == dst_pixmap && mask_fallback) {
DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
return TRUE;
}
@@ -2544,34 +2573,28 @@ gen7_composite_fallback(struct sna *sna,
return FALSE;
}
- if (src_pixmap && !source_fallback(src)) {
- priv = sna_pixmap(src_pixmap);
- if (priv && priv->gpu_damage && !priv->cpu_damage) {
- DBG(("%s: src is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (src_pixmap && !src_fallback) {
+ DBG(("%s: src is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
- if (mask_pixmap && !source_fallback(mask)) {
- priv = sna_pixmap(mask_pixmap);
- if (priv && priv->gpu_damage && !priv->cpu_damage) {
- DBG(("%s: mask is already on the GPU, try to use GPU\n",
- __FUNCTION__));
- return FALSE;
- }
+ if (mask_pixmap && !mask_fallback) {
+ DBG(("%s: mask is already on the GPU, try to use GPU\n",
+ __FUNCTION__));
+ return FALSE;
}
/* However if the dst is not on the GPU and we need to
* render one of the sources using the CPU, we may
* as well do the entire operation in place onthe CPU.
*/
- if (source_fallback(src)) {
+ if (src_fallback) {
DBG(("%s: dst is on the CPU and src will fallback\n",
__FUNCTION__));
return TRUE;
}
- if (mask && source_fallback(mask)) {
+ if (mask && mask_fallback) {
DBG(("%s: dst is on the CPU and mask will fallback\n",
__FUNCTION__));
return TRUE;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 96d945e8..6c31f335 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -450,7 +450,6 @@ static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n",
__FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL));
- assert(bo->proxy == NULL);
return bo->rq;
}
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ce3afaef..e961c2cb 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -871,10 +871,10 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
return true;
}
- DBG(("%s: gpu_bo=%d, gpu_damage=%p\n",
+ DBG(("%s: gpu_bo=%d, gpu_damage=%p, cpu_damage=%p, is-clear?=%d\n",
__FUNCTION__,
priv->gpu_bo ? priv->gpu_bo->handle : 0,
- priv->gpu_damage));
+ priv->gpu_damage, priv->cpu_damage, priv->clear));
if ((flags & MOVE_READ) == 0) {
assert(flags & MOVE_WRITE);
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 74c04afd..421c7ff6 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1418,7 +1418,7 @@ sna_render_picture_fixup(struct sna *sna,
if (picture->alphaMap) {
DBG(("%s: alphamap\n", __FUNCTION__));
- if ((is_gpu(picture->pDrawable) || is_gpu(picture->alphaMap->pDrawable))) {
+ if (is_gpu(picture->pDrawable) || is_gpu(picture->alphaMap->pDrawable)) {
return sna_render_picture_flatten(sna, picture, channel,
x, y, w, h, dst_x, dst_y);
}
@@ -1428,7 +1428,7 @@ sna_render_picture_fixup(struct sna *sna,
if (picture->filter == PictFilterConvolution) {
DBG(("%s: convolution\n", __FUNCTION__));
- if (picture->pDrawable && is_gpu(picture->pDrawable)) {
+ if (is_gpu(picture->pDrawable)) {
return sna_render_picture_convolve(sna, picture, channel,
x, y, w, h, dst_x, dst_y);
}
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 6c8f66ab..a523fed5 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -72,10 +72,10 @@ is_gpu(DrawablePtr drawable)
{
struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
- if (priv == NULL)
+ if (priv == NULL || priv->clear)
return false;
- if (priv->gpu_damage)
+ if (priv->gpu_damage || (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo)))
return true;
return priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo);