summaryrefslogtreecommitdiff
path: root/src/sna/sna_composite.c
diff options
context:
space:
mode:
author: Chris Wilson <chris@chris-wilson.co.uk> 2012-03-22 09:22:52 +0000
committer: Chris Wilson <chris@chris-wilson.co.uk> 2012-03-22 09:22:52 +0000
commit: a6b48dd7f1eeb1a8e3841b8f3326c60b300ee9e9 (patch)
tree: be6713381ed261e411cd2c51ac564e699d851aa2 /src/sna/sna_composite.c
parent: 52f39ae1697bef86471b7c5eef8553661f255b67 (diff)
sna: Force fallbacks if the destination is unattached
Since the removal of the ability to create a backing pixmap after the creation of its parent, it is no longer practical to attempt rendering with the GPU to unattached pixmaps. So, having made the decision never to render to such a pixmap, perform the test explicitly along the render paths. This fixes a segmentation fault introduced in 8a303f195 ("sna: Remove existing damage before overwriting with a composite op"), which assumed the existence of a backing pixmap along a render path. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47700 Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'src/sna/sna_composite.c')
-rw-r--r-- src/sna/sna_composite.c | 28
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index b098fcc9..e5031c0d 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -413,7 +413,9 @@ sna_composite(CARD8 op,
INT16 dst_x, INT16 dst_y,
CARD16 width, CARD16 height)
{
- struct sna *sna = to_sna_from_drawable(dst->pDrawable);
+ PixmapPtr pixmap = get_drawable_pixmap(dst->pDrawable);
+ struct sna *sna = to_sna_from_pixmap(pixmap);
+ struct sna_pixmap *priv;
struct sna_composite_op tmp;
unsigned flags;
RegionRec region;
@@ -462,8 +464,14 @@ sna_composite(CARD8 op,
goto fallback;
}
- if (too_small(dst->pDrawable) &&
- !picture_is_gpu(src) && !picture_is_gpu(mask)) {
+ priv = sna_pixmap(pixmap);
+ if (priv == NULL) {
+ DBG(("%s: fallback as destination is unattached\n",
+ __FUNCTION__));
+ goto fallback;
+ }
+
+ if (too_small(priv) && !picture_is_gpu(src) && !picture_is_gpu(mask)) {
DBG(("%s: fallback due to too small\n", __FUNCTION__));
goto fallback;
}
@@ -479,10 +487,8 @@ sna_composite(CARD8 op,
get_drawable_dx(dst->pDrawable),
get_drawable_dy(dst->pDrawable)));
- if (op <= PictOpSrc) {
- struct sna_pixmap *priv = sna_pixmap_from_drawable(dst->pDrawable);
+ if (op <= PictOpSrc)
sna_damage_subtract(&priv->cpu_damage, &region);
- }
memset(&tmp, 0, sizeof(tmp));
if (!sna->render.composite(sna,
@@ -752,17 +758,17 @@ sna_composite_rectangles(CARD8 op,
boxes = pixman_region_rectangles(&region, &num_boxes);
- if (too_small(dst->pDrawable)) {
- DBG(("%s: fallback, dst is too small\n", __FUNCTION__));
- goto fallback;
- }
-
priv = sna_pixmap(pixmap);
if (priv == NULL) {
DBG(("%s: fallback, not attached\n", __FUNCTION__));
goto fallback;
}
+ if (too_small(priv)) {
+ DBG(("%s: fallback, dst is too small\n", __FUNCTION__));
+ goto fallback;
+ }
+
/* If we going to be overwriting any CPU damage with a subsequent
* operation, then we may as well delete it without moving it
* first to the GPU.