author    Chris Wilson <chris@chris-wilson.co.uk>    2013-11-28 14:02:18 +0000
committer Chris Wilson <chris@chris-wilson.co.uk>    2013-11-28 14:06:27 +0000
commit    1e382c6ece84f3040b21980a303fe00be667c4e2 (patch)
tree      53c24c4861e9bde67673e1add8d08fbdeddf251b
parent    569b06abeae9b3dcaa611facdd3ec30c67de765b (diff)
sna: Harden GetImage for use with very large buffers
These risk causing SIGBUS due to OOM.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--    src/sna/sna_accel.c    254
1 file changed, 144 insertions(+), 110 deletions(-)
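The hardening wraps the CPU-side copies (the inplace download and the final memcpy_blt fallback) in the driver's sigtrap_get()/sigtrap_put() guard, so that a SIGBUS raised while touching a huge, possibly unbackable mapping makes the fast path fail gracefully instead of killing the X server. As a rough sketch of that pattern only (sigtrap_handler and copy_guarded below are hypothetical stand-ins, not the driver's actual sigtrap implementation), a guarded copy could look like:

/*
 * Sketch of the SIGBUS guard pattern used by this patch (hypothetical
 * stand-in; the real sigtrap_get()/sigtrap_put() live elsewhere in the
 * driver). The idea: arm handlers for SIGBUS/SIGSEGV around the copy so
 * a fault on an unbackable page unwinds to the caller instead of
 * crashing the server.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdbool.h>
#include <string.h>

static sigjmp_buf sigtrap_env;

static void sigtrap_handler(int sig)
{
	/* Jump back to the sigsetjmp() in copy_guarded(); the signal mask
	 * is restored because sigsetjmp() was called with savesigs != 0. */
	siglongjmp(sigtrap_env, sig);
}

static bool copy_guarded(void *dst, const void *src, size_t len)
{
	struct sigaction act, old_bus, old_segv;
	volatile bool ok = false;

	memset(&act, 0, sizeof(act));
	act.sa_handler = sigtrap_handler;
	sigaction(SIGBUS, &act, &old_bus);
	sigaction(SIGSEGV, &act, &old_segv);

	if (sigsetjmp(sigtrap_env, 1) == 0) {
		/* If this faults (e.g. SIGBUS because the kernel cannot
		 * back a very large mapping under memory pressure), the
		 * handler longjmps past it and ok stays false. */
		memcpy(dst, src, len);
		ok = true;
	}

	sigaction(SIGBUS, &old_bus, NULL);
	sigaction(SIGSEGV, &old_segv, NULL);
	return ok;
}

When the guarded copy fails the caller simply returns false, which roughly mirrors how the reworked fast paths below (sna_get_image__fast trying the idle inplace download, then the userptr blit, then a forced inplace download) fall back to sna_drawable_move_region_to_cpu() in sna_get_image().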
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9b54c5e2..328e144b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -15712,104 +15712,11 @@ static int sna_create_gc(GCPtr gc)
}
static bool
-sna_get_image__blt(PixmapPtr pixmap,
- RegionPtr region,
- char *dst,
- unsigned flags)
-{
- struct sna_pixmap *priv = sna_pixmap(pixmap);
- struct sna *sna = to_sna_from_pixmap(pixmap);
- struct kgem_bo *dst_bo;
- bool ok = false;
- int pitch;
-
- if (priv == NULL)
- return false;
-
- if (priv->clear) {
- int w = region->extents.x2 - region->extents.x1;
- int h = region->extents.y2 - region->extents.y1;
-
- DBG(("%s: applying clear [%08x]\n",
- __FUNCTION__, priv->clear_color));
- assert(DAMAGE_IS_ALL(priv->gpu_damage));
- assert(priv->cpu_damage == NULL);
-
- pitch = PixmapBytePad(w, pixmap->drawable.depth);
- if (priv->clear_color == 0 ||
- pixmap->drawable.bitsPerPixel == 8 ||
- priv->clear_color == (1U << pixmap->drawable.depth) - 1) {
- DBG(("%s: memset clear [%02x]\n",
- __FUNCTION__, priv->clear_color & 0xff));
- memset(dst, priv->clear_color, pitch * h);
- } else {
- pixman_fill((uint32_t *)dst,
- pitch/sizeof(uint32_t),
- pixmap->drawable.bitsPerPixel,
- 0, 0,
- w, h,
- priv->clear_color);
- }
-
- return true;
- }
-
- if (!sna->kgem.has_userptr || !USE_USERPTR_DOWNLOADS)
- return false;
-
- if (!sna->kgem.can_blt_cpu)
- return false;
-
- if (flags & (MOVE_WHOLE_HINT | MOVE_INPLACE_HINT))
- return false;
-
- if (priv->gpu_damage == NULL)
- return false;
-
- assert(priv->gpu_bo);
- if (!__kgem_bo_is_busy(&sna->kgem, priv->gpu_bo))
- return false;
-
- if (!DAMAGE_IS_ALL(priv->gpu_damage) &&
- !sna_damage_contains_box__no_reduce(priv->gpu_damage,
- &region->extents))
- return false;
-
- DBG(("%s: download through a temporary map\n", __FUNCTION__));
-
- assert(sna_damage_contains_box(priv->gpu_damage, &region->extents) == PIXMAN_REGION_IN);
- assert(sna_damage_contains_box(priv->cpu_damage, &region->extents) == PIXMAN_REGION_OUT);
-
- pitch = PixmapBytePad(region->extents.x2 - region->extents.x1,
- pixmap->drawable.depth);
- dst_bo = kgem_create_map(&sna->kgem, dst,
- pitch * (region->extents.y2 - region->extents.y1),
- false);
- if (dst_bo) {
- dst_bo->pitch = pitch;
- kgem_bo_mark_unreusable(dst_bo);
-
- ok = sna->render.copy_boxes(sna, GXcopy,
- pixmap, priv->gpu_bo, 0, 0,
- pixmap, dst_bo,
- -region->extents.x1,
- -region->extents.y1,
- &region->extents, 1,
- COPY_LAST);
-
- kgem_bo_sync__cpu(&sna->kgem, dst_bo);
- assert(dst_bo->rq == NULL);
- kgem_bo_destroy(&sna->kgem, dst_bo);
- }
-
- return ok;
-}
-
-static bool
sna_get_image__inplace(PixmapPtr pixmap,
RegionPtr region,
char *dst,
- unsigned flags)
+ unsigned flags,
+ bool idle)
{
struct sna_pixmap *priv = sna_pixmap(pixmap);
struct sna *sna = to_sna_from_pixmap(pixmap);
@@ -15818,8 +15725,7 @@ sna_get_image__inplace(PixmapPtr pixmap,
if (!USE_INPLACE)
return false;
- if (priv == NULL || priv->gpu_bo == NULL)
- return false;
+ assert(priv && priv->gpu_bo);
switch (priv->gpu_bo->tiling) {
case I915_TILING_Y:
@@ -15831,16 +15737,13 @@ sna_get_image__inplace(PixmapPtr pixmap,
break;
}
- if (priv->move_to_gpu && !priv->move_to_gpu(sna, priv, MOVE_READ))
+ if (!kgem_bo_can_map__cpu(&sna->kgem, priv->gpu_bo, FORCE_FULL_SYNC))
return false;
- if (!kgem_bo_can_map__cpu(&sna->kgem, priv->gpu_bo, FORCE_FULL_SYNC))
+ if (idle && __kgem_bo_is_busy(&sna->kgem, priv->gpu_bo))
return false;
- if (priv->gpu_damage == NULL ||
- !(DAMAGE_IS_ALL(priv->gpu_damage) ||
- sna_damage_contains_box__no_reduce(priv->gpu_damage,
- &region->extents)))
+ if (priv->move_to_gpu && !priv->move_to_gpu(sna, priv, MOVE_READ))
return false;
assert(sna_damage_contains_box(priv->gpu_damage, &region->extents) == PIXMAN_REGION_IN);
@@ -15852,6 +15755,9 @@ sna_get_image__inplace(PixmapPtr pixmap,
kgem_bo_sync__cpu_full(&sna->kgem, priv->gpu_bo, FORCE_FULL_SYNC);
+ if (sigtrap_get())
+ return false;
+
if (priv->gpu_bo->tiling) {
DBG(("%s: download through a tiled CPU map\n", __FUNCTION__));
memcpy_from_tiled_x(&sna->kgem, src, dst,
@@ -15883,9 +15789,137 @@ sna_get_image__inplace(PixmapPtr pixmap,
}
}
+ sigtrap_put();
return true;
}
+static bool
+sna_get_image__blt(PixmapPtr pixmap,
+ RegionPtr region,
+ char *dst,
+ unsigned flags)
+{
+ struct sna_pixmap *priv = sna_pixmap(pixmap);
+ struct sna *sna = to_sna_from_pixmap(pixmap);
+ struct kgem_bo *dst_bo;
+ bool ok = false;
+ int pitch;
+
+ assert(priv && priv->gpu_bo);
+
+ if (!sna->kgem.has_userptr || !USE_USERPTR_DOWNLOADS)
+ return false;
+
+ if (!sna->kgem.can_blt_cpu)
+ return false;
+
+ if ((priv->create & (KGEM_CAN_CREATE_GTT | KGEM_CAN_CREATE_LARGE)) == KGEM_CAN_CREATE_GTT &&
+ kgem_bo_can_map(&sna->kgem, priv->gpu_bo)) {
+ if (flags & (MOVE_WHOLE_HINT | MOVE_INPLACE_HINT))
+ return false;
+
+ if (priv->gpu_damage == NULL)
+ return false;
+
+ assert(priv->gpu_bo);
+ if (!__kgem_bo_is_busy(&sna->kgem, priv->gpu_bo))
+ return false;
+ } else {
+ if (priv->gpu_damage == NULL)
+ return false;
+
+ assert(priv->gpu_bo);
+ }
+
+ if (priv->move_to_gpu && !priv->move_to_gpu(sna, priv, MOVE_READ))
+ return false;
+
+ DBG(("%s: download through a temporary map\n", __FUNCTION__));
+
+ assert(sna_damage_contains_box(priv->gpu_damage, &region->extents) == PIXMAN_REGION_IN);
+ assert(sna_damage_contains_box(priv->cpu_damage, &region->extents) == PIXMAN_REGION_OUT);
+
+ pitch = PixmapBytePad(region->extents.x2 - region->extents.x1,
+ pixmap->drawable.depth);
+ dst_bo = kgem_create_map(&sna->kgem, dst,
+ pitch * (region->extents.y2 - region->extents.y1),
+ false);
+ if (dst_bo) {
+ dst_bo->pitch = pitch;
+ kgem_bo_mark_unreusable(dst_bo);
+
+ ok = sna->render.copy_boxes(sna, GXcopy,
+ pixmap, priv->gpu_bo, 0, 0,
+ pixmap, dst_bo,
+ -region->extents.x1,
+ -region->extents.y1,
+ &region->extents, 1,
+ COPY_LAST);
+
+ kgem_bo_sync__cpu(&sna->kgem, dst_bo);
+ assert(dst_bo->rq == NULL);
+ kgem_bo_destroy(&sna->kgem, dst_bo);
+ }
+
+ return ok;
+}
+
+static bool
+sna_get_image__fast(PixmapPtr pixmap,
+ RegionPtr region,
+ char *dst,
+ unsigned flags)
+{
+ struct sna_pixmap *priv = sna_pixmap(pixmap);
+
+ if (priv == NULL || priv->gpu_damage == NULL)
+ return false;
+
+ if (priv->clear) {
+ int w = region->extents.x2 - region->extents.x1;
+ int h = region->extents.y2 - region->extents.y1;
+ int pitch = PixmapBytePad(w, pixmap->drawable.depth);
+
+ DBG(("%s: applying clear [%08x]\n",
+ __FUNCTION__, priv->clear_color));
+ assert(DAMAGE_IS_ALL(priv->gpu_damage));
+ assert(priv->cpu_damage == NULL);
+
+ if (priv->clear_color == 0 ||
+ pixmap->drawable.bitsPerPixel == 8 ||
+ priv->clear_color == (1U << pixmap->drawable.depth) - 1) {
+ DBG(("%s: memset clear [%02x]\n",
+ __FUNCTION__, priv->clear_color & 0xff));
+ memset(dst, priv->clear_color, pitch * h);
+ } else {
+ pixman_fill((uint32_t *)dst,
+ pitch/sizeof(uint32_t),
+ pixmap->drawable.bitsPerPixel,
+ 0, 0,
+ w, h,
+ priv->clear_color);
+ }
+
+ return true;
+ }
+
+ if (!DAMAGE_IS_ALL(priv->gpu_damage) &&
+ !sna_damage_contains_box__no_reduce(priv->gpu_damage,
+ &region->extents))
+ return false;
+
+ if (sna_get_image__inplace(pixmap, region, dst, flags, true))
+ return true;
+
+ if (sna_get_image__blt(pixmap, region, dst, flags))
+ return true;
+
+ if (sna_get_image__inplace(pixmap, region, dst, flags, false))
+ return true;
+
+ return false;
+}
+
static void
sna_get_image(DrawablePtr drawable,
int x, int y, int w, int h,
@@ -15924,10 +15958,7 @@ sna_get_image(DrawablePtr drawable,
region.extents.y2 = region.extents.y1 + h;
region.data = NULL;
- if (sna_get_image__blt(pixmap, &region, dst, flags))
- return;
-
- if (sna_get_image__inplace(pixmap, &region, dst, flags))
+ if (sna_get_image__fast(pixmap, &region, dst, flags))
return;
if (!sna_drawable_move_region_to_cpu(&pixmap->drawable,
@@ -15939,9 +15970,12 @@ sna_get_image(DrawablePtr drawable,
region.extents.x1, region.extents.y1,
region.extents.x2, region.extents.y2));
assert(has_coherent_ptr(to_sna_from_pixmap(pixmap), sna_pixmap(pixmap), MOVE_READ));
- memcpy_blt(pixmap->devPrivate.ptr, dst, drawable->bitsPerPixel,
- pixmap->devKind, PixmapBytePad(w, drawable->depth),
- region.extents.x1, region.extents.y1, 0, 0, w, h);
+ if (sigtrap_get() == 0) {
+ memcpy_blt(pixmap->devPrivate.ptr, dst, drawable->bitsPerPixel,
+ pixmap->devKind, PixmapBytePad(w, drawable->depth),
+ region.extents.x1, region.extents.y1, 0, 0, w, h);
+ sigtrap_put();
+ }
} else {
region.extents.x1 = x + drawable->x;
region.extents.y1 = y + drawable->y;