author    Chris Wilson <chris@chris-wilson.co.uk>    2012-07-16 21:18:24 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>    2012-07-16 23:48:10 +0100
commit    1f79e877fb6602bd0f9dd14ac9c3511f3b7044fb (patch)
tree      4516f8b321fda773c4413a3e4adcf6401290032f /src/sna
parent    d141a2d59007866c9eaad020c744be446e70c346 (diff)
sna: Share the pixmap migration decision with the BLT composite routines
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'src/sna')
-rw-r--r--  src/sna/sna_accel.c   |  69
-rw-r--r--  src/sna/sna_blt.c     | 154
-rw-r--r--  src/sna/sna_render.c  | 111
-rw-r--r--  src/sna/sna_render.h  |   6
4 files changed, 155 insertions(+), 185 deletions(-)
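
The core of the change is that the BLT composite path in sna_blt.c stops second-guessing pixmap placement with its own has_gpu_area()/has_cpu_area() checks and instead reuses the same migration decision as the render paths, via a new helper exported from sna_render.c. A commented restatement of that helper as introduced below (comments added here for explanation; see the sna_render.c hunk for the verbatim code):

	/* Pick the bo to sample the pixmap from: try the existing CPU
	 * bo first (now with blt-specific heuristics), then migrate to
	 * the GPU; NULL means the caller should fall back to an upload. */
	struct kgem_bo *
	__sna_render_pixmap_bo(struct sna *sna,
			       PixmapPtr pixmap,
			       const BoxRec *box,
			       bool blt)
	{
		struct kgem_bo *bo;

		bo = use_cpu_bo(sna, pixmap, box, blt);
		if (bo == NULL) {
			bo = move_to_gpu(pixmap, box, blt);
			if (bo == NULL)
				return NULL;
		}

		return bo;
	}
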
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d7ecb000..6c069ee0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -425,8 +425,8 @@ sna_pixmap_alloc_cpu(struct sna *sna,
pixmap->drawable.bitsPerPixel,
from_gpu ? 0 : CREATE_CPU_MAP | CREATE_INACTIVE);
if (priv->cpu_bo) {
- DBG(("%s: allocated CPU handle=%d\n", __FUNCTION__,
- priv->cpu_bo->handle));
+ DBG(("%s: allocated CPU handle=%d (vmap? %d)\n", __FUNCTION__,
+ priv->cpu_bo->handle, priv->cpu_bo->vmap));
priv->ptr = kgem_bo_map__cpu(&sna->kgem, priv->cpu_bo);
priv->stride = priv->cpu_bo->pitch;
@@ -525,7 +525,17 @@ static inline uint32_t default_tiling(PixmapPtr pixmap,
if (sna->kgem.gen == 21)
return I915_TILING_X;
- if (sna_damage_is_all(&priv->cpu_damage,
+ /* Only on later generations was the render pipeline
+ * more flexible than the BLT. So on gen2/3, prefer to
+ * keep large objects accessible through the BLT.
+ */
+ if (sna->kgem.gen < 40 &&
+ (pixmap->drawable.width > sna->render.max_3d_size ||
+ pixmap->drawable.height > sna->render.max_3d_size))
+ return I915_TILING_X;
+
+ if (tiling == I915_TILING_Y &&
+ sna_damage_is_all(&priv->cpu_damage,
pixmap->drawable.width,
pixmap->drawable.height)) {
DBG(("%s: entire source is damaged, using Y-tiling\n",
@@ -533,15 +543,6 @@ static inline uint32_t default_tiling(PixmapPtr pixmap,
sna_damage_destroy(&priv->gpu_damage);
priv->undamaged = false;
- /* Only on later generations was the render pipeline
- * more flexible than the BLT. So on gen2/3, prefer to
- * keep large objects accessible through the BLT.
- */
- if (sna->kgem.gen < 40 &&
- (pixmap->drawable.width > sna->render.max_3d_size ||
- pixmap->drawable.height > sna->render.max_3d_size))
- return I915_TILING_X;
-
return I915_TILING_Y;
}
@@ -1089,7 +1090,8 @@ static inline bool use_cpu_bo_for_download(struct sna *sna,
return priv->cpu_bo != NULL && sna->kgem.can_blt_cpu;
}
-static inline bool use_cpu_bo_for_upload(struct sna_pixmap *priv)
+static inline bool use_cpu_bo_for_upload(struct sna_pixmap *priv,
+ unsigned flags)
{
if (DBG_NO_CPU_UPLOAD)
return false;
@@ -1097,6 +1099,14 @@ static inline bool use_cpu_bo_for_upload(struct sna_pixmap *priv)
if (priv->cpu_bo == NULL)
return false;
+ DBG(("%s? flags=%x, gpu busy?=%d, cpu busy?=%d\n", __FUNCTION__,
+ flags,
+ kgem_bo_is_busy(priv->gpu_bo),
+ kgem_bo_is_busy(priv->cpu_bo)));
+
+ if (flags & (MOVE_WRITE | MOVE_ASYNC_HINT))
+ return true;
+
return kgem_bo_is_busy(priv->gpu_bo) || kgem_bo_is_busy(priv->cpu_bo);
}
@@ -2135,14 +2145,13 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
create |= CREATE_EXACT | CREATE_SCANOUT;
tiling = (flags & MOVE_SOURCE_HINT) ? I915_TILING_Y : DEFAULT_TILING;
+ tiling = sna_pixmap_choose_tiling(pixmap, tiling);
priv->gpu_bo = kgem_create_2d(&sna->kgem,
pixmap->drawable.width,
pixmap->drawable.height,
pixmap->drawable.bitsPerPixel,
- sna_pixmap_choose_tiling(pixmap,
- tiling),
- create);
+ tiling, create);
if (priv->gpu_bo == NULL)
return false;
@@ -2182,7 +2191,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
if (n) {
bool ok = false;
- if (use_cpu_bo_for_upload(priv)) {
+ if (use_cpu_bo_for_upload(priv, 0)) {
DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
ok = sna->render.copy_boxes(sna, GXcopy,
pixmap, priv->cpu_bo, 0, 0,
@@ -2222,7 +2231,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
} else if (DAMAGE_IS_ALL(priv->cpu_damage) ||
sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) {
bool ok = false;
- if (use_cpu_bo_for_upload(priv)) {
+ if (use_cpu_bo_for_upload(priv, 0)) {
DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
ok = sna->render.copy_boxes(sna, GXcopy,
pixmap, priv->cpu_bo, 0, 0,
@@ -2253,7 +2262,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
box = REGION_RECTS(&i);
ok = false;
- if (use_cpu_bo_for_upload(priv)) {
+ if (use_cpu_bo_for_upload(priv, 0)) {
DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
ok = sna->render.copy_boxes(sna, GXcopy,
pixmap, priv->cpu_bo, 0, 0,
@@ -2641,17 +2650,25 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
priv->create));
assert(!priv->mapped);
if (flags & __MOVE_FORCE || priv->create & KGEM_CAN_CREATE_GPU) {
+ unsigned create, tiling;
+
assert(pixmap->drawable.width > 0);
assert(pixmap->drawable.height > 0);
assert(pixmap->drawable.bitsPerPixel >= 8);
+
+ tiling = (flags & MOVE_SOURCE_HINT) ? I915_TILING_Y : DEFAULT_TILING;
+ tiling = sna_pixmap_choose_tiling(pixmap, tiling);
+
+ create = 0;
+ if (priv->cpu_damage && priv->cpu_bo == NULL)
+ create = CREATE_GTT_MAP | CREATE_INACTIVE;
+
priv->gpu_bo =
kgem_create_2d(&sna->kgem,
pixmap->drawable.width,
pixmap->drawable.height,
pixmap->drawable.bitsPerPixel,
- sna_pixmap_choose_tiling(pixmap,
- DEFAULT_TILING),
- (priv->cpu_damage && priv->cpu_bo == NULL) ? CREATE_GTT_MAP | CREATE_INACTIVE : 0);
+ tiling, create);
}
if (priv->gpu_bo == NULL) {
DBG(("%s: not creating GPU bo\n", __FUNCTION__));
@@ -2698,8 +2715,11 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
assert(pixmap_contains_damage(pixmap, priv->cpu_damage));
DBG(("%s: uploading %d damage boxes\n", __FUNCTION__, n));
+ if (DAMAGE_IS_ALL(priv->cpu_damage))
+ flags |= MOVE_ASYNC_HINT;
+
ok = false;
- if (use_cpu_bo_for_upload(priv)) {
+ if (use_cpu_bo_for_upload(priv, flags)) {
DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
ok = sna->render.copy_boxes(sna, GXcopy,
pixmap, priv->cpu_bo, 0, 0,
@@ -3055,7 +3075,8 @@ static bool upload_inplace(struct sna *sna,
}
if (priv->gpu_bo) {
- assert(priv->gpu_bo->proxy == NULL);
+ if (priv->gpu_bo->proxy)
+ return false;
if (!kgem_bo_can_map(&sna->kgem, priv->gpu_bo)) {
DBG(("%s? no, GPU bo not mappable\n", __FUNCTION__));
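
To summarise the sna_accel.c hunks above: default_tiling() now applies the gen2/3 large-object escape before, rather than inside, the "entire source damaged" promotion (and that promotion only fires when Y-tiling was requested), and use_cpu_bo_for_upload() gains a flags argument so that writes and asynchronous moves always take the CPU-bo path. A condensed sketch of the resulting upload predicate, assuming the MOVE_* flags and kgem_bo_is_busy() from the driver's headers; the DBG_NO_CPU_UPLOAD escape and debug logging of the real function are elided:

	static inline bool use_cpu_bo_for_upload(struct sna_pixmap *priv,
						 unsigned flags)
	{
		if (priv->cpu_bo == NULL)
			return false;

		/* Writes and async hints always prefer blitting from
		 * the CPU bo rather than stalling on a mapping. */
		if (flags & (MOVE_WRITE | MOVE_ASYNC_HINT))
			return true;

		/* Otherwise only bother when a direct upload would
		 * stall on either bo anyway. */
		return kgem_bo_is_busy(priv->gpu_bo) ||
		       kgem_bo_is_busy(priv->cpu_bo);
	}
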
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 80fad6d2..ff8e3eb1 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1138,20 +1138,20 @@ blt_composite_copy_boxes_with_alpha(struct sna *sna,
static bool
prepare_blt_copy(struct sna *sna,
struct sna_composite_op *op,
+ struct kgem_bo *bo,
uint32_t alpha_fixup)
{
PixmapPtr src = op->u.blt.src_pixmap;
- struct sna_pixmap *priv = sna_pixmap(src);
- if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo)) {
+ if (!kgem_bo_can_blt(&sna->kgem, bo)) {
DBG(("%s: fallback -- can't blt from source\n", __FUNCTION__));
return false;
}
- if (!kgem_check_many_bo_fenced(&sna->kgem, op->dst.bo, priv->gpu_bo, NULL)) {
+ if (!kgem_check_many_bo_fenced(&sna->kgem, op->dst.bo, bo, NULL)) {
_kgem_submit(&sna->kgem);
if (!kgem_check_many_bo_fenced(&sna->kgem,
- op->dst.bo, priv->gpu_bo, NULL)) {
+ op->dst.bo, bo, NULL)) {
DBG(("%s: fallback -- no room in aperture\n", __FUNCTION__));
return false;
}
@@ -1170,9 +1170,7 @@ prepare_blt_copy(struct sna *sna,
op->box = blt_composite_copy_box_with_alpha;
op->boxes = blt_composite_copy_boxes_with_alpha;
- if (!sna_blt_alpha_fixup_init(sna, &op->u.blt,
- priv->gpu_bo,
- op->dst.bo,
+ if (!sna_blt_alpha_fixup_init(sna, &op->u.blt, bo, op->dst.bo,
src->drawable.bitsPerPixel,
alpha_fixup))
return false;
@@ -1181,15 +1179,13 @@ prepare_blt_copy(struct sna *sna,
op->box = blt_composite_copy_box;
op->boxes = blt_composite_copy_boxes;
- if (!sna_blt_copy_init(sna, &op->u.blt,
- priv->gpu_bo,
- op->dst.bo,
+ if (!sna_blt_copy_init(sna, &op->u.blt, bo, op->dst.bo,
src->drawable.bitsPerPixel,
GXcopy))
return false;
}
- return begin_blt(sna, op);
+ return true;
}
fastcall static void
@@ -1434,119 +1430,33 @@ prepare_blt_put(struct sna *sna,
uint32_t alpha_fixup)
{
PixmapPtr src = op->u.blt.src_pixmap;
- struct sna_pixmap *priv;
- struct kgem_bo *src_bo;
DBG(("%s\n", __FUNCTION__));
- op->done = nop_done;
+ if (!sna_pixmap_move_to_cpu(src, MOVE_READ))
+ return false;
- src_bo = NULL;
- priv = sna_pixmap(src);
- if (priv)
- src_bo = priv->cpu_bo;
- if (src_bo) {
- if (alpha_fixup) {
- op->blt = blt_composite_copy_with_alpha;
- op->box = blt_composite_copy_box_with_alpha;
- op->boxes = blt_composite_copy_boxes_with_alpha;
-
- if (!sna_blt_alpha_fixup_init(sna, &op->u.blt,
- src_bo, op->dst.bo,
- op->dst.pixmap->drawable.bitsPerPixel,
- alpha_fixup))
- return false;
- } else {
- op->blt = blt_composite_copy;
- op->box = blt_composite_copy_box;
- op->boxes = blt_composite_copy_boxes;
-
- if (!sna_blt_copy_init(sna, &op->u.blt,
- src_bo, op->dst.bo,
- op->dst.pixmap->drawable.bitsPerPixel,
- GXcopy))
- return false;
- }
+ assert(src->devKind);
+ assert(src->devPrivate.ptr);
- return begin_blt(sna, op);
- } else {
- if (!sna_pixmap_move_to_cpu(src, MOVE_READ))
- return false;
+ if (alpha_fixup)
+ return false; /* XXX */
- assert(src->devKind);
- assert(src->devPrivate.ptr);
-
- if (alpha_fixup)
- return false; /* XXX */
-
- if (alpha_fixup) {
- op->u.blt.pixel = alpha_fixup;
- op->blt = blt_put_composite_with_alpha;
- op->box = blt_put_composite_box_with_alpha;
- op->boxes = blt_put_composite_boxes_with_alpha;
- } else {
- op->blt = blt_put_composite;
- op->box = blt_put_composite_box;
- op->boxes = blt_put_composite_boxes;
- }
+ if (alpha_fixup) {
+ op->u.blt.pixel = alpha_fixup;
+ op->blt = blt_put_composite_with_alpha;
+ op->box = blt_put_composite_box_with_alpha;
+ op->boxes = blt_put_composite_boxes_with_alpha;
+ } else {
+ op->blt = blt_put_composite;
+ op->box = blt_put_composite_box;
+ op->boxes = blt_put_composite_boxes;
}
+ op->done = nop_done;
return true;
}
-static bool
-has_gpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
-{
- struct sna_pixmap *priv = sna_pixmap(pixmap);
- BoxRec area;
-
- if (!priv)
- return false;
- if (!priv->gpu_bo)
- return false;
-
- if (priv->cpu_damage == NULL)
- return true;
- if (priv->cpu_damage->mode == DAMAGE_ALL)
- return false;
-
- area.x1 = x;
- area.y1 = y;
- area.x2 = x + w;
- area.y2 = y + h;
- if (priv->gpu_damage &&
- sna_damage_contains_box__no_reduce(priv->gpu_damage, &area))
- return true;
-
- return sna_damage_contains_box(priv->cpu_damage,
- &area) == PIXMAN_REGION_OUT;
-}
-
-static bool
-has_cpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
-{
- struct sna_pixmap *priv = sna_pixmap(pixmap);
- BoxRec area;
-
- if (!priv)
- return true;
- if (priv->gpu_damage == NULL)
- return true;
- if (priv->gpu_damage->mode == DAMAGE_ALL)
- return false;
-
- area.x1 = x;
- area.y1 = y;
- area.x2 = x + w;
- area.y2 = y + h;
- if (priv->cpu_damage &&
- sna_damage_contains_box__no_reduce(priv->cpu_damage, &area))
- return true;
-
- return sna_damage_contains_box(priv->gpu_damage,
- &area) == PIXMAN_REGION_OUT;
-}
-
static void
reduce_damage(struct sna_composite_op *op,
int dst_x, int dst_y,
@@ -1592,7 +1502,9 @@ sna_blt_composite(struct sna *sna,
PictFormat src_format = src->format;
PixmapPtr src_pixmap;
struct sna_pixmap *priv;
+ struct kgem_bo *bo;
int16_t tx, ty;
+ BoxRec box;
uint32_t alpha_fixup;
bool was_clear;
bool ret;
@@ -1748,13 +1660,15 @@ clear:
__FUNCTION__,
tmp->dst.x, tmp->dst.y, tmp->u.blt.sx, tmp->u.blt.sy, alpha_fixup));
- if (has_gpu_area(src_pixmap, x, y, width, height))
- ret = prepare_blt_copy(sna, tmp, alpha_fixup);
- else if (has_cpu_area(src_pixmap, x, y, width, height))
- ret = prepare_blt_put(sna, tmp, alpha_fixup);
- else if (sna_pixmap_move_to_gpu(src_pixmap, MOVE_READ))
- ret = prepare_blt_copy(sna, tmp, alpha_fixup);
- else
+ ret = false;
+ box.x1 = x;
+ box.y1 = y;
+ box.x2 = x + width;
+ box.y2 = y + height;
+ bo = __sna_render_pixmap_bo(sna, src_pixmap, &box, true);
+ if (bo)
+ ret = prepare_blt_copy(sna, tmp, bo, alpha_fixup);
+ if (!ret)
ret = prepare_blt_put(sna, tmp, alpha_fixup);
return ret;
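
With has_gpu_area()/has_cpu_area() gone, source selection in sna_blt_composite() reduces to asking the shared helper for a blittable bo and falling back to a CPU put; restated from the final hunk above with explanatory comments:

	/* Let the shared migration logic pick the source bo for the
	 * composite; blt=true biases its heuristics towards the BLT
	 * engine (X-tiling, large-object friendly). */
	ret = false;
	box.x1 = x;
	box.y1 = y;
	box.x2 = x + width;
	box.y2 = y + height;
	bo = __sna_render_pixmap_bo(sna, src_pixmap, &box, true);
	if (bo)
		ret = prepare_blt_copy(sna, tmp, bo, alpha_fixup);
	if (!ret)
		/* No suitable bo, or the copy setup failed: read the
		 * source through the CPU instead. */
		ret = prepare_blt_put(sna, tmp, alpha_fixup);
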
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index dcfab910..6fb9fe37 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -288,7 +288,7 @@ void no_render_init(struct sna *sna)
}
static struct kgem_bo *
-use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
+use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box, bool blt)
{
struct sna_pixmap *priv;
@@ -322,24 +322,40 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
break;
}
- if (priv->gpu_bo->tiling != I915_TILING_NONE &&
+ if (!blt &&
+ priv->gpu_bo->tiling != I915_TILING_NONE &&
(priv->cpu_bo->vmap || priv->cpu_bo->pitch >= 4096)) {
DBG(("%s: GPU bo exists and is tiled [%d], upload\n",
__FUNCTION__, priv->gpu_bo->tiling));
return NULL;
}
+ }
+
+ if (blt) {
+ if (priv->cpu_bo->vmap && priv->source_count++ > SOURCE_BIAS) {
+ DBG(("%s: promoting snooped CPU bo due to BLT reuse\n",
+ __FUNCTION__));
+ return NULL;
+ }
} else {
int w = box->x2 - box->x1;
int h = box->y2 - box->y1;
+ if (priv->cpu_bo->pitch >= 4096) {
+ DBG(("%s: promoting snooped CPU bo due to TLB miss\n",
+ __FUNCTION__));
+ return NULL;
+ }
+
if (priv->cpu_bo->vmap && priv->source_count > SOURCE_BIAS) {
DBG(("%s: promoting snooped CPU bo due to reuse\n",
__FUNCTION__));
return NULL;
}
- if (priv->source_count++*w*h >= (int)pixmap->drawable.width * pixmap->drawable.height &&
- I915_TILING_NONE != kgem_choose_tiling(&sna->kgem, I915_TILING_Y,
+ if (priv->source_count*w*h >= (int)pixmap->drawable.width * pixmap->drawable.height &&
+ I915_TILING_NONE != kgem_choose_tiling(&sna->kgem,
+ blt ? I915_TILING_X : I915_TILING_Y,
pixmap->drawable.width,
pixmap->drawable.height,
pixmap->drawable.bitsPerPixel)) {
@@ -347,15 +363,20 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
__FUNCTION__, priv->cpu_bo->pitch));
return NULL;
}
+
+ ++priv->source_count;
}
DBG(("%s for box=(%d, %d), (%d, %d)\n",
__FUNCTION__, box->x1, box->y1, box->x2, box->y2));
+ if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_ASYNC_HINT))
+ return NULL;
+
return priv->cpu_bo;
}
static struct kgem_bo *
-move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
+move_to_gpu(PixmapPtr pixmap, const BoxRec *box, bool blt)
{
struct sna_pixmap *priv;
int count, w, h;
@@ -390,7 +411,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
if (DBG_FORCE_UPLOAD < 0) {
if (!sna_pixmap_force_to_gpu(pixmap,
- MOVE_SOURCE_HINT | MOVE_READ))
+ blt ? MOVE_READ : MOVE_SOURCE_HINT | MOVE_READ))
return NULL;
return priv->gpu_bo;
@@ -407,7 +428,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
box->x1, box->y1, box->x2, box->y2, priv->source_count,
migrate));
} else if (kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
- I915_TILING_Y, w, h,
+ blt ? I915_TILING_X : I915_TILING_Y, w, h,
pixmap->drawable.bitsPerPixel) != I915_TILING_NONE) {
count = priv->source_count++;
if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
@@ -424,7 +445,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
}
if (migrate && !sna_pixmap_force_to_gpu(pixmap,
- MOVE_SOURCE_HINT | MOVE_READ))
+ blt ? MOVE_READ : MOVE_SOURCE_HINT | MOVE_READ))
return NULL;
return priv->gpu_bo;
@@ -465,13 +486,11 @@ static struct kgem_bo *upload(struct sna *sna,
pixmap->devPrivate.ptr, box,
pixmap->devKind,
pixmap->drawable.bitsPerPixel);
- if (bo) {
+ if (channel && bo) {
channel->width = box->x2 - box->x1;
channel->height = box->y2 - box->y1;
channel->offset[0] -= box->x1;
channel->offset[1] -= box->y1;
- channel->scale[0] = 1.f/channel->width;
- channel->scale[1] = 1.f/channel->height;
if (priv &&
pixmap->usage_hint == 0 &&
@@ -483,6 +502,24 @@ static struct kgem_bo *upload(struct sna *sna,
return bo;
}
+struct kgem_bo *
+__sna_render_pixmap_bo(struct sna *sna,
+ PixmapPtr pixmap,
+ const BoxRec *box,
+ bool blt)
+{
+ struct kgem_bo *bo;
+
+ bo = use_cpu_bo(sna, pixmap, box, blt);
+ if (bo == NULL) {
+ bo = move_to_gpu(pixmap, box, blt);
+ if (bo == NULL)
+ return NULL;
+ }
+
+ return bo;
+}
+
int
sna_render_pixmap_bo(struct sna *sna,
struct sna_composite_channel *channel,
@@ -491,7 +528,6 @@ sna_render_pixmap_bo(struct sna *sna,
int16_t w, int16_t h,
int16_t dst_x, int16_t dst_y)
{
- struct kgem_bo *bo;
struct sna_pixmap *priv;
BoxRec box;
@@ -500,8 +536,6 @@ sna_render_pixmap_bo(struct sna *sna,
channel->width = pixmap->drawable.width;
channel->height = pixmap->drawable.height;
- channel->scale[0] = 1.f / pixmap->drawable.width;
- channel->scale[1] = 1.f / pixmap->drawable.height;
channel->offset[0] = x - dst_x;
channel->offset[1] = y - dst_y;
@@ -511,16 +545,16 @@ sna_render_pixmap_bo(struct sna *sna,
(DAMAGE_IS_ALL(priv->gpu_damage) || !priv->cpu_damage ||
priv->gpu_bo->proxy)) {
DBG(("%s: GPU all damaged\n", __FUNCTION__));
- channel->bo = kgem_bo_reference(priv->gpu_bo);
- return 1;
+ channel->bo = priv->gpu_bo;
+ goto done;
}
if (priv->cpu_bo &&
(DAMAGE_IS_ALL(priv->cpu_damage) || !priv->gpu_damage) &&
!priv->cpu_bo->vmap && priv->cpu_bo->pitch < 4096) {
DBG(("%s: CPU all damaged\n", __FUNCTION__));
- channel->bo = kgem_bo_reference(priv->cpu_bo);
- return 1;
+ channel->bo = priv->cpu_bo;
+ goto done;
}
}
@@ -572,21 +606,22 @@ sna_render_pixmap_bo(struct sna *sna,
channel->offset[0], channel->offset[1],
pixmap->drawable.width, pixmap->drawable.height));
- bo = use_cpu_bo(sna, pixmap, &box);
- if (bo) {
- bo = kgem_bo_reference(bo);
+
+ channel->bo = __sna_render_pixmap_bo(sna, pixmap, &box, false);
+ if (channel->bo == NULL) {
+ DBG(("%s: uploading CPU box (%d, %d), (%d, %d)\n",
+ __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
+ channel->bo = upload(sna, channel, pixmap, &box);
+ if (channel->bo == NULL)
+ return 0;
} else {
- bo = move_to_gpu(pixmap, &box);
- if (bo == NULL) {
- DBG(("%s: uploading CPU box (%d, %d), (%d, %d)\n",
- __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
- bo = upload(sna, channel, pixmap, &box);
- } else
- bo = kgem_bo_reference(bo);
+done:
+ kgem_bo_reference(channel->bo);
}
- channel->bo = bo;
- return bo != NULL;
+ channel->scale[0] = 1.f / channel->width;
+ channel->scale[1] = 1.f / channel->height;
+ return 1;
}
static int sna_render_picture_downsample(struct sna *sna,
@@ -929,14 +964,11 @@ sna_render_picture_partial(struct sna *sna,
}
}
- if (use_cpu_bo(sna, pixmap, &box)) {
- if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
- return 0;
-
+ if (use_cpu_bo(sna, pixmap, &box, false)) {
bo = sna_pixmap(pixmap)->cpu_bo;
} else {
if (!sna_pixmap_force_to_gpu(pixmap,
- MOVE_SOURCE_HINT | MOVE_READ))
+ MOVE_READ | MOVE_SOURCE_HINT))
return 0;
bo = sna_pixmap(pixmap)->gpu_bo;
@@ -1119,12 +1151,9 @@ sna_render_picture_extract(struct sna *sna,
dst_x, dst_y);
}
- src_bo = use_cpu_bo(sna, pixmap, &box);
- if (src_bo) {
- if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
- return 0;
- } else {
- src_bo = move_to_gpu(pixmap, &box);
+ src_bo = use_cpu_bo(sna, pixmap, &box, true);
+ if (src_bo == NULL) {
+ src_bo = move_to_gpu(pixmap, &box, false);
if (src_bo == NULL) {
bo = kgem_upload_source_image(&sna->kgem,
pixmap->devPrivate.ptr,
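
The blt flag threaded through use_cpu_bo() and move_to_gpu() above changes the heuristics in two ways: the BLT path skips the sampler-oriented checks (tiled GPU bo, wide pitch, Y-tiling viability) and only retires a snooped CPU bo after repeated reuse, while the render path keeps them. A condensed sketch of the branch added to use_cpu_bo(), assuming SOURCE_BIAS and the sna_pixmap fields used elsewhere in this diff:

	if (blt) {
		/* The BLT can read snooped (vmap) bos directly, but
		 * repeated reuse suggests a long-lived source: return
		 * NULL so the pixmap migrates to a GPU bo instead. */
		if (priv->cpu_bo->vmap && priv->source_count++ > SOURCE_BIAS)
			return NULL;
	} else {
		/* Wide pitches thrash the sampler TLB; upload rather
		 * than texture from the CPU bo. */
		if (priv->cpu_bo->pitch >= 4096)
			return NULL;
		/* ...followed by the existing reuse/size heuristics
		 * shown in the hunk above. */
	}
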
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 7c43f613..0f96acea 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -619,6 +619,12 @@ sna_get_pixel_from_rgba(uint32_t * pixel,
return _sna_get_pixel_from_rgba(pixel, red, green, blue, alpha, format);
}
+struct kgem_bo *
+__sna_render_pixmap_bo(struct sna *sna,
+ PixmapPtr pixmap,
+ const BoxRec *box,
+ bool blt);
+
int
sna_render_pixmap_bo(struct sna *sna,
struct sna_composite_channel *channel,