author		Chris Wilson <chris@chris-wilson.co.uk>	2012-05-03 11:27:44 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2012-05-03 14:48:27 +0100
commit		fcccc5528b8696fb4f9b3f9f528673b95d98a907 (patch)
tree		127ebec4724527a67efab2be68cdacf8e163e58b /src/sna/sna_io.c
parent		53568e8e49559094ce5b24b8709669f1f76fe2bf (diff)
sna: Improve handling of inplace IO for large transfers
If the transfer is large enough to obliterate the caches, then it is
preferable to do it inplace rather than upload a proxy texture and
queue a blit. This helps prevent an inconsistency where one layer
believes the operation should be done inplace, only for the IO layer
to perform an indirect upload instead.

Testing shows no significant impact upon the cairo-traces, but it does
prevent x11perf -shmput from exploding.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
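For reference, below is a minimal, self-contained sketch of the decision
logic the patch arrives at. The struct layouts and the map_will_stall()
stub are simplified stand-ins invented for illustration; only the control
flow mirrors upload_inplace() as patched in the diff below.

/* Sketch of the post-patch heuristic; the types here are assumptions,
 * not the real kgem structures. */
#include <stdbool.h>

struct box_rec  { int x1, y1, x2, y2; };
struct bo_rec   { void *map; };
struct kgem_rec { unsigned int half_cpu_cache_pages; };

/* Stand-in: the real driver queries whether touching the mapping would
 * block on outstanding GPU work. */
static bool map_will_stall(struct kgem_rec *kgem, struct bo_rec *bo)
{
	(void)kgem;
	(void)bo;
	return false;
}

static bool upload_inplace_sketch(struct kgem_rec *kgem, struct bo_rec *bo,
				  const struct box_rec *box, int n, int bpp)
{
	/* No cheap mapping, or mapping would stall: inplace IO only pays
	 * off once the transfer would obliterate the CPU caches anyway.
	 * Accumulate the pixel payload and compare it, scaled by the same
	 * >> 12 used in the patch, against half the CPU cache in pages. */
	if (!bo->map || map_will_stall(kgem, bo)) {
		unsigned int bytes = 0;
		while (n--) {
			bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
			box++;
		}
		return bytes * bpp >> 12 >= kgem->half_cpu_cache_pages;
	}

	/* Already mapped and idle: the inplace write is cheap, take it. */
	return true;
}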
Diffstat (limited to 'src/sna/sna_io.c')
-rw-r--r--	src/sna/sna_io.c	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 2539518b..7cec3ff2 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -516,17 +516,16 @@ static bool upload_inplace(struct kgem *kgem,
 	 * able to almagamate a series of small writes into a single
 	 * operation.
 	 */
-	if (!bo->map) {
+	if (!bo->map || kgem_bo_map_will_stall(kgem, bo)) {
 		unsigned int bytes = 0;
 		while (n--) {
 			bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
 			box++;
 		}
-		if (bytes * bpp >> 12 < kgem->half_cpu_cache_pages)
-			return false;
+		return bytes * bpp >> 12 >= kgem->half_cpu_cache_pages;
 	}
 
-	return !kgem_bo_map_will_stall(kgem, bo);
+	return true;
 }
 
 bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
@@ -570,7 +569,9 @@ fallback:
 	}
 
 	/* Try to avoid switching rings... */
-	if (!can_blt || kgem->ring == KGEM_RENDER ||
+	if (!can_blt ||
+	    kgem->ring == KGEM_RENDER ||
+	    (kgem->has_semaphores && kgem->mode == KGEM_NONE) ||
 	    upload_too_large(sna, extents.x2 - extents.x1, extents.y2 - extents.y1)) {
 		PixmapRec tmp;