author    Chris Wilson <chris@chris-wilson.co.uk>  2014-06-22 20:19:22 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>  2014-06-23 07:51:38 +0100
commit    1909910fdf89216d18703e50728f4604f75d5d66 (patch)
tree      ca93408780eb5e2315e417e9afcb258fde1c253a
parent    83cbbcd816e449402f3d49aeba3c099a20b8bc1b (diff)
sna: Inject a batch flush before adding a fresh bo
Fresh bo (those without a reservation already defined, i.e. no presumed_offset yet) will cause the kernel to perform a full relocation pass. So, if possible, flush the already-correct batch in the hope of trimming the amount of checking the kernel has to perform on the new batch.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
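For context, a minimal sketch (not part of this commit) of how an sna caller typically consumes kgem_check_bo(): when the check fails, which after this change can also happen because a fresh bo would force a full relocation pass, the current batch is submitted and the work restarts against an empty batch. The wrapper emit_with_bo() below is hypothetical; kgem_check_bo() and kgem_submit() are the existing kgem entry points assumed here.

/* Hypothetical caller, for illustration only: submit the current,
 * already-correct batch when kgem_check_bo() refuses the bo, then
 * emit the commands that reference it into the fresh batch. */
static void emit_with_bo(struct sna *sna, struct kgem_bo *bo)
{
	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
		kgem_submit(&sna->kgem);
		assert(kgem_check_bo(&sna->kgem, bo, NULL));
	}

	/* ... emit state referencing bo into the new batch ... */
}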
-rw-r--r--   src/sna/gen4_vertex.c    3
-rw-r--r--   src/sna/kgem.c          51
-rw-r--r--   src/sna/kgem.h           2
3 files changed, 47 insertions(+), 9 deletions(-)
diff --git a/src/sna/gen4_vertex.c b/src/sna/gen4_vertex.c
index 4b3496cc..0360b862 100644
--- a/src/sna/gen4_vertex.c
+++ b/src/sna/gen4_vertex.c
@@ -136,7 +136,8 @@ int gen4_vertex_finish(struct sna *sna)
	if (sna->render.vbo == NULL)
		sna->render.vbo = kgem_create_linear(&sna->kgem,
						     256*1024, CREATE_GTT_MAP);
-	if (sna->render.vbo)
+	if (sna->render.vbo &&
+	    kgem_check_bo(&sna->kgem, sna->render.vbo, NULL))
		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
	if (sna->render.vertices == NULL) {
		if (sna->render.vbo) {
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 03193c00..5e3c9f05 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3017,6 +3017,8 @@ void kgem_reset(struct kgem *kgem)
	kgem->nbatch = 0;
	kgem->surface = kgem->batch_size;
	kgem->mode = KGEM_NONE;
+	kgem->needs_semaphore = false;
+	kgem->needs_reservation = false;
	kgem->flush = 0;
	kgem->batch_flags = kgem->batch_flags_base;
@@ -5193,7 +5195,44 @@ void kgem_scanout_flush(struct kgem *kgem, struct kgem_bo *bo)
inline static bool needs_semaphore(struct kgem *kgem, struct kgem_bo *bo)
{
-	return kgem->nreloc && bo->rq && RQ_RING(bo->rq) != kgem->ring;
+	if (kgem->needs_semaphore)
+		return false;
+
+	if (bo->rq == NULL || RQ_RING(bo->rq) == kgem->ring)
+		return false;
+
+	kgem->needs_semaphore = true;
+	return true;
+}
+
+inline static bool needs_reservation(struct kgem *kgem, struct kgem_bo *bo)
+{
+	if (kgem->needs_reservation)
+		return false;
+
+	if (bo->presumed_offset || kgem_ring_is_idle(kgem, kgem->ring))
+		return false;
+
+	kgem->needs_reservation = true;
+	return true;
+}
+
+inline static bool needs_batch_flush(struct kgem *kgem, struct kgem_bo *bo)
+{
+	if (kgem->nreloc)
+		return false;
+
+	if (needs_semaphore(kgem, bo)) {
+		DBG(("%s: flushing before handle=%d for required semaphore\n", __FUNCTION__, bo->handle));
+		return true;
+	}
+
+	if (needs_reservation(kgem, bo)) {
+		DBG(("%s: flushing before handle=%d for new reservation\n", __FUNCTION__, bo->handle));
+		return true;
+	}
+
+	return false;
}
static bool aperture_check(struct kgem *kgem, unsigned num_pages)
@@ -5263,8 +5302,7 @@ bool kgem_check_bo(struct kgem *kgem, ...)
		if (bo->exec)
			continue;
-		if (needs_semaphore(kgem, bo)) {
-			DBG(("%s: flushing for required semaphore\n", __FUNCTION__));
+		if (needs_batch_flush(kgem, bo)) {
			va_end(ap);
			return false;
		}
@@ -5351,10 +5389,8 @@ bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
	if (kgem->nexec >= KGEM_EXEC_SIZE(kgem) - 1)
		return false;
-	if (needs_semaphore(kgem, bo)) {
-		DBG(("%s: flushing for required semaphore\n", __FUNCTION__));
+	if (needs_batch_flush(kgem, bo))
		return false;
-	}
	assert_tiling(kgem, bo);
	if (kgem->gen < 040 && bo->tiling != I915_TILING_NONE) {
@@ -5432,8 +5468,7 @@ bool kgem_check_many_bo_fenced(struct kgem *kgem, ...)
			continue;
		}
-		if (needs_semaphore(kgem, bo)) {
-			DBG(("%s: flushing for required semaphore\n", __FUNCTION__));
+		if (needs_batch_flush(kgem, bo)) {
			va_end(ap);
			return false;
		}
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 5152a3b8..e66bffb1 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -171,6 +171,8 @@ struct kgem {
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t need_throttle:1;
+	uint32_t needs_semaphore:1;
+	uint32_t needs_reservation:1;
	uint32_t scanout_busy:1;
	uint32_t busy:1;