author     Chris Wilson <chris@chris-wilson.co.uk>       2010-12-03 02:05:05 +0000
committer  Owain G. Ainsworth <oga@openbsd.org>          2011-05-29 21:42:10 +0100
commit     551fd61380347af023f405071d7a7b3acc24274c (patch)
tree       c8e5e326ffd29ba8f32ed6ea73e38143cd051e11
parent     3450898ed26732a6741a617cfb0f7b6c7a1fdee2 (diff)
Wait on the current buffer to complete when running synchronously.

And remove the vestigial wait upon changing crtc as this is more properly
done in the kernel.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
(cherry picked from commit 55c5f1876e2329a938955967f5d45c814e50beb5)
Conflicts:
src/intel_batchbuffer.c
src/intel_display.c
(OGA: had to modify intel-sync to keep this working right)
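For readers skimming the diff below, here is a minimal sketch of the two wait strategies this patch switches between, using only the libdrm_intel calls that appear in the diff; the helper names are illustrative, not part of the driver:

#include <intel_bufmgr.h>	/* libdrm_intel */

/* Old approach: keep a reference to the last submitted batch and fault it
 * through the GTT; the map cannot complete until the GPU is done with it. */
static void wait_by_gtt_map(drm_intel_bo *last_batch_bo)
{
	drm_intel_gem_bo_map_gtt(last_batch_bo);
	drm_intel_gem_bo_unmap_gtt(last_batch_bo);
}

/* New approach: block on the current batch directly, before the last
 * reference to it is dropped, whenever DEBUG_FLUSH_WAIT is set. */
static void wait_by_rendering(drm_intel_bo *batch_bo)
{
	drm_intel_bo_wait_rendering(batch_bo);
}

The practical gain is that intel_batch_submit() no longer has to carry last_batch_bo around purely for the debug/sync path; intel_sync() gets the same behaviour by temporarily setting DEBUG_FLUSH_WAIT around the submit.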
-rw-r--r--   src/intel.h             |  1
-rw-r--r--   src/intel_batchbuffer.c | 37
-rw-r--r--   src/intel_batchbuffer.h |  1

3 files changed, 10 insertions, 29 deletions
diff --git a/src/intel.h b/src/intel.h
index 91b63845..39e973c4 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -511,7 +511,6 @@ typedef struct intel_screen_private {
 	/** Number of bytes to be emitted in the current BEGIN_BATCH. */
 	uint32_t batch_emitting;
 	dri_bo *batch_bo;
-	dri_bo *last_batch_bo;
 	/** Whether we're in a section of code that can't tolerate flushing */
 	Bool in_batch_atomic;
 	/** Ending batch_used that was verified by intel_start_batch_atomic() */
diff --git a/src/intel_batchbuffer.c b/src/intel_batchbuffer.c
index 3f8053b0..b529322e 100644
--- a/src/intel_batchbuffer.c
+++ b/src/intel_batchbuffer.c
@@ -102,11 +102,6 @@ void intel_batch_teardown(ScrnInfoPtr scrn)
 		intel->batch_bo = NULL;
 	}
 
-	if (intel->last_batch_bo != NULL) {
-		dri_bo_unreference(intel->last_batch_bo);
-		intel->last_batch_bo = NULL;
-	}
-
 	if (intel->vertex_bo) {
 		dri_bo_unreference(intel->vertex_bo);
 		intel->vertex_bo = NULL;
@@ -267,36 +262,18 @@ void intel_batch_submit(ScrnInfoPtr scrn, int flush)
 		free(entry);
 	}
 
-	/* Save a ref to the last batch emitted, which we use for syncing
-	 * in debug code.
-	 */
-	dri_bo_unreference(intel->last_batch_bo);
-	intel->last_batch_bo = intel->batch_bo;
-	intel->batch_bo = NULL;
+	if (intel->debug_flush & DEBUG_FLUSH_WAIT)
+		drm_intel_bo_wait_rendering(intel->batch_bo);
+	dri_bo_unreference(intel->batch_bo);
 
 	intel_next_batch(scrn);
 
-	if (intel->debug_flush & DEBUG_FLUSH_WAIT)
-		intel_batch_wait_last(scrn);
-
 	if (intel->batch_commit_notify)
 		intel->batch_commit_notify(intel);
 
 	intel->current_batch = 0;
 }
 
-/** Waits on the last emitted batchbuffer to be completed. */
-void intel_batch_wait_last(ScrnInfoPtr scrn)
-{
-	intel_screen_private *intel = intel_get_screen_private(scrn);
-
-	/* Map it CPU write, which guarantees it's done. This is a completely
-	 * non performance path, so we don't need anything better.
-	 */
-	drm_intel_gem_bo_map_gtt(intel->last_batch_bo);
-	drm_intel_gem_bo_unmap_gtt(intel->last_batch_bo);
-}
-
 void intel_debug_flush(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
@@ -311,10 +288,16 @@ void intel_debug_flush(ScrnInfoPtr scrn)
 void intel_sync(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
+	int had;
 
 	if (!scrn->vtSema || !intel->batch_bo || !intel->batch_ptr)
 		return;
+
+	/* XXX hack while we still need this for ums */
+	had = intel->debug_flush & DEBUG_FLUSH_WAIT;
+	intel->debug_flush |= DEBUG_FLUSH_WAIT;
 
 	intel_batch_submit(scrn, TRUE);
-	intel_batch_wait_last(scrn);
+	if (!had)
+		intel->debug_flush &= ~DEBUG_FLUSH_WAIT;
 }
diff --git a/src/intel_batchbuffer.h b/src/intel_batchbuffer.h
index cd6d8e8d..a88d7ba3 100644
--- a/src/intel_batchbuffer.h
+++ b/src/intel_batchbuffer.h
@@ -38,7 +38,6 @@ void intel_batch_teardown(ScrnInfoPtr scrn);
 void intel_batch_emit_flush(ScrnInfoPtr scrn);
 void intel_batch_do_flush(ScrnInfoPtr scrn);
 void intel_batch_submit(ScrnInfoPtr scrn, int flush);
-void intel_batch_wait_last(ScrnInfoPtr scrn);
 
 static inline int intel_batch_space(intel_screen_private *intel)
 {
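To make the interleaved +/- lines in the intel_sync() hunk easier to read, this is roughly what the function looks like once the patch is applied; it is reconstructed from the hunk above, so treat it as a sketch rather than the authoritative tree:

/* Force intel_batch_submit() to wait on the batch it just emitted by
 * temporarily setting DEBUG_FLUSH_WAIT, then restore the previous flag. */
void intel_sync(ScrnInfoPtr scrn)
{
	intel_screen_private *intel = intel_get_screen_private(scrn);
	int had;

	if (!scrn->vtSema || !intel->batch_bo || !intel->batch_ptr)
		return;

	/* XXX hack while we still need this for ums */
	had = intel->debug_flush & DEBUG_FLUSH_WAIT;
	intel->debug_flush |= DEBUG_FLUSH_WAIT;

	intel_batch_submit(scrn, TRUE);
	if (!had)
		intel->debug_flush &= ~DEBUG_FLUSH_WAIT;
}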