author     Chris Wilson <chris@chris-wilson.co.uk>    2009-12-07 11:09:14 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>    2009-12-07 11:15:42 +0000
commit     cd475bad23c02130d11c49882c11261c9f0d4ef1 (patch)
tree       0f63ed78534c7251597cd3cd496bae881f77b186
parent     415aab474edd1425034981306718afd8506445f1 (diff)
batch: Ensure we send a MI_FLUSH in the block handler for TFP
This should restore the previous level of synchronisation between
textures and pixmaps, but it *does not* guarantee that a texture will be
flushed before use. TFP itself should be fixed so that the DDX can submit
the batch whenever a pixmap needs to be flushed before being used as a
texture.
A side effect of this patch is that intel_batch_flush() is renamed to
intel_batch_submit(), to avoid confusing the execution of a batch buffer
with the emission of an MI_FLUSH; the sketch below illustrates the
distinction.
This should fix the remaining rendering corruption involving TFP
(including compiz):
Bug 25431 [i915 bisected] piglit/texturing_tfp regressed
http://bugs.freedesktop.org/show_bug.cgi?id=25431
Bug 25481 Wrong cursor format and cursor blink rate with compiz enabled
http://bugs.freedesktop.org/show_bug.cgi?id=25481
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
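
The rename separates two operations: appending an MI_FLUSH command to the still-open batch, and handing the accumulated batch to the kernel for execution. The sketch below illustrates that split; it is a minimal illustration only, not the driver code, and the struct, the opcode/flag values and the printf are stand-ins for intel_screen_private, the real i915 command encodings and the execbuffer call.

```c
#include <stdio.h>
#include <stdbool.h>

/* Illustrative values only; the driver takes these from its register headers. */
#define MI_FLUSH                 (0x04 << 23)
#define MI_WRITE_DIRTY_STATE     (1 << 4)
#define MI_INVALIDATE_MAP_CACHE  (1 << 0)

struct batch {
	unsigned int cmds[256];   /* commands queued since the last submit */
	int used;
	bool need_mi_flush;       /* pixmaps rendered to but not yet flushed */
};

/* "Emit a flush": append an MI_FLUSH to the open batch.  Nothing reaches
 * the GPU yet; callers that only need the render caches flushed stop here. */
static void batch_emit_flush(struct batch *b, bool is_i965)
{
	unsigned int flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;

	if (is_i965)
		flags = 0;
	b->cmds[b->used++] = MI_FLUSH | flags;
	b->need_mi_flush = false;
}

/* "Submit": hand everything queued so far to the kernel and start a new
 * batch.  This is the operation the patch renames intel_batch_flush() to. */
static void batch_submit(struct batch *b)
{
	if (b->used == 0)
		return;
	printf("execbuffer: %d dwords\n", b->used);   /* stand-in for the execbuffer call */
	b->used = 0;
}

int main(void)
{
	struct batch b = { .used = 0, .need_mi_flush = false };

	batch_emit_flush(&b, false);   /* queue the flush command... */
	batch_submit(&b);              /* ...it only reaches the GPU on submit */
	return 0;
}
```

In the diff below, intel_batch_emit_flush() (the old intel_batch_pipelined_flush()) plays the role of batch_emit_flush(), and intel_batch_submit() (the old intel_batch_flush()) plays the role of batch_submit().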
-rw-r--r--   src/i830_accel.c       |  4
-rw-r--r--   src/i830_batchbuffer.c | 32
-rw-r--r--   src/i830_batchbuffer.h |  8
-rw-r--r--   src/i830_dri.c         |  3
-rw-r--r--   src/i830_driver.c      | 20
-rw-r--r--   src/i830_render.c      |  2
-rw-r--r--   src/i830_uxa.c         |  4
-rw-r--r--   src/i915_render.c      |  2
-rw-r--r--   src/i965_render.c      |  6
-rw-r--r--   src/i965_video.c       |  2

10 files changed, 35 insertions(+), 48 deletions(-)
diff --git a/src/i830_accel.c b/src/i830_accel.c
index 509d6520..74808fd8 100644
--- a/src/i830_accel.c
+++ b/src/i830_accel.c
@@ -56,10 +56,10 @@ void i830_debug_flush(ScrnInfoPtr scrn)
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	if (intel->debug_flush & DEBUG_FLUSH_CACHES)
-		intel_batch_pipelined_flush(scrn);
+		intel_batch_emit_flush(scrn);
 
 	if (intel->debug_flush & DEBUG_FLUSH_BATCHES)
-		intel_batch_flush(scrn);
+		intel_batch_submit(scrn);
 }
 
 /* The following function sets up the supported acceleration. Call it
diff --git a/src/i830_batchbuffer.c b/src/i830_batchbuffer.c
index 12e044af..ed80f149 100644
--- a/src/i830_batchbuffer.c
+++ b/src/i830_batchbuffer.c
@@ -93,16 +93,13 @@ void intel_batch_teardown(ScrnInfoPtr scrn)
 	}
 }
 
-void intel_batch_pipelined_flush(ScrnInfoPtr scrn)
+void intel_batch_emit_flush(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	int flags;
 
 	assert (!intel->in_batch_atomic);
 
-	if (intel->batch_used == 0)
-		return;
-
 	/* Big hammer, look to the pipelined flushes in future. */
 	flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;
 	if (IS_I965G(intel))
@@ -122,9 +119,11 @@ void intel_batch_pipelined_flush(ScrnInfoPtr scrn)
 		entry->flush_read_domains = entry->flush_write_domain = 0;
 		list_del(&entry->flush);
 	}
+
+	intel->need_mi_flush = FALSE;
 }
 
-void intel_batch_flush(ScrnInfoPtr scrn)
+void intel_batch_submit(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	int ret;
@@ -175,6 +174,12 @@ void intel_batch_flush(ScrnInfoPtr scrn)
 		entry->batch_read_domains = entry->batch_write_domain = 0;
 		list_del(&entry->batch);
 	}
+
+	/* Mark that we need to flush whatever potential rendering we've done in the
+	 * blockhandler. We could set this less often, but it's probably not worth
+	 * the work.
+	 */
+	intel->need_mi_flush = !list_is_empty(&intel->flush_pixmaps);
 
 	while (!list_is_empty(&intel->flush_pixmaps)) {
 		struct intel_pixmap *entry;
@@ -195,11 +200,6 @@ void intel_batch_flush(ScrnInfoPtr scrn)
 
 	intel_next_batch(scrn);
 
-	/* Mark that we need to flush whatever potential rendering we've done in the
-	 * blockhandler. We could set this less often, but it's probably not worth
-	 * the work.
-	 */
-	intel->need_mi_flush = TRUE;
 
 	if (intel->debug_flush & DEBUG_FLUSH_WAIT)
 		intel_batch_wait_last(scrn);
@@ -223,7 +223,6 @@ void intel_batch_wait_last(ScrnInfoPtr scrn)
 void intel_sync(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
-	int flags;
 
 	if (I810_DEBUG & (DEBUG_VERBOSE_ACCEL | DEBUG_VERBOSE_SYNC))
 		ErrorF("I830Sync\n");
@@ -231,14 +230,7 @@ void intel_sync(ScrnInfoPtr scrn)
 	if (!scrn->vtSema || !intel->batch_bo || !intel->batch_ptr)
 		return;
 
-	flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;
-	if (IS_I965G(intel))
-		flags = 0;
-
-	BEGIN_BATCH(1);
-	OUT_BATCH(flags);
-	ADVANCE_BATCH();
-
-	intel_batch_flush(scrn);
+	intel_batch_emit_flush(scrn);
+	intel_batch_submit(scrn);
 	intel_batch_wait_last(scrn);
 }
diff --git a/src/i830_batchbuffer.h b/src/i830_batchbuffer.h
index 10d6fcba..16c33efc 100644
--- a/src/i830_batchbuffer.h
+++ b/src/i830_batchbuffer.h
@@ -35,8 +35,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 void intel_batch_init(ScrnInfoPtr scrn);
 void intel_batch_teardown(ScrnInfoPtr scrn);
-void intel_batch_pipelined_flush(ScrnInfoPtr scrn);
-void intel_batch_flush(ScrnInfoPtr scrn);
+void intel_batch_emit_flush(ScrnInfoPtr scrn);
+void intel_batch_submit(ScrnInfoPtr scrn);
 void intel_batch_wait_last(ScrnInfoPtr scrn);
 
 static inline int intel_batch_space(intel_screen_private *intel)
@@ -49,7 +49,7 @@ intel_batch_require_space(ScrnInfoPtr scrn, intel_screen_private *intel, GLuint
 {
 	assert(sz < intel->batch_bo->size - 8);
 	if (intel_batch_space(intel) < sz)
-		intel_batch_flush(scrn);
+		intel_batch_submit(scrn);
 }
 
 static inline void intel_batch_start_atomic(ScrnInfoPtr scrn, unsigned int sz)
@@ -193,7 +193,7 @@ do { \
 	if ((intel->batch_emitting > 8) && \
 	    (I810_DEBUG & DEBUG_ALWAYS_SYNC)) { \
 		/* Note: not actually syncing, just flushing each batch. */ \
-		intel_batch_flush(scrn); \
+		intel_batch_submit(scrn); \
 	} \
 	intel->batch_emitting = 0; \
 } while (0)
diff --git a/src/i830_dri.c b/src/i830_dri.c
index 38de093d..0246e61b 100644
--- a/src/i830_dri.c
+++ b/src/i830_dri.c
@@ -351,8 +351,7 @@ I830DRI2CopyRegion(DrawablePtr drawable, RegionPtr pRegion,
 	 *
 	 * We can't rely on getting into the block handler before the DRI
 	 * client gets to run again so flush now. */
-	intel->need_mi_flush = FALSE;
-	intel_batch_flush(scrn);
+	intel_batch_submit(scrn);
 #if ALWAYS_SYNC
 	intel_sync(scrn);
 #endif
diff --git a/src/i830_driver.c b/src/i830_driver.c
index fbbc5855..e94a60c9 100644
--- a/src/i830_driver.c
+++ b/src/i830_driver.c
@@ -985,24 +985,20 @@ I830BlockHandler(int i, pointer blockData, pointer pTimeout, pointer pReadmask)
 	screen->BlockHandler = I830BlockHandler;
 
 	if (scrn->vtSema) {
-		Bool flush = FALSE;
-
 		/* Emit a flush of the rendering cache, or on the 965 and beyond
 		 * rendering results may not hit the framebuffer until significantly
 		 * later.
+		 *
+		 * XXX Under KMS this is only required because tfp does not have
+		 * the appropriate synchronisation points, so that outstanding updates
+		 * to the pixmap are flushed prior to use as a texture. The framebuffer
+		 * should be handled by the kernel domain management...
 		 */
-		if (intel->need_mi_flush || intel->batch_used)
-			flush = TRUE;
+		if (intel->need_mi_flush || !list_is_empty(&intel->flush_pixmaps))
+			intel_batch_emit_flush(scrn);
 
-		/* Flush the batch, so that any rendering is executed in a timely
-		 * fashion.
-		 */
-		if (flush)
-			intel_batch_pipelined_flush(scrn);
-		intel_batch_flush(scrn);
+		intel_batch_submit(scrn);
 
 		drmCommandNone(intel->drmSubFD, DRM_I915_GEM_THROTTLE);
-
-		intel->need_mi_flush = FALSE;
 	}
 
 	i830_uxa_block_handler(screen);
diff --git a/src/i830_render.c b/src/i830_render.c
index fd8003f0..4d37a404 100644
--- a/src/i830_render.c
+++ b/src/i830_render.c
@@ -555,7 +555,7 @@ i830_prepare_composite(int op, PicturePtr source_picture,
 
 	if(i830_uxa_pixmap_is_dirty(source) ||
 	   (mask && i830_uxa_pixmap_is_dirty(mask)))
-		intel_batch_pipelined_flush(scrn);
+		intel_batch_emit_flush(scrn);
 
 	intel->needs_render_state_emit = TRUE;
 
diff --git a/src/i830_uxa.c b/src/i830_uxa.c
index ef02263d..9c381f32 100644
--- a/src/i830_uxa.c
+++ b/src/i830_uxa.c
@@ -93,7 +93,7 @@ i830_get_aperture_space(ScrnInfoPtr scrn, drm_intel_bo ** bo_table,
 	bo_table[0] = intel->batch_bo;
 
 	if (drm_intel_bufmgr_check_aperture_space(bo_table, num_bos) != 0) {
-		intel_batch_flush(scrn);
+		intel_batch_submit(scrn);
 		bo_table[0] = intel->batch_bo;
 
 		if (drm_intel_bufmgr_check_aperture_space(bo_table, num_bos) != 0) {
@@ -573,7 +573,7 @@ static Bool i830_uxa_prepare_access(PixmapPtr pixmap, uxa_access_t access)
 
 	if (!list_is_empty(&priv->batch) &&
 	    (access == UXA_ACCESS_RW || priv->batch_write_domain))
-		intel_batch_flush(scrn);
+		intel_batch_submit(scrn);
 
 	/* No VT sema or GEM? No GTT mapping. */
 	if (!scrn->vtSema || bo->size > intel->max_gtt_map_size) {
diff --git a/src/i915_render.c b/src/i915_render.c
index b660af18..37af72b9 100644
--- a/src/i915_render.c
+++ b/src/i915_render.c
@@ -464,7 +464,7 @@ i915_prepare_composite(int op, PicturePtr source_picture,
 
 	if(i830_uxa_pixmap_is_dirty(source) ||
 	   (mask && i830_uxa_pixmap_is_dirty(mask)))
-		intel_batch_pipelined_flush(scrn);
+		intel_batch_emit_flush(scrn);
 
 	intel->needs_render_state_emit = TRUE;
 
diff --git a/src/i965_render.c b/src/i965_render.c
index a6a0d6b9..62e17e4b 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -1627,7 +1627,7 @@ i965_prepare_composite(int op, PicturePtr source_picture,
 	}
 
 	if (!i965_composite_check_aperture(scrn)) {
-		intel_batch_flush(scrn);
+		intel_batch_submit(scrn);
 		if (!i965_composite_check_aperture(scrn)) {
 			intel_debug_fallback(scrn, "Couldn't fit render operation "
@@ -1638,7 +1638,7 @@ i965_prepare_composite(int op, PicturePtr source_picture,
 
 	if(i830_uxa_pixmap_is_dirty(source) ||
 	   (mask && i830_uxa_pixmap_is_dirty(mask)))
-		intel_batch_pipelined_flush(scrn);
+		intel_batch_emit_flush(scrn);
 
 	intel->needs_render_state_emit = TRUE;
 
@@ -1805,7 +1805,7 @@ i965_composite(PixmapPtr dest, int srcX, int srcY, int maskX, int maskY,
 	drm_intel_bo_subdata(vb_bo, render_state->vb_offset * 4, i * 4, vb);
 
 	if (!i965_composite_check_aperture(scrn))
-		intel_batch_flush(scrn);
+		intel_batch_submit(scrn);
 
 	intel_batch_start_atomic(scrn, 200);
 
 	if (intel->needs_render_state_emit)
diff --git a/src/i965_video.c b/src/i965_video.c
index cc9b309b..e0a8215e 100644
--- a/src/i965_video.c
+++ b/src/i965_video.c
@@ -1213,7 +1213,7 @@ I965DisplayVideoTextured(ScrnInfoPtr scrn,
 	if (drm_intel_bufmgr_check_aperture_space(bo_table,
 						  ARRAY_SIZE(bo_table)) < 0) {
-		intel_batch_flush(scrn);
+		intel_batch_submit(scrn);
 	}
 
 	intel_batch_start_atomic(scrn, 100);
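
As a footnote to the I830BlockHandler() hunk above: after this patch the block handler emits the MI_FLUSH only when outstanding rendering could still be sampled by a TFP client, and then always submits the batch before throttling. A self-contained sketch of that decision follows; the struct and helper names here are placeholders, while the real state lives in intel->need_mi_flush and intel->flush_pixmaps and the real helpers are intel_batch_emit_flush() and intel_batch_submit().

```c
#include <stdbool.h>

/* Placeholder state mirroring the two conditions the patched block
 * handler checks; not the driver's intel_screen_private. */
struct screen_state {
	bool need_mi_flush;       /* a submitted batch touched shared pixmaps */
	bool have_flush_pixmaps;  /* the open batch has pixmaps queued for flushing */
};

/* Stub helpers standing in for intel_batch_emit_flush()/intel_batch_submit(). */
static void emit_flush(struct screen_state *s)   { s->need_mi_flush = false; }
static void submit_batch(struct screen_state *s) { (void)s; /* execbuffer + fresh batch */ }

/* Simplified outline of the decision in the patched I830BlockHandler(). */
void block_handler_flush(struct screen_state *s)
{
	/* Emit MI_FLUSH only if there is rendering a TFP client might
	 * sample before the next batch would flush it anyway. */
	if (s->need_mi_flush || s->have_flush_pixmaps)
		emit_flush(s);

	/* Always submit so queued rendering executes promptly; the driver
	 * then throttles via DRM_I915_GEM_THROTTLE. */
	submit_batch(s);
}
```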