-rw-r--r--   src/i830_accel.c        2
-rw-r--r--   src/i830_batchbuffer.c 18
-rw-r--r--   src/i830_batchbuffer.h  6
-rw-r--r--   src/i830_dri.c          9
-rw-r--r--   src/i830_driver.c      10
-rw-r--r--   src/i830_uxa.c          6
-rw-r--r--   src/i965_render.c       4
-rw-r--r--   src/i965_video.c        2
8 files changed, 22 insertions, 35 deletions
diff --git a/src/i830_accel.c b/src/i830_accel.c
index 74808fd8..df4f58db 100644
--- a/src/i830_accel.c
+++ b/src/i830_accel.c
@@ -59,7 +59,7 @@ void i830_debug_flush(ScrnInfoPtr scrn)
intel_batch_emit_flush(scrn);
if (intel->debug_flush & DEBUG_FLUSH_BATCHES)
- intel_batch_submit(scrn);
+ intel_batch_submit(scrn, FALSE);
}
/* The following function sets up the supported acceleration. Call it
diff --git a/src/i830_batchbuffer.c b/src/i830_batchbuffer.c
index 7ff57eea..d5d7a5e4 100644
--- a/src/i830_batchbuffer.c
+++ b/src/i830_batchbuffer.c
@@ -157,20 +157,23 @@ void intel_batch_emit_flush(ScrnInfoPtr scrn)
intel_batch_do_flush(scrn);
}
-void intel_batch_submit(ScrnInfoPtr scrn)
+void intel_batch_submit(ScrnInfoPtr scrn, int flush)
{
intel_screen_private *intel = intel_get_screen_private(scrn);
int ret;
assert (!intel->in_batch_atomic);
- if (intel->batch_used == 0)
- return;
-
if (intel->vertex_flush)
intel->vertex_flush(intel);
intel_end_vertex(intel);
+ if (flush)
+ intel_batch_emit_flush(scrn);
+
+ if (intel->batch_used == 0)
+ return;
+
/* Mark the end of the batchbuffer. */
OUT_BATCH(MI_BATCH_BUFFER_END);
/* Emit a padding dword if we aren't going to be quad-word aligned. */
@@ -213,10 +216,6 @@ void intel_batch_submit(ScrnInfoPtr scrn)
list_del(&entry->batch);
}
- /* Mark that we need to flush whatever potential rendering we've done in the
- * blockhandler. We could set this less often, but it's probably not worth
- * the work.
- */
intel->need_mi_flush = !list_is_empty(&intel->flush_pixmaps);
while (!list_is_empty(&intel->flush_pixmaps))
list_del(intel->flush_pixmaps.next);
@@ -271,7 +270,6 @@ void intel_sync(ScrnInfoPtr scrn)
if (!scrn->vtSema || !intel->batch_bo || !intel->batch_ptr)
return;
- intel_batch_emit_flush(scrn);
- intel_batch_submit(scrn);
+ intel_batch_submit(scrn, TRUE);
intel_batch_wait_last(scrn);
}
diff --git a/src/i830_batchbuffer.h b/src/i830_batchbuffer.h
index 6422c8bf..62c42b5e 100644
--- a/src/i830_batchbuffer.h
+++ b/src/i830_batchbuffer.h
@@ -37,7 +37,7 @@ void intel_batch_init(ScrnInfoPtr scrn);
void intel_batch_teardown(ScrnInfoPtr scrn);
void intel_batch_emit_flush(ScrnInfoPtr scrn);
void intel_batch_do_flush(ScrnInfoPtr scrn);
-void intel_batch_submit(ScrnInfoPtr scrn);
+void intel_batch_submit(ScrnInfoPtr scrn, int flush);
void intel_batch_wait_last(ScrnInfoPtr scrn);
static inline int intel_batch_space(intel_screen_private *intel)
@@ -55,7 +55,7 @@ intel_batch_require_space(ScrnInfoPtr scrn, intel_screen_private *intel, GLuint
{
assert(sz < intel->batch_bo->size - 8);
if (intel_batch_space(intel) < sz)
- intel_batch_submit(scrn);
+ intel_batch_submit(scrn, FALSE);
}
static inline void intel_batch_start_atomic(ScrnInfoPtr scrn, unsigned int sz)
@@ -199,7 +199,7 @@ do { \
if ((intel->batch_emitting > 8) && \
(I810_DEBUG & DEBUG_ALWAYS_SYNC)) { \
/* Note: not actually syncing, just flushing each batch. */ \
- intel_batch_submit(scrn); \
+ intel_batch_submit(scrn, FALSE); \
} \
intel->batch_emitting = 0; \
} while (0)
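Call sites that only need room in the buffer keep passing FALSE: wrapping the batch for space is not a synchronisation point, so no MI_FLUSH is implied. A hypothetical emitter using the helpers from this header (the function itself is invented for illustration) might look like:

/* Hypothetical example, not in the driver: reserve space, then emit two
 * dwords.  intel_batch_require_space() may submit the current batch with
 * flush = FALSE purely to make room. */
static void emit_two_dwords(ScrnInfoPtr scrn, intel_screen_private *intel,
			    uint32_t a, uint32_t b)
{
	intel_batch_require_space(scrn, intel, 2 * 4);	/* size in bytes */
	OUT_BATCH(a);
	OUT_BATCH(b);
}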
diff --git a/src/i830_dri.c b/src/i830_dri.c
index 9b33fe28..eadb25b8 100644
--- a/src/i830_dri.c
+++ b/src/i830_dri.c
@@ -400,12 +400,9 @@ I830DRI2CopyRegion(DrawablePtr drawable, RegionPtr pRegion,
* later.
*
* We can't rely on getting into the block handler before the DRI
- * client gets to run again so flush now. */
- intel_batch_emit_flush(scrn);
- intel_batch_submit(scrn);
-#if ALWAYS_SYNC
- intel_sync(scrn);
-#endif
+ * client gets to run again so flush now.
+ */
+ intel_batch_submit(scrn, TRUE);
drmCommandNone(intel->drmSubFD, DRM_I915_GEM_THROTTLE);
}
diff --git a/src/i830_driver.c b/src/i830_driver.c
index 40ca89f6..b25595b0 100644
--- a/src/i830_driver.c
+++ b/src/i830_driver.c
@@ -2164,16 +2164,8 @@ I830BlockHandler(int i, pointer blockData, pointer pTimeout, pointer pReadmask)
/* Emit a flush of the rendering cache, or on the 965 and beyond
* rendering results may not hit the framebuffer until significantly
* later.
- *
- * XXX Under KMS this is only required because tfp does not have
- * the appropriate synchronisation points, so that outstanding updates
- * to the pixmap are flushed prior to use as a texture. The framebuffer
- * should be handled by the kernel domain management...
*/
- if (intel->need_mi_flush || !list_is_empty(&intel->flush_pixmaps))
- intel_batch_emit_flush(scrn);
-
- intel_batch_submit(scrn);
+ intel_batch_submit(scrn, intel->need_mi_flush);
drmCommandNone(intel->drmSubFD, DRM_I915_GEM_THROTTLE);
}
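Taken together, the second argument turns each call site into a one-line statement of whether the submission is also a synchronisation point. As a summary of the convention this patch establishes (not additional code):

/* Synchronisation points: emit the flush and submit in one call. */
intel_batch_submit(scrn, TRUE);                  /* I830DRI2CopyRegion, intel_sync */

/* Block handler: flush only if outstanding rendering still needs it. */
intel_batch_submit(scrn, intel->need_mi_flush);  /* I830BlockHandler */

/* Space pressure or CPU access: just wrap the batch, no flush implied. */
intel_batch_submit(scrn, FALSE);                 /* require_space, UXA, render, video */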
diff --git a/src/i830_uxa.c b/src/i830_uxa.c
index d75a6c8d..2aca706a 100644
--- a/src/i830_uxa.c
+++ b/src/i830_uxa.c
@@ -118,7 +118,7 @@ i830_get_aperture_space(ScrnInfoPtr scrn, drm_intel_bo ** bo_table,
bo_table[0] = intel->batch_bo;
if (drm_intel_bufmgr_check_aperture_space(bo_table, num_bos) != 0) {
- intel_batch_submit(scrn);
+ intel_batch_submit(scrn, FALSE);
bo_table[0] = intel->batch_bo;
if (drm_intel_bufmgr_check_aperture_space(bo_table, num_bos) !=
0) {
@@ -665,7 +665,7 @@ static Bool i830_uxa_prepare_access(PixmapPtr pixmap, uxa_access_t access)
if (!list_is_empty(&priv->batch) &&
(access == UXA_ACCESS_RW || priv->batch_write))
- intel_batch_submit(scrn);
+ intel_batch_submit(scrn, FALSE);
if (bo->size > intel->max_gtt_map_size) {
ret = dri_bo_map(bo, access == UXA_ACCESS_RW);
@@ -890,7 +890,7 @@ static Bool i830_uxa_get_image(PixmapPtr pixmap,
FreeScratchGC(gc);
- intel_batch_submit(xf86Screens[screen->myNum]);
+ intel_batch_submit(xf86Screens[screen->myNum], FALSE);
x = y = 0;
pixmap = scratch;
diff --git a/src/i965_render.c b/src/i965_render.c
index 02b74f42..18b0123d 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -1649,7 +1649,7 @@ i965_prepare_composite(int op, PicturePtr source_picture,
}
if (!i965_composite_check_aperture(scrn)) {
- intel_batch_submit(scrn);
+ intel_batch_submit(scrn, FALSE);
if (!i965_composite_check_aperture(scrn)) {
intel_debug_fallback(scrn,
"Couldn't fit render operation "
@@ -1826,7 +1826,7 @@ i965_composite(PixmapPtr dest, int srcX, int srcY, int maskX, int maskY,
drm_intel_bo_subdata(vb_bo, render_state->vb_offset * 4, i * 4, vb);
if (!i965_composite_check_aperture(scrn))
- intel_batch_submit(scrn);
+ intel_batch_submit(scrn, FALSE);
intel_batch_start_atomic(scrn, 200);
if (intel->needs_render_state_emit)
diff --git a/src/i965_video.c b/src/i965_video.c
index 1beea7d6..e25184b6 100644
--- a/src/i965_video.c
+++ b/src/i965_video.c
@@ -1197,7 +1197,7 @@ I965DisplayVideoTextured(ScrnInfoPtr scrn,
if (drm_intel_bufmgr_check_aperture_space(bo_table,
ARRAY_SIZE(bo_table))
< 0) {
- intel_batch_submit(scrn);
+ intel_batch_submit(scrn, FALSE);
}
intel_batch_start_atomic(scrn, 100);