summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/i830_accel.c16
-rw-r--r--src/i830_batchbuffer.c36
-rw-r--r--src/i830_batchbuffer.h6
-rw-r--r--src/i830_display.c4
-rw-r--r--src/i830_dri.c2
-rw-r--r--src/i830_driver.c8
-rw-r--r--src/i830_uxa.c8
-rw-r--r--src/i830_video.c6
-rw-r--r--src/i965_render.c4
-rw-r--r--src/i965_video.c2
10 files changed, 45 insertions, 47 deletions
diff --git a/src/i830_accel.c b/src/i830_accel.c
index 86d412df..11e7d1f7 100644
--- a/src/i830_accel.c
+++ b/src/i830_accel.c
@@ -120,20 +120,6 @@ I830WaitLpRing(ScrnInfoPtr scrn, int n, int timeout_millis)
return iters;
}
-void intel_sync(ScrnInfoPtr scrn)
-{
- intel_screen_private *intel = intel_get_screen_private(scrn);
-
- if (I810_DEBUG & (DEBUG_VERBOSE_ACCEL | DEBUG_VERBOSE_SYNC))
- ErrorF("I830Sync\n");
-
- if (!scrn->vtSema || !intel->batch_bo || !intel->batch_ptr)
- return;
-
- intel_batch_flush(scrn, TRUE);
- intel_batch_wait_last(scrn);
-}
-
void i830_debug_flush(ScrnInfoPtr scrn)
{
intel_screen_private *intel = intel_get_screen_private(scrn);
@@ -142,7 +128,7 @@ void i830_debug_flush(ScrnInfoPtr scrn)
intel_batch_pipelined_flush(scrn);
if (intel->debug_flush & DEBUG_FLUSH_BATCHES)
- intel_batch_flush(scrn, FALSE);
+ intel_batch_flush(scrn);
}
/* The following function sets up the supported acceleration. Call it
diff --git a/src/i830_batchbuffer.c b/src/i830_batchbuffer.c
index a8189f1d..ad8eb284 100644
--- a/src/i830_batchbuffer.c
+++ b/src/i830_batchbuffer.c
@@ -198,24 +198,13 @@ void intel_batch_pipelined_flush(ScrnInfoPtr scrn)
}
}
-void intel_batch_flush(ScrnInfoPtr scrn, Bool flush)
+void intel_batch_flush(ScrnInfoPtr scrn)
{
intel_screen_private *intel = intel_get_screen_private(scrn);
int ret;
assert (!intel->in_batch_atomic);
- if (flush) {
- int flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;
-
- if (IS_I965G(intel))
- flags = 0;
-
- *(uint32_t *) (intel->batch_ptr + intel->batch_used) =
- MI_FLUSH | flags;
- intel->batch_used += 4;
- }
-
if (intel->batch_used == 0)
return;
@@ -303,3 +292,26 @@ void intel_batch_wait_last(ScrnInfoPtr scrn)
drm_intel_bo_map(intel->last_batch_bo, TRUE);
drm_intel_bo_unmap(intel->last_batch_bo);
}
+
+void intel_sync(ScrnInfoPtr scrn)
+{
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+	int flags;
+
+	if (I810_DEBUG & (DEBUG_VERBOSE_ACCEL | DEBUG_VERBOSE_SYNC))
+		ErrorF("I830Sync\n");
+
+	if (!scrn->vtSema || !intel->batch_bo || !intel->batch_ptr)
+		return;
+
+	flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;
+	if (IS_I965G(intel))
+		flags = 0;
+
+	BEGIN_BATCH(1);
+	OUT_BATCH(MI_FLUSH | flags);
+	ADVANCE_BATCH();
+
+	intel_batch_flush(scrn);
+	intel_batch_wait_last(scrn);
+}
diff --git a/src/i830_batchbuffer.h b/src/i830_batchbuffer.h
index 5026a72c..cc6b800d 100644
--- a/src/i830_batchbuffer.h
+++ b/src/i830_batchbuffer.h
@@ -36,7 +36,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
void intel_batch_init(ScrnInfoPtr scrn);
void intel_batch_teardown(ScrnInfoPtr scrn);
void intel_batch_pipelined_flush(ScrnInfoPtr scrn);
-void intel_batch_flush(ScrnInfoPtr scrn, Bool flush);
+void intel_batch_flush(ScrnInfoPtr scrn);
void intel_batch_wait_last(ScrnInfoPtr scrn);
static inline int intel_batch_space(intel_screen_private *intel)
@@ -49,7 +49,7 @@ intel_batch_require_space(ScrnInfoPtr scrn, intel_screen_private *intel, GLuint
{
assert(sz < intel->batch_bo->size - 8);
if (intel_batch_space(intel) < sz)
- intel_batch_flush(scrn, FALSE);
+ intel_batch_flush(scrn);
}
static inline void intel_batch_start_atomic(ScrnInfoPtr scrn, unsigned int sz)
@@ -198,7 +198,7 @@ do { \
if ((intel->batch_emitting > 8) && \
(I810_DEBUG & DEBUG_ALWAYS_SYNC)) { \
/* Note: not actually syncing, just flushing each batch. */ \
- intel_batch_flush(scrn, FALSE); \
+ intel_batch_flush(scrn); \
} \
intel->batch_emitting = 0; \
} while (0)
diff --git a/src/i830_display.c b/src/i830_display.c
index 1ecb1131..9c8771da 100644
--- a/src/i830_display.c
+++ b/src/i830_display.c
@@ -1271,7 +1271,7 @@ i830_crtc_lock (xf86CrtcPtr crtc)
/* Sync the engine before mode switch, to finish any outstanding
* WAIT_FOR_EVENTS that may rely on CRTC state.
*/
- I830Sync(crtc->scrn);
+ intel_sync(crtc->scrn);
return FALSE;
}
@@ -1974,7 +1974,7 @@ i830_crtc_shadow_destroy(xf86CrtcPtr crtc, PixmapPtr rotate_pixmap, void *data)
if (data) {
/* Be sure to sync acceleration before the memory gets unbound. */
- I830Sync(scrn);
+ intel_sync(scrn);
i830_free_memory(scrn, intel_crtc->rotate_mem);
intel_crtc->rotate_mem = NULL;
}
diff --git a/src/i830_dri.c b/src/i830_dri.c
index 5bb77556..2fdc116e 100644
--- a/src/i830_dri.c
+++ b/src/i830_dri.c
@@ -354,7 +354,7 @@ I830DRI2CopyRegion(DrawablePtr drawable, RegionPtr pRegion,
* We can't rely on getting into the block handler before the DRI
* client gets to run again so flush now. */
intel->need_mi_flush = FALSE;
- intel_batch_flush(scrn, TRUE);
+ intel_batch_flush(scrn);
#if ALWAYS_SYNC
intel_sync(scrn);
#endif
diff --git a/src/i830_driver.c b/src/i830_driver.c
index 1e0d124c..7ba91360 100644
--- a/src/i830_driver.c
+++ b/src/i830_driver.c
@@ -2172,9 +2172,9 @@ I830BlockHandler(int i, pointer blockData, pointer pTimeout, pointer pReadmask)
/* Flush the batch, so that any rendering is executed in a timely
* fashion.
*/
- intel_batch_flush(scrn, flush);
- if (intel->have_gem)
- drmCommandNone(intel->drmSubFD, DRM_I915_GEM_THROTTLE);
+ if (flush)
+ intel_batch_pipelined_flush(scrn);
+ drmCommandNone(intel->drmSubFD, DRM_I915_GEM_THROTTLE);
intel->need_mi_flush = FALSE;
}
@@ -2805,7 +2805,7 @@ static void i830AdjustFrame(int scrnIndex, int x, int y, int flags)
if (crtc && crtc->enabled)
{
/* Sync the engine before adjust frame */
- I830Sync(scrn);
+ intel_sync(scrn);
i830PipeSetBase(crtc, crtc->desiredX + x, crtc->desiredY + y);
crtc->x = output->initial_x + x;
crtc->y = output->initial_y + y;
diff --git a/src/i830_uxa.c b/src/i830_uxa.c
index 96e10b17..ed368053 100644
--- a/src/i830_uxa.c
+++ b/src/i830_uxa.c
@@ -92,7 +92,7 @@ i830_get_aperture_space(ScrnInfoPtr scrn, drm_intel_bo ** bo_table,
bo_table[0] = intel->batch_bo;
if (drm_intel_bufmgr_check_aperture_space(bo_table, num_bos) != 0) {
- intel_batch_flush(scrn, FALSE);
+ intel_batch_flush(scrn);
bo_table[0] = intel->batch_bo;
if (drm_intel_bufmgr_check_aperture_space(bo_table, num_bos) !=
0) {
@@ -572,7 +572,7 @@ static Bool i830_uxa_prepare_access(PixmapPtr pixmap, uxa_access_t access)
if (!list_is_empty(&priv->batch) &&
(access == UXA_ACCESS_RW || priv->batch_write_domain))
- intel_batch_flush(scrn, FALSE);
+ intel_batch_flush(scrn);
if (bo) {
@@ -580,7 +580,7 @@ static Bool i830_uxa_prepare_access(PixmapPtr pixmap, uxa_access_t access)
if (!scrn->vtSema || !intel->have_gem) {
if ((ret = dri_bo_map(bo, access == UXA_ACCESS_RW)) != 0) {
xf86DrvMsg(scrn->scrnIndex, X_WARNING,
- "%s: bo map failed\n",
+ "%s: bo map failed %s\n",
__FUNCTION__, strerror(-ret));
return FALSE;
}
@@ -598,7 +598,7 @@ static Bool i830_uxa_prepare_access(PixmapPtr pixmap, uxa_access_t access)
if ((ret = dri_bo_map(bo, access == UXA_ACCESS_RW))
!= 0) {
xf86DrvMsg(scrn->scrnIndex, X_WARNING,
- "%s: bo map failed\n",
+ "%s: bo map failed %s\n",
__FUNCTION__, strerror(-ret));
return FALSE;
}
diff --git a/src/i830_video.c b/src/i830_video.c
index 797037e3..e076582c 100644
--- a/src/i830_video.c
+++ b/src/i830_video.c
@@ -485,7 +485,7 @@ i830_overlay_continue(ScrnInfoPtr scrn, Bool update_filter)
ADVANCE_BATCH();
OVERLAY_DEBUG("overlay_continue\n");
- I830Sync(scrn);
+ intel_sync(scrn);
}
static void
@@ -509,7 +509,7 @@ i830_overlay_off(ScrnInfoPtr scrn)
OUT_BATCH(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_BATCH(MI_NOOP);
ADVANCE_BATCH();
- I830Sync(scrn);
+ intel_sync(scrn);
}
/*
@@ -530,7 +530,7 @@ i830_overlay_off(ScrnInfoPtr scrn)
OUT_BATCH(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_BATCH(MI_NOOP);
ADVANCE_BATCH();
- I830Sync(scrn);
+ intel_sync(scrn);
}
intel->overlayOn = FALSE;
OVERLAY_DEBUG("overlay_off\n");
diff --git a/src/i965_render.c b/src/i965_render.c
index cb057d77..a6a0d6b9 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -1627,7 +1627,7 @@ i965_prepare_composite(int op, PicturePtr source_picture,
}
if (!i965_composite_check_aperture(scrn)) {
- intel_batch_flush(scrn, FALSE);
+ intel_batch_flush(scrn);
if (!i965_composite_check_aperture(scrn)) {
intel_debug_fallback(scrn,
"Couldn't fit render operation "
@@ -1805,7 +1805,7 @@ i965_composite(PixmapPtr dest, int srcX, int srcY, int maskX, int maskY,
drm_intel_bo_subdata(vb_bo, render_state->vb_offset * 4, i * 4, vb);
if (!i965_composite_check_aperture(scrn))
- intel_batch_flush(scrn, FALSE);
+ intel_batch_flush(scrn);
intel_batch_start_atomic(scrn, 200);
if (intel->needs_render_state_emit)
diff --git a/src/i965_video.c b/src/i965_video.c
index f17999cf..cc9b309b 100644
--- a/src/i965_video.c
+++ b/src/i965_video.c
@@ -1213,7 +1213,7 @@ I965DisplayVideoTextured(ScrnInfoPtr scrn,
if (drm_intel_bufmgr_check_aperture_space(bo_table,
ARRAY_SIZE(bo_table))
< 0) {
- intel_batch_flush(scrn, FALSE);
+ intel_batch_flush(scrn);
}
intel_batch_start_atomic(scrn, 100);