author     Michel Dänzer <michel.daenzer@amd.com>      2018-07-20 16:56:22 +0200
committer  Michel Dänzer <michel@daenzer.net>          2018-08-17 10:02:08 +0200
commit     c42f6e2e61d166c8d3ef3fcad175d7050a00288b (patch)
tree       3773c3e0794a3ae38f7bb728969d7d02ad001676 /src/radeon_drm_queue.c
parent     ba83a866af5a3784fc4822614375cc081e93197c (diff)
Defer vblank event handling while waiting for a pending flip
This is to avoid submitting more flips while we are waiting for pending
ones to complete.

(Ported from amdgpu commit e52872da69ecc84dafb3355839e35b0383f0d228)

Acked-by: Alex Deucher <alexander.deucher@amd.com>
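To illustrate the intended calling pattern (a hypothetical sketch, not code from this patch): radeon_drm_wait_pending_flip() raises the per-CRTC wait_flip_nesting_level counter, and the new radeon_drm_queue_handle_deferred() lowers it again and, once it drops back to zero, handles any vblank events that were parked on radeon_drm_vblank_signalled in the meantime. A flip submission path would therefore bracket its work with the two calls, roughly like this:

/* Hypothetical caller sketch -- the function name and flow are illustrative
 * only, not taken from this patch. Assumes the driver's usual headers
 * (radeon.h, radeon_drm_queue.h, drmmode_display.h) are in scope.
 */
static void
example_flip_path(xf86CrtcPtr crtc)
{
	/* Bumps drmmode_crtc->wait_flip_nesting_level and drains any
	 * flips that have already signalled. */
	radeon_drm_wait_pending_flip(crtc);

	/* ... submit the new flip here; vblank events that arrive now
	 * stay queued on radeon_drm_vblank_signalled instead of being
	 * handled, so their handlers cannot submit another flip while
	 * this one is still pending ... */

	/* Drops wait_flip_nesting_level; when it reaches zero, the
	 * deferred vblank events are handled. */
	radeon_drm_queue_handle_deferred(crtc);
}

Deferring rather than dropping the events means no vblank handler is lost; it simply runs after the currently pending flip has completed.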
Diffstat (limited to 'src/radeon_drm_queue.c')
-rw-r--r--   src/radeon_drm_queue.c   41
1 file changed, 39 insertions(+), 2 deletions(-)
diff --git a/src/radeon_drm_queue.c b/src/radeon_drm_queue.c
index 3d2f4d15..857278fd 100644
--- a/src/radeon_drm_queue.c
+++ b/src/radeon_drm_queue.c
@@ -118,6 +118,30 @@ radeon_drm_vblank_handler(int fd, unsigned int frame, unsigned int sec,
 }
 
 /*
+ * Handle deferred DRM vblank events
+ *
+ * This function must be called after radeon_drm_wait_pending_flip, once
+ * it's safe to attempt queueing a flip again
+ */
+void
+radeon_drm_queue_handle_deferred(xf86CrtcPtr crtc)
+{
+	drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
+	struct radeon_drm_queue_entry *e, *tmp;
+
+	if (drmmode_crtc->wait_flip_nesting_level == 0 ||
+	    --drmmode_crtc->wait_flip_nesting_level > 0)
+		return;
+
+	xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_signalled, list) {
+		drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;
+
+		if (drmmode_crtc->wait_flip_nesting_level == 0)
+			radeon_drm_queue_handle_one(e);
+	}
+}
+
+/*
  * Enqueue a potential drm response; when the associated response
  * appears, we've got data to pass to the handler from here
  */
@@ -191,6 +215,13 @@ radeon_drm_abort_entry(uintptr_t seq)
 	if (seq == RADEON_DRM_QUEUE_ERROR)
 		return;
 
+	xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_signalled, list) {
+		if (e->seq == seq) {
+			radeon_drm_abort_one(e);
+			return;
+		}
+	}
+
 	xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_queue, list) {
 		if (e->seq == seq) {
 			radeon_drm_abort_one(e);
@@ -229,8 +260,12 @@ radeon_drm_handle_event(int fd, drmEventContext *event_context)
 	xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_flip_signalled, list)
 		radeon_drm_queue_handle_one(e);
 
-	xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_signalled, list)
-		radeon_drm_queue_handle_one(e);
+	xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_vblank_signalled, list) {
+		drmmode_crtc_private_ptr drmmode_crtc = e->crtc->driver_private;
+
+		if (drmmode_crtc->wait_flip_nesting_level == 0)
+			radeon_drm_queue_handle_one(e);
+	}
 
 	return r;
 }
@@ -244,6 +279,8 @@ void radeon_drm_wait_pending_flip(xf86CrtcPtr crtc)
 	RADEONEntPtr pRADEONEnt = RADEONEntPriv(crtc->scrn);
 	struct radeon_drm_queue_entry *e, *tmp;
 
+	drmmode_crtc->wait_flip_nesting_level++;
+
 	xorg_list_for_each_entry_safe(e, tmp, &radeon_drm_flip_signalled, list)
 		radeon_drm_queue_handle_one(e);