author     Chris Wilson <chris@chris-wilson.co.uk>   2016-08-11 21:21:02 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>   2016-08-11 21:39:36 +0100
commit     c26a148541e321f90870bd937a6ff1967f9873ed
tree       40c6e976bc01b457a835791c457286a9c7f1cbea
parent     52343d7da1cc8f3aef3497dfac5d16c249b2a63d
sna: Avoid recursing whilst waiting for events within TearFree
Occasionally TearFree likes to flush pending events to try and avoid a
stall or a reallocation of a buffer. When it does so, we must avoid
processing vblanks, as they may cause an update to the frontbuffer,
re-entering the TearFree handler. Instead of deferring the obvious
recursions until the next vblank, buffer the events temporarily whilst
we wait inside TearFree.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
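[Editor's note: as context for the patch below, here is a minimal standalone sketch of the defer-and-flush pattern the commit introduces: while a wait flag is set, incoming events are copied into a growable buffer instead of being dispatched, and they are replayed once the wait completes. All names here (event_queue, queue_defer, queue_flush) are hypothetical; the real code keeps the buffer in sna->mode and dispatches to sna_dri2_vblank_handler()/sna_present_vblank_handler().]

/* Illustrative sketch only, not part of the patch. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct event {
	unsigned long user_data;
};

struct event_queue {
	struct event *events;	/* growable buffer of deferred events */
	int nevent;
	int size;
	bool waiting;		/* models sna->mode.shadow_wait */
};

/* Buffer an event whilst we are inside the wait. */
static void queue_defer(struct event_queue *q, const struct event *e)
{
	if (q->nevent == q->size) {
		/* Double the buffer; on allocation failure drop the
		 * event, as the patch does. */
		int size = q->size * 2;
		void *ptr = realloc(q->events, sizeof(*e) * size);
		if (!ptr)
			return;
		q->events = ptr;
		q->size = size;
	}
	memcpy(&q->events[q->nevent++], e, sizeof(*e));
}

/* Replay everything once the wait is over and recursion is safe. */
static void queue_flush(struct event_queue *q)
{
	for (int n = 0; n < q->nevent; n++)
		printf("dispatching deferred event %lu\n",
		       q->events[n].user_data);
	q->nevent = 0;
}

int main(void)
{
	struct event_queue q = { .size = 4, .waiting = true };
	q.events = malloc(q.size * sizeof(*q.events));
	if (!q.events)
		return 1;

	for (unsigned long i = 0; i < 8; i++) {
		struct event e = { .user_data = i };
		if (q.waiting)
			queue_defer(&q, &e); /* would otherwise recurse */
	}

	q.waiting = false;	/* the wait has completed */
	queue_flush(&q);

	free(q.events);
	return 0;
}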
 src/sna/sna.h         |  4
 src/sna/sna_display.c | 60
 src/sna/sna_dri2.c    | 21
 3 files changed, 61 insertions(+), 24 deletions(-)
diff --git a/src/sna/sna.h b/src/sna/sna.h
index eb3489d4..c4978cbc 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -312,6 +312,10 @@ struct sna {
 		bool shadow_wait;
 		bool dirty;
 
+		struct drm_event_vblank *shadow_events;
+		int shadow_nevent;
+		int shadow_size;
+
 		int max_crtc_width, max_crtc_height;
 		RegionRec shadow_region;
 		RegionRec shadow_cancel;
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index aa8b4fb5..96793281 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1523,6 +1523,48 @@ static bool overlap(const BoxRec *a, const BoxRec *b)
 	return true;
 }
 
+static void defer_event(struct sna *sna, struct drm_event *base)
+{
+	if (sna->mode.shadow_nevent == sna->mode.shadow_size) {
+		int size = sna->mode.shadow_size * 2;
+		void *ptr;
+
+		ptr = realloc(sna->mode.shadow_events,
+			      sizeof(struct drm_event_vblank)*size);
+		if (!ptr)
+			return;
+
+		sna->mode.shadow_events = ptr;
+		sna->mode.shadow_size = size;
+	}
+
+	memcpy(&sna->mode.shadow_events[sna->mode.shadow_nevent++],
+	       base, sizeof(struct drm_event_vblank));
+	DBG(("%s: deferring event count=%d\n", __FUNCTION__, sna->mode.shadow_nevent));
+}
+
+static void flush_events(struct sna *sna)
+{
+	int n;
+
+	if (!sna->mode.shadow_nevent)
+		return;
+
+	DBG(("%s: flushing %d events\n", __FUNCTION__, sna->mode.shadow_nevent));
+
+	for (n = 0; n < sna->mode.shadow_nevent; n++) {
+		struct drm_event_vblank *vb = &sna->mode.shadow_events[n];
+
+		if ((uintptr_t)(vb->user_data) & 2)
+			sna_present_vblank_handler(vb);
+		else
+			sna_dri2_vblank_handler(vb);
+	}
+
+	sna->mode.shadow_nevent = 0;
+}
+
+
 static bool wait_for_shadow(struct sna *sna,
 			    struct sna_pixmap *priv,
 			    unsigned flags)
@@ -1545,8 +1587,7 @@ static bool wait_for_shadow(struct sna *sna,
 		goto done;
 
 	assert(sna->mode.shadow_damage);
-	if (sna->mode.shadow_wait)
-		return ret;
+	assert(!sna->mode.shadow_wait);
 
 	if ((flags & MOVE_WRITE) == 0) {
 		if ((flags & __MOVE_SCANOUT) == 0) {
@@ -1748,6 +1789,7 @@ done:
 	priv->move_to_gpu = NULL;
 
 	assert(!sna->mode.shadow_wait);
+	flush_events(sna);
 
 	return ret;
 }
@@ -7308,6 +7350,11 @@ bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 	if (!sna_mode_fake_init(sna, num_fake))
 		return false;
 
+	sna->mode.shadow_size = 256;
+	sna->mode.shadow_events = malloc(sna->mode.shadow_size * sizeof(struct drm_event_vblank));
+	if (!sna->mode.shadow_events)
+		return false;
+
 	if (!sna_probe_initial_configuration(sna)) {
 		xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(scrn);
 
@@ -8504,7 +8551,10 @@ sna_crtc_redisplay(xf86CrtcPtr crtc, RegionPtr region, struct kgem_bo *bo)
 
 static void shadow_flip_handler(struct drm_event_vblank *e, void *data)
 {
-	sna_mode_redisplay(data);
+	struct sna *sna = data;
+
+	if (!sna->mode.shadow_wait)
+		sna_mode_redisplay(sna);
 }
 
 void sna_shadow_set_crtc(struct sna *sna,
@@ -9098,7 +9148,9 @@ again:
 			struct drm_event *e = (struct drm_event *)&buffer[i];
 			switch (e->type) {
 			case DRM_EVENT_VBLANK:
-				if (((uintptr_t)((struct drm_event_vblank *)e)->user_data) & 2)
+				if (sna->mode.shadow_wait)
+					defer_event(sna, e);
+				else if (((uintptr_t)((struct drm_event_vblank *)e)->user_data) & 2)
 					sna_present_vblank_handler((struct drm_event_vblank *)e);
 				else
 					sna_dri2_vblank_handler((struct drm_event_vblank *)e);
diff --git a/src/sna/sna_dri2.c b/src/sna/sna_dri2.c
index 2c3a3ed7..876a9095 100644
--- a/src/sna/sna_dri2.c
+++ b/src/sna/sna_dri2.c
@@ -2514,15 +2514,6 @@ static void chain_swap(struct sna_dri2_event *chain)
 	switch (chain->type) {
 	case SWAP_COMPLETE:
 		DBG(("%s: emitting chained vsync'ed blit\n", __FUNCTION__));
-		if (chain->sna->mode.shadow_wait) {
-			/* recursed from wait_for_shadow(), simply requeue */
-			DBG(("%s -- recursed from wait_for_shadow(), requeuing\n", __FUNCTION__));
-			if (sna_next_vblank(chain))
-				return;
-
-			DBG(("%s -- requeue failed, errno=%d\n", __FUNCTION__, errno));
-		}
-
 		if (can_xchg(chain->sna, chain->draw, chain->front, chain->back)) {
 			sna_dri2_xchg(chain->draw, chain->front, chain->back);
 		} else if (can_xchg_crtc(chain->sna, chain->draw, chain->crtc,
@@ -2609,10 +2600,7 @@ void sna_dri2_vblank_handler(struct drm_event_vblank *event)
 		/* else fall through to blit */
 	case SWAP:
 		assert(info->signal);
-		if (sna->mode.shadow_wait) {
-			/* recursed from wait_for_shadow(), simply requeue */
-			DBG(("%s -- recursed from wait_for_shadow(), requeuing\n", __FUNCTION__));
-		} else if (can_xchg(info->sna, draw, info->front, info->back)) {
+		if (can_xchg(info->sna, draw, info->front, info->back)) {
 			sna_dri2_xchg(draw, info->front, info->back);
 			info->type = SWAP_COMPLETE;
 		} else if (can_xchg_crtc(sna, draw, info->crtc,
@@ -2655,13 +2643,6 @@ void sna_dri2_vblank_handler(struct drm_event_vblank *event)
 		     info->pending.bo->handle, info->pending.name,
 		     info->pending.bo->active_scanout,
 		     get_private(info->front)->bo->handle, info->front->name,
 		     get_private(info->front)->bo->active_scanout));
 
-		if (sna->mode.shadow_wait) {
-			/* recursed from wait_for_shadow(), simply requeue */
-			DBG(("%s -- recursed from wait_for_shadow(), requeuing\n", __FUNCTION__));
-			if (sna_next_vblank(info))
-				return;
-		}
-
 		assert(info->pending.bo->active_scanout > 0);
 		info->pending.bo->active_scanout--;
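[Editor's note: a word on the dispatch test in flush_events() and in the DRM_EVENT_VBLANK case above. The driver routes a vblank event by testing bit 1 (value 2) of the event's user_data, which carries a pointer whose low bits are free for tagging because the pointed-to allocation is aligned. Below is a hypothetical sketch of that pointer-tagging idiom; tag_present(), untag() and struct request are illustrative names, not driver API.]

/* Illustrative sketch of low-bit pointer tagging; assumes the pointed-to
 * object is at least 4-byte aligned so bits 0-1 of its address are zero. */
#include <stdint.h>
#include <stdio.h>

struct request { int id; };

/* Set bit 1 to mark the request as a Present event. */
static void *tag_present(struct request *r)
{
	return (void *)((uintptr_t)r | 2);
}

/* Strip the tag bits to recover the original pointer. */
static struct request *untag(void *p)
{
	return (struct request *)((uintptr_t)p & ~(uintptr_t)3);
}

int main(void)
{
	struct request r = { 42 };
	void *user_data = tag_present(&r);

	/* Mirrors the "& 2" test in flush_events(). */
	if ((uintptr_t)user_data & 2)
		printf("present request %d\n", untag(user_data)->id);
	else
		printf("dri2 request %d\n", untag(user_data)->id);
	return 0;
}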