author     Chris Wilson <chris@chris-wilson.co.uk>   2014-01-10 12:01:29 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>   2014-01-10 12:05:17 +0000
commit     b351f4a0009ce0eddb5866bd49ac59860a44f522 (patch)
tree       e8f87426a6816c5b6e116200ba22ba186a031d6a
parent     b1694c8ea5c0f2ce1ded2ab25aecfde80da16346 (diff)
sna/dri: Handle TearFree vblanks whilst the pipe is off
An extra complication in handling TearFree is that if we attempt to
requeue a swap whilst the pipe is off, that requeue fails. As we have
recursed from the pixmap migration path, we have to abort the blit but
still send the completion event back to the client so that it unblocks.
Reported-by: Harald Judt <h.judt@gmx.at>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=73469
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
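
For orientation, here is a minimal, self-contained sketch of the requeue-on-recursion
pattern the commit message describes; the swap_info, request_vblank_event, swap_complete
and blit_swap names are stand-ins invented for illustration, not driver code, and the
real logic lives in chain_swap() and sna_dri_vblank_handler() in the diff below.

#include <stdbool.h>
#include <stdio.h>

struct swap_info {
        int pipe;                        /* -1 models "pipe is off" */
        bool recursed_from_migration;    /* stands in for shadow_flip && !shadow_damage */
};

/* Stand-in for the DRM vblank request; fails when the pipe is off. */
static bool request_vblank_event(int pipe)
{
        return pipe >= 0;
}

/* Stand-in for DRI2SwapComplete(): this is what unblocks the client. */
static void swap_complete(struct swap_info *info)
{
        printf("swap complete on pipe %d\n", info->pipe);
}

/* Stand-in for the vsync'ed copy that normally services the swap. */
static void blit_swap(struct swap_info *info)
{
        printf("blitting swap on pipe %d\n", info->pipe);
}

static void handle_swap_vblank(struct swap_info *info)
{
        if (info->recursed_from_migration) {
                /* Recursed from pixmap migration: do not blit, try to
                 * requeue the swap for a later vblank instead. */
                if (request_vblank_event(info->pipe))
                        return;
                /* Requeue failed (pipe off): abort the blit but fall
                 * through so the client still receives its event. */
        } else {
                blit_swap(info);
        }
        swap_complete(info);
}

int main(void)
{
        struct swap_info off = { .pipe = -1, .recursed_from_migration = true };
        handle_swap_vblank(&off);        /* requeue fails, client is still unblocked */
        return 0;
}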
-rw-r--r--   src/sna/sna_display.c   204
-rw-r--r--   src/sna/sna_dri.c        77
2 files changed, 157 insertions(+), 124 deletions(-)
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 4818cfc4..5bce7642 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -897,6 +897,105 @@ sna_crtc_apply(xf86CrtcPtr crtc)
         return true;
 }
 
+struct wait_for_shadow {
+        RegionRec region;
+        struct kgem_bo *bo;
+};
+
+static bool wait_for_shadow(struct sna *sna, struct sna_pixmap *priv, unsigned flags)
+{
+        struct wait_for_shadow *wait = priv->move_to_gpu_data;
+        struct kgem_bo *bo = wait->bo;
+        PixmapPtr pixmap = priv->pixmap;
+        DamagePtr damage;
+        bool ret = true;
+
+        DBG(("%s: flags=%x, shadow_flip=%d, handle=%d, wait=%d, old=%d\n",
+             __FUNCTION__, flags, sna->mode.shadow_flip,
+             priv->gpu_bo->handle, wait->bo->handle, sna->mode.shadow->handle));
+
+        assert(wait->bo != priv->gpu_bo);
+
+        if (flags == 0 || pixmap != sna->front || !sna->mode.shadow_damage)
+                goto done;
+
+        if ((flags & MOVE_WRITE) == 0)
+                return true;
+
+        assert(sna->mode.shadow_active);
+        assert(bo == sna->mode.shadow);
+
+        assert(priv->gpu_bo->refcnt >= 1);
+        sna->mode.shadow = priv->gpu_bo;
+
+        damage = sna->mode.shadow_damage;
+        sna->mode.shadow_damage = NULL;
+
+        while (sna->mode.shadow_flip && sna_mode_has_pending_events(sna))
+                sna_mode_wakeup(sna);
+
+        if (sna->mode.shadow_flip) {
+                bo = kgem_create_2d(&sna->kgem,
+                                    pixmap->drawable.width,
+                                    pixmap->drawable.height,
+                                    pixmap->drawable.bitsPerPixel,
+                                    priv->gpu_bo->tiling,
+                                    CREATE_EXACT | CREATE_SCANOUT);
+                if (bo != NULL) {
+                        DBG(("%s: replacing still-attached GPU bo\n",
+                             __FUNCTION__));
+
+                        kgem_bo_destroy(&sna->kgem, wait->bo);
+                        RegionUninit(&wait->region);
+
+                        wait->region.extents.x1 = 0;
+                        wait->region.extents.y1 = 0;
+                        wait->region.extents.x2 = pixmap->drawable.width;
+                        wait->region.extents.y2 = pixmap->drawable.height;
+                        wait->region.data = NULL;
+                } else {
+                        while (sna->mode.shadow_flip &&
+                               sna_mode_wait_for_event(sna))
+                                sna_mode_wakeup(sna);
+
+                        bo = wait->bo;
+                }
+        }
+
+        sna->mode.shadow_damage = damage;
+
+        if (flags & MOVE_READ) {
+                DBG(("%s: copying existing GPU damage: %ldx(%d, %d), (%d, %d)\n",
+                     __FUNCTION__, (long)REGION_NUM_RECTS(&wait->region),
+                     wait->region.extents.x1, wait->region.extents.y1,
+                     wait->region.extents.x2, wait->region.extents.y2));
+                ret = sna->render.copy_boxes(sna, GXcopy,
+                                             pixmap, priv->gpu_bo, 0, 0,
+                                             pixmap, bo, 0, 0,
+                                             REGION_RECTS(&wait->region),
+                                             REGION_NUM_RECTS(&wait->region),
+                                             0);
+        }
+
+        if (priv->cow)
+                sna_pixmap_undo_cow(sna, priv, 0);
+
+        sna_pixmap_unmap(pixmap, priv);
+        priv->gpu_bo = bo;
+
+        sna_dri_pixmap_update_bo(sna, pixmap);
+
+done:
+        kgem_bo_destroy(&sna->kgem, wait->bo);
+        RegionUninit(&wait->region);
+        free(wait);
+
+        priv->move_to_gpu_data = NULL;
+        priv->move_to_gpu = NULL;
+
+        return ret;
+}
+
 static bool sna_mode_enable_shadow(struct sna *sna)
 {
         ScreenPtr screen = sna->scrn->pScreen;
@@ -918,11 +1017,17 @@ static bool sna_mode_enable_shadow(struct sna *sna)
 
 static void sna_mode_disable_shadow(struct sna *sna)
 {
+        struct sna_pixmap *priv;
+
         if (!sna->mode.shadow_damage)
                 return;
 
         DBG(("%s\n", __FUNCTION__));
 
+        priv = sna_pixmap(sna->front);
+        if (priv->move_to_gpu == wait_for_shadow)
+                priv->move_to_gpu(sna, priv, 0);
+
         DamageUnregister(&sna->front->drawable, sna->mode.shadow_damage);
         DamageDestroy(sna->mode.shadow_damage);
         sna->mode.shadow_damage = NULL;
@@ -4331,105 +4436,6 @@ sna_crtc_redisplay(xf86CrtcPtr crtc, RegionPtr region)
                 sna_crtc_redisplay__fallback(crtc, region, sna_crtc->bo);
 }
 
-struct wait_for_shadow {
-        RegionRec region;
-        struct kgem_bo *bo;
-};
-
-static bool wait_for_shadow(struct sna *sna, struct sna_pixmap *priv, unsigned flags)
-{
-        struct wait_for_shadow *wait = priv->move_to_gpu_data;
-        struct kgem_bo *bo = wait->bo;
-        PixmapPtr pixmap = priv->pixmap;
-        DamagePtr damage;
-        bool ret = true;
-
-        DBG(("%s: flags=%x, shadow_flip=%d, handle=%d, wait=%d, old=%d\n",
-             __FUNCTION__, flags, sna->mode.shadow_flip,
-             priv->gpu_bo->handle, wait->bo->handle, sna->mode.shadow->handle));
-
-        assert(wait->bo != priv->gpu_bo);
-
-        if (flags == 0 || pixmap != sna->front || !sna->mode.shadow_damage)
-                goto done;
-
-        if ((flags & MOVE_WRITE) == 0)
-                return true;
-
-        assert(sna->mode.shadow_active);
-        assert(bo == sna->mode.shadow);
-
-        assert(priv->gpu_bo->refcnt >= 1);
-        sna->mode.shadow = priv->gpu_bo;
-
-        damage = sna->mode.shadow_damage;
-        sna->mode.shadow_damage = NULL;
-
-        while (sna->mode.shadow_flip && sna_mode_has_pending_events(sna))
-                sna_mode_wakeup(sna);
-
-        if (sna->mode.shadow_flip) {
-                bo = kgem_create_2d(&sna->kgem,
-                                    pixmap->drawable.width,
-                                    pixmap->drawable.height,
-                                    pixmap->drawable.bitsPerPixel,
-                                    priv->gpu_bo->tiling,
-                                    CREATE_EXACT | CREATE_SCANOUT);
-                if (bo != NULL) {
-                        DBG(("%s: replacing still-attached GPU bo\n",
-                             __FUNCTION__));
-
-                        kgem_bo_destroy(&sna->kgem, wait->bo);
-                        RegionUninit(&wait->region);
-
-                        wait->region.extents.x1 = 0;
-                        wait->region.extents.y1 = 0;
-                        wait->region.extents.x2 = pixmap->drawable.width;
-                        wait->region.extents.y2 = pixmap->drawable.height;
-                        wait->region.data = NULL;
-                } else {
-                        while (sna->mode.shadow_flip &&
-                               sna_mode_wait_for_event(sna))
-                                sna_mode_wakeup(sna);
-
-                        bo = wait->bo;
-                }
-        }
-
-        sna->mode.shadow_damage = damage;
-
-        if (flags & MOVE_READ) {
-                DBG(("%s: copying existing GPU damage: %ldx(%d, %d), (%d, %d)\n",
-                     __FUNCTION__, (long)REGION_NUM_RECTS(&wait->region),
-                     wait->region.extents.x1, wait->region.extents.y1,
-                     wait->region.extents.x2, wait->region.extents.y2));
-                ret = sna->render.copy_boxes(sna, GXcopy,
-                                             pixmap, priv->gpu_bo, 0, 0,
-                                             pixmap, bo, 0, 0,
-                                             REGION_RECTS(&wait->region),
-                                             REGION_NUM_RECTS(&wait->region),
-                                             0);
-        }
-
-        if (priv->cow)
-                sna_pixmap_undo_cow(sna, priv, 0);
-
-        sna_pixmap_unmap(pixmap, priv);
-        priv->gpu_bo = bo;
-
-        sna_dri_pixmap_update_bo(sna, pixmap);
-
-done:
-        kgem_bo_destroy(&sna->kgem, wait->bo);
-        RegionUninit(&wait->region);
-        free(wait);
-
-        priv->move_to_gpu_data = NULL;
-        priv->move_to_gpu = NULL;
-
-        return ret;
-}
-
 static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo, RegionPtr region)
 {
         struct sna_pixmap *priv = sna_pixmap(pixmap);
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index bf970e04..fedf2632 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1299,8 +1299,27 @@ static void chain_swap(struct sna *sna,
 
         DBG(("%s: emitting chained vsync'ed blit\n", __FUNCTION__));
 
-        chain->bo = __sna_dri_copy_region(sna, draw, NULL,
-                                          chain->back, chain->front, true);
+        if (sna->mode.shadow_flip && !sna->mode.shadow_damage) {
+                /* recursed from wait_for_shadow(), simply requeue */
+                DBG(("%s -- recursed from wait_for_shadow(), requeuing\n", __FUNCTION__));
+                chain->type = DRI2_SWAP;
+
+                VG_CLEAR(vbl);
+                vbl.request.type =
+                        DRM_VBLANK_RELATIVE |
+                        DRM_VBLANK_EVENT |
+                        pipe_select(chain->pipe);
+                vbl.request.sequence = 1;
+                vbl.request.signal = (unsigned long)chain;
+
+                if (!sna_wait_vblank(sna, &vbl))
+                        return;
+
+                DBG(("%s -- requeue failed, errno=%d\n", __FUNCTION__, errno));
+        } else {
+                chain->bo = __sna_dri_copy_region(sna, draw, NULL,
+                                                  chain->back, chain->front, true);
+        }
 
         DRI2SwapComplete(chain->client, draw,
                          frame, tv_sec, tv_usec,
@@ -1339,6 +1358,7 @@ static bool sna_dri_blit_complete(struct sna *sna,
                 return false;
         }
 
+        DBG(("%s: blit finished\n", __FUNCTION__));
         return true;
 }
 
@@ -1350,25 +1370,9 @@ void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
         DBG(("%s(type=%d)\n", __FUNCTION__, info->type));
 
         draw = info->draw;
-        if (draw == NULL)
+        if (draw == NULL) {
+                DBG(("%s -- drawable gone\n", __FUNCTION__));
                 goto done;
-
-        if (sna->mode.shadow_flip && !sna->mode.shadow_damage) {
-                drmVBlank vbl;
-
-                /* recursed from wait_for_shadow(), simply requeue */
-                VG_CLEAR(vbl);
-                vbl.request.type =
-                        DRM_VBLANK_RELATIVE |
-                        DRM_VBLANK_EVENT |
-                        pipe_select(info->pipe);
-                vbl.request.sequence = 1;
-                vbl.request.signal = (unsigned long)info;
-
-                if (sna_wait_vblank(sna, &vbl))
-                        goto done;
-
-                return;
         }
 
         switch (info->type) {
@@ -1380,9 +1384,29 @@ void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 
                 /* else fall through to blit */
         case DRI2_SWAP:
-                info->bo = __sna_dri_copy_region(sna, draw, NULL,
-                                                 info->back, info->front, true);
-                info->type = DRI2_SWAP_WAIT;
+                if (sna->mode.shadow_flip && !sna->mode.shadow_damage) {
+                        drmVBlank vbl;
+
+                        /* recursed from wait_for_shadow(), simply requeue */
+                        DBG(("%s -- recursed from wait_for_shadow(), requeuing\n", __FUNCTION__));
+
+                        VG_CLEAR(vbl);
+                        vbl.request.type =
+                                DRM_VBLANK_RELATIVE |
+                                DRM_VBLANK_EVENT |
+                                pipe_select(info->pipe);
+                        vbl.request.sequence = 1;
+                        vbl.request.signal = (unsigned long)info;
+
+                        if (!sna_wait_vblank(sna, &vbl))
+                                return;
+
+                        DBG(("%s -- requeue failed, errno=%d\n", __FUNCTION__, errno));
+                } else {
+                        info->bo = __sna_dri_copy_region(sna, draw, NULL,
+                                                         info->back, info->front, true);
+                        info->type = DRI2_SWAP_WAIT;
+                }
                 /* fall through to SwapComplete */
         case DRI2_SWAP_WAIT:
                 if (!sna_dri_blit_complete(sna, info))
@@ -1439,7 +1463,7 @@ sna_dri_immediate_blit(struct sna *sna,
                 sync = false;
 
         DBG(("%s: emitting immediate blit, throttling client, synced? %d, chained? %d, send-event? %d\n",
-             __FUNCTION__, sync, sna_dri_window_get_chain((WindowPtr)draw) == info,
+             __FUNCTION__, sync, sna_dri_window_get_chain((WindowPtr)draw) != info,
             event));
 
         if (sync) {
@@ -1470,9 +1494,12 @@ sna_dri_immediate_blit(struct sna *sna,
                         vbl.request.signal = (unsigned long)info;
                         ret = !sna_wait_vblank(sna, &vbl);
                 }
-        } else
+        } else {
+                DBG(("%s: pending blit, chained\n", __FUNCTION__));
                 ret = true;
+        }
         } else {
+                DBG(("%s: immediate blit\n", __FUNCTION__));
                 info->bo = __sna_dri_copy_region(sna, draw, NULL,
                                                  info->back, info->front, false);
                 if (event)