diff options
author | Chris Wilson <chris@chris-wilson.co.uk> | 2015-11-06 19:52:24 +0000 |
---|---|---|
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2015-11-06 20:23:01 +0000 |
commit | 198246201fe9a07a60b4e1084dcf9ba2e06b5ef5 (patch) | |
tree | b9f5aa922e7ab24b65f88151d7e7b61c07c4b8f4 | |
parent | af680486608686ade375f5737bff556343366230 (diff) |
sna: Avoid handing back a cached pinned batch
A few places hold on to the request->bo as a means for checking the
fence completion. This means that it can have an elevated refcnt and so
we have to be careful to double-check that the batch buffers in our
cache are not only idle, but also not being used by anybody else. For example,
in the DRI2 code, it can happen that the fence is shared between two
windows and therefore the second window thinks that its fence is still
busy as the first issues a new request touching the old fence.
Reported-by: Jan Kundrát
Reported-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r-- | src/sna/kgem.c | 60 |
1 files changed, 31 insertions, 29 deletions
diff --git a/src/sna/kgem.c b/src/sna/kgem.c index 181a49fc..7acc69eb 100644 --- a/src/sna/kgem.c +++ b/src/sna/kgem.c @@ -3838,6 +3838,32 @@ static int compact_batch_surface(struct kgem *kgem, int *shrink) return size * sizeof(uint32_t); } +static struct kgem_bo *first_available(struct kgem *kgem, struct list *list) +{ + struct kgem_bo *bo; + + list_for_each_entry(bo, list, list) { + assert(bo->refcnt > 0); + + if (bo->rq) { + assert(RQ(bo->rq)->bo == bo); + if (__kgem_busy(kgem, bo->handle)) + break; + + __kgem_retire_rq(kgem, RQ(bo->rq)); + assert(bo->rq == NULL); + } + + if (bo->refcnt > 1) + continue; + + list_move_tail(&bo->list, list); + return kgem_bo_reference(bo); + } + + return NULL; +} + static struct kgem_bo * kgem_create_batch(struct kgem *kgem) { @@ -3851,40 +3877,15 @@ kgem_create_batch(struct kgem *kgem) size = kgem->nbatch * sizeof(uint32_t); if (size <= 4096) { - bo = list_first_entry(&kgem->pinned_batches[0], - struct kgem_bo, - list); - if (!bo->rq) { -out_4096: - assert(bo->refcnt > 0); - list_move_tail(&bo->list, &kgem->pinned_batches[0]); - bo = kgem_bo_reference(bo); + bo = first_available(kgem, &kgem->pinned_batches[0]); + if (bo) goto write; - } - - if (!__kgem_busy(kgem, bo->handle)) { - assert(RQ(bo->rq)->bo == bo); - __kgem_retire_rq(kgem, RQ(bo->rq)); - goto out_4096; - } } if (size <= 16384) { - bo = list_first_entry(&kgem->pinned_batches[1], - struct kgem_bo, - list); - if (!bo->rq) { -out_16384: - assert(bo->refcnt > 0); - list_move_tail(&bo->list, &kgem->pinned_batches[1]); - bo = kgem_bo_reference(bo); + bo = first_available(kgem, &kgem->pinned_batches[1]); + if (bo) goto write; - } - - if (!__kgem_busy(kgem, bo->handle)) { - __kgem_retire_rq(kgem, RQ(bo->rq)); - goto out_16384; - } } if (kgem->gen == 020) { @@ -4069,6 +4070,7 @@ void _kgem_submit(struct kgem *kgem) if (rq->bo) { struct drm_i915_gem_execbuffer2 execbuf; + assert(rq->bo->refcnt == 1); assert(!rq->bo->needs_flush); i = kgem->nexec++; |