| author | Chris Wilson <chris@chris-wilson.co.uk> | 2015-11-12 09:17:17 +0000 |
|---|---|---|
| committer | Chris Wilson <chris@chris-wilson.co.uk> | 2015-11-12 09:19:28 +0000 |
| commit | 7490b9ec263b87b3669096579ec0f0066ec328cb (patch) | |
| tree | b7c63366fbad2aa037c93eaa01d40a2212299187 | |
| parent | e769f9e6ca0b2575f598baf8e2f7dab02a48d6a0 (diff) | |
sna: Wait upon the same ring when out-of-memory
The current out-of-memory allocation code was waiting upon the wrong
request list, using the ring id rather than the ring index, causing an
issue if forced to wait upon the BLT ring. If we still cannot allocate
after that wait, make sure that all caches are dropped and retry.
References: https://bugs.freedesktop.org/show_bug.cgi?id=92911
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
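The bug is the classic id-versus-index confusion: a hardware ring id was used directly as a subscript into the per-ring request queues. Below is a minimal C sketch of the hazard, not the actual kgem code; it assumes, purely for illustration, that ring ids outnumber request queues, and every name in it (`ring_id`, `NUM_QUEUES`, `queue_index`, `wait_for_ring`) is hypothetical.

```c
#include <assert.h>

/* Hypothetical ring ids: more ids than queues (illustrative only). */
enum ring_id { RING_RENDER = 0, RING_BSD, RING_BLT };

#define NUM_QUEUES 2 /* assumed: render/bsd share a queue, blt has its own */

struct request_queue { int pending; };
static struct request_queue requests[NUM_QUEUES];

/* Translate a ring id into its request-queue index. */
static int queue_index(enum ring_id id)
{
	return id == RING_BLT; /* 0 for render/bsd, 1 for blt */
}

static void wait_for_ring(enum ring_id id)
{
	/* Buggy form: requests[id] -- for RING_BLT (== 2) this indexes
	 * past the end of the array. The fix maps id to an index first. */
	int n = queue_index(id);
	assert(n >= 0 && n < NUM_QUEUES);
	while (requests[n].pending)
		; /* stand-in for blocking on the queue's oldest request */
}

int main(void)
{
	wait_for_ring(RING_BLT); /* returns immediately: nothing pending */
	return 0;
}
```

The commit's `int ring = kgem->ring = KGEM_BLT;` followed by `assert(ring < ARRAY_SIZE(kgem->requests))` pins the ring selection and the queue subscript to the same, bounds-checked value.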
-rw-r--r-- | src/sna/kgem.c | 22 |
1 file changed, 19 insertions(+), 3 deletions(-)
```diff
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index bc790c03..6d6e76a8 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1769,21 +1769,29 @@ restart:
 	if (kgem->batch_bo)
 		kgem->batch = kgem_bo_map__cpu(kgem, kgem->batch_bo);
 	if (kgem->batch == NULL) {
+		int ring = kgem->ring = KGEM_BLT;
+		assert(ring < ARRAY_SIZE(kgem->requests));
+
 		if (kgem->batch_bo) {
 			kgem_bo_destroy(kgem, kgem->batch_bo);
 			kgem->batch_bo = NULL;
 		}
 
-		assert(kgem->ring < ARRAY_SIZE(kgem->requests));
-		if (!list_is_empty(&kgem->requests[kgem->ring])) {
+		if (!list_is_empty(&kgem->requests[ring])) {
 			struct kgem_request *rq;
 
-			rq = list_first_entry(&kgem->requests[kgem->ring],
+			rq = list_first_entry(&kgem->requests[ring],
					      struct kgem_request, list);
+			assert(rq->ring == ring);
+			assert(rq->bo);
+			assert(RQ(rq->bo->rq) == rq);
 			if (kgem_bo_wait(kgem, rq->bo) == 0)
 				goto restart;
 		}
 
+		if (kgem_cleanup_cache(kgem))
+			goto restart;
+
 		DBG(("%s: unable to map batch bo, mallocing(size=%d)\n",
 		     __FUNCTION__, sizeof(uint32_t)*kgem->batch_size));
 		if (posix_memalign((void **)&kgem->batch, PAGE_SIZE,
@@ -3159,6 +3167,8 @@ static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
 				      struct kgem_request,
 				      list);
 		assert(rq->ring == ring);
+		assert(rq->bo);
+		assert(RQ(rq->bo->rq) == rq);
 		if (__kgem_busy(kgem, rq->bo->handle))
 			break;
 
@@ -3252,6 +3262,8 @@ bool __kgem_ring_is_idle(struct kgem *kgem, int ring)
 	rq = list_last_entry(&kgem->requests[ring],
 			     struct kgem_request, list);
 	assert(rq->ring == ring);
+	assert(rq->bo);
+	assert(RQ(rq->bo->rq) == rq);
 	if (__kgem_busy(kgem, rq->bo->handle)) {
 		DBG(("%s: last requests handle=%d still busy\n",
 		     __FUNCTION__, rq->bo->handle));
@@ -3419,6 +3431,7 @@ static void kgem_commit(struct kgem *kgem)
 	} else {
 		assert(rq != (struct kgem_request *)kgem);
 		assert(rq->ring < ARRAY_SIZE(kgem->requests));
+		assert(rq->bo);
 		list_add_tail(&rq->list, &kgem->requests[rq->ring]);
 		kgem->need_throttle = kgem->need_retire = 1;
 
@@ -4442,6 +4455,9 @@ bool kgem_cleanup_cache(struct kgem *kgem)
 				      list);
 
 		DBG(("%s: sync on cleanup\n", __FUNCTION__));
+		assert(rq->ring == n);
+		assert(rq->bo);
+		assert(RQ(rq->bo->rq) == rq);
 		kgem_bo_wait(kgem, rq->bo);
 	}
 	assert(list_is_empty(&kgem->requests[n]));
```
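Read end to end, the patched allocation path forms a three-step fallback ladder. The sketch below mirrors that shape under assumed helper names: `map_batch_bo()`, `wait_oldest_request()` and `drop_all_caches()` are hypothetical stand-ins for `kgem_bo_map__cpu()`, the `kgem_bo_wait()` on the first queued request, and `kgem_cleanup_cache()`; only the `posix_memalign()` tail is taken from the real code.

```c
#define _POSIX_C_SOURCE 200112L
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* Stubs for the real kgem entry points (hypothetical names). */
static uint32_t *map_batch_bo(void) { return NULL; }    /* mapping fails under OOM */
static bool wait_oldest_request(void) { return false; } /* cf. kgem_bo_wait() */
static bool drop_all_caches(void) { return false; }     /* cf. kgem_cleanup_cache() */

static uint32_t *alloc_batch(size_t batch_size)
{
	uint32_t *batch;

restart:
	batch = map_batch_bo();
	if (batch == NULL) {
		/* 1. Block on the oldest request of the ring we will use,
		 *    hoping its retirement frees enough memory. */
		if (wait_oldest_request())
			goto restart;

		/* 2. Still stuck: purge every cache, then retry once more. */
		if (drop_all_caches())
			goto restart;

		/* 3. Final fallback: a plain page-aligned allocation. */
		if (posix_memalign((void **)&batch, PAGE_SIZE,
				   batch_size * sizeof(uint32_t)))
			return NULL;
	}
	return batch;
}

int main(void)
{
	uint32_t *batch = alloc_batch(16 * 1024);
	free(batch);
	return 0;
}
```

The ordering is deliberate: waiting on the oldest request is the cheap option, since memory is reclaimed as requests retire, so the wholesale cache purge is attempted only once that fails, and a plain allocation is the last resort.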