| author | Chris Wilson <chris@chris-wilson.co.uk> | 2012-09-12 13:47:26 +0100 |
|---|---|---|
| committer | Chris Wilson <chris@chris-wilson.co.uk> | 2012-09-12 14:58:51 +0100 |
| commit | 4b4abdaae94d164d5d0b2755907e76b9cbe0c988 (patch) | |
| tree | 94947112692a8f8abb259755a6808be297c6cc75 /src | |
| parent | 15911f533d6a7ef40e42ba5921fac7c62b290f8b (diff) | |
sna: Flush after operating on large buffers
Since operations on large buffers are likely to be slow and to consume
precious GTT space, mark those buffers as candidates for flushing.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
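
The idea is straightforward: at allocation time, any buffer object too large to fall into one of the reuse cache's size buckets is tagged with bo->flush, so that batches referencing it are submitted promptly and its GTT space can be reclaimed sooner. Below is a minimal, self-contained sketch of that marking step; `demo_bo`, `cache_bucket()`, and the `NUM_CACHE_BUCKETS` value here are illustrative stand-ins, not the driver's real definitions.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins only -- not the real kgem definitions. */
#define NUM_CACHE_BUCKETS 16

struct demo_bo {
        unsigned num_pages;     /* size of the bo in pages */
        bool flush;             /* candidate for automatic flushing */
};

/* Map a size in pages to a cache bucket (rough log2 bucketing;
 * the real kgem helper differs in detail). */
static unsigned cache_bucket(unsigned num_pages)
{
        unsigned bucket = 0;
        while (num_pages >>= 1)
                bucket++;
        return bucket;
}

/* Allocation-side marking from the patch: a bo too large for any
 * cache bucket is flagged so later busy checks will flush it out. */
static void mark_if_large(struct demo_bo *bo)
{
        if (cache_bucket(bo->num_pages) >= NUM_CACHE_BUCKETS) {
                printf("marking large bo (%u pages) for automatic flushing\n",
                       bo->num_pages);
                bo->flush = true;
        }
}

int main(void)
{
        struct demo_bo small = { .num_pages = 64, .flush = false };
        struct demo_bo huge  = { .num_pages = 1u << 18, .flush = false };

        mark_if_large(&small);  /* bucket 6  -> stays unmarked */
        mark_if_large(&huge);   /* bucket 18 -> marked */

        printf("small.flush=%d huge.flush=%d\n", small.flush, huge.flush);
        return 0;
}
```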
Diffstat (limited to 'src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/sna/kgem.c | 12 |
| -rw-r--r-- | src/sna/kgem.h | 2 |
| -rw-r--r-- | src/sna/sna_accel.c | 5 |
| -rw-r--r-- | src/sna/sna_io.c | 1 |
4 files changed, 13 insertions, 7 deletions
```diff
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 902cba7e..727cb511 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1286,6 +1286,7 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 		return;
 	}
 
+	assert(bo->flush == false);
 	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
 	if (bo->map) {
 		int type = IS_CPU_MAP(bo->map);
@@ -1504,7 +1505,6 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	assert(bo->snoop == false);
 	assert(bo->io == false);
 	assert(bo->scanout == false);
-	assert(bo->flush == false);
 
 	if (bo->rq) {
 		struct list *cache;
@@ -3416,13 +3416,19 @@ create:
 	bo->pitch = pitch;
 	if (tiling != I915_TILING_NONE)
 		bo->tiling = gem_set_tiling(kgem->fd, handle, tiling, pitch);
+	if (bucket >= NUM_CACHE_BUCKETS) {
+		DBG(("%s: marking large bo for automatic flushing\n",
+		     __FUNCTION__));
+		bo->flush = true;
+	}
 
 	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));
 
 	debug_alloc__bo(kgem, bo);
 
-	DBG(("  new pitch=%d, tiling=%d, handle=%d, id=%d\n",
-	     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
+	DBG(("  new pitch=%d, tiling=%d, handle=%d, id=%d, num_pages=%d [%d], bucket=%d\n",
+	     bo->pitch, bo->tiling, bo->handle, bo->unique_id,
+	     size, num_pages(bo), bucket(bo)));
 
 	return bo;
 }
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 1dc9c673..fb8be3d0 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -532,6 +532,8 @@ static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
 {
 	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
 	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
+	if (kgem_flush(kgem))
+		kgem_submit(kgem);
 	if (bo->rq && !bo->exec)
 		kgem_retire(kgem);
 	return kgem_bo_is_busy(bo);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4ef90196..e12585e7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3524,10 +3524,9 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	}
 
 	/* And mark as having a valid GTT mapping for future uploads */
-	if (priv->stride &&
-	    !kgem_bo_is_busy(priv->gpu_bo)) {
+	if (priv->stride && kgem_bo_can_map(&sna->kgem, priv->gpu_bo)) {
 		pixmap->devPrivate.ptr =
-			kgem_bo_map(&sna->kgem, priv->gpu_bo);
+			kgem_bo_map__async(&sna->kgem, priv->gpu_bo);
 		if (pixmap->devPrivate.ptr) {
 			priv->mapped = true;
 			pixmap->devKind = priv->gpu_bo->pitch;
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 733e542c..0860dec2 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -1165,7 +1165,6 @@ bool sna_replace(struct sna *sna,
 		 pixmap->drawable.height,
 		 pixmap->drawable.bitsPerPixel,
 		 bo->tiling, busy));
-	assert(!bo->flush);
 
 	if ((busy || !kgem_bo_can_map(kgem, bo)) &&
 	    indirect_replace(sna, pixmap, bo, src, stride))
```
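
For readers following the kgem.h hunk in isolation, here is a hedged, self-contained sketch of the check-side pattern it introduces: a busy query first submits any outstanding batch that still references flush-marked buffers. `demo_kgem`, `needs_flush()`, `submit_batch()`, and `bo_is_busy()` are hypothetical stand-ins for `kgem_flush()`, `kgem_submit()`, and `__kgem_bo_is_busy()`; the sketch models the control flow only, not the real driver state.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical kgem-like state -- stand-ins, not the driver's real API. */
struct demo_kgem {
        bool batch_pending;             /* commands queued but not submitted */
        unsigned flush_bos_in_batch;    /* flush-marked bos the batch touches */
};

/* Stand-in for kgem_flush(): does the queued batch reference any bo
 * that was marked for automatic flushing? */
static bool needs_flush(const struct demo_kgem *kgem)
{
        return kgem->batch_pending && kgem->flush_bos_in_batch > 0;
}

/* Stand-in for kgem_submit(): hand the queued batch to the kernel. */
static void submit_batch(struct demo_kgem *kgem)
{
        printf("submitting batch early (%u flush-marked bos)\n",
               kgem->flush_bos_in_batch);
        kgem->batch_pending = false;
        kgem->flush_bos_in_batch = 0;
}

/* The pattern added to __kgem_bo_is_busy(): before answering the busy
 * query, kick any batch that still holds flush-marked (large) bos so
 * their GTT space is released as soon as possible. */
static bool bo_is_busy(struct demo_kgem *kgem, bool bo_on_gpu)
{
        if (needs_flush(kgem))
                submit_batch(kgem);
        return bo_on_gpu;
}

int main(void)
{
        struct demo_kgem kgem = { .batch_pending = true, .flush_bos_in_batch = 2 };

        printf("busy? %d\n", bo_is_busy(&kgem, true));
        printf("busy? %d\n", bo_is_busy(&kgem, false)); /* no second submit */
        return 0;
}
```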