diff options
author | Chris Wilson <chris@chris-wilson.co.uk> | 2012-07-16 18:58:30 +0100 |
---|---|---|
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2012-07-16 18:58:30 +0100 |
commit | 107feed2a4ca044313c70f83a62909187ff1f905 (patch) | |
tree | 368617e3c8e1c4c85de6615fead76c2f56c8e245 /src | |
parent | 818c21165c746b7b410a6e6e23b1675d88db685d (diff) |
sna: Disable snoopable upload buffers for gen4
The sampler really does not like using snoopable buffers...
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'src')
-rw-r--r-- | src/sna/gen4_render.c | 2 | ||||
-rw-r--r-- | src/sna/kgem.c | 21 | ||||
-rw-r--r-- | src/sna/kgem.h | 6 |
3 files changed, 25 insertions, 4 deletions
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c index c6fbddb7..c985c8d3 100644 --- a/src/sna/gen4_render.c +++ b/src/sna/gen4_render.c @@ -725,6 +725,8 @@ gen4_bind_bo(struct sna *sna, uint32_t domains; uint16_t offset; + assert(!kgem_bo_is_vmap(bo)); + /* After the first bind, we manage the cache domains within the batch */ if (is_dst) { domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER; diff --git a/src/sna/kgem.c b/src/sna/kgem.c index 51fc29db..d6ed4e0b 100644 --- a/src/sna/kgem.c +++ b/src/sna/kgem.c @@ -3813,6 +3813,18 @@ static struct kgem_partial_bo *partial_bo_alloc(int num_pages) return bo; } +static inline bool +use_snoopable_buffer(struct kgem *kgem, uint32_t flags) +{ + if (kgem->gen == 40) + return false; + + if (kgem->gen < 30) + return flags & KGEM_BUFFER_WRITE; + + return true; +} + struct kgem_bo *kgem_create_buffer(struct kgem *kgem, uint32_t size, uint32_t flags, void **ret) @@ -4056,7 +4068,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem, alloc = NUM_PAGES(size); flags &= ~KGEM_BUFFER_INPLACE; - if (flags & KGEM_BUFFER_WRITE && kgem->has_cache_level) { + if (kgem->has_cache_level && use_snoopable_buffer(kgem, flags)) { uint32_t handle; handle = gem_create(kgem->fd, alloc); @@ -4079,13 +4091,14 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem, DBG(("%s: created handle=%d for buffer\n", __FUNCTION__, bo->base.handle)); + bo->base.reusable = false; + bo->base.vmap = true; + bo->mem = kgem_bo_map__cpu(kgem, &bo->base); if (bo->mem) { bo->mmapped = true; bo->need_io = false; bo->base.io = true; - bo->base.reusable = false; - bo->base.vmap = true; goto init; } else { bo->base.refcnt = 0; /* for valgrind */ @@ -4094,7 +4107,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem, } } - if (flags & KGEM_BUFFER_WRITE && kgem->has_vmap) { + if (kgem->has_vmap && use_snoopable_buffer(kgem, flags)) { bo = partial_bo_alloc(alloc); if (bo) { uint32_t handle = gem_vmap(kgem->fd, bo->mem, diff --git a/src/sna/kgem.h 
b/src/sna/kgem.h index 8e9b006a..63be2185 100644 --- a/src/sna/kgem.h +++ b/src/sna/kgem.h @@ -500,6 +500,12 @@ static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo) return kgem_bo_size(bo) <= kgem->aperture_mappable / 4; } +static inline bool kgem_bo_is_vmap(struct kgem_bo *bo) +{ + while (bo->proxy) + bo = bo->proxy; + return bo->vmap; +} static inline bool kgem_bo_is_busy(struct kgem_bo *bo) { |