author     Chris Wilson <chris@chris-wilson.co.uk>  2012-10-05 12:33:32 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>  2012-10-05 12:43:55 +0100
commit     3680aa4976407886eb4be9878d5296d5a1fadccf (patch)
tree       1071cc2ba0084b20e9cb2f6159d2659141088eef /src
parent     d717a67ff4a59d0c2cc108cedbecdc20b4c9fb97 (diff)
sna: Do not create an unsnooped CPU mapping for readbacks
Otherwise we notice that we have a CPU mmap during the read synchronisation and presume that we need not take any further action. However...

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
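The gist of the change: the buffer-cache lookup in kgem_create_buffer() now asks for a CPU-mappable buffer only when the caller will write through the CPU, so a pure readback buffer can no longer pick up an unsnooped CPU mmap that would make kgem_buffer_read_sync() skip the needed synchronisation (the assert added in the last hunk guards exactly that). Below is a minimal, self-contained sketch of the hint selection, assuming illustrative flag values and a stubbed-out cache lookup; only the identifiers CREATE_INACTIVE, CREATE_CPU_MAP, KGEM_BUFFER_WRITE and search_linear_cache come from the diff, the rest is made up for illustration.

/* Sketch only: flag values, the stub body and the wrapper function are
 * illustrative; the real definitions live in src/sna/kgem.c and kgem.h. */
#include <stddef.h>

#define CREATE_INACTIVE   0x1
#define CREATE_CPU_MAP    0x2
#define KGEM_BUFFER_WRITE 0x4

struct kgem;
struct kgem_bo;

/* Stand-in for the driver's bo-cache lookup. */
static struct kgem_bo *
search_linear_cache(struct kgem *kgem, int alloc, unsigned hint)
{
	(void)kgem; (void)alloc; (void)hint;
	return NULL;
}

/* Hypothetical helper mirroring the kgem_create_buffer() hunk: request a
 * CPU mapping only when the buffer will be written through the CPU, so a
 * readback buffer can never reuse an unsnooped CPU mmap. */
static struct kgem_bo *
lookup_reusable_buffer(struct kgem *kgem, int alloc, unsigned flags)
{
	unsigned hint = CREATE_INACTIVE;

	if (flags & KGEM_BUFFER_WRITE)
		hint |= CREATE_CPU_MAP;

	return search_linear_cache(kgem, alloc, hint);
}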
Diffstat (limited to 'src')
-rw-r--r--  src/sna/kgem.c | 18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index bc2e66ea..8c9cb6cf 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4755,6 +4755,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
		init_buffer_from_bo(bo, old);
		bo->need_io = flags & KGEM_BUFFER_WRITE;
	} else {
+		unsigned hint;
+
		if (use_snoopable_buffer(kgem, flags)) {
			bo = create_snoopable_buffer(kgem, alloc);
			if (bo)
@@ -4765,10 +4767,12 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
		if (bo == NULL)
			return NULL;

-		old = search_linear_cache(kgem, alloc,
-					  CREATE_INACTIVE | CREATE_CPU_MAP);
+		hint = CREATE_INACTIVE;
+		if (flags & KGEM_BUFFER_WRITE)
+			hint |= CREATE_CPU_MAP;
+		old = search_linear_cache(kgem, alloc, hint);
		if (old) {
-			DBG(("%s: reusing cpu map handle=%d for buffer\n",
+			DBG(("%s: reusing handle=%d for buffer\n",
			     __FUNCTION__, old->handle));

			alloc = num_pages(old);
@@ -4791,9 +4795,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
		assert(!bo->need_io);
		assert(bo->base.refcnt == 1);

-		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-		if (bo->mem != NULL) {
-			if (flags & KGEM_BUFFER_WRITE)
+		if (flags & KGEM_BUFFER_WRITE) {
+			bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
+			if (bo->mem != NULL)
				kgem_bo_sync__cpu(kgem, &bo->base);
			goto init;
		}
@@ -4967,6 +4971,8 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
	     bo->base.domain,
	     kgem_busy(kgem, bo->base.handle)));

+	assert(!IS_CPU_MAP(bo->base.map) || bo->base.snoop || kgem->has_llc);
+
	VG_CLEAR(set_domain);
	set_domain.handle = bo->base.handle;
	set_domain.write_domain = 0;