author    Chris Wilson <chris@chris-wilson.co.uk>    2012-04-20 13:21:40 +0100
committer Chris Wilson <chris@chris-wilson.co.uk>    2012-04-20 13:21:40 +0100
commit    aff3614efd5c12e658fa5723934e5bd50a83a316 (patch)
tree      0bfc68d93723f8e5e986cdcc109d9fc16deb25b1 /src
parent    cb6a3dc2edf3cd612f833bc9a4656166735ee856 (diff)
sna: Always clear the mmapped domains when reusing partial upload buffers
We need to make sure that we invalidate the caches appropriately on reuse. Mildly paranoid, but strictly required by the spec.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
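For context, the cache invalidation the message refers to is performed by the kernel's set-domain ioctl, which the diff below wraps in a new kgem_bo_sync__gtt() helper. A minimal standalone sketch of the same pattern, assuming an open DRM fd and a valid GEM handle (the set_gtt_domain name and its arguments are illustrative, not part of this commit):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>	/* drmIoctl(), from libdrm */
#include <i915_drm.h>	/* struct drm_i915_gem_set_domain, I915_GEM_DOMAIN_GTT */

/* Move a buffer object into the GTT domain for both reading and
 * writing. The kernel flushes and invalidates caches as required,
 * so subsequent access through a GTT mmap sees coherent data.
 * Returns 0 on success (as reported by drmIoctl). */
static int set_gtt_domain(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_domain set_domain;

	memset(&set_domain, 0, sizeof(set_domain));
	set_domain.handle = handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
}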
Diffstat (limited to 'src')
-rw-r--r--  src/sna/kgem.c  41
-rw-r--r--  src/sna/kgem.h   1
2 files changed, 31 insertions(+), 11 deletions(-)
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 72b6ad77..d97f5590 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3414,6 +3414,29 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	}
 }
 
+void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo)
+{
+	assert(bo->proxy == NULL);
+	kgem_bo_submit(kgem, bo);
+
+	if (bo->domain != DOMAIN_GTT) {
+		struct drm_i915_gem_set_domain set_domain;
+
+		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
+		     bo->needs_flush, bo->domain, kgem_busy(kgem, bo->handle)));
+
+		VG_CLEAR(set_domain);
+		set_domain.handle = bo->handle;
+		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+
+		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
+			kgem_bo_retire(kgem, bo);
+			bo->domain = DOMAIN_GTT;
+		}
+	}
+}
+
 void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 {
 	assert(!bo->reusable);
@@ -3424,7 +3447,6 @@ void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 
 void kgem_sync(struct kgem *kgem)
 {
-	struct drm_i915_gem_set_domain set_domain;
 	struct kgem_request *rq;
 	struct kgem_bo *bo;
 
@@ -3437,14 +3459,7 @@ void kgem_sync(struct kgem *kgem)
 	if (rq == kgem->next_request)
 		_kgem_submit(kgem);
 
-	VG_CLEAR(set_domain);
-	set_domain.handle = rq->bo->handle;
-	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-
-	drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-	kgem_retire(kgem);
-
+	kgem_bo_sync__gtt(kgem, rq->bo);
 	list_for_each_entry(bo, &kgem->sync_list, list)
 		kgem_bo_sync__cpu(kgem, bo);
 
@@ -3599,8 +3614,12 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			bo->used = size;
 			list_move(&bo->base.list, &kgem->active_partials);
 
-			if (bo->base.vmap)
-				kgem_bo_sync__cpu(kgem, &bo->base);
+			if (bo->mmapped) {
+				if (IS_CPU_MAP(bo->base.map))
+					kgem_bo_sync__cpu(kgem, &bo->base);
+				else
+					kgem_bo_sync__gtt(kgem, &bo->base);
+			}
 			goto done;
 		} while (kgem_retire(kgem));
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 913e1a9d..1235b83f 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -365,6 +365,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
+void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
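A hedged usage sketch of the new helper, using only functions declared above (the kgem, bo, data and len names are illustrative, and this fragment is not taken from the driver): a caller writing through a GTT mapping of a reused buffer syncs first, so any stale domain state is cleared before the copy.

	/* Illustrative caller, not part of this commit. */
	void *ptr = kgem_bo_map__gtt(kgem, bo);	/* map the bo through the GTT aperture */
	if (ptr) {
		kgem_bo_sync__gtt(kgem, bo);	/* submit pending work, move bo to the GTT domain */
		memcpy(ptr, data, len);		/* writes now land in a coherent, invalidated map */
	}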