summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2012-11-21 16:03:02 +0000
committerChris Wilson <chris@chris-wilson.co.uk>2012-11-21 16:04:24 +0000
commit9c627a05247690891062a2c0c1c8f7bbc0273104 (patch)
treed4f52f49aabb84a711935bd9cc680097eae97e85
parentc0c48c7a5aca4d24936efbeaefc7674ada2ef87f (diff)
sna: Remove the kgem_bo_is_mappable refcnt assertion from freed paths
A few callers of kgem_bo_is_mappable operate on a freed bo, and so need to avoid the assert(bo->refcnt). References: https://bugs.freedesktop.org/show_bug.cgi?id=47597 Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
-rw-r--r--src/sna/kgem.c6
-rw-r--r--src/sna/kgem.h17
2 files changed, 14 insertions, 9 deletions
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ac60957e..034e1827 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1374,7 +1374,7 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
if (bo->map) {
int type = IS_CPU_MAP(bo->map);
if (bucket(bo) >= NUM_CACHE_BUCKETS ||
- (!type && !kgem_bo_is_mappable(kgem, bo))) {
+ (!type && !__kgem_bo_is_mappable(kgem, bo))) {
munmap(MAP(bo->map), bytes(bo));
bo->map = NULL;
}
@@ -4777,7 +4777,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
CREATE_EXACT | CREATE_INACTIVE | CREATE_GTT_MAP);
if (old == NULL) {
old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
- if (old && !kgem_bo_is_mappable(kgem, old)) {
+ if (old && !__kgem_bo_is_mappable(kgem, old)) {
_kgem_bo_destroy(kgem, old);
old = NULL;
}
@@ -4785,7 +4785,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
if (old) {
DBG(("%s: reusing handle=%d for buffer\n",
__FUNCTION__, old->handle));
- assert(kgem_bo_is_mappable(kgem, old));
+ assert(__kgem_bo_is_mappable(kgem, old));
assert(!old->snoop);
assert(old->rq == NULL);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 01efc8ed..c20b4f30 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -483,13 +483,9 @@ static inline bool kgem_bo_can_blt(struct kgem *kgem,
return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
-static inline bool kgem_bo_is_mappable(struct kgem *kgem,
- struct kgem_bo *bo)
+static inline bool __kgem_bo_is_mappable(struct kgem *kgem,
+ struct kgem_bo *bo)
{
- DBG(("%s: domain=%d, offset: %d size: %d\n",
- __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
- assert(bo->refcnt);
-
if (bo->domain == DOMAIN_GTT)
return true;
@@ -503,6 +499,15 @@ static inline bool kgem_bo_is_mappable(struct kgem *kgem,
return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}
+static inline bool kgem_bo_is_mappable(struct kgem *kgem,
+ struct kgem_bo *bo)
+{
+ DBG(("%s: domain=%d, offset: %d size: %d\n",
+ __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
+ assert(bo->refcnt);
+ return __kgem_bo_is_mappable(kgem, bo);
+}
+
static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
DBG(("%s: map=%p, tiling=%d, domain=%d\n",