author     Chris Wilson <chris@chris-wilson.co.uk>    2013-09-22 14:39:57 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>    2013-09-23 10:48:52 +0100
commit     42330fbae862cda9ca17ec62eb0d2e4fb86032b8
tree       4604c304ef5d8ff0886d7d0c598cff22192994b8
parent     dd130d1b06e8828d7a2471761bac36093b9a2391
sna: Track CPU/GTT maps independently
Now that we use CPU mmaps to read/write to tiled X surfaces, we find
ourselves frequently switching between CPU and GTT mmaps and so wish to
cache both.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
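The gist of the change: instead of a single tagged bo->map pointer (whose low bits encoded whether the cached mapping was CPU or GTT), each buffer object now carries two independent fields, map__cpu and map__gtt, so taking one kind of mapping no longer tears down the other. Below is a minimal, self-contained C sketch of that caching pattern, not the driver's actual code; the mock_* names are hypothetical stand-ins, and the real paths are kgem_bo_map, kgem_bo_map__cpu and kgem_bo_free in src/sna/kgem.c.

#include <stdlib.h>
#include <stddef.h>

/* Stand-ins for the DRM mmap ioctls; here they simply allocate memory. */
static void *mock_mmap(size_t size) { return malloc(size); }
static void  mock_munmap(void *ptr, size_t size) { (void)size; free(ptr); }

struct mock_bo {
	size_t size;
	void *map__cpu;   /* cached CPU (write-back) mapping, NULL if unmapped */
	void *map__gtt;   /* cached GTT (write-combining) mapping, NULL if unmapped */
};

/* Return a CPU mapping, creating and caching it on first use;
 * any existing GTT mapping is left untouched. */
static void *mock_bo_map_cpu(struct mock_bo *bo)
{
	if (bo->map__cpu == NULL)
		bo->map__cpu = mock_mmap(bo->size);
	return bo->map__cpu;
}

/* Return a GTT mapping, creating and caching it on first use;
 * any existing CPU mapping is left untouched. */
static void *mock_bo_map_gtt(struct mock_bo *bo)
{
	if (bo->map__gtt == NULL)
		bo->map__gtt = mock_mmap(bo->size);
	return bo->map__gtt;
}

/* Both cached mappings are released only when the bo itself is freed. */
static void mock_bo_free(struct mock_bo *bo)
{
	if (bo->map__gtt)
		mock_munmap(bo->map__gtt, bo->size);
	if (bo->map__cpu)
		mock_munmap(bo->map__cpu, bo->size);
	free(bo);
}

In the real driver the cached maps are additionally accounted in per-type vma caches (kgem->vma[0] for GTT, kgem->vma[1] for CPU), so idle mappings can still be trimmed under address-space pressure.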
-rw-r--r--  src/sna/gen3_render.c |   2
-rw-r--r--  src/sna/gen4_vertex.c |   2
-rw-r--r--  src/sna/kgem.c        | 289
-rw-r--r--  src/sna/kgem.h        |  17
-rw-r--r--  src/sna/sna_accel.c   |  79
-rw-r--r--  src/sna/sna_io.c      |   6
6 files changed, 150 insertions(+), 245 deletions(-)
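One consequence visible throughout kgem.c: upload buffers can no longer infer their map type from pointer tag bits, so the bookkeeping becomes an explicit tri-state. The layout below is condensed and illustrative only; in the real code map__cpu/map__gtt live on struct kgem_bo while mmapped is a bitfield of struct kgem_buffer, and only user maps still carry a pointer tag via MAKE_USER_MAP.

enum { MMAPPED_NONE, MMAPPED_GTT, MMAPPED_CPU };

struct illustrative_buffer {
	void *map__cpu;       /* cached CPU mmap (may be a tagged user map) */
	void *map__gtt;       /* cached GTT mmap */
	unsigned mmapped : 2; /* MMAPPED_*: which mapping, if any, backs the buffer's memory */
};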
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c index 63dd5cc1..cb8f046d 100644 --- a/src/sna/gen3_render.c +++ b/src/sna/gen3_render.c @@ -2305,7 +2305,7 @@ static void gen3_vertex_close(struct sna *sna) sna->render.vertices = sna->render.vertex_data; sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data); free_bo = bo; - } else if (IS_CPU_MAP(bo->map)) { + } else if (sna->render.vertices == MAP(bo->map__cpu)) { DBG(("%s: converting CPU map to GTT\n", __FUNCTION__)); sna->render.vertices = kgem_bo_map__gtt(&sna->kgem, bo); if (sna->render.vertices == NULL) { diff --git a/src/sna/gen4_vertex.c b/src/sna/gen4_vertex.c index 3c4911af..85e74137 100644 --- a/src/sna/gen4_vertex.c +++ b/src/sna/gen4_vertex.c @@ -169,7 +169,7 @@ void gen4_vertex_close(struct sna *sna) sna->render.vertices = sna->render.vertex_data; sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data); free_bo = bo; - } else if (IS_CPU_MAP(bo->map) && !sna->kgem.has_llc) { + } else if (!sna->kgem.has_llc && sna->render.vertices == MAP(bo->map__cpu)) { DBG(("%s: converting CPU map to GTT\n", __FUNCTION__)); sna->render.vertices = kgem_bo_map__gtt(&sna->kgem, sna->render.vbo); diff --git a/src/sna/kgem.c b/src/sna/kgem.c index 5863d2a6..ce89658b 100644 --- a/src/sna/kgem.c +++ b/src/sna/kgem.c @@ -110,10 +110,8 @@ search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags); #define MAX_CPU_VMA_CACHE INT16_MAX #define MAP_PRESERVE_TIME 10 -#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1)) -#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3)) -#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2) -#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3) +#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1)) +#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 1) #define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring))) @@ -176,7 +174,12 @@ struct kgem_buffer { uint32_t used; uint32_t need_io : 1; uint32_t write : 2; - uint32_t mmapped : 1; + uint32_t mmapped : 2; +}; +enum { + MMAPPED_NONE, + MMAPPED_GTT, + MMAPPED_CPU }; static struct kgem_bo *__kgem_freed_bo; @@ -1628,26 +1631,6 @@ static void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo) } } -static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo) -{ - int type = IS_CPU_MAP(bo->map); - - assert(!IS_USER_MAP(bo->map)); - - DBG(("%s: releasing %s vma for handle=%d, count=%d\n", - __FUNCTION__, type ? 
"CPU" : "GTT", - bo->handle, kgem->vma[type].count)); - - VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo))); - munmap(MAP(bo->map), bytes(bo)); - bo->map = NULL; - - if (!list_is_empty(&bo->vma)) { - list_del(&bo->vma); - kgem->vma[type].count--; - } -} - static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo) { DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle)); @@ -1663,21 +1646,31 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo) kgem_bo_binding_free(kgem, bo); - if (IS_USER_MAP(bo->map)) { + if (IS_USER_MAP(bo->map__cpu)) { assert(bo->rq == NULL); assert(!__kgem_busy(kgem, bo->handle)); - assert(MAP(bo->map) != bo || bo->io || bo->flush); + assert(MAP(bo->map__cpu) != bo || bo->io || bo->flush); if (!(bo->io || bo->flush)) { DBG(("%s: freeing snooped base\n", __FUNCTION__)); - assert(bo != MAP(bo->map)); - free(MAP(bo->map)); + assert(bo != MAP(bo->map__cpu)); + free(MAP(bo->map__cpu)); } - bo->map = NULL; + bo->map__cpu = NULL; + } + + DBG(("%s: releasing %p:%p vma for handle=%d, count=%d\n", + __FUNCTION__, bo->map__gtt, bo->map__cpu, + bo->handle, list_is_empty(&bo->vma) ? 0 : kgem->vma[bo->map__gtt == NULL].count)); + + if (!list_is_empty(&bo->vma)) { + _list_del(&bo->vma); + kgem->vma[bo->map__gtt == NULL].count--; } - if (bo->map) - kgem_bo_release_map(kgem, bo); - assert(list_is_empty(&bo->vma)); - assert(bo->map == NULL); + + if (bo->map__gtt) + munmap(MAP(bo->map__gtt), bytes(bo)); + if (bo->map__cpu) + munmap(MAP(bo->map__cpu), bytes(bo)); _list_del(&bo->list); _list_del(&bo->request); @@ -1719,18 +1712,21 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem, assert(bo->flush == false); list_move(&bo->list, &kgem->inactive[bucket(bo)]); - if (bo->map) { - int type = IS_CPU_MAP(bo->map); + if (bo->map__gtt) { if (bucket(bo) >= NUM_CACHE_BUCKETS || - (!type && !__kgem_bo_is_mappable(kgem, bo))) { - munmap(MAP(bo->map), bytes(bo)); - bo->map = NULL; + !__kgem_bo_is_mappable(kgem, bo)) { + munmap(MAP(bo->map__gtt), bytes(bo)); + bo->map__gtt = NULL; } - if (bo->map) { - list_add(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]); - kgem->vma[type].count++; + if (bo->map__gtt) { + list_add(&bo->vma, &kgem->vma[0].inactive[bucket(bo)]); + kgem->vma[0].count++; } } + if (bo->map__cpu && !bo->map__gtt) { + list_add(&bo->vma, &kgem->vma[1].inactive[bucket(bo)]); + kgem->vma[1].count++; + } } static struct kgem_bo *kgem_bo_replace_io(struct kgem_bo *bo) @@ -1767,10 +1763,10 @@ inline static void kgem_bo_remove_from_inactive(struct kgem *kgem, list_del(&bo->list); assert(bo->rq == NULL); assert(bo->exec == NULL); - if (bo->map) { + if (bo->map__gtt || bo->map__cpu) { assert(!list_is_empty(&bo->vma)); list_del(&bo->vma); - kgem->vma[IS_CPU_MAP(bo->map)].count--; + kgem->vma[bo->map__gtt == NULL].count--; } } @@ -1971,7 +1967,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo) kgem_bo_move_to_snoop(kgem, bo); return; } - if (!IS_USER_MAP(bo->map)) + if (!IS_USER_MAP(bo->map__cpu)) bo->flush = false; if (bo->scanout) { @@ -1987,9 +1983,6 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo) goto destroy; } - if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU) - kgem_bo_release_map(kgem, bo); - assert(list_is_empty(&bo->vma)); assert(list_is_empty(&bo->list)); assert(bo->flush == false); @@ -2018,7 +2011,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo) assert(bo->exec == NULL); assert(list_is_empty(&bo->request)); - if (!IS_CPU_MAP(bo->map)) { + if 
(bo->map__cpu == NULL) { if (!kgem_bo_set_purgeable(kgem, bo)) goto destroy; @@ -2053,9 +2046,9 @@ static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo) assert(cached->proxy == &bo->base); list_del(&cached->vma); - assert(*(struct kgem_bo **)cached->map == cached); - *(struct kgem_bo **)cached->map = NULL; - cached->map = NULL; + assert(*(struct kgem_bo **)cached->map__gtt == cached); + *(struct kgem_bo **)cached->map__gtt = NULL; + cached->map__gtt = NULL; kgem_bo_destroy(kgem, cached); } @@ -2346,7 +2339,8 @@ static void kgem_commit(struct kgem *kgem) kgem_retire(kgem); assert(list_is_empty(&rq->buffers)); - assert(rq->bo->map == NULL); + assert(rq->bo->map__gtt == NULL); + assert(rq->bo->map__cpu == NULL); gem_close(kgem->fd, rq->bo->handle); kgem_cleanup_cache(kgem); } else { @@ -2378,7 +2372,7 @@ static void kgem_finish_buffers(struct kgem *kgem) list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) { DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%s\n", __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL, - bo->write, bo->mmapped ? IS_CPU_MAP(bo->base.map) ? "cpu" : "gtt" : "no")); + bo->write, bo->mmapped == MMAPPED_CPU ? "cpu" : bo->mmapped == MMAPPED_GTT ? "gtt" : "no")); assert(next->base.list.prev == &bo->base.list); assert(bo->base.io); @@ -2403,7 +2397,7 @@ static void kgem_finish_buffers(struct kgem *kgem) used = ALIGN(bo->used, PAGE_SIZE); if (!DBG_NO_UPLOAD_ACTIVE && used + PAGE_SIZE <= bytes(&bo->base) && - (kgem->has_llc || !IS_CPU_MAP(bo->base.map) || bo->base.snoop)) { + (kgem->has_llc || bo->mmapped == MMAPPED_GTT || bo->base.snoop)) { DBG(("%s: retaining upload buffer (%d/%d)\n", __FUNCTION__, bo->used, bytes(&bo->base))); bo->used = used; @@ -2412,7 +2406,7 @@ static void kgem_finish_buffers(struct kgem *kgem) continue; } DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n", - __FUNCTION__, bo->used, (int)__MAP_TYPE(bo->base.map))); + __FUNCTION__, bo->used, bo->mmapped)); goto decouple; } @@ -3157,7 +3151,7 @@ bool kgem_expire_cache(struct kgem *kgem) break; } - if (bo->map && bo->delta + MAP_PRESERVE_TIME > expire) { + if (bo->map__cpu && bo->delta + MAP_PRESERVE_TIME > expire) { idle = false; list_move_tail(&bo->list, &preserve); } else { @@ -3349,7 +3343,7 @@ discard: __FUNCTION__, for_cpu ? "cpu" : "gtt")); cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)]; list_for_each_entry(bo, cache, vma) { - assert(IS_CPU_MAP(bo->map) == for_cpu); + assert(for_cpu ? bo->map__cpu : bo->map__gtt); assert(bucket(bo) == cache_bucket(num_pages)); assert(bo->proxy == NULL); assert(bo->rq == NULL); @@ -3429,10 +3423,10 @@ discard: bo->pitch = 0; } - if (bo->map) { + if (bo->map__gtt || bo->map__cpu) { if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) { int for_cpu = !!(flags & CREATE_CPU_MAP); - if (IS_CPU_MAP(bo->map) != for_cpu) { + if (for_cpu ? bo->map__cpu : bo->map__gtt){ if (first != NULL) break; @@ -4067,8 +4061,7 @@ large_inactive: assert(bucket(bo) == bucket); assert(bo->refcnt == 0); assert(!bo->scanout); - assert(bo->map); - assert(IS_CPU_MAP(bo->map) == for_cpu); + assert(for_cpu ? 
bo->map__cpu : bo->map__gtt); assert(bo->rq == NULL); assert(list_is_empty(&bo->request)); assert(bo->flush == false); @@ -4319,9 +4312,6 @@ search_inactive: if (!gem_set_tiling(kgem->fd, bo->handle, tiling, pitch)) continue; - - if (bo->map) - kgem_bo_release_map(kgem, bo); } if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) { @@ -4859,6 +4849,7 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket) i = 0; while (kgem->vma[type].count > 0) { struct kgem_bo *bo = NULL; + void **ptr; for (j = 0; bo == NULL && j < ARRAY_SIZE(kgem->vma[type].inactive); @@ -4871,15 +4862,14 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket) break; DBG(("%s: discarding inactive %s vma cache for %d\n", - __FUNCTION__, - IS_CPU_MAP(bo->map) ? "CPU" : "GTT", bo->handle)); - assert(IS_CPU_MAP(bo->map) == type); - assert(bo->map); + __FUNCTION__, type ? "CPU" : "GTT", bo->handle)); + + ptr = type ? &bo->map__cpu : &bo->map__gtt; assert(bo->rq == NULL); - VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo))); - munmap(MAP(bo->map), bytes(bo)); - bo->map = NULL; + VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(*ptr), bytes(bo))); + munmap(MAP(*ptr), bytes(bo)); + *ptr = NULL; list_del(&bo->vma); kgem->vma[type].count--; @@ -4895,12 +4885,11 @@ void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo) { void *ptr; - DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__, - bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain)); + DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__, + bo->handle, bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain)); assert(bo->proxy == NULL); assert(list_is_empty(&bo->list)); - assert(!IS_USER_MAP(bo->map)); assert_tiling(kgem, bo); if (bo->tiling == I915_TILING_NONE && !bo->scanout && kgem->has_llc) { @@ -4909,10 +4898,7 @@ void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo) return kgem_bo_map__cpu(kgem, bo); } - if (IS_CPU_MAP(bo->map)) - kgem_bo_release_map(kgem, bo); - - ptr = bo->map; + ptr = MAP(bo->map__gtt); if (ptr == NULL) { assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2); @@ -4927,7 +4913,7 @@ void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo) * issue with compositing managers which need to frequently * flush CPU damage to their GPU bo. */ - bo->map = ptr; + bo->map__gtt = ptr; DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle)); } @@ -4938,12 +4924,11 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo) { void *ptr; - DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__, - bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain)); + DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__, + bo->handle, bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain)); assert(bo->proxy == NULL); assert(list_is_empty(&bo->list)); - assert(!IS_USER_MAP(bo->map)); assert(bo->exec == NULL); assert_tiling(kgem, bo); @@ -4957,10 +4942,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo) return ptr; } - if (IS_CPU_MAP(bo->map)) - kgem_bo_release_map(kgem, bo); - - ptr = bo->map; + ptr = MAP(bo->map__gtt); if (ptr == NULL) { assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2); assert(kgem->gen != 021 || bo->tiling != I915_TILING_Y); @@ -4976,7 +4958,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo) * issue with compositing managers which need to frequently * flush CPU damage to their GPU bo. 
*/ - bo->map = ptr; + bo->map__gtt = ptr; DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle)); } @@ -5006,18 +4988,14 @@ void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo) { void *ptr; - DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__, - bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain)); + DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p:%p, domain=%d\n", __FUNCTION__, + bo->handle, bo->presumed_offset, bo->tiling, bo->map__gtt, bo->map__cpu, bo->domain)); assert(bo->exec == NULL); assert(list_is_empty(&bo->list)); - assert(!IS_USER_MAP(bo->map)); assert_tiling(kgem, bo); - if (IS_CPU_MAP(bo->map)) - kgem_bo_release_map(kgem, bo); - - ptr = bo->map; + ptr = MAP(bo->map__gtt); if (ptr == NULL) { assert(bytes(bo) <= kgem->aperture_mappable / 4); @@ -5032,7 +5010,7 @@ void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo) * issue with compositing managers which need to frequently * flush CPU damage to their GPU bo. */ - bo->map = ptr; + bo->map__gtt = ptr; DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle)); } @@ -5041,28 +5019,21 @@ void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo) void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo) { - if (bo->map) - return MAP(bo->map); - - kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo)); - return bo->map = __kgem_bo_map__gtt(kgem, bo); + return kgem_bo_map__async(kgem, bo); } void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo) { struct drm_i915_gem_mmap mmap_arg; - DBG(("%s(handle=%d, size=%d, mapped? %d)\n", - __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map))); + DBG(("%s(handle=%d, size=%d, map=%p:%p)\n", + __FUNCTION__, bo->handle, bytes(bo), bo->map__gtt, bo->map__cpu)); assert(!bo->purged); assert(list_is_empty(&bo->list)); assert(bo->proxy == NULL); - if (IS_CPU_MAP(bo->map)) - return MAP(bo->map); - - if (bo->map) - kgem_bo_release_map(kgem, bo); + if (bo->map__cpu) + return MAP(bo->map__cpu); kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo)); @@ -5092,71 +5063,7 @@ retry: VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo))); DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle)); - bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr); - return (void *)(uintptr_t)mmap_arg.addr_ptr; -} - -void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo) -{ - struct drm_i915_gem_mmap mmap_arg; - - DBG(("%s(handle=%d, size=%d, mapped? 
%d)\n", - __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map))); - assert(bo->refcnt); - assert(!bo->purged); - assert(list_is_empty(&bo->list)); - assert(bo->proxy == NULL); - - if (IS_CPU_MAP(bo->map)) - return MAP(bo->map); - -retry: - VG_CLEAR(mmap_arg); - mmap_arg.handle = bo->handle; - mmap_arg.offset = 0; - mmap_arg.size = bytes(bo); - if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) { - int err = errno; - - assert(err != EINVAL); - - if (__kgem_throttle_retire(kgem, 0)) - goto retry; - - if (kgem->need_expire) { - kgem_cleanup_cache(kgem); - goto retry; - } - - ErrorF("%s: failed to mmap handle=%d, %d bytes, into CPU domain: %d\n", - __FUNCTION__, bo->handle, bytes(bo), err); - return NULL; - } - - VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo))); - if (bo->map && bo->domain == DOMAIN_CPU) { - DBG(("%s: discarding GTT vma for %d\n", __FUNCTION__, bo->handle)); - kgem_bo_release_map(kgem, bo); - } - if (bo->map == NULL) { - DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle)); - bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr); - } - return (void *)(uintptr_t)mmap_arg.addr_ptr; -} - -void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr) -{ - DBG(("%s(handle=%d, size=%d)\n", - __FUNCTION__, bo->handle, bytes(bo))); - assert(bo->refcnt); - - if (IS_CPU_MAP(bo->map)) { - assert(ptr == MAP(bo->map)); - return; - } - - munmap(ptr, bytes(bo)); + return bo->map__cpu = (void *)(mmap_arg.addr_ptr); } uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo) @@ -5237,7 +5144,7 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem, bo = proxy; } - bo->map = MAKE_USER_MAP(ptr); + bo->map__cpu = MAKE_USER_MAP(ptr); DBG(("%s(ptr=%p, size=%d, pages=%d, read_only=%d) => handle=%d (proxy? %d)\n", __FUNCTION__, ptr, size, NUM_PAGES(size), read_only, handle, bo->proxy != NULL)); @@ -5404,7 +5311,7 @@ buffer_alloc(void) bo->mem = NULL; bo->need_io = false; - bo->mmapped = true; + bo->mmapped = MMAPPED_CPU; return bo; } @@ -5479,7 +5386,7 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc) assert(bo->base.snoop); assert(bo->base.tiling == I915_TILING_NONE); assert(num_pages(&bo->base) >= alloc); - assert(bo->mmapped == true); + assert(bo->mmapped == MMAPPED_CPU); assert(bo->need_io == false); bo->mem = kgem_bo_map__cpu(kgem, &bo->base); @@ -5526,7 +5433,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc) } assert(bo->base.refcnt == 1); - assert(bo->mmapped == true); + assert(bo->mmapped == MMAPPED_CPU); assert(bo->need_io == false); bo->mem = kgem_bo_map__cpu(kgem, &bo->base); @@ -5562,7 +5469,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc) } assert(bo->base.refcnt == 1); - assert(bo->mmapped == true); + assert(bo->mmapped == MMAPPED_CPU); assert(bo->need_io == false); if (!gem_set_caching(kgem->fd, bo->base.handle, SNOOPED)) @@ -5604,12 +5511,12 @@ free_caching: DBG(("%s: created snoop handle=%d for buffer\n", __FUNCTION__, bo->base.handle)); - assert(bo->mmapped == true); + assert(bo->mmapped == MMAPPED_CPU); assert(bo->need_io == false); bo->base.refcnt = 1; bo->base.snoop = true; - bo->base.map = MAKE_USER_MAP(bo->mem); + bo->base.map__cpu = MAKE_USER_MAP(bo->mem); return bo; } @@ -5642,7 +5549,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem, /* We can reuse any write buffer which we can fit */ if (flags == KGEM_BUFFER_LAST && bo->write == KGEM_BUFFER_WRITE && - bo->base.refcnt == 1 && !bo->mmapped && + bo->base.refcnt == 1 && + bo->mmapped == MMAPPED_NONE && size <= bytes(&bo->base)) { 
DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n", __FUNCTION__, size, bo->used, bytes(&bo->base))); @@ -5687,7 +5595,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem, assert(bo->base.io); assert(bo->base.refcnt >= 1); assert(bo->mmapped); - assert(!IS_CPU_MAP(bo->base.map) || kgem->has_llc || bo->base.snoop); + assert(bo->mmapped == MMAPPED_GTT || kgem->has_llc || bo->base.snoop); if (!kgem->has_llc && (bo->write & ~flags) & KGEM_BUFFER_INPLACE) { DBG(("%s: skip write %x buffer, need %x\n", @@ -5828,8 +5736,10 @@ skip_llc: bo->mem = kgem_bo_map(kgem, &bo->base); if (bo->mem) { - if (IS_CPU_MAP(bo->base.map)) + if (bo->mem == MAP(bo->base.map__cpu)) flags &= ~KGEM_BUFFER_INPLACE; + else + bo->mmapped = MMAPPED_GTT; goto init; } else { bo->base.refcnt = 0; @@ -5948,7 +5858,8 @@ init: assert(!bo->need_io || !bo->base.needs_flush); assert(!bo->need_io || bo->base.domain != DOMAIN_GPU); assert(bo->mem); - assert(!bo->mmapped || bo->base.map != NULL); + assert(bo->mmapped != MMAPPED_GTT || MAP(bo->base.map__gtt) == bo->mem); + assert(bo->mmapped != MMAPPED_CPU || MAP(bo->base.map__cpu) == bo->mem); bo->used = size; bo->write = flags & KGEM_BUFFER_WRITE_INPLACE; @@ -6018,7 +5929,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem, bo->size.bytes -= stride; } - bo->map = MAKE_CPU_MAP(*ret); + bo->map__cpu = *ret; bo->pitch = stride; bo->unique_id = kgem_get_unique_id(kgem); return bo; @@ -6063,10 +5974,10 @@ void kgem_proxy_bo_attach(struct kgem_bo *bo, struct kgem_bo **ptr) { DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle)); - assert(bo->map == NULL || IS_CPU_MAP(bo->map)); + assert(bo->map__gtt == NULL); assert(bo->proxy); list_add(&bo->vma, &bo->proxy->vma); - bo->map = ptr; + bo->map__gtt = ptr; *ptr = kgem_bo_reference(bo); } @@ -6099,13 +6010,13 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo) bo->base.domain, __kgem_busy(kgem, bo->base.handle))); - assert(!IS_CPU_MAP(bo->base.map) || bo->base.snoop || kgem->has_llc); + assert(bo->mmapped == MMAPPED_GTT || bo->base.snoop || kgem->has_llc); VG_CLEAR(set_domain); set_domain.handle = bo->base.handle; set_domain.write_domain = 0; set_domain.read_domains = - IS_CPU_MAP(bo->base.map) ? I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT; + bo->mmapped == MMAPPED_CPU ? 
I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT; if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) diff --git a/src/sna/kgem.h b/src/sna/kgem.h index 14615572..83d9c744 100644 --- a/src/sna/kgem.h +++ b/src/sna/kgem.h @@ -55,9 +55,8 @@ struct kgem_bo { struct list request; struct list vma; - void *map; -#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1) -#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0) + void *map__cpu; + void *map__gtt; #define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3)) struct kgem_bo_binding { @@ -462,8 +461,6 @@ void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo); void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo); void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo); void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write); -void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo); -void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr); uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo); bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo, @@ -553,14 +550,14 @@ static inline bool kgem_bo_is_mappable(struct kgem *kgem, static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo) { - DBG(("%s: map=%p, tiling=%d, domain=%d\n", - __FUNCTION__, bo->map, bo->tiling, bo->domain)); + DBG(("%s: map=%p:%p, tiling=%d, domain=%d\n", + __FUNCTION__, bo->map__gtt, bo->map__cpu, bo->tiling, bo->domain)); assert(bo->refcnt); - if (bo->map == NULL) - return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU; + if (bo->tiling == I915_TILING_NONE && (bo->domain == DOMAIN_CPU || kgem->has_llc)) + return bo->map__cpu != NULL; - return IS_CPU_MAP(bo->map) == !bo->tiling; + return bo->map__gtt != NULL; } static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo) diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c index 8a55e12e..fb8a2283 100644 --- a/src/sna/sna_accel.c +++ b/src/sna/sna_accel.c @@ -350,7 +350,7 @@ static void assert_pixmap_damage(PixmapPtr p) } if (DAMAGE_IS_ALL(priv->gpu_damage)) { - assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map))); + assert(priv->cpu == false || (priv->mapped && p->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu))); } assert(!DAMAGE_IS_ALL(priv->gpu_damage) || priv->cpu_damage == NULL); @@ -1403,9 +1403,8 @@ static inline bool has_coherent_map(struct sna *sna, unsigned flags) { assert(bo); - assert(bo->map); - if (!IS_CPU_MAP(bo->map)) + if (kgem_bo_mapped(&sna->kgem, bo)) return true; if (bo->tiling == I915_TILING_Y) @@ -1414,7 +1413,7 @@ static inline bool has_coherent_map(struct sna *sna, return kgem_bo_can_map__cpu(&sna->kgem, bo, flags & MOVE_WRITE); } -static inline bool has_coherent_ptr(struct sna_pixmap *priv) +static inline bool has_coherent_ptr(struct sna *sna, struct sna_pixmap *priv) { if (priv == NULL) return true; @@ -1423,13 +1422,16 @@ static inline bool has_coherent_ptr(struct sna_pixmap *priv) if (!priv->cpu_bo) return true; - return priv->pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map); + return priv->pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map__cpu); } - if (priv->cpu && !IS_CPU_MAP(priv->gpu_bo->map)) - return false; + if (priv->pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu)) + return priv->gpu_bo->tiling == I915_TILING_NONE && (priv->gpu_bo->domain == DOMAIN_CPU || sna->kgem.has_llc); + + if (priv->pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__gtt)) + return true; - return priv->pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map); + return 
false; } static inline bool pixmap_inplace(struct sna *sna, @@ -1991,10 +1993,9 @@ skip_inplace_map: } priv->cpu = true; - assert(IS_CPU_MAP(priv->gpu_bo->map)); + assert(pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu)); kgem_bo_sync__cpu_full(&sna->kgem, priv->gpu_bo, FORCE_FULL_SYNC || flags & MOVE_WRITE); - assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->gpu_bo->map & ~3)); assert((flags & MOVE_WRITE) == 0 || !kgem_bo_is_busy(priv->gpu_bo)); assert_pixmap_damage(pixmap); DBG(("%s: operate inplace (CPU)\n", __FUNCTION__)); @@ -2024,7 +2025,7 @@ skip_inplace_map: if (priv->cpu_bo) { DBG(("%s: syncing CPU bo\n", __FUNCTION__)); kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo); - assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->cpu_bo->map & ~3)); + assert(pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map__cpu)); } if (priv->clear_color == 0 || @@ -2069,7 +2070,7 @@ skip_inplace_map: box, n, COPY_LAST); } if (!ok) { - assert(has_coherent_ptr(sna_pixmap(pixmap))); + assert(has_coherent_ptr(sna, sna_pixmap(pixmap))); sna_read_boxes(sna, pixmap, priv->gpu_bo, box, n); } @@ -2111,10 +2112,9 @@ done: if (priv->cpu_bo) { if ((flags & MOVE_ASYNC_HINT) == 0) { DBG(("%s: syncing CPU bo\n", __FUNCTION__)); - assert(IS_CPU_MAP(priv->cpu_bo->map)); + assert(pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map__cpu)); kgem_bo_sync__cpu_full(&sna->kgem, priv->cpu_bo, FORCE_FULL_SYNC || flags & MOVE_WRITE); - assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->cpu_bo->map & ~3)); assert((flags & MOVE_WRITE) == 0 || !kgem_bo_is_busy(priv->cpu_bo)); } } @@ -2124,7 +2124,7 @@ done: assert(pixmap->devPrivate.ptr); assert(pixmap->devKind); assert_pixmap_damage(pixmap); - assert(has_coherent_ptr(sna_pixmap(pixmap))); + assert(has_coherent_ptr(sna, sna_pixmap(pixmap))); return true; } @@ -2210,7 +2210,7 @@ static inline bool region_inplace(struct sna *sna, if (DAMAGE_IS_ALL(priv->gpu_damage)) { DBG(("%s: yes, already wholly damaged on the GPU\n", __FUNCTION__)); assert(priv->gpu_bo); - assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map))); + assert(priv->cpu == false || (priv->mapped && pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu))); return true; } @@ -2396,7 +2396,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, kgem_bo_map__cpu(&sna->kgem, priv->gpu_bo); if (pixmap->devPrivate.ptr != NULL) { assert(has_coherent_map(sna, priv->gpu_bo, flags)); - assert(IS_CPU_MAP(priv->gpu_bo->map)); + assert(pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu)); pixmap->devKind = priv->gpu_bo->pitch; priv->cpu = true; priv->mapped = true; @@ -2420,7 +2420,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, assert_pixmap_damage(pixmap); kgem_bo_sync__cpu_full(&sna->kgem, priv->gpu_bo, FORCE_FULL_SYNC || flags & MOVE_WRITE); - assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->gpu_bo->map & ~3)); + assert(pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu)); assert((flags & MOVE_WRITE) == 0 || !kgem_bo_is_busy(priv->gpu_bo)); assert_pixmap_damage(pixmap); if (dx | dy) @@ -2468,7 +2468,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, if (priv->cpu_bo) { DBG(("%s: syncing CPU bo\n", __FUNCTION__)); kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo); - assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->cpu_bo->map & ~3)); + assert(pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map__cpu)); } do { @@ -2516,7 +2516,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, box, n, COPY_LAST); } if (!ok) { - 
assert(has_coherent_ptr(sna_pixmap(pixmap))); + assert(has_coherent_ptr(sna, sna_pixmap(pixmap))); sna_read_boxes(sna, pixmap, priv->gpu_bo, box, n); } @@ -2630,7 +2630,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, } if (!ok) { - assert(has_coherent_ptr(sna_pixmap(pixmap))); + assert(has_coherent_ptr(sna, sna_pixmap(pixmap))); sna_read_boxes(sna, pixmap, priv->gpu_bo, box, n); } @@ -2658,7 +2658,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, box, n, COPY_LAST); } if (!ok) { - assert(has_coherent_ptr(sna_pixmap(pixmap))); + assert(has_coherent_ptr(sna, sna_pixmap(pixmap))); sna_read_boxes(sna, pixmap, priv->gpu_bo, box, n); } @@ -2684,7 +2684,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, box, n, COPY_LAST); } if (!ok) { - assert(has_coherent_ptr(sna_pixmap(pixmap))); + assert(has_coherent_ptr(sna, sna_pixmap(pixmap))); sna_read_boxes(sna, pixmap, priv->gpu_bo, box, n); } @@ -2731,10 +2731,9 @@ out: } if ((flags & MOVE_ASYNC_HINT) == 0 && priv->cpu_bo) { DBG(("%s: syncing cpu bo\n", __FUNCTION__)); - assert(IS_CPU_MAP(priv->cpu_bo->map)); + assert(pixmap->devPrivate.ptr == MAP(priv->cpu_bo->map__cpu)); kgem_bo_sync__cpu_full(&sna->kgem, priv->cpu_bo, FORCE_FULL_SYNC || flags & MOVE_WRITE); - assert(pixmap->devPrivate.ptr == (void *)((unsigned long)priv->cpu_bo->map & ~3)); assert((flags & MOVE_WRITE) == 0 || !kgem_bo_is_busy(priv->cpu_bo)); } priv->cpu = @@ -2743,7 +2742,7 @@ out: assert(pixmap->devPrivate.ptr); assert(pixmap->devKind); assert_pixmap_damage(pixmap); - assert(has_coherent_ptr(sna_pixmap(pixmap))); + assert(has_coherent_ptr(sna, sna_pixmap(pixmap))); return true; } @@ -2924,7 +2923,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl pixmap->drawable.height)) { assert(priv->gpu_bo); assert(priv->gpu_bo->proxy == NULL); - assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map))); + assert(priv->cpu == false || (priv->mapped && pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu))); sna_damage_destroy(&priv->cpu_damage); list_del(&priv->flush_list); goto done; @@ -3210,7 +3209,7 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box, assert(priv->cpu_damage == NULL); assert(priv->gpu_bo); assert(priv->gpu_bo->proxy == NULL); - assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map))); + assert(priv->cpu == false || (priv->mapped && pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu))); goto use_gpu_bo; } @@ -3626,7 +3625,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags) DBG(("%s: already all-damaged\n", __FUNCTION__)); assert(priv->gpu_bo); assert(priv->gpu_bo->proxy == NULL); - assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map))); + assert(priv->cpu == false || (priv->mapped && pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu))); sna_damage_destroy(&priv->cpu_damage); list_del(&priv->flush_list); goto active; @@ -4288,7 +4287,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, assert(box->x2 - x <= w); assert(box->y2 - y <= h); - assert(has_coherent_ptr(sna_pixmap(pixmap))); + assert(has_coherent_ptr(to_sna_from_pixmap(pixmap), sna_pixmap(pixmap))); memcpy_blt(bits, pixmap->devPrivate.ptr, pixmap->drawable.bitsPerPixel, stride, pixmap->devKind, @@ -4710,7 +4709,7 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv, if (DAMAGE_IS_ALL(priv->gpu_damage)) { assert(priv->gpu_bo); - assert(priv->cpu == false || (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map))); + assert(priv->cpu 
== false || (priv->mapped && pixmap->devPrivate.ptr == MAP(priv->gpu_bo->map__cpu))); return true; } @@ -5520,8 +5519,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc, assert(box[i].x2 + dx <= tmp->drawable.width); assert(box[i].y2 + dy <= tmp->drawable.height); - assert(has_coherent_ptr(sna_pixmap(src_pixmap))); - assert(has_coherent_ptr(sna_pixmap(tmp))); + assert(has_coherent_ptr(sna, sna_pixmap(src_pixmap))); + assert(has_coherent_ptr(sna, sna_pixmap(tmp))); memcpy_blt(src_pixmap->devPrivate.ptr, tmp->devPrivate.ptr, src_pixmap->drawable.bitsPerPixel, @@ -5698,8 +5697,8 @@ fallback: assert(box->y1 + src_dy >= 0); assert(box->x2 + src_dx <= src_pixmap->drawable.width); assert(box->y2 + src_dy <= src_pixmap->drawable.height); - assert(has_coherent_ptr(sna_pixmap(src_pixmap))); - assert(has_coherent_ptr(sna_pixmap(dst_pixmap))); + assert(has_coherent_ptr(sna, sna_pixmap(src_pixmap))); + assert(has_coherent_ptr(sna, sna_pixmap(dst_pixmap))); memcpy_blt(src_bits, dst_bits, bpp, src_stride, dst_stride, box->x1, box->y1, @@ -10752,7 +10751,7 @@ sna_pixmap_get_source_bo(PixmapPtr pixmap) if (upload == NULL) return NULL; - assert(has_coherent_ptr(sna_pixmap(pixmap))); + assert(has_coherent_ptr(sna, sna_pixmap(pixmap))); memcpy_blt(pixmap->devPrivate.ptr, ptr, pixmap->drawable.bitsPerPixel, pixmap->devKind, upload->pitch, @@ -11121,7 +11120,7 @@ sna_poly_fill_rect_tiled_nxm_blt(DrawablePtr drawable, assert(tile->drawable.height && tile->drawable.height <= 8); assert(tile->drawable.width && tile->drawable.width <= 8); - assert(has_coherent_ptr(sna_pixmap(tile))); + assert(has_coherent_ptr(sna, sna_pixmap(tile))); cpp = tile->drawable.bitsPerPixel/8; for (h = 0; h < tile->drawable.height; h++) { @@ -14851,7 +14850,7 @@ sna_get_image(DrawablePtr drawable, __FUNCTION__, region.extents.x1, region.extents.y1, region.extents.x2, region.extents.y2)); - assert(has_coherent_ptr(sna_pixmap(pixmap))); + assert(has_coherent_ptr(to_sna_from_pixmap(pixmap), sna_pixmap(pixmap))); memcpy_blt(pixmap->devPrivate.ptr, dst, drawable->bitsPerPixel, pixmap->devKind, PixmapBytePad(w, drawable->depth), region.extents.x1, region.extents.y1, 0, 0, w, h); @@ -15256,8 +15255,8 @@ fallback: assert(box->x2 <= src->drawable.width); assert(box->y2 <= src->drawable.height); - assert(has_coherent_ptr(sna_pixmap(src))); - assert(has_coherent_ptr(sna_pixmap(dst))); + assert(has_coherent_ptr(sna, sna_pixmap(src))); + assert(has_coherent_ptr(sna, sna_pixmap(dst))); memcpy_blt(src->devPrivate.ptr, dst->devPrivate.ptr, src->drawable.bitsPerPixel, diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c index 2bd6b82d..2a318206 100644 --- a/src/sna/sna_io.c +++ b/src/sna/sna_io.c @@ -108,7 +108,7 @@ read_boxes_inplace__cpu(struct kgem *kgem, assert(kgem_bo_can_map__cpu(kgem, bo, false)); assert(bo->tiling != I915_TILING_Y); - src = __kgem_bo_map__cpu(kgem, bo); + src = kgem_bo_map__cpu(kgem, bo); if (src == NULL) return false; @@ -131,7 +131,6 @@ read_boxes_inplace__cpu(struct kgem *kgem, box++; } while (--n); } - __kgem_bo_unmap__cpu(kgem, bo, src); return true; } @@ -574,7 +573,7 @@ write_boxes_inplace__tiled(struct kgem *kgem, assert(bo->tiling == I915_TILING_X); - dst = __kgem_bo_map__cpu(kgem, bo); + dst = kgem_bo_map__cpu(kgem, bo); if (dst == NULL) return false; @@ -586,7 +585,6 @@ write_boxes_inplace__tiled(struct kgem *kgem, box->x2 - box->x1, box->y2 - box->y1); box++; } while (--n); - __kgem_bo_unmap__cpu(kgem, bo, dst); return true; } |