Diffstat (limited to 'src/sna')

 src/sna/kgem.c      | 246
 src/sna/kgem.h      |  10
 src/sna/sna.h       |   8
 src/sna/sna_accel.c | 265
 src/sna/sna_blt.c   |  52

 5 files changed, 375 insertions(+), 206 deletions(-)
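The kgem.c changes below add a userspace-pointer ("vmap") path: when the installed kernel headers lack I915_PARAM_HAS_VMAP, the ioctl number and struct drm_i915_gem_vmap are defined locally, and malloc'ed memory is wrapped into GEM handles through a gem_vmap() helper. Only the opening line of that helper is visible in the diff context, so the following is a sketch of how such a wrapper is typically written, assuming the struct, flag and ioctl macro from the hunk below plus libdrm's drmIoctl():

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>	/* drmIoctl() */

/* Sketch only: the struct drm_i915_gem_vmap layout, I915_VMAP_READ_ONLY and
 * DRM_IOCTL_I915_GEM_VMAP are taken from the kgem.c hunk below; the body of
 * this function is illustrative, not copied from the driver. */
static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
{
	struct drm_i915_gem_vmap vmap;

	memset(&vmap, 0, sizeof(vmap));
	vmap.user_ptr = (uintptr_t)ptr;
	vmap.user_size = size;
	if (read_only)
		vmap.flags |= I915_VMAP_READ_ONLY;

	/* a GEM handle is never 0, so 0 signals failure to the caller */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VMAP, &vmap))
		return 0;

	return vmap.handle;
}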
diff --git a/src/sna/kgem.c b/src/sna/kgem.c index 3ba2ec35..4fcdf370 100644 --- a/src/sna/kgem.c +++ b/src/sna/kgem.c @@ -76,8 +76,23 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags); #define MAX_CPU_VMA_CACHE INT16_MAX #define MAP_PRESERVE_TIME 10 -#define CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) & ~1)) +#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3)) #define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1)) +#define MAKE_VMAP_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3)) +#define IS_VMAP_MAP(ptr) ((uintptr_t)(ptr) & 2) + +#if defined(USE_VMAP) && !defined(I915_PARAM_HAS_VMAP) +#define DRM_I915_GEM_VMAP 0x2c +#define DRM_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_VMAP, struct drm_i915_gem_vmap) +#define I915_PARAM_HAS_VMAP 18 +struct drm_i915_gem_vmap { + uint64_t user_ptr; + uint32_t user_size; + uint32_t flags; +#define I915_VMAP_READ_ONLY 0x1 + uint32_t handle; +}; +#endif struct kgem_partial_bo { struct kgem_bo base; @@ -561,6 +576,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen) list_init(&kgem->partial); list_init(&kgem->requests); list_init(&kgem->flushing); + list_init(&kgem->sync_list); list_init(&kgem->large); for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) list_init(&kgem->inactive[i]); @@ -577,7 +593,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen) kgem->next_request = __kgem_request_alloc(); -#if defined(USE_VMAP) && defined(I915_PARAM_HAS_VMAP) +#if defined(USE_VMAP) if (!DBG_NO_VMAP) kgem->has_vmap = gem_param(kgem, I915_PARAM_HAS_VMAP) > 0; #endif @@ -605,9 +621,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen) } kgem->has_llc = has_llc; } - kgem->has_cpu_bo = kgem->has_llc; - DBG(("%s: cpu bo enabled %d: llc? %d\n", __FUNCTION__, - kgem->has_cpu_bo, kgem->has_llc)); + DBG(("%s: cpu bo enabled %d: llc? %d, vmap? %d\n", __FUNCTION__, + kgem->has_llc | kgem->has_vmap, kgem->has_llc, kgem->has_vmap)); kgem->has_semaphores = false; if (gen >= 60 && semaphores_enabled()) @@ -688,10 +703,13 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen) kgem->max_upload_tile_size = half_gpu_max; kgem->large_object_size = MAX_CACHE_SIZE; - if (kgem->large_object_size > kgem->max_cpu_size) - kgem->large_object_size = kgem->max_cpu_size; if (kgem->large_object_size > kgem->max_gpu_size) kgem->large_object_size = kgem->max_gpu_size; + if (kgem->has_llc | kgem->has_vmap) { + if (kgem->large_object_size > kgem->max_cpu_size) + kgem->large_object_size = kgem->max_cpu_size; + } else + kgem->max_cpu_size = 0; DBG(("%s: large object thresold=%d\n", __FUNCTION__, kgem->large_object_size)); @@ -887,8 +905,10 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo) /* XXX is it worth working around gcc here? */ kgem->flush |= bo->flush; - kgem->sync |= bo->sync; kgem->scanout |= bo->scanout; + + if (bo->sync) + kgem->sync = kgem->next_request; } static uint32_t kgem_end_batch(struct kgem *kgem) @@ -932,12 +952,14 @@ static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo) { int type = IS_CPU_MAP(bo->map); + assert(!IS_VMAP_MAP(bo->map)); + DBG(("%s: releasing %s vma for handle=%d, count=%d\n", __FUNCTION__, type ? 
"CPU" : "GTT", bo->handle, kgem->vma[type].count)); - VG(if (type) VALGRIND_FREELIKE_BLOCK(CPU_MAP(bo->map), 0)); - munmap(CPU_MAP(bo->map), bytes(bo)); + VG(if (type) VALGRIND_FREELIKE_BLOCK(MAP(bo->map), 0)); + munmap(MAP(bo->map), bytes(bo)); bo->map = NULL; if (!list_is_empty(&bo->vma)) { @@ -951,9 +973,15 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo) DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle)); assert(bo->refcnt == 0); assert(bo->exec == NULL); + assert(!bo->vmap || bo->rq == NULL); kgem_bo_binding_free(kgem, bo); + if (IS_VMAP_MAP(bo->map)) { + assert(bo->rq == NULL); + free(MAP(bo->map)); + bo->map = NULL; + } if (bo->map) kgem_bo_release_map(kgem, bo); assert(list_is_empty(&bo->vma)); @@ -978,6 +1006,7 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem, assert(!bo->needs_flush); assert(bo->rq == NULL); assert(bo->domain != DOMAIN_GPU); + assert(bo->reusable); if (bucket(bo) >= NUM_CACHE_BUCKETS) { kgem_bo_free(kgem, bo); @@ -990,7 +1019,7 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem, if (bucket(bo) >= NUM_CACHE_BUCKETS || (!type && !kgem_bo_is_mappable(kgem, bo))) { list_del(&bo->vma); - munmap(CPU_MAP(bo->map), bytes(bo)); + munmap(MAP(bo->map), bytes(bo)); bo->map = NULL; } if (bo->map) { @@ -1035,6 +1064,19 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo) if (NO_CACHE) goto destroy; + if (bo->vmap) { + if (bo->rq == NULL) { + if (bo->needs_flush && kgem_busy(kgem, bo->handle)) { + list_add(&bo->request, &kgem->flushing); + bo->rq = &_kgem_static_request; + } else + kgem_bo_free(kgem, bo); + } else { + assert(!bo->sync); + } + return; + } + if (bo->io) { struct kgem_bo *base; @@ -1065,7 +1107,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo) assert(list_is_empty(&bo->vma)); assert(list_is_empty(&bo->list)); - assert(bo->vmap == false && bo->sync == false); + assert(bo->vmap == false); assert(bo->io == false); assert(bo->scanout == false); assert(bo->flush == false); @@ -1197,7 +1239,7 @@ bool kgem_retire(struct kgem *kgem) DBG(("%s: moving %d from flush to inactive\n", __FUNCTION__, bo->handle)); - if (kgem_bo_set_purgeable(kgem, bo)) { + if (bo->reusable && kgem_bo_set_purgeable(kgem, bo)) { bo->needs_flush = false; bo->domain = DOMAIN_NONE; bo->rq = NULL; @@ -1278,6 +1320,9 @@ bool kgem_retire(struct kgem *kgem) kgem_bo_free(kgem, rq->bo); } + if (kgem->sync == rq) + kgem->sync = NULL; + _list_del(&rq->list); free(rq); } @@ -1308,7 +1353,7 @@ static void kgem_commit(struct kgem *kgem) bo->presumed_offset = bo->exec->offset; bo->exec = NULL; - if (!bo->refcnt && !bo->reusable) { + if (!bo->refcnt && !bo->reusable && !bo->vmap) { kgem_bo_free(kgem, bo); continue; } @@ -1708,8 +1753,10 @@ void _kgem_submit(struct kgem *kgem) #if !NDEBUG if (ret < 0) { int i; - ErrorF("batch (end=%d, size=%d) submit failed: %d\n", - batch_end, size, errno); + + ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n", + kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface, + kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno); i = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666); if (i != -1) { @@ -1746,8 +1793,7 @@ void _kgem_submit(struct kgem *kgem) kgem->reloc[i].write_domain, (int)kgem->reloc[i].presumed_offset); } - FatalError("SNA: failed to submit batchbuffer: ret=%d\n", - errno); + FatalError("SNA: failed to submit batchbuffer\n"); } #endif @@ -2594,6 +2640,7 @@ search_inactive: cache = &kgem->inactive[bucket]; 
list_for_each_entry_safe(bo, next, cache, list) { assert(bucket(bo) == bucket); + assert(bo->reusable); if (size > num_pages(bo)) { DBG(("inactive too small: %d < %d\n", @@ -2673,6 +2720,59 @@ create: return bo; } +struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem, + int width, + int height, + int bpp, + uint32_t flags) +{ + struct kgem_bo *bo; + + DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp)); + + if (kgem->has_llc) { + bo = kgem_create_2d(kgem, width, height, bpp, + I915_TILING_NONE, flags); + if (bo == NULL) + return bo; + + if (kgem_bo_map__cpu(kgem, bo) == NULL) { + _kgem_bo_destroy(kgem, bo); + return NULL; + } + + return bo; + } + + if (kgem->has_vmap) { + int stride, size; + void *ptr; + + stride = ALIGN(width, 2) * bpp >> 3; + stride = ALIGN(stride, 4); + size = ALIGN(height, 2) * stride; + + assert(size >= PAGE_SIZE); + + /* XXX */ + //if (posix_memalign(&ptr, 64, ALIGN(size, 64))) + if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE))) + return NULL; + + bo = kgem_create_map(kgem, ptr, size, false); + if (bo == NULL) { + free(ptr); + return NULL; + } + + bo->map = MAKE_VMAP_MAP(ptr); + bo->pitch = stride; + return bo; + } + + return NULL; +} + static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo) { struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy; @@ -2702,9 +2802,6 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo) return; } - if (bo->vmap) - kgem_bo_sync__cpu(kgem, bo); - __kgem_bo_destroy(kgem, bo); } @@ -2915,8 +3012,8 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket) assert(bo->map); assert(bo->rq == NULL); - VG(if (type) VALGRIND_FREELIKE_BLOCK(CPU_MAP(bo->map), 0)); - munmap(CPU_MAP(bo->map), bytes(bo)); + VG(if (type) VALGRIND_FREELIKE_BLOCK(MAP(bo->map), 0)); + munmap(MAP(bo->map), bytes(bo)); bo->map = NULL; list_del(&bo->vma); kgem->vma[type].count--; @@ -2996,7 +3093,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo) void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo) { if (bo->map) - return CPU_MAP(bo->map); + return MAP(bo->map); kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo)); return bo->map = gem_mmap(kgem->fd, bo->handle, bytes(bo), @@ -3012,7 +3109,7 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo) assert(list_is_empty(&bo->list)); if (IS_CPU_MAP(bo->map)) - return CPU_MAP(bo->map); + return MAP(bo->map); if (bo->map) kgem_bo_release_map(kgem, bo); @@ -3067,7 +3164,7 @@ uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo) return flink.name; } -#if defined(USE_VMAP) && defined(I915_PARAM_HAS_VMAP) +#if defined(USE_VMAP) static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only) { struct drm_i915_gem_vmap vmap; @@ -3095,14 +3192,11 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem, if (!kgem->has_vmap) return NULL; - if (size >= MAX_CACHE_SIZE) - return NULL; - handle = gem_vmap(kgem->fd, ptr, size, read_only); if (handle == 0) return NULL; - bo = __kgem_bo_alloc(handle, size); + bo = __kgem_bo_alloc(handle, NUM_PAGES(size)); if (bo == NULL) { gem_close(kgem->fd, handle); return NULL; @@ -3110,10 +3204,9 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem, bo->reusable = false; bo->vmap = true; - bo->sync = true; - DBG(("%s(ptr=%p, size=%d, read_only=%d) => handle=%d\n", - __FUNCTION__, ptr, size, read_only, handle)); + DBG(("%s(ptr=%p, size=%d, pages=%d, read_only=%d) => handle=%d\n", + __FUNCTION__, ptr, size, NUM_PAGES(size), read_only, handle)); return bo; } #else @@ -3129,7 +3222,6 @@ void 
kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo) { kgem_bo_submit(kgem, bo); - /* XXX assumes bo is snoopable */ if (bo->domain != DOMAIN_CPU) { struct drm_i915_gem_set_domain set_domain; @@ -3148,28 +3240,40 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo) } } +void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo) +{ + assert(!bo->reusable); + list_add(&bo->list, &kgem->sync_list); + bo->sync = true; +} + void kgem_sync(struct kgem *kgem) { + struct drm_i915_gem_set_domain set_domain; + struct kgem_request *rq; + struct kgem_bo *bo; + DBG(("%s\n", __FUNCTION__)); - if (!list_is_empty(&kgem->requests)) { - struct drm_i915_gem_set_domain set_domain; - struct kgem_request *rq; + rq = kgem->sync; + if (rq == NULL) + return; - rq = list_first_entry(&kgem->requests, - struct kgem_request, - list); + if (rq == kgem->next_request) + _kgem_submit(kgem); - VG_CLEAR(set_domain); - set_domain.handle = rq->bo->handle; - set_domain.read_domains = I915_GEM_DOMAIN_GTT; - set_domain.write_domain = I915_GEM_DOMAIN_GTT; + VG_CLEAR(set_domain); + set_domain.handle = rq->bo->handle; + set_domain.read_domains = I915_GEM_DOMAIN_GTT; + set_domain.write_domain = I915_GEM_DOMAIN_GTT; - drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); - kgem_retire(kgem); - } + drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); + kgem_retire(kgem); + + list_for_each_entry(bo, &kgem->sync_list, list) + kgem_bo_sync__cpu(kgem, bo); - kgem->sync = false; + assert (kgem->sync == NULL); } void kgem_clear_dirty(struct kgem *kgem) @@ -3262,11 +3366,20 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem, goto done; } - if ((bo->write & KGEM_BUFFER_WRITE) != (flags & KGEM_BUFFER_WRITE) || - (bo->write & ~flags) & KGEM_BUFFER_INPLACE) { - DBG(("%s: skip write %x buffer, need %x\n", - __FUNCTION__, bo->write, flags)); - continue; + if (flags & KGEM_BUFFER_WRITE) { + if ((bo->write & KGEM_BUFFER_WRITE) == 0 || + (((bo->write & ~flags) & KGEM_BUFFER_INPLACE) && + !bo->base.vmap)) { + DBG(("%s: skip write %x buffer, need %x\n", + __FUNCTION__, bo->write, flags)); + continue; + } + } else { + if (bo->write & KGEM_BUFFER_WRITE) { + DBG(("%s: skip write %x buffer, need %x\n", + __FUNCTION__, bo->write, flags)); + continue; + } } if (bo->used + size <= bytes(&bo->base)) { @@ -3290,7 +3403,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem, if (alloc > MAX_CACHE_SIZE) alloc = PAGE_ALIGN(size); alloc /= PAGE_SIZE; - if (kgem->has_cpu_bo) { + if (kgem->has_llc) { bo = malloc(sizeof(*bo)); if (bo == NULL) return NULL; @@ -3420,6 +3533,29 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem, alloc = PAGE_ALIGN(size) / PAGE_SIZE; flags &= ~KGEM_BUFFER_INPLACE; + if (kgem->has_vmap) { + bo = partial_bo_alloc(alloc); + if (bo) { + if (!__kgem_bo_init(&bo->base, + gem_vmap(kgem->fd, bo->mem, + alloc * PAGE_SIZE, false), + alloc)) { + free(bo); + return NULL; + } + + DBG(("%s: created vmap handle=%d for buffer\n", + __FUNCTION__, bo->base.handle)); + + bo->need_io = false; + bo->base.io = true; + bo->base.vmap = true; + bo->mmapped = true; + + goto init; + } + } + old = NULL; if ((flags & KGEM_BUFFER_WRITE) == 0) old = search_linear_cache(kgem, alloc, 0); @@ -3561,7 +3697,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem, assert(width > 0 && height > 0); assert(ret != NULL); stride = ALIGN(width, 2) * bpp >> 3; - stride = ALIGN(stride, kgem->min_alignment); + stride = ALIGN(stride, 4); DBG(("%s: %dx%d, %d bpp, stride=%d\n", __FUNCTION__, width, height, bpp, stride)); diff 
--git a/src/sna/kgem.h b/src/sna/kgem.h index 30303ce0..58316dc3 100644 --- a/src/sna/kgem.h +++ b/src/sna/kgem.h @@ -127,7 +127,9 @@ struct kgem { struct list inactive[NUM_CACHE_BUCKETS]; struct list partial; struct list requests; + struct list sync_list; struct kgem_request *next_request; + struct kgem_request *sync; struct { struct list inactive[NUM_CACHE_BUCKETS]; @@ -142,7 +144,6 @@ struct kgem { uint16_t max_batch_size; uint32_t flush:1; - uint32_t sync:1; uint32_t need_expire:1; uint32_t need_purge:1; uint32_t need_retire:1; @@ -154,7 +155,6 @@ struct kgem { uint32_t has_relaxed_fencing :1; uint32_t has_semaphores :1; uint32_t has_llc :1; - uint32_t has_cpu_bo :1; uint16_t fence_max; uint16_t half_cpu_cache_pages; @@ -229,6 +229,11 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem, int bpp, int tiling, uint32_t flags); +struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem, + int width, + int height, + int bpp, + uint32_t flags); uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format); void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset); @@ -359,6 +364,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo); void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo); void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo); void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo); +void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo); uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo); Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo, diff --git a/src/sna/sna.h b/src/sna/sna.h index 00fc80a2..c772d7d3 100644 --- a/src/sna/sna.h +++ b/src/sna/sna.h @@ -115,11 +115,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include "sna_damage.h" #include "sna_render.h" -#ifndef CREATE_PIXMAP_USAGE_SCRATCH_HEADER -#define FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER 1 -#define CREATE_PIXMAP_USAGE_SCRATCH_HEADER (unsigned)-1 -#endif - #define SNA_CURSOR_X 64 #define SNA_CURSOR_Y SNA_CURSOR_X @@ -226,13 +221,14 @@ struct sna { #define SNA_NO_THROTTLE 0x1 #define SNA_NO_DELAYED_FLUSH 0x2 + unsigned flush; + int timer[NUM_TIMERS]; uint16_t timer_active; uint16_t timer_ready; int vblank_interval; - struct list deferred_free; struct list dirty_pixmaps; struct list active_pixmaps; struct list inactive_clock[2]; diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c index 8fa59a66..52e75c76 100644 --- a/src/sna/sna_accel.c +++ b/src/sna/sna_accel.c @@ -44,6 +44,7 @@ #include <fbpict.h> #endif #include <miline.h> +#include <shmint.h> #include <sys/time.h> #include <sys/mman.h> @@ -61,7 +62,8 @@ #define USE_INPLACE 1 #define USE_WIDE_SPANS 1 /* -1 force CPU, 1 force GPU */ #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */ -#define USE_BO_FOR_SCRATCH_PIXMAP 1 +#define USE_SHM_VMAP 0 +#define PREFER_VMAP 0 #define MIGRATE_ALL 0 @@ -302,27 +304,21 @@ sna_pixmap_alloc_cpu(struct sna *sna, DBG(("%s: pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber)); assert(priv->stride); - if ((sna->kgem.has_cpu_bo || (priv->create & KGEM_CAN_CREATE_GPU) == 0) && - (priv->create & KGEM_CAN_CREATE_CPU)) { + if (priv->create & KGEM_CAN_CREATE_CPU) { DBG(("%s: allocating CPU buffer (%dx%d)\n", __FUNCTION__, pixmap->drawable.width, pixmap->drawable.height)); - priv->cpu_bo = kgem_create_2d(&sna->kgem, - pixmap->drawable.width, - pixmap->drawable.height, - pixmap->drawable.bitsPerPixel, - I915_TILING_NONE, - from_gpu ? 
0 : CREATE_CPU_MAP | CREATE_INACTIVE); + priv->cpu_bo = kgem_create_cpu_2d(&sna->kgem, + pixmap->drawable.width, + pixmap->drawable.height, + pixmap->drawable.bitsPerPixel, + from_gpu ? 0 : CREATE_CPU_MAP | CREATE_INACTIVE); DBG(("%s: allocated CPU handle=%d\n", __FUNCTION__, priv->cpu_bo->handle)); if (priv->cpu_bo) { priv->ptr = kgem_bo_map__cpu(&sna->kgem, priv->cpu_bo); - if (priv->ptr == NULL) { - kgem_bo_destroy(&sna->kgem, priv->cpu_bo); - priv->cpu_bo = NULL; - } else - priv->stride = priv->cpu_bo->pitch; + priv->stride = priv->cpu_bo->pitch; } } @@ -349,7 +345,8 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv) if (priv->cpu_bo) { DBG(("%s: discarding CPU buffer, handle=%d, size=%d\n", __FUNCTION__, priv->cpu_bo->handle, kgem_bo_size(priv->cpu_bo))); - + if (priv->cpu_bo->sync) + kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo); kgem_bo_destroy(&sna->kgem, priv->cpu_bo); priv->cpu_bo = NULL; } else @@ -377,14 +374,6 @@ static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv) if (priv->ptr) sna_pixmap_free_cpu(sna, priv); - if (priv->cpu_bo) { - if (priv->cpu_bo->vmap && kgem_bo_is_busy(priv->cpu_bo)) { - list_add_tail(&priv->list, &sna->deferred_free); - return false; - } - kgem_bo_destroy(&sna->kgem, priv->cpu_bo); - } - if (!sna->freed_pixmap && priv->header) { sna->freed_pixmap = pixmap; assert(priv->ptr == NULL); @@ -524,7 +513,6 @@ _sna_pixmap_reset(PixmapPtr pixmap) assert(pixmap->drawable.type == DRAWABLE_PIXMAP); assert(pixmap->drawable.class == 0); - assert(pixmap->drawable.id == 0); assert(pixmap->drawable.x == 0); assert(pixmap->drawable.y == 0); @@ -561,11 +549,6 @@ struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap) pixmap->usage_hint)); switch (pixmap->usage_hint) { - case CREATE_PIXMAP_USAGE_SCRATCH_HEADER: -#if !FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER - if (sna->kgem.has_vmap) - break; -#endif case CREATE_PIXMAP_USAGE_GLYPH_PICTURE: DBG(("%s: not attaching due to crazy usage: %d\n", __FUNCTION__, pixmap->usage_hint)); @@ -594,15 +577,6 @@ struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap) pixmap->drawable.width, pixmap->drawable.height); - if (pixmap->usage_hint == CREATE_PIXMAP_USAGE_SCRATCH_HEADER) { - priv->cpu_bo = kgem_create_map(&sna->kgem, - pixmap->devPrivate.ptr, - pixmap_size(pixmap), - 0); - if (priv->cpu_bo) - priv->cpu_bo->pitch = pixmap->devKind; - } - return priv; } @@ -630,6 +604,75 @@ create_pixmap(struct sna *sna, ScreenPtr screen, } static PixmapPtr +sna_pixmap_create_shm(ScreenPtr screen, + int width, int height, int depth, + char *addr) +{ + struct sna *sna = to_sna_from_screen(screen); + int bpp = BitsPerPixel(depth); + int pitch = PixmapBytePad(width, depth); + struct sna_pixmap *priv; + PixmapPtr pixmap; + + DBG(("%s(%d, %d, %d)\n", __FUNCTION__, + width, height, depth)); + + if (sna->freed_pixmap) { + pixmap = sna->freed_pixmap; + sna->freed_pixmap = NULL; + + pixmap->usage_hint = -1; + pixmap->refcnt = 1; + + pixmap->drawable.width = width; + pixmap->drawable.height = height; + pixmap->drawable.depth = depth; + pixmap->drawable.bitsPerPixel = bpp; + pixmap->drawable.serialNumber = NEXT_SERIAL_NUMBER; + + DBG(("%s: serial=%ld, %dx%d\n", + __FUNCTION__, + pixmap->drawable.serialNumber, + pixmap->drawable.width, + pixmap->drawable.height)); + + priv = _sna_pixmap_reset(pixmap); + } else { + pixmap = create_pixmap(sna, screen, 0, 0, depth, -1); + if (pixmap == NullPixmap) + return NullPixmap; + + pixmap->drawable.width = width; + pixmap->drawable.height = height; + pixmap->drawable.depth = 
depth; + pixmap->drawable.bitsPerPixel = bpp; + + priv = __sna_pixmap_attach(sna, pixmap); + if (!priv) { + fbDestroyPixmap(pixmap); + return NullPixmap; + } + } + + priv->cpu_bo = kgem_create_map(&sna->kgem, addr, pitch*height, false); + if (priv->cpu_bo == NULL) { + free(priv); + fbDestroyPixmap(pixmap); + return GetScratchPixmapHeader(screen, width, height, depth, + bpp, pitch, addr); + } + kgem_bo_set_sync(&sna->kgem, priv->cpu_bo); + priv->cpu_bo->pitch = pitch; + + priv->header = true; + sna_damage_all(&priv->cpu_damage, width, height); + + pixmap->devKind = pitch; + pixmap->devPrivate.ptr = addr; + return pixmap; +} + +static PixmapPtr sna_pixmap_create_scratch(ScreenPtr screen, int width, int height, int depth, uint32_t tiling) @@ -723,6 +766,11 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen, DBG(("%s(%d, %d, %d, usage=%x)\n", __FUNCTION__, width, height, depth, usage)); + if ((width|height) == 0) { + usage = -1; + goto fallback; + } + if (!sna->have_render) goto fallback; @@ -733,11 +781,6 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen, goto fallback; } -#if FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER - if (width == 0 || height == 0) - goto fallback; -#endif - if (usage == CREATE_PIXMAP_USAGE_SCRATCH) { if (flags & KGEM_CAN_CREATE_GPU) return sna_pixmap_create_scratch(screen, @@ -919,14 +962,16 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags) sna_damage_destroy(&priv->cpu_damage); priv->undamaged = false; list_del(&priv->list); - if (priv->cpu_bo) + if (priv->cpu_bo) { + assert(!priv->cpu_bo->sync); sna_pixmap_free_cpu(sna, priv); + } return true; } skip_inplace_map: - if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) { + if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo)) { if (priv->cpu_bo->exec == NULL) kgem_retire(&sna->kgem); @@ -941,6 +986,7 @@ skip_inplace_map: list_del(&priv->list); priv->undamaged = false; } + assert(!priv->cpu_bo->sync); sna_pixmap_free_cpu(sna, priv); } } @@ -982,7 +1028,7 @@ skip_inplace_map: } if (priv->clear) { - if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) + if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo)) sna_pixmap_free_cpu(sna, priv); sna_damage_destroy(&priv->gpu_damage); priv->undamaged = true; @@ -1282,7 +1328,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, } } - if (priv->cpu_bo && !priv->cpu_bo->vmap) { + if (priv->cpu_bo && !priv->cpu_bo->sync) { if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL) kgem_retire(&sna->kgem); if (sync_will_stall(priv->cpu_bo)) { @@ -1372,7 +1418,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable, assert(pixmap_contains_damage(pixmap, priv->gpu_damage)); ok = FALSE; - if (priv->cpu_bo && sna->kgem.gen >= 60) + if (priv->cpu_bo && sna->kgem.gen >= 30) ok = sna->render.copy_boxes(sna, GXcopy, pixmap, priv->gpu_bo, 0, 0, pixmap, priv->cpu_bo, 0, 0, @@ -1910,6 +1956,9 @@ use_cpu_bo: if (priv->cpu_bo == NULL) return NULL; + if (priv->cpu_bo->sync && !kgem_bo_is_busy(priv->cpu_bo)) + return NULL; + /* Continue to use the shadow pixmap once mapped */ if (pixmap->devPrivate.ptr) { /* But only if we do not need to sync the CPU bo */ @@ -2084,6 +2133,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags) sna_damage_destroy(&priv->cpu_damage); priv->undamaged = false; list_del(&priv->list); + assert(!priv->cpu_bo->sync); sna_pixmap_free_cpu(to_sna_from_pixmap(pixmap), priv); } @@ -2098,8 +2148,6 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags) BoxPtr box; int n; - assert(pixmap->usage_hint != 
CREATE_PIXMAP_USAGE_SCRATCH_HEADER); - DBG(("%s(pixmap=%ld, usage=%d)\n", __FUNCTION__, pixmap->drawable.serialNumber, pixmap->usage_hint)); @@ -2209,8 +2257,10 @@ done: pixmap->drawable.height); if (DAMAGE_IS_ALL(priv->gpu_damage)) { priv->undamaged = false; - if (priv->ptr) + if (priv->ptr) { + assert(!priv->cpu_bo->sync); sna_pixmap_free_cpu(sna, priv); + } } active: return sna_pixmap_mark_active(to_sna_from_pixmap(pixmap), priv); @@ -2410,8 +2460,6 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, PixmapPtr pixmap = get_drawable_pixmap(drawable); struct sna *sna = to_sna_from_pixmap(pixmap); struct sna_pixmap *priv = sna_pixmap(pixmap); - struct kgem_bo *src_bo; - Bool ok = FALSE; BoxPtr box; int nbox; int16_t dx, dy; @@ -2423,14 +2471,16 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, __FUNCTION__, nbox, box->x1, box->y1, box->x2, box->y2)); + if (gc->alu != GXcopy) + return FALSE; + if (priv->gpu_bo == NULL && !sna_pixmap_create_mappable_gpu(pixmap)) return FALSE; assert(priv->gpu_bo); - if (gc->alu == GXcopy && - !priv->pinned && nbox == 1 && + if (!priv->pinned && nbox == 1 && box->x1 <= 0 && box->y1 <= 0 && box->x2 >= pixmap->drawable.width && box->y2 >= pixmap->drawable.height) @@ -2440,25 +2490,10 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, x += dx + drawable->x; y += dy + drawable->y; - src_bo = kgem_create_map(&sna->kgem, bits, stride*h, 1); - if (src_bo) { - src_bo->pitch = stride; - ok = sna->render.copy_boxes(sna, gc->alu, - pixmap, src_bo, -x, -y, - pixmap, priv->gpu_bo, 0, 0, - box, nbox); - kgem_bo_destroy(&sna->kgem, src_bo); - } - - if (!ok && gc->alu == GXcopy) - ok = sna_write_boxes(sna, pixmap, - priv->gpu_bo, 0, 0, - bits, - stride, - -x, -y, - box, nbox); - - return ok; + return sna_write_boxes(sna, pixmap, + priv->gpu_bo, 0, 0, + bits, stride, -x, -y, + box, nbox); } static bool upload_inplace(struct sna *sna, @@ -2561,7 +2596,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL) kgem_retire(&sna->kgem); if (sync_will_stall(priv->cpu_bo)) { - if (priv->cpu_bo->vmap) { + if (priv->cpu_bo->sync) { if (sna_put_image_upload_blt(drawable, gc, region, x, y, w, h, bits, stride)) { if (!DAMAGE_IS_ALL(priv->gpu_damage)) { @@ -2603,6 +2638,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region, list_del(&priv->list); priv->undamaged = false; } + assert(!priv->cpu_bo->sync); sna_pixmap_free_cpu(sna, priv); } } @@ -3236,6 +3272,22 @@ static bool copy_use_gpu_bo(struct sna *sna, return kgem_bo_is_busy(priv->cpu_bo); } +static bool +copy_use_cpu_bo(struct sna_pixmap *priv, struct kgem_bo *dst_bo) +{ + if (priv == NULL || priv->cpu_bo == NULL) + return false; + + if (PREFER_VMAP) { + return true; + } else { + if (kgem_bo_is_busy(priv->cpu_bo) || kgem_bo_is_busy(dst_bo)) + return true; + + return !priv->cpu_bo->sync; + } +} + static void sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc, BoxPtr box, int n, @@ -3433,7 +3485,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc, RegionTranslate(®ion, -dst_dx, -dst_dy); } } - } else if (src_priv && src_priv->cpu_bo) { + } else if (copy_use_cpu_bo(src_priv, dst_priv->gpu_bo)) { if (!sna->render.copy_boxes(sna, alu, src_pixmap, src_priv->cpu_bo, src_dx, src_dy, dst_pixmap, dst_priv->gpu_bo, dst_dx, dst_dy, @@ -11445,17 +11497,30 @@ static Bool sna_change_window_attributes(WindowPtr win, unsigned long mask) } static void 
+sna_accel_reply_callback(CallbackListPtr *list, + pointer user_data, pointer call_data) +{ + struct sna *sna = user_data; + ReplyInfoRec *info = call_data; + + if (sna->flush || !info->startOfReply) + return; + + sna->flush = sna->kgem.flush || sna->kgem.sync; +} + +static void sna_accel_flush_callback(CallbackListPtr *list, pointer user_data, pointer call_data) { struct sna *sna = user_data; struct list preserve; - if ((sna->kgem.sync|sna->kgem.flush) == 0 && - list_is_empty(&sna->dirty_pixmaps)) + if (!sna->flush) return; - DBG(("%s\n", __FUNCTION__)); + DBG(("%s: need_sync=%d, need_flush=%d, dirty? %d\n", __FUNCTION__, + sna->kgem.sync!=NULL, sna->kgem.flush, !list_is_empty(&sna->dirty_pixmaps))); /* flush any pending damage from shadow copies to tfp clients */ list_init(&preserve); @@ -11476,35 +11541,9 @@ sna_accel_flush_callback(CallbackListPtr *list, kgem_submit(&sna->kgem); sna->kgem.flush_now = 0; - if (sna->kgem.sync) { - kgem_sync(&sna->kgem); + kgem_sync(&sna->kgem); - while (!list_is_empty(&sna->deferred_free)) { - struct sna_pixmap *priv = - list_first_entry(&sna->deferred_free, - struct sna_pixmap, - list); - list_del(&priv->list); - kgem_bo_destroy(&sna->kgem, priv->cpu_bo); - fbDestroyPixmap(priv->pixmap); - free(priv); - } - } -} - -static void sna_deferred_free(struct sna *sna) -{ - struct sna_pixmap *priv, *next; - - list_for_each_entry_safe(priv, next, &sna->deferred_free, list) { - if (kgem_bo_is_busy(priv->cpu_bo)) - continue; - - list_del(&priv->list); - kgem_bo_destroy(&sna->kgem, priv->cpu_bo); - fbDestroyPixmap(priv->pixmap); - free(priv); - } + sna->flush = false; } static struct sna_pixmap *sna_accel_scanout(struct sna *sna) @@ -11768,6 +11807,7 @@ static void sna_accel_inactive(struct sna *sna) sna_damage_destroy(&priv->cpu_damage); list_del(&priv->list); + assert(!priv->cpu_bo->sync); sna_pixmap_free_cpu(sna, priv); priv->undamaged = false; @@ -11819,10 +11859,14 @@ Bool sna_accel_pre_init(struct sna *sna) return TRUE; } +static ShmFuncs shm_funcs = { sna_pixmap_create_shm, NULL }; + Bool sna_accel_init(ScreenPtr screen, struct sna *sna) { const char *backend; + if (!AddCallback(&ReplyCallback, sna_accel_reply_callback, sna)) + return FALSE; if (!AddCallback(&FlushCallback, sna_accel_flush_callback, sna)) return FALSE; @@ -11830,7 +11874,6 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna) screen->RealizeFont = sna_realize_font; screen->UnrealizeFont = sna_unrealize_font; - list_init(&sna->deferred_free); list_init(&sna->dirty_pixmaps); list_init(&sna->active_pixmaps); list_init(&sna->inactive_clock[0]); @@ -11866,6 +11909,9 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna) } #endif + if (USE_SHM_VMAP && sna->kgem.has_vmap) + ShmRegisterFuncs(screen, &shm_funcs); + backend = "no"; sna->have_render = false; sna->default_tiling = I915_TILING_X; @@ -11933,6 +11979,7 @@ void sna_accel_close(struct sna *sna) sna_glyphs_close(sna); DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna); + DeleteCallback(&ReplyCallback, sna_accel_reply_callback, sna); kgem_cleanup_cache(&sna->kgem); } @@ -11976,8 +12023,6 @@ void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready) for (id = 0; id < NUM_TIMERS; id++) if (active & (1 << id) && FD_ISSET(sna->timer[id], ready)) sna->timer_ready |= 1 << id; - - sna_deferred_free(sna); } void sna_accel_free(struct sna *sna) diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c index a9ec8998..8c51a77c 100644 --- a/src/sna/sna_blt.c +++ b/src/sna/sna_blt.c @@ -907,7 +907,7 @@ prepare_blt_clear(struct sna *sna, { 
DBG(("%s\n", __FUNCTION__)); - op->blt = blt_composite_fill; + op->blt = blt_composite_fill; if (op->dst.x|op->dst.y) { op->box = blt_composite_fill_box; op->boxes = blt_composite_fill_boxes; @@ -915,7 +915,7 @@ prepare_blt_clear(struct sna *sna, op->box = blt_composite_fill_box_no_offset; op->boxes = blt_composite_fill_boxes_no_offset; } - op->done = nop_done; + op->done = nop_done; return sna_blt_fill_init(sna, &op->u.blt, op->dst.bo, @@ -930,7 +930,7 @@ prepare_blt_fill(struct sna *sna, { DBG(("%s\n", __FUNCTION__)); - op->blt = blt_composite_fill; + op->blt = blt_composite_fill; if (op->dst.x|op->dst.y) { op->box = blt_composite_fill_box; op->boxes = blt_composite_fill_boxes; @@ -938,7 +938,7 @@ prepare_blt_fill(struct sna *sna, op->box = blt_composite_fill_box_no_offset; op->boxes = blt_composite_fill_boxes_no_offset; } - op->done = nop_done; + op->done = nop_done; return sna_blt_fill_init(sna, &op->u.blt, op->dst.bo, op->dst.pixmap->drawable.bitsPerPixel, @@ -1126,9 +1126,9 @@ prepare_blt_copy(struct sna *sna, DBG(("%s\n", __FUNCTION__)); if (sna->kgem.gen >= 60) - op->done = gen6_blt_copy_done; + op->done = gen6_blt_copy_done; else - op->done = nop_done; + op->done = nop_done; if (alpha_fixup) { op->blt = blt_composite_copy_with_alpha; @@ -1153,14 +1153,6 @@ prepare_blt_copy(struct sna *sna, } } -static void blt_vmap_done(struct sna *sna, const struct sna_composite_op *op) -{ - struct kgem_bo *bo = (struct kgem_bo *)op->u.blt.src_pixmap; - - if (bo) - kgem_bo_destroy(&sna->kgem, bo); -} - fastcall static void blt_put_composite(struct sna *sna, const struct sna_composite_op *op, @@ -1395,26 +1387,18 @@ prepare_blt_put(struct sna *sna, uint32_t alpha_fixup) { PixmapPtr src = op->u.blt.src_pixmap; - struct sna_pixmap *priv = sna_pixmap_attach(src); - struct kgem_bo *src_bo = NULL; - struct kgem_bo *free_bo = NULL; + struct sna_pixmap *priv; + struct kgem_bo *src_bo; DBG(("%s\n", __FUNCTION__)); - if (priv) { + op->done = nop_done; + + src_bo = NULL; + priv = _sna_pixmap_attach(src); + if (priv) src_bo = priv->cpu_bo; - } else { - src_bo = kgem_create_map(&sna->kgem, - src->devPrivate.ptr, - pixmap_size(src), - 0); - free_bo = src_bo; - } if (src_bo) { - op->u.blt.src_pixmap = (void *)free_bo; - op->done = blt_vmap_done; - - src_bo->pitch = src->devKind; if (alpha_fixup) { op->blt = blt_composite_copy_with_alpha; op->box = blt_composite_copy_box_with_alpha; @@ -1435,12 +1419,15 @@ prepare_blt_put(struct sna *sna, GXcopy); } } else { - if (alpha_fixup) - return FALSE; /* XXX */ - if (!sna_pixmap_move_to_cpu(src, MOVE_READ)) return FALSE; + assert(src->devKind); + assert(src->devPrivate.ptr); + + if (alpha_fixup) + return FALSE; /* XXX */ + if (alpha_fixup) { op->u.blt.pixel = alpha_fixup; op->blt = blt_put_composite_with_alpha; @@ -1451,7 +1438,6 @@ prepare_blt_put(struct sna *sna, op->box = blt_put_composite_box; op->boxes = blt_put_composite_boxes; } - op->done = nop_done; } return TRUE; |