Diffstat (limited to 'sys/dev/pci/drm/ttm/ttm_bo_util.c')
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_util.c  124
1 file changed, 78 insertions(+), 46 deletions(-)
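The central API change in this diff is the replacement of TTM's driver-specific sync_obj hooks with a struct fence attached to each buffer's reservation object (see the new ttm_bo_move_accel_cleanup() signature and the reservation_object_add_excl_fence() calls below). As a minimal sketch of how a driver's copy-based move would use the updated interface -- the driver hook exdrv_bo_move_blit() and the copy helper exdrv_copy() are hypothetical, not part of this patch:

```c
/*
 * Illustrative sketch only, not part of the patch.  Only
 * ttm_bo_move_accel_cleanup(), struct fence and fence_put() come from
 * the updated API in this diff; the exdrv_* names are hypothetical.
 */
#include <dev/pci/drm/ttm/ttm_bo_driver.h>

static int
exdrv_bo_move_blit(struct ttm_buffer_object *bo, bool evict,
    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct fence *fence;
	int r;

	/* Queue the GPU copy; the driver returns the fence that signals it. */
	r = exdrv_copy(bo, &bo->mem, new_mem, &fence);
	if (r)
		return r;

	/*
	 * Hand the fence to TTM, which attaches it to bo->resv as the
	 * exclusive fence instead of going through the old sync_obj path.
	 */
	r = ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu, new_mem);
	fence_put(fence);
	return r;
}
```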
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_util.c b/sys/dev/pci/drm/ttm/ttm_bo_util.c
index 4f268efc7b1..da6c459bd39 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_util.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_util.c
@@ -1,4 +1,3 @@
-/* $OpenBSD: ttm_bo_util.c,v 1.18 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -32,11 +31,8 @@
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/drm_vma_manager.h>
-
-int ttm_mem_reg_ioremap(struct ttm_bo_device *, struct ttm_mem_reg *,
- void **);
-void ttm_mem_reg_iounmap(struct ttm_bo_device *, struct ttm_mem_reg *,
- void *);
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/linux_ww_mutex.h>
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
@@ -87,6 +83,7 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
mutex_lock(&man->io_reserve_mutex);
return 0;
}
+EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
@@ -95,6 +92,7 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
mutex_unlock(&man->io_reserve_mutex);
}
+EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
@@ -112,8 +110,9 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
return 0;
}
-static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret = 0;
@@ -135,9 +134,10 @@ retry:
}
return ret;
}
+EXPORT_SYMBOL(ttm_mem_io_reserve);
-static void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+void ttm_mem_io_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -150,6 +150,7 @@ static void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev->driver->io_mem_free(bdev, mem);
}
+EXPORT_SYMBOL(ttm_mem_io_free);
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
@@ -182,7 +183,7 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
}
}
-int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -206,7 +207,8 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
flags = 0;
if (bus_space_map(bdev->memt, mem->bus.base + mem->bus.offset,
- mem->bus.size, BUS_SPACE_MAP_LINEAR | flags, &mem->bus.bsh)) {
+ mem->bus.size, BUS_SPACE_MAP_LINEAR | flags,
+ &mem->bus.bsh)) {
printf("%s bus_space_map failed\n", __func__);
return -ENOMEM;
}
@@ -224,7 +226,7 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
return 0;
}
-void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void *virtual)
{
struct ttm_mem_type_manager *man;
@@ -263,26 +265,34 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+#ifdef CONFIG_X86
+ dst = kmap_atomic_prot(d, prot);
+#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
dst = vmap(&d, 1, 0, prot);
else
dst = kmap(d);
+#endif
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
+#ifdef CONFIG_X86
+ kunmap_atomic(dst);
+#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(dst, PAGE_SIZE);
else
kunmap(d);
+#endif
return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
unsigned long page,
- vm_prot_t prot)
+ pgprot_t prot)
{
struct vm_page *s = ttm->pages[page];
void *src;
@@ -291,19 +301,27 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+#ifdef CONFIG_X86
+ src = kmap_atomic_prot(s, prot);
+#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
src = vmap(&s, 1, 0, prot);
else
src = kmap(s);
+#endif
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
+#ifdef CONFIG_X86
+ kunmap_atomic(src);
+#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(src, PAGE_SIZE);
else
kunmap(s);
+#endif
return 0;
}
@@ -339,10 +357,14 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
goto out2;
/*
- * Move nonexistent data. NOP.
+ * Don't move nonexistent data. Clear destination instead.
*/
- if (old_iomap == NULL && ttm == NULL)
+ if (old_iomap == NULL &&
+ (ttm == NULL || (ttm->state == tt_unpopulated &&
+ !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
+ memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
+ }
/*
* TTM might be null for moves within the same region.
@@ -429,8 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_buffer_object **new_obj)
{
struct ttm_buffer_object *fbo;
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_driver *driver = bdev->driver;
+ int ret;
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
@@ -443,7 +464,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- init_waitqueue_head(&fbo->event_queue);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
@@ -451,21 +471,47 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
- spin_lock(&bdev->fence_lock);
- if (bo->sync_obj)
- fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
- else
- fbo->sync_obj = NULL;
- spin_unlock(&bdev->fence_lock);
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
fbo->acc_size = 0;
+ fbo->resv = &fbo->ttm_resv;
+ reservation_object_init(fbo->resv);
+ ret = ww_mutex_trylock(&fbo->resv->lock);
+ WARN_ON(!ret);
*new_obj = fbo;
return 0;
}
+#ifdef __linux__
+pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+{
+ /* Cached mappings need no adjustment */
+ if (caching_flags & TTM_PL_FLAG_CACHED)
+ return tmp;
+
+#if defined(__i386__) || defined(__x86_64__)
+ if (caching_flags & TTM_PL_FLAG_WC)
+ tmp = pgprot_writecombine(tmp);
+ else if (boot_cpu_data.x86 > 3)
+ tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
+ defined(__powerpc__)
+ if (caching_flags & TTM_PL_FLAG_WC)
+ tmp = pgprot_writecombine(tmp);
+ else
+ tmp = pgprot_noncached(tmp);
+#endif
+#if defined(__sparc__) || defined(__mips__)
+ tmp = pgprot_noncached(tmp);
+#endif
+ return tmp;
+}
+EXPORT_SYMBOL(ttm_io_prot);
+#endif
+
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#ifdef PMAP_WC
@@ -475,15 +521,14 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
#endif
return PMAP_NOCACHE;
}
-EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long offset,
unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_reg *mem = &bo->mem;
int flags;
+ struct ttm_mem_reg *mem = &bo->mem;
if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
@@ -539,9 +584,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
- prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
- PAGE_KERNEL :
- ttm_io_prot(mem->placement, PAGE_KERNEL);
+ prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
@@ -566,7 +609,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (start_page > bo->num_pages)
return -EINVAL;
#if 0
- if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+ if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
return -EPERM;
#endif
(void) ttm_mem_io_lock(man, false);
@@ -617,30 +660,20 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- void *sync_obj,
+ struct fence *fence,
bool evict,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_bo_driver *driver = bdev->driver;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
struct ttm_buffer_object *ghost_obj;
- void *tmp_obj = NULL;
- spin_lock(&bdev->fence_lock);
- if (bo->sync_obj) {
- tmp_obj = bo->sync_obj;
- bo->sync_obj = NULL;
- }
- bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ reservation_object_add_excl_fence(bo->resv, fence);
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
- spin_unlock(&bdev->fence_lock);
- if (tmp_obj)
- driver->sync_obj_unref(&tmp_obj);
if (ret)
return ret;
@@ -661,14 +694,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*/
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
- spin_unlock(&bdev->fence_lock);
- if (tmp_obj)
- driver->sync_obj_unref(&tmp_obj);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
+ reservation_object_add_excl_fence(ghost_obj->resv, fence);
+
/**
* If we're not moving to fixed memory, the TTM object
* needs to stay alive. Otherwise hang it on the ghost