Diffstat (limited to 'sys/dev/pci')
-rw-r--r--  sys/dev/pci/drm/drmP.h             137
-rw-r--r--  sys/dev/pci/drm/drm_drv.c          559
-rw-r--r--  sys/dev/pci/drm/drm_gem.c         1355
-rw-r--r--  sys/dev/pci/drm/drm_linux.c         13
-rw-r--r--  sys/dev/pci/drm/drm_modes.c          5
-rw-r--r--  sys/dev/pci/drm/files.drm            4
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem.c     28
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_vm.c      5
8 files changed, 1511 insertions, 595 deletions
diff --git a/sys/dev/pci/drm/drmP.h b/sys/dev/pci/drm/drmP.h
index 4cff9515d94..a23fa2b23c4 100644
--- a/sys/dev/pci/drm/drmP.h
+++ b/sys/dev/pci/drm/drmP.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: drmP.h,v 1.201 2016/02/05 10:05:12 kettenis Exp $ */
+/* $OpenBSD: drmP.h,v 1.202 2016/04/05 08:22:50 kettenis Exp $ */
/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
*/
@@ -70,6 +70,7 @@
#include "drm_linux.h"
#include "drm_linux_list.h"
#include "drm.h"
+#include "drm_vma_manager.h"
#include "drm_mm.h"
#include "drm_atomic.h"
#include "agp.h"
@@ -347,7 +348,9 @@ struct drm_file {
u_int obj_id; /*next gem id*/
struct list_head fbs;
struct rwlock fbs_lock;
- void *driver_priv;
+
+ struct file *filp;
+ void *driver_priv;
};
/* This structure, in the struct drm_device, is always initialized while
@@ -443,25 +446,68 @@ struct drm_mem {
* Subdrivers (radeon, intel, etc) may have other locking requirement, these
* requirements will be detailed in those drivers.
*/
+
+/**
+ * This structure defines the drm_mm memory object, which will be used by the
+ * DRM for its buffer objects.
+ */
struct drm_gem_object {
- struct uvm_object uobj;
- SPLAY_ENTRY(drm_gem_object) entry;
- struct drm_device *dev;
- struct uvm_object *uao;
- struct drm_local_map *map;
-
- size_t size;
- int name;
- int handlecount;
-/* any flags over 0x00000010 are device specific */
-#define DRM_BUSY 0x00000001
-#define DRM_WANTED 0x00000002
- u_int do_flags;
- uint32_t read_domains;
- uint32_t write_domain;
-
- uint32_t pending_read_domains;
- uint32_t pending_write_domain;
+ /** Reference count of this object */
+ struct kref refcount;
+
+ /**
+ * handle_count - gem file_priv handle count of this object
+ *
+ * Each handle also holds a reference. Note that when the handle_count
+ * drops to 0 any global names (e.g. the id in the flink namespace) will
+ * be cleared.
+ *
+ * Protected by dev->object_name_lock.
+ */
+ unsigned handle_count;
+
+ /** Related drm device */
+ struct drm_device *dev;
+
+ /** File representing the shmem storage */
+ struct file *filp;
+
+ /* Mapping info for this object */
+ struct drm_vma_offset_node vma_node;
+
+ /**
+ * Size of the object, in bytes. Immutable over the object's
+ * lifetime.
+ */
+ size_t size;
+
+ /**
+ * Global name for this object, starts at 1. 0 means unnamed.
+ * Access is covered by the object_name_lock in the related drm_device
+ */
+ int name;
+
+ /**
+ * Memory domains. These monitor which caches contain read/write data
+ * related to the object. When transitioning from one set of domains
+ * to another, the driver is called to ensure that caches are suitably
+ * flushed and invalidated
+ */
+ uint32_t read_domains;
+ uint32_t write_domain;
+
+ /**
+ * While validating an exec operation, the
+ * new read/write domain values are computed here.
+ * They will be transferred to the above values
+ * at the point that any cache flushing occurs
+ */
+ uint32_t pending_read_domains;
+ uint32_t pending_write_domain;
+
+ struct uvm_object uobj;
+ SPLAY_ENTRY(drm_gem_object) entry;
+ struct uvm_object *uao;
};
struct drm_handle {
@@ -558,8 +604,10 @@ struct drm_driver_info {
#define DRIVER_IRQ_SHARED 0x80
#define DRIVER_GEM 0x1000
#define DRIVER_MODESET 0x2000
+#define DRIVER_PRIME 0x4000
u_int flags;
+#define driver_features flags
};
#include "drm_crtc.h"
@@ -683,12 +731,12 @@ struct drm_device {
struct drm_agp_head *agp;
void *dev_private;
+ struct address_space *dev_mapping;
struct drm_local_map *agp_buffer_map;
struct drm_mode_config mode_config; /* Current mode config */
/* GEM info */
- struct mutex obj_name_lock;
atomic_t obj_count;
u_int obj_name;
atomic_t obj_memory;
@@ -696,6 +744,12 @@ struct drm_device {
struct pool objpl;
/* mode stuff */
+
+ /** \name GEM information */
+ /*@{ */
+ struct rwlock object_name_lock;
+ struct drm_vma_offset_manager *vma_offset_manager;
+ /*@} */
};
struct drm_attach_args {
@@ -743,6 +797,7 @@ int drm_order(unsigned long);
/* File operations helpers (drm_fops.c) */
struct drm_file *drm_find_file_by_minor(struct drm_device *, int);
+struct drm_device *drm_get_device_from_kdev(dev_t);
/* Memory management support (drm_memory.c) */
void *drm_alloc(size_t);
@@ -842,13 +897,18 @@ drm_sysfs_hotplug_event(struct drm_device *dev)
}
/* Graphics Execution Manager library functions (drm_gem.c) */
+int drm_gem_init(struct drm_device *dev);
+void drm_gem_destroy(struct drm_device *dev);
void drm_gem_object_release(struct drm_gem_object *obj);
+void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
+void drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size);
-void drm_unref(struct uvm_object *);
-void drm_ref(struct uvm_object *);
-
+int drm_gem_handle_create_tail(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep);
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -861,30 +921,39 @@ struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
struct drm_file *filp,
u32 handle);
struct drm_gem_object *drm_gem_object_find(struct drm_file *, u32);
-int drm_gem_close_ioctl(struct drm_device *, void *, struct drm_file *);
-int drm_gem_flink_ioctl(struct drm_device *, void *, struct drm_file *);
-int drm_gem_open_ioctl(struct drm_device *, void *, struct drm_file *);
+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
static __inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
- drm_ref(&obj->uobj);
+ kref_get(&obj->refcount);
}
static __inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
- drm_unref(&obj->uobj);
+ if (obj != NULL)
+ kref_put(&obj->refcount, drm_gem_object_free);
}
static __inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_unref(&obj->uobj);
- mutex_unlock(&dev->struct_mutex);
+ if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
+ struct drm_device *dev = obj->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
+ drm_gem_object_free(&obj->refcount);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
int drm_gem_dumb_destroy(struct drm_file *file,
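A minimal sketch (not part of this commit) of how a driver consumes the new kref-based inlines: drm_gem_object_lookup() and drm_gem_object_unreference_unlocked() are declared above, while the ioctl and its argument struct are assumed purely for illustration.

/*
 * Hypothetical ioctl showing the lookup/unreference pairing.
 */
int
example_obj_info_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct example_args *args = data;	/* hypothetical args struct */
	struct drm_gem_object *obj;

	/* a successful lookup returns with a reference held */
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	args->size = obj->size;

	/* drop it; the final put calls drm_gem_object_free() */
	drm_gem_object_unreference_unlocked(obj);
	return 0;
}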
diff --git a/sys/dev/pci/drm/drm_drv.c b/sys/dev/pci/drm/drm_drv.c
index c00d1195f1c..b67b38cbd86 100644
--- a/sys/dev/pci/drm/drm_drv.c
+++ b/sys/dev/pci/drm/drm_drv.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_drv.c,v 1.145 2016/02/05 10:05:12 kettenis Exp $ */
+/* $OpenBSD: drm_drv.c,v 1.146 2016/04/05 08:22:50 kettenis Exp $ */
/*-
* Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org>
* Copyright © 2008 Intel Corporation
@@ -62,7 +62,6 @@
int drm_debug_flag = 1;
#endif
-struct drm_device *drm_get_device_from_kdev(dev_t);
int drm_firstopen(struct drm_device *);
int drm_lastclose(struct drm_device *);
void drm_attach(struct device *, struct device *, void *);
@@ -85,22 +84,9 @@ int drm_authmagic(struct drm_device *, void *, struct drm_file *);
int drm_file_cmp(struct drm_file *, struct drm_file *);
SPLAY_PROTOTYPE(drm_file_tree, drm_file, link, drm_file_cmp);
-/* functions used by the per-open handle code to grab references to object */
-void drm_gem_object_handle_reference(struct drm_gem_object *);
-void drm_gem_object_handle_unreference(struct drm_gem_object *);
-void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *);
-
-int drm_handle_cmp(struct drm_handle *, struct drm_handle *);
-int drm_name_cmp(struct drm_gem_object *, struct drm_gem_object *);
-int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
- vm_fault_t, vm_prot_t, int);
-boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
int drm_setunique(struct drm_device *, void *, struct drm_file *);
int drm_noop(struct drm_device *, void *, struct drm_file *);
-SPLAY_PROTOTYPE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
-SPLAY_PROTOTYPE(drm_name_tree, drm_gem_object, entry, drm_name_cmp);
-
int drm_getcap(struct drm_device *, void *, struct drm_file *);
int drm_setclientcap(struct drm_device *, void *, struct drm_file *);
@@ -402,6 +388,7 @@ drm_attach(struct device *parent, struct device *self, void *aux)
struct drm_device *dev = (struct drm_device *)self;
struct drm_attach_args *da = aux;
int bus, slot, func;
+ int ret;
dev->dev_private = parent;
dev->driver = da->driver;
@@ -464,14 +451,20 @@ drm_attach(struct device *parent, struct device *self, void *aux)
}
if (dev->driver->flags & DRIVER_GEM) {
- mtx_init(&dev->obj_name_lock, IPL_NONE);
- SPLAY_INIT(&dev->name_tree);
KASSERT(dev->driver->gem_size >= sizeof(struct drm_gem_object));
/* XXX unique name */
pool_init(&dev->objpl, dev->driver->gem_size, 0, 0, 0,
"drmobjpl", NULL);
}
+ if (dev->driver->flags & DRIVER_GEM) {
+ ret = drm_gem_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+ goto error;
+ }
+ }
+
printf("\n");
return;
@@ -488,6 +481,9 @@ drm_detach(struct device *self, int flags)
drm_lastclose(dev);
if (dev->driver->flags & DRIVER_GEM)
+ drm_gem_destroy(dev);
+
+ if (dev->driver->flags & DRIVER_GEM)
pool_destroy(&dev->objpl);
extent_destroy(dev->handle_ext);
@@ -672,6 +668,7 @@ drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
file_priv->kdev = kdev;
file_priv->flags = flags;
+ file_priv->filp = (void *)&file_priv;
file_priv->minor = minor(kdev);
INIT_LIST_HEAD(&file_priv->fbs);
INIT_LIST_HEAD(&file_priv->event_list);
@@ -681,10 +678,8 @@ drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
/* for compatibility root is always authenticated */
file_priv->authenticated = DRM_SUSER(p);
- if (dev->driver->flags & DRIVER_GEM) {
- SPLAY_INIT(&file_priv->obj_tree);
- mtx_init(&file_priv->table_lock, IPL_NONE);
- }
+ if (dev->driver->flags & DRIVER_GEM)
+ drm_gem_open(dev, file_priv);
if (dev->driver->open) {
ret = dev->driver->open(dev, file_priv);
@@ -769,19 +764,10 @@ drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
if (dev->driver->flags & DRIVER_MODESET)
drm_fb_release(dev, file_priv);
+ if (dev->driver->flags & DRIVER_GEM)
+ drm_gem_release(dev, file_priv);
+
mutex_lock(&dev->struct_mutex);
- if (dev->driver->flags & DRIVER_GEM) {
- struct drm_handle *han;
- mtx_enter(&file_priv->table_lock);
- while ((han = SPLAY_ROOT(&file_priv->obj_tree)) != NULL) {
- SPLAY_REMOVE(drm_obj_tree, &file_priv->obj_tree, han);
- mtx_leave(&file_priv->table_lock);
- drm_gem_object_handle_unreference(han->obj);
- drm_free(han);
- mtx_enter(&file_priv->table_lock);
- }
- mtx_leave(&file_priv->table_lock);
- }
dev->buf_pgid = 0;
@@ -1409,497 +1395,6 @@ drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
return ret;
}
-struct uvm_pagerops drm_pgops = {
- NULL,
- drm_ref,
- drm_unref,
- drm_fault,
- drm_flush,
-};
-
-void
-drm_ref(struct uvm_object *uobj)
-{
- uobj->uo_refs++;
-}
-
-void
-drm_unref(struct uvm_object *uobj)
-{
- struct drm_gem_object *obj = (struct drm_gem_object *)uobj;
- struct drm_device *dev = obj->dev;
-
- if (uobj->uo_refs > 1) {
- uobj->uo_refs--;
- return;
- }
-
- /* We own this thing now. It is on no queues, though it may still
- * be bound to the aperture (and on the inactive list, in which case
- * idling the buffer is what triggered the free. Since we know no one
- * else can grab it now, we can nuke with impunity.
- */
- if (dev->driver->gem_free_object != NULL)
- dev->driver->gem_free_object(obj);
-}
-
-boolean_t
-drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
-{
- return (TRUE);
-}
-
-int
-drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
- int npages, int centeridx, vm_fault_t fault_type,
- vm_prot_t access_type, int flags)
-{
- struct vm_map_entry *entry = ufi->entry;
- struct uvm_object *uobj = entry->object.uvm_obj;
- struct drm_gem_object *obj = (struct drm_gem_object *)uobj;
- struct drm_device *dev = obj->dev;
- int ret;
-
- /*
- * we do not allow device mappings to be mapped copy-on-write
- * so we kill any attempt to do so here.
- */
-
- if (UVM_ET_ISCOPYONWRITE(entry)) {
- uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
- return(VM_PAGER_ERROR);
- }
-
- /*
- * We could end up here as the result of a copyin(9) or
- * copyout(9) while handling an ioctl. So we must be careful
- * not to deadlock. Therefore we only block if the quiesce
- * count is zero, which guarantees we didn't enter from within
- * an ioctl code path.
- */
- mtx_enter(&dev->quiesce_mtx);
- if (dev->quiesce && dev->quiesce_count == 0) {
- mtx_leave(&dev->quiesce_mtx);
- uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
- mtx_enter(&dev->quiesce_mtx);
- while (dev->quiesce) {
- msleep(&dev->quiesce, &dev->quiesce_mtx,
- PZERO, "drmflt", 0);
- }
- mtx_leave(&dev->quiesce_mtx);
- return(VM_PAGER_REFAULT);
- }
- dev->quiesce_count++;
- mtx_leave(&dev->quiesce_mtx);
-
- /* Call down into driver to do the magic */
- ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
- entry->start), vaddr, pps, npages, centeridx,
- access_type, flags);
-
- mtx_enter(&dev->quiesce_mtx);
- dev->quiesce_count--;
- if (dev->quiesce)
- wakeup(&dev->quiesce_count);
- mtx_leave(&dev->quiesce_mtx);
-
- return (ret);
-}
-
-/*
- * Code to support memory managers based on the GEM (Graphics
- * Execution Manager) api.
- */
-int
-drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size)
-{
- BUG_ON((size & (PAGE_SIZE -1)) != 0);
-
- obj->dev = dev;
-
- /* uao create can't fail in the 0 case, it just sleeps */
- obj->uao = uao_create(size, 0);
- obj->size = size;
- uvm_objinit(&obj->uobj, &drm_pgops, 1);
-
- atomic_inc(&dev->obj_count);
- atomic_add(obj->size, &dev->obj_memory);
- return 0;
-}
-
-void
-drm_gem_object_release(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
-
- if (obj->uao)
- uao_detach(obj->uao);
-
- atomic_dec(&dev->obj_count);
- atomic_sub(obj->size, &dev->obj_memory);
- if (obj->do_flags & DRM_WANTED) /* should never happen, not on lists */
- wakeup(obj);
-}
-
-/**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
- */
-int
-drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- u32 *handlep)
-{
- struct drm_device *dev = obj->dev;
- struct drm_handle *han;
- int ret;
-
- if ((han = drm_calloc(1, sizeof(*han))) == NULL)
- return -ENOMEM;
-
- han->obj = obj;
- mtx_enter(&file_priv->table_lock);
-again:
- *handlep = han->handle = ++file_priv->obj_id;
- /*
- * Make sure we have no duplicates. this'll hurt once we wrap, 0 is
- * reserved.
- */
- if (han->handle == 0 || SPLAY_INSERT(drm_obj_tree,
- &file_priv->obj_tree, han))
- goto again;
- mtx_leave(&file_priv->table_lock);
-
- drm_gem_object_handle_reference(obj);
-
- if (dev->driver->gem_open_object) {
- ret = dev->driver->gem_open_object(obj, file_priv);
- if (ret) {
- drm_gem_handle_delete(file_priv, *handlep);
- return ret;
- }
- }
-
- return 0;
-}
-
-/**
- * Removes the mapping from handle to filp for this object.
- */
-int
-drm_gem_handle_delete(struct drm_file *filp, u32 handle)
-{
- struct drm_device *dev;
- struct drm_gem_object *obj;
- struct drm_handle *han, find;
-
- find.handle = handle;
- mtx_enter(&filp->table_lock);
- han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &find);
- if (han == NULL) {
- mtx_leave(&filp->table_lock);
- return -EINVAL;
- }
- obj = han->obj;
- dev = obj->dev;
-
- SPLAY_REMOVE(drm_obj_tree, &filp->obj_tree, han);
- mtx_leave(&filp->table_lock);
-
- drm_free(han);
-
- if (dev->driver->gem_close_object)
- dev->driver->gem_close_object(obj, filp);
- drm_gem_object_handle_unreference_unlocked(obj);
-
- return 0;
-}
-
-/**
- * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
- *
- * This implements the ->dumb_destroy kms driver callback for drivers which use
- * gem to manage their backing storage.
- */
-int
-drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- u32 handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
-/** Returns a reference to the object named by the handle. */
-struct drm_gem_object *
-drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
- u32 handle)
-{
- struct drm_gem_object *obj;
- struct drm_handle *han, search;
-
- mtx_enter(&filp->table_lock);
-
- /* Check if we currently have a reference on the object */
- search.handle = handle;
- han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &search);
- if (han == NULL) {
- mtx_leave(&filp->table_lock);
- return NULL;
- }
- obj = han->obj;
-
- drm_gem_object_reference(obj);
-
- mtx_leave(&filp->table_lock);
-
- return obj;
-}
-
-struct drm_gem_object *
-drm_gem_object_find(struct drm_file *filp, u32 handle)
-{
- struct drm_handle *han, search;
-
- MUTEX_ASSERT_LOCKED(&filp->table_lock);
-
- search.handle = handle;
- han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &search);
- if (han == NULL)
- return NULL;
-
- return han->obj;
-}
-
-/**
- * Releases the handle to an mm object.
- */
-int
-drm_gem_close_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_close *args = data;
- int ret;
-
- if (!(dev->driver->flags & DRIVER_GEM))
- return -ENODEV;
-
- ret = drm_gem_handle_delete(file_priv, args->handle);
-
- return ret;
-}
-
-int
-drm_gem_flink_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_flink *args = data;
- struct drm_gem_object *obj;
-
- if (!(dev->driver->flags & DRIVER_GEM))
- return -ENODEV;
-
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
- if (obj == NULL)
- return -ENOENT;
-
- mtx_enter(&dev->obj_name_lock);
- if (!obj->name) {
-again:
- obj->name = ++dev->obj_name;
- /* 0 is reserved, make sure we don't clash. */
- if (obj->name == 0 || SPLAY_INSERT(drm_name_tree,
- &dev->name_tree, obj))
- goto again;
- /* name holds a reference to the object */
- drm_ref(&obj->uobj);
- }
- mtx_leave(&dev->obj_name_lock);
-
- args->name = (uint64_t)obj->name;
-
- drm_unref(&obj->uobj);
-
- return 0;
-}
-
-/**
- * Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
- */
-int
-drm_gem_open_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_gem_open *args = data;
- struct drm_gem_object *obj, search;
- int ret;
- u32 handle;
-
- if (!(dev->driver->flags & DRIVER_GEM))
- return -ENODEV;
-
- mtx_enter(&dev->obj_name_lock);
- search.name = args->name;
- obj = SPLAY_FIND(drm_name_tree, &dev->name_tree, &search);
- if (obj)
- drm_gem_object_reference(obj);
- mtx_leave(&dev->obj_name_lock);
- if (!obj)
- return -ENOENT;
-
- ret = drm_gem_handle_create(file_priv, obj, &handle);
- drm_gem_object_unreference_unlocked(obj);
- if (ret)
- return ret;
-
- args->handle = handle;
- args->size = obj->size;
-
- return 0;
-}
-
-void
-drm_gem_object_handle_reference(struct drm_gem_object *obj)
-{
- drm_gem_object_reference(obj);
- obj->handlecount++;
-}
-
-void
-drm_gem_object_handle_unreference(struct drm_gem_object *obj)
-{
- /* do this first in case this is the last reference */
- if (--obj->handlecount == 0) {
- struct drm_device *dev = obj->dev;
-
- mtx_enter(&dev->obj_name_lock);
- if (obj->name) {
- SPLAY_REMOVE(drm_name_tree, &dev->name_tree, obj);
- obj->name = 0;
- mtx_leave(&dev->obj_name_lock);
- /* name held a reference to object */
- drm_gem_object_unreference(obj);
- } else {
- mtx_leave(&dev->obj_name_lock);
- }
- }
-
- drm_gem_object_unreference(obj);
-}
-
-void
-drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
-
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * drm_gem_free_mmap_offset - release a fake mmap offset for an object
- * @obj: obj in question
- *
- * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
- */
-void
-drm_gem_free_mmap_offset(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- struct drm_local_map *map = obj->map;
-
- TAILQ_REMOVE(&dev->maplist, map, link);
- obj->map = NULL;
-
- /* NOCOALESCE set, can't fail */
- extent_free(dev->handle_ext, map->ext, map->size, EX_NOWAIT);
-
- drm_free(map);
-}
-
-/**
- * drm_gem_create_mmap_offset - create a fake mmap offset for an object
- * @obj: obj in question
- *
- * GEM memory mapping works by handing back to userspace a fake mmap offset
- * it can use in a subsequent mmap(2) call. The DRM core code then looks
- * up the object based on the offset and sets up the various memory mapping
- * structures.
- *
- * This routine allocates and attaches a fake offset for @obj.
- */
-int
-drm_gem_create_mmap_offset(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
- struct drm_local_map *map;
- int ret;
-
- /* Set the object up for mmap'ing */
- map = drm_calloc(1, sizeof(*map));
- if (map == NULL)
- return -ENOMEM;
-
- map->flags = _DRM_DRIVER;
- map->type = _DRM_GEM;
- map->size = obj->size;
- map->handle = obj;
-
- /* Get a DRM GEM mmap offset allocated... */
- ret = extent_alloc(dev->handle_ext, map->size, PAGE_SIZE, 0,
- 0, EX_NOWAIT, &map->ext);
- if (ret) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOSPC;
- goto out_free_list;
- }
-
- TAILQ_INSERT_TAIL(&dev->maplist, map, link);
- obj->map = map;
- return 0;
-
-out_free_list:
- drm_free(map);
-
- return ret;
-}
-
-struct uvm_object *
-udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
-{
- struct drm_device *dev = drm_get_device_from_kdev(device);
- struct drm_local_map *map;
- struct drm_gem_object *obj;
-
- if (cdevsw[major(device)].d_mmap != drmmmap)
- return NULL;
-
- if (dev == NULL)
- return NULL;
-
- if (dev->driver->mmap)
- return dev->driver->mmap(dev, off, size);
-
- mutex_lock(&dev->struct_mutex);
- TAILQ_FOREACH(map, &dev->maplist, link) {
- if (off >= map->ext && off + size <= map->ext + map->size)
- break;
- }
-
- if (map == NULL || map->type != _DRM_GEM) {
- mutex_unlock(&dev->struct_mutex);
- return NULL;
- }
-
- obj = (struct drm_gem_object *)map->handle;
- drm_ref(&obj->uobj);
- mutex_unlock(&dev->struct_mutex);
- return &obj->uobj;
-}
-
/*
* Compute order. Can be made faster.
*/
@@ -1969,19 +1464,3 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
PCI_VENDOR(id), PCI_PRODUCT(id), lnkcap, lnkcap2);
return 0;
}
-
-int
-drm_handle_cmp(struct drm_handle *a, struct drm_handle *b)
-{
- return (a->handle < b->handle ? -1 : a->handle > b->handle);
-}
-
-int
-drm_name_cmp(struct drm_gem_object *a, struct drm_gem_object *b)
-{
- return (a->name < b->name ? -1 : a->name > b->name);
-}
-
-SPLAY_GENERATE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
-
-SPLAY_GENERATE(drm_name_tree, drm_gem_object, entry, drm_name_cmp);
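With the handle and name bookkeeping moved into drm_gem.c, a driver opts in exactly as before: set DRIVER_GEM in its flags and provide gem_size, and drm_attach() above creates the object pool and calls drm_gem_init(). A sketch, assuming a hypothetical example_gem_object that embeds struct drm_gem_object as its first member:

/*
 * Sketch of a GEM-capable driver declaration; only .flags and
 * .gem_size are taken from this diff, the rest is assumed.
 */
static struct drm_driver_info exampledrm_driver = {
	.flags = DRIVER_GEM | DRIVER_MODESET,
	/* drm_attach() KASSERTs gem_size >= sizeof(struct drm_gem_object) */
	.gem_size = sizeof(struct example_gem_object),
};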
diff --git a/sys/dev/pci/drm/drm_gem.c b/sys/dev/pci/drm/drm_gem.c
new file mode 100644
index 00000000000..7c1f0dfd59d
--- /dev/null
+++ b/sys/dev/pci/drm/drm_gem.c
@@ -0,0 +1,1355 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/drm_vma_manager.h>
+
+#include <uvm/uvm.h>
+
+int drm_handle_cmp(struct drm_handle *, struct drm_handle *);
+SPLAY_PROTOTYPE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
+int drm_name_cmp(struct drm_gem_object *, struct drm_gem_object *);
+SPLAY_PROTOTYPE(drm_name_tree, drm_gem_object, entry, drm_name_cmp);
+
+
+void drm_unref(struct uvm_object *);
+void drm_ref(struct uvm_object *);
+boolean_t drm_flush(struct uvm_object *, voff_t, voff_t, int);
+int drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
+ vm_fault_t, vm_prot_t, int);
+
+struct uvm_pagerops drm_pgops = {
+ NULL,
+ drm_ref,
+ drm_unref,
+ drm_fault,
+ drm_flush,
+};
+
+void
+drm_ref(struct uvm_object *uobj)
+{
+ struct drm_gem_object *obj =
+ container_of(uobj, struct drm_gem_object, uobj);
+
+ drm_gem_object_reference(obj);
+}
+
+void
+drm_unref(struct uvm_object *uobj)
+{
+ struct drm_gem_object *obj =
+ container_of(uobj, struct drm_gem_object, uobj);
+
+ drm_gem_object_unreference_unlocked(obj);
+}
+
+int
+drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
+ int npages, int centeridx, vm_fault_t fault_type,
+ vm_prot_t access_type, int flags)
+{
+ struct vm_map_entry *entry = ufi->entry;
+ struct uvm_object *uobj = entry->object.uvm_obj;
+ struct drm_gem_object *obj =
+ container_of(uobj, struct drm_gem_object, uobj);
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ /*
+ * we do not allow device mappings to be mapped copy-on-write
+ * so we kill any attempt to do so here.
+ */
+
+ if (UVM_ET_ISCOPYONWRITE(entry)) {
+ uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
+ return(VM_PAGER_ERROR);
+ }
+
+ /*
+ * We could end up here as the result of a copyin(9) or
+ * copyout(9) while handling an ioctl. So we must be careful
+ * not to deadlock. Therefore we only block if the quiesce
+ * count is zero, which guarantees we didn't enter from within
+ * an ioctl code path.
+ */
+ mtx_enter(&dev->quiesce_mtx);
+ if (dev->quiesce && dev->quiesce_count == 0) {
+ mtx_leave(&dev->quiesce_mtx);
+ uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
+ mtx_enter(&dev->quiesce_mtx);
+ while (dev->quiesce) {
+ msleep(&dev->quiesce, &dev->quiesce_mtx,
+ PZERO, "drmflt", 0);
+ }
+ mtx_leave(&dev->quiesce_mtx);
+ return(VM_PAGER_REFAULT);
+ }
+ dev->quiesce_count++;
+ mtx_leave(&dev->quiesce_mtx);
+
+ /* Call down into driver to do the magic */
+ ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
+ entry->start), vaddr, pps, npages, centeridx,
+ access_type, flags);
+
+ mtx_enter(&dev->quiesce_mtx);
+ dev->quiesce_count--;
+ if (dev->quiesce)
+ wakeup(&dev->quiesce_count);
+ mtx_leave(&dev->quiesce_mtx);
+
+ return (ret);
+}
+
+boolean_t
+drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
+{
+ return (TRUE);
+}
+
+struct uvm_object *
+udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
+{
+ struct drm_device *dev = drm_get_device_from_kdev(device);
+ struct drm_gem_object *obj;
+ struct drm_vma_offset_node *node;
+ struct drm_file *priv;
+ struct file *filp;
+
+ if (cdevsw[major(device)].d_mmap != drmmmap)
+ return NULL;
+
+ if (dev == NULL)
+ return NULL;
+
+ if (dev->driver->mmap)
+ return dev->driver->mmap(dev, off, size);
+
+ mutex_lock(&dev->struct_mutex);
+
+ priv = drm_find_file_by_minor(dev, minor(device));
+ if (priv == 0) {
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+ }
+ filp = priv->filp;
+
+ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+ off >> PAGE_SHIFT,
+ atop(round_page(size)));
+ if (!node) {
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+ }
+
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ drm_gem_object_reference(obj);
+
+ mutex_unlock(&dev->struct_mutex);
+ return &obj->uobj;
+}
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls. However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file. However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ * default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ * handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls. The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+
+/* pgoff in mmap is an unsigned long, so we need to make sure that
+ * the faked up offset will fit
+ */
+
+#if BITS_PER_LONG == 64
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#else
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
+#endif
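+
+/*
+ * Illustrative arithmetic (not in the original): with 4K pages,
+ * PAGE_SHIFT is 12, so on a 64-bit platform the window starts at
+ * page 0x100000 -- byte offset 4GB, above anything a real 32-bit
+ * offset can name -- and spans roughly 16 times that range.
+ */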
+
+/**
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+ struct drm_vma_offset_manager *vma_offset_manager;
+
+ rw_init(&dev->object_name_lock, "drmonl");
+#ifdef __linux__
+ idr_init(&dev->object_name_idr);
+#else
+ SPLAY_INIT(&dev->name_tree);
+#endif
+
+ vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+ if (!vma_offset_manager) {
+ DRM_ERROR("out of memory\n");
+ return -ENOMEM;
+ }
+
+ dev->vma_offset_manager = vma_offset_manager;
+ drm_vma_offset_manager_init(vma_offset_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
+
+ return 0;
+}
+
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+
+ drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+ kfree(dev->vma_offset_manager);
+ dev->vma_offset_manager = NULL;
+}
+
+#ifdef __linux__
+
+/**
+ * Initialize an already allocated GEM object of the specified size with
+ * shmfs backing store.
+ */
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ struct file *filp;
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
+
+ obj->filp = filp;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
+
+#else
+
+int drm_gem_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ drm_gem_private_object_init(dev, obj, size);
+
+ obj->uao = uao_create(size, 0);
+ uvm_objinit(&obj->uobj, &drm_pgops, 1);
+
+ atomic_inc(&dev->obj_count);
+ atomic_add(obj->size, &dev->obj_memory);
+
+ return 0;
+}
+
+#endif
+
+/**
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM provided backing store. Instead the caller is responsible for
+ * backing the object and handling it.
+ */
+void drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
+{
+ BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+ obj->dev = dev;
+ obj->filp = NULL;
+
+ kref_init(&obj->refcount);
+ obj->handle_count = 0;
+ obj->size = size;
+ drm_vma_node_reset(&obj->vma_node);
+}
+EXPORT_SYMBOL(drm_gem_private_object_init);
+
+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+{
+#ifdef __linux__
+ /*
+ * Note: obj->dma_buf can't disappear as long as we still hold a
+ * handle reference in obj->handle_count.
+ */
+ mutex_lock(&filp->prime.lock);
+ if (obj->dma_buf) {
+ drm_prime_remove_buf_handle_locked(&filp->prime,
+ obj->dma_buf);
+ }
+ mutex_unlock(&filp->prime.lock);
+#endif
+}
+
+#ifdef __linux__
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ /* Remove any name for this object */
+ if (obj->name) {
+ idr_remove(&dev->object_name_idr, obj->name);
+ obj->name = 0;
+ }
+}
+
+#else
+
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ /* Remove any name for this object */
+ if (obj->name) {
+ SPLAY_REMOVE(drm_name_tree, &dev->name_tree, obj);
+ obj->name = 0;
+ }
+}
+
+#endif
+
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+#ifdef __linux__
+ /* Unbreak the reference cycle if we have an exported dma_buf. */
+ if (obj->dma_buf) {
+ dma_buf_put(obj->dma_buf);
+ obj->dma_buf = NULL;
+ }
+#endif
+}
+
+static void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (WARN_ON(obj->handle_count == 0))
+ return;
+
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+
+ mutex_lock(&obj->dev->object_name_lock);
+ if (--obj->handle_count == 0) {
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_exported_dma_buf_free(obj);
+ }
+ mutex_unlock(&obj->dev->object_name_lock);
+
+ drm_gem_object_unreference_unlocked(obj);
+}
+
+#ifdef __linux__
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+int
+drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+
+ /* This is gross. The idr system doesn't let us try a delete and
+ * return an error code. It just spews if you fail at deleting.
+ * So, we have to grab a lock around finding the object and then
+ * doing the delete on it and dropping the refcount, or the user
+ * could race us to double-decrement the refcount and cause a
+ * use-after-free later. Given the frequency of our handle lookups,
+ * we may want to use ida for number allocation and a hash table
+ * for the pointers, anyway.
+ */
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return -EINVAL;
+ }
+ dev = obj->dev;
+
+ /* Release reference and decrement refcount. */
+ idr_remove(&filp->object_idr, handle);
+ spin_unlock(&filp->table_lock);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, filp);
+ drm_vma_node_revoke(&obj->vma_node, filp->filp);
+
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, filp);
+ drm_gem_object_handle_unreference_unlocked(obj);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_delete);
+
+#else
+
+int
+drm_gem_handle_delete(struct drm_file *filp, u32 handle)
+{
+ struct drm_device *dev;
+ struct drm_gem_object *obj;
+ struct drm_handle *han, find;
+
+ spin_lock(&filp->table_lock);
+
+ find.handle = handle;
+ han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &find);
+ if (han == NULL) {
+ spin_unlock(&filp->table_lock);
+ return -EINVAL;
+ }
+ obj = han->obj;
+ dev = obj->dev;
+
+ SPLAY_REMOVE(drm_obj_tree, &filp->obj_tree, han);
+ spin_unlock(&filp->table_lock);
+
+ drm_free(han);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, filp);
+ drm_vma_node_revoke(&obj->vma_node, filp->filp);
+
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, filp);
+ drm_gem_object_handle_unreference_unlocked(obj);
+
+ return 0;
+}
+
+#endif
+
+/**
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ *
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+#ifdef __linux__
+
+/**
+ * drm_gem_handle_create_tail - internal functions to create a handle
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either an flink name or a dma-buf.
+ */
+int
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
+ /*
+ * Get the user-visible handle using idr. Preload and perform
+ * allocation under our spinlock.
+ */
+ idr_preload(GFP_KERNEL);
+ spin_lock(&file_priv->table_lock);
+
+ ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+ drm_gem_object_reference(obj);
+ obj->handle_count++;
+ spin_unlock(&file_priv->table_lock);
+ idr_preload_end();
+ mutex_unlock(&dev->object_name_lock);
+ if (ret < 0) {
+ drm_gem_object_handle_unreference_unlocked(obj);
+ return ret;
+ }
+ *handlep = ret;
+
+ ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+ if (ret) {
+ drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
+
+ if (dev->driver->gem_open_object) {
+ ret = dev->driver->gem_open_object(obj, file_priv);
+ if (ret) {
+ drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#else
+
+int
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_handle *han;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
+ /*
+ * Get the user-visible handle using idr. Preload and perform
+ * allocation under our spinlock.
+ */
+ if ((han = drm_calloc(1, sizeof(*han))) == NULL)
+ return -ENOMEM;
+ han->obj = obj;
+ KASSERT(obj->dev != NULL);
+ spin_lock(&file_priv->table_lock);
+
+again:
+ han->handle = ++file_priv->obj_id;
+ /*
+ * Make sure we have no duplicates. this'll hurt once we wrap, 0 is
+ * reserved.
+ */
+ if (han->handle == 0 || SPLAY_INSERT(drm_obj_tree,
+ &file_priv->obj_tree, han))
+ goto again;
+ drm_gem_object_reference(obj);
+ obj->handle_count++;
+ spin_unlock(&file_priv->table_lock);
+ mutex_unlock(&dev->object_name_lock);
+ *handlep = han->handle;
+
+ ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+ if (ret) {
+ drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
+
+ if (dev->driver->gem_open_object) {
+ ret = dev->driver->gem_open_object(obj, file_priv);
+ if (ret) {
+ drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#endif
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep)
+{
+ mutex_lock(&obj->dev->object_name_lock);
+
+ return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+
+/**
+ * drm_gem_free_mmap_offset - release a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
+ */
+void
+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
+}
+EXPORT_SYMBOL(drm_gem_free_mmap_offset);
+
+/**
+ * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
+ * @obj: obj in question
+ * @size: the virtual size
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj, in cases where
+ * the virtual size differs from the physical size (ie. obj->size). Otherwise
+ * just use drm_gem_create_mmap_offset().
+ */
+int
+drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
+{
+ struct drm_device *dev = obj->dev;
+
+ return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
+ size / PAGE_SIZE);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
+
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ return drm_gem_create_mmap_offset_size(obj, obj->size);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
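+
+/*
+ * Illustrative use only (not part of this commit): a driver's
+ * dumb_map_offset()-style callback would pair the helper above with
+ * drm_vma_node_offset_addr() from drm_vma_manager.h to hand the fake
+ * offset back to userspace.
+ */
+#if 0
+int
+example_dumb_map_offset(struct drm_file *file_priv, struct drm_device *dev,
+    uint32_t handle, uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (obj == NULL)
+		return -ENOENT;
+
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret == 0)
+		*offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+	drm_gem_object_unreference_unlocked(obj);
+	return ret;
+}
+#endif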
+
+#ifdef __linux__
+
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = file_inode(obj->filp);
+ mapping = inode->i_mapping;
+
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+ * driver author is doing something really wrong:
+ */
+ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < npages; i++) {
+ p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+
+ /* There is a hypothetical issue w/ drivers that require
+ * buffer memory in the low 4GB.. if the pages are un-
+ * pinned, and swapped out, they can end up swapped back
+ * in above 4GB. If pages are already in memory, then
+ * shmem_read_mapping_page_gfp will ignore the gfpmask,
+ * even if the already in-memory page disobeys the mask.
+ *
+ * It is only a theoretical issue today, because none of
+ * the devices with this limitation can be populated with
+ * enough memory to trigger the issue. But this BUG_ON()
+ * is here as a reminder in case the problem with
+ * shmem_read_mapping_page_gfp() isn't solved by the time
+ * it does become a real issue.
+ *
+ * See this thread: http://lkml.org/lkml/2011/7/11/238
+ */
+ BUG_ON((gfpmask & __GFP_DMA32) &&
+ (page_to_pfn(p) >= 0x00100000UL));
+ }
+
+ return pages;
+
+fail:
+ while (i--)
+ page_cache_release(pages[i]);
+
+ drm_free_large(pages);
+ return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed)
+{
+ int i, npages;
+
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+ * driver author is doing something really wrong:
+ */
+ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ if (accessed)
+ mark_page_accessed(pages[i]);
+
+ /* Undo the reference we took when populating the table */
+ page_cache_release(pages[i]);
+ }
+
+ drm_free_large(pages);
+}
+EXPORT_SYMBOL(drm_gem_put_pages);
+
+#endif
+
+#ifdef __linux__
+
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+ u32 handle)
+{
+ struct drm_gem_object *obj;
+
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle);
+ if (obj == NULL) {
+ spin_unlock(&filp->table_lock);
+ return NULL;
+ }
+
+ drm_gem_object_reference(obj);
+
+ spin_unlock(&filp->table_lock);
+
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+#else
+
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+ u32 handle)
+{
+ struct drm_gem_object *obj;
+ struct drm_handle *han, search;
+
+ spin_lock(&filp->table_lock);
+
+ /* Check if we currently have a reference on the object */
+ search.handle = handle;
+ han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &search);
+ if (han == NULL) {
+ spin_unlock(&filp->table_lock);
+ return NULL;
+ }
+ obj = han->obj;
+
+ drm_gem_object_reference(obj);
+
+ spin_unlock(&filp->table_lock);
+
+ return obj;
+}
+
+#endif
+
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_close *args = data;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ ret = drm_gem_handle_delete(file_priv, args->handle);
+
+ return ret;
+}
+
+#ifdef __linux__
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_flink *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -ENOENT;
+
+ mutex_lock(&dev->object_name_lock);
+ idr_preload(GFP_KERNEL);
+ /* prevent races with concurrent gem_close. */
+ if (obj->handle_count == 0) {
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (!obj->name) {
+ ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
+ if (ret < 0)
+ goto err;
+
+ obj->name = ret;
+ }
+
+ args->name = (uint64_t) obj->name;
+ ret = 0;
+
+err:
+ idr_preload_end();
+ mutex_unlock(&dev->object_name_lock);
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+#else
+
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_flink *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!(dev->driver->flags & DRIVER_GEM))
+ return -ENODEV;
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (obj == NULL)
+ return -ENOENT;
+
+ mutex_lock(&dev->object_name_lock);
+ if (obj->handle_count == 0) {
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (!obj->name) {
+again:
+ obj->name = ++dev->obj_name;
+ /* 0 is reserved, make sure we don't clash. */
+ if (obj->name == 0 || SPLAY_INSERT(drm_name_tree,
+ &dev->name_tree, obj))
+ goto again;
+ }
+
+ args->name = (uint64_t)obj->name;
+ ret = 0;
+
+err:
+ mutex_unlock(&dev->object_name_lock);
+ drm_gem_object_unreference_unlocked(obj);
+ return ret;
+}
+
+#endif
+
+#ifdef __linux__
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_open *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+ u32 handle;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ mutex_lock(&dev->object_name_lock);
+ obj = idr_find(&dev->object_name_idr, (int) args->name);
+ if (obj) {
+ drm_gem_object_reference(obj);
+ } else {
+ mutex_unlock(&dev->object_name_lock);
+ return -ENOENT;
+ }
+
+ /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
+ drm_gem_object_unreference_unlocked(obj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ args->size = obj->size;
+
+ return 0;
+}
+
+#else
+
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_open *args = data;
+ struct drm_gem_object *obj, search;
+ int ret;
+ u32 handle;
+
+ if (!(dev->driver->flags & DRIVER_GEM))
+ return -ENODEV;
+
+ mutex_lock(&dev->object_name_lock);
+ search.name = args->name;
+ obj = SPLAY_FIND(drm_name_tree, &dev->name_tree, &search);
+ if (obj) {
+ drm_gem_object_reference(obj);
+ } else {
+ mutex_unlock(&dev->object_name_lock);
+ return -ENOENT;
+ }
+
+ /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
+ drm_gem_object_unreference_unlocked(obj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+ args->size = obj->size;
+
+ return 0;
+}
+
+#endif
+
+#ifdef __linux__
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+ idr_init(&file_private->object_idr);
+ spin_lock_init(&file_private->table_lock);
+}
+
+#else
+
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+ SPLAY_INIT(&file_private->obj_tree);
+ mtx_init(&file_private->table_lock, IPL_NONE);
+}
+
+#endif
+
+#ifdef __linux__
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+ struct drm_file *file_priv = data;
+ struct drm_gem_object *obj = ptr;
+ struct drm_device *dev = obj->dev;
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, file_priv);
+ drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
+
+ drm_gem_object_handle_unreference_unlocked(obj);
+
+ return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+ idr_for_each(&file_private->object_idr,
+ &drm_gem_object_release_handle, file_private);
+ idr_destroy(&file_private->object_idr);
+}
+
+#else
+
+static int
+drm_gem_object_release_handle(struct drm_file *file_priv,
+ struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, file_priv);
+ drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
+
+ drm_gem_object_handle_unreference_unlocked(obj);
+
+ return 0;
+}
+
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+ struct drm_handle *han;
+
+ while ((han = SPLAY_ROOT(&file_private->obj_tree)) != NULL) {
+ SPLAY_REMOVE(drm_obj_tree, &file_private->obj_tree, han);
+ drm_gem_object_release_handle(file_private, han->obj);
+ drm_free(han);
+ }
+}
+
+#endif
+
+#ifdef __linux__
+
+void
+drm_gem_object_release(struct drm_gem_object *obj)
+{
+ WARN_ON(obj->dma_buf);
+
+ if (obj->filp)
+ fput(obj->filp);
+}
+EXPORT_SYMBOL(drm_gem_object_release);
+
+#else
+
+void
+drm_gem_object_release(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ if (obj->uao)
+ uao_detach(obj->uao);
+
+ atomic_dec(&dev->obj_count);
+ atomic_sub(obj->size, &dev->obj_memory);
+}
+
+#endif
+
+/**
+ * Called after the last reference to the object has been lost.
+ * Must be called holding struct_mutex
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+ struct drm_device *dev = obj->dev;
+
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (dev->driver->gem_free_object != NULL)
+ dev->driver->gem_free_object(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+#ifdef __linux__
+
+void drm_gem_vm_open(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+
+ drm_gem_object_reference(obj);
+
+ mutex_lock(&obj->dev->struct_mutex);
+ drm_vm_open_locked(obj->dev, vma);
+ mutex_unlock(&obj->dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_vm_open);
+
+void drm_gem_vm_close(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ drm_vm_close_locked(obj->dev, vma);
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_gem_vm_close);
+
+/**
+ * drm_gem_mmap_obj - memory map a GEM object
+ * @obj: the GEM object to map
+ * @obj_size: the object size to be mapped, in bytes
+ * @vma: VMA for the area to be mapped
+ *
+ * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
+ * provided by the driver. Depending on their requirements, drivers can either
+ * provide a fault handler in their gem_vm_ops (in which case any accesses to
+ * the object will be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring), or mmap the buffer memory
+ * synchronously after calling drm_gem_mmap_obj.
+ *
+ * This function is mainly intended to implement the DMABUF mmap operation, when
+ * the GEM object is not looked up based on its fake offset. To implement the
+ * DRM mmap operation, drivers should use the drm_gem_mmap() function.
+ *
+ * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
+ * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
+ * callers must verify access restrictions before calling this helper.
+ *
+ * NOTE: This function has to be protected with dev->struct_mutex
+ *
+ * Return 0 on success or -EINVAL if the object size is smaller than the VMA
+ * size, or if no gem_vm_ops are provided.
+ */
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+ struct vm_area_struct *vma)
+{
+ struct drm_device *dev = obj->dev;
+
+ lockdep_assert_held(&dev->struct_mutex);
+
+ /* Check for valid size. */
+ if (obj_size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ if (!dev->driver->gem_vm_ops)
+ return -EINVAL;
+
+ vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ vma->vm_private_data = obj;
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+ /* Take a ref for this mapping of the object, so that the fault
+ * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call
+	 * or by a vm_open due to mremap or partial unmap).
+	 */
+ drm_gem_object_reference(obj);
+
+ drm_vm_open_locked(dev, vma);
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_mmap_obj);
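+
+/*
+ * Illustrative sketch only: a driver's dma-buf mmap callback could wrap
+ * this helper as below (foo_dmabuf_mmap is a hypothetical name; access
+ * checks are assumed to have been done by the exporter):
+ *
+ *	static int foo_dmabuf_mmap(struct dma_buf *buf,
+ *	    struct vm_area_struct *vma)
+ *	{
+ *		struct drm_gem_object *obj = buf->priv;
+ *		int ret;
+ *
+ *		mutex_lock(&obj->dev->struct_mutex);
+ *		ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ *		mutex_unlock(&obj->dev->struct_mutex);
+ *		return ret;
+ *	}
+ */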
+
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object) and map it with a call to drm_gem_mmap_obj().
+ *
+ * If the caller is not granted access to the buffer object, the mmap will fail
+ * with EACCES. Please see the vma manager for more information.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_gem_object *obj;
+ struct drm_vma_offset_node *node;
+ int ret = 0;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+
+ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (!node) {
+ mutex_unlock(&dev->struct_mutex);
+ return drm_mmap(filp, vma);
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EACCES;
+ }
+
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_mmap);
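+
+/*
+ * Illustrative userspace sketch (not part of this file): the fake
+ * offset comes from a driver ioctl and is then passed to mmap() on the
+ * DRM fd, which lands in drm_gem_mmap() above; e.g. with i915:
+ *
+ *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
+ *
+ *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
+ *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *	    fd, arg.offset);
+ */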
+
+#endif
+
+/*
+ * Code to support memory managers based on the GEM (Graphics
+ * Execution Manager) API.
+ */
+
+struct drm_gem_object *
+drm_gem_object_find(struct drm_file *filp, u32 handle)
+{
+ struct drm_handle *han, search;
+
+ MUTEX_ASSERT_LOCKED(&filp->table_lock);
+
+ search.handle = handle;
+ han = SPLAY_FIND(drm_obj_tree, &filp->obj_tree, &search);
+ if (han == NULL)
+ return NULL;
+
+ return han->obj;
+}
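+
+/*
+ * Illustrative sketch: callers hold table_lock across the lookup and
+ * take their own reference before dropping it, along the lines of
+ *
+ *	mtx_enter(&filp->table_lock);
+ *	obj = drm_gem_object_find(filp, handle);
+ *	if (obj != NULL)
+ *		drm_gem_object_reference(obj);
+ *	mtx_leave(&filp->table_lock);
+ */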
+
+int
+drm_handle_cmp(struct drm_handle *a, struct drm_handle *b)
+{
+ return (a->handle < b->handle ? -1 : a->handle > b->handle);
+}
+
+SPLAY_GENERATE(drm_obj_tree, drm_handle, entry, drm_handle_cmp);
+
+int
+drm_name_cmp(struct drm_gem_object *a, struct drm_gem_object *b)
+{
+ return (a->name < b->name ? -1 : a->name > b->name);
+}
+
+SPLAY_GENERATE(drm_name_tree, drm_gem_object, entry, drm_name_cmp);
diff --git a/sys/dev/pci/drm/drm_linux.c b/sys/dev/pci/drm/drm_linux.c
index dede7bd87e6..0e57c9476b6 100644
--- a/sys/dev/pci/drm/drm_linux.c
+++ b/sys/dev/pci/drm/drm_linux.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_linux.c,v 1.8 2016/02/05 15:51:10 kettenis Exp $ */
+/* $OpenBSD: drm_linux.c,v 1.9 2016/04/05 08:22:50 kettenis Exp $ */
/*
* Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
*
@@ -208,6 +208,17 @@ vunmap(void *addr, size_t size)
uvm_km_free(kernel_map, va, size);
}
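+/*
+ * The Linux rbtree API links nodes into the tree by hand, so the
+ * linux_root tree generated below is never inserted into via
+ * RB_INSERT and this comparator should be unreachable; it panics
+ * if it is ever called.
+ */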
+int
+panic_cmp(struct rb_node *a, struct rb_node *b)
+{
+ panic(__func__);
+}
+
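+/*
+ * The Linux compat headers redefine RB_ROOT as Linux's empty-tree
+ * initializer, which would break the RB_GENERATE expansion below;
+ * restore the native <sys/tree.h> definition first.
+ */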
+#undef RB_ROOT
+#define RB_ROOT(head) (head)->rbh_root
+
+RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
+
#if defined(__amd64__) || defined(__i386__)
/*
diff --git a/sys/dev/pci/drm/drm_modes.c b/sys/dev/pci/drm/drm_modes.c
index e24ad89074f..f1e5ae5e770 100644
--- a/sys/dev/pci/drm/drm_modes.c
+++ b/sys/dev/pci/drm/drm_modes.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_modes.c,v 1.6 2015/09/23 23:12:11 kettenis Exp $ */
+/* $OpenBSD: drm_modes.c,v 1.7 2016/04/05 08:22:50 kettenis Exp $ */
/*
* Copyright © 1997-2003 by The XFree86 Project, Inc.
* Copyright © 2007 Dave Airlie
@@ -34,6 +34,9 @@
#include "drmP.h"
#include "drm_crtc.h"
+#undef RB_ROOT
+#define RB_ROOT(head) (head)->rbh_root
+
long simple_strtol(const char *, char **, int);
/**
diff --git a/sys/dev/pci/drm/files.drm b/sys/dev/pci/drm/files.drm
index 8e492709266..98497d875fc 100644
--- a/sys/dev/pci/drm/files.drm
+++ b/sys/dev/pci/drm/files.drm
@@ -1,5 +1,5 @@
# $NetBSD: files.drm,v 1.2 2007/03/28 11:29:37 jmcneill Exp $
-# $OpenBSD: files.drm,v 1.34 2015/10/29 07:47:03 kettenis Exp $
+# $OpenBSD: files.drm,v 1.35 2016/04/05 08:22:50 kettenis Exp $
# direct rendering modules
define drmbase {[console = -1]}
@@ -19,6 +19,8 @@ file dev/pci/drm/drm_edid.c drm
file dev/pci/drm/drm_dp_helper.c drm
file dev/pci/drm/drm_fb_helper.c drm
file dev/pci/drm/drm_rect.c drm
+file dev/pci/drm/drm_gem.c drm
+file dev/pci/drm/drm_vma_manager.c drm
file dev/pci/drm/drm_linux.c drm
file dev/pci/drm/linux_hdmi.c drm
diff --git a/sys/dev/pci/drm/i915/i915_gem.c b/sys/dev/pci/drm/i915/i915_gem.c
index 611cea5d04a..e2656e6adc5 100644
--- a/sys/dev/pci/drm/i915/i915_gem.c
+++ b/sys/dev/pci/drm/i915/i915_gem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_gem.c,v 1.104 2015/10/19 19:54:35 kettenis Exp $ */
+/* $OpenBSD: i915_gem.c,v 1.105 2016/04/05 08:22:50 kettenis Exp $ */
/*
* Copyright (c) 2008-2009 Owain G. Ainsworth <oga@openbsd.org>
*
@@ -42,7 +42,7 @@
*/
#include <dev/pci/drm/drmP.h>
-#include <dev/pci/drm/drm.h>
+#include <dev/pci/drm/drm_vma_manager.h>
#include <dev/pci/drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -259,15 +259,10 @@ i915_gem_create(struct drm_file *file,
return -ENOMEM;
ret = drm_gem_handle_create(file, &obj->base, &handle);
- if (ret) {
- drm_gem_object_release(&obj->base);
- i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
- pool_put(&dev->objpl, obj);
- return ret;
- }
-
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret)
+ return ret;
*handle_p = handle;
return 0;
@@ -1575,14 +1570,15 @@ i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
* copyout in one of the fast paths. Return failure such that
* we fall back on the slow path.
*/
- if (!obj->base.map || RWLOCK_OWNER(&dev->struct_mutex) == curproc) {
+ if (!drm_vma_node_has_offset(&obj->base.vma_node) ||
+ RWLOCK_OWNER(&dev->struct_mutex) == curproc) {
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
&obj->base.uobj, NULL);
ret = VM_PAGER_BAD;
goto out;
}
- offset -= obj->base.map->ext;
+ offset -= drm_vma_node_offset_addr(&obj->base.vma_node);
if (rw_enter(&dev->struct_mutex, RW_NOSLEEP | RW_WRITE) != 0) {
uvmfault_unlockall(ufi, NULL, &obj->base.uobj, NULL);
@@ -1745,6 +1741,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
pg++)
pmap_page_protect(pg, PROT_NONE);
+ drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
obj->fault_mappable = false;
}
@@ -1802,7 +1799,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
#endif
int ret;
- if (obj->base.map)
+ if (drm_vma_node_has_offset(&obj->base.vma_node))
return 0;
#if 0
@@ -1837,9 +1834,6 @@ out:
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
- if (!obj->base.map)
- return;
-
drm_gem_free_mmap_offset(&obj->base);
}
@@ -1878,7 +1872,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (ret)
goto out;
- *offset = (u64)obj->base.map->ext;
+ *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
out:
drm_gem_object_unreference(&obj->base);
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_vm.c b/sys/dev/pci/drm/ttm/ttm_bo_vm.c
index cd7a8b47fa5..905ca27feae 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_vm.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_vm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ttm_bo_vm.c,v 1.9 2015/10/23 08:21:58 kettenis Exp $ */
+/* $OpenBSD: ttm_bo_vm.c,v 1.10 2016/04/05 08:22:50 kettenis Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -43,6 +43,9 @@ struct ttm_buffer_object *
ttm_bo_vm_lookup_rb(struct ttm_bo_device *, unsigned long,
unsigned long);
+#undef RB_ROOT
+#define RB_ROOT(head) (head)->rbh_root
+
RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
ttm_bo_cmp_rb_tree_items);