path: root/sys/dev
author    Mark Kettenis <kettenis@cvs.openbsd.org>  2018-06-25 22:29:17 +0000
committer Mark Kettenis <kettenis@cvs.openbsd.org>  2018-06-25 22:29:17 +0000
commit    ebc6c03003108f9b51191ef34c6b7f463eb9d06e (patch)
tree      9f6e7e289658aeb6e85d92ade6a67f0259032a3f /sys/dev
parent    5c354e1ab0f5234f00e02abcb869f00fba540343 (diff)
Implement DRI3/prime support. This allows graphics buffers to be passed
between processes using file descriptors. This provides an alternative to
exporting them with guessable 32-bit IDs. This implementation does not (yet)
allow sharing of graphics buffers between GPUs.

ok mpi@, visa@
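
A hedged userspace sketch (not part of this commit) of what PRIME enables
once these ioctls exist, using the libdrm wrappers drmPrimeHandleToFD() and
drmPrimeFDToHandle(); the fd is assumed to be passed to the other process
over a Unix socket with SCM_RIGHTS, and error handling is trimmed:

#include <stdint.h>
#include <xf86drm.h>

/* Exporter: turn a GEM handle into a file descriptor. */
int
export_buffer(int drm_fd, uint32_t gem_handle)
{
	int prime_fd;

	/* DRM_CLOEXEC maps to O_CLOEXEC on the new descriptor. */
	if (drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC, &prime_fd))
		return -1;
	return prime_fd;	/* send via SCM_RIGHTS to the other process */
}

/* Importer: turn a received file descriptor back into a GEM handle. */
int
import_buffer(int drm_fd, int prime_fd, uint32_t *gem_handle)
{
	return drmPrimeFDToHandle(drm_fd, prime_fd, gem_handle);
}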
Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/pci/drm/drmP.h                  |  64
-rw-r--r--  sys/dev/pci/drm/drm_drv.c               |  13
-rw-r--r--  sys/dev/pci/drm/drm_gem.c               |   9
-rw-r--r--  sys/dev/pci/drm/drm_internal.h          |   2
-rw-r--r--  sys/dev/pci/drm/drm_linux.c             | 155
-rw-r--r--  sys/dev/pci/drm/drm_linux.h             |  37
-rw-r--r--  sys/dev/pci/drm/drm_prime.c             | 820
-rw-r--r--  sys/dev/pci/drm/files.drm               |   6
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.c         |   2
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.h         |   4
-rw-r--r--  sys/dev/pci/drm/i915/i915_gem_dmabuf.c  | 337
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_drv.c     |   4
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_prime.c   |   4
13 files changed, 1434 insertions, 23 deletions
diff --git a/sys/dev/pci/drm/drmP.h b/sys/dev/pci/drm/drmP.h
index 8ed45e3a470..1d9bcf534c8 100644
--- a/sys/dev/pci/drm/drmP.h
+++ b/sys/dev/pci/drm/drmP.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: drmP.h,v 1.217 2018/02/19 08:59:52 mpi Exp $ */
+/* $OpenBSD: drmP.h,v 1.218 2018/06/25 22:29:16 kettenis Exp $ */
/* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
*/
@@ -352,6 +352,12 @@ struct drm_pending_event {
void (*destroy)(struct drm_pending_event *event);
};
+/* initial implementation using a linked list - todo hashtab */
+struct drm_prime_file_private {
+ struct list_head head;
+ struct rwlock lock;
+};
+
/** File private data */
struct drm_file {
unsigned always_authenticated :1;
@@ -395,6 +401,8 @@ struct drm_file {
struct list_head event_list;
int event_space;
+ struct drm_prime_file_private prime;
+
struct selinfo rsel;
SPLAY_ENTRY(drm_file) link;
};
@@ -480,6 +488,34 @@ struct drm_gem_object {
uint32_t pending_read_domains;
uint32_t pending_write_domain;
+ /**
+ * dma_buf - dma buf associated with this GEM object
+ *
+ * Pointer to the dma-buf associated with this gem object (either
+ * through importing or exporting). We break the resulting reference
+ * loop when the last gem handle for this object is released.
+ *
+ * Protected by obj->object_name_lock
+ */
+ struct dma_buf *dma_buf;
+
+ /**
+ * import_attach - dma buf attachment backing this object
+ *
+ * Any foreign dma_buf imported as a gem object has this set to the
+ * attachment point for the device. This is invariant over the lifetime
+ * of a gem object.
+ *
+ * The driver's ->gem_free_object callback is responsible for cleaning
+ * up the dma_buf attachment and references acquired at import time.
+ *
+ * Note that the drm gem/prime core does not depend upon drivers setting
+ * this field any more. So for drivers where this doesn't make sense
+ * (e.g. virtual devices or a displaylink behind a USB bus) they can
+ * simply leave it as NULL.
+ */
+ struct dma_buf_attachment *import_attach;
+
struct uvm_object uobj;
SPLAY_ENTRY(drm_gem_object) entry;
struct uvm_object *uao;
@@ -646,6 +682,20 @@ struct drm_driver {
int (*gem_fault)(struct drm_gem_object *, struct uvm_faultinfo *,
off_t, vaddr_t, vm_page_t *, int, int, vm_prot_t, int);
+ /* prime: */
+ /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
+ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
+ uint32_t handle, uint32_t flags, int *prime_fd);
+ /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
+ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
+ int prime_fd, uint32_t *handle);
+ /* export GEM -> dmabuf */
+ struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+ /* import dmabuf -> GEM */
+ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
int (*dumb_create)(struct drm_file *file_priv,
struct drm_device *dev, struct drm_mode_create_dumb *args);
int (*dumb_map_offset)(struct drm_file *file_priv,
@@ -982,6 +1032,18 @@ static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc
extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
+extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags);
+extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+ int *prime_fd);
+extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
+
bool drm_mode_parse_command_line_for_connector(const char *,
struct drm_connector *, struct drm_cmdline_mode *);
struct drm_display_mode *
diff --git a/sys/dev/pci/drm/drm_drv.c b/sys/dev/pci/drm/drm_drv.c
index 9db6ba51ca0..e09380e3257 100644
--- a/sys/dev/pci/drm/drm_drv.c
+++ b/sys/dev/pci/drm/drm_drv.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_drv.c,v 1.157 2018/01/31 05:04:41 jsg Exp $ */
+/* $OpenBSD: drm_drv.c,v 1.158 2018/06/25 22:29:16 kettenis Exp $ */
/*-
* Copyright 2007-2009 Owain G. Ainsworth <oga@openbsd.org>
* Copyright © 2008 Intel Corporation
@@ -207,10 +207,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-#ifdef notyet
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-#endif
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -736,6 +734,9 @@ drmopen(dev_t kdev, int flags, int fmt, struct proc *p)
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, file_priv);
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_init_file_private(&file_priv->prime);
+
if (dev->driver->open) {
ret = dev->driver->open(dev, file_priv);
if (ret != 0) {
@@ -829,6 +830,10 @@ drmclose(dev_t kdev, int flags, int fmt, struct proc *p)
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
+
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_prime_destroy_file_private(&file_priv->prime);
+
SPLAY_REMOVE(drm_file_tree, &dev->files, file_priv);
drm_free(file_priv);
@@ -1116,12 +1121,10 @@ drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_DUMB_PREFER_SHADOW:
req->value = dev->mode_config.prefer_shadow;
break;
-#ifdef notyet
case DRM_CAP_PRIME:
req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
break;
-#endif
case DRM_CAP_TIMESTAMP_MONOTONIC:
req->value = drm_timestamp_monotonic;
break;
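
With the #ifdef notyet guards removed above, userspace can now probe for
PRIME support through DRM_CAP_PRIME. A hedged sketch of such a probe, using
libdrm's drmGetCap() wrapper (names outside libdrm are hypothetical):

#include <stdint.h>
#include <xf86drm.h>

/* Returns 1 if the device can both export and import PRIME fds. */
int
has_prime(int drm_fd)
{
	uint64_t cap = 0;

	if (drmGetCap(drm_fd, DRM_CAP_PRIME, &cap) != 0)
		return 0;
	return (cap & DRM_PRIME_CAP_EXPORT) && (cap & DRM_PRIME_CAP_IMPORT);
}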
diff --git a/sys/dev/pci/drm/drm_gem.c b/sys/dev/pci/drm/drm_gem.c
index ecf81f371e3..125b78f885a 100644
--- a/sys/dev/pci/drm/drm_gem.c
+++ b/sys/dev/pci/drm/drm_gem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_gem.c,v 1.5 2017/09/03 13:28:54 jsg Exp $ */
+/* $OpenBSD: drm_gem.c,v 1.6 2018/06/25 22:29:16 kettenis Exp $ */
/*
* Copyright © 2008 Intel Corporation
*
@@ -28,6 +28,7 @@
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/drm_vma_manager.h>
+#include "drm_internal.h"
#include <uvm/uvm.h>
@@ -318,7 +319,6 @@ EXPORT_SYMBOL(drm_gem_private_object_init);
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
-#ifdef __linux__
/*
* Note: obj->dma_buf can't disappear as long as we still hold a
* handle reference in obj->handle_count.
@@ -329,7 +329,6 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
obj->dma_buf);
}
mutex_unlock(&filp->prime.lock);
-#endif
}
/**
@@ -352,13 +351,11 @@ static void drm_gem_object_handle_free(struct drm_gem_object *obj)
static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
-#ifdef __linux__
/* Unbreak the reference cycle if we have an exported dma_buf. */
if (obj->dma_buf) {
dma_buf_put(obj->dma_buf);
obj->dma_buf = NULL;
}
-#endif
}
static void
@@ -873,6 +870,8 @@ drm_gem_object_release(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ WARN_ON(obj->dma_buf);
+
if (obj->uao)
uao_detach(obj->uao);
diff --git a/sys/dev/pci/drm/drm_internal.h b/sys/dev/pci/drm/drm_internal.h
index ac3e1f8187a..139c9641bd5 100644
--- a/sys/dev/pci/drm/drm_internal.h
+++ b/sys/dev/pci/drm/drm_internal.h
@@ -45,7 +45,6 @@ void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
#endif
/* drm_prime.c */
-#ifdef __linux__
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
@@ -55,7 +54,6 @@ void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf);
-#endif
/* drm_info.c */
#ifdef __linux__
diff --git a/sys/dev/pci/drm/drm_linux.c b/sys/dev/pci/drm/drm_linux.c
index 0cd7f50aa55..f98cc72f414 100644
--- a/sys/dev/pci/drm/drm_linux.c
+++ b/sys/dev/pci/drm/drm_linux.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_linux.c,v 1.23 2018/04/25 01:27:46 jsg Exp $ */
+/* $OpenBSD: drm_linux.c,v 1.24 2018/06/25 22:29:16 kettenis Exp $ */
/*
* Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
* Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
@@ -19,6 +19,9 @@
#include <dev/pci/drm/drmP.h>
#include <dev/pci/ppbreg.h>
#include <sys/event.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
+#include <sys/stat.h>
struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
void *sch_ident;
@@ -803,3 +806,153 @@ fence_context_alloc(unsigned int num)
{
return __sync_add_and_fetch(&drm_fence_count, num) - num;
}
+
+int
+dmabuf_read(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
+{
+ return (ENXIO);
+}
+
+int
+dmabuf_write(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
+{
+ return (ENXIO);
+}
+
+int
+dmabuf_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
+{
+ return (ENOTTY);
+}
+
+int
+dmabuf_poll(struct file *fp, int events, struct proc *p)
+{
+ return (0);
+}
+
+int
+dmabuf_kqfilter(struct file *fp, struct knote *kn)
+{
+ return (EINVAL);
+}
+
+int
+dmabuf_stat(struct file *fp, struct stat *st, struct proc *p)
+{
+ struct dma_buf *dmabuf = fp->f_data;
+
+ memset(st, 0, sizeof(*st));
+ st->st_size = dmabuf->size;
+ st->st_mode = S_IFIFO; /* XXX */
+ return (0);
+}
+
+int
+dmabuf_close(struct file *fp, struct proc *p)
+{
+ struct dma_buf *dmabuf = fp->f_data;
+
+ fp->f_data = NULL;
+ dmabuf->ops->release(dmabuf);
+ free(dmabuf, M_DRM, sizeof(struct dma_buf));
+ return (0);
+}
+
+struct fileops dmabufops = {
+ .fo_read = dmabuf_read,
+ .fo_write = dmabuf_write,
+ .fo_ioctl = dmabuf_ioctl,
+ .fo_poll = dmabuf_poll,
+ .fo_kqfilter = dmabuf_kqfilter,
+ .fo_stat = dmabuf_stat,
+ .fo_close = dmabuf_close
+};
+
+struct dma_buf *
+dma_buf_export(const struct dma_buf_export_info *info)
+{
+ struct proc *p = curproc;
+ struct filedesc *fdp = p->p_fd;
+ struct dma_buf *dmabuf;
+ struct file *fp;
+ int fd, error;
+
+ fdplock(fdp);
+ error = falloc(p, &fp, &fd);
+ if (error) {
+ fdpunlock(fdp);
+ return ERR_PTR(-error);
+ }
+ fdremove(fdp, fd);
+ fdpunlock(fdp);
+ fp->f_type = DTYPE_DMABUF;
+ fp->f_ops = &dmabufops;
+ dmabuf = malloc(sizeof(struct dma_buf), M_DRM, M_WAITOK | M_ZERO);
+ dmabuf->priv = info->priv;
+ dmabuf->ops = info->ops;
+ dmabuf->size = info->size;
+ dmabuf->file = fp;
+ fp->f_data = dmabuf;
+ return dmabuf;
+}
+
+struct dma_buf *
+dma_buf_get(int fd)
+{
+ struct proc *p = curproc;
+ struct filedesc *fdp = p->p_fd;
+ struct file *fp;
+
+ if ((fp = fd_getfile(fdp, fd)) == NULL)
+ return ERR_PTR(-EBADF);
+
+ if (fp->f_type != DTYPE_DMABUF) {
+ FRELE(fp, p);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return fp->f_data;
+}
+
+void
+dma_buf_put(struct dma_buf *dmabuf)
+{
+ KASSERT(dmabuf);
+ KASSERT(dmabuf->file);
+
+ FRELE(dmabuf->file, curproc);
+}
+
+int
+dma_buf_fd(struct dma_buf *dmabuf, int flags)
+{
+ struct proc *p = curproc;
+ struct filedesc *fdp = p->p_fd;
+ struct file *fp = dmabuf->file;
+ int fd, cloexec, error;
+
+ cloexec = (flags & O_CLOEXEC) ? UF_EXCLOSE : 0;
+
+ fdplock(fdp);
+restart:
+ if ((error = fdalloc(p, 0, &fd)) != 0) {
+ if (error == ENOSPC) {
+ fdexpand(p);
+ goto restart;
+ }
+ fdpunlock(fdp);
+ return -error;
+ }
+
+ fdinsert(fdp, fd, cloexec, fp);
+ fdpunlock(fdp);
+
+ return fd;
+}
+
+void
+get_dma_buf(struct dma_buf *dmabuf)
+{
+ FREF(dmabuf->file);
+}
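
A hedged kernel-side sketch (hypothetical exporter, not part of the commit)
of how the shim above is meant to be driven: dma_buf_export() allocates the
backing struct file but immediately gives the descriptor slot back with
fdremove(), so a descriptor only appears once dma_buf_fd() installs one;
extra references map to FREF()/FRELE() on the file via
get_dma_buf()/dma_buf_put():

static void
example_release(struct dma_buf *dmabuf)
{
	/* tear down whatever driver state dmabuf->priv points at */
}

static const struct dma_buf_ops example_ops = {
	.release = example_release,
};

int
example_export_fd(void *priv, size_t size)
{
	struct dma_buf_export_info info = {
		.ops = &example_ops,
		.size = size,
		.priv = priv,
	};
	struct dma_buf *dmabuf;

	dmabuf = dma_buf_export(&info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* Installs the file in the fd table; negative errno on failure. */
	return dma_buf_fd(dmabuf, O_CLOEXEC);
}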
diff --git a/sys/dev/pci/drm/drm_linux.h b/sys/dev/pci/drm/drm_linux.h
index a7ad53c3f49..f5364511e56 100644
--- a/sys/dev/pci/drm/drm_linux.h
+++ b/sys/dev/pci/drm/drm_linux.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_linux.h,v 1.88 2018/04/25 01:27:46 jsg Exp $ */
+/* $OpenBSD: drm_linux.h,v 1.89 2018/06/25 22:29:16 kettenis Exp $ */
/*
* Copyright (c) 2013, 2014, 2015 Mark Kettenis
* Copyright (c) 2017 Martin Pieuchot
@@ -22,6 +22,7 @@
#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/errno.h>
+#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/stdint.h>
@@ -2714,4 +2715,38 @@ release_firmware(const struct firmware *fw)
void *memchr_inv(const void *, int, size_t);
+struct dma_buf_ops;
+
+struct dma_buf {
+ const struct dma_buf_ops *ops;
+ void *priv;
+ size_t size;
+ struct file *file;
+};
+
+struct dma_buf_attachment;
+
+void get_dma_buf(struct dma_buf *);
+struct dma_buf *dma_buf_get(int);
+void dma_buf_put(struct dma_buf *);
+int dma_buf_fd(struct dma_buf *, int);
+
+struct dma_buf_ops {
+ void (*release)(struct dma_buf *);
+};
+
+struct dma_buf_export_info {
+ const struct dma_buf_ops *ops;
+ size_t size;
+ int flags;
+ void *priv;
+};
+
+#define DEFINE_DMA_BUF_EXPORT_INFO(x) struct dma_buf_export_info x
+
+struct dma_buf *dma_buf_export(const struct dma_buf_export_info *);
+
+#define dma_buf_attach(x, y) NULL
+#define dma_buf_detach(x, y) panic("dma_buf_detach")
+
#endif
diff --git a/sys/dev/pci/drm/drm_prime.c b/sys/dev/pci/drm/drm_prime.c
new file mode 100644
index 00000000000..5d37dc00ae1
--- /dev/null
+++ b/sys/dev/pci/drm/drm_prime.c
@@ -0,0 +1,820 @@
+/*
+ * Copyright © 2012 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Rob Clark <rob.clark@linaro.org>
+ *
+ */
+
+#ifdef __linux__
+#include <linux/export.h>
+#include <linux/dma-buf.h>
+#endif
+#include <dev/pci/drm/drmP.h>
+#ifdef notyet
+#include <drm/drm_gem.h>
+#endif
+
+#include "drm_internal.h"
+
+/*
+ * DMA-BUF/GEM Object references and lifetime overview:
+ *
+ * On the export the dma_buf holds a reference to the exporting GEM
+ * object. It takes this reference in handle_to_fd_ioctl, when it
+ * first calls .prime_export and stores the exporting GEM object in
+ * the dma_buf priv. This reference is released when the dma_buf
+ * object goes away in the driver .release function.
+ *
+ * On the import the importing GEM object holds a reference to the
+ * dma_buf (which in turn holds a ref to the exporting GEM object).
+ * It takes that reference in the fd_to_handle ioctl.
+ * It calls dma_buf_get, creates an attachment to it and stores the
+ * attachment in the GEM object. When the imported object is destroyed,
+ * the attachment is removed and the reference to the dma_buf dropped.
+ *
+ * Thus the chain of references always flows in one direction
+ * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
+ *
+ * Self-importing: if userspace is using PRIME as a replacement for flink
+ * then it will get a fd->handle request for a GEM object that it created.
+ * Drivers should detect this situation and return back the gem object
+ * from the dma-buf private. Prime will do this automatically for drivers that
+ * use the drm_gem_prime_{import,export} helpers.
+ */
+
+struct drm_prime_member {
+ struct list_head entry;
+ struct dma_buf *dma_buf;
+ uint32_t handle;
+};
+
+#ifdef notyet
+struct drm_prime_attachment {
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+};
+#endif
+
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf, uint32_t handle)
+{
+ struct drm_prime_member *member;
+
+ member = kmalloc(sizeof(*member), GFP_KERNEL);
+ if (!member)
+ return -ENOMEM;
+
+ get_dma_buf(dma_buf);
+ member->dma_buf = dma_buf;
+ member->handle = handle;
+ list_add(&member->entry, &prime_fpriv->head);
+ return 0;
+}
+
+static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
+{
+ struct drm_prime_member *member;
+
+ list_for_each_entry(member, &prime_fpriv->head, entry) {
+ if (member->handle == handle)
+ return member->dma_buf;
+ }
+
+ return NULL;
+}
+
+static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf,
+ uint32_t *handle)
+{
+ struct drm_prime_member *member;
+
+ list_for_each_entry(member, &prime_fpriv->head, entry) {
+ if (member->dma_buf == dma_buf) {
+ *handle = member->handle;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+#ifdef notyet
+
+static int drm_gem_map_attach(struct dma_buf *dma_buf,
+ struct device *target_dev,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_prime_attachment *prime_attach;
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
+ if (!prime_attach)
+ return -ENOMEM;
+
+ prime_attach->dir = DMA_NONE;
+ attach->priv = prime_attach;
+
+ if (!dev->driver->gem_prime_pin)
+ return 0;
+
+ return dev->driver->gem_prime_pin(obj);
+}
+
+static void drm_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_prime_attachment *prime_attach = attach->priv;
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+ struct sg_table *sgt;
+
+ if (dev->driver->gem_prime_unpin)
+ dev->driver->gem_prime_unpin(obj);
+
+ if (!prime_attach)
+ return;
+
+ sgt = prime_attach->sgt;
+ if (sgt) {
+ if (prime_attach->dir != DMA_NONE)
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+ prime_attach->dir);
+ sg_free_table(sgt);
+ }
+
+ kfree(sgt);
+ kfree(prime_attach);
+ attach->priv = NULL;
+}
+
+#endif
+
+void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf)
+{
+ struct drm_prime_member *member, *safe;
+
+ list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
+ if (member->dma_buf == dma_buf) {
+ dma_buf_put(dma_buf);
+ list_del(&member->entry);
+ kfree(member);
+ }
+ }
+}
+
+#ifdef notyet
+
+static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_prime_attachment *prime_attach = attach->priv;
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct sg_table *sgt;
+
+ if (WARN_ON(dir == DMA_NONE || !prime_attach))
+ return ERR_PTR(-EINVAL);
+
+ /* return the cached mapping when possible */
+ if (prime_attach->dir == dir)
+ return prime_attach->sgt;
+
+ /*
+ * two mappings with different directions for the same attachment are
+ * not allowed
+ */
+ if (WARN_ON(prime_attach->dir != DMA_NONE))
+ return ERR_PTR(-EBUSY);
+
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+
+ if (!IS_ERR(sgt)) {
+ if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = ERR_PTR(-ENOMEM);
+ } else {
+ prime_attach->sgt = sgt;
+ prime_attach->dir = dir;
+ }
+ }
+
+ return sgt;
+}
+
+static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ /* nothing to be done here */
+}
+
+#endif
+
+/**
+ * drm_gem_dmabuf_release - dma_buf release implementation for GEM
+ * @dma_buf: buffer to be released
+ *
+ * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
+ * must use this in their dma_buf ops structure as the release callback.
+ */
+void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ /* drop the reference the export fd holds */
+ drm_gem_object_unreference_unlocked(obj);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_release);
+
+#ifdef notyet
+
+static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ return dev->driver->gem_prime_vmap(obj);
+}
+
+static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ dev->driver->gem_prime_vunmap(obj, vaddr);
+}
+
+static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+ unsigned long page_num)
+{
+ return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+ unsigned long page_num, void *addr)
+{
+
+}
+
+static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ if (!dev->driver->gem_prime_mmap)
+ return -ENOSYS;
+
+ return dev->driver->gem_prime_mmap(obj, vma);
+}
+
+#endif
+
+static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+#ifdef notyet
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+#endif
+ .release = drm_gem_dmabuf_release,
+#ifdef notyet
+ .kmap = drm_gem_dmabuf_kmap,
+ .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
+ .kunmap = drm_gem_dmabuf_kunmap,
+ .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+#endif
+};
+
+/**
+ * DOC: PRIME Helpers
+ *
+ * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
+ * simpler APIs by using the helper functions @drm_gem_prime_export and
+ * @drm_gem_prime_import. These functions implement dma-buf support in terms of
+ * six lower-level driver callbacks:
+ *
+ * Export callbacks:
+ *
+ * - @gem_prime_pin (optional): prepare a GEM object for exporting
+ *
+ * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
+ *
+ * - @gem_prime_vmap: vmap a buffer exported by your driver
+ *
+ * - @gem_prime_vunmap: vunmap a buffer exported by your driver
+ *
+ * - @gem_prime_mmap (optional): mmap a buffer exported by your driver
+ *
+ * Import callback:
+ *
+ * - @gem_prime_import_sg_table (import): produce a GEM object from another
+ * driver's scatter/gather table
+ */
+
+/**
+ * drm_gem_prime_export - helper library implementation of the export callback
+ * @dev: drm_device to export from
+ * @obj: GEM object to export
+ * @flags: flags like DRM_CLOEXEC
+ *
+ * This is the implementation of the gem_prime_export functions for GEM drivers
+ * using the PRIME helpers.
+ */
+struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags)
+{
+ struct dma_buf_export_info exp_info = {
+#ifdef __linux__
+ .exp_name = KBUILD_MODNAME, /* white lie for debug */
+ .owner = dev->driver->fops->owner,
+#endif
+ .ops = &drm_gem_prime_dmabuf_ops,
+ .size = obj->size,
+ .flags = flags,
+ .priv = obj,
+ };
+
+#ifdef notyet
+ if (dev->driver->gem_prime_res_obj)
+ exp_info.resv = dev->driver->gem_prime_res_obj(obj);
+#endif
+
+ return dma_buf_export(&exp_info);
+}
+EXPORT_SYMBOL(drm_gem_prime_export);
+
+static struct dma_buf *export_and_register_object(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ uint32_t flags)
+{
+ struct dma_buf *dmabuf;
+
+ /* prevent races with concurrent gem_close. */
+ if (obj->handle_count == 0) {
+ dmabuf = ERR_PTR(-ENOENT);
+ return dmabuf;
+ }
+
+ dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (IS_ERR(dmabuf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ return dmabuf;
+ }
+
+ /*
+ * Note that callers do not need to clean up the export cache
+ * since the check for obj->handle_count guarantees that someone
+ * will clean it up.
+ */
+ obj->dma_buf = dmabuf;
+ get_dma_buf(obj->dma_buf);
+ /* Grab a new ref since the caller's ref is now used by the dma-buf */
+ drm_gem_object_reference(obj);
+
+ return dmabuf;
+}
+
+/**
+ * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @handle: buffer handle to export
+ * @flags: flags like DRM_CLOEXEC
+ * @prime_fd: pointer to storage for the fd id of the created dma-buf
+ *
+ * This is the PRIME export function which GEM drivers must use to ensure
+ * correct lifetime management of the underlying GEM object.
+ * The actual exporting from a GEM object to a dma-buf is done through the
+ * gem_prime_export driver callback.
+ */
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags,
+ int *prime_fd)
+{
+ struct drm_gem_object *obj;
+ int ret = 0;
+ struct dma_buf *dmabuf;
+
+ mutex_lock(&file_priv->prime.lock);
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
+ if (dmabuf) {
+ get_dma_buf(dmabuf);
+ goto out_have_handle;
+ }
+
+ mutex_lock(&dev->object_name_lock);
+#ifdef notyet
+ /* re-export the original imported object */
+ if (obj->import_attach) {
+ dmabuf = obj->import_attach->dmabuf;
+ get_dma_buf(dmabuf);
+ goto out_have_obj;
+ }
+#endif
+
+ if (obj->dma_buf) {
+ get_dma_buf(obj->dma_buf);
+ dmabuf = obj->dma_buf;
+ goto out_have_obj;
+ }
+
+ dmabuf = export_and_register_object(dev, obj, flags);
+ if (IS_ERR(dmabuf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ ret = PTR_ERR(dmabuf);
+ mutex_unlock(&dev->object_name_lock);
+ goto out;
+ }
+
+out_have_obj:
+ /*
+ * If we've exported this buffer then cheat and add it to the import list
+ * so we get the correct handle back. We must do this under the
+ * protection of dev->object_name_lock to ensure that a racing gem close
+ * ioctl doesn't fail to remove this buffer handle from the cache.
+ */
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
+ dmabuf, handle);
+ mutex_unlock(&dev->object_name_lock);
+ if (ret)
+ goto fail_put_dmabuf;
+
+out_have_handle:
+ ret = dma_buf_fd(dmabuf, flags);
+ /*
+ * We must _not_ remove the buffer from the handle cache since the newly
+ * created dma buf is already linked in the global obj->dma_buf pointer,
+ * and that is invariant as long as a userspace gem handle exists.
+ * Closing the handle will clean out the cache anyway, so we don't leak.
+ */
+ if (ret < 0) {
+ goto fail_put_dmabuf;
+ } else {
+ *prime_fd = ret;
+ ret = 0;
+ }
+
+ goto out;
+
+fail_put_dmabuf:
+ dma_buf_put(dmabuf);
+out:
+ drm_gem_object_unreference_unlocked(obj);
+out_unlock:
+ mutex_unlock(&file_priv->prime.lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+
+/**
+ * drm_gem_prime_import - helper library implementation of the import callback
+ * @dev: drm_device to import into
+ * @dma_buf: dma-buf object to import
+ *
+ * This is the implementation of the gem_prime_import functions for GEM drivers
+ * using the PRIME helpers.
+ */
+struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+#ifdef notyet
+ struct sg_table *sgt;
+#endif
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+ obj = dma_buf->priv;
+ if (obj->dev == dev) {
+ /*
+ * Importing a dmabuf exported from our own GEM increases the
+ * refcount on the GEM itself instead of the f_count of the dmabuf.
+ */
+ drm_gem_object_reference(obj);
+ return obj;
+ }
+ }
+
+#ifdef notyet
+ if (!dev->driver->gem_prime_import_sg_table)
+ return ERR_PTR(-EINVAL);
+#endif
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+#ifdef notyet
+ get_dma_buf(dma_buf);
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto fail_detach;
+ }
+
+ obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail_unmap;
+ }
+
+ obj->import_attach = attach;
+
+ return obj;
+
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+#else
+ ret = 0;
+ panic(__func__);
+#endif
+}
+EXPORT_SYMBOL(drm_gem_prime_import);
+
+/**
+ * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @prime_fd: fd id of the dma-buf which should be imported
+ * @handle: pointer to storage for the handle of the imported buffer object
+ *
+ * This is the PRIME import function which GEM drivers must use to ensure
+ * correct lifetime management of the underlying GEM object.
+ * The actual importing of the GEM object from the dma-buf is done through
+ * the gem_prime_import driver callback.
+ */
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle)
+{
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ int ret;
+
+ dma_buf = dma_buf_get(prime_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ mutex_lock(&file_priv->prime.lock);
+
+ ret = drm_prime_lookup_buf_handle(&file_priv->prime,
+ dma_buf, handle);
+ if (ret == 0)
+ goto out_put;
+
+ /* never seen this one, need to import */
+ mutex_lock(&dev->object_name_lock);
+ obj = dev->driver->gem_prime_import(dev, dma_buf);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto out_unlock;
+ }
+
+ if (obj->dma_buf) {
+ WARN_ON(obj->dma_buf != dma_buf);
+ } else {
+ obj->dma_buf = dma_buf;
+ get_dma_buf(dma_buf);
+ }
+
+ /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, handle);
+ drm_gem_object_unreference_unlocked(obj);
+ if (ret)
+ goto out_put;
+
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
+ dma_buf, *handle);
+ if (ret)
+ goto fail;
+
+ mutex_unlock(&file_priv->prime.lock);
+
+ dma_buf_put(dma_buf);
+
+ return 0;
+
+fail:
+ /* hmm, if driver attached, we are relying on the free-object path
+ * to detach.. which seems ok..
+ */
+ drm_gem_handle_delete(file_priv, *handle);
+out_unlock:
+ mutex_unlock(&dev->object_name_lock);
+out_put:
+ dma_buf_put(dma_buf);
+ mutex_unlock(&file_priv->prime.lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
+
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_prime_handle *args = data;
+ uint32_t flags;
+
+ if (!drm_core_check_feature(dev, DRIVER_PRIME))
+ return -EINVAL;
+
+ if (!dev->driver->prime_handle_to_fd)
+ return -ENOSYS;
+
+ /* check flags are valid */
+ if (args->flags & ~DRM_CLOEXEC)
+ return -EINVAL;
+
+ /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
+ flags = args->flags & DRM_CLOEXEC;
+
+ return dev->driver->prime_handle_to_fd(dev, file_priv,
+ args->handle, flags, &args->fd);
+}
+
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_prime_handle *args = data;
+
+ if (!drm_core_check_feature(dev, DRIVER_PRIME))
+ return -EINVAL;
+
+ if (!dev->driver->prime_fd_to_handle)
+ return -ENOSYS;
+
+ return dev->driver->prime_fd_to_handle(dev, file_priv,
+ args->fd, &args->handle);
+}
+
+#ifdef notyet
+
+/**
+ * drm_prime_pages_to_sg - converts a page array into an sg list
+ * @pages: pointer to the array of page pointers to convert
+ * @nr_pages: length of the page vector
+ *
+ * This helper creates an sg table object from a set of pages; the driver
+ * is responsible for mapping the pages into the importer's address space
+ * for use with dma_buf itself.
+ */
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
+{
+ struct sg_table *sg = NULL;
+ int ret;
+
+ sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!sg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ return sg;
+out:
+ kfree(sg);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_prime_pages_to_sg);
+
+/**
+ * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
+ * @sgt: scatter-gather table to convert
+ * @pages: array of page pointers to store the page array in
+ * @addrs: optional array to store the dma bus address of each page
+ * @max_pages: size of both the passed-in arrays
+ *
+ * Exports an sg table into an array of pages and addresses. This is currently
+ * required by the TTM driver in order to do correct fault handling.
+ */
+int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+ dma_addr_t *addrs, int max_pages)
+{
+ unsigned count;
+ struct scatterlist *sg;
+ struct page *page;
+ u32 len;
+ int pg_index;
+ dma_addr_t addr;
+
+ pg_index = 0;
+ for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+ len = sg->length;
+ page = sg_page(sg);
+ addr = sg_dma_address(sg);
+
+ while (len > 0) {
+ if (WARN_ON(pg_index >= max_pages))
+ return -1;
+ pages[pg_index] = page;
+ if (addrs)
+ addrs[pg_index] = addr;
+
+ page++;
+ addr += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ pg_index++;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
+
+/**
+ * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
+ * @obj: GEM object which was created from a dma-buf
+ * @sg: the sg-table which was pinned at import time
+ *
+ * This is the cleanup function which GEM drivers need to call when they use
+ * @drm_gem_prime_import to import dma-bufs.
+ */
+void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
+{
+ struct dma_buf_attachment *attach;
+ struct dma_buf *dma_buf;
+ attach = obj->import_attach;
+ if (sg)
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+ dma_buf = attach->dmabuf;
+ dma_buf_detach(attach->dmabuf, attach);
+ /* remove the reference */
+ dma_buf_put(dma_buf);
+}
+EXPORT_SYMBOL(drm_prime_gem_destroy);
+
+#endif
+
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+ INIT_LIST_HEAD(&prime_fpriv->head);
+ rw_init(&prime_fpriv->lock, "primlk");
+}
+
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
+{
+ /* by now drm_gem_release should've made sure the list is empty */
+ WARN_ON(!list_empty(&prime_fpriv->head));
+}
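
To tie the helpers together: a hedged sketch of the minimal wiring a GEM
driver needs in its struct drm_driver to opt in to PRIME, mirroring what the
i915 and radeon hunks below do ('exampledrm' is hypothetical):

static struct drm_driver exampledrm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME,

	/* handle <-> fd: the generic helpers can be used directly */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,

	/* GEM <-> dma-buf: reuse the generic ops or wrap them (i915 wraps) */
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
};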
diff --git a/sys/dev/pci/drm/files.drm b/sys/dev/pci/drm/files.drm
index b670817f87d..1fb52aed23a 100644
--- a/sys/dev/pci/drm/files.drm
+++ b/sys/dev/pci/drm/files.drm
@@ -1,5 +1,5 @@
# $NetBSD: files.drm,v 1.2 2007/03/28 11:29:37 jmcneill Exp $
-# $OpenBSD: files.drm,v 1.41 2018/04/25 01:27:46 jsg Exp $
+# $OpenBSD: files.drm,v 1.42 2018/06/25 22:29:16 kettenis Exp $
# direct rendering modules
define drmbase {[console = -1]}
@@ -32,6 +32,7 @@ file dev/pci/drm/drm_cache.c drm
file dev/pci/drm/drm_panel.c drm & inteldrm
file dev/pci/drm/drm_dp_mst_topology.c drm
file dev/pci/drm/drm_mipi_dsi.c drm & inteldrm
+file dev/pci/drm/drm_prime.c drm
file dev/pci/drm/linux_hdmi.c drm
file dev/pci/drm/linux_list_sort.c drm
file dev/pci/drm/linux_reservation.c drm
@@ -59,6 +60,7 @@ file dev/pci/drm/i915/i915_drv.c inteldrm
file dev/pci/drm/i915/i915_gem.c inteldrm
file dev/pci/drm/i915/i915_gem_context.c inteldrm
file dev/pci/drm/i915/i915_gem_batch_pool.c inteldrm
+file dev/pci/drm/i915/i915_gem_dmabuf.c inteldrm
file dev/pci/drm/i915/i915_gem_evict.c inteldrm
file dev/pci/drm/i915/i915_gem_execbuffer.c inteldrm
file dev/pci/drm/i915/i915_gem_fence.c inteldrm
@@ -195,7 +197,7 @@ file dev/pci/drm/radeon/radeon_legacy_tv.c radeondrm
#file dev/pci/drm/radeon/radeon_mn.c radeondrm not needed?
file dev/pci/drm/radeon/radeon_object.c radeondrm
file dev/pci/drm/radeon/radeon_pm.c radeondrm
-#file dev/pci/drm/radeon/radeon_prime.c radeondrm
+file dev/pci/drm/radeon/radeon_prime.c radeondrm
file dev/pci/drm/radeon/radeon_ring.c radeondrm
file dev/pci/drm/radeon/radeon_sa.c radeondrm
file dev/pci/drm/radeon/radeon_semaphore.c radeondrm
diff --git a/sys/dev/pci/drm/i915/i915_drv.c b/sys/dev/pci/drm/i915/i915_drv.c
index f577fc2828a..b2e3f0f765f 100644
--- a/sys/dev/pci/drm/i915/i915_drv.c
+++ b/sys/dev/pci/drm/i915/i915_drv.c
@@ -1793,12 +1793,10 @@ static struct drm_driver driver = {
.gem_fault = i915_gem_fault,
#endif
-#ifdef notyet
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
-#endif
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
diff --git a/sys/dev/pci/drm/i915/i915_drv.h b/sys/dev/pci/drm/i915/i915_drv.h
index bb8b2ddd35a..03b6433836b 100644
--- a/sys/dev/pci/drm/i915/i915_drv.h
+++ b/sys/dev/pci/drm/i915/i915_drv.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: i915_drv.h,v 1.79 2018/01/15 22:24:17 kettenis Exp $ */
+/* $OpenBSD: i915_drv.h,v 1.80 2018/06/25 22:29:16 kettenis Exp $ */
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
*/
/*
@@ -3215,13 +3215,11 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
-#ifdef notyet
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-#endif
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view);
diff --git a/sys/dev/pci/drm/i915/i915_gem_dmabuf.c b/sys/dev/pci/drm/i915/i915_gem_dmabuf.c
new file mode 100644
index 00000000000..92a7d26557f
--- /dev/null
+++ b/sys/dev/pci/drm/i915/i915_gem_dmabuf.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright 2012 Red Hat Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ */
+#include <dev/pci/drm/drmP.h>
+#include "i915_drv.h"
+#ifdef __linux__
+#include <linux/dma-buf.h>
+#endif
+
+static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
+{
+ return to_intel_bo(buf->priv);
+}
+
+#ifdef notyet
+
+static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
+ struct sg_table *st;
+ struct scatterlist *src, *dst;
+ int ret, i;
+
+ ret = i915_mutex_lock_interruptible(obj->base.dev);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ goto err_unlock;
+
+ i915_gem_object_pin_pages(obj);
+
+ /* Copy sg so that we make an independent mapping */
+ st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (st == NULL) {
+ ret = -ENOMEM;
+ goto err_unpin;
+ }
+
+ ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+ if (ret)
+ goto err_free;
+
+ src = obj->pages->sgl;
+ dst = st->sgl;
+ for (i = 0; i < obj->pages->nents; i++) {
+ sg_set_page(dst, sg_page(src), src->length, 0);
+ dst = sg_next(dst);
+ src = sg_next(src);
+ }
+
+ if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ ret = -ENOMEM;
+ goto err_free_sg;
+ }
+
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return st;
+
+err_free_sg:
+ sg_free_table(st);
+err_free:
+ kfree(st);
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err_unlock:
+ mutex_unlock(&obj->base.dev->struct_mutex);
+err:
+ return ERR_PTR(ret);
+}
+
+static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg,
+ enum dma_data_direction dir)
+{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
+
+ mutex_lock(&obj->base.dev->struct_mutex);
+
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
+
+ i915_gem_object_unpin_pages(obj);
+
+ mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct drm_device *dev = obj->base.dev;
+ struct sg_page_iter sg_iter;
+ struct page **pages;
+ int ret, i;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (obj->dma_buf_vmapping) {
+ obj->vmapping_count++;
+ goto out_unlock;
+ }
+
+ ret = i915_gem_object_get_pages(obj);
+ if (ret)
+ goto err;
+
+ i915_gem_object_pin_pages(obj);
+
+ ret = -ENOMEM;
+
+ pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+ if (pages == NULL)
+ goto err_unpin;
+
+ i = 0;
+ for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+ pages[i++] = sg_page_iter_page(&sg_iter);
+
+ obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
+ drm_free_large(pages);
+
+ if (!obj->dma_buf_vmapping)
+ goto err_unpin;
+
+ obj->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return obj->dma_buf_vmapping;
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err:
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct drm_device *dev = obj->base.dev;
+
+ mutex_lock(&dev->struct_mutex);
+ if (--obj->vmapping_count == 0) {
+ vunmap(obj->dma_buf_vmapping);
+ obj->dma_buf_vmapping = NULL;
+
+ i915_gem_object_unpin_pages(obj);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+ bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, write);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+#endif
+
+static const struct dma_buf_ops i915_dmabuf_ops = {
+#ifdef notyet
+ .map_dma_buf = i915_gem_map_dma_buf,
+ .unmap_dma_buf = i915_gem_unmap_dma_buf,
+#endif
+ .release = drm_gem_dmabuf_release,
+#ifdef notyet
+ .kmap = i915_gem_dmabuf_kmap,
+ .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
+ .kunmap = i915_gem_dmabuf_kunmap,
+ .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+ .mmap = i915_gem_dmabuf_mmap,
+ .vmap = i915_gem_dmabuf_vmap,
+ .vunmap = i915_gem_dmabuf_vunmap,
+ .begin_cpu_access = i915_gem_begin_cpu_access,
+#endif
+};
+
+struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *gem_obj, int flags)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &i915_dmabuf_ops;
+ exp_info.size = gem_obj->size;
+ exp_info.flags = flags;
+ exp_info.priv = gem_obj;
+
+
+ if (obj->ops->dmabuf_export) {
+ int ret = obj->ops->dmabuf_export(obj);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ return dma_buf_export(&exp_info);
+}
+
+#ifdef notyet
+
+static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ struct sg_table *sg;
+
+ sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg))
+ return PTR_ERR(sg);
+
+ obj->pages = sg;
+ return 0;
+}
+
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+{
+ dma_buf_unmap_attachment(obj->base.import_attach,
+ obj->pages, DMA_BIDIRECTIONAL);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
+ .get_pages = i915_gem_object_get_pages_dmabuf,
+ .put_pages = i915_gem_object_put_pages_dmabuf,
+};
+
+#endif
+
+struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ /* is this one of our own objects? */
+ if (dma_buf->ops == &i915_dmabuf_ops) {
+ obj = dma_buf_to_obj(dma_buf);
+ /* is it from our device? */
+ if (obj->base.dev == dev) {
+ /*
+ * Importing a dmabuf exported from our own GEM increases the
+ * refcount on the GEM itself instead of the f_count of the dmabuf.
+ */
+ drm_gem_object_reference(&obj->base);
+ return &obj->base;
+ }
+ }
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+#ifdef notyet
+ get_dma_buf(dma_buf);
+
+ obj = i915_gem_object_alloc(dev);
+ if (obj == NULL) {
+ ret = -ENOMEM;
+ goto fail_detach;
+ }
+
+ drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
+ obj->base.import_attach = attach;
+
+ return &obj->base;
+
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+#else
+ ret = 0;
+ panic(__func__);
+#endif
+}
diff --git a/sys/dev/pci/drm/radeon/radeon_drv.c b/sys/dev/pci/drm/radeon/radeon_drv.c
index 3939bc06e75..b57ac74a7d6 100644
--- a/sys/dev/pci/drm/radeon/radeon_drv.c
+++ b/sys/dev/pci/drm/radeon/radeon_drv.c
@@ -606,13 +606,15 @@ struct drm_driver kms_driver = {
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
.dumb_destroy = drm_gem_dumb_destroy,
-#ifdef notyet
+#ifdef __linux__
.fops = &radeon_driver_kms_fops,
+#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = radeon_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
+#ifdef notyet
.gem_prime_pin = radeon_gem_prime_pin,
.gem_prime_unpin = radeon_gem_prime_unpin,
.gem_prime_res_obj = radeon_gem_prime_res_obj,
diff --git a/sys/dev/pci/drm/radeon/radeon_prime.c b/sys/dev/pci/drm/radeon/radeon_prime.c
index 975d2d58416..1a44476e5a1 100644
--- a/sys/dev/pci/drm/radeon/radeon_prime.c
+++ b/sys/dev/pci/drm/radeon/radeon_prime.c
@@ -28,6 +28,8 @@
#include "radeon.h"
#include <dev/pci/drm/radeon_drm.h>
+#ifdef notyet
+
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
@@ -115,6 +117,8 @@ struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
return bo->tbo.resv;
}
+#endif
+
struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj,
int flags)