author     Mark Kettenis <kettenis@cvs.openbsd.org>    2016-04-05 08:19:01 +0000
committer  Mark Kettenis <kettenis@cvs.openbsd.org>    2016-04-05 08:19:01 +0000
commit     335330c4a8e66b9e93fd886bf182dd2a4d1758f0 (patch)
tree       786630afd6056cfbcbae484497694394b123025c /sys
parent     e67c399afdd802486451827f8676be7f18808ebd (diff)
Add vma offset manager code.
ok jsg@
Diffstat (limited to 'sys')
-rw-r--r--  sys/dev/pci/drm/drm_linux.h        |   7
-rw-r--r--  sys/dev/pci/drm/drm_vma_manager.c  | 430
-rw-r--r--  sys/dev/pci/drm/drm_vma_manager.h  | 252
3 files changed, 688 insertions(+), 1 deletion(-)
diff --git a/sys/dev/pci/drm/drm_linux.h b/sys/dev/pci/drm/drm_linux.h
index 0e651ba323e..c120196a585 100644
--- a/sys/dev/pci/drm/drm_linux.h
+++ b/sys/dev/pci/drm/drm_linux.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_linux.h,v 1.45 2016/02/05 15:51:10 kettenis Exp $ */
+/* $OpenBSD: drm_linux.h,v 1.46 2016/04/05 08:19:00 kettenis Exp $ */
/*
* Copyright (c) 2013, 2014, 2015 Mark Kettenis
*
@@ -30,6 +30,8 @@ typedef u_int8_t u8;
typedef int32_t s32;
typedef int64_t s64;
+typedef uint64_t __u64;
+
typedef uint16_t __le16;
typedef uint16_t __be16;
typedef uint32_t __le32;
@@ -1377,6 +1379,9 @@ struct fb_info {
#define framebuffer_alloc(flags, device) \
kzalloc(sizeof(struct fb_info), GFP_KERNEL)
+struct address_space;
+#define unmap_mapping_range(mapping, holebegin, holeend, even_cows)
+
/*
* ACPI types and interfaces.
*/
diff --git a/sys/dev/pci/drm/drm_vma_manager.c b/sys/dev/pci/drm/drm_vma_manager.c
new file mode 100644
index 00000000000..da94ab6f0ab
--- /dev/null
+++ b/sys/dev/pci/drm/drm_vma_manager.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2012 David Airlie <airlied@linux.ie>
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_mm.h"
+#include "drm_vma_manager.h"
+#include "drm_linux_rbtree.h"
+
+/**
+ * DOC: vma offset manager
+ *
+ * The vma-manager is responsible for mapping arbitrary driver-dependent memory
+ * regions into the linear user address-space. It provides offsets to the
+ * caller which can then be used on the address_space of the drm-device. It
+ * takes care to not overlap regions, size them appropriately and to not
+ * confuse mm-core by inconsistent fake vm_pgoff fields.
+ * Drivers shouldn't use this for object placement in VMEM. This manager should
+ * only be used to manage mappings into linear user-space VMs.
+ *
+ * We use drm_mm as backend to manage object allocations. But it is highly
+ * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
+ * speed up offset lookups.
+ *
+ * You must not use multiple offset managers on a single address_space.
+ * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
+ * no longer be linear. Please use VM_NONLINEAR in that case and implement your
+ * own offset managers.
+ *
+ * This offset manager works on page-based addresses. That is, every argument
+ * and return code (with the exception of drm_vma_node_offset_addr()) is given
+ * in number of pages, not number of bytes. That means, object sizes and offsets
+ * must always be page-aligned (as usual).
+ * If you want to get a valid byte-based user-space address for a given offset,
+ * please see drm_vma_node_offset_addr().
+ *
+ * In addition to offset management, the vma offset manager also handles access
+ * management. For every open-file context that is allowed to access a given
+ * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
+ * open-file with the offset of the node will fail with -EACCES. To revoke
+ * access again, use drm_vma_node_revoke(). However, the caller is responsible
+ * for destroying already existing mappings, if required.
+ */
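For orientation, the following minimal lifecycle sketch (not part of this diff)
shows the intended call sequence using only the interfaces added here; struct
my_obj, my_mgr and the page-range constants are invented for illustration.

#include "drmP.h"
#include "drm_vma_manager.h"

struct my_obj {
	struct drm_vma_offset_node vma_node;
	__u64 mmap_offset;		/* byte offset handed to user-space */
};

static struct drm_vma_offset_manager my_mgr;

static void my_driver_init(void)
{
	/* offset and size are page counts, not bytes */
	drm_vma_offset_manager_init(&my_mgr, 0x100000, 0x100000);
}

static int my_obj_init(struct my_obj *obj, unsigned long num_pages)
{
	int ret;

	drm_vma_node_reset(&obj->vma_node);	/* required before first add */
	ret = drm_vma_offset_add(&my_mgr, &obj->vma_node, num_pages);
	if (ret)
		return ret;
	/* byte offset user-space passes to mmap() */
	obj->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	return 0;
}

static void my_obj_fini(struct my_obj *obj)
{
	drm_vma_offset_remove(&my_mgr, &obj->vma_node);
}
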
+
+/**
+ * drm_vma_offset_manager_init - Initialize new offset-manager
+ * @mgr: Manager object
+ * @page_offset: Offset of available memory area (page-based)
+ * @size: Size of available address space range (page-based)
+ *
+ * Initialize a new offset-manager. The offset and area size available for the
+ * manager are given as @page_offset and @size. Both are interpreted as
+ * page-numbers, not bytes.
+ *
+ * Adding/removing nodes from the manager is locked internally and protected
+ * against concurrent access. However, node allocation and destruction are left
+ * to the caller. While calling into the vma-manager, a given node must
+ * always be guaranteed to be referenced.
+ */
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+ unsigned long page_offset, unsigned long size)
+{
+ rw_init(&mgr->vm_lock, "drmvmo");
+ mgr->vm_addr_space_rb = RB_ROOT;
+ drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_init);
+
+/**
+ * drm_vma_offset_manager_destroy() - Destroy offset manager
+ * @mgr: Manager object
+ *
+ * Destroy an object manager which was previously created via
+ * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
+ * before destroying the manager. Otherwise, drm_mm will refuse to free the
+ * requested resources.
+ *
+ * The manager must not be accessed after this function is called.
+ */
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
+{
+ /* take the lock to protect against buggy drivers */
+ write_lock(&mgr->vm_lock);
+ drm_mm_takedown(&mgr->vm_addr_space_mm);
+ write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
+
+/**
+ * drm_vma_offset_lookup() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Find a node given a start address and object size. This returns the _best_
+ * match for the given range. That is, @start may point somewhere into a valid
+ * region and the containing node will be returned, as long as the node spans
+ * the whole requested area (given the size in number of pages as @pages).
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned. It's the caller's responsibility to make sure the node doesn't
+ * get destroyed before the caller can access it.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node;
+
+ read_lock(&mgr->vm_lock);
+ node = drm_vma_offset_lookup_locked(mgr, start, pages);
+ read_unlock(&mgr->vm_lock);
+
+ return node;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup);
+
+/**
+ * drm_vma_offset_lookup_locked() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
+ * manually. See drm_vma_offset_lock_lookup() for an example.
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node, *best;
+ struct rb_node *iter;
+ unsigned long offset;
+
+ iter = mgr->vm_addr_space_rb.rb_node;
+ best = NULL;
+
+ while (likely(iter)) {
+ node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
+ offset = node->vm_node.start;
+ if (start >= offset) {
+ iter = iter->rb_right;
+ best = node;
+ if (start == offset)
+ break;
+ } else {
+ iter = iter->rb_left;
+ }
+ }
+
+ /* verify that the node spans the requested area */
+ if (best) {
+ offset = best->vm_node.start + best->vm_node.size;
+ if (offset < start + pages)
+ best = NULL;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
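To illustrate the best-match semantics, a hypothetical fault-resolution helper
(not part of this diff) could look like the sketch below; it reuses struct
my_obj from the earlier sketch, and the function and parameter names are
assumptions.

/* map a faulting page offset back to the object that spans it */
static struct my_obj *my_resolve_fault(struct drm_vma_offset_manager *mgr,
				       unsigned long fault_page,
				       unsigned long *page_in_obj)
{
	struct drm_vma_offset_node *node;

	/* a size of 1 page suffices; best-match also returns nodes that
	 * merely contain fault_page somewhere in their interior */
	node = drm_vma_offset_lookup(mgr, fault_page, 1);
	if (node == NULL)
		return NULL;

	*page_in_obj = fault_page - drm_vma_node_start(node);
	return container_of(node, struct my_obj, vma_node);
}
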
+
+/* internal helper to link @node into the rb-tree */
+static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node)
+{
+ struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
+ struct rb_node *parent = NULL;
+ struct drm_vma_offset_node *iter_node;
+
+ while (likely(*iter)) {
+ parent = *iter;
+ iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+
+ if (node->vm_node.start < iter_node->vm_node.start)
+ iter = &(*iter)->rb_left;
+ else if (node->vm_node.start > iter_node->vm_node.start)
+ iter = &(*iter)->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&node->vm_rb, parent, iter);
+ rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+}
+
+/**
+ * drm_vma_offset_add() - Add offset node to manager
+ * @mgr: Manager object
+ * @node: Node to be added
+ * @pages: Allocation size visible to user-space (in number of pages)
+ *
+ * Add a node to the offset-manager. If the node was already added, this does
+ * nothing and returns 0. @pages is the size of the object given in number of
+ * pages.
+ * After this call succeeds, you can access the offset of the node until it
+ * is removed again.
+ *
+ * If this call fails, it is safe to retry the operation or call
+ * drm_vma_offset_remove(), anyway. However, no cleanup is required in that
+ * case.
+ *
+ * @pages is not required to be the same size as the underlying memory object
+ * that you want to map. It only limits the size that user-space can map into
+ * their address space.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node, unsigned long pages)
+{
+ int ret;
+
+ write_lock(&mgr->vm_lock);
+
+ if (drm_mm_node_allocated(&node->vm_node)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
+ pages, 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto out_unlock;
+
+ _drm_vma_offset_add_rb(mgr, node);
+
+out_unlock:
+ write_unlock(&mgr->vm_lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vma_offset_add);
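Because a second add of an already-added node simply succeeds, a driver can
allocate the fake offset lazily on first request. A sketch of that pattern
(not part of this diff; my_obj and its num_pages field are assumptions):

static int my_obj_get_mmap_offset(struct drm_vma_offset_manager *mgr,
				  struct my_obj *obj, __u64 *offset)
{
	int ret;

	/* a no-op returning 0 if the node already has an offset */
	ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->num_pages);
	if (ret)
		return ret;	/* safe to retry later, no cleanup needed */

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	return 0;
}
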
+
+/**
+ * drm_vma_offset_remove() - Remove offset node from manager
+ * @mgr: Manager object
+ * @node: Node to be removed
+ *
+ * Remove a node from the offset manager. If the node wasn't added before, this
+ * does nothing. After this call returns, the offset and size will be 0 until a
+ * new offset is allocated via drm_vma_offset_add() again. Helper functions like
+ * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
+ * offset is allocated.
+ */
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node)
+{
+ write_lock(&mgr->vm_lock);
+
+ if (drm_mm_node_allocated(&node->vm_node)) {
+ rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
+ drm_mm_remove_node(&node->vm_node);
+ memset(&node->vm_node, 0, sizeof(node->vm_node));
+ }
+
+ write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_remove);
+
+/**
+ * drm_vma_node_allow - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to add
+ *
+ * Add @filp to the list of allowed open-files for this node. If @filp is
+ * already on this list, the ref-count is incremented.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * You must remove all open-files the same number of times as you added them
+ * before destroying the node. Otherwise, you will leak memory.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
+{
+ struct rb_node **iter;
+ struct rb_node *parent = NULL;
+ struct drm_vma_offset_file *new, *entry;
+ int ret = 0;
+
+ /* Preallocate entry to avoid atomic allocations below. It is quite
+ * unlikely that an open-file is added twice to a single node so we
+ * don't optimize for this case. OOM is checked below only if the entry
+ * is actually used. */
+ new = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+ write_lock(&node->vm_lock);
+
+ iter = &node->vm_files.rb_node;
+
+ while (likely(*iter)) {
+ parent = *iter;
+ entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
+
+ if (filp == entry->vm_filp) {
+ entry->vm_count++;
+ goto unlock;
+ } else if (filp > entry->vm_filp) {
+ iter = &(*iter)->rb_right;
+ } else {
+ iter = &(*iter)->rb_left;
+ }
+ }
+
+ if (!new) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ new->vm_filp = filp;
+ new->vm_count = 1;
+ rb_link_node(&new->vm_rb, parent, iter);
+ rb_insert_color(&new->vm_rb, &node->vm_files);
+ new = NULL;
+
+unlock:
+ write_unlock(&node->vm_lock);
+ kfree(new);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vma_node_allow);
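A typical pairing (not part of this diff) grants access when a handle to the
object is created for an open-file and revokes it when the handle is closed;
my_obj and the hook names are assumptions.

static int my_obj_handle_open(struct my_obj *obj, struct file *filp)
{
	/* allow this open-file to mmap() the object's offset */
	return drm_vma_node_allow(&obj->vma_node, filp);
}

static void my_obj_handle_close(struct my_obj *obj, struct file *filp)
{
	/* drops one reference; removed once the count reaches zero */
	drm_vma_node_revoke(&obj->vma_node, filp);
}
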
+
+/**
+ * drm_vma_node_revoke - Remove open-file from list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to remove
+ *
+ * Decrement the ref-count of @filp in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @filp from the list. You must call
+ * this once for every drm_vma_node_allow() on @filp.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * If @filp is not on the list, nothing is done.
+ */
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
+{
+ struct drm_vma_offset_file *entry;
+ struct rb_node *iter;
+
+ write_lock(&node->vm_lock);
+
+ iter = node->vm_files.rb_node;
+ while (likely(iter)) {
+ entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+ if (filp == entry->vm_filp) {
+ if (!--entry->vm_count) {
+ rb_erase(&entry->vm_rb, &node->vm_files);
+ kfree(entry);
+ }
+ break;
+ } else if (filp > entry->vm_filp) {
+ iter = iter->rb_right;
+ } else {
+ iter = iter->rb_left;
+ }
+ }
+
+ write_unlock(&node->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_node_revoke);
+
+/**
+ * drm_vma_node_is_allowed - Check whether an open-file is granted access
+ * @node: Node to check
+ * @filp: Open-file to check for
+ *
+ * Search the list in @node whether @filp is currently on the list of allowed
+ * open-files (see drm_vma_node_allow()).
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * true iff @filp is on the list
+ */
+bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+ struct file *filp)
+{
+ struct drm_vma_offset_file *entry;
+ struct rb_node *iter;
+
+ read_lock(&node->vm_lock);
+
+ iter = node->vm_files.rb_node;
+ while (likely(iter)) {
+ entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+ if (filp == entry->vm_filp)
+ break;
+ else if (filp > entry->vm_filp)
+ iter = iter->rb_right;
+ else
+ iter = iter->rb_left;
+ }
+
+ read_unlock(&node->vm_lock);
+
+ return iter;
+}
+EXPORT_SYMBOL(drm_vma_node_is_allowed);
diff --git a/sys/dev/pci/drm/drm_vma_manager.h b/sys/dev/pci/drm/drm_vma_manager.h
new file mode 100644
index 00000000000..c55648e6a1b
--- /dev/null
+++ b/sys/dev/pci/drm/drm_vma_manager.h
@@ -0,0 +1,252 @@
+#ifndef __DRM_VMA_MANAGER_H__
+#define __DRM_VMA_MANAGER_H__
+
+/*
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drm_mm.h"
+#include "drm_linux_rbtree.h"
+
+struct drm_vma_offset_file {
+ struct rb_node vm_rb;
+ struct file *vm_filp;
+ unsigned long vm_count;
+};
+
+struct drm_vma_offset_node {
+ rwlock_t vm_lock;
+ struct drm_mm_node vm_node;
+ struct rb_node vm_rb;
+ struct rb_root vm_files;
+};
+
+struct drm_vma_offset_manager {
+ rwlock_t vm_lock;
+ struct rb_root vm_addr_space_rb;
+ struct drm_mm vm_addr_space_mm;
+};
+
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+ unsigned long page_offset, unsigned long size);
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
+
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages);
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages);
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node, unsigned long pages);
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node);
+
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
+bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+ struct file *filp);
+
+/**
+ * drm_vma_offset_exact_lookup() - Look up node by exact address
+ * @mgr: Manager object
+ * @start: Start address (page-based, not byte-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
+ * It only returns the exact object with the given start address.
+ *
+ * RETURNS:
+ * Node at exact start address @start.
+ */
+static inline struct drm_vma_offset_node *
+drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node;
+
+ node = drm_vma_offset_lookup(mgr, start, pages);
+ return (node && node->vm_node.start == start) ? node : NULL;
+}
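A hypothetical mmap() entry point (not part of this diff) would resolve the
fake offset with the exact lookup and then apply the access check; the
function and parameter names are assumptions.

static int my_mmap_object(struct drm_vma_offset_manager *mgr,
			  struct file *filp, unsigned long off_pages,
			  unsigned long num_pages)
{
	struct drm_vma_offset_node *node;

	/* the mapping must start exactly at the object's fake offset */
	node = drm_vma_offset_exact_lookup(mgr, off_pages, num_pages);
	if (node == NULL)
		return -EINVAL;

	/* only open-files granted access via drm_vma_node_allow() may map */
	if (!drm_vma_node_is_allowed(node, filp))
		return -EACCES;

	/* ... establish the mapping for the object owning node ... */
	return 0;
}
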
+
+/**
+ * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Lock VMA manager for extended lookups. Only *_locked() VMA function calls
+ * are allowed while holding this lock. All other contexts are blocked from VMA
+ * until the lock is released via drm_vma_offset_unlock_lookup().
+ *
+ * Use this if you need to take a reference to the objects returned by
+ * drm_vma_offset_lookup_locked() before releasing this lock again.
+ *
+ * This lock must not be used for anything other than extended lookups. You must
+ * not call any other VMA helpers while holding this lock.
+ *
+ * Note: You're in atomic-context while holding this lock!
+ *
+ * Example:
+ * drm_vma_offset_lock_lookup(mgr);
+ * node = drm_vma_offset_lookup_locked(mgr);
+ * if (node)
+ * kref_get_unless_zero(container_of(node, sth, entr));
+ * drm_vma_offset_unlock_lookup(mgr);
+ */
+static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
+{
+ read_lock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
+ */
+static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
+{
+ read_unlock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_node_reset() - Initialize or reset node object
+ * @node: Node to initialize or reset
+ *
+ * Reset a node to its initial state. This must be called before using it with
+ * any VMA offset manager.
+ *
+ * This must not be called on an already allocated node, or you will leak
+ * memory.
+ */
+static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
+{
+ memset(node, 0, sizeof(*node));
+ node->vm_files = RB_ROOT;
+ rw_init(&node->vm_lock, "drmvma");
+}
+
+/**
+ * drm_vma_node_start() - Return start address for page-based addressing
+ * @node: Node to inspect
+ *
+ * Return the start address of the given node. This can be used as offset into
+ * the linear VM space that is provided by the VMA offset manager. Note that
+ * this can only be used for page-based addressing. If you need a proper offset
+ * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
+ * drm_vma_node_offset_addr() helper instead.
+ *
+ * RETURNS:
+ * Start address of @node for page-based addressing. 0 if the node does not
+ * have an offset allocated.
+ */
+static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
+{
+ return node->vm_node.start;
+}
+
+/**
+ * drm_vma_node_size() - Return size (page-based)
+ * @node: Node to inspect
+ *
+ * Return the size as number of pages for the given node. This is the same size
+ * that was passed to drm_vma_offset_add(). If no offset is allocated for the
+ * node, this is 0.
+ *
+ * RETURNS:
+ * Size of @node as number of pages. 0 if the node does not have an offset
+ * allocated.
+ */
+static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
+{
+ return node->vm_node.size;
+}
+
+/**
+ * drm_vma_node_has_offset() - Check whether node is added to offset manager
+ * @node: Node to be checked
+ *
+ * RETURNS:
+ * true iff the node was previously allocated an offset and added to
+ * a vma offset manager.
+ */
+static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
+{
+ return drm_mm_node_allocated(&node->vm_node);
+}
+
+/**
+ * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
+ * @node: Linked offset node
+ *
+ * Same as drm_vma_node_start() but returns the address as a valid offset that
+ * can be used for user-space mappings during mmap().
+ * This must not be called on unlinked nodes.
+ *
+ * RETURNS:
+ * Offset of @node for byte-based addressing. 0 if the node does not have an
+ * object allocated.
+ */
+static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
+{
+ return ((__u64)node->vm_node.start) << PAGE_SHIFT;
+}
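On the user-space side, this value is simply the byte offset handed to
mmap(2). A minimal sketch (not part of this diff), assuming the driver
reports the offset through some ioctl:

#include <sys/mman.h>
#include <stdint.h>

/* map an object given the byte offset the driver reported for it */
void *
map_obj(int drm_fd, uint64_t offset, size_t obj_size)
{
	return mmap(NULL, obj_size, PROT_READ | PROT_WRITE, MAP_SHARED,
	    drm_fd, (off_t)offset);
}
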
+
+/**
+ * drm_vma_node_unmap() - Unmap offset node
+ * @node: Offset node
+ * @file_mapping: Address space to unmap @node from
+ *
+ * Unmap all userspace mappings for a given offset node. The mappings must be
+ * associated with the @file_mapping address-space. If no offset exists or
+ * the address-space is invalid, nothing is done.
+ *
+ * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
+ * is not called on this node concurrently.
+ */
+static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
+ struct address_space *file_mapping)
+{
+ if (file_mapping && drm_vma_node_has_offset(node))
+ unmap_mapping_range(file_mapping,
+ drm_vma_node_offset_addr(node),
+ drm_vma_node_size(node) << PAGE_SHIFT, 1);
+}
+
+/**
+ * drm_vma_node_verify_access() - Access verification helper for TTM
+ * @node: Offset node
+ * @filp: Open-file
+ *
+ * This checks whether @filp is granted access to @node. It is the same as
+ * drm_vma_node_is_allowed() but suitable as drop-in helper for TTM
+ * verify_access() callbacks.
+ *
+ * RETURNS:
+ * 0 if access is granted, -EACCES otherwise.
+ */
+static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
+ struct file *filp)
+{
+ return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
+}
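Wiring this into a TTM driver (not part of this diff) would look roughly like
the sketch below; struct my_bo, its fields and the surrounding verify_access()
callback signature are assumptions taken from the Linux TTM API, not from this
header.

/* hypothetical driver buffer object embedding the offset node */
struct my_bo {
	struct ttm_buffer_object ttm_bo;
	struct drm_vma_offset_node vma_node;
};

static int my_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct my_bo *mbo = container_of(bo, struct my_bo, ttm_bo);

	/* 0 if filp was allowed on the node, -EACCES otherwise */
	return drm_vma_node_verify_access(&mbo->vma_node, filp);
}
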
+
+#endif /* __DRM_VMA_MANAGER_H__ */