author    Jonathan Gray <jsg@cvs.openbsd.org>    2019-04-14 10:14:55 +0000
committer Jonathan Gray <jsg@cvs.openbsd.org>    2019-04-14 10:14:55 +0000
commit    9f7b0921603520095dd22535e96859917ee7ed2a
tree      97b1df96a36c7959c2dd1bd53d7769ac0b604927 /sys/dev/pci/drm/radeon/radeon_mn.c
parent    1385ee84a74b2316b691d76fe5282b8aa4568a0a
Update shared drm code, inteldrm(4) and radeondrm(4) from linux 4.4 to
linux 4.19.34.

Adds support for more Intel hardware:

    Broxton/Apollo Lake (was is_preliminary in 4.4)
    Amber Lake (another Kaby Lake refresh)
    Gemini Lake
    Coffee Lake
    Whiskey Lake
    Cannon Lake (though no hardware with Intel graphics ever shipped)
    Ice Lake (alpha support, hardware not released)

This does not add support for new radeon hardware on the AMD side as newer
radeons have a different kernel driver (amdgpu).

Thanks to the OpenBSD Foundation for sponsoring this work, kettenis@ for
helping and a bunch of other developers for testing.
Diffstat (limited to 'sys/dev/pci/drm/radeon/radeon_mn.c')
-rw-r--r--    sys/dev/pci/drm/radeon/radeon_mn.c    48
1 file changed, 35 insertions, 13 deletions
diff --git a/sys/dev/pci/drm/radeon/radeon_mn.c b/sys/dev/pci/drm/radeon/radeon_mn.c
index 460a6a40a69..f8b35df44c6 100644
--- a/sys/dev/pci/drm/radeon/radeon_mn.c
+++ b/sys/dev/pci/drm/radeon/radeon_mn.c
@@ -28,8 +28,11 @@
* Christian König <christian.koenig@amd.com>
*/
-#include <dev/pci/drm/drmP.h>
-#include <dev/pci/drm/drm.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/mmu_notifier.h>
+#include <drm/drmP.h>
+#include <drm/drm.h>
#include "radeon.h"
@@ -46,8 +49,8 @@ struct radeon_mn {
struct hlist_node node;
/* objects protected by lock */
- struct rwlock lock;
- struct rb_root objects;
+ struct mutex lock;
+ struct rb_root_cached objects;
};
struct radeon_mn_node {
@@ -72,8 +75,8 @@ static void radeon_mn_destroy(struct work_struct *work)
mutex_lock(&rdev->mn_lock);
mutex_lock(&rmn->lock);
hash_del(&rmn->node);
- rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
- it.rb) {
+ rbtree_postorder_for_each_entry_safe(node, next_node,
+ &rmn->objects.rb_root, it.rb) {
interval_tree_remove(&node->it, &rmn->objects);
list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
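
Two API shifts show up in this hunk: the objects tree is now a struct
rb_root_cached (an rbtree that additionally caches the leftmost node, which
the interval-tree code uses for the earliest-starting interval), and the
postorder teardown therefore has to walk the plain rb_root embedded inside
it, while removal still goes through the cached root. A minimal sketch of
that pattern against the 4.19 interval-tree API; my_node and my_teardown
are hypothetical stand-ins, not the driver's types:

#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

struct my_node {
	struct interval_tree_node it;	/* it.start/it.last bound the range */
};

static void my_teardown(struct rb_root_cached *root)
{
	struct my_node *node, *next;

	/* postorder iteration only understands a plain rb_root, so it
	 * walks the one embedded in the cached root; removal goes
	 * through the cached root to keep its leftmost pointer valid */
	rbtree_postorder_for_each_entry_safe(node, next,
					     &root->rb_root, it.rb) {
		interval_tree_remove(&node->it, root);
		kfree(node);
	}
}
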
@@ -115,18 +118,27 @@ static void radeon_mn_release(struct mmu_notifier *mn,
* We block for all BOs between start and end to be idle and
* unmap them by moving them into the system domain again.
*/
-static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
+static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+ struct ttm_operation_ctx ctx = { false, false };
struct interval_tree_node *it;
+ int ret = 0;
/* notification is exclusive, but interval is inclusive */
end -= 1;
- mutex_lock(&rmn->lock);
+ /* TODO we should be able to split locking for interval tree and
+ * the tear down.
+ */
+ if (blockable)
+ mutex_lock(&rmn->lock);
+ else if (!mutex_trylock(&rmn->lock))
+ return -EAGAIN;
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
@@ -134,6 +146,11 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
struct radeon_bo *bo;
long r;
+ if (!blockable) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
node = container_of(it, struct radeon_mn_node, it);
it = interval_tree_iter_next(it, start, end);
@@ -154,7 +171,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
DRM_ERROR("(%ld) failed to validate user bo\n", r);
@@ -162,7 +179,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
}
}
+out_unlock:
mutex_unlock(&rmn->lock);
+
+ return ret;
}
static const struct mmu_notifier_ops radeon_mn_ops = {
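
The larger change in this function is the 4.19 mmu-notifier contract:
invalidate_range_start() now returns an int and receives a blockable flag,
and a caller that may not sleep (the OOM reaper, for instance) must get
-EAGAIN back instead of blocking on the notifier's locks. A minimal sketch
of that contract, with hypothetical my_mn / my_mn_ops names standing in for
the driver's structures:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct my_mn {
	struct mmu_notifier mn;
	struct mutex lock;	/* protects the per-mm object tree */
};

static int my_invalidate_range_start(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start,
				     unsigned long end,
				     bool blockable)
{
	struct my_mn *m = container_of(mn, struct my_mn, mn);

	if (blockable)
		mutex_lock(&m->lock);
	else if (!mutex_trylock(&m->lock))
		return -EAGAIN;	/* non-blocking caller retries later */

	/* ... wait for the affected BOs and tear down their
	 * mappings in [start, end] ... */

	mutex_unlock(&m->lock);
	return 0;
}

static const struct mmu_notifier_ops my_mn_ops = {
	.invalidate_range_start = my_invalidate_range_start,
};
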
@@ -183,7 +203,9 @@ static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
struct radeon_mn *rmn;
int r;
- down_write(&mm->mmap_sem);
+ if (down_write_killable(&mm->mmap_sem))
+ return ERR_PTR(-EINTR);
+
mutex_lock(&rdev->mn_lock);
hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
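
radeon_mn_get() likewise stops taking mmap_sem unconditionally:
down_write_killable() allows a task queued on the semaphore to be killed,
so the caller must handle the -EINTR case, as the hunk above does with
ERR_PTR(-EINTR). A small sketch of the pattern; my_lookup is a hypothetical
function:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/rwsem.h>

static void *my_lookup(struct mm_struct *mm)
{
	/* block until the semaphore is ours, but allow a fatal
	 * signal to abort the wait with -EINTR */
	if (down_write_killable(&mm->mmap_sem))
		return ERR_PTR(-EINTR);

	/* ... look up or create per-mm state under the lock ... */

	up_write(&mm->mmap_sem);
	return NULL;	/* hypothetical: real code returns the state */
}
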
@@ -199,8 +221,8 @@ static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
rmn->rdev = rdev;
rmn->mm = mm;
rmn->mn.ops = &radeon_mn_ops;
- rw_init(&rmn->lock, "rmn");
- rmn->objects = RB_ROOT;
+ mutex_init(&rmn->lock);
+ rmn->objects = RB_ROOT_CACHED;
r = __mmu_notifier_register(&rmn->mn, mm);
if (r)