diff options
Diffstat (limited to 'sys/dev/pci/drm/i915')
-rw-r--r-- | sys/dev/pci/drm/i915/i915_gem_userptr.c | 22 |
1 file changed, 21 insertions, 1 deletion
diff --git a/sys/dev/pci/drm/i915/i915_gem_userptr.c b/sys/dev/pci/drm/i915/i915_gem_userptr.c index 9a65830ede0..538b7415605 100644 --- a/sys/dev/pci/drm/i915/i915_gem_userptr.c +++ b/sys/dev/pci/drm/i915/i915_gem_userptr.c @@ -693,8 +693,28 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, i915_gem_gtt_finish_pages(obj, pages); for_each_sgt_page(page, sgt_iter, pages) { - if (obj->mm.dirty) + if (obj->mm.dirty && trylock_page(page)) { + /* + * As this may not be anonymous memory (e.g. shmem) + * but exist on a real mapping, we have to lock + * the page in order to dirty it -- holding + * the page reference is not sufficient to + * prevent the inode from being truncated. + * Play safe and take the lock. + * + * However...! + * + * The mmu-notifier can be invalidated for a + * migrate_page, that is already holding the lock + * on the page. Such a try_to_unmap() will result + * in us calling put_pages() and so recursively try + * to lock the page. We avoid that deadlock with + * a trylock_page() and in exchange we risk missing + * some page dirtying. + */ set_page_dirty(page); + unlock_page(page); + } mark_page_accessed(page); put_page(page); |