author     Jonathan Gray <jsg@cvs.openbsd.org>  2024-08-30 04:00:31 +0000
committer  Jonathan Gray <jsg@cvs.openbsd.org>  2024-08-30 04:00:31 +0000
commit     27fcf632859e8e749b2304e42a294d7d40ae5fa0 (patch)
tree       6d49393cf12300dfbac8ee8af7ee5235ef28333b /sys
parent     200d124e2ff7c1b6da0491bbccba63a275c0def2 (diff)
drm/amdkfd: Move dma unmapping after TLB flush
From Philip Yang; commit 14fafdfdadf987e260adb3f807f5f1b5b21f0170 in linux-6.6.y (6.6.48), commit 101b8104307eac734f2dfa4d3511430b0b631c73 in mainline linux.
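
A minimal, self-contained sketch (not driver code) of the ordering constraint this change enforces in kfd_ioctl_unmap_memory_from_gpu(). The struct and helper names below are hypothetical stand-ins; only the call order mirrors the patch: unmap from the GPU VM, flush the GPU TLB, and only then tear down the DMA mapping. Dropping the DMA mapping before the flush leaves a window where a stale TLB entry can still issue an access through a translation that no longer exists, which is the IO_PAGE_FAULT the patch avoids.

/*
 * Sketch only: models the ordering, not the amdgpu/amdkfd APIs.
 */
#include <stdio.h>

struct gpu_mapping {             /* hypothetical stand-in for struct kgd_mem */
	int mapped_in_gpuvm;     /* GPU page-table entry present */
	int tlb_may_be_stale;    /* TLB could still cache the old translation */
	int dma_mapped;          /* IOMMU/DMA translation still installed */
};

static void unmap_from_gpuvm(struct gpu_mapping *m)
{
	m->mapped_in_gpuvm = 0;  /* page tables updated ... */
	m->tlb_may_be_stale = 1; /* ... but the TLB may still cache the PTE */
}

static void flush_tlb(struct gpu_mapping *m)
{
	m->tlb_may_be_stale = 0;
}

static void dma_unmap(struct gpu_mapping *m)
{
	/* If the TLB were still stale here, a cached translation could
	 * trigger a DMA access through a mapping we just removed. */
	if (m->tlb_may_be_stale)
		fprintf(stderr, "would risk IO_PAGE_FAULT\n");
	m->dma_mapped = 0;
}

int main(void)
{
	struct gpu_mapping m = { 1, 0, 1 };

	/* New order (this patch): flush the TLB before dropping the DMA map. */
	unmap_from_gpuvm(&m);
	flush_tlb(&m);
	dma_unmap(&m);

	printf("gpuvm=%d tlb_stale=%d dma=%d\n",
	       m.mapped_in_gpuvm, m.tlb_may_be_stale, m.dma_mapped);
	return 0;
}

Before this patch the dma unmap happened inside unmap_bo_from_gpuvm(), i.e. before the TLB flush in the ioctl; the diff below moves it into the new amdgpu_amdkfd_gpuvm_dmaunmap_mem() helper, called per device after kfd_flush_tlb().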
Diffstat (limited to 'sys')
-rw-r--r--  sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h        |  1
-rw-r--r--  sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  | 26
-rw-r--r--  sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c          | 20
3 files changed, 35 insertions, 12 deletions
diff --git a/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h b/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
index 9306b3d608b..db463d2d6fc 100644
--- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -303,6 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
+void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
diff --git a/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index dc3fd9790ee..9fab9940baf 100644
--- a/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -733,7 +733,7 @@ kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
enum dma_data_direction dir;
if (unlikely(!ttm->sg)) {
- pr_err("SG Table of BO is UNEXPECTEDLY NULL");
+ pr_debug("SG Table of BO is NULL");
return;
}
@@ -1202,8 +1202,6 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
amdgpu_sync_fence(sync, bo_va->last_pt_update);
-
- kfd_mem_dmaunmap_attachment(mem, entry);
}
static int update_gpuvm_pte(struct kgd_mem *mem,
@@ -1258,6 +1256,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
update_gpuvm_pte_failed:
unmap_bo_from_gpuvm(mem, entry, sync);
+ kfd_mem_dmaunmap_attachment(mem, entry);
return ret;
}
@@ -1862,8 +1861,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
mem->va + bo_size * (1 + mem->aql_queue));
/* Remove from VM internal data structures */
- list_for_each_entry_safe(entry, tmp, &mem->attachments, list)
+ list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
+ kfd_mem_dmaunmap_attachment(mem, entry);
kfd_mem_detach(entry);
+ }
ret = unreserve_bo_and_vms(&ctx, false, false);
@@ -2037,6 +2038,23 @@ out:
return ret;
}
+void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+{
+ struct kfd_mem_attachment *entry;
+ struct amdgpu_vm *vm;
+
+ vm = drm_priv_to_vm(drm_priv);
+
+ mutex_lock(&mem->lock);
+
+ list_for_each_entry(entry, &mem->attachments, list) {
+ if (entry->bo_va->base.vm == vm)
+ kfd_mem_dmaunmap_attachment(mem, entry);
+ }
+
+ mutex_unlock(&mem->lock);
+}
+
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
{
diff --git a/sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c b/sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c
index 010e4af4ea4..03a94230e5f 100644
--- a/sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c
+++ b/sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c
@@ -1432,17 +1432,21 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
goto sync_memory_failed;
}
}
- mutex_unlock(&p->mutex);
- if (flush_tlb) {
- /* Flush TLBs after waiting for the page table updates to complete */
- for (i = 0; i < args->n_devices; i++) {
- peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
- if (WARN_ON_ONCE(!peer_pdd))
- continue;
+ /* Flush TLBs after waiting for the page table updates to complete */
+ for (i = 0; i < args->n_devices; i++) {
+ peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]);
+ if (WARN_ON_ONCE(!peer_pdd))
+ continue;
+ if (flush_tlb)
kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
- }
+
+ /* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
+ amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
}
+
+ mutex_unlock(&p->mutex);
+
kfree(devices_arr);
return 0;