diff options
| author | Christian König <christian.koenig@amd.com> | 2021-03-22 13:33:05 +0100 | 
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2021-05-19 22:30:21 -0400 | 
| commit | 0ccc3ccf5b3af48161d1ddd088dbca12a9837c70 (patch) | |
| tree | 9cbe571e1665fdfdf30e824659e71c812484750f /drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |
| parent | c6a11133337c644d1e63a78217d490e871796d1e (diff) | |
drm/amdgpu: re-apply "use the new cursor in the VM code" v2
Now that we found the underlying problem we can re-apply this patch.
This reverts commit 6b44b667e24cf89603ebdaa31b939c034d425162.
v2: rebase on KFD changes
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Tested-by: Nirmoy Das <nirmoy.das@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 55 | 
1 file changed, 18 insertions, 37 deletions
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 7e1a67295106..63035b78dd87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -37,6 +37,7 @@  #include "amdgpu_gmc.h"  #include "amdgpu_xgmi.h"  #include "amdgpu_dma_buf.h" +#include "amdgpu_res_cursor.h"  #include "kfd_svm.h"  /** @@ -1606,7 +1607,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,   * @last: last mapped entry   * @flags: flags for the entries   * @offset: offset into nodes and pages_addr - * @nodes: array of drm_mm_nodes with the MC addresses + * @res: ttm_resource to map   * @pages_addr: DMA addresses to use for mapping   * @fence: optional resulting fence   * @@ -1621,13 +1622,13 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,  				bool unlocked, struct dma_resv *resv,  				uint64_t start, uint64_t last,  				uint64_t flags, uint64_t offset, -				struct drm_mm_node *nodes, +				struct ttm_resource *res,  				dma_addr_t *pages_addr,  				struct dma_fence **fence)  {  	struct amdgpu_vm_update_params params; +	struct amdgpu_res_cursor cursor;  	enum amdgpu_sync_mode sync_mode; -	uint64_t pfn;  	int r;  	memset(&params, 0, sizeof(params)); @@ -1645,14 +1646,6 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,  	else  		sync_mode = AMDGPU_SYNC_EXPLICIT; -	pfn = offset >> PAGE_SHIFT; -	if (nodes) { -		while (pfn >= nodes->size) { -			pfn -= nodes->size; -			++nodes; -		} -	} -  	amdgpu_vm_eviction_lock(vm);  	if (vm->evicting) {  		r = -EBUSY; @@ -1671,23 +1664,17 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,  	if (r)  		goto error_unlock; -	do { +	amdgpu_res_first(res, offset, (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, +			 &cursor); +	while (cursor.remaining) {  		uint64_t tmp, num_entries, addr; - -		num_entries = last - start + 1; -		if (nodes) { -			addr = nodes->start << PAGE_SHIFT; -			num_entries = min((nodes->size - pfn) * -				
AMDGPU_GPU_PAGES_IN_CPU_PAGE, num_entries); -		} else { -			addr = 0; -		} - +		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;  		if (pages_addr) {  			bool contiguous = true;  			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) { +				uint64_t pfn = cursor.start >> PAGE_SHIFT;  				uint64_t count;  				contiguous = pages_addr[pfn + 1] == @@ -1707,16 +1694,18 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,  			}  			if (!contiguous) { -				addr = pfn << PAGE_SHIFT; +				addr = cursor.start;  				params.pages_addr = pages_addr;  			} else { -				addr = pages_addr[pfn]; +				addr = pages_addr[cursor.start >> PAGE_SHIFT];  				params.pages_addr = NULL;  			}  		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) { -			addr += bo_adev->vm_manager.vram_base_offset; -			addr += pfn << PAGE_SHIFT; +			addr = bo_adev->vm_manager.vram_base_offset + +				cursor.start; +		} else { +			addr = 0;  		}  		tmp = start + num_entries; @@ -1724,14 +1713,9 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,  		if (r)  			goto error_unlock; -		pfn += num_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE; -		if (nodes && nodes->size == pfn) { -			pfn = 0; -			++nodes; -		} +		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);  		start = tmp; - -	} while (unlikely(start != last + 1)); +	};  	r = vm->update_funcs->commit(&params, fence); @@ -1760,7 +1744,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,  	struct amdgpu_bo_va_mapping *mapping;  	dma_addr_t *pages_addr = NULL;  	struct ttm_resource *mem; -	struct drm_mm_node *nodes;  	struct dma_fence **last_update;  	struct dma_resv *resv;  	uint64_t flags; @@ -1769,7 +1752,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,  	if (clear || !bo) {  		mem = NULL; -		nodes = NULL;  		resv = vm->root.base.bo->tbo.base.resv;  	} else {  		struct drm_gem_object *obj = &bo->tbo.base; @@ -1784,7 +1766,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device 
*adev, struct amdgpu_bo_va *bo_va,  				bo = gem_to_amdgpu_bo(gobj);  		}  		mem = &bo->tbo.mem; -		nodes = mem->mm_node;  		if (mem->mem_type == TTM_PL_TT)  			pages_addr = bo->tbo.ttm->dma_address;  	} @@ -1833,7 +1814,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,  		r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false,  						resv, mapping->start,  						mapping->last, update_flags, -						mapping->offset, nodes, +						mapping->offset, mem,  						pages_addr, last_update);  		if (r)  			return r; | 
