| author | Dave Airlie <airlied@redhat.com> | 2020-09-08 16:40:13 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2020-09-08 16:40:13 +1000 |
| commit | 0c8d22fcae2f9590a07b000e1724f665820b77f7 | |
| tree | 5a2405fe298358d861a58dc933184ee6d3415eb4 /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |
| parent | ce5c207c6b8dd9cdeaeeb2345b8a69335c0d98bf | |
| parent | 11bc98bd71fe2e0cb572988519e51bca9d58a18a | |
Merge tag 'amd-drm-next-5.10-2020-09-03' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.10-2020-09-03:
amdgpu:
- RAS fixes
- Sienna Cichlid updates
- Navy Flounder updates
- DCE6 (SI) support in DC
- Enable plane rotation
- Rework pre-OS vram reservation handling during driver init
- Add standard interface to dump GPU metrics table from SMU
- Rework tiling and tmz state handling in atomic commits
- Pstate fixes
- Add voltage and power hwmon interfaces for renoir
- SW CTF fixes
- S/G display fix for Raven
- Print client strings for vmfaults for vega and newer
- Manual fan control fixes
- Display updates
- Reorg power management directory structure
- Misc bug fixes
- Misc code cleanups
amdkfd:
- Topology fixes
- Add SMI events for thermal throttling and GPU resets
radeon:
- Switch from pci_* to dma_* for DMA allocations (see the sketch after this list)
- PLL fix
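For the radeon pci_* to dma_* item above, a hedged kernel-context sketch of the mapping (this only compiles in-kernel, and `ring_alloc`/`ring_free` are made-up helper names, not radeon functions):

```c
#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Before: cpu_addr = pci_alloc_consistent(pdev, size, &dma_handle);
 * After: the generic DMA API takes the underlying struct device and an
 * explicit gfp mask; the semantics are otherwise the same. */
static void *ring_alloc(struct pci_dev *pdev, size_t size, dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_KERNEL);
}

/* Before: pci_free_consistent(pdev, size, cpu_addr, dma_handle); */
static void ring_free(struct pci_dev *pdev, size_t size, void *cpu_addr,
		      dma_addr_t dma_handle)
{
	dma_free_coherent(&pdev->dev, size, cpu_addr, dma_handle);
}
```

For coherent allocations this one-for-one substitution is essentially the whole conversion.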
Scheduler:
- Clean up priority levels
UAPI:
- amdgpu INFO IOCTL query update for TMZ state (see the sketch after this list)
  https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6049
- amdkfd SMI event interface updates
  https://github.com/RadeonOpenCompute/rocm_smi_lib/tree/therm_thrott
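On the UAPI side, the TMZ state is reported through the existing DEV_INFO query. A minimal userspace sketch using libdrm's amdgpu wrapper, assuming a kernel with this series applied and a libdrm whose amdgpu_drm.h carries AMDGPU_IDS_FLAGS_TMZ; the render-node path is illustrative:

```c
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

int main(void)
{
	uint32_t major, minor;
	amdgpu_device_handle dev;
	struct drm_amdgpu_info_device info = {0};
	int fd = open("/dev/dri/renderD128", O_RDWR); /* first render node; adjust for your system */

	if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
		return 1;

	/* AMDGPU_INFO_DEV_INFO fills drm_amdgpu_info_device; the updated
	 * query reports TMZ state via a flag in ids_flags. */
	if (amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO, sizeof(info), &info) == 0)
		printf("TMZ: %s\n",
		       (info.ids_flags & AMDGPU_IDS_FLAGS_TMZ) ? "enabled" : "disabled");

	amdgpu_device_deinitialize(dev);
	close(fd);
	return 0;
}
```

Build against libdrm_amdgpu, e.g. `cc tmz.c $(pkg-config --cflags --libs libdrm_amdgpu)`.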
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200903222921.4152-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 25 |

1 file changed, 18 insertions(+), 7 deletions(-)
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ecd051976bce..12598a4b5c78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1275,13 +1275,24 @@ error_unlock:
 	return r;
 }
 
+static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
+{
+	int i;
+
+	if (!trace_amdgpu_cs_enabled())
+		return;
+
+	for (i = 0; i < parser->job->num_ibs; i++)
+		trace_amdgpu_cs(parser, i);
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	union drm_amdgpu_cs *cs = data;
 	struct amdgpu_cs_parser parser = {};
 	bool reserved_buffers = false;
-	int i, r;
+	int r;
 
 	if (amdgpu_ras_intr_triggered())
 		return -EHWPOISON;
@@ -1294,7 +1305,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	r = amdgpu_cs_parser_init(&parser, data);
 	if (r) {
-		DRM_ERROR("Failed to initialize parser %d!\n", r);
+		if (printk_ratelimit())
+			DRM_ERROR("Failed to initialize parser %d!\n", r);
 		goto out;
 	}
 
@@ -1319,8 +1331,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	reserved_buffers = true;
 
-	for (i = 0; i < parser.job->num_ibs; i++)
-		trace_amdgpu_cs(&parser, i);
+	trace_amdgpu_cs_ibs(&parser);
 
 	r = amdgpu_cs_vm_handling(&parser);
 	if (r)
@@ -1421,7 +1432,7 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
 int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	union drm_amdgpu_fence_to_handle *info = data;
 	struct dma_fence *fence;
 	struct drm_syncobj *syncobj;
@@ -1597,7 +1608,7 @@ err_free_fence_array:
 int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	union drm_amdgpu_wait_fences *wait = data;
 	uint32_t fence_count = wait->in.fence_count;
 	struct drm_amdgpu_fence *fences_user;
```
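The new trace_amdgpu_cs_ibs() helper above hoists the per-tracepoint trace_amdgpu_cs_enabled() check (generated by the kernel for every tracepoint) out of the per-IB loop, so a disabled tracepoint costs one branch instead of num_ibs call setups. A standalone userspace analogue of that shape; every name here is an illustrative stand-in, not a kernel symbol:

```c
#include <stdbool.h>
#include <stdio.h>

static bool cs_trace_enabled;   /* stands in for trace_amdgpu_cs_enabled() */

static void trace_cs_ib(int ib) /* stands in for trace_amdgpu_cs(parser, i) */
{
	printf("cs: traced ib %d\n", ib);
}

/* Mirrors trace_amdgpu_cs_ibs(): bail with a single branch when nobody
 * is listening, instead of paying the call overhead once per IB. */
static void trace_cs_ibs(int num_ibs)
{
	int i;

	if (!cs_trace_enabled)
		return;

	for (i = 0; i < num_ibs; i++)
		trace_cs_ib(i);
}

int main(void)
{
	trace_cs_ibs(3);         /* disabled: no output, one branch */
	cs_trace_enabled = true;
	trace_cs_ibs(3);         /* enabled: traces IBs 0..2 */
	return 0;
}
```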
