| | | |
|---|---|---|
| author | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 |
| committer | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 |
| commit | 7731b8bc94e599c9a79e428f3359ff2c34b7576a (patch) | |
| tree | 879f18ccbe274122f2d4f095b43cbc7f953e0ada /drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | |
| parent | 48e315618dc4dc8904182cd221e3d395d5d97005 (diff) | |
| parent | 9ffc59d57228d74809700be6f7ecb1db10292f05 (diff) | |
Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')
| | | |
|---|---|---|
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 73 |

1 file changed, 25 insertions, 48 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 58e495330b38..127e87b470ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -49,8 +49,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
 
 int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
 	unsigned long bo_size;
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
@@ -84,6 +82,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 	}
 
 	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+	adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
 	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
 	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
@@ -102,24 +101,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	ring = &adev->vcn.ring_dec;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
-				  rq, amdgpu_sched_jobs, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up VCN dec run queue.\n");
-		return r;
-	}
-
-	ring = &adev->vcn.ring_enc[0];
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
-				  rq, amdgpu_sched_jobs, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up VCN enc run queue.\n");
-		return r;
-	}
-
 	return 0;
 }
 
@@ -129,10 +110,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 
 	kfree(adev->vcn.saved_bo);
 
-	drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
-
-	drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
-
 	amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
 			      &adev->vcn.gpu_addr,
 			      (void **)&adev->vcn.cpu_addr);
@@ -205,13 +182,18 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
 	struct amdgpu_device *adev =
 		container_of(work, struct amdgpu_device, vcn.idle_work.work);
 	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
+	unsigned i;
+
+	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+	}
 
 	if (fences == 0) {
-		if (adev->pm.dpm_enabled) {
-			/* might be used when with pg/cg
+		if (adev->pm.dpm_enabled)
 			amdgpu_dpm_enable_uvd(adev, false);
-			*/
-		}
+		else
+			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+							       AMD_PG_STATE_GATE);
 	} else {
 		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
 	}
@@ -223,9 +205,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
 	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
 
 	if (set_clocks && adev->pm.dpm_enabled) {
-		/* might be used when with pg/cg
-		amdgpu_dpm_enable_uvd(adev, true);
-		*/
+		if (adev->pm.dpm_enabled)
+			amdgpu_dpm_enable_uvd(adev, true);
+		else
+			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+							       AMD_PG_STATE_UNGATE);
 	}
 }
 
@@ -271,7 +255,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 }
 
 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
-				   struct amdgpu_bo *bo, bool direct,
+				   struct amdgpu_bo *bo,
 				   struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -299,19 +283,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
 	}
 
 	ib->length_dw = 16;
-	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
-		job->fence = dma_fence_get(f);
-		if (r)
-			goto err_free;
+	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+	job->fence = dma_fence_get(f);
+	if (r)
+		goto err_free;
 
-		amdgpu_job_free(job);
-	} else {
-		r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
-		if (r)
-			goto err_free;
-	}
+	amdgpu_job_free(job);
 
 	amdgpu_bo_fence(bo, f, false);
 	amdgpu_bo_unreserve(bo);
@@ -363,11 +340,11 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = 14; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
+	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
 }
 
 static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-			       bool direct, struct dma_fence **fence)
+			       struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_bo *bo = NULL;
@@ -389,7 +366,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = 6; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
+	return amdgpu_vcn_dec_send_msg(ring, bo, fence);
 }
 
 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
@@ -403,7 +380,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 		goto error;
 	}
 
-	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence);
+	r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
 		goto error;
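For readers skimming the diff, the behavioral core of the change is the reworked idle handler: fences are now counted across the decode ring and every enabled encode ring, and power-down prefers DPM when it is enabled, falling back to per-IP power gating otherwise. The standalone sketch below only models that decision flow under those assumptions; `vcn_idle_tick`, `fences_emitted`, and `NUM_ENC_RINGS` are illustrative stand-ins rather than kernel API, and the printed strings merely name the kernel calls the real handler makes.

```c
/*
 * Illustrative model of the new VCN idle-work policy shown in the diff.
 * Not kernel code: the stubs below stand in for amdgpu internals.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_ENC_RINGS 2  /* hypothetical count of encode rings */

/* Stub for amdgpu_fence_count_emitted(); pretends every ring is idle. */
static unsigned fences_emitted(int ring_id)
{
	(void)ring_id;
	return 0;
}

/* Mirrors the decision flow of amdgpu_vcn_idle_work_handler(). */
static void vcn_idle_tick(bool dpm_enabled)
{
	unsigned fences = fences_emitted(-1);      /* decode ring */

	for (int i = 0; i < NUM_ENC_RINGS; ++i)
		fences += fences_emitted(i);       /* encode rings */

	if (fences == 0) {
		if (dpm_enabled)
			printf("amdgpu_dpm_enable_uvd(adev, false)\n");
		else
			printf("set VCN powergating: AMD_PG_STATE_GATE\n");
	} else {
		printf("reschedule idle work after VCN_IDLE_TIMEOUT\n");
	}
}

int main(void)
{
	vcn_idle_tick(true);   /* DPM-managed power-down path */
	vcn_idle_tick(false);  /* per-IP power-gating fallback */
	return 0;
}
```

The ring-begin-use hunk is the mirror image of this: the same DPM-versus-powergating choice, but ungating (`AMD_PG_STATE_UNGATE`) when work arrives.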
