Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  287
1 file changed, 157 insertions(+), 130 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index de4d77af02ae..bcf68f80bbf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -70,12 +70,14 @@
 #define FIRMWARE_VEGA10		"amdgpu/vega10_uvd.bin"
 #define FIRMWARE_VEGA12		"amdgpu/vega12_uvd.bin"
+#define FIRMWARE_VEGA20		"amdgpu/vega20_uvd.bin"
-#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
-#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
-#define mmUVD_GPCOM_VCPU_CMD_VEGA10 (0x03c3 + 0x7e00)
-#define mmUVD_NO_OP_VEGA10 (0x03ff + 0x7e00)
-#define mmUVD_ENGINE_CNTL_VEGA10 (0x03c6 + 0x7e00)
+/* These are common relative offsets for all asics, from uvd_7_0_offset.h,  */
+#define UVD_GPCOM_VCPU_CMD		0x03c3
+#define UVD_GPCOM_VCPU_DATA0	0x03c4
+#define UVD_GPCOM_VCPU_DATA1	0x03c5
+#define UVD_NO_OP				0x03ff
+#define UVD_BASE_SI				0x3800
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -114,6 +116,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM);
 MODULE_FIRMWARE(FIRMWARE_VEGA10);
 MODULE_FIRMWARE(FIRMWARE_VEGA12);
+MODULE_FIRMWARE(FIRMWARE_VEGA20);
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -125,9 +128,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	const char *fw_name;
 	const struct common_firmware_header *hdr;
 	unsigned version_major, version_minor, family_id;
-	int i, r;
+	int i, j, r;
-	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
+	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -177,6 +180,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	case CHIP_VEGAM:
 		fw_name = FIRMWARE_VEGAM;
 		break;
+	case CHIP_VEGA20:
+		fw_name = FIRMWARE_VEGA20;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -231,28 +237,30 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
-	r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
-				    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
-				    &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
-	if (r) {
-		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
-		return r;
-	}
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-	ring = &adev->uvd.ring;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
-				  rq, NULL);
-	if (r != 0) {
-		DRM_ERROR("Failed setting up UVD run queue.\n");
-		return r;
-	}
+		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
+					    &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
+		if (r) {
+			dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
+			return r;
+		}
-	for (i = 0; i < adev->uvd.max_handles; ++i) {
-		atomic_set(&adev->uvd.handles[i], 0);
-		adev->uvd.filp[i] = NULL;
-	}
+		ring = &adev->uvd.inst[j].ring;
+		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
+					  rq, NULL);
+		if (r != 0) {
+			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
+			return r;
+		}
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
+			atomic_set(&adev->uvd.inst[j].handles[i], 0);
+			adev->uvd.inst[j].filp[i] = NULL;
+		}
+	}
 	/* from uvd v5.0 HW addressing capacity increased to 64 bits */
 	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
 		adev->uvd.address_64_bit = true;
@@ -279,20 +287,22 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
-	int i;
-	kfree(adev->uvd.saved_bo);
+	int i, j;
-	drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
+	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		kfree(adev->uvd.inst[j].saved_bo);
-	amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
-			      &adev->uvd.gpu_addr,
-			      (void **)&adev->uvd.cpu_addr);
+		drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
-	amdgpu_ring_fini(&adev->uvd.ring);
+		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
+				      &adev->uvd.inst[j].gpu_addr,
+				      (void **)&adev->uvd.inst[j].cpu_addr);
-	for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
-		amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+		amdgpu_ring_fini(&adev->uvd.inst[j].ring);
+		for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
+	}
 	release_firmware(adev->uvd.fw);
 	return 0;
@@ -302,32 +312,33 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
 	unsigned size;
 	void *ptr;
-	int i;
-
-	if (adev->uvd.vcpu_bo == NULL)
-		return 0;
+	int i, j;
-	cancel_delayed_work_sync(&adev->uvd.idle_work);
+	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+		if (adev->uvd.inst[j].vcpu_bo == NULL)
+			continue;
-	/* only valid for physical mode */
-	if (adev->asic_type < CHIP_POLARIS10) {
-		for (i = 0; i < adev->uvd.max_handles; ++i)
-			if (atomic_read(&adev->uvd.handles[i]))
-				break;
+		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-		if (i == adev->uvd.max_handles)
-			return 0;
-	}
+		/* only valid for physical mode */
+		if (adev->asic_type < CHIP_POLARIS10) {
+			for (i = 0; i < adev->uvd.max_handles; ++i)
+				if (atomic_read(&adev->uvd.inst[j].handles[i]))
+					break;
-	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-	ptr = adev->uvd.cpu_addr;
+			if (i == adev->uvd.max_handles)
+				continue;
+		}
-	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-	if (!adev->uvd.saved_bo)
-		return -ENOMEM;
+		size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
+		ptr = adev->uvd.inst[j].cpu_addr;
-	memcpy_fromio(adev->uvd.saved_bo, ptr, size);
+		adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+		if (!adev->uvd.inst[j].saved_bo)
+			return -ENOMEM;
+		memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+	}
 	return 0;
 }
@@ -335,59 +346,65 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 {
 	unsigned size;
 	void *ptr;
+	int i;
-	if (adev->uvd.vcpu_bo == NULL)
-		return -EINVAL;
+	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+		if (adev->uvd.inst[i].vcpu_bo == NULL)
+			return -EINVAL;
-	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
-	ptr = adev->uvd.cpu_addr;
+		size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
+		ptr = adev->uvd.inst[i].cpu_addr;
-	if (adev->uvd.saved_bo != NULL) {
-		memcpy_toio(ptr, adev->uvd.saved_bo, size);
-		kfree(adev->uvd.saved_bo);
-		adev->uvd.saved_bo = NULL;
-	} else {
-		const struct common_firmware_header *hdr;
-		unsigned offset;
-
-		hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
-		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
-			offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
-			memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
-				    le32_to_cpu(hdr->ucode_size_bytes));
-			size -= le32_to_cpu(hdr->ucode_size_bytes);
-			ptr += le32_to_cpu(hdr->ucode_size_bytes);
+		if (adev->uvd.inst[i].saved_bo != NULL) {
+			memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
+			kfree(adev->uvd.inst[i].saved_bo);
+			adev->uvd.inst[i].saved_bo = NULL;
+		} else {
+			const struct common_firmware_header *hdr;
+			unsigned offset;
+
+			hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+				offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+				memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
+					    le32_to_cpu(hdr->ucode_size_bytes));
+				size -= le32_to_cpu(hdr->ucode_size_bytes);
+				ptr += le32_to_cpu(hdr->ucode_size_bytes);
+			}
+			memset_io(ptr, 0, size);
+			/* to restore uvd fence seq */
+			amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
 		}
-		memset_io(ptr, 0, size);
-		/* to restore uvd fence seq */
-		amdgpu_fence_driver_force_completion(&adev->uvd.ring);
 	}
-
 	return 0;
 }
 void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 {
-	struct amdgpu_ring *ring = &adev->uvd.ring;
-	int i, r;
+	struct amdgpu_ring *ring;
+	int i, j, r;
-	for (i = 0; i < adev->uvd.max_handles; ++i) {
-		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
-		if (handle != 0 && adev->uvd.filp[i] == filp) {
-			struct dma_fence *fence;
-
-			r = amdgpu_uvd_get_destroy_msg(ring, handle,
-						       false, &fence);
-			if (r) {
-				DRM_ERROR("Error destroying UVD (%d)!\n", r);
-				continue;
-			}
+	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		ring = &adev->uvd.inst[j].ring;
+
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
+			uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]);
+			if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) {
+				struct dma_fence *fence;
+
+				r = amdgpu_uvd_get_destroy_msg(ring, handle,
							       false, &fence);
+				if (r) {
+					DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r);
+					continue;
+				}
-			dma_fence_wait(fence, false);
-			dma_fence_put(fence);
+				dma_fence_wait(fence, false);
+				dma_fence_put(fence);
-			adev->uvd.filp[i] = NULL;
-			atomic_set(&adev->uvd.handles[i], 0);
+				adev->uvd.inst[j].filp[i] = NULL;
+				atomic_set(&adev->uvd.inst[j].handles[i], 0);
+			}
 		}
 	}
 }
@@ -662,15 +679,16 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	void *ptr;
 	long r;
 	int i;
+	uint32_t ip_instance = ctx->parser->job->ring->me;
 	if (offset & 0x3F) {
-		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
+		DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
 		return -EINVAL;
 	}
 	r = amdgpu_bo_kmap(bo, &ptr);
 	if (r) {
-		DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
+		DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r);
 		return r;
 	}
@@ -680,7 +698,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 	handle = msg[2];
 	if (handle == 0) {
-		DRM_ERROR("Invalid UVD handle!\n");
+		DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance);
 		return -EINVAL;
 	}
@@ -691,18 +709,18 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 		/* try to alloc a new handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.handles[i]) == handle) {
-				DRM_ERROR("Handle 0x%x already in use!\n", handle);
+			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+				DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle);
 				return -EINVAL;
 			}
-			if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
-				adev->uvd.filp[i] = ctx->parser->filp;
+			if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) {
+				adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp;
 				return 0;
 			}
 		}
-		DRM_ERROR("No more free UVD handles!\n");
+		DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance);
 		return -ENOSPC;
 	case 1:
@@ -714,27 +732,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 		/* validate the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i) {
-			if (atomic_read(&adev->uvd.handles[i]) == handle) {
-				if (adev->uvd.filp[i] != ctx->parser->filp) {
-					DRM_ERROR("UVD handle collision detected!\n");
+			if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) {
+				if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) {
+					DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance);
 					return -EINVAL;
 				}
 				return 0;
 			}
 		}
-		DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+		DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, handle);
 		return -ENOENT;
 	case 2:
 		/* it's a destroy msg, free the handle */
 		for (i = 0; i < adev->uvd.max_handles; ++i)
-			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
+			atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
 	default:
-		DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+		DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type);
 		return -EINVAL;
 	}
 	BUG();
@@ -805,7 +823,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
 		}
 		if ((cmd == 0 || cmd == 0x3) &&
-		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
+		    (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
 			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
 				  start, end);
 			return -EINVAL;
@@ -973,6 +991,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	uint64_t addr;
 	long r;
 	int i;
+	unsigned offset_idx = 0;
+	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
 	amdgpu_bo_kunmap(bo);
 	amdgpu_bo_unpin(bo);
@@ -992,17 +1012,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		goto err;
 	if (adev->asic_type >= CHIP_VEGA10) {
-		data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0);
-		data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0);
-		data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0);
-		data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0);
-	} else {
-		data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
-		data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
-		data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
-		data[3] = PACKET0(mmUVD_NO_OP, 0);
+		offset_idx = 1 + ring->me;
+		offset[1] = adev->reg_offset[UVD_HWIP][0][1];
+		offset[2] = adev->reg_offset[UVD_HWIP][1][1];
 	}
+	data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0);
+	data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0);
+	data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0);
+	data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0);
+
 	ib = &job->ibs[0];
 	addr = amdgpu_bo_gpu_offset(bo);
 	ib->ptr[0] = data[0];
@@ -1038,7 +1057,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		if (r)
 			goto err_free;
-		r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
+		r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err_free;
@@ -1126,8 +1145,15 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
-		container_of(work, struct amdgpu_device, uvd.idle_work.work);
-	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
+		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+	unsigned fences = 0, i, j;
+
+	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
+		for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
+			fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
+		}
+	}
 	if (fences == 0) {
 		if (adev->pm.dpm_enabled) {
@@ -1141,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 							       AMD_CG_STATE_GATE);
 		}
 	} else {
-		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
 	}
 }
@@ -1153,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 	if (amdgpu_sriov_vf(adev))
 		return;
-	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
+	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, true);
@@ -1170,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
 	if (!amdgpu_sriov_vf(ring->adev))
-		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
 }
 /**
@@ -1184,27 +1210,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
 	struct dma_fence *fence;
 	long r;
+	uint32_t ip_instance = ring->me;
 	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
 	if (r) {
-		DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
 		goto error;
 	}
 	r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
 	if (r) {
-		DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
 		goto error;
 	}
 	r = dma_fence_wait_timeout(fence, false, timeout);
 	if (r == 0) {
-		DRM_ERROR("amdgpu: IB test timed out.\n");
+		DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
 		r = -ETIMEDOUT;
 	} else if (r < 0) {
-		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
+		DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
 	} else {
-		DRM_DEBUG("ib test on ring %d succeeded\n",  ring->idx);
+		DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
 		r = 0;
 	}
@@ -1232,7 +1259,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
 		 * necessarily linear. So we need to count
 		 * all non-zero handles.
 		 */
-		if (atomic_read(&adev->uvd.handles[i]))
+		if (atomic_read(&adev->uvd.inst->handles[i]))
			used_handles++;
 	}
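
The subtle part of this patch is the register addressing change in amdgpu_uvd_send_msg(): instead of two hard-coded PACKET0() lists, it keeps a small offset[] table whose index 0 is the legacy SI base (UVD_BASE_SI) and whose entries 1..n are per-instance bases read from adev->reg_offset[UVD_HWIP][instance][1], selected by ring->me. Below is a minimal standalone sketch of that selection logic, not the kernel code itself; the vega_uvd_base[] values are illustrative stand-ins (0x7e00 matches the old mmUVD_*_VEGA10 defines removed above, the second entry is a made-up placeholder).

#include <stdio.h>

/* Relative offsets shared by all ASICs, mirroring the new defines in the patch. */
#define UVD_GPCOM_VCPU_CMD	0x03c3
#define UVD_GPCOM_VCPU_DATA0	0x03c4
#define UVD_GPCOM_VCPU_DATA1	0x03c5
#define UVD_NO_OP		0x03ff
#define UVD_BASE_SI		0x3800

/* Stand-in for adev->reg_offset[UVD_HWIP][i][1]: instance 0 uses the old
 * Vega10 base 0x7e00; the instance 1 value here is only a placeholder. */
static const unsigned vega_uvd_base[2] = { 0x7e00, 0x7f00 };

/* Pick the packet register base the way the patched amdgpu_uvd_send_msg() does:
 * index 0 is the legacy SI base, indices 1..n select a per-instance Vega base. */
static unsigned uvd_reg_base(int is_vega10_or_later, unsigned ring_me)
{
	unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
	unsigned offset_idx = 0;

	if (is_vega10_or_later) {
		offset_idx = 1 + ring_me;
		offset[1] = vega_uvd_base[0];
		offset[2] = vega_uvd_base[1];
	}
	return offset[offset_idx];
}

int main(void)
{
	/* Pre-Vega10 ASIC: always the SI base. */
	printf("legacy DATA0: 0x%04x\n", uvd_reg_base(0, 0) + UVD_GPCOM_VCPU_DATA0);
	/* Vega10 and later with two UVD instances: the base follows ring->me. */
	printf("inst0 DATA0:  0x%04x\n", uvd_reg_base(1, 0) + UVD_GPCOM_VCPU_DATA0);
	printf("inst1 DATA0:  0x%04x\n", uvd_reg_base(1, 1) + UVD_GPCOM_VCPU_DATA0);
	return 0;
}

Keeping index 0 as the pre-Vega10 path is what lets a single set of PACKET0() lines at the end of the function serve every ASIC generation, which is exactly what replaces the old if/else register lists in the diff.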
