From 4ed49c954e35559cb4a34ba049a15f54ec7fccf6 Mon Sep 17 00:00:00 2001
From: Ruijing Dong
Date: Tue, 31 May 2022 14:18:25 -0400
Subject: drm/amdgpu/vcn: add unified queue ib test

- add unified queue headers
- add unified queue ib tests

Acked-by: Leo Liu
Signed-off-by: Ruijing Dong
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 102 +++++++++++++++++++++++++++++++-
 1 file changed, 99 insertions(+), 3 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index aa7acfabf360..e62ff7db4736 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -329,6 +329,18 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 	return 0;
 }
 
+/* from vcn4 and above, only unified queue is used */
+bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	bool ret = false;
+
+	if (adev->ip_versions[UVD_HWIP][0] >= IP_VERSION(4, 0, 0))
+		ret = true;
+
+	return ret;
+}
+
 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
 {
 	bool ret = false;
@@ -718,19 +730,55 @@ error:
 	return r;
 }
 
+static uint32_t * amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
+				uint32_t ib_pack_in_dw, bool enc)
+{
+	uint32_t *ib_checksum;
+
+	ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
+	ib->ptr[ib->length_dw++] = 0x30000002;
+	ib_checksum = &ib->ptr[ib->length_dw++];
+	ib->ptr[ib->length_dw++] = ib_pack_in_dw;
+
+	ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
+	ib->ptr[ib->length_dw++] = 0x30000001;
+	ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
+	ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
+
+	return ib_checksum;
+}
+
+static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
+				uint32_t ib_pack_in_dw)
+{
+	uint32_t i;
+	uint32_t checksum = 0;
+
+	for (i = 0; i < ib_pack_in_dw; i++)
+		checksum += *(*ib_checksum + 2 + i);
+
+	**ib_checksum = checksum;
+}
+
 static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 				      struct amdgpu_ib *ib_msg,
 				      struct dma_fence **fence)
 {
 	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
-	const unsigned int ib_size_dw = 64;
+	unsigned int ib_size_dw = 64;
 	struct amdgpu_device *adev = ring->adev;
 	struct dma_fence *f = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
+	bool sq = amdgpu_vcn_using_unified_queue(ring);
+	uint32_t *ib_checksum;
+	uint32_t ib_pack_in_dw;
 	int i, r;
 
+	if (sq)
+		ib_size_dw += 8;
+
 	r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
 				     &job);
 	if (r)
@@ -739,6 +787,13 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	ib = &job->ibs[0];
 	ib->length_dw = 0;
 
+	/* single queue headers */
+	if (sq) {
+		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+						+ 4 + 2; /* engine info + decoding ib in dw */
+		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
+	}
+
 	ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
 	ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
 	decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
@@ -752,6 +807,9 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
+	if (sq)
+		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
+
 	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err_free;
@@ -838,13 +896,18 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 				   struct amdgpu_ib *ib_msg,
 				   struct dma_fence **fence)
 {
-	const unsigned ib_size_dw = 16;
+	unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
+	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
+	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
+	if (sq)
+		ib_size_dw += 8;
+
 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
@@ -854,6 +917,10 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 
 	ib->length_dw = 0;
+
+	if (sq)
+		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
 	ib->ptr[ib->length_dw++] = handle;
@@ -873,6 +940,9 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
+	if (sq)
+		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
 	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
@@ -892,13 +962,18 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 				   struct amdgpu_ib *ib_msg,
 				   struct dma_fence **fence)
 {
-	const unsigned ib_size_dw = 16;
+	unsigned ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
+	uint32_t *ib_checksum = NULL;
 	uint64_t addr;
+	bool sq = amdgpu_vcn_using_unified_queue(ring);
 	int i, r;
 
+	if (sq)
+		ib_size_dw += 8;
+
 	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
 				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
@@ -908,6 +983,10 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
 
 	ib->length_dw = 0;
+
+	if (sq)
+		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
 	ib->ptr[ib->length_dw++] = 0x00000018;
 	ib->ptr[ib->length_dw++] = 0x00000001;
 	ib->ptr[ib->length_dw++] = handle;
@@ -927,6 +1006,9 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
+	if (sq)
+		amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
 	r = amdgpu_job_submit_direct(job, ring, &f);
 	if (r)
 		goto err;
@@ -977,6 +1059,20 @@ error:
 	return r;
 }
 
+int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+	long r;
+
+	r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
+	if (r)
+		goto error;
+
+	r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
+
+error:
+	return r;
+}
+
 enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
 {
 	switch(ring) {
-- 
cgit v1.2.3
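For reference, the unified-queue ("single queue") header written by amdgpu_vcn_unified_ring_ib_header() above is eight dwords: a checksum packet (size 0x10, type 0x30000002, a placeholder dword, and the packed payload size in dwords), followed by an engine-info packet (size 0x10, type 0x30000001, engine 0x2 for encode or 0x3 for decode, and the payload size in bytes). amdgpu_vcn_unified_ring_ib_checksum() later sums ib_pack_in_dw dwords starting two positions past the placeholder, i.e. from the engine-info packet onward, and stores the sum in the placeholder. The standalone sketch below only mirrors that layout on a plain userspace buffer; sq_header(), sq_checksum(), VCN_SQ_HEADER_DW and the fake payload values are made up for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define VCN_SQ_HEADER_DW 8	/* checksum packet (4 dw) + engine info packet (4 dw) */

/* Write the 8-dword unified queue header; return a pointer to the checksum slot. */
static uint32_t *sq_header(uint32_t *ib, uint32_t *len_dw,
			   uint32_t ib_pack_in_dw, int enc)
{
	uint32_t *checksum_slot;

	ib[(*len_dw)++] = 0x00000010;		/* single queue checksum packet */
	ib[(*len_dw)++] = 0x30000002;
	checksum_slot = &ib[(*len_dw)++];	/* filled in after the payload is packed */
	ib[(*len_dw)++] = ib_pack_in_dw;

	ib[(*len_dw)++] = 0x00000010;		/* engine info packet */
	ib[(*len_dw)++] = 0x30000001;
	ib[(*len_dw)++] = enc ? 0x2 : 0x3;	/* 0x2 encode, 0x3 decode */
	ib[(*len_dw)++] = ib_pack_in_dw * sizeof(uint32_t);

	return checksum_slot;
}

/* Sum ib_pack_in_dw dwords starting at the engine info packet, store the result. */
static void sq_checksum(uint32_t *checksum_slot, uint32_t ib_pack_in_dw)
{
	uint32_t sum = 0;

	for (uint32_t i = 0; i < ib_pack_in_dw; i++)
		sum += checksum_slot[2 + i];

	*checksum_slot = sum;
}

int main(void)
{
	uint32_t ib[64] = { 0 };
	uint32_t len_dw = 0;
	uint32_t ib_pack_in_dw = 0x11;	/* same count the encode tests above pass */
	uint32_t *slot = sq_header(ib, &len_dw, ib_pack_in_dw, 1);

	/* fake payload: 0x11 packed dwords = 4 dw engine info + 13 dw of packets */
	for (uint32_t i = 0; i < 13; i++)
		ib[len_dw++] = 0xdead0000 + i;

	sq_checksum(slot, ib_pack_in_dw);
	printf("checksum dword = 0x%08x\n", (unsigned int)*slot);
	return 0;
}

Building this with a C99 compiler and running it prints the checksum over the 0x11 packed dwords, analogous to what the encoder test messages carry.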
From e751e4be1eec4d427d91583f31ce56ccb5286a27 Mon Sep 17 00:00:00 2001
From: Ruijing Dong
Date: Tue, 14 Jun 2022 22:42:54 -0400
Subject: drm/amdgpu/vcn: adjust unified queue code format
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Fixed some errors and warnings found by checkpatch.pl.

Acked-by: Christian König
Signed-off-by: Ruijing Dong
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 12 ++++++------
 drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c   |  8 ++++----
 2 files changed, 10 insertions(+), 10 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e62ff7db4736..fea436023351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -730,8 +730,8 @@ error:
 	return r;
 }
 
-static uint32_t * amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
-				uint32_t ib_pack_in_dw, bool enc)
+static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
+						   uint32_t ib_pack_in_dw, bool enc)
 {
 	uint32_t *ib_checksum;
 
@@ -749,7 +749,7 @@ static uint32_t * amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
 }
 
 static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
-				uint32_t ib_pack_in_dw)
+						uint32_t ib_pack_in_dw)
 {
 	uint32_t i;
 	uint32_t checksum = 0;
@@ -790,7 +790,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
 	/* single queue headers */
 	if (sq) {
 		ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
-						+ 4 + 2; /* engine info + decoding ib in dw */
+				+ 4 + 2; /* engine info + decoding ib in dw */
 		ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
 	}
 
@@ -896,7 +896,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 				   struct amdgpu_ib *ib_msg,
 				   struct dma_fence **fence)
 {
-	unsigned ib_size_dw = 16;
+	unsigned int ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
@@ -962,7 +962,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 				   struct amdgpu_ib *ib_msg,
 				   struct dma_fence **fence)
 {
-	unsigned ib_size_dw = 16;
+	unsigned int ib_size_dw = 16;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
 	struct dma_fence *f = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index d6f134ef9633..84ac2401895a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -120,7 +120,7 @@ static int vcn_v4_0_sw_init(void *handle)
 		sprintf(ring->name, "vcn_unified_%d", i);
 
 		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
-				AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
+				     AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
 		if (r)
 			return r;
 
@@ -907,7 +907,7 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);
 
-	tmp= RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
+	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
 	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
 	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
 
@@ -1048,8 +1048,8 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
 			dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
 			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
-				UVD_VCPU_CNTL__BLK_RST_MASK,
-				~UVD_VCPU_CNTL__BLK_RST_MASK);
+				 UVD_VCPU_CNTL__BLK_RST_MASK,
+				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
 			mdelay(10);
 			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
 				~UVD_VCPU_CNTL__BLK_RST_MASK);
-- 
cgit v1.2.3

From f12d07d6e9f22bbe65d158de1435ec515acfb50a Mon Sep 17 00:00:00 2001
From: Ruijing Dong
Date: Wed, 22 Jun 2022 10:10:11 -0400
Subject: drm/amdgpu/vcn: fix no previous prototype warning

Declare 'static', as the function is not intended to be used
outside of this translation unit.

Fixes: 4ed49c954e35 ("drm/amdgpu/vcn: add unified queue ib test")
Reported-by: kernel test robot
Reviewed-by: James Zhu
Signed-off-by: Ruijing Dong
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index fea436023351..1bfdfb9207ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -330,7 +330,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 }
 
 /* from vcn4 and above, only unified queue is used */
-bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
+static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	bool ret = false;
-- 
cgit v1.2.3
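For context on the warning fixed in the last patch: gcc's -Wmissing-prototypes (typically surfaced by the kernel test robot's extra-warning builds) flags any non-static function that is defined without a prior declaration, since such a function should either be declared in a header or be made file-local. A minimal illustration, not driver code; the file name and function names below are made up:

/* warn.c - compile with: gcc -Wmissing-prototypes -c warn.c */

/* gcc: "warning: no previous prototype for 'helper'" */
int helper(int x)
{
	return x * 2;
}

/* no warning: 'static' limits the symbol to this translation unit */
static int quiet_helper(int x)
{
	return x * 3;
}

int use_them(void);	/* a prior prototype also silences the warning */

int use_them(void)
{
	return helper(1) + quiet_helper(2);
}

Since amdgpu_vcn_using_unified_queue() is only called from within amdgpu_vcn.c, marking it static is the appropriate fix rather than adding a declaration to a header.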