From 2f9d4084cac96a0281522b548ca0526c1e241b75 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Mon, 16 Oct 2017 14:38:10 +0800 Subject: drm/amdgpu: cleanup force_completion Cleanup: force_completion now only operates on the given ring. Signed-off-by: Monk Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3573ecdb06ee..620e3002001d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2872,7 +2872,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job) amd_sched_hw_job_reset(&ring->sched); /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ - amdgpu_fence_driver_force_completion_ring(ring); + amdgpu_fence_driver_force_completion(ring); } /* request to take full control of GPU before re-initialization */ @@ -2991,9 +2991,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) continue; kthread_park(ring->sched.thread); amd_sched_hw_job_reset(&ring->sched); + /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ + amdgpu_fence_driver_force_completion(ring); } - /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ - amdgpu_fence_driver_force_completion(adev); need_full_reset = amdgpu_need_full_reset(adev); -- cgit v1.2.3
From a8a51a70416baab813606c6014c5f0746958dfb2 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Mon, 16 Oct 2017 19:46:43 +0800 Subject: drm/amdgpu: cleanup job reset routine (v2) Merge setting the guilty flag on the context into this function to avoid implementing an extra routine.
v2: go through the entity list and compare the fence_ctx before operating on the entity; otherwise the entity may be just a wild pointer. Signed-off-by: Monk Liu Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++-- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 31 ++++++++++++++++++++++++++- drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 2 +- 3 files changed, 33 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 620e3002001d..d5d450e380bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2869,7 +2869,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job) amd_sched_job_kickout(&job->base); /* only do job_reset on the hang ring if @job not NULL */ - amd_sched_hw_job_reset(&ring->sched); + amd_sched_hw_job_reset(&ring->sched, NULL); /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); @@ -2990,7 +2990,7 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) if (!ring || !ring->sched.thread) continue; kthread_park(ring->sched.thread); - amd_sched_hw_job_reset(&ring->sched); + amd_sched_hw_job_reset(&ring->sched, NULL); /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 764606ce3541..1474866d9048 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -443,9 +443,18 @@ static void amd_sched_job_timedout(struct work_struct *work) job->sched->ops->timedout_job(job); } -void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched) +static void amd_sched_set_guilty(struct amd_sched_job *s_job) +{ + if (atomic_inc_return(&s_job->karma) > s_job->sched->hang_limit) + if (s_job->s_entity->guilty) + atomic_set(s_job->s_entity->guilty, 1); +} + +void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad) { struct amd_sched_job *s_job; + struct amd_sched_entity *entity, *tmp; + int i; spin_lock(&sched->job_list_lock); list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { @@ -458,6 +467,26 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched) } } spin_unlock(&sched->job_list_lock); + + if (bad) { + bool found = false; + + for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++) { + struct amd_sched_rq *rq = &sched->sched_rq[i]; + + spin_lock(&rq->lock); + list_for_each_entry_safe(entity, tmp, &rq->entities, list) { + if (bad->s_fence->scheduled.context == entity->fence_context) { + found = true; + amd_sched_set_guilty(bad); + break; + } + } + spin_unlock(&rq->lock); + if (found) + break; + } + } } void amd_sched_job_kickout(struct amd_sched_job *s_job)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index a05994c60b34..be75172587da 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -174,7 +174,7 @@ int amd_sched_job_init(struct amd_sched_job *job, struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity, void *owner); -void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched); +void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *job); void amd_sched_job_recovery(struct amd_gpu_scheduler *sched); bool amd_sched_dependency_optimized(struct dma_fence* fence, struct amd_sched_entity *entity); -- cgit v1.2.3
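The karma scheme above is easier to see in isolation: every hang a job is blamed for bumps its karma counter, and once karma passes the scheduler's hang_limit the guilty flag shared with the submitting context is latched. A minimal standalone sketch of the pattern follows (the types and names below are simplified stand-ins, not the real amd_sched_* definitions):

    #include <stdatomic.h>
    #include <stdint.h>

    struct entity {
            uint64_t fence_context;  /* identifies the submitting context */
            atomic_int *guilty;      /* shared with the context; latched once set */
    };

    struct job {
            atomic_int karma;        /* hangs this job has been blamed for */
            int hang_limit;
            struct entity *entity;
    };

    static void blame_job(struct job *bad)
    {
            /* always bump karma; latch guilty only once the limit is exceeded */
            if (atomic_fetch_add(&bad->karma, 1) + 1 > bad->hang_limit &&
                bad->entity->guilty)
                    atomic_store(bad->entity->guilty, 1);
    }

The v2 note is the important subtlety: by the time the hang is handled, the entity that submitted the bad job may already have been freed, so the patch walks the run queue's live entity list and matches on fence_context rather than dereferencing a possibly stale entity pointer hanging off the job.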
From 9953b72f9c9cb7733334753788faab33ccc4dc0a Mon Sep 17 00:00:00 2001 From: pding Date: Thu, 26 Oct 2017 09:30:38 +0800 Subject: drm/amdgpu: change redundant init logs to debug level When this VF stays in exclusive mode for long, other VFs will be impacted. The redundant messages cause an exclusive mode timeout when they're redirected; redirecting the guest log to a virtual serial port is a normal use case for cloud services. Reviewed-by: Alex Deucher Signed-off-by: pding Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +--- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/si_dma.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 6 +++--- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 2 +- 24 files changed, 47 insertions(+), 50 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 5432af39a674..c70cda04dbfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -85,7 +85,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); break; default: - dev_info(adev->dev, "kfd not supported on this ASIC\n"); + dev_dbg(adev->dev, "kfd not supported on this ASIC\n"); return; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index f450b69323fa..39f4d0df1ada 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -690,12 +690,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); /* set a reasonable default for DP */ if (adev->clock.default_dispclk < 53900) { - DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", - adev->clock.default_dispclk / 100); + DRM_DEBUG("Changing default dispclk from %dMhz to 600Mhz\n", + adev->clock.default_dispclk / 100); adev->clock.default_dispclk = 60000; } else if (adev->clock.default_dispclk <= 60000) { - DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", - adev->clock.default_dispclk / 100); + DRM_DEBUG("Changing default dispclk from %dMhz to 625Mhz\n", + adev->clock.default_dispclk / 100); adev->clock.default_dispclk = 62500; } adev->clock.dp_extclk =
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d5d450e380bd..212138476130 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1029,7 +1029,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev) atom_card_info->ioreg_read = cail_ioreg_read; atom_card_info->ioreg_write = cail_ioreg_write; } else { - DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n"); + DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n"); atom_card_info->ioreg_read = cail_reg_read; atom_card_info->ioreg_write = cail_reg_write; } @@ -2267,8 +2267,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, dev_err(adev->dev, "gpu post error!\n"); goto failed; } - } else { - DRM_INFO("GPU post is not needed\n"); } if (adev->is_atom_fw) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index c2f414ffb2cc..6b11a75839e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -306,7 +306,6 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled) module_param_named(cik_support, amdgpu_cik_support, int, 0444); #endif - static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 72e8eacc2c03..eda89dfdef5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -391,9 +391,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, ring->fence_drv.irq_type = irq_type; ring->fence_drv.initialized = true; - dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, " - "cpu addr 0x%p\n", ring->idx, - ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr); + dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, " + "cpu addr 0x%p\n", ring->idx, + ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 47c5ce9807db..32590e4f9f7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -232,7 +232,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) int ret = pci_enable_msi(adev->pdev); if (!ret) { adev->irq.msi_enabled = true; - dev_info(adev->dev, "amdgpu: using MSI.\n"); + dev_dbg(adev->dev, "amdgpu: using MSI.\n"); } } @@ -262,7 +262,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) return r; } - DRM_INFO("amdgpu: irq initialized.\n"); + DRM_DEBUG("amdgpu: irq initialized.\n"); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index ea25164e7f4b..c48fc450d99b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -779,8 +779,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev) adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base, adev->mc.aper_size); DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", - adev->mc.mc_vram_size >> 20, - (unsigned long long)adev->mc.aper_size >> 20); + adev->mc.mc_vram_size >> 20, + (unsigned long long)adev->mc.aper_size >> 20); DRM_INFO("RAM width %dbits %s\n", adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]); return amdgpu_ttm_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 6604771356cd..be607b2be4e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1218,7 +1218,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) } else if (r < 0) { DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 873c99518954..92477e67087c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -954,7 +954,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) } if (i < timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed\n", @@ -999,7 +999,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) } else if (r < 0) { DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } error: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 96df21cca85d..dabaca4da7f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -261,7 +261,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", @@ -467,7 +467,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) } else if (r < 0) { DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } @@ -500,7 +500,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed\n", @@ -643,7 +643,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) } else if (r < 0) { DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } error: diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 60cecd117705..ed26dcbc4f79 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -657,7 +657,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", ring->idx, tmp); @@ -724,7 +724,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) } tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } else { DRM_ERROR("amdgpu: ib test failed 
(0x%08X)\n", tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index dbbe986f90f2..9430d4809b53 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1798,7 +1798,7 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", ring->idx, scratch, tmp); @@ -1951,7 +1951,7 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) } tmp = RREG32(scratch); if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 419ba0ce7ee5..015f37bef358 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2085,7 +2085,7 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", ring->idx, scratch, tmp); @@ -2365,7 +2365,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) } tmp = RREG32(scratch); if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 9ecdf621a74a..9f93e05bf97e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -804,7 +804,7 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", @@ -856,7 +856,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) } tmp = RREG32(scratch); if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index da43813d67a4..a5811e80e21b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -327,7 +327,7 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring) DRM_UDELAY(1); } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", @@ -379,7 +379,7 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) } tmp = RREG32(scratch); if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d 
succeeded\n", ring->idx); r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 67f375bfe452..92f8c44a73b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -633,7 +633,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", ring->idx, tmp); @@ -704,7 +704,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) } tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 6d06f8eb659f..52e6bf2e9e59 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -893,7 +893,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", ring->idx, tmp); @@ -964,7 +964,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) } tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 46009db3d195..fe78c00b9ffa 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -919,7 +919,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", ring->idx, tmp); @@ -990,7 +990,7 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) } tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index 3fa2fbf8c9a1..ee469a906cd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -252,7 +252,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", ring->idx, tmp); @@ -317,7 +317,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) } tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + 
DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 8ab0f78794a5..15771a53038e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -521,7 +521,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index bb6d46e168a3..3b29aaba783a 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -536,7 +536,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 246dfd74f5c9..0c01825a8b9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -184,7 +184,7 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed\n", @@ -360,7 +360,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) } else if (r < 0) { DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } error: @@ -1008,7 +1008,7 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 762ff3901a7a..7b77339feb1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -184,7 +184,7 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed\n", @@ -359,7 +359,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) } else if (r < 0) { DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } error: @@ -1192,7 +1192,7 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_INFO("ring test on %d succeeded in %d usecs\n", + DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); } else { DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index c7e34128cbde..726425809eed 100644 --- 
a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -122,7 +122,7 @@ static int pp_sw_init(void *handle) ret = hwmgr->smumgr_funcs->smu_init(hwmgr); - pr_info("amdgpu: powerplay sw initialized\n"); + pr_debug("amdgpu: powerplay sw initialized\n"); } return ret; } -- cgit v1.2.3
From 8840a3878d40c9318b08932376fa31e763780dfe Mon Sep 17 00:00:00 2001 From: pding Date: Mon, 23 Oct 2017 17:22:09 +0800 Subject: drm/amdgpu: retry init if it fails due to exclusive mode timeout (v3) The exclusive mode has a real-time limitation in reality, such as needing to finish within 300 ms. It's easily observed when running many VF/VMs on a single host with a heavy CPU workload. If we find that init fails due to an exclusive mode timeout, try it again. v2: - rewrite the condition for readability. v3: - fix typo, add comment for the sleep Acked-by: Alex Deucher Signed-off-by: pding Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 15 +++++++++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 212138476130..e521850e9409 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2303,6 +2303,15 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = amdgpu_init(adev); if (r) { + /* failed in exclusive mode due to timeout */ + if (amdgpu_sriov_vf(adev) && + !amdgpu_sriov_runtime(adev) && + amdgpu_virt_mmio_blocked(adev) && + !amdgpu_virt_wait_reset(adev)) { + dev_err(adev->dev, "VF exclusive mode timeout\n"); + r = -EAGAIN; + goto failed; + } dev_err(adev->dev, "amdgpu_init failed\n"); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); amdgpu_fini(adev); @@ -2390,6 +2399,7 @@ failed: amdgpu_vf_error_trans_all(adev); if (runtime) vga_switcheroo_fini_domain_pm_ops(adev->dev); + return r; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 720139e182a3..f313eee60c4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -86,7 +86,7 @@ done_free: int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) { struct amdgpu_device *adev; - int r, acpi_status; + int r, acpi_status, retry = 0; #ifdef CONFIG_DRM_AMDGPU_SI if (!amdgpu_si_support) { @@ -122,6 +122,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) } } #endif +retry_init: adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL); if (adev == NULL) { @@ -144,7 +145,17 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) * VRAM allocation */ r = amdgpu_device_init(adev, dev, dev->pdev, flags); - if (r) { + if (r == -EAGAIN && ++retry <= 3) { + adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; + adev->virt.ops = NULL; + amdgpu_device_fini(adev); + kfree(adev); + dev->dev_private = NULL; + /* Don't request EX mode too frequently, which would look like an attack */ + msleep(5000); + dev_err(&dev->pdev->dev, "retry init %d\n", retry); + goto retry_init; + } else if (r) { dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); goto out; } -- cgit v1.2.3
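The retry added above follows a common bounded-retry shape: on -EAGAIN, completely tear down the partially initialized device, back off long enough that the repeated exclusive-mode request does not look hostile to the host, then try again a limited number of times. A distilled userspace sketch of that control flow (device_init() and device_teardown() are illustrative stubs, not driver functions; the 3 retries and 5-second sleep mirror the patch):

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MAX_INIT_RETRIES 3

    /* stub for the real init path: fail with -EAGAIN twice, then succeed */
    static int attempts;
    static int device_init(void)
    {
            return (++attempts < 3) ? -EAGAIN : 0;
    }

    /* stub: free everything the failed attempt allocated */
    static void device_teardown(void)
    {
    }

    int main(void)
    {
            int retry = 0, r;

            while ((r = device_init()) == -EAGAIN && ++retry <= MAX_INIT_RETRIES) {
                    device_teardown();
                    sleep(5);  /* back off so repeated requests don't look like an attack */
                    fprintf(stderr, "retry init %d\n", retry);
            }
            return r ? 1 : 0;
    }

The full teardown before each retry matters: amdgpu_device_init() can fail at many points, and retrying on top of half-initialized state would only compound the failure.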
From 5740682e66cef57626a328d237698cad329c0449 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Wed, 25 Oct 2017 16:37:02 +0800 Subject: drm/amdgpu: implement new GPU recover (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1, new implementation named amdgpu_gpu_recover, which gives more of a hint on what it does compared with gpu_reset 2, gpu_recover unifies bare-metal and SR-IOV; only the asic reset part is implemented differently 3, gpu_recover will increase the hang job's karma and mark its entity/context as guilty if it exceeds the limit V2: 4, in the scheduler main routine, a job from a guilty context will be immediately fake-signaled after it is popped from the queue, and its fence set with the -ECANCELED error 5, in the scheduler recovery routine, all jobs from the guilty entity are dropped 6, in the run_job() routine, the real IB submission is skipped if the @skip parameter equals true or VRAM lost occurred V3: 7, replace the deprecated gpu reset with the new gpu recover Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 322 ++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 10 +- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 1 - drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 2 +- 8 files changed, 166 insertions(+), 184 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 88fa19b1a802..5714b7e8cb09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -178,6 +178,10 @@ extern int amdgpu_cik_support; #define CIK_CURSOR_WIDTH 128 #define CIK_CURSOR_HEIGHT 128 +/* GPU RESET flags */ +#define AMDGPU_RESET_INFO_VRAM_LOST (1 << 0) +#define AMDGPU_RESET_INFO_FULLRESET (1 << 1) + struct amdgpu_device; struct amdgpu_ib; struct amdgpu_cs_parser; @@ -1833,7 +1837,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) /* Common functions */ -int amdgpu_gpu_reset(struct amdgpu_device *adev); +int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job); bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_need_post(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e521850e9409..e287eeda2dab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2827,163 +2827,172 @@ err: return r; } -/** - * amdgpu_sriov_gpu_reset - reset the asic +/* + * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough * * @adev: amdgpu device pointer - * @job: which job trigger hang + * @reset_flags: output param tells caller the reset result * - * Attempt the reset the GPU if it has hung (all asics). - * for SRIOV case. - * Returns 0 for success or an error on failure.
- */ -int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job) + * attempt to do soft-reset or full-reset and reinitialize Asic + * return 0 means succeeded, otherwise failed +*/ +static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags) { - int i, j, r = 0; - int resched; - struct amdgpu_bo *bo, *tmp; - struct amdgpu_ring *ring; - struct dma_fence *fence = NULL, *next = NULL; + bool need_full_reset, vram_lost = 0; + int r; - mutex_lock(&adev->virt.lock_reset); - atomic_inc(&adev->gpu_reset_counter); - adev->in_sriov_reset = true; + need_full_reset = amdgpu_need_full_reset(adev); - /* block TTM */ - resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); + if (!need_full_reset) { + amdgpu_pre_soft_reset(adev); + r = amdgpu_soft_reset(adev); + amdgpu_post_soft_reset(adev); + if (r || amdgpu_check_soft_reset(adev)) { + DRM_INFO("soft reset failed, will fallback to full reset!\n"); + need_full_reset = true; + } - /* we start from the ring trigger GPU hang */ - j = job ? job->ring->idx : 0; + } - /* block scheduler */ - for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) { - ring = adev->rings[i % AMDGPU_MAX_RINGS]; - if (!ring || !ring->sched.thread) - continue; + if (need_full_reset) { + r = amdgpu_suspend(adev); - kthread_park(ring->sched.thread); +retry: + amdgpu_atombios_scratch_regs_save(adev); + r = amdgpu_asic_reset(adev); + amdgpu_atombios_scratch_regs_restore(adev); + /* post card */ + amdgpu_atom_asic_init(adev->mode_info.atom_context); - if (job && j != i) - continue; + if (!r) { + dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); + r = amdgpu_resume_phase1(adev); + if (r) + goto out; - /* here give the last chance to check if job removed from mirror-list - * since we already pay some time on kthread_park */ - if (job && list_empty(&job->base.node)) { - kthread_unpark(ring->sched.thread); - goto give_up_reset; + vram_lost = amdgpu_check_vram_lost(adev); + if (vram_lost) { + DRM_ERROR("VRAM is lost!\n"); + atomic_inc(&adev->vram_lost_counter); + } + + r = amdgpu_ttm_recover_gart(adev); + if (r) + goto out; + + r = amdgpu_resume_phase2(adev); + if (r) + goto out; + + if (vram_lost) + amdgpu_fill_reset_magic(adev); } + } - if (amd_sched_invalidate_job(&job->base, amdgpu_job_hang_limit)) - amd_sched_job_kickout(&job->base); +out: + if (!r) { + amdgpu_irq_gpu_reset_resume_helper(adev); + r = amdgpu_ib_ring_tests(adev); + if (r) { + dev_err(adev->dev, "ib ring test failed (%d).\n", r); + r = amdgpu_suspend(adev); + need_full_reset = true; + goto retry; + } + } - /* only do job_reset on the hang ring if @job not NULL */ - amd_sched_hw_job_reset(&ring->sched, NULL); + if (reset_flags) { + if (vram_lost) + (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST; - /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ - amdgpu_fence_driver_force_completion(ring); + if (need_full_reset) + (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET; } - /* request to take full control of GPU before re-initialization */ - if (job) - amdgpu_virt_reset_gpu(adev); - else - amdgpu_virt_request_full_gpu(adev, true); + return r; +} + +/* + * amdgpu_reset_sriov - reset ASIC for SR-IOV vf + * + * @adev: amdgpu device pointer + * @reset_flags: output param tells caller the reset result + * + * do VF FLR and reinitialize Asic + * return 0 means succeeded, otherwise failed +*/ +static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor) +{ + int r; + if (from_hypervisor) + r = amdgpu_virt_request_full_gpu(adev,
true); + else + r = amdgpu_virt_reset_gpu(adev); + if (r) + return r; /* Resume IP prior to SMC */ - amdgpu_sriov_reinit_early(adev); + r = amdgpu_sriov_reinit_early(adev); + if (r) + goto error; /* we need recover gart prior to run SMC/CP/SDMA resume */ amdgpu_ttm_recover_gart(adev); /* now we are okay to resume SMC/CP/SDMA */ - amdgpu_sriov_reinit_late(adev); + r = amdgpu_sriov_reinit_late(adev); + if (r) + goto error; amdgpu_irq_gpu_reset_resume_helper(adev); - - if (amdgpu_ib_ring_tests(adev)) + r = amdgpu_ib_ring_tests(adev); + if (r) dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r); +error: /* release full control of GPU after ib test */ amdgpu_virt_release_full_gpu(adev, true); - DRM_INFO("recover vram bo from shadow\n"); - - ring = adev->mman.buffer_funcs_ring; - mutex_lock(&adev->shadow_list_lock); - list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { - next = NULL; - amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); - if (fence) { - r = dma_fence_wait(fence, false); - if (r) { - WARN(r, "recovery from shadow isn't completed\n"); - break; - } - } - - dma_fence_put(fence); - fence = next; - } - mutex_unlock(&adev->shadow_list_lock); - - if (fence) { - r = dma_fence_wait(fence, false); - if (r) - WARN(r, "recovery from shadow isn't completed\n"); - } - dma_fence_put(fence); - - for (i = j; i < j + AMDGPU_MAX_RINGS; ++i) { - ring = adev->rings[i % AMDGPU_MAX_RINGS]; - if (!ring || !ring->sched.thread) - continue; - - if (job && j != i) { - kthread_unpark(ring->sched.thread); - continue; - } - - amd_sched_job_recovery(&ring->sched); - kthread_unpark(ring->sched.thread); - } + if (reset_flags) { + /* will get vram_lost from GIM in future, now all + * reset request considered VRAM LOST + */ + (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST; + atomic_inc(&adev->vram_lost_counter); - drm_helper_resume_force_mode(adev->ddev); -give_up_reset: - ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); - if (r) { - /* bad news, how to tell it to userspace ? */ - dev_info(adev->dev, "GPU reset failed\n"); - } else { - dev_info(adev->dev, "GPU reset successed!\n"); + /* VF FLR or hotlink reset is always full-reset */ + (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET; } - adev->in_sriov_reset = false; - mutex_unlock(&adev->virt.lock_reset); return r; } /** - * amdgpu_gpu_reset - reset the asic + * amdgpu_gpu_recover - reset the asic and recover scheduler * * @adev: amdgpu device pointer + * @job: which job trigger hang * - * Attempt the reset the GPU if it has hung (all asics). + * Attempt to reset the GPU if it has hung (all asics). * Returns 0 for success or an error on failure. */ -int amdgpu_gpu_reset(struct amdgpu_device *adev) +int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) { struct drm_atomic_state *state = NULL; - int i, r; - int resched; - bool need_full_reset, vram_lost = false; + uint64_t reset_flags = 0; + int i, r, resched; if (!amdgpu_check_soft_reset(adev)) { DRM_INFO("No hardware hang detected.
Did some blocks stall?\n"); return 0; } + dev_info(adev->dev, "GPU reset begin!\n"); + + mutex_lock(&adev->virt.lock_reset); atomic_inc(&adev->gpu_reset_counter); + adev->in_sriov_reset = 1; /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); @@ -2997,69 +3006,26 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) if (!ring || !ring->sched.thread) continue; + + /* only focus on the ring hit timeout if @job not NULL */ + if (job && job->ring->idx != i) + continue; + kthread_park(ring->sched.thread); - amd_sched_hw_job_reset(&ring->sched, NULL); + amd_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL); + /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); } - need_full_reset = amdgpu_need_full_reset(adev); - - if (!need_full_reset) { - amdgpu_pre_soft_reset(adev); - r = amdgpu_soft_reset(adev); - amdgpu_post_soft_reset(adev); - if (r || amdgpu_check_soft_reset(adev)) { - DRM_INFO("soft reset failed, will fallback to full reset!\n"); - need_full_reset = true; - } - } - - if (need_full_reset) { - r = amdgpu_suspend(adev); - -retry: - amdgpu_atombios_scratch_regs_save(adev); - r = amdgpu_asic_reset(adev); - amdgpu_atombios_scratch_regs_restore(adev); - /* post card */ - amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (amdgpu_sriov_vf(adev)) + r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true); + else + r = amdgpu_reset(adev, &reset_flags); - if (!r) { - dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); - r = amdgpu_resume_phase1(adev); - if (r) - goto out; - vram_lost = amdgpu_check_vram_lost(adev); - if (vram_lost) { - DRM_ERROR("VRAM is lost!\n"); - atomic_inc(&adev->vram_lost_counter); - } - r = amdgpu_ttm_recover_gart(adev); - if (r) - goto out; - r = amdgpu_resume_phase2(adev); - if (r) - goto out; - if (vram_lost) - amdgpu_fill_reset_magic(adev); - } - } -out: if (!r) { - amdgpu_irq_gpu_reset_resume_helper(adev); - r = amdgpu_ib_ring_tests(adev); - if (r) { - dev_err(adev->dev, "ib ring test failed (%d).\n", r); - r = amdgpu_suspend(adev); - need_full_reset = true; - goto retry; - } - /** - * recovery vm page tables, since we cannot depend on VRAM is - * consistent after gpu full reset.
- */ - if (need_full_reset && amdgpu_need_backup(adev)) { + if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) || + (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) { struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_bo *bo, *tmp; struct dma_fence *fence = NULL, *next = NULL; @@ -3088,40 +3054,56 @@ out: } dma_fence_put(fence); } + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; if (!ring || !ring->sched.thread) continue; + /* only focus on the ring hit timeout if @job not NULL */ + if (job && job->ring->idx != i) + continue; + amd_sched_job_recovery(&ring->sched); kthread_unpark(ring->sched.thread); } } else { - dev_err(adev->dev, "asic resume failed (%d).\n", r); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { - if (adev->rings[i] && adev->rings[i]->sched.thread) { - kthread_unpark(adev->rings[i]->sched.thread); - } + struct amdgpu_ring *ring = adev->rings[i]; + + if (!ring || !ring->sched.thread) + continue; + + /* only focus on the ring hit timeout if @job not NULL */ + if (job && job->ring->idx != i) + continue; + + kthread_unpark(adev->rings[i]->sched.thread); } } if (amdgpu_device_has_dc_support(adev)) { - r = drm_atomic_helper_resume(adev->ddev, state); + if (drm_atomic_helper_resume(adev->ddev, state)) + dev_info(adev->dev, "drm resume failed:%d\n", r); amdgpu_dm_display_resume(adev); - } else + } else { drm_helper_resume_force_mode(adev->ddev); + } ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); + if (r) { /* bad news, how to tell it to userspace ? */ - dev_info(adev->dev, "GPU reset failed\n"); - } - else { - dev_info(adev->dev, "GPU reset successed!\n"); + dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter)); + amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); + } else { + dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter)); } amdgpu_vf_error_trans_all(adev); + adev->in_sriov_reset = 0; + mutex_unlock(&adev->virt.lock_reset); return r; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index eda89dfdef5b..604ac03a42e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -694,25 +694,25 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) } /** - * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset + * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover * * Manually trigger a gpu reset at the next fence wait.
*/ -static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data) +static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = dev->dev_private; - seq_printf(m, "gpu reset\n"); - amdgpu_gpu_reset(adev); + seq_printf(m, "gpu recover\n"); + amdgpu_gpu_recover(adev, NULL); return 0; } static const struct drm_info_list amdgpu_debugfs_fence_list[] = { {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL}, - {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL} + {"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL} }; static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 32590e4f9f7a..c340774082ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -88,7 +88,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work) reset_work); if (!amdgpu_sriov_vf(adev)) - amdgpu_gpu_reset(adev); + amdgpu_gpu_recover(adev, NULL); } /* Disable *all* interrupts */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 0a90c768dbc1..18770a880393 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -37,10 +37,7 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job) atomic_read(&job->ring->fence_drv.last_seq), job->ring->fence_drv.sync_seq); - if (amdgpu_sriov_vf(job->adev)) - amdgpu_sriov_gpu_reset(job->adev, job); - else - amdgpu_gpu_reset(job->adev); + amdgpu_gpu_recover(job->adev, job); } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index d149aca71a44..20bdb8fb0b8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -288,7 +288,6 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); int amdgpu_virt_wait_reset(struct amdgpu_device *adev); -int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job); int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev); void amdgpu_virt_free_mm_table(struct amdgpu_device *adev); int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index f91aab38637c..c32d0b0868e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -254,7 +254,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_sriov_gpu_reset(adev, NULL); + amdgpu_gpu_recover(adev, NULL); } static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 27b03c773b1b..818ec0fe2f51 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -519,7 +519,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_sriov_gpu_reset(adev, NULL); + amdgpu_gpu_recover(adev, NULL); } static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev, -- cgit v1.2.3
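With amdgpu_gpu_recover() in place, the asic-level helpers report what actually happened through a small flag word instead of separate booleans, and the recovery path branches on those bits to decide whether VRAM contents must be restored from shadow BOs. The pattern, reduced to a standalone sketch (the flag names are shortened from the AMDGPU_RESET_INFO_* defines and the surrounding logic is simplified):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RESET_INFO_VRAM_LOST (1u << 0)
    #define RESET_INFO_FULLRESET (1u << 1)

    /* stub reset: pretend a full reset ran but VRAM survived */
    static int do_reset(uint64_t *reset_flags)
    {
            if (reset_flags)
                    *reset_flags |= RESET_INFO_FULLRESET;
            return 0;
    }

    int main(void)
    {
            uint64_t flags = 0;
            bool is_apu = false;  /* APUs have no dedicated VRAM to restore */

            if (do_reset(&flags) == 0) {
                    /* shadow restore is needed when VRAM content is gone, or a
                     * full reset ran on a board with dedicated VRAM */
                    if (((flags & RESET_INFO_FULLRESET) && !is_apu) ||
                        (flags & RESET_INFO_VRAM_LOST))
                            printf("recover VRAM contents from shadow BOs\n");
            }
            return 0;
    }

Packing the results into one uint64_t keeps the amdgpu_reset()/amdgpu_reset_sriov() signatures identical, which is what lets amdgpu_gpu_recover() treat the bare-metal and SR-IOV reset backends uniformly.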
From 13a752e3a246493bfaba0cf0e0f376672ebb734c Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 17 Oct 2017 15:11:12 +0800 Subject: drm/amdgpu: cleanup in_sriov_reset and lock_reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since gpu reset is now unified with gpu_recover for both bare-metal and SR-IOV: 1) rename in_sriov_reset to in_gpu_reset 2) move lock_reset from adev->virt to adev Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9 +++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 2 -- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 1 - drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6 +++--- 8 files changed, 15 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5714b7e8cb09..f25d246395a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1643,7 +1643,8 @@ struct amdgpu_device { /* record last mm index being written through WREG32*/ unsigned long last_mm_index; - bool in_sriov_reset; + bool in_gpu_reset; + struct mutex lock_reset; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e287eeda2dab..9d4e0b88b101 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2163,6 +2163,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); hash_init(adev->mn_hash); + mutex_init(&adev->lock_reset); amdgpu_check_arguments(adev); @@ -2990,9 +2991,9 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) dev_info(adev->dev, "GPU reset begin!\n"); - mutex_lock(&adev->virt.lock_reset); + mutex_lock(&adev->lock_reset); atomic_inc(&adev->gpu_reset_counter); - adev->in_sriov_reset = 1; + adev->in_gpu_reset = 1; /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); @@ -3102,8 +3103,8 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) } amdgpu_vf_error_trans_all(adev); - adev->in_sriov_reset = 0; - mutex_unlock(&adev->virt.lock_reset); + adev->in_gpu_reset = 0; + mutex_unlock(&adev->lock_reset); return r; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 447d446b5015..76f531b8e97b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -264,7 +264,7 @@ static int psp_hw_start(struct psp_context *psp) struct amdgpu_device *adev = psp->adev; int ret; - if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) { + if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) { ret = psp_bootloader_load_sysdrv(psp); if (ret) return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 65649026b836..edc37cc80644 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -370,7 +370,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) return 0; } - if
(!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) { + if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) { err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index b7510bf6bc0f..733c64cbd1c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -115,8 +115,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) adev->enable_virtual_display = true; adev->cg_flags = 0; adev->pg_flags = 0; - - mutex_init(&adev->virt.lock_reset); } uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 20bdb8fb0b8c..e3f78f5edc00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -239,7 +239,6 @@ struct amdgpu_virt { uint64_t csa_vmid0_addr; bool chained_ib_support; uint32_t reg_val_offs; - struct mutex lock_reset; struct amdgpu_irq_src ack_irq; struct amdgpu_irq_src rcv_irq; struct work_struct flr_work; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 46ee74393454..2ddf126614da 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4824,7 +4824,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring) gfx_v8_0_kiq_setting(ring); - if (adev->in_sriov_reset) { /* for GPU_RESET case */ + if (adev->in_gpu_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); @@ -4861,7 +4861,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) struct vi_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.compute_ring[0]; - if (!adev->in_sriov_reset && !adev->gfx.in_suspend) { + if (!adev->in_gpu_reset && !adev->gfx.in_suspend) { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; @@ -4873,7 +4873,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation)); - } else if (adev->in_sriov_reset) { /* for GPU_RESET case */ + } else if (adev->in_gpu_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 46a0d3ef773f..19a619f759f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2757,7 +2757,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) gfx_v9_0_kiq_setting(ring); - if (adev->in_sriov_reset) { /* for GPU_RESET case */ + if (adev->in_gpu_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); @@ -2795,7 +2795,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) struct v9_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.compute_ring[0]; - if 
(!adev->in_sriov_reset && !adev->gfx.in_suspend) { + if (!adev->in_gpu_reset && !adev->gfx.in_suspend) { memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; @@ -2807,7 +2807,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); - } else if (adev->in_sriov_reset) { /* for GPU_RESET case */ + } else if (adev->in_gpu_reset) { /* for GPU_RESET case */ /* reset MQD to a clean status */ if (adev->gfx.mec.mqd_backup[mqd_idx]) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); -- cgit v1.2.3 From c1c7ce8f5687bb01b2eb0db3c19cb375267bb16d Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 16 Oct 2017 16:50:32 +0200 Subject: drm/amdgpu: move GART recovery into GTT manager v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The GTT manager handles the GART address space anyway, so it is completely pointless to keep the same information around twice. v2: rebased Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 -- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 53 +++++++++++++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 51 ++++++++------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 3 +- 5 files changed, 59 insertions(+), 59 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f25d246395a1..d11967a5c392 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1630,9 +1630,6 @@ struct amdgpu_device { /* link all shadow bo */ struct list_head shadow_list; struct mutex shadow_list_lock; - /* link all gtt */ - spinlock_t gtt_list_lock; - struct list_head gtt_list; /* keep an lru list of rings by HW IP */ struct list_head ring_lru_list; spinlock_t ring_lru_list_lock; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9d4e0b88b101..7af0d5d8cb1d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2180,9 +2180,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&adev->shadow_list); mutex_init(&adev->shadow_list_lock); - INIT_LIST_HEAD(&adev->gtt_list); - spin_lock_init(&adev->gtt_list_lock); - INIT_LIST_HEAD(&adev->ring_lru_list); spin_lock_init(&adev->ring_lru_list_lock); @@ -2877,7 +2874,8 @@ retry: atomic_inc(&adev->vram_lost_counter); } - r = amdgpu_ttm_recover_gart(adev); + r = amdgpu_gtt_mgr_recover( + &adev->mman.bdev.man[TTM_PL_TT]); if (r) goto out; @@ -2939,7 +2937,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, goto error; /* we need recover gart prior to run SMC/CP/SDMA resume */ - amdgpu_ttm_recover_gart(adev); + amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); /* now we are okay to resume SMC/CP/SDMA */ r = amdgpu_sriov_reinit_late(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index f7669dc6909b..e14ab34d8262 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ 
-31,6 +31,11 @@ struct amdgpu_gtt_mgr { atomic64_t available; }; +struct amdgpu_gtt_node { + struct drm_mm_node node; + struct ttm_buffer_object *tbo; +}; + /** * amdgpu_gtt_mgr_init - init GTT manager and DRM MM * @@ -87,9 +92,9 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) */ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem) { - struct drm_mm_node *node = mem->mm_node; + struct amdgpu_gtt_node *node = mem->mm_node; - return (node->start != AMDGPU_BO_INVALID_OFFSET); + return (node->node.start != AMDGPU_BO_INVALID_OFFSET); } /** @@ -109,7 +114,7 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, { struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev); struct amdgpu_gtt_mgr *mgr = man->priv; - struct drm_mm_node *node = mem->mm_node; + struct amdgpu_gtt_node *node = mem->mm_node; enum drm_mm_insert_mode mode; unsigned long fpfn, lpfn; int r; @@ -132,13 +137,13 @@ static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man, mode = DRM_MM_INSERT_HIGH; spin_lock(&mgr->lock); - r = drm_mm_insert_node_in_range(&mgr->mm, node, - mem->num_pages, mem->page_alignment, 0, - fpfn, lpfn, mode); + r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages, + mem->page_alignment, 0, fpfn, lpfn, + mode); spin_unlock(&mgr->lock); if (!r) - mem->start = node->start; + mem->start = node->node.start; return r; } @@ -159,7 +164,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { struct amdgpu_gtt_mgr *mgr = man->priv; - struct drm_mm_node *node; + struct amdgpu_gtt_node *node; int r; spin_lock(&mgr->lock); @@ -177,8 +182,9 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, goto err_out; } - node->start = AMDGPU_BO_INVALID_OFFSET; - node->size = mem->num_pages; + node->node.start = AMDGPU_BO_INVALID_OFFSET; + node->node.size = mem->num_pages; + node->tbo = tbo; mem->mm_node = node; if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) { @@ -190,7 +196,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man, goto err_out; } } else { - mem->start = node->start; + mem->start = node->node.start; } return 0; @@ -214,14 +220,14 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { struct amdgpu_gtt_mgr *mgr = man->priv; - struct drm_mm_node *node = mem->mm_node; + struct amdgpu_gtt_node *node = mem->mm_node; if (!node) return; spin_lock(&mgr->lock); - if (node->start != AMDGPU_BO_INVALID_OFFSET) - drm_mm_remove_node(node); + if (node->node.start != AMDGPU_BO_INVALID_OFFSET) + drm_mm_remove_node(&node->node); spin_unlock(&mgr->lock); atomic64_add(mem->num_pages, &mgr->available); @@ -244,6 +250,25 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man) return (result > 0 ? 
result : 0) * PAGE_SIZE; } +int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man) +{ + struct amdgpu_gtt_mgr *mgr = man->priv; + struct amdgpu_gtt_node *node; + struct drm_mm_node *mm_node; + int r = 0; + + spin_lock(&mgr->lock); + drm_mm_for_each_node(mm_node, &mgr->mm) { + node = container_of(mm_node, struct amdgpu_gtt_node, node); + r = amdgpu_ttm_recover_gart(node->tbo); + if (r) + break; + } + spin_unlock(&mgr->lock); + + return r; +} + /** * amdgpu_gtt_mgr_debug - dump VRAM table * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3d02c2dd06e5..34dbe7afb600 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -689,7 +689,6 @@ struct amdgpu_ttm_tt { struct list_head guptasks; atomic_t mmu_invalidations; uint32_t last_set_pages; - struct list_head list; }; int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) @@ -865,21 +864,14 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, return 0; } - spin_lock(&gtt->adev->gtt_list_lock); flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages, ttm->pages, gtt->ttm.dma_address, flags); - if (r) { + if (r) DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", ttm->num_pages, gtt->offset); - goto error_gart_bind; - } - - list_add_tail(&gtt->list, &gtt->adev->gtt_list); -error_gart_bind: - spin_unlock(&gtt->adev->gtt_list_lock); return r; } @@ -920,29 +912,23 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo) return r; } -int amdgpu_ttm_recover_gart(struct amdgpu_device *adev) +int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) { - struct amdgpu_ttm_tt *gtt, *tmp; - struct ttm_mem_reg bo_mem; + struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); + struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm; uint64_t flags; int r; - bo_mem.mem_type = TTM_PL_TT; - spin_lock(&adev->gtt_list_lock); - list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) { - flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem); - r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages, - gtt->ttm.ttm.pages, gtt->ttm.dma_address, - flags); - if (r) { - spin_unlock(&adev->gtt_list_lock); - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", - gtt->ttm.ttm.num_pages, gtt->offset); - return r; - } - } - spin_unlock(&adev->gtt_list_lock); - return 0; + if (!gtt) + return 0; + + flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem); + r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages, + gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags); + if (r) + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + gtt->ttm.ttm.num_pages, gtt->offset); + return r; } static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) @@ -957,16 +943,10 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) return 0; /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ - spin_lock(&gtt->adev->gtt_list_lock); r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages); - if (r) { + if (r) DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n", gtt->ttm.ttm.num_pages, gtt->offset); - goto error_unbind; - } - list_del_init(&gtt->list); -error_unbind: - spin_unlock(&gtt->adev->gtt_list_lock); return r; } @@ -1003,7 +983,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev, kfree(gtt); return NULL; } - INIT_LIST_HEAD(&gtt->list); return &gtt->ttm.ttm; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 016d2af05d34..d2985def4168 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -69,6 +69,7 @@ extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func; bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem); uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); +int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); @@ -91,7 +92,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); int amdgpu_ttm_bind(struct ttm_buffer_object *bo); -int amdgpu_ttm_recover_gart(struct amdgpu_device *adev); +int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages); -- cgit v1.2.3 From 5ffa61c1bdc35895f60ef7b553b43266d3fda469 Mon Sep 17 00:00:00 2001 From: pding Date: Mon, 30 Oct 2017 14:07:24 +0800 Subject: drm/amdgpu: retry init if exclusive mode request fails This is caused by the hypervisor failing to handle the request; one known issue is an MMIO unblocking timeout. In theory we can retry init here. Signed-off-by: pding Reviewed-by: Xiangliang Yu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7af0d5d8cb1d..69e724c96442 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1625,7 +1625,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_request_full_gpu(adev, true); if (r) - return r; + return -EAGAIN; } for (i = 0; i < adev->num_ip_blocks; i++) { -- cgit v1.2.3 From 3c7388936a76affe656d7ba682a33740a99b4a19 Mon Sep 17 00:00:00 2001 From: Horace Chen Date: Wed, 1 Nov 2017 19:32:11 +0800 Subject: drm/amdgpu: refine SR-IOV firmware VRAM reservation to protect data The previous solution created a zero-filled buffer in the system domain and then moved the zeroes into VRAM, which destroyed the original data in VRAM. Refine the code to create the BO in the VRAM domain directly, then remove and re-create the mem node at the exact requested position before bo_pin. This avoids corrupting the data and does not cause eviction.
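[Editor's note: the diff below implements the re-positioning trick described above. As a minimal standalone sketch (the helper name is hypothetical; the calls match TTM as of this series): shrink every allowed placement window of the already-created BO to the requested range, drop the mem node picked at creation time, and re-run ttm_bo_mem_space() so a new node is allocated inside that range:

	static int place_bo_at_exact_offset(struct amdgpu_bo *bo,
					    u64 offset, u64 size)
	{
		unsigned i;

		offset = ALIGN(offset, PAGE_SIZE);
		for (i = 0; i < bo->placement.num_placement; ++i) {
			bo->placements[i].fpfn = offset >> PAGE_SHIFT;
			bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
		}

		/* free the node chosen at creation time ... */
		ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
		/* ... and allocate a new one inside [fpfn, lpfn) */
		return ttm_bo_mem_space(&bo->tbo, &bo->placement,
					&bo->tbo.mem, false, false);
	}

Because the BO is already in VRAM, nothing is copied and nothing else is evicted; the subsequent amdgpu_bo_pin_restricted() then pins the BO where it already sits.]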
Signed-off-by: Horace Chen Reviewed-by: monk liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 69e724c96442..83dbd02004b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -680,8 +680,12 @@ void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev) int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev) { int r = 0; + int i; u64 gpu_addr; u64 vram_size = adev->mc.visible_vram_size; + u64 offset = adev->fw_vram_usage.start_offset; + u64 size = adev->fw_vram_usage.size; + struct amdgpu_bo *bo; adev->fw_vram_usage.va = NULL; adev->fw_vram_usage.reserved_bo = NULL; @@ -690,7 +694,7 @@ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev) adev->fw_vram_usage.size <= vram_size) { r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, - PAGE_SIZE, true, 0, + PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0, &adev->fw_vram_usage.reserved_bo); @@ -700,6 +704,23 @@ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev) r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); if (r) goto error_reserve; + + /* remove the original mem node and create a new one at the + * request position + */ + bo = adev->fw_vram_usage.reserved_bo; + offset = ALIGN(offset, PAGE_SIZE); + for (i = 0; i < bo->placement.num_placement; ++i) { + bo->placements[i].fpfn = offset >> PAGE_SHIFT; + bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; + } + + ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); + r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, + false, false); + if (r) + goto error_pin; + r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, AMDGPU_GEM_DOMAIN_VRAM, adev->fw_vram_usage.start_offset, -- cgit v1.2.3 From d6895ad39f3b396be199f5b6fdfb8cde4be7bbf7 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 28 Feb 2017 10:36:43 +0100 Subject: drm/amdgpu: resize VRAM BAR for CPU access v6 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Try to resize BAR0 to let CPU access all of VRAM. v2: rebased, style cleanups, disable mem decode before resize, handle gmc_v9 as well, round size up to power of two. v3: handle gmc_v6 as well, release and reassign all BARs in the driver. v4: rename new function to amdgpu_device_resize_fb_bar, reenable mem decoding only if all resources are assigned. v5: reorder resource release, return -ENODEV instead of BUG_ON(). 
v6: squash in rebase fix Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 50 ++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 12 +++++-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 13 ++++++-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13 ++++++-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 14 ++++++--- 6 files changed, 90 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d11967a5c392..a7066f55fe2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1847,6 +1847,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); +int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev); void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 83dbd02004b0..84c8bbec27ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -410,6 +410,9 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev) return 0; } + if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET) + return -EINVAL; + /* doorbell bar mapping */ adev->doorbell.base = pci_resource_start(adev->pdev, 2); adev->doorbell.size = pci_resource_len(adev->pdev, 2); @@ -749,6 +752,53 @@ error_create: return r; } +/** + * amdgpu_device_resize_fb_bar - try to resize FB BAR + * + * @adev: amdgpu_device pointer + * + * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not + * to fail, but if any of the BARs is not accessible after the size we abort + * driver loading by returning -ENODEV. + */ +int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) +{ + u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size); + u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1; + u16 cmd; + int r; + + /* Disable memory decoding while we change the BAR addresses and size */ + pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); + pci_write_config_word(adev->pdev, PCI_COMMAND, + cmd & ~PCI_COMMAND_MEMORY); + + /* Free the VRAM and doorbell BAR, we most likely need to move both. */ + amdgpu_doorbell_fini(adev); + if (adev->asic_type >= CHIP_BONAIRE) + pci_release_resource(adev->pdev, 2); + + pci_release_resource(adev->pdev, 0); + + r = pci_resize_resource(adev->pdev, 0, rbar_size); + if (r == -ENOSPC) + DRM_INFO("Not enough PCI address space for a large BAR."); + else if (r && r != -ENOTSUPP) + DRM_ERROR("Problem resizing BAR0 (%d).", r); + + pci_assign_unassigned_bus_resources(adev->pdev->bus); + + /* When the doorbell or fb BAR isn't available we have no chance of + * using the device. + */ + r = amdgpu_doorbell_init(adev); + if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) + return -ENODEV; + + pci_write_config_word(adev->pdev, PCI_COMMAND, cmd); + + return 0; +} /* * GPU helpers function. 
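[Editor's note: the rbar_size expression in amdgpu_device_resize_fb_bar() above is easy to misread. The PCI resizable-BAR capability encodes a BAR size of 2^(n+20) bytes as the value n, i.e. log2 of the size in MiB. A standalone sketch of the same computation, using the kernel helpers from <linux/log2.h> (the function name is hypothetical):

	#include <linux/log2.h>

	static u32 rebar_encoding(u64 vram_bytes)
	{
		u64 space_needed = roundup_pow_of_two(vram_bytes);

		/* "| 1" keeps order_base_2() defined when the argument is 0
		 * and bumps exact powers of two, so the trailing -1 lands
		 * exactly on log2(size in MiB). */
		return order_base_2((space_needed >> 20) | 1) - 1;
	}

Worked example: 8 GiB of VRAM gives space_needed = 2^33; (2^33 >> 20) | 1 = 8193; order_base_2(8193) = 14; minus 1 is 13; and 2^(13+20) bytes is indeed 8 GiB.]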
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index f4603a7c8ef3..d2a43db22cff 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -283,6 +283,7 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) u32 tmp; int chansize, numchan; + int r; tmp = RREG32(mmMC_ARB_RAMCFG); if (tmp & (1 << 11)) { @@ -324,12 +325,17 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) break; } adev->mc.vram_width = numchan * chansize; - /* Could aper size report 0 ? */ - adev->mc.aper_base = pci_resource_start(adev->pdev, 0); - adev->mc.aper_size = pci_resource_len(adev->pdev, 0); /* size in MB on si */ adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; + + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_device_resize_fb_bar(adev); + if (r) + return r; + } + adev->mc.aper_base = pci_resource_start(adev->pdev, 0); + adev->mc.aper_size = pci_resource_len(adev->pdev, 0); adev->mc.visible_vram_size = adev->mc.aper_size; /* set the gart size */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index b0528ca9207b..583d87792820 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -322,6 +322,8 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) */ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) { + int r; + adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev); if (!adev->mc.vram_width) { u32 tmp; @@ -367,13 +369,18 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) } adev->mc.vram_width = numchan * chansize; } - /* Could aper size report 0 ? */ - adev->mc.aper_base = pci_resource_start(adev->pdev, 0); - adev->mc.aper_size = pci_resource_len(adev->pdev, 0); /* size in MB on si */ adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_device_resize_fb_bar(adev); + if (r) + return r; + } + adev->mc.aper_base = pci_resource_start(adev->pdev, 0); + adev->mc.aper_size = pci_resource_len(adev->pdev, 0); + #ifdef CONFIG_X86_64 if (adev->flags & AMD_IS_APU) { adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index f368cfe2f585..9ca5fea93ebc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -498,6 +498,8 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) */ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) { + int r; + adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev); if (!adev->mc.vram_width) { u32 tmp; @@ -543,13 +545,18 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) } adev->mc.vram_width = numchan * chansize; } - /* Could aper size report 0 ? 
*/ - adev->mc.aper_base = pci_resource_start(adev->pdev, 0); - adev->mc.aper_size = pci_resource_len(adev->pdev, 0); /* size in MB on si */ adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_device_resize_fb_bar(adev); + if (r) + return r; + } + adev->mc.aper_base = pci_resource_start(adev->pdev, 0); + adev->mc.aper_size = pci_resource_len(adev->pdev, 0); + #ifdef CONFIG_X86_64 if (adev->flags & AMD_IS_APU) { adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c8f1aebeac7a..f11dfd47b517 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -449,6 +449,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) { u32 tmp; int chansize, numchan; + int r; adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); if (!adev->mc.vram_width) { @@ -491,17 +492,22 @@ adev->mc.vram_width = numchan * chansize; } - /* Could aper size report 0 ? */ - adev->mc.aper_base = pci_resource_start(adev->pdev, 0); - adev->mc.aper_size = pci_resource_len(adev->pdev, 0); /* size in MB on si */ adev->mc.mc_vram_size = ((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) : nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL; adev->mc.real_vram_size = adev->mc.mc_vram_size; - adev->mc.visible_vram_size = adev->mc.aper_size; + + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_device_resize_fb_bar(adev); + if (r) + return r; + } + adev->mc.aper_base = pci_resource_start(adev->pdev, 0); + adev->mc.aper_size = pci_resource_len(adev->pdev, 0); /* In case the PCI BAR is larger than the actual amount of vram */ + adev->mc.visible_vram_size = adev->mc.aper_size; if (adev->mc.visible_vram_size > adev->mc.real_vram_size) adev->mc.visible_vram_size = adev->mc.real_vram_size; -- cgit v1.2.3 From 1884734a03904d23137a075c9b64cd9871af1914 Mon Sep 17 00:00:00 2001 From: pding Date: Mon, 6 Nov 2017 10:21:26 +0800 Subject: drm/amdkfd: initialise kfd inside amdgpu_device_init Also finalize kfd inside amdgpu_device_fini. kfd device_init needs SR-IOV exclusive access; gather the exclusive-access work together to reduce the time it consumes.
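[Editor's note: the motivation here is ordering. Under SR-IOV the driver holds exclusive GPU access only between amdgpu_virt_request_full_gpu() and amdgpu_virt_release_full_gpu(), so work that needs exclusive access should be batched into that one window. A flattened, illustrative sketch of the flow after this patch and the next one ("release exclusive mode after hw_init"); the function names match the driver, but the body is a simplification, not the real code:

	static int init_flow(struct amdgpu_device *adev)
	{
		int r;

		amdgpu_amdkfd_device_probe(adev);

		if (amdgpu_sriov_vf(adev)) {
			/* enter exclusive mode */
			r = amdgpu_virt_request_full_gpu(adev, true);
			if (r)
				return -EAGAIN;
		}

		r = amdgpu_init(adev);	/* hw init of all IP blocks */
		if (r)
			return r;

		/* KFD init happens inside the same exclusive window */
		amdgpu_amdkfd_device_init(adev);

		if (amdgpu_sriov_vf(adev))
			/* leave exclusive mode */
			amdgpu_virt_release_full_gpu(adev, true);

		return 0;
	}
]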
Signed-off-by: pding Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 ----- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 84c8bbec27ce..6636fa25aaf5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1693,6 +1693,8 @@ static int amdgpu_early_init(struct amdgpu_device *adev) if (r) return r; + amdgpu_amdkfd_device_probe(adev); + if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_request_full_gpu(adev, true); if (r) @@ -1787,6 +1789,7 @@ static int amdgpu_init(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = true; } + amdgpu_amdkfd_device_init(adev); return 0; } @@ -1854,6 +1857,7 @@ static int amdgpu_fini(struct amdgpu_device *adev) { int i, r; + amdgpu_amdkfd_device_fini(adev); /* need to disable SMC first */ for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.hw) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index f313eee60c4a..252d41d5ff56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -63,8 +63,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) pm_runtime_forbid(dev->dev); } - amdgpu_amdkfd_device_fini(adev); - amdgpu_acpi_fini(adev); amdgpu_device_fini(adev); @@ -170,9 +168,6 @@ retry_init: "Error during ACPI methods call\n"); } - amdgpu_amdkfd_device_probe(adev); - amdgpu_amdkfd_device_init(adev); - if (amdgpu_device_is_px(dev)) { pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); -- cgit v1.2.3 From c6332b97fa77bafba2e4c68050090c08c21bff35 Mon Sep 17 00:00:00 2001 From: pding Date: Mon, 6 Nov 2017 11:21:55 +0800 Subject: drm/amdgpu: release exclusive mode after hw_init Signed-off-by: pding Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3 --- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6636fa25aaf5..ddb50c48cb06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1790,6 +1790,10 @@ static int amdgpu_init(struct amdgpu_device *adev) } amdgpu_amdkfd_device_init(adev); + + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_release_full_gpu(adev, true); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 252d41d5ff56..9ed1bee0fd64 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -177,9 +177,6 @@ retry_init: pm_runtime_put_autosuspend(dev->dev); } - if (amdgpu_sriov_vf(adev)) - amdgpu_virt_release_full_gpu(adev, true); - out: if (r) { /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ -- cgit v1.2.3 From 0c03b912d7f84636766bc26d38df5b21f1c00e94 Mon Sep 17 00:00:00 2001 From: pding Date: Tue, 7 Nov 2017 11:02:00 +0800 Subject: drm/amdgpu: bypass FB resizing for SRIOV VF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It introduces 900ms latency in exclusive mode which causes failure of driver loading. 
The host can resize the BAR before the guest starts, so resizing it here is not necessary. Signed-off-by: Pixel Ding Reviewed-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ddb50c48cb06..05087eda0b71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -768,6 +768,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) u16 cmd; int r; + /* Bypass for VF */ + if (amdgpu_sriov_vf(adev)) + return 0; + /* Disable memory decoding while we change the BAR addresses and size */ pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); pci_write_config_word(adev->pdev, PCI_COMMAND, -- cgit v1.2.3 From 75bc6099bc619bd58e09d5203081ec9dc5535ec1 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Mon, 30 Oct 2017 20:11:54 +0800 Subject: drm/amdgpu:read VRAMLOST from gim Signed-off-by: Monk Liu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9 ++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 3 +++ 3 files changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 05087eda0b71..57267ac265f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3037,11 +3037,10 @@ error: amdgpu_virt_release_full_gpu(adev, true); if (reset_flags) { - /* will get vram_lost from GIM in future, now all - * reset request considered VRAM LOST - */ - (*reset_flags) |= ~AMDGPU_RESET_INFO_VRAM_LOST; - atomic_inc(&adev->vram_lost_counter); + if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { + (*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST; + atomic_inc(&adev->vram_lost_counter); + } /* VF FLR or hotlink reset is always full-reset */ (*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 733c64cbd1c8..663bf35cdce9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -334,6 +334,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version; AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size); AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum); + AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature); /* pf2vf message must be in 4K */ if (pf2vf_size > 0 && pf2vf_size < 4096) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index e3f78f5edc00..f77d116c7a1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -81,6 +81,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1, /* GIM supports feature of loading uCodes */ AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2, + /* VRAM LOST by GIM */ + AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, }; struct amdgim_pf2vf_info_header { @@ -246,6 +248,7 @@ struct amdgpu_virt { const struct amdgpu_virt_ops *ops; struct amdgpu_vf_error_buffer vf_errors; struct amdgpu_virt_fw_reserve fw_reserve; + uint32_t gim_feature; }; #define AMDGPU_CSA_SIZE 
(8 * 1024) -- cgit v1.2.3 From 1daee8b472e896f477e490ca66e58d1f8f0f4e61 Mon Sep 17 00:00:00 2001 From: Pixel Ding Date: Wed, 8 Nov 2017 11:03:14 +0800 Subject: drm/amdgpu: revise retry init to fully cleanup driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Retry at drm_dev_register instead of amdgpu_device_init. Reviewed-by: Christian König Signed-off-by: Pixel Ding Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 11 +++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 15 ++------------- 3 files changed, 14 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 57267ac265f0..ee2a5f9a32f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2390,6 +2390,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, amdgpu_virt_mmio_blocked(adev) && !amdgpu_virt_wait_reset(adev)) { dev_err(adev->dev, "VF exclusive mode timeout\n"); + /* Don't send request since VF is inactive. */ + adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; + adev->virt.ops = NULL; r = -EAGAIN; goto failed; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 6b11a75839e2..eaccd4bd12a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -565,12 +565,13 @@ static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev) return 0; } + static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct drm_device *dev; unsigned long flags = ent->driver_data; - int ret; + int ret, retry = 0; if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { DRM_INFO("This hardware requires experimental hardware support.\n" @@ -603,8 +604,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); +retry_init: ret = drm_dev_register(dev, ent->driver_data); - if (ret) + if (ret == -EAGAIN && ++retry <= 3) { + DRM_INFO("retry init %d\n", retry); + /* Don't request EX mode too frequently which is attacking */ + msleep(5000); + goto retry_init; + } else if (ret) goto err_pci; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 9ed1bee0fd64..f55021ae788d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -84,7 +84,7 @@ done_free: int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) { struct amdgpu_device *adev; - int r, acpi_status, retry = 0; + int r, acpi_status; #ifdef CONFIG_DRM_AMDGPU_SI if (!amdgpu_si_support) { @@ -120,7 +120,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) } } #endif -retry_init: adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL); if (adev == NULL) { @@ -143,17 +142,7 @@ retry_init: * VRAM allocation */ r = amdgpu_device_init(adev, dev, dev->pdev, flags); - if (r == -EAGAIN && ++retry <= 3) { - adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME; - adev->virt.ops = NULL; - amdgpu_device_fini(adev); - kfree(adev); - dev->dev_private = NULL; - /* Don't request EX mode too frequently which is attacking */ - msleep(5000); - dev_err(&dev->pdev->dev, "retry init %d\n", retry); - goto retry_init; - } else if (r) { + if (r) { dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); goto out; } -- cgit v1.2.3 From 
f59548c8824c8e361120bf87a12fc3a68f17a1ce Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 14 Nov 2017 11:55:50 +0800 Subject: drm/amdgpu:fix NULL pointer access during drv remove MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The NULL pointer access happens because the original logic steps into set_pde_pte() even after gart.ptr has been freed, since gart_unbind() runs twice over the whole GART area. There are also other minor fixes: 1) since gart_init only creates the dummy page, the corresponding gart_fini shouldn't do more, such as unbinding all of the GART; this is unnecessary because by the driver fini stage all GART unbinding has already been done during each IP's SW_FINI (GMC's SW_FINI is the last one called), so remove the GART unbinding step from gart_fini(). 2) gart_fini() is already invoked from each GMC IP's gart_fini routine, e.g. gmc_vx_0_gart_fini(), so there is no need to call it manually from ttm_fini(). 3) amdgpu_gem_force_release() should be called ahead of amdgpu_vm_manager_fini(). Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 9 +-------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- 8 files changed, 7 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a7066f55fe2c..ecc2e60e5f0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1412,6 +1412,7 @@ struct amdgpu_fw_vram_usage { }; int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev); +void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev); /* * CGS diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ee2a5f9a32f0..fc34f745f058 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2506,7 +2506,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) /* evict vram memory */ amdgpu_bo_evict_vram(adev); amdgpu_ib_pool_fini(adev); - amdgpu_fw_reserve_vram_fini(adev); amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); r = amdgpu_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 10eeb307700c..707f85825996 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -253,10 +253,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev) #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS /* Allocate pages table */ adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages); - if (adev->gart.pages == NULL) { - amdgpu_gart_fini(adev); + if (adev->gart.pages == NULL) return -ENOMEM; - } #endif return 0; @@ -271,11 +269,6 @@ int amdgpu_gart_init(struct amdgpu_device *adev) */ void amdgpu_gart_fini(struct amdgpu_device *adev) { - if (adev->gart.ready) { - /* unbind pages */ - amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages); - } - adev->gart.ready = false; #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS vfree(adev->gart.pages); adev->gart.pages = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index d49c768cf3dc..07ecf721ebf9 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1402,6 +1402,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) amdgpu_ttm_debugfs_fini(adev); amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); + amdgpu_fw_reserve_vram_fini(adev); ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); @@ -1412,7 +1413,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) if (adev->gds.oa.total_size) ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); ttm_bo_device_release(&adev->mman.bdev); - amdgpu_gart_fini(adev); amdgpu_ttm_global_fini(adev); adev->mman.initialized = false; DRM_INFO("amdgpu: ttm finalized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index f3e5c9c6a52d..9c672ece9f18 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -899,9 +899,9 @@ static int gmc_v6_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); gmc_v6_0_gart_fini(adev); - amdgpu_gem_force_release(adev); amdgpu_bo_fini(adev); release_firmware(adev->mc.fw); adev->mc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 6d153fa8175c..de7a249f0e24 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1049,9 +1049,9 @@ static int gmc_v7_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); gmc_v7_0_gart_fini(adev); - amdgpu_gem_force_release(adev); amdgpu_bo_fini(adev); release_firmware(adev->mc.fw); adev->mc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 7ee5f21295d4..67778744da5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1146,9 +1146,9 @@ static int gmc_v8_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); gmc_v8_0_gart_fini(adev); - amdgpu_gem_force_release(adev); amdgpu_bo_fini(adev); release_firmware(adev->mc.fw); adev->mc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 4960805bf989..798f7fc2d4e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -885,9 +885,9 @@ static int gmc_v9_0_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); gmc_v9_0_gart_fini(adev); - amdgpu_gem_force_release(adev); amdgpu_bo_fini(adev); return 0; -- cgit v1.2.3 From 9921167d9086d666217fd98be5a28bb43c193a34 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 14 Nov 2017 16:50:31 +0800 Subject: drm/amdgpu:cleanup unused stack var MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index fc34f745f058..29c81c387420 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -684,7 +684,6 @@ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev) { int r = 0; int i; - u64 gpu_addr; u64 vram_size = adev->mc.visible_vram_size; u64 offset = adev->fw_vram_usage.start_offset; u64 size = adev->fw_vram_usage.size; @@ -728,7 +727,7 @@ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev) AMDGPU_GEM_DOMAIN_VRAM, adev->fw_vram_usage.start_offset, (adev->fw_vram_usage.start_offset + - adev->fw_vram_usage.size), &gpu_addr); + adev->fw_vram_usage.size), NULL); if (r) goto error_pin; r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, -- cgit v1.2.3 From 84e5b5161e7159bcf24dfeed9f985bd86e354ea8 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 14 Nov 2017 16:52:14 +0800 Subject: drm/amdgpu:free CSA in unified place MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit instead of doing it in each GFX ip's sw_fini Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 6 ++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 1 - drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 - 5 files changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 29c81c387420..fdce06b5b2b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1893,6 +1893,7 @@ static int amdgpu_fini(struct amdgpu_device *adev) if (!adev->ip_blocks[i].status.hw) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { + amdgpu_free_static_csa(adev); amdgpu_wb_fini(adev); amdgpu_vram_scratch_fini(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 0769cb935318..e7dfb7b44b4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -47,6 +47,12 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev) return 0; } +void amdgpu_free_static_csa(struct amdgpu_device *adev) { + amdgpu_bo_free_kernel(&adev->virt.csa_obj, + &adev->virt.csa_vmid0_addr, + NULL); +} + /* * amdgpu_map_static_csa should be called during amdgpu_vm_init * it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index f77d116c7a1c..6a83425aa9ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -283,6 +283,7 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); int amdgpu_allocate_static_csa(struct amdgpu_device *adev); int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va **bo_va); +void amdgpu_free_static_csa(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ee77c949bf87..d02493cf9175 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2114,7 +2114,6 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_gfx_compute_mqd_sw_fini(adev); 
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); amdgpu_gfx_kiq_fini(adev); - amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL); gfx_v8_0_mec_fini(adev); gfx_v8_0_rlc_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 5ba24792f801..560d8e76d14c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1464,7 +1464,6 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_gfx_compute_mqd_sw_fini(adev); amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); amdgpu_gfx_kiq_fini(adev); - amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL); gfx_v9_0_mec_fini(adev); gfx_v9_0_ngg_fini(adev); -- cgit v1.2.3 From 241361350648adaaa3510410245a66a7c02b2b8e Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 14 Nov 2017 16:56:55 +0800 Subject: drm/amdgpu:show error message if fail on event4 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Monk Liu Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index fdce06b5b2b8..636a9138067e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1942,7 +1942,8 @@ static int amdgpu_fini(struct amdgpu_device *adev) } if (amdgpu_sriov_vf(adev)) - amdgpu_virt_release_full_gpu(adev, false); + if (amdgpu_virt_release_full_gpu(adev, false)) + DRM_ERROR("failed to release exclusive mode on fini\n"); return 0; } -- cgit v1.2.3 From 31b8adab3247eaa6cffbaa59276456b694718cc8 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 15 Nov 2017 20:07:38 +0100 Subject: drm/amdgpu: require a root bus window above 4GB for BAR resize MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't even try to resize the BAR when there is no window above 4GB. 
Signed-off-by: Christian König Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 636a9138067e..4e668b70371e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -764,6 +764,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) { u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size); u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1; + struct pci_bus *root; + struct resource *res; + unsigned i; u16 cmd; int r; @@ -771,6 +774,21 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return 0; + /* Check if the root BUS has 64bit memory resources */ + root = adev->pdev->bus; + while (root->parent) + root = root->parent; + + pci_bus_for_each_resource(root, res, i) { + if (res && res->flags & IORESOURCE_MEM_64 && + res->start > 0x100000000ull) + break; + } + + /* Trying to resize is pointless without a root hub window above 4GB */ + if (!res) + return 0; + /* Disable memory decoding while we change the BAR addresses and size */ pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); pci_write_config_word(adev->pdev, PCI_COMMAND, -- cgit v1.2.3 From 3d647c8f930190f728c997c3ac3d987f992420ed Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 16 Nov 2017 19:36:10 +0100 Subject: drm/amdgpu: remove VRAM size reduction v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove some outdated comments and all code which tries to reduce the VRAM size mapped into the MC. This is superfluous and misleading since we never actually program the size. v2: handle gmc_v6_0.c as well Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 30 +----------------------------- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 5 ----- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 6 ------ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 6 ------ 4 files changed, 1 insertion(+), 46 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4e668b70371e..a81743d06bb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -578,41 +578,13 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) * @base: base address at which to put VRAM * * Function will try to place VRAM at base address provided - * as parameter (which is so far either PCI aperture address or - * for IGP TOM base address). - * - * If there is not enough space to fit the unvisible VRAM in the 32bits - * address space then we limit the VRAM size to the aperture. - * - * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size, - * this shouldn't be a problem as we are using the PCI aperture as a reference. - * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but - * not IGP. 
- * - * Note: we use mc_vram_size as on some board we need to program the mc to - * cover the whole aperture even if VRAM size is inferior to aperture size - * Novell bug 204882 + along with lots of ubuntu ones - * - * Note: when limiting vram it's safe to overwritte real_vram_size because - * we are not in case where real_vram_size is inferior to mc_vram_size (ie - * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu - * ones) - * - * Note: IGP TOM addr should be the same as the aperture addr, we don't - * explicitly check for that though. - * - * FIXME: when reducing VRAM size align new size on power of 2. + * as parameter. */ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base) { uint64_t limit = (uint64_t)amdgpu_vram_limit << 20; mc->vram_start = base; - if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) { - dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n"); - mc->real_vram_size = mc->aper_size; - mc->mc_vram_size = mc->aper_size; - } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; if (limit && limit < mc->real_vram_size) mc->real_vram_size = limit; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 9c672ece9f18..1e7f52f109b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -222,11 +222,6 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; base <<= 24; - if (mc->mc_vram_size > 0xFFC0000000ULL) { - dev_warn(adev->dev, "limiting VRAM\n"); - mc->real_vram_size = 0xFFC0000000ULL; - mc->mc_vram_size = 0xFFC0000000ULL; - } amdgpu_vram_location(adev, &adev->mc, base); amdgpu_gart_location(adev, mc); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index de7a249f0e24..d521862804ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -240,12 +240,6 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; base <<= 24; - if (mc->mc_vram_size > 0xFFC0000000ULL) { - /* leave room for at least 1024M GTT */ - dev_warn(adev->dev, "limiting VRAM\n"); - mc->real_vram_size = 0xFFC0000000ULL; - mc->mc_vram_size = 0xFFC0000000ULL; - } amdgpu_vram_location(adev, &adev->mc, base); amdgpu_gart_location(adev, mc); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 67778744da5a..bd3f842cca00 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -405,12 +405,6 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; base <<= 24; - if (mc->mc_vram_size > 0xFFC0000000ULL) { - /* leave room for at least 1024M GTT */ - dev_warn(adev->dev, "limiting VRAM\n"); - mc->real_vram_size = 0xFFC0000000ULL; - mc->mc_vram_size = 0xFFC0000000ULL; - } amdgpu_vram_location(adev, &adev->mc, base); amdgpu_gart_location(adev, mc); } -- cgit v1.2.3 From b98f1b9e5e71b636036cc6c0e1a3b428acd216cb Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 16 Nov 2017 20:12:51 +0100 Subject: drm/amdgpu: align GTT start to 4GB v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For VCE to work properly the start of the GTT space must be aligned to a 4GB boundary. 
v2: add comment why we do this Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a81743d06bb0..a43d096ebb52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -622,7 +622,10 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) dev_warn(adev->dev, "limiting GTT\n"); mc->gart_size = size_af; } - mc->gart_start = mc->vram_end + 1; + /* VCE doesn't like it when BOs cross a 4GB segment, so align + * the GART base on a 4GB boundary as well. + */ + mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL); } mc->gart_end = mc->gart_start + mc->gart_size - 1; dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", -- cgit v1.2.3 From c13c55d611865a99871bb86cf1fc0017b8cc605a Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 12 Apr 2017 15:33:00 +0200 Subject: drm/ttm: use an operation context for ttm_bo_mem_space v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of specifying interruptible and no_wait_gpu manually. v2: rebase Signed-off-by: Christian König Reviewed-by: Michel Dänzer Reviewed-by: Chunming Zhou Tested-by: Dieter Nützel Tested-by: Michel Dänzer Acked-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 11 ++++++----- drivers/gpu/drm/nouveau/nouveau_bo.c | 6 ++++-- drivers/gpu/drm/radeon/radeon_ttm.c | 8 ++++---- drivers/gpu/drm/ttm/ttm_bo.c | 22 +++++++++++----------- include/drm/ttm/ttm_bo_driver.h | 3 +-- 6 files changed, 29 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a43d096ebb52..0750b323e03f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -657,6 +657,7 @@ void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev) */ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev) { + struct ttm_operation_ctx ctx = { false, false }; int r = 0; int i; u64 vram_size = adev->mc.visible_vram_size; @@ -693,8 +694,8 @@ int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev) } ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, - false, false); + r = ttm_bo_mem_space(&bo->tbo, &bo->placement, + &bo->tbo.mem, &ctx); if (r) goto error_pin; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 07ecf721ebf9..48b24155462c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -471,6 +471,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { + struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; struct amdgpu_device *adev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; @@ -488,8 +489,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, placements.fpfn = 0; placements.lpfn = 0; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, - interruptible, 
no_wait_gpu); + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx); if (unlikely(r)) { return r; } @@ -518,6 +518,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { + struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; struct amdgpu_device *adev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; @@ -535,8 +536,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, placements.fpfn = 0; placements.lpfn = 0; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, - interruptible, no_wait_gpu); + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx); if (unlikely(r)) { return r; } @@ -878,6 +878,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); + struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_ttm_tt *gtt = (void*)bo->ttm; struct ttm_mem_reg tmp; struct ttm_placement placement; @@ -900,7 +901,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp, false, false); + r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); if (unlikely(r)) return r; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 1cf3da3d7bea..dae90cb748a4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1199,6 +1199,7 @@ static int nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_gpu, struct ttm_mem_reg *new_reg) { + struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; struct ttm_place placement_memtype = { .fpfn = 0, .lpfn = 0, @@ -1213,7 +1214,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, tmp_reg = *new_reg; tmp_reg.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu); + ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx); if (ret) return ret; @@ -1235,6 +1236,7 @@ static int nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, bool no_wait_gpu, struct ttm_mem_reg *new_reg) { + struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; struct ttm_place placement_memtype = { .fpfn = 0, .lpfn = 0, @@ -1249,7 +1251,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, tmp_reg = *new_reg; tmp_reg.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu); + ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 6ada64db00e9..ef7cb83331b3 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -311,6 +311,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { + struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; @@ -328,8 +329,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, placements.fpfn = 0; placements.lpfn = 0; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, - interruptible, no_wait_gpu); + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx); if (unlikely(r)) 
{ return r; } @@ -358,6 +358,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { + struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; struct radeon_device *rdev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; @@ -375,8 +376,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, placements.fpfn = 0; placements.lpfn = 0; placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, - interruptible, no_wait_gpu); + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx); if (unlikely(r)) { return r; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 1f6957adc19e..63c1a97b3589 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -656,6 +656,7 @@ EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_gpu) { + struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_reg evict_mem; struct ttm_placement placement; @@ -671,8 +672,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, placement.num_placement = 0; placement.num_busy_placement = 0; bdev->driver->evict_flags(bo, &placement); - ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, - no_wait_gpu); + ret = ttm_bo_mem_space(bo, &placement, &evict_mem, &ctx); if (ret) { if (ret != -ERESTARTSYS) { pr_err("Failed to find memory space for buffer 0x%p eviction\n", @@ -682,8 +682,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, goto out; } - ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, - no_wait_gpu); + ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, + interruptible, no_wait_gpu); if (unlikely(ret)) { if (ret != -ERESTARTSYS) pr_err("Buffer eviction failed\n"); @@ -903,8 +903,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem, - bool interruptible, - bool no_wait_gpu) + struct ttm_operation_ctx *ctx) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man; @@ -999,7 +998,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, } ret = ttm_bo_mem_force_space(bo, mem_type, place, mem, - interruptible, no_wait_gpu); + ctx->interruptible, + ctx->no_wait_gpu); if (ret == 0 && mem->mm_node) { mem->placement = cur_flags; return 0; @@ -1022,6 +1022,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_gpu) { + struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; int ret = 0; struct ttm_mem_reg mem; @@ -1035,12 +1036,11 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, /* * Determine where to move the buffer. 
*/ - ret = ttm_bo_mem_space(bo, placement, &mem, - interruptible, no_wait_gpu); + ret = ttm_bo_mem_space(bo, placement, &mem, &ctx); if (ret) goto out_unlock; - ret = ttm_bo_handle_move_mem(bo, &mem, false, - interruptible, no_wait_gpu); + ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, + no_wait_gpu); out_unlock: if (ret && mem.mm_node) ttm_bo_mem_put(bo, &mem); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index a7c826a1e53f..494322a5f239 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -743,8 +743,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem); int ttm_bo_mem_space(struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem, - bool interruptible, - bool no_wait_gpu); + struct ttm_operation_ctx *ctx); void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem); void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, -- cgit v1.2.3 From 341b759e642a847c3a88401db4263dec9f36d552 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 23 Nov 2017 11:23:18 +0100 Subject: drm/amdgpu: allow non pot VM size values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The VM size actually doesn't need to be a power of two. Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0750b323e03f..748ecd74726d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1184,12 +1184,6 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev) if (amdgpu_vm_size == -1) return; - if (!is_power_of_2(amdgpu_vm_size)) { - dev_warn(adev->dev, "VM size (%d) must be a power of 2\n", - amdgpu_vm_size); - goto def_value; - } - if (amdgpu_vm_size < 1) { dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", amdgpu_vm_size); -- cgit v1.2.3 From f3368128bacece315aa0384b54b7da9e6f0909cd Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 23 Nov 2017 12:57:18 +0100 Subject: drm/amdgpu: move validation of the VM size into the VM code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This moves validation of the VM size parameter into amdgpu_vm_adjust_size(). 
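For illustration, the bound now derives from the MC address width passed in as max_bits rather than a hard-coded 1 TB; a condensed sketch of the clamp added to amdgpu_vm_adjust_size() by the hunk below:

	if (amdgpu_vm_size != -1) {
		/* 2^(max_bits - 30) GB fit into a max_bits wide address space */
		unsigned max_size = 1 << (max_bits - 30);

		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
	}

For the 40-bit parts this works out to 1 << (40 - 30) = 1024 GB, i.e. the same 1 TB ceiling the removed device-level check enforced.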
Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 16 +--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 13 +++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 ++- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6 +++--- 7 files changed, 20 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 748ecd74726d..cd74beb431c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1187,22 +1187,8 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev) if (amdgpu_vm_size < 1) { dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n", amdgpu_vm_size); - goto def_value; + amdgpu_vm_size = -1; } - - /* - * Max GPUVM size for Cayman, SI, CI VI are 40 bits. - */ - if (amdgpu_vm_size > 1024) { - dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n", - amdgpu_vm_size); - goto def_value; - } - - return; - -def_value: - amdgpu_vm_size = -1; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 82a6f6c86aaf..44430c4820cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2580,13 +2580,22 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) * @vm_size: the default vm size if it's set auto */ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, - uint32_t fragment_size_default, unsigned max_level) + uint32_t fragment_size_default, unsigned max_level, + unsigned max_bits) { uint64_t tmp; /* adjust vm size first */ - if (amdgpu_vm_size != -1) + if (amdgpu_vm_size != -1) { + unsigned max_size = 1 << (max_bits - 30); + vm_size = amdgpu_vm_size; + if (vm_size > max_size) { + dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", + amdgpu_vm_size, max_size); + vm_size = max_size; + } + } adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 54e540d5e8d7..43ea131dd411 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -325,7 +325,8 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, - uint32_t fragment_size_default, unsigned max_level); + uint32_t fragment_size_default, unsigned max_level, + unsigned max_bits); int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_job *job); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 49224bf38324..468281f10e8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -832,7 +832,7 @@ static int gmc_v6_0_sw_init(void *handle) if (r) return r; - amdgpu_vm_adjust_size(adev, 64, 9, 1); + amdgpu_vm_adjust_size(adev, 64, 9, 1, 40); adev->mc.mc_mask = 0xffffffffffULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index c39cf8440afd..68a85051f4b7 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -971,7 +971,7 @@ static int gmc_v7_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. */ - amdgpu_vm_adjust_size(adev, 64, 9, 1); + amdgpu_vm_adjust_size(adev, 64, 9, 1, 40); /* Set the internal MC address mask * This is the max address of the GPU's diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 421e751a0464..46ec97e70e5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1068,7 +1068,7 @@ static int gmc_v8_0_sw_init(void *handle) * Currently set to 4GB ((1 << 20) 4k pages). * Max GPUVM size for cayman and SI is 40 bits. */ - amdgpu_vm_adjust_size(adev, 64, 9, 1); + amdgpu_vm_adjust_size(adev, 64, 9, 1, 40); /* Set the internal MC address mask * This is the max address of the GPU's diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 729e4d591293..cc972153d401 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -770,10 +770,10 @@ static int gmc_v9_0_sw_init(void *handle) case CHIP_RAVEN: adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; if (adev->rev_id == 0x0 || adev->rev_id == 0x1) - amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3); + amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); else /* vm_size is 64GB for legacy 2-level page support */ - amdgpu_vm_adjust_size(adev, 64, 9, 1); + amdgpu_vm_adjust_size(adev, 64, 9, 1, 48); break; case CHIP_VEGA10: /* XXX Don't know how to get VRAM type yet. */ @@ -783,7 +783,7 @@ static int gmc_v9_0_sw_init(void *handle) * vm size is 256TB (48bit), maximum size of Vega10, * block size 512 (9bit) */ - amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3); + amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48); break; default: break; -- cgit v1.2.3 From 97489129c220ef67195c886f9f2bad9651edd601 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 27 Nov 2017 16:22:05 +0100 Subject: drm/amdgpu: allow specifying vm_block_size for multi level PDs v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch allows specifying the vm_block_size even when multi level page directories are active. 
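Rather than forcing a fixed 9-bit block size for multi-level page directories, the requested value is now clamped so the directory levels and the block still fit within the address space; a worked instance of the clamp from the amdgpu_vm.c hunk below (the local variable limit is shorthand for illustration):

	/* address bits minus the 4K page shift minus 9 bits per PD level;
	 * e.g. a 48-bit GPUVM with three levels: 48 - 12 - 9 * 3 = 9 bits
	 */
	unsigned limit = max_bits - AMDGPU_GPU_PAGE_SHIFT -
			 9 * adev->vm_manager.num_level;

	adev->vm_manager.block_size = min((unsigned)amdgpu_vm_block_size, limit);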
v2: fix signed/unsigned compare warning Signed-off-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 14 +------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 +++++++++------ 2 files changed, 10 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index cd74beb431c4..70c9e5756b02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1162,20 +1162,8 @@ static void amdgpu_check_block_size(struct amdgpu_device *adev) if (amdgpu_vm_block_size < 9) { dev_warn(adev->dev, "VM page table size (%d) too small\n", amdgpu_vm_block_size); - goto def_value; + amdgpu_vm_block_size = -1; } - - if (amdgpu_vm_block_size > 24 || - (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) { - dev_warn(adev->dev, "VM page table size (%d) too large\n", - amdgpu_vm_block_size); - goto def_value; - } - - return; - -def_value: - amdgpu_vm_block_size = -1; } static void amdgpu_check_vm_size(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 44430c4820cc..7de519b86b78 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2600,18 +2600,21 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); + if (amdgpu_vm_block_size != -1) + tmp >>= amdgpu_vm_block_size - 9; tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1; adev->vm_manager.num_level = min(max_level, (unsigned)tmp); /* block size depends on vm size and hw setup*/ - if (adev->vm_manager.num_level > 1) - /* Use fixed block_size for multi level page tables */ - adev->vm_manager.block_size = 9; - else if (amdgpu_vm_block_size == -1) + if (amdgpu_vm_block_size != -1) adev->vm_manager.block_size = - amdgpu_vm_get_block_size(vm_size); + min((unsigned)amdgpu_vm_block_size, max_bits + - AMDGPU_GPU_PAGE_SHIFT + - 9 * adev->vm_manager.num_level); + else if (adev->vm_manager.num_level > 1) + adev->vm_manager.block_size = 9; else - adev->vm_manager.block_size = amdgpu_vm_block_size; + adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp); if (amdgpu_vm_fragment_size == -1) adev->vm_manager.fragment_size = fragment_size_default; -- cgit v1.2.3 From 1b1f42d8fde4fef1ed7873bf5aa91755f8c3de35 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Wed, 6 Dec 2017 17:49:39 +0100 Subject: drm: move amd_gpu_scheduler into common location MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This moves and renames the AMDGPU scheduler to a common location in DRM in order to facilitate re-use by other drivers. This is mostly a straightforward rename with no code changes. One notable exception is the function to_drm_sched_fence(), which is no longer an inline header function to avoid the need to export the drm_sched_fence_ops_scheduled and drm_sched_fence_ops_finished structures.
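For driver code the conversion is mechanical: include <drm/gpu_scheduler.h> instead of the amd-internal header and switch the amd_sched_ prefix to drm_sched_. A condensed before/after, taken from the amdgpu hunks below:

	/* before: #include "gpu_scheduler.h" */
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs, NULL);

	/* after: #include <drm/gpu_scheduler.h> */
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs, NULL);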
Reviewed-by: Chunming Zhou Tested-by: Dieter Nützel Acked-by: Alex Deucher Signed-off-by: Lucas Stach Signed-off-by: Alex Deucher --- drivers/gpu/drm/Kconfig | 5 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/amd/amdgpu/Makefile | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 16 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 38 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 20 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 14 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 12 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 20 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 14 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 10 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 7 +- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 8 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 8 +- drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | 82 --- drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 744 ------------------------ drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 186 ------ drivers/gpu/drm/amd/scheduler/sched_fence.c | 173 ------ drivers/gpu/drm/amd/scheduler/spsc_queue.h | 121 ---- drivers/gpu/drm/scheduler/Makefile | 4 + drivers/gpu/drm/scheduler/gpu_scheduler.c | 744 ++++++++++++++++++++++++ drivers/gpu/drm/scheduler/sched_fence.c | 187 ++++++ include/drm/gpu_scheduler.h | 176 ++++++ include/drm/gpu_scheduler_trace.h | 82 +++ include/drm/spsc_queue.h | 122 ++++ 39 files changed, 1440 insertions(+), 1427 deletions(-) delete mode 100644 drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h delete mode 100644 drivers/gpu/drm/amd/scheduler/gpu_scheduler.c delete mode 100644 drivers/gpu/drm/amd/scheduler/gpu_scheduler.h delete mode 100644 drivers/gpu/drm/amd/scheduler/sched_fence.c delete mode 100644 drivers/gpu/drm/amd/scheduler/spsc_queue.h create mode 100644 drivers/gpu/drm/scheduler/Makefile create mode 100644 drivers/gpu/drm/scheduler/gpu_scheduler.c create mode 100644 drivers/gpu/drm/scheduler/sched_fence.c create mode 100644 include/drm/gpu_scheduler.h create mode 100644 include/drm/gpu_scheduler_trace.h create mode 100644 include/drm/spsc_queue.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 4d9f21831741..ee38a3db1890 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -149,6 +149,10 @@ config DRM_VM bool depends on DRM && MMU +config DRM_SCHED + tristate + depends on DRM + source "drivers/gpu/drm/i2c/Kconfig" source "drivers/gpu/drm/arm/Kconfig" @@ -178,6 +182,7 @@ config DRM_AMDGPU depends on DRM && PCI && MMU select FW_LOADER select DRM_KMS_HELPER + select DRM_SCHED select DRM_TTM select POWER_SUPPLY select HWMON diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index e9500844333e..1f6ba9e34e31 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -101,3 +101,4 @@ obj-$(CONFIG_DRM_MXSFB) += mxsfb/ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/ 
obj-$(CONFIG_DRM_PL111) += pl111/ obj-$(CONFIG_DRM_TVE200) += tve200/ +obj-$(CONFIG_DRM_SCHED) += scheduler/ diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 90202cf4cd1e..a7391d49ad40 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -135,10 +135,7 @@ amdgpu-y += \ amdgpu-y += amdgpu_cgs.o # GPU scheduler -amdgpu-y += \ - ../scheduler/gpu_scheduler.o \ - ../scheduler/sched_fence.o \ - amdgpu_job.o +amdgpu-y += amdgpu_job.o # ACP componet ifneq ($(CONFIG_DRM_AMD_ACP),) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5e2958a79928..5c8648ec2cd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -45,6 +45,7 @@ #include #include #include +#include #include #include "dm_pp_interface.h" @@ -68,7 +69,6 @@ #include "amdgpu_vcn.h" #include "amdgpu_mn.h" #include "amdgpu_dm.h" -#include "gpu_scheduler.h" #include "amdgpu_virt.h" #include "amdgpu_gart.h" @@ -689,7 +689,7 @@ struct amdgpu_ib { uint32_t flags; }; -extern const struct amd_sched_backend_ops amdgpu_sched_ops; +extern const struct drm_sched_backend_ops amdgpu_sched_ops; int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, struct amdgpu_job **job, struct amdgpu_vm *vm); @@ -699,7 +699,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_free(struct amdgpu_job *job); int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, - struct amd_sched_entity *entity, void *owner, + struct drm_sched_entity *entity, void *owner, struct dma_fence **f); /* @@ -732,7 +732,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev, struct amdgpu_ctx_ring { uint64_t sequence; struct dma_fence **fences; - struct amd_sched_entity entity; + struct drm_sched_entity entity; }; struct amdgpu_ctx { @@ -746,8 +746,8 @@ struct amdgpu_ctx { struct dma_fence **fences; struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; bool preamble_presented; - enum amd_sched_priority init_priority; - enum amd_sched_priority override_priority; + enum drm_sched_priority init_priority; + enum drm_sched_priority override_priority; struct mutex lock; atomic_t guilty; }; @@ -767,7 +767,7 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, uint64_t seq); void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, - enum amd_sched_priority priority); + enum drm_sched_priority priority); int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); @@ -1116,7 +1116,7 @@ struct amdgpu_cs_parser { #define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */ struct amdgpu_job { - struct amd_sched_job base; + struct drm_sched_job base; struct amdgpu_device *adev; struct amdgpu_vm *vm; struct amdgpu_ring *ring; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 4cea9ab237ac..44523a88ebb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1150,7 +1150,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) { struct amdgpu_ring *ring = p->job->ring; - struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; + struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity; struct amdgpu_job *job; unsigned i; uint64_t 
seq; @@ -1173,7 +1173,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job = p->job; p->job = NULL; - r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp); + r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp); if (r) { amdgpu_job_free(job); amdgpu_mn_unlock(p->mn); @@ -1202,7 +1202,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_ring_priority_get(job->ring, job->base.s_priority); trace_amdgpu_cs_ioctl(job); - amd_sched_entity_push_job(&job->base, entity); + drm_sched_entity_push_job(&job->base, entity); ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); amdgpu_mn_unlock(p->mn); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index d71dc164b469..09d35051fdd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -28,10 +28,10 @@ #include "amdgpu_sched.h" static int amdgpu_ctx_priority_permit(struct drm_file *filp, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { /* NORMAL and below are accessible by everyone */ - if (priority <= AMD_SCHED_PRIORITY_NORMAL) + if (priority <= DRM_SCHED_PRIORITY_NORMAL) return 0; if (capable(CAP_SYS_NICE)) @@ -44,14 +44,14 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp, } static int amdgpu_ctx_init(struct amdgpu_device *adev, - enum amd_sched_priority priority, + enum drm_sched_priority priority, struct drm_file *filp, struct amdgpu_ctx *ctx) { unsigned i, j; int r; - if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX) + if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) return -EINVAL; r = amdgpu_ctx_priority_permit(filp, priority); @@ -78,19 +78,19 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->reset_counter_query = ctx->reset_counter; ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); ctx->init_priority = priority; - ctx->override_priority = AMD_SCHED_PRIORITY_UNSET; + ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; /* create context entity for each ring */ for (i = 0; i < adev->num_rings; i++) { struct amdgpu_ring *ring = adev->rings[i]; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; rq = &ring->sched.sched_rq[priority]; if (ring == &adev->gfx.kiq.ring) continue; - r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity, + r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity, rq, amdgpu_sched_jobs, &ctx->guilty); if (r) goto failed; @@ -104,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, failed: for (j = 0; j < i; j++) - amd_sched_entity_fini(&adev->rings[j]->sched, + drm_sched_entity_fini(&adev->rings[j]->sched, &ctx->rings[j].entity); kfree(ctx->fences); ctx->fences = NULL; @@ -126,7 +126,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) ctx->fences = NULL; for (i = 0; i < adev->num_rings; i++) - amd_sched_entity_fini(&adev->rings[i]->sched, + drm_sched_entity_fini(&adev->rings[i]->sched, &ctx->rings[i].entity); amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr); @@ -137,7 +137,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) static int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv, struct drm_file *filp, - enum amd_sched_priority priority, + enum drm_sched_priority priority, uint32_t *id) { struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr; @@ -266,7 +266,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, { int r; uint32_t id; - enum amd_sched_priority priority; + enum drm_sched_priority priority; union drm_amdgpu_ctx *args = 
data; struct amdgpu_device *adev = dev->dev_private; @@ -278,8 +278,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, /* For backwards compatibility reasons, we need to accept * ioctls with garbage in the priority field */ - if (priority == AMD_SCHED_PRIORITY_INVALID) - priority = AMD_SCHED_PRIORITY_NORMAL; + if (priority == DRM_SCHED_PRIORITY_INVALID) + priority = DRM_SCHED_PRIORITY_NORMAL; switch (args->in.op) { case AMDGPU_CTX_OP_ALLOC_CTX: @@ -385,18 +385,18 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, } void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { int i; struct amdgpu_device *adev = ctx->adev; - struct amd_sched_rq *rq; - struct amd_sched_entity *entity; + struct drm_sched_rq *rq; + struct drm_sched_entity *entity; struct amdgpu_ring *ring; - enum amd_sched_priority ctx_prio; + enum drm_sched_priority ctx_prio; ctx->override_priority = priority; - ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ? + ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? ctx->init_priority : ctx->override_priority; for (i = 0; i < adev->num_rings; i++) { @@ -407,7 +407,7 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) continue; - amd_sched_entity_set_rq(entity, rq); + drm_sched_entity_set_rq(entity, rq); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 70c9e5756b02..98cc4df02b14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3058,7 +3058,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) continue; kthread_park(ring->sched.thread); - amd_sched_hw_job_reset(&ring->sched, &job->base); + drm_sched_hw_job_reset(&ring->sched, &job->base); /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(ring); @@ -3111,7 +3111,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) if (job && job->ring->idx != i) continue; - amd_sched_job_recovery(&ring->sched); + drm_sched_job_recovery(&ring->sched); kthread_unpark(ring->sched.thread); } } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 31383e004947..1d8011bca182 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -912,7 +912,7 @@ static int __init amdgpu_init(void) if (r) goto error_fence; - r = amd_sched_fence_slab_init(); + r = drm_sched_fence_slab_init(); if (r) goto error_sched; @@ -944,7 +944,7 @@ static void __exit amdgpu_exit(void) pci_unregister_driver(pdriver); amdgpu_unregister_atpx_handler(); amdgpu_sync_fini(); - amd_sched_fence_slab_fini(); + drm_sched_fence_slab_fini(); amdgpu_fence_slab_fini(); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 604ac03a42e4..14699637913a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -445,7 +445,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, */ timeout = MAX_SCHEDULE_TIMEOUT; } - r = amd_sched_init(&ring->sched, &amdgpu_sched_ops, + r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, num_hw_submission, amdgpu_job_hang_limit, timeout, ring->name); if (r) { @@ -503,7 +503,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) } 
amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); - amd_sched_fini(&ring->sched); + drm_sched_fini(&ring->sched); del_timer_sync(&ring->fence_drv.fallback_timer); for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) dma_fence_put(ring->fence_drv.fences[j]); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index bdc210ac74f8..013c0a8cfb60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -28,7 +28,7 @@ #include "amdgpu.h" #include "amdgpu_trace.h" -static void amdgpu_job_timedout(struct amd_sched_job *s_job) +static void amdgpu_job_timedout(struct drm_sched_job *s_job) { struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); @@ -96,7 +96,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) amdgpu_ib_free(job->adev, &job->ibs[i], f); } -static void amdgpu_job_free_cb(struct amd_sched_job *s_job) +static void amdgpu_job_free_cb(struct drm_sched_job *s_job) { struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base); @@ -118,7 +118,7 @@ void amdgpu_job_free(struct amdgpu_job *job) } int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, - struct amd_sched_entity *entity, void *owner, + struct drm_sched_entity *entity, void *owner, struct dma_fence **f) { int r; @@ -127,7 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, if (!f) return -EINVAL; - r = amd_sched_job_init(&job->base, &ring->sched, entity, owner); + r = drm_sched_job_init(&job->base, &ring->sched, entity, owner); if (r) return r; @@ -136,13 +136,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, *f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); amdgpu_ring_priority_get(job->ring, job->base.s_priority); - amd_sched_entity_push_job(&job->base, entity); + drm_sched_entity_push_job(&job->base, entity); return 0; } -static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, - struct amd_sched_entity *s_entity) +static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity) { struct amdgpu_job *job = to_amdgpu_job(sched_job); struct amdgpu_vm *vm = job->vm; @@ -151,7 +151,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit); if (fence && explicit) { - if (amd_sched_dependency_optimized(fence, s_entity)) { + if (drm_sched_dependency_optimized(fence, s_entity)) { r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false); if (r) DRM_ERROR("Error adding fence to sync (%d)\n", r); @@ -173,7 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job, return fence; } -static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) +static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) { struct dma_fence *fence = NULL, *finished; struct amdgpu_device *adev; @@ -211,7 +211,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) return fence; } -const struct amd_sched_backend_ops amdgpu_sched_ops = { +const struct drm_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_job_dependency, .run_job = amdgpu_job_run, .timedout_job = amdgpu_job_timedout, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index a98fbbb4739f..41c75f9632dc 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -164,7 +164,7 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) * Release a request for executing at @priority */ void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { int i; @@ -175,7 +175,7 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring, return; /* no need to restore if the job is already at the lowest priority */ - if (priority == AMD_SCHED_PRIORITY_NORMAL) + if (priority == DRM_SCHED_PRIORITY_NORMAL) return; mutex_lock(&ring->priority_mutex); @@ -184,8 +184,8 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring, goto out_unlock; /* decay priority to the next level with a job available */ - for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) { - if (i == AMD_SCHED_PRIORITY_NORMAL + for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) { + if (i == DRM_SCHED_PRIORITY_NORMAL || atomic_read(&ring->num_jobs[i])) { ring->priority = i; ring->funcs->set_priority(ring, i); @@ -206,7 +206,7 @@ out_unlock: * Request a ring's priority to be raised to @priority (refcounted). */ void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { if (!ring->funcs->set_priority) return; @@ -317,12 +317,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } ring->max_dw = max_dw; - ring->priority = AMD_SCHED_PRIORITY_NORMAL; + ring->priority = DRM_SCHED_PRIORITY_NORMAL; mutex_init(&ring->priority_mutex); INIT_LIST_HEAD(&ring->lru_list); amdgpu_ring_lru_touch(adev, ring); - for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i) + for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i) atomic_set(&ring->num_jobs[i], 0); if (amdgpu_debugfs_ring_init(adev, ring)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index a6b89e3932a5..641e3fd7ba3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -25,7 +25,7 @@ #define __AMDGPU_RING_H__ #include -#include "gpu_scheduler.h" +#include /* max number of rings */ #define AMDGPU_MAX_RINGS 18 @@ -154,14 +154,14 @@ struct amdgpu_ring_funcs { void (*emit_tmz)(struct amdgpu_ring *ring, bool start); /* priority functions */ void (*set_priority) (struct amdgpu_ring *ring, - enum amd_sched_priority priority); + enum drm_sched_priority priority); }; struct amdgpu_ring { struct amdgpu_device *adev; const struct amdgpu_ring_funcs *funcs; struct amdgpu_fence_driver fence_drv; - struct amd_gpu_scheduler sched; + struct drm_gpu_scheduler sched; struct list_head lru_list; struct amdgpu_bo *ring_obj; @@ -196,7 +196,7 @@ struct amdgpu_ring { unsigned vm_inv_eng; bool has_compute_vm_bug; - atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX]; + atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX]; struct mutex priority_mutex; /* protected by priority_mutex */ int priority; @@ -212,9 +212,9 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_ring_commit(struct amdgpu_ring *ring); void amdgpu_ring_undo(struct amdgpu_ring *ring); void amdgpu_ring_priority_get(struct amdgpu_ring *ring, - enum amd_sched_priority priority); + enum drm_sched_priority priority); void amdgpu_ring_priority_put(struct amdgpu_ring *ring, - enum amd_sched_priority priority); + enum drm_sched_priority priority); int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned ring_size, struct amdgpu_irq_src *irq_src, 
unsigned irq_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index 290cc3f9c433..86a0715d9431 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c @@ -29,29 +29,29 @@ #include "amdgpu_vm.h" -enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority) +enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority) { switch (amdgpu_priority) { case AMDGPU_CTX_PRIORITY_VERY_HIGH: - return AMD_SCHED_PRIORITY_HIGH_HW; + return DRM_SCHED_PRIORITY_HIGH_HW; case AMDGPU_CTX_PRIORITY_HIGH: - return AMD_SCHED_PRIORITY_HIGH_SW; + return DRM_SCHED_PRIORITY_HIGH_SW; case AMDGPU_CTX_PRIORITY_NORMAL: - return AMD_SCHED_PRIORITY_NORMAL; + return DRM_SCHED_PRIORITY_NORMAL; case AMDGPU_CTX_PRIORITY_LOW: case AMDGPU_CTX_PRIORITY_VERY_LOW: - return AMD_SCHED_PRIORITY_LOW; + return DRM_SCHED_PRIORITY_LOW; case AMDGPU_CTX_PRIORITY_UNSET: - return AMD_SCHED_PRIORITY_UNSET; + return DRM_SCHED_PRIORITY_UNSET; default: WARN(1, "Invalid context priority %d\n", amdgpu_priority); - return AMD_SCHED_PRIORITY_INVALID; + return DRM_SCHED_PRIORITY_INVALID; } } static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev, int fd, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { struct file *filp = fcheck(fd); struct drm_file *file; @@ -86,11 +86,11 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data, { union drm_amdgpu_sched *args = data; struct amdgpu_device *adev = dev->dev_private; - enum amd_sched_priority priority; + enum drm_sched_priority priority; int r; priority = amdgpu_to_sched_priority(args->in.priority); - if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID) + if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID) return -EINVAL; switch (args->in.op) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h index b28c067d3822..2a1a0c734bdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h @@ -27,7 +27,7 @@ #include -enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority); +enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority); int amdgpu_sched_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index bb79fd3f3c36..df65c66dc956 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -64,7 +64,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync) static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct dma_fence *f) { - struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + struct drm_sched_fence *s_fence = to_drm_sched_fence(f); if (s_fence) { struct amdgpu_ring *ring; @@ -85,7 +85,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, */ static void *amdgpu_sync_get_owner(struct dma_fence *f) { - struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + struct drm_sched_fence *s_fence = to_drm_sched_fence(f); if (s_fence) return s_fence->owner; @@ -248,7 +248,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, hash_for_each_safe(sync->fences, i, tmp, e, node) { struct dma_fence *f = e->fence; - struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + struct drm_sched_fence *s_fence = to_drm_sched_fence(f); if (dma_fence_is_signaled(f)) { hash_del(&e->node); diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 952e0bf3bc84..7db9556b389b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -76,7 +76,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) { struct drm_global_reference *global_ref; struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; int r; adev->mman.mem_global_referenced = false; @@ -108,8 +108,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) mutex_init(&adev->mman.gtt_window_lock); ring = adev->mman.buffer_funcs_ring; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; - r = amd_sched_entity_init(&ring->sched, &adev->mman.entity, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + r = drm_sched_entity_init(&ring->sched, &adev->mman.entity, rq, amdgpu_sched_jobs, NULL); if (r) { DRM_ERROR("Failed setting up TTM BO move run queue.\n"); @@ -131,7 +131,7 @@ error_mem: static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) { if (adev->mman.mem_global_referenced) { - amd_sched_entity_fini(adev->mman.entity.sched, + drm_sched_entity_fini(adev->mman.entity.sched, &adev->mman.entity); mutex_destroy(&adev->mman.gtt_window_lock); drm_global_item_unref(&adev->mman.bo_global_ref.ref); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 4f9433e61406..167856f6080f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -25,7 +25,7 @@ #define __AMDGPU_TTM_H__ #include "amdgpu.h" -#include "gpu_scheduler.h" +#include #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) @@ -55,7 +55,7 @@ struct amdgpu_mman { struct mutex gtt_window_lock; /* Scheduler entity for buffer moves */ - struct amd_sched_entity entity; + struct drm_sched_entity entity; }; struct amdgpu_copy_mem { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2f2a9e17fdb4..916e51670bfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -116,7 +116,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work); int amdgpu_uvd_sw_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; @@ -230,8 +230,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) } ring = &adev->uvd.ring; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity, rq, amdgpu_sched_jobs, NULL); if (r != 0) { DRM_ERROR("Failed setting up UVD run queue.\n"); @@ -272,7 +272,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) int i; kfree(adev->uvd.saved_bo); - amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); + drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo, &adev->uvd.gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 845eea993f75..32ea20b99e53 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -51,8 +51,8 @@ struct amdgpu_uvd { struct amdgpu_irq_src irq; bool address_64_bit; bool use_ctx_buf; - struct amd_sched_entity entity; 
- struct amd_sched_entity entity_enc; + struct drm_sched_entity entity; + struct drm_sched_entity entity_enc; uint32_t srbm_soft_reset; unsigned num_enc_rings; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index ba6d846b08ff..641deb0527ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -85,7 +85,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work); int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) { struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; const char *fw_name; const struct common_firmware_header *hdr; unsigned ucode_version, version_major, version_minor, binary_id; @@ -174,8 +174,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) } ring = &adev->vce.ring[0]; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->vce.entity, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->vce.entity, rq, amdgpu_sched_jobs, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCE run queue.\n"); @@ -207,7 +207,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) if (adev->vce.vcpu_bo == NULL) return 0; - amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity); + drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity); amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, (void **)&adev->vce.cpu_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index 5ce54cde472d..162cae94e3b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -46,7 +46,7 @@ struct amdgpu_vce { struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; struct amdgpu_irq_src irq; unsigned harvest_config; - struct amd_sched_entity entity; + struct drm_sched_entity entity; uint32_t srbm_soft_reset; unsigned num_rings; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index d7ba048c2f80..88e204d537f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -51,7 +51,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work); int amdgpu_vcn_sw_init(struct amdgpu_device *adev) { struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; @@ -104,8 +104,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) } ring = &adev->vcn.ring_dec; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec, rq, amdgpu_sched_jobs, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCN dec run queue.\n"); @@ -113,8 +113,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) } ring = &adev->vcn.ring_enc[0]; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc, rq, amdgpu_sched_jobs, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCN enc run queue.\n"); @@ -130,9 +130,9 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) kfree(adev->vcn.saved_bo); - 
amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec); + drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec); - amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc); + drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc); amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo, &adev->vcn.gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index d50ba0657854..2fd7db891689 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -56,8 +56,8 @@ struct amdgpu_vcn { struct amdgpu_ring ring_dec; struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; struct amdgpu_irq_src irq; - struct amd_sched_entity entity_dec; - struct amd_sched_entity entity_enc; + struct drm_sched_entity entity_dec; + struct drm_sched_entity entity_enc; unsigned num_enc_rings; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 3ecdbdfb04dd..dbe37d621796 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2643,7 +2643,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, AMDGPU_VM_PTE_COUNT(adev) * 8); unsigned ring_instance; struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; int r, i; u64 flags; uint64_t init_pde_value = 0; @@ -2663,8 +2663,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring); ring_instance %= adev->vm_manager.vm_pte_num_rings; ring = adev->vm_manager.vm_pte_rings[ring_instance]; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; - r = amd_sched_entity_init(&ring->sched, &vm->entity, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; + r = drm_sched_entity_init(&ring->sched, &vm->entity, rq, amdgpu_sched_jobs, NULL); if (r) return r; @@ -2744,7 +2744,7 @@ error_free_root: vm->root.base.bo = NULL; error_free_sched_entity: - amd_sched_entity_fini(&ring->sched, &vm->entity); + drm_sched_entity_fini(&ring->sched, &vm->entity); return r; } @@ -2803,7 +2803,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); } - amd_sched_entity_fini(vm->entity.sched, &vm->entity); + drm_sched_entity_fini(vm->entity.sched, &vm->entity); if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { dev_err(adev->dev, "still active bo inside vm\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 43ea131dd411..159980414964 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -24,10 +24,11 @@ #ifndef __AMDGPU_VM_H__ #define __AMDGPU_VM_H__ -#include #include +#include +#include +#include -#include "gpu_scheduler.h" #include "amdgpu_sync.h" #include "amdgpu_ring.h" @@ -175,7 +176,7 @@ struct amdgpu_vm { spinlock_t freed_lock; /* Scheduler entity for page table updates */ - struct amd_sched_entity entity; + struct drm_sched_entity entity; /* client id and PASID (TODO: replace client_id with PASID) */ u64 client_id; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index d02493cf9175..c7dc69031fb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6472,10 +6472,10 @@ static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev, mutex_unlock(&adev->srbm_mutex); } static void 
gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring, - enum amd_sched_priority priority) + enum drm_sched_priority priority) { struct amdgpu_device *adev = ring->adev; - bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW; + bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) return; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 0e8b887cf03e..86123448a8ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -412,10 +412,10 @@ static int uvd_v6_0_sw_init(void *handle) return r; if (uvd_v6_0_enc_support(adev)) { - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; ring = &adev->uvd.ring_enc[0]; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, rq, amdgpu_sched_jobs, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); @@ -456,7 +456,7 @@ static int uvd_v6_0_sw_fini(void *handle) return r; if (uvd_v6_0_enc_support(adev)) { - amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); + drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); for (i = 0; i < adev->uvd.num_enc_rings; ++i) amdgpu_ring_fini(&adev->uvd.ring_enc[i]); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 660fa41dc877..416611150edd 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -385,7 +385,7 @@ static int uvd_v7_0_early_init(void *handle) static int uvd_v7_0_sw_init(void *handle) { struct amdgpu_ring *ring; - struct amd_sched_rq *rq; + struct drm_sched_rq *rq; int i, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -416,8 +416,8 @@ static int uvd_v7_0_sw_init(void *handle) } ring = &adev->uvd.ring_enc[0]; - rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL]; - r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, rq, amdgpu_sched_jobs, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); @@ -472,7 +472,7 @@ static int uvd_v7_0_sw_fini(void *handle) if (r) return r; - amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); + drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); for (i = 0; i < adev->uvd.num_enc_rings; ++i) amdgpu_ring_fini(&adev->uvd.ring_enc[i]); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h deleted file mode 100644 index b42a78922505..000000000000 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _GPU_SCHED_TRACE_H_ - -#include -#include -#include - -#include - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM gpu_sched -#define TRACE_INCLUDE_FILE gpu_sched_trace - -TRACE_EVENT(amd_sched_job, - TP_PROTO(struct amd_sched_job *sched_job, struct amd_sched_entity *entity), - TP_ARGS(sched_job, entity), - TP_STRUCT__entry( - __field(struct amd_sched_entity *, entity) - __field(struct dma_fence *, fence) - __field(const char *, name) - __field(uint64_t, id) - __field(u32, job_count) - __field(int, hw_job_count) - ), - - TP_fast_assign( - __entry->entity = entity; - __entry->id = sched_job->id; - __entry->fence = &sched_job->s_fence->finished; - __entry->name = sched_job->sched->name; - __entry->job_count = spsc_queue_count(&entity->job_queue); - __entry->hw_job_count = atomic_read( - &sched_job->sched->hw_rq_count); - ), - TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", - __entry->entity, __entry->id, - __entry->fence, __entry->name, - __entry->job_count, __entry->hw_job_count) -); - -TRACE_EVENT(amd_sched_process_job, - TP_PROTO(struct amd_sched_fence *fence), - TP_ARGS(fence), - TP_STRUCT__entry( - __field(struct dma_fence *, fence) - ), - - TP_fast_assign( - __entry->fence = &fence->finished; - ), - TP_printk("fence=%p signaled", __entry->fence) -); - -#endif - -/* This part must be outside protection */ -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#include diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c deleted file mode 100644 index dcb987e6d94a..000000000000 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ /dev/null @@ -1,744 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * - */ -#include -#include -#include -#include -#include -#include "gpu_scheduler.h" - -#include "spsc_queue.h" - -#define CREATE_TRACE_POINTS -#include "gpu_sched_trace.h" - -#define to_amd_sched_job(sched_job) \ - container_of((sched_job), struct amd_sched_job, queue_node) - -static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); -static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); -static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); - -/* Initialize a given run queue struct */ -static void amd_sched_rq_init(struct amd_sched_rq *rq) -{ - spin_lock_init(&rq->lock); - INIT_LIST_HEAD(&rq->entities); - rq->current_entity = NULL; -} - -static void amd_sched_rq_add_entity(struct amd_sched_rq *rq, - struct amd_sched_entity *entity) -{ - if (!list_empty(&entity->list)) - return; - spin_lock(&rq->lock); - list_add_tail(&entity->list, &rq->entities); - spin_unlock(&rq->lock); -} - -static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, - struct amd_sched_entity *entity) -{ - if (list_empty(&entity->list)) - return; - spin_lock(&rq->lock); - list_del_init(&entity->list); - if (rq->current_entity == entity) - rq->current_entity = NULL; - spin_unlock(&rq->lock); -} - -/** - * Select an entity which could provide a job to run - * - * @rq The run queue to check. - * - * Try to find a ready entity, returns NULL if none found. - */ -static struct amd_sched_entity * -amd_sched_rq_select_entity(struct amd_sched_rq *rq) -{ - struct amd_sched_entity *entity; - - spin_lock(&rq->lock); - - entity = rq->current_entity; - if (entity) { - list_for_each_entry_continue(entity, &rq->entities, list) { - if (amd_sched_entity_is_ready(entity)) { - rq->current_entity = entity; - spin_unlock(&rq->lock); - return entity; - } - } - } - - list_for_each_entry(entity, &rq->entities, list) { - - if (amd_sched_entity_is_ready(entity)) { - rq->current_entity = entity; - spin_unlock(&rq->lock); - return entity; - } - - if (entity == rq->current_entity) - break; - } - - spin_unlock(&rq->lock); - - return NULL; -} - -/** - * Init a context entity used by scheduler when submit to HW ring. - * - * @sched The pointer to the scheduler - * @entity The pointer to a valid amd_sched_entity - * @rq The run queue this entity belongs - * @kernel If this is an entity for the kernel - * @jobs The max number of jobs in the job queue - * - * return 0 if succeed. 
negative error code on failure -*/ -int amd_sched_entity_init(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - struct amd_sched_rq *rq, - uint32_t jobs, atomic_t *guilty) -{ - if (!(sched && entity && rq)) - return -EINVAL; - - memset(entity, 0, sizeof(struct amd_sched_entity)); - INIT_LIST_HEAD(&entity->list); - entity->rq = rq; - entity->sched = sched; - entity->guilty = guilty; - - spin_lock_init(&entity->rq_lock); - spin_lock_init(&entity->queue_lock); - spsc_queue_init(&entity->job_queue); - - atomic_set(&entity->fence_seq, 0); - entity->fence_context = dma_fence_context_alloc(2); - - return 0; -} - -/** - * Query if entity is initialized - * - * @sched Pointer to scheduler instance - * @entity The pointer to a valid scheduler entity - * - * return true if entity is initialized, false otherwise -*/ -static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity) -{ - return entity->sched == sched && - entity->rq != NULL; -} - -/** - * Check if entity is idle - * - * @entity The pointer to a valid scheduler entity - * - * Return true if entity don't has any unscheduled jobs. - */ -static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) -{ - rmb(); - if (spsc_queue_peek(&entity->job_queue) == NULL) - return true; - - return false; -} - -/** - * Check if entity is ready - * - * @entity The pointer to a valid scheduler entity - * - * Return true if entity could provide a job. - */ -static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) -{ - if (spsc_queue_peek(&entity->job_queue) == NULL) - return false; - - if (READ_ONCE(entity->dependency)) - return false; - - return true; -} - -/** - * Destroy a context entity - * - * @sched Pointer to scheduler instance - * @entity The pointer to a valid scheduler entity - * - * Cleanup and free the allocated resources. - */ -void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity) -{ - int r; - - if (!amd_sched_entity_is_initialized(sched, entity)) - return; - /** - * The client will not queue more IBs during this fini, consume existing - * queued IBs or discard them on SIGKILL - */ - if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) - r = -ERESTARTSYS; - else - r = wait_event_killable(sched->job_scheduled, - amd_sched_entity_is_idle(entity)); - amd_sched_entity_set_rq(entity, NULL); - if (r) { - struct amd_sched_job *job; - - /* Park the kernel for a moment to make sure it isn't processing - * our enity. 
- */ - kthread_park(sched->thread); - kthread_unpark(sched->thread); - if (entity->dependency) { - dma_fence_remove_callback(entity->dependency, - &entity->cb); - dma_fence_put(entity->dependency); - entity->dependency = NULL; - } - - while ((job = to_amd_sched_job(spsc_queue_pop(&entity->job_queue)))) { - struct amd_sched_fence *s_fence = job->s_fence; - amd_sched_fence_scheduled(s_fence); - dma_fence_set_error(&s_fence->finished, -ESRCH); - amd_sched_fence_finished(s_fence); - WARN_ON(s_fence->parent); - dma_fence_put(&s_fence->finished); - sched->ops->free_job(job); - } - } -} - -static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct amd_sched_entity *entity = - container_of(cb, struct amd_sched_entity, cb); - entity->dependency = NULL; - dma_fence_put(f); - amd_sched_wakeup(entity->sched); -} - -static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct amd_sched_entity *entity = - container_of(cb, struct amd_sched_entity, cb); - entity->dependency = NULL; - dma_fence_put(f); -} - -void amd_sched_entity_set_rq(struct amd_sched_entity *entity, - struct amd_sched_rq *rq) -{ - if (entity->rq == rq) - return; - - spin_lock(&entity->rq_lock); - - if (entity->rq) - amd_sched_rq_remove_entity(entity->rq, entity); - - entity->rq = rq; - if (rq) - amd_sched_rq_add_entity(rq, entity); - - spin_unlock(&entity->rq_lock); -} - -bool amd_sched_dependency_optimized(struct dma_fence* fence, - struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = entity->sched; - struct amd_sched_fence *s_fence; - - if (!fence || dma_fence_is_signaled(fence)) - return false; - if (fence->context == entity->fence_context) - return true; - s_fence = to_amd_sched_fence(fence); - if (s_fence && s_fence->sched == sched) - return true; - - return false; -} - -static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = entity->sched; - struct dma_fence * fence = entity->dependency; - struct amd_sched_fence *s_fence; - - if (fence->context == entity->fence_context) { - /* We can ignore fences from ourself */ - dma_fence_put(entity->dependency); - return false; - } - - s_fence = to_amd_sched_fence(fence); - if (s_fence && s_fence->sched == sched) { - - /* - * Fence is from the same scheduler, only need to wait for - * it to be scheduled - */ - fence = dma_fence_get(&s_fence->scheduled); - dma_fence_put(entity->dependency); - entity->dependency = fence; - if (!dma_fence_add_callback(fence, &entity->cb, - amd_sched_entity_clear_dep)) - return true; - - /* Ignore it when it is already scheduled */ - dma_fence_put(fence); - return false; - } - - if (!dma_fence_add_callback(entity->dependency, &entity->cb, - amd_sched_entity_wakeup)) - return true; - - dma_fence_put(entity->dependency); - return false; -} - -static struct amd_sched_job * -amd_sched_entity_pop_job(struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = entity->sched; - struct amd_sched_job *sched_job = to_amd_sched_job( - spsc_queue_peek(&entity->job_queue)); - - if (!sched_job) - return NULL; - - while ((entity->dependency = sched->ops->dependency(sched_job, entity))) - if (amd_sched_entity_add_dependency_cb(entity)) - return NULL; - - /* skip jobs from entity that marked guilty */ - if (entity->guilty && atomic_read(entity->guilty)) - dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED); - - spsc_queue_pop(&entity->job_queue); - return sched_job; -} - -/** - * Submit a job to the job 
queue - * - * @sched_job The pointer to job required to submit - * - * Returns 0 for success, negative error code otherwise. - */ -void amd_sched_entity_push_job(struct amd_sched_job *sched_job, - struct amd_sched_entity *entity) -{ - struct amd_gpu_scheduler *sched = sched_job->sched; - bool first = false; - - trace_amd_sched_job(sched_job, entity); - - spin_lock(&entity->queue_lock); - first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node); - - spin_unlock(&entity->queue_lock); - - /* first job wakes up scheduler */ - if (first) { - /* Add the entity to the run queue */ - spin_lock(&entity->rq_lock); - amd_sched_rq_add_entity(entity->rq, entity); - spin_unlock(&entity->rq_lock); - amd_sched_wakeup(sched); - } -} - -/* job_finish is called after hw fence signaled - */ -static void amd_sched_job_finish(struct work_struct *work) -{ - struct amd_sched_job *s_job = container_of(work, struct amd_sched_job, - finish_work); - struct amd_gpu_scheduler *sched = s_job->sched; - - /* remove job from ring_mirror_list */ - spin_lock(&sched->job_list_lock); - list_del_init(&s_job->node); - if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { - struct amd_sched_job *next; - - spin_unlock(&sched->job_list_lock); - cancel_delayed_work_sync(&s_job->work_tdr); - spin_lock(&sched->job_list_lock); - - /* queue TDR for next job */ - next = list_first_entry_or_null(&sched->ring_mirror_list, - struct amd_sched_job, node); - - if (next) - schedule_delayed_work(&next->work_tdr, sched->timeout); - } - spin_unlock(&sched->job_list_lock); - dma_fence_put(&s_job->s_fence->finished); - sched->ops->free_job(s_job); -} - -static void amd_sched_job_finish_cb(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct amd_sched_job *job = container_of(cb, struct amd_sched_job, - finish_cb); - schedule_work(&job->finish_work); -} - -static void amd_sched_job_begin(struct amd_sched_job *s_job) -{ - struct amd_gpu_scheduler *sched = s_job->sched; - - dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb, - amd_sched_job_finish_cb); - - spin_lock(&sched->job_list_lock); - list_add_tail(&s_job->node, &sched->ring_mirror_list); - if (sched->timeout != MAX_SCHEDULE_TIMEOUT && - list_first_entry_or_null(&sched->ring_mirror_list, - struct amd_sched_job, node) == s_job) - schedule_delayed_work(&s_job->work_tdr, sched->timeout); - spin_unlock(&sched->job_list_lock); -} - -static void amd_sched_job_timedout(struct work_struct *work) -{ - struct amd_sched_job *job = container_of(work, struct amd_sched_job, - work_tdr.work); - - job->sched->ops->timedout_job(job); -} - -void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad) -{ - struct amd_sched_job *s_job; - struct amd_sched_entity *entity, *tmp; - int i;; - - spin_lock(&sched->job_list_lock); - list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { - if (s_job->s_fence->parent && - dma_fence_remove_callback(s_job->s_fence->parent, - &s_job->s_fence->cb)) { - dma_fence_put(s_job->s_fence->parent); - s_job->s_fence->parent = NULL; - atomic_dec(&sched->hw_rq_count); - } - } - spin_unlock(&sched->job_list_lock); - - if (bad && bad->s_priority != AMD_SCHED_PRIORITY_KERNEL) { - atomic_inc(&bad->karma); - /* don't increase @bad's karma if it's from KERNEL RQ, - * becuase sometimes GPU hang would cause kernel jobs (like VM updating jobs) - * corrupt but keep in mind that kernel jobs always considered good. 
- */ - for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_KERNEL; i++ ) { - struct amd_sched_rq *rq = &sched->sched_rq[i]; - - spin_lock(&rq->lock); - list_for_each_entry_safe(entity, tmp, &rq->entities, list) { - if (bad->s_fence->scheduled.context == entity->fence_context) { - if (atomic_read(&bad->karma) > bad->sched->hang_limit) - if (entity->guilty) - atomic_set(entity->guilty, 1); - break; - } - } - spin_unlock(&rq->lock); - if (&entity->list != &rq->entities) - break; - } - } -} - -void amd_sched_job_kickout(struct amd_sched_job *s_job) -{ - struct amd_gpu_scheduler *sched = s_job->sched; - - spin_lock(&sched->job_list_lock); - list_del_init(&s_job->node); - spin_unlock(&sched->job_list_lock); -} - -void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) -{ - struct amd_sched_job *s_job, *tmp; - bool found_guilty = false; - int r; - - spin_lock(&sched->job_list_lock); - s_job = list_first_entry_or_null(&sched->ring_mirror_list, - struct amd_sched_job, node); - if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT) - schedule_delayed_work(&s_job->work_tdr, sched->timeout); - - list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { - struct amd_sched_fence *s_fence = s_job->s_fence; - struct dma_fence *fence; - uint64_t guilty_context; - - if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { - found_guilty = true; - guilty_context = s_job->s_fence->scheduled.context; - } - - if (found_guilty && s_job->s_fence->scheduled.context == guilty_context) - dma_fence_set_error(&s_fence->finished, -ECANCELED); - - spin_unlock(&sched->job_list_lock); - fence = sched->ops->run_job(s_job); - atomic_inc(&sched->hw_rq_count); - if (fence) { - s_fence->parent = dma_fence_get(fence); - r = dma_fence_add_callback(fence, &s_fence->cb, - amd_sched_process_job); - if (r == -ENOENT) - amd_sched_process_job(fence, &s_fence->cb); - else if (r) - DRM_ERROR("fence add callback failed (%d)\n", - r); - dma_fence_put(fence); - } else { - amd_sched_process_job(NULL, &s_fence->cb); - } - spin_lock(&sched->job_list_lock); - } - spin_unlock(&sched->job_list_lock); -} - -/* init a sched_job with basic field */ -int amd_sched_job_init(struct amd_sched_job *job, - struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - void *owner) -{ - job->sched = sched; - job->s_priority = entity->rq - sched->sched_rq; - job->s_fence = amd_sched_fence_create(entity, owner); - if (!job->s_fence) - return -ENOMEM; - job->id = atomic64_inc_return(&sched->job_id_count); - - INIT_WORK(&job->finish_work, amd_sched_job_finish); - INIT_LIST_HEAD(&job->node); - INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout); - - return 0; -} - -/** - * Return ture if we can push more jobs to the hw. 
- */ -static bool amd_sched_ready(struct amd_gpu_scheduler *sched) -{ - return atomic_read(&sched->hw_rq_count) < - sched->hw_submission_limit; -} - -/** - * Wake up the scheduler when it is ready - */ -static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) -{ - if (amd_sched_ready(sched)) - wake_up_interruptible(&sched->wake_up_worker); -} - -/** - * Select next entity to process -*/ -static struct amd_sched_entity * -amd_sched_select_entity(struct amd_gpu_scheduler *sched) -{ - struct amd_sched_entity *entity; - int i; - - if (!amd_sched_ready(sched)) - return NULL; - - /* Kernel run queue has higher priority than normal run queue*/ - for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) { - entity = amd_sched_rq_select_entity(&sched->sched_rq[i]); - if (entity) - break; - } - - return entity; -} - -static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct amd_sched_fence *s_fence = - container_of(cb, struct amd_sched_fence, cb); - struct amd_gpu_scheduler *sched = s_fence->sched; - - dma_fence_get(&s_fence->finished); - atomic_dec(&sched->hw_rq_count); - amd_sched_fence_finished(s_fence); - - trace_amd_sched_process_job(s_fence); - dma_fence_put(&s_fence->finished); - wake_up_interruptible(&sched->wake_up_worker); -} - -static bool amd_sched_blocked(struct amd_gpu_scheduler *sched) -{ - if (kthread_should_park()) { - kthread_parkme(); - return true; - } - - return false; -} - -static int amd_sched_main(void *param) -{ - struct sched_param sparam = {.sched_priority = 1}; - struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param; - int r; - - sched_setscheduler(current, SCHED_FIFO, &sparam); - - while (!kthread_should_stop()) { - struct amd_sched_entity *entity = NULL; - struct amd_sched_fence *s_fence; - struct amd_sched_job *sched_job; - struct dma_fence *fence; - - wait_event_interruptible(sched->wake_up_worker, - (!amd_sched_blocked(sched) && - (entity = amd_sched_select_entity(sched))) || - kthread_should_stop()); - - if (!entity) - continue; - - sched_job = amd_sched_entity_pop_job(entity); - if (!sched_job) - continue; - - s_fence = sched_job->s_fence; - - atomic_inc(&sched->hw_rq_count); - amd_sched_job_begin(sched_job); - - fence = sched->ops->run_job(sched_job); - amd_sched_fence_scheduled(s_fence); - - if (fence) { - s_fence->parent = dma_fence_get(fence); - r = dma_fence_add_callback(fence, &s_fence->cb, - amd_sched_process_job); - if (r == -ENOENT) - amd_sched_process_job(fence, &s_fence->cb); - else if (r) - DRM_ERROR("fence add callback failed (%d)\n", - r); - dma_fence_put(fence); - } else { - amd_sched_process_job(NULL, &s_fence->cb); - } - - wake_up(&sched->job_scheduled); - } - return 0; -} - -/** - * Init a gpu scheduler instance - * - * @sched The pointer to the scheduler - * @ops The backend operations for this scheduler. - * @hw_submissions Number of hw submissions to do. - * @name Name used for debugging - * - * Return 0 on success, otherwise error code. 
-*/ -int amd_sched_init(struct amd_gpu_scheduler *sched, - const struct amd_sched_backend_ops *ops, - unsigned hw_submission, - unsigned hang_limit, - long timeout, - const char *name) -{ - int i; - sched->ops = ops; - sched->hw_submission_limit = hw_submission; - sched->name = name; - sched->timeout = timeout; - sched->hang_limit = hang_limit; - for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++) - amd_sched_rq_init(&sched->sched_rq[i]); - - init_waitqueue_head(&sched->wake_up_worker); - init_waitqueue_head(&sched->job_scheduled); - INIT_LIST_HEAD(&sched->ring_mirror_list); - spin_lock_init(&sched->job_list_lock); - atomic_set(&sched->hw_rq_count, 0); - atomic64_set(&sched->job_id_count, 0); - - /* Each scheduler will run on a seperate kernel thread */ - sched->thread = kthread_run(amd_sched_main, sched, sched->name); - if (IS_ERR(sched->thread)) { - DRM_ERROR("Failed to create scheduler for %s.\n", name); - return PTR_ERR(sched->thread); - } - - return 0; -} - -/** - * Destroy a gpu scheduler - * - * @sched The pointer to the scheduler - */ -void amd_sched_fini(struct amd_gpu_scheduler *sched) -{ - if (sched->thread) - kthread_stop(sched->thread); -} diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h deleted file mode 100644 index b590fcc2786a..000000000000 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _GPU_SCHEDULER_H_ -#define _GPU_SCHEDULER_H_ - -#include -#include -#include "spsc_queue.h" - -struct amd_gpu_scheduler; -struct amd_sched_rq; - -enum amd_sched_priority { - AMD_SCHED_PRIORITY_MIN, - AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN, - AMD_SCHED_PRIORITY_NORMAL, - AMD_SCHED_PRIORITY_HIGH_SW, - AMD_SCHED_PRIORITY_HIGH_HW, - AMD_SCHED_PRIORITY_KERNEL, - AMD_SCHED_PRIORITY_MAX, - AMD_SCHED_PRIORITY_INVALID = -1, - AMD_SCHED_PRIORITY_UNSET = -2 -}; - - -/** - * A scheduler entity is a wrapper around a job queue or a group - * of other entities. Entities take turns emitting jobs from their - * job queues to corresponding hardware ring based on scheduling - * policy. 
-*/ -struct amd_sched_entity { - struct list_head list; - struct amd_sched_rq *rq; - spinlock_t rq_lock; - struct amd_gpu_scheduler *sched; - - spinlock_t queue_lock; - struct spsc_queue job_queue; - - atomic_t fence_seq; - uint64_t fence_context; - - struct dma_fence *dependency; - struct dma_fence_cb cb; - atomic_t *guilty; /* points to ctx's guilty */ -}; - -/** - * Run queue is a set of entities scheduling command submissions for - * one specific ring. It implements the scheduling policy that selects - * the next entity to emit commands from. -*/ -struct amd_sched_rq { - spinlock_t lock; - struct list_head entities; - struct amd_sched_entity *current_entity; -}; - -struct amd_sched_fence { - struct dma_fence scheduled; - struct dma_fence finished; - struct dma_fence_cb cb; - struct dma_fence *parent; - struct amd_gpu_scheduler *sched; - spinlock_t lock; - void *owner; -}; - -struct amd_sched_job { - struct spsc_node queue_node; - struct amd_gpu_scheduler *sched; - struct amd_sched_fence *s_fence; - struct dma_fence_cb finish_cb; - struct work_struct finish_work; - struct list_head node; - struct delayed_work work_tdr; - uint64_t id; - atomic_t karma; - enum amd_sched_priority s_priority; -}; - -extern const struct dma_fence_ops amd_sched_fence_ops_scheduled; -extern const struct dma_fence_ops amd_sched_fence_ops_finished; -static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f) -{ - if (f->ops == &amd_sched_fence_ops_scheduled) - return container_of(f, struct amd_sched_fence, scheduled); - - if (f->ops == &amd_sched_fence_ops_finished) - return container_of(f, struct amd_sched_fence, finished); - - return NULL; -} - -static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int threshold) -{ - return (s_job && atomic_inc_return(&s_job->karma) > threshold); -} - -/** - * Define the backend operations called by the scheduler, - * these functions should be implemented in driver side -*/ -struct amd_sched_backend_ops { - struct dma_fence *(*dependency)(struct amd_sched_job *sched_job, - struct amd_sched_entity *s_entity); - struct dma_fence *(*run_job)(struct amd_sched_job *sched_job); - void (*timedout_job)(struct amd_sched_job *sched_job); - void (*free_job)(struct amd_sched_job *sched_job); -}; - -/** - * One scheduler is implemented for each hardware ring -*/ -struct amd_gpu_scheduler { - const struct amd_sched_backend_ops *ops; - uint32_t hw_submission_limit; - long timeout; - const char *name; - struct amd_sched_rq sched_rq[AMD_SCHED_PRIORITY_MAX]; - wait_queue_head_t wake_up_worker; - wait_queue_head_t job_scheduled; - atomic_t hw_rq_count; - atomic64_t job_id_count; - struct task_struct *thread; - struct list_head ring_mirror_list; - spinlock_t job_list_lock; - int hang_limit; -}; - -int amd_sched_init(struct amd_gpu_scheduler *sched, - const struct amd_sched_backend_ops *ops, - uint32_t hw_submission, unsigned hang_limit, long timeout, const char *name); -void amd_sched_fini(struct amd_gpu_scheduler *sched); - -int amd_sched_entity_init(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - struct amd_sched_rq *rq, - uint32_t jobs, atomic_t* guilty); -void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity); -void amd_sched_entity_push_job(struct amd_sched_job *sched_job, - struct amd_sched_entity *entity); -void amd_sched_entity_set_rq(struct amd_sched_entity *entity, - struct amd_sched_rq *rq); - -int amd_sched_fence_slab_init(void); -void amd_sched_fence_slab_fini(void); - -struct 
amd_sched_fence *amd_sched_fence_create( - struct amd_sched_entity *s_entity, void *owner); -void amd_sched_fence_scheduled(struct amd_sched_fence *fence); -void amd_sched_fence_finished(struct amd_sched_fence *fence); -int amd_sched_job_init(struct amd_sched_job *job, - struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - void *owner); -void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *job); -void amd_sched_job_recovery(struct amd_gpu_scheduler *sched); -bool amd_sched_dependency_optimized(struct dma_fence* fence, - struct amd_sched_entity *entity); -void amd_sched_job_kickout(struct amd_sched_job *s_job); - -#endif diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c deleted file mode 100644 index 33f54d0a5c4f..000000000000 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * - */ -#include -#include -#include -#include -#include "gpu_scheduler.h" - -static struct kmem_cache *sched_fence_slab; - -int amd_sched_fence_slab_init(void) -{ - sched_fence_slab = kmem_cache_create( - "amd_sched_fence", sizeof(struct amd_sched_fence), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!sched_fence_slab) - return -ENOMEM; - - return 0; -} - -void amd_sched_fence_slab_fini(void) -{ - rcu_barrier(); - kmem_cache_destroy(sched_fence_slab); -} - -struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, - void *owner) -{ - struct amd_sched_fence *fence = NULL; - unsigned seq; - - fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); - if (fence == NULL) - return NULL; - - fence->owner = owner; - fence->sched = entity->sched; - spin_lock_init(&fence->lock); - - seq = atomic_inc_return(&entity->fence_seq); - dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, - &fence->lock, entity->fence_context, seq); - dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished, - &fence->lock, entity->fence_context + 1, seq); - - return fence; -} - -void amd_sched_fence_scheduled(struct amd_sched_fence *fence) -{ - int ret = dma_fence_signal(&fence->scheduled); - - if (!ret) - DMA_FENCE_TRACE(&fence->scheduled, - "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->scheduled, - "was already signaled\n"); -} - -void amd_sched_fence_finished(struct amd_sched_fence *fence) -{ - int ret = dma_fence_signal(&fence->finished); - - if (!ret) - DMA_FENCE_TRACE(&fence->finished, - "signaled from irq context\n"); - else - DMA_FENCE_TRACE(&fence->finished, - "was already signaled\n"); -} - -static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence) -{ - return "amd_sched"; -} - -static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f) -{ - struct amd_sched_fence *fence = to_amd_sched_fence(f); - return (const char *)fence->sched->name; -} - -static bool amd_sched_fence_enable_signaling(struct dma_fence *f) -{ - return true; -} - -/** - * amd_sched_fence_free - free up the fence memory - * - * @rcu: RCU callback head - * - * Free up the fence memory after the RCU grace period. - */ -static void amd_sched_fence_free(struct rcu_head *rcu) -{ - struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); - struct amd_sched_fence *fence = to_amd_sched_fence(f); - - dma_fence_put(fence->parent); - kmem_cache_free(sched_fence_slab, fence); -} - -/** - * amd_sched_fence_release_scheduled - callback that fence can be freed - * - * @fence: fence - * - * This function is called when the reference count becomes zero. - * It just RCU schedules freeing up the fence. - */ -static void amd_sched_fence_release_scheduled(struct dma_fence *f) -{ - struct amd_sched_fence *fence = to_amd_sched_fence(f); - - call_rcu(&fence->finished.rcu, amd_sched_fence_free); -} - -/** - * amd_sched_fence_release_finished - drop extra reference - * - * @f: fence - * - * Drop the extra reference from the scheduled fence to the base fence. 
- */ -static void amd_sched_fence_release_finished(struct dma_fence *f) -{ - struct amd_sched_fence *fence = to_amd_sched_fence(f); - - dma_fence_put(&fence->scheduled); -} - -const struct dma_fence_ops amd_sched_fence_ops_scheduled = { - .get_driver_name = amd_sched_fence_get_driver_name, - .get_timeline_name = amd_sched_fence_get_timeline_name, - .enable_signaling = amd_sched_fence_enable_signaling, - .signaled = NULL, - .wait = dma_fence_default_wait, - .release = amd_sched_fence_release_scheduled, -}; - -const struct dma_fence_ops amd_sched_fence_ops_finished = { - .get_driver_name = amd_sched_fence_get_driver_name, - .get_timeline_name = amd_sched_fence_get_timeline_name, - .enable_signaling = amd_sched_fence_enable_signaling, - .signaled = NULL, - .wait = dma_fence_default_wait, - .release = amd_sched_fence_release_finished, -}; diff --git a/drivers/gpu/drm/amd/scheduler/spsc_queue.h b/drivers/gpu/drm/amd/scheduler/spsc_queue.h deleted file mode 100644 index 5902f35ce759..000000000000 --- a/drivers/gpu/drm/amd/scheduler/spsc_queue.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef AMD_SCHEDULER_SPSC_QUEUE_H_ -#define AMD_SCHEDULER_SPSC_QUEUE_H_ - -#include - -/** SPSC lockless queue */ - -struct spsc_node { - - /* Stores spsc_node* */ - struct spsc_node *next; -}; - -struct spsc_queue { - - struct spsc_node *head; - - /* atomic pointer to struct spsc_node* */ - atomic_long_t tail; - - atomic_t job_count; -}; - -static inline void spsc_queue_init(struct spsc_queue *queue) -{ - queue->head = NULL; - atomic_long_set(&queue->tail, (long)&queue->head); - atomic_set(&queue->job_count, 0); -} - -static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) -{ - return queue->head; -} - -static inline int spsc_queue_count(struct spsc_queue *queue) -{ - return atomic_read(&queue->job_count); -} - -static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) -{ - struct spsc_node **tail; - - node->next = NULL; - - preempt_disable(); - - tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); - WRITE_ONCE(*tail, node); - atomic_inc(&queue->job_count); - - /* - * In case of first element verify new node will be visible to the consumer - * thread when we ping the kernel thread that there is new work to do. 
- */ - smp_wmb(); - - preempt_enable(); - - return tail == &queue->head; -} - - -static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue) -{ - struct spsc_node *next, *node; - - /* Verify reading from memory and not the cache */ - smp_rmb(); - - node = READ_ONCE(queue->head); - - if (!node) - return NULL; - - next = READ_ONCE(node->next); - WRITE_ONCE(queue->head, next); - - if (unlikely(!next)) { - /* slowpath for the last element in the queue */ - - if (atomic_long_cmpxchg(&queue->tail, - (long)&node->next, (long) &queue->head) != (long)&node->next) { - /* Updating tail failed wait for new next to appear */ - do { - smp_rmb(); - } while (unlikely(!(queue->head = READ_ONCE(node->next)))); - } - } - - atomic_dec(&queue->job_count); - return node; -} - - - -#endif /* AMD_SCHEDULER_SPSC_QUEUE_H_ */ diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile new file mode 100644 index 000000000000..ed877912d06d --- /dev/null +++ b/drivers/gpu/drm/scheduler/Makefile @@ -0,0 +1,4 @@ +ccflags-y := -Iinclude/drm +gpu-sched-y := gpu_scheduler.o sched_fence.o + +obj-$(CONFIG_DRM_SCHED) += gpu-sched.o diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c new file mode 100644 index 000000000000..2c18996d59c5 --- /dev/null +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -0,0 +1,744 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <uapi/linux/sched/types.h>
+#include <drm/drmP.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/spsc_queue.h>
+
+#define CREATE_TRACE_POINTS
+#include <drm/gpu_scheduler_trace.h>
+
+#define to_drm_sched_job(sched_job)		\
+		container_of((sched_job), struct drm_sched_job, queue_node)
+
+static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
+static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+
+/* Initialize a given run queue struct */
+static void drm_sched_rq_init(struct drm_sched_rq *rq)
+{
+	spin_lock_init(&rq->lock);
+	INIT_LIST_HEAD(&rq->entities);
+	rq->current_entity = NULL;
+}
+
+static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+				    struct drm_sched_entity *entity)
+{
+	if (!list_empty(&entity->list))
+		return;
+	spin_lock(&rq->lock);
+	list_add_tail(&entity->list, &rq->entities);
+	spin_unlock(&rq->lock);
+}
+
+static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+				       struct drm_sched_entity *entity)
+{
+	if (list_empty(&entity->list))
+		return;
+	spin_lock(&rq->lock);
+	list_del_init(&entity->list);
+	if (rq->current_entity == entity)
+		rq->current_entity = NULL;
+	spin_unlock(&rq->lock);
+}
+
+/**
+ * Select an entity which could provide a job to run
+ *
+ * @rq		The run queue to check.
+ *
+ * Try to find a ready entity, returns NULL if none is found.
+ */
+static struct drm_sched_entity *
+drm_sched_rq_select_entity(struct drm_sched_rq *rq)
+{
+	struct drm_sched_entity *entity;
+
+	spin_lock(&rq->lock);
+
+	entity = rq->current_entity;
+	if (entity) {
+		list_for_each_entry_continue(entity, &rq->entities, list) {
+			if (drm_sched_entity_is_ready(entity)) {
+				rq->current_entity = entity;
+				spin_unlock(&rq->lock);
+				return entity;
+			}
+		}
+	}
+
+	list_for_each_entry(entity, &rq->entities, list) {
+
+		if (drm_sched_entity_is_ready(entity)) {
+			rq->current_entity = entity;
+			spin_unlock(&rq->lock);
+			return entity;
+		}
+
+		if (entity == rq->current_entity)
+			break;
+	}
+
+	spin_unlock(&rq->lock);
+
+	return NULL;
+}
+
+/**
+ * Init a context entity used by the scheduler when submitting to a HW ring.
+ *
+ * @sched	The pointer to the scheduler
+ * @entity	The pointer to a valid drm_sched_entity
+ * @rq		The run queue this entity belongs to
+ * @jobs	The max number of jobs in the job queue
+ * @guilty	Atomic flag shared with the context, set when this entity
+ *		is found guilty of causing a hang
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+			  struct drm_sched_entity *entity,
+			  struct drm_sched_rq *rq,
+			  uint32_t jobs, atomic_t *guilty)
+{
+	if (!(sched && entity && rq))
+		return -EINVAL;
+
+	memset(entity, 0, sizeof(struct drm_sched_entity));
+	INIT_LIST_HEAD(&entity->list);
+	entity->rq = rq;
+	entity->sched = sched;
+	entity->guilty = guilty;
+
+	spin_lock_init(&entity->rq_lock);
+	spin_lock_init(&entity->queue_lock);
+	spsc_queue_init(&entity->job_queue);
+
+	atomic_set(&entity->fence_seq, 0);
+	entity->fence_context = dma_fence_context_alloc(2);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_sched_entity_init);
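For context, a driver creates one such entity per context and binds it to one of the
scheduler's run queues before pushing jobs to it. A minimal usage sketch follows; the
driver-side names (my_sched, the run-queue choice, the queue depth) are hypothetical
and not part of this patch:

	struct drm_sched_entity entity;
	atomic_t guilty = ATOMIC_INIT(0);	/* hang flag, normally owned by the ctx */
	int r;

	/* my_sched: an already-initialized struct drm_gpu_scheduler */
	r = drm_sched_entity_init(&my_sched, &entity,
				  &my_sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
				  32, &guilty);
	if (r)
		return r;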
+/**
+ * Query if entity is initialized
+ *
+ * @sched	Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Returns true if the entity is initialized, false otherwise.
+ */
+static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
+					    struct drm_sched_entity *entity)
+{
+	return entity->sched == sched &&
+		entity->rq != NULL;
+}
+
+/**
+ * Check if entity is idle
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Returns true if the entity doesn't have any unscheduled jobs.
+ */
+static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
+{
+	rmb();
+	if (spsc_queue_peek(&entity->job_queue) == NULL)
+		return true;
+
+	return false;
+}
+
+/**
+ * Check if entity is ready
+ *
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Returns true if the entity could provide a job.
+ */
+static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
+{
+	if (spsc_queue_peek(&entity->job_queue) == NULL)
+		return false;
+
+	if (READ_ONCE(entity->dependency))
+		return false;
+
+	return true;
+}
+
+/**
+ * Destroy a context entity
+ *
+ * @sched	Pointer to scheduler instance
+ * @entity	The pointer to a valid scheduler entity
+ *
+ * Cleanup and free the allocated resources.
+ */
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+			   struct drm_sched_entity *entity)
+{
+	int r;
+
+	if (!drm_sched_entity_is_initialized(sched, entity))
+		return;
+	/*
+	 * The client will not queue more IBs during this fini; consume existing
+	 * queued IBs or discard them on SIGKILL.
+	 */
+	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
+		r = -ERESTARTSYS;
+	else
+		r = wait_event_killable(sched->job_scheduled,
+					drm_sched_entity_is_idle(entity));
+	drm_sched_entity_set_rq(entity, NULL);
+	if (r) {
+		struct drm_sched_job *job;
+
+		/* Park the kernel thread for a moment to make sure it isn't
+		 * processing our entity.
+ */ + kthread_park(sched->thread); + kthread_unpark(sched->thread); + if (entity->dependency) { + dma_fence_remove_callback(entity->dependency, + &entity->cb); + dma_fence_put(entity->dependency); + entity->dependency = NULL; + } + + while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { + struct drm_sched_fence *s_fence = job->s_fence; + drm_sched_fence_scheduled(s_fence); + dma_fence_set_error(&s_fence->finished, -ESRCH); + drm_sched_fence_finished(s_fence); + WARN_ON(s_fence->parent); + dma_fence_put(&s_fence->finished); + sched->ops->free_job(job); + } + } +} +EXPORT_SYMBOL(drm_sched_entity_fini); + +static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct drm_sched_entity *entity = + container_of(cb, struct drm_sched_entity, cb); + entity->dependency = NULL; + dma_fence_put(f); + drm_sched_wakeup(entity->sched); +} + +static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct drm_sched_entity *entity = + container_of(cb, struct drm_sched_entity, cb); + entity->dependency = NULL; + dma_fence_put(f); +} + +void drm_sched_entity_set_rq(struct drm_sched_entity *entity, + struct drm_sched_rq *rq) +{ + if (entity->rq == rq) + return; + + spin_lock(&entity->rq_lock); + + if (entity->rq) + drm_sched_rq_remove_entity(entity->rq, entity); + + entity->rq = rq; + if (rq) + drm_sched_rq_add_entity(rq, entity); + + spin_unlock(&entity->rq_lock); +} +EXPORT_SYMBOL(drm_sched_entity_set_rq); + +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched = entity->sched; + struct drm_sched_fence *s_fence; + + if (!fence || dma_fence_is_signaled(fence)) + return false; + if (fence->context == entity->fence_context) + return true; + s_fence = to_drm_sched_fence(fence); + if (s_fence && s_fence->sched == sched) + return true; + + return false; +} +EXPORT_SYMBOL(drm_sched_dependency_optimized); + +static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched = entity->sched; + struct dma_fence * fence = entity->dependency; + struct drm_sched_fence *s_fence; + + if (fence->context == entity->fence_context) { + /* We can ignore fences from ourself */ + dma_fence_put(entity->dependency); + return false; + } + + s_fence = to_drm_sched_fence(fence); + if (s_fence && s_fence->sched == sched) { + + /* + * Fence is from the same scheduler, only need to wait for + * it to be scheduled + */ + fence = dma_fence_get(&s_fence->scheduled); + dma_fence_put(entity->dependency); + entity->dependency = fence; + if (!dma_fence_add_callback(fence, &entity->cb, + drm_sched_entity_clear_dep)) + return true; + + /* Ignore it when it is already scheduled */ + dma_fence_put(fence); + return false; + } + + if (!dma_fence_add_callback(entity->dependency, &entity->cb, + drm_sched_entity_wakeup)) + return true; + + dma_fence_put(entity->dependency); + return false; +} + +static struct drm_sched_job * +drm_sched_entity_pop_job(struct drm_sched_entity *entity) +{ + struct drm_gpu_scheduler *sched = entity->sched; + struct drm_sched_job *sched_job = to_drm_sched_job( + spsc_queue_peek(&entity->job_queue)); + + if (!sched_job) + return NULL; + + while ((entity->dependency = sched->ops->dependency(sched_job, entity))) + if (drm_sched_entity_add_dependency_cb(entity)) + return NULL; + + /* skip jobs from entity that marked guilty */ + if (entity->guilty && atomic_read(entity->guilty)) + 
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
+
+	spsc_queue_pop(&entity->job_queue);
+	return sched_job;
+}
+
+/**
+ * Submit a job to the job queue
+ *
+ * @sched_job	The pointer to the job to submit
+ *
+ * Queues the job on its entity; if it is the first job queued there, adds
+ * the entity to the run queue and wakes up the scheduler.
+ */
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+			       struct drm_sched_entity *entity)
+{
+	struct drm_gpu_scheduler *sched = sched_job->sched;
+	bool first = false;
+
+	trace_drm_sched_job(sched_job, entity);
+
+	spin_lock(&entity->queue_lock);
+	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
+
+	spin_unlock(&entity->queue_lock);
+
+	/* first job wakes up scheduler */
+	if (first) {
+		/* Add the entity to the run queue */
+		spin_lock(&entity->rq_lock);
+		drm_sched_rq_add_entity(entity->rq, entity);
+		spin_unlock(&entity->rq_lock);
+		drm_sched_wakeup(sched);
+	}
+}
+EXPORT_SYMBOL(drm_sched_entity_push_job);
+
+/* job_finish is called after the hw fence has signaled
+ */
+static void drm_sched_job_finish(struct work_struct *work)
+{
+	struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
+						   finish_work);
+	struct drm_gpu_scheduler *sched = s_job->sched;
+
+	/* remove job from ring_mirror_list */
+	spin_lock(&sched->job_list_lock);
+	list_del_init(&s_job->node);
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+		struct drm_sched_job *next;
+
+		spin_unlock(&sched->job_list_lock);
+		cancel_delayed_work_sync(&s_job->work_tdr);
+		spin_lock(&sched->job_list_lock);
+
+		/* queue TDR for next job */
+		next = list_first_entry_or_null(&sched->ring_mirror_list,
+						struct drm_sched_job, node);
+
+		if (next)
+			schedule_delayed_work(&next->work_tdr, sched->timeout);
+	}
+	spin_unlock(&sched->job_list_lock);
+	dma_fence_put(&s_job->s_fence->finished);
+	sched->ops->free_job(s_job);
+}
+
+static void drm_sched_job_finish_cb(struct dma_fence *f,
+				    struct dma_fence_cb *cb)
+{
+	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+						 finish_cb);
+	schedule_work(&job->finish_work);
+}
+
+static void drm_sched_job_begin(struct drm_sched_job *s_job)
+{
+	struct drm_gpu_scheduler *sched = s_job->sched;
+
+	dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
+			       drm_sched_job_finish_cb);
+
+	spin_lock(&sched->job_list_lock);
+	list_add_tail(&s_job->node, &sched->ring_mirror_list);
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    list_first_entry_or_null(&sched->ring_mirror_list,
+				     struct drm_sched_job, node) == s_job)
+		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+	spin_unlock(&sched->job_list_lock);
+}
+
+static void drm_sched_job_timedout(struct work_struct *work)
+{
+	struct drm_sched_job *job = container_of(work, struct drm_sched_job,
+						 work_tdr.work);
+
+	job->sched->ops->timedout_job(job);
+}
+
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
+{
+	struct drm_sched_job *s_job;
+	struct drm_sched_entity *entity, *tmp;
+	int i;
+
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+		if (s_job->s_fence->parent &&
+		    dma_fence_remove_callback(s_job->s_fence->parent,
+					      &s_job->s_fence->cb)) {
+			dma_fence_put(s_job->s_fence->parent);
+			s_job->s_fence->parent = NULL;
+			atomic_dec(&sched->hw_rq_count);
+		}
+	}
+	spin_unlock(&sched->job_list_lock);
+
+	if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
+		atomic_inc(&bad->karma);
+		/* don't increase @bad's karma if it's from the KERNEL RQ,
+		 * because a GPU hang can also corrupt kernel jobs (like VM
+		 * updating jobs); kernel jobs are always considered good.
+		 */
+		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++) {
+			struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+			spin_lock(&rq->lock);
+			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
+				if (bad->s_fence->scheduled.context == entity->fence_context) {
+					if (atomic_read(&bad->karma) > bad->sched->hang_limit)
+						if (entity->guilty)
+							atomic_set(entity->guilty, 1);
+					break;
+				}
+			}
+			spin_unlock(&rq->lock);
+			if (&entity->list != &rq->entities)
+				break;
+		}
+	}
+}
+EXPORT_SYMBOL(drm_sched_hw_job_reset);
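drm_sched_hw_job_reset() is meant to be driven from the driver's reset path once the
scheduler thread is parked; drm_sched_job_recovery() (below) then replays the mirror
list after the hardware has been reinitialized. A hedged sketch of the expected call
order, modeled on the amdgpu reset flow touched earlier in this series (the hardware
reset step itself is driver specific and elided):

	/* driver GPU-reset path (sketch) */
	kthread_park(sched->thread);
	drm_sched_hw_job_reset(sched, NULL);	/* detach HW fences from in-flight jobs */
	/* ... driver-specific hardware reset and ring re-init ... */
	drm_sched_job_recovery(sched);		/* re-submit jobs from ring_mirror_list */
	kthread_unpark(sched->thread);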
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+{
+	struct drm_sched_job *s_job, *tmp;
+	bool found_guilty = false;
+	int r;
+
+	spin_lock(&sched->job_list_lock);
+	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
+					 struct drm_sched_job, node);
+	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
+		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
+
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+		struct drm_sched_fence *s_fence = s_job->s_fence;
+		struct dma_fence *fence;
+		uint64_t guilty_context;
+
+		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
+			found_guilty = true;
+			guilty_context = s_job->s_fence->scheduled.context;
+		}
+
+		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
+			dma_fence_set_error(&s_fence->finished, -ECANCELED);
+
+		spin_unlock(&sched->job_list_lock);
+		fence = sched->ops->run_job(s_job);
+		atomic_inc(&sched->hw_rq_count);
+		if (fence) {
+			s_fence->parent = dma_fence_get(fence);
+			r = dma_fence_add_callback(fence, &s_fence->cb,
+						   drm_sched_process_job);
+			if (r == -ENOENT)
+				drm_sched_process_job(fence, &s_fence->cb);
+			else if (r)
+				DRM_ERROR("fence add callback failed (%d)\n",
+					  r);
+			dma_fence_put(fence);
+		} else {
+			drm_sched_process_job(NULL, &s_fence->cb);
+		}
+		spin_lock(&sched->job_list_lock);
+	}
+	spin_unlock(&sched->job_list_lock);
+}
+EXPORT_SYMBOL(drm_sched_job_recovery);
+
+/* init a sched_job with its basic fields */
+int drm_sched_job_init(struct drm_sched_job *job,
+		       struct drm_gpu_scheduler *sched,
+		       struct drm_sched_entity *entity,
+		       void *owner)
+{
+	job->sched = sched;
+	job->s_priority = entity->rq - sched->sched_rq;
+	job->s_fence = drm_sched_fence_create(entity, owner);
+	if (!job->s_fence)
+		return -ENOMEM;
+	job->id = atomic64_inc_return(&sched->job_id_count);
+
+	INIT_WORK(&job->finish_work, drm_sched_job_finish);
+	INIT_LIST_HEAD(&job->node);
+	INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_sched_job_init);
+
+/**
+ * Return true if we can push more jobs to the hw.
+ */ +static bool drm_sched_ready(struct drm_gpu_scheduler *sched) +{ + return atomic_read(&sched->hw_rq_count) < + sched->hw_submission_limit; +} + +/** + * Wake up the scheduler when it is ready + */ +static void drm_sched_wakeup(struct drm_gpu_scheduler *sched) +{ + if (drm_sched_ready(sched)) + wake_up_interruptible(&sched->wake_up_worker); +} + +/** + * Select next entity to process +*/ +static struct drm_sched_entity * +drm_sched_select_entity(struct drm_gpu_scheduler *sched) +{ + struct drm_sched_entity *entity; + int i; + + if (!drm_sched_ready(sched)) + return NULL; + + /* Kernel run queue has higher priority than normal run queue*/ + for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + entity = drm_sched_rq_select_entity(&sched->sched_rq[i]); + if (entity) + break; + } + + return entity; +} + +static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct drm_sched_fence *s_fence = + container_of(cb, struct drm_sched_fence, cb); + struct drm_gpu_scheduler *sched = s_fence->sched; + + dma_fence_get(&s_fence->finished); + atomic_dec(&sched->hw_rq_count); + drm_sched_fence_finished(s_fence); + + trace_drm_sched_process_job(s_fence); + dma_fence_put(&s_fence->finished); + wake_up_interruptible(&sched->wake_up_worker); +} + +static bool drm_sched_blocked(struct drm_gpu_scheduler *sched) +{ + if (kthread_should_park()) { + kthread_parkme(); + return true; + } + + return false; +} + +static int drm_sched_main(void *param) +{ + struct sched_param sparam = {.sched_priority = 1}; + struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param; + int r; + + sched_setscheduler(current, SCHED_FIFO, &sparam); + + while (!kthread_should_stop()) { + struct drm_sched_entity *entity = NULL; + struct drm_sched_fence *s_fence; + struct drm_sched_job *sched_job; + struct dma_fence *fence; + + wait_event_interruptible(sched->wake_up_worker, + (!drm_sched_blocked(sched) && + (entity = drm_sched_select_entity(sched))) || + kthread_should_stop()); + + if (!entity) + continue; + + sched_job = drm_sched_entity_pop_job(entity); + if (!sched_job) + continue; + + s_fence = sched_job->s_fence; + + atomic_inc(&sched->hw_rq_count); + drm_sched_job_begin(sched_job); + + fence = sched->ops->run_job(sched_job); + drm_sched_fence_scheduled(s_fence); + + if (fence) { + s_fence->parent = dma_fence_get(fence); + r = dma_fence_add_callback(fence, &s_fence->cb, + drm_sched_process_job); + if (r == -ENOENT) + drm_sched_process_job(fence, &s_fence->cb); + else if (r) + DRM_ERROR("fence add callback failed (%d)\n", + r); + dma_fence_put(fence); + } else { + drm_sched_process_job(NULL, &s_fence->cb); + } + + wake_up(&sched->job_scheduled); + } + return 0; +} + +/** + * Init a gpu scheduler instance + * + * @sched The pointer to the scheduler + * @ops The backend operations for this scheduler. + * @hw_submissions Number of hw submissions to do. + * @name Name used for debugging + * + * Return 0 on success, otherwise error code. 
+ */
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+		   const struct drm_sched_backend_ops *ops,
+		   unsigned hw_submission,
+		   unsigned hang_limit,
+		   long timeout,
+		   const char *name)
+{
+	int i;
+
+	sched->ops = ops;
+	sched->hw_submission_limit = hw_submission;
+	sched->name = name;
+	sched->timeout = timeout;
+	sched->hang_limit = hang_limit;
+	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
+		drm_sched_rq_init(&sched->sched_rq[i]);
+
+	init_waitqueue_head(&sched->wake_up_worker);
+	init_waitqueue_head(&sched->job_scheduled);
+	INIT_LIST_HEAD(&sched->ring_mirror_list);
+	spin_lock_init(&sched->job_list_lock);
+	atomic_set(&sched->hw_rq_count, 0);
+	atomic64_set(&sched->job_id_count, 0);
+
+	/* Each scheduler will run on a separate kernel thread */
+	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
+	if (IS_ERR(sched->thread)) {
+		DRM_ERROR("Failed to create scheduler for %s.\n", name);
+		return PTR_ERR(sched->thread);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_sched_init);
+
+/**
+ * Destroy a gpu scheduler
+ *
+ * @sched	The pointer to the scheduler
+ */
+void drm_sched_fini(struct drm_gpu_scheduler *sched)
+{
+	if (sched->thread)
+		kthread_stop(sched->thread);
+}
+EXPORT_SYMBOL(drm_sched_fini);
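A driver instantiates one scheduler per hardware ring by passing its backend ops and
limits to drm_sched_init(). The sketch below is illustrative only; my_sched_ops and
the submission-limit, hang-limit and timeout values are assumptions, not values taken
from this patch:

	static const struct drm_sched_backend_ops my_sched_ops;	/* defined by the driver */
	struct drm_gpu_scheduler ring_sched;
	int r;

	r = drm_sched_init(&ring_sched, &my_sched_ops,
			   16,				/* hw_submission limit */
			   3,				/* hang_limit: karma threshold */
			   msecs_to_jiffies(10000),	/* TDR timeout */
			   "my_ring");
	if (r)
		return r;

	/* ... on teardown ... */
	drm_sched_fini(&ring_sched);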
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
new file mode 100644
index 000000000000..f6f2955890c4
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include <drm/gpu_scheduler.h>
+
+static struct kmem_cache *sched_fence_slab;
+
+int drm_sched_fence_slab_init(void)
+{
+	sched_fence_slab = kmem_cache_create(
+		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+	if (!sched_fence_slab)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(drm_sched_fence_slab_init);
+
+void drm_sched_fence_slab_fini(void)
+{
+	rcu_barrier();
+	kmem_cache_destroy(sched_fence_slab);
+}
+EXPORT_SYMBOL_GPL(drm_sched_fence_slab_fini);
+
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
+{
+	int ret = dma_fence_signal(&fence->scheduled);
+
+	if (!ret)
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"signaled from irq context\n");
+	else
+		DMA_FENCE_TRACE(&fence->scheduled,
+				"was already signaled\n");
+}
+
+void drm_sched_fence_finished(struct drm_sched_fence *fence)
+{
+	int ret = dma_fence_signal(&fence->finished);
+
+	if (!ret)
+		DMA_FENCE_TRACE(&fence->finished,
+				"signaled from irq context\n");
+	else
+		DMA_FENCE_TRACE(&fence->finished,
+				"was already signaled\n");
+}
+
+static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
+{
+	return "drm_sched";
+}
+
+static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
+{
+	struct drm_sched_fence *fence = to_drm_sched_fence(f);
+	return (const char *)fence->sched->name;
+}
+
+static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
+{
+	return true;
+}
+
+/**
+ * drm_sched_fence_free - free up the fence memory
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the fence memory after the RCU grace period.
+ */
+static void drm_sched_fence_free(struct rcu_head *rcu)
+{
+	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+	struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+	dma_fence_put(fence->parent);
+	kmem_cache_free(sched_fence_slab, fence);
+}
+
+/**
+ * drm_sched_fence_release_scheduled - callback that fence can be freed
+ *
+ * @f: fence
+ *
+ * This function is called when the reference count becomes zero.
+ * It just RCU schedules freeing up the fence.
+ */
+static void drm_sched_fence_release_scheduled(struct dma_fence *f)
+{
+	struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+	call_rcu(&fence->finished.rcu, drm_sched_fence_free);
+}
+
+/**
+ * drm_sched_fence_release_finished - drop extra reference
+ *
+ * @f: fence
+ *
+ * Drop the extra reference from the scheduled fence to the base fence.
+ */ +static void drm_sched_fence_release_finished(struct dma_fence *f) +{ + struct drm_sched_fence *fence = to_drm_sched_fence(f); + + dma_fence_put(&fence->scheduled); +} + +const struct dma_fence_ops drm_sched_fence_ops_scheduled = { + .get_driver_name = drm_sched_fence_get_driver_name, + .get_timeline_name = drm_sched_fence_get_timeline_name, + .enable_signaling = drm_sched_fence_enable_signaling, + .signaled = NULL, + .wait = dma_fence_default_wait, + .release = drm_sched_fence_release_scheduled, +}; + +const struct dma_fence_ops drm_sched_fence_ops_finished = { + .get_driver_name = drm_sched_fence_get_driver_name, + .get_timeline_name = drm_sched_fence_get_timeline_name, + .enable_signaling = drm_sched_fence_enable_signaling, + .signaled = NULL, + .wait = dma_fence_default_wait, + .release = drm_sched_fence_release_finished, +}; + +struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f) +{ + if (f->ops == &drm_sched_fence_ops_scheduled) + return container_of(f, struct drm_sched_fence, scheduled); + + if (f->ops == &drm_sched_fence_ops_finished) + return container_of(f, struct drm_sched_fence, finished); + + return NULL; +} +EXPORT_SYMBOL(to_drm_sched_fence); + +struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity, + void *owner) +{ + struct drm_sched_fence *fence = NULL; + unsigned seq; + + fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); + if (fence == NULL) + return NULL; + + fence->owner = owner; + fence->sched = entity->sched; + spin_lock_init(&fence->lock); + + seq = atomic_inc_return(&entity->fence_seq); + dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled, + &fence->lock, entity->fence_context, seq); + dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished, + &fence->lock, entity->fence_context + 1, seq); + + return fence; +} diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h new file mode 100644 index 000000000000..d29da4cbb042 --- /dev/null +++ b/include/drm/gpu_scheduler.h @@ -0,0 +1,176 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _DRM_GPU_SCHEDULER_H_ +#define _DRM_GPU_SCHEDULER_H_ + +#include <drm/spsc_queue.h> +#include <linux/dma-fence.h> + +struct drm_gpu_scheduler; +struct drm_sched_rq; + +enum drm_sched_priority { + DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN, + DRM_SCHED_PRIORITY_NORMAL, + DRM_SCHED_PRIORITY_HIGH_SW, + DRM_SCHED_PRIORITY_HIGH_HW, + DRM_SCHED_PRIORITY_KERNEL, + DRM_SCHED_PRIORITY_MAX, + DRM_SCHED_PRIORITY_INVALID = -1, + DRM_SCHED_PRIORITY_UNSET = -2 +}; + +/** + * A scheduler entity is a wrapper around a job queue or a group + * of other entities. Entities take turns emitting jobs from their + * job queues to corresponding hardware ring based on scheduling + * policy. +*/ +struct drm_sched_entity { + struct list_head list; + struct drm_sched_rq *rq; + spinlock_t rq_lock; + struct drm_gpu_scheduler *sched; + + spinlock_t queue_lock; + struct spsc_queue job_queue; + + atomic_t fence_seq; + uint64_t fence_context; + + struct dma_fence *dependency; + struct dma_fence_cb cb; + atomic_t *guilty; /* points to ctx's guilty */ +}; + +/** + * Run queue is a set of entities scheduling command submissions for + * one specific ring. It implements the scheduling policy that selects + * the next entity to emit commands from. +*/ +struct drm_sched_rq { + spinlock_t lock; + struct list_head entities; + struct drm_sched_entity *current_entity; +}; + +struct drm_sched_fence { + struct dma_fence scheduled; + struct dma_fence finished; + struct dma_fence_cb cb; + struct dma_fence *parent; + struct drm_gpu_scheduler *sched; + spinlock_t lock; + void *owner; +}; + +struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); + +struct drm_sched_job { + struct spsc_node queue_node; + struct drm_gpu_scheduler *sched; + struct drm_sched_fence *s_fence; + struct dma_fence_cb finish_cb; + struct work_struct finish_work; + struct list_head node; + struct delayed_work work_tdr; + uint64_t id; + atomic_t karma; + enum drm_sched_priority s_priority; +}; + +static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, + int threshold) +{ + return (s_job && atomic_inc_return(&s_job->karma) > threshold); +} + +/** + * Define the backend operations called by the scheduler, + * these functions should be implemented in driver side +*/ +struct drm_sched_backend_ops { + struct dma_fence *(*dependency)(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity); + struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); + void (*timedout_job)(struct drm_sched_job *sched_job); + void (*free_job)(struct drm_sched_job *sched_job); +}; + +/** + * One scheduler is implemented for each hardware ring +*/ +struct drm_gpu_scheduler { + const struct drm_sched_backend_ops *ops; + uint32_t hw_submission_limit; + long timeout; + const char *name; + struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX]; + wait_queue_head_t wake_up_worker; + wait_queue_head_t job_scheduled; + atomic_t hw_rq_count; + atomic64_t job_id_count; + struct task_struct *thread; + struct list_head ring_mirror_list; + spinlock_t job_list_lock; + int hang_limit; +}; + +int drm_sched_init(struct drm_gpu_scheduler *sched, + const struct drm_sched_backend_ops *ops, + uint32_t hw_submission, unsigned hang_limit, long timeout, + const char *name); +void drm_sched_fini(struct drm_gpu_scheduler *sched); + +int drm_sched_entity_init(struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + struct drm_sched_rq *rq, + uint32_t jobs, atomic_t *guilty); +void drm_sched_entity_fini(struct drm_gpu_scheduler *sched, + struct
drm_sched_entity *entity); +void drm_sched_entity_push_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *entity); +void drm_sched_entity_set_rq(struct drm_sched_entity *entity, + struct drm_sched_rq *rq); + +int drm_sched_fence_slab_init(void); +void drm_sched_fence_slab_fini(void); + +struct drm_sched_fence *drm_sched_fence_create( + struct drm_sched_entity *s_entity, void *owner); +void drm_sched_fence_scheduled(struct drm_sched_fence *fence); +void drm_sched_fence_finished(struct drm_sched_fence *fence); +int drm_sched_job_init(struct drm_sched_job *job, + struct drm_gpu_scheduler *sched, + struct drm_sched_entity *entity, + void *owner); +void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, + struct drm_sched_job *job); +void drm_sched_job_recovery(struct drm_gpu_scheduler *sched); +bool drm_sched_dependency_optimized(struct dma_fence* fence, + struct drm_sched_entity *entity); +void drm_sched_job_kickout(struct drm_sched_job *s_job); + +#endif diff --git a/include/drm/gpu_scheduler_trace.h b/include/drm/gpu_scheduler_trace.h new file mode 100644 index 000000000000..0789e8d0a0e1 --- /dev/null +++ b/include/drm/gpu_scheduler_trace.h @@ -0,0 +1,82 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _GPU_SCHED_TRACE_H_ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include <drm/drmP.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM gpu_scheduler +#define TRACE_INCLUDE_FILE gpu_scheduler_trace + +TRACE_EVENT(drm_sched_job, + TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity), + TP_ARGS(sched_job, entity), + TP_STRUCT__entry( + __field(struct drm_sched_entity *, entity) + __field(struct dma_fence *, fence) + __field(const char *, name) + __field(uint64_t, id) + __field(u32, job_count) + __field(int, hw_job_count) + ), + + TP_fast_assign( + __entry->entity = entity; + __entry->id = sched_job->id; + __entry->fence = &sched_job->s_fence->finished; + __entry->name = sched_job->sched->name; + __entry->job_count = spsc_queue_count(&entity->job_queue); + __entry->hw_job_count = atomic_read( + &sched_job->sched->hw_rq_count); + ), + TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d", + __entry->entity, __entry->id, + __entry->fence, __entry->name, + __entry->job_count, __entry->hw_job_count) +); + +TRACE_EVENT(drm_sched_process_job, + TP_PROTO(struct drm_sched_fence *fence), + TP_ARGS(fence), + TP_STRUCT__entry( + __field(struct dma_fence *, fence) + ), + + TP_fast_assign( + __entry->fence = &fence->finished; + ), + TP_printk("fence=%p signaled", __entry->fence) +); + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include <trace/define_trace.h> diff --git a/include/drm/spsc_queue.h b/include/drm/spsc_queue.h new file mode 100644 index 000000000000..125f096c88cb --- /dev/null +++ b/include/drm/spsc_queue.h @@ -0,0 +1,122 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ + +#ifndef DRM_SCHEDULER_SPSC_QUEUE_H_ +#define DRM_SCHEDULER_SPSC_QUEUE_H_ + +#include <linux/atomic.h> +#include <linux/preempt.h> + +/** SPSC lockless queue */ + +struct spsc_node { + + /* Stores spsc_node* */ + struct spsc_node *next; +}; + +struct spsc_queue { + + struct spsc_node *head; + + /* atomic pointer to struct spsc_node* */ + atomic_long_t tail; + + atomic_t job_count; +}; + +static inline void spsc_queue_init(struct spsc_queue *queue) +{ + queue->head = NULL; + atomic_long_set(&queue->tail, (long)&queue->head); + atomic_set(&queue->job_count, 0); +} + +static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue) +{ + return queue->head; +} + +static inline int spsc_queue_count(struct spsc_queue *queue) +{ + return atomic_read(&queue->job_count); +} + +static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node) +{ + struct spsc_node **tail; + + node->next = NULL; + + preempt_disable(); + + tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); + WRITE_ONCE(*tail, node); + atomic_inc(&queue->job_count); + + /* + * In case of first element verify new node will be visible to the consumer + * thread when we ping the kernel thread that there is new work to do. + */ + smp_wmb(); + + preempt_enable(); + + return tail == &queue->head; +} + + +static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue) +{ + struct spsc_node *next, *node; + + /* Verify reading from memory and not the cache */ + smp_rmb(); + + node = READ_ONCE(queue->head); + + if (!node) + return NULL; + + next = READ_ONCE(node->next); + WRITE_ONCE(queue->head, next); + + if (unlikely(!next)) { + /* slowpath for the last element in the queue */ + + if (atomic_long_cmpxchg(&queue->tail, + (long)&node->next, (long) &queue->head) != (long)&node->next) { + /* Updating tail failed, wait for new next to appear */ + do { + smp_rmb(); + } while (unlikely(!(queue->head = READ_ONCE(node->next)))); + } + } + + atomic_dec(&queue->job_count); + return node; +} + + + +#endif /* DRM_SCHEDULER_SPSC_QUEUE_H_ */ -- cgit v1.2.3 From 763efb6c6f2bd60d68c9ec1815f25d782f57f6f4 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 6 Dec 2017 15:44:51 +0100 Subject: drm/amdgpu: cleanup debugfs handling a bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the superfluous .debugfs_init callback and register all files in amdgpu_device.c in just one function.
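[Editor's note, not part of the series: the pattern this commit converges on is a single drm_info_list table registered through one amdgpu_debugfs_add_files() call, as the diff below shows. A minimal sketch of what one more entry would look like under that scheme; the amdgpu_debugfs_example() callback, its output, and the "amdgpu_example" entry are hypothetical:

static int amdgpu_debugfs_example(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* dump any per-device state of interest */
	seq_printf(m, "accel_working: %d\n", adev->accel_working);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_list[] = {
	{"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
	{"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
	{"amdgpu_example", &amdgpu_debugfs_example},	/* hypothetical */
};

/* registered once from amdgpu_debugfs_init():
 *	amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
 *				 ARRAY_SIZE(amdgpu_debugfs_list));
 */]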
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 ---- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 46 +++++++----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 -- 3 files changed, 11 insertions(+), 43 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e329faa6166f..c6fc67f75ffa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1250,11 +1250,6 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev, const struct drm_info_list *files, unsigned nfiles); int amdgpu_debugfs_fence_init(struct amdgpu_device *adev); - -#if defined(CONFIG_DEBUG_FS) -int amdgpu_debugfs_init(struct drm_minor *minor); -#endif - int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 98cc4df02b14..345663a784ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -66,8 +66,7 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); -static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev); -static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev); +static int amdgpu_debugfs_init(struct amdgpu_device *adev); static const char *amdgpu_asic_name[] = { "TAHITI", @@ -2405,17 +2404,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) DRM_ERROR("registering register debugfs failed (%d).\n", r); - r = amdgpu_debugfs_test_ib_ring_init(adev); - if (r) - DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r); - r = amdgpu_debugfs_firmware_init(adev); if (r) DRM_ERROR("registering firmware debugfs failed (%d).\n", r); - r = amdgpu_debugfs_vbios_dump_init(adev); + r = amdgpu_debugfs_init(adev); if (r) - DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r); + DRM_ERROR("Creating debugfs files failed (%d).\n", r); if ((amdgpu_testing & 1)) { if (adev->accel_working) @@ -3964,21 +3959,6 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) return 0; } -static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = { - {"amdgpu_test_ib", &amdgpu_debugfs_test_ib} -}; - -static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev) -{ - return amdgpu_debugfs_add_files(adev, - amdgpu_debugfs_test_ib_ring_list, 1); -} - -int amdgpu_debugfs_init(struct drm_minor *minor) -{ - return 0; -} - static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -3989,19 +3969,19 @@ static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) return 0; } -static const struct drm_info_list amdgpu_vbios_dump_list[] = { - {"amdgpu_vbios", - amdgpu_debugfs_get_vbios_dump, - 0, NULL}, +static const struct drm_info_list amdgpu_debugfs_list[] = { + {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump}, + {"amdgpu_test_ib", &amdgpu_debugfs_test_ib} }; -static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) +static int amdgpu_debugfs_init(struct amdgpu_device *adev) { - return amdgpu_debugfs_add_files(adev, - amdgpu_vbios_dump_list, 1); + return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list, + 
ARRAY_SIZE(amdgpu_debugfs_list)); } + #else -static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev) +static int amdgpu_debugfs_init(struct amdgpu_device *adev) { return 0; } @@ -4009,9 +3989,5 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { return 0; } -static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev) -{ - return 0; -} static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 51b76688ab90..0b039bdcf84e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -850,9 +850,6 @@ static struct drm_driver kms_driver = { .disable_vblank = amdgpu_disable_vblank_kms, .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos, .get_scanout_position = amdgpu_get_crtc_scanout_position, -#if defined(CONFIG_DEBUG_FS) - .debugfs_init = amdgpu_debugfs_init, -#endif .irq_preinstall = amdgpu_irq_preinstall, .irq_postinstall = amdgpu_irq_postinstall, .irq_uninstall = amdgpu_irq_uninstall, -- cgit v1.2.3 From 79588d21ad312d9cd16a867c0d13278e6377a653 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 6 Dec 2017 16:24:49 +0100 Subject: drm/amdgpu: add amdgpu_evict_vram debugfs file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Torture test for MM and VM support, can be used to evict all VRAM while the system is under load. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 345663a784ae..b10aecc7d146 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3969,9 +3969,20 @@ static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) return 0; } +static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + + seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev)); + return 0; +} + static const struct drm_info_list amdgpu_debugfs_list[] = { {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump}, - {"amdgpu_test_ib", &amdgpu_debugfs_test_ib} + {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}, + {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram} }; static int amdgpu_debugfs_init(struct amdgpu_device *adev) -- cgit v1.2.3 From b9141cd3930e390f156739829ca9589fda7926e4 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Wed, 22 Nov 2017 19:21:43 +0800 Subject: drm/amdgpu: no need to evict VRAM in device_fini MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit this VRAM eviction is not needed and also costs 2 seconds to finish because the IRQ is already disabled on the software side before it.
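[Editor's note: both this commit and the amdgpu_evict_vram file above center on amdgpu_bo_evict_vram(). The debugfs file triggers an eviction on every read, so the torture test is simply reading it repeatedly while the system is loaded (with the standard DRM debugfs layout that read would be of /sys/kernel/debug/dri/0/amdgpu_evict_vram; the number printed is the function's return code, 0 on success). This commit, conversely, drops the call from device teardown, where interrupts are already disabled and the eviction only added about two seconds of shutdown latency.]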
Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b10aecc7d146..98d62a991b67 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2461,8 +2461,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->shutdown = true; if (adev->mode_info.mode_config_initialized) drm_crtc_force_disable_all(adev->ddev); - /* evict vram memory */ - amdgpu_bo_evict_vram(adev); + amdgpu_ib_pool_fini(adev); amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); -- cgit v1.2.3 From 4ec6ecf48c64d1da82a008f6fb0be86c4044287d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 12 Dec 2017 15:20:22 -0500 Subject: drm/amdgpu: drop scratch regs save and restore from S3/S4 handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The expectation is that the base driver doesn't mess with these. Some components interact with these directly so let the components handle these directly. Reviewed-by: Harry Wentland Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 98d62a991b67..ca1cf8a71dda 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2577,7 +2577,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) */ amdgpu_bo_evict_vram(adev); - amdgpu_atombios_scratch_regs_save(adev); pci_save_state(dev->pdev); if (suspend) { /* Shut down the device */ @@ -2626,7 +2625,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) if (r) goto unlock; } - amdgpu_atombios_scratch_regs_restore(adev); /* post card */ if (amdgpu_need_post(adev)) { -- cgit v1.2.3 From 88bc1e3c38aaadbedd1df96e8bbe62b06f7cda1f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 12 Dec 2017 15:22:56 -0500 Subject: drm/amdgpu: drop scratch regs save and restore from GPU reset handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The expectation is that the base driver doesn't mess with these. Some components interact with these directly so let the components handle these directly. 
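[Editor's note: the registers in question here and in the previous commit are the ATOM BIOS scratch registers; component-level accessors such as amdgpu_atombios_scratch_regs_engine_hung(), visible in a later hunk of this series, remain the supported way to touch them.]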
Reviewed-by: Harry Wentland Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ca1cf8a71dda..046b9d5bc14d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2896,9 +2896,7 @@ static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags) r = amdgpu_suspend(adev); retry: - amdgpu_atombios_scratch_regs_save(adev); r = amdgpu_asic_reset(adev); - amdgpu_atombios_scratch_regs_restore(adev); /* post card */ amdgpu_atom_asic_init(adev->mode_info.atom_context); -- cgit v1.2.3 From dcebf026e6f69fb79e7f88d10681faf4f8a985ba Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Tue, 12 Dec 2017 14:09:30 -0500 Subject: drm/amdgpu: Add gpu_recovery parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add new parameter to control GPU recovery procedure. v2: Add auto logic where reset is disabled for bare metal and enabled for SR-IOV. Allow forced reset from debugfs. Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9 ++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 2 +- 8 files changed, 19 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c31c5496dc5e..ffbe99d839a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -126,6 +126,7 @@ extern int amdgpu_param_buf_per_se; extern int amdgpu_job_hang_limit; extern int amdgpu_lbpw; extern int amdgpu_compute_multipipe; +extern int amdgpu_gpu_recovery; #ifdef CONFIG_DRM_AMDGPU_SI extern int amdgpu_si_support; @@ -1910,7 +1911,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) /* Common functions */ -int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job); +int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job, bool force); bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_need_post(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 046b9d5bc14d..3f63f5ca4fa7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3009,11 +3009,12 @@ error: * * @adev: amdgpu device pointer * @job: which job trigger hang + * @force: forces reset regardless of amdgpu_gpu_recovery * * Attempt to reset the GPU if it has hung (all asics). * Returns 0 for success or an error on failure.
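 * * [Editor's note, not part of the original patch: combined with the hunk below, the effective policy is that force == true always attempts recovery, amdgpu_gpu_recovery == 1 attempts it, amdgpu_gpu_recovery == 0 skips it, and amdgpu_gpu_recovery == -1 (auto) attempts it only for SR-IOV VFs.]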
 */ -int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) +int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool force) { struct drm_atomic_state *state = NULL; uint64_t reset_flags = 0; @@ -3024,6 +3025,12 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job) return 0; } + if (!force && (amdgpu_gpu_recovery == 0 || + (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) { + DRM_INFO("GPU recovery disabled.\n"); + return 0; + } + dev_info(adev->dev, "GPU reset begin!\n"); mutex_lock(&adev->lock_reset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0b039bdcf84e..b734cd668ff1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -128,6 +128,7 @@ int amdgpu_param_buf_per_se = 0; int amdgpu_job_hang_limit = 0; int amdgpu_lbpw = -1; int amdgpu_compute_multipipe = -1; +int amdgpu_gpu_recovery = -1; /* auto */ MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -280,6 +281,9 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444); MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)"); module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444); +MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism (1 = enable, 0 = disable, -1 = auto)"); +module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444); + #ifdef CONFIG_DRM_AMDGPU_SI #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 7cb71a8e21df..d3ce12149542 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -705,7 +705,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data) struct amdgpu_device *adev = dev->dev_private; seq_printf(m, "gpu recover\n"); - amdgpu_gpu_recover(adev, NULL); + amdgpu_gpu_recover(adev, NULL, true); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index c340774082ea..c43643e8c8c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -88,7 +88,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work) reset_work); if (!amdgpu_sriov_vf(adev)) - amdgpu_gpu_recover(adev, NULL); + amdgpu_gpu_recover(adev, NULL, false); } /* Disable *all* interrupts */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 013c0a8cfb60..be8a437fad54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) atomic_read(&job->ring->fence_drv.last_seq), job->ring->fence_drv.sync_seq); - amdgpu_gpu_recover(job->adev, job); + amdgpu_gpu_recover(job->adev, job, false); } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 71f56900d6fe..7ade56d59c27 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -253,7 +253,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_gpu_recover(adev, NULL); +
amdgpu_gpu_recover(adev, NULL, false); } static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index df52824c0cd4..e05823d86cfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -521,7 +521,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_gpu_recover(adev, NULL); + amdgpu_gpu_recover(adev, NULL, false); } static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev, -- cgit v1.2.3 From 8854695add1eaaeafae728850c905c4727e56f35 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Wed, 13 Dec 2017 14:36:53 -0500 Subject: drm/amdgpu: Simplify amdgpu_lockup_timeout usage. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the introduction of amdgpu_gpu_recovery we no longer need to rely on amdgpu_lockup_timeout == 0 to disable GPU reset. Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 ++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 14 +------------- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 2 +- 5 files changed, 11 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3f63f5ca4fa7..79869827985f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1229,6 +1229,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_vram_page_split); amdgpu_vram_page_split = 1024; } + + if (amdgpu_lockup_timeout == 0) { + dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n"); + amdgpu_lockup_timeout = 10000; + } } /** @@ -2827,7 +2832,7 @@ bool amdgpu_need_backup(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) return false; - return amdgpu_lockup_timeout > 0 ?
true : false; + return amdgpu_gpu_recovery; } static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b734cd668ff1..1fc5499cb5fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0; int amdgpu_hw_i2c = 0; int amdgpu_pcie_gen2 = -1; int amdgpu_msi = -1; -int amdgpu_lockup_timeout = 0; +int amdgpu_lockup_timeout = 10000; int amdgpu_dpm = -1; int amdgpu_fw_load_type = -1; int amdgpu_aspm = -1; @@ -166,7 +166,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444); MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)"); module_param_named(msi, amdgpu_msi, int, 0444); -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)"); +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)"); module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444); MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index d3ce12149542..da1510f65ee0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -410,7 +410,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, unsigned num_hw_submission) { - long timeout; int r; /* Check that num_hw_submission is a power of two */ @@ -434,20 +433,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, /* No need to setup the GPU scheduler for KIQ ring */ if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) { - timeout = msecs_to_jiffies(amdgpu_lockup_timeout); - if (timeout == 0) { - /* - * FIXME: - * Delayed workqueue cannot use it directly, - * so the scheduler will not use delayed workqueue if - * MAX_SCHEDULE_TIMEOUT is set. - * Currently keep it simple and silly. 
- */ - timeout = MAX_SCHEDULE_TIMEOUT; - } r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, num_hw_submission, amdgpu_job_hang_limit, - timeout, ring->name); + msecs_to_jiffies(amdgpu_lockup_timeout), ring->name); if (r) { DRM_ERROR("Failed to create scheduler on ring %s.\n", ring->name); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 7ade56d59c27..43e74ec93147 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -277,7 +277,7 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev, int r; /* trigger gpu-reset by hypervisor only if TDR disabled */ - if (amdgpu_lockup_timeout == 0) { + if (!amdgpu_gpu_recovery) { /* see what event we get */ r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index e05823d86cfb..da7c261d5d87 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -545,7 +545,7 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev, int r; /* trigger gpu-reset by hypervisor only if TDR disabled */ - if (amdgpu_lockup_timeout == 0) { + if (!amdgpu_gpu_recovery) { /* see what event we get */ r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION); -- cgit v1.2.3 From 4e89df63c110d7fb4883c8b3d653d3d0e5dac67d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Dec 2017 14:32:53 -0500 Subject: drm/amdgpu: move atom functions from amdgpu_device.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit and move them to amdgpu_atombios.c for consistency. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 236 ++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 231 -------------------------- 3 files changed, 236 insertions(+), 235 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index ffaf0534e33c..bf872f694f50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -27,6 +27,7 @@ #include #include "amdgpu.h" #include "amdgpu_atombios.h" +#include "amdgpu_atomfirmware.h" #include "amdgpu_i2c.h" #include "atom.h" @@ -1699,7 +1700,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock) WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch); } -void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev) +static void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev) { uint32_t bios_2_scratch, bios_6_scratch; @@ -1776,7 +1777,7 @@ void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) #endif } -int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev) +static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev) { struct atom_context *ctx = adev->mode_info.atom_context; int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); @@ -1819,3 +1820,234 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev) ctx->scratch_size_bytes = usage_bytes; return 0; } + +/* ATOM accessor methods */ +/* + * ATOM is an interpreted byte code stored in tables in the vbios.
The + * driver registers callbacks to access registers and the interpreter + * in the driver parses the tables and executes then to program specific + * actions (set display modes, asic init, etc.). See amdgpu_atombios.c, + * atombios.h, and atom.c + */ + +/** + * cail_pll_read - read PLL register + * + * @info: atom card_info pointer + * @reg: PLL register offset + * + * Provides a PLL register accessor for the atom interpreter (r4xx+). + * Returns the value of the PLL register. + */ +static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) +{ + return 0; +} + +/** + * cail_pll_write - write PLL register + * + * @info: atom card_info pointer + * @reg: PLL register offset + * @val: value to write to the pll register + * + * Provides a PLL register accessor for the atom interpreter (r4xx+). + */ +static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) +{ + +} + +/** + * cail_mc_read - read MC (Memory Controller) register + * + * @info: atom card_info pointer + * @reg: MC register offset + * + * Provides an MC register accessor for the atom interpreter (r4xx+). + * Returns the value of the MC register. + */ +static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) +{ + return 0; +} + +/** + * cail_mc_write - write MC (Memory Controller) register + * + * @info: atom card_info pointer + * @reg: MC register offset + * @val: value to write to the pll register + * + * Provides a MC register accessor for the atom interpreter (r4xx+). + */ +static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) +{ + +} + +/** + * cail_reg_write - write MMIO register + * + * @info: atom card_info pointer + * @reg: MMIO register offset + * @val: value to write to the pll register + * + * Provides a MMIO register accessor for the atom interpreter (r4xx+). + */ +static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) +{ + struct amdgpu_device *adev = info->dev->dev_private; + + WREG32(reg, val); +} + +/** + * cail_reg_read - read MMIO register + * + * @info: atom card_info pointer + * @reg: MMIO register offset + * + * Provides an MMIO register accessor for the atom interpreter (r4xx+). + * Returns the value of the MMIO register. + */ +static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) +{ + struct amdgpu_device *adev = info->dev->dev_private; + uint32_t r; + + r = RREG32(reg); + return r; +} + +/** + * cail_ioreg_write - write IO register + * + * @info: atom card_info pointer + * @reg: IO register offset + * @val: value to write to the pll register + * + * Provides a IO register accessor for the atom interpreter (r4xx+). + */ +static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) +{ + struct amdgpu_device *adev = info->dev->dev_private; + + WREG32_IO(reg, val); +} + +/** + * cail_ioreg_read - read IO register + * + * @info: atom card_info pointer + * @reg: IO register offset + * + * Provides an IO register accessor for the atom interpreter (r4xx+). + * Returns the value of the IO register. 
+ */ +static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) +{ + struct amdgpu_device *adev = info->dev->dev_private; + uint32_t r; + + r = RREG32_IO(reg); + return r; +} + +static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + struct atom_context *ctx = adev->mode_info.atom_context; + + return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version); +} + +static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version, + NULL); + +/** + * amdgpu_atombios_fini - free the driver info and callbacks for atombios + * + * @adev: amdgpu_device pointer + * + * Frees the driver info and register access callbacks for the ATOM + * interpreter (r4xx+). + * Called at driver shutdown. + */ +void amdgpu_atombios_fini(struct amdgpu_device *adev) +{ + if (adev->mode_info.atom_context) { + kfree(adev->mode_info.atom_context->scratch); + kfree(adev->mode_info.atom_context->iio); + } + kfree(adev->mode_info.atom_context); + adev->mode_info.atom_context = NULL; + kfree(adev->mode_info.atom_card_info); + adev->mode_info.atom_card_info = NULL; + device_remove_file(adev->dev, &dev_attr_vbios_version); +} + +/** + * amdgpu_atombios_init - init the driver info and callbacks for atombios + * + * @adev: amdgpu_device pointer + * + * Initializes the driver info and register access callbacks for the + * ATOM interpreter (r4xx+). + * Returns 0 on success, -ENOMEM on failure. + * Called at driver startup. + */ +int amdgpu_atombios_init(struct amdgpu_device *adev) +{ + struct card_info *atom_card_info = + kzalloc(sizeof(struct card_info), GFP_KERNEL); + int ret; + + if (!atom_card_info) + return -ENOMEM; + + adev->mode_info.atom_card_info = atom_card_info; + atom_card_info->dev = adev->ddev; + atom_card_info->reg_read = cail_reg_read; + atom_card_info->reg_write = cail_reg_write; + /* needed for iio ops */ + if (adev->rio_mem) { + atom_card_info->ioreg_read = cail_ioreg_read; + atom_card_info->ioreg_write = cail_ioreg_write; + } else { + DRM_DEBUG("PCI I/O BAR is not found.
Using MMIO to access ATOM BIOS\n"); + atom_card_info->ioreg_read = cail_reg_read; + atom_card_info->ioreg_write = cail_reg_write; + } + atom_card_info->mc_read = cail_mc_read; + atom_card_info->mc_write = cail_mc_write; + atom_card_info->pll_read = cail_pll_read; + atom_card_info->pll_write = cail_pll_write; + + adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios); + if (!adev->mode_info.atom_context) { + amdgpu_atombios_fini(adev); + return -ENOMEM; + } + + mutex_init(&adev->mode_info.atom_context->mutex); + if (adev->is_atom_fw) { + amdgpu_atomfirmware_scratch_regs_init(adev); + amdgpu_atomfirmware_allocate_fb_scratch(adev); + } else { + amdgpu_atombios_scratch_regs_init(adev); + amdgpu_atombios_allocate_fb_scratch(adev); + } + + ret = device_create_file(adev->dev, &dev_attr_vbios_version); + if (ret) { + DRM_ERROR("Failed to create device file for VBIOS version\n"); + return ret; + } + + return 0; +} + diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 58507f9ff856..fd8f18074f7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -195,7 +195,6 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev, bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock); -void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung); bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev); @@ -217,6 +216,7 @@ int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev, u8 voltage_type, u8 *svd_gpio_id, u8 *svc_gpio_id); -int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev); +void amdgpu_atombios_fini(struct amdgpu_device *adev); +int amdgpu_atombios_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 79869827985f..a3632c757ca4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -898,237 +898,6 @@ void amdgpu_dummy_page_fini(struct amdgpu_device *adev) adev->dummy_page.page = NULL; } - -/* ATOM accessor methods */ -/* - * ATOM is an interpreted byte code stored in tables in the vbios. The - * driver registers callbacks to access registers and the interpreter - * in the driver parses the tables and executes then to program specific - * actions (set display modes, asic init, etc.). See amdgpu_atombios.c, - * atombios.h, and atom.c - */ - -/** - * cail_pll_read - read PLL register - * - * @info: atom card_info pointer - * @reg: PLL register offset - * - * Provides a PLL register accessor for the atom interpreter (r4xx+). - * Returns the value of the PLL register. - */ -static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) -{ - return 0; -} - -/** - * cail_pll_write - write PLL register - * - * @info: atom card_info pointer - * @reg: PLL register offset - * @val: value to write to the pll register - * - * Provides a PLL register accessor for the atom interpreter (r4xx+). - */ -static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val) -{ - -} - -/** - * cail_mc_read - read MC (Memory Controller) register - * - * @info: atom card_info pointer - * @reg: MC register offset - * - * Provides an MC register accessor for the atom interpreter (r4xx+). 
- * Returns the value of the MC register. - */ -static uint32_t cail_mc_read(struct card_info *info, uint32_t reg) -{ - return 0; -} - -/** - * cail_mc_write - write MC (Memory Controller) register - * - * @info: atom card_info pointer - * @reg: MC register offset - * @val: value to write to the pll register - * - * Provides a MC register accessor for the atom interpreter (r4xx+). - */ -static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val) -{ - -} - -/** - * cail_reg_write - write MMIO register - * - * @info: atom card_info pointer - * @reg: MMIO register offset - * @val: value to write to the pll register - * - * Provides a MMIO register accessor for the atom interpreter (r4xx+). - */ -static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val) -{ - struct amdgpu_device *adev = info->dev->dev_private; - - WREG32(reg, val); -} - -/** - * cail_reg_read - read MMIO register - * - * @info: atom card_info pointer - * @reg: MMIO register offset - * - * Provides an MMIO register accessor for the atom interpreter (r4xx+). - * Returns the value of the MMIO register. - */ -static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) -{ - struct amdgpu_device *adev = info->dev->dev_private; - uint32_t r; - - r = RREG32(reg); - return r; -} - -/** - * cail_ioreg_write - write IO register - * - * @info: atom card_info pointer - * @reg: IO register offset - * @val: value to write to the pll register - * - * Provides a IO register accessor for the atom interpreter (r4xx+). - */ -static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) -{ - struct amdgpu_device *adev = info->dev->dev_private; - - WREG32_IO(reg, val); -} - -/** - * cail_ioreg_read - read IO register - * - * @info: atom card_info pointer - * @reg: IO register offset - * - * Provides an IO register accessor for the atom interpreter (r4xx+). - * Returns the value of the IO register. - */ -static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) -{ - struct amdgpu_device *adev = info->dev->dev_private; - uint32_t r; - - r = RREG32_IO(reg); - return r; -} - -static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct drm_device *ddev = dev_get_drvdata(dev); - struct amdgpu_device *adev = ddev->dev_private; - struct atom_context *ctx = adev->mode_info.atom_context; - - return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version); -} - -static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version, - NULL); - -/** - * amdgpu_atombios_fini - free the driver info and callbacks for atombios - * - * @adev: amdgpu_device pointer - * - * Frees the driver info and register access callbacks for the ATOM - * interpreter (r4xx+). - * Called at driver shutdown. - */ -static void amdgpu_atombios_fini(struct amdgpu_device *adev) -{ - if (adev->mode_info.atom_context) { - kfree(adev->mode_info.atom_context->scratch); - kfree(adev->mode_info.atom_context->iio); - } - kfree(adev->mode_info.atom_context); - adev->mode_info.atom_context = NULL; - kfree(adev->mode_info.atom_card_info); - adev->mode_info.atom_card_info = NULL; - device_remove_file(adev->dev, &dev_attr_vbios_version); -} - -/** - * amdgpu_atombios_init - init the driver info and callbacks for atombios - * - * @adev: amdgpu_device pointer - * - * Initializes the driver info and register access callbacks for the - * ATOM interpreter (r4xx+). - * Returns 0 on sucess, -ENOMEM on failure. - * Called at driver startup. 
- */ -static int amdgpu_atombios_init(struct amdgpu_device *adev) -{ - struct card_info *atom_card_info = - kzalloc(sizeof(struct card_info), GFP_KERNEL); - int ret; - - if (!atom_card_info) - return -ENOMEM; - - adev->mode_info.atom_card_info = atom_card_info; - atom_card_info->dev = adev->ddev; - atom_card_info->reg_read = cail_reg_read; - atom_card_info->reg_write = cail_reg_write; - /* needed for iio ops */ - if (adev->rio_mem) { - atom_card_info->ioreg_read = cail_ioreg_read; - atom_card_info->ioreg_write = cail_ioreg_write; - } else { - DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n"); - atom_card_info->ioreg_read = cail_reg_read; - atom_card_info->ioreg_write = cail_reg_write; - } - atom_card_info->mc_read = cail_mc_read; - atom_card_info->mc_write = cail_mc_write; - atom_card_info->pll_read = cail_pll_read; - atom_card_info->pll_write = cail_pll_write; - - adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios); - if (!adev->mode_info.atom_context) { - amdgpu_atombios_fini(adev); - return -ENOMEM; - } - - mutex_init(&adev->mode_info.atom_context->mutex); - if (adev->is_atom_fw) { - amdgpu_atomfirmware_scratch_regs_init(adev); - amdgpu_atomfirmware_allocate_fb_scratch(adev); - } else { - amdgpu_atombios_scratch_regs_init(adev); - amdgpu_atombios_allocate_fb_scratch(adev); - } - - ret = device_create_file(adev->dev, &dev_attr_vbios_version); - if (ret) { - DRM_ERROR("Failed to create device file for VBIOS version\n"); - return ret; - } - - return 0; -} - /* if we get transitioned to only one device, take VGA back */ /** * amdgpu_vga_set_decode - enable/disable vga decode -- cgit v1.2.3 From 06ec907054c5a48f28ff6856885522e3a7103bb7 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Dec 2017 15:02:39 -0500 Subject: drm/amdgpu: use consistent naming for static funcs in amdgpu_device.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prefix the functions with device or device_ip for functions which deal with ip blocks for consistency. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 176 +++++++++++++++-------------- 1 file changed, 90 insertions(+), 86 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a3632c757ca4..92b5064b3391 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -332,7 +332,7 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, BUG(); } -static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) +static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev) { return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, @@ -341,7 +341,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) (void **)&adev->vram_scratch.ptr); } -static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) +static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); } @@ -391,14 +391,14 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev) * GPU doorbell aperture helpers function. */ /** - * amdgpu_doorbell_init - Init doorbell driver information. + * amdgpu_device_doorbell_init - Init doorbell driver information. 
* * @adev: amdgpu_device pointer * * Init doorbell driver information (CIK) * Returns 0 on success, error on failure. */ -static int amdgpu_doorbell_init(struct amdgpu_device *adev) +static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) { /* No doorbell on SI hardware generation */ if (adev->asic_type < CHIP_BONAIRE) { @@ -431,13 +431,13 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev) } /** - * amdgpu_doorbell_fini - Tear down doorbell driver information. + * amdgpu_device_doorbell_fini - Tear down doorbell driver information. * * @adev: amdgpu_device pointer * * Tear down doorbell driver information (CIK) */ -static void amdgpu_doorbell_fini(struct amdgpu_device *adev) +static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev) { iounmap(adev->doorbell.ptr); adev->doorbell.ptr = NULL; @@ -477,20 +477,20 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, } /* - * amdgpu_wb_*() + * amdgpu_device_wb_*() * Writeback is the method by which the GPU updates special pages in memory * with the status of certain GPU events (fences, ring pointers,etc.). */ /** - * amdgpu_wb_fini - Disable Writeback and free memory + * amdgpu_device_wb_fini - Disable Writeback and free memory * * @adev: amdgpu_device pointer * * Disables Writeback and frees the Writeback memory (all asics). * Used at driver shutdown. */ -static void amdgpu_wb_fini(struct amdgpu_device *adev) +static void amdgpu_device_wb_fini(struct amdgpu_device *adev) { if (adev->wb.wb_obj) { amdgpu_bo_free_kernel(&adev->wb.wb_obj, @@ -501,7 +501,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev) } /** - * amdgpu_wb_init- Init Writeback driver info and allocate memory + * amdgpu_device_wb_init- Init Writeback driver info and allocate memory * * @adev: amdgpu_device pointer * @@ -509,7 +509,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev) * Used at driver startup. * Returns 0 on success or an -error on failure. */ -static int amdgpu_wb_init(struct amdgpu_device *adev) +static int amdgpu_device_wb_init(struct amdgpu_device *adev) { int r; @@ -770,7 +770,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) cmd & ~PCI_COMMAND_MEMORY); /* Free the VRAM and doorbell BAR, we most likely need to move both. */ - amdgpu_doorbell_fini(adev); + amdgpu_device_doorbell_fini(adev); if (adev->asic_type >= CHIP_BONAIRE) pci_release_resource(adev->pdev, 2); @@ -787,7 +787,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) /* When the doorbell or fb BAR isn't available we have no chance of * using the device. */ - r = amdgpu_doorbell_init(adev); + r = amdgpu_device_doorbell_init(adev); if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) return -ENODEV; @@ -900,7 +900,7 @@ void amdgpu_dummy_page_fini(struct amdgpu_device *adev) /* if we get transitioned to only one device, take VGA back */ /** - * amdgpu_vga_set_decode - enable/disable vga decode + * amdgpu_device_vga_set_decode - enable/disable vga decode * * @cookie: amdgpu_device pointer * @state: enable/disable vga decode @@ -908,7 +908,7 @@ void amdgpu_dummy_page_fini(struct amdgpu_device *adev) * Enable/disable vga decode (all asics). * Returns VGA resource flags. 
*/ -static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) +static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state) { struct amdgpu_device *adev = cookie; amdgpu_asic_set_vga_state(adev, state); @@ -919,7 +919,7 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } -static void amdgpu_check_block_size(struct amdgpu_device *adev) +static void amdgpu_device_check_block_size(struct amdgpu_device *adev) { /* defines number of bits in page table versus page directory, * a page is 4KB so we have 12 bits offset, minimum 9 bits in the @@ -934,7 +934,7 @@ static void amdgpu_check_block_size(struct amdgpu_device *adev) } } -static void amdgpu_check_vm_size(struct amdgpu_device *adev) +static void amdgpu_device_check_vm_size(struct amdgpu_device *adev) { /* no need to check the default value */ if (amdgpu_vm_size == -1) @@ -948,14 +948,14 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev) } /** - * amdgpu_check_arguments - validate module params + * amdgpu_device_check_arguments - validate module params * * @adev: amdgpu_device pointer * * Validates certain module parameters and updates * the associated values used by the driver (all asics). */ -static void amdgpu_check_arguments(struct amdgpu_device *adev) +static void amdgpu_device_check_arguments(struct amdgpu_device *adev) { if (amdgpu_sched_jobs < 4) { dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", @@ -988,9 +988,9 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_vm_fragment_size = -1; } - amdgpu_check_vm_size(adev); + amdgpu_device_check_vm_size(adev); - amdgpu_check_block_size(adev); + amdgpu_device_check_block_size(adev); if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || !is_power_of_2(amdgpu_vram_page_split))) { @@ -1359,7 +1359,7 @@ out: return err; } -static int amdgpu_early_init(struct amdgpu_device *adev) +static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) { int i, r; @@ -1468,7 +1468,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev) return 0; } -static int amdgpu_init(struct amdgpu_device *adev) +static int amdgpu_device_ip_init(struct amdgpu_device *adev) { int i, r; @@ -1484,7 +1484,7 @@ static int amdgpu_init(struct amdgpu_device *adev) adev->ip_blocks[i].status.sw = true; /* need to do gmc hw init early so we can allocate gpu mem */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { - r = amdgpu_vram_scratch_init(adev); + r = amdgpu_device_vram_scratch_init(adev); if (r) { DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); return r; @@ -1494,9 +1494,9 @@ static int amdgpu_init(struct amdgpu_device *adev) DRM_ERROR("hw_init %d failed %d\n", i, r); return r; } - r = amdgpu_wb_init(adev); + r = amdgpu_device_wb_init(adev); if (r) { - DRM_ERROR("amdgpu_wb_init failed %d\n", r); + DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); return r; } adev->ip_blocks[i].status.hw = true; @@ -1535,18 +1535,18 @@ static int amdgpu_init(struct amdgpu_device *adev) return 0; } -static void amdgpu_fill_reset_magic(struct amdgpu_device *adev) +static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) { memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); } -static bool amdgpu_check_vram_lost(struct amdgpu_device *adev) +static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) { return !!memcmp(adev->gart.ptr, adev->reset_magic, AMDGPU_RESET_MAGIC_NUM); } -static int amdgpu_late_set_cg_state(struct 
amdgpu_device *adev) +static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) { int i = 0, r; @@ -1569,7 +1569,7 @@ static int amdgpu_late_set_cg_state(struct amdgpu_device *adev) return 0; } -static int amdgpu_late_init(struct amdgpu_device *adev) +static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) { int i = 0, r; @@ -1590,12 +1590,12 @@ static int amdgpu_late_init(struct amdgpu_device *adev) mod_delayed_work(system_wq, &adev->late_init_work, msecs_to_jiffies(AMDGPU_RESUME_MS)); - amdgpu_fill_reset_magic(adev); + amdgpu_device_fill_reset_magic(adev); return 0; } -static int amdgpu_fini(struct amdgpu_device *adev) +static int amdgpu_device_ip_fini(struct amdgpu_device *adev) { int i, r; @@ -1629,8 +1629,8 @@ static int amdgpu_fini(struct amdgpu_device *adev) continue; if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { amdgpu_free_static_csa(adev); - amdgpu_wb_fini(adev); - amdgpu_vram_scratch_fini(adev); + amdgpu_device_wb_fini(adev); + amdgpu_device_vram_scratch_fini(adev); } if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && @@ -1683,11 +1683,11 @@ static int amdgpu_fini(struct amdgpu_device *adev) return 0; } -static void amdgpu_late_init_func_handler(struct work_struct *work) +static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, late_init_work.work); - amdgpu_late_set_cg_state(adev); + amdgpu_device_ip_late_set_cg_state(adev); } int amdgpu_suspend(struct amdgpu_device *adev) @@ -1731,7 +1731,7 @@ int amdgpu_suspend(struct amdgpu_device *adev) return 0; } -static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev) +static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) { int i, r; @@ -1760,7 +1760,7 @@ static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev) return 0; } -static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) +static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) { int i, r; @@ -1793,7 +1793,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) return 0; } -static int amdgpu_resume_phase1(struct amdgpu_device *adev) +static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) { int i, r; @@ -1816,7 +1816,7 @@ static int amdgpu_resume_phase1(struct amdgpu_device *adev) return 0; } -static int amdgpu_resume_phase2(struct amdgpu_device *adev) +static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) { int i, r; @@ -1838,14 +1838,14 @@ static int amdgpu_resume_phase2(struct amdgpu_device *adev) return 0; } -static int amdgpu_resume(struct amdgpu_device *adev) +static int amdgpu_device_ip_resume(struct amdgpu_device *adev) { int r; - r = amdgpu_resume_phase1(adev); + r = amdgpu_device_ip_resume_phase1(adev); if (r) return r; - r = amdgpu_resume_phase2(adev); + r = amdgpu_device_ip_resume_phase2(adev); return r; } @@ -1984,7 +1984,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, hash_init(adev->mn_hash); mutex_init(&adev->lock_reset); - amdgpu_check_arguments(adev); + amdgpu_device_check_arguments(adev); spin_lock_init(&adev->mmio_idx_lock); spin_lock_init(&adev->smc_idx_lock); @@ -2002,7 +2002,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&adev->ring_lru_list); spin_lock_init(&adev->ring_lru_list_lock); - INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler); + INIT_DELAYED_WORK(&adev->late_init_work, + amdgpu_device_ip_late_init_func_handler); /* Registers mapping */ /* 
TODO: block userspace mapping of io register */ @@ -2022,7 +2023,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); /* doorbell bar mapping */ - amdgpu_doorbell_init(adev); + amdgpu_device_doorbell_init(adev); /* io port mapping */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { @@ -2036,14 +2037,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("PCI I/O BAR is not found.\n"); /* early init functions */ - r = amdgpu_early_init(adev); + r = amdgpu_device_ip_early_init(adev); if (r) return r; /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ /* this will fail for cards that aren't VGA class devices, just * ignore it */ - vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode); + vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); if (amdgpu_runtime_pm == 1) runtime = true; @@ -2118,7 +2119,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* init the mode config */ drm_mode_config_init(adev->ddev); - r = amdgpu_init(adev); + r = amdgpu_device_ip_init(adev); if (r) { /* failed in exclusive mode due to timeout */ if (amdgpu_sriov_vf(adev) && @@ -2132,9 +2133,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, r = -EAGAIN; goto failed; } - dev_err(adev->dev, "amdgpu_init failed\n"); + dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); - amdgpu_fini(adev); + amdgpu_device_ip_fini(adev); goto failed; } @@ -2202,9 +2203,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* enable clockgating, etc. after ib tests, etc. since some blocks require * explicit gating rather than handling it automatically. */ - r = amdgpu_late_init(adev); + r = amdgpu_device_ip_late_init(adev); if (r) { - dev_err(adev->dev, "amdgpu_late_init failed\n"); + dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); goto failed; } @@ -2239,7 +2240,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_ib_pool_fini(adev); amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); - r = amdgpu_fini(adev); + r = amdgpu_device_ip_fini(adev); if (adev->firmware.gpu_info_fw) { release_firmware(adev->firmware.gpu_info_fw); adev->firmware.gpu_info_fw = NULL; @@ -2262,7 +2263,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->rio_mem = NULL; iounmap(adev->rmmio); adev->rmmio = NULL; - amdgpu_doorbell_fini(adev); + amdgpu_device_doorbell_fini(adev); amdgpu_pm_sysfs_fini(adev); amdgpu_debugfs_regs_cleanup(adev); } @@ -2407,9 +2408,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) DRM_ERROR("amdgpu asic init failed\n"); } - r = amdgpu_resume(adev); + r = amdgpu_device_ip_resume(adev); if (r) { - DRM_ERROR("amdgpu_resume failed (%d).\n", r); + DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r); goto unlock; } amdgpu_fence_driver_resume(adev); @@ -2420,7 +2421,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) DRM_ERROR("ib ring test failed (%d).\n", r); } - r = amdgpu_late_init(adev); + r = amdgpu_device_ip_late_init(adev); if (r) goto unlock; @@ -2500,7 +2501,7 @@ unlock: return r; } -static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) +static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) { int i; bool asic_hang = false; @@ -2522,7 +2523,7 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) return asic_hang; } -static 
int amdgpu_pre_soft_reset(struct amdgpu_device *adev) +static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) { int i, r = 0; @@ -2540,7 +2541,7 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) return 0; } -static bool amdgpu_need_full_reset(struct amdgpu_device *adev) +static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) { int i; @@ -2561,7 +2562,7 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev) return false; } -static int amdgpu_soft_reset(struct amdgpu_device *adev) +static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) { int i, r = 0; @@ -2579,7 +2580,7 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev) return 0; } -static int amdgpu_post_soft_reset(struct amdgpu_device *adev) +static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) { int i, r = 0; @@ -2604,10 +2605,10 @@ bool amdgpu_need_backup(struct amdgpu_device *adev) return amdgpu_gpu_recovery; } -static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, - struct amdgpu_ring *ring, - struct amdgpu_bo *bo, - struct dma_fence **fence) +static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct dma_fence **fence) { uint32_t domain; int r; @@ -2640,7 +2641,7 @@ err: } /* - * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough + * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough * * @adev: amdgpu device pointer * @reset_flags: output param tells caller the reset result @@ -2648,18 +2649,19 @@ err: * attempt to do soft-reset or full-reset and reinitialize Asic * return 0 means successed otherwise failed */ -static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags) +static int amdgpu_device_reset(struct amdgpu_device *adev, + uint64_t* reset_flags) { bool need_full_reset, vram_lost = 0; int r; - need_full_reset = amdgpu_need_full_reset(adev); + need_full_reset = amdgpu_device_ip_need_full_reset(adev); if (!need_full_reset) { - amdgpu_pre_soft_reset(adev); - r = amdgpu_soft_reset(adev); - amdgpu_post_soft_reset(adev); - if (r || amdgpu_check_soft_reset(adev)) { + amdgpu_device_ip_pre_soft_reset(adev); + r = amdgpu_device_ip_soft_reset(adev); + amdgpu_device_ip_post_soft_reset(adev); + if (r || amdgpu_device_ip_check_soft_reset(adev)) { DRM_INFO("soft reset failed, will fallback to full reset!\n"); need_full_reset = true; } @@ -2676,11 +2678,11 @@ retry: if (!r) { dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); - r = amdgpu_resume_phase1(adev); + r = amdgpu_device_ip_resume_phase1(adev); if (r) goto out; - vram_lost = amdgpu_check_vram_lost(adev); + vram_lost = amdgpu_device_check_vram_lost(adev); if (vram_lost) { DRM_ERROR("VRAM is lost!\n"); atomic_inc(&adev->vram_lost_counter); @@ -2691,12 +2693,12 @@ retry: if (r) goto out; - r = amdgpu_resume_phase2(adev); + r = amdgpu_device_ip_resume_phase2(adev); if (r) goto out; if (vram_lost) - amdgpu_fill_reset_magic(adev); + amdgpu_device_fill_reset_magic(adev); } } @@ -2724,7 +2726,7 @@ out: } /* - * amdgpu_reset_sriov - reset ASIC for SR-IOV vf + * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf * * @adev: amdgpu device pointer * @reset_flags: output param tells caller the reset result @@ -2732,7 +2734,9 @@ out: * do VF FLR and reinitialize Asic * return 0 means successed otherwise failed */ -static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor) +static int 
amdgpu_device_reset_sriov(struct amdgpu_device *adev, + uint64_t *reset_flags, + bool from_hypervisor) { int r; @@ -2744,7 +2748,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, return r; /* Resume IP prior to SMC */ - r = amdgpu_sriov_reinit_early(adev); + r = amdgpu_device_ip_reinit_early_sriov(adev); if (r) goto error; @@ -2752,7 +2756,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); /* now we are okay to resume SMC/CP/SDMA */ - r = amdgpu_sriov_reinit_late(adev); + r = amdgpu_device_ip_reinit_late_sriov(adev); if (r) goto error; @@ -2794,7 +2798,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool uint64_t reset_flags = 0; int i, r, resched; - if (!amdgpu_check_soft_reset(adev)) { + if (!amdgpu_device_ip_check_soft_reset(adev)) { DRM_INFO("No hardware hang detected. Did some blocks stall?\n"); return 0; } @@ -2836,9 +2840,9 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool } if (amdgpu_sriov_vf(adev)) - r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true); + r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true); else - r = amdgpu_reset(adev, &reset_flags); + r = amdgpu_device_reset(adev, &reset_flags); if (!r) { if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) || @@ -2851,7 +2855,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool mutex_lock(&adev->shadow_list_lock); list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { next = NULL; - amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); + amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next); if (fence) { r = dma_fence_wait(fence, false); if (r) { -- cgit v1.2.3 From cdd61df614851d18b8ee72f0615202bef67f5b91 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Dec 2017 16:47:40 -0500 Subject: drm/amdgpu: rename amdgpu_suspend to amdgpu_device_ip_suspend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit for consistency with the other functions in that file. 
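To make the rename concrete: amdgpu_device_ip_suspend() is the per-IP suspend walk shared by system suspend, full GPU reset, and PCI shutdown (all three call sites appear in the diff below). A minimal sketch of that walk, assuming the usual ip_blocks[] array with its status/funcs fields; this is illustrative only, not the exact kernel code:

	/* Illustrative sketch, not the kernel implementation: suspend IP
	 * blocks in reverse of their init order, so dependent blocks go
	 * down before the blocks they rely on. */
	static int example_device_ip_suspend(struct amdgpu_device *adev)
	{
		int i, r;

		for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
			if (!adev->ip_blocks[i].status.valid)
				continue;
			r = adev->ip_blocks[i].version->funcs->suspend(adev);
			if (r)
				DRM_ERROR("suspend of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
		}
		return 0;
	}

Since every caller funnels through this one walk over the device's IP blocks, the amdgpu_device_* prefix fits.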
Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ffbe99d839a3..169c71d48d49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1961,7 +1961,7 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev); int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); -int amdgpu_suspend(struct amdgpu_device *adev); +int amdgpu_device_ip_suspend(struct amdgpu_device *adev); int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon); int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon); u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 92b5064b3391..3beea322bc12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1690,7 +1690,7 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work) amdgpu_device_ip_late_set_cg_state(adev); } -int amdgpu_suspend(struct amdgpu_device *adev) +int amdgpu_device_ip_suspend(struct amdgpu_device *adev) { int i, r; @@ -2344,7 +2344,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) amdgpu_fence_driver_suspend(adev); - r = amdgpu_suspend(adev); + r = amdgpu_device_ip_suspend(adev); /* evict remaining vram memory * This second call to evict vram is to evict the gart page table @@ -2669,7 +2669,7 @@ static int amdgpu_device_reset(struct amdgpu_device *adev, } if (need_full_reset) { - r = amdgpu_suspend(adev); + r = amdgpu_device_ip_suspend(adev); retry: r = amdgpu_asic_reset(adev); @@ -2708,7 +2708,7 @@ out: r = amdgpu_ib_ring_tests(adev); if (r) { dev_err(adev->dev, "ib ring test failed (%d).\n", r); - r = amdgpu_suspend(adev); + r = amdgpu_device_ip_suspend(adev); need_full_reset = true; goto retry; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 1fc5499cb5fd..50afcf65181a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -649,7 +649,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) * unfortunately we can't detect certain * hypervisors so just do this all the time. */ - amdgpu_suspend(adev); + amdgpu_device_ip_suspend(adev); } static int amdgpu_pmops_suspend(struct device *dev) -- cgit v1.2.3 From 75758255dc0fae76a845fd5185cfcdf60990cc99 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Dec 2017 15:23:14 -0500 Subject: drm/amdgpu: move debugfs functions to their own file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit amdgpu_device.c was getting pretty cluttered. 
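One practical note on the files being moved: the register interfaces (amdgpu_regs and friends) encode their parameters in the debugfs file offset. As amdgpu_debugfs_regs_read() below shows, the low 22 bits select the MMIO byte offset, bit 23 takes the power-gating lock, and bit 62 enables SE/SH/instance banking (a bank field of 0x3FF means broadcast). A hypothetical userspace sketch of a plain, unbanked register read; the debugfs path and register offset are illustrative:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical example: card 0, register byte offset 0x2000. */
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
		uint32_t value;
		off_t pos = 0x2000;

		if (fd < 0) {
			perror("open amdgpu_regs");
			return 1;
		}
		/* A banked read would OR the bank fields into the offset:
		 * pos |= (1ULL << 62) | ((uint64_t)se << 24) |
		 *        ((uint64_t)sh << 34) | ((uint64_t)instance << 44); */
		if (pread(fd, &value, sizeof(value), pos) == sizeof(value))
			printf("reg 0x%lx = 0x%08x\n", (long)(pos >> 2), value);
		close(fd);
		return 0;
	}

Each 4-byte read covers one register and the handler steps the offset by 4 per value, so a larger request dumps a consecutive register range in one call.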
Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 16 +- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 792 ++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h | 42 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 769 +-------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2 +- 6 files changed, 838 insertions(+), 785 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index f778a3b4abe6..d8da12c114b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -52,7 +52,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ - amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o + amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 169c71d48d49..c15b9441190f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -71,7 +71,7 @@ #include "amdgpu_dm.h" #include "amdgpu_virt.h" #include "amdgpu_gart.h" - +#include "amdgpu_debugfs.h" /* * Modules parameters. @@ -425,7 +425,6 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *); void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj); void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); -int amdgpu_gem_debugfs_init(struct amdgpu_device *adev); /* sub-allocation manager, it has to be protected by another lock. * By conception this is an helper for other part of the driver @@ -1240,19 +1239,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number); */ void amdgpu_test_moves(struct amdgpu_device *adev); -/* - * Debugfs - */ -struct amdgpu_debugfs { - const struct drm_info_list *files; - unsigned num_files; -}; - -int amdgpu_debugfs_add_files(struct amdgpu_device *adev, - const struct drm_info_list *files, - unsigned nfiles); -int amdgpu_debugfs_fence_init(struct amdgpu_device *adev); -int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); /* * amdgpu smumgr functions diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c new file mode 100644 index 000000000000..ee76b468774a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -0,0 +1,792 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include +#include +#include "amdgpu.h" + +/* + * Debugfs + */ +int amdgpu_debugfs_add_files(struct amdgpu_device *adev, + const struct drm_info_list *files, + unsigned nfiles) +{ + unsigned i; + + for (i = 0; i < adev->debugfs_count; i++) { + if (adev->debugfs[i].files == files) { + /* Already registered */ + return 0; + } + } + + i = adev->debugfs_count + 1; + if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) { + DRM_ERROR("Reached maximum number of debugfs components.\n"); + DRM_ERROR("Report so we increase " + "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n"); + return -EINVAL; + } + adev->debugfs[adev->debugfs_count].files = files; + adev->debugfs[adev->debugfs_count].num_files = nfiles; + adev->debugfs_count = i; +#if defined(CONFIG_DEBUG_FS) + drm_debugfs_create_files(files, nfiles, + adev->ddev->primary->debugfs_root, + adev->ddev->primary); +#endif + return 0; +} + +#if defined(CONFIG_DEBUG_FS) + +static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + bool pm_pg_lock, use_bank; + unsigned instance_bank, sh_bank, se_bank; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + /* are we reading registers for which a PG lock is necessary? 
*/ + pm_pg_lock = (*pos >> 23) & 1; + + if (*pos & (1ULL << 62)) { + se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; + sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; + instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; + + if (se_bank == 0x3FF) + se_bank = 0xFFFFFFFF; + if (sh_bank == 0x3FF) + sh_bank = 0xFFFFFFFF; + if (instance_bank == 0x3FF) + instance_bank = 0xFFFFFFFF; + use_bank = 1; + } else { + use_bank = 0; + } + + *pos &= (1UL << 22) - 1; + + if (use_bank) { + if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || + (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) + return -EINVAL; + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, se_bank, + sh_bank, instance_bank); + } + + if (pm_pg_lock) + mutex_lock(&adev->pm.mutex); + + while (size) { + uint32_t value; + + if (*pos > adev->rmmio_size) + goto end; + + value = RREG32(*pos >> 2); + r = put_user(value, (uint32_t *)buf); + if (r) { + result = r; + goto end; + } + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + +end: + if (use_bank) { + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + } + + if (pm_pg_lock) + mutex_unlock(&adev->pm.mutex); + + return result; +} + +static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + bool pm_pg_lock, use_bank; + unsigned instance_bank, sh_bank, se_bank; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + /* are we reading registers for which a PG lock is necessary? */ + pm_pg_lock = (*pos >> 23) & 1; + + if (*pos & (1ULL << 62)) { + se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; + sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; + instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; + + if (se_bank == 0x3FF) + se_bank = 0xFFFFFFFF; + if (sh_bank == 0x3FF) + sh_bank = 0xFFFFFFFF; + if (instance_bank == 0x3FF) + instance_bank = 0xFFFFFFFF; + use_bank = 1; + } else { + use_bank = 0; + } + + *pos &= (1UL << 22) - 1; + + if (use_bank) { + if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || + (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) + return -EINVAL; + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, se_bank, + sh_bank, instance_bank); + } + + if (pm_pg_lock) + mutex_lock(&adev->pm.mutex); + + while (size) { + uint32_t value; + + if (*pos > adev->rmmio_size) + return result; + + r = get_user(value, (uint32_t *)buf); + if (r) + return r; + + WREG32(*pos >> 2, value); + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + if (use_bank) { + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + } + + if (pm_pg_lock) + mutex_unlock(&adev->pm.mutex); + + return result; +} + +static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + value = RREG32_PCIE(*pos >> 2); + r = put_user(value, (uint32_t *)buf); + if (r) + return r; + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = 
file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + r = get_user(value, (uint32_t *)buf); + if (r) + return r; + + WREG32_PCIE(*pos >> 2, value); + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + value = RREG32_DIDT(*pos >> 2); + r = put_user(value, (uint32_t *)buf); + if (r) + return r; + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + r = get_user(value, (uint32_t *)buf); + if (r) + return r; + + WREG32_DIDT(*pos >> 2, value); + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + value = RREG32_SMC(*pos); + r = put_user(value, (uint32_t *)buf); + if (r) + return r; + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + while (size) { + uint32_t value; + + r = get_user(value, (uint32_t *)buf); + if (r) + return r; + + WREG32_SMC(*pos, value); + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + ssize_t result = 0; + int r; + uint32_t *config, no_regs = 0; + + if (size & 0x3 || *pos & 0x3) + return -EINVAL; + + config = kmalloc_array(256, sizeof(*config), GFP_KERNEL); + if (!config) + return -ENOMEM; + + /* version, increment each time something is added */ + config[no_regs++] = 3; + config[no_regs++] = adev->gfx.config.max_shader_engines; + config[no_regs++] = adev->gfx.config.max_tile_pipes; + config[no_regs++] = adev->gfx.config.max_cu_per_sh; + config[no_regs++] = adev->gfx.config.max_sh_per_se; + config[no_regs++] = adev->gfx.config.max_backends_per_se; + config[no_regs++] = adev->gfx.config.max_texture_channel_caches; + config[no_regs++] = adev->gfx.config.max_gprs; + config[no_regs++] = adev->gfx.config.max_gs_threads; + config[no_regs++] = adev->gfx.config.max_hw_contexts; + config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend; + config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend; + config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size; + config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size; + config[no_regs++] = adev->gfx.config.num_tile_pipes; + config[no_regs++] = adev->gfx.config.backend_enable_mask; + 
config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes; + config[no_regs++] = adev->gfx.config.mem_row_size_in_kb; + config[no_regs++] = adev->gfx.config.shader_engine_tile_size; + config[no_regs++] = adev->gfx.config.num_gpus; + config[no_regs++] = adev->gfx.config.multi_gpu_tile_size; + config[no_regs++] = adev->gfx.config.mc_arb_ramcfg; + config[no_regs++] = adev->gfx.config.gb_addr_config; + config[no_regs++] = adev->gfx.config.num_rbs; + + /* rev==1 */ + config[no_regs++] = adev->rev_id; + config[no_regs++] = adev->pg_flags; + config[no_regs++] = adev->cg_flags; + + /* rev==2 */ + config[no_regs++] = adev->family; + config[no_regs++] = adev->external_rev_id; + + /* rev==3 */ + config[no_regs++] = adev->pdev->device; + config[no_regs++] = adev->pdev->revision; + config[no_regs++] = adev->pdev->subsystem_device; + config[no_regs++] = adev->pdev->subsystem_vendor; + + while (size && (*pos < no_regs * 4)) { + uint32_t value; + + value = config[*pos >> 2]; + r = put_user(value, (uint32_t *)buf); + if (r) { + kfree(config); + return r; + } + + result += 4; + buf += 4; + *pos += 4; + size -= 4; + } + + kfree(config); + return result; +} + +static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + int idx, x, outsize, r, valuesize; + uint32_t values[16]; + + if (size & 3 || *pos & 0x3) + return -EINVAL; + + if (amdgpu_dpm == 0) + return -EINVAL; + + /* convert offset to sensor number */ + idx = *pos >> 2; + + valuesize = sizeof(values); + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) + r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); + else + return -EINVAL; + + if (size > valuesize) + return -EINVAL; + + outsize = 0; + x = 0; + if (!r) { + while (size) { + r = put_user(values[x++], (int32_t *)buf); + buf += 4; + size -= 4; + outsize += 4; + } + } + + return !r ? 
outsize : r; +} + +static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + int r, x; + ssize_t result=0; + uint32_t offset, se, sh, cu, wave, simd, data[32]; + + if (size & 3 || *pos & 3) + return -EINVAL; + + /* decode offset */ + offset = (*pos & GENMASK_ULL(6, 0)); + se = (*pos & GENMASK_ULL(14, 7)) >> 7; + sh = (*pos & GENMASK_ULL(22, 15)) >> 15; + cu = (*pos & GENMASK_ULL(30, 23)) >> 23; + wave = (*pos & GENMASK_ULL(36, 31)) >> 31; + simd = (*pos & GENMASK_ULL(44, 37)) >> 37; + + /* switch to the specific se/sh/cu */ + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, se, sh, cu); + + x = 0; + if (adev->gfx.funcs->read_wave_data) + adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); + + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + mutex_unlock(&adev->grbm_idx_mutex); + + if (!x) + return -EINVAL; + + while (size && (offset < x * 4)) { + uint32_t value; + + value = data[offset >> 2]; + r = put_user(value, (uint32_t *)buf); + if (r) + return r; + + result += 4; + buf += 4; + offset += 4; + size -= 4; + } + + return result; +} + +static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = f->f_inode->i_private; + int r; + ssize_t result = 0; + uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; + + if (size & 3 || *pos & 3) + return -EINVAL; + + /* decode offset */ + offset = *pos & GENMASK_ULL(11, 0); + se = (*pos & GENMASK_ULL(19, 12)) >> 12; + sh = (*pos & GENMASK_ULL(27, 20)) >> 20; + cu = (*pos & GENMASK_ULL(35, 28)) >> 28; + wave = (*pos & GENMASK_ULL(43, 36)) >> 36; + simd = (*pos & GENMASK_ULL(51, 44)) >> 44; + thread = (*pos & GENMASK_ULL(59, 52)) >> 52; + bank = (*pos & GENMASK_ULL(61, 60)) >> 60; + + data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* switch to the specific se/sh/cu */ + mutex_lock(&adev->grbm_idx_mutex); + amdgpu_gfx_select_se_sh(adev, se, sh, cu); + + if (bank == 0) { + if (adev->gfx.funcs->read_wave_vgprs) + adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data); + } else { + if (adev->gfx.funcs->read_wave_sgprs) + adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data); + } + + amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); + mutex_unlock(&adev->grbm_idx_mutex); + + while (size) { + uint32_t value; + + value = data[offset++]; + r = put_user(value, (uint32_t *)buf); + if (r) { + result = r; + goto err; + } + + result += 4; + buf += 4; + size -= 4; + } + +err: + kfree(data); + return result; +} + +static const struct file_operations amdgpu_debugfs_regs_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_regs_read, + .write = amdgpu_debugfs_regs_write, + .llseek = default_llseek +}; +static const struct file_operations amdgpu_debugfs_regs_didt_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_regs_didt_read, + .write = amdgpu_debugfs_regs_didt_write, + .llseek = default_llseek +}; +static const struct file_operations amdgpu_debugfs_regs_pcie_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_regs_pcie_read, + .write = amdgpu_debugfs_regs_pcie_write, + .llseek = default_llseek +}; +static const struct file_operations amdgpu_debugfs_regs_smc_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_regs_smc_read, + .write = amdgpu_debugfs_regs_smc_write, + .llseek = default_llseek +}; + +static const 
struct file_operations amdgpu_debugfs_gca_config_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_gca_config_read, + .llseek = default_llseek +}; + +static const struct file_operations amdgpu_debugfs_sensors_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_sensor_read, + .llseek = default_llseek +}; + +static const struct file_operations amdgpu_debugfs_wave_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_wave_read, + .llseek = default_llseek +}; +static const struct file_operations amdgpu_debugfs_gpr_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_gpr_read, + .llseek = default_llseek +}; + +static const struct file_operations *debugfs_regs[] = { + &amdgpu_debugfs_regs_fops, + &amdgpu_debugfs_regs_didt_fops, + &amdgpu_debugfs_regs_pcie_fops, + &amdgpu_debugfs_regs_smc_fops, + &amdgpu_debugfs_gca_config_fops, + &amdgpu_debugfs_sensors_fops, + &amdgpu_debugfs_wave_fops, + &amdgpu_debugfs_gpr_fops, +}; + +static const char *debugfs_regs_names[] = { + "amdgpu_regs", + "amdgpu_regs_didt", + "amdgpu_regs_pcie", + "amdgpu_regs_smc", + "amdgpu_gca_config", + "amdgpu_sensors", + "amdgpu_wave", + "amdgpu_gpr", +}; + +int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) +{ + struct drm_minor *minor = adev->ddev->primary; + struct dentry *ent, *root = minor->debugfs_root; + unsigned i, j; + + for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { + ent = debugfs_create_file(debugfs_regs_names[i], + S_IFREG | S_IRUGO, root, + adev, debugfs_regs[i]); + if (IS_ERR(ent)) { + for (j = 0; j < i; j++) { + debugfs_remove(adev->debugfs_regs[i]); + adev->debugfs_regs[i] = NULL; + } + return PTR_ERR(ent); + } + + if (!i) + i_size_write(ent->d_inode, adev->rmmio_size); + adev->debugfs_regs[i] = ent; + } + + return 0; +} + +void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) +{ + unsigned i; + + for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { + if (adev->debugfs_regs[i]) { + debugfs_remove(adev->debugfs_regs[i]); + adev->debugfs_regs[i] = NULL; + } + } +} + +static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + int r = 0, i; + + /* hold on the scheduler */ + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (!ring || !ring->sched.thread) + continue; + kthread_park(ring->sched.thread); + } + + seq_printf(m, "run ib test:\n"); + r = amdgpu_ib_ring_tests(adev); + if (r) + seq_printf(m, "ib ring tests failed (%d).\n", r); + else + seq_printf(m, "ib ring tests passed.\n"); + + /* go on the scheduler */ + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (!ring || !ring->sched.thread) + continue; + kthread_unpark(ring->sched.thread); + } + + return 0; +} + +static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + + seq_write(m, adev->bios, adev->bios_size); + return 0; +} + +static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct drm_device *dev = node->minor->dev; + struct amdgpu_device *adev = dev->dev_private; + + seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev)); + return 0; +} + +static const struct drm_info_list amdgpu_debugfs_list[] = { 
+ {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump}, + {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}, + {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram} +}; + +int amdgpu_debugfs_init(struct amdgpu_device *adev) +{ + return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list, + ARRAY_SIZE(amdgpu_debugfs_list)); +} + +#else +int amdgpu_debugfs_init(struct amdgpu_device *adev) +{ + return 0; +} +int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) +{ + return 0; +} +void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h new file mode 100644 index 000000000000..8260d8073c26 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h @@ -0,0 +1,42 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +/* + * Debugfs + */ +struct amdgpu_debugfs { + const struct drm_info_list *files; + unsigned num_files; +}; + +int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); +void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); +int amdgpu_debugfs_init(struct amdgpu_device *adev); +int amdgpu_debugfs_add_files(struct amdgpu_device *adev, + const struct drm_info_list *files, + unsigned nfiles); +int amdgpu_debugfs_fence_init(struct amdgpu_device *adev); +int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); +int amdgpu_debugfs_gem_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3beea322bc12..ec078a9a5de8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -64,10 +63,6 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 -static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); -static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); -static int amdgpu_debugfs_init(struct amdgpu_device *adev); - static const char *amdgpu_asic_name[] = { "TAHITI", "PITCAIRN", @@ -2171,7 +2166,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) DRM_ERROR("registering pm debugfs failed (%d).\n", r); - r = amdgpu_gem_debugfs_init(adev); + r = amdgpu_debugfs_gem_init(adev); if (r) DRM_ERROR("registering gem debugfs failed (%d).\n", r); @@ -3020,765 +3015,3 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev) } } -/* - * Debugfs - */ -int amdgpu_debugfs_add_files(struct amdgpu_device *adev, - const struct drm_info_list *files, - unsigned nfiles) -{ - unsigned i; - - for (i = 0; i < adev->debugfs_count; i++) { - if (adev->debugfs[i].files == files) { - /* Already registered */ - return 0; - } - } - - i = adev->debugfs_count + 1; - if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) { - DRM_ERROR("Reached maximum number of debugfs components.\n"); - DRM_ERROR("Report so we increase " - "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n"); - return -EINVAL; - } - adev->debugfs[adev->debugfs_count].files = files; - adev->debugfs[adev->debugfs_count].num_files = nfiles; - adev->debugfs_count = i; -#if defined(CONFIG_DEBUG_FS) - drm_debugfs_create_files(files, nfiles, - adev->ddev->primary->debugfs_root, - adev->ddev->primary); -#endif - return 0; -} - -#if defined(CONFIG_DEBUG_FS) - -static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result = 0; - int r; - bool pm_pg_lock, use_bank; - unsigned instance_bank, sh_bank, se_bank; - - if (size & 0x3 || *pos & 0x3) - return -EINVAL; - - /* are we reading registers for which a PG lock is necessary? 
*/ - pm_pg_lock = (*pos >> 23) & 1; - - if (*pos & (1ULL << 62)) { - se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; - sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; - instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; - - if (se_bank == 0x3FF) - se_bank = 0xFFFFFFFF; - if (sh_bank == 0x3FF) - sh_bank = 0xFFFFFFFF; - if (instance_bank == 0x3FF) - instance_bank = 0xFFFFFFFF; - use_bank = 1; - } else { - use_bank = 0; - } - - *pos &= (1UL << 22) - 1; - - if (use_bank) { - if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || - (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) - return -EINVAL; - mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se_bank, - sh_bank, instance_bank); - } - - if (pm_pg_lock) - mutex_lock(&adev->pm.mutex); - - while (size) { - uint32_t value; - - if (*pos > adev->rmmio_size) - goto end; - - value = RREG32(*pos >> 2); - r = put_user(value, (uint32_t *)buf); - if (r) { - result = r; - goto end; - } - - result += 4; - buf += 4; - *pos += 4; - size -= 4; - } - -end: - if (use_bank) { - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); - } - - if (pm_pg_lock) - mutex_unlock(&adev->pm.mutex); - - return result; -} - -static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result = 0; - int r; - bool pm_pg_lock, use_bank; - unsigned instance_bank, sh_bank, se_bank; - - if (size & 0x3 || *pos & 0x3) - return -EINVAL; - - /* are we reading registers for which a PG lock is necessary? */ - pm_pg_lock = (*pos >> 23) & 1; - - if (*pos & (1ULL << 62)) { - se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; - sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; - instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; - - if (se_bank == 0x3FF) - se_bank = 0xFFFFFFFF; - if (sh_bank == 0x3FF) - sh_bank = 0xFFFFFFFF; - if (instance_bank == 0x3FF) - instance_bank = 0xFFFFFFFF; - use_bank = 1; - } else { - use_bank = 0; - } - - *pos &= (1UL << 22) - 1; - - if (use_bank) { - if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || - (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) - return -EINVAL; - mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se_bank, - sh_bank, instance_bank); - } - - if (pm_pg_lock) - mutex_lock(&adev->pm.mutex); - - while (size) { - uint32_t value; - - if (*pos > adev->rmmio_size) - return result; - - r = get_user(value, (uint32_t *)buf); - if (r) - return r; - - WREG32(*pos >> 2, value); - - result += 4; - buf += 4; - *pos += 4; - size -= 4; - } - - if (use_bank) { - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); - } - - if (pm_pg_lock) - mutex_unlock(&adev->pm.mutex); - - return result; -} - -static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result = 0; - int r; - - if (size & 0x3 || *pos & 0x3) - return -EINVAL; - - while (size) { - uint32_t value; - - value = RREG32_PCIE(*pos >> 2); - r = put_user(value, (uint32_t *)buf); - if (r) - return r; - - result += 4; - buf += 4; - *pos += 4; - size -= 4; - } - - return result; -} - -static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = 
file_inode(f)->i_private; - ssize_t result = 0; - int r; - - if (size & 0x3 || *pos & 0x3) - return -EINVAL; - - while (size) { - uint32_t value; - - r = get_user(value, (uint32_t *)buf); - if (r) - return r; - - WREG32_PCIE(*pos >> 2, value); - - result += 4; - buf += 4; - *pos += 4; - size -= 4; - } - - return result; -} - -static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result = 0; - int r; - - if (size & 0x3 || *pos & 0x3) - return -EINVAL; - - while (size) { - uint32_t value; - - value = RREG32_DIDT(*pos >> 2); - r = put_user(value, (uint32_t *)buf); - if (r) - return r; - - result += 4; - buf += 4; - *pos += 4; - size -= 4; - } - - return result; -} - -static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result = 0; - int r; - - if (size & 0x3 || *pos & 0x3) - return -EINVAL; - - while (size) { - uint32_t value; - - r = get_user(value, (uint32_t *)buf); - if (r) - return r; - - WREG32_DIDT(*pos >> 2, value); - - result += 4; - buf += 4; - *pos += 4; - size -= 4; - } - - return result; -} - -static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result = 0; - int r; - - if (size & 0x3 || *pos & 0x3) - return -EINVAL; - - while (size) { - uint32_t value; - - value = RREG32_SMC(*pos); - r = put_user(value, (uint32_t *)buf); - if (r) - return r; - - result += 4; - buf += 4; - *pos += 4; - size -= 4; - } - - return result; -} - -static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result = 0; - int r; - - if (size & 0x3 || *pos & 0x3) - return -EINVAL; - - while (size) { - uint32_t value; - - r = get_user(value, (uint32_t *)buf); - if (r) - return r; - - WREG32_SMC(*pos, value); - - result += 4; - buf += 4; - *pos += 4; - size -= 4; - } - - return result; -} - -static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - ssize_t result = 0; - int r; - uint32_t *config, no_regs = 0; - - if (size & 0x3 || *pos & 0x3) - return -EINVAL; - - config = kmalloc_array(256, sizeof(*config), GFP_KERNEL); - if (!config) - return -ENOMEM; - - /* version, increment each time something is added */ - config[no_regs++] = 3; - config[no_regs++] = adev->gfx.config.max_shader_engines; - config[no_regs++] = adev->gfx.config.max_tile_pipes; - config[no_regs++] = adev->gfx.config.max_cu_per_sh; - config[no_regs++] = adev->gfx.config.max_sh_per_se; - config[no_regs++] = adev->gfx.config.max_backends_per_se; - config[no_regs++] = adev->gfx.config.max_texture_channel_caches; - config[no_regs++] = adev->gfx.config.max_gprs; - config[no_regs++] = adev->gfx.config.max_gs_threads; - config[no_regs++] = adev->gfx.config.max_hw_contexts; - config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend; - config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend; - config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size; - config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size; - config[no_regs++] = adev->gfx.config.num_tile_pipes; - config[no_regs++] = adev->gfx.config.backend_enable_mask; - 
config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes; - config[no_regs++] = adev->gfx.config.mem_row_size_in_kb; - config[no_regs++] = adev->gfx.config.shader_engine_tile_size; - config[no_regs++] = adev->gfx.config.num_gpus; - config[no_regs++] = adev->gfx.config.multi_gpu_tile_size; - config[no_regs++] = adev->gfx.config.mc_arb_ramcfg; - config[no_regs++] = adev->gfx.config.gb_addr_config; - config[no_regs++] = adev->gfx.config.num_rbs; - - /* rev==1 */ - config[no_regs++] = adev->rev_id; - config[no_regs++] = adev->pg_flags; - config[no_regs++] = adev->cg_flags; - - /* rev==2 */ - config[no_regs++] = adev->family; - config[no_regs++] = adev->external_rev_id; - - /* rev==3 */ - config[no_regs++] = adev->pdev->device; - config[no_regs++] = adev->pdev->revision; - config[no_regs++] = adev->pdev->subsystem_device; - config[no_regs++] = adev->pdev->subsystem_vendor; - - while (size && (*pos < no_regs * 4)) { - uint32_t value; - - value = config[*pos >> 2]; - r = put_user(value, (uint32_t *)buf); - if (r) { - kfree(config); - return r; - } - - result += 4; - buf += 4; - *pos += 4; - size -= 4; - } - - kfree(config); - return result; -} - -static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = file_inode(f)->i_private; - int idx, x, outsize, r, valuesize; - uint32_t values[16]; - - if (size & 3 || *pos & 0x3) - return -EINVAL; - - if (amdgpu_dpm == 0) - return -EINVAL; - - /* convert offset to sensor number */ - idx = *pos >> 2; - - valuesize = sizeof(values); - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) - r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize); - else - return -EINVAL; - - if (size > valuesize) - return -EINVAL; - - outsize = 0; - x = 0; - if (!r) { - while (size) { - r = put_user(values[x++], (int32_t *)buf); - buf += 4; - size -= 4; - outsize += 4; - } - } - - return !r ? 
outsize : r; -} - -static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = f->f_inode->i_private; - int r, x; - ssize_t result=0; - uint32_t offset, se, sh, cu, wave, simd, data[32]; - - if (size & 3 || *pos & 3) - return -EINVAL; - - /* decode offset */ - offset = (*pos & GENMASK_ULL(6, 0)); - se = (*pos & GENMASK_ULL(14, 7)) >> 7; - sh = (*pos & GENMASK_ULL(22, 15)) >> 15; - cu = (*pos & GENMASK_ULL(30, 23)) >> 23; - wave = (*pos & GENMASK_ULL(36, 31)) >> 31; - simd = (*pos & GENMASK_ULL(44, 37)) >> 37; - - /* switch to the specific se/sh/cu */ - mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se, sh, cu); - - x = 0; - if (adev->gfx.funcs->read_wave_data) - adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x); - - amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); - mutex_unlock(&adev->grbm_idx_mutex); - - if (!x) - return -EINVAL; - - while (size && (offset < x * 4)) { - uint32_t value; - - value = data[offset >> 2]; - r = put_user(value, (uint32_t *)buf); - if (r) - return r; - - result += 4; - buf += 4; - offset += 4; - size -= 4; - } - - return result; -} - -static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, - size_t size, loff_t *pos) -{ - struct amdgpu_device *adev = f->f_inode->i_private; - int r; - ssize_t result = 0; - uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; - - if (size & 3 || *pos & 3) - return -EINVAL; - - /* decode offset */ - offset = *pos & GENMASK_ULL(11, 0); - se = (*pos & GENMASK_ULL(19, 12)) >> 12; - sh = (*pos & GENMASK_ULL(27, 20)) >> 20; - cu = (*pos & GENMASK_ULL(35, 28)) >> 28; - wave = (*pos & GENMASK_ULL(43, 36)) >> 36; - simd = (*pos & GENMASK_ULL(51, 44)) >> 44; - thread = (*pos & GENMASK_ULL(59, 52)) >> 52; - bank = (*pos & GENMASK_ULL(61, 60)) >> 60; - - data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - /* switch to the specific se/sh/cu */ - mutex_lock(&adev->grbm_idx_mutex); - amdgpu_gfx_select_se_sh(adev, se, sh, cu); - - if (bank == 0) { - if (adev->gfx.funcs->read_wave_vgprs) - adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data); - } else { - if (adev->gfx.funcs->read_wave_sgprs) - adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data); - } - - amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF); - mutex_unlock(&adev->grbm_idx_mutex); - - while (size) { - uint32_t value; - - value = data[offset++]; - r = put_user(value, (uint32_t *)buf); - if (r) { - result = r; - goto err; - } - - result += 4; - buf += 4; - size -= 4; - } - -err: - kfree(data); - return result; -} - -static const struct file_operations amdgpu_debugfs_regs_fops = { - .owner = THIS_MODULE, - .read = amdgpu_debugfs_regs_read, - .write = amdgpu_debugfs_regs_write, - .llseek = default_llseek -}; -static const struct file_operations amdgpu_debugfs_regs_didt_fops = { - .owner = THIS_MODULE, - .read = amdgpu_debugfs_regs_didt_read, - .write = amdgpu_debugfs_regs_didt_write, - .llseek = default_llseek -}; -static const struct file_operations amdgpu_debugfs_regs_pcie_fops = { - .owner = THIS_MODULE, - .read = amdgpu_debugfs_regs_pcie_read, - .write = amdgpu_debugfs_regs_pcie_write, - .llseek = default_llseek -}; -static const struct file_operations amdgpu_debugfs_regs_smc_fops = { - .owner = THIS_MODULE, - .read = amdgpu_debugfs_regs_smc_read, - .write = amdgpu_debugfs_regs_smc_write, - .llseek = default_llseek -}; - -static const 
struct file_operations amdgpu_debugfs_gca_config_fops = { - .owner = THIS_MODULE, - .read = amdgpu_debugfs_gca_config_read, - .llseek = default_llseek -}; - -static const struct file_operations amdgpu_debugfs_sensors_fops = { - .owner = THIS_MODULE, - .read = amdgpu_debugfs_sensor_read, - .llseek = default_llseek -}; - -static const struct file_operations amdgpu_debugfs_wave_fops = { - .owner = THIS_MODULE, - .read = amdgpu_debugfs_wave_read, - .llseek = default_llseek -}; -static const struct file_operations amdgpu_debugfs_gpr_fops = { - .owner = THIS_MODULE, - .read = amdgpu_debugfs_gpr_read, - .llseek = default_llseek -}; - -static const struct file_operations *debugfs_regs[] = { - &amdgpu_debugfs_regs_fops, - &amdgpu_debugfs_regs_didt_fops, - &amdgpu_debugfs_regs_pcie_fops, - &amdgpu_debugfs_regs_smc_fops, - &amdgpu_debugfs_gca_config_fops, - &amdgpu_debugfs_sensors_fops, - &amdgpu_debugfs_wave_fops, - &amdgpu_debugfs_gpr_fops, -}; - -static const char *debugfs_regs_names[] = { - "amdgpu_regs", - "amdgpu_regs_didt", - "amdgpu_regs_pcie", - "amdgpu_regs_smc", - "amdgpu_gca_config", - "amdgpu_sensors", - "amdgpu_wave", - "amdgpu_gpr", -}; - -static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) -{ - struct drm_minor *minor = adev->ddev->primary; - struct dentry *ent, *root = minor->debugfs_root; - unsigned i, j; - - for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { - ent = debugfs_create_file(debugfs_regs_names[i], - S_IFREG | S_IRUGO, root, - adev, debugfs_regs[i]); - if (IS_ERR(ent)) { - for (j = 0; j < i; j++) { - debugfs_remove(adev->debugfs_regs[i]); - adev->debugfs_regs[i] = NULL; - } - return PTR_ERR(ent); - } - - if (!i) - i_size_write(ent->d_inode, adev->rmmio_size); - adev->debugfs_regs[i] = ent; - } - - return 0; -} - -static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) -{ - unsigned i; - - for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) { - if (adev->debugfs_regs[i]) { - debugfs_remove(adev->debugfs_regs[i]); - adev->debugfs_regs[i] = NULL; - } - } -} - -static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - int r = 0, i; - - /* hold on the scheduler */ - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!ring || !ring->sched.thread) - continue; - kthread_park(ring->sched.thread); - } - - seq_printf(m, "run ib test:\n"); - r = amdgpu_ib_ring_tests(adev); - if (r) - seq_printf(m, "ib ring tests failed (%d).\n", r); - else - seq_printf(m, "ib ring tests passed.\n"); - - /* go on the scheduler */ - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (!ring || !ring->sched.thread) - continue; - kthread_unpark(ring->sched.thread); - } - - return 0; -} - -static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - - seq_write(m, adev->bios, adev->bios_size); - return 0; -} - -static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data) -{ - struct drm_info_node *node = (struct drm_info_node *)m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - - seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev)); - return 0; -} - -static const struct drm_info_list 
amdgpu_debugfs_list[] = { - {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump}, - {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}, - {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram} -}; - -static int amdgpu_debugfs_init(struct amdgpu_device *adev) -{ - return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list, - ARRAY_SIZE(amdgpu_debugfs_list)); -} - -#else -static int amdgpu_debugfs_init(struct amdgpu_device *adev) -{ - return 0; -} -static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) -{ - return 0; -} -static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index eb75eb44efc6..10805edcf964 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -851,7 +851,7 @@ static const struct drm_info_list amdgpu_debugfs_gem_list[] = { }; #endif -int amdgpu_gem_debugfs_init(struct amdgpu_device *adev) +int amdgpu_debugfs_gem_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1); -- cgit v1.2.3 From 131b4b3686b701079e8fb82eb9384c8acdd3fc72 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Dec 2017 16:03:43 -0500 Subject: drm/amdgpu: rename amdgpu_wb_* functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add device for consistency. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 16 ++++++++-------- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/si_dma.c | 10 +++++----- 10 files changed, 46 insertions(+), 46 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c15b9441190f..81c1ddb9eb3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1171,8 +1171,8 @@ struct amdgpu_wb { unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)]; }; -int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); -void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); +int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb); +void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb); void amdgpu_get_pcie_info(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ec078a9a5de8..7b44ca29a2c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -530,7 +530,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev) } /** - * amdgpu_wb_get - Allocate a wb entry + * amdgpu_device_wb_get - Allocate a wb entry * * @adev: amdgpu_device pointer * @wb: wb index @@ -538,7 +538,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev) * Allocate a wb slot for use by the driver (all asics). * Returns 0 on success or -EINVAL on failure. 
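 *
 * A hedged sketch of the usual call pattern around these two helpers,
 * based on the SDMA ring tests later in this patch; the gpu_addr
 * arithmetic is an assumption taken from that test code, it is not
 * shown in this hunk:
 *
 *	u32 index;
 *	int r = amdgpu_device_wb_get(adev, &index);
 *	if (r)
 *		return r;
 *	cpu_ptr  = &adev->wb.wb[index];			CPU view of the slot
 *	gpu_addr = adev->wb.gpu_addr + (index * 4);	GPU view of the slot
 *	... let the GPU write a status or fence value into the slot ...
 *	amdgpu_device_wb_free(adev, index);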
*/ -int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) +int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb) { unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); @@ -552,14 +552,14 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) } /** - * amdgpu_wb_free - Free a wb entry + * amdgpu_device_wb_free - Free a wb entry * * @adev: amdgpu_device pointer * @wb: wb index * * Free a wb slot allocated for use by the driver (all asics) */ -void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) +void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) { if (wb < adev->wb.num_wb) __clear_bit(wb >> 3, adev->wb.used); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index ef043361009f..bb40d2529a30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -203,7 +203,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, spin_lock_init(&kiq->ring_lock); - r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs); + r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs); if (r) return r; @@ -229,7 +229,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring, struct amdgpu_irq_src *irq) { - amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs); + amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs); amdgpu_ring_fini(ring); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index f5f27e4f0f7f..06373d44b3da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -92,15 +92,15 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size, } return 0; } else { - r = amdgpu_wb_get(adev, &adev->irq.ih.wptr_offs); + r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs); if (r) { dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r); return r; } - r = amdgpu_wb_get(adev, &adev->irq.ih.rptr_offs); + r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs); if (r) { - amdgpu_wb_free(adev, adev->irq.ih.wptr_offs); + amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs); dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r); return r; } @@ -133,8 +133,8 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev) amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj, &adev->irq.ih.gpu_addr, (void **)&adev->irq.ih.ring); - amdgpu_wb_free(adev, adev->irq.ih.wptr_offs); - amdgpu_wb_free(adev, adev->irq.ih.rptr_offs); + amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs); + amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 41c75f9632dc..13044e66dcaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -263,25 +263,25 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - r = amdgpu_wb_get(adev, &ring->rptr_offs); + r = amdgpu_device_wb_get(adev, &ring->rptr_offs); if (r) { dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); return r; } - r = amdgpu_wb_get(adev, &ring->wptr_offs); + r = amdgpu_device_wb_get(adev, &ring->wptr_offs); if (r) { dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); return r; } - r = amdgpu_wb_get(adev, &ring->fence_offs); + r = amdgpu_device_wb_get(adev, &ring->fence_offs); if (r) { dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); return r; } - r = 
amdgpu_wb_get(adev, &ring->cond_exe_offs); + r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs); if (r) { dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r); return r; @@ -348,11 +348,11 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) if (!(ring->adev) || !(ring->adev->rings[ring->idx])) return; - amdgpu_wb_free(ring->adev, ring->rptr_offs); - amdgpu_wb_free(ring->adev, ring->wptr_offs); + amdgpu_device_wb_free(ring->adev, ring->rptr_offs); + amdgpu_device_wb_free(ring->adev, ring->wptr_offs); - amdgpu_wb_free(ring->adev, ring->cond_exe_offs); - amdgpu_wb_free(ring->adev, ring->fence_offs); + amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs); + amdgpu_device_wb_free(ring->adev, ring->fence_offs); amdgpu_bo_free_kernel(&ring->ring_obj, &ring->gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index ed26dcbc4f79..e406c93d01d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -626,7 +626,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) u32 tmp; u64 gpu_addr; - r = amdgpu_wb_get(adev, &index); + r = amdgpu_device_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); return r; @@ -639,7 +639,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); @@ -663,7 +663,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) ring->idx, tmp); r = -EINVAL; } - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -686,7 +686,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) u64 gpu_addr; long r; - r = amdgpu_wb_get(adev, &index); + r = amdgpu_device_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; @@ -735,7 +735,7 @@ err1: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); err0: - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 121e628e7cdb..0c5b91a40f22 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -600,7 +600,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) u32 tmp; u64 gpu_addr; - r = amdgpu_wb_get(adev, &index); + r = amdgpu_device_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); return r; @@ -613,7 +613,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -639,7 +639,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) ring->idx, tmp); r = -EINVAL; } - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -662,7 +662,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) u64 gpu_addr; long r; - r = amdgpu_wb_get(adev, &index); + r = amdgpu_device_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; @@ -715,7 +715,7 @@ err1: amdgpu_ib_free(adev, &ib, NULL); 
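/*
 * Every ring/IB test touched by this rename follows one write-back
 * pattern, sketched here in abridged form; the 0xCAFEDEAD/0xDEADBEEF
 * values are an assumption carried over from this kernel's SDMA tests
 * and are not visible in these hunks:
 *
 *	r = amdgpu_device_wb_get(adev, &index);
 *	gpu_addr = adev->wb.gpu_addr + (index * 4);
 *	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);	poison the slot
 *	... emit a GPU write of 0xDEADBEEF to gpu_addr and submit ...
 *	tmp = le32_to_cpu(adev->wb.wb[index]);		poll for the value
 *	amdgpu_device_wb_free(adev, index);
 */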
dma_fence_put(f); err0: - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 41c60f5ac96e..4e031a2aad9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -867,7 +867,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) u32 tmp; u64 gpu_addr; - r = amdgpu_wb_get(adev, &index); + r = amdgpu_device_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); return r; @@ -880,7 +880,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -906,7 +906,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) ring->idx, tmp); r = -EINVAL; } - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -929,7 +929,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) u64 gpu_addr; long r; - r = amdgpu_wb_get(adev, &index); + r = amdgpu_device_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; @@ -981,7 +981,7 @@ err1: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); err0: - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 79e82bf35f7d..73477c5ed9b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -869,7 +869,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring) u32 tmp; u64 gpu_addr; - r = amdgpu_wb_get(adev, &index); + r = amdgpu_device_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); return r; @@ -882,7 +882,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring) r = amdgpu_ring_alloc(ring, 5); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -908,7 +908,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring) ring->idx, tmp); r = -EINVAL; } - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -931,7 +931,7 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) u32 tmp = 0; u64 gpu_addr; - r = amdgpu_wb_get(adev, &index); + r = amdgpu_device_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; @@ -983,7 +983,7 @@ err1: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); err0: - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index ee469a906cd3..9adca5d8b045 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -221,7 +221,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring) u32 tmp; u64 gpu_addr; - r = amdgpu_wb_get(adev, &index); + r = amdgpu_device_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); return r; @@ -234,7 +234,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring) r = amdgpu_ring_alloc(ring, 4); if (r) { DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); - 
amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -258,7 +258,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring) ring->idx, tmp); r = -EINVAL; } - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } @@ -281,7 +281,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) u64 gpu_addr; long r; - r = amdgpu_wb_get(adev, &index); + r = amdgpu_device_wb_get(adev, &index); if (r) { dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; @@ -328,7 +328,7 @@ err1: amdgpu_ib_free(adev, &ib, NULL); dma_fence_put(f); err0: - amdgpu_wb_free(adev, index); + amdgpu_device_wb_free(adev, index); return r; } -- cgit v1.2.3 From 9c3f2b54746f764e1b695797c78bc46b8713f067 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Dec 2017 16:20:19 -0500 Subject: drm/amdgpu: rename amdgpu_program_register_sequence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add device for consistency with other functions in this file. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 +- drivers/gpu/drm/amd/amdgpu/cik.c | 120 ++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 24 +++--- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 30 ++++---- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 114 +++++++++++++-------------- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 12 +-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 54 ++++++------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 6 +- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 48 ++++++------ drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 12 +-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 60 +++++++-------- drivers/gpu/drm/amd/amdgpu/si.c | 102 ++++++++++++------------ drivers/gpu/drm/amd/amdgpu/vi.c | 30 ++++---- 14 files changed, 311 insertions(+), 311 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 81c1ddb9eb3a..f10f4fc7dbe0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1913,7 +1913,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev); void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); -void amdgpu_program_register_sequence(struct amdgpu_device *adev, +void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, const u32 *registers, const u32 array_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7b44ca29a2c8..05e5c6822f9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -342,7 +342,7 @@ static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev) } /** - * amdgpu_program_register_sequence - program an array of registers. + * amdgpu_device_program_register_sequence - program an array of registers. * * @adev: amdgpu_device pointer * @registers: pointer to the register array @@ -351,9 +351,9 @@ static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev) * Programs an array of registers with AND and OR masks. * This is a helper for setting golden registers.
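 *
 * The array is consumed in triples of {offset, and_mask, or_mask}; a
 * hedged sketch of the loop inside the helper this patch renames (only
 * its kernel-doc is visible in the hunk, so this is abridged):
 *
 *	for (i = 0; i < array_size; i += 3) {
 *		reg = registers[i + 0];
 *		and_mask = registers[i + 1];
 *		or_mask = registers[i + 2];
 *		if (and_mask == 0xffffffff)
 *			tmp = or_mask;
 *		else
 *			tmp = (RREG32(reg) & ~and_mask) | or_mask;
 *		WREG32(reg, tmp);
 *	}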
*/ -void amdgpu_program_register_sequence(struct amdgpu_device *adev, - const u32 *registers, - const u32 array_size) +void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, + const u32 *registers, + const u32 array_size) { u32 tmp, reg, and_mask, or_mask; int i; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 8ba056a2a5da..39d49712f8c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -755,74 +755,74 @@ static void cik_init_golden_registers(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_BONAIRE: - amdgpu_program_register_sequence(adev, - bonaire_mgcg_cgcg_init, - ARRAY_SIZE(bonaire_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - bonaire_golden_registers, - ARRAY_SIZE(bonaire_golden_registers)); - amdgpu_program_register_sequence(adev, - bonaire_golden_common_registers, - ARRAY_SIZE(bonaire_golden_common_registers)); - amdgpu_program_register_sequence(adev, - bonaire_golden_spm_registers, - ARRAY_SIZE(bonaire_golden_spm_registers)); + amdgpu_device_program_register_sequence(adev, + bonaire_mgcg_cgcg_init, + ARRAY_SIZE(bonaire_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + bonaire_golden_registers, + ARRAY_SIZE(bonaire_golden_registers)); + amdgpu_device_program_register_sequence(adev, + bonaire_golden_common_registers, + ARRAY_SIZE(bonaire_golden_common_registers)); + amdgpu_device_program_register_sequence(adev, + bonaire_golden_spm_registers, + ARRAY_SIZE(bonaire_golden_spm_registers)); break; case CHIP_KABINI: - amdgpu_program_register_sequence(adev, - kalindi_mgcg_cgcg_init, - ARRAY_SIZE(kalindi_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - kalindi_golden_registers, - ARRAY_SIZE(kalindi_golden_registers)); - amdgpu_program_register_sequence(adev, - kalindi_golden_common_registers, - ARRAY_SIZE(kalindi_golden_common_registers)); - amdgpu_program_register_sequence(adev, - kalindi_golden_spm_registers, - ARRAY_SIZE(kalindi_golden_spm_registers)); + amdgpu_device_program_register_sequence(adev, + kalindi_mgcg_cgcg_init, + ARRAY_SIZE(kalindi_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + kalindi_golden_registers, + ARRAY_SIZE(kalindi_golden_registers)); + amdgpu_device_program_register_sequence(adev, + kalindi_golden_common_registers, + ARRAY_SIZE(kalindi_golden_common_registers)); + amdgpu_device_program_register_sequence(adev, + kalindi_golden_spm_registers, + ARRAY_SIZE(kalindi_golden_spm_registers)); break; case CHIP_MULLINS: - amdgpu_program_register_sequence(adev, - kalindi_mgcg_cgcg_init, - ARRAY_SIZE(kalindi_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - godavari_golden_registers, - ARRAY_SIZE(godavari_golden_registers)); - amdgpu_program_register_sequence(adev, - kalindi_golden_common_registers, - ARRAY_SIZE(kalindi_golden_common_registers)); - amdgpu_program_register_sequence(adev, - kalindi_golden_spm_registers, - ARRAY_SIZE(kalindi_golden_spm_registers)); + amdgpu_device_program_register_sequence(adev, + kalindi_mgcg_cgcg_init, + ARRAY_SIZE(kalindi_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + godavari_golden_registers, + ARRAY_SIZE(godavari_golden_registers)); + amdgpu_device_program_register_sequence(adev, + kalindi_golden_common_registers, + ARRAY_SIZE(kalindi_golden_common_registers)); + amdgpu_device_program_register_sequence(adev, + kalindi_golden_spm_registers, + ARRAY_SIZE(kalindi_golden_spm_registers)); break; case CHIP_KAVERI: - 
amdgpu_program_register_sequence(adev, - spectre_mgcg_cgcg_init, - ARRAY_SIZE(spectre_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - spectre_golden_registers, - ARRAY_SIZE(spectre_golden_registers)); - amdgpu_program_register_sequence(adev, - spectre_golden_common_registers, - ARRAY_SIZE(spectre_golden_common_registers)); - amdgpu_program_register_sequence(adev, - spectre_golden_spm_registers, - ARRAY_SIZE(spectre_golden_spm_registers)); + amdgpu_device_program_register_sequence(adev, + spectre_mgcg_cgcg_init, + ARRAY_SIZE(spectre_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + spectre_golden_registers, + ARRAY_SIZE(spectre_golden_registers)); + amdgpu_device_program_register_sequence(adev, + spectre_golden_common_registers, + ARRAY_SIZE(spectre_golden_common_registers)); + amdgpu_device_program_register_sequence(adev, + spectre_golden_spm_registers, + ARRAY_SIZE(spectre_golden_spm_registers)); break; case CHIP_HAWAII: - amdgpu_program_register_sequence(adev, - hawaii_mgcg_cgcg_init, - ARRAY_SIZE(hawaii_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - hawaii_golden_registers, - ARRAY_SIZE(hawaii_golden_registers)); - amdgpu_program_register_sequence(adev, - hawaii_golden_common_registers, - ARRAY_SIZE(hawaii_golden_common_registers)); - amdgpu_program_register_sequence(adev, - hawaii_golden_spm_registers, - ARRAY_SIZE(hawaii_golden_spm_registers)); + amdgpu_device_program_register_sequence(adev, + hawaii_mgcg_cgcg_init, + ARRAY_SIZE(hawaii_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + hawaii_golden_registers, + ARRAY_SIZE(hawaii_golden_registers)); + amdgpu_device_program_register_sequence(adev, + hawaii_golden_common_registers, + ARRAY_SIZE(hawaii_golden_common_registers)); + amdgpu_device_program_register_sequence(adev, + hawaii_golden_spm_registers, + ARRAY_SIZE(hawaii_golden_spm_registers)); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index a397111c2ced..f34bc68aadfb 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -145,20 +145,20 @@ static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_FIJI: - amdgpu_program_register_sequence(adev, - fiji_mgcg_cgcg_init, - ARRAY_SIZE(fiji_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_fiji_a10, - ARRAY_SIZE(golden_settings_fiji_a10)); + amdgpu_device_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_fiji_a10, + ARRAY_SIZE(golden_settings_fiji_a10)); break; case CHIP_TONGA: - amdgpu_program_register_sequence(adev, - tonga_mgcg_cgcg_init, - ARRAY_SIZE(tonga_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_tonga_a11, - ARRAY_SIZE(golden_settings_tonga_a11)); + amdgpu_device_program_register_sequence(adev, + tonga_mgcg_cgcg_init, + ARRAY_SIZE(tonga_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_tonga_a11, + ARRAY_SIZE(golden_settings_tonga_a11)); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 67e670989e81..26378bd6aba4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -154,28 +154,28 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case 
CHIP_CARRIZO: - amdgpu_program_register_sequence(adev, - cz_mgcg_cgcg_init, - ARRAY_SIZE(cz_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - cz_golden_settings_a11, - ARRAY_SIZE(cz_golden_settings_a11)); + amdgpu_device_program_register_sequence(adev, + cz_mgcg_cgcg_init, + ARRAY_SIZE(cz_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + cz_golden_settings_a11, + ARRAY_SIZE(cz_golden_settings_a11)); break; case CHIP_STONEY: - amdgpu_program_register_sequence(adev, - stoney_golden_settings_a11, - ARRAY_SIZE(stoney_golden_settings_a11)); + amdgpu_device_program_register_sequence(adev, + stoney_golden_settings_a11, + ARRAY_SIZE(stoney_golden_settings_a11)); break; case CHIP_POLARIS11: case CHIP_POLARIS12: - amdgpu_program_register_sequence(adev, - polaris11_golden_settings_a11, - ARRAY_SIZE(polaris11_golden_settings_a11)); + amdgpu_device_program_register_sequence(adev, + polaris11_golden_settings_a11, + ARRAY_SIZE(polaris11_golden_settings_a11)); break; case CHIP_POLARIS10: - amdgpu_program_register_sequence(adev, - polaris10_golden_settings_a11, - ARRAY_SIZE(polaris10_golden_settings_a11)); + amdgpu_device_program_register_sequence(adev, + polaris10_golden_settings_a11, + ARRAY_SIZE(polaris10_golden_settings_a11)); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index c7dc69031fb5..4a9c28cd144d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -679,55 +679,55 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_TOPAZ: - amdgpu_program_register_sequence(adev, - iceland_mgcg_cgcg_init, - ARRAY_SIZE(iceland_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_iceland_a11, - ARRAY_SIZE(golden_settings_iceland_a11)); - amdgpu_program_register_sequence(adev, - iceland_golden_common_all, - ARRAY_SIZE(iceland_golden_common_all)); + amdgpu_device_program_register_sequence(adev, + iceland_mgcg_cgcg_init, + ARRAY_SIZE(iceland_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_iceland_a11, + ARRAY_SIZE(golden_settings_iceland_a11)); + amdgpu_device_program_register_sequence(adev, + iceland_golden_common_all, + ARRAY_SIZE(iceland_golden_common_all)); break; case CHIP_FIJI: - amdgpu_program_register_sequence(adev, - fiji_mgcg_cgcg_init, - ARRAY_SIZE(fiji_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_fiji_a10, - ARRAY_SIZE(golden_settings_fiji_a10)); - amdgpu_program_register_sequence(adev, - fiji_golden_common_all, - ARRAY_SIZE(fiji_golden_common_all)); + amdgpu_device_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_fiji_a10, + ARRAY_SIZE(golden_settings_fiji_a10)); + amdgpu_device_program_register_sequence(adev, + fiji_golden_common_all, + ARRAY_SIZE(fiji_golden_common_all)); break; case CHIP_TONGA: - amdgpu_program_register_sequence(adev, - tonga_mgcg_cgcg_init, - ARRAY_SIZE(tonga_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_tonga_a11, - ARRAY_SIZE(golden_settings_tonga_a11)); - amdgpu_program_register_sequence(adev, - tonga_golden_common_all, - ARRAY_SIZE(tonga_golden_common_all)); + amdgpu_device_program_register_sequence(adev, + tonga_mgcg_cgcg_init, + ARRAY_SIZE(tonga_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_tonga_a11, + 
ARRAY_SIZE(golden_settings_tonga_a11)); + amdgpu_device_program_register_sequence(adev, + tonga_golden_common_all, + ARRAY_SIZE(tonga_golden_common_all)); break; case CHIP_POLARIS11: case CHIP_POLARIS12: - amdgpu_program_register_sequence(adev, - golden_settings_polaris11_a11, - ARRAY_SIZE(golden_settings_polaris11_a11)); - amdgpu_program_register_sequence(adev, - polaris11_golden_common_all, - ARRAY_SIZE(polaris11_golden_common_all)); + amdgpu_device_program_register_sequence(adev, + golden_settings_polaris11_a11, + ARRAY_SIZE(golden_settings_polaris11_a11)); + amdgpu_device_program_register_sequence(adev, + polaris11_golden_common_all, + ARRAY_SIZE(polaris11_golden_common_all)); break; case CHIP_POLARIS10: - amdgpu_program_register_sequence(adev, - golden_settings_polaris10_a11, - ARRAY_SIZE(golden_settings_polaris10_a11)); - amdgpu_program_register_sequence(adev, - polaris10_golden_common_all, - ARRAY_SIZE(polaris10_golden_common_all)); + amdgpu_device_program_register_sequence(adev, + golden_settings_polaris10_a11, + ARRAY_SIZE(golden_settings_polaris10_a11)); + amdgpu_device_program_register_sequence(adev, + polaris10_golden_common_all, + ARRAY_SIZE(polaris10_golden_common_all)); WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C); if (adev->pdev->revision == 0xc7 && ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) || @@ -738,26 +738,26 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) } break; case CHIP_CARRIZO: - amdgpu_program_register_sequence(adev, - cz_mgcg_cgcg_init, - ARRAY_SIZE(cz_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - cz_golden_settings_a11, - ARRAY_SIZE(cz_golden_settings_a11)); - amdgpu_program_register_sequence(adev, - cz_golden_common_all, - ARRAY_SIZE(cz_golden_common_all)); + amdgpu_device_program_register_sequence(adev, + cz_mgcg_cgcg_init, + ARRAY_SIZE(cz_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + cz_golden_settings_a11, + ARRAY_SIZE(cz_golden_settings_a11)); + amdgpu_device_program_register_sequence(adev, + cz_golden_common_all, + ARRAY_SIZE(cz_golden_common_all)); break; case CHIP_STONEY: - amdgpu_program_register_sequence(adev, - stoney_mgcg_cgcg_init, - ARRAY_SIZE(stoney_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - stoney_golden_settings_a11, - ARRAY_SIZE(stoney_golden_settings_a11)); - amdgpu_program_register_sequence(adev, - stoney_golden_common_all, - ARRAY_SIZE(stoney_golden_common_all)); + amdgpu_device_program_register_sequence(adev, + stoney_mgcg_cgcg_init, + ARRAY_SIZE(stoney_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + stoney_golden_settings_a11, + ARRAY_SIZE(stoney_golden_settings_a11)); + amdgpu_device_program_register_sequence(adev, + stoney_golden_common_all, + ARRAY_SIZE(stoney_golden_common_all)); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 9c28e18741ea..c4285395b5fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -67,12 +67,12 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_TOPAZ: - amdgpu_program_register_sequence(adev, - iceland_mgcg_cgcg_init, - ARRAY_SIZE(iceland_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_iceland_a11, - ARRAY_SIZE(golden_settings_iceland_a11)); + amdgpu_device_program_register_sequence(adev, + iceland_mgcg_cgcg_init, + ARRAY_SIZE(iceland_mgcg_cgcg_init)); + 
amdgpu_device_program_register_sequence(adev, + golden_settings_iceland_a11, + ARRAY_SIZE(golden_settings_iceland_a11)); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index efed20ac4a01..6641276ecbdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -120,44 +120,44 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_FIJI: - amdgpu_program_register_sequence(adev, - fiji_mgcg_cgcg_init, - ARRAY_SIZE(fiji_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_fiji_a10, - ARRAY_SIZE(golden_settings_fiji_a10)); + amdgpu_device_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_fiji_a10, + ARRAY_SIZE(golden_settings_fiji_a10)); break; case CHIP_TONGA: - amdgpu_program_register_sequence(adev, - tonga_mgcg_cgcg_init, - ARRAY_SIZE(tonga_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_tonga_a11, - ARRAY_SIZE(golden_settings_tonga_a11)); + amdgpu_device_program_register_sequence(adev, + tonga_mgcg_cgcg_init, + ARRAY_SIZE(tonga_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_tonga_a11, + ARRAY_SIZE(golden_settings_tonga_a11)); break; case CHIP_POLARIS11: case CHIP_POLARIS12: - amdgpu_program_register_sequence(adev, - golden_settings_polaris11_a11, - ARRAY_SIZE(golden_settings_polaris11_a11)); + amdgpu_device_program_register_sequence(adev, + golden_settings_polaris11_a11, + ARRAY_SIZE(golden_settings_polaris11_a11)); break; case CHIP_POLARIS10: - amdgpu_program_register_sequence(adev, - golden_settings_polaris10_a11, - ARRAY_SIZE(golden_settings_polaris10_a11)); + amdgpu_device_program_register_sequence(adev, + golden_settings_polaris10_a11, + ARRAY_SIZE(golden_settings_polaris10_a11)); break; case CHIP_CARRIZO: - amdgpu_program_register_sequence(adev, - cz_mgcg_cgcg_init, - ARRAY_SIZE(cz_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + cz_mgcg_cgcg_init, + ARRAY_SIZE(cz_mgcg_cgcg_init)); break; case CHIP_STONEY: - amdgpu_program_register_sequence(adev, - stoney_mgcg_cgcg_init, - ARRAY_SIZE(stoney_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_stoney_common, - ARRAY_SIZE(golden_settings_stoney_common)); + amdgpu_device_program_register_sequence(adev, + stoney_mgcg_cgcg_init, + ARRAY_SIZE(stoney_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_stoney_common, + ARRAY_SIZE(golden_settings_stoney_common)); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 5da2272bd313..899ffe50cb50 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -918,9 +918,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) bool value; u32 tmp; - amdgpu_program_register_sequence(adev, - golden_settings_vega10_hdp, - ARRAY_SIZE(golden_settings_vega10_hdp)); + amdgpu_device_program_register_sequence(adev, + golden_settings_vega10_hdp, + ARRAY_SIZE(golden_settings_vega10_hdp)); if (adev->gart.robj == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index da7c261d5d87..af2d47e9abdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ 
b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -279,32 +279,32 @@ void xgpu_vi_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_FIJI: - amdgpu_program_register_sequence(adev, - xgpu_fiji_mgcg_cgcg_init, - ARRAY_SIZE( - xgpu_fiji_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - xgpu_fiji_golden_settings_a10, - ARRAY_SIZE( - xgpu_fiji_golden_settings_a10)); - amdgpu_program_register_sequence(adev, - xgpu_fiji_golden_common_all, - ARRAY_SIZE( - xgpu_fiji_golden_common_all)); + amdgpu_device_program_register_sequence(adev, + xgpu_fiji_mgcg_cgcg_init, + ARRAY_SIZE( + xgpu_fiji_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + xgpu_fiji_golden_settings_a10, + ARRAY_SIZE( + xgpu_fiji_golden_settings_a10)); + amdgpu_device_program_register_sequence(adev, + xgpu_fiji_golden_common_all, + ARRAY_SIZE( + xgpu_fiji_golden_common_all)); break; case CHIP_TONGA: - amdgpu_program_register_sequence(adev, - xgpu_tonga_mgcg_cgcg_init, - ARRAY_SIZE( - xgpu_tonga_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - xgpu_tonga_golden_settings_a11, - ARRAY_SIZE( - xgpu_tonga_golden_settings_a11)); - amdgpu_program_register_sequence(adev, - xgpu_tonga_golden_common_all, - ARRAY_SIZE( - xgpu_tonga_golden_common_all)); + amdgpu_device_program_register_sequence(adev, + xgpu_tonga_mgcg_cgcg_init, + ARRAY_SIZE( + xgpu_tonga_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + xgpu_tonga_golden_settings_a11, + ARRAY_SIZE( + xgpu_tonga_golden_settings_a11)); + amdgpu_device_program_register_sequence(adev, + xgpu_tonga_golden_common_all, + ARRAY_SIZE( + xgpu_tonga_golden_common_all)); break; default: BUG_ON("Doesn't support chip type.\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 0c5b91a40f22..401552bae7f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -93,12 +93,12 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_TOPAZ: - amdgpu_program_register_sequence(adev, - iceland_mgcg_cgcg_init, - ARRAY_SIZE(iceland_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_iceland_a11, - ARRAY_SIZE(golden_settings_iceland_a11)); + amdgpu_device_program_register_sequence(adev, + iceland_mgcg_cgcg_init, + ARRAY_SIZE(iceland_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_iceland_a11, + ARRAY_SIZE(golden_settings_iceland_a11)); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 4e031a2aad9d..0735d4d0e56a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -192,47 +192,47 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_FIJI: - amdgpu_program_register_sequence(adev, - fiji_mgcg_cgcg_init, - ARRAY_SIZE(fiji_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_fiji_a10, - ARRAY_SIZE(golden_settings_fiji_a10)); + amdgpu_device_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_fiji_a10, + ARRAY_SIZE(golden_settings_fiji_a10)); break; case CHIP_TONGA: - amdgpu_program_register_sequence(adev, - tonga_mgcg_cgcg_init, - ARRAY_SIZE(tonga_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - 
golden_settings_tonga_a11, - ARRAY_SIZE(golden_settings_tonga_a11)); + amdgpu_device_program_register_sequence(adev, + tonga_mgcg_cgcg_init, + ARRAY_SIZE(tonga_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + golden_settings_tonga_a11, + ARRAY_SIZE(golden_settings_tonga_a11)); break; case CHIP_POLARIS11: case CHIP_POLARIS12: - amdgpu_program_register_sequence(adev, - golden_settings_polaris11_a11, - ARRAY_SIZE(golden_settings_polaris11_a11)); + amdgpu_device_program_register_sequence(adev, + golden_settings_polaris11_a11, + ARRAY_SIZE(golden_settings_polaris11_a11)); break; case CHIP_POLARIS10: - amdgpu_program_register_sequence(adev, - golden_settings_polaris10_a11, - ARRAY_SIZE(golden_settings_polaris10_a11)); + amdgpu_device_program_register_sequence(adev, + golden_settings_polaris10_a11, + ARRAY_SIZE(golden_settings_polaris10_a11)); break; case CHIP_CARRIZO: - amdgpu_program_register_sequence(adev, - cz_mgcg_cgcg_init, - ARRAY_SIZE(cz_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - cz_golden_settings_a11, - ARRAY_SIZE(cz_golden_settings_a11)); + amdgpu_device_program_register_sequence(adev, + cz_mgcg_cgcg_init, + ARRAY_SIZE(cz_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + cz_golden_settings_a11, + ARRAY_SIZE(cz_golden_settings_a11)); break; case CHIP_STONEY: - amdgpu_program_register_sequence(adev, - stoney_mgcg_cgcg_init, - ARRAY_SIZE(stoney_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - stoney_golden_settings_a11, - ARRAY_SIZE(stoney_golden_settings_a11)); + amdgpu_device_program_register_sequence(adev, + stoney_mgcg_cgcg_init, + ARRAY_SIZE(stoney_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + stoney_golden_settings_a11, + ARRAY_SIZE(stoney_golden_settings_a11)); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 49eef3090f08..78baddb5d300 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1390,65 +1390,65 @@ static void si_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_TAHITI: - amdgpu_program_register_sequence(adev, - tahiti_golden_registers, - ARRAY_SIZE(tahiti_golden_registers)); - amdgpu_program_register_sequence(adev, - tahiti_golden_rlc_registers, - ARRAY_SIZE(tahiti_golden_rlc_registers)); - amdgpu_program_register_sequence(adev, - tahiti_mgcg_cgcg_init, - ARRAY_SIZE(tahiti_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - tahiti_golden_registers2, - ARRAY_SIZE(tahiti_golden_registers2)); + amdgpu_device_program_register_sequence(adev, + tahiti_golden_registers, + ARRAY_SIZE(tahiti_golden_registers)); + amdgpu_device_program_register_sequence(adev, + tahiti_golden_rlc_registers, + ARRAY_SIZE(tahiti_golden_rlc_registers)); + amdgpu_device_program_register_sequence(adev, + tahiti_mgcg_cgcg_init, + ARRAY_SIZE(tahiti_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + tahiti_golden_registers2, + ARRAY_SIZE(tahiti_golden_registers2)); break; case CHIP_PITCAIRN: - amdgpu_program_register_sequence(adev, - pitcairn_golden_registers, - ARRAY_SIZE(pitcairn_golden_registers)); - amdgpu_program_register_sequence(adev, - pitcairn_golden_rlc_registers, - ARRAY_SIZE(pitcairn_golden_rlc_registers)); - amdgpu_program_register_sequence(adev, - pitcairn_mgcg_cgcg_init, - ARRAY_SIZE(pitcairn_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + pitcairn_golden_registers, + ARRAY_SIZE(pitcairn_golden_registers)); + 
amdgpu_device_program_register_sequence(adev, + pitcairn_golden_rlc_registers, + ARRAY_SIZE(pitcairn_golden_rlc_registers)); + amdgpu_device_program_register_sequence(adev, + pitcairn_mgcg_cgcg_init, + ARRAY_SIZE(pitcairn_mgcg_cgcg_init)); break; case CHIP_VERDE: - amdgpu_program_register_sequence(adev, - verde_golden_registers, - ARRAY_SIZE(verde_golden_registers)); - amdgpu_program_register_sequence(adev, - verde_golden_rlc_registers, - ARRAY_SIZE(verde_golden_rlc_registers)); - amdgpu_program_register_sequence(adev, - verde_mgcg_cgcg_init, - ARRAY_SIZE(verde_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - verde_pg_init, - ARRAY_SIZE(verde_pg_init)); + amdgpu_device_program_register_sequence(adev, + verde_golden_registers, + ARRAY_SIZE(verde_golden_registers)); + amdgpu_device_program_register_sequence(adev, + verde_golden_rlc_registers, + ARRAY_SIZE(verde_golden_rlc_registers)); + amdgpu_device_program_register_sequence(adev, + verde_mgcg_cgcg_init, + ARRAY_SIZE(verde_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + verde_pg_init, + ARRAY_SIZE(verde_pg_init)); break; case CHIP_OLAND: - amdgpu_program_register_sequence(adev, - oland_golden_registers, - ARRAY_SIZE(oland_golden_registers)); - amdgpu_program_register_sequence(adev, - oland_golden_rlc_registers, - ARRAY_SIZE(oland_golden_rlc_registers)); - amdgpu_program_register_sequence(adev, - oland_mgcg_cgcg_init, - ARRAY_SIZE(oland_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + oland_golden_registers, + ARRAY_SIZE(oland_golden_registers)); + amdgpu_device_program_register_sequence(adev, + oland_golden_rlc_registers, + ARRAY_SIZE(oland_golden_rlc_registers)); + amdgpu_device_program_register_sequence(adev, + oland_mgcg_cgcg_init, + ARRAY_SIZE(oland_mgcg_cgcg_init)); break; case CHIP_HAINAN: - amdgpu_program_register_sequence(adev, - hainan_golden_registers, - ARRAY_SIZE(hainan_golden_registers)); - amdgpu_program_register_sequence(adev, - hainan_golden_registers2, - ARRAY_SIZE(hainan_golden_registers2)); - amdgpu_program_register_sequence(adev, - hainan_mgcg_cgcg_init, - ARRAY_SIZE(hainan_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + hainan_golden_registers, + ARRAY_SIZE(hainan_golden_registers)); + amdgpu_device_program_register_sequence(adev, + hainan_golden_registers2, + ARRAY_SIZE(hainan_golden_registers2)); + amdgpu_device_program_register_sequence(adev, + hainan_mgcg_cgcg_init, + ARRAY_SIZE(hainan_mgcg_cgcg_init)); break; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index bb8ca9489546..0b57c5d24510 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -282,29 +282,29 @@ static void vi_init_golden_registers(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_TOPAZ: - amdgpu_program_register_sequence(adev, - iceland_mgcg_cgcg_init, - ARRAY_SIZE(iceland_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + iceland_mgcg_cgcg_init, + ARRAY_SIZE(iceland_mgcg_cgcg_init)); break; case CHIP_FIJI: - amdgpu_program_register_sequence(adev, - fiji_mgcg_cgcg_init, - ARRAY_SIZE(fiji_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + fiji_mgcg_cgcg_init, + ARRAY_SIZE(fiji_mgcg_cgcg_init)); break; case CHIP_TONGA: - amdgpu_program_register_sequence(adev, - tonga_mgcg_cgcg_init, - ARRAY_SIZE(tonga_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + tonga_mgcg_cgcg_init, + ARRAY_SIZE(tonga_mgcg_cgcg_init)); break; case CHIP_CARRIZO: - 
amdgpu_program_register_sequence(adev, - cz_mgcg_cgcg_init, - ARRAY_SIZE(cz_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + cz_mgcg_cgcg_init, + ARRAY_SIZE(cz_mgcg_cgcg_init)); break; case CHIP_STONEY: - amdgpu_program_register_sequence(adev, - stoney_mgcg_cgcg_init, - ARRAY_SIZE(stoney_mgcg_cgcg_init)); + amdgpu_device_program_register_sequence(adev, + stoney_mgcg_cgcg_init, + ARRAY_SIZE(stoney_mgcg_cgcg_init)); break; case CHIP_POLARIS11: case CHIP_POLARIS10: -- cgit v1.2.3 From 8111c387275f64aae972d5717d32b0f756bb6e5f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Dec 2017 16:22:53 -0500 Subject: drm/amdgpu: rename amdgpu_pci_config_reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add device for consistency with other functions in this file. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- drivers/gpu/drm/amd/amdgpu/cik.c | 2 +- drivers/gpu/drm/amd/amdgpu/vi.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f10f4fc7dbe0..941535d94f87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1899,7 +1899,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) /* Common functions */ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job, bool force); bool amdgpu_need_backup(struct amdgpu_device *adev); -void amdgpu_pci_config_reset(struct amdgpu_device *adev); +void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_need_post(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 05e5c6822f9c..2ff4194fd153 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -377,7 +377,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, } } -void amdgpu_pci_config_reset(struct amdgpu_device *adev) +void amdgpu_device_pci_config_reset(struct amdgpu_device *adev) { pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA); } diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 39d49712f8c9..132ba226f289 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1246,7 +1246,7 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) /* disable BM */ pci_clear_master(adev->pdev); /* reset */ - amdgpu_pci_config_reset(adev); + amdgpu_device_pci_config_reset(adev); udelay(100); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 0b57c5d24510..0e1202914fa8 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -667,7 +667,7 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) /* disable BM */ pci_clear_master(adev->pdev); /* reset */ - amdgpu_pci_config_reset(adev); + amdgpu_device_pci_config_reset(adev); udelay(100); -- cgit v1.2.3 From 22cb0164378c6511423f79d429e0b2e4290934df Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Dec 2017 16:27:11 -0500 Subject: drm/amdgpu: move amdgpu_doorbell_get_kfd_info to amdgpu_amdkfd.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's 
the only place it's used. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ------ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 33 ++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 +----------------------------- 3 files changed, 34 insertions(+), 38 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 941535d94f87..b03f9242448c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -651,12 +651,6 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT AMDGPU_DOORBELL64_INVALID = 0xFFFF } AMDGPU_DOORBELL64_ASSIGNMENT; - -void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, - phys_addr_t *aperture_base, - size_t *aperture_size, - size_t *start_offset); - /* * IRQS. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index c70cda04dbfb..896b16db58aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -93,6 +93,39 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) adev->pdev, kfd2kgd); } +/** + * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to + * setup amdkfd + * + * @adev: amdgpu_device pointer + * @aperture_base: output returning doorbell aperture base physical address + * @aperture_size: output returning doorbell aperture size in bytes + * @start_offset: output returning # of doorbell bytes reserved for amdgpu. + * + * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up, + * takes doorbells required for its own rings and reports the setup to amdkfd. + * amdgpu reserved doorbells are at the start of the doorbell aperture. + */ +static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, + phys_addr_t *aperture_base, + size_t *aperture_size, + size_t *start_offset) +{ + /* + * The first num_doorbells are used by amdgpu. + * amdkfd takes whatever's left in the aperture. + */ + if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) { + *aperture_base = adev->doorbell.base; + *aperture_size = adev->doorbell.size; + *start_offset = adev->doorbell.num_doorbells * sizeof(u32); + } else { + *aperture_base = 0; + *aperture_size = 0; + *start_offset = 0; + } +} + void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) { int i; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2ff4194fd153..ae9a3a0334d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -438,38 +438,7 @@ static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev) adev->doorbell.ptr = NULL; } -/** - * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to - * setup amdkfd - * - * @adev: amdgpu_device pointer - * @aperture_base: output returning doorbell aperture base physical address - * @aperture_size: output returning doorbell aperture size in bytes - * @start_offset: output returning # of doorbell bytes reserved for amdgpu. - * - * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up, - * takes doorbells required for its own rings and reports the setup to amdkfd. - * amdgpu reserved doorbells are at the start of the doorbell aperture. 
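 *
 * Illustrative numbers, an assumption rather than anything in the
 * patch: with a 2 MiB doorbell BAR and num_doorbells = 1024 reserved
 * by amdgpu, the condition above holds and amdkfd is handed
 *
 *	aperture_base = adev->doorbell.base
 *	aperture_size = 2 MiB
 *	start_offset  = 1024 * sizeof(u32) = 4 KiB
 *
 * i.e. everything past the amdgpu-reserved slots at the bottom.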
- */ -void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, - phys_addr_t *aperture_base, - size_t *aperture_size, - size_t *start_offset) -{ - /* - * The first num_doorbells are used by amdgpu. - * amdkfd takes whatever's left in the aperture. - */ - if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) { - *aperture_base = adev->doorbell.base; - *aperture_size = adev->doorbell.size; - *start_offset = adev->doorbell.num_doorbells * sizeof(u32); - } else { - *aperture_base = 0; - *aperture_size = 0; - *start_offset = 0; - } -} + /* * amdgpu_device_wb_*() -- cgit v1.2.3 From 2543e28a81b8d687880315475ba1203ca2f7496a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Dec 2017 16:33:36 -0500 Subject: drm/amdgpu: rename amdgpu_*_location functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add device to the name for consistency. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 ++++++---- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 4 ++-- 6 files changed, 18 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b03f9242448c..ac21addcde7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1901,8 +1901,10 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, u64 num_vis_bytes); void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); -void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); -void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); +void amdgpu_device_vram_location(struct amdgpu_device *adev, + struct amdgpu_mc *mc, u64 base); +void amdgpu_device_gart_location(struct amdgpu_device *adev, + struct amdgpu_mc *mc); int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev); void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); int amdgpu_ttm_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ae9a3a0334d7..09ffa9fbcd92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -535,7 +535,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) } /** - * amdgpu_vram_location - try to find VRAM location + * amdgpu_device_vram_location - try to find VRAM location * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations * @base: base address at which to put VRAM @@ -543,7 +543,8 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) * Function will try to place VRAM at base address provided * as parameter. 
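 *
 * A hedged sketch of the placement math, assuming the helper body in
 * this kernel (only the vram_limit line is visible in the hunk below):
 *
 *	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
 *	mc->vram_start = base;
 *	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 *	if (limit && limit < mc->real_vram_size)
 *		mc->real_vram_size = limit;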
*/ -void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base) +void amdgpu_device_vram_location(struct amdgpu_device *adev, + struct amdgpu_mc *mc, u64 base) { uint64_t limit = (uint64_t)amdgpu_vram_limit << 20; @@ -557,7 +558,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 } /** - * amdgpu_gart_location - try to find GTT location + * amdgpu_device_gart_location - try to find GTT location * @adev: amdgpu device structure holding all necessary informations * @mc: memory controller structure holding memory informations * @@ -568,7 +569,8 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 * * FIXME: when reducing GTT size align new size on power of 2. */ -void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) +void amdgpu_device_gart_location(struct amdgpu_device *adev, + struct amdgpu_mc *mc) { u64 size_af, size_bf; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 279f43a2b1a4..e1a73c43f32d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -222,8 +222,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev, u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; base <<= 24; - amdgpu_vram_location(adev, &adev->mc, base); - amdgpu_gart_location(adev, mc); + amdgpu_device_vram_location(adev, &adev->mc, base); + amdgpu_device_gart_location(adev, mc); } static void gmc_v6_0_mc_program(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index c4285395b5fe..356a9a71b8cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -240,8 +240,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; base <<= 24; - amdgpu_vram_location(adev, &adev->mc, base); - amdgpu_gart_location(adev, mc); + amdgpu_device_vram_location(adev, &adev->mc, base); + amdgpu_device_gart_location(adev, mc); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 6641276ecbdf..fce45578f5fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -405,8 +405,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; base <<= 24; - amdgpu_vram_location(adev, &adev->mc, base); - amdgpu_gart_location(adev, mc); + amdgpu_device_vram_location(adev, &adev->mc, base); + amdgpu_device_gart_location(adev, mc); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 899ffe50cb50..3c84f82687f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -638,8 +638,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, u64 base = 0; if (!amdgpu_sriov_vf(adev)) base = mmhub_v1_0_get_fb_location(adev); - amdgpu_vram_location(adev, &adev->mc, base); - amdgpu_gart_location(adev, mc); + amdgpu_device_vram_location(adev, &adev->mc, base); + amdgpu_device_gart_location(adev, mc); /* base offset of vram pages */ if (adev->flags & AMD_IS_APU) adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); -- cgit v1.2.3 From f5ec697e37023ce60dc1c38bf6b2bf32de767376 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 14 Dec 2017 16:39:02 -0500 Subject: drm/amdgpu: move fw_reserve functions to amdgpu_ttm.c 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's the only place they are used. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 - drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 95 ---------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 99 +++++++++++++++++++++++++++++- 3 files changed, 97 insertions(+), 100 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ac21addcde7d..a8390abe13a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1387,9 +1387,6 @@ struct amdgpu_fw_vram_usage { void *va; }; -int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev); -void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev); - /* * CGS */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 09ffa9fbcd92..3135287c0f5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -597,101 +597,6 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev, mc->gart_size >> 20, mc->gart_start, mc->gart_end); } -/* - * Firmware Reservation functions - */ -/** - * amdgpu_fw_reserve_vram_fini - free fw reserved vram - * - * @adev: amdgpu_device pointer - * - * free fw reserved vram if it has been reserved. - */ -void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev) -{ - amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, - NULL, &adev->fw_vram_usage.va); -} - -/** - * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw - * - * @adev: amdgpu_device pointer - * - * create bo vram reservation from fw. 
- */ -int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev) -{ - struct ttm_operation_ctx ctx = { false, false }; - int r = 0; - int i; - u64 vram_size = adev->mc.visible_vram_size; - u64 offset = adev->fw_vram_usage.start_offset; - u64 size = adev->fw_vram_usage.size; - struct amdgpu_bo *bo; - - adev->fw_vram_usage.va = NULL; - adev->fw_vram_usage.reserved_bo = NULL; - - if (adev->fw_vram_usage.size > 0 && - adev->fw_vram_usage.size <= vram_size) { - - r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, - PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0, - &adev->fw_vram_usage.reserved_bo); - if (r) - goto error_create; - - r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); - if (r) - goto error_reserve; - - /* remove the original mem node and create a new one at the - * request position - */ - bo = adev->fw_vram_usage.reserved_bo; - offset = ALIGN(offset, PAGE_SIZE); - for (i = 0; i < bo->placement.num_placement; ++i) { - bo->placements[i].fpfn = offset >> PAGE_SHIFT; - bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; - } - - ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, - &bo->tbo.mem, &ctx); - if (r) - goto error_pin; - - r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, - AMDGPU_GEM_DOMAIN_VRAM, - adev->fw_vram_usage.start_offset, - (adev->fw_vram_usage.start_offset + - adev->fw_vram_usage.size), NULL); - if (r) - goto error_pin; - r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, - &adev->fw_vram_usage.va); - if (r) - goto error_kmap; - - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); - } - return r; - -error_kmap: - amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); -error_pin: - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); -error_reserve: - amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); -error_create: - adev->fw_vram_usage.va = NULL; - adev->fw_vram_usage.reserved_bo = NULL; - return r; -} - /** * amdgpu_device_resize_fb_bar - try to resize FB BAR * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c307a7d2cf16..5b5554cba35f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1269,6 +1269,101 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .access_memory = &amdgpu_ttm_access_memory }; +/* + * Firmware Reservation functions + */ +/** + * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram + * + * @adev: amdgpu_device pointer + * + * free fw reserved vram if it has been reserved. + */ +static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo, + NULL, &adev->fw_vram_usage.va); +} + +/** + * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw + * + * @adev: amdgpu_device pointer + * + * create bo vram reservation from fw. 
+ */ +static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) +{ + struct ttm_operation_ctx ctx = { false, false }; + int r = 0; + int i; + u64 vram_size = adev->mc.visible_vram_size; + u64 offset = adev->fw_vram_usage.start_offset; + u64 size = adev->fw_vram_usage.size; + struct amdgpu_bo *bo; + + adev->fw_vram_usage.va = NULL; + adev->fw_vram_usage.reserved_bo = NULL; + + if (adev->fw_vram_usage.size > 0 && + adev->fw_vram_usage.size <= vram_size) { + + r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, + PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0, + &adev->fw_vram_usage.reserved_bo); + if (r) + goto error_create; + + r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); + if (r) + goto error_reserve; + + /* remove the original mem node and create a new one at the + * request position + */ + bo = adev->fw_vram_usage.reserved_bo; + offset = ALIGN(offset, PAGE_SIZE); + for (i = 0; i < bo->placement.num_placement; ++i) { + bo->placements[i].fpfn = offset >> PAGE_SHIFT; + bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; + } + + ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); + r = ttm_bo_mem_space(&bo->tbo, &bo->placement, + &bo->tbo.mem, &ctx); + if (r) + goto error_pin; + + r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, + AMDGPU_GEM_DOMAIN_VRAM, + adev->fw_vram_usage.start_offset, + (adev->fw_vram_usage.start_offset + + adev->fw_vram_usage.size), NULL); + if (r) + goto error_pin; + r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, + &adev->fw_vram_usage.va); + if (r) + goto error_kmap; + + amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); + } + return r; + +error_kmap: + amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); +error_pin: + amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); +error_reserve: + amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); +error_create: + adev->fw_vram_usage.va = NULL; + adev->fw_vram_usage.reserved_bo = NULL; + return r; +} + int amdgpu_ttm_init(struct amdgpu_device *adev) { uint64_t gtt_size; @@ -1311,7 +1406,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) *The reserved vram for firmware must be pinned to the specified *place on the VRAM, so reserve it early. */ - r = amdgpu_fw_reserve_vram_init(adev); + r = amdgpu_ttm_fw_reserve_vram_init(adev); if (r) { return r; } @@ -1395,7 +1490,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) amdgpu_ttm_debugfs_fini(adev); amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); - amdgpu_fw_reserve_vram_fini(adev); + amdgpu_ttm_fw_reserve_vram_fini(adev); ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); -- cgit v1.2.3 From 2990a1fc012e1bb4523a54d2c27eebc21a2c7e7e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Dec 2017 16:18:00 -0500 Subject: drm/amdgpu: rename ip block helper functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add device to the name for consistency. 
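As an illustration only (a hypothetical caller, not part of this patch; the function, enum, and state names are taken from the hunks below), the renamed helpers compose like this:

static int example_gate_gfx(struct amdgpu_device *adev)
{
	/* bail out if this ASIC registered no GFX IP block */
	if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX))
		return -ENOENT;
	/* version_cmp returns 0 when the block is version >= 8.0 */
	if (amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 0))
		return -EINVAL;
	return amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
						      AMD_CG_STATE_GATE);
}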
Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 38 +++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 50 +++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 18 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 18 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 16 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 8 +- drivers/gpu/drm/amd/amdgpu/cik.c | 88 +++++++++---------- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 18 ++-- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 8 +- drivers/gpu/drm/amd/amdgpu/si.c | 54 ++++++------ drivers/gpu/drm/amd/amdgpu/soc15.c | 46 +++++----- drivers/gpu/drm/amd/amdgpu/vi.c | 132 ++++++++++++++--------------- 14 files changed, 252 insertions(+), 246 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a8390abe13a6..5c016b3d494d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -224,17 +224,18 @@ enum amdgpu_kiq_irq { AMDGPU_CP_KIQ_IRQ_LAST }; -int amdgpu_set_clockgating_state(struct amdgpu_device *adev, - enum amd_ip_block_type block_type, - enum amd_clockgating_state state); -int amdgpu_set_powergating_state(struct amdgpu_device *adev, - enum amd_ip_block_type block_type, - enum amd_powergating_state state); -void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags); -int amdgpu_wait_for_idle(struct amdgpu_device *adev, - enum amd_ip_block_type block_type); -bool amdgpu_is_idle(struct amdgpu_device *adev, - enum amd_ip_block_type block_type); +int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev, + enum amd_ip_block_type block_type, + enum amd_clockgating_state state); +int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev, + enum amd_ip_block_type block_type, + enum amd_powergating_state state); +void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, + u32 *flags); +int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); +bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); #define AMDGPU_MAX_IP_NUM 16 @@ -259,15 +260,16 @@ struct amdgpu_ip_block { const struct amdgpu_ip_block_version *version; }; -int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, - enum amd_ip_block_type type, - u32 major, u32 minor); +int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev, + enum amd_ip_block_type type, + u32 major, u32 minor); -struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev, - enum amd_ip_block_type type); +struct amdgpu_ip_block * +amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev, + enum amd_ip_block_type type); -int amdgpu_ip_block_add(struct amdgpu_device *adev, - const struct amdgpu_ip_block_version *ip_block_version); +int amdgpu_device_ip_block_add(struct amdgpu_device *adev, + const struct amdgpu_ip_block_version *ip_block_version); /* provided by hw blocks that can move/clear data. 
e.g., gfx or sdma */ struct amdgpu_buffer_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index c04f44a90392..a29362f9ef41 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -277,7 +277,7 @@ static int acp_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; const struct amdgpu_ip_block *ip_block = - amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP); + amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP); if (!ip_block) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3135287c0f5b..38e14525721c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -937,9 +937,9 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { .can_switch = amdgpu_switcheroo_can_switch, }; -int amdgpu_set_clockgating_state(struct amdgpu_device *adev, - enum amd_ip_block_type block_type, - enum amd_clockgating_state state) +int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev, + enum amd_ip_block_type block_type, + enum amd_clockgating_state state) { int i, r = 0; @@ -959,9 +959,9 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev, return r; } -int amdgpu_set_powergating_state(struct amdgpu_device *adev, - enum amd_ip_block_type block_type, - enum amd_powergating_state state) +int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev, + enum amd_ip_block_type block_type, + enum amd_powergating_state state) { int i, r = 0; @@ -981,7 +981,8 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev, return r; } -void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags) +void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, + u32 *flags) { int i; @@ -993,8 +994,8 @@ void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags) } } -int amdgpu_wait_for_idle(struct amdgpu_device *adev, - enum amd_ip_block_type block_type) +int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) { int i, r; @@ -1012,8 +1013,8 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev, } -bool amdgpu_is_idle(struct amdgpu_device *adev, - enum amd_ip_block_type block_type) +bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, + enum amd_ip_block_type block_type) { int i; @@ -1027,8 +1028,9 @@ bool amdgpu_is_idle(struct amdgpu_device *adev, } -struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev, - enum amd_ip_block_type type) +struct amdgpu_ip_block * +amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev, + enum amd_ip_block_type type) { int i; @@ -1040,7 +1042,7 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev, } /** - * amdgpu_ip_block_version_cmp + * amdgpu_device_ip_block_version_cmp * * @adev: amdgpu_device pointer * @type: enum amd_ip_block_type @@ -1050,11 +1052,11 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev, * return 0 if equal or greater * return 1 if smaller or the ip_block doesn't exist */ -int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, - enum amd_ip_block_type type, - u32 major, u32 minor) +int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev, + enum amd_ip_block_type type, + u32 major, u32 minor) { - struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type); + struct amdgpu_ip_block *ip_block = 
amdgpu_device_ip_get_ip_block(adev, type); if (ip_block && ((ip_block->version->major > major) || ((ip_block->version->major == major) && @@ -1065,7 +1067,7 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, } /** - * amdgpu_ip_block_add + * amdgpu_device_ip_block_add * * @adev: amdgpu_device pointer * @ip_block_version: pointer to the IP to add @@ -1073,8 +1075,8 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, * Adds the IP block driver information to the collection of IPs * on the asic. */ -int amdgpu_ip_block_add(struct amdgpu_device *adev, - const struct amdgpu_ip_block_version *ip_block_version) +int amdgpu_device_ip_block_add(struct amdgpu_device *adev, + const struct amdgpu_ip_block_version *ip_block_version) { if (!ip_block_version) return -EINVAL; @@ -1569,10 +1571,10 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev) amdgpu_virt_request_full_gpu(adev, false); /* ungate SMC block first */ - r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC, - AMD_CG_STATE_UNGATE); + r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC, + AMD_CG_STATE_UNGATE); if (r) { - DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r); + DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r); } for (i = adev->num_ip_blocks - 1; i >= 0; i--) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 83205b93e62d..01a996c6b802 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1278,16 +1278,16 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) /* XXX select vce level based on ring/task */ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; mutex_unlock(&adev->pm.mutex); - amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); amdgpu_pm_compute_clocks(adev); } else { - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); mutex_lock(&adev->pm.mutex); adev->pm.dpm.vce_active = false; mutex_unlock(&adev->pm.mutex); @@ -1584,7 +1584,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) struct drm_device *ddev = adev->ddev; u32 flags = 0; - amdgpu_get_clockgating_state(adev, &flags); + amdgpu_device_ip_get_clockgating_state(adev, &flags); seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); amdgpu_parse_cg_state(m, flags); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 916e51670bfd..bd6d3a1c1d65 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -244,7 +244,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) } /* from uvd v5.0 HW addressing capacity increased to 64 bits */ - if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) + if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; switch (adev->asic_type) { @@ -1153,10 +1153,10 @@ 
static void amdgpu_uvd_idle_work_handler(struct work_struct *work) } else { amdgpu_asic_set_uvd_clocks(adev, 0, 0); /* shutdown the UVD block */ - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_GATE); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); } } else { schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); @@ -1176,10 +1176,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) amdgpu_dpm_enable_uvd(adev, true); } else { amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 641deb0527ae..9857d482c942 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -311,10 +311,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) amdgpu_dpm_enable_vce(adev, false); } else { amdgpu_asic_set_vce_clocks(adev, 0, 0); - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); } } else { schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT); @@ -343,10 +343,10 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring) amdgpu_dpm_enable_vce(adev, true); } else { amdgpu_asic_set_vce_clocks(adev, 53300, 40000); - amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 564e1b1962f1..398abbcbf029 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -741,7 +741,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev) has_compute_vm_bug = false; - ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); if (ip_block) { /* Compute has a VM bug for GFX version < 7. 
Compute has a VM bug for GFX 8 MEC firmware version < 673.*/ diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index f11c0aacf19f..a0943aa8d1d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -891,12 +891,12 @@ static void ci_dpm_powergate_uvd(void *handle, bool gate) if (gate) { /* stop the UVD block */ - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); ci_update_uvd_dpm(adev, gate); } else { - amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); ci_update_uvd_dpm(adev, gate); } } diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 132ba226f289..6a92abc736e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1974,77 +1974,77 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_BONAIRE: - amdgpu_ip_block_add(adev, &cik_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); - amdgpu_ip_block_add(adev, &cik_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &cik_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); + amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) - amdgpu_ip_block_add(adev, &dm_ip_block); + amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif else - amdgpu_ip_block_add(adev, &dce_v8_2_ip_block); - amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block); - amdgpu_ip_block_add(adev, &cik_sdma_ip_block); - amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); - amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); + amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block); + amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); + amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block); break; case CHIP_HAWAII: - amdgpu_ip_block_add(adev, &cik_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); - amdgpu_ip_block_add(adev, &cik_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &cik_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); + amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) - amdgpu_ip_block_add(adev, &dm_ip_block); + amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif else - amdgpu_ip_block_add(adev, &dce_v8_5_ip_block); - amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block); - amdgpu_ip_block_add(adev, &cik_sdma_ip_block); - amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); - amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); + amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block); + 
amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); + amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block); break; case CHIP_KAVERI: - amdgpu_ip_block_add(adev, &cik_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); - amdgpu_ip_block_add(adev, &cik_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &cik_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); + amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) - amdgpu_ip_block_add(adev, &dm_ip_block); + amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif else - amdgpu_ip_block_add(adev, &dce_v8_1_ip_block); - amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block); - amdgpu_ip_block_add(adev, &cik_sdma_ip_block); - amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); - amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); + amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block); + amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); + amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block); break; case CHIP_KABINI: case CHIP_MULLINS: - amdgpu_ip_block_add(adev, &cik_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block); - amdgpu_ip_block_add(adev, &cik_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &cik_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); + amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) - amdgpu_ip_block_add(adev, &dm_ip_block); + amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif else - amdgpu_ip_block_add(adev, &dce_v8_3_ip_block); - amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block); - amdgpu_ip_block_add(adev, &cik_sdma_ip_block); - amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block); - amdgpu_ip_block_add(adev, &vce_v2_0_ip_block); + amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block); + amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); + amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block); break; default: /* FIXME: not supported yet */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 4a9c28cd144d..46550b588982 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5062,8 +5062,9 @@ static int gfx_v8_0_hw_fini(void *handle) gfx_v8_0_cp_enable(adev, false); gfx_v8_0_rlc_stop(adev); - amdgpu_set_powergating_state(adev, - AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_UNGATE); return 0; } @@ -5480,8 +5481,9 @@ static int gfx_v8_0_late_init(void *handle) if (r) return r; - amdgpu_set_powergating_state(adev, - AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_GATE); + 
amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_GATE); return 0; } @@ -5492,10 +5494,10 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade if ((adev->asic_type == CHIP_POLARIS11) || (adev->asic_type == CHIP_POLARIS12)) /* Send msg to SMU via Powerplay */ - amdgpu_set_powergating_state(adev, - AMD_IP_BLOCK_TYPE_SMC, - enable ? - AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_SMC, + enable ? + AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index f33d1ffdb20b..d9e9e52a0def 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -1682,8 +1682,8 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate) if (gate) { /* stop the UVD block */ - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); kv_update_uvd_dpm(adev, gate); if (pi->caps_uvd_pg) /* power off the UVD block */ @@ -1695,8 +1695,8 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate) /* re-init the UVD block */ kv_update_uvd_dpm(adev, gate); - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); } } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 78baddb5d300..543101d5a5ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1959,42 +1959,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev) case CHIP_VERDE: case CHIP_TAHITI: case CHIP_PITCAIRN: - amdgpu_ip_block_add(adev, &si_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block); - amdgpu_ip_block_add(adev, &si_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &si_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &si_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); else - amdgpu_ip_block_add(adev, &dce_v6_0_ip_block); - amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block); - amdgpu_ip_block_add(adev, &si_dma_ip_block); - /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */ - /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */ + amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &si_dma_ip_block); + /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */ + /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */ break; case CHIP_OLAND: - amdgpu_ip_block_add(adev, &si_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block); - amdgpu_ip_block_add(adev, &si_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &si_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &si_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + 
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); else - amdgpu_ip_block_add(adev, &dce_v6_4_ip_block); - amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block); - amdgpu_ip_block_add(adev, &si_dma_ip_block); - /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */ - /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */ + amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &si_dma_ip_block); + /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */ + /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */ break; case CHIP_HAINAN: - amdgpu_ip_block_add(adev, &si_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block); - amdgpu_ip_block_add(adev, &si_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &si_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &si_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); - amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block); - amdgpu_ip_block_add(adev, &si_dma_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &si_dma_ip_block); break; default: BUG(); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 49ff552cd6fe..f0fb4161e866 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -533,43 +533,43 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: - amdgpu_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_ip_block_add(adev, &vega10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1) - amdgpu_ip_block_add(adev, &psp_v3_1_ip_block); + amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); if (!amdgpu_sriov_vf(adev)) - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) - amdgpu_ip_block_add(adev, &dm_ip_block); + amdgpu_device_ip_block_add(adev, &dm_ip_block); #else # warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." 
#endif - amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block); - amdgpu_ip_block_add(adev, &vce_v4_0_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); + amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); break; case CHIP_RAVEN: - amdgpu_ip_block_add(adev, &vega10_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_ip_block_add(adev, &vega10_ih_ip_block); - amdgpu_ip_block_add(adev, &psp_v10_0_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) - amdgpu_ip_block_add(adev, &dm_ip_block); + amdgpu_device_ip_block_add(adev, &dm_ip_block); #else # warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." #endif - amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); + amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); break; default: return -EINVAL; @@ -616,8 +616,8 @@ static int soc15_common_early_init(void *handle) adev->asic_funcs = &soc15_asic_funcs; - if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) && - (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP))) + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) && + (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP))) psp_enabled = true; adev->rev_id = soc15_get_rev_id(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 0e1202914fa8..66072063bc7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -891,8 +891,8 @@ static int vi_common_early_init(void *handle) adev->asic_funcs = &vi_asic_funcs; - if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) && - (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC))) + if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) && + (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC))) smc_enabled = true; adev->rev_id = vi_get_rev_id(adev); @@ -1487,115 +1487,115 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_TOPAZ: /* topaz has no DCE, UVD, VCE */ - amdgpu_ip_block_add(adev, &vi_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block); - amdgpu_ip_block_add(adev, &iceland_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &vi_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block); + amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); - amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); - amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block); + 
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block); break; case CHIP_FIJI: - amdgpu_ip_block_add(adev, &vi_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block); - amdgpu_ip_block_add(adev, &tonga_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &vi_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block); + amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) - amdgpu_ip_block_add(adev, &dm_ip_block); + amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif else - amdgpu_ip_block_add(adev, &dce_v10_1_ip_block); - amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); - amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); + amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); if (!amdgpu_sriov_vf(adev)) { - amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block); - amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); + amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block); } break; case CHIP_TONGA: - amdgpu_ip_block_add(adev, &vi_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); - amdgpu_ip_block_add(adev, &tonga_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &vi_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) - amdgpu_ip_block_add(adev, &dm_ip_block); + amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif else - amdgpu_ip_block_add(adev, &dce_v10_0_ip_block); - amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); - amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); + amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); if (!amdgpu_sriov_vf(adev)) { - amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block); - amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); + amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block); } break; case CHIP_POLARIS11: case CHIP_POLARIS10: case CHIP_POLARIS12: - amdgpu_ip_block_add(adev, &vi_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block); - amdgpu_ip_block_add(adev, &tonga_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &vi_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block); + amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if 
defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) - amdgpu_ip_block_add(adev, &dm_ip_block); + amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif else - amdgpu_ip_block_add(adev, &dce_v11_2_ip_block); - amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); - amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block); - amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block); - amdgpu_ip_block_add(adev, &vce_v3_4_ip_block); + amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block); + amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block); break; case CHIP_CARRIZO: - amdgpu_ip_block_add(adev, &vi_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); - amdgpu_ip_block_add(adev, &cz_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &vi_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) - amdgpu_ip_block_add(adev, &dm_ip_block); + amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif else - amdgpu_ip_block_add(adev, &dce_v11_0_ip_block); - amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); - amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); - amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block); - amdgpu_ip_block_add(adev, &vce_v3_1_ip_block); + amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); + amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block); #if defined(CONFIG_DRM_AMD_ACP) - amdgpu_ip_block_add(adev, &acp_ip_block); + amdgpu_device_ip_block_add(adev, &acp_ip_block); #endif break; case CHIP_STONEY: - amdgpu_ip_block_add(adev, &vi_common_ip_block); - amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); - amdgpu_ip_block_add(adev, &cz_ih_ip_block); - amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); + amdgpu_device_ip_block_add(adev, &vi_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); + amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block); if (adev->enable_virtual_display) - amdgpu_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) else if (amdgpu_device_has_dc_support(adev)) - amdgpu_ip_block_add(adev, &dm_ip_block); + amdgpu_device_ip_block_add(adev, &dm_ip_block); #endif else - amdgpu_ip_block_add(adev, &dce_v11_0_ip_block); - amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block); - amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); - amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block); - amdgpu_ip_block_add(adev, &vce_v3_4_ip_block); + amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); + amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block); #if defined(CONFIG_DRM_AMD_ACP) - amdgpu_ip_block_add(adev, &acp_ip_block); + amdgpu_device_ip_block_add(adev, 
&acp_ip_block); #endif break; default: -- cgit v1.2.3 From 39c640c086ad12005e88cf6f9de03c32111ef081 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Dec 2017 16:22:11 -0500 Subject: drm/amdgpu: rename amdgpu_need_post MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add device to the name for consistency. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5c016b3d494d..149ed8a67664 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1893,7 +1893,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job, bool force); bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); -bool amdgpu_need_post(struct amdgpu_device *adev); +bool amdgpu_device_need_post(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 057e1ecd83ce..a5df80d50d44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -93,7 +93,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) resource_size_t size = 256 * 1024; /* ??? */ if (!(adev->flags & AMD_IS_APU)) - if (amdgpu_need_post(adev)) + if (amdgpu_device_need_post(adev)) return false; adev->bios = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 38e14525721c..8cc3dcf120d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -671,7 +671,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) * GPU helpers function. */ /** - * amdgpu_need_post - check if the hw need post or not + * amdgpu_device_need_post - check if the hw need post or not * * @adev: amdgpu_device pointer * @@ -679,7 +679,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) * or post is needed if hw reset is performed. * Returns true if need or false if not. 
*/ -bool amdgpu_need_post(struct amdgpu_device *adev) +bool amdgpu_device_need_post(struct amdgpu_device *adev) { uint32_t reg; @@ -1946,7 +1946,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, amdgpu_device_detect_sriov_bios(adev); /* Post card if necessary */ - if (amdgpu_need_post(adev)) { + if (amdgpu_device_need_post(adev)) { if (!adev->bios) { dev_err(adev->dev, "no vBIOS found\n"); r = -EINVAL; @@ -2275,7 +2275,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) } /* post card */ - if (amdgpu_need_post(adev)) { + if (amdgpu_device_need_post(adev)) { r = amdgpu_atom_asic_init(adev->mode_info.atom_context); if (r) DRM_ERROR("amdgpu asic init failed\n"); -- cgit v1.2.3 From 55e0037aab6503453eef663eded60e530896eaef Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Dec 2017 16:28:33 -0500 Subject: drm/amdgpu: move dummy page functions to amdgpu_gart.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's the only place they are used. Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 -- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 45 --------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 49 ++++++++++++++++++++++++++++-- 3 files changed, 47 insertions(+), 50 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 149ed8a67664..d4abb7f04a86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -377,9 +377,6 @@ struct amdgpu_dummy_page { struct page *page; dma_addr_t addr; }; -int amdgpu_dummy_page_init(struct amdgpu_device *adev); -void amdgpu_dummy_page_fini(struct amdgpu_device *adev); - /* * Clocks diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 8cc3dcf120d0..0080776c4936 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -724,51 +724,6 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev) return true; } -/** - * amdgpu_dummy_page_init - init dummy page used by the driver - * - * @adev: amdgpu_device pointer - * - * Allocate the dummy page used by the driver (all asics). - * This dummy page is used by the driver as a filler for gart entries - * when pages are taken out of the GART - * Returns 0 on sucess, -ENOMEM on failure. - */ -int amdgpu_dummy_page_init(struct amdgpu_device *adev) -{ - if (adev->dummy_page.page) - return 0; - adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); - if (adev->dummy_page.page == NULL) - return -ENOMEM; - adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page, - 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) { - dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n"); - __free_page(adev->dummy_page.page); - adev->dummy_page.page = NULL; - return -ENOMEM; - } - return 0; -} - -/** - * amdgpu_dummy_page_fini - free dummy page used by the driver - * - * @adev: amdgpu_device pointer - * - * Frees the dummy page used by the driver (all asics). 
- */ -void amdgpu_dummy_page_fini(struct amdgpu_device *adev) -{ - if (adev->dummy_page.page == NULL) - return; - pci_unmap_page(adev->pdev, adev->dummy_page.addr, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - __free_page(adev->dummy_page.page); - adev->dummy_page.page = NULL; -} - /* if we get transitioned to only one device, take VGA back */ /** * amdgpu_device_vga_set_decode - enable/disable vga decode diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 1f51897acc5b..0a4f34afaaaa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -56,6 +56,51 @@ * Common GART table functions. */ +/** + * amdgpu_gart_dummy_page_init - init dummy page used by the driver + * + * @adev: amdgpu_device pointer + * + * Allocate the dummy page used by the driver (all asics). + * This dummy page is used by the driver as a filler for gart entries + * when pages are taken out of the GART + * Returns 0 on success, -ENOMEM on failure. + */ +static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev) +{ + if (adev->dummy_page.page) + return 0; + adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); + if (adev->dummy_page.page == NULL) + return -ENOMEM; + adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page, + 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) { + dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n"); + __free_page(adev->dummy_page.page); + adev->dummy_page.page = NULL; + return -ENOMEM; + } + return 0; +} + +/** + * amdgpu_gart_dummy_page_fini - free dummy page used by the driver + * + * @adev: amdgpu_device pointer + * + * Frees the dummy page used by the driver (all asics). + */ +static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) +{ + if (adev->dummy_page.page == NULL) + return; + pci_unmap_page(adev->pdev, adev->dummy_page.addr, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + __free_page(adev->dummy_page.page); + adev->dummy_page.page = NULL; +} + /** * amdgpu_gart_table_vram_alloc - allocate vram for gart page table * @@ -308,7 +353,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev) DRM_ERROR("Page size is smaller than GPU page size!\n"); return -EINVAL; } - r = amdgpu_dummy_page_init(adev); + r = amdgpu_gart_dummy_page_init(adev); if (r) return r; /* Compute table size */ @@ -340,5 +385,5 @@ void amdgpu_gart_fini(struct amdgpu_device *adev) vfree(adev->gart.pages); adev->gart.pages = NULL; #endif - amdgpu_dummy_page_fini(adev); + amdgpu_gart_dummy_page_fini(adev); } -- cgit v1.2.3 From 5f152b5e69a5392181b0a84bd55fe17a417364ac Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Dec 2017 16:40:49 -0500 Subject: drm/amdgpu: rename amdgpu_gpu_recover MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add device to the name for consistency.
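As a hedged sketch (hypothetical handlers, not part of this patch; the call signatures mirror the hunks below), the renamed entry point is driven either from a job timeout, where the guilty job is known, or from a reset path where it is not:

static void example_timedout(struct amdgpu_job *job)
{
	/* known guilty job; force = false defers to the recovery policy */
	amdgpu_device_gpu_recover(job->adev, job, false);
}

static void example_reset_work(struct amdgpu_device *adev)
{
	/* no job context is available from the interrupt path */
	amdgpu_device_gpu_recover(adev, NULL, false);
}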
Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 +- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 2 +- 7 files changed, 10 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d4abb7f04a86..04e5498929c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1887,7 +1887,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) /* Common functions */ -int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job, bool force); +int amdgpu_device_gpu_recover(struct amdgpu_device *adev, + struct amdgpu_job* job, bool force); bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); bool amdgpu_device_need_post(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0080776c4936..cfeceab29224 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2611,7 +2611,7 @@ error: } /** - * amdgpu_gpu_recover - reset the asic and recover scheduler + * amdgpu_device_gpu_recover - reset the asic and recover scheduler * * @adev: amdgpu device pointer * @job: which job trigger hang @@ -2620,7 +2620,8 @@ error: * Attempt to reset the GPU if it has hung (all asics). * Returns 0 for success or an error on failure. 
*/ -int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool force) +int amdgpu_device_gpu_recover(struct amdgpu_device *adev, + struct amdgpu_job *job, bool force) { struct drm_atomic_state *state = NULL; uint64_t reset_flags = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index da1510f65ee0..008e1984b7e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -693,7 +693,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data) struct amdgpu_device *adev = dev->dev_private; seq_printf(m, "gpu recover\n"); - amdgpu_gpu_recover(adev, NULL, true); + amdgpu_device_gpu_recover(adev, NULL, true); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index c43643e8c8c8..56bcd59c3399 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -88,7 +88,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work) reset_work); if (!amdgpu_sriov_vf(adev)) - amdgpu_gpu_recover(adev, NULL, false); + amdgpu_device_gpu_recover(adev, NULL, false); } /* Disable *all* interrupts */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index be8a437fad54..56d9ee5013a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job) atomic_read(&job->ring->fence_drv.last_seq), job->ring->fence_drv.sync_seq); - amdgpu_gpu_recover(job->adev, job, false); + amdgpu_device_gpu_recover(job->adev, job, false); } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 43e74ec93147..271452d3999a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -253,7 +253,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_gpu_recover(adev, NULL, false); + amdgpu_device_gpu_recover(adev, NULL, false); } static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index af2d47e9abdc..9fc1c37344ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -521,7 +521,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) } /* Trigger recovery due to world switch failure */ - amdgpu_gpu_recover(adev, NULL, false); + amdgpu_device_gpu_recover(adev, NULL, false); } static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev, -- cgit v1.2.3 From 6b8f4ee56fb27ac36fb3dbec91b5eb04beb90287 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 15 Dec 2017 16:45:02 -0500 Subject: drm/amdgpu: move amdgpu_need_backup to amdgpu_object.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's the only place it's used. 
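For context (a hedged sketch; AMDGPU_GEM_CREATE_SHADOW and the caller below are illustrative, not from this patch), making the helper static confines the shadow-backup policy to the one file that applies it:

static bool example_wants_shadow(struct amdgpu_device *adev, u64 bo_flags)
{
	/* shadow copies only pay off when recovery can restore from them */
	return (bo_flags & AMDGPU_GEM_CREATE_SHADOW) &&
		amdgpu_need_backup(adev);
}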
From 6b8f4ee56fb27ac36fb3dbec91b5eb04beb90287 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Fri, 15 Dec 2017 16:45:02 -0500
Subject: drm/amdgpu: move amdgpu_need_backup to amdgpu_object.c
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It's the only place it's used.

Acked-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        | 1 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 --------
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8 ++++++++
 3 files changed, 8 insertions(+), 9 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 04e5498929c3..91b4fda42873 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1889,7 +1889,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 /* Common functions */
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job* job, bool force);
-bool amdgpu_need_backup(struct amdgpu_device *adev);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cfeceab29224..f80081ecb02c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2425,14 +2425,6 @@ static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
 	return 0;
 }
 
-bool amdgpu_need_backup(struct amdgpu_device *adev)
-{
-	if (adev->flags & AMD_IS_APU)
-		return false;
-
-	return amdgpu_gpu_recovery;
-}
-
 static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
 						  struct amdgpu_ring *ring,
 						  struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 86340cfa6be7..4feee9927bce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -37,6 +37,14 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
+static bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+	if (adev->flags & AMD_IS_APU)
+		return false;
+
+	return amdgpu_gpu_recovery;
+}
+
 static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
--
cgit v1.2.3
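With the helper now static, its only consumers are the buffer-object paths in amdgpu_object.c that decide whether a VRAM buffer gets a shadow copy for post-reset restore. A hypothetical sketch of that kind of gate (the wrapper and its flag handling are illustrative; only amdgpu_need_backup() and AMDGPU_GEM_CREATE_SHADOW come from the driver):

/* Illustrative only: APUs have no dedicated VRAM to back up, so the
 * helper returns false there; otherwise it defers to the
 * amdgpu_gpu_recovery module parameter. */
static void example_maybe_request_shadow(struct amdgpu_device *adev,
					 u64 *bo_flags)
{
	if (amdgpu_need_backup(adev))
		*bo_flags |= AMDGPU_GEM_CREATE_SHADOW;
}
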
From 041d9d93b5dba8fa41134a4e5fc7a432b76fa308 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Fri, 15 Dec 2017 16:49:33 -0500
Subject: drm/amdgpu: rename amdgpu_get_pcie_info
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

add device to the name for consistency.

Acked-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h        | 2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
 drivers/gpu/drm/amd/amdgpu/cik.c           | 2 +-
 drivers/gpu/drm/amd/amdgpu/soc15.c         | 2 +-
 drivers/gpu/drm/amd/amdgpu/vi.c            | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 91b4fda42873..6b296e1fecf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1167,7 +1167,7 @@ struct amdgpu_wb {
 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
 
-void amdgpu_get_pcie_info(struct amdgpu_device *adev);
+void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 
 /*
  * SDMA
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f80081ecb02c..357cd8bf2e55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2749,7 +2749,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	return r;
 }
 
-void amdgpu_get_pcie_info(struct amdgpu_device *adev)
+void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 {
 	u32 mask;
 	int ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 6a92abc736e0..8e59e65efd44 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1866,7 +1866,7 @@ static int cik_common_early_init(void *handle)
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev,
							       amdgpu_fw_load_type);
 
-	amdgpu_get_pcie_info(adev);
+	amdgpu_device_get_pcie_info(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f0fb4161e866..8f2cff7b7e0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -682,7 +682,7 @@ static int soc15_common_early_init(void *handle)
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev,
							       amdgpu_fw_load_type);
 
-	amdgpu_get_pcie_info(adev);
+	amdgpu_device_get_pcie_info(adev);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 66072063bc7c..d9bb26322850 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1074,7 +1074,7 @@ static int vi_common_early_init(void *handle)
 	/* vi use smc load by default */
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 
-	amdgpu_get_pcie_info(adev);
+	amdgpu_device_get_pcie_info(adev);
 
 	return 0;
 }
--
cgit v1.2.3
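The three asic hunks above are identical by design; any asic's common early-init is expected to follow the same shape. A sketch under that assumption (example_common_early_init is a placeholder, not a real IP-block handler):

static int example_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev,
							      amdgpu_fw_load_type);

	/* cache the PCIe gen/lane caps on the device early, exactly as
	 * cik.c, soc15.c and vi.c do after this rename */
	amdgpu_device_get_pcie_info(adev);

	return 0;
}
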
From 0ebb7c5405941bde1d5ddaa806f6085e6a6deaa7 Mon Sep 17 00:00:00 2001
From: Christian König
Date: Sun, 7 Jan 2018 10:18:57 +0100
Subject: drm/amdgpu: fix 64bit BAR detection
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Windows added by the BIOS are not marked as 64bit because they are
usually not changeable anyway. This fixes large BAR support on my new
Ryzen build system.

Signed-off-by: Christian König
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 357cd8bf2e55..9baf182d5418 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -626,7 +626,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 		root = root->parent;
 
 	pci_bus_for_each_resource(root, res, i) {
-		if (res && res->flags & IORESOURCE_MEM_64 &&
+		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
 		    res->start > 0x100000000ull)
 			break;
 	}
--
cgit v1.2.3

From 54bc1398cc0c3affa301d65370be2fd8dcf1bf08 Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky
Date: Fri, 19 Jan 2018 17:23:08 -0500
Subject: drm/amdgpu: Reenable manual GPU reset from sysfs

Otherwise it keeps rejecting the reset.

Signed-off-by: Andrey Grodzovsky
Reviewed-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 9baf182d5418..03f2e56673d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2619,7 +2619,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	uint64_t reset_flags = 0;
 	int i, r, resched;
 
-	if (!amdgpu_device_ip_check_soft_reset(adev)) {
+	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
 		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
 		return 0;
 	}
--
cgit v1.2.3

From 458d876eb869d5a88b53074c6c271b8b9adc0f07 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Mon, 22 Jan 2018 23:13:32 -0500
Subject: drm/amdgpu: Avoid leaking PM domain on driver unbind (v2)

We only support vga_switcheroo and runtime pm on PX/HG systems so
forcing runpm to 1 doesn't do anything useful anyway. Only call
vga_switcheroo_init_domain_pm_ops() for PX/HG so that the cleanup path
is correct as well. This mirrors what radeon does as well.

v2: rework the patch originally sent by Lukas (Alex)

Acked-by: Lukas Wunner
Reported-by: Lukas Wunner
Signed-off-by: Alex Deucher
Signed-off-by: Lukas Wunner (v1)
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 03f2e56673d1..00a50cc5ec9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1874,8 +1874,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	 * ignore it */
 	vga_client_register(adev->pdev, adev, NULL,
			    amdgpu_device_vga_set_decode);
 
-	if (amdgpu_runtime_pm == 1)
-		runtime = true;
 	if (amdgpu_device_is_px(ddev))
 		runtime = true;
 	if (!pci_is_thunderbolt_attached(adev->pdev))
--
cgit v1.2.3
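The PM-domain leak fixed by the last patch is an init/fini asymmetry: with amdgpu_runtime_pm forced to 1 on a non-PX system, init registered the vga_switcheroo PM domain, but unbind only unregistered it on PX/HG devices. A condensed sketch of the symmetry the patch restores (a fragment, not a full function; it assumes amdgpu's vga_pm_domain member and a radeon-style fini path):

	/* amdgpu_device_init(): runtime is now true only for PX/HG */
	if (amdgpu_device_is_px(ddev))
		runtime = true;
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev,
						  &adev->vga_pm_domain);

	/* driver unbind: the PX/HG check now matches init, no leak */
	if (amdgpu_device_is_px(ddev))
		vga_switcheroo_fini_domain_pm_ops(adev->dev);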