Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a6xx_gmu.c')
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c  330
1 file changed, 248 insertions, 82 deletions
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 4e6dc16e4a4c..5903cd891b49 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -224,14 +224,19 @@ unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
 
 static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
 {
-	u32 val;
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	int local = gmu->idle_level;
+	u32 val;
 
 	/* SPTP and IFPC both report as IFPC */
 	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
 		local = GMU_IDLE_STATE_IFPC;
 
-	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+	if (adreno_is_a8xx(adreno_gpu))
+		val = gmu_read(gmu, REG_A8XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+	else
+		val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
 
 	if (val == local) {
 		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
@@ -269,7 +274,9 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
 	/* Set the log wptr index
 	 * note: downstream saves the value in poweroff and restores it here
 	 */
-	if (adreno_is_a7xx(adreno_gpu))
+	if (adreno_is_a8xx(adreno_gpu))
+		gmu_write(gmu, REG_A8XX_GMU_GENERAL_9, 0);
+	else if (adreno_is_a7xx(adreno_gpu))
 		gmu_write(gmu, REG_A7XX_GMU_GENERAL_9, 0);
 	else
 		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
@@ -350,12 +357,18 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
 /* Trigger a OOB (out of band) request to the GMU */
 int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	int ret;
 	u32 val;
 	int request, ack;
 
 	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
 
+	/* Skip OOB calls since RGMU is not enabled */
+	if (adreno_has_rgmu(adreno_gpu))
+		return 0;
+
 	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
 		return -EINVAL;
 
@@ -376,9 +389,23 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 	/* Trigger the equested OOB operation */
 	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
 
-	/* Wait for the acknowledge interrupt */
-	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
-		val & (1 << ack), 100, 10000);
+	do {
+		/* Wait for the acknowledge interrupt */
+		ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+			val & (1 << ack), 100, 10000);
+
+		if (!ret)
+			break;
+
+		if (completion_done(&a6xx_gpu->base.fault_coredump_done))
+			break;
+
+		/* We may timeout because the GMU is temporarily wedged from
+		 * pending faults from the GPU and we are taking a devcoredump.
+		 * Wait until the MMU is resumed and try again.
+		 */
+		wait_for_completion(&a6xx_gpu->base.fault_coredump_done);
+	} while (true);
 
 	if (ret)
 		DRM_DEV_ERROR(gmu->dev,
@@ -395,10 +422,16 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 /* Clear a pending OOB state in the GMU */
 void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	int bit;
 
 	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
 
+	/* Skip OOB calls since RGMU is not enabled */
+	if (adreno_has_rgmu(adreno_gpu))
+		return;
+
 	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
 		return;
 
@@ -485,17 +518,25 @@ static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu)
 	 * in the power down sequence not being fully executed. That in turn can
 	 * prevent CX_GDSC from collapsing. Assert Qactive to avoid this.
 	 */
-	if (adreno_is_a621(adreno_gpu) || adreno_is_7c3(adreno_gpu))
-		gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0));
+	if (adreno_is_a8xx(adreno_gpu))
+		gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0));
+	else if (adreno_is_a7xx(adreno_gpu) || (adreno_is_a621(adreno_gpu) ||
+		 adreno_is_7c3(adreno_gpu)))
+		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0));
 }
 
 /* Let the GMU know that we are about to go into slumber */
 static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	int ret;
 
 	/* Disable the power counter so the GMU isn't busy */
-	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+	if (adreno_is_a8xx(adreno_gpu))
+		gmu_write(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+	else
+		gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
 
 	/* Disable SPTP_PC if the CPU is responsible for it */
 	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
@@ -522,10 +563,9 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
 	}
 
 out:
-	a6xx_gemnoc_workaround(gmu);
-
 	/* Put fence into allow mode */
 	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+	a6xx_gemnoc_workaround(gmu);
 
 	return ret;
 }
@@ -561,16 +601,22 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
 
 static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
 {
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	u32 bitmask = BIT(16);
 	int ret;
 	u32 val;
 
 	if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
 		return;
 
+	if (adreno_is_a840(adreno_gpu))
+		bitmask = BIT(30);
+
 	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
 
 	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
-		val, val & (1 << 16), 100, 10000);
+		val, val & bitmask, 100, 10000);
 
 	if (ret)
 		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
@@ -584,22 +630,24 @@ static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
 	writel(value, ptr + (offset << 2));
 }
 
-static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
-		const char *name);
-
 static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	struct platform_device *pdev = to_platform_device(gmu->dev);
-	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
 	u32 seqmem0_drv0_reg = REG_A6XX_RSCC_SEQ_MEM_0_DRV0;
 	void __iomem *seqptr = NULL;
 	uint32_t pdc_address_offset;
+	void __iomem *pdcptr;
 	bool pdc_in_aop = false;
 
+	/* On A8x and above, RPMH/PDC configurations are entirely configured in AOP */
+	if (adreno_is_a8xx(adreno_gpu))
+		return;
+
+	pdcptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc");
 	if (IS_ERR(pdcptr))
-		goto err;
+		return;
 
 	if (adreno_is_a650_family(adreno_gpu) ||
 	    adreno_is_a7xx(adreno_gpu))
@@ -612,9 +660,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
 		pdc_address_offset = 0x30080;
 
 	if (!pdc_in_aop) {
-		seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+		seqptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc_seq");
 		if (IS_ERR(seqptr))
-			goto err;
+			return;
 	}
 
 	/* Disable SDE clock gating */
@@ -704,12 +752,6 @@ setup_pdc:
 
 	/* ensure no writes happen before the uCode is fully written */
 	wmb();
-
-err:
-	if (!IS_ERR_OR_NULL(pdcptr))
-		iounmap(pdcptr);
-	if (!IS_ERR_OR_NULL(seqptr))
-		iounmap(seqptr);
 }
 
 /*
@@ -732,7 +774,7 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
 	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
 
 	/* A7xx knows better by default! */
-	if (adreno_is_a7xx(adreno_gpu))
+	if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
 		return;
 
 	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
@@ -795,7 +837,9 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
 	u32 itcm_base = 0x00000000;
 	u32 dtcm_base = 0x00040000;
 
-	if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu))
+	if (adreno_is_a650_family(adreno_gpu) ||
+	    adreno_is_a7xx(adreno_gpu) ||
+	    adreno_is_a8xx(adreno_gpu))
 		dtcm_base = 0x10004000;
 
 	if (gmu->legacy) {
@@ -850,7 +894,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 {
 	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
 	const struct a6xx_info *a6xx_info = adreno_gpu->info->a6xx;
+	const struct adreno_reglist *gbif_cx = a6xx_info->gbif_cx;
 	u32 fence_range_lower, fence_range_upper;
 	u32 chipid = 0;
 	int ret;
@@ -859,12 +905,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 	if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) {
 		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
 		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
+	} else if (adreno_is_a8xx(adreno_gpu)) {
+		gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
+		gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
 	}
 
 	/* Turn on TCM (Tightly Coupled Memory) retention */
 	if (adreno_is_a7xx(adreno_gpu))
 		a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL, 1);
-	else
+	else if (!adreno_is_a8xx(adreno_gpu))
 		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
 
 	ret = a6xx_rpmh_start(gmu);
@@ -889,7 +938,10 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
 	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
 
-	if (adreno_is_a7xx(adreno_gpu)) {
+	if (adreno_is_a8xx(adreno_gpu)) {
+		fence_range_upper = 0x32;
+		fence_range_lower = 0x8c0;
+	} else if (adreno_is_a7xx(adreno_gpu)) {
 		fence_range_upper = 0x32;
 		fence_range_lower = 0x8a0;
 	} else {
@@ -923,7 +975,12 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 		chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
 	}
 
-	if (adreno_is_a7xx(adreno_gpu)) {
+	if (adreno_is_a8xx(adreno_gpu)) {
+		gmu_write(gmu, REG_A8XX_GMU_GENERAL_10, chipid);
+		gmu_write(gmu, REG_A8XX_GMU_GENERAL_8,
+			  (gmu->log.iova & GENMASK(31, 12)) |
+			  ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0)));
+	} else if (adreno_is_a7xx(adreno_gpu)) {
 		gmu_write(gmu, REG_A7XX_GMU_GENERAL_10, chipid);
 		gmu_write(gmu, REG_A7XX_GMU_GENERAL_8,
 			  (gmu->log.iova & GENMASK(31, 12)) |
@@ -935,6 +992,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 			  gmu->log.iova | (gmu->log.size / SZ_4K - 1));
 	}
 
+	/* For A7x and newer, do the CX GBIF configurations before GMU wake up */
+	for (int i = 0; (gbif_cx && gbif_cx[i].offset); i++)
+		gpu_write(gpu, gbif_cx[i].offset, gbif_cx[i].value);
+
+	if (adreno_is_a8xx(adreno_gpu)) {
+		gpu_write(gpu, REG_A8XX_GBIF_CX_CONFIG, 0x20023000);
+		gmu_write(gmu, REG_A6XX_GMU_MRC_GBIF_QOS_CTRL, 0x33);
+	}
+
 	/* Set up the lowest idle level on the GMU */
 	a6xx_gmu_power_config(gmu);
 
@@ -986,7 +1052,7 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
 	u32 val, seqmem_off = 0;
 
 	/* The second spin of A7xx GPUs messed with some register offsets.. */
-	if (adreno_is_a740_family(adreno_gpu))
+	if (adreno_is_a740_family(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
 		seqmem_off = 4;
 
 	/* Make sure there are no outstanding RPMh votes */
@@ -999,7 +1065,7 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
 	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off,
 		val, (val & 1), 100, 1000);
 
-	if (!adreno_is_a740_family(adreno_gpu))
+	if (!adreno_is_a740_family(adreno_gpu) && !adreno_is_a8xx(adreno_gpu))
 		return;
 
 	gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS4_DRV0_STATUS + seqmem_off,
@@ -1027,7 +1093,10 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
 	 * Turn off keep alive that might have been enabled by the hang
 	 * interrupt
 	 */
-	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+	if (adreno_is_a8xx(adreno_gpu))
+		gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+	else
+		gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
 
 	/* Flush all the queues */
 	a6xx_hfi_stop(gmu);
@@ -1053,7 +1122,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
 	/* Halt the gmu cm3 core */
 	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
 
-	a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+	adreno_gpu->funcs->bus_halt(adreno_gpu, true);
 
 	/* Reset GPU core blocks */
 	a6xx_gpu_sw_reset(gpu, true);
@@ -1122,6 +1191,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 		return ret;
 	}
 
+	/* Read the slice info on A8x GPUs */
+	a8xx_gpu_get_slice_info(gpu);
+
 	/* Set the bus quota to a reasonable value for boot */
 	a6xx_gmu_set_initial_bw(gpu, gmu);
 
@@ -1131,7 +1203,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	enable_irq(gmu->gmu_irq);
 
 	/* Check to see if we are doing a cold or warm boot */
-	if (adreno_is_a7xx(adreno_gpu)) {
+	if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) {
 		status = a6xx_llc_read(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL) == 1 ?
 				GMU_WARM_BOOT : GMU_COLD_BOOT;
 	} else if (gmu->legacy) {
@@ -1225,7 +1297,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
 		if (ret)
 			goto force_off;
 
-		a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+		adreno_gpu->funcs->bus_halt(adreno_gpu, a6xx_gpu->hung);
 
 		/* tell the GMU we want to slumber */
 		ret = a6xx_gmu_notify_slumber(gmu);
@@ -1460,7 +1532,7 @@ static int a6xx_gmu_rpmh_bw_votes_init(struct adreno_gpu *adreno_gpu,
 			vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK);
 
 			/* GMUs on A7xx votes on both x & y */
-			if (adreno_is_a7xx(adreno_gpu))
+			if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
 				data[bcm_index] = BCM_TCS_CMD(commit, true, vote, vote);
 			else
 				data[bcm_index] = BCM_TCS_CMD(commit, true, 0, vote);
@@ -1492,13 +1564,14 @@ static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
 }
 
 static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
-		unsigned long *freqs, int freqs_count, const char *id)
+		unsigned long *freqs, int freqs_count,
+		const char *pri_id, const char *sec_id)
 {
 	int i, j;
 	const u16 *pri, *sec;
 	size_t pri_count, sec_count;
 
-	pri = cmd_db_read_aux_data(id, &pri_count);
+	pri = cmd_db_read_aux_data(pri_id, &pri_count);
 	if (IS_ERR(pri))
 		return PTR_ERR(pri);
 	/*
@@ -1509,13 +1582,7 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
 	if (!pri_count)
 		return -EINVAL;
 
-	/*
-	 * Some targets have a separate gfx mxc rail. So try to read that first and then fall back
-	 * to regular mx rail if it is missing
-	 */
-	sec = cmd_db_read_aux_data("gmxc.lvl", &sec_count);
-	if (IS_ERR(sec) && sec != ERR_PTR(-EPROBE_DEFER))
-		sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
+	sec = cmd_db_read_aux_data(sec_id, &sec_count);
 	if (IS_ERR(sec))
 		return PTR_ERR(sec);
 
@@ -1569,6 +1636,57 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
 	return 0;
 }
 
+static int a6xx_gmu_rpmh_dep_votes_init(struct device *dev, u32 *votes,
+		unsigned long *freqs, int freqs_count)
+{
+	const u16 *mx;
+	size_t count;
+
+	mx = cmd_db_read_aux_data("mx.lvl", &count);
+	if (IS_ERR(mx))
+		return PTR_ERR(mx);
+	/*
+	 * The data comes back as an array of unsigned shorts so adjust the
+	 * count accordingly
+	 */
+	count >>= 1;
+	if (!count)
+		return -EINVAL;
+
+	/* Fix the vote for zero frequency */
+	votes[0] = 0xffffffff;
+
+	/* Construct a vote for rest of the corners */
+	for (int i = 1; i < freqs_count; i++) {
+		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+		u8 j, index = 0;
+
+		/* Get the primary index that matches the arc level */
+		for (j = 0; j < count; j++) {
+			if (mx[j] >= level) {
+				index = j;
+				break;
+			}
+		}
+
+		if (j == count) {
+			DRM_DEV_ERROR(dev,
+				      "Mx Level %u not found in the RPMh list\n",
+				      level);
+			DRM_DEV_ERROR(dev, "Available levels:\n");
+			for (j = 0; j < count; j++)
+				DRM_DEV_ERROR(dev, "  %u\n", mx[j]);
+
+			return -EINVAL;
+		}
+
+		/* Construct the vote */
+		votes[i] = (0x3fff << 14) | (index << 8) | (0xff);
+	}
+
+	return 0;
+}
+
 /*
  * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
  * to construct the list of votes on the CPU and send it over. Query the RPMh
@@ -1583,15 +1701,27 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	const struct a6xx_info *info = adreno_gpu->info->a6xx;
 	struct msm_gpu *gpu = &adreno_gpu->base;
+	const char *sec_id;
+	const u16 *gmxc;
 	int ret;
 
+	gmxc = cmd_db_read_aux_data("gmxc.lvl", NULL);
+	if (gmxc == ERR_PTR(-EPROBE_DEFER))
+		return -EPROBE_DEFER;
+
+	/* If GMxC is present, prefer that as secondary rail for GX votes */
+	sec_id = IS_ERR_OR_NULL(gmxc) ? "mx.lvl" : "gmxc.lvl";
+
 	/* Build the GX votes */
 	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
-		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
+		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl", sec_id);
 
 	/* Build the CX votes */
 	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
-		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
+		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl", "mx.lvl");
+
+	ret |= a6xx_gmu_rpmh_dep_votes_init(gmu->dev, gmu->dep_arc_votes,
+		gmu->gpu_freqs, gmu->nr_gpu_freqs);
 
 	/* Build the interconnect votes */
 	if (info->bcms && gmu->nr_gpu_bws > 1)
@@ -1795,27 +1925,6 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
 	return 0;
 }
 
-static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
-		const char *name)
-{
-	void __iomem *ret;
-	struct resource *res = platform_get_resource_byname(pdev,
-			IORESOURCE_MEM, name);
-
-	if (!res) {
-		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
-		return ERR_PTR(-EINVAL);
-	}
-
-	ret = ioremap(res->start, resource_size(res));
-	if (!ret) {
-		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
-		return ERR_PTR(-EINVAL);
-	}
-
-	return ret;
-}
-
 static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
 		const char *name, irq_handler_t handler)
 {
@@ -1866,7 +1975,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 {
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
-	struct platform_device *pdev = to_platform_device(gmu->dev);
 
 	mutex_lock(&gmu->lock);
 	if (!gmu->initialized) {
@@ -1895,12 +2003,11 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 	qmp_put(gmu->qmp);
 
 	iounmap(gmu->mmio);
-	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
-		iounmap(gmu->rscc);
 	gmu->mmio = NULL;
 	gmu->rscc = NULL;
 
-	if (!adreno_has_gmu_wrapper(adreno_gpu)) {
+	if (!adreno_has_gmu_wrapper(adreno_gpu) &&
+	    !adreno_has_rgmu(adreno_gpu)) {
 		a6xx_gmu_memory_free(gmu);
 
 		free_irq(gmu->gmu_irq, gmu);
@@ -1922,10 +2029,38 @@ static int cxpd_notifier_cb(struct notifier_block *nb,
 	return 0;
 }
 
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+		const char *name, resource_size_t *start)
+{
+	void __iomem *ret;
+	struct resource *res = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, name);
+
+	if (!res) {
+		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = ioremap(res->start, resource_size(res));
+	if (!ret) {
+		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (start)
+		*start = res->start;
+
+	return ret;
+}
+
 int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 {
 	struct platform_device *pdev = of_find_device_by_node(node);
+	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	resource_size_t start;
+	struct resource *res;
 	int ret;
 
 	if (!pdev)
@@ -1942,13 +2077,29 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	/* Mark legacy for manual SPTPRAC control */
 	gmu->legacy = true;
 
+	/* RGMU requires clocks */
+	ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
+	if (ret < 0)
+		goto err_clk;
+
+	gmu->nr_clocks = ret;
+
 	/* Map the GMU registers */
-	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start);
 	if (IS_ERR(gmu->mmio)) {
 		ret = PTR_ERR(gmu->mmio);
 		goto err_mmio;
 	}
 
+	res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory");
+	if (!res) {
+		ret = -EINVAL;
+		goto err_mmio;
+	}
+
+	/* Identify gmu base offset from gpu base address */
+	gmu->mmio_offset = (u32)(start - res->start);
+
 	gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
 	if (IS_ERR(gmu->cxpd)) {
 		ret = PTR_ERR(gmu->cxpd);
@@ -1981,6 +2132,7 @@ detach_cxpd:
 
 err_mmio:
 	iounmap(gmu->mmio);
+err_clk:
 
 	/* Drop reference taken in of_find_device_by_node */
 	put_device(gmu->dev);
@@ -1989,10 +2141,13 @@ err_mmio:
 
 int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 {
+	struct platform_device *pdev = of_find_device_by_node(node);
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+	struct msm_gpu *gpu = &adreno_gpu->base;
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
-	struct platform_device *pdev = of_find_device_by_node(node);
 	struct device_link *link;
+	resource_size_t start;
+	struct resource *res;
 	int ret;
 
 	if (!pdev)
@@ -2028,13 +2183,14 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	 */
 	gmu->dummy.size = SZ_4K;
 	if (adreno_is_a660_family(adreno_gpu) ||
-	    adreno_is_a7xx(adreno_gpu)) {
+	    adreno_is_a7xx(adreno_gpu) ||
+	    adreno_is_a8xx(adreno_gpu)) {
 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
 					    0x60400000, "debug");
 		if (ret)
 			goto err_memory;
 
-		gmu->dummy.size = SZ_8K;
+		gmu->dummy.size = SZ_16K;
 	}
 
 	/* Allocate memory for the GMU dummy page */
@@ -2045,7 +2201,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 
 	/* Note that a650 family also includes a660 family: */
 	if (adreno_is_a650_family(adreno_gpu) ||
-	    adreno_is_a7xx(adreno_gpu)) {
+	    adreno_is_a7xx(adreno_gpu) ||
+	    adreno_is_a8xx(adreno_gpu)) {
 		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
 			SZ_16M - SZ_16K, 0x04000, "icache");
 		if (ret)
@@ -2087,19 +2244,30 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 		goto err_memory;
 
 	/* Map the GMU registers */
-	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start);
 	if (IS_ERR(gmu->mmio)) {
 		ret = PTR_ERR(gmu->mmio);
 		goto err_memory;
 	}
 
+	res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory");
+	if (!res) {
+		ret = -EINVAL;
+		goto err_mmio;
+	}
+
+	/* Identify gmu base offset from gpu base address */
+	gmu->mmio_offset = (u32)(start - res->start);
+
 	if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) {
-		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
+		gmu->rscc = devm_platform_ioremap_resource_byname(pdev, "rscc");
 		if (IS_ERR(gmu->rscc)) {
 			ret = -ENODEV;
 			goto err_mmio;
 		}
+	} else if (adreno_is_a8xx(adreno_gpu)) {
+		gmu->rscc = gmu->mmio + 0x19000;
 	} else {
 		gmu->rscc = gmu->mmio + 0x23000;
 	}
@@ -2173,8 +2341,6 @@ detach_cxpd:
 
 err_mmio:
 	iounmap(gmu->mmio);
-	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
-		iounmap(gmu->rscc);
 
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);
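The most subtle change above is the retry loop that a6xx_gmu_set_oob() now wraps around the acknowledge poll: a timeout no longer aborts immediately, because the GMU can be temporarily wedged behind pending GPU faults while a devcoredump is captured. The standalone C sketch below mirrors only that control flow; it is not kernel code, and poll_ack_bit(), coredump_done() and wait_for_coredump() are hypothetical userspace stand-ins for gmu_poll_timeout() and the fault_coredump_done completion.

        /* Userspace sketch of the OOB ack retry loop -- not kernel code. */
        #include <stdbool.h>
        #include <stdio.h>

        static bool coredump_pending;   /* models an in-flight devcoredump */

        /* Stand-in for gmu_poll_timeout(): 0 if the ack bit is set, -1 on "timeout" */
        static int poll_ack_bit(const unsigned int *intr_info, unsigned int ack)
        {
                return (*intr_info & (1u << ack)) ? 0 : -1;
        }

        /* Stand-in for completion_done(&...->fault_coredump_done) */
        static bool coredump_done(void)
        {
                return !coredump_pending;
        }

        /* Stand-in for wait_for_completion(): the coredump finishes, the MMU
         * resumes, and the GMU finally raises the ack interrupt. */
        static void wait_for_coredump(unsigned int *intr_info, unsigned int ack)
        {
                coredump_pending = false;
                *intr_info |= 1u << ack;
        }

        /* Same shape as the do/while loop in a6xx_gmu_set_oob() */
        static int wait_for_oob_ack(unsigned int *intr_info, unsigned int ack)
        {
                int ret;

                do {
                        ret = poll_ack_bit(intr_info, ack);
                        if (!ret)
                                break;          /* ack arrived */
                        if (coredump_done())
                                break;          /* genuine timeout: give up */
                        /* Timed out only because a devcoredump wedged the GMU:
                         * wait for it to finish, then poll again. */
                        wait_for_coredump(intr_info, ack);
                } while (true);

                return ret;
        }

        int main(void)
        {
                unsigned int intr_info = 0;

                coredump_pending = true;   /* first poll times out behind a coredump */
                printf("ack wait returned %d\n", wait_for_oob_ack(&intr_info, 2));
                return 0;
        }

As in the patch, the loop distinguishes a real timeout (error path, then the DRM_DEV_ERROR print) from a timeout caused by an in-flight devcoredump, which is simply waited out and retried.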
