Diffstat (limited to 'drivers/ufs/host')
 drivers/ufs/host/Kconfig           |  13
 drivers/ufs/host/Makefile          |   1
 drivers/ufs/host/ti-j721e-ufs.c    |  37
 drivers/ufs/host/ufs-amd-versal2.c | 564
 drivers/ufs/host/ufs-mediatek.c    | 130
 drivers/ufs/host/ufs-mediatek.h    |   4
 drivers/ufs/host/ufs-qcom.c        |  18
 drivers/ufs/host/ufs-rockchip.c    |  20
 drivers/ufs/host/ufshcd-dwc.h      |  46
 drivers/ufs/host/ufshcd-pci.c      |  70
10 files changed, 855 insertions(+), 48 deletions(-)
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 191fbd799ec5..7d5117b2dab4 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -154,3 +154,16 @@ config SCSI_UFS_ROCKCHIP
 	  Select this if you have UFS controller on Rockchip chipset.
 	  If unsure, say N.
+
+config SCSI_UFS_AMD_VERSAL2
+	tristate "AMD Versal Gen 2 UFS controller platform driver"
+	depends on SCSI_UFSHCD_PLATFORM && (ARCH_ZYNQMP || COMPILE_TEST)
+	help
+	  This selects the AMD Versal Gen 2 specific additions on top of
+	  the UFSHCD DWC and UFSHCD platform driver. UFS host on AMD
+	  Versal Gen 2 needs some vendor specific configurations like PHY
+	  and vendor specific register accesses before accessing the
+	  hardware.
+
+	  Select this if you have UFS controller on AMD Versal Gen 2 SoC.
+	  If unsure, say N.
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index 2f97feb5db3f..65d8bb23ab7b 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -13,3 +13,4 @@ obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
 obj-$(CONFIG_SCSI_UFS_ROCKCHIP) += ufs-rockchip.o
 obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o
 obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
+obj-$(CONFIG_SCSI_UFS_AMD_VERSAL2) += ufs-amd-versal2.o ufshcd-dwc.o
diff --git a/drivers/ufs/host/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c
index 21214e5d5896..43781593b5c1 100644
--- a/drivers/ufs/host/ti-j721e-ufs.c
+++ b/drivers/ufs/host/ti-j721e-ufs.c
@@ -15,18 +15,26 @@
 #define TI_UFS_SS_RST_N_PCS	BIT(0)
 #define TI_UFS_SS_CLK_26MHZ	BIT(4)
 
+struct ti_j721e_ufs {
+	void __iomem *regbase;
+	u32 reg;
+};
+
 static int ti_j721e_ufs_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	struct ti_j721e_ufs *ufs;
 	unsigned long clk_rate;
-	void __iomem *regbase;
 	struct clk *clk;
-	u32 reg = 0;
 	int ret;
 
-	regbase = devm_platform_ioremap_resource(pdev, 0);
-	if (IS_ERR(regbase))
-		return PTR_ERR(regbase);
+	ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
+	if (!ufs)
+		return -ENOMEM;
+
+	ufs->regbase = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(ufs->regbase))
+		return PTR_ERR(ufs->regbase);
 
 	pm_runtime_enable(dev);
 	ret = pm_runtime_resume_and_get(dev);
@@ -42,12 +50,14 @@ static int ti_j721e_ufs_probe(struct platform_device *pdev)
 	}
 	clk_rate = clk_get_rate(clk);
 	if (clk_rate == 26000000)
-		reg |= TI_UFS_SS_CLK_26MHZ;
+		ufs->reg |= TI_UFS_SS_CLK_26MHZ;
 	devm_clk_put(dev, clk);
 
 	/* Take UFS slave device out of reset */
-	reg |= TI_UFS_SS_RST_N_PCS;
-	writel(reg, regbase + TI_UFS_SS_CTRL);
+	ufs->reg |= TI_UFS_SS_RST_N_PCS;
+	writel(ufs->reg, ufs->regbase + TI_UFS_SS_CTRL);
+
+	dev_set_drvdata(dev, ufs);
 
 	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, dev);
@@ -72,6 +82,16 @@ static void ti_j721e_ufs_remove(struct platform_device *pdev)
 	pm_runtime_disable(&pdev->dev);
 }
 
+static int ti_j721e_ufs_resume(struct device *dev)
+{
+	struct ti_j721e_ufs *ufs = dev_get_drvdata(dev);
+
+	writel(ufs->reg, ufs->regbase + TI_UFS_SS_CTRL);
+	return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(ti_j721e_ufs_pm_ops, NULL, ti_j721e_ufs_resume);
+
 static const struct of_device_id ti_j721e_ufs_of_match[] = {
 	{
 		.compatible = "ti,j721e-ufs",
@@ -87,6 +107,7 @@ static struct platform_driver ti_j721e_ufs_driver = {
 	.driver = {
 		.name = "ti-j721e-ufs",
 		.of_match_table = ti_j721e_ufs_of_match,
+		.pm = pm_sleep_ptr(&ti_j721e_ufs_pm_ops),
 	},
 };
 module_platform_driver(ti_j721e_ufs_driver);
diff --git a/drivers/ufs/host/ufs-amd-versal2.c b/drivers/ufs/host/ufs-amd-versal2.c
new file mode 100644
index 000000000000..40543db621a1
--- /dev/null
+++ b/drivers/ufs/host/ufs-amd-versal2.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Authors: Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <ufs/unipro.h>
+
+#include "ufshcd-dwc.h"
+#include "ufshcd-pltfrm.h"
+#include "ufshci-dwc.h"
+
+/* PHY modes */
+#define UFSHCD_DWC_PHY_MODE_ROM		0
+
+#define MPHY_FAST_RX_AFE_CAL		BIT(2)
+#define MPHY_FW_CALIB_CFG_VAL		BIT(8)
+
+#define MPHY_RX_OVRD_EN			BIT(3)
+#define MPHY_RX_OVRD_VAL		BIT(2)
+#define MPHY_RX_ACK_MASK		BIT(0)
+
+#define TIMEOUT_MICROSEC		1000000
+
+struct ufs_versal2_host {
+	struct ufs_hba *hba;
+	struct reset_control *rstc;
+	struct reset_control *rstphy;
+	u32 phy_mode;
+	unsigned long host_clk;
+	u8 attcompval0;
+	u8 attcompval1;
+	u8 ctlecompval0;
+	u8 ctlecompval1;
+};
+
+static int ufs_versal2_phy_reg_write(struct ufs_hba *hba, u32 addr, u32 val)
+{
+	static struct ufshcd_dme_attr_val phy_write_attrs[] = {
+		{ UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(CBCREGWRLSB), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(CBCREGWRMSB), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(CBCREGRDWRSEL), 1, DME_LOCAL },
+		{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
+	};
+
+	phy_write_attrs[0].mib_val = (u8)addr;
+	phy_write_attrs[1].mib_val = (u8)(addr >> 8);
+	phy_write_attrs[2].mib_val = (u8)val;
+	phy_write_attrs[3].mib_val = (u8)(val >> 8);
+
+	return ufshcd_dwc_dme_set_attrs(hba, phy_write_attrs, ARRAY_SIZE(phy_write_attrs));
+}
+
+static int ufs_versal2_phy_reg_read(struct ufs_hba *hba, u32 addr, u32 *val)
+{
+	u32 mib_val;
+	int ret;
+	static struct ufshcd_dme_attr_val phy_read_attrs[] = {
+		{ UIC_ARG_MIB(CBCREGADDRLSB), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(CBCREGADDRMSB), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(CBCREGRDWRSEL), 0, DME_LOCAL },
+		{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
+	};
+
+	phy_read_attrs[0].mib_val = (u8)addr;
+	phy_read_attrs[1].mib_val = (u8)(addr >> 8);
+
+	ret = ufshcd_dwc_dme_set_attrs(hba, phy_read_attrs, ARRAY_SIZE(phy_read_attrs));
+	if (ret)
+		return ret;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDLSB), &mib_val);
+	if (ret)
+		return ret;
+
+	*val = mib_val;
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(CBCREGRDMSB), &mib_val);
+	if (ret)
+		return ret;
+
+	*val |= (mib_val << 8);
+
+	return 0;
+}
+
+static int ufs_versal2_enable_phy(struct ufs_hba *hba)
+{
+	u32 offset, reg;
+	int ret;
+
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0);
+	if (ret)
+		return ret;
+
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
+	if (ret)
+		return ret;
+
+	/* Check Tx/Rx FSM states */
+	for (offset = 0; offset < 2; offset++) {
+		u32 time_left, mibsel;
+
+		time_left = TIMEOUT_MICROSEC;
+		mibsel = UIC_ARG_MIB_SEL(MTX_FSM_STATE, UIC_ARG_MPHY_TX_GEN_SEL_INDEX(offset));
+		do {
+			ret = ufshcd_dme_get(hba, mibsel, &reg);
+			if (ret)
+				return ret;
+
+			if (reg == TX_STATE_HIBERN8 || reg == TX_STATE_SLEEP ||
+			    reg == TX_STATE_LSBURST)
+				break;
+
+			time_left--;
+			usleep_range(1, 5);
+		} while (time_left);
+
+		if (!time_left) {
+			dev_err(hba->dev, "Invalid Tx FSM state.\n");
+			return -ETIMEDOUT;
+		}
+
+		time_left = TIMEOUT_MICROSEC;
+		mibsel = UIC_ARG_MIB_SEL(MRX_FSM_STATE, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(offset));
+		do {
+			ret = ufshcd_dme_get(hba, mibsel, &reg);
+			if (ret)
+				return ret;
+
+			if (reg == RX_STATE_HIBERN8 || reg == RX_STATE_SLEEP ||
+			    reg == RX_STATE_LSBURST)
+				break;
+
+			time_left--;
+			usleep_range(1, 5);
+		} while (time_left);
+
+		if (!time_left) {
+			dev_err(hba->dev, "Invalid Rx FSM state.\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+static int ufs_versal2_setup_phy(struct ufs_hba *hba)
+{
+	struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+	int ret;
+	u32 reg;
+
+	/* Bypass RX-AFE offset calibrations (ATT/CTLE) */
+	ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(0), &reg);
+	if (ret)
+		return ret;
+
+	reg |= MPHY_FAST_RX_AFE_CAL;
+	ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(0), reg);
+	if (ret)
+		return ret;
+
+	ret = ufs_versal2_phy_reg_read(hba, FAST_FLAGS(1), &reg);
+	if (ret)
+		return ret;
+
+	reg |= MPHY_FAST_RX_AFE_CAL;
+	ret = ufs_versal2_phy_reg_write(hba, FAST_FLAGS(1), reg);
+	if (ret)
+		return ret;
+
+	/* Program ATT and CTLE compensation values */
+	if (host->attcompval0) {
+		ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(0), host->attcompval0);
+		if (ret)
+			return ret;
+	}
+
+	if (host->attcompval1) {
+		ret = ufs_versal2_phy_reg_write(hba, RX_AFE_ATT_IDAC(1), host->attcompval1);
+		if (ret)
+			return ret;
+	}
+
+	if (host->ctlecompval0) {
+		ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(0), host->ctlecompval0);
+		if (ret)
+			return ret;
+	}
+
+	if (host->ctlecompval1) {
+		ret = ufs_versal2_phy_reg_write(hba, RX_AFE_CTLE_IDAC(1), host->ctlecompval1);
+		if (ret)
+			return ret;
+	}
+
+	ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(0), &reg);
+	if (ret)
+		return ret;
+
+	reg |= MPHY_FW_CALIB_CFG_VAL;
+	ret = ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(0), reg);
+	if (ret)
+		return ret;
+
+	ret = ufs_versal2_phy_reg_read(hba, FW_CALIB_CCFG(1), &reg);
+	if (ret)
+		return ret;
+
+	reg |= MPHY_FW_CALIB_CFG_VAL;
+	return ufs_versal2_phy_reg_write(hba, FW_CALIB_CCFG(1), reg);
+}
+
+static int ufs_versal2_phy_init(struct ufs_hba *hba)
+{
+	struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+	u32 time_left;
+	bool is_ready;
+	int ret;
+	static const struct ufshcd_dme_attr_val rmmi_attrs[] = {
+		{ UIC_ARG_MIB(CBREFCLKCTRL2), CBREFREFCLK_GATE_OVR_EN, DME_LOCAL },
+		{ UIC_ARG_MIB(CBCRCTRL), 1, DME_LOCAL },
+		{ UIC_ARG_MIB(CBC10DIRECTCONF2), 1, DME_LOCAL },
+		{ UIC_ARG_MIB(VS_MPHYCFGUPDT), 1, DME_LOCAL }
+	};
+
+	/* Wait for Tx/Rx config_rdy */
+	time_left = TIMEOUT_MICROSEC;
+	do {
+		time_left--;
+		ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&is_ready);
+		if (ret)
+			return ret;
+
+		if (!is_ready)
+			break;
+
+		usleep_range(1, 5);
+	} while (time_left);
+
+	if (!time_left) {
+		dev_err(hba->dev, "Tx/Rx configuration signal busy.\n");
+		return -ETIMEDOUT;
+	}
+
+	ret = ufshcd_dwc_dme_set_attrs(hba, rmmi_attrs, ARRAY_SIZE(rmmi_attrs));
+	if (ret)
+		return ret;
+
+	ret = reset_control_deassert(host->rstphy);
+	if (ret) {
+		dev_err(hba->dev, "ufsphy reset deassert failed, err = %d\n", ret);
+		return ret;
+	}
+
+	/* Wait for SRAM init done */
+	time_left = TIMEOUT_MICROSEC;
+	do {
+		time_left--;
+		ret = zynqmp_pm_is_sram_init_done(&is_ready);
+		if (ret)
+			return ret;
+
+		if (is_ready)
+			break;
+
+		usleep_range(1, 5);
+	} while (time_left);
+
+	if (!time_left) {
+		dev_err(hba->dev, "SRAM initialization failed.\n");
+		return -ETIMEDOUT;
+	}
+
+	ret = ufs_versal2_setup_phy(hba);
+	if (ret)
+		return ret;
+
+	return ufs_versal2_enable_phy(hba);
+}
+
+static int ufs_versal2_init(struct ufs_hba *hba)
+{
+	struct ufs_versal2_host *host;
+	struct device *dev = hba->dev;
+	struct ufs_clk_info *clki;
+	int ret;
+	u32 cal;
+
+	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	host->hba = hba;
+	ufshcd_set_variant(hba, host);
+
+	host->phy_mode = UFSHCD_DWC_PHY_MODE_ROM;
+
+	list_for_each_entry(clki, &hba->clk_list_head, list) {
+		if (!strcmp(clki->name, "core"))
+			host->host_clk = clk_get_rate(clki->clk);
+	}
+
+	host->rstc = devm_reset_control_get_exclusive(dev, "host");
+	if (IS_ERR(host->rstc)) {
+		dev_err(dev, "failed to get reset ctrl: host\n");
+		return PTR_ERR(host->rstc);
+	}
+
+	host->rstphy = devm_reset_control_get_exclusive(dev, "phy");
+	if (IS_ERR(host->rstphy)) {
+		dev_err(dev, "failed to get reset ctrl: phy\n");
+		return PTR_ERR(host->rstphy);
+	}
+
+	ret = reset_control_assert(host->rstc);
+	if (ret) {
+		dev_err(hba->dev, "host reset assert failed, err = %d\n", ret);
+		return ret;
+	}
+
+	ret = reset_control_assert(host->rstphy);
+	if (ret) {
+		dev_err(hba->dev, "phy reset assert failed, err = %d\n", ret);
+		return ret;
+	}
+
+	ret = zynqmp_pm_set_sram_bypass();
+	if (ret) {
+		dev_err(dev, "Bypass SRAM interface failed, err = %d\n", ret);
+		return ret;
+	}
+
+	ret = reset_control_deassert(host->rstc);
+	if (ret)
+		dev_err(hba->dev, "host reset deassert failed, err = %d\n", ret);
+
+	ret = zynqmp_pm_get_ufs_calibration_values(&cal);
+	if (ret) {
+		dev_err(dev, "failed to read calibration values\n");
+		return ret;
+	}
+
+	host->attcompval0 = (u8)cal;
+	host->attcompval1 = (u8)(cal >> 8);
+	host->ctlecompval0 = (u8)(cal >> 16);
+	host->ctlecompval1 = (u8)(cal >> 24);
+
+	hba->quirks |= UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING;
+
+	return 0;
+}
+
+static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba,
+					 enum ufs_notify_change_status status)
+{
+	int ret = 0;
+
+	if (status == PRE_CHANGE) {
+		ret = ufs_versal2_phy_init(hba);
+		if (ret)
+			dev_err(hba->dev, "Phy init failed (%d)\n", ret);
+	}
+
+	return ret;
+}
+
+static int ufs_versal2_link_startup_notify(struct ufs_hba *hba,
+					   enum ufs_notify_change_status status)
+{
+	struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+	int ret = 0;
+
+	switch (status) {
+	case PRE_CHANGE:
+		if (host->host_clk)
+			ufshcd_writel(hba, host->host_clk / 1000000, DWC_UFS_REG_HCLKDIV);
+
+		break;
+	case POST_CHANGE:
+		ret = ufshcd_dwc_link_startup_notify(hba, status);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_req)
+{
+	u32 time_left, reg, lane;
+	int ret;
+
+	for (lane = 0; lane < activelanes; lane++) {
+		time_left = TIMEOUT_MICROSEC;
+		ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
+		if (ret)
+			return ret;
+
+		reg |= MPHY_RX_OVRD_EN;
+		if (rx_req)
+			reg |= MPHY_RX_OVRD_VAL;
+		else
+			reg &= ~MPHY_RX_OVRD_VAL;
+
+		ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
+		if (ret)
+			return ret;
+
+		do {
+			ret = ufs_versal2_phy_reg_read(hba, RX_PCS_OUT(lane), &reg);
+			if (ret)
+				return ret;
+
+			reg &= MPHY_RX_ACK_MASK;
+			if (reg == rx_req)
+				break;
+
+			time_left--;
+			usleep_range(1, 5);
+		} while (time_left);
+
+		if (!time_left) {
+			dev_err(hba->dev, "Invalid Rx Ack value.\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	return 0;
+}
+
+static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status,
+					 const struct ufs_pa_layer_attr *dev_max_params,
+					 struct ufs_pa_layer_attr *dev_req_params)
+{
+	struct ufs_versal2_host *host = ufshcd_get_variant(hba);
+	u32 lane, reg, rate = 0;
+	int ret = 0;
+
+	if (status == PRE_CHANGE) {
+		memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr));
+
+		/* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */
+		if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 &&
+		    !host->ctlecompval1) {
+			dev_req_params->pwr_rx = SLOW_MODE;
+			dev_req_params->pwr_tx = SLOW_MODE;
+			return 0;
+		}
+
+		if (dev_req_params->pwr_rx == SLOW_MODE || dev_req_params->pwr_rx == SLOWAUTO_MODE)
+			return 0;
+
+		if (dev_req_params->hs_rate == PA_HS_MODE_B)
+			rate = 1;
+
+		/* Select the rate */
+		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), rate);
+		if (ret)
+			return ret;
+
+		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 1);
+		if (ret)
+			return ret;
+
+		ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 1);
+		if (ret)
+			return ret;
+
+		ret = ufs_versal2_phy_ratesel(hba, dev_req_params->lane_tx, 0);
+		if (ret)
+			return ret;
+
+		/* Remove rx_req override */
+		for (lane = 0; lane < dev_req_params->lane_tx; lane++) {
+			ret = ufs_versal2_phy_reg_read(hba, RX_OVRD_IN_1(lane), &reg);
+			if (ret)
+				return ret;
+
+			reg &= ~MPHY_RX_OVRD_EN;
+			ret = ufs_versal2_phy_reg_write(hba, RX_OVRD_IN_1(lane), reg);
+			if (ret)
+				return ret;
+		}
+
+		if (dev_req_params->lane_tx == UFS_LANE_2 && dev_req_params->lane_rx == UFS_LANE_2)
+			ret = ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
+							 PA_INITIAL_ADAPT);
+	}
+
+	return ret;
+}
+
+static struct ufs_hba_variant_ops ufs_versal2_hba_vops = {
+	.name = "ufs-versal2-pltfm",
+	.init = ufs_versal2_init,
+	.link_startup_notify = ufs_versal2_link_startup_notify,
+	.hce_enable_notify = ufs_versal2_hce_enable_notify,
+	.pwr_change_notify = ufs_versal2_pwr_change_notify,
+};
+
+static const struct of_device_id ufs_versal2_pltfm_match[] = {
+	{
+		.compatible = "amd,versal2-ufs",
+		.data = &ufs_versal2_hba_vops,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ufs_versal2_pltfm_match);
+
+static int ufs_versal2_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	/* Perform generic probe */
+	ret = ufshcd_pltfrm_init(pdev, &ufs_versal2_hba_vops);
+	if (ret)
+		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", ret);
+
+	return ret;
+}
+
+static void ufs_versal2_remove(struct platform_device *pdev)
+{
+	struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+	pm_runtime_get_sync(&(pdev)->dev);
+	ufshcd_remove(hba);
+}
+
+static const struct dev_pm_ops ufs_versal2_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+};
+
+static struct platform_driver ufs_versal2_pltfm = {
+	.probe = ufs_versal2_probe,
+	.remove = ufs_versal2_remove,
+	.driver = {
+		.name = "ufshcd-versal2",
+		.pm = &ufs_versal2_pm_ops,
+		.of_match_table = of_match_ptr(ufs_versal2_pltfm_match),
+	},
+};
+
+module_platform_driver(ufs_versal2_pltfm);
+
+MODULE_AUTHOR("Sai Krishna Potthuri <sai.krishna.potthuri@amd.com>");
+MODULE_DESCRIPTION("AMD Versal Gen 2 UFS Host Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 758a393a9de1..ecbbf52bf734 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -41,8 +41,7 @@ static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
 static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
 	{ .wmanufacturerid = UFS_ANY_VENDOR,
 	  .model = UFS_ANY_MODEL,
-	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
-		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
+	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
 	  .model = "H9HQ21AFAMZDAR",
 	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
@@ -280,12 +279,21 @@ static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
 			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
 			      REG_UFS_XOUFS_CTRL);
 
+		if (host->legacy_ip_ver)
+			return 0;
+
 		/* DDR_EN setting */
 		if (host->ip_ver >= IP_VER_MT6989) {
 			ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
 				    0x453000, REG_UFS_MMIO_OPT_CTRL_0);
 		}
+
+		if (host->ip_ver >= IP_VER_MT6991_A0) {
+			/* Enable multi-rtt */
+			ufshcd_rmwl(hba, MRTT_EN, MRTT_EN, REG_UFS_MMIO_OPT_CTRL_0);
+			/* Enable random performance improvement */
+			ufshcd_rmwl(hba, RDN_PFM_IMPV_DIS, 0, REG_UFS_MMIO_OPT_CTRL_0);
+		}
 	}
 
 	return 0;
@@ -405,7 +413,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
 {
 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
-	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
+	if (!host->legacy_ip_ver && host->ip_ver >= IP_VER_MT6983) {
 		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
 		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
 		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
@@ -422,6 +430,7 @@ static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
 	u64 timeout, time_checked;
 	u32 val, sm;
 	bool wait_idle;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
 	/* cannot use plain ktime_get() in suspend */
 	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
@@ -432,8 +441,13 @@ static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
 	do {
 		time_checked = ktime_get_mono_fast_ns();
-		ufs_mtk_dbg_sel(hba);
-		val = ufshcd_readl(hba, REG_UFS_PROBE);
+		if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
+			ufs_mtk_dbg_sel(hba);
+			val = ufshcd_readl(hba, REG_UFS_PROBE);
+		} else {
+			val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+			val = val >> 16;
+		}
 
 		sm = val & 0x1f;
@@ -465,13 +479,20 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
 {
 	ktime_t timeout, time_checked;
 	u32 val;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
 	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
 	do {
 		time_checked = ktime_get();
-		ufs_mtk_dbg_sel(hba);
-		val = ufshcd_readl(hba, REG_UFS_PROBE);
-		val = val >> 28;
+
+		if (host->legacy_ip_ver || host->ip_ver < IP_VER_MT6899) {
+			ufs_mtk_dbg_sel(hba);
+			val = ufshcd_readl(hba, REG_UFS_PROBE);
+			val = val >> 28;
+		} else {
+			val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+			val = val >> 24;
+		}
 
 		if (val == state)
 			return 0;
@@ -1109,18 +1130,6 @@ static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
 	}
 }
 
-/* Convert microseconds to Auto-Hibernate Idle Timer register value */
-static u32 ufs_mtk_us_to_ahit(unsigned int timer)
-{
-	unsigned int scale;
-
-	for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
-		timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
-
-	return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
-	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
-}
-
 static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
 {
 	unsigned int us;
@@ -1143,7 +1152,7 @@ static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
 			break;
 		}
 
-		hba->ahit = ufs_mtk_us_to_ahit(us);
+		hba->ahit = ufshcd_us_to_ahit(us);
 	}
 
 	ufs_mtk_setup_clk_gating(hba);
@@ -1332,6 +1341,36 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
 	return true;
 }
 
+static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba)
+{
+	int i;
+	u32 value;
+	u32 cnt, att, min;
+	struct attr_min {
+		u32 attr;
+		u32 min_value;
+	} pa_min_sync_length[] = {
+		{PA_TXHSG1SYNCLENGTH, 0x48},
+		{PA_TXHSG2SYNCLENGTH, 0x48},
+		{PA_TXHSG3SYNCLENGTH, 0x48},
+		{PA_TXHSG4SYNCLENGTH, 0x48},
+		{PA_TXHSG5SYNCLENGTH, 0x48}
+	};
+
+	cnt = sizeof(pa_min_sync_length) / sizeof(struct attr_min);
+	for (i = 0; i < cnt; i++) {
+		att = pa_min_sync_length[i].attr;
+		min = pa_min_sync_length[i].min_value;
+		ufshcd_dme_get(hba, UIC_ARG_MIB(att), &value);
+		if (value < min)
+			ufshcd_dme_set(hba, UIC_ARG_MIB(att), min);
+
+		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(att), &value);
+		if (value < min)
+			ufshcd_dme_peer_set(hba, UIC_ARG_MIB(att), min);
+	}
+}
+
 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
 				  const struct ufs_pa_layer_attr *dev_max_params,
 				  struct ufs_pa_layer_attr *dev_req_params)
@@ -1355,6 +1394,8 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
 	}
 
 	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
+		ufs_mtk_adjust_sync_length(hba);
+
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
@@ -1619,14 +1660,26 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
 {
 	int err;
+	u32 val;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
 	err = ufshcd_hba_enable(hba);
 	if (err)
 		return err;
 
 	err = ufs_mtk_unipro_set_lpm(hba, false);
-	if (err)
+	if (err) {
+		if (host->ip_ver < IP_VER_MT6899) {
+			ufs_mtk_dbg_sel(hba);
+			val = ufshcd_readl(hba, REG_UFS_PROBE);
+		} else {
+			val = ufshcd_readl(hba, REG_UFS_UFS_MMIO_OTSD_CTRL);
+		}
+		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val);
+		val = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+		ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)val);
 		return err;
+	}
 
 	err = ufshcd_uic_hibern8_exit(hba);
 	if (err)
@@ -1744,6 +1797,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 {
 	int err;
 	struct arm_smccc_res res;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
 	if (status == PRE_CHANGE) {
 		if (ufshcd_is_auto_hibern8_supported(hba))
@@ -1773,6 +1827,15 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 
 	ufs_mtk_sram_pwr_ctrl(false, res);
 
+	/* Release pm_qos/clk if in scale-up mode during suspend */
+	if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) {
+		ufshcd_pm_qos_update(hba, false);
+		_ufs_mtk_clk_scale(hba, false);
+	} else if ((!ufshcd_is_clkscaling_supported(hba) &&
+		   hba->pwr_info.gear_rx >= UFS_HS_G5)) {
+		_ufs_mtk_clk_scale(hba, false);
+	}
+
 	return 0;
 fail:
 	/*
@@ -1788,6 +1851,7 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 {
 	int err;
 	struct arm_smccc_res res;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
 	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
 		ufs_mtk_dev_vreg_set_lpm(hba, false);
@@ -1798,6 +1862,15 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (err)
 		goto fail;
 
+	/* Request pm_qos/clk if in scale-up mode after resume */
+	if (ufshcd_is_clkscaling_supported(hba) && (host->clk_scale_up)) {
+		ufshcd_pm_qos_update(hba, true);
+		_ufs_mtk_clk_scale(hba, true);
+	} else if ((!ufshcd_is_clkscaling_supported(hba) &&
+		   hba->pwr_info.gear_rx >= UFS_HS_G5)) {
+		_ufs_mtk_clk_scale(hba, true);
+	}
+
 	if (ufshcd_is_link_hibern8(hba)) {
 		err = ufs_mtk_link_set_hpm(hba);
 		if (err)
@@ -1889,15 +1962,13 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
 {
 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
 
-	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
-	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc) {
 		hba->vreg_info.vcc->always_on = true;
 		/*
 		 * VCC will be kept always-on thus we don't
-		 * need any delay during regulator operations
+		 * need any delay before putting device's VCC in LPM mode.
 		 */
-		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
-			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+		hba->dev_quirks &= ~UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM;
 	}
 
 	ufs_mtk_vreg_fix_vcc(hba);
@@ -2373,6 +2444,11 @@ static int ufs_mtk_system_suspend(struct device *dev)
 	struct arm_smccc_res res;
 	int ret;
 
+	if (hba->shutting_down) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	ret = ufshcd_system_suspend(dev);
 	if (ret)
 		goto out;
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index dfbf78bd8664..9747277f11e8 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -20,6 +20,9 @@
 #define MCQ_MULTI_INTR_EN	BIT(2)
 #define MCQ_CMB_INTR_EN		BIT(3)
 #define MCQ_AH8			BIT(4)
+#define MON_EN			BIT(5)
+#define MRTT_EN			BIT(25)
+#define RDN_PFM_IMPV_DIS	BIT(28)
 
 #define MCQ_INTR_EN_MSK		(MCQ_MULTI_INTR_EN | MCQ_CMB_INTR_EN)
@@ -28,6 +31,7 @@
  */
 #define REG_UFS_XOUFS_CTRL          0x140
 #define REG_UFS_REFCLK_CTRL         0x144
+#define REG_UFS_UFS_MMIO_OTSD_CTRL  0x14C
 #define REG_UFS_MMIO_OPT_CTRL_0     0x160
 #define REG_UFS_EXTREG              0x2100
 #define REG_UFS_MPHYCTRL            0x2200
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 3e83dc51d538..8d119b3223cb 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -740,8 +740,21 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 
 	/* reset the connected UFS device during power down */
-	if (ufs_qcom_is_link_off(hba) && host->device_reset)
+	if (ufs_qcom_is_link_off(hba) && host->device_reset) {
 		ufs_qcom_device_reset_ctrl(hba, true);
+		/*
+		 * After sending the SSU command, asserting the rst_n
+		 * line causes the device firmware to wake up and
+		 * execute its reset routine.
+		 *
+		 * During this process, the device may draw current
+		 * beyond the permissible limit for low-power mode (LPM).
+		 * A 10ms delay, based on experimental observations,
+		 * allows the UFS device to complete its hardware reset
+		 * before transitioning the power rail to LPM.
+		 */
+		usleep_range(10000, 11000);
+	}
 
 	return ufs_qcom_ice_suspend(host);
 }
@@ -1024,9 +1037,6 @@ static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = {
 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
 	  .model = UFS_ANY_MODEL,
 	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
-	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
-	  .model = UFS_ANY_MODEL,
-	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM },
 	{ .wmanufacturerid = UFS_VENDOR_WDC,
 	  .model = UFS_ANY_MODEL,
 	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
diff --git a/drivers/ufs/host/ufs-rockchip.c b/drivers/ufs/host/ufs-rockchip.c
index 8754085dd0cc..7fff34513a60 100644
--- a/drivers/ufs/host/ufs-rockchip.c
+++ b/drivers/ufs/host/ufs-rockchip.c
@@ -7,6 +7,7 @@
 
 #include <linux/clk.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -20,9 +21,17 @@
 #include "ufshcd-pltfrm.h"
 #include "ufs-rockchip.h"
 
+static void ufs_rockchip_controller_reset(struct ufs_rockchip_host *host)
+{
+	reset_control_assert(host->rst);
+	udelay(1);
+	reset_control_deassert(host->rst);
+}
+
 static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba,
 					  enum ufs_notify_change_status status)
 {
+	struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
 	int err = 0;
 
 	if (status == POST_CHANGE) {
@@ -37,6 +46,9 @@ static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba,
 		return ufshcd_vops_phy_initialization(hba);
 	}
 
+	/* PRE_CHANGE */
+	ufs_rockchip_controller_reset(host);
+
 	return 0;
 }
@@ -156,9 +168,7 @@ static int ufs_rockchip_common_init(struct ufs_hba *hba)
 		return dev_err_probe(dev, PTR_ERR(host->rst),
 				     "failed to get reset control\n");
 
-	reset_control_assert(host->rst);
-	udelay(1);
-	reset_control_deassert(host->rst);
+	ufs_rockchip_controller_reset(host);
 
 	host->ref_out_clk = devm_clk_get_enabled(dev, "ref_out");
 	if (IS_ERR(host->ref_out_clk))
@@ -282,9 +292,7 @@ static int ufs_rockchip_runtime_resume(struct device *dev)
 		return err;
 	}
 
-	reset_control_assert(host->rst);
-	udelay(1);
-	reset_control_deassert(host->rst);
+	ufs_rockchip_controller_reset(host);
 
 	return ufshcd_runtime_resume(dev);
 }
diff --git a/drivers/ufs/host/ufshcd-dwc.h b/drivers/ufs/host/ufshcd-dwc.h
index ad91ea56662c..c618bb914904 100644
--- a/drivers/ufs/host/ufshcd-dwc.h
+++ b/drivers/ufs/host/ufshcd-dwc.h
@@ -12,6 +12,52 @@
 
 #include <ufs/ufshcd.h>
 
+/* RMMI Attributes */
+#define CBREFCLKCTRL2		0x8132
+#define CBCRCTRL		0x811F
+#define CBC10DIRECTCONF2	0x810E
+#define CBRATESEL		0x8114
+#define CBCREGADDRLSB		0x8116
+#define CBCREGADDRMSB		0x8117
+#define CBCREGWRLSB		0x8118
+#define CBCREGWRMSB		0x8119
+#define CBCREGRDLSB		0x811A
+#define CBCREGRDMSB		0x811B
+#define CBCREGRDWRSEL		0x811C
+
+#define CBREFREFCLK_GATE_OVR_EN	BIT(7)
+
+/* M-PHY Attributes */
+#define MTX_FSM_STATE		0x41
+#define MRX_FSM_STATE		0xC1
+
+/* M-PHY registers */
+#define RX_OVRD_IN_1(n)		(0x3006 + ((n) * 0x100))
+#define RX_PCS_OUT(n)		(0x300F + ((n) * 0x100))
+#define FAST_FLAGS(n)		(0x401C + ((n) * 0x100))
+#define RX_AFE_ATT_IDAC(n)	(0x4000 + ((n) * 0x100))
+#define RX_AFE_CTLE_IDAC(n)	(0x4001 + ((n) * 0x100))
+#define FW_CALIB_CCFG(n)	(0x404D + ((n) * 0x100))
+
+/* Tx/Rx FSM state */
+enum rx_fsm_state {
+	RX_STATE_DISABLED = 0,
+	RX_STATE_HIBERN8 = 1,
+	RX_STATE_SLEEP = 2,
+	RX_STATE_STALL = 3,
+	RX_STATE_LSBURST = 4,
+	RX_STATE_HSBURST = 5,
+};
+
+enum tx_fsm_state {
+	TX_STATE_DISABLED = 0,
+	TX_STATE_HIBERN8 = 1,
+	TX_STATE_SLEEP = 2,
+	TX_STATE_STALL = 3,
+	TX_STATE_LSBURST = 4,
+	TX_STATE_HSBURST = 5,
+};
+
 struct ufshcd_dme_attr_val {
 	u32 attr_sel;
 	u32 mib_val;
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index b87e03777395..5f65dfad1a71 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -15,6 +15,7 @@
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_qos.h>
+#include <linux/suspend.h>
 #include <linux/debugfs.h>
 #include <linux/uuid.h>
 #include <linux/acpi.h>
@@ -31,6 +32,7 @@ struct intel_host {
 	u32		dsm_fns;
 	u32		active_ltr;
 	u32		idle_ltr;
+	int		saved_spm_lvl;
 	struct dentry	*debugfs_root;
 	struct gpio_desc *reset_gpio;
 };
@@ -347,6 +349,7 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
 	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
 	if (!host)
 		return -ENOMEM;
+	host->saved_spm_lvl = -1;
 	ufshcd_set_variant(hba, host);
 	intel_dsm_init(host, hba->dev);
 	if (INTEL_DSM_SUPPORTED(host, RESET)) {
@@ -425,7 +428,8 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
 static int ufs_intel_adl_init(struct ufs_hba *hba)
 {
 	hba->nop_out_timeout = 200;
-	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 |
+		       UFSHCD_QUIRK_PERFORM_LINK_STARTUP_ONCE;
 	hba->caps |= UFSHCD_CAP_WB_EN;
 	return ufs_intel_common_init(hba);
 }
@@ -538,6 +542,66 @@ static int ufshcd_pci_restore(struct device *dev)
 
 	return ufshcd_system_resume(dev);
 }
+
+static int ufs_intel_suspend_prepare(struct device *dev)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	struct intel_host *host = ufshcd_get_variant(hba);
+	int err;
+
+	/*
+	 * Only s2idle (S0ix) retains link state. Force power-off
+	 * (UFS_PM_LVL_5) for any other case.
+	 */
+	if (pm_suspend_target_state != PM_SUSPEND_TO_IDLE && hba->spm_lvl < UFS_PM_LVL_5) {
+		host->saved_spm_lvl = hba->spm_lvl;
+		hba->spm_lvl = UFS_PM_LVL_5;
+	}
+
+	err = ufshcd_suspend_prepare(dev);
+
+	if (err < 0 && host->saved_spm_lvl != -1) {
+		hba->spm_lvl = host->saved_spm_lvl;
+		host->saved_spm_lvl = -1;
+	}
+
+	return err;
+}
+
+static void ufs_intel_resume_complete(struct device *dev)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	struct intel_host *host = ufshcd_get_variant(hba);
+
+	ufshcd_resume_complete(dev);
+
+	if (host->saved_spm_lvl != -1) {
+		hba->spm_lvl = host->saved_spm_lvl;
+		host->saved_spm_lvl = -1;
+	}
+}
+
+static int ufshcd_pci_suspend_prepare(struct device *dev)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	if (!strcmp(hba->vops->name, "intel-pci"))
+		return ufs_intel_suspend_prepare(dev);
+
+	return ufshcd_suspend_prepare(dev);
+}
+
+static void ufshcd_pci_resume_complete(struct device *dev)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	if (!strcmp(hba->vops->name, "intel-pci")) {
+		ufs_intel_resume_complete(dev);
+		return;
+	}
+
+	ufshcd_resume_complete(dev);
+}
 #endif
 
 /**
@@ -611,8 +675,8 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
 	.thaw = ufshcd_system_resume,
 	.poweroff = ufshcd_system_suspend,
 	.restore = ufshcd_pci_restore,
-	.prepare = ufshcd_suspend_prepare,
-	.complete = ufshcd_resume_complete,
+	.prepare = ufshcd_pci_suspend_prepare,
+	.complete = ufshcd_pci_resume_complete,
 #endif
 };
