Diffstat (limited to 'drivers/pci/controller')
35 files changed, 2112 insertions, 1386 deletions
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index cc9fa02d32a0..6671946dbf66 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -9,12 +9,14 @@ config PCI_MVEBU
 	depends on MVEBU_MBUS
 	depends on ARM
 	depends on OF
+	select PCI_BRIDGE_EMUL
 
 config PCI_AARDVARK
 	bool "Aardvark PCIe controller"
 	depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
 	depends on OF
 	depends on PCI_MSI_IRQ_DOMAIN
+	select PCI_BRIDGE_EMUL
 	help
 	  Add support for Aardvark 64bit PCIe Host Controller. This
 	  controller is part of the South Bridge of the Marvell Armada
@@ -102,7 +104,7 @@ config PCI_HOST_GENERIC
 
 config PCIE_XILINX
 	bool "Xilinx AXI PCIe host bridge support"
-	depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST
+	depends on OF || COMPILE_TEST
 	help
 	  Say 'Y' here if you want kernel to support the Xilinx AXI PCIe
 	  Host Bridge driver.
@@ -231,7 +233,7 @@ config PCIE_ROCKCHIP_EP
 	  available to support GEN2 with 4 slots.
 
 config PCIE_MEDIATEK
-	bool "MediaTek PCIe controller"
+	tristate "MediaTek PCIe controller"
 	depends on ARCH_MEDIATEK || COMPILE_TEST
 	depends on OF
 	depends on PCI_MSI_IRQ_DOMAIN
@@ -239,6 +241,16 @@ config PCIE_MEDIATEK
 	  Say Y here if you want to enable PCIe controller support on
 	  MediaTek SoCs.
 
+config PCIE_MOBIVEIL
+	bool "Mobiveil AXI PCIe controller"
+	depends on ARCH_ZYNQMP || COMPILE_TEST
+	depends on OF
+	depends on PCI_MSI_IRQ_DOMAIN
+	help
+	  Say Y here if you want to enable support for the Mobiveil AXI PCIe
+	  Soft IP. It has up to 8 outbound and inbound windows
+	  for address translation and it is a PCIe Gen4 IP.
+
 config PCIE_TANGO_SMP8759
 	bool "Tango SMP8759 PCIe controller (DANGEROUS)"
 	depends on ARCH_TANGO && PCI_MSI && OF
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index 24322b92f200..d56a507495c5 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
 obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
 obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
 obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
+obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
 obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
 obj-$(CONFIG_VMD) += vmd.o
 # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 5d2ce72c7a52..fcf91eacfc63 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
 obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
 obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
 obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
-obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
 obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
 obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
 obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 345aab56ce8b..a32d6dde7a57 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -370,7 +370,7 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
 }
 
 static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-				 enum pci_epc_irq_type type, u8 interrupt_num)
+				 enum pci_epc_irq_type type, u16 interrupt_num)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -542,7 +542,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = { }; /* - * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 + * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 * @dra7xx: the dra7xx device where the workaround should be applied * * Access to the PCIe slave port that are not 32-bit aligned will result @@ -552,7 +552,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = { * * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. */ -static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev) +static int dra7xx_pcie_unaligned_memaccess(struct device *dev) { int ret; struct device_node *np = dev->of_node; @@ -704,6 +704,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, DEVICE_TYPE_RC); + + ret = dra7xx_pcie_unaligned_memaccess(dev); + if (ret) + dev_err(dev, "WA for Errata i870 not applied\n"); + ret = dra7xx_add_pcie_port(dra7xx, pdev); if (ret < 0) goto err_gpio; @@ -717,7 +722,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, DEVICE_TYPE_EP); - ret = dra7xx_pcie_ep_unaligned_memaccess(dev); + ret = dra7xx_pcie_unaligned_memaccess(dev); if (ret) goto err_gpio; diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index 4cc1e5df8c79..cee5f2f590e2 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -421,7 +421,6 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep, } } - pp->root_bus_nr = -1; pp->ops = &exynos_pcie_host_ops; ret = dw_pcie_host_init(pp); diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 80f604602783..2cbef2d7c207 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -50,6 +50,7 @@ struct imx6_pcie { struct regmap *iomuxc_gpr; struct reset_control *pciephy_reset; struct reset_control *apps_reset; + struct reset_control *turnoff_reset; enum imx6_pcie_variants variant; u32 tx_deemph_gen1; u32 tx_deemph_gen2_3p5db; @@ -97,6 +98,16 @@ struct imx6_pcie { #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) /* PHY registers (not memory-mapped) */ +#define PCIE_PHY_ATEOVRD 0x10 +#define PCIE_PHY_ATEOVRD_EN (0x1 << 2) +#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0 +#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1 + +#define PCIE_PHY_MPLL_OVRD_IN_LO 0x11 +#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2 +#define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f +#define PCIE_PHY_MPLL_MULTIPLIER_OVRD (0x1 << 9) + #define PCIE_PHY_RX_ASIC_OUT 0x100D #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) @@ -508,6 +519,50 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12); } +static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) +{ + unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy); + int mult, div; + u32 val; + + switch (phy_rate) { + case 125000000: + /* + * The default settings of the MPLL are for a 125MHz input + * clock, so no need to reconfigure anything in that case. 
+ */ + return 0; + case 100000000: + mult = 25; + div = 0; + break; + case 200000000: + mult = 25; + div = 1; + break; + default: + dev_err(imx6_pcie->pci->dev, + "Unsupported PHY reference clock rate %lu\n", phy_rate); + return -EINVAL; + } + + pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val); + val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK << + PCIE_PHY_MPLL_MULTIPLIER_SHIFT); + val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT; + val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD; + pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val); + + pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val); + val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK << + PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT); + val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT; + val |= PCIE_PHY_ATEOVRD_EN; + pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val); + + return 0; +} + static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) { struct dw_pcie *pci = imx6_pcie->pci; @@ -542,6 +597,24 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) return -EINVAL; } +static void imx6_pcie_ltssm_enable(struct device *dev) +{ + struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); + + switch (imx6_pcie->variant) { + case IMX6Q: + case IMX6SX: + case IMX6QP: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_PCIE_CTL_2, + IMX6Q_GPR12_PCIE_CTL_2); + break; + case IMX7D: + reset_control_deassert(imx6_pcie->apps_reset); + break; + } +} + static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) { struct dw_pcie *pci = imx6_pcie->pci; @@ -560,11 +633,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); /* Start LTSSM. */ - if (imx6_pcie->variant == IMX7D) - reset_control_deassert(imx6_pcie->apps_reset); - else - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); + imx6_pcie_ltssm_enable(dev); ret = imx6_pcie_wait_for_link(imx6_pcie); if (ret) @@ -632,6 +701,7 @@ static int imx6_pcie_host_init(struct pcie_port *pp) imx6_pcie_assert_core_reset(imx6_pcie); imx6_pcie_init_phy(imx6_pcie); imx6_pcie_deassert_core_reset(imx6_pcie); + imx6_setup_phy_mpll(imx6_pcie); dw_pcie_setup_rc(pp); imx6_pcie_establish_link(imx6_pcie); @@ -667,7 +737,6 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, } } - pp->root_bus_nr = -1; pp->ops = &imx6_pcie_host_ops; ret = dw_pcie_host_init(pp); @@ -683,6 +752,94 @@ static const struct dw_pcie_ops dw_pcie_ops = { .link_up = imx6_pcie_link_up, }; +#ifdef CONFIG_PM_SLEEP +static void imx6_pcie_ltssm_disable(struct device *dev) +{ + struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); + + switch (imx6_pcie->variant) { + case IMX6SX: + case IMX6QP: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_PCIE_CTL_2, 0); + break; + case IMX7D: + reset_control_assert(imx6_pcie->apps_reset); + break; + default: + dev_err(dev, "ltssm_disable not supported\n"); + } +} + +static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie) +{ + reset_control_assert(imx6_pcie->turnoff_reset); + reset_control_deassert(imx6_pcie->turnoff_reset); + + /* + * Components with an upstream port must respond to + * PME_Turn_Off with PME_TO_Ack but we can't check. + * + * The standard recommends a 1-10ms timeout after which to + * proceed anyway as if acks were received. 
+ */ + usleep_range(1000, 10000); +} + +static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) +{ + clk_disable_unprepare(imx6_pcie->pcie); + clk_disable_unprepare(imx6_pcie->pcie_phy); + clk_disable_unprepare(imx6_pcie->pcie_bus); + + if (imx6_pcie->variant == IMX7D) { + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, + IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); + } +} + +static int imx6_pcie_suspend_noirq(struct device *dev) +{ + struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); + + if (imx6_pcie->variant != IMX7D) + return 0; + + imx6_pcie_pm_turnoff(imx6_pcie); + imx6_pcie_clk_disable(imx6_pcie); + imx6_pcie_ltssm_disable(dev); + + return 0; +} + +static int imx6_pcie_resume_noirq(struct device *dev) +{ + int ret; + struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); + struct pcie_port *pp = &imx6_pcie->pci->pp; + + if (imx6_pcie->variant != IMX7D) + return 0; + + imx6_pcie_assert_core_reset(imx6_pcie); + imx6_pcie_init_phy(imx6_pcie); + imx6_pcie_deassert_core_reset(imx6_pcie); + dw_pcie_setup_rc(pp); + + ret = imx6_pcie_establish_link(imx6_pcie); + if (ret < 0) + dev_info(dev, "pcie link is down after resume.\n"); + + return 0; +} +#endif + +static const struct dev_pm_ops imx6_pcie_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq, + imx6_pcie_resume_noirq) +}; + static int imx6_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -777,6 +934,13 @@ static int imx6_pcie_probe(struct platform_device *pdev) break; } + /* Grab turnoff reset */ + imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff"); + if (IS_ERR(imx6_pcie->turnoff_reset)) { + dev_err(dev, "Failed to get TURNOFF reset control\n"); + return PTR_ERR(imx6_pcie->turnoff_reset); + } + /* Grab GPR config register range */ imx6_pcie->iomuxc_gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); @@ -849,6 +1013,7 @@ static struct platform_driver imx6_pcie_driver = { .name = "imx6q-pcie", .of_match_table = imx6_pcie_of_match, .suppress_bind_attrs = true, + .pm = &imx6_pcie_pm_ops, }, .probe = imx6_pcie_probe, .shutdown = imx6_pcie_shutdown, diff --git a/drivers/pci/controller/dwc/pci-keystone-dw.c b/drivers/pci/controller/dwc/pci-keystone-dw.c deleted file mode 100644 index 0682213328e9..000000000000 --- a/drivers/pci/controller/dwc/pci-keystone-dw.c +++ /dev/null @@ -1,484 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DesignWare application register space functions for Keystone PCI controller - * - * Copyright (C) 2013-2014 Texas Instruments., Ltd. 
- * http://www.ti.com - * - * Author: Murali Karicheri <m-karicheri2@ti.com> - */ - -#include <linux/irq.h> -#include <linux/irqdomain.h> -#include <linux/irqreturn.h> -#include <linux/module.h> -#include <linux/of.h> -#include <linux/of_pci.h> -#include <linux/pci.h> -#include <linux/platform_device.h> - -#include "pcie-designware.h" -#include "pci-keystone.h" - -/* Application register defines */ -#define LTSSM_EN_VAL 1 -#define LTSSM_STATE_MASK 0x1f -#define LTSSM_STATE_L0 0x11 -#define DBI_CS2_EN_VAL 0x20 -#define OB_XLAT_EN_VAL 2 - -/* Application registers */ -#define CMD_STATUS 0x004 -#define CFG_SETUP 0x008 -#define OB_SIZE 0x030 -#define CFG_PCIM_WIN_SZ_IDX 3 -#define CFG_PCIM_WIN_CNT 32 -#define SPACE0_REMOTE_CFG_OFFSET 0x1000 -#define OB_OFFSET_INDEX(n) (0x200 + (8 * n)) -#define OB_OFFSET_HI(n) (0x204 + (8 * n)) - -/* IRQ register defines */ -#define IRQ_EOI 0x050 -#define IRQ_STATUS 0x184 -#define IRQ_ENABLE_SET 0x188 -#define IRQ_ENABLE_CLR 0x18c - -#define MSI_IRQ 0x054 -#define MSI0_IRQ_STATUS 0x104 -#define MSI0_IRQ_ENABLE_SET 0x108 -#define MSI0_IRQ_ENABLE_CLR 0x10c -#define IRQ_STATUS 0x184 -#define MSI_IRQ_OFFSET 4 - -/* Error IRQ bits */ -#define ERR_AER BIT(5) /* ECRC error */ -#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ -#define ERR_CORR BIT(3) /* Correctable error */ -#define ERR_NONFATAL BIT(2) /* Non-fatal error */ -#define ERR_FATAL BIT(1) /* Fatal error */ -#define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */ -#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ - ERR_NONFATAL | ERR_FATAL | ERR_SYS) -#define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI) -#define ERR_IRQ_STATUS_RAW 0x1c0 -#define ERR_IRQ_STATUS 0x1c4 -#define ERR_IRQ_ENABLE_SET 0x1c8 -#define ERR_IRQ_ENABLE_CLR 0x1cc - -/* Config space registers */ -#define DEBUG0 0x728 - -#define to_keystone_pcie(x) dev_get_drvdata((x)->dev) - -static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, - u32 *bit_pos) -{ - *reg_offset = offset % 8; - *bit_pos = offset >> 3; -} - -phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - - return ks_pcie->app.start + MSI_IRQ; -} - -static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset) -{ - return readl(ks_pcie->va_app_base + offset); -} - -static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val) -{ - writel(val, ks_pcie->va_app_base + offset); -} - -void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) -{ - struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = pci->dev; - u32 pending, vector; - int src, virq; - - pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4)); - - /* - * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit - * shows 1, 9, 17, 25 and so forth - */ - for (src = 0; src < 4; src++) { - if (BIT(src) & pending) { - vector = offset + (src << 3); - virq = irq_linear_revmap(pp->irq_domain, vector); - dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", - src, vector, virq); - generic_handle_irq(virq); - } - } -} - -void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp) -{ - u32 reg_offset, bit_pos; - struct keystone_pcie *ks_pcie; - struct dw_pcie *pci; - - pci = to_dw_pcie_from_pp(pp); - ks_pcie = to_keystone_pcie(pci); - update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); - - ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4), - BIT(bit_pos)); - 
ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); -} - -void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) -{ - u32 reg_offset, bit_pos; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - - update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); - ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), - BIT(bit_pos)); -} - -void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) -{ - u32 reg_offset, bit_pos; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - - update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); - ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), - BIT(bit_pos)); -} - -int ks_dw_pcie_msi_host_init(struct pcie_port *pp) -{ - return dw_pcie_allocate_domains(pp); -} - -void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) -{ - int i; - - for (i = 0; i < PCI_NUM_INTX; i++) - ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); -} - -void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) -{ - struct dw_pcie *pci = ks_pcie->pci; - struct device *dev = pci->dev; - u32 pending; - int virq; - - pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4)); - - if (BIT(0) & pending) { - virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); - dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq); - generic_handle_irq(virq); - } - - /* EOI the INTx interrupt */ - ks_dw_app_writel(ks_pcie, IRQ_EOI, offset); -} - -void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) -{ - ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); -} - -irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) -{ - u32 status; - - status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL; - if (!status) - return IRQ_NONE; - - if (status & ERR_FATAL_IRQ) - dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n", - status); - - /* Ack the IRQ; status bits are RW1C */ - ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status); - return IRQ_HANDLED; -} - -static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d) -{ -} - -static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d) -{ -} - -static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d) -{ -} - -static struct irq_chip ks_dw_pcie_legacy_irq_chip = { - .name = "Keystone-PCI-Legacy-IRQ", - .irq_ack = ks_dw_pcie_ack_legacy_irq, - .irq_mask = ks_dw_pcie_mask_legacy_irq, - .irq_unmask = ks_dw_pcie_unmask_legacy_irq, -}; - -static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d, - unsigned int irq, irq_hw_number_t hw_irq) -{ - irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip, - handle_level_irq); - irq_set_chip_data(irq, d->host_data); - - return 0; -} - -static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = { - .map = ks_dw_pcie_init_legacy_irq_map, - .xlate = irq_domain_xlate_onetwocell, -}; - -/** - * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask - * registers - * - * Since modification of dbi_cs2 involves different clock domain, read the - * status back to ensure the transition is complete. 
- */ -static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) -{ - u32 val; - - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val); - - do { - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - } while (!(val & DBI_CS2_EN_VAL)); -} - -/** - * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode - * - * Since modification of dbi_cs2 involves different clock domain, read the - * status back to ensure the transition is complete. - */ -static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) -{ - u32 val; - - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val); - - do { - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - } while (val & DBI_CS2_EN_VAL); -} - -void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) -{ - struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; - u32 start = pp->mem->start, end = pp->mem->end; - int i, tr_size; - u32 val; - - /* Disable BARs for inbound access */ - ks_dw_pcie_set_dbi_mode(ks_pcie); - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); - ks_dw_pcie_clear_dbi_mode(ks_pcie); - - /* Set outbound translation size per window division */ - ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7); - - tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M; - - /* Using Direct 1:1 mapping of RC <-> PCI memory space */ - for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) { - ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1); - ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0); - start += tr_size; - } - - /* Enable OB translation */ - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val); -} - -/** - * ks_pcie_cfg_setup() - Set up configuration space address for a device - * - * @ks_pcie: ptr to keystone_pcie structure - * @bus: Bus number the device is residing on - * @devfn: device, function number info - * - * Forms and returns the address of configuration space mapped in PCIESS - * address space 0. Also configures CFG_SETUP for remote configuration space - * access. - * - * The address space has two regions to access configuration - local and remote. - * We access local region for bus 0 (as RC is attached on bus 0) and remote - * region for others with TYPE 1 access when bus > 1. As for device on bus = 1, - * we will do TYPE 0 access as it will be on our secondary bus (logical). - * CFG_SETUP is needed only for remote configuration access. - */ -static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus, - unsigned int devfn) -{ - u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn); - struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; - u32 regval; - - if (bus == 0) - return pci->dbi_base; - - regval = (bus << 16) | (device << 8) | function; - - /* - * Since Bus#1 will be a virtual bus, we need to have TYPE0 - * access only. 
- * TYPE 1 - */ - if (bus != 1) - regval |= BIT(24); - - ks_dw_app_writel(ks_pcie, CFG_SETUP, regval); - return pp->va_cfg0_base; -} - -int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 *val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - u8 bus_num = bus->number; - void __iomem *addr; - - addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); - - return dw_pcie_read(addr + where, size, val); -} - -int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 val) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - u8 bus_num = bus->number; - void __iomem *addr; - - addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); - - return dw_pcie_write(addr + where, size, val); -} - -/** - * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization - * - * This sets BAR0 to enable inbound access for MSI_IRQ register - */ -void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - - /* Configure and set up BAR0 */ - ks_dw_pcie_set_dbi_mode(ks_pcie); - - /* Enable BAR0 */ - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1); - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1); - - ks_dw_pcie_clear_dbi_mode(ks_pcie); - - /* - * For BAR0, just setting bus address for inbound writes (MSI) should - * be sufficient. Use physical address to avoid any conflicts. - */ - dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); -} - -/** - * ks_dw_pcie_link_up() - Check if link up - */ -int ks_dw_pcie_link_up(struct dw_pcie *pci) -{ - u32 val; - - val = dw_pcie_readl_dbi(pci, DEBUG0); - return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0; -} - -void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) -{ - u32 val; - - /* Disable Link training */ - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - val &= ~LTSSM_EN_VAL; - ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); - - /* Initiate Link Training */ - val = ks_dw_app_readl(ks_pcie, CMD_STATUS); - ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); -} - -/** - * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware - * - * Ioremap the register resources, initialize legacy irq domain - * and call dw_pcie_v3_65_host_init() API to initialize the Keystone - * PCI host controller. - */ -int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, - struct device_node *msi_intc_np) -{ - struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = pci->dev; - struct platform_device *pdev = to_platform_device(dev); - struct resource *res; - - /* Index 0 is the config reg. space address */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - - /* - * We set these same and is used in pcie rd/wr_other_conf - * functions - */ - pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET; - pp->va_cfg1_base = pp->va_cfg0_base; - - /* Index 1 is the application reg. 
space address */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - ks_pcie->va_app_base = devm_ioremap_resource(dev, res); - if (IS_ERR(ks_pcie->va_app_base)) - return PTR_ERR(ks_pcie->va_app_base); - - ks_pcie->app = *res; - - /* Create legacy IRQ domain */ - ks_pcie->legacy_irq_domain = - irq_domain_add_linear(ks_pcie->legacy_intc_np, - PCI_NUM_INTX, - &ks_dw_pcie_legacy_irq_domain_ops, - NULL); - if (!ks_pcie->legacy_irq_domain) { - dev_err(dev, "Failed to add irq domain for legacy irqs\n"); - return -EINVAL; - } - - return dw_pcie_host_init(pp); -} diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 3722a5f31e5e..14f2b0b4ed5e 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -9,40 +9,510 @@ * Implementation based on pci-exynos.c and pcie-designware.c */ -#include <linux/irqchip/chained_irq.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/init.h> #include <linux/interrupt.h> +#include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> -#include <linux/init.h> +#include <linux/mfd/syscon.h> #include <linux/msi.h> -#include <linux/of_irq.h> #include <linux/of.h> +#include <linux/of_irq.h> #include <linux/of_pci.h> -#include <linux/platform_device.h> #include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> #include <linux/resource.h> #include <linux/signal.h> #include "pcie-designware.h" -#include "pci-keystone.h" -#define DRIVER_NAME "keystone-pcie" +#define PCIE_VENDORID_MASK 0xffff +#define PCIE_DEVICEID_SHIFT 16 + +/* Application registers */ +#define CMD_STATUS 0x004 +#define LTSSM_EN_VAL BIT(0) +#define OB_XLAT_EN_VAL BIT(1) +#define DBI_CS2 BIT(5) + +#define CFG_SETUP 0x008 +#define CFG_BUS(x) (((x) & 0xff) << 16) +#define CFG_DEVICE(x) (((x) & 0x1f) << 8) +#define CFG_FUNC(x) ((x) & 0x7) +#define CFG_TYPE1 BIT(24) + +#define OB_SIZE 0x030 +#define SPACE0_REMOTE_CFG_OFFSET 0x1000 +#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n))) +#define OB_OFFSET_HI(n) (0x204 + (8 * (n))) +#define OB_ENABLEN BIT(0) +#define OB_WIN_SIZE 8 /* 8MB */ + +/* IRQ register defines */ +#define IRQ_EOI 0x050 +#define IRQ_STATUS 0x184 +#define IRQ_ENABLE_SET 0x188 +#define IRQ_ENABLE_CLR 0x18c + +#define MSI_IRQ 0x054 +#define MSI0_IRQ_STATUS 0x104 +#define MSI0_IRQ_ENABLE_SET 0x108 +#define MSI0_IRQ_ENABLE_CLR 0x10c +#define IRQ_STATUS 0x184 +#define MSI_IRQ_OFFSET 4 + +#define ERR_IRQ_STATUS 0x1c4 +#define ERR_IRQ_ENABLE_SET 0x1c8 +#define ERR_AER BIT(5) /* ECRC error */ +#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ +#define ERR_CORR BIT(3) /* Correctable error */ +#define ERR_NONFATAL BIT(2) /* Non-fatal error */ +#define ERR_FATAL BIT(1) /* Fatal error */ +#define ERR_SYS BIT(0) /* System error */ +#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ + ERR_NONFATAL | ERR_FATAL | ERR_SYS) + +#define MAX_MSI_HOST_IRQS 8 +/* PCIE controller device IDs */ +#define PCIE_RC_K2HK 0xb008 +#define PCIE_RC_K2E 0xb009 +#define PCIE_RC_K2L 0xb00a +#define PCIE_RC_K2G 0xb00b + +#define to_keystone_pcie(x) dev_get_drvdata((x)->dev) + +struct keystone_pcie { + struct dw_pcie *pci; + /* PCI Device ID */ + u32 device_id; + int num_legacy_host_irqs; + int legacy_host_irqs[PCI_NUM_INTX]; + struct device_node *legacy_intc_np; + + int num_msi_host_irqs; + int msi_host_irqs[MAX_MSI_HOST_IRQS]; + int num_lanes; + u32 num_viewport; + struct phy **phy; + struct device_link **link; + struct device_node *msi_intc_np; + struct irq_domain 
*legacy_irq_domain; + struct device_node *np; + + int error_irq; + + /* Application register space */ + void __iomem *va_app_base; /* DT 1st resource */ + struct resource app; +}; -/* DEV_STAT_CTRL */ -#define PCIE_CAP_BASE 0x70 +static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, + u32 *bit_pos) +{ + *reg_offset = offset % 8; + *bit_pos = offset >> 3; +} -/* PCIE controller device IDs */ -#define PCIE_RC_K2HK 0xb008 -#define PCIE_RC_K2E 0xb009 -#define PCIE_RC_K2L 0xb00a +static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + return ks_pcie->app.start + MSI_IRQ; +} + +static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset) +{ + return readl(ks_pcie->va_app_base + offset); +} + +static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset, + u32 val) +{ + writel(val, ks_pcie->va_app_base + offset); +} + +static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + u32 pending, vector; + int src, virq; + + pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4)); + + /* + * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit + * shows 1, 9, 17, 25 and so forth + */ + for (src = 0; src < 4; src++) { + if (BIT(src) & pending) { + vector = offset + (src << 3); + virq = irq_linear_revmap(pp->irq_domain, vector); + dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", + src, vector, virq); + generic_handle_irq(virq); + } + } +} + +static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp) +{ + u32 reg_offset, bit_pos; + struct keystone_pcie *ks_pcie; + struct dw_pcie *pci; + + pci = to_dw_pcie_from_pp(pp); + ks_pcie = to_keystone_pcie(pci); + update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); + + ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4), + BIT(bit_pos)); + ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); +} + +static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq) +{ + u32 reg_offset, bit_pos; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); + ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), + BIT(bit_pos)); +} + +static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq) +{ + u32 reg_offset, bit_pos; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); + ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), + BIT(bit_pos)); +} + +static int ks_pcie_msi_host_init(struct pcie_port *pp) +{ + return dw_pcie_allocate_domains(pp); +} + +static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) +{ + int i; + + for (i = 0; i < PCI_NUM_INTX; i++) + ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); +} + +static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, + int offset) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct device *dev = pci->dev; + u32 pending; + int virq; + + pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4)); + + if (BIT(0) & pending) { + virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); + dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq); + generic_handle_irq(virq); + } + + /* EOI the INTx 
interrupt */ + ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset); +} + +static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) +{ + ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); +} + +static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) +{ + u32 reg; + struct device *dev = ks_pcie->pci->dev; + + reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS); + if (!reg) + return IRQ_NONE; + + if (reg & ERR_SYS) + dev_err(dev, "System Error\n"); + + if (reg & ERR_FATAL) + dev_err(dev, "Fatal Error\n"); + + if (reg & ERR_NONFATAL) + dev_dbg(dev, "Non Fatal Error\n"); + + if (reg & ERR_CORR) + dev_dbg(dev, "Correctable Error\n"); + + if (reg & ERR_AXI) + dev_err(dev, "AXI tag lookup fatal Error\n"); + + if (reg & ERR_AER) + dev_err(dev, "ECRC Error\n"); + + ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg); + + return IRQ_HANDLED; +} + +static void ks_pcie_ack_legacy_irq(struct irq_data *d) +{ +} + +static void ks_pcie_mask_legacy_irq(struct irq_data *d) +{ +} + +static void ks_pcie_unmask_legacy_irq(struct irq_data *d) +{ +} + +static struct irq_chip ks_pcie_legacy_irq_chip = { + .name = "Keystone-PCI-Legacy-IRQ", + .irq_ack = ks_pcie_ack_legacy_irq, + .irq_mask = ks_pcie_mask_legacy_irq, + .irq_unmask = ks_pcie_unmask_legacy_irq, +}; + +static int ks_pcie_init_legacy_irq_map(struct irq_domain *d, + unsigned int irq, + irq_hw_number_t hw_irq) +{ + irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, d->host_data); + + return 0; +} + +static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = { + .map = ks_pcie_init_legacy_irq_map, + .xlate = irq_domain_xlate_onetwocell, +}; + +/** + * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask + * registers + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. + */ +static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) +{ + u32 val; + + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + val |= DBI_CS2; + ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); + + do { + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + } while (!(val & DBI_CS2)); +} + +/** + * ks_pcie_clear_dbi_mode() - Disable DBI mode + * + * Since modification of dbi_cs2 involves different clock domain, read the + * status back to ensure the transition is complete. 
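To make the MSI bookkeeping above concrete: update_reg_offset_bit_pos() files vector N under MSI register block (N % 8) at bit (N / 8), and ks_pcie_handle_msi_irq() reverses that with vector = offset + (src << 3). A small standalone illustration (not from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int vec;

		/* Print where each of the first 32 MSI vectors is tracked. */
		for (vec = 0; vec < 32; vec++)
			printf("vector %2u -> MSI%u_IRQ_STATUS bit %u\n",
			       vec, vec % 8, vec / 8);
		return 0;
	}

This is why MSI0's status bits report vectors 0, 8, 16, 24 while MSI1's report 1, 9, 17, 25, as the comment in ks_pcie_handle_msi_irq() notes.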
+ */ +static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) +{ + u32 val; + + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + val &= ~DBI_CS2; + ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); + + do { + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + } while (val & DBI_CS2); +} + +static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) +{ + u32 val; + u32 num_viewport = ks_pcie->num_viewport; + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + u64 start = pp->mem->start; + u64 end = pp->mem->end; + int i; + + /* Disable BARs for inbound access */ + ks_pcie_set_dbi_mode(ks_pcie); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); + ks_pcie_clear_dbi_mode(ks_pcie); + + val = ilog2(OB_WIN_SIZE); + ks_pcie_app_writel(ks_pcie, OB_SIZE, val); + + /* Using Direct 1:1 mapping of RC <-> PCI memory space */ + for (i = 0; i < num_viewport && (start < end); i++) { + ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i), + lower_32_bits(start) | OB_ENABLEN); + ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i), + upper_32_bits(start)); + start += OB_WIN_SIZE; + } + + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + val |= OB_XLAT_EN_VAL; + ks_pcie_app_writel(ks_pcie, CMD_STATUS, val); +} + +static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, + u32 *val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + u32 reg; + + reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | + CFG_FUNC(PCI_FUNC(devfn)); + if (bus->parent->number != pp->root_bus_nr) + reg |= CFG_TYPE1; + ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg); + + return dw_pcie_read(pp->va_cfg0_base + where, size, val); +} + +static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, + unsigned int devfn, int where, int size, + u32 val) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + u32 reg; + + reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | + CFG_FUNC(PCI_FUNC(devfn)); + if (bus->parent->number != pp->root_bus_nr) + reg |= CFG_TYPE1; + ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg); + + return dw_pcie_write(pp->va_cfg0_base + where, size, val); +} + +/** + * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization + * + * This sets BAR0 to enable inbound access for MSI_IRQ register + */ +static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp) +{ + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); + + /* Configure and set up BAR0 */ + ks_pcie_set_dbi_mode(ks_pcie); + + /* Enable BAR0 */ + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1); + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1); + + ks_pcie_clear_dbi_mode(ks_pcie); + + /* + * For BAR0, just setting bus address for inbound writes (MSI) should + * be sufficient. Use physical address to avoid any conflicts. 
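One detail worth flagging in ks_pcie_setup_rc_app_regs() above: OB_SIZE is programmed with ilog2(OB_WIN_SIZE), i.e. 8 MB windows, which matches the old tr_size = (1 << CFG_PCIM_WIN_SZ_IDX) * SZ_1M logic, yet the loop advances start by the bare constant OB_WIN_SIZE (8). If the old start += tr_size behaviour is the intent, the stride presumably needs the same megabyte scaling, along the lines of:

	start += OB_WIN_SIZE * SZ_1M;	/* presumed intent: step 8 MB, not 8 bytes */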
+ */ + dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); +} + +/** + * ks_pcie_link_up() - Check if link up + */ +static int ks_pcie_link_up(struct dw_pcie *pci) +{ + u32 val; + + val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0); + val &= PORT_LOGIC_LTSSM_STATE_MASK; + return (val == PORT_LOGIC_LTSSM_STATE_L0); +} + +static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) +{ + u32 val; -#define to_keystone_pcie(x) dev_get_drvdata((x)->dev) + /* Disable Link training */ + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + val &= ~LTSSM_EN_VAL; + ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); -static void quirk_limit_mrrs(struct pci_dev *dev) + /* Initiate Link Training */ + val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); + ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); +} + +/** + * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware + * + * Ioremap the register resources, initialize legacy irq domain + * and call dw_pcie_v3_65_host_init() API to initialize the Keystone + * PCI host controller. + */ +static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie) +{ + struct dw_pcie *pci = ks_pcie->pci; + struct pcie_port *pp = &pci->pp; + struct device *dev = pci->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + + /* Index 0 is the config reg. space address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + + /* + * We set these same and is used in pcie rd/wr_other_conf + * functions + */ + pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET; + pp->va_cfg1_base = pp->va_cfg0_base; + + /* Index 1 is the application reg. space address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + ks_pcie->va_app_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ks_pcie->va_app_base)) + return PTR_ERR(ks_pcie->va_app_base); + + ks_pcie->app = *res; + + /* Create legacy IRQ domain */ + ks_pcie->legacy_irq_domain = + irq_domain_add_linear(ks_pcie->legacy_intc_np, + PCI_NUM_INTX, + &ks_pcie_legacy_irq_domain_ops, + NULL); + if (!ks_pcie->legacy_irq_domain) { + dev_err(dev, "Failed to add irq domain for legacy irqs\n"); + return -EINVAL; + } + + return dw_pcie_host_init(pp); +} + +static void ks_pcie_quirk(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; - struct pci_dev *bridge = bus->self; + struct pci_dev *bridge; static const struct pci_device_id rc_pci_devids[] = { { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK), .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, @@ -50,11 +520,13 @@ static void quirk_limit_mrrs(struct pci_dev *dev) .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L), .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G), + .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, { 0, }, }; if (pci_is_root_bus(bus)) - return; + bridge = dev; /* look for the host bridge */ while (!pci_is_root_bus(bus)) { @@ -62,43 +534,39 @@ static void quirk_limit_mrrs(struct pci_dev *dev) bus = bus->parent; } - if (bridge) { - /* - * Keystone PCI controller has a h/w limitation of - * 256 bytes maximum read request size. It can't handle - * anything higher than this. So force this limit on - * all downstream devices. 
- */ - if (pci_match_id(rc_pci_devids, bridge)) { - if (pcie_get_readrq(dev) > 256) { - dev_info(&dev->dev, "limiting MRRS to 256\n"); - pcie_set_readrq(dev, 256); - } + if (!bridge) + return; + + /* + * Keystone PCI controller has a h/w limitation of + * 256 bytes maximum read request size. It can't handle + * anything higher than this. So force this limit on + * all downstream devices. + */ + if (pci_match_id(rc_pci_devids, bridge)) { + if (pcie_get_readrq(dev) > 256) { + dev_info(&dev->dev, "limiting MRRS to 256\n"); + pcie_set_readrq(dev, 256); } } } -DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs); +DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk); static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) { struct dw_pcie *pci = ks_pcie->pci; - struct pcie_port *pp = &pci->pp; struct device *dev = pci->dev; - unsigned int retries; - - dw_pcie_setup_rc(pp); if (dw_pcie_link_up(pci)) { dev_info(dev, "Link already up\n"); return 0; } + ks_pcie_initiate_link_train(ks_pcie); + /* check if the link is up or not */ - for (retries = 0; retries < 5; retries++) { - ks_dw_pcie_initiate_link_train(ks_pcie); - if (!dw_pcie_wait_for_link(pci)) - return 0; - } + if (!dw_pcie_wait_for_link(pci)) + return 0; dev_err(dev, "phy link never came up\n"); return -ETIMEDOUT; @@ -121,7 +589,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc) * ack operation. */ chained_irq_enter(chip, desc); - ks_dw_pcie_handle_msi_irq(ks_pcie, offset); + ks_pcie_handle_msi_irq(ks_pcie, offset); chained_irq_exit(chip, desc); } @@ -150,7 +618,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) * ack operation. */ chained_irq_enter(chip, desc); - ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset); + ks_pcie_handle_legacy_irq(ks_pcie, irq_offset); chained_irq_exit(chip, desc); } @@ -222,7 +690,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) ks_pcie_legacy_irq_handler, ks_pcie); } - ks_dw_pcie_enable_legacy_irqs(ks_pcie); + ks_pcie_enable_legacy_irqs(ks_pcie); /* MSI IRQ */ if (IS_ENABLED(CONFIG_PCI_MSI)) { @@ -234,7 +702,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) } if (ks_pcie->error_irq > 0) - ks_dw_pcie_enable_error_irq(ks_pcie); + ks_pcie_enable_error_irq(ks_pcie); } /* @@ -242,8 +710,8 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) * bus error instead of returning 0xffffffff. This handler always returns 0 * for this kind of faults. 
*/ -static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, - struct pt_regs *regs) +static int ks_pcie_fault(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) { unsigned long instr = *(unsigned long *) instruction_pointer(regs); @@ -257,59 +725,78 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, return 0; } +static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) +{ + int ret; + unsigned int id; + struct regmap *devctrl_regs; + struct dw_pcie *pci = ks_pcie->pci; + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + + devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id"); + if (IS_ERR(devctrl_regs)) + return PTR_ERR(devctrl_regs); + + ret = regmap_read(devctrl_regs, 0, &id); + if (ret) + return ret; + + dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK); + dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT); + + return 0; +} + static int __init ks_pcie_host_init(struct pcie_port *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); - u32 val; + int ret; + + dw_pcie_setup_rc(pp); ks_pcie_establish_link(ks_pcie); - ks_dw_pcie_setup_rc_app_regs(ks_pcie); + ks_pcie_setup_rc_app_regs(ks_pcie); ks_pcie_setup_interrupts(ks_pcie); writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), pci->dbi_base + PCI_IO_BASE); - /* update the Vendor ID */ - writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID); - - /* update the DEV_STAT_CTRL to publish right mrrs */ - val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); - val &= ~PCI_EXP_DEVCTL_READRQ; - /* set the mrrs to 256 bytes */ - val |= BIT(12); - writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); + ret = ks_pcie_init_id(ks_pcie); + if (ret < 0) + return ret; /* * PCIe access errors that result into OCP errors are caught by ARM as * "External aborts" */ - hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0, + hook_fault_code(17, ks_pcie_fault, SIGBUS, 0, "Asynchronous external abort"); return 0; } -static const struct dw_pcie_host_ops keystone_pcie_host_ops = { - .rd_other_conf = ks_dw_pcie_rd_other_conf, - .wr_other_conf = ks_dw_pcie_wr_other_conf, +static const struct dw_pcie_host_ops ks_pcie_host_ops = { + .rd_other_conf = ks_pcie_rd_other_conf, + .wr_other_conf = ks_pcie_wr_other_conf, .host_init = ks_pcie_host_init, - .msi_set_irq = ks_dw_pcie_msi_set_irq, - .msi_clear_irq = ks_dw_pcie_msi_clear_irq, - .get_msi_addr = ks_dw_pcie_get_msi_addr, - .msi_host_init = ks_dw_pcie_msi_host_init, - .msi_irq_ack = ks_dw_pcie_msi_irq_ack, - .scan_bus = ks_dw_pcie_v3_65_scan_bus, + .msi_set_irq = ks_pcie_msi_set_irq, + .msi_clear_irq = ks_pcie_msi_clear_irq, + .get_msi_addr = ks_pcie_get_msi_addr, + .msi_host_init = ks_pcie_msi_host_init, + .msi_irq_ack = ks_pcie_msi_irq_ack, + .scan_bus = ks_pcie_v3_65_scan_bus, }; -static irqreturn_t pcie_err_irq_handler(int irq, void *priv) +static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv) { struct keystone_pcie *ks_pcie = priv; - return ks_dw_pcie_handle_error_irq(ks_pcie); + return ks_pcie_handle_error_irq(ks_pcie); } -static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, - struct platform_device *pdev) +static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie, + struct platform_device *pdev) { struct dw_pcie *pci = ks_pcie->pci; struct pcie_port *pp = &pci->pp; @@ -338,7 +825,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, if (ks_pcie->error_irq <= 0) 
dev_info(dev, "no error IRQ defined\n"); else { - ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler, + ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler, IRQF_SHARED, "pcie-error-irq", ks_pcie); if (ret < 0) { dev_err(dev, "failed to request error IRQ %d\n", @@ -347,9 +834,8 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, } } - pp->root_bus_nr = -1; - pp->ops = &keystone_pcie_host_ops; - ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); + pp->ops = &ks_pcie_host_ops; + ret = ks_pcie_dw_host_init(ks_pcie); if (ret) { dev_err(dev, "failed to initialize host\n"); return ret; @@ -366,28 +852,62 @@ static const struct of_device_id ks_pcie_of_match[] = { { }, }; -static const struct dw_pcie_ops dw_pcie_ops = { - .link_up = ks_dw_pcie_link_up, +static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = { + .link_up = ks_pcie_link_up, }; -static int __exit ks_pcie_remove(struct platform_device *pdev) +static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie) { - struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); + int num_lanes = ks_pcie->num_lanes; - clk_disable_unprepare(ks_pcie->clk); + while (num_lanes--) { + phy_power_off(ks_pcie->phy[num_lanes]); + phy_exit(ks_pcie->phy[num_lanes]); + } +} + +static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie) +{ + int i; + int ret; + int num_lanes = ks_pcie->num_lanes; + + for (i = 0; i < num_lanes; i++) { + ret = phy_init(ks_pcie->phy[i]); + if (ret < 0) + goto err_phy; + + ret = phy_power_on(ks_pcie->phy[i]); + if (ret < 0) { + phy_exit(ks_pcie->phy[i]); + goto err_phy; + } + } return 0; + +err_phy: + while (--i >= 0) { + phy_power_off(ks_pcie->phy[i]); + phy_exit(ks_pcie->phy[i]); + } + + return ret; } static int __init ks_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; struct dw_pcie *pci; struct keystone_pcie *ks_pcie; - struct resource *res; - void __iomem *reg_p; - struct phy *phy; + struct device_link **link; + u32 num_viewport; + struct phy **phy; + u32 num_lanes; + char name[10]; int ret; + int i; ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); if (!ks_pcie) @@ -398,54 +918,99 @@ static int __init ks_pcie_probe(struct platform_device *pdev) return -ENOMEM; pci->dev = dev; - pci->ops = &dw_pcie_ops; + pci->ops = &ks_pcie_dw_pcie_ops; - ks_pcie->pci = pci; + ret = of_property_read_u32(np, "num-viewport", &num_viewport); + if (ret < 0) { + dev_err(dev, "unable to read *num-viewport* property\n"); + return ret; + } - /* initialize SerDes Phy if present */ - phy = devm_phy_get(dev, "pcie-phy"); - if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER) - return PTR_ERR(phy); + ret = of_property_read_u32(np, "num-lanes", &num_lanes); + if (ret) + num_lanes = 1; - if (!IS_ERR_OR_NULL(phy)) { - ret = phy_init(phy); - if (ret < 0) - return ret; + phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL); + if (!phy) + return -ENOMEM; + + link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL); + if (!link) + return -ENOMEM; + + for (i = 0; i < num_lanes; i++) { + snprintf(name, sizeof(name), "pcie-phy%d", i); + phy[i] = devm_phy_optional_get(dev, name); + if (IS_ERR(phy[i])) { + ret = PTR_ERR(phy[i]); + goto err_link; + } + + if (!phy[i]) + continue; + + link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); + if (!link[i]) { + ret = -EINVAL; + goto err_link; + } } - /* index 2 is to read PCI DEVICE_ID */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - reg_p = devm_ioremap_resource(dev, res); 
- if (IS_ERR(reg_p)) - return PTR_ERR(reg_p); - ks_pcie->device_id = readl(reg_p) >> 16; - devm_iounmap(dev, reg_p); - devm_release_mem_region(dev, res->start, resource_size(res)); + ks_pcie->np = np; + ks_pcie->pci = pci; + ks_pcie->link = link; + ks_pcie->num_lanes = num_lanes; + ks_pcie->num_viewport = num_viewport; + ks_pcie->phy = phy; - ks_pcie->np = dev->of_node; - platform_set_drvdata(pdev, ks_pcie); - ks_pcie->clk = devm_clk_get(dev, "pcie"); - if (IS_ERR(ks_pcie->clk)) { - dev_err(dev, "Failed to get pcie rc clock\n"); - return PTR_ERR(ks_pcie->clk); + ret = ks_pcie_enable_phy(ks_pcie); + if (ret) { + dev_err(dev, "failed to enable phy\n"); + goto err_link; } - ret = clk_prepare_enable(ks_pcie->clk); - if (ret) - return ret; platform_set_drvdata(pdev, ks_pcie); + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + goto err_get_sync; + } - ret = ks_add_pcie_port(ks_pcie, pdev); + ret = ks_pcie_add_pcie_port(ks_pcie, pdev); if (ret < 0) - goto fail_clk; + goto err_get_sync; return 0; -fail_clk: - clk_disable_unprepare(ks_pcie->clk); + +err_get_sync: + pm_runtime_put(dev); + pm_runtime_disable(dev); + ks_pcie_disable_phy(ks_pcie); + +err_link: + while (--i >= 0 && link[i]) + device_link_del(link[i]); return ret; } +static int __exit ks_pcie_remove(struct platform_device *pdev) +{ + struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); + struct device_link **link = ks_pcie->link; + int num_lanes = ks_pcie->num_lanes; + struct device *dev = &pdev->dev; + + pm_runtime_put(dev); + pm_runtime_disable(dev); + ks_pcie_disable_phy(ks_pcie); + while (num_lanes--) + device_link_del(link[num_lanes]); + + return 0; +} + static struct platform_driver ks_pcie_driver __refdata = { .probe = ks_pcie_probe, .remove = __exit_p(ks_pcie_remove), diff --git a/drivers/pci/controller/dwc/pci-keystone.h b/drivers/pci/controller/dwc/pci-keystone.h deleted file mode 100644 index 8a13da391543..000000000000 --- a/drivers/pci/controller/dwc/pci-keystone.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Keystone PCI Controller's common includes - * - * Copyright (C) 2013-2014 Texas Instruments., Ltd. 
- * http://www.ti.com - * - * Author: Murali Karicheri <m-karicheri2@ti.com> - */ - -#define MAX_MSI_HOST_IRQS 8 - -struct keystone_pcie { - struct dw_pcie *pci; - struct clk *clk; - /* PCI Device ID */ - u32 device_id; - int num_legacy_host_irqs; - int legacy_host_irqs[PCI_NUM_INTX]; - struct device_node *legacy_intc_np; - - int num_msi_host_irqs; - int msi_host_irqs[MAX_MSI_HOST_IRQS]; - struct device_node *msi_intc_np; - struct irq_domain *legacy_irq_domain; - struct device_node *np; - - int error_irq; - - /* Application register space */ - void __iomem *va_app_base; /* DT 1st resource */ - struct resource app; -}; - -/* Keystone DW specific MSI controller APIs/definitions */ -void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset); -phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); - -/* Keystone specific PCI controller APIs */ -void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); -void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset); -void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie); -irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie); -int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, - struct device_node *msi_intc_np); -int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 val); -int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, - unsigned int devfn, int where, int size, u32 *val); -void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie); -void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie); -void ks_dw_pcie_msi_irq_ack(int i, struct pcie_port *pp); -void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq); -void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); -void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); -int ks_dw_pcie_msi_host_init(struct pcie_port *pp); -int ks_dw_pcie_link_up(struct dw_pcie *pci); diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index 072fd7ecc29f..0c389a30ef5d 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -172,7 +172,6 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, struct device *dev = &pdev->dev; int ret; - pp->root_bus_nr = -1; pp->ops = &armada8k_pcie_host_ops; pp->irq = platform_get_irq(pdev, 0); diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index 321b56cfd5d0..dba83abfe764 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -399,7 +399,6 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, } } - pp->root_bus_nr = -1; pp->ops = &artpec6_pcie_host_ops; ret = dw_pcie_host_init(pp); @@ -428,7 +427,7 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep) } static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u8 interrupt_num) + enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 8650416f6f9e..1e7b02221eac 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -40,6 +40,39 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) __dw_pcie_ep_reset_bar(pci, bar, 0); } +static u8 
__dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, + u8 cap) +{ + u8 cap_id, next_cap_ptr; + u16 reg; + + reg = dw_pcie_readw_dbi(pci, cap_ptr); + next_cap_ptr = (reg & 0xff00) >> 8; + cap_id = (reg & 0x00ff); + + if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX) + return 0; + + if (cap_id == cap) + return cap_ptr; + + return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); +} + +static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap) +{ + u8 next_cap_ptr; + u16 reg; + + reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); + next_cap_ptr = (reg & 0x00ff); + + if (!next_cap_ptr) + return 0; + + return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); +} + static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, struct pci_epf_header *hdr) { @@ -213,36 +246,84 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no) { - int val; struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + u32 val, reg; + + if (!ep->msi_cap) + return -EINVAL; + + reg = ep->msi_cap + PCI_MSI_FLAGS; + val = dw_pcie_readw_dbi(pci, reg); + if (!(val & PCI_MSI_FLAGS_ENABLE)) + return -EINVAL; + + val = (val & PCI_MSI_FLAGS_QSIZE) >> 4; + + return val; +} + +static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + u32 val, reg; + + if (!ep->msi_cap) + return -EINVAL; + + reg = ep->msi_cap + PCI_MSI_FLAGS; + val = dw_pcie_readw_dbi(pci, reg); + val &= ~PCI_MSI_FLAGS_QMASK; + val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK; + dw_pcie_dbi_ro_wr_en(pci); + dw_pcie_writew_dbi(pci, reg, val); + dw_pcie_dbi_ro_wr_dis(pci); + + return 0; +} + +static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) +{ + struct dw_pcie_ep *ep = epc_get_drvdata(epc); + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + u32 val, reg; - val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); - if (!(val & MSI_CAP_MSI_EN_MASK)) + if (!ep->msix_cap) return -EINVAL; - val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT; + reg = ep->msix_cap + PCI_MSIX_FLAGS; + val = dw_pcie_readw_dbi(pci, reg); + if (!(val & PCI_MSIX_FLAGS_ENABLE)) + return -EINVAL; + + val &= PCI_MSIX_FLAGS_QSIZE; + return val; } -static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int) +static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts) { - int val; struct dw_pcie_ep *ep = epc_get_drvdata(epc); struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + u32 val, reg; + + if (!ep->msix_cap) + return -EINVAL; - val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); - val &= ~MSI_CAP_MMC_MASK; - val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK; + reg = ep->msix_cap + PCI_MSIX_FLAGS; + val = dw_pcie_readw_dbi(pci, reg); + val &= ~PCI_MSIX_FLAGS_QSIZE; + val |= interrupts; dw_pcie_dbi_ro_wr_en(pci); - dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val); + dw_pcie_writew_dbi(pci, reg, val); dw_pcie_dbi_ro_wr_dis(pci); return 0; } static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, - enum pci_epc_irq_type type, u8 interrupt_num) + enum pci_epc_irq_type type, u16 interrupt_num) { struct dw_pcie_ep *ep = epc_get_drvdata(epc); @@ -282,32 +363,52 @@ static const struct pci_epc_ops epc_ops = { .unmap_addr = dw_pcie_ep_unmap_addr, .set_msi = dw_pcie_ep_set_msi, .get_msi = dw_pcie_ep_get_msi, + .set_msix = dw_pcie_ep_set_msix, + .get_msix = dw_pcie_ep_get_msix, .raise_irq = 
dw_pcie_ep_raise_irq, .start = dw_pcie_ep_start, .stop = dw_pcie_ep_stop, }; +int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct device *dev = pci->dev; + + dev_err(dev, "EP cannot trigger legacy IRQs\n"); + + return -EINVAL; +} + int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u8 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); struct pci_epc *epc = ep->epc; u16 msg_ctrl, msg_data; - u32 msg_addr_lower, msg_addr_upper; + u32 msg_addr_lower, msg_addr_upper, reg; u64 msg_addr; bool has_upper; int ret; + if (!ep->msi_cap) + return -EINVAL; + /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */ - msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); + reg = ep->msi_cap + PCI_MSI_FLAGS; + msg_ctrl = dw_pcie_readw_dbi(pci, reg); has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT); - msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32); + reg = ep->msi_cap + PCI_MSI_ADDRESS_LO; + msg_addr_lower = dw_pcie_readl_dbi(pci, reg); if (has_upper) { - msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32); - msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64); + reg = ep->msi_cap + PCI_MSI_ADDRESS_HI; + msg_addr_upper = dw_pcie_readl_dbi(pci, reg); + reg = ep->msi_cap + PCI_MSI_DATA_64; + msg_data = dw_pcie_readw_dbi(pci, reg); } else { msg_addr_upper = 0; - msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32); + reg = ep->msi_cap + PCI_MSI_DATA_32; + msg_data = dw_pcie_readw_dbi(pci, reg); } msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, @@ -322,6 +423,64 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, return 0; } +int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, + u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct pci_epc *epc = ep->epc; + u16 tbl_offset, bir; + u32 bar_addr_upper, bar_addr_lower; + u32 msg_addr_upper, msg_addr_lower; + u32 reg, msg_data, vec_ctrl; + u64 tbl_addr, msg_addr, reg_u64; + void __iomem *msix_tbl; + int ret; + + reg = ep->msix_cap + PCI_MSIX_TABLE; + tbl_offset = dw_pcie_readl_dbi(pci, reg); + bir = (tbl_offset & PCI_MSIX_TABLE_BIR); + tbl_offset &= PCI_MSIX_TABLE_OFFSET; + tbl_offset >>= 3; + + reg = PCI_BASE_ADDRESS_0 + (4 * bir); + bar_addr_upper = 0; + bar_addr_lower = dw_pcie_readl_dbi(pci, reg); + reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK); + if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64) + bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4); + + tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower; + tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE)); + tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK; + + msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr, + PCI_MSIX_ENTRY_SIZE); + if (!msix_tbl) + return -EINVAL; + + msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR); + msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR); + msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; + msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA); + vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL); + + iounmap(msix_tbl); + + if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) + return -EPERM; + + ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, + epc->mem->page_size); + if (ret) + return ret; + + writel(msg_data, ep->msi_mem); + + dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); + + return 0; +} + void dw_pcie_ep_exit(struct 
dw_pcie_ep *ep) { struct pci_epc *epc = ep->epc; @@ -386,15 +545,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) return -ENOMEM; ep->outbound_addr = addr; - if (ep->ops->ep_init) - ep->ops->ep_init(ep); - epc = devm_pci_epc_create(dev, &epc_ops); if (IS_ERR(epc)) { dev_err(dev, "Failed to create epc device\n"); return PTR_ERR(epc); } + ep->epc = epc; + epc_set_drvdata(epc, ep); + + if (ep->ops->ep_init) + ep->ops->ep_init(ep); + ret = of_property_read_u8(np, "max-functions", &epc->max_functions); if (ret < 0) epc->max_functions = 1; @@ -409,15 +571,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, epc->mem->page_size); if (!ep->msi_mem) { - dev_err(dev, "Failed to reserve memory for MSI\n"); + dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n"); return -ENOMEM; } + ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI); - epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER; - EPC_FEATURE_SET_BAR(epc->features, BAR_0); + ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX); - ep->epc = epc; - epc_set_drvdata(epc, ep); dw_pcie_setup(pci); return 0; diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 5937fed4c938..c12bf794d69c 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -70,24 +70,29 @@ static const struct dw_pcie_ops dw_pcie_ops = { static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + struct pci_epc *epc = ep->epc; enum pci_barno bar; for (bar = BAR_0; bar <= BAR_5; bar++) dw_pcie_ep_reset_bar(pci, bar); + + epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER; + epc->features |= EPC_FEATURE_MSIX_AVAILABLE; } static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, enum pci_epc_irq_type type, - u8 interrupt_num) + u16 interrupt_num) { struct dw_pcie *pci = to_dw_pcie_from_ep(ep); switch (type) { case PCI_EPC_IRQ_LEGACY: - dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); - return -EINVAL; + return dw_pcie_ep_raise_legacy_irq(ep, func_no); case PCI_EPC_IRQ_MSI: return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); + case PCI_EPC_IRQ_MSIX: + return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); default: dev_err(pci->dev, "UNKNOWN IRQ type\n"); } @@ -118,7 +123,6 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie, return pp->msi_irq; } - pp->root_bus_nr = -1; pp->ops = &dw_plat_pcie_host_ops; ret = dw_pcie_host_init(pp); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 778c4f76a884..2153956a0b20 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, if (val & PCIE_ATU_ENABLE) return; - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + mdelay(LINK_WAIT_IATU); } dev_err(pci->dev, "Outbound iATU is not being enabled\n"); } @@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, if (val & PCIE_ATU_ENABLE) return; - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + mdelay(LINK_WAIT_IATU); } dev_err(pci->dev, "Outbound iATU is not being enabled\n"); } @@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, if (val & PCIE_ATU_ENABLE) return 0; - usleep_range(LINK_WAIT_IATU_MIN, 
LINK_WAIT_IATU_MAX); + mdelay(LINK_WAIT_IATU); } dev_err(pci->dev, "Inbound iATU is not being enabled\n"); @@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, if (val & PCIE_ATU_ENABLE) return 0; - usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); + mdelay(LINK_WAIT_IATU); } dev_err(pci->dev, "Inbound iATU is not being enabled\n"); diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index bee4e2535a61..0989d880ac46 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -26,8 +26,7 @@ /* Parameters for the waiting for iATU enabled routine */ #define LINK_WAIT_MAX_IATU_RETRIES 5 -#define LINK_WAIT_IATU_MIN 9000 -#define LINK_WAIT_IATU_MAX 10000 +#define LINK_WAIT_IATU 9 /* Synopsys-specific PCIe configuration registers */ #define PCIE_PORT_LINK_CONTROL 0x710 @@ -37,6 +36,10 @@ #define PORT_LINK_MODE_4_LANES (0x7 << 16) #define PORT_LINK_MODE_8_LANES (0xf << 16) +#define PCIE_PORT_DEBUG0 0x728 +#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f +#define PORT_LOGIC_LTSSM_STATE_L0 0x11 + #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) #define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) @@ -96,17 +99,6 @@ #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ ((0x3 << 20) | ((region) << 9) | (0x1 << 8)) -#define MSI_MESSAGE_CONTROL 0x52 -#define MSI_CAP_MMC_SHIFT 1 -#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT) -#define MSI_CAP_MME_SHIFT 4 -#define MSI_CAP_MSI_EN_MASK 0x1 -#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT) -#define MSI_MESSAGE_ADDR_L32 0x54 -#define MSI_MESSAGE_ADDR_U32 0x58 -#define MSI_MESSAGE_DATA_32 0x58 -#define MSI_MESSAGE_DATA_64 0x5C - #define MAX_MSI_IRQS 256 #define MAX_MSI_IRQS_PER_CTRL 32 #define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL) @@ -191,7 +183,7 @@ enum dw_pcie_as_type { struct dw_pcie_ep_ops { void (*ep_init)(struct dw_pcie_ep *ep); int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no, - enum pci_epc_irq_type type, u8 interrupt_num); + enum pci_epc_irq_type type, u16 interrupt_num); }; struct dw_pcie_ep { @@ -208,6 +200,8 @@ struct dw_pcie_ep { u32 num_ob_windows; void __iomem *msi_mem; phys_addr_t msi_mem_phys; + u8 msi_cap; /* MSI capability offset */ + u8 msix_cap; /* MSI-X capability offset */ }; struct dw_pcie_ops { @@ -357,8 +351,11 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp) void dw_pcie_ep_linkup(struct dw_pcie_ep *ep); int dw_pcie_ep_init(struct dw_pcie_ep *ep); void dw_pcie_ep_exit(struct dw_pcie_ep *ep); +int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no); int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u8 interrupt_num); +int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, + u16 interrupt_num); void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar); #else static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) @@ -374,12 +371,23 @@ static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) { } +static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) +{ + return 0; +} + static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u8 interrupt_num) { return 0; } +static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, + u16 interrupt_num) +{ + return 0; +} + static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) { } diff --git a/drivers/pci/controller/dwc/pcie-histb.c 
b/drivers/pci/controller/dwc/pcie-histb.c index 3611d6ce9a92..7b32e619b959 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -420,7 +420,6 @@ static int histb_pcie_probe(struct platform_device *pdev) phy_init(hipcie->phy); } - pp->root_bus_nr = -1; pp->ops = &histb_pcie_host_ops; platform_set_drvdata(pdev, hipcie); diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index d2970a009eb5..9b599296205d 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -430,6 +430,9 @@ static int kirin_pcie_host_init(struct pcie_port *pp) { kirin_pcie_establish_link(pp); + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); + return 0; } @@ -445,9 +448,34 @@ static const struct dw_pcie_host_ops kirin_pcie_host_ops = { .host_init = kirin_pcie_host_init, }; -static int __init kirin_add_pcie_port(struct dw_pcie *pci, - struct platform_device *pdev) +static int kirin_pcie_add_msi(struct dw_pcie *pci, + struct platform_device *pdev) { + int irq; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, + "failed to get MSI IRQ (%d)\n", irq); + return irq; + } + + pci->pp.msi_irq = irq; + } + + return 0; +} + +static int kirin_add_pcie_port(struct dw_pcie *pci, + struct platform_device *pdev) +{ + int ret; + + ret = kirin_pcie_add_msi(pci, pdev); + if (ret) + return ret; + pci->pp.ops = &kirin_pcie_host_ops; return dw_pcie_host_init(&pci->pp); diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index a1d0198081a6..d185ea5fe996 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -1089,7 +1089,6 @@ static int qcom_pcie_host_init(struct pcie_port *pp) struct qcom_pcie *pcie = to_qcom_pcie(pci); int ret; - pm_runtime_get_sync(pci->dev); qcom_ep_reset_assert(pcie); ret = pcie->ops->init(pcie); @@ -1126,7 +1125,6 @@ err_disable_phy: phy_power_off(pcie->phy); err_deinit: pcie->ops->deinit(pcie); - pm_runtime_put(pci->dev); return ret; } @@ -1216,6 +1214,12 @@ static int qcom_pcie_probe(struct platform_device *pdev) return -ENOMEM; pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_disable(dev); + return ret; + } + pci->dev = dev; pci->ops = &dw_pcie_ops; pp = &pci->pp; @@ -1225,45 +1229,56 @@ static int qcom_pcie_probe(struct platform_device *pdev) pcie->ops = of_device_get_match_data(dev); pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); - if (IS_ERR(pcie->reset)) - return PTR_ERR(pcie->reset); + if (IS_ERR(pcie->reset)) { + ret = PTR_ERR(pcie->reset); + goto err_pm_runtime_put; + } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); pcie->parf = devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->parf)) - return PTR_ERR(pcie->parf); + if (IS_ERR(pcie->parf)) { + ret = PTR_ERR(pcie->parf); + goto err_pm_runtime_put; + } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); + if (IS_ERR(pci->dbi_base)) { + ret = PTR_ERR(pci->dbi_base); + goto err_pm_runtime_put; + } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); pcie->elbi = devm_ioremap_resource(dev, res); - if (IS_ERR(pcie->elbi)) - return PTR_ERR(pcie->elbi); + if (IS_ERR(pcie->elbi)) { + ret = PTR_ERR(pcie->elbi); + goto err_pm_runtime_put; + } pcie->phy = 
devm_phy_optional_get(dev, "pciephy"); - if (IS_ERR(pcie->phy)) - return PTR_ERR(pcie->phy); + if (IS_ERR(pcie->phy)) { + ret = PTR_ERR(pcie->phy); + goto err_pm_runtime_put; + } ret = pcie->ops->get_resources(pcie); if (ret) - return ret; + goto err_pm_runtime_put; - pp->root_bus_nr = -1; pp->ops = &qcom_pcie_dw_ops; if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) - return pp->msi_irq; + if (pp->msi_irq < 0) { + ret = pp->msi_irq; + goto err_pm_runtime_put; + } } ret = phy_init(pcie->phy); if (ret) { pm_runtime_disable(&pdev->dev); - return ret; + goto err_pm_runtime_put; } platform_set_drvdata(pdev, pcie); @@ -1272,10 +1287,16 @@ static int qcom_pcie_probe(struct platform_device *pdev) if (ret) { dev_err(dev, "cannot initialize host\n"); pm_runtime_disable(&pdev->dev); - return ret; + goto err_pm_runtime_put; } return 0; + +err_pm_runtime_put: + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return ret; } static const struct of_device_id qcom_pcie_match[] = { diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index ecb58f7b7566..7d0cdfd8138b 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -210,7 +210,6 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, return ret; } - pp->root_bus_nr = -1; pp->ops = &spear13xx_pcie_host_ops; ret = dw_pcie_host_init(pp); diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 0fae816fba39..750081c1cb48 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -20,12 +20,16 @@ #include <linux/of_pci.h> #include "../pci.h" +#include "../pci-bridge-emul.h" /* PCIe core registers */ +#define PCIE_CORE_DEV_ID_REG 0x0 #define PCIE_CORE_CMD_STATUS_REG 0x4 #define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0) #define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1) #define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2) +#define PCIE_CORE_DEV_REV_REG 0x8 +#define PCIE_CORE_PCIEXP_CAP 0xc0 #define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8 #define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4) #define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 @@ -41,7 +45,10 @@ #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6) #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7) #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8) - +#define PCIE_CORE_INT_A_ASSERT_ENABLE 1 +#define PCIE_CORE_INT_B_ASSERT_ENABLE 2 +#define PCIE_CORE_INT_C_ASSERT_ENABLE 3 +#define PCIE_CORE_INT_D_ASSERT_ENABLE 4 /* PIO registers base address and register offsets */ #define PIO_BASE_ADDR 0x4000 #define PIO_CTRL (PIO_BASE_ADDR + 0x0) @@ -93,7 +100,9 @@ #define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5) #define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6) #define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10) +#define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30) #define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40) +#define PCIE_MSG_PM_PME_MASK BIT(7) #define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44) #define PCIE_ISR0_MSI_INT_PENDING BIT(24) #define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val)) @@ -111,24 +120,6 @@ #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) -/* PCIe window configuration */ -#define OB_WIN_BASE_ADDR 0x4c00 -#define OB_WIN_BLOCK_SIZE 0x20 -#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \ - OB_WIN_BLOCK_SIZE * (win) + \ - (offset)) -#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00) -#define 
OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04) -#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08) -#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c) -#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10) -#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14) -#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18) - -/* PCIe window types */ -#define OB_PCIE_MEM 0x0 -#define OB_PCIE_IO 0x4 - /* LMI registers base address and register offsets */ #define LMI_BASE_ADDR 0x6000 #define CFG_REG (LMI_BASE_ADDR + 0x0) @@ -207,6 +198,7 @@ struct advk_pcie { struct mutex msi_used_lock; u16 msi_msg; int root_bus_nr; + struct pci_bridge_emul bridge; }; static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg) @@ -247,34 +239,9 @@ static int advk_pcie_wait_for_link(struct advk_pcie *pcie) return -ETIMEDOUT; } -/* - * Set PCIe address window register which could be used for memory - * mapping. - */ -static void advk_pcie_set_ob_win(struct advk_pcie *pcie, - u32 win_num, u32 match_ms, - u32 match_ls, u32 mask_ms, - u32 mask_ls, u32 remap_ms, - u32 remap_ls, u32 action) -{ - advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num)); - advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num)); - advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num)); - advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num)); - advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num)); - advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num)); - advk_writel(pcie, action, OB_WIN_ACTIONS(win_num)); - advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num)); -} - static void advk_pcie_setup_hw(struct advk_pcie *pcie) { u32 reg; - int i; - - /* Point PCIe unit MBUS decode windows to DRAM space */ - for (i = 0; i < 8; i++) - advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0); /* Set to Direct mode */ reg = advk_readl(pcie, CTRL_CONFIG_REG); @@ -433,6 +400,118 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie) return -ETIMEDOUT; } + +static pci_bridge_emul_read_status_t +advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, + int reg, u32 *value) +{ + struct advk_pcie *pcie = bridge->data; + + + switch (reg) { + case PCI_EXP_SLTCTL: + *value = PCI_EXP_SLTSTA_PDS << 16; + return PCI_BRIDGE_EMUL_HANDLED; + + case PCI_EXP_RTCTL: { + u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); + *value = (val & PCIE_MSG_PM_PME_MASK) ? 
PCI_EXP_RTCTL_PMEIE : 0; + return PCI_BRIDGE_EMUL_HANDLED; + } + + case PCI_EXP_RTSTA: { + u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG); + u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG); + *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16); + return PCI_BRIDGE_EMUL_HANDLED; + } + + case PCI_CAP_LIST_ID: + case PCI_EXP_DEVCAP: + case PCI_EXP_DEVCTL: + case PCI_EXP_LNKCAP: + case PCI_EXP_LNKCTL: + *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); + return PCI_BRIDGE_EMUL_HANDLED; + default: + return PCI_BRIDGE_EMUL_NOT_HANDLED; + } + +} + +static void +advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, + int reg, u32 old, u32 new, u32 mask) +{ + struct advk_pcie *pcie = bridge->data; + + switch (reg) { + case PCI_EXP_DEVCTL: + case PCI_EXP_LNKCTL: + advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); + break; + + case PCI_EXP_RTCTL: + new = (new & PCI_EXP_RTCTL_PMEIE) << 3; + advk_writel(pcie, new, PCIE_ISR0_MASK_REG); + break; + + case PCI_EXP_RTSTA: + new = (new & PCI_EXP_RTSTA_PME) >> 9; + advk_writel(pcie, new, PCIE_ISR0_REG); + break; + + default: + break; + } +} + +struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = { + .read_pcie = advk_pci_bridge_emul_pcie_conf_read, + .write_pcie = advk_pci_bridge_emul_pcie_conf_write, +}; + +/* + * Initialize the configuration space of the PCI-to-PCI bridge + * associated with the given PCIe interface. + */ +static void advk_sw_pci_bridge_init(struct advk_pcie *pcie) +{ + struct pci_bridge_emul *bridge = &pcie->bridge; + + bridge->conf.vendor = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff; + bridge->conf.device = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16; + bridge->conf.class_revision = + advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff; + + /* Support 32 bits I/O addressing */ + bridge->conf.iobase = PCI_IO_RANGE_TYPE_32; + bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32; + + /* Support 64 bits memory pref */ + bridge->conf.pref_mem_base = PCI_PREF_RANGE_TYPE_64; + bridge->conf.pref_mem_limit = PCI_PREF_RANGE_TYPE_64; + + /* Support interrupt A for MSI feature */ + bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE; + + bridge->has_pcie = true; + bridge->data = pcie; + bridge->ops = &advk_pci_bridge_emul_ops; + + pci_bridge_emul_init(bridge); + +} + +static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, + int devfn) +{ + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) + return false; + + return true; +} + static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { @@ -440,11 +519,15 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, u32 reg; int ret; - if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) { + if (!advk_pcie_valid_device(pcie, bus, devfn)) { *val = 0xffffffff; return PCIBIOS_DEVICE_NOT_FOUND; } + if (bus->number == pcie->root_bus_nr) + return pci_bridge_emul_conf_read(&pcie->bridge, where, + size, val); + /* Start PIO */ advk_writel(pcie, 0, PIO_START); advk_writel(pcie, 1, PIO_ISR); @@ -452,7 +535,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, /* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); reg &= ~PIO_CTRL_TYPE_MASK; - if (bus->number == pcie->root_bus_nr) + if (bus->primary == pcie->root_bus_nr) reg |= PCIE_CONFIG_RD_TYPE0; else reg |= PCIE_CONFIG_RD_TYPE1; @@ -494,9 +577,13 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int offset; int ret; - if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) + if 
(!advk_pcie_valid_device(pcie, bus, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; + if (bus->number == pcie->root_bus_nr) + return pci_bridge_emul_conf_write(&pcie->bridge, where, + size, val); + if (where % size) return PCIBIOS_SET_FAILED; @@ -507,7 +594,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, /* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); reg &= ~PIO_CTRL_TYPE_MASK; - if (bus->number == pcie->root_bus_nr) + if (bus->primary == pcie->root_bus_nr) reg |= PCIE_CONFIG_WR_TYPE0; else reg |= PCIE_CONFIG_WR_TYPE1; @@ -843,12 +930,6 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) switch (resource_type(res)) { case IORESOURCE_IO: - advk_pcie_set_ob_win(pcie, 1, - upper_32_bits(res->start), - lower_32_bits(res->start), - 0, 0xF8000000, 0, - lower_32_bits(res->start), - OB_PCIE_IO); err = devm_pci_remap_iospace(dev, res, iobase); if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", @@ -857,12 +938,6 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) } break; case IORESOURCE_MEM: - advk_pcie_set_ob_win(pcie, 0, - upper_32_bits(res->start), - lower_32_bits(res->start), - 0x0, 0xF8000000, 0, - lower_32_bits(res->start), - (2 << 20) | OB_PCIE_MEM); res_valid |= !(res->flags & IORESOURCE_PREFETCH); break; case IORESOURCE_BUS: @@ -889,7 +964,6 @@ static int advk_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct advk_pcie *pcie; struct resource *res; - struct pci_bus *bus, *child; struct pci_host_bridge *bridge; int ret, irq; @@ -922,6 +996,8 @@ static int advk_pcie_probe(struct platform_device *pdev) advk_pcie_setup_hw(pcie); + advk_sw_pci_bridge_init(pcie); + ret = advk_pcie_init_irq_domain(pcie); if (ret) { dev_err(dev, "Failed to initialize irq\n"); @@ -943,21 +1019,13 @@ static int advk_pcie_probe(struct platform_device *pdev) bridge->map_irq = of_irq_parse_and_map_pci; bridge->swizzle_irq = pci_common_swizzle; - ret = pci_scan_root_bus_bridge(bridge); + ret = pci_host_probe(bridge); if (ret < 0) { advk_pcie_remove_msi_irq_domain(pcie); advk_pcie_remove_irq_domain(pcie); return ret; } - bus = bridge->bus; - - pci_bus_assign_resources(bus); - - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); return 0; } diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index d8f10451f273..c742881b5061 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -58,9 +58,7 @@ err_out: int pci_host_common_probe(struct platform_device *pdev, struct pci_ecam_ops *ops) { - const char *type; struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; struct pci_host_bridge *bridge; struct pci_config_window *cfg; struct list_head resources; @@ -70,12 +68,6 @@ int pci_host_common_probe(struct platform_device *pdev, if (!bridge) return -ENOMEM; - type = of_get_property(np, "device_type", NULL); - if (!type || strcmp(type, "pci")) { - dev_err(dev, "invalid \"device_type\" %s\n", type); - return -EINVAL; - } - of_pci_check_probe_only(); /* Parse and map our Configuration Space windows */ diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index f6325f1a89e8..9ba4d12c179c 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -45,6 +45,7 @@ #include <linux/irqdomain.h> #include <asm/irqdomain.h> #include <asm/apic.h> +#include <linux/irq.h> 
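(Aside on the hv_pci slot support added below: SLOT_NAME_SIZE is 11 because the slot name is a 32-bit serial number printed in decimal, and UINT_MAX is "4294967295" -- ten digits plus a terminating NUL. A minimal standalone C sketch checking that bound; illustrative only, not part of this patch:)

#include <limits.h>
#include <stdio.h>

int main(void)
{
	char name[11];	/* mirrors SLOT_NAME_SIZE */
	int n = snprintf(name, sizeof(name), "%u", UINT_MAX);

	/* n == 10: the worst-case serial number plus its NUL fits exactly */
	printf("\"%s\" uses %d chars + NUL\n", name, n);
	return 0;
}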
#include <linux/msi.h> #include <linux/hyperv.h> #include <linux/refcount.h> @@ -88,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version; #define STATUS_REVISION_MISMATCH 0xC0000059 +/* space for 32bit serial number as string */ +#define SLOT_NAME_SIZE 11 + /* * Message Types */ @@ -493,6 +497,7 @@ struct hv_pci_dev { struct list_head list_entry; refcount_t refs; enum hv_pcichild_state state; + struct pci_slot *pci_slot; struct pci_function_description desc; bool reported_missing; struct hv_pcibus_device *hbus; @@ -1456,6 +1461,36 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus) spin_unlock_irqrestore(&hbus->device_list_lock, flags); } +/* + * Assign entries in sysfs pci slot directory. + * + * Note that this function does not need to lock the children list + * because it is called from pci_devices_present_work which + * is serialized with hv_eject_device_work because they are on the + * same ordered workqueue. Therefore hbus->children list will not change + * even when pci_create_slot sleeps. + */ +static void hv_pci_assign_slots(struct hv_pcibus_device *hbus) +{ + struct hv_pci_dev *hpdev; + char name[SLOT_NAME_SIZE]; + int slot_nr; + + list_for_each_entry(hpdev, &hbus->children, list_entry) { + if (hpdev->pci_slot) + continue; + + slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot)); + snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser); + hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr, + name, NULL); + if (IS_ERR(hpdev->pci_slot)) { + pr_warn("pci_create slot %s failed\n", name); + hpdev->pci_slot = NULL; + } + } +} + /** * create_root_hv_pci_bus() - Expose a new root PCI bus * @hbus: Root PCI bus, as understood by this driver @@ -1479,6 +1514,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); pci_bus_assign_resources(hbus->pci_bus); + hv_pci_assign_slots(hbus); pci_bus_add_devices(hbus->pci_bus); pci_unlock_rescan_remove(); hbus->state = hv_pcibus_installed; @@ -1545,7 +1581,7 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, unsigned long flags; int ret; - hpdev = kzalloc(sizeof(*hpdev), GFP_ATOMIC); + hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL); if (!hpdev) return NULL; @@ -1741,6 +1777,7 @@ static void pci_devices_present_work(struct work_struct *work) */ pci_lock_rescan_remove(); pci_scan_child_bus(hbus->pci_bus); + hv_pci_assign_slots(hbus); pci_unlock_rescan_remove(); break; @@ -1857,6 +1894,9 @@ static void hv_eject_device_work(struct work_struct *work) list_del(&hpdev->list_entry); spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); + if (hpdev->pci_slot) + pci_destroy_slot(hpdev->pci_slot); + memset(&ctxt, 0, sizeof(ctxt)); ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 23e270839e6a..fa0fc46edb0c 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -22,6 +22,7 @@ #include <linux/of_platform.h> #include "../pci.h" +#include "../pci-bridge-emul.h" /* * PCIe unit register offsets. 
@@ -63,61 +64,6 @@ #define PCIE_DEBUG_CTRL 0x1a60 #define PCIE_DEBUG_SOFT_RESET BIT(20) -enum { - PCISWCAP = PCI_BRIDGE_CONTROL + 2, - PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID, - PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP, - PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL, - PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP, - PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL, - PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP, - PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL, - PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL, - PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA, - PCISWCAP_EXP_DEVCAP2 = PCISWCAP + PCI_EXP_DEVCAP2, - PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2, - PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2, - PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2, - PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2, - PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2, -}; - -/* PCI configuration space of a PCI-to-PCI bridge */ -struct mvebu_sw_pci_bridge { - u16 vendor; - u16 device; - u16 command; - u16 status; - u16 class; - u8 interface; - u8 revision; - u8 bist; - u8 header_type; - u8 latency_timer; - u8 cache_line_size; - u32 bar[2]; - u8 primary_bus; - u8 secondary_bus; - u8 subordinate_bus; - u8 secondary_latency_timer; - u8 iobase; - u8 iolimit; - u16 secondary_status; - u16 membase; - u16 memlimit; - u16 iobaseupper; - u16 iolimitupper; - u32 romaddr; - u8 intline; - u8 intpin; - u16 bridgectrl; - - /* PCI express capability */ - u32 pcie_sltcap; - u16 pcie_devctl; - u16 pcie_rtctl; -}; - struct mvebu_pcie_port; /* Structure representing all PCIe interfaces */ @@ -125,6 +71,7 @@ struct mvebu_pcie { struct platform_device *pdev; struct mvebu_pcie_port *ports; struct msi_controller *msi; + struct list_head resources; struct resource io; struct resource realio; struct resource mem; @@ -152,7 +99,7 @@ struct mvebu_pcie_port { struct clk *clk; struct gpio_desc *reset_gpio; char *reset_name; - struct mvebu_sw_pci_bridge bridge; + struct pci_bridge_emul bridge; struct device_node *dn; struct mvebu_pcie *pcie; struct mvebu_pcie_window memwin; @@ -414,11 +361,12 @@ static void mvebu_pcie_set_window(struct mvebu_pcie_port *port, static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) { struct mvebu_pcie_window desired = {}; + struct pci_bridge_emul_conf *conf = &port->bridge.conf; /* Are the new iobase/iolimit values invalid? */ - if (port->bridge.iolimit < port->bridge.iobase || - port->bridge.iolimitupper < port->bridge.iobaseupper || - !(port->bridge.command & PCI_COMMAND_IO)) { + if (conf->iolimit < conf->iobase || + conf->iolimitupper < conf->iobaseupper || + !(conf->command & PCI_COMMAND_IO)) { mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, &port->iowin); return; @@ -437,11 +385,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) * specifications. iobase is the bus address, port->iowin_base * is the CPU address. 
*/ - desired.remap = ((port->bridge.iobase & 0xF0) << 8) | - (port->bridge.iobaseupper << 16); + desired.remap = ((conf->iobase & 0xF0) << 8) | + (conf->iobaseupper << 16); desired.base = port->pcie->io.start + desired.remap; - desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | - (port->bridge.iolimitupper << 16)) - + desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) | + (conf->iolimitupper << 16)) - desired.remap) + 1; @@ -452,10 +400,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) { struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP}; + struct pci_bridge_emul_conf *conf = &port->bridge.conf; /* Are the new membase/memlimit values invalid? */ - if (port->bridge.memlimit < port->bridge.membase || - !(port->bridge.command & PCI_COMMAND_MEMORY)) { + if (conf->memlimit < conf->membase || + !(conf->command & PCI_COMMAND_MEMORY)) { mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, &port->memwin); return; @@ -467,130 +416,32 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) * window to setup, according to the PCI-to-PCI bridge * specifications. */ - desired.base = ((port->bridge.membase & 0xFFF0) << 16); - desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - + desired.base = ((conf->membase & 0xFFF0) << 16); + desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) - desired.base + 1; mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, &port->memwin); } -/* - * Initialize the configuration space of the PCI-to-PCI bridge - * associated with the given PCIe interface. - */ -static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port) +static pci_bridge_emul_read_status_t +mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, + int reg, u32 *value) { - struct mvebu_sw_pci_bridge *bridge = &port->bridge; - - memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge)); - - bridge->class = PCI_CLASS_BRIDGE_PCI; - bridge->vendor = PCI_VENDOR_ID_MARVELL; - bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; - bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff; - bridge->header_type = PCI_HEADER_TYPE_BRIDGE; - bridge->cache_line_size = 0x10; - - /* We support 32 bits I/O addressing */ - bridge->iobase = PCI_IO_RANGE_TYPE_32; - bridge->iolimit = PCI_IO_RANGE_TYPE_32; - - /* Add capabilities */ - bridge->status = PCI_STATUS_CAP_LIST; -} - -/* - * Read the configuration space of the PCI-to-PCI bridge associated to - * the given PCIe interface. - */ -static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, - unsigned int where, int size, u32 *value) -{ - struct mvebu_sw_pci_bridge *bridge = &port->bridge; - - switch (where & ~3) { - case PCI_VENDOR_ID: - *value = bridge->device << 16 | bridge->vendor; - break; - - case PCI_COMMAND: - *value = bridge->command | bridge->status << 16; - break; - - case PCI_CLASS_REVISION: - *value = bridge->class << 16 | bridge->interface << 8 | - bridge->revision; - break; - - case PCI_CACHE_LINE_SIZE: - *value = bridge->bist << 24 | bridge->header_type << 16 | - bridge->latency_timer << 8 | bridge->cache_line_size; - break; - - case PCI_BASE_ADDRESS_0 ... 
PCI_BASE_ADDRESS_1: - *value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4]; - break; - - case PCI_PRIMARY_BUS: - *value = (bridge->secondary_latency_timer << 24 | - bridge->subordinate_bus << 16 | - bridge->secondary_bus << 8 | - bridge->primary_bus); - break; - - case PCI_IO_BASE: - if (!mvebu_has_ioport(port)) - *value = bridge->secondary_status << 16; - else - *value = (bridge->secondary_status << 16 | - bridge->iolimit << 8 | - bridge->iobase); - break; - - case PCI_MEMORY_BASE: - *value = (bridge->memlimit << 16 | bridge->membase); - break; - - case PCI_PREF_MEMORY_BASE: - *value = 0; - break; - - case PCI_IO_BASE_UPPER16: - *value = (bridge->iolimitupper << 16 | bridge->iobaseupper); - break; - - case PCI_CAPABILITY_LIST: - *value = PCISWCAP; - break; - - case PCI_ROM_ADDRESS1: - *value = 0; - break; - - case PCI_INTERRUPT_LINE: - /* LINE PIN MIN_GNT MAX_LAT */ - *value = 0; - break; - - case PCISWCAP_EXP_LIST_ID: - /* Set PCIe v2, root port, slot support */ - *value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 | - PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP; - break; + struct mvebu_pcie_port *port = bridge->data; - case PCISWCAP_EXP_DEVCAP: + switch (reg) { + case PCI_EXP_DEVCAP: *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP); break; - case PCISWCAP_EXP_DEVCTL: + case PCI_EXP_DEVCTL: *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) & ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); - *value |= bridge->pcie_devctl; break; - case PCISWCAP_EXP_LNKCAP: + case PCI_EXP_LNKCAP: /* * PCIe requires the clock power management capability to be * hard-wired to zero for downstream ports @@ -599,176 +450,140 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, ~PCI_EXP_LNKCAP_CLKPM; break; - case PCISWCAP_EXP_LNKCTL: + case PCI_EXP_LNKCTL: *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); break; - case PCISWCAP_EXP_SLTCAP: - *value = bridge->pcie_sltcap; - break; - - case PCISWCAP_EXP_SLTCTL: + case PCI_EXP_SLTCTL: *value = PCI_EXP_SLTSTA_PDS << 16; break; - case PCISWCAP_EXP_RTCTL: - *value = bridge->pcie_rtctl; - break; - - case PCISWCAP_EXP_RTSTA: + case PCI_EXP_RTSTA: *value = mvebu_readl(port, PCIE_RC_RTSTA); break; - /* PCIe requires the v2 fields to be hard-wired to zero */ - case PCISWCAP_EXP_DEVCAP2: - case PCISWCAP_EXP_DEVCTL2: - case PCISWCAP_EXP_LNKCAP2: - case PCISWCAP_EXP_LNKCTL2: - case PCISWCAP_EXP_SLTCAP2: - case PCISWCAP_EXP_SLTCTL2: default: - /* - * PCI defines configuration read accesses to reserved or - * unimplemented registers to read as zero and complete - * normally. 
- */ - *value = 0; - return PCIBIOS_SUCCESSFUL; + return PCI_BRIDGE_EMUL_NOT_HANDLED; } - if (size == 2) - *value = (*value >> (8 * (where & 3))) & 0xffff; - else if (size == 1) - *value = (*value >> (8 * (where & 3))) & 0xff; - - return PCIBIOS_SUCCESSFUL; + return PCI_BRIDGE_EMUL_HANDLED; } -/* Write to the PCI-to-PCI bridge configuration space */ -static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, - unsigned int where, int size, u32 value) +static void +mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, + int reg, u32 old, u32 new, u32 mask) { - struct mvebu_sw_pci_bridge *bridge = &port->bridge; - u32 mask, reg; - int err; - - if (size == 4) - mask = 0x0; - else if (size == 2) - mask = ~(0xffff << ((where & 3) * 8)); - else if (size == 1) - mask = ~(0xff << ((where & 3) * 8)); - else - return PCIBIOS_BAD_REGISTER_NUMBER; + struct mvebu_pcie_port *port = bridge->data; + struct pci_bridge_emul_conf *conf = &bridge->conf; - err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, &reg); - if (err) - return err; - - value = (reg & mask) | value << ((where & 3) * 8); - - switch (where & ~3) { + switch (reg) { case PCI_COMMAND: { - u32 old = bridge->command; - if (!mvebu_has_ioport(port)) - value &= ~PCI_COMMAND_IO; + conf->command &= ~PCI_COMMAND_IO; - bridge->command = value & 0xffff; - if ((old ^ bridge->command) & PCI_COMMAND_IO) + if ((old ^ new) & PCI_COMMAND_IO) mvebu_pcie_handle_iobase_change(port); - if ((old ^ bridge->command) & PCI_COMMAND_MEMORY) + if ((old ^ new) & PCI_COMMAND_MEMORY) mvebu_pcie_handle_membase_change(port); - break; - } - case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1: - bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value; break; + } case PCI_IO_BASE: /* - * We also keep bit 1 set, it is a read-only bit that + * We keep bit 1 set, it is a read-only bit that * indicates we support 32 bits addressing for the * I/O */ - bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32; - bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32; + conf->iobase |= PCI_IO_RANGE_TYPE_32; + conf->iolimit |= PCI_IO_RANGE_TYPE_32; mvebu_pcie_handle_iobase_change(port); break; case PCI_MEMORY_BASE: - bridge->membase = value & 0xffff; - bridge->memlimit = value >> 16; mvebu_pcie_handle_membase_change(port); break; case PCI_IO_BASE_UPPER16: - bridge->iobaseupper = value & 0xffff; - bridge->iolimitupper = value >> 16; mvebu_pcie_handle_iobase_change(port); break; case PCI_PRIMARY_BUS: - bridge->primary_bus = value & 0xff; - bridge->secondary_bus = (value >> 8) & 0xff; - bridge->subordinate_bus = (value >> 16) & 0xff; - bridge->secondary_latency_timer = (value >> 24) & 0xff; - mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus); + mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus); break; - case PCISWCAP_EXP_DEVCTL: + default: + break; + } +} + +static void +mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, + int reg, u32 old, u32 new, u32 mask) +{ + struct mvebu_pcie_port *port = bridge->data; + + switch (reg) { + case PCI_EXP_DEVCTL: /* * Armada370 data says these bits must always * be zero when in root complex mode. */ - value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | - PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); - - /* - * If the mask is 0xffff0000, then we only want to write - * the device control register, rather than clearing the - * RW1C bits in the device status register. Mask out the - * status register bits.
- */ - if (mask == 0xffff0000) - value &= 0xffff; + new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | + PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); - mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); + mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); break; - case PCISWCAP_EXP_LNKCTL: + case PCI_EXP_LNKCTL: /* * If we don't support CLKREQ, we must ensure that the * CLKREQ enable bit always reads zero. Since we haven't * had this capability, and it's dependent on board wiring, * disable it for the time being. */ - value &= ~PCI_EXP_LNKCTL_CLKREQ_EN; - - /* - * If the mask is 0xffff0000, then we only want to write - * the link control register, rather than clearing the - * RW1C bits in the link status register. Mask out the - * RW1C status register bits. - */ - if (mask == 0xffff0000) - value &= ~((PCI_EXP_LNKSTA_LABS | - PCI_EXP_LNKSTA_LBMS) << 16); + new &= ~PCI_EXP_LNKCTL_CLKREQ_EN; - mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); + mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); break; - case PCISWCAP_EXP_RTSTA: - mvebu_writel(port, value, PCIE_RC_RTSTA); + case PCI_EXP_RTSTA: + mvebu_writel(port, new, PCIE_RC_RTSTA); break; + } +} - default: - break; +struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = { + .write_base = mvebu_pci_bridge_emul_base_conf_write, + .read_pcie = mvebu_pci_bridge_emul_pcie_conf_read, + .write_pcie = mvebu_pci_bridge_emul_pcie_conf_write, +}; + +/* + * Initialize the configuration space of the PCI-to-PCI bridge + * associated with the given PCIe interface. + */ +static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port) +{ + struct pci_bridge_emul *bridge = &port->bridge; + + bridge->conf.vendor = PCI_VENDOR_ID_MARVELL; + bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; + bridge->conf.class_revision = + mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff; + + if (mvebu_has_ioport(port)) { + /* We support 32 bits I/O addressing */ + bridge->conf.iobase = PCI_IO_RANGE_TYPE_32; + bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32; } - return PCIBIOS_SUCCESSFUL; + bridge->has_pcie = true; + bridge->data = port; + bridge->ops = &mvebu_pci_bridge_emul_ops; + + pci_bridge_emul_init(bridge); } static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys) @@ -788,8 +603,8 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie, if (bus->number == 0 && port->devfn == devfn) return port; if (bus->number != 0 && - bus->number >= port->bridge.secondary_bus && - bus->number <= port->bridge.subordinate_bus) + bus->number >= port->bridge.conf.secondary_bus && + bus->number <= port->bridge.conf.subordinate_bus) return port; } @@ -800,7 +615,7 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie, static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { - struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata); + struct mvebu_pcie *pcie = bus->sysdata; struct mvebu_pcie_port *port; int ret; @@ -810,7 +625,8 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, /* Access the emulated PCI-to-PCI bridge */ if (bus->number == 0) - return mvebu_sw_pci_bridge_write(port, where, size, val); + return pci_bridge_emul_conf_write(&port->bridge, where, + size, val); if (!mvebu_pcie_link_up(port)) return PCIBIOS_DEVICE_NOT_FOUND; @@ -826,7 +642,7 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { - struct mvebu_pcie *pcie = 
sys_to_pcie(bus->sysdata); + struct mvebu_pcie *pcie = bus->sysdata; struct mvebu_pcie_port *port; int ret; @@ -838,7 +654,8 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, /* Access the emulated PCI-to-PCI bridge */ if (bus->number == 0) - return mvebu_sw_pci_bridge_read(port, where, size, val); + return pci_bridge_emul_conf_read(&port->bridge, where, + size, val); if (!mvebu_pcie_link_up(port)) { *val = 0xffffffff; @@ -857,36 +674,6 @@ static struct pci_ops mvebu_pcie_ops = { .write = mvebu_pcie_wr_conf, }; -static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) -{ - struct mvebu_pcie *pcie = sys_to_pcie(sys); - int err, i; - - pcie->mem.name = "PCI MEM"; - pcie->realio.name = "PCI I/O"; - - if (resource_size(&pcie->realio) != 0) - pci_add_resource_offset(&sys->resources, &pcie->realio, - sys->io_offset); - - pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); - pci_add_resource(&sys->resources, &pcie->busn); - - err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources); - if (err) - return 0; - - for (i = 0; i < pcie->nports; i++) { - struct mvebu_pcie_port *port = &pcie->ports[i]; - - if (!port->base) - continue; - mvebu_pcie_setup_hw(port); - } - - return 1; -} - static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, const struct resource *res, resource_size_t start, @@ -917,31 +704,6 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, return start; } -static void mvebu_pcie_enable(struct mvebu_pcie *pcie) -{ - struct hw_pci hw; - - memset(&hw, 0, sizeof(hw)); - -#ifdef CONFIG_PCI_MSI - hw.msi_ctrl = pcie->msi; -#endif - - hw.nr_controllers = 1; - hw.private_data = (void **)&pcie; - hw.setup = mvebu_pcie_setup; - hw.map_irq = of_irq_parse_and_map_pci; - hw.ops = &mvebu_pcie_ops; - hw.align_resource = mvebu_pcie_align_resource; - - pci_common_init_dev(&pcie->pdev->dev, &hw); -} - -/* - * Looks up the list of register addresses encoded into the reg = - * <...> property for one that matches the given port/lane. Once - * found, maps it. - */ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev, struct device_node *np, struct mvebu_pcie_port *port) @@ -1190,46 +952,123 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port) clk_disable_unprepare(port->clk); } -static int mvebu_pcie_probe(struct platform_device *pdev) +/* + * We can't use devm_of_pci_get_host_bridge_resources() because we + * need to parse our special DT properties encoding the MEM and IO + * apertures. 
+ */ +static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) { - struct device *dev = &pdev->dev; - struct mvebu_pcie *pcie; + struct device *dev = &pcie->pdev->dev; struct device_node *np = dev->of_node; - struct device_node *child; - int num, i, ret; + int ret; - pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); - if (!pcie) - return -ENOMEM; + INIT_LIST_HEAD(&pcie->resources); - pcie->pdev = pdev; - platform_set_drvdata(pdev, pcie); + /* Get the bus range */ + ret = of_pci_parse_bus_range(np, &pcie->busn); + if (ret) { + dev_err(dev, "failed to parse bus-range property: %d\n", ret); + return ret; + } + pci_add_resource(&pcie->resources, &pcie->busn); - /* Get the PCIe memory and I/O aperture */ + /* Get the PCIe memory aperture */ mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); if (resource_size(&pcie->mem) == 0) { dev_err(dev, "invalid memory aperture size\n"); return -EINVAL; } + pcie->mem.name = "PCI MEM"; + pci_add_resource(&pcie->resources, &pcie->mem); + + /* Get the PCIe IO aperture */ mvebu_mbus_get_pcie_io_aperture(&pcie->io); if (resource_size(&pcie->io) != 0) { pcie->realio.flags = pcie->io.flags; pcie->realio.start = PCIBIOS_MIN_IO; pcie->realio.end = min_t(resource_size_t, - IO_SPACE_LIMIT, - resource_size(&pcie->io)); - } else - pcie->realio = pcie->io; + IO_SPACE_LIMIT - SZ_64K, + resource_size(&pcie->io) - 1); + pcie->realio.name = "PCI I/O"; - /* Get the bus range */ - ret = of_pci_parse_bus_range(np, &pcie->busn); - if (ret) { - dev_err(dev, "failed to parse bus-range property: %d\n", ret); + pci_add_resource(&pcie->resources, &pcie->realio); + } + + return devm_request_pci_bus_resources(dev, &pcie->resources); +} + +/* + * This is a copy of pci_host_probe(), except that it does the I/O + * remap as the last step, once we are sure we won't fail. + * + * It should be removed once the I/O remap error handling issue has + * been sorted out. + */ +static int mvebu_pci_host_probe(struct pci_host_bridge *bridge) +{ + struct mvebu_pcie *pcie; + struct pci_bus *bus, *child; + int ret; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret < 0) { + dev_err(bridge->dev.parent, "Scanning root bridge failed"); return ret; } + pcie = pci_host_bridge_priv(bridge); + if (resource_size(&pcie->io) != 0) { + unsigned int i; + + for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K) + pci_ioremap_io(i, pcie->io.start + i); + } + + bus = bridge->bus; + + /* + * We insert PCI resources into the iomem_resource and + * ioport_resource trees in either pci_bus_claim_resources() + * or pci_bus_assign_resources(). 
+ */ + if (pci_has_flag(PCI_PROBE_ONLY)) { + pci_bus_claim_resources(bus); + } else { + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } + + pci_bus_add_devices(bus); + return 0; +} + +static int mvebu_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mvebu_pcie *pcie; + struct pci_host_bridge *bridge; + struct device_node *np = dev->of_node; + struct device_node *child; + int num, i, ret; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct mvebu_pcie)); + if (!bridge) + return -ENOMEM; + + pcie = pci_host_bridge_priv(bridge); + pcie->pdev = pdev; + platform_set_drvdata(pdev, pcie); + + ret = mvebu_pcie_parse_request_resources(pcie); + if (ret) + return ret; + num = of_get_available_child_count(np); pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL); @@ -1272,20 +1111,24 @@ static int mvebu_pcie_probe(struct platform_device *pdev) continue; } + mvebu_pcie_setup_hw(port); mvebu_pcie_set_local_dev_nr(port, 1); - mvebu_sw_pci_bridge_init(port); + mvebu_pci_bridge_emul_init(port); } pcie->nports = i; - for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K) - pci_ioremap_io(i, pcie->io.start + i); - - mvebu_pcie_enable(pcie); - - platform_set_drvdata(pdev, pcie); - - return 0; + list_splice_init(&pcie->resources, &bridge->windows); + bridge->dev.parent = dev; + bridge->sysdata = pcie; + bridge->busnr = 0; + bridge->ops = &mvebu_pcie_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + bridge->align_resource = mvebu_pcie_align_resource; + bridge->msi = pcie->msi; + + return mvebu_pci_host_probe(bridge); } static const struct of_device_id mvebu_pcie_of_match_table[] = { diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c index e3fe4124e3af..c3a088910f48 100644 --- a/drivers/pci/controller/pcie-cadence-ep.c +++ b/drivers/pci/controller/pcie-cadence-ep.c @@ -238,7 +238,7 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; - u16 flags, mmc, mme; + u16 flags, mme; /* Validate that the MSI feature is actually enabled. */ flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); @@ -249,7 +249,6 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) * Get the Multiple Message Enable bitfield from the Message Control * register. */ - mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1; mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; return mme; @@ -259,7 +258,6 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, bool is_asserted) { struct cdns_pcie *pcie = &ep->pcie; - u32 r = ep->max_regions - 1; u32 offset; u16 status; u8 msg_code; @@ -269,8 +267,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, /* Set the outbound region if needed. */ if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY || ep->irq_pci_fn != fn)) { - /* Last region was reserved for IRQ writes. */ - cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r, + /* First region was reserved for IRQ writes. */ + cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0, ep->irq_phys_addr); ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY; ep->irq_pci_fn = fn; @@ -348,8 +346,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, /* Set the outbound region if needed. 
*/ if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || ep->irq_pci_fn != fn)) { - /* Last region was reserved for IRQ writes. */ - cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1, + /* First region was reserved for IRQ writes. */ + cdns_pcie_set_outbound_region(pcie, fn, 0, false, ep->irq_phys_addr, pci_addr & ~pci_addr_mask, @@ -357,13 +355,14 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); ep->irq_pci_fn = fn; } - writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); + writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); return 0; } static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, - enum pci_epc_irq_type type, u8 interrupt_num) + enum pci_epc_irq_type type, + u16 interrupt_num) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); @@ -439,6 +438,7 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev) struct pci_epc *epc; struct resource *res; int ret; + int phy_count; ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); if (!ep) @@ -473,6 +473,12 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev) if (!ep->ob_addr) return -ENOMEM; + ret = cdns_pcie_init_phy(dev, pcie); + if (ret) { + dev_err(dev, "failed to init phy\n"); + return ret; + } + platform_set_drvdata(pdev, pcie); pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { @@ -510,6 +516,8 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev) goto free_epc_mem; } ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; + /* Reserve region 0 for IRQs */ + set_bit(0, &ep->ob_region_map); return 0; @@ -521,6 +529,10 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev) err_get_sync: pm_runtime_disable(dev); + cdns_pcie_disable_phy(pcie); + phy_count = pcie->phy_count; + while (phy_count--) + device_link_del(pcie->link[phy_count]); return ret; } @@ -528,6 +540,7 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev) static void cdns_pcie_ep_shutdown(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct cdns_pcie *pcie = dev_get_drvdata(dev); int ret; ret = pm_runtime_put_sync(dev); @@ -536,13 +549,14 @@ static void cdns_pcie_ep_shutdown(struct platform_device *pdev) pm_runtime_disable(dev); - /* The PCIe controller can't be disabled. */ + cdns_pcie_disable_phy(pcie); } static struct platform_driver cdns_pcie_ep_driver = { .driver = { .name = "cdns-pcie-ep", .of_match_table = cdns_pcie_ep_of_match, + .pm = &cdns_pcie_pm_ops, }, .probe = cdns_pcie_ep_probe, .shutdown = cdns_pcie_ep_shutdown, diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c index a4ebbd37b553..97e251090b4f 100644 --- a/drivers/pci/controller/pcie-cadence-host.c +++ b/drivers/pci/controller/pcie-cadence-host.c @@ -58,6 +58,11 @@ static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, return pcie->reg_base + (where & 0xfff); } + /* Check that the link is up */ + if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1)) + return NULL; + /* Clear AXI link-down status */ + cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0); /* Update Output registers for AXI region 0. 
*/ addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) | @@ -230,7 +235,6 @@ static int cdns_pcie_host_init(struct device *dev, static int cdns_pcie_host_probe(struct platform_device *pdev) { - const char *type; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct pci_host_bridge *bridge; @@ -239,6 +243,7 @@ static int cdns_pcie_host_probe(struct platform_device *pdev) struct cdns_pcie *pcie; struct resource *res; int ret; + int phy_count; bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); if (!bridge) @@ -262,12 +267,6 @@ static int cdns_pcie_host_probe(struct platform_device *pdev) rc->device_id = 0xffff; of_property_read_u16(np, "device-id", &rc->device_id); - type = of_get_property(np, "device_type", NULL); - if (!type || strcmp(type, "pci")) { - dev_err(dev, "invalid \"device_type\" %s\n", type); - return -EINVAL; - } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); pcie->reg_base = devm_ioremap_resource(dev, res); if (IS_ERR(pcie->reg_base)) { @@ -290,6 +289,13 @@ static int cdns_pcie_host_probe(struct platform_device *pdev) } pcie->mem_res = res; + ret = cdns_pcie_init_phy(dev, pcie); + if (ret) { + dev_err(dev, "failed to init phy\n"); + return ret; + } + platform_set_drvdata(pdev, pcie); + pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) { @@ -322,15 +328,35 @@ static int cdns_pcie_host_probe(struct platform_device *pdev) err_get_sync: pm_runtime_disable(dev); + cdns_pcie_disable_phy(pcie); + phy_count = pcie->phy_count; + while (phy_count--) + device_link_del(pcie->link[phy_count]); return ret; } +static void cdns_pcie_shutdown(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cdns_pcie *pcie = dev_get_drvdata(dev); + int ret; + + ret = pm_runtime_put_sync(dev); + if (ret < 0) + dev_dbg(dev, "pm_runtime_put_sync failed\n"); + + pm_runtime_disable(dev); + cdns_pcie_disable_phy(pcie); +} + static struct platform_driver cdns_pcie_host_driver = { .driver = { .name = "cdns-pcie-host", .of_match_table = cdns_pcie_host_of_match, + .pm = &cdns_pcie_pm_ops, }, .probe = cdns_pcie_host_probe, + .shutdown = cdns_pcie_shutdown, }; builtin_platform_driver(cdns_pcie_host_driver); diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c index 138d113eb45d..cd795f6fc1e2 100644 --- a/drivers/pci/controller/pcie-cadence.c +++ b/drivers/pci/controller/pcie-cadence.c @@ -124,3 +124,130 @@ void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r) cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0); } + +void cdns_pcie_disable_phy(struct cdns_pcie *pcie) +{ + int i = pcie->phy_count; + + while (i--) { + phy_power_off(pcie->phy[i]); + phy_exit(pcie->phy[i]); + } +} + +int cdns_pcie_enable_phy(struct cdns_pcie *pcie) +{ + int ret; + int i; + + for (i = 0; i < pcie->phy_count; i++) { + ret = phy_init(pcie->phy[i]); + if (ret < 0) + goto err_phy; + + ret = phy_power_on(pcie->phy[i]); + if (ret < 0) { + phy_exit(pcie->phy[i]); + goto err_phy; + } + } + + return 0; + +err_phy: + while (--i >= 0) { + phy_power_off(pcie->phy[i]); + phy_exit(pcie->phy[i]); + } + + return ret; +} + +int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie) +{ + struct device_node *np = dev->of_node; + int phy_count; + struct phy **phy; + struct device_link **link; + int i; + int ret; + const char *name; + + phy_count = of_property_count_strings(np, "phy-names"); + if (phy_count < 1) { + dev_err(dev, "no 
phy-names. PHY will not be initialized\n"); + pcie->phy_count = 0; + return 0; + } + + phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL); + if (!link) + return -ENOMEM; + + for (i = 0; i < phy_count; i++) { + of_property_read_string_index(np, "phy-names", i, &name); + phy[i] = devm_phy_get(dev, name); + if (IS_ERR(phy[i])) { + ret = PTR_ERR(phy[i]); + goto err_phy; + } + link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); + if (!link[i]) { + devm_phy_put(dev, phy[i]); + ret = -EINVAL; + goto err_phy; + } + } + + pcie->phy_count = phy_count; + pcie->phy = phy; + pcie->link = link; + + ret = cdns_pcie_enable_phy(pcie); + if (ret) + goto err_phy; + + return 0; + +err_phy: + while (--i >= 0) { + device_link_del(link[i]); + devm_phy_put(dev, phy[i]); + } + + return ret; +} + +#ifdef CONFIG_PM_SLEEP +static int cdns_pcie_suspend_noirq(struct device *dev) +{ + struct cdns_pcie *pcie = dev_get_drvdata(dev); + + cdns_pcie_disable_phy(pcie); + + return 0; +} + +static int cdns_pcie_resume_noirq(struct device *dev) +{ + struct cdns_pcie *pcie = dev_get_drvdata(dev); + int ret; + + ret = cdns_pcie_enable_phy(pcie); + if (ret) { + dev_err(dev, "failed to enable phy\n"); + return ret; + } + + return 0; +} +#endif + +const struct dev_pm_ops cdns_pcie_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cdns_pcie_suspend_noirq, + cdns_pcie_resume_noirq) +}; diff --git a/drivers/pci/controller/pcie-cadence.h b/drivers/pci/controller/pcie-cadence.h index 4bb27333b05c..ae6bf2a2b3d3 100644 --- a/drivers/pci/controller/pcie-cadence.h +++ b/drivers/pci/controller/pcie-cadence.h @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/pci.h> +#include <linux/phy/phy.h> /* * Local Management Registers @@ -165,6 +166,9 @@ #define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \ (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008) +/* AXI link down register */ +#define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824) + enum cdns_pcie_rp_bar { RP_BAR0, RP_BAR1, @@ -229,6 +233,9 @@ struct cdns_pcie { struct resource *mem_res; bool is_rc; u8 bus; + int phy_count; + struct phy **phy; + struct device_link **link; }; /* Register access */ @@ -279,7 +286,7 @@ static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn, } static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn, - u32 reg, u16 value) + u32 reg, u32 value) { writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); } @@ -307,5 +314,9 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, u32 r, u64 cpu_addr); void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r); +void cdns_pcie_disable_phy(struct cdns_pcie *pcie); +int cdns_pcie_enable_phy(struct cdns_pcie *pcie); +int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie); +extern const struct dev_pm_ops cdns_pcie_pm_ops; #endif /* _PCIE_CADENCE_H */ diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index 3c76c5fa4f32..c20fd6bd68fd 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -85,6 +85,8 @@ #define IMAP_VALID_SHIFT 0 #define IMAP_VALID BIT(IMAP_VALID_SHIFT) +#define IPROC_PCI_PM_CAP 0x48 +#define IPROC_PCI_PM_CAP_MASK 0xffff #define IPROC_PCI_EXP_CAP 0xac #define IPROC_PCIE_REG_INVALID 0xffff @@ -375,6 +377,17 @@ static const u16 iproc_pcie_reg_paxc_v2[] = { [IPROC_PCIE_CFG_DATA] = 0x1fc, }; +/* + * List of device IDs of controllers that have 
a corrupted capability list that + requires SW fixup + */ +static const u16 iproc_pcie_corrupt_cap_did[] = { + 0x16cd, + 0x16f0, + 0xd802, + 0xd804 +}; + static inline struct iproc_pcie *iproc_data(struct pci_bus *bus) { struct iproc_pcie *pcie = bus->sysdata; @@ -495,6 +508,49 @@ static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p) return data; } +static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val) +{ + u32 i, dev_id; + + switch (where & ~0x3) { + case PCI_VENDOR_ID: + dev_id = *val >> 16; + + /* + * Activate fixup for those controllers that have corrupted + * capability list registers + */ + for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++) + if (dev_id == iproc_pcie_corrupt_cap_did[i]) + pcie->fix_paxc_cap = true; + break; + + case IPROC_PCI_PM_CAP: + if (pcie->fix_paxc_cap) { + /* advertise PM, force next capability to PCIe */ + *val &= ~IPROC_PCI_PM_CAP_MASK; + *val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM; + } + break; + + case IPROC_PCI_EXP_CAP: + if (pcie->fix_paxc_cap) { + /* advertise root port, version 2, terminate here */ + *val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 | + PCI_CAP_ID_EXP; + } + break; + + case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL: + /* Don't advertise CRS SV support */ + *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); + break; + + default: + break; + } +} + static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { @@ -509,13 +565,10 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn, /* root complex access */ if (busno == 0) { ret = pci_generic_config_read32(bus, devfn, where, size, val); - if (ret != PCIBIOS_SUCCESSFUL) - return ret; + if (ret == PCIBIOS_SUCCESSFUL) + iproc_pcie_fix_cap(pcie, where, val); - /* Don't advertise CRS SV support */ - if ((where & ~0x3) == IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL) - *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); - return PCIBIOS_SUCCESSFUL; + return ret; } cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where); @@ -529,6 +582,25 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn, if (size <= 2) *val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); + /* + * For PAXC and PAXCv2, the total number of PFs that one can enumerate + * depends on the firmware configuration. Unfortunately, due to an ASIC + * bug, unconfigured PFs cannot be properly hidden from the root + * complex. As a result, write access to these PFs will cause bus lock + * up on the embedded processor + * + * Since all unconfigured PFs are left with an incorrect, stale device + * ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch those accesses + * early here and reject them all + */ +#define DEVICE_ID_MASK 0xffff0000 +#define DEVICE_ID_SHIFT 16 + if (pcie->rej_unconfig_pf && + (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID) + if ((*val & DEVICE_ID_MASK) == + (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT)) + return PCIBIOS_FUNC_NOT_SUPPORTED; + return PCIBIOS_SUCCESSFUL; } @@ -558,14 +630,6 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie, return (pcie->base + offset); } - /* - * PAXC is connected to an internally emulated EP within the SoC. It - * allows only one device.
- */ - if (pcie->ep_is_internal) - if (slot > 0) - return NULL; - return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where); } @@ -628,7 +692,7 @@ static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, struct iproc_pcie *pcie = iproc_data(bus); iproc_pcie_apb_err_disable(bus, true); - if (pcie->type == IPROC_PCIE_PAXB_V2) + if (pcie->iproc_cfg_read) ret = iproc_pcie_config_read(bus, devfn, where, size, val); else ret = pci_generic_config_read32(bus, devfn, where, size, val); @@ -808,14 +872,14 @@ static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx, writel(lower_32_bits(pci_addr), pcie->base + omap_offset); writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4); - dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n", - window_idx, oarr_offset, &axi_addr, &pci_addr); - dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n", - readl(pcie->base + oarr_offset), - readl(pcie->base + oarr_offset + 4)); - dev_info(dev, "omap lo 0x%x omap hi 0x%x\n", - readl(pcie->base + omap_offset), - readl(pcie->base + omap_offset + 4)); + dev_dbg(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n", + window_idx, oarr_offset, &axi_addr, &pci_addr); + dev_dbg(dev, "oarr lo 0x%x oarr hi 0x%x\n", + readl(pcie->base + oarr_offset), + readl(pcie->base + oarr_offset + 4)); + dev_dbg(dev, "omap lo 0x%x omap hi 0x%x\n", + readl(pcie->base + omap_offset), + readl(pcie->base + omap_offset + 4)); return 0; } @@ -982,8 +1046,8 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx, iproc_pcie_reg_is_invalid(imap_offset)) return -EINVAL; - dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n", - region_idx, iarr_offset, &axi_addr, &pci_addr); + dev_dbg(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n", + region_idx, iarr_offset, &axi_addr, &pci_addr); /* * Program the IARR registers. The upper 32-bit IARR register is @@ -993,9 +1057,9 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx, pcie->base + iarr_offset); writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4); - dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n", - readl(pcie->base + iarr_offset), - readl(pcie->base + iarr_offset + 4)); + dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n", + readl(pcie->base + iarr_offset), + readl(pcie->base + iarr_offset + 4)); /* * Now program the IMAP registers. Each IARR region may have one or @@ -1009,10 +1073,10 @@ static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx, writel(upper_32_bits(axi_addr), pcie->base + imap_offset + ib_map->imap_addr_offset); - dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n", - window_idx, readl(pcie->base + imap_offset), - readl(pcie->base + imap_offset + - ib_map->imap_addr_offset)); + dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n", + window_idx, readl(pcie->base + imap_offset), + readl(pcie->base + imap_offset + + ib_map->imap_addr_offset)); imap_offset += ib_map->imap_window_offset; axi_addr += size; @@ -1144,10 +1208,22 @@ static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) return ret; } -static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) +static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr, + bool enable) { u32 val; + if (!enable) { + /* + * Disable PAXC MSI steering. 
All write transfers will be + * treated as non-MSI transfers + */ + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG); + val &= ~MSI_ENABLE_CFG; + iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val); + return; + } + /* * Program bits [43:13] of address of GITS_TRANSLATER register into * bits [30:0] of the MSI base address register. In fact, in all iProc @@ -1201,7 +1277,7 @@ static int iproc_pcie_msi_steer(struct iproc_pcie *pcie, return ret; break; case IPROC_PCIE_PAXC_V2: - iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr); + iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr, true); break; default: return -EINVAL; @@ -1271,6 +1347,7 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie) break; case IPROC_PCIE_PAXB: regs = iproc_pcie_reg_paxb; + pcie->iproc_cfg_read = true; pcie->has_apb_err_disable = true; if (pcie->need_ob_cfg) { pcie->ob_map = paxb_ob_map; @@ -1293,10 +1370,14 @@ case IPROC_PCIE_PAXC: regs = iproc_pcie_reg_paxc; pcie->ep_is_internal = true; + pcie->iproc_cfg_read = true; + pcie->rej_unconfig_pf = true; break; case IPROC_PCIE_PAXC_V2: regs = iproc_pcie_reg_paxc_v2; pcie->ep_is_internal = true; + pcie->iproc_cfg_read = true; + pcie->rej_unconfig_pf = true; pcie->need_msi_steer = true; break; default: @@ -1427,6 +1508,24 @@ int iproc_pcie_remove(struct iproc_pcie *pcie) } EXPORT_SYMBOL(iproc_pcie_remove); +/* + * The MSI parsing logic in certain revisions of the Broadcom PAXC-based root + * complex does not work and needs to be disabled + */ +static void quirk_paxc_disable_msi_parsing(struct pci_dev *pdev) +{ + struct iproc_pcie *pcie = iproc_data(pdev->bus); + + if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) + iproc_pcie_paxc_v2_msi_steer(pcie, 0, false); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, + quirk_paxc_disable_msi_parsing); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, + quirk_paxc_disable_msi_parsing); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, + quirk_paxc_disable_msi_parsing); + MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>"); MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-iproc.h b/drivers/pci/controller/pcie-iproc.h index 814b600b383a..4f03ea539805 100644 --- a/drivers/pci/controller/pcie-iproc.h +++ b/drivers/pci/controller/pcie-iproc.h @@ -58,8 +58,13 @@ struct iproc_msi; * @phy: optional PHY device that controls the Serdes * @map_irq: function callback to map interrupts * @ep_is_internal: indicates an internal emulated endpoint device is connected + * @iproc_cfg_read: indicates the iProc config read function should be used + * @rej_unconfig_pf: indicates the root complex needs to detect and reject + * enumeration against unconfigured physical functions emulated in the ASIC * @has_apb_err_disable: indicates the controller can be configured to prevent * unsupported request from being forwarded as an APB bus error + * @fix_paxc_cap: indicates the controller has a corrupted capability list in its + * config space registers and requires a SW-based fixup * * @need_ob_cfg: indicates SW needs to configure the outbound mapping window * @ob: outbound mapping related parameters @@ -84,7 +89,10 @@ struct iproc_pcie { struct phy *phy; int (*map_irq)(const struct pci_dev *, u8, u8); bool ep_is_internal; + bool iproc_cfg_read; + bool rej_unconfig_pf; bool has_apb_err_disable; + bool fix_paxc_cap; bool need_ob_cfg; struct iproc_pcie_ob ob; diff --git a/drivers/pci/controller/pcie-mediatek.c
b/drivers/pci/controller/pcie-mediatek.c index 861dda69f366..d069a76cbb95 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -15,6 +15,7 @@ #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/msi.h> +#include <linux/module.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/of_platform.h> @@ -162,6 +163,7 @@ struct mtk_pcie_soc { * @phy: pointer to PHY control block * @lane: lane count * @slot: port slot + * @irq: GIC irq * @irq_domain: legacy INTx IRQ domain * @inner_domain: inner IRQ domain * @msi_domain: MSI IRQ domain @@ -182,6 +184,7 @@ struct mtk_pcie_port { struct phy *phy; u32 lane; u32 slot; + int irq; struct irq_domain *irq_domain; struct irq_domain *inner_domain; struct irq_domain *msi_domain; @@ -225,10 +228,8 @@ static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) clk_disable_unprepare(pcie->free_ck); - if (dev->pm_domain) { - pm_runtime_put_sync(dev); - pm_runtime_disable(dev); - } + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); } static void mtk_pcie_port_free(struct mtk_pcie_port *port) @@ -337,6 +338,17 @@ static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus, { struct mtk_pcie *pcie = bus->sysdata; struct mtk_pcie_port *port; + struct pci_dev *dev = NULL; + + /* + * Walk the bus hierarchy to get the devfn value + * of the port in the root bus. + */ + while (bus && bus->number) { + dev = bus->self; + bus = dev->bus; + devfn = dev->devfn; + } list_for_each_entry(port, &pcie->ports, list) if (port->slot == PCI_SLOT(devfn)) @@ -383,75 +395,6 @@ static struct pci_ops mtk_pcie_ops_v2 = { .write = mtk_pcie_config_write, }; -static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) -{ - struct mtk_pcie *pcie = port->pcie; - struct resource *mem = &pcie->mem; - const struct mtk_pcie_soc *soc = port->pcie->soc; - u32 val; - size_t size; - int err; - - /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */ - if (pcie->base) { - val = readl(pcie->base + PCIE_SYS_CFG_V2); - val |= PCIE_CSR_LTSSM_EN(port->slot) | - PCIE_CSR_ASPM_L1_EN(port->slot); - writel(val, pcie->base + PCIE_SYS_CFG_V2); - } - - /* Assert all reset signals */ - writel(0, port->base + PCIE_RST_CTRL); - - /* - * Enable PCIe link down reset, if link status changed from link up to - * link down, this will reset MAC control registers and configuration - * space. 
- */ - writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); - - /* De-assert PHY, PE, PIPE, MAC and configuration reset */ - val = readl(port->base + PCIE_RST_CTRL); - val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | - PCIE_MAC_SRSTB | PCIE_CRSTB; - writel(val, port->base + PCIE_RST_CTRL); - - /* Set up vendor ID and class code */ - if (soc->need_fix_class_id) { - val = PCI_VENDOR_ID_MEDIATEK; - writew(val, port->base + PCIE_CONF_VEND_ID); - - val = PCI_CLASS_BRIDGE_HOST; - writew(val, port->base + PCIE_CONF_CLASS_ID); - } - - /* 100ms timeout value should be enough for Gen1/2 training */ - err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, - !!(val & PCIE_PORT_LINKUP_V2), 20, - 100 * USEC_PER_MSEC); - if (err) - return -ETIMEDOUT; - - /* Set INTx mask */ - val = readl(port->base + PCIE_INT_MASK); - val &= ~INTX_MASK; - writel(val, port->base + PCIE_INT_MASK); - - /* Set AHB to PCIe translation windows */ - size = mem->end - mem->start; - val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size)); - writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); - - val = upper_32_bits(mem->start); - writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); - - /* Set PCIe to AXI translation memory space.*/ - val = fls(0xffffffff) | WIN_ENABLE; - writel(val, port->base + PCIE_AXI_WINDOW0); - - return 0; -} - static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); @@ -590,6 +533,27 @@ static void mtk_pcie_enable_msi(struct mtk_pcie_port *port) writel(val, port->base + PCIE_INT_MASK); } +static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie) +{ + struct mtk_pcie_port *port, *tmp; + + list_for_each_entry_safe(port, tmp, &pcie->ports, list) { + irq_set_chained_handler_and_data(port->irq, NULL, NULL); + + if (port->irq_domain) + irq_domain_remove(port->irq_domain); + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + if (port->msi_domain) + irq_domain_remove(port->msi_domain); + if (port->inner_domain) + irq_domain_remove(port->inner_domain); + } + + irq_dispose_mapping(port->irq); + } +} + static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq, irq_hw_number_t hwirq) { @@ -628,8 +592,6 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, ret = mtk_pcie_allocate_msi_domains(port); if (ret) return ret; - - mtk_pcie_enable_msi(port); } return 0; @@ -682,7 +644,7 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, struct mtk_pcie *pcie = port->pcie; struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); - int err, irq; + int err; err = mtk_pcie_init_irq_domain(port, node); if (err) { @@ -690,8 +652,81 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, return err; } - irq = platform_get_irq(pdev, port->slot); - irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port); + port->irq = platform_get_irq(pdev, port->slot); + irq_set_chained_handler_and_data(port->irq, + mtk_pcie_intr_handler, port); + + return 0; +} + +static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) +{ + struct mtk_pcie *pcie = port->pcie; + struct resource *mem = &pcie->mem; + const struct mtk_pcie_soc *soc = port->pcie->soc; + u32 val; + size_t size; + int err; + + /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */ + if (pcie->base) { + val = readl(pcie->base + PCIE_SYS_CFG_V2); + val |= PCIE_CSR_LTSSM_EN(port->slot) | + PCIE_CSR_ASPM_L1_EN(port->slot); + writel(val, pcie->base + PCIE_SYS_CFG_V2); + } + + /* Assert all reset signals 
*/ + writel(0, port->base + PCIE_RST_CTRL); + + /* + * Enable PCIe link down reset, if link status changed from link up to + * link down, this will reset MAC control registers and configuration + * space. + */ + writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); + + /* De-assert PHY, PE, PIPE, MAC and configuration reset */ + val = readl(port->base + PCIE_RST_CTRL); + val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | + PCIE_MAC_SRSTB | PCIE_CRSTB; + writel(val, port->base + PCIE_RST_CTRL); + + /* Set up vendor ID and class code */ + if (soc->need_fix_class_id) { + val = PCI_VENDOR_ID_MEDIATEK; + writew(val, port->base + PCIE_CONF_VEND_ID); + + val = PCI_CLASS_BRIDGE_PCI; + writew(val, port->base + PCIE_CONF_CLASS_ID); + } + + /* 100ms timeout value should be enough for Gen1/2 training */ + err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, + !!(val & PCIE_PORT_LINKUP_V2), 20, + 100 * USEC_PER_MSEC); + if (err) + return -ETIMEDOUT; + + /* Set INTx mask */ + val = readl(port->base + PCIE_INT_MASK); + val &= ~INTX_MASK; + writel(val, port->base + PCIE_INT_MASK); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + mtk_pcie_enable_msi(port); + + /* Set AHB to PCIe translation windows */ + size = mem->end - mem->start; + val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size)); + writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); + + val = upper_32_bits(mem->start); + writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); + + /* Set PCIe to AXI translation memory space.*/ + val = fls(0xffffffff) | WIN_ENABLE; + writel(val, port->base + PCIE_AXI_WINDOW0); return 0; } @@ -987,10 +1022,8 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) pcie->free_ck = NULL; } - if (dev->pm_domain) { - pm_runtime_enable(dev); - pm_runtime_get_sync(dev); - } + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); /* enable top level clock */ err = clk_prepare_enable(pcie->free_ck); @@ -1002,10 +1035,8 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) return 0; err_free_ck: - if (dev->pm_domain) { - pm_runtime_put_sync(dev); - pm_runtime_disable(dev); - } + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return err; } @@ -1109,36 +1140,10 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie) if (err < 0) return err; - devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start); - - return 0; -} - -static int mtk_pcie_register_host(struct pci_host_bridge *host) -{ - struct mtk_pcie *pcie = pci_host_bridge_priv(host); - struct pci_bus *child; - int err; - - host->busnr = pcie->busn.start; - host->dev.parent = pcie->dev; - host->ops = pcie->soc->ops; - host->map_irq = of_irq_parse_and_map_pci; - host->swizzle_irq = pci_common_swizzle; - host->sysdata = pcie; - - err = pci_scan_root_bus_bridge(host); - if (err < 0) + err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start); + if (err) return err; - pci_bus_size_bridges(host->bus); - pci_bus_assign_resources(host->bus); - - list_for_each_entry(child, &host->bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(host->bus); - return 0; } @@ -1168,7 +1173,14 @@ static int mtk_pcie_probe(struct platform_device *pdev) if (err) goto put_resources; - err = mtk_pcie_register_host(host); + host->busnr = pcie->busn.start; + host->dev.parent = pcie->dev; + host->ops = pcie->soc->ops; + host->map_irq = of_irq_parse_and_map_pci; + host->swizzle_irq = pci_common_swizzle; + host->sysdata = pcie; + + err = pci_host_probe(host); if (err) goto put_resources; @@ -1181,6 +1193,80 @@ put_resources: return err; } + +static 
void mtk_pcie_free_resources(struct mtk_pcie *pcie) +{ + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); + struct list_head *windows = &host->windows; + + pci_free_resource_list(windows); +} + +static int mtk_pcie_remove(struct platform_device *pdev) +{ + struct mtk_pcie *pcie = platform_get_drvdata(pdev); + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); + + pci_stop_root_bus(host->bus); + pci_remove_root_bus(host->bus); + mtk_pcie_free_resources(pcie); + + mtk_pcie_irq_teardown(pcie); + + mtk_pcie_put_resources(pcie); + + return 0; +} + +static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev) +{ + struct mtk_pcie *pcie = dev_get_drvdata(dev); + struct mtk_pcie_port *port; + + if (list_empty(&pcie->ports)) + return 0; + + list_for_each_entry(port, &pcie->ports, list) { + clk_disable_unprepare(port->pipe_ck); + clk_disable_unprepare(port->obff_ck); + clk_disable_unprepare(port->axi_ck); + clk_disable_unprepare(port->aux_ck); + clk_disable_unprepare(port->ahb_ck); + clk_disable_unprepare(port->sys_ck); + phy_power_off(port->phy); + phy_exit(port->phy); + } + + clk_disable_unprepare(pcie->free_ck); + + return 0; +} + +static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev) +{ + struct mtk_pcie *pcie = dev_get_drvdata(dev); + struct mtk_pcie_port *port, *tmp; + + if (list_empty(&pcie->ports)) + return 0; + + clk_prepare_enable(pcie->free_ck); + + list_for_each_entry_safe(port, tmp, &pcie->ports, list) + mtk_pcie_enable_port(port); + + /* In case the EP was removed while the system was suspended. */ + if (list_empty(&pcie->ports)) + clk_disable_unprepare(pcie->free_ck); + + return 0; +} + +static const struct dev_pm_ops mtk_pcie_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq, + mtk_pcie_resume_noirq) +}; + static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { .ops = &mtk_pcie_ops, .startup = mtk_pcie_startup_port, @@ -1209,10 +1295,13 @@ static const struct of_device_id mtk_pcie_ids[] = { static struct platform_driver mtk_pcie_driver = { .probe = mtk_pcie_probe, + .remove = mtk_pcie_remove, .driver = { .name = "mtk-pcie", .of_match_table = mtk_pcie_ids, .suppress_bind_attrs = true, + .pm = &mtk_pcie_pm_ops, }, }; -builtin_platform_driver(mtk_pcie_driver); +module_platform_driver(mtk_pcie_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c index 4d6c20e47bed..77052a0712d0 100644 --- a/drivers/pci/controller/pcie-mobiveil.c +++ b/drivers/pci/controller/pcie-mobiveil.c @@ -23,6 +23,8 @@ #include <linux/platform_device.h> #include <linux/slab.h> +#include "../pci.h" + /* register offsets and bit positions */ /* @@ -107,7 +109,7 @@ #define CFG_WINDOW_TYPE 0 #define IO_WINDOW_TYPE 1 #define MEM_WINDOW_TYPE 2 -#define IB_WIN_SIZE (256 * 1024 * 1024 * 1024) +#define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024) #define MAX_PIO_WINDOWS 8 /* Parameters for the waiting for link up routine */ @@ -130,7 +132,7 @@ struct mobiveil_pcie { void __iomem *config_axi_slave_base; /* endpoint config base */ void __iomem *csr_axi_slave_base; /* root port config base */ void __iomem *apb_csr_base; /* MSI register base */ - void __iomem *pcie_reg_base; /* Physical PCIe Controller Base */ + phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */ struct irq_domain *intx_domain; raw_spinlock_t intx_mask_lock; int irq; @@ -299,13 +301,6 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) struct platform_device *pdev = pcie->pdev; struct device_node *node =
dev->of_node; struct resource *res; - const char *type; - - type = of_get_property(node, "device_type", NULL); - if (!type || strcmp(type, "pci")) { - dev_err(dev, "invalid \"device_type\" %s\n", type); - return -EINVAL; - } /* map config resource */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 6beba8ed7b84..b8163c56a142 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -472,7 +472,7 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, enum pci_epc_irq_type type, - u8 interrupt_num) + u16 interrupt_num) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index fb32840ce8e6..81538d77f790 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -777,16 +777,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, struct platform_device *pdev) { struct device *dev = pcie->dev; - struct device_node *node = dev->of_node; struct resource *res; - const char *type; - - /* Check for device type */ - type = of_get_property(node, "device_type", NULL); - if (!type || strcmp(type, "pci")) { - dev_err(dev, "invalid \"device_type\" %s\n", type); - return -EINVAL; - } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg"); pcie->breg_base = devm_ioremap_resource(dev, res); diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index 7b1389d8e2a5..9bd1a35cd5d8 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -574,15 +574,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) struct device *dev = port->dev; struct device_node *node = dev->of_node; struct resource regs; - const char *type; int err; - type = of_get_property(node, "device_type", NULL); - if (!type || strcmp(type, "pci")) { - dev_err(dev, "invalid \"device_type\" %s\n", type); - return -EINVAL; - } - err = of_address_to_resource(node, 0, &regs); if (err) { dev_err(dev, "missing \"reg\" property\n"); diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 942b64fc7f1f..e50b0b5815ff 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -197,9 +197,20 @@ static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *d int i, best = 1; unsigned long flags; - if (pci_is_bridge(msi_desc_to_pci_dev(desc)) || vmd->msix_count == 1) + if (vmd->msix_count == 1) return &vmd->irqs[0]; + /* + * White list for fast-interrupt handlers. All others will share the
+ */ + switch (msi_desc_to_pci_dev(desc)->class) { + case PCI_CLASS_STORAGE_EXPRESS: + break; + default: + return &vmd->irqs[0]; + } + raw_spin_lock_irqsave(&list_lock, flags); for (i = 1; i < vmd->msix_count; i++) if (vmd->irqs[i].count < vmd->irqs[best].count) @@ -393,12 +404,10 @@ static int vmd_dma_supported(struct device *dev, u64 mask) return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask); } -#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK static u64 vmd_get_required_mask(struct device *dev) { return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev)); } -#endif static void vmd_teardown_dma_ops(struct vmd_dev *vmd) { @@ -439,9 +448,7 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd) ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); ASSIGN_VMD_DMA_OPS(source, dest, mapping_error); ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); -#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); -#endif add_dma_domain(domain); } #undef ASSIGN_VMD_DMA_OPS @@ -802,12 +809,12 @@ static void vmd_remove(struct pci_dev *dev) { struct vmd_dev *vmd = pci_get_drvdata(dev); - vmd_detach_resources(vmd); sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); pci_remove_root_bus(vmd->bus); vmd_cleanup_srcu(vmd); vmd_teardown_dma_ops(vmd); + vmd_detach_resources(vmd); irq_domain_remove(vmd->irq_domain); }
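Both host-driver conversions above funnel into the same generic pci_host_bridge flow: mediatek now calls pci_host_probe() directly, while mvebu open-codes it as mvebu_pci_host_probe() only so that pci_ioremap_io() can run once the bus scan is known to succeed. For reference, a minimal sketch of that flow as a hypothetical foo_pcie driver; the foo_* names, the ECAM-style map_bus decoding, and the elided register/window setup are illustrative assumptions, not code from this series:

#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

struct foo_pcie {
	struct platform_device *pdev;
	void __iomem *base;
};

/* Illustrative ECAM-style decode; real controllers each do their own. */
static void __iomem *foo_pcie_map_bus(struct pci_bus *bus,
				      unsigned int devfn, int where)
{
	struct foo_pcie *pcie = bus->sysdata;

	return pcie->base + (devfn << 12) + where;
}

static struct pci_ops foo_pcie_ops = {
	.map_bus = foo_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct foo_pcie *pcie;

	/* Driver-private data lives inside the bridge allocation. */
	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	/* ... ioremap registers, fill bridge->windows from DT "ranges" ... */

	bridge->dev.parent = dev;
	bridge->sysdata = pcie;
	bridge->busnr = 0;
	bridge->ops = &foo_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	/*
	 * pci_host_probe() scans the root bus, sizes and assigns (or, with
	 * PCI_PROBE_ONLY, claims) resources, then adds the devices; this is
	 * the sequence mvebu_pci_host_probe() open-codes above.
	 */
	return pci_host_probe(bridge);
}

Measured against this baseline, the mvebu variant differs only in where the I/O space is remapped, which is exactly the error-handling wart its comment says should eventually go away.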

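One small hunk above also deserves spelling out: the Mobiveil IB_WIN_SIZE definition gains a (u64) cast because all four factors of 256 * 1024 * 1024 * 1024 are plain 32-bit ints, so the product (2^38) overflows before it is ever widened. A standalone userspace sketch of the pitfall, with illustrative names; the folded value of the overflowing expression is compiler-dependent since signed overflow is undefined:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/*
	 * Every factor is a plain int, so the arithmetic is 32-bit:
	 * 2^38 overflows, the compiler warns, and the wrapped constant
	 * (typically 0) is what the macro would have expanded to.
	 */
	uint64_t bad_win_size = 256 * 1024 * 1024 * 1024;

	/* Promoting the first factor forces 64-bit arithmetic throughout. */
	uint64_t good_win_size = (uint64_t)256 * 1024 * 1024 * 1024;

	printf("bad=0x%llx good=0x%llx\n",
	       (unsigned long long)bad_win_size,
	       (unsigned long long)good_win_size);
	return 0;
}

With the cast, each subsequent multiplication is carried out in 64 bits, so the macro folds to 0x4000000000 (256 GiB) as intended.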