Diffstat (limited to 'drivers/pci/controller')
40 files changed, 1935 insertions, 1426 deletions
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 4a7afbe189f8..64e2f5e379aa 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -12,7 +12,7 @@ config PCI_MVEBU
 	select PCI_BRIDGE_EMUL
 
 config PCI_AARDVARK
-	bool "Aardvark PCIe controller"
+	tristate "Aardvark PCIe controller"
 	depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
 	depends on OF
 	depends on PCI_MSI_IRQ_DOMAIN
@@ -273,9 +273,10 @@ config VMD
 
 config PCIE_BRCMSTB
 	tristate "Broadcom Brcmstb PCIe host controller"
-	depends on ARCH_BCM2835 || COMPILE_TEST
+	depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST
 	depends on OF
 	depends on PCI_MSI_IRQ_DOMAIN
+	default ARCH_BRCMSTB
 	help
 	  Say Y here to enable PCIe host controller support for
 	  Broadcom STB based SoCs, like the Raspberry Pi 4.
@@ -297,6 +298,13 @@ config PCI_LOONGSON
 	  Say Y here if you want to enable PCI controller support on
 	  Loongson systems.
 
+config PCIE_HISI_ERR
+	depends on ACPI_APEI_GHES && (ARM64 || COMPILE_TEST)
+	bool "HiSilicon HIP PCIe controller error handling driver"
+	help
+	  Say Y here if you want error handling support
+	  for the PCIe controller's errors on HiSilicon HIP SoCs
+
 source "drivers/pci/controller/dwc/Kconfig"
 source "drivers/pci/controller/mobiveil/Kconfig"
 source "drivers/pci/controller/cadence/Kconfig"
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index bcdbf49ab1e4..04c6edc285c5 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
 obj-$(CONFIG_VMD) += vmd.o
 obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
 obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
+obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o
 # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
 obj-y				+= dwc/
 obj-y				+= mobiveil/
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 254a3e1eff50..84cc58dc8512 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -328,7 +328,6 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
 	cdns_pcie_ep_assert_intx(ep, fn, intx, true);
 	/*
 	 * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
-	 * from drivers/pci/dwc/pci-dra7xx.c
 	 */
 	mdelay(1);
 	cdns_pcie_ep_assert_intx(ep, fn, intx, false);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 4550e0d469ca..811c1cb2e8de 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -337,7 +337,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
 	struct resource_entry *entry;
 	u64 cpu_addr = cfg_res->start;
 	u32 addr0, addr1, desc1;
-	int r, err, busnr = 0;
+	int r, busnr = 0;
 
 	entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
 	if (entry)
@@ -383,11 +383,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
 		r++;
 	}
 
-	err = cdns_pcie_host_map_dma_ranges(rc);
-	if (err)
-		return err;
-
-	return 0;
+	return cdns_pcie_host_map_dma_ranges(rc);
 }
 
 static int cdns_pcie_host_init(struct device *dev,
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 044a3761c44f..bc049865f8e0 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -237,8 +237,9 @@ config PCIE_HISI_STB
 	  Say Y here if you want PCIe controller support on HiSilicon STB SoCs
 
 config PCI_MESON
-	bool "MESON PCIe controller"
+	tristate "MESON PCIe controller"
 	depends on PCI_MSI_IRQ_DOMAIN
+	default m if ARCH_MESON
 	select PCIE_DW_HOST
 	help
 	  Say Y here if you want to enable PCI controller support on Amlogic
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index dc387724cf08..6d012d2b1e90 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -73,8 +73,6 @@
 #define	LINK_UP						BIT(16)
 #define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF
 
-#define EXP_CAP_ID_OFFSET				0x70
-
 #define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
 #define	PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128
 
@@ -91,7 +89,6 @@ struct dra7xx_pcie {
 	void __iomem		*base;		/* DT ti_conf */
 	int			phy_count;	/* DT phy-names count */
 	struct phy		**phy;
-	int			link_gen;
 	struct irq_domain	*irq_domain;
 	enum dw_pcie_device_mode mode;
 };
@@ -142,33 +139,12 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
 	struct device *dev = pci->dev;
 	u32 reg;
-	u32 exp_cap_off = EXP_CAP_ID_OFFSET;
 
 	if (dw_pcie_link_up(pci)) {
 		dev_err(dev, "link is already up\n");
 		return 0;
 	}
 
-	if (dra7xx->link_gen == 1) {
-		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
-			     4, &reg);
-		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
-			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
-			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
-			dw_pcie_write(pci->dbi_base + exp_cap_off +
-				      PCI_EXP_LNKCAP, 4, reg);
-		}
-
-		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
-			     2, &reg);
-		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
-			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
-			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
-			dw_pcie_write(pci->dbi_base + exp_cap_off +
-				      PCI_EXP_LNKCTL2, 2, reg);
-		}
-	}
-
 	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
 	reg |= LTSSM_EN;
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
@@ -490,7 +466,9 @@ static struct irq_chip dra7xx_pci_msi_bottom_irq_chip = {
 static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	struct device *dev = pci->dev;
 	u32 ctrl, num_ctrls;
+	int ret;
 
 	pp->msi_irq_chip = &dra7xx_pci_msi_bottom_irq_chip;
 
@@ -506,7 +484,21 @@ static int dra7xx_pcie_msi_host_init(struct pcie_port *pp)
 				    ~0);
 	}
 
-	return dw_pcie_allocate_domains(pp);
+	ret = dw_pcie_allocate_domains(pp);
+	if (ret)
+		return ret;
+
+	pp->msi_data = dma_map_single_attrs(dev, &pp->msi_msg,
+					   sizeof(pp->msi_msg),
+					   DMA_FROM_DEVICE,
+					   DMA_ATTR_SKIP_CPU_SYNC);
+	ret = dma_mapping_error(dev, pp->msi_data);
+	if (ret) {
+		dev_err(dev, "Failed to map MSI data\n");
+		pp->msi_data = 0;
+		dw_pcie_free_msi(pp);
+	}
+	return ret;
 }
 
 static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
@@ -937,10 +929,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
 	reg &= ~LTSSM_EN;
 	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
 
-	dra7xx->link_gen = of_pci_get_max_link_speed(np);
-	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
-		dra7xx->link_gen = 2;
-
 	switch (mode) {
 	case DW_PCIE_RC_TYPE:
 		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
diff --git a/drivers/pci/controller/dwc/pci-exynos.c
b/drivers/pci/controller/dwc/pci-exynos.c index 8d82c43ae299..242683cde04a 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -336,32 +336,37 @@ static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,  	exynos_pcie_sideband_dbi_w_mode(ep, false);  } -static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, -				u32 *val) +static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn, +				   int where, int size, u32 *val)  { -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct exynos_pcie *ep = to_exynos_pcie(pci); -	int ret; +	struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); -	exynos_pcie_sideband_dbi_r_mode(ep, true); -	ret = dw_pcie_read(pci->dbi_base + where, size, val); -	exynos_pcie_sideband_dbi_r_mode(ep, false); -	return ret; +	if (PCI_SLOT(devfn)) { +		*val = ~0; +		return PCIBIOS_DEVICE_NOT_FOUND; +	} + +	*val = dw_pcie_read_dbi(pci, where, size); +	return PCIBIOS_SUCCESSFUL;  } -static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, -				u32 val) +static int exynos_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn, +				   int where, int size, u32 val)  { -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct exynos_pcie *ep = to_exynos_pcie(pci); -	int ret; +	struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); -	exynos_pcie_sideband_dbi_w_mode(ep, true); -	ret = dw_pcie_write(pci->dbi_base + where, size, val); -	exynos_pcie_sideband_dbi_w_mode(ep, false); -	return ret; +	if (PCI_SLOT(devfn)) +		return PCIBIOS_DEVICE_NOT_FOUND; + +	dw_pcie_write_dbi(pci, where, size, val); +	return PCIBIOS_SUCCESSFUL;  } +static struct pci_ops exynos_pci_ops = { +	.read = exynos_pcie_rd_own_conf, +	.write = exynos_pcie_wr_own_conf, +}; +  static int exynos_pcie_link_up(struct dw_pcie *pci)  {  	struct exynos_pcie *ep = to_exynos_pcie(pci); @@ -379,6 +384,8 @@ static int exynos_pcie_host_init(struct pcie_port *pp)  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct exynos_pcie *ep = to_exynos_pcie(pci); +	pp->bridge->ops = &exynos_pci_ops; +  	exynos_pcie_establish_link(ep);  	exynos_pcie_enable_interrupts(ep); @@ -386,8 +393,6 @@ static int exynos_pcie_host_init(struct pcie_port *pp)  }  static const struct dw_pcie_host_ops exynos_pcie_host_ops = { -	.rd_own_conf = exynos_pcie_rd_own_conf, -	.wr_own_conf = exynos_pcie_wr_own_conf,  	.host_init = exynos_pcie_host_init,  }; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 5fef2613b223..5cf1ef12fb9b 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -79,7 +79,6 @@ struct imx6_pcie {  	u32			tx_deemph_gen2_6db;  	u32			tx_swing_full;  	u32			tx_swing_low; -	int			link_gen;  	struct regulator	*vpcie;  	void __iomem		*phy_base; @@ -94,15 +93,6 @@ struct imx6_pcie {  #define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200  #define PHY_PLL_LOCK_WAIT_TIMEOUT	(2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX) -/* PCIe Root Complex registers (memory-mapped) */ -#define PCIE_RC_IMX6_MSI_CAP			0x50 -#define PCIE_RC_LCR				0x7c -#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1 -#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2 -#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf - -#define PCIE_RC_LCSR				0x80 -  /* PCIe Port Logic registers (memory-mapped) */  #define PL_OFFSET 0x700 @@ -116,8 +106,6 @@ struct imx6_pcie {  #define PCIE_PHY_STAT (PL_OFFSET + 0x110)  #define PCIE_PHY_STAT_ACK		BIT(16) -#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C -  /* PHY 
registers (not memory-mapped) */  #define PCIE_PHY_ATEOVRD			0x10  #define  PCIE_PHY_ATEOVRD_EN			BIT(2) @@ -761,6 +749,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)  {  	struct dw_pcie *pci = imx6_pcie->pci;  	struct device *dev = pci->dev; +	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);  	u32 tmp;  	int ret; @@ -769,10 +758,10 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)  	 * started in Gen2 mode, there is a possibility the devices on the  	 * bus will not be detected at all.  This happens with PCIe switches.  	 */ -	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR); -	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; -	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1; -	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); +	tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); +	tmp &= ~PCI_EXP_LNKCAP_SLS; +	tmp |= PCI_EXP_LNKCAP_SLS_2_5GB; +	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);  	/* Start LTSSM. */  	imx6_pcie_ltssm_enable(dev); @@ -781,12 +770,12 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)  	if (ret)  		goto err_reset_phy; -	if (imx6_pcie->link_gen == 2) { +	if (pci->link_gen == 2) {  		/* Allow Gen2 mode after the link is up. */ -		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR); -		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; -		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; -		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); +		tmp = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); +		tmp &= ~PCI_EXP_LNKCAP_SLS; +		tmp |= PCI_EXP_LNKCAP_SLS_5_0GB; +		dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, tmp);  		/*  		 * Start Directed Speed Change so the best possible @@ -824,8 +813,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)  		dev_info(dev, "Link: Gen2 disabled\n");  	} -	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR); -	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf); +	tmp = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); +	dev_info(dev, "Link up, Gen%i\n", tmp & PCI_EXP_LNKSTA_CLS);  	return 0;  err_reset_phy: @@ -847,9 +836,7 @@ static int imx6_pcie_host_init(struct pcie_port *pp)  	imx6_setup_phy_mpll(imx6_pcie);  	dw_pcie_setup_rc(pp);  	imx6_pcie_establish_link(imx6_pcie); - -	if (IS_ENABLED(CONFIG_PCI_MSI)) -		dw_pcie_msi_init(pp); +	dw_pcie_msi_init(pp);  	return 0;  } @@ -1073,38 +1060,33 @@ static int imx6_pcie_probe(struct platform_device *pdev)  	/* Fetch clocks */  	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy"); -	if (IS_ERR(imx6_pcie->pcie_phy)) { -		dev_err(dev, "pcie_phy clock source missing or invalid\n"); -		return PTR_ERR(imx6_pcie->pcie_phy); -	} +	if (IS_ERR(imx6_pcie->pcie_phy)) +		return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy), +				     "pcie_phy clock source missing or invalid\n");  	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus"); -	if (IS_ERR(imx6_pcie->pcie_bus)) { -		dev_err(dev, "pcie_bus clock source missing or invalid\n"); -		return PTR_ERR(imx6_pcie->pcie_bus); -	} +	if (IS_ERR(imx6_pcie->pcie_bus)) +		return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus), +				     "pcie_bus clock source missing or invalid\n");  	imx6_pcie->pcie = devm_clk_get(dev, "pcie"); -	if (IS_ERR(imx6_pcie->pcie)) { -		dev_err(dev, "pcie clock source missing or invalid\n"); -		return PTR_ERR(imx6_pcie->pcie); -	} +	if (IS_ERR(imx6_pcie->pcie)) +		return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie), +				     "pcie clock source missing or invalid\n");  	switch (imx6_pcie->drvdata->variant) {  	case IMX6SX:  		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,  							   
"pcie_inbound_axi"); -		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) { -			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n"); -			return PTR_ERR(imx6_pcie->pcie_inbound_axi); -		} +		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) +			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_inbound_axi), +					     "pcie_inbound_axi clock missing or invalid\n");  		break;  	case IMX8MQ:  		imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux"); -		if (IS_ERR(imx6_pcie->pcie_aux)) { -			dev_err(dev, "pcie_aux clock source missing or invalid\n"); -			return PTR_ERR(imx6_pcie->pcie_aux); -		} +		if (IS_ERR(imx6_pcie->pcie_aux)) +			return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux), +					     "pcie_aux clock source missing or invalid\n");  		fallthrough;  	case IMX7D:  		if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) @@ -1165,10 +1147,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)  		imx6_pcie->tx_swing_low = 127;  	/* Limit link speed */ -	ret = of_property_read_u32(node, "fsl,max-link-speed", -				   &imx6_pcie->link_gen); -	if (ret) -		imx6_pcie->link_gen = 1; +	pci->link_gen = 1; +	ret = of_property_read_u32(node, "fsl,max-link-speed", &pci->link_gen);  	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");  	if (IS_ERR(imx6_pcie->vpcie)) { @@ -1188,11 +1168,10 @@ static int imx6_pcie_probe(struct platform_device *pdev)  		return ret;  	if (pci_msi_enabled()) { -		val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP + -					PCI_MSI_FLAGS); +		u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); +		val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);  		val |= PCI_MSI_FLAGS_ENABLE; -		dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS, -				   val); +		dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);  	}  	return 0; diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index c8c9d6a75f17..a222728238ca 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -96,8 +96,6 @@  #define LEG_EP				0x1  #define RC				0x2 -#define EXP_CAP_ID_OFFSET		0x70 -  #define KS_PCIE_SYSCLOCKOUTEN		BIT(0)  #define AM654_PCIE_DEV_TYPE_MASK	0x3 @@ -123,7 +121,6 @@ struct keystone_pcie {  	int			msi_host_irq;  	int			num_lanes; -	u32			num_viewport;  	struct phy		**phy;  	struct device_link	**link;  	struct			device_node *msi_intc_np; @@ -397,13 +394,17 @@ static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)  static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)  {  	u32 val; -	u32 num_viewport = ks_pcie->num_viewport;  	struct dw_pcie *pci = ks_pcie->pci;  	struct pcie_port *pp = &pci->pp; -	u64 start = pp->mem->start; -	u64 end = pp->mem->end; +	u32 num_viewport = pci->num_viewport; +	u64 start, end; +	struct resource *mem;  	int i; +	mem = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM)->res; +	start = mem->start; +	end = mem->end; +  	/* Disable BARs for inbound access */  	ks_pcie_set_dbi_mode(ks_pcie);  	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); @@ -430,10 +431,10 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)  	ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);  } -static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, -				 unsigned int devfn, int where, int size, -				 u32 *val) +static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus, +					   unsigned int devfn, int where)  { +	struct pcie_port *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct 
keystone_pcie *ks_pcie = to_keystone_pcie(pci);  	u32 reg; @@ -444,36 +445,29 @@ static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,  		reg |= CFG_TYPE1;  	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg); -	return dw_pcie_read(pp->va_cfg0_base + where, size, val); +	return pp->va_cfg0_base + where;  } -static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, -				 unsigned int devfn, int where, int size, -				 u32 val) -{ -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); -	u32 reg; - -	reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | -		CFG_FUNC(PCI_FUNC(devfn)); -	if (!pci_is_root_bus(bus->parent)) -		reg |= CFG_TYPE1; -	ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg); - -	return dw_pcie_write(pp->va_cfg0_base + where, size, val); -} +static struct pci_ops ks_child_pcie_ops = { +	.map_bus = ks_pcie_other_map_bus, +	.read = pci_generic_config_read, +	.write = pci_generic_config_write, +};  /** - * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization + * ks_pcie_v3_65_add_bus() - keystone add_bus post initialization   *   * This sets BAR0 to enable inbound access for MSI_IRQ register   */ -static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp) +static int ks_pcie_v3_65_add_bus(struct pci_bus *bus)  { +	struct pcie_port *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); +	if (!pci_is_root_bus(bus)) +		return 0; +  	/* Configure and set up BAR0 */  	ks_pcie_set_dbi_mode(ks_pcie); @@ -488,8 +482,17 @@ static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)  	  * be sufficient.  Use physical address to avoid any conflicts.  	  */  	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); + +	return 0;  } +static struct pci_ops ks_pcie_ops = { +	.map_bus = dw_pcie_own_conf_map_bus, +	.read = pci_generic_config_read, +	.write = pci_generic_config_write, +	.add_bus = ks_pcie_v3_65_add_bus, +}; +  /**   * ks_pcie_link_up() - Check if link up   */ @@ -807,6 +810,9 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)  	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);  	int ret; +	pp->bridge->ops = &ks_pcie_ops; +	pp->bridge->child_ops = &ks_child_pcie_ops; +  	ret = ks_pcie_config_legacy_irq(ks_pcie);  	if (ret)  		return ret; @@ -842,11 +848,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)  }  static const struct dw_pcie_host_ops ks_pcie_host_ops = { -	.rd_other_conf = ks_pcie_rd_other_conf, -	.wr_other_conf = ks_pcie_wr_other_conf,  	.host_init = ks_pcie_host_init,  	.msi_host_init = ks_pcie_msi_host_init, -	.scan_bus = ks_pcie_v3_65_scan_bus,  };  static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = { @@ -867,16 +870,8 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,  	struct dw_pcie *pci = ks_pcie->pci;  	struct pcie_port *pp = &pci->pp;  	struct device *dev = &pdev->dev; -	struct resource *res;  	int ret; -	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); -	pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res); -	if (IS_ERR(pp->va_cfg0_base)) -		return PTR_ERR(pp->va_cfg0_base); - -	pp->va_cfg1_base = pp->va_cfg0_base; -  	ret = dw_pcie_host_init(pp);  	if (ret) {  		dev_err(dev, "failed to initialize host\n"); @@ -886,18 +881,6 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,  	return 0;  } -static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base, -				   u32 reg, size_t 
size) -{ -	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); -	u32 val; - -	ks_pcie_set_dbi_mode(ks_pcie); -	dw_pcie_read(base + reg, size, &val); -	ks_pcie_clear_dbi_mode(ks_pcie); -	return val; -} -  static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base,  				     u32 reg, size_t size, u32 val)  { @@ -912,7 +895,6 @@ static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {  	.start_link = ks_pcie_start_link,  	.stop_link = ks_pcie_stop_link,  	.link_up = ks_pcie_link_up, -	.read_dbi2 = ks_pcie_am654_read_dbi2,  	.write_dbi2 = ks_pcie_am654_write_dbi2,  }; @@ -1125,31 +1107,6 @@ static int ks_pcie_am654_set_mode(struct device *dev,  	return 0;  } -static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed) -{ -	u32 val; - -	dw_pcie_dbi_ro_wr_en(pci); - -	val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP); -	if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) { -		val &= ~((u32)PCI_EXP_LNKCAP_SLS); -		val |= link_speed; -		dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP, -				   val); -	} - -	val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2); -	if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) { -		val &= ~((u32)PCI_EXP_LNKCAP_SLS); -		val |= link_speed; -		dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2, -				   val); -	} - -	dw_pcie_dbi_ro_wr_dis(pci); -} -  static const struct ks_pcie_of_data ks_pcie_rc_of_data = {  	.host_ops = &ks_pcie_host_ops,  	.version = 0x365A, @@ -1197,13 +1154,10 @@ static int __init ks_pcie_probe(struct platform_device *pdev)  	struct keystone_pcie *ks_pcie;  	struct device_link **link;  	struct gpio_desc *gpiod; -	void __iomem *atu_base;  	struct resource *res;  	unsigned int version;  	void __iomem *base; -	u32 num_viewport;  	struct phy **phy; -	int link_speed;  	u32 num_lanes;  	char name[10];  	int ret; @@ -1320,29 +1274,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)  		goto err_get_sync;  	} -	if (pci->version >= 0x480A) { -		atu_base = devm_platform_ioremap_resource_byname(pdev, "atu"); -		if (IS_ERR(atu_base)) { -			ret = PTR_ERR(atu_base); -			goto err_get_sync; -		} - -		pci->atu_base = atu_base; - +	if (pci->version >= 0x480A)  		ret = ks_pcie_am654_set_mode(dev, mode); -		if (ret < 0) -			goto err_get_sync; -	} else { +	else  		ret = ks_pcie_set_mode(dev); -		if (ret < 0) -			goto err_get_sync; -	} - -	link_speed = of_pci_get_max_link_speed(np); -	if (link_speed < 0) -		link_speed = 2; - -	ks_pcie_set_link_speed(pci, link_speed); +	if (ret < 0) +		goto err_get_sync;  	switch (mode) {  	case DW_PCIE_RC_TYPE: @@ -1351,12 +1288,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)  			goto err_get_sync;  		} -		ret = of_property_read_u32(np, "num-viewport", &num_viewport); -		if (ret < 0) { -			dev_err(dev, "unable to read *num-viewport* property\n"); -			goto err_get_sync; -		} -  		/*  		 * "Power Sequencing and Reset Signal Timings" table in  		 * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 
2.0 @@ -1370,7 +1301,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)  			gpiod_set_value_cansleep(gpiod, 1);  		} -		ks_pcie->num_viewport = num_viewport;  		pci->pp.ops = host_ops;  		ret = ks_pcie_add_pcie_port(ks_pcie, pdev);  		if (ret < 0) diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index 0d151cead1b7..84206f265e54 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -20,50 +20,58 @@  #define PCIE_DBI2_OFFSET		0x1000	/* DBI2 base address*/ -struct ls_pcie_ep { -	struct dw_pcie		*pci; +#define to_ls_pcie_ep(x)	dev_get_drvdata((x)->dev) + +struct ls_pcie_ep_drvdata { +	u32				func_offset; +	const struct dw_pcie_ep_ops	*ops; +	const struct dw_pcie_ops	*dw_pcie_ops;  }; -#define to_ls_pcie_ep(x)	dev_get_drvdata((x)->dev) +struct ls_pcie_ep { +	struct dw_pcie			*pci; +	struct pci_epc_features		*ls_epc; +	const struct ls_pcie_ep_drvdata *drvdata; +};  static int ls_pcie_establish_link(struct dw_pcie *pci)  {  	return 0;  } -static const struct dw_pcie_ops ls_pcie_ep_ops = { +static const struct dw_pcie_ops dw_ls_pcie_ep_ops = {  	.start_link = ls_pcie_establish_link,  }; -static const struct of_device_id ls_pcie_ep_of_match[] = { -	{ .compatible = "fsl,ls-pcie-ep",}, -	{ }, -}; - -static const struct pci_epc_features ls_pcie_epc_features = { -	.linkup_notifier = false, -	.msi_capable = true, -	.msix_capable = false, -	.bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4), -}; -  static const struct pci_epc_features*  ls_pcie_ep_get_features(struct dw_pcie_ep *ep)  { -	return &ls_pcie_epc_features; +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci); + +	return pcie->ls_epc;  }  static void ls_pcie_ep_init(struct dw_pcie_ep *ep)  {  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci); +	struct dw_pcie_ep_func *ep_func;  	enum pci_barno bar; +	ep_func = dw_pcie_ep_get_func_from_ep(ep, 0); +	if (!ep_func) +		return; +  	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)  		dw_pcie_ep_reset_bar(pci, bar); + +	pcie->ls_epc->msi_capable = ep_func->msi_cap ? true : false; +	pcie->ls_epc->msix_capable = ep_func->msix_cap ? 
true : false;  }  static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, -				  enum pci_epc_irq_type type, u16 interrupt_num) +				enum pci_epc_irq_type type, u16 interrupt_num)  {  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); @@ -73,21 +81,51 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,  	case PCI_EPC_IRQ_MSI:  		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);  	case PCI_EPC_IRQ_MSIX: -		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num); +		return dw_pcie_ep_raise_msix_irq_doorbell(ep, func_no, +							  interrupt_num);  	default:  		dev_err(pci->dev, "UNKNOWN IRQ type\n");  		return -EINVAL;  	}  } -static const struct dw_pcie_ep_ops pcie_ep_ops = { +static unsigned int ls_pcie_ep_func_conf_select(struct dw_pcie_ep *ep, +						u8 func_no) +{ +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	struct ls_pcie_ep *pcie = to_ls_pcie_ep(pci); + +	WARN_ON(func_no && !pcie->drvdata->func_offset); +	return pcie->drvdata->func_offset * func_no; +} + +static const struct dw_pcie_ep_ops ls_pcie_ep_ops = {  	.ep_init = ls_pcie_ep_init,  	.raise_irq = ls_pcie_ep_raise_irq,  	.get_features = ls_pcie_ep_get_features, +	.func_conf_select = ls_pcie_ep_func_conf_select, +}; + +static const struct ls_pcie_ep_drvdata ls1_ep_drvdata = { +	.ops = &ls_pcie_ep_ops, +	.dw_pcie_ops = &dw_ls_pcie_ep_ops, +}; + +static const struct ls_pcie_ep_drvdata ls2_ep_drvdata = { +	.func_offset = 0x20000, +	.ops = &ls_pcie_ep_ops, +	.dw_pcie_ops = &dw_ls_pcie_ep_ops, +}; + +static const struct of_device_id ls_pcie_ep_of_match[] = { +	{ .compatible = "fsl,ls1046a-pcie-ep", .data = &ls1_ep_drvdata }, +	{ .compatible = "fsl,ls1088a-pcie-ep", .data = &ls2_ep_drvdata }, +	{ .compatible = "fsl,ls2088a-pcie-ep", .data = &ls2_ep_drvdata }, +	{ },  };  static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie, -					struct platform_device *pdev) +				 struct platform_device *pdev)  {  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; @@ -96,7 +134,7 @@ static int __init ls_add_pcie_ep(struct ls_pcie_ep *pcie,  	int ret;  	ep = &pci->ep; -	ep->ops = &pcie_ep_ops; +	ep->ops = pcie->drvdata->ops;  	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");  	if (!res) @@ -119,6 +157,7 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)  	struct device *dev = &pdev->dev;  	struct dw_pcie *pci;  	struct ls_pcie_ep *pcie; +	struct pci_epc_features *ls_epc;  	struct resource *dbi_base;  	int ret; @@ -130,15 +169,26 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev)  	if (!pci)  		return -ENOMEM; +	ls_epc = devm_kzalloc(dev, sizeof(*ls_epc), GFP_KERNEL); +	if (!ls_epc) +		return -ENOMEM; + +	pcie->drvdata = of_device_get_match_data(dev); + +	pci->dev = dev; +	pci->ops = pcie->drvdata->dw_pcie_ops; + +	ls_epc->bar_fixed_64bit = (1 << BAR_2) | (1 << BAR_4), + +	pcie->pci = pci; +	pcie->ls_epc = ls_epc; +  	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");  	pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base);  	if (IS_ERR(pci->dbi_base))  		return PTR_ERR(pci->dbi_base);  	pci->dbi_base2 = pci->dbi_base + PCIE_DBI2_OFFSET; -	pci->dev = dev; -	pci->ops = &ls_pcie_ep_ops; -	pcie->pci = pci;  	platform_set_drvdata(pdev, pcie); diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index 4f183b96afbb..1913dc2c8fa0 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -17,37 +17,13 @@  #include <linux/resource.h>  
#include <linux/types.h>  #include <linux/phy/phy.h> +#include <linux/module.h>  #include "pcie-designware.h"  #define to_meson_pcie(x) dev_get_drvdata((x)->dev) -/* External local bus interface registers */ -#define PLR_OFFSET			0x700 -#define PCIE_PORT_LINK_CTRL_OFF		(PLR_OFFSET + 0x10) -#define FAST_LINK_MODE			BIT(7) -#define LINK_CAPABLE_MASK		GENMASK(21, 16) -#define LINK_CAPABLE_X1			BIT(16) - -#define PCIE_GEN2_CTRL_OFF		(PLR_OFFSET + 0x10c) -#define NUM_OF_LANES_MASK		GENMASK(12, 8) -#define NUM_OF_LANES_X1			BIT(8) -#define DIRECT_SPEED_CHANGE		BIT(17) - -#define TYPE1_HDR_OFFSET		0x0 -#define PCIE_STATUS_COMMAND		(TYPE1_HDR_OFFSET + 0x04) -#define PCI_IO_EN			BIT(0) -#define PCI_MEM_SPACE_EN		BIT(1) -#define PCI_BUS_MASTER_EN		BIT(2) - -#define PCIE_BASE_ADDR0			(TYPE1_HDR_OFFSET + 0x10) -#define PCIE_BASE_ADDR1			(TYPE1_HDR_OFFSET + 0x14) - -#define PCIE_CAP_OFFSET			0x70 -#define PCIE_DEV_CTRL_DEV_STUS		(PCIE_CAP_OFFSET + 0x08) -#define PCIE_CAP_MAX_PAYLOAD_MASK	GENMASK(7, 5)  #define PCIE_CAP_MAX_PAYLOAD_SIZE(x)	((x) << 5) -#define PCIE_CAP_MAX_READ_REQ_MASK	GENMASK(14, 12)  #define PCIE_CAP_MAX_READ_REQ_SIZE(x)	((x) << 12)  /* PCIe specific config registers */ @@ -77,11 +53,6 @@ enum pcie_data_rate {  	PCIE_GEN4  }; -struct meson_pcie_mem_res { -	void __iomem *elbi_base; -	void __iomem *cfg_base; -}; -  struct meson_pcie_clk_res {  	struct clk *clk;  	struct clk *port_clk; @@ -95,7 +66,7 @@ struct meson_pcie_rc_reset {  struct meson_pcie {  	struct dw_pcie pci; -	struct meson_pcie_mem_res mem_res; +	void __iomem *cfg_base;  	struct meson_pcie_clk_res clk_res;  	struct meson_pcie_rc_reset mrst;  	struct gpio_desc *reset_gpio; @@ -134,28 +105,18 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)  	return 0;  } -static void __iomem *meson_pcie_get_mem(struct platform_device *pdev, -					struct meson_pcie *mp, -					const char *id) -{ -	struct device *dev = mp->pci.dev; -	struct resource *res; - -	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, id); - -	return devm_ioremap_resource(dev, res); -} -  static int meson_pcie_get_mems(struct platform_device *pdev,  			       struct meson_pcie *mp)  { -	mp->mem_res.elbi_base = meson_pcie_get_mem(pdev, mp, "elbi"); -	if (IS_ERR(mp->mem_res.elbi_base)) -		return PTR_ERR(mp->mem_res.elbi_base); +	struct dw_pcie *pci = &mp->pci; + +	pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi"); +	if (IS_ERR(pci->dbi_base)) +		return PTR_ERR(pci->dbi_base); -	mp->mem_res.cfg_base = meson_pcie_get_mem(pdev, mp, "cfg"); -	if (IS_ERR(mp->mem_res.cfg_base)) -		return PTR_ERR(mp->mem_res.cfg_base); +	mp->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg"); +	if (IS_ERR(mp->cfg_base)) +		return PTR_ERR(mp->cfg_base);  	return 0;  } @@ -253,24 +214,14 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)  	return 0;  } -static inline void meson_elb_writel(struct meson_pcie *mp, u32 val, u32 reg) -{ -	writel(val, mp->mem_res.elbi_base + reg); -} - -static inline u32 meson_elb_readl(struct meson_pcie *mp, u32 reg) -{ -	return readl(mp->mem_res.elbi_base + reg); -} -  static inline u32 meson_cfg_readl(struct meson_pcie *mp, u32 reg)  { -	return readl(mp->mem_res.cfg_base + reg); +	return readl(mp->cfg_base + reg);  }  static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)  { -	writel(val, mp->mem_res.cfg_base + reg); +	writel(val, mp->cfg_base + reg);  }  static void meson_pcie_assert_reset(struct meson_pcie *mp) @@ -287,25 +238,6 @@ static void meson_pcie_init_dw(struct meson_pcie *mp)  	
val = meson_cfg_readl(mp, PCIE_CFG0);  	val |= APP_LTSSM_ENABLE;  	meson_cfg_writel(mp, val, PCIE_CFG0); - -	val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); -	val &= ~(LINK_CAPABLE_MASK | FAST_LINK_MODE); -	meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); - -	val = meson_elb_readl(mp, PCIE_PORT_LINK_CTRL_OFF); -	val |= LINK_CAPABLE_X1; -	meson_elb_writel(mp, val, PCIE_PORT_LINK_CTRL_OFF); - -	val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF); -	val &= ~NUM_OF_LANES_MASK; -	meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF); - -	val = meson_elb_readl(mp, PCIE_GEN2_CTRL_OFF); -	val |= NUM_OF_LANES_X1 | DIRECT_SPEED_CHANGE; -	meson_elb_writel(mp, val, PCIE_GEN2_CTRL_OFF); - -	meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR0); -	meson_elb_writel(mp, 0x0, PCIE_BASE_ADDR1);  }  static int meson_size_to_payload(struct meson_pcie *mp, int size) @@ -327,37 +259,34 @@ static int meson_size_to_payload(struct meson_pcie *mp, int size)  static void meson_set_max_payload(struct meson_pcie *mp, int size)  { +	struct dw_pcie *pci = &mp->pci;  	u32 val; +	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);  	int max_payload_size = meson_size_to_payload(mp, size); -	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS); -	val &= ~PCIE_CAP_MAX_PAYLOAD_MASK; -	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS); +	val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL); +	val &= ~PCI_EXP_DEVCTL_PAYLOAD; +	dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val); -	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS); +	val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);  	val |= PCIE_CAP_MAX_PAYLOAD_SIZE(max_payload_size); -	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS); +	dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);  }  static void meson_set_max_rd_req_size(struct meson_pcie *mp, int size)  { +	struct dw_pcie *pci = &mp->pci;  	u32 val; +	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);  	int max_rd_req_size = meson_size_to_payload(mp, size); -	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS); -	val &= ~PCIE_CAP_MAX_READ_REQ_MASK; -	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS); +	val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL); +	val &= ~PCI_EXP_DEVCTL_READRQ; +	dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val); -	val = meson_elb_readl(mp, PCIE_DEV_CTRL_DEV_STUS); +	val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_DEVCTL);  	val |= PCIE_CAP_MAX_READ_REQ_SIZE(max_rd_req_size); -	meson_elb_writel(mp, val, PCIE_DEV_CTRL_DEV_STUS); -} - -static inline void meson_enable_memory_space(struct meson_pcie *mp) -{ -	/* Set the RC Bus Master, Memory Space and I/O Space enables */ -	meson_elb_writel(mp, PCI_IO_EN | PCI_MEM_SPACE_EN | PCI_BUS_MASTER_EN, -			 PCIE_STATUS_COMMAND); +	dw_pcie_writel_dbi(pci, offset + PCI_EXP_DEVCTL, val);  }  static int meson_pcie_establish_link(struct meson_pcie *mp) @@ -370,26 +299,18 @@ static int meson_pcie_establish_link(struct meson_pcie *mp)  	meson_set_max_rd_req_size(mp, MAX_READ_REQ_SIZE);  	dw_pcie_setup_rc(pp); -	meson_enable_memory_space(mp);  	meson_pcie_assert_reset(mp);  	return dw_pcie_wait_for_link(pci);  } -static void meson_pcie_enable_interrupts(struct meson_pcie *mp) +static int meson_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, +				  int where, int size, u32 *val)  { -	if (IS_ENABLED(CONFIG_PCI_MSI)) -		dw_pcie_msi_init(&mp->pci.pp); -} - -static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, -				  u32 *val) -{ -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	int ret; -	ret = dw_pcie_read(pci->dbi_base + where, 
size, val); +	ret = pci_generic_config_read(bus, devfn, where, size, val);  	if (ret != PCIBIOS_SUCCESSFUL)  		return ret; @@ -410,13 +331,11 @@ static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,  	return PCIBIOS_SUCCESSFUL;  } -static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where, -				  int size, u32 val) -{ -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - -	return dw_pcie_write(pci->dbi_base + where, size, val); -} +static struct pci_ops meson_pci_ops = { +	.map_bus = dw_pcie_own_conf_map_bus, +	.read = meson_pcie_rd_own_conf, +	.write = pci_generic_config_write, +};  static int meson_pcie_link_up(struct dw_pcie *pci)  { @@ -463,18 +382,18 @@ static int meson_pcie_host_init(struct pcie_port *pp)  	struct meson_pcie *mp = to_meson_pcie(pci);  	int ret; +	pp->bridge->ops = &meson_pci_ops; +  	ret = meson_pcie_establish_link(mp);  	if (ret)  		return ret; -	meson_pcie_enable_interrupts(mp); +	dw_pcie_msi_init(pp);  	return 0;  }  static const struct dw_pcie_host_ops meson_pcie_host_ops = { -	.rd_own_conf = meson_pcie_rd_own_conf, -	.wr_own_conf = meson_pcie_wr_own_conf,  	.host_init = meson_pcie_host_init,  }; @@ -493,7 +412,6 @@ static int meson_add_pcie_port(struct meson_pcie *mp,  	}  	pp->ops = &meson_pcie_host_ops; -	pci->dbi_base = mp->mem_res.elbi_base;  	ret = dw_pcie_host_init(pp);  	if (ret) { @@ -522,6 +440,7 @@ static int meson_pcie_probe(struct platform_device *pdev)  	pci = &mp->pci;  	pci->dev = dev;  	pci->ops = &dw_pcie_ops; +	pci->num_lanes = 1;  	mp->phy = devm_phy_get(dev, "pcie");  	if (IS_ERR(mp->phy)) { @@ -589,6 +508,7 @@ static const struct of_device_id meson_pcie_of_match[] = {  	},  	{},  }; +MODULE_DEVICE_TABLE(of, meson_pcie_of_match);  static struct platform_driver meson_pcie_driver = {  	.probe = meson_pcie_probe, @@ -598,4 +518,8 @@ static struct platform_driver meson_pcie_driver = {  	},  }; -builtin_platform_driver(meson_pcie_driver); +module_platform_driver(meson_pcie_driver); + +MODULE_AUTHOR("Yue Wang <yue.wang@amlogic.com>"); +MODULE_DESCRIPTION("Amlogic PCIe Controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c index d57d4ee15848..f973fbca90cf 100644 --- a/drivers/pci/controller/dwc/pcie-al.c +++ b/drivers/pci/controller/dwc/pcie-al.c @@ -217,14 +217,15 @@ static inline void al_pcie_target_bus_set(struct al_pcie *pcie,  				  reg);  } -static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie, -					   unsigned int busnr, -					   unsigned int devfn) +static void __iomem *al_pcie_conf_addr_map_bus(struct pci_bus *bus, +					       unsigned int devfn, int where)  { +	struct pcie_port *pp = bus->sysdata; +	struct al_pcie *pcie = to_al_pcie(to_dw_pcie_from_pp(pp)); +	unsigned int busnr = bus->number;  	struct al_pcie_target_bus_cfg *target_bus_cfg = &pcie->target_bus_cfg;  	unsigned int busnr_ecam = busnr & target_bus_cfg->ecam_mask;  	unsigned int busnr_reg = busnr & target_bus_cfg->reg_mask; -	struct pcie_port *pp = &pcie->pci->pp;  	void __iomem *pci_base_addr;  	pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base + @@ -240,52 +241,14 @@ static void __iomem *al_pcie_conf_addr_map(struct al_pcie *pcie,  				       target_bus_cfg->reg_mask);  	} -	return pci_base_addr; -} - -static int al_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, -				 unsigned int devfn, int where, int size, -				 u32 *val) -{ -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct al_pcie *pcie = to_al_pcie(pci); -	unsigned int 
busnr = bus->number; -	void __iomem *pci_addr; -	int rc; - -	pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn); - -	rc = dw_pcie_read(pci_addr + where, size, val); - -	dev_dbg(pci->dev, "%d-byte config read from %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n", -		size, pci_domain_nr(bus), bus->number, -		PCI_SLOT(devfn), PCI_FUNC(devfn), where, -		(pci_addr + where), *val); - -	return rc; +	return pci_base_addr + where;  } -static int al_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, -				 unsigned int devfn, int where, int size, -				 u32 val) -{ -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct al_pcie *pcie = to_al_pcie(pci); -	unsigned int busnr = bus->number; -	void __iomem *pci_addr; -	int rc; - -	pci_addr = al_pcie_conf_addr_map(pcie, busnr, devfn); - -	rc = dw_pcie_write(pci_addr + where, size, val); - -	dev_dbg(pci->dev, "%d-byte config write to %04x:%02x:%02x.%d offset 0x%x (pci_addr: 0x%px) - val:0x%x\n", -		size, pci_domain_nr(bus), bus->number, -		PCI_SLOT(devfn), PCI_FUNC(devfn), where, -		(pci_addr + where), val); - -	return rc; -} +static struct pci_ops al_child_pci_ops = { +	.map_bus = al_pcie_conf_addr_map_bus, +	.read = pci_generic_config_read, +	.write = pci_generic_config_write, +};  static void al_pcie_config_prepare(struct al_pcie *pcie)  { @@ -297,6 +260,7 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)  	u8 secondary_bus;  	u32 cfg_control;  	u32 reg; +	struct resource *bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;  	target_bus_cfg = &pcie->target_bus_cfg; @@ -310,13 +274,13 @@ static void al_pcie_config_prepare(struct al_pcie *pcie)  	target_bus_cfg->ecam_mask = ecam_bus_mask;  	/* This portion is taken from the cfg_target_bus reg */  	target_bus_cfg->reg_mask = ~target_bus_cfg->ecam_mask; -	target_bus_cfg->reg_val = pp->busn->start & target_bus_cfg->reg_mask; +	target_bus_cfg->reg_val = bus->start & target_bus_cfg->reg_mask;  	al_pcie_target_bus_set(pcie, target_bus_cfg->reg_val,  			       target_bus_cfg->reg_mask); -	secondary_bus = pp->busn->start + 1; -	subordinate_bus = pp->busn->end; +	secondary_bus = bus->start + 1; +	subordinate_bus = bus->end;  	/* Set the valid values of secondary and subordinate buses */  	cfg_control_offset = AXI_BASE_OFFSET + pcie->reg_offsets.ob_ctrl + @@ -339,6 +303,8 @@ static int al_pcie_host_init(struct pcie_port *pp)  	struct al_pcie *pcie = to_al_pcie(pci);  	int rc; +	pp->bridge->child_ops = &al_child_pci_ops; +  	rc = al_pcie_rev_id_get(pcie, &pcie->controller_rev_id);  	if (rc)  		return rc; @@ -353,8 +319,6 @@ static int al_pcie_host_init(struct pcie_port *pp)  }  static const struct dw_pcie_host_ops al_pcie_host_ops = { -	.rd_other_conf = al_pcie_rd_other_conf, -	.wr_other_conf = al_pcie_wr_other_conf,  	.host_init = al_pcie_host_init,  }; diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index 97d50bb50f06..929448e9e0bc 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -44,13 +44,6 @@ struct artpec_pcie_of_data {  static const struct of_device_id artpec6_pcie_of_match[]; -/* PCIe Port Logic registers (memory-mapped) */ -#define PL_OFFSET			0x700 - -#define ACK_F_ASPM_CTRL_OFF		(PL_OFFSET + 0xc) -#define ACK_N_FTS_MASK			GENMASK(15, 8) -#define ACK_N_FTS(x)			(((x) << 8) & ACK_N_FTS_MASK) -  /* ARTPEC-6 specific registers */  #define PCIECFG				0x18  #define  PCIECFG_DBG_OEN		BIT(24) @@ -289,30 +282,6 @@ static void 
artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)  	}  } -static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie) -{ -	struct dw_pcie *pci = artpec6_pcie->pci; -	u32 val; - -	if (artpec6_pcie->variant != ARTPEC7) -		return; - -	/* -	 * Increase the N_FTS (Number of Fast Training Sequences) -	 * to be transmitted when transitioning from L0s to L0. -	 */ -	val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF); -	val &= ~ACK_N_FTS_MASK; -	val |= ACK_N_FTS(180); -	dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val); - -	/* -	 * Set the Number of Fast Training Sequences that the core -	 * advertises as its N_FTS during Gen2 or Gen3 link training. -	 */ -	dw_pcie_link_set_n_fts(pci, 180); -} -  static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)  {  	u32 val; @@ -346,29 +315,23 @@ static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)  	usleep_range(100, 200);  } -static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie) -{ -	struct dw_pcie *pci = artpec6_pcie->pci; -	struct pcie_port *pp = &pci->pp; - -	if (IS_ENABLED(CONFIG_PCI_MSI)) -		dw_pcie_msi_init(pp); -} -  static int artpec6_pcie_host_init(struct pcie_port *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); +	if (artpec6_pcie->variant == ARTPEC7) { +		pci->n_fts[0] = 180; +		pci->n_fts[1] = 180; +	}  	artpec6_pcie_assert_core_reset(artpec6_pcie);  	artpec6_pcie_init_phy(artpec6_pcie);  	artpec6_pcie_deassert_core_reset(artpec6_pcie);  	artpec6_pcie_wait_for_phy(artpec6_pcie); -	artpec6_pcie_set_nfts(artpec6_pcie);  	dw_pcie_setup_rc(pp);  	artpec6_pcie_establish_link(pci);  	dw_pcie_wait_for_link(pci); -	artpec6_pcie_enable_interrupts(artpec6_pcie); +	dw_pcie_msi_init(pp);  	return 0;  } @@ -412,7 +375,6 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)  	artpec6_pcie_init_phy(artpec6_pcie);  	artpec6_pcie_deassert_core_reset(artpec6_pcie);  	artpec6_pcie_wait_for_phy(artpec6_pcie); -	artpec6_pcie_set_nfts(artpec6_pcie);  	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)  		dw_pcie_ep_reset_bar(pci, bar); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 305bfec2424d..ad7da4ea43a5 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -12,6 +12,8 @@  #include <linux/pci-epc.h>  #include <linux/pci-epf.h> +#include "../../pci.h" +  void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)  {  	struct pci_epc *epc = ep->epc; @@ -28,12 +30,39 @@ void dw_pcie_ep_init_notify(struct dw_pcie_ep *ep)  }  EXPORT_SYMBOL_GPL(dw_pcie_ep_init_notify); -static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar, -				   int flags) +struct dw_pcie_ep_func * +dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no) +{ +	struct dw_pcie_ep_func *ep_func; + +	list_for_each_entry(ep_func, &ep->func_list, list) { +		if (ep_func->func_no == func_no) +			return ep_func; +	} + +	return NULL; +} + +static unsigned int dw_pcie_ep_func_select(struct dw_pcie_ep *ep, u8 func_no) +{ +	unsigned int func_offset = 0; + +	if (ep->ops->func_conf_select) +		func_offset = ep->ops->func_conf_select(ep, func_no); + +	return func_offset; +} + +static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, u8 func_no, +				   enum pci_barno bar, int flags)  {  	u32 reg; +	unsigned int func_offset = 0; +	struct dw_pcie_ep *ep = &pci->ep; -	reg = PCI_BASE_ADDRESS_0 + (4 * bar); +	func_offset = 
dw_pcie_ep_func_select(ep, func_no); + +	reg = func_offset + PCI_BASE_ADDRESS_0 + (4 * bar);  	dw_pcie_dbi_ro_wr_en(pci);  	dw_pcie_writel_dbi2(pci, reg, 0x0);  	dw_pcie_writel_dbi(pci, reg, 0x0); @@ -46,7 +75,53 @@ static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar,  void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)  { -	__dw_pcie_ep_reset_bar(pci, bar, 0); +	u8 func_no, funcs; + +	funcs = pci->ep.epc->max_functions; + +	for (func_no = 0; func_no < funcs; func_no++) +		__dw_pcie_ep_reset_bar(pci, func_no, bar, 0); +} + +static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no, +		u8 cap_ptr, u8 cap) +{ +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	unsigned int func_offset = 0; +	u8 cap_id, next_cap_ptr; +	u16 reg; + +	if (!cap_ptr) +		return 0; + +	func_offset = dw_pcie_ep_func_select(ep, func_no); + +	reg = dw_pcie_readw_dbi(pci, func_offset + cap_ptr); +	cap_id = (reg & 0x00ff); + +	if (cap_id > PCI_CAP_ID_MAX) +		return 0; + +	if (cap_id == cap) +		return cap_ptr; + +	next_cap_ptr = (reg & 0xff00) >> 8; +	return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap); +} + +static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap) +{ +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	unsigned int func_offset = 0; +	u8 next_cap_ptr; +	u16 reg; + +	func_offset = dw_pcie_ep_func_select(ep, func_no); + +	reg = dw_pcie_readw_dbi(pci, func_offset + PCI_CAPABILITY_LIST); +	next_cap_ptr = (reg & 0x00ff); + +	return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);  }  static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, @@ -54,28 +129,31 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,  {  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	unsigned int func_offset = 0; + +	func_offset = dw_pcie_ep_func_select(ep, func_no);  	dw_pcie_dbi_ro_wr_en(pci); -	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid); -	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid); -	dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid); -	dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code); -	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, +	dw_pcie_writew_dbi(pci, func_offset + PCI_VENDOR_ID, hdr->vendorid); +	dw_pcie_writew_dbi(pci, func_offset + PCI_DEVICE_ID, hdr->deviceid); +	dw_pcie_writeb_dbi(pci, func_offset + PCI_REVISION_ID, hdr->revid); +	dw_pcie_writeb_dbi(pci, func_offset + PCI_CLASS_PROG, hdr->progif_code); +	dw_pcie_writew_dbi(pci, func_offset + PCI_CLASS_DEVICE,  			   hdr->subclass_code | hdr->baseclass_code << 8); -	dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE, +	dw_pcie_writeb_dbi(pci, func_offset + PCI_CACHE_LINE_SIZE,  			   hdr->cache_line_size); -	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID, +	dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_VENDOR_ID,  			   hdr->subsys_vendor_id); -	dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id); -	dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN, +	dw_pcie_writew_dbi(pci, func_offset + PCI_SUBSYSTEM_ID, hdr->subsys_id); +	dw_pcie_writeb_dbi(pci, func_offset + PCI_INTERRUPT_PIN,  			   hdr->interrupt_pin);  	dw_pcie_dbi_ro_wr_dis(pci);  	return 0;  } -static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar, -				  dma_addr_t cpu_addr, +static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, u8 func_no, +				  enum pci_barno bar, dma_addr_t cpu_addr,  				  enum dw_pcie_as_type as_type)  {  	int ret; @@ -88,7 +166,7 @@ static int 
dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,  		return -EINVAL;  	} -	ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr, +	ret = dw_pcie_prog_inbound_atu(pci, func_no, free_win, bar, cpu_addr,  				       as_type);  	if (ret < 0) {  		dev_err(pci->dev, "Failed to program IB window\n"); @@ -101,7 +179,8 @@ static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar,  	return 0;  } -static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr, +static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, u8 func_no, +				   phys_addr_t phys_addr,  				   u64 pci_addr, size_t size)  {  	u32 free_win; @@ -113,8 +192,8 @@ static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr,  		return -EINVAL;  	} -	dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM, -				  phys_addr, pci_addr, size); +	dw_pcie_prog_ep_outbound_atu(pci, func_no, free_win, PCIE_ATU_TYPE_MEM, +				     phys_addr, pci_addr, size);  	set_bit(free_win, ep->ob_window_map);  	ep->outbound_addr[free_win] = phys_addr; @@ -130,7 +209,7 @@ static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no,  	enum pci_barno bar = epf_bar->barno;  	u32 atu_index = ep->bar_to_atu[bar]; -	__dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags); +	__dw_pcie_ep_reset_bar(pci, func_no, bar, epf_bar->flags);  	dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND);  	clear_bit(atu_index, ep->ib_window_map); @@ -147,14 +226,20 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,  	size_t size = epf_bar->size;  	int flags = epf_bar->flags;  	enum dw_pcie_as_type as_type; -	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); +	u32 reg; +	unsigned int func_offset = 0; + +	func_offset = dw_pcie_ep_func_select(ep, func_no); + +	reg = PCI_BASE_ADDRESS_0 + (4 * bar) + func_offset;  	if (!(flags & PCI_BASE_ADDRESS_SPACE))  		as_type = DW_PCIE_AS_MEM;  	else  		as_type = DW_PCIE_AS_IO; -	ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type); +	ret = dw_pcie_ep_inbound_atu(ep, func_no, bar, +				     epf_bar->phys_addr, as_type);  	if (ret)  		return ret; @@ -213,7 +298,7 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); -	ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size); +	ret = dw_pcie_ep_outbound_atu(ep, func_no, addr, pci_addr, size);  	if (ret) {  		dev_err(pci->dev, "Failed to enable address\n");  		return ret; @@ -227,11 +312,16 @@ static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);  	u32 val, reg; +	unsigned int func_offset = 0; +	struct dw_pcie_ep_func *ep_func; -	if (!ep->msi_cap) +	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); +	if (!ep_func || !ep_func->msi_cap)  		return -EINVAL; -	reg = ep->msi_cap + PCI_MSI_FLAGS; +	func_offset = dw_pcie_ep_func_select(ep, func_no); + +	reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;  	val = dw_pcie_readw_dbi(pci, reg);  	if (!(val & PCI_MSI_FLAGS_ENABLE))  		return -EINVAL; @@ -246,11 +336,16 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);  	u32 val, reg; +	unsigned int func_offset = 0; +	struct dw_pcie_ep_func *ep_func; -	if (!ep->msi_cap) +	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); +	if (!ep_func || !ep_func->msi_cap)  		return 
-EINVAL; -	reg = ep->msi_cap + PCI_MSI_FLAGS; +	func_offset = dw_pcie_ep_func_select(ep, func_no); + +	reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;  	val = dw_pcie_readw_dbi(pci, reg);  	val &= ~PCI_MSI_FLAGS_QMASK;  	val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK; @@ -266,11 +361,16 @@ static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);  	u32 val, reg; +	unsigned int func_offset = 0; +	struct dw_pcie_ep_func *ep_func; -	if (!ep->msix_cap) +	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); +	if (!ep_func || !ep_func->msix_cap)  		return -EINVAL; -	reg = ep->msix_cap + PCI_MSIX_FLAGS; +	func_offset = dw_pcie_ep_func_select(ep, func_no); + +	reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;  	val = dw_pcie_readw_dbi(pci, reg);  	if (!(val & PCI_MSIX_FLAGS_ENABLE))  		return -EINVAL; @@ -286,23 +386,28 @@ static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts,  	struct dw_pcie_ep *ep = epc_get_drvdata(epc);  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);  	u32 val, reg; +	unsigned int func_offset = 0; +	struct dw_pcie_ep_func *ep_func; -	if (!ep->msix_cap) +	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); +	if (!ep_func || !ep_func->msix_cap)  		return -EINVAL;  	dw_pcie_dbi_ro_wr_en(pci); -	reg = ep->msix_cap + PCI_MSIX_FLAGS; +	func_offset = dw_pcie_ep_func_select(ep, func_no); + +	reg = ep_func->msix_cap + func_offset + PCI_MSIX_FLAGS;  	val = dw_pcie_readw_dbi(pci, reg);  	val &= ~PCI_MSIX_FLAGS_QSIZE;  	val |= interrupts;  	dw_pcie_writew_dbi(pci, reg, val); -	reg = ep->msix_cap + PCI_MSIX_TABLE; +	reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;  	val = offset | bir;  	dw_pcie_writel_dbi(pci, reg, val); -	reg = ep->msix_cap + PCI_MSIX_PBA; +	reg = ep_func->msix_cap + func_offset + PCI_MSIX_PBA;  	val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir;  	dw_pcie_writel_dbi(pci, reg, val); @@ -385,31 +490,36 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,  			     u8 interrupt_num)  {  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	struct dw_pcie_ep_func *ep_func;  	struct pci_epc *epc = ep->epc;  	unsigned int aligned_offset; +	unsigned int func_offset = 0;  	u16 msg_ctrl, msg_data;  	u32 msg_addr_lower, msg_addr_upper, reg;  	u64 msg_addr;  	bool has_upper;  	int ret; -	if (!ep->msi_cap) +	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); +	if (!ep_func || !ep_func->msi_cap)  		return -EINVAL; +	func_offset = dw_pcie_ep_func_select(ep, func_no); +  	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. 
*/ -	reg = ep->msi_cap + PCI_MSI_FLAGS; +	reg = ep_func->msi_cap + func_offset + PCI_MSI_FLAGS;  	msg_ctrl = dw_pcie_readw_dbi(pci, reg);  	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT); -	reg = ep->msi_cap + PCI_MSI_ADDRESS_LO; +	reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_LO;  	msg_addr_lower = dw_pcie_readl_dbi(pci, reg);  	if (has_upper) { -		reg = ep->msi_cap + PCI_MSI_ADDRESS_HI; +		reg = ep_func->msi_cap + func_offset + PCI_MSI_ADDRESS_HI;  		msg_addr_upper = dw_pcie_readl_dbi(pci, reg); -		reg = ep->msi_cap + PCI_MSI_DATA_64; +		reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_64;  		msg_data = dw_pcie_readw_dbi(pci, reg);  	} else {  		msg_addr_upper = 0; -		reg = ep->msi_cap + PCI_MSI_DATA_32; +		reg = ep_func->msi_cap + func_offset + PCI_MSI_DATA_32;  		msg_data = dw_pcie_readw_dbi(pci, reg);  	}  	aligned_offset = msg_addr_lower & (epc->mem->window.page_size - 1); @@ -427,12 +537,33 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,  	return 0;  } +int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no, +				       u16 interrupt_num) +{ +	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	struct dw_pcie_ep_func *ep_func; +	u32 msg_data; + +	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); +	if (!ep_func || !ep_func->msix_cap) +		return -EINVAL; + +	msg_data = (func_no << PCIE_MSIX_DOORBELL_PF_SHIFT) | +		   (interrupt_num - 1); + +	dw_pcie_writel_dbi(pci, PCIE_MSIX_DOORBELL, msg_data); + +	return 0; +} +  int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, -			     u16 interrupt_num) +			      u16 interrupt_num)  {  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep); +	struct dw_pcie_ep_func *ep_func;  	struct pci_epf_msix_tbl *msix_tbl;  	struct pci_epc *epc = ep->epc; +	unsigned int func_offset = 0;  	u32 reg, msg_data, vec_ctrl;  	unsigned int aligned_offset;  	u32 tbl_offset; @@ -440,7 +571,13 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,  	int ret;  	u8 bir; -	reg = ep->msix_cap + PCI_MSIX_TABLE; +	ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no); +	if (!ep_func || !ep_func->msix_cap) +		return -EINVAL; + +	func_offset = dw_pcie_ep_func_select(ep, func_no); + +	reg = ep_func->msix_cap + func_offset + PCI_MSIX_TABLE;  	tbl_offset = dw_pcie_readl_dbi(pci, reg);  	bir = (tbl_offset & PCI_MSIX_TABLE_BIR);  	tbl_offset &= PCI_MSIX_TABLE_OFFSET; @@ -505,7 +642,8 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)  	u32 reg;  	int i; -	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE); +	hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) & +		   PCI_HEADER_TYPE_MASK;  	if (hdr_type != PCI_HEADER_TYPE_NORMAL) {  		dev_err(pci->dev,  			"PCIe controller is not set to EP mode (hdr_type:0x%x)!\n", @@ -513,23 +651,21 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)  		return -EIO;  	} -	ep->msi_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); +	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); -	ep->msix_cap = dw_pcie_find_capability(pci, PCI_CAP_ID_MSIX); +	dw_pcie_dbi_ro_wr_en(pci); -	offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);  	if (offset) {  		reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL);  		nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>  			PCI_REBAR_CTRL_NBAR_SHIFT; -		dw_pcie_dbi_ro_wr_en(pci);  		for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)  			dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); -		dw_pcie_dbi_ro_wr_dis(pci);  	}  	dw_pcie_setup(pci); +	dw_pcie_dbi_ro_wr_dis(pci);  	return 0;  } @@ -539,11 +675,15 @@ int 
dw_pcie_ep_init(struct dw_pcie_ep *ep)  {  	int ret;  	void *addr; +	u8 func_no;  	struct pci_epc *epc;  	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);  	struct device *dev = pci->dev;  	struct device_node *np = dev->of_node;  	const struct pci_epc_features *epc_features; +	struct dw_pcie_ep_func *ep_func; + +	INIT_LIST_HEAD(&ep->func_list);  	if (!pci->dbi_base || !pci->dbi_base2) {  		dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); @@ -590,6 +730,9 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)  		return -ENOMEM;  	ep->outbound_addr = addr; +	if (pci->link_gen < 1) +		pci->link_gen = of_pci_get_max_link_speed(np); +  	epc = devm_pci_epc_create(dev, &epc_ops);  	if (IS_ERR(epc)) {  		dev_err(dev, "Failed to create epc device\n"); @@ -599,13 +742,27 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)  	ep->epc = epc;  	epc_set_drvdata(epc, ep); -	if (ep->ops->ep_init) -		ep->ops->ep_init(ep); -  	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);  	if (ret < 0)  		epc->max_functions = 1; +	for (func_no = 0; func_no < epc->max_functions; func_no++) { +		ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL); +		if (!ep_func) +			return -ENOMEM; + +		ep_func->func_no = func_no; +		ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no, +							      PCI_CAP_ID_MSI); +		ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no, +							       PCI_CAP_ID_MSIX); + +		list_add_tail(&ep_func->list, &ep->func_list); +	} + +	if (ep->ops->ep_init) +		ep->ops->ep_init(ep); +  	ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,  			       ep->page_size);  	if (ret < 0) { diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 9dafecba347f..674f32db85ca 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -20,30 +20,7 @@  #include "pcie-designware.h"  static struct pci_ops dw_pcie_ops; - -static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, -			       u32 *val) -{ -	struct dw_pcie *pci; - -	if (pp->ops->rd_own_conf) -		return pp->ops->rd_own_conf(pp, where, size, val); - -	pci = to_dw_pcie_from_pp(pp); -	return dw_pcie_read(pci->dbi_base + where, size, val); -} - -static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, -			       u32 val) -{ -	struct dw_pcie *pci; - -	if (pp->ops->wr_own_conf) -		return pp->ops->wr_own_conf(pp, where, size, val); - -	pci = to_dw_pcie_from_pp(pp); -	return dw_pcie_write(pci->dbi_base + where, size, val); -} +static struct pci_ops dw_child_pcie_ops;  static void dw_msi_ack_irq(struct irq_data *d)  { @@ -82,13 +59,13 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)  	unsigned long val;  	u32 status, num_ctrls;  	irqreturn_t ret = IRQ_NONE; +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;  	for (i = 0; i < num_ctrls; i++) { -		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + -					(i * MSI_REG_CTRL_BLOCK_SIZE), -				    4, &status); +		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS + +					   (i * MSI_REG_CTRL_BLOCK_SIZE));  		if (!status)  			continue; @@ -148,6 +125,7 @@ static int dw_pci_msi_set_affinity(struct irq_data *d,  static void dw_pci_bottom_mask(struct irq_data *d)  {  	struct pcie_port *pp = irq_data_get_irq_chip_data(d); +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	unsigned int res, bit, ctrl;  	unsigned long flags; @@ -158,8 +136,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)  	
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;  	pp->irq_mask[ctrl] |= BIT(bit); -	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, -			    pp->irq_mask[ctrl]); +	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);  	raw_spin_unlock_irqrestore(&pp->lock, flags);  } @@ -167,6 +144,7 @@ static void dw_pci_bottom_mask(struct irq_data *d)  static void dw_pci_bottom_unmask(struct irq_data *d)  {  	struct pcie_port *pp = irq_data_get_irq_chip_data(d); +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	unsigned int res, bit, ctrl;  	unsigned long flags; @@ -177,8 +155,7 @@ static void dw_pci_bottom_unmask(struct irq_data *d)  	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;  	pp->irq_mask[ctrl] &= ~BIT(bit); -	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, -			    pp->irq_mask[ctrl]); +	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);  	raw_spin_unlock_irqrestore(&pp->lock, flags);  } @@ -186,13 +163,14 @@ static void dw_pci_bottom_unmask(struct irq_data *d)  static void dw_pci_bottom_ack(struct irq_data *d)  {  	struct pcie_port *pp  = irq_data_get_irq_chip_data(d); +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);  	unsigned int res, bit, ctrl;  	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;  	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;  	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; -	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit)); +	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));  }  static struct irq_chip dw_pci_msi_bottom_irq_chip = { @@ -288,32 +266,26 @@ void dw_pcie_free_msi(struct pcie_port *pp)  	irq_domain_remove(pp->msi_domain);  	irq_domain_remove(pp->irq_domain); -	if (pp->msi_page) -		__free_page(pp->msi_page); +	if (pp->msi_data) { +		struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +		struct device *dev = pci->dev; + +		dma_unmap_single_attrs(dev, pp->msi_data, sizeof(pp->msi_msg), +				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); +	}  }  void dw_pcie_msi_init(struct pcie_port *pp)  {  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct device *dev = pci->dev; -	u64 msi_target; +	u64 msi_target = (u64)pp->msi_data; -	pp->msi_page = alloc_page(GFP_KERNEL); -	pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE, -				    DMA_FROM_DEVICE); -	if (dma_mapping_error(dev, pp->msi_data)) { -		dev_err(dev, "Failed to map MSI data\n"); -		__free_page(pp->msi_page); -		pp->msi_page = NULL; +	if (!IS_ENABLED(CONFIG_PCI_MSI))  		return; -	} -	msi_target = (u64)pp->msi_data;  	/* Program the msi_data */ -	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, -			    lower_32_bits(msi_target)); -	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, -			    upper_32_bits(msi_target)); +	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target)); +	dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));  }  EXPORT_SYMBOL_GPL(dw_pcie_msi_init); @@ -324,20 +296,16 @@ int dw_pcie_host_init(struct pcie_port *pp)  	struct device_node *np = dev->of_node;  	struct platform_device *pdev = to_platform_device(dev);  	struct resource_entry *win; -	struct pci_bus *child;  	struct pci_host_bridge *bridge;  	struct resource *cfg_res; -	u32 hdr_type;  	int ret;  	raw_spin_lock_init(&pci->pp.lock);  	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");  	if (cfg_res) { -		pp->cfg0_size = resource_size(cfg_res) >> 1; -		pp->cfg1_size = resource_size(cfg_res) >> 1; +		pp->cfg0_size = resource_size(cfg_res);  		pp->cfg0_base = cfg_res->start; -		pp->cfg1_base = cfg_res->start + pp->cfg0_size;  	} else if 
(!pp->va_cfg0_base) {  		dev_err(dev, "Missing *config* reg space\n");  	} @@ -346,47 +314,33 @@ int dw_pcie_host_init(struct pcie_port *pp)  	if (!bridge)  		return -ENOMEM; +	pp->bridge = bridge; +  	/* Get the I/O and memory ranges from DT */  	resource_list_for_each_entry(win, &bridge->windows) {  		switch (resource_type(win->res)) {  		case IORESOURCE_IO: -			pp->io = win->res; -			pp->io->name = "I/O"; -			pp->io_size = resource_size(pp->io); -			pp->io_bus_addr = pp->io->start - win->offset; -			pp->io_base = pci_pio_to_address(pp->io->start); -			break; -		case IORESOURCE_MEM: -			pp->mem = win->res; -			pp->mem->name = "MEM"; -			pp->mem_size = resource_size(pp->mem); -			pp->mem_bus_addr = pp->mem->start - win->offset; +			pp->io_size = resource_size(win->res); +			pp->io_bus_addr = win->res->start - win->offset; +			pp->io_base = pci_pio_to_address(win->res->start);  			break;  		case 0: -			pp->cfg = win->res; -			pp->cfg0_size = resource_size(pp->cfg) >> 1; -			pp->cfg1_size = resource_size(pp->cfg) >> 1; -			pp->cfg0_base = pp->cfg->start; -			pp->cfg1_base = pp->cfg->start + pp->cfg0_size; -			break; -		case IORESOURCE_BUS: -			pp->busn = win->res; +			dev_err(dev, "Missing *config* reg space\n"); +			pp->cfg0_size = resource_size(win->res); +			pp->cfg0_base = win->res->start; +			if (!pci->dbi_base) { +				pci->dbi_base = devm_pci_remap_cfgspace(dev, +								pp->cfg0_base, +								pp->cfg0_size); +				if (!pci->dbi_base) { +					dev_err(dev, "Error with ioremap\n"); +					return -ENOMEM; +				} +			}  			break;  		}  	} -	if (!pci->dbi_base) { -		pci->dbi_base = devm_pci_remap_cfgspace(dev, -						pp->cfg->start, -						resource_size(pp->cfg)); -		if (!pci->dbi_base) { -			dev_err(dev, "Error with ioremap\n"); -			return -ENOMEM; -		} -	} - -	pp->mem_base = pp->mem->start; -  	if (!pp->va_cfg0_base) {  		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,  					pp->cfg0_base, pp->cfg0_size); @@ -396,20 +350,13 @@ int dw_pcie_host_init(struct pcie_port *pp)  		}  	} -	if (!pp->va_cfg1_base) { -		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev, -						pp->cfg1_base, -						pp->cfg1_size); -		if (!pp->va_cfg1_base) { -			dev_err(dev, "Error with ioremap\n"); -			return -ENOMEM; -		} -	} -  	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);  	if (ret)  		pci->num_viewport = 2; +	if (pci->link_gen < 1) +		pci->link_gen = of_pci_get_max_link_speed(np); +  	if (pci_msi_enabled()) {  		/*  		 * If a specific SoC driver needs to change the @@ -440,6 +387,16 @@ int dw_pcie_host_init(struct pcie_port *pp)  				irq_set_chained_handler_and_data(pp->msi_irq,  							    dw_chained_msi_isr,  							    pp); + +			pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg, +						      sizeof(pp->msi_msg), +						      DMA_FROM_DEVICE, +						      DMA_ATTR_SKIP_CPU_SYNC); +			if (dma_mapping_error(pci->dev, pp->msi_data)) { +				dev_err(pci->dev, "Failed to map MSI data\n"); +				pp->msi_data = 0; +				goto err_free_msi; +			}  		} else {  			ret = pp->ops->msi_host_init(pp);  			if (ret < 0) @@ -447,47 +404,21 @@ int dw_pcie_host_init(struct pcie_port *pp)  		}  	} +	/* Set default bus ops */ +	bridge->ops = &dw_pcie_ops; +	bridge->child_ops = &dw_child_pcie_ops; +  	if (pp->ops->host_init) {  		ret = pp->ops->host_init(pp);  		if (ret)  			goto err_free_msi;  	} -	ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type); -	if (ret != PCIBIOS_SUCCESSFUL) { -		dev_err(pci->dev, "Failed reading PCI_HEADER_TYPE cfg space reg (ret: 0x%x)\n", -			ret); -		ret = 
pcibios_err_to_errno(ret); -		goto err_free_msi; -	} -	if (hdr_type != PCI_HEADER_TYPE_BRIDGE) { -		dev_err(pci->dev, -			"PCIe controller is not set to bridge type (hdr_type: 0x%x)!\n", -			hdr_type); -		ret = -EIO; -		goto err_free_msi; -	} -  	bridge->sysdata = pp; -	bridge->ops = &dw_pcie_ops; - -	ret = pci_scan_root_bus_bridge(bridge); -	if (ret) -		goto err_free_msi; - -	pp->root_bus = bridge->bus; - -	if (pp->ops->scan_bus) -		pp->ops->scan_bus(pp); -	pci_bus_size_bridges(pp->root_bus); -	pci_bus_assign_resources(pp->root_bus); - -	list_for_each_entry(child, &pp->root_bus->children, node) -		pcie_bus_configure_settings(child); - -	pci_bus_add_devices(pp->root_bus); -	return 0; +	ret = pci_host_probe(bridge); +	if (!ret) +		return 0;  err_free_msi:  	if (pci_msi_enabled() && !pp->ops->msi_host_init) @@ -498,125 +429,104 @@ EXPORT_SYMBOL_GPL(dw_pcie_host_init);  void dw_pcie_host_deinit(struct pcie_port *pp)  { -	pci_stop_root_bus(pp->root_bus); -	pci_remove_root_bus(pp->root_bus); +	pci_stop_root_bus(pp->bridge->bus); +	pci_remove_root_bus(pp->bridge->bus);  	if (pci_msi_enabled() && !pp->ops->msi_host_init)  		dw_pcie_free_msi(pp);  }  EXPORT_SYMBOL_GPL(dw_pcie_host_deinit); -static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus, -				     u32 devfn, int where, int size, u32 *val, -				     bool write) +static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus, +						unsigned int devfn, int where)  { -	int ret, type; -	u32 busdev, cfg_size; -	u64 cpu_addr; -	void __iomem *va_cfg_base; +	int type; +	u32 busdev; +	struct pcie_port *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); +	/* +	 * Checking whether the link is up here is a last line of defense +	 * against platforms that forward errors on the system bus as +	 * SError upon PCI configuration transactions issued when the link +	 * is down. This check is racy by definition and does not stop +	 * the system from triggering an SError if the link goes down +	 * after this check is performed. 
+	 */ +	if (!dw_pcie_link_up(pci)) +		return NULL; +  	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |  		 PCIE_ATU_FUNC(PCI_FUNC(devfn)); -	if (pci_is_root_bus(bus->parent)) { +	if (pci_is_root_bus(bus->parent))  		type = PCIE_ATU_TYPE_CFG0; -		cpu_addr = pp->cfg0_base; -		cfg_size = pp->cfg0_size; -		va_cfg_base = pp->va_cfg0_base; -	} else { -		type = PCIE_ATU_TYPE_CFG1; -		cpu_addr = pp->cfg1_base; -		cfg_size = pp->cfg1_size; -		va_cfg_base = pp->va_cfg1_base; -	} - -	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, -				  type, cpu_addr, -				  busdev, cfg_size); -	if (write) -		ret = dw_pcie_write(va_cfg_base + where, size, *val);  	else -		ret = dw_pcie_read(va_cfg_base + where, size, val); - -	if (pci->num_viewport <= 2) -		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, -					  PCIE_ATU_TYPE_IO, pp->io_base, -					  pp->io_bus_addr, pp->io_size); - -	return ret; -} - -static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, -				 u32 devfn, int where, int size, u32 *val) -{ -	if (pp->ops->rd_other_conf) -		return pp->ops->rd_other_conf(pp, bus, devfn, where, -					      size, val); +		type = PCIE_ATU_TYPE_CFG1; -	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val, -					 false); -} -static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, -				 u32 devfn, int where, int size, u32 val) -{ -	if (pp->ops->wr_other_conf) -		return pp->ops->wr_other_conf(pp, bus, devfn, where, -					      size, val); +	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, +				  type, pp->cfg0_base, +				  busdev, pp->cfg0_size); -	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val, -					 true); +	return pp->va_cfg0_base + where;  } -static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, -				int dev) +static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn, +				 int where, int size, u32 *val)  { +	int ret; +	struct pcie_port *pp = bus->sysdata;  	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	/* If there is no link, then there is no device */ -	if (!pci_is_root_bus(bus)) { -		if (!dw_pcie_link_up(pci)) -			return 0; -	} else if (dev > 0) -		/* Access only one slot on each root port */ -		return 0; +	ret = pci_generic_config_read(bus, devfn, where, size, val); -	return 1; +	if (!ret && pci->num_viewport <= 2) +		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, +					  PCIE_ATU_TYPE_IO, pp->io_base, +					  pp->io_bus_addr, pp->io_size); + +	return ret;  } -static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, -			   int size, u32 *val) +static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn, +				 int where, int size, u32 val)  { +	int ret;  	struct pcie_port *pp = bus->sysdata; +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) { -		*val = 0xffffffff; -		return PCIBIOS_DEVICE_NOT_FOUND; -	} +	ret = pci_generic_config_write(bus, devfn, where, size, val); -	if (pci_is_root_bus(bus)) -		return dw_pcie_rd_own_conf(pp, where, size, val); +	if (!ret && pci->num_viewport <= 2) +		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, +					  PCIE_ATU_TYPE_IO, pp->io_base, +					  pp->io_bus_addr, pp->io_size); -	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); +	return ret;  } -static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, -			   int where, int size, u32 val) +static struct pci_ops dw_child_pcie_ops = { +	.map_bus = dw_pcie_other_conf_map_bus, +	
.read = dw_pcie_rd_other_conf, +	.write = dw_pcie_wr_other_conf, +}; + +void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where)  {  	struct pcie_port *pp = bus->sysdata; +	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) -		return PCIBIOS_DEVICE_NOT_FOUND; - -	if (pci_is_root_bus(bus)) -		return dw_pcie_wr_own_conf(pp, where, size, val); +	if (PCI_SLOT(devfn) > 0) +		return NULL; -	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); +	return pci->dbi_base + where;  } +EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus);  static struct pci_ops dw_pcie_ops = { -	.read = dw_pcie_rd_conf, -	.write = dw_pcie_wr_conf, +	.map_bus = dw_pcie_own_conf_map_bus, +	.read = pci_generic_config_read, +	.write = pci_generic_config_write,  };  void dw_pcie_setup_rc(struct pcie_port *pp) @@ -632,18 +542,18 @@ void dw_pcie_setup_rc(struct pcie_port *pp)  	dw_pcie_setup(pci); -	if (!pp->ops->msi_host_init) { +	if (pci_msi_enabled() && !pp->ops->msi_host_init) {  		num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;  		/* Initialize IRQ Status array */  		for (ctrl = 0; ctrl < num_ctrls; ctrl++) {  			pp->irq_mask[ctrl] = ~0; -			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + +			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +  					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE), -					    4, pp->irq_mask[ctrl]); -			dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + +					    pp->irq_mask[ctrl]); +			dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +  					    (ctrl * MSI_REG_CTRL_BLOCK_SIZE), -					    4, ~0); +					    ~0);  		}  	} @@ -671,28 +581,32 @@ void dw_pcie_setup_rc(struct pcie_port *pp)  	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);  	/* -	 * If the platform provides ->rd_other_conf, it means the platform -	 * uses its own address translation component rather than ATU, so -	 * we should not program the ATU here. +	 * If the platform provides its own child bus config accesses, it means +	 * the platform uses its own address translation component rather than +	 * ATU, so we should not program the ATU here.  	 
*/ -	if (!pp->ops->rd_other_conf) { +	if (pp->bridge->child_ops == &dw_child_pcie_ops) { +		struct resource_entry *entry = +			resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); +  		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, -					  PCIE_ATU_TYPE_MEM, pp->mem_base, -					  pp->mem_bus_addr, pp->mem_size); +					  PCIE_ATU_TYPE_MEM, entry->res->start, +					  entry->res->start - entry->offset, +					  resource_size(entry->res));  		if (pci->num_viewport > 2)  			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,  						  PCIE_ATU_TYPE_IO, pp->io_base,  						  pp->io_bus_addr, pp->io_size);  	} -	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); +	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);  	/* Program correct class for RC */ -	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); +	dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI); -	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); +	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);  	val |= PORT_LOGIC_SPEED_CHANGE; -	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); +	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);  	dw_pcie_dbi_ro_wr_dis(pci);  } diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 712456f6ce36..e3e300669ed5 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -39,9 +39,7 @@ static int dw_plat_pcie_host_init(struct pcie_port *pp)  	dw_pcie_setup_rc(pp);  	dw_pcie_wait_for_link(pci); - -	if (IS_ENABLED(CONFIG_PCI_MSI)) -		dw_pcie_msi_init(pp); +	dw_pcie_msi_init(pp);  	return 0;  } diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index b723e0cc41fb..c2dea8fc97c8 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -10,6 +10,7 @@  #include <linux/delay.h>  #include <linux/of.h> +#include <linux/of_platform.h>  #include <linux/types.h>  #include "../../pci.h" @@ -166,21 +167,6 @@ void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)  }  EXPORT_SYMBOL_GPL(dw_pcie_write_dbi); -u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size) -{ -	int ret; -	u32 val; - -	if (pci->ops->read_dbi2) -		return pci->ops->read_dbi2(pci, pci->dbi_base2, reg, size); - -	ret = dw_pcie_read(pci->dbi_base2 + reg, size, &val); -	if (ret) -		dev_err(pci->dev, "read DBI address failed\n"); - -	return val; -} -  void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)  {  	int ret; @@ -195,31 +181,31 @@ void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)  		dev_err(pci->dev, "write DBI address failed\n");  } -u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size) +static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg)  {  	int ret;  	u32 val;  	if (pci->ops->read_dbi) -		return pci->ops->read_dbi(pci, pci->atu_base, reg, size); +		return pci->ops->read_dbi(pci, pci->atu_base, reg, 4); -	ret = dw_pcie_read(pci->atu_base + reg, size, &val); +	ret = dw_pcie_read(pci->atu_base + reg, 4, &val);  	if (ret)  		dev_err(pci->dev, "Read ATU address failed\n");  	return val;  } -void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val) +static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val)  {  	int ret;  	if (pci->ops->write_dbi) { -		pci->ops->write_dbi(pci, pci->atu_base, reg, 
size, val); +		pci->ops->write_dbi(pci, pci->atu_base, reg, 4, val);  		return;  	} -	ret = dw_pcie_write(pci->atu_base + reg, size, val); +	ret = dw_pcie_write(pci->atu_base + reg, 4, val);  	if (ret)  		dev_err(pci->dev, "Write ATU address failed\n");  } @@ -239,9 +225,10 @@ static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg,  	dw_pcie_writel_atu(pci, offset + reg, val);  } -static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, -					     int type, u64 cpu_addr, -					     u64 pci_addr, u32 size) +static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, u8 func_no, +					     int index, int type, +					     u64 cpu_addr, u64 pci_addr, +					     u32 size)  {  	u32 retries, val;  	u64 limit_addr = cpu_addr + size - 1; @@ -259,7 +246,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,  	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET,  				 upper_32_bits(pci_addr));  	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, -				 type); +				 type | PCIE_ATU_FUNC_NUM(func_no));  	dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2,  				 PCIE_ATU_ENABLE); @@ -278,8 +265,9 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,  	dev_err(pci->dev, "Outbound iATU is not being enabled\n");  } -void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, -			       u64 cpu_addr, u64 pci_addr, u32 size) +static void __dw_pcie_prog_outbound_atu(struct dw_pcie *pci, u8 func_no, +					int index, int type, u64 cpu_addr, +					u64 pci_addr, u32 size)  {  	u32 retries, val; @@ -287,8 +275,8 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,  		cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);  	if (pci->iatu_unroll_enabled) { -		dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr, -						 pci_addr, size); +		dw_pcie_prog_outbound_atu_unroll(pci, func_no, index, type, +						 cpu_addr, pci_addr, size);  		return;  	} @@ -304,7 +292,8 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,  			   lower_32_bits(pci_addr));  	dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET,  			   upper_32_bits(pci_addr)); -	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); +	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | +			   PCIE_ATU_FUNC_NUM(func_no));  	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE);  	/* @@ -321,6 +310,21 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,  	dev_err(pci->dev, "Outbound iATU is not being enabled\n");  } +void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, +			       u64 cpu_addr, u64 pci_addr, u32 size) +{ +	__dw_pcie_prog_outbound_atu(pci, 0, index, type, +				    cpu_addr, pci_addr, size); +} + +void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, +				  int type, u64 cpu_addr, u64 pci_addr, +				  u32 size) +{ +	__dw_pcie_prog_outbound_atu(pci, func_no, index, type, +				    cpu_addr, pci_addr, size); +} +  static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg)  {  	u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); @@ -336,8 +340,8 @@ static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg,  	dw_pcie_writel_atu(pci, offset + reg, val);  } -static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, -					   int bar, u64 cpu_addr, +static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, u8 func_no, +					   int index, int bar, u64 
cpu_addr,  					   enum dw_pcie_as_type as_type)  {  	int type; @@ -359,8 +363,10 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,  		return -EINVAL;  	} -	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type); +	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type | +				 PCIE_ATU_FUNC_NUM(func_no));  	dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, +				 PCIE_ATU_FUNC_NUM_MATCH_EN |  				 PCIE_ATU_ENABLE |  				 PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); @@ -381,14 +387,15 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,  	return -EBUSY;  } -int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, -			     u64 cpu_addr, enum dw_pcie_as_type as_type) +int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, +			     int bar, u64 cpu_addr, +			     enum dw_pcie_as_type as_type)  {  	int type;  	u32 retries, val;  	if (pci->iatu_unroll_enabled) -		return dw_pcie_prog_inbound_atu_unroll(pci, index, bar, +		return dw_pcie_prog_inbound_atu_unroll(pci, func_no, index, bar,  						       cpu_addr, as_type);  	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | @@ -407,9 +414,11 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,  		return -EINVAL;  	} -	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); -	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE -			   | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); +	dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type | +			   PCIE_ATU_FUNC_NUM(func_no)); +	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE | +			   PCIE_ATU_FUNC_NUM_MATCH_EN | +			   PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));  	/*  	 * Make sure ATU enable takes effect before any subsequent config @@ -444,7 +453,7 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, int index,  	}  	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); -	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE); +	dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~(u32)PCIE_ATU_ENABLE);  }  int dw_pcie_wait_for_link(struct dw_pcie *pci) @@ -488,50 +497,41 @@ void dw_pcie_upconfig_setup(struct dw_pcie *pci)  }  EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup); -void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen) +static void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen)  { -	u32 reg, val; +	u32 cap, ctrl2, link_speed;  	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); -	reg = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2); -	reg &= ~PCI_EXP_LNKCTL2_TLS; +	cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); +	ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2); +	ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;  	switch (pcie_link_speed[link_gen]) {  	case PCIE_SPEED_2_5GT: -		reg |= PCI_EXP_LNKCTL2_TLS_2_5GT; +		link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;  		break;  	case PCIE_SPEED_5_0GT: -		reg |= PCI_EXP_LNKCTL2_TLS_5_0GT; +		link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;  		break;  	case PCIE_SPEED_8_0GT: -		reg |= PCI_EXP_LNKCTL2_TLS_8_0GT; +		link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;  		break;  	case PCIE_SPEED_16_0GT: -		reg |= PCI_EXP_LNKCTL2_TLS_16_0GT; +		link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;  		break;  	default:  		/* Use hardware capability */ -		val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); -		val = FIELD_GET(PCI_EXP_LNKCAP_SLS, val); -		reg &= ~PCI_EXP_LNKCTL2_HASD; -		reg |= FIELD_PREP(PCI_EXP_LNKCTL2_TLS, val); +		link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap); +		ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;  		break;  	} -	
dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, reg); -} -EXPORT_SYMBOL_GPL(dw_pcie_link_set_max_speed); +	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed); -void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts) -{ -	u32 val; +	cap &= ~((u32)PCI_EXP_LNKCAP_SLS); +	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed); -	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); -	val &= ~PORT_LOGIC_N_FTS_MASK; -	val |= n_fts & PORT_LOGIC_N_FTS_MASK; -	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);  } -EXPORT_SYMBOL_GPL(dw_pcie_link_set_n_fts);  static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)  { @@ -546,32 +546,58 @@ static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)  void dw_pcie_setup(struct dw_pcie *pci)  { -	int ret;  	u32 val; -	u32 lanes;  	struct device *dev = pci->dev;  	struct device_node *np = dev->of_node; +	struct platform_device *pdev = to_platform_device(dev);  	if (pci->version >= 0x480A || (!pci->version &&  				       dw_pcie_iatu_unroll_enabled(pci))) {  		pci->iatu_unroll_enabled = true;  		if (!pci->atu_base) +			pci->atu_base = +			    devm_platform_ioremap_resource_byname(pdev, "atu"); +		if (IS_ERR(pci->atu_base))  			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;  	}  	dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ?  		"enabled" : "disabled"); +	if (pci->link_gen > 0) +		dw_pcie_link_set_max_speed(pci, pci->link_gen); -	ret = of_property_read_u32(np, "num-lanes", &lanes); -	if (ret) { -		dev_dbg(pci->dev, "property num-lanes isn't found\n"); +	/* Configure Gen1 N_FTS */ +	if (pci->n_fts[0]) { +		val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR); +		val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK); +		val |= PORT_AFR_N_FTS(pci->n_fts[0]); +		val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]); +		dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val); +	} + +	/* Configure Gen2+ N_FTS */ +	if (pci->n_fts[1]) { +		val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); +		val &= ~PORT_LOGIC_N_FTS_MASK; +		val |= pci->n_fts[pci->link_gen - 1]; +		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); +	} + +	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); +	val &= ~PORT_LINK_FAST_LINK_MODE; +	val |= PORT_LINK_DLL_LINK_EN; +	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); + +	of_property_read_u32(np, "num-lanes", &pci->num_lanes); +	if (!pci->num_lanes) { +		dev_dbg(pci->dev, "Using h/w default number of lanes\n");  		return;  	}  	/* Set the number of lanes */ -	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); +	val &= ~PORT_LINK_FAST_LINK_MODE;  	val &= ~PORT_LINK_MODE_MASK; -	switch (lanes) { +	switch (pci->num_lanes) {  	case 1:  		val |= PORT_LINK_MODE_1_LANES;  		break; @@ -585,7 +611,7 @@ void dw_pcie_setup(struct dw_pcie *pci)  		val |= PORT_LINK_MODE_8_LANES;  		break;  	default: -		dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes); +		dev_err(pci->dev, "num-lanes %u: invalid value\n", pci->num_lanes);  		return;  	}  	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); @@ -593,7 +619,7 @@ void dw_pcie_setup(struct dw_pcie *pci)  	/* Set link width speed control register */  	val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);  	val &= ~PORT_LOGIC_LINK_WIDTH_MASK; -	switch (lanes) { +	switch (pci->num_lanes) {  	case 1:  		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;  		break; diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index f911760dcc69..9d2f511f13fa 100644 --- 
a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -32,10 +32,18 @@  /* Synopsys-specific PCIe configuration registers */  #define PCIE_PORT_AFR			0x70C  #define PORT_AFR_N_FTS_MASK		GENMASK(15, 8) +#define PORT_AFR_N_FTS(n)		FIELD_PREP(PORT_AFR_N_FTS_MASK, n)  #define PORT_AFR_CC_N_FTS_MASK		GENMASK(23, 16) +#define PORT_AFR_CC_N_FTS(n)		FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, n) +#define PORT_AFR_ENTER_ASPM		BIT(30) +#define PORT_AFR_L0S_ENTRANCE_LAT_SHIFT	24 +#define PORT_AFR_L0S_ENTRANCE_LAT_MASK	GENMASK(26, 24) +#define PORT_AFR_L1_ENTRANCE_LAT_SHIFT	27 +#define PORT_AFR_L1_ENTRANCE_LAT_MASK	GENMASK(29, 27)  #define PCIE_PORT_LINK_CONTROL		0x710  #define PORT_LINK_DLL_LINK_EN		BIT(5) +#define PORT_LINK_FAST_LINK_MODE	BIT(7)  #define PORT_LINK_MODE_MASK		GENMASK(21, 16)  #define PORT_LINK_MODE(n)		FIELD_PREP(PORT_LINK_MODE_MASK, n)  #define PORT_LINK_MODE_1_LANES		PORT_LINK_MODE(0x1) @@ -80,9 +88,11 @@  #define PCIE_ATU_TYPE_IO		0x2  #define PCIE_ATU_TYPE_CFG0		0x4  #define PCIE_ATU_TYPE_CFG1		0x5 +#define PCIE_ATU_FUNC_NUM(pf)           ((pf) << 20)  #define PCIE_ATU_CR2			0x908  #define PCIE_ATU_ENABLE			BIT(31)  #define PCIE_ATU_BAR_MODE_ENABLE	BIT(30) +#define PCIE_ATU_FUNC_NUM_MATCH_EN      BIT(19)  #define PCIE_ATU_LOWER_BASE		0x90C  #define PCIE_ATU_UPPER_BASE		0x910  #define PCIE_ATU_LIMIT			0x914 @@ -95,6 +105,9 @@  #define PCIE_MISC_CONTROL_1_OFF		0x8BC  #define PCIE_DBI_RO_WR_EN		BIT(0) +#define PCIE_MSIX_DOORBELL		0x948 +#define PCIE_MSIX_DOORBELL_PF_SHIFT	24 +  #define PCIE_PL_CHK_REG_CONTROL_STATUS			0xB20  #define PCIE_PL_CHK_REG_CHK_REG_START			BIT(0)  #define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS		BIT(1) @@ -160,14 +173,7 @@ enum dw_pcie_device_mode {  };  struct dw_pcie_host_ops { -	int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); -	int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val); -	int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus, -			     unsigned int devfn, int where, int size, u32 *val); -	int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, -			     unsigned int devfn, int where, int size, u32 val);  	int (*host_init)(struct pcie_port *pp); -	void (*scan_bus)(struct pcie_port *pp);  	void (*set_num_vectors)(struct pcie_port *pp);  	int (*msi_host_init)(struct pcie_port *pp);  }; @@ -176,30 +182,20 @@ struct pcie_port {  	u64			cfg0_base;  	void __iomem		*va_cfg0_base;  	u32			cfg0_size; -	u64			cfg1_base; -	void __iomem		*va_cfg1_base; -	u32			cfg1_size;  	resource_size_t		io_base;  	phys_addr_t		io_bus_addr;  	u32			io_size; -	u64			mem_base; -	phys_addr_t		mem_bus_addr; -	u32			mem_size; -	struct resource		*cfg; -	struct resource		*io; -	struct resource		*mem; -	struct resource		*busn;  	int			irq;  	const struct dw_pcie_host_ops *ops;  	int			msi_irq;  	struct irq_domain	*irq_domain;  	struct irq_domain	*msi_domain; +	u16			msi_msg;  	dma_addr_t		msi_data; -	struct page		*msi_page;  	struct irq_chip		*msi_irq_chip;  	u32			num_vectors;  	u32			irq_mask[MAX_MSI_CTRLS]; -	struct pci_bus		*root_bus; +	struct pci_host_bridge  *bridge;  	raw_spinlock_t		lock;  	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);  }; @@ -215,10 +211,26 @@ struct dw_pcie_ep_ops {  	int	(*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,  			     enum pci_epc_irq_type type, u16 interrupt_num);  	const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep); +	/* +	 * Provide a method to implement the different func config space +	 * access for different platform, if 
each function's config space is at a +	 * different offset, return that function's offset. If the platform +	 * instead selects the function by writing a register, return 0 and +	 * implement the access in the platform driver's callback. +	 */ +	unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no); +}; + +struct dw_pcie_ep_func { +	struct list_head	list; +	u8			func_no; +	u8			msi_cap;	/* MSI capability offset */ +	u8			msix_cap;	/* MSI-X capability offset */  };  struct dw_pcie_ep {  	struct pci_epc		*epc; +	struct list_head	func_list;  	const struct dw_pcie_ep_ops *ops;  	phys_addr_t		phys_base;  	size_t			addr_size; @@ -231,8 +243,6 @@ struct dw_pcie_ep {  	u32			num_ob_windows;  	void __iomem		*msi_mem;  	phys_addr_t		msi_mem_phys; -	u8			msi_cap;	/* MSI capability offset */ -	u8			msix_cap;	/* MSI-X capability offset */  	struct pci_epf_bar	*epf_bar[PCI_STD_NUM_BARS];  }; @@ -242,8 +252,6 @@ struct dw_pcie_ops {  			    size_t size);  	void	(*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,  			     size_t size, u32 val); -	u32     (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg, -			     size_t size);  	void    (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,  			      size_t size, u32 val);  	int	(*link_up)(struct dw_pcie *pcie); @@ -263,6 +271,9 @@ struct dw_pcie {  	struct dw_pcie_ep	ep;  	const struct dw_pcie_ops *ops;  	unsigned int		version; +	int			num_lanes; +	int			link_gen; +	u8			n_fts[2];  };  #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) @@ -278,20 +289,19 @@ int dw_pcie_write(void __iomem *addr, int size, u32 val);  u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);  void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val); -u32 dw_pcie_read_dbi2(struct dw_pcie *pci, u32 reg, size_t size);  void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val); -u32 dw_pcie_read_atu(struct dw_pcie *pci, u32 reg, size_t size); -void dw_pcie_write_atu(struct dw_pcie *pci, u32 reg, size_t size, u32 val);  int dw_pcie_link_up(struct dw_pcie *pci);  void dw_pcie_upconfig_setup(struct dw_pcie *pci); -void dw_pcie_link_set_max_speed(struct dw_pcie *pci, u32 link_gen); -void dw_pcie_link_set_n_fts(struct dw_pcie *pci, u32 n_fts);  int dw_pcie_wait_for_link(struct dw_pcie *pci);  void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index,  			       int type, u64 cpu_addr, u64 pci_addr,  			       u32 size); -int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, -			     u64 cpu_addr, enum dw_pcie_as_type as_type); +void dw_pcie_prog_ep_outbound_atu(struct dw_pcie *pci, u8 func_no, int index, +				  int type, u64 cpu_addr, u64 pci_addr, +				  u32 size); +int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, u8 func_no, int index, +			     int bar, u64 cpu_addr, +			     enum dw_pcie_as_type as_type);  void dw_pcie_disable_atu(struct dw_pcie *pci, int index,  			 enum dw_pcie_region_type type);  void dw_pcie_setup(struct dw_pcie *pci); @@ -331,21 +341,6 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)  	dw_pcie_write_dbi2(pci, reg, 0x4, val);  } -static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) -{ -	return dw_pcie_read_dbi2(pci, reg, 0x4); -} - -static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val) -{ -	dw_pcie_write_atu(pci, reg, 0x4, val); -} - -static inline u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 reg) -{ -	return dw_pcie_read_atu(pci, reg, 0x4); -} -  static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)  {  	
u32 reg; @@ -376,6 +371,8 @@ void dw_pcie_setup_rc(struct pcie_port *pp);  int dw_pcie_host_init(struct pcie_port *pp);  void dw_pcie_host_deinit(struct pcie_port *pp);  int dw_pcie_allocate_domains(struct pcie_port *pp); +void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, +				       int where);  #else  static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)  { @@ -407,6 +404,12 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)  {  	return 0;  } +static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, +						     unsigned int devfn, +						     int where) +{ +	return NULL; +}  #endif  #ifdef CONFIG_PCIE_DW_EP @@ -420,7 +423,11 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,  			     u8 interrupt_num);  int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,  			     u16 interrupt_num); +int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no, +				       u16 interrupt_num);  void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar); +struct dw_pcie_ep_func * +dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);  #else  static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)  { @@ -461,8 +468,21 @@ static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,  	return 0;  } +static inline int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, +						     u8 func_no, +						     u16 interrupt_num) +{ +	return 0; +} +  static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)  {  } + +static inline struct dw_pcie_ep_func * +dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no) +{ +	return NULL; +}  #endif  #endif /* _PCIE_DESIGNWARE_H */ diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 2a2835746077..afc1abbe49aa 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -122,32 +122,37 @@ static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base,  	histb_pcie_dbi_w_mode(&pci->pp, false);  } -static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where, -				  int size, u32 *val) +static int histb_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn, +				  int where, int size, u32 *val)  { -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	int ret; +	struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); -	histb_pcie_dbi_r_mode(pp, true); -	ret = dw_pcie_read(pci->dbi_base + where, size, val); -	histb_pcie_dbi_r_mode(pp, false); +	if (PCI_SLOT(devfn)) { +		*val = ~0; +		return PCIBIOS_DEVICE_NOT_FOUND; +	} -	return ret; +	*val = dw_pcie_read_dbi(pci, where, size); +	return PCIBIOS_SUCCESSFUL;  } -static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where, -				  int size, u32 val) +static int histb_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn, +				  int where, int size, u32 val)  { -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	int ret; +	struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); -	histb_pcie_dbi_w_mode(pp, true); -	ret = dw_pcie_write(pci->dbi_base + where, size, val); -	histb_pcie_dbi_w_mode(pp, false); +	if (PCI_SLOT(devfn)) +		return PCIBIOS_DEVICE_NOT_FOUND; -	return ret; +	dw_pcie_write_dbi(pci, where, size, val); +	return PCIBIOS_SUCCESSFUL;  } +static struct pci_ops histb_pci_ops = { +	.read = histb_pcie_rd_own_conf, +	.write = histb_pcie_wr_own_conf, +}; +  static int histb_pcie_link_up(struct dw_pcie *pci)  {  	struct histb_pcie 
*hipcie = to_histb_pcie(pci); @@ -194,17 +199,15 @@ static int histb_pcie_establish_link(struct pcie_port *pp)  static int histb_pcie_host_init(struct pcie_port *pp)  { -	histb_pcie_establish_link(pp); +	pp->bridge->ops = &histb_pci_ops; -	if (IS_ENABLED(CONFIG_PCI_MSI)) -		dw_pcie_msi_init(pp); +	histb_pcie_establish_link(pp); +	dw_pcie_msi_init(pp);  	return 0;  }  static const struct dw_pcie_host_ops histb_pcie_host_ops = { -	.rd_own_conf = histb_pcie_rd_own_conf, -	.wr_own_conf = histb_pcie_wr_own_conf,  	.host_init = histb_pcie_host_init,  }; diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index c3b3a1d162b5..5650cb78acba 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -67,14 +67,9 @@ struct intel_pcie_port {  	void __iomem		*app_base;  	struct gpio_desc	*reset_gpio;  	u32			rst_intrvl; -	u32			max_speed; -	u32			link_gen; -	u32			max_width; -	u32			n_fts;  	struct clk		*core_clk;  	struct reset_control	*core_rst;  	struct phy		*phy; -	u8			pcie_cap_ofst;  };  static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val) @@ -134,11 +129,7 @@ static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)  static void intel_pcie_link_setup(struct intel_pcie_port *lpp)  {  	u32 val; -	u8 offset = lpp->pcie_cap_ofst; - -	val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCAP); -	lpp->max_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, val); -	lpp->max_width = FIELD_GET(PCI_EXP_LNKCAP_MLW, val); +	u8 offset = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);  	val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL); @@ -146,41 +137,29 @@ static void intel_pcie_link_setup(struct intel_pcie_port *lpp)  	pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);  } -static void intel_pcie_port_logic_setup(struct intel_pcie_port *lpp) +static void intel_pcie_init_n_fts(struct dw_pcie *pci)  { -	u32 val, mask; - -	switch (pcie_link_speed[lpp->max_speed]) { -	case PCIE_SPEED_8_0GT: -		lpp->n_fts = PORT_AFR_N_FTS_GEN3; +	switch (pci->link_gen) { +	case 3: +		pci->n_fts[1] = PORT_AFR_N_FTS_GEN3;  		break; -	case PCIE_SPEED_16_0GT: -		lpp->n_fts = PORT_AFR_N_FTS_GEN4; +	case 4: +		pci->n_fts[1] = PORT_AFR_N_FTS_GEN4;  		break;  	default: -		lpp->n_fts = PORT_AFR_N_FTS_GEN12_DFT; +		pci->n_fts[1] = PORT_AFR_N_FTS_GEN12_DFT;  		break;  	} - -	mask = PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK; -	val = FIELD_PREP(PORT_AFR_N_FTS_MASK, lpp->n_fts) | -	       FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, lpp->n_fts); -	pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_AFR, mask, val); - -	/* Port Link Control Register */ -	pcie_rc_cfg_wr_mask(lpp, PCIE_PORT_LINK_CONTROL, PORT_LINK_DLL_LINK_EN, -			    PORT_LINK_DLL_LINK_EN); +	pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;  }  static void intel_pcie_rc_setup(struct intel_pcie_port *lpp)  {  	intel_pcie_ltssm_disable(lpp);  	intel_pcie_link_setup(lpp); +	intel_pcie_init_n_fts(&lpp->pci);  	dw_pcie_setup_rc(&lpp->pci.pp);  	dw_pcie_upconfig_setup(&lpp->pci); -	intel_pcie_port_logic_setup(lpp); -	dw_pcie_link_set_max_speed(&lpp->pci, lpp->link_gen); -	dw_pcie_link_set_n_fts(&lpp->pci, lpp->n_fts);  }  static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp) @@ -275,20 +254,11 @@ static int intel_pcie_get_resources(struct platform_device *pdev)  		return ret;  	} -	ret = device_property_match_string(dev, "device_type", "pci"); -	if (ret) { -		dev_err(dev, "Failed to find pci device type: %d\n", ret); -		return ret; -	} -  	ret = device_property_read_u32(dev, "reset-assert-ms",  	
			       &lpp->rst_intrvl);  	if (ret)  		lpp->rst_intrvl = RESET_INTERVAL_MS; -	ret = of_pci_get_max_link_speed(dev->of_node); -	lpp->link_gen = ret < 0 ? 0 : ret; -  	lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app");  	if (IS_ERR(lpp->app_base))  		return PTR_ERR(lpp->app_base); @@ -313,8 +283,9 @@ static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)  {  	u32 value;  	int ret; +	struct dw_pcie *pci = &lpp->pci; -	if (pcie_link_speed[lpp->max_speed] < PCIE_SPEED_8_0GT) +	if (pci->link_gen < 3)  		return 0;  	/* Send PME_TURN_OFF message */ @@ -343,7 +314,6 @@ static void intel_pcie_turn_off(struct intel_pcie_port *lpp)  static int intel_pcie_host_setup(struct intel_pcie_port *lpp)  { -	struct device *dev = lpp->pci.dev;  	int ret;  	intel_pcie_core_rst_assert(lpp); @@ -361,17 +331,6 @@ static int intel_pcie_host_setup(struct intel_pcie_port *lpp)  		goto clk_err;  	} -	if (!lpp->pcie_cap_ofst) { -		ret = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP); -		if (!ret) { -			ret = -ENXIO; -			dev_err(dev, "Invalid PCIe capability offset\n"); -			goto app_init_err; -		} - -		lpp->pcie_cap_ofst = ret; -	} -  	intel_pcie_rc_setup(lpp);  	ret = intel_pcie_app_logic_setup(lpp);  	if (ret) diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index e496f51e0152..d0a6a2dee6f5 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -330,34 +330,37 @@ static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie,  	kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR);  } -static int kirin_pcie_rd_own_conf(struct pcie_port *pp, +static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,  				  int where, int size, u32 *val)  { -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); -	int ret; +	struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); -	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); -	ret = dw_pcie_read(pci->dbi_base + where, size, val); -	kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); +	if (PCI_SLOT(devfn)) { +		*val = ~0; +		return PCIBIOS_DEVICE_NOT_FOUND; +	} -	return ret; +	*val = dw_pcie_read_dbi(pci, where, size); +	return PCIBIOS_SUCCESSFUL;  } -static int kirin_pcie_wr_own_conf(struct pcie_port *pp, +static int kirin_pcie_wr_own_conf(struct pci_bus *bus, unsigned int devfn,  				  int where, int size, u32 val)  { -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -	struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); -	int ret; +	struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata); -	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); -	ret = dw_pcie_write(pci->dbi_base + where, size, val); -	kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); +	if (PCI_SLOT(devfn)) +		return PCIBIOS_DEVICE_NOT_FOUND; -	return ret; +	dw_pcie_write_dbi(pci, where, size, val); +	return PCIBIOS_SUCCESSFUL;  } +static struct pci_ops kirin_pci_ops = { +	.read = kirin_pcie_rd_own_conf, +	.write = kirin_pcie_wr_own_conf, +}; +  static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,  			       u32 reg, size_t size)  { @@ -423,10 +426,10 @@ static int kirin_pcie_establish_link(struct pcie_port *pp)  static int kirin_pcie_host_init(struct pcie_port *pp)  { -	kirin_pcie_establish_link(pp); +	pp->bridge->ops = &kirin_pci_ops; -	if (IS_ENABLED(CONFIG_PCI_MSI)) -		dw_pcie_msi_init(pp); +	kirin_pcie_establish_link(pp); +	dw_pcie_msi_init(pp);  	return 0;  } @@ -438,8 +441,6 @@ static const 
struct dw_pcie_ops kirin_dw_pcie_ops = {  };  static const struct dw_pcie_host_ops kirin_pcie_host_ops = { -	.rd_own_conf = kirin_pcie_rd_own_conf, -	.wr_own_conf = kirin_pcie_wr_own_conf,  	.host_init = kirin_pcie_host_init,  }; @@ -507,8 +508,12 @@ static int kirin_pcie_probe(struct platform_device *pdev)  	kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,  						      "reset-gpios", 0); -	if (kirin_pcie->gpio_id_reset < 0) +	if (kirin_pcie->gpio_id_reset == -EPROBE_DEFER) { +		return -EPROBE_DEFER; +	} else if (!gpio_is_valid(kirin_pcie->gpio_id_reset)) { +		dev_err(dev, "unable to get a valid gpio pin\n");  		return -ENODEV; +	}  	ret = kirin_pcie_power_on(kirin_pcie);  	if (ret) diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 3aac77a295ba..b4761640ffd9 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -67,10 +67,6 @@  #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1		0x81c  #define CFG_BRIDGE_SB_INIT			BIT(0) -#define PCIE20_CAP				0x70 -#define PCIE20_DEVICE_CONTROL2_STATUS2		(PCIE20_CAP + PCI_EXP_DEVCTL2) -#define PCIE20_CAP_LINK_CAPABILITIES		(PCIE20_CAP + PCI_EXP_LNKCAP) -#define PCIE20_CAP_LINK_1			(PCIE20_CAP + 0x14)  #define PCIE_CAP_LINK1_VAL			0x2FD7F  #define PCIE20_PARF_Q2A_FLUSH			0x1AC @@ -193,7 +189,6 @@ struct qcom_pcie {  	struct phy *phy;  	struct gpio_desc *reset;  	const struct qcom_pcie_ops *ops; -	int gen;  };  #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev) @@ -302,6 +297,9 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)  	reset_control_assert(res->por_reset);  	reset_control_assert(res->ext_reset);  	reset_control_assert(res->phy_reset); + +	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); +  	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);  } @@ -314,6 +312,16 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)  	u32 val;  	int ret; +	/* reset the PCIe interface, as U-Boot can leave it in an undefined state */ +	reset_control_assert(res->pci_reset); +	reset_control_assert(res->axi_reset); +	reset_control_assert(res->ahb_reset); +	reset_control_assert(res->por_reset); +	reset_control_assert(res->ext_reset); +	reset_control_assert(res->phy_reset); + +	writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL); +  	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);  	if (ret < 0) {  		dev_err(dev, "cannot enable regulators\n"); @@ -394,12 +402,6 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)  	/* wait for clock acquisition */  	usleep_range(1000, 1500); -	if (pcie->gen == 1) { -		val = readl(pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2); -		val |= PCI_EXP_LNKSTA_CLS_2_5GB; -		writel(val, pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2); -	} -  	/* Set the Max TLP size to 2K, instead of using default of 4K */  	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,  	       pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); @@ -1017,6 +1019,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)  	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;  	struct dw_pcie *pci = pcie->pci;  	struct device *dev = pci->dev; +	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);  	int i, ret;  	u32 val; @@ -1092,14 +1095,14 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)  	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);  	writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); -	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1); +	writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + 
offset + PCI_EXP_SLTCAP); -	val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); +	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);  	val &= ~PCI_EXP_LNKCAP_ASPMS; -	writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); +	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP); -	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + -		PCIE20_DEVICE_CONTROL2_STATUS2); +	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + +		PCI_EXP_DEVCTL2);  	return 0; @@ -1252,7 +1255,8 @@ static void qcom_pcie_post_deinit_2_7_0(struct qcom_pcie *pcie)  static int qcom_pcie_link_up(struct dw_pcie *pci)  { -	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); +	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); +	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);  	return !!(val & PCI_EXP_LNKSTA_DLLLA);  } @@ -1280,9 +1284,7 @@ static int qcom_pcie_host_init(struct pcie_port *pp)  	}  	dw_pcie_setup_rc(pp); - -	if (IS_ENABLED(CONFIG_PCI_MSI)) -		dw_pcie_msi_init(pp); +	dw_pcie_msi_init(pp);  	qcom_ep_reset_deassert(pcie); @@ -1399,10 +1401,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)  		goto err_pm_runtime_put;  	} -	pcie->gen = of_pci_get_max_link_speed(pdev->dev.of_node); -	if (pcie->gen < 0) -		pcie->gen = 2; -  	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");  	if (IS_ERR(pcie->parf)) {  		ret = PTR_ERR(pcie->parf); diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index 62846562da0b..e348225f651f 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -26,7 +26,6 @@ struct spear13xx_pcie {  	void __iomem		*app_base;  	struct phy		*phy;  	struct clk		*clk; -	bool			is_gen1;  };  struct pcie_app_reg { @@ -65,8 +64,6 @@ struct pcie_app_reg {  /* CR6 */  #define MSI_CTRL_INT				(1 << 26) -#define EXP_CAP_ID_OFFSET			0x70 -  #define to_spear13xx_pcie(x)	dev_get_drvdata((x)->dev)  static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie) @@ -75,7 +72,7 @@ static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)  	struct pcie_port *pp = &pci->pp;  	struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;  	u32 val; -	u32 exp_cap_off = EXP_CAP_ID_OFFSET; +	u32 exp_cap_off = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);  	if (dw_pcie_link_up(pci)) {  		dev_err(pci->dev, "link already up\n"); @@ -89,36 +86,12 @@ static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie)  	 * default value in capability register is 512 bytes. So force  	 * it to 128 here.  	 
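The qcom and spear13xx hunks above stop hardcoding the PCI Express capability at offset 0x70 and look it up at run time instead. For readers unfamiliar with that lookup, here is a minimal, standalone sketch of the standard capability-list walk that a helper such as dw_pcie_find_capability() performs; the buffer-based interface and the test values are illustrative only, not the kernel implementation.

#include <stdint.h>

#define PCI_STATUS		0x06
#define PCI_STATUS_CAP_LIST	0x10
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_EXP		0x10

/* cfg is a 256-byte snapshot of a function's config space */
static uint8_t find_capability(const uint8_t cfg[256], uint8_t cap_id)
{
	uint16_t status = cfg[PCI_STATUS] | (cfg[PCI_STATUS + 1] << 8);
	uint8_t pos, ttl = 48;			/* guard against malformed loops */

	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	pos = cfg[PCI_CAPABILITY_LIST] & ~3;	/* head of the list */
	while (pos && ttl--) {
		if (cfg[pos] == cap_id)		/* capability ID byte */
			return pos;		/* e.g. 0x70 on these controllers */
		pos = cfg[pos + 1] & ~3;	/* next-pointer byte */
	}
	return 0;
}

int main(void)
{
	uint8_t cfg[256] = { 0 };

	cfg[PCI_STATUS] = PCI_STATUS_CAP_LIST;	/* capability list present */
	cfg[PCI_CAPABILITY_LIST] = 0x70;	/* head of the list */
	cfg[0x70] = PCI_CAP_ID_EXP;		/* PCI Express capability */
	cfg[0x71] = 0x00;			/* end of list */

	return find_capability(cfg, PCI_CAP_ID_EXP) == 0x70 ? 0 : 1;
}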
*/ -	dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val); +	val = dw_pcie_readw_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL);  	val &= ~PCI_EXP_DEVCTL_READRQ; -	dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val); - -	dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A); -	dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80); +	dw_pcie_writew_dbi(pci, exp_cap_off + PCI_EXP_DEVCTL, val); -	/* -	 * if is_gen1 is set then handle it, so that some buggy card -	 * also works -	 */ -	if (spear13xx_pcie->is_gen1) { -		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP, -			     4, &val); -		if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { -			val &= ~((u32)PCI_EXP_LNKCAP_SLS); -			val |= PCI_EXP_LNKCAP_SLS_2_5GB; -			dw_pcie_write(pci->dbi_base + exp_cap_off + -				      PCI_EXP_LNKCAP, 4, val); -		} - -		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2, -			     2, &val); -		if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { -			val &= ~((u32)PCI_EXP_LNKCAP_SLS); -			val |= PCI_EXP_LNKCAP_SLS_2_5GB; -			dw_pcie_write(pci->dbi_base + exp_cap_off + -				      PCI_EXP_LNKCTL2, 2, val); -		} -	} +	dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, 0x104A); +	dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, 0xCD80);  	/* enable ltssm */  	writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID) @@ -278,7 +251,7 @@ static int spear13xx_pcie_probe(struct platform_device *pdev)  	spear13xx_pcie->app_base = pci->dbi_base + 0x2000;  	if (of_property_read_bool(np, "st,pcie-is-gen1")) -		spear13xx_pcie->is_gen1 = true; +		pci->link_gen = 1;  	platform_set_drvdata(pdev, spear13xx_pcie); diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 70498689d0c0..f920e7efe118 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -183,19 +183,7 @@  #define EVENT_COUNTER_GROUP_SEL_SHIFT	24  #define EVENT_COUNTER_GROUP_5		0x5 -#define PORT_LOGIC_ACK_F_ASPM_CTRL			0x70C -#define ENTER_ASPM					BIT(30) -#define L0S_ENTRANCE_LAT_SHIFT				24 -#define L0S_ENTRANCE_LAT_MASK				GENMASK(26, 24) -#define L1_ENTRANCE_LAT_SHIFT				27 -#define L1_ENTRANCE_LAT_MASK				GENMASK(29, 27) -#define N_FTS_SHIFT					8 -#define N_FTS_MASK					GENMASK(7, 0)  #define N_FTS_VAL					52 - -#define PORT_LOGIC_GEN2_CTRL				0x80C -#define PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE	BIT(17) -#define FTS_MASK					GENMASK(7, 0)  #define FTS_VAL						52  #define PORT_LOGIC_MSI_CTRL_INT_0_EN		0x828 @@ -296,7 +284,6 @@ struct tegra_pcie_dw {  	u8 init_link_width;  	u32 msi_ctrl_int;  	u32 num_lanes; -	u32 max_speed;  	u32 cid;  	u32 cfg_link_cap_l1sub;  	u32 pcie_cap_base; @@ -401,9 +388,9 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)  			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;  			appl_writel(pcie, val, APPL_CAR_RESET_OVRD); -			val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL); -			val |= PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE; -			dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val); +			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); +			val |= PORT_LOGIC_SPEED_CHANGE; +			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);  		}  	} @@ -568,42 +555,44 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)  	return IRQ_HANDLED;  } -static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size, -				     u32 *val) +static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where, +				     int size, u32 *val)  { -	
struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -  	/*  	 * This is an endpoint mode specific register happen to appear even  	 * when controller is operating in root port mode and system hangs  	 * when it is accessed with link being in ASPM-L1 state.  	 * So skip accessing it altogether  	 */ -	if (where == PORT_LOGIC_MSIX_DOORBELL) { +	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {  		*val = 0x00000000;  		return PCIBIOS_SUCCESSFUL;  	} -	return dw_pcie_read(pci->dbi_base + where, size, val); +	return pci_generic_config_read(bus, devfn, where, size, val);  } -static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size, -				     u32 val) +static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where, +				     int size, u32 val)  { -	struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -  	/*  	 * This is an endpoint mode specific register happen to appear even  	 * when controller is operating in root port mode and system hangs  	 * when it is accessed with link being in ASPM-L1 state.  	 * So skip accessing it altogether  	 */ -	if (where == PORT_LOGIC_MSIX_DOORBELL) +	if (!PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)  		return PCIBIOS_SUCCESSFUL; -	return dw_pcie_write(pci->dbi_base + where, size, val); +	return pci_generic_config_write(bus, devfn, where, size, val);  } +static struct pci_ops tegra_pci_ops = { +	.map_bus = dw_pcie_own_conf_map_bus, +	.read = tegra_pcie_dw_rd_own_conf, +	.write = tegra_pcie_dw_wr_own_conf, +}; +  #if defined(CONFIG_PCIEASPM)  static void disable_aspm_l11(struct tegra_pcie_dw *pcie)  { @@ -692,30 +681,23 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)  	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);  	/* Program L0s and L1 entrance latencies */ -	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL); -	val &= ~L0S_ENTRANCE_LAT_MASK; -	val |= (pcie->aspm_l0s_enter_lat << L0S_ENTRANCE_LAT_SHIFT); -	val |= ENTER_ASPM; -	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val); +	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR); +	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK; +	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT); +	val |= PORT_AFR_ENTER_ASPM; +	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);  } -static int init_debugfs(struct tegra_pcie_dw *pcie) +static void init_debugfs(struct tegra_pcie_dw *pcie)  { -	struct dentry *d; - -	d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", -					pcie->debugfs, aspm_state_cnt); -	if (IS_ERR_OR_NULL(d)) -		dev_err(pcie->dev, -			"Failed to create debugfs file \"aspm_state_cnt\"\n"); - -	return 0; +	debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs, +				    aspm_state_cnt);  }  #else  static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }  static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }  static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; } -static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; } +static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }  #endif  static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp) @@ -827,26 +809,24 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)  	/* Program init preset */  	for (i = 0; i < pcie->num_lanes; i++) { -		dw_pcie_read(pci->dbi_base + CAP_SPCIE_CAP_OFF -				 + (i * 2), 2, &val); +		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));  		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;  		val |= 
GEN3_GEN4_EQ_PRESET_INIT;  		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;  		val |= (GEN3_GEN4_EQ_PRESET_INIT <<  			   CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT); -		dw_pcie_write(pci->dbi_base + CAP_SPCIE_CAP_OFF -				 + (i * 2), 2, val); +		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);  		offset = dw_pcie_find_ext_capability(pci,  						     PCI_EXT_CAP_ID_PL_16GT) +  				PCI_PL_16GT_LE_CTRL; -		dw_pcie_read(pci->dbi_base + offset + i, 1, &val); +		val = dw_pcie_readb_dbi(pci, offset + i);  		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;  		val |= GEN3_GEN4_EQ_PRESET_INIT;  		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;  		val |= (GEN3_GEN4_EQ_PRESET_INIT <<  			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT); -		dw_pcie_write(pci->dbi_base + offset + i, 1, val); +		dw_pcie_writeb_dbi(pci, offset + i, val);  	}  	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); @@ -892,17 +872,6 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)  	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); -	/* Configure FTS */ -	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL); -	val &= ~(N_FTS_MASK << N_FTS_SHIFT); -	val |= N_FTS_VAL << N_FTS_SHIFT; -	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val); - -	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL); -	val &= ~FTS_MASK; -	val |= FTS_VAL; -	dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val); -  	/* Enable as 0xFFFF0001 response for CRS */  	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);  	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT); @@ -910,16 +879,6 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)  		AMBA_ERROR_RESPONSE_CRS_SHIFT);  	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val); -	/* Configure Max Speed from DT */ -	if (pcie->max_speed && pcie->max_speed != -EINVAL) { -		val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + -					PCI_EXP_LNKCAP); -		val &= ~PCI_EXP_LNKCAP_SLS; -		val |= pcie->max_speed; -		dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, -				   val); -	} -  	/* Configure Max lane width from DT */  	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);  	val &= ~PCI_EXP_LNKCAP_MLW; @@ -970,6 +929,8 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp)  	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);  	u32 val, tmp, offset, speed; +	pp->bridge->ops = &tegra_pci_ops; +  	tegra_pcie_prepare_host(pp);  	if (dw_pcie_wait_for_link(pci)) { @@ -1057,8 +1018,6 @@ static const struct dw_pcie_ops tegra_dw_pcie_ops = {  };  static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = { -	.rd_own_conf = tegra_pcie_dw_rd_own_conf, -	.wr_own_conf = tegra_pcie_dw_wr_own_conf,  	.host_init = tegra_pcie_dw_host_init,  	.set_num_vectors = tegra_pcie_set_msi_vec_num,  }; @@ -1129,8 +1088,6 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)  		return ret;  	} -	pcie->max_speed = of_pci_get_max_link_speed(np); -  	ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);  	if (ret) {  		dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret); @@ -1262,9 +1219,9 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)  	 * 5.2 Link State Power Management (Page #428).  	 
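The Kirin and Tegra own-config-space ops converted above both filter on PCI_SLOT(devfn) so that only device 0 on the root bus reaches the DBI registers. As a reminder of how devfn packs the device and function numbers, a small standalone check follows; the macro definitions mirror the standard PCI ones and the example values are arbitrary.

#include <assert.h>

#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

int main(void)
{
	unsigned int devfn = PCI_DEVFN(0, 0);	/* the root port itself */

	assert(PCI_SLOT(devfn) == 0);		/* passes the filter above */

	devfn = PCI_DEVFN(3, 2);		/* any other device number */
	assert(PCI_SLOT(devfn) == 3 && PCI_FUNC(devfn) == 2);
	/* here PCI_SLOT(devfn) != 0, so the ops return PCIBIOS_DEVICE_NOT_FOUND */
	return 0;
}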
*/ -	list_for_each_entry(child, &pp->root_bus->children, node) { +	list_for_each_entry(child, &pp->bridge->bus->children, node) {  		/* Bring downstream devices to D0 if they are not already in */ -		if (child->parent == pp->root_bus) { +		if (child->parent == pp->bridge->bus) {  			root_bus = child;  			break;  		} @@ -1641,10 +1598,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)  	}  	pcie->debugfs = debugfs_create_dir(name, NULL); -	if (!pcie->debugfs) -		dev_err(dev, "Failed to create debugfs\n"); -	else -		init_debugfs(pcie); +	init_debugfs(pcie);  	return ret; @@ -1817,27 +1771,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)  	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;  	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); -	/* Configure N_FTS & FTS */ -	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL); -	val &= ~(N_FTS_MASK << N_FTS_SHIFT); -	val |= N_FTS_VAL << N_FTS_SHIFT; -	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val); - -	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL); -	val &= ~FTS_MASK; -	val |= FTS_VAL; -	dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val); - -	/* Configure Max Speed from DT */ -	if (pcie->max_speed && pcie->max_speed != -EINVAL) { -		val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + -					PCI_EXP_LNKCAP); -		val &= ~PCI_EXP_LNKCAP_SLS; -		val |= pcie->max_speed; -		dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, -				   val); -	} -  	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,  						      PCI_CAP_ID_EXP);  	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); @@ -2066,6 +1999,9 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)  	pci = &pcie->pci;  	pci->dev = &pdev->dev;  	pci->ops = &tegra_dw_pcie_ops; +	pci->n_fts[0] = N_FTS_VAL; +	pci->n_fts[1] = FTS_VAL; +  	pp = &pci->pp;  	pcie->dev = &pdev->dev;  	pcie->mode = (enum dw_pcie_device_mode)data->mode; diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index 3a7f403b57b8..48176265c867 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -322,8 +322,7 @@ static int uniphier_pcie_host_init(struct pcie_port *pp)  	if (ret)  		return ret; -	if (IS_ENABLED(CONFIG_PCI_MSI)) -		dw_pcie_msi_init(pp); +	dw_pcie_msi_init(pp);  	return 0;  } diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c index 3adec419a45b..a2632d02ce8f 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c @@ -480,7 +480,6 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)  	struct device *dev = &pcie->pdev->dev;  	struct device_node *node = dev->of_node;  	struct mobiveil_root_port *rp = &pcie->rp; -	int ret;  	/* setup INTx */  	rp->intx_domain = irq_domain_add_linear(node, PCI_NUM_INTX, @@ -494,11 +493,7 @@ static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie)  	raw_spin_lock_init(&rp->intx_mask_lock);  	/* setup MSI */ -	ret = mobiveil_allocate_msi_domains(pcie); -	if (ret) -		return ret; - -	return 0; +	return mobiveil_allocate_msi_domains(pcie);  }  static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie) diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 1559f79e63b6..0be485a25327 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -9,11 +9,12 @@  
 */  #include <linux/delay.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h>  #include <linux/interrupt.h>  #include <linux/irq.h>  #include <linux/irqdomain.h>  #include <linux/kernel.h> +#include <linux/module.h>  #include <linux/pci.h>  #include <linux/init.h>  #include <linux/phy/phy.h> @@ -251,6 +252,25 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)  	}  } +static void advk_pcie_issue_perst(struct advk_pcie *pcie) +{ +	u32 reg; + +	if (!pcie->reset_gpio) +		return; + +	/* PERST does not work for some cards when link training is enabled */ +	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); +	reg &= ~LINK_TRAINING_EN; +	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + +	/* 10ms delay is needed for some cards */ +	dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n"); +	gpiod_set_value_cansleep(pcie->reset_gpio, 1); +	usleep_range(10000, 11000); +	gpiod_set_value_cansleep(pcie->reset_gpio, 0); +} +  static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen)  {  	int ret, neg_gen; @@ -299,6 +319,21 @@ static void advk_pcie_train_link(struct advk_pcie *pcie)  	int neg_gen = -1, gen;  	/* +	 * Reset PCIe card via PERST# signal. Some cards are not detected +	 * during link training when they are in some non-initial state. +	 */ +	advk_pcie_issue_perst(pcie); + +	/* +	 * PERST# signal could have been asserted by pinctrl subsystem before +	 * probe() callback has been called or issued explicitly by reset gpio +	 * function advk_pcie_issue_perst(), making the endpoint going into +	 * fundamental reset. As required by PCI Express spec a delay for at +	 * least 100ms after such a reset before link training is needed. +	 */ +	msleep(PCI_PM_D3COLD_WAIT); + +	/*  	 * Try link training at link gen specified by device tree property  	 * 'max-link-speed'. If this fails, iteratively train at lower gen.  	 */ @@ -330,31 +365,10 @@ err:  	dev_err(dev, "link never came up\n");  } -static void advk_pcie_issue_perst(struct advk_pcie *pcie) -{ -	u32 reg; - -	if (!pcie->reset_gpio) -		return; - -	/* PERST does not work for some cards when link training is enabled */ -	reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); -	reg &= ~LINK_TRAINING_EN; -	advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); - -	/* 10ms delay is needed for some cards */ -	dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n"); -	gpiod_set_value_cansleep(pcie->reset_gpio, 1); -	usleep_range(10000, 11000); -	gpiod_set_value_cansleep(pcie->reset_gpio, 0); -} -  static void advk_pcie_setup_hw(struct advk_pcie *pcie)  {  	u32 reg; -	advk_pcie_issue_perst(pcie); -  	/* Enable TX */  	reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);  	reg |= PCIE_CORE_REF_CLK_TX_ENABLE; @@ -431,15 +445,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)  	reg |= PIO_CTRL_ADDR_WIN_DISABLE;  	advk_writel(pcie, reg, PIO_CTRL); -	/* -	 * PERST# signal could have been asserted by pinctrl subsystem before -	 * probe() callback has been called or issued explicitly by reset gpio -	 * function advk_pcie_issue_perst(), making the endpoint going into -	 * fundamental reset. As required by PCI Express spec a delay for at -	 * least 100ms after such a reset before link training is needed. -	 */ -	msleep(PCI_PM_D3COLD_WAIT); -  	advk_pcie_train_link(pcie);  	/* @@ -607,7 +612,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {   * Initialize the configuration space of the PCI-to-PCI bridge   * associated with the given PCIe interface.   
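The aardvark reordering above moves the PERST# pulse, and the wait that must follow any fundamental reset, in front of link training. A compact sketch of that ordering and the two delays involved; the register/GPIO and training helpers are placeholders, not the driver's functions, and only the delay values are taken from the comments above.

#include <time.h>

#define PERST_PULSE_MS		10	/* "10ms delay is needed for some cards" */
#define PCI_PM_D3COLD_WAIT	100	/* at least 100 ms from reset to link training */

static void sleep_ms(long ms)
{
	struct timespec ts = { .tv_sec = ms / 1000,
			       .tv_nsec = (ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}

/* Placeholder hooks standing in for the real register/GPIO accesses */
static void assert_perst(int on) { (void)on; }
static void start_link_training(void) { }

static void reset_then_train(void)
{
	assert_perst(1);		/* card held in fundamental reset */
	sleep_ms(PERST_PULSE_MS);
	assert_perst(0);
	sleep_ms(PCI_PM_D3COLD_WAIT);	/* settle time before training */
	start_link_training();		/* only now is training attempted */
}

int main(void)
{
	reset_then_train();
	return 0;
}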
*/ -static void advk_sw_pci_bridge_init(struct advk_pcie *pcie) +static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)  {  	struct pci_bridge_emul *bridge = &pcie->bridge; @@ -633,8 +638,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)  	bridge->data = pcie;  	bridge->ops = &advk_pci_bridge_emul_ops; -	pci_bridge_emul_init(bridge, 0); - +	return pci_bridge_emul_init(bridge, 0);  }  static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, @@ -1077,7 +1081,9 @@ static int advk_pcie_enable_phy(struct advk_pcie *pcie)  	}  	ret = phy_power_on(pcie->phy); -	if (ret) { +	if (ret == -EOPNOTSUPP) { +		dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n"); +	} else if (ret) {  		phy_exit(pcie->phy);  		return ret;  	} @@ -1122,6 +1128,7 @@ static int advk_pcie_probe(struct platform_device *pdev)  	pcie = pci_host_bridge_priv(bridge);  	pcie->pdev = pdev; +	platform_set_drvdata(pdev, pcie);  	pcie->base = devm_platform_ioremap_resource(pdev, 0);  	if (IS_ERR(pcie->base)) @@ -1167,7 +1174,11 @@ static int advk_pcie_probe(struct platform_device *pdev)  	advk_pcie_setup_hw(pcie); -	advk_sw_pci_bridge_init(pcie); +	ret = advk_sw_pci_bridge_init(pcie); +	if (ret) { +		dev_err(dev, "Failed to register emulated root PCI bridge\n"); +		return ret; +	}  	ret = advk_pcie_init_irq_domain(pcie);  	if (ret) { @@ -1195,18 +1206,37 @@ static int advk_pcie_probe(struct platform_device *pdev)  	return 0;  } +static int advk_pcie_remove(struct platform_device *pdev) +{ +	struct advk_pcie *pcie = platform_get_drvdata(pdev); +	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); + +	pci_lock_rescan_remove(); +	pci_stop_root_bus(bridge->bus); +	pci_remove_root_bus(bridge->bus); +	pci_unlock_rescan_remove(); + +	advk_pcie_remove_msi_irq_domain(pcie); +	advk_pcie_remove_irq_domain(pcie); + +	return 0; +} +  static const struct of_device_id advk_pcie_of_match_table[] = {  	{ .compatible = "marvell,armada-3700-pcie", },  	{},  }; +MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);  static struct platform_driver advk_pcie_driver = {  	.driver = {  		.name = "advk-pcie",  		.of_match_table = advk_pcie_of_match_table, -		/* Driver unloading/unbinding currently not supported */ -		.suppress_bind_attrs = true,  	},  	.probe = advk_pcie_probe, +	.remove = advk_pcie_remove,  }; -builtin_platform_driver(advk_pcie_driver); +module_platform_driver(advk_pcie_driver); + +MODULE_DESCRIPTION("Aardvark PCIe controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 4e992403fffe..03ed5cb1c4b2 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1276,11 +1276,25 @@ static void hv_irq_unmask(struct irq_data *data)  exit_unlock:  	spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); -	if (res) { +	/* +	 * During hibernation, when a CPU is offlined, the kernel tries +	 * to move the interrupt to the remaining CPUs that haven't +	 * been offlined yet. In this case, the below hv_do_hypercall() +	 * always fails since the vmbus channel has been closed: +	 * refer to cpu_disable_common() -> fixup_irqs() -> +	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq(). +	 * +	 * Suppress the error message for hibernation because the failure +	 * during hibernation does not matter (at this time all the devices +	 * have been frozen). 
Note: the correct affinity info is still updated +	 * into the irqdata data structure in migrate_one_irq() -> +	 * irq_do_set_affinity() -> hv_set_affinity(), so later when the VM +	 * resumes, hv_pci_restore_msi_state() is able to correctly restore +	 * the interrupt with the correct affinity. +	 */ +	if (res && hbus->state != hv_pcibus_removing)  		dev_err(&hbus->hdev->device,  			"%s() failed: %#llx", __func__, res); -		return; -	}  	pci_msi_unmask_irq(data);  } @@ -3367,6 +3381,34 @@ static int hv_pci_suspend(struct hv_device *hdev)  	return 0;  } +static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg) +{ +	struct msi_desc *entry; +	struct irq_data *irq_data; + +	for_each_pci_msi_entry(entry, pdev) { +		irq_data = irq_get_irq_data(entry->irq); +		if (WARN_ON_ONCE(!irq_data)) +			return -EINVAL; + +		hv_compose_msi_msg(irq_data, &entry->msg); +	} + +	return 0; +} + +/* + * Upon resume, pci_restore_msi_state() -> ... ->  __pci_write_msi_msg() + * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V + * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg() + * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping + * Table entries. + */ +static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus) +{ +	pci_walk_bus(hbus->pci_bus, hv_pci_restore_msi_msg, NULL); +} +  static int hv_pci_resume(struct hv_device *hdev)  {  	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); @@ -3400,6 +3442,8 @@ static int hv_pci_resume(struct hv_device *hdev)  	prepopulate_bars(hbus); +	hv_pci_restore_msi_state(hbus); +  	hbus->state = hv_pcibus_installed;  	return 0;  out: diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 719c19fe2bfb..48169b1e3817 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -183,7 +183,6 @@ static int loongson_pci_probe(struct platform_device *pdev)  	struct device_node *node = dev->of_node;  	struct pci_host_bridge *bridge;  	struct resource *regs; -	int err;  	if (!node)  		return -ENODEV; @@ -222,11 +221,7 @@ static int loongson_pci_probe(struct platform_device *pdev)  	bridge->ops = &loongson_pci_ops;  	bridge->map_irq = loongson_map_irq; -	err = pci_host_probe(bridge); -	if (err) -		return err; - -	return 0; +	return pci_host_probe(bridge);  }  static struct platform_driver loongson_pci_driver = { diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index c39978b750ec..eee82838f4ba 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -12,7 +12,6 @@  #include <linux/gpio.h>  #include <linux/init.h>  #include <linux/mbus.h> -#include <linux/msi.h>  #include <linux/slab.h>  #include <linux/platform_device.h>  #include <linux/of_address.h> @@ -70,7 +69,6 @@ struct mvebu_pcie_port;  struct mvebu_pcie {  	struct platform_device *pdev;  	struct mvebu_pcie_port *ports; -	struct msi_controller *msi;  	struct resource io;  	struct resource realio;  	struct resource mem; @@ -1127,7 +1125,6 @@ static int mvebu_pcie_probe(struct platform_device *pdev)  	bridge->sysdata = pcie;  	bridge->ops = &mvebu_pcie_ops;  	bridge->align_resource = mvebu_pcie_align_resource; -	bridge->msi = pcie->msi;  	return mvebu_pci_host_probe(bridge);  } diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index c1d34353c29b..8fcabed7c6a6 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -2564,36 +2564,14 @@ 
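The Hyper-V resume path added above rebuilds every device's MSI message by walking the bus. The pci_walk_bus() callback pattern it relies on is worth spelling out: the helper applies the callback to each device at or below the given bus and stops early if the callback returns nonzero. A bare-bones sketch, with an illustrative callback body rather than the driver's:

#include <linux/pci.h>

/* Per-device callback: return 0 to keep walking, nonzero to stop early. */
static int demo_restore_one(struct pci_dev *pdev, void *arg)
{
	/* re-program whatever per-device interrupt state needs rebuilding */
	return 0;
}

/* Visits 'bus' and every bus below it, calling the callback for each device. */
static void demo_restore_all(struct pci_bus *bus)
{
	pci_walk_bus(bus, demo_restore_one, NULL);
}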
static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)  	return 0;  } -static const struct seq_operations tegra_pcie_ports_seq_ops = { +static const struct seq_operations tegra_pcie_ports_sops = {  	.start = tegra_pcie_ports_seq_start,  	.next = tegra_pcie_ports_seq_next,  	.stop = tegra_pcie_ports_seq_stop,  	.show = tegra_pcie_ports_seq_show,  }; -static int tegra_pcie_ports_open(struct inode *inode, struct file *file) -{ -	struct tegra_pcie *pcie = inode->i_private; -	struct seq_file *s; -	int err; - -	err = seq_open(file, &tegra_pcie_ports_seq_ops); -	if (err) -		return err; - -	s = file->private_data; -	s->private = pcie; - -	return 0; -} - -static const struct file_operations tegra_pcie_ports_ops = { -	.owner = THIS_MODULE, -	.open = tegra_pcie_ports_open, -	.read = seq_read, -	.llseek = seq_lseek, -	.release = seq_release, -}; +DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);  static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)  { @@ -2601,24 +2579,12 @@ static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)  	pcie->debugfs = NULL;  } -static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie) +static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)  { -	struct dentry *file; -  	pcie->debugfs = debugfs_create_dir("pcie", NULL); -	if (!pcie->debugfs) -		return -ENOMEM; -	file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, -				   pcie, &tegra_pcie_ports_ops); -	if (!file) -		goto remove; - -	return 0; - -remove: -	tegra_pcie_debugfs_exit(pcie); -	return -ENOMEM; +	debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie, +			    &tegra_pcie_ports_fops);  }  static int tegra_pcie_probe(struct platform_device *pdev) @@ -2672,11 +2638,8 @@ static int tegra_pcie_probe(struct platform_device *pdev)  		goto pm_runtime_put;  	} -	if (IS_ENABLED(CONFIG_DEBUG_FS)) { -		err = tegra_pcie_debugfs_init(pcie); -		if (err < 0) -			dev_err(dev, "failed to setup debugfs: %d\n", err); -	} +	if (IS_ENABLED(CONFIG_DEBUG_FS)) +		tegra_pcie_debugfs_init(pcie);  	return 0; diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c index 1f54334f09f7..154a5398633c 100644 --- a/drivers/pci/controller/pci-v3-semi.c +++ b/drivers/pci/controller/pci-v3-semi.c @@ -658,7 +658,6 @@ static int v3_get_dma_range_config(struct v3_pci *v3,  	default:  		dev_err(v3->dev, "illegal dma memory chunk size\n");  		return -EINVAL; -		break;  	}  	val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE;  	*pci_map = val; diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index 02271c6d17a1..2470782cb01a 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -493,8 +493,8 @@ static int xgene_msi_probe(struct platform_device *pdev)  	 */  	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {  		for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) -			msi_val = xgene_msi_ir_read(xgene_msi, irq_index, -						    msi_idx); +			xgene_msi_ir_read(xgene_msi, irq_index, msi_idx); +  		/* Read MSIINTn to confirm */  		msi_val = xgene_msi_int_read(xgene_msi, irq_index);  		if (msi_val) { diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index bac63d04297f..bea86899bd5d 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -23,6 +23,7 @@  #include <linux/of_platform.h>  #include <linux/pci.h>  #include <linux/printk.h> +#include <linux/reset.h>  #include <linux/sizes.h>  #include 
<linux/slab.h>  #include <linux/string.h> @@ -52,8 +53,11 @@  #define  PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK		0x1000  #define  PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK	0x2000  #define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK	0x300000 -#define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128		0x0 +  #define  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK		0xf8000000 +#define  PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK		0x07c00000 +#define  PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK		0x0000001f +#define  SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK  #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO		0x400c  #define PCIE_MEM_WIN0_LO(win)	\ @@ -77,10 +81,12 @@  #define PCIE_MISC_MSI_BAR_CONFIG_HI			0x4048  #define PCIE_MISC_MSI_DATA_CONFIG			0x404c -#define  PCIE_MISC_MSI_DATA_CONFIG_VAL			0xffe06540 +#define  PCIE_MISC_MSI_DATA_CONFIG_VAL_32		0xffe06540 +#define  PCIE_MISC_MSI_DATA_CONFIG_VAL_8		0xfff86540  #define PCIE_MISC_PCIE_CTRL				0x4064  #define  PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK	0x1 +#define PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK		0x4  #define PCIE_MISC_PCIE_STATUS				0x4068  #define  PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK		0x80 @@ -88,6 +94,9 @@  #define  PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK	0x10  #define  PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK	0x40 +#define PCIE_MISC_REVISION				0x406c +#define  BRCM_PCIE_HW_REV_33				0x0303 +  #define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT		0x4070  #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK	0xfff00000  #define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK	0xfff0 @@ -108,10 +117,14 @@  #define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK	0x2  #define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK		0x08000000 -#define PCIE_MSI_INTR2_STATUS				0x4500 -#define PCIE_MSI_INTR2_CLR				0x4508 -#define PCIE_MSI_INTR2_MASK_SET				0x4510 -#define PCIE_MSI_INTR2_MASK_CLR				0x4514 + +#define PCIE_INTR2_CPU_BASE		0x4300 +#define PCIE_MSI_INTR2_BASE		0x4500 +/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */ +#define  MSI_INT_STATUS			0x0 +#define  MSI_INT_CLR			0x8 +#define  MSI_INT_MASK_SET		0x10 +#define  MSI_INT_MASK_CLR		0x14  #define PCIE_EXT_CFG_DATA				0x8000 @@ -120,13 +133,19 @@  #define  PCIE_EXT_SLOT_SHIFT				15  #define  PCIE_EXT_FUNC_SHIFT				12 -#define PCIE_RGR1_SW_INIT_1				0x9210  #define  PCIE_RGR1_SW_INIT_1_PERST_MASK			0x1 -#define  PCIE_RGR1_SW_INIT_1_INIT_MASK			0x2 +#define  PCIE_RGR1_SW_INIT_1_PERST_SHIFT		0x0 + +#define RGR1_SW_INIT_1_INIT_GENERIC_MASK		0x2 +#define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT		0x1 +#define RGR1_SW_INIT_1_INIT_7278_MASK			0x1 +#define RGR1_SW_INIT_1_INIT_7278_SHIFT			0x0  /* PCIe parameters */  #define BRCM_NUM_PCIE_OUT_WINS		0x4  #define BRCM_INT_PCI_MSI_NR		32 +#define BRCM_INT_PCI_MSI_LEGACY_NR	8 +#define BRCM_INT_PCI_MSI_SHIFT		0  /* MSI target adresses */  #define BRCM_MSI_TARGET_ADDR_LT_4GB	0x0fffffffcULL @@ -151,6 +170,85 @@  #define SSC_STATUS_OFFSET		0x1  #define SSC_STATUS_SSC_MASK		0x400  #define SSC_STATUS_PLL_LOCK_MASK	0x800 +#define PCIE_BRCM_MAX_MEMC		3 + +#define IDX_ADDR(pcie)			(pcie->reg_offsets[EXT_CFG_INDEX]) +#define DATA_ADDR(pcie)			(pcie->reg_offsets[EXT_CFG_DATA]) +#define PCIE_RGR1_SW_INIT_1(pcie)	(pcie->reg_offsets[RGR1_SW_INIT_1]) + +/* Rescal registers */ +#define PCIE_DVT_PMU_PCIE_PHY_CTRL				0xc700 +#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS			0x3 +#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK		0x4 +#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT	0x2 +#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK		0x2 +#define  
PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT		0x1 +#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK		0x1 +#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT		0x0 + +/* Forward declarations */ +struct brcm_pcie; +static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val); +static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val); +static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val); +static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val); + +enum { +	RGR1_SW_INIT_1, +	EXT_CFG_INDEX, +	EXT_CFG_DATA, +}; + +enum { +	RGR1_SW_INIT_1_INIT_MASK, +	RGR1_SW_INIT_1_INIT_SHIFT, +}; + +enum pcie_type { +	GENERIC, +	BCM7278, +	BCM2711, +}; + +struct pcie_cfg_data { +	const int *offsets; +	const enum pcie_type type; +	void (*perst_set)(struct brcm_pcie *pcie, u32 val); +	void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val); +}; + +static const int pcie_offsets[] = { +	[RGR1_SW_INIT_1] = 0x9210, +	[EXT_CFG_INDEX]  = 0x9000, +	[EXT_CFG_DATA]   = 0x9004, +}; + +static const struct pcie_cfg_data generic_cfg = { +	.offsets	= pcie_offsets, +	.type		= GENERIC, +	.perst_set	= brcm_pcie_perst_set_generic, +	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +}; + +static const int pcie_offset_bcm7278[] = { +	[RGR1_SW_INIT_1] = 0xc010, +	[EXT_CFG_INDEX] = 0x9000, +	[EXT_CFG_DATA] = 0x9004, +}; + +static const struct pcie_cfg_data bcm7278_cfg = { +	.offsets	= pcie_offset_bcm7278, +	.type		= BCM7278, +	.perst_set	= brcm_pcie_perst_set_7278, +	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278, +}; + +static const struct pcie_cfg_data bcm2711_cfg = { +	.offsets	= pcie_offsets, +	.type		= BCM2711, +	.perst_set	= brcm_pcie_perst_set_generic, +	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic, +};  struct brcm_msi {  	struct device		*dev; @@ -163,6 +261,12 @@ struct brcm_msi {  	int			irq;  	/* used indicates which MSI interrupts have been alloc'd */  	unsigned long		used; +	bool			legacy; +	/* Some chips have MSIs in bits [31..24] of a shared register. */ +	int			legacy_shift; +	int			nr; /* No. 
of MSI available, depends on chip */ +	/* This is the base pointer for interrupt status/set/clr regs */ +	void __iomem		*intr_base;  };  /* Internal PCIe Host Controller Information.*/ @@ -175,6 +279,14 @@ struct brcm_pcie {  	int			gen;  	u64			msi_target_addr;  	struct brcm_msi		*msi; +	const int		*reg_offsets; +	enum pcie_type		type; +	struct reset_control	*rescal; +	int			num_memc; +	u64			memc_size[PCIE_BRCM_MAX_MEMC]; +	u32			hw_rev; +	void			(*perst_set)(struct brcm_pcie *pcie, u32 val); +	void			(*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);  };  /* @@ -365,8 +477,10 @@ static void brcm_pcie_msi_isr(struct irq_desc *desc)  	msi = irq_desc_get_handler_data(desc);  	dev = msi->dev; -	status = readl(msi->base + PCIE_MSI_INTR2_STATUS); -	for_each_set_bit(bit, &status, BRCM_INT_PCI_MSI_NR) { +	status = readl(msi->intr_base + MSI_INT_STATUS); +	status >>= msi->legacy_shift; + +	for_each_set_bit(bit, &status, msi->nr) {  		virq = irq_find_mapping(msi->inner_domain, bit);  		if (virq)  			generic_handle_irq(virq); @@ -383,7 +497,7 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)  	msg->address_lo = lower_32_bits(msi->target_addr);  	msg->address_hi = upper_32_bits(msi->target_addr); -	msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL) | data->hwirq; +	msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;  }  static int brcm_msi_set_affinity(struct irq_data *irq_data, @@ -395,8 +509,9 @@ static int brcm_msi_set_affinity(struct irq_data *irq_data,  static void brcm_msi_ack_irq(struct irq_data *data)  {  	struct brcm_msi *msi = irq_data_get_irq_chip_data(data); +	const int shift_amt = data->hwirq + msi->legacy_shift; -	writel(1 << data->hwirq, msi->base + PCIE_MSI_INTR2_CLR); +	writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR);  } @@ -412,7 +527,7 @@ static int brcm_msi_alloc(struct brcm_msi *msi)  	int hwirq;  	mutex_lock(&msi->lock); -	hwirq = bitmap_find_free_region(&msi->used, BRCM_INT_PCI_MSI_NR, 0); +	hwirq = bitmap_find_free_region(&msi->used, msi->nr, 0);  	mutex_unlock(&msi->lock);  	return hwirq; @@ -461,8 +576,7 @@ static int brcm_allocate_domains(struct brcm_msi *msi)  	struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);  	struct device *dev = msi->dev; -	msi->inner_domain = irq_domain_add_linear(NULL, BRCM_INT_PCI_MSI_NR, -						  &msi_domain_ops, msi); +	msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);  	if (!msi->inner_domain) {  		dev_err(dev, "failed to create IRQ domain\n");  		return -ENOMEM; @@ -499,7 +613,10 @@ static void brcm_msi_remove(struct brcm_pcie *pcie)  static void brcm_msi_set_regs(struct brcm_msi *msi)  { -	writel(0xffffffff, msi->base + PCIE_MSI_INTR2_MASK_CLR); +	u32 val = __GENMASK(31, msi->legacy_shift); + +	writel(val, msi->intr_base + MSI_INT_MASK_CLR); +	writel(val, msi->intr_base + MSI_INT_CLR);  	/*  	 * The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI @@ -510,8 +627,8 @@ static void brcm_msi_set_regs(struct brcm_msi *msi)  	writel(upper_32_bits(msi->target_addr),  	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI); -	writel(PCIE_MISC_MSI_DATA_CONFIG_VAL, -	       msi->base + PCIE_MISC_MSI_DATA_CONFIG); +	val = msi->legacy ? 
PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32; +	writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG);  }  static int brcm_pcie_enable_msi(struct brcm_pcie *pcie) @@ -536,6 +653,17 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)  	msi->np = pcie->np;  	msi->target_addr = pcie->msi_target_addr;  	msi->irq = irq; +	msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33; + +	if (msi->legacy) { +		msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE; +		msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR; +		msi->legacy_shift = 24; +	} else { +		msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE; +		msi->nr = BRCM_INT_PCI_MSI_NR; +		msi->legacy_shift = 0; +	}  	ret = brcm_allocate_domains(msi);  	if (ret) @@ -599,22 +727,43 @@ static struct pci_ops brcm_pcie_ops = {  	.write = pci_generic_config_write,  }; -static inline void brcm_pcie_bridge_sw_init_set(struct brcm_pcie *pcie, u32 val) +static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val) +{ +	u32 tmp, mask =  RGR1_SW_INIT_1_INIT_GENERIC_MASK; +	u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT; + +	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); +	tmp = (tmp & ~mask) | ((val << shift) & mask); +	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); +} + +static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val) +{ +	u32 tmp, mask =  RGR1_SW_INIT_1_INIT_7278_MASK; +	u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT; + +	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); +	tmp = (tmp & ~mask) | ((val << shift) & mask); +	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie)); +} + +static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)  {  	u32 tmp; -	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1); -	u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_INIT_MASK); -	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1); +	/* Perst bit has moved and assert value is 0 */ +	tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL); +	u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK); +	writel(tmp, pcie->base +  PCIE_MISC_PCIE_CTRL);  } -static inline void brcm_pcie_perst_set(struct brcm_pcie *pcie, u32 val) +static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)  {  	u32 tmp; -	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1); +	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));  	u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK); -	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1); +	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));  }  static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie, @@ -622,22 +771,44 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,  							u64 *rc_bar2_offset)  {  	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); -	struct device *dev = pcie->dev;  	struct resource_entry *entry; +	struct device *dev = pcie->dev; +	u64 lowest_pcie_addr = ~(u64)0; +	int ret, i = 0; +	u64 size = 0; -	entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM); -	if (!entry) -		return -ENODEV; +	resource_list_for_each_entry(entry, &bridge->dma_ranges) { +		u64 pcie_beg = entry->res->start - entry->offset; +		size += entry->res->end - entry->res->start + 1; +		if (pcie_beg < lowest_pcie_addr) +			lowest_pcie_addr = pcie_beg; +	} -	/* -	 * The controller expects the inbound window offset to be calculated as -	 * the difference between PCIe's address space and CPU's. 
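The older (pre-rev-3.3) controllers handled above expose only eight MSI vectors, carried in bits [31:24] of a register shared with other interrupt sources, which is why both the mask programming and the status decode gain a 24-bit shift. A standalone illustration of that decode; the latched status value is made up.

#include <stdint.h>
#include <stdio.h>

#define LEGACY_MSI_SHIFT	24
#define LEGACY_MSI_NR		8

int main(void)
{
	/* mask-clear value: GENMASK(31, 24) */
	uint32_t mask = ~0u << LEGACY_MSI_SHIFT;

	/* pretend the hardware latched vectors 0 and 5 */
	uint32_t raw_status = (1u << (LEGACY_MSI_SHIFT + 0)) |
			      (1u << (LEGACY_MSI_SHIFT + 5));
	uint32_t status = (raw_status & mask) >> LEGACY_MSI_SHIFT;

	for (unsigned int bit = 0; bit < LEGACY_MSI_NR; bit++)
		if (status & (1u << bit))
			printf("MSI vector %u fired\n", bit);
	return 0;
}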
The offset -	 * provided by the firmware is calculated the opposite way, so we -	 * negate it. -	 */ -	*rc_bar2_offset = -entry->offset; -	*rc_bar2_size = 1ULL << fls64(entry->res->end - entry->res->start); +	if (lowest_pcie_addr == ~(u64)0) { +		dev_err(dev, "DT node has no dma-ranges\n"); +		return -EINVAL; +	} + +	ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1, +						  PCIE_BRCM_MAX_MEMC); + +	if (ret <= 0) { +		/* Make an educated guess */ +		pcie->num_memc = 1; +		pcie->memc_size[0] = 1ULL << fls64(size - 1); +	} else { +		pcie->num_memc = ret; +	} + +	/* Each memc is viewed through a "port" that is a power of 2 */ +	for (i = 0, size = 0; i < pcie->num_memc; i++) +		size += pcie->memc_size[i]; + +	/* System memory starts at this address in PCIe-space */ +	*rc_bar2_offset = lowest_pcie_addr; +	/* The sum of all memc views must also be a power of 2 */ +	*rc_bar2_size = 1ULL << fls64(size - 1);  	/*  	 * We validate the inbound memory view even though we should trust @@ -689,22 +860,19 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  	void __iomem *base = pcie->base;  	struct device *dev = pcie->dev;  	struct resource_entry *entry; -	unsigned int scb_size_val;  	bool ssc_good = false;  	struct resource *res;  	int num_out_wins = 0;  	u16 nlw, cls, lnksta; -	int i, ret; -	u32 tmp, aspm_support; +	int i, ret, memc; +	u32 tmp, burst, aspm_support;  	/* Reset the bridge */ -	brcm_pcie_bridge_sw_init_set(pcie, 1); -	brcm_pcie_perst_set(pcie, 1); - +	pcie->bridge_sw_init_set(pcie, 1);  	usleep_range(100, 200);  	/* Take the bridge out of reset */ -	brcm_pcie_bridge_sw_init_set(pcie, 0); +	pcie->bridge_sw_init_set(pcie, 0);  	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);  	tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK; @@ -712,11 +880,22 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  	/* Wait for SerDes to be stable */  	usleep_range(100, 200); +	/* +	 * SCB_MAX_BURST_SIZE is a two bit field.  For GENERIC chips it +	 * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it +	 * is encoded as 0=Rsvd, 1=128, 2=256, 3=512. +	 */ +	if (pcie->type == BCM2711) +		burst = 0x0; /* 128B */ +	else if (pcie->type == BCM7278) +		burst = 0x3; /* 512 bytes */ +	else +		burst = 0x2; /* 512 bytes */ +  	/* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */  	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);  	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK); -	u32p_replace_bits(&tmp, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128, -			  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK); +	u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);  	writel(tmp, base + PCIE_MISC_MISC_CTRL);  	ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size, @@ -731,11 +910,17 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  	writel(upper_32_bits(rc_bar2_offset),  	       base + PCIE_MISC_RC_BAR2_CONFIG_HI); -	scb_size_val = rc_bar2_size ? 
-		       ilog2(rc_bar2_size) - 15 : 0xf; /* 0xf is 1GB */  	tmp = readl(base + PCIE_MISC_MISC_CTRL); -	u32p_replace_bits(&tmp, scb_size_val, -			  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK); +	for (memc = 0; memc < pcie->num_memc; memc++) { +		u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15; + +		if (memc == 0) +			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0)); +		else if (memc == 1) +			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1)); +		else if (memc == 2) +			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2)); +	}  	writel(tmp, base + PCIE_MISC_MISC_CTRL);  	/* @@ -760,17 +945,11 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)  	tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;  	writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO); -	/* Mask all interrupts since we are not handling any yet */ -	writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_MASK_SET); - -	/* clear any interrupts we find on boot */ -	writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_CLR); -  	if (pcie->gen)  		brcm_pcie_set_gen(pcie, pcie->gen);  	/* Unassert the fundamental reset */ -	brcm_pcie_perst_set(pcie, 0); +	pcie->perst_set(pcie, 0);  	/*  	 * Give the RC/EP time to wake up, before trying to configure RC. @@ -882,6 +1061,52 @@ static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)  		dev_err(pcie->dev, "failed to enter low-power link state\n");  } +static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start) +{ +	static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = { +		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT, +		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT, +		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,}; +	static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = { +		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK, +		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK, +		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,}; +	const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1; +	const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1; +	u32 tmp, combined_mask = 0; +	u32 val; +	void __iomem *base = pcie->base; +	int i, ret; + +	for (i = beg; i != end; start ? i++ : i--) { +		val = start ? BIT_MASK(shifts[i]) : 0; +		tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL); +		tmp = (tmp & ~masks[i]) | (val & masks[i]); +		writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL); +		usleep_range(50, 200); +		combined_mask |= masks[i]; +	} + +	tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL); +	val = start ? combined_mask : 0; + +	ret = (tmp & combined_mask) == val ? 0 : -EIO; +	if (ret) +		dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop")); + +	return ret; +} + +static inline int brcm_phy_start(struct brcm_pcie *pcie) +{ +	return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0; +} + +static inline int brcm_phy_stop(struct brcm_pcie *pcie) +{ +	return pcie->rescal ? 
brcm_phy_cntl(pcie, 0) : 0; +} +  static void brcm_pcie_turn_off(struct brcm_pcie *pcie)  {  	void __iomem *base = pcie->base; @@ -890,7 +1115,7 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)  	if (brcm_pcie_link_up(pcie))  		brcm_pcie_enter_l23(pcie);  	/* Assert fundamental reset */ -	brcm_pcie_perst_set(pcie, 1); +	pcie->perst_set(pcie, 1);  	/* Deassert request for L23 in case it was asserted */  	tmp = readl(base + PCIE_MISC_PCIE_CTRL); @@ -903,13 +1128,66 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)  	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);  	/* Shutdown PCIe bridge */ -	brcm_pcie_bridge_sw_init_set(pcie, 1); +	pcie->bridge_sw_init_set(pcie, 1); +} + +static int brcm_pcie_suspend(struct device *dev) +{ +	struct brcm_pcie *pcie = dev_get_drvdata(dev); +	int ret; + +	brcm_pcie_turn_off(pcie); +	ret = brcm_phy_stop(pcie); +	clk_disable_unprepare(pcie->clk); + +	return ret; +} + +static int brcm_pcie_resume(struct device *dev) +{ +	struct brcm_pcie *pcie = dev_get_drvdata(dev); +	void __iomem *base; +	u32 tmp; +	int ret; + +	base = pcie->base; +	clk_prepare_enable(pcie->clk); + +	ret = brcm_phy_start(pcie); +	if (ret) +		goto err; + +	/* Take bridge out of reset so we can access the SERDES reg */ +	pcie->bridge_sw_init_set(pcie, 0); + +	/* SERDES_IDDQ = 0 */ +	tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); +	u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK); +	writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG); + +	/* wait for serdes to be stable */ +	udelay(100); + +	ret = brcm_pcie_setup(pcie); +	if (ret) +		goto err; + +	if (pcie->msi) +		brcm_msi_set_regs(pcie->msi); + +	return 0; + +err: +	clk_disable_unprepare(pcie->clk); +	return ret;  }  static void __brcm_pcie_remove(struct brcm_pcie *pcie)  {  	brcm_msi_remove(pcie);  	brcm_pcie_turn_off(pcie); +	brcm_phy_stop(pcie); +	reset_control_assert(pcie->rescal);  	clk_disable_unprepare(pcie->clk);  } @@ -925,10 +1203,20 @@ static int brcm_pcie_remove(struct platform_device *pdev)  	return 0;  } +static const struct of_device_id brcm_pcie_match[] = { +	{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg }, +	{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg }, +	{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg }, +	{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg }, +	{ .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg }, +	{}, +}; +  static int brcm_pcie_probe(struct platform_device *pdev)  {  	struct device_node *np = pdev->dev.of_node, *msi_np;  	struct pci_host_bridge *bridge; +	const struct pcie_cfg_data *data;  	struct brcm_pcie *pcie;  	int ret; @@ -936,9 +1224,19 @@ static int brcm_pcie_probe(struct platform_device *pdev)  	if (!bridge)  		return -ENOMEM; +	data = of_device_get_match_data(&pdev->dev); +	if (!data) { +		pr_err("failed to look up compatible string\n"); +		return -EINVAL; +	} +  	pcie = pci_host_bridge_priv(bridge);  	pcie->dev = &pdev->dev;  	pcie->np = np; +	pcie->reg_offsets = data->offsets; +	pcie->type = data->type; +	pcie->perst_set = data->perst_set; +	pcie->bridge_sw_init_set = data->bridge_sw_init_set;  	pcie->base = devm_platform_ioremap_resource(pdev, 0);  	if (IS_ERR(pcie->base)) @@ -958,11 +1256,29 @@ static int brcm_pcie_probe(struct platform_device *pdev)  		dev_err(&pdev->dev, "could not enable clock\n");  		return ret;  	} +	pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal"); +	if (IS_ERR(pcie->rescal)) { +		clk_disable_unprepare(pcie->clk); +		return 
PTR_ERR(pcie->rescal); +	} + +	ret = reset_control_deassert(pcie->rescal); +	if (ret) +		dev_err(&pdev->dev, "failed to deassert 'rescal'\n"); + +	ret = brcm_phy_start(pcie); +	if (ret) { +		reset_control_assert(pcie->rescal); +		clk_disable_unprepare(pcie->clk); +		return ret; +	}  	ret = brcm_pcie_setup(pcie);  	if (ret)  		goto fail; +	pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION); +  	msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);  	if (pci_msi_enabled() && msi_np == pcie->np) {  		ret = brcm_pcie_enable_msi(pcie); @@ -983,18 +1299,20 @@ fail:  	return ret;  } -static const struct of_device_id brcm_pcie_match[] = { -	{ .compatible = "brcm,bcm2711-pcie" }, -	{}, -};  MODULE_DEVICE_TABLE(of, brcm_pcie_match); +static const struct dev_pm_ops brcm_pcie_pm_ops = { +	.suspend = brcm_pcie_suspend, +	.resume = brcm_pcie_resume, +}; +  static struct platform_driver brcm_pcie_driver = {  	.probe = brcm_pcie_probe,  	.remove = brcm_pcie_remove,  	.driver = {  		.name = "brcm-pcie",  		.of_match_table = brcm_pcie_match, +		.pm = &brcm_pcie_pm_ops,  	},  };  module_platform_driver(brcm_pcie_driver); diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c new file mode 100644 index 000000000000..7959c9c8d2bc --- /dev/null +++ b/drivers/pci/controller/pcie-hisi-error.c @@ -0,0 +1,327 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for handling the PCIe controller errors on + * HiSilicon HIP SoCs. + * + * Copyright (c) 2020 HiSilicon Limited. + */ + +#include <linux/acpi.h> +#include <acpi/ghes.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/kfifo.h> +#include <linux/spinlock.h> + +/* HISI PCIe controller error definitions */ +#define HISI_PCIE_ERR_MISC_REGS	33 + +#define HISI_PCIE_LOCAL_VALID_VERSION		BIT(0) +#define HISI_PCIE_LOCAL_VALID_SOC_ID		BIT(1) +#define HISI_PCIE_LOCAL_VALID_SOCKET_ID		BIT(2) +#define HISI_PCIE_LOCAL_VALID_NIMBUS_ID		BIT(3) +#define HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID	BIT(4) +#define HISI_PCIE_LOCAL_VALID_CORE_ID		BIT(5) +#define HISI_PCIE_LOCAL_VALID_PORT_ID		BIT(6) +#define HISI_PCIE_LOCAL_VALID_ERR_TYPE		BIT(7) +#define HISI_PCIE_LOCAL_VALID_ERR_SEVERITY	BIT(8) +#define HISI_PCIE_LOCAL_VALID_ERR_MISC		9 + +static guid_t hisi_pcie_sec_guid = +	GUID_INIT(0xB2889FC9, 0xE7D7, 0x4F9D, +		  0xA8, 0x67, 0xAF, 0x42, 0xE9, 0x8B, 0xE7, 0x72); + +/* + * Firmware reports the socket port ID where the error occurred.  These + * macros convert that to the core ID and core port ID required by the + * ACPI reset method. 
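The brcmstb probe above now selects per-SoC behaviour through of_device_get_match_data(): each compatible string in the match table carries a pointer to a config structure holding register offsets and function pointers. A stripped-down sketch of that pattern, with illustrative names rather than the driver's:

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Illustrative per-SoC bundle, mirroring the role of struct pcie_cfg_data */
struct demo_cfg {
	int flavour;
};

static const struct demo_cfg demo_cfg_a = { .flavour = 1 };
static const struct demo_cfg demo_cfg_b = { .flavour = 2 };

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,soc-a-pcie", .data = &demo_cfg_a },
	{ .compatible = "vendor,soc-b-pcie", .data = &demo_cfg_b },
	{ /* sentinel */ },
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_cfg *cfg = of_device_get_match_data(&pdev->dev);

	if (!cfg)		/* no match data: unknown compatible */
		return -EINVAL;
	/* stash cfg (offsets, callbacks) in the driver state and use it from here on */
	return 0;
}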
+ */ +#define HISI_PCIE_PORT_ID(core, v)       (((v) >> 1) + ((core) << 3)) +#define HISI_PCIE_CORE_ID(v)             ((v) >> 3) +#define HISI_PCIE_CORE_PORT_ID(v)        (((v) & 7) << 1) + +struct hisi_pcie_error_data { +	u64	val_bits; +	u8	version; +	u8	soc_id; +	u8	socket_id; +	u8	nimbus_id; +	u8	sub_module_id; +	u8	core_id; +	u8	port_id; +	u8	err_severity; +	u16	err_type; +	u8	reserv[2]; +	u32	err_misc[HISI_PCIE_ERR_MISC_REGS]; +}; + +struct hisi_pcie_error_private { +	struct notifier_block	nb; +	struct device *dev; +}; + +enum hisi_pcie_submodule_id { +	HISI_PCIE_SUB_MODULE_ID_AP, +	HISI_PCIE_SUB_MODULE_ID_TL, +	HISI_PCIE_SUB_MODULE_ID_MAC, +	HISI_PCIE_SUB_MODULE_ID_DL, +	HISI_PCIE_SUB_MODULE_ID_SDI, +}; + +static const char * const hisi_pcie_sub_module[] = { +	[HISI_PCIE_SUB_MODULE_ID_AP]	= "AP Layer", +	[HISI_PCIE_SUB_MODULE_ID_TL]	= "TL Layer", +	[HISI_PCIE_SUB_MODULE_ID_MAC]	= "MAC Layer", +	[HISI_PCIE_SUB_MODULE_ID_DL]	= "DL Layer", +	[HISI_PCIE_SUB_MODULE_ID_SDI]	= "SDI Layer", +}; + +enum hisi_pcie_err_severity { +	HISI_PCIE_ERR_SEV_RECOVERABLE, +	HISI_PCIE_ERR_SEV_FATAL, +	HISI_PCIE_ERR_SEV_CORRECTED, +	HISI_PCIE_ERR_SEV_NONE, +}; + +static const char * const hisi_pcie_error_sev[] = { +	[HISI_PCIE_ERR_SEV_RECOVERABLE]	= "recoverable", +	[HISI_PCIE_ERR_SEV_FATAL]	= "fatal", +	[HISI_PCIE_ERR_SEV_CORRECTED]	= "corrected", +	[HISI_PCIE_ERR_SEV_NONE]	= "none", +}; + +static const char *hisi_pcie_get_string(const char * const *array, +					size_t n, u32 id) +{ +	u32 index; + +	for (index = 0; index < n; index++) { +		if (index == id && array[index]) +			return array[index]; +	} + +	return "unknown"; +} + +static int hisi_pcie_port_reset(struct platform_device *pdev, +				u32 chip_id, u32 port_id) +{ +	struct device *dev = &pdev->dev; +	acpi_handle handle = ACPI_HANDLE(dev); +	union acpi_object arg[3]; +	struct acpi_object_list arg_list; +	acpi_status s; +	unsigned long long data = 0; + +	arg[0].type = ACPI_TYPE_INTEGER; +	arg[0].integer.value = chip_id; +	arg[1].type = ACPI_TYPE_INTEGER; +	arg[1].integer.value = HISI_PCIE_CORE_ID(port_id); +	arg[2].type = ACPI_TYPE_INTEGER; +	arg[2].integer.value = HISI_PCIE_CORE_PORT_ID(port_id); + +	arg_list.count = 3; +	arg_list.pointer = arg; + +	s = acpi_evaluate_integer(handle, "RST", &arg_list, &data); +	if (ACPI_FAILURE(s)) { +		dev_err(dev, "No RST method\n"); +		return -EIO; +	} + +	if (data) { +		dev_err(dev, "Failed to Reset\n"); +		return -EIO; +	} + +	return 0; +} + +static int hisi_pcie_port_do_recovery(struct platform_device *dev, +				      u32 chip_id, u32 port_id) +{ +	acpi_status s; +	struct device *device = &dev->dev; +	acpi_handle root_handle = ACPI_HANDLE(device); +	struct acpi_pci_root *pci_root; +	struct pci_bus *root_bus; +	struct pci_dev *pdev; +	u32 domain, busnr, devfn; + +	s = acpi_get_parent(root_handle, &root_handle); +	if (ACPI_FAILURE(s)) +		return -ENODEV; +	pci_root = acpi_pci_find_root(root_handle); +	if (!pci_root) +		return -ENODEV; +	root_bus = pci_root->bus; +	domain = pci_root->segment; + +	busnr = root_bus->number; +	devfn = PCI_DEVFN(port_id, 0); +	pdev = pci_get_domain_bus_and_slot(domain, busnr, devfn); +	if (!pdev) { +		dev_info(device, "Fail to get root port %04x:%02x:%02x.%d device\n", +			 domain, busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); +		return -ENODEV; +	} + +	pci_stop_and_remove_bus_device_locked(pdev); +	pci_dev_put(pdev); + +	if (hisi_pcie_port_reset(dev, chip_id, port_id)) +		return -EIO; + +	/* +	 * The initialization time of subordinate devices after +	 * hot reset is no more than 1s, 
which is required by +	 * the PCI spec v5.0 sec 6.6.1. The time will shorten +	 * if Readiness Notifications mechanisms are used. But +	 * wait 1s here to adapt any conditions. +	 */ +	ssleep(1UL); + +	/* add root port and downstream devices */ +	pci_lock_rescan_remove(); +	pci_rescan_bus(root_bus); +	pci_unlock_rescan_remove(); + +	return 0; +} + +static void hisi_pcie_handle_error(struct platform_device *pdev, +				   const struct hisi_pcie_error_data *edata) +{ +	struct device *dev = &pdev->dev; +	int idx, rc; +	const unsigned long valid_bits[] = {BITMAP_FROM_U64(edata->val_bits)}; + +	if (edata->val_bits == 0) { +		dev_warn(dev, "%s: no valid error information\n", __func__); +		return; +	} + +	dev_info(dev, "\nHISI : HIP : PCIe controller error\n"); +	if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOC_ID) +		dev_info(dev, "Table version = %d\n", edata->version); +	if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOCKET_ID) +		dev_info(dev, "Socket ID = %d\n", edata->socket_id); +	if (edata->val_bits & HISI_PCIE_LOCAL_VALID_NIMBUS_ID) +		dev_info(dev, "Nimbus ID = %d\n", edata->nimbus_id); +	if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID) +		dev_info(dev, "Sub Module = %s\n", +			 hisi_pcie_get_string(hisi_pcie_sub_module, +					      ARRAY_SIZE(hisi_pcie_sub_module), +					      edata->sub_module_id)); +	if (edata->val_bits & HISI_PCIE_LOCAL_VALID_CORE_ID) +		dev_info(dev, "Core ID = core%d\n", edata->core_id); +	if (edata->val_bits & HISI_PCIE_LOCAL_VALID_PORT_ID) +		dev_info(dev, "Port ID = port%d\n", edata->port_id); +	if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_SEVERITY) +		dev_info(dev, "Error severity = %s\n", +			 hisi_pcie_get_string(hisi_pcie_error_sev, +					      ARRAY_SIZE(hisi_pcie_error_sev), +					      edata->err_severity)); +	if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_TYPE) +		dev_info(dev, "Error type = 0x%x\n", edata->err_type); + +	dev_info(dev, "Reg Dump:\n"); +	idx = HISI_PCIE_LOCAL_VALID_ERR_MISC; +	for_each_set_bit_from(idx, valid_bits, +			      HISI_PCIE_LOCAL_VALID_ERR_MISC + HISI_PCIE_ERR_MISC_REGS) +		dev_info(dev, "ERR_MISC_%d = 0x%x\n", idx - HISI_PCIE_LOCAL_VALID_ERR_MISC, +			 edata->err_misc[idx - HISI_PCIE_LOCAL_VALID_ERR_MISC]); + +	if (edata->err_severity != HISI_PCIE_ERR_SEV_RECOVERABLE) +		return; + +	/* Recovery for the PCIe controller errors, try reset +	 * PCI port for the error recovery +	 */ +	rc = hisi_pcie_port_do_recovery(pdev, edata->socket_id, +			HISI_PCIE_PORT_ID(edata->core_id, edata->port_id)); +	if (rc) +		dev_info(dev, "fail to do hisi pcie port reset\n"); +} + +static int hisi_pcie_notify_error(struct notifier_block *nb, +				  unsigned long event, void *data) +{ +	struct acpi_hest_generic_data *gdata = data; +	const struct hisi_pcie_error_data *error_data = acpi_hest_get_payload(gdata); +	struct hisi_pcie_error_private *priv; +	struct device *dev; +	struct platform_device *pdev; +	guid_t err_sec_guid; +	u8 socket; + +	import_guid(&err_sec_guid, gdata->section_type); +	if (!guid_equal(&err_sec_guid, &hisi_pcie_sec_guid)) +		return NOTIFY_DONE; + +	priv = container_of(nb, struct hisi_pcie_error_private, nb); +	dev = priv->dev; + +	if (device_property_read_u8(dev, "socket", &socket)) +		return NOTIFY_DONE; + +	if (error_data->socket_id != socket) +		return NOTIFY_DONE; + +	pdev = container_of(dev, struct platform_device, dev); +	hisi_pcie_handle_error(pdev, error_data); + +	return NOTIFY_OK; +} + +static int hisi_pcie_error_handler_probe(struct platform_device *pdev) +{ +	struct hisi_pcie_error_private *priv; +	int ret; 
+ +	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); +	if (!priv) +		return -ENOMEM; + +	priv->nb.notifier_call = hisi_pcie_notify_error; +	priv->dev = &pdev->dev; +	ret = ghes_register_vendor_record_notifier(&priv->nb); +	if (ret) { +		dev_err(&pdev->dev, +			"Failed to register hisi pcie controller error handler with apei\n"); +		return ret; +	} + +	platform_set_drvdata(pdev, priv); + +	return 0; +} + +static int hisi_pcie_error_handler_remove(struct platform_device *pdev) +{ +	struct hisi_pcie_error_private *priv = platform_get_drvdata(pdev); + +	ghes_unregister_vendor_record_notifier(&priv->nb); + +	return 0; +} + +static const struct acpi_device_id hisi_pcie_acpi_match[] = { +	{ "HISI0361", 0 }, +	{ } +}; + +static struct platform_driver hisi_pcie_error_handler_driver = { +	.driver = { +		.name	= "hisi-pcie-error-handler", +		.acpi_match_table = hisi_pcie_acpi_match, +	}, +	.probe		= hisi_pcie_error_handler_probe, +	.remove		= hisi_pcie_error_handler_remove, +}; +module_platform_driver(hisi_pcie_error_handler_driver); + +MODULE_DESCRIPTION("HiSilicon HIP PCIe controller error handling driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c index aa55b064f64d..56b8ee7bf330 100644 --- a/drivers/pci/controller/pcie-iproc-bcma.c +++ b/drivers/pci/controller/pcie-iproc-bcma.c @@ -94,18 +94,7 @@ static struct bcma_driver iproc_pcie_bcma_driver = {  	.probe		= iproc_pcie_bcma_probe,  	.remove		= iproc_pcie_bcma_remove,  }; - -static int __init iproc_pcie_bcma_init(void) -{ -	return bcma_driver_register(&iproc_pcie_bcma_driver); -} -module_init(iproc_pcie_bcma_init); - -static void __exit iproc_pcie_bcma_exit(void) -{ -	bcma_driver_unregister(&iproc_pcie_bcma_driver); -} -module_exit(iproc_pcie_bcma_exit); +module_bcma_driver(iproc_pcie_bcma_driver);  MODULE_AUTHOR("Hauke Mehrtens");  MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver"); diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index 3176ad3ab0e5..908475d27e0e 100644 --- a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c @@ -209,15 +209,20 @@ static int iproc_msi_irq_set_affinity(struct irq_data *data,  	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);  	int target_cpu = cpumask_first(mask);  	int curr_cpu; +	int ret;  	curr_cpu = hwirq_to_cpu(msi, data->hwirq);  	if (curr_cpu == target_cpu) -		return IRQ_SET_MASK_OK_DONE; +		ret = IRQ_SET_MASK_OK_DONE; +	else { +		/* steer MSI to the target CPU */ +		data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu; +		ret = IRQ_SET_MASK_OK; +	} -	/* steer MSI to the target CPU */ -	data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu; +	irq_data_update_effective_affinity(data, cpumask_of(target_cpu)); -	return IRQ_SET_MASK_OK; +	return ret;  }  static void iproc_msi_irq_compose_msi_msg(struct irq_data *data, diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index a956b0c18bd1..b93e7bda101b 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -99,7 +99,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)  	switch (pcie->type) {  	case IPROC_PCIE_PAXC:  	case IPROC_PCIE_PAXC_V2: -		pcie->map_irq = 0; +		pcie->map_irq = NULL;  		break;  	default:  		break; diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index 
f3082de44e8a..f92e0152e65e 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -572,12 +572,8 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev)  		goto err_setup_irq;  	} -	bridge->dev.parent = dev;  	bridge->sysdata = port->cfg; -	bridge->busnr = port->cfg->busr.start;  	bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops; -	bridge->map_irq = of_irq_parse_and_map_pci; -	bridge->swizzle_irq = pci_common_swizzle;  	err = pci_host_probe(bridge);  	if (err < 0) diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index aa1b12bac9a1..f375c21ceeb1 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -298,6 +298,33 @@ static struct msi_domain_info vmd_msi_domain_info = {  	.chip		= &vmd_msi_controller,  }; +static int vmd_create_irq_domain(struct vmd_dev *vmd) +{ +	struct fwnode_handle *fn; + +	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain); +	if (!fn) +		return -ENODEV; + +	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL); +	if (!vmd->irq_domain) { +		irq_domain_free_fwnode(fn); +		return -ENODEV; +	} + +	return 0; +} + +static void vmd_remove_irq_domain(struct vmd_dev *vmd) +{ +	if (vmd->irq_domain) { +		struct fwnode_handle *fn = vmd->irq_domain->fwnode; + +		irq_domain_remove(vmd->irq_domain); +		irq_domain_free_fwnode(fn); +	} +} +  static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,  				  unsigned int devfn, int reg, int len)  { @@ -417,97 +444,175 @@ static int vmd_find_free_domain(void)  	return domain + 1;  } -static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) +static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint, +				resource_size_t *offset1, +				resource_size_t *offset2)  { -	struct pci_sysdata *sd = &vmd->sysdata; -	struct fwnode_handle *fn; -	struct resource *res; -	u32 upper_bits; -	unsigned long flags; -	LIST_HEAD(resources); -	resource_size_t offset[2] = {0}; -	resource_size_t membar2_offset = 0x2000; -	struct pci_bus *child; +	struct pci_dev *dev = vmd->dev; +	u64 phys1, phys2; -	/* -	 * Shadow registers may exist in certain VMD device ids which allow -	 * guests to correctly assign host physical addresses to the root ports -	 * and child devices. These registers will either return the host value -	 * or 0, depending on an enable bit in the VMD device. 
-	 */ -	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) { +	if (native_hint) {  		u32 vmlock;  		int ret; -		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE; -		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock); +		ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);  		if (ret || vmlock == ~0)  			return -ENODEV;  		if (MB2_SHADOW_EN(vmlock)) {  			void __iomem *membar2; -			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0); +			membar2 = pci_iomap(dev, VMD_MEMBAR2, 0);  			if (!membar2)  				return -ENOMEM; -			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - -					(readq(membar2 + MB2_SHADOW_OFFSET) & -					 PCI_BASE_ADDRESS_MEM_MASK); -			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - -					(readq(membar2 + MB2_SHADOW_OFFSET + 8) & -					 PCI_BASE_ADDRESS_MEM_MASK); -			pci_iounmap(vmd->dev, membar2); -		} -	} - -	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) { -		int pos = pci_find_capability(vmd->dev, PCI_CAP_ID_VNDR); +			phys1 = readq(membar2 + MB2_SHADOW_OFFSET); +			phys2 = readq(membar2 + MB2_SHADOW_OFFSET + 8); +			pci_iounmap(dev, membar2); +		} else +			return 0; +	} else { +		/* Hypervisor-Emulated Vendor-Specific Capability */ +		int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);  		u32 reg, regu; -		pci_read_config_dword(vmd->dev, pos + 4, ®); +		pci_read_config_dword(dev, pos + 4, ®);  		/* "SHDW" */  		if (pos && reg == 0x53484457) { -			pci_read_config_dword(vmd->dev, pos + 8, ®); -			pci_read_config_dword(vmd->dev, pos + 12, ®u); -			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - -					(((u64) regu << 32 | reg) & -					 PCI_BASE_ADDRESS_MEM_MASK); - -			pci_read_config_dword(vmd->dev, pos + 16, ®); -			pci_read_config_dword(vmd->dev, pos + 20, ®u); -			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - -					(((u64) regu << 32 | reg) & -					 PCI_BASE_ADDRESS_MEM_MASK); +			pci_read_config_dword(dev, pos + 8, ®); +			pci_read_config_dword(dev, pos + 12, ®u); +			phys1 = (u64) regu << 32 | reg; + +			pci_read_config_dword(dev, pos + 16, ®); +			pci_read_config_dword(dev, pos + 20, ®u); +			phys2 = (u64) regu << 32 | reg; +		} else +			return 0; +	} + +	*offset1 = dev->resource[VMD_MEMBAR1].start - +			(phys1 & PCI_BASE_ADDRESS_MEM_MASK); +	*offset2 = dev->resource[VMD_MEMBAR2].start - +			(phys2 & PCI_BASE_ADDRESS_MEM_MASK); + +	return 0; +} + +static int vmd_get_bus_number_start(struct vmd_dev *vmd) +{ +	struct pci_dev *dev = vmd->dev; +	u16 reg; + +	pci_read_config_word(dev, PCI_REG_VMCAP, ®); +	if (BUS_RESTRICT_CAP(reg)) { +		pci_read_config_word(dev, PCI_REG_VMCONFIG, ®); + +		switch (BUS_RESTRICT_CFG(reg)) { +		case 0: +			vmd->busn_start = 0; +			break; +		case 1: +			vmd->busn_start = 128; +			break; +		case 2: +			vmd->busn_start = 224; +			break; +		default: +			pci_err(dev, "Unknown Bus Offset Setting (%d)\n", +				BUS_RESTRICT_CFG(reg)); +			return -ENODEV;  		}  	} +	return 0; +} + +static irqreturn_t vmd_irq(int irq, void *data) +{ +	struct vmd_irq_list *irqs = data; +	struct vmd_irq *vmdirq; +	int idx; + +	idx = srcu_read_lock(&irqs->srcu); +	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node) +		generic_handle_irq(vmdirq->virq); +	srcu_read_unlock(&irqs->srcu, idx); + +	return IRQ_HANDLED; +} + +static int vmd_alloc_irqs(struct vmd_dev *vmd) +{ +	struct pci_dev *dev = vmd->dev; +	int i, err; + +	vmd->msix_count = pci_msix_vec_count(dev); +	if (vmd->msix_count < 0) +		return -ENODEV; + +	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count, +						PCI_IRQ_MSIX); +	if (vmd->msix_count < 0) +		return 
vmd->msix_count; + +	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs), +				 GFP_KERNEL); +	if (!vmd->irqs) +		return -ENOMEM; + +	for (i = 0; i < vmd->msix_count; i++) { +		err = init_srcu_struct(&vmd->irqs[i].srcu); +		if (err) +			return err; + +		INIT_LIST_HEAD(&vmd->irqs[i].irq_list); +		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), +				       vmd_irq, IRQF_NO_THREAD, +				       "vmd", &vmd->irqs[i]); +		if (err) +			return err; +	} + +	return 0; +} + +static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) +{ +	struct pci_sysdata *sd = &vmd->sysdata; +	struct resource *res; +	u32 upper_bits; +	unsigned long flags; +	LIST_HEAD(resources); +	resource_size_t offset[2] = {0}; +	resource_size_t membar2_offset = 0x2000; +	struct pci_bus *child; +	int ret; + +	/* +	 * Shadow registers may exist in certain VMD device ids which allow +	 * guests to correctly assign host physical addresses to the root ports +	 * and child devices. These registers will either return the host value +	 * or 0, depending on an enable bit in the VMD device. +	 */ +	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) { +		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE; +		ret = vmd_get_phys_offsets(vmd, true, &offset[0], &offset[1]); +		if (ret) +			return ret; +	} else if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) { +		ret = vmd_get_phys_offsets(vmd, false, &offset[0], &offset[1]); +		if (ret) +			return ret; +	} +  	/*  	 * Certain VMD devices may have a root port configuration option which  	 * limits the bus range to between 0-127, 128-255, or 224-255  	 */  	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) { -		u16 reg16; - -		pci_read_config_word(vmd->dev, PCI_REG_VMCAP, ®16); -		if (BUS_RESTRICT_CAP(reg16)) { -			pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, -					     ®16); - -			switch (BUS_RESTRICT_CFG(reg16)) { -			case 1: -				vmd->busn_start = 128; -				break; -			case 2: -				vmd->busn_start = 224; -				break; -			case 3: -				pci_err(vmd->dev, "Unknown Bus Offset Setting\n"); -				return -ENODEV; -			default: -				break; -			} -		} +		ret = vmd_get_bus_number_start(vmd); +		if (ret) +			return ret;  	}  	res = &vmd->dev->resource[VMD_CFGBAR]; @@ -568,17 +673,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)  	sd->node = pcibus_to_node(vmd->dev->bus); -	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain); -	if (!fn) -		return -ENODEV; - -	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, -						    NULL); - -	if (!vmd->irq_domain) { -		irq_domain_free_fwnode(fn); -		return -ENODEV; -	} +	ret = vmd_create_irq_domain(vmd); +	if (ret) +		return ret;  	/*  	 * Override the irq domain bus token so the domain can be distinguished @@ -594,13 +691,13 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)  				       &vmd_ops, sd, &resources);  	if (!vmd->bus) {  		pci_free_resource_list(&resources); -		irq_domain_remove(vmd->irq_domain); -		irq_domain_free_fwnode(fn); +		vmd_remove_irq_domain(vmd);  		return -ENODEV;  	}  	vmd_attach_resources(vmd); -	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); +	if (vmd->irq_domain) +		dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);  	pci_scan_child_bus(vmd->bus);  	pci_assign_unassigned_bus_resources(vmd->bus); @@ -620,24 +717,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)  	return 0;  } -static irqreturn_t vmd_irq(int irq, void *data) -{ -	struct vmd_irq_list *irqs = data; -	struct 
vmd_irq *vmdirq; -	int idx; - -	idx = srcu_read_lock(&irqs->srcu); -	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node) -		generic_handle_irq(vmdirq->virq); -	srcu_read_unlock(&irqs->srcu, idx); - -	return IRQ_HANDLED; -} -  static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)  {  	struct vmd_dev *vmd; -	int i, err; +	int err;  	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))  		return -ENOMEM; @@ -660,32 +743,9 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)  	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))  		return -ENODEV; -	vmd->msix_count = pci_msix_vec_count(dev); -	if (vmd->msix_count < 0) -		return -ENODEV; - -	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count, -					PCI_IRQ_MSIX); -	if (vmd->msix_count < 0) -		return vmd->msix_count; - -	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs), -				 GFP_KERNEL); -	if (!vmd->irqs) -		return -ENOMEM; - -	for (i = 0; i < vmd->msix_count; i++) { -		err = init_srcu_struct(&vmd->irqs[i].srcu); -		if (err) -			return err; - -		INIT_LIST_HEAD(&vmd->irqs[i].irq_list); -		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), -				       vmd_irq, IRQF_NO_THREAD, -				       "vmd", &vmd->irqs[i]); -		if (err) -			return err; -	} +	err = vmd_alloc_irqs(vmd); +	if (err) +		return err;  	spin_lock_init(&vmd->cfg_lock);  	pci_set_drvdata(dev, vmd); @@ -709,15 +769,13 @@ static void vmd_cleanup_srcu(struct vmd_dev *vmd)  static void vmd_remove(struct pci_dev *dev)  {  	struct vmd_dev *vmd = pci_get_drvdata(dev); -	struct fwnode_handle *fn = vmd->irq_domain->fwnode;  	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");  	pci_stop_root_bus(vmd->bus);  	pci_remove_root_bus(vmd->bus);  	vmd_cleanup_srcu(vmd);  	vmd_detach_resources(vmd); -	irq_domain_remove(vmd->irq_domain); -	irq_domain_free_fwnode(fn); +	vmd_remove_irq_domain(vmd);  }  #ifdef CONFIG_PM_SLEEP @@ -730,7 +788,6 @@ static int vmd_suspend(struct device *dev)  	for (i = 0; i < vmd->msix_count; i++)  		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]); -	pci_save_state(pdev);  	return 0;  } @@ -748,7 +805,6 @@ static int vmd_resume(struct device *dev)  			return err;  	} -	pci_restore_state(pdev);  	return 0;  }  #endif  | 
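Note on the vmd.c refactor above: vmd_get_phys_offsets() now isolates the guest/host address-offset discovery. The arithmetic it performs reduces to subtracting the host physical base (read from the MEMBAR2 shadow registers, or from the "SHDW" vendor-specific capability when running under a hypervisor) from the BAR start the guest sees; the resulting offset is later applied to the child devices' resources. The stand-alone sketch below only illustrates that subtraction; it is not part of the patch, and the addresses and the local mask definition are made up for the example.

	/* Illustrative sketch only; not part of the patch. Addresses are hypothetical. */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* local stand-in for the kernel's PCI_BASE_ADDRESS_MEM_MASK (clears BAR flag bits) */
	#define BAR_MEM_MASK	(~0x0fULL)

	int main(void)
	{
		/* BAR start as the guest sees it (vmd->dev->resource[VMD_MEMBAR1].start) */
		uint64_t guest_bar  = 0x000000e0c0000000ULL;
		/* raw value read back from the shadow register at MB2_SHADOW_OFFSET */
		uint64_t shadow_reg = 0x00000000c0000004ULL;
		/* host physical base after masking off the BAR flag bits */
		uint64_t host_phys  = shadow_reg & BAR_MEM_MASK;
		/* offset[0] in vmd_enable_domain(): guest-visible start minus host base */
		int64_t  offset     = (int64_t)(guest_bar - host_phys);

		printf("MEMBAR1 offset = %" PRId64 "\n", offset);
		return 0;
	}

With these example values the offset is positive, but the same subtraction also yields a negative offset when the host base sits above the guest-visible BAR; resource_size_t arithmetic in the driver handles both cases the same way.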
