Diffstat (limited to 'drivers/pci/controller/dwc/pcie-designware-host.c')
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 299
1 file changed, 198 insertions(+), 101 deletions(-)
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index d2291c3ceb8b..952f8594b501 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -10,6 +10,7 @@
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_address.h>
@@ -23,35 +24,21 @@
static struct pci_ops dw_pcie_ops;
static struct pci_ops dw_child_pcie_ops;
-static void dw_msi_ack_irq(struct irq_data *d)
-{
- irq_chip_ack_parent(d);
-}
-
-static void dw_msi_mask_irq(struct irq_data *d)
-{
- pci_msi_mask_irq(d);
- irq_chip_mask_parent(d);
-}
-
-static void dw_msi_unmask_irq(struct irq_data *d)
-{
- pci_msi_unmask_irq(d);
- irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip dw_pcie_msi_irq_chip = {
- .name = "PCI-MSI",
- .irq_ack = dw_msi_ack_irq,
- .irq_mask = dw_msi_mask_irq,
- .irq_unmask = dw_msi_unmask_irq,
-};
-
-static struct msi_domain_info dw_pcie_msi_domain_info = {
- .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX |
- MSI_FLAG_MULTI_PCI_MSI,
- .chip = &dw_pcie_msi_irq_chip,
+#define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_NO_AFFINITY | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT)
+#define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI | \
+ MSI_FLAG_PCI_MSIX | \
+ MSI_GENERIC_FLAGS_MASK)
+
+static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
+ .required_flags = DW_PCIE_MSI_FLAGS_REQUIRED,
+ .supported_flags = DW_PCIE_MSI_FLAGS_SUPPORTED,
+ .bus_select_token = DOMAIN_BUS_PCI_MSI,
+ .chip_flags = MSI_CHIP_FLAG_SET_ACK,
+ .prefix = "DW-",
+ .init_dev_msi_info = msi_lib_init_dev_msi_info,
};
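
Annotation: the conversion above moves the driver to the generic MSI parent-domain library. With MSI_FLAG_PCI_MSI_MASK_PARENT in the required flags, the PCI/MSI core propagates mask/unmask to the parent domain itself, which is exactly what the deleted dw_msi_mask_irq()/dw_msi_unmask_irq() wrappers open-coded. A conceptual sketch (not the in-tree implementation) of what the flag replaces:

/*
 * Conceptual sketch only: with MSI_FLAG_PCI_MSI_MASK_PARENT set, the
 * core performs the equivalent of the removed helpers, masking both
 * at the PCI device and in the parent (DWC) domain.
 */
static void sketch_msi_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);		/* PCI MSI mask bit */
	irq_chip_mask_parent(d);	/* parent (DWC) domain mask */
}

static void sketch_msi_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
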
/* MSI int handler */
@@ -227,30 +214,23 @@ static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
-
- pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
- &dw_pcie_msi_domain_ops, pp);
+ struct irq_domain_info info = {
+ .fwnode = dev_fwnode(pci->dev),
+ .ops = &dw_pcie_msi_domain_ops,
+ .size = pp->num_vectors,
+ .host_data = pp,
+ };
+
+ pp->irq_domain = msi_create_parent_irq_domain(&info, &dw_pcie_msi_parent_ops);
if (!pp->irq_domain) {
dev_err(pci->dev, "Failed to create IRQ domain\n");
return -ENOMEM;
}
- irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);
-
- pp->msi_domain = pci_msi_create_irq_domain(fwnode,
- &dw_pcie_msi_domain_info,
- pp->irq_domain);
- if (!pp->msi_domain) {
- dev_err(pci->dev, "Failed to create MSI domain\n");
- irq_domain_remove(pp->irq_domain);
- return -ENOMEM;
- }
-
return 0;
}
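
Annotation: msi_create_parent_irq_domain() collapses what used to be two domains (a linear DOMAIN_BUS_NEXUS domain plus a pci_msi_create_irq_domain() wrapper) into a single parent domain; per-device PCI/MSI child domains are now instantiated on demand by the core through init_dev_msi_info. A minimal usage sketch, assuming a hypothetical caller that pairs allocation with the teardown below:

/* Hypothetical caller, for illustration only: */
static int sketch_setup_msi(struct dw_pcie_rp *pp)
{
	int ret;

	ret = dw_pcie_allocate_domains(pp);	/* one parent domain */
	if (ret)
		return ret;

	/* ... request chained IRQs, program the controller ... */

	return 0;	/* undone later via dw_pcie_free_msi(pp) */
}
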
-static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
+void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{
u32 ctrl;
@@ -260,22 +240,36 @@ static void dw_pcie_free_msi(struct dw_pcie_rp *pp)
NULL, NULL);
}
- irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
}
+EXPORT_SYMBOL_GPL(dw_pcie_free_msi);
-static void dw_pcie_msi_init(struct dw_pcie_rp *pp)
+void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target = (u64)pp->msi_data;
+ u32 ctrl, num_ctrls;
if (!pci_msi_enabled() || !pp->has_msi_ctrl)
return;
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+
+ /* Initialize IRQ Status array */
+ for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ pp->irq_mask[ctrl]);
+ dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
+ (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
+ ~0);
+ }
+
/* Program the msi_data */
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, lower_32_bits(msi_target));
dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, upper_32_bits(msi_target));
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_init);
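
Annotation: the mask/enable programming moves out of dw_pcie_setup_rc() (see the removal in hunk @@ -813,20 +915,6 @@ below) into dw_pcie_msi_init(), so any path that re-runs dw_pcie_msi_init() also restores the per-controller interrupt state. Each of the num_vectors / MAX_MSI_IRQS_PER_CTRL controllers owns a MSI_REG_CTRL_BLOCK_SIZE-sized register block; hypothetical helpers spelling out the offset arithmetic used above:

/* Hypothetical helpers, for illustration of the offsets above: */
static inline u32 sketch_msi_mask_off(u32 ctrl)
{
	return PCIE_MSI_INTR0_MASK + ctrl * MSI_REG_CTRL_BLOCK_SIZE;
}

static inline u32 sketch_msi_enable_off(u32 ctrl)
{
	return PCIE_MSI_INTR0_ENABLE + ctrl * MSI_REG_CTRL_BLOCK_SIZE;
}
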
static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
{
@@ -317,7 +311,7 @@ static int dw_pcie_parse_split_msi_irq(struct dw_pcie_rp *pp)
return 0;
}
-static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
+int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
@@ -391,6 +385,7 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
return 0;
}
+EXPORT_SYMBOL_GPL(dw_pcie_msi_host_init);
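
Annotation: dw_pcie_msi_host_init(), dw_pcie_msi_init() and dw_pcie_free_msi() are now exported, so glue drivers built as modules can drive the MSI lifecycle without going through dw_pcie_host_init(). A hedged sketch of such a driver (the foo_pcie_* names are hypothetical):

/* Hypothetical glue-driver usage of the newly exported helpers: */
static int foo_pcie_enable_msi(struct dw_pcie_rp *pp)
{
	int ret;

	ret = dw_pcie_msi_host_init(pp);	/* vectors, domains, msi_data */
	if (ret)
		return ret;

	dw_pcie_msi_init(pp);			/* program mask/enable/address */
	return 0;
}

static void foo_pcie_disable_msi(struct dw_pcie_rp *pp)
{
	dw_pcie_free_msi(pp);
}
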
static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
{
@@ -418,50 +413,69 @@ static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
}
}
-int dw_pcie_host_init(struct dw_pcie_rp *pp)
+static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
- struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win;
- struct pci_host_bridge *bridge;
struct resource *res;
int ret;
- raw_spin_lock_init(&pp->lock);
-
ret = dw_pcie_get_resources(pci);
if (ret)
return ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
- if (res) {
- pp->cfg0_size = resource_size(res);
- pp->cfg0_base = res->start;
-
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pp->va_cfg0_base))
- return PTR_ERR(pp->va_cfg0_base);
- } else {
- dev_err(dev, "Missing *config* reg space\n");
+ if (!res) {
+ dev_err(dev, "Missing \"config\" reg space\n");
return -ENODEV;
}
- bridge = devm_pci_alloc_host_bridge(dev, 0);
- if (!bridge)
- return -ENOMEM;
+ pp->cfg0_size = resource_size(res);
+ pp->cfg0_base = res->start;
- pp->bridge = bridge;
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
/* Get the I/O range from DT */
- win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
+ win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
if (win) {
pp->io_size = resource_size(win->res);
pp->io_bus_addr = win->res->start - win->offset;
pp->io_base = pci_pio_to_address(win->res->start);
}
+ /*
+ * visconti_pcie_cpu_addr_fixup() uses pp->io_base, so we have to
+ * call dw_pcie_parent_bus_offset() after setting pp->io_base.
+ */
+ pci->parent_bus_offset = dw_pcie_parent_bus_offset(pci, "config",
+ pp->cfg0_base);
+ return 0;
+}
+
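
Annotation: dw_pcie_parent_bus_offset() records the constant difference between CPU physical addresses and the addresses the DWC core observes on its parent bus; every atu.parent_bus_addr assignment in this patch subtracts it. A worked sketch with hypothetical numbers:

/*
 * Hypothetical example: if the CPU maps "config" at 0x40000000 but
 * the controller's parent bus sees the same window at 0x0, then
 * parent_bus_offset is 0x40000000 and every iATU source address is
 * derived as below.
 */
static u64 sketch_to_parent_bus_addr(struct dw_pcie *pci, u64 cpu_addr)
{
	return cpu_addr - pci->parent_bus_offset;
}
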
+int dw_pcie_host_init(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+ struct pci_host_bridge *bridge;
+ int ret;
+
+ raw_spin_lock_init(&pp->lock);
+
+ bridge = devm_pci_alloc_host_bridge(dev, 0);
+ if (!bridge)
+ return -ENOMEM;
+
+ pp->bridge = bridge;
+
+ ret = dw_pcie_host_get_resources(pp);
+ if (ret)
+ return ret;
+
/* Set default bus ops */
bridge->ops = &dw_pcie_ops;
bridge->child_ops = &dw_child_pcie_ops;
@@ -504,6 +518,13 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
dw_pcie_iatu_detect(pci);
+ if (pci->num_lanes < 1)
+ pci->num_lanes = dw_pcie_link_get_max_link_width(pci);
+
+ ret = of_pci_get_equalization_presets(dev, &pp->presets, pci->num_lanes);
+ if (ret)
+ goto err_free_msi;
+
/*
* Allocate the resource for MSG TLP before programming the iATU
* outbound window in dw_pcie_setup_rc(). Since the allocation depends
@@ -530,8 +551,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
goto err_remove_edma;
}
- /* Ignore errors, the link may come up later */
- dw_pcie_wait_for_link(pci);
+ /*
+ * Note: Skip the link up delay only when a Link Up IRQ is present.
+ * If there is no Link Up IRQ, we should not bypass the delay
+ * because that would require users to manually rescan for devices.
+ */
+ if (!pp->use_linkup_irq)
+ /* Ignore errors, the link may come up later */
+ dw_pcie_wait_for_link(pci);
bridge->sysdata = pp;
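
Annotation: controllers that can raise a Link Up interrupt set pp->use_linkup_irq and skip the synchronous wait; enumeration then happens from the IRQ handler instead. A hedged sketch of such a handler (foo_pcie_* is hypothetical, the rescan pattern itself is the standard one):

/* Hypothetical threaded Link Up IRQ handler for a glue driver
 * that sets pp->use_linkup_irq: */
static irqreturn_t foo_pcie_linkup_thread(int irq, void *data)
{
	struct dw_pcie_rp *pp = data;

	pci_lock_rescan_remove();
	pci_rescan_bus(pp->bridge->bus);	/* enumerate now that link is up */
	pci_unlock_rescan_remove();

	return IRQ_HANDLED;
}
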
@@ -542,6 +569,8 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (pp->ops->post_init)
pp->ops->post_init(pp);
+ dwc_pcie_debugfs_init(pci, DW_PCIE_RC_TYPE);
+
return 0;
err_stop_link:
@@ -566,6 +595,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ dwc_pcie_debugfs_deinit(pci);
+
pci_stop_root_bus(pp->bridge->bus);
pci_remove_root_bus(pp->bridge->bus);
@@ -610,7 +641,7 @@ static void __iomem *dw_pcie_other_conf_map_bus(struct pci_bus *bus,
type = PCIE_ATU_TYPE_CFG1;
atu.type = type;
- atu.cpu_addr = pp->cfg0_base;
+ atu.parent_bus_addr = pp->cfg0_base - pci->parent_bus_offset;
atu.pci_addr = busdev;
atu.size = pp->cfg0_size;
@@ -635,7 +666,7 @@ static int dw_pcie_rd_other_conf(struct pci_bus *bus, unsigned int devfn,
if (pp->cfg0_io_shared) {
atu.type = PCIE_ATU_TYPE_IO;
- atu.cpu_addr = pp->io_base;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
atu.pci_addr = pp->io_bus_addr;
atu.size = pp->io_size;
@@ -661,7 +692,7 @@ static int dw_pcie_wr_other_conf(struct pci_bus *bus, unsigned int devfn,
if (pp->cfg0_io_shared) {
atu.type = PCIE_ATU_TYPE_IO;
- atu.cpu_addr = pp->io_base;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
atu.pci_addr = pp->io_bus_addr;
atu.size = pp->io_size;
@@ -730,7 +761,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
atu.index = i;
atu.type = PCIE_ATU_TYPE_MEM;
- atu.cpu_addr = entry->res->start;
+ atu.parent_bus_addr = entry->res->start - pci->parent_bus_offset;
atu.pci_addr = entry->res->start - entry->offset;
/* Adjust iATU size if MSG TLP region was allocated before */
@@ -752,7 +783,7 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
if (pci->num_ob_windows > ++i) {
atu.index = i;
atu.type = PCIE_ATU_TYPE_IO;
- atu.cpu_addr = pp->io_base;
+ atu.parent_bus_addr = pp->io_base - pci->parent_bus_offset;
atu.pci_addr = pp->io_bus_addr;
atu.size = pp->io_size;
@@ -799,10 +830,81 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp)
return 0;
}
+static void dw_pcie_program_presets(struct dw_pcie_rp *pp, enum pci_bus_speed speed)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ u8 lane_eq_offset, lane_reg_size, cap_id;
+ u8 *presets;
+ u32 cap;
+ int i;
+
+ if (speed == PCIE_SPEED_8_0GT) {
+ presets = (u8 *)pp->presets.eq_presets_8gts;
+ lane_eq_offset = PCI_SECPCI_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_SECPCI;
+ /* For a data rate of 8 GT/s, each lane equalization control register is 16 bits wide */
+ lane_reg_size = 0x2;
+ } else if (speed == PCIE_SPEED_16_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_16GTS - 1];
+ lane_eq_offset = PCI_PL_16GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_16GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_32_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_32GTS - 1];
+ lane_eq_offset = PCI_PL_32GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_32GT;
+ lane_reg_size = 0x1;
+ } else if (speed == PCIE_SPEED_64_0GT) {
+ presets = pp->presets.eq_presets_Ngts[EQ_PRESET_TYPE_64GTS - 1];
+ lane_eq_offset = PCI_PL_64GT_LE_CTRL;
+ cap_id = PCI_EXT_CAP_ID_PL_64GT;
+ lane_reg_size = 0x1;
+ } else {
+ return;
+ }
+
+ if (presets[0] == PCI_EQ_RESV)
+ return;
+
+ cap = dw_pcie_find_ext_capability(pci, cap_id);
+ if (!cap)
+ return;
+
+ /*
+ * Write preset values to the registers byte-by-byte for the given
+ * number of lanes and register size.
+ */
+ for (i = 0; i < pci->num_lanes * lane_reg_size; i++)
+ dw_pcie_writeb_dbi(pci, cap + lane_eq_offset + i, presets[i]);
+}
+
+static void dw_pcie_config_presets(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ enum pci_bus_speed speed = pcie_link_speed[pci->max_link_speed];
+
+ /*
+ * Lane equalization settings need to be applied for all data rates the
+ * controller supports and for all supported lanes.
+ */
+
+ if (speed >= PCIE_SPEED_8_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_8_0GT);
+
+ if (speed >= PCIE_SPEED_16_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_16_0GT);
+
+ if (speed >= PCIE_SPEED_32_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_32_0GT);
+
+ if (speed >= PCIE_SPEED_64_0GT)
+ dw_pcie_program_presets(pp, PCIE_SPEED_64_0GT);
+}
+
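
Annotation: dw_pcie_config_presets() walks every data rate up to max_link_speed because the LTSSM runs equalization at each rate in turn, and dw_pcie_program_presets() then writes num_lanes * lane_reg_size bytes per rate. A hypothetical helper spelling out the write count:

/*
 * Hypothetical illustration of the loop bound above: a x2 link at
 * 8 GT/s has 16-bit lane equalization control registers, so the loop
 * issues 2 lanes * 2 bytes = 4 byte-wide writes starting at
 * cap + PCI_SECPCI_LE_CTRL; at 16 GT/s and above the registers are
 * one byte per lane.
 */
static unsigned int sketch_eq_write_count(unsigned int num_lanes,
					  unsigned int lane_reg_size)
{
	return num_lanes * lane_reg_size;
}
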
int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- u32 val, ctrl, num_ctrls;
+ u32 val;
int ret;
/*
@@ -813,20 +915,6 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
dw_pcie_setup(pci);
- if (pp->has_msi_ctrl) {
- num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
-
- /* Initialize IRQ Status array */
- for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- pp->irq_mask[ctrl]);
- dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
- (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
- ~0);
- }
- }
-
dw_pcie_msi_init(pp);
/* Setup RC BARs */
@@ -852,6 +940,7 @@ int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
+ dw_pcie_config_presets(pp);
/*
* If the platform provides its own child bus config accesses, it means
* the platform uses its own address translation component rather than
@@ -896,13 +985,13 @@ static int dw_pcie_pme_turn_off(struct dw_pcie *pci)
atu.size = resource_size(pci->pp.msg_res);
atu.index = pci->pp.msg_atu_index;
- atu.cpu_addr = pci->pp.msg_res->start;
+ atu.parent_bus_addr = pci->pp.msg_res->start - pci->parent_bus_offset;
ret = dw_pcie_prog_outbound_atu(pci, &atu);
if (ret)
return ret;
- mem = ioremap(atu.cpu_addr, pci->region_align);
+ mem = ioremap(pci->pp.msg_res->start, pci->region_align);
if (!mem)
return -ENOMEM;
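
Annotation: note the asymmetry fixed above: the iATU is programmed with the parent-bus address, while ioremap() must use the CPU physical address of the MSG window. A conceptual sketch of the same window seen from both sides (helper name is hypothetical):

/* Conceptual sketch: one window, two address spaces. */
static void __iomem *sketch_map_msg_window(struct dw_pcie *pci)
{
	resource_size_t cpu_addr = pci->pp.msg_res->start;

	/* The iATU matches cpu_addr - pci->parent_bus_offset;
	 * the CPU mapping still uses cpu_addr itself. */
	return ioremap(cpu_addr, pci->region_align);
}
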
@@ -918,7 +1007,7 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
u32 val;
- int ret = 0;
+ int ret;
/*
* If L1SS is supported, then do not put the link into L2 as some
@@ -927,25 +1016,33 @@ int dw_pcie_suspend_noirq(struct dw_pcie *pci)
if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
return 0;
- if (dw_pcie_get_ltssm(pci) <= DW_PCIE_LTSSM_DETECT_ACT)
- return 0;
-
- if (pci->pp.ops->pme_turn_off)
+ if (pci->pp.ops->pme_turn_off) {
pci->pp.ops->pme_turn_off(&pci->pp);
- else
+ } else {
ret = dw_pcie_pme_turn_off(pci);
+ if (ret)
+ return ret;
+ }
- if (ret)
- return ret;
-
- ret = read_poll_timeout(dw_pcie_get_ltssm, val, val == DW_PCIE_LTSSM_L2_IDLE,
+ ret = read_poll_timeout(dw_pcie_get_ltssm, val,
+ val == DW_PCIE_LTSSM_L2_IDLE ||
+ val <= DW_PCIE_LTSSM_DETECT_WAIT,
PCIE_PME_TO_L2_TIMEOUT_US/10,
PCIE_PME_TO_L2_TIMEOUT_US, false, pci);
if (ret) {
+ /* Only log message when LTSSM isn't in DETECT or POLL */
dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
return ret;
}
+ /*
+ * Per PCIe r6.0, sec 5.3.3.2.1, software should wait at least
+ * 100ns after L2/L3 Ready before turning off refclock and
+ * main power. This is harmless when no endpoint is connected.
+ */
+ udelay(1);
+
+ dw_pcie_stop_link(pci);
if (pci->pp.ops->deinit)
pci->pp.ops->deinit(&pci->pp);