Diffstat (limited to 'drivers/net/ethernet/ti')

 drivers/net/ethernet/ti/am65-cpsw-nuss.c         |  47
 drivers/net/ethernet/ti/am65-cpsw-qos.c          |  51
 drivers/net/ethernet/ti/am65-cpts.c              |  63
 drivers/net/ethernet/ti/cpsw_new.c               |   6
 drivers/net/ethernet/ti/davinci_mdio.c           |  21
 drivers/net/ethernet/ti/icssg/icssg_common.c     | 516
 drivers/net/ethernet/ti/icssg/icssg_config.c     |   7
 drivers/net/ethernet/ti/icssg/icssg_prueth.c     | 401
 drivers/net/ethernet/ti/icssg/icssg_prueth.h     |  31
 drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c |   7
 drivers/net/ethernet/ti/netcp.h                  |   5
 drivers/net/ethernet/ti/netcp_core.c             |  68
 drivers/net/ethernet/ti/netcp_ethss.c            |  72

 13 files changed, 993 insertions(+), 302 deletions(-)
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 110eb2da8dbc..5924db6be3fe 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1788,28 +1788,28 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev, } static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, - struct ifreq *ifr) + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { struct am65_cpsw_port *port = am65_ndev_to_port(ndev); u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype; - struct hwtstamp_config cfg; - if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) + if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) { + NL_SET_ERR_MSG(extack, "Time stamping is not supported"); return -EOPNOTSUPP; - - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; + } /* TX HW timestamp */ - switch (cfg.tx_type) { + switch (cfg->tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; default: + NL_SET_ERR_MSG(extack, "TX mode is not supported"); return -ERANGE; } - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: port->rx_ts_enabled = false; break; @@ -1826,17 +1826,19 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: port->rx_ts_enabled = true; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_NTP_ALL: + NL_SET_ERR_MSG(extack, "RX filter is not supported"); return -EOPNOTSUPP; default: + NL_SET_ERR_MSG(extack, "RX filter is not supported"); return -ERANGE; } - port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON); + port->tx_ts_enabled = (cfg->tx_type == HWTSTAMP_TX_ON); /* cfg TX timestamp */ seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET << @@ -1872,25 +1874,24 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2); writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev, - struct ifreq *ifr) + struct kernel_hwtstamp_config *cfg) { struct am65_cpsw_port *port = am65_ndev_to_port(ndev); - struct hwtstamp_config cfg; if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)) return -EOPNOTSUPP; - cfg.flags = 0; - cfg.tx_type = port->tx_ts_enabled ? + cfg->flags = 0; + cfg->tx_type = port->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT | + cfg->rx_filter = port->rx_ts_enabled ? HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT : HWTSTAMP_FILTER_NONE; - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; + return 0; } static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev, @@ -1901,13 +1902,6 @@ static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev, if (!netif_running(ndev)) return -EINVAL; - switch (cmd) { - case SIOCSHWTSTAMP: - return am65_cpsw_nuss_hwtstamp_set(ndev, req); - case SIOCGHWTSTAMP: - return am65_cpsw_nuss_hwtstamp_get(ndev, req); - } - return phylink_mii_ioctl(port->slave.phylink, req, cmd); } @@ -1991,6 +1985,8 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { .ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate, .ndo_bpf = am65_cpsw_ndo_bpf, .ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit, + .ndo_hwtstamp_get = am65_cpsw_nuss_hwtstamp_get, + .ndo_hwtstamp_set = am65_cpsw_nuss_hwtstamp_set, }; static void am65_cpsw_disable_phy(struct phy *phy) @@ -3072,7 +3068,8 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common) } static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct am65_cpsw_devlink *dl_priv = devlink_priv(dl); struct am65_cpsw_common *common = dl_priv->common; diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c index fa96db7c1a13..66e8b224827b 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-qos.c +++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c @@ -276,9 +276,31 @@ static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port) /* The number of wireside clocks contained in the verify * timeout counter. The default is 0x1312d0 * (10ms at 125Mhz in 1G mode). + * The frequency of the clock depends on the link speed + * and the PHY interface. */ - val = 125 * HZ_PER_MHZ; /* assuming 125MHz wireside clock */ + switch (port->slave.phy_if) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + if (port->qos.link_speed == SPEED_1000) + val = 125 * HZ_PER_MHZ; /* 125 MHz at 1000Mbps*/ + else if (port->qos.link_speed == SPEED_100) + val = 25 * HZ_PER_MHZ; /* 25 MHz at 100Mbps*/ + else + val = (25 * HZ_PER_MHZ) / 10; /* 2.5 MHz at 10Mbps*/ + break; + + case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_SGMII: + val = 125 * HZ_PER_MHZ; /* 125 MHz */ + break; + default: + netdev_err(port->ndev, "selected mode does not supported IET\n"); + return -EOPNOTSUPP; + } val /= MILLIHZ_PER_HZ; /* count per ms timeout */ val *= verify_time_ms; /* count for timeout ms */ @@ -295,20 +317,21 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port) u32 ctrl, status; int try; - try = 20; - do { - /* Reset the verify state machine by writing 1 - * to LINKFAIL - */ - ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL); - ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL; - writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL); + try = 3; - /* Clear MAC_LINKFAIL bit to start Verify. */ - ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL); - ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL; - writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL); + /* Reset the verify state machine by writing 1 + * to LINKFAIL + */ + ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL); + ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL; + writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL); + /* Clear MAC_LINKFAIL bit to start Verify. 
*/ + ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL); + ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL; + writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL); + + do { msleep(port->qos.iet.verify_time_ms); status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS); @@ -330,7 +353,7 @@ static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port) netdev_dbg(port->ndev, "MAC Merge verify error\n"); return -ENODEV; } - } while (try-- > 0); + } while (--try > 0); netdev_dbg(port->ndev, "MAC Merge verify timeout\n"); return -ETIMEDOUT; diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c index 59d6ab989c55..8ffbfaa3ab18 100644 --- a/drivers/net/ethernet/ti/am65-cpts.c +++ b/drivers/net/ethernet/ti/am65-cpts.c @@ -163,7 +163,9 @@ struct am65_cpts { struct device_node *clk_mux_np; struct clk *refclk; u32 refclk_freq; - struct list_head events; + /* separate lists to handle TX and RX timestamp independently */ + struct list_head events_tx; + struct list_head events_rx; struct list_head pool; struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS]; spinlock_t lock; /* protects events lists*/ @@ -227,6 +229,24 @@ static void am65_cpts_disable(struct am65_cpts *cpts) am65_cpts_write32(cpts, 0, int_enable); } +static int am65_cpts_purge_event_list(struct am65_cpts *cpts, + struct list_head *events) +{ + struct list_head *this, *next; + struct am65_cpts_event *event; + int removed = 0; + + list_for_each_safe(this, next, events) { + event = list_entry(this, struct am65_cpts_event, list); + if (time_after(jiffies, event->tmo)) { + list_del_init(&event->list); + list_add(&event->list, &cpts->pool); + ++removed; + } + } + return removed; +} + static int am65_cpts_event_get_port(struct am65_cpts_event *event) { return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >> @@ -239,20 +259,12 @@ static int am65_cpts_event_get_type(struct am65_cpts_event *event) AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT; } -static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts) +static int am65_cpts_purge_events(struct am65_cpts *cpts) { - struct list_head *this, *next; - struct am65_cpts_event *event; int removed = 0; - list_for_each_safe(this, next, &cpts->events) { - event = list_entry(this, struct am65_cpts_event, list); - if (time_after(jiffies, event->tmo)) { - list_del_init(&event->list); - list_add(&event->list, &cpts->pool); - ++removed; - } - } + removed += am65_cpts_purge_event_list(cpts, &cpts->events_tx); + removed += am65_cpts_purge_event_list(cpts, &cpts->events_rx); if (removed) dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed); @@ -287,7 +299,7 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts) struct am65_cpts_event, list); if (!event) { - if (am65_cpts_cpts_purge_events(cpts)) { + if (am65_cpts_purge_events(cpts)) { dev_err(cpts->dev, "cpts: event pool empty\n"); ret = -1; goto out; @@ -306,11 +318,21 @@ static int __am65_cpts_fifo_read(struct am65_cpts *cpts) cpts->timestamp); break; case AM65_CPTS_EV_RX: + event->tmo = jiffies + + msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT); + + list_move_tail(&event->list, &cpts->events_rx); + + dev_dbg(cpts->dev, + "AM65_CPTS_EV_RX e1:%08x e2:%08x t:%lld\n", + event->event1, event->event2, + event->timestamp); + break; case AM65_CPTS_EV_TX: event->tmo = jiffies + msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT); - list_move_tail(&event->list, &cpts->events); + list_move_tail(&event->list, &cpts->events_tx); dev_dbg(cpts->dev, "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n", @@ -828,7 +850,7 @@ static bool 
am65_cpts_match_tx_ts(struct am65_cpts *cpts, return found; } -static void am65_cpts_find_ts(struct am65_cpts *cpts) +static void am65_cpts_find_tx_ts(struct am65_cpts *cpts) { struct am65_cpts_event *event; struct list_head *this, *next; @@ -837,7 +859,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts) LIST_HEAD(events); spin_lock_irqsave(&cpts->lock, flags); - list_splice_init(&cpts->events, &events); + list_splice_init(&cpts->events_tx, &events); spin_unlock_irqrestore(&cpts->lock, flags); list_for_each_safe(this, next, &events) { @@ -850,7 +872,7 @@ static void am65_cpts_find_ts(struct am65_cpts *cpts) } spin_lock_irqsave(&cpts->lock, flags); - list_splice_tail(&events, &cpts->events); + list_splice_tail(&events, &cpts->events_tx); list_splice_tail(&events_free, &cpts->pool); spin_unlock_irqrestore(&cpts->lock, flags); } @@ -861,7 +883,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp) unsigned long flags; long delay = -1; - am65_cpts_find_ts(cpts); + am65_cpts_find_tx_ts(cpts); spin_lock_irqsave(&cpts->txq.lock, flags); if (!skb_queue_empty(&cpts->txq)) @@ -905,7 +927,7 @@ static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid) spin_lock_irqsave(&cpts->lock, flags); __am65_cpts_fifo_read(cpts); - list_for_each_safe(this, next, &cpts->events) { + list_for_each_safe(this, next, &cpts->events_rx) { event = list_entry(this, struct am65_cpts_event, list); if (time_after(jiffies, event->tmo)) { list_move(&event->list, &cpts->pool); @@ -1155,7 +1177,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs, return ERR_PTR(ret); mutex_init(&cpts->ptp_clk_lock); - INIT_LIST_HEAD(&cpts->events); + INIT_LIST_HEAD(&cpts->events_tx); + INIT_LIST_HEAD(&cpts->events_rx); INIT_LIST_HEAD(&cpts->pool); spin_lock_init(&cpts->lock); skb_queue_head_init(&cpts->txq); diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 8b9e2078c602..ab88d4c02cbd 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1618,7 +1618,8 @@ static const struct devlink_ops cpsw_devlink_ops = { }; static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct cpsw_devlink *dl_priv = devlink_priv(dl); struct cpsw_common *cpsw = dl_priv->cpsw; @@ -1753,7 +1754,8 @@ exit: } static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct cpsw_devlink *dl_priv = devlink_priv(dl); struct cpsw_common *cpsw = dl_priv->cpsw; diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 68507126be8e..48f85a3649b2 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -234,7 +234,6 @@ static int davinci_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg) ret = mdiobb_read_c22(bus, phy, reg); - pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); return ret; @@ -251,7 +250,6 @@ static int davinci_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, ret = mdiobb_write_c22(bus, phy, reg, val); - pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); return ret; @@ -268,7 +266,6 @@ static int davinci_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, ret = mdiobb_read_c45(bus, phy, devad, reg); - pm_runtime_mark_last_busy(bus->parent); 
pm_runtime_put_autosuspend(bus->parent); return ret; @@ -285,7 +282,6 @@ static int davinci_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad, ret = mdiobb_write_c45(bus, phy, devad, reg, val); - pm_runtime_mark_last_busy(bus->parent); pm_runtime_put_autosuspend(bus->parent); return ret; @@ -332,7 +328,6 @@ static int davinci_mdio_common_reset(struct davinci_mdio_data *data) data->bus->phy_mask = phy_mask; done: - pm_runtime_mark_last_busy(data->dev); pm_runtime_put_autosuspend(data->dev); return 0; @@ -441,7 +436,6 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) break; } - pm_runtime_mark_last_busy(data->dev); pm_runtime_put_autosuspend(data->dev); return ret; } @@ -478,7 +472,6 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, break; } - pm_runtime_mark_last_busy(data->dev); pm_runtime_put_autosuspend(data->dev); return ret; @@ -548,8 +541,8 @@ static int davinci_mdio_probe(struct platform_device *pdev) struct davinci_mdio_data *data; struct resource *res; struct phy_device *phy; - int ret, addr; int autosuspend_delay_ms = -1; + int ret; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -652,14 +645,10 @@ static int davinci_mdio_probe(struct platform_device *pdev) goto bail_out; /* scan and dump the bus */ - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - phy = mdiobus_get_phy(data->bus, addr); - if (phy) { - dev_info(dev, "phy[%d]: device %s, driver %s\n", - phy->mdio.addr, phydev_name(phy), - phy->drv ? phy->drv->name : "unknown"); - } - } + mdiobus_for_each_phy(data->bus, phy) + dev_info(dev, "phy[%d]: device %s, driver %s\n", + phy->mdio.addr, phydev_name(phy), + phy->drv ? phy->drv->name : "unknown"); return 0; diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index 57e5f1c88f50..090aa74d3ce7 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -93,15 +93,91 @@ void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num) } EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi); +static int emac_xsk_xmit_zc(struct prueth_emac *emac, + unsigned int q_idx) +{ + struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx]; + struct xsk_buff_pool *pool = tx_chn->xsk_pool; + struct net_device *ndev = emac->ndev; + struct cppi5_host_desc_t *host_desc; + dma_addr_t dma_desc, dma_buf; + struct prueth_swdata *swdata; + struct xdp_desc xdp_desc; + int num_tx = 0, pkt_len; + int descs_avail, ret; + u32 *epib; + int i; + + descs_avail = k3_cppi_desc_pool_avail(tx_chn->desc_pool); + /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS + * will be available for normal TX path and queue is stopped there if + * necessary + */ + if (descs_avail <= MAX_SKB_FRAGS) + return 0; + + descs_avail -= MAX_SKB_FRAGS; + + for (i = 0; i < descs_avail; i++) { + if (!xsk_tx_peek_desc(pool, &xdp_desc)) + break; + + dma_buf = xsk_buff_raw_get_dma(pool, xdp_desc.addr); + pkt_len = xdp_desc.len; + xsk_buff_raw_dma_sync_for_device(pool, dma_buf, pkt_len); + + host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); + if (unlikely(!host_desc)) + break; + + cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, + PRUETH_NAV_PS_DATA_SIZE); + cppi5_hdesc_set_pkttype(host_desc, 0); + epib = host_desc->epib; + epib[0] = 0; + epib[1] = 0; + cppi5_hdesc_set_pktlen(host_desc, pkt_len); + cppi5_desc_set_tags_ids(&host_desc->hdr, 0, + (emac->port_id | (q_idx << 8))); + + k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf); + 
cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, + pkt_len); + + swdata = cppi5_hdesc_get_swdata(host_desc); + swdata->type = PRUETH_SWDATA_XSK; + + dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, + host_desc); + ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, + host_desc, dma_desc); + + if (ret) { + ndev->stats.tx_errors++; + k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc); + break; + } + + num_tx++; + } + + xsk_tx_release(tx_chn->xsk_pool); + return num_tx; +} + void prueth_xmit_free(struct prueth_tx_chn *tx_chn, struct cppi5_host_desc_t *desc) { struct cppi5_host_desc_t *first_desc, *next_desc; dma_addr_t buf_dma, next_desc_dma; + struct prueth_swdata *swdata; u32 buf_dma_len; first_desc = desc; next_desc = first_desc; + swdata = cppi5_hdesc_get_swdata(first_desc); + if (swdata->type == PRUETH_SWDATA_XSK) + goto free_pool; cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len); k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); @@ -126,6 +202,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn, k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); } +free_pool: k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc); } EXPORT_SYMBOL_GPL(prueth_xmit_free); @@ -139,7 +216,9 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn, struct prueth_swdata *swdata; struct prueth_tx_chn *tx_chn; unsigned int total_bytes = 0; + int xsk_frames_done = 0; struct xdp_frame *xdpf; + unsigned int pkt_len; struct sk_buff *skb; dma_addr_t desc_dma; int res, num_tx = 0; @@ -176,6 +255,11 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn, total_bytes += xdpf->len; xdp_return_frame(xdpf); break; + case PRUETH_SWDATA_XSK: + pkt_len = cppi5_hdesc_get_pktlen(desc_tx); + dev_sw_netstats_tx_add(ndev, 1, pkt_len); + xsk_frames_done++; + break; default: prueth_xmit_free(tx_chn, desc_tx); ndev->stats.tx_dropped++; @@ -204,6 +288,18 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn, __netif_tx_unlock(netif_txq); } + if (tx_chn->xsk_pool) { + if (xsk_frames_done) + xsk_tx_completed(tx_chn->xsk_pool, xsk_frames_done); + + if (xsk_uses_need_wakeup(tx_chn->xsk_pool)) + xsk_set_tx_need_wakeup(tx_chn->xsk_pool); + + netif_txq = netdev_get_tx_queue(ndev, chn); + txq_trans_cond_update(netif_txq); + emac_xsk_xmit_zc(emac, chn); + } + return num_tx; } @@ -212,7 +308,10 @@ static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer) struct prueth_tx_chn *tx_chns = container_of(timer, struct prueth_tx_chn, tx_hrtimer); - enable_irq(tx_chns->irq); + if (tx_chns->irq_disabled) { + tx_chns->irq_disabled = false; + enable_irq(tx_chns->irq); + } return HRTIMER_NORESTART; } @@ -235,7 +334,10 @@ static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget) ns_to_ktime(tx_chn->tx_pace_timeout_ns), HRTIMER_MODE_REL_PINNED); } else { - enable_irq(tx_chn->irq); + if (tx_chn->irq_disabled) { + tx_chn->irq_disabled = false; + enable_irq(tx_chn->irq); + } } } @@ -246,6 +348,7 @@ static irqreturn_t prueth_tx_irq(int irq, void *dev_id) { struct prueth_tx_chn *tx_chn = dev_id; + tx_chn->irq_disabled = true; disable_irq_nosync(irq); napi_schedule(&tx_chn->napi_tx); @@ -362,6 +465,29 @@ fail: } EXPORT_SYMBOL_GPL(prueth_init_tx_chns); +static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac, + struct device *dma_dev, + int size) +{ + struct page_pool_params pp_params = { 0 }; + struct page_pool *pool; + + pp_params.order = 0; + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.pool_size = size; + pp_params.nid 
= dev_to_node(emac->prueth->dev); + pp_params.dma_dir = DMA_BIDIRECTIONAL; + pp_params.dev = dma_dev; + pp_params.napi = &emac->napi_rx; + pp_params.max_len = PAGE_SIZE; + + pool = page_pool_create(&pp_params); + if (IS_ERR(pool)) + netdev_err(emac->ndev, "cannot create rx page pool\n"); + + return pool; +} + int prueth_init_rx_chns(struct prueth_emac *emac, struct prueth_rx_chn *rx_chn, char *name, u32 max_rflows, @@ -371,6 +497,7 @@ int prueth_init_rx_chns(struct prueth_emac *emac, struct device *dev = emac->prueth->dev; struct net_device *ndev = emac->ndev; u32 fdqring_id, hdesc_size; + struct page_pool *pool; int i, ret = 0, slice; int flow_id_base; @@ -413,6 +540,14 @@ int prueth_init_rx_chns(struct prueth_emac *emac, goto fail; } + pool = prueth_create_page_pool(emac, rx_chn->dma_dev, rx_chn->descs_num); + if (IS_ERR(pool)) { + ret = PTR_ERR(pool); + goto fail; + } + + rx_chn->pg_pool = pool; + flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn); if (emac->is_sr1 && !strcmp(name, "rxmgm")) { emac->rx_mgm_flow_id_base = flow_id_base; @@ -544,15 +679,15 @@ void emac_rx_timestamp(struct prueth_emac *emac, * emac_xmit_xdp_frame - transmits an XDP frame * @emac: emac device * @xdpf: data to transmit - * @page: page from page pool if already DMA mapped * @q_idx: queue id + * @buff_type: Type of buffer to be transmitted * * Return: XDP state */ u32 emac_xmit_xdp_frame(struct prueth_emac *emac, struct xdp_frame *xdpf, - struct page *page, - unsigned int q_idx) + unsigned int q_idx, + enum prueth_tx_buff_type buff_type) { struct cppi5_host_desc_t *first_desc; struct net_device *ndev = emac->ndev; @@ -560,6 +695,7 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac, struct prueth_tx_chn *tx_chn; dma_addr_t desc_dma, buf_dma; struct prueth_swdata *swdata; + struct page *page; u32 *epib; int ret; @@ -576,7 +712,12 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac, return ICSSG_XDP_CONSUMED; /* drop */ } - if (page) { /* already DMA mapped by page_pool */ + if (buff_type == PRUETH_TX_BUFF_TYPE_XDP_TX) { /* already DMA mapped by page_pool */ + page = virt_to_head_page(xdpf->data); + if (unlikely(!page)) { + netdev_err(ndev, "xdp tx: failed to get page from xdpf\n"); + goto drop_free_descs; + } buf_dma = page_pool_get_dma_addr(page); buf_dma += xdpf->headroom + sizeof(struct xdp_frame); } else { /* Map the linear buffer */ @@ -631,13 +772,11 @@ EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame); * emac_run_xdp - run an XDP program * @emac: emac device * @xdp: XDP buffer containing the frame - * @page: page with RX data if already DMA mapped * @len: Rx descriptor packet length * * Return: XDP state */ -static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, - struct page *page, u32 *len) +static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *len) { struct net_device *ndev = emac->ndev; struct netdev_queue *netif_txq; @@ -664,7 +803,8 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, q_idx = cpu % emac->tx_ch_num; netif_txq = netdev_get_tx_queue(ndev, q_idx); __netif_tx_lock(netif_txq, cpu); - result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx); + result = emac_xmit_xdp_frame(emac, xdpf, q_idx, + PRUETH_TX_BUFF_TYPE_XDP_TX); __netif_tx_unlock(netif_txq); if (result == ICSSG_XDP_CONSUMED) { ndev->stats.tx_dropped++; @@ -689,11 +829,188 @@ drop: fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: ndev->stats.rx_dropped++; - page_pool_recycle_direct(emac->rx_chns.pg_pool, page); return ICSSG_XDP_CONSUMED; } } +static int 
prueth_dma_rx_push_mapped_zc(struct prueth_emac *emac, + struct prueth_rx_chn *rx_chn, + struct xdp_buff *xdp) +{ + struct net_device *ndev = emac->ndev; + struct cppi5_host_desc_t *desc_rx; + struct prueth_swdata *swdata; + dma_addr_t desc_dma; + dma_addr_t buf_dma; + int buf_len; + + buf_dma = xsk_buff_xdp_get_dma(xdp); + desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool); + if (!desc_rx) { + netdev_err(ndev, "rx push: failed to allocate descriptor\n"); + return -ENOMEM; + } + desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx); + + cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT, + PRUETH_NAV_PS_DATA_SIZE); + k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma); + buf_len = xsk_pool_get_rx_frame_size(rx_chn->xsk_pool); + cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len); + swdata = cppi5_hdesc_get_swdata(desc_rx); + swdata->type = PRUETH_SWDATA_XSK; + swdata->data.xdp = xdp; + + return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA, + desc_rx, desc_dma); +} + +static int prueth_rx_alloc_zc(struct prueth_emac *emac, int budget) +{ + struct prueth_rx_chn *rx_chn = &emac->rx_chns; + struct xdp_buff *xdp; + int i, ret; + + for (i = 0; i < budget; i++) { + xdp = xsk_buff_alloc(rx_chn->xsk_pool); + if (!xdp) + break; + + ret = prueth_dma_rx_push_mapped_zc(emac, rx_chn, xdp); + if (ret) { + netdev_err(emac->ndev, "rx alloc: failed to map descriptors to xdp buff\n"); + xsk_buff_free(xdp); + break; + } + } + + return i; +} + +static void emac_dispatch_skb_zc(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *psdata) +{ + unsigned int headroom = xdp->data - xdp->data_hard_start; + unsigned int pkt_len = xdp->data_end - xdp->data; + struct net_device *ndev = emac->ndev; + struct sk_buff *skb; + + skb = napi_alloc_skb(&emac->napi_rx, xdp->data_end - xdp->data_hard_start); + if (unlikely(!skb)) { + ndev->stats.rx_dropped++; + return; + } + + skb_reserve(skb, headroom); + skb_put(skb, pkt_len); + skb->dev = ndev; + + /* RX HW timestamp */ + if (emac->rx_ts_enabled) + emac_rx_timestamp(emac, skb, psdata); + + if (emac->prueth->is_switch_mode) + skb->offload_fwd_mark = emac->offload_fwd_mark; + skb->protocol = eth_type_trans(skb, ndev); + + skb_mark_for_recycle(skb); + napi_gro_receive(&emac->napi_rx, skb); + ndev->stats.rx_bytes += pkt_len; + ndev->stats.rx_packets++; +} + +static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id, + int budget) +{ + struct prueth_rx_chn *rx_chn = &emac->rx_chns; + u32 buf_dma_len, pkt_len, port_id = 0; + struct net_device *ndev = emac->ndev; + struct cppi5_host_desc_t *desc_rx; + struct prueth_swdata *swdata; + dma_addr_t desc_dma, buf_dma; + struct xdp_buff *xdp; + int xdp_status = 0; + int count = 0; + u32 *psdata; + int ret; + + while (count < budget) { + ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma); + if (ret) { + if (ret != -ENODATA) + netdev_err(ndev, "rx pop: failed: %d\n", ret); + break; + } + + if (cppi5_desc_is_tdcm(desc_dma)) { + complete(&emac->tdown_complete); + break; + } + + desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); + swdata = cppi5_hdesc_get_swdata(desc_rx); + if (swdata->type != PRUETH_SWDATA_XSK) { + netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type); + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); + break; + } + + xdp = swdata->data.xdp; + cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); + k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); + pkt_len = cppi5_hdesc_get_pktlen(desc_rx); + /* 
firmware adds 4 CRC bytes, strip them */ + pkt_len -= 4; + cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); + psdata = cppi5_hdesc_get_psdata(desc_rx); + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); + count++; + xsk_buff_set_size(xdp, pkt_len); + xsk_buff_dma_sync_for_cpu(xdp); + + if (prueth_xdp_is_enabled(emac)) { + ret = emac_run_xdp(emac, xdp, &pkt_len); + switch (ret) { + case ICSSG_XDP_PASS: + /* prepare skb and send to n/w stack */ + emac_dispatch_skb_zc(emac, xdp, psdata); + xsk_buff_free(xdp); + break; + case ICSSG_XDP_CONSUMED: + xsk_buff_free(xdp); + break; + case ICSSG_XDP_TX: + case ICSSG_XDP_REDIR: + xdp_status |= ret; + break; + } + } else { + /* prepare skb and send to n/w stack */ + emac_dispatch_skb_zc(emac, xdp, psdata); + xsk_buff_free(xdp); + } + } + + if (xdp_status & ICSSG_XDP_REDIR) + xdp_do_flush(); + + /* Allocate xsk buffers from the pool for the "count" number of + * packets processed in order to be able to receive more packets. + */ + ret = prueth_rx_alloc_zc(emac, count); + + if (xsk_uses_need_wakeup(rx_chn->xsk_pool)) { + /* If the user space doesn't provide enough buffers then it must + * explicitly wake up the kernel when new buffers are available + */ + if (ret < count) + xsk_set_rx_need_wakeup(rx_chn->xsk_pool); + else + xsk_clear_rx_need_wakeup(rx_chn->xsk_pool); + } + + return count; +} + static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) { struct prueth_rx_chn *rx_chn = &emac->rx_chns; @@ -719,8 +1036,10 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) return ret; } - if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */ + if (cppi5_desc_is_tdcm(desc_dma)) { + complete(&emac->tdown_complete); return 0; + } desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); @@ -738,7 +1057,6 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) /* firmware adds 4 CRC bytes, strip them */ pkt_len -= 4; cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); - k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); /* if allocation fails we drop the packet but push the @@ -752,11 +1070,11 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) } pa = page_address(page); - if (emac->xdp_prog) { + if (prueth_xdp_is_enabled(emac)) { xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq); xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false); - *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len); + *xdp_state = emac_run_xdp(emac, &xdp, &pkt_len); if (*xdp_state != ICSSG_XDP_PASS) goto requeue; headroom = xdp.data - xdp.data_hard_start; @@ -804,24 +1122,29 @@ requeue: return ret; } -static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma) +void prueth_rx_cleanup(void *data, dma_addr_t desc_dma) { struct prueth_rx_chn *rx_chn = data; struct cppi5_host_desc_t *desc_rx; struct prueth_swdata *swdata; struct page_pool *pool; + struct xdp_buff *xdp; struct page *page; pool = rx_chn->pg_pool; desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); - if (swdata->type == PRUETH_SWDATA_PAGE) { + if (rx_chn->xsk_pool) { + xdp = swdata->data.xdp; + xsk_buff_free(xdp); + } else { page = swdata->data.page; page_pool_recycle_direct(pool, page); } k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); } +EXPORT_SYMBOL_GPL(prueth_rx_cleanup); static int prueth_tx_ts_cookie_get(struct prueth_emac *emac) { @@ -1025,10 +1348,11 @@ drop_stop_q_busy: } 
EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit); -static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) +void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) { struct prueth_tx_chn *tx_chn = data; struct cppi5_host_desc_t *desc_tx; + struct xsk_buff_pool *xsk_pool; struct prueth_swdata *swdata; struct xdp_frame *xdpf; struct sk_buff *skb; @@ -1045,17 +1369,23 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) xdpf = swdata->data.xdpf; xdp_return_frame(xdpf); break; + case PRUETH_SWDATA_XSK: + xsk_pool = tx_chn->xsk_pool; + xsk_tx_completed(xsk_pool, 1); + break; default: break; } prueth_xmit_free(tx_chn, desc_tx); } +EXPORT_SYMBOL_GPL(prueth_tx_cleanup); irqreturn_t prueth_rx_irq(int irq, void *dev_id) { struct prueth_emac *emac = dev_id; + emac->rx_chns.irq_disabled = true; disable_irq_nosync(irq); napi_schedule(&emac->napi_rx); @@ -1083,6 +1413,7 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA; int flow = emac->is_sr1 ? PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS; + struct prueth_rx_chn *rx_chn = &emac->rx_chns; int xdp_state_or = 0; int num_rx = 0; int cur_budget; @@ -1090,14 +1421,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) int ret; while (flow--) { - cur_budget = budget - num_rx; - - while (cur_budget--) { - ret = emac_rx_packet(emac, flow, &xdp_state); - xdp_state_or |= xdp_state; - if (ret) - break; - num_rx++; + if (rx_chn->xsk_pool) { + num_rx = emac_rx_packet_zc(emac, flow, budget); + } else { + cur_budget = budget - num_rx; + + while (cur_budget--) { + ret = emac_rx_packet(emac, flow, &xdp_state); + xdp_state_or |= xdp_state; + if (ret) + break; + num_rx++; + } } if (num_rx >= budget) @@ -1113,7 +1448,11 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) ns_to_ktime(emac->rx_pace_timeout_ns), HRTIMER_MODE_REL_PINNED); } else { - enable_irq(emac->rx_chns.irq[rx_flow]); + if (emac->rx_chns.irq_disabled) { + /* re-enable the RX IRQ */ + emac->rx_chns.irq_disabled = false; + enable_irq(emac->rx_chns.irq[rx_flow]); + } } } @@ -1121,62 +1460,48 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) } EXPORT_SYMBOL_GPL(icssg_napi_rx_poll); -static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac, - struct device *dma_dev, - int size) -{ - struct page_pool_params pp_params = { 0 }; - struct page_pool *pool; - - pp_params.order = 0; - pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; - pp_params.pool_size = size; - pp_params.nid = dev_to_node(emac->prueth->dev); - pp_params.dma_dir = DMA_BIDIRECTIONAL; - pp_params.dev = dma_dev; - pp_params.napi = &emac->napi_rx; - pp_params.max_len = PAGE_SIZE; - - pool = page_pool_create(&pp_params); - if (IS_ERR(pool)) - netdev_err(emac->ndev, "cannot create rx page pool\n"); - - return pool; -} - int prueth_prepare_rx_chan(struct prueth_emac *emac, struct prueth_rx_chn *chn, int buf_size) { - struct page_pool *pool; struct page *page; + int desc_avail; int i, ret; - pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num); - if (IS_ERR(pool)) - return PTR_ERR(pool); - - chn->pg_pool = pool; + desc_avail = k3_cppi_desc_pool_avail(chn->desc_pool); + if (desc_avail < chn->descs_num) + netdev_warn(emac->ndev, + "not enough RX descriptors available %d < %d\n", + desc_avail, chn->descs_num); - for (i = 0; i < chn->descs_num; i++) { - /* NOTE: we're not using memory efficiently here. - * 1 full page (4KB?) used here instead of - * PRUETH_MAX_PKT_SIZE (~1.5KB?) 
+ if (chn->xsk_pool) { + /* get pages from xsk_pool and push to RX ring + * queue as much as possible */ - page = page_pool_dev_alloc_pages(pool); - if (!page) { - netdev_err(emac->ndev, "couldn't allocate rx page\n"); - ret = -ENOMEM; + ret = prueth_rx_alloc_zc(emac, desc_avail); + if (!ret) goto recycle_alloc_pg; - } + } else { + for (i = 0; i < desc_avail; i++) { + /* NOTE: we're not using memory efficiently here. + * 1 full page (4KB?) used here instead of + * PRUETH_MAX_PKT_SIZE (~1.5KB?) + */ + page = page_pool_dev_alloc_pages(chn->pg_pool); + if (!page) { + netdev_err(emac->ndev, "couldn't allocate rx page\n"); + ret = -ENOMEM; + goto recycle_alloc_pg; + } - ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size); - if (ret < 0) { - netdev_err(emac->ndev, - "cannot submit page for rx chan %s ret %d\n", - chn->name, ret); - page_pool_recycle_direct(pool, page); - goto recycle_alloc_pg; + ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size); + if (ret < 0) { + netdev_err(emac->ndev, + "cannot submit page for rx chan %s ret %d\n", + chn->name, ret); + page_pool_recycle_direct(chn->pg_pool, page); + goto recycle_alloc_pg; + } } } @@ -1223,15 +1548,13 @@ void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue) } EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout); -static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) +int icssg_ndo_set_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) { struct prueth_emac *emac = netdev_priv(ndev); - struct hwtstamp_config config; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - switch (config.tx_type) { + switch (config->tx_type) { case HWTSTAMP_TX_OFF: emac->tx_ts_enabled = 0; break; @@ -1242,7 +1565,7 @@ static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) return -ERANGE; } - switch (config.rx_filter) { + switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: emac->rx_ts_enabled = 0; break; @@ -1262,43 +1585,28 @@ static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL: emac->rx_ts_enabled = 1; - config.rx_filter = HWTSTAMP_FILTER_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: return -ERANGE; } - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; + return 0; } +EXPORT_SYMBOL_GPL(icssg_ndo_set_ts_config); -static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr) +int icssg_ndo_get_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config) { struct prueth_emac *emac = netdev_priv(ndev); - struct hwtstamp_config config; - - config.flags = 0; - config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? - -EFAULT : 0; -} + config->flags = 0; + config->tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + config->rx_filter = emac->rx_ts_enabled ? 
HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE; -int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGHWTSTAMP: - return emac_get_ts_config(ndev, ifr); - case SIOCSHWTSTAMP: - return emac_set_ts_config(ndev, ifr); - default: - break; - } - - return phy_do_ioctl(ndev, ifr, cmd); + return 0; } -EXPORT_SYMBOL_GPL(icssg_ndo_ioctl); +EXPORT_SYMBOL_GPL(icssg_ndo_get_ts_config); void icssg_ndo_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c index da53eb04b0a4..3f8237c17d09 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.c +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -66,6 +66,9 @@ #define FDB_GEN_CFG1 0x60 #define SMEM_VLAN_OFFSET 8 #define SMEM_VLAN_OFFSET_MASK GENMASK(25, 8) +#define FDB_HASH_SIZE_MASK GENMASK(6, 3) +#define FDB_HASH_SIZE_SHIFT 3 +#define FDB_HASH_SIZE 3 #define FDB_GEN_CFG2 0x64 #define FDB_VLAN_EN BIT(6) @@ -463,6 +466,8 @@ void icssg_init_emac_mode(struct prueth *prueth) /* Set VLAN TABLE address base */ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, addr << SMEM_VLAN_OFFSET); + regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK, + FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT); /* Set enable VLAN aware mode, and FDBs for all PRUs */ regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN)); prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va + @@ -484,6 +489,8 @@ void icssg_init_fw_offload_mode(struct prueth *prueth) /* Set VLAN TABLE address base */ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, addr << SMEM_VLAN_OFFSET); + regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, FDB_HASH_SIZE_MASK, + FDB_HASH_SIZE << FDB_HASH_SIZE_SHIFT); /* Set enable VLAN aware mode, and FDBs for all PRUs */ regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL); prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va + diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index e42d0fdefee1..f65041662173 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -47,6 +47,9 @@ NETIF_F_HW_HSR_TAG_INS | \ NETIF_F_HW_HSR_TAG_RM) +#define PRUETH_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC |\ + DMA_ATTR_WEAK_ORDERING) + /* CTRLMMR_ICSSG_RGMII_CTRL register bits */ #define ICSSG_CTRL_RGMII_ID_MODE BIT(24) @@ -392,7 +395,11 @@ static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer) container_of(timer, struct prueth_emac, rx_hrtimer); int rx_flow = PRUETH_RX_FLOW_DATA; - enable_irq(emac->rx_chns.irq[rx_flow]); + if (emac->rx_chns.irq_disabled) { + /* re-enable the RX IRQ */ + emac->rx_chns.irq_disabled = false; + enable_irq(emac->rx_chns.irq[rx_flow]); + } return HRTIMER_NORESTART; } @@ -566,31 +573,41 @@ const struct icss_iep_clockops prueth_iep_clockops = { .perout_enable = prueth_perout_enable, }; +static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac) +{ + struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq; + + if (xdp_rxq_info_is_reg(rxq)) + xdp_rxq_info_unreg(rxq); +} + static int prueth_create_xdp_rxqs(struct prueth_emac *emac) { struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq; struct page_pool *pool = emac->rx_chns.pg_pool; + struct prueth_rx_chn *rx_chn = &emac->rx_chns; int ret; ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id); if (ret) return 
ret; - ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); - if (ret) - xdp_rxq_info_unreg(rxq); - - return ret; -} - -static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac) -{ - struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq; + if (rx_chn->xsk_pool) { + ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL); + if (ret) + goto xdp_unreg; + xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq); + } else { + ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); + if (ret) + goto xdp_unreg; + } - if (!xdp_rxq_info_is_reg(rxq)) - return; + return 0; - xdp_rxq_info_unreg(rxq); +xdp_unreg: + prueth_destroy_xdp_rxqs(emac); + return ret; } static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr) @@ -735,6 +752,128 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid, return 0; } +static void prueth_set_xsk_pool(struct prueth_emac *emac, u16 queue_id) +{ + struct prueth_tx_chn *tx_chn = &emac->tx_chns[queue_id]; + struct prueth_rx_chn *rx_chn = &emac->rx_chns; + + if (emac->xsk_qid != queue_id) { + rx_chn->xsk_pool = NULL; + tx_chn->xsk_pool = NULL; + } else { + rx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id); + tx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id); + } +} + +static void prueth_destroy_txq(struct prueth_emac *emac) +{ + int ret, i; + + atomic_set(&emac->tdown_cnt, emac->tx_ch_num); + /* ensure new tdown_cnt value is visible */ + smp_mb__after_atomic(); + /* tear down and disable UDMA channels */ + reinit_completion(&emac->tdown_complete); + for (i = 0; i < emac->tx_ch_num; i++) + k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false); + + ret = wait_for_completion_timeout(&emac->tdown_complete, + msecs_to_jiffies(1000)); + if (!ret) + netdev_err(emac->ndev, "tx teardown timeout\n"); + + for (i = 0; i < emac->tx_ch_num; i++) { + napi_disable(&emac->tx_chns[i].napi_tx); + hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer); + k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn, + &emac->tx_chns[i], + prueth_tx_cleanup); + k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn); + } +} + +static void prueth_destroy_rxq(struct prueth_emac *emac) +{ + int i, ret; + + /* tear down and disable UDMA channels */ + reinit_completion(&emac->tdown_complete); + k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true); + + /* When RX DMA Channel Teardown is initiated, it will result in an + * interrupt and a Teardown Completion Marker (TDCM) is queued into + * the RX Completion queue. Acknowledging the interrupt involves + * popping the TDCM descriptor from the RX Completion queue via the + * RX NAPI Handler. To avoid timing out when waiting for the TDCM to + * be popped, schedule the RX NAPI handler to run immediately. 
+ */ + if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) { + if (napi_schedule_prep(&emac->napi_rx)) + __napi_schedule(&emac->napi_rx); + } + + ret = wait_for_completion_timeout(&emac->tdown_complete, + msecs_to_jiffies(1000)); + if (!ret) + netdev_err(emac->ndev, "rx teardown timeout\n"); + + for (i = 0; i < PRUETH_MAX_RX_FLOWS; i++) { + napi_disable(&emac->napi_rx); + hrtimer_cancel(&emac->rx_hrtimer); + k3_udma_glue_reset_rx_chn(emac->rx_chns.rx_chn, i, + &emac->rx_chns, + prueth_rx_cleanup); + } + + prueth_destroy_xdp_rxqs(emac); + k3_udma_glue_disable_rx_chn(emac->rx_chns.rx_chn); +} + +static int prueth_create_txq(struct prueth_emac *emac) +{ + int ret, i; + + for (i = 0; i < emac->tx_ch_num; i++) { + ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn); + if (ret) + goto reset_tx_chan; + napi_enable(&emac->tx_chns[i].napi_tx); + } + return 0; + +reset_tx_chan: + /* Since interface is not yet up, there is wouldn't be + * any SKB for completion. So set false to free_skb + */ + prueth_reset_tx_chan(emac, i, false); + return ret; +} + +static int prueth_create_rxq(struct prueth_emac *emac) +{ + int ret; + + ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE); + if (ret) + return ret; + + ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn); + if (ret) + goto reset_rx_chn; + + ret = prueth_create_xdp_rxqs(emac); + if (ret) + goto reset_rx_chn; + + napi_enable(&emac->napi_rx); + return 0; + +reset_rx_chn: + prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false); + return ret; +} + /** * emac_ndo_open - EMAC device open * @ndev: network adapter device @@ -746,7 +885,7 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid, static int emac_ndo_open(struct net_device *ndev) { struct prueth_emac *emac = netdev_priv(ndev); - int ret, i, num_data_chn = emac->tx_ch_num; + int ret, num_data_chn = emac->tx_ch_num; struct icssg_flow_cfg __iomem *flow_cfg; struct prueth *prueth = emac->prueth; int slice = prueth_emac_slice(emac); @@ -767,6 +906,7 @@ static int emac_ndo_open(struct net_device *ndev) return ret; } + emac->xsk_qid = -EINVAL; init_completion(&emac->cmd_complete); ret = prueth_init_tx_chns(emac); if (ret) { @@ -819,28 +959,13 @@ static int emac_ndo_open(struct net_device *ndev) goto stop; /* Prepare RX */ - ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE); + ret = prueth_create_rxq(emac); if (ret) goto free_tx_ts_irq; - ret = prueth_create_xdp_rxqs(emac); - if (ret) - goto reset_rx_chn; - - ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn); + ret = prueth_create_txq(emac); if (ret) - goto destroy_xdp_rxqs; - - for (i = 0; i < emac->tx_ch_num; i++) { - ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn); - if (ret) - goto reset_tx_chan; - } - - /* Enable NAPI in Tx and Rx direction */ - for (i = 0; i < emac->tx_ch_num; i++) - napi_enable(&emac->tx_chns[i].napi_tx); - napi_enable(&emac->napi_rx); + goto destroy_rxq; /* start PHY */ phy_start(ndev->phydev); @@ -851,15 +976,8 @@ static int emac_ndo_open(struct net_device *ndev) return 0; -reset_tx_chan: - /* Since interface is not yet up, there is wouldn't be - * any SKB for completion. 
So set false to free_skb - */ - prueth_reset_tx_chan(emac, i, false); -destroy_xdp_rxqs: - prueth_destroy_xdp_rxqs(emac); -reset_rx_chn: - prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false); +destroy_rxq: + prueth_destroy_rxq(emac); free_tx_ts_irq: free_irq(emac->tx_ts_irq, emac); stop: @@ -889,9 +1007,6 @@ static int emac_ndo_stop(struct net_device *ndev) { struct prueth_emac *emac = netdev_priv(ndev); struct prueth *prueth = emac->prueth; - int rx_flow = PRUETH_RX_FLOW_DATA; - int max_rx_flows; - int ret, i; /* inform the upper layers. */ netif_tx_stop_all_queues(ndev); @@ -905,32 +1020,8 @@ static int emac_ndo_stop(struct net_device *ndev) else __dev_mc_unsync(ndev, icssg_prueth_del_mcast); - atomic_set(&emac->tdown_cnt, emac->tx_ch_num); - /* ensure new tdown_cnt value is visible */ - smp_mb__after_atomic(); - /* tear down and disable UDMA channels */ - reinit_completion(&emac->tdown_complete); - for (i = 0; i < emac->tx_ch_num; i++) - k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false); - - ret = wait_for_completion_timeout(&emac->tdown_complete, - msecs_to_jiffies(1000)); - if (!ret) - netdev_err(ndev, "tx teardown timeout\n"); - - prueth_reset_tx_chan(emac, emac->tx_ch_num, true); - for (i = 0; i < emac->tx_ch_num; i++) { - napi_disable(&emac->tx_chns[i].napi_tx); - hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer); - } - - max_rx_flows = PRUETH_MAX_RX_FLOWS; - k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true); - - prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true); - prueth_destroy_xdp_rxqs(emac); - napi_disable(&emac->napi_rx); - hrtimer_cancel(&emac->rx_hrtimer); + prueth_destroy_txq(emac); + prueth_destroy_rxq(emac); cancel_work_sync(&emac->rx_mode_work); @@ -943,10 +1034,10 @@ static int emac_ndo_stop(struct net_device *ndev) free_irq(emac->tx_ts_irq, emac); - free_irq(emac->rx_chns.irq[rx_flow], emac); + free_irq(emac->rx_chns.irq[PRUETH_RX_FLOW_DATA], emac); prueth_ndev_del_tx_napi(emac, emac->tx_ch_num); - prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows); + prueth_cleanup_rx_chns(emac, &emac->rx_chns, PRUETH_MAX_RX_FLOWS); prueth_cleanup_tx_chns(emac); prueth->emacs_initialized--; @@ -1108,7 +1199,8 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame __netif_tx_lock(netif_txq, cpu); for (i = 0; i < n; i++) { xdpf = frames[i]; - err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx); + err = emac_xmit_xdp_frame(emac, xdpf, q_idx, + PRUETH_TX_BUFF_TYPE_XDP_NDO); if (err != ICSSG_XDP_TX) { ndev->stats.tx_dropped++; break; @@ -1141,6 +1233,109 @@ static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf) return 0; } +static int prueth_xsk_pool_enable(struct prueth_emac *emac, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct prueth_rx_chn *rx_chn = &emac->rx_chns; + u32 frame_size; + int ret; + + if (queue_id >= PRUETH_MAX_RX_FLOWS || + queue_id >= emac->tx_ch_num) { + netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id); + return -EINVAL; + } + + frame_size = xsk_pool_get_rx_frame_size(pool); + if (frame_size < PRUETH_MAX_PKT_SIZE) + return -EOPNOTSUPP; + + ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR); + if (ret) { + netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret); + return ret; + } + + if (netif_running(emac->ndev)) { + /* stop packets from wire for graceful teardown */ + ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE); + if (ret) + return ret; + prueth_destroy_rxq(emac); + } + + emac->xsk_qid = queue_id; + prueth_set_xsk_pool(emac, queue_id); + + 
if (netif_running(emac->ndev)) { + ret = prueth_create_rxq(emac); + if (ret) { + netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret); + return ret; + } + ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD); + if (ret) { + prueth_destroy_rxq(emac); + return ret; + } + ret = prueth_xsk_wakeup(emac->ndev, queue_id, XDP_WAKEUP_RX); + if (ret) + return ret; + } + + return 0; +} + +static int prueth_xsk_pool_disable(struct prueth_emac *emac, u16 queue_id) +{ + struct xsk_buff_pool *pool; + int ret; + + if (queue_id >= PRUETH_MAX_RX_FLOWS || + queue_id >= emac->tx_ch_num) { + netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id); + return -EINVAL; + } + + if (emac->xsk_qid != queue_id) { + netdev_err(emac->ndev, "XSK queue ID %d not registered\n", queue_id); + return -EINVAL; + } + + pool = xsk_get_pool_from_qid(emac->ndev, queue_id); + if (!pool) { + netdev_err(emac->ndev, "No XSK pool registered for queue %d\n", queue_id); + return -EINVAL; + } + + if (netif_running(emac->ndev)) { + /* stop packets from wire for graceful teardown */ + ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE); + if (ret) + return ret; + prueth_destroy_rxq(emac); + } + + xsk_pool_dma_unmap(pool, PRUETH_RX_DMA_ATTR); + emac->xsk_qid = -EINVAL; + prueth_set_xsk_pool(emac, queue_id); + + if (netif_running(emac->ndev)) { + ret = prueth_create_rxq(emac); + if (ret) { + netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret); + return ret; + } + ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD); + if (ret) { + prueth_destroy_rxq(emac); + return ret; + } + } + + return 0; +} + /** * emac_ndo_bpf - implements ndo_bpf for icssg_prueth * @ndev: network adapter device @@ -1155,11 +1350,58 @@ static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) switch (bpf->command) { case XDP_SETUP_PROG: return emac_xdp_setup(emac, bpf); + case XDP_SETUP_XSK_POOL: + return bpf->xsk.pool ? 
+ prueth_xsk_pool_enable(emac, bpf->xsk.pool, bpf->xsk.queue_id) : + prueth_xsk_pool_disable(emac, bpf->xsk.queue_id); default: return -EINVAL; } } +int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth_tx_chn *tx_chn = &emac->tx_chns[qid]; + struct prueth_rx_chn *rx_chn = &emac->rx_chns; + + if (emac->xsk_qid != qid) { + netdev_err(ndev, "XSK queue %d not registered\n", qid); + return -EINVAL; + } + + if (qid >= PRUETH_MAX_RX_FLOWS || qid >= emac->tx_ch_num) { + netdev_err(ndev, "Invalid XSK queue ID %d\n", qid); + return -EINVAL; + } + + if (!tx_chn->xsk_pool) { + netdev_err(ndev, "XSK pool not registered for queue %d\n", qid); + return -EINVAL; + } + + if (!rx_chn->xsk_pool) { + netdev_err(ndev, "XSK pool not registered for RX queue %d\n", qid); + return -EINVAL; + } + + if (flags & XDP_WAKEUP_TX) { + if (!napi_if_scheduled_mark_missed(&tx_chn->napi_tx)) { + if (likely(napi_schedule_prep(&tx_chn->napi_tx))) + __napi_schedule(&tx_chn->napi_tx); + } + } + + if (flags & XDP_WAKEUP_RX) { + if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) { + if (likely(napi_schedule_prep(&emac->napi_rx))) + __napi_schedule(&emac->napi_rx); + } + } + + return 0; +} + static const struct net_device_ops emac_netdev_ops = { .ndo_open = emac_ndo_open, .ndo_stop = emac_ndo_stop, @@ -1168,7 +1410,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = icssg_ndo_tx_timeout, .ndo_set_rx_mode = emac_ndo_set_rx_mode, - .ndo_eth_ioctl = icssg_ndo_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_get_stats64 = icssg_ndo_get_stats64, .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, .ndo_fix_features = emac_ndo_fix_features, @@ -1176,6 +1418,9 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid, .ndo_bpf = emac_ndo_bpf, .ndo_xdp_xmit = emac_xdp_xmit, + .ndo_hwtstamp_get = icssg_ndo_get_ts_config, + .ndo_hwtstamp_set = icssg_ndo_set_ts_config, + .ndo_xsk_wakeup = prueth_xsk_wakeup, }; static int prueth_netdev_init(struct prueth *prueth, @@ -1248,8 +1493,7 @@ static int prueth_netdev_init(struct prueth *prueth, } else if (of_phy_is_fixed_link(eth_node)) { ret = of_phy_register_fixed_link(eth_node); if (ret) { - ret = dev_err_probe(prueth->dev, ret, - "failed to register fixed-link phy\n"); + dev_err_probe(prueth->dev, ret, "failed to register fixed-link phy\n"); goto free; } @@ -1310,7 +1554,8 @@ static int prueth_netdev_init(struct prueth *prueth, xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_NDO_XMIT); + NETDEV_XDP_ACT_NDO_XMIT | + NETDEV_XDP_ACT_XSK_ZEROCOPY); netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll); hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC, diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index ca8a22a4a5da..10eadd356650 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -38,6 +38,8 @@ #include <net/devlink.h> #include <net/xdp.h> #include <net/page_pool/helpers.h> +#include <net/xsk_buff_pool.h> +#include <net/xdp_sock_drv.h> #include "icssg_config.h" #include "icss_iep.h" @@ -126,6 +128,8 @@ struct prueth_tx_chn { char name[32]; struct hrtimer tx_hrtimer; unsigned long tx_pace_timeout_ns; + struct xsk_buff_pool *xsk_pool; + bool irq_disabled; }; struct prueth_rx_chn { @@ -138,6 +142,8 @@ struct prueth_rx_chn 
{ char name[32]; struct page_pool *pg_pool; struct xdp_rxq_info xdp_rxq; + struct xsk_buff_pool *xsk_pool; + bool irq_disabled; }; enum prueth_swdata_type { @@ -146,6 +152,12 @@ enum prueth_swdata_type { PRUETH_SWDATA_PAGE, PRUETH_SWDATA_CMD, PRUETH_SWDATA_XDPF, + PRUETH_SWDATA_XSK, +}; + +enum prueth_tx_buff_type { + PRUETH_TX_BUFF_TYPE_XDP_TX, + PRUETH_TX_BUFF_TYPE_XDP_NDO, }; struct prueth_swdata { @@ -155,6 +167,7 @@ struct prueth_swdata { struct page *page; u32 cmd; struct xdp_frame *xdpf; + struct xdp_buff *xdp; } data; }; @@ -241,6 +254,7 @@ struct prueth_emac { struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID]; struct bpf_prog *xdp_prog; struct xdp_attachment_info xdpi; + int xsk_qid; }; /* The buf includes headroom compatible with both skb and xdpf */ @@ -479,7 +493,11 @@ void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, void prueth_reset_rx_chan(struct prueth_rx_chn *chn, int num_flows, bool disable); void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue); -int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); +int icssg_ndo_get_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config); +int icssg_ndo_set_ts_config(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); void icssg_ndo_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats); int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name, @@ -495,7 +513,14 @@ void prueth_put_cores(struct prueth *prueth, int slice); u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns); u32 emac_xmit_xdp_frame(struct prueth_emac *emac, struct xdp_frame *xdpf, - struct page *page, - unsigned int q_idx); + unsigned int q_idx, + enum prueth_tx_buff_type buff_type); +void prueth_rx_cleanup(void *data, dma_addr_t desc_dma); +void prueth_tx_cleanup(void *data, dma_addr_t desc_dma); +int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags); +static inline bool prueth_xdp_is_enabled(struct prueth_emac *emac) +{ + return !!READ_ONCE(emac->xdp_prog); +} #endif /* __NET_TI_ICSSG_PRUETH_H */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c index 5e225310c9de..7bb4f0d850cc 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c @@ -747,9 +747,11 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = icssg_ndo_tx_timeout, .ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1, - .ndo_eth_ioctl = icssg_ndo_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_get_stats64 = icssg_ndo_get_stats64, .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, + .ndo_hwtstamp_get = icssg_ndo_get_ts_config, + .ndo_hwtstamp_set = icssg_ndo_set_ts_config, }; static int prueth_netdev_init(struct prueth *prueth, @@ -816,8 +818,7 @@ static int prueth_netdev_init(struct prueth *prueth, } else if (of_phy_is_fixed_link(eth_node)) { ret = of_phy_register_fixed_link(eth_node); if (ret) { - ret = dev_err_probe(prueth->dev, ret, - "failed to register fixed-link phy\n"); + dev_err_probe(prueth->dev, ret, "failed to register fixed-link phy\n"); goto free; } diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index 7007eb8bed36..b9cbd3b4a8a2 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -207,6 +207,11 @@ struct netcp_module { int (*del_vid)(void *intf_priv, int vid); int (*ioctl)(void 
*intf_priv, struct ifreq *req, int cmd); int (*set_rx_mode)(void *intf_priv, bool promisc); + int (*hwtstamp_get)(void *intf_priv, + struct kernel_hwtstamp_config *cfg); + int (*hwtstamp_set)(void *intf_priv, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack); /* used internally */ struct list_head module_list; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 857820657bac..5ed1c46bbcb1 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1338,10 +1338,10 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe) tx_pipe->dma_channel = knav_dma_open_channel(dev, tx_pipe->dma_chan_name, &config); - if (IS_ERR(tx_pipe->dma_channel)) { + if (!tx_pipe->dma_channel) { dev_err(dev, "failed opening tx chan(%s)\n", tx_pipe->dma_chan_name); - ret = PTR_ERR(tx_pipe->dma_channel); + ret = -EINVAL; goto err; } @@ -1359,7 +1359,7 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe) return 0; err: - if (!IS_ERR_OR_NULL(tx_pipe->dma_channel)) + if (tx_pipe->dma_channel) knav_dma_close_channel(tx_pipe->dma_channel); tx_pipe->dma_channel = NULL; return ret; @@ -1678,10 +1678,10 @@ static int netcp_setup_navigator_resources(struct net_device *ndev) netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device, netcp->dma_chan_name, &config); - if (IS_ERR(netcp->rx_channel)) { + if (!netcp->rx_channel) { dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n", netcp->dma_chan_name); - ret = PTR_ERR(netcp->rx_channel); + ret = -EINVAL; goto fail; } @@ -1781,6 +1781,62 @@ static int netcp_ndo_stop(struct net_device *ndev) return 0; } +static int netcp_ndo_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_intf_modpriv *intf_modpriv; + struct netcp_module *module; + int err = -EOPNOTSUPP; + + if (!netif_running(ndev)) + return -EINVAL; + + for_each_module(netcp, intf_modpriv) { + module = intf_modpriv->netcp_module; + if (!module->hwtstamp_get) + continue; + + err = module->hwtstamp_get(intf_modpriv->module_priv, config); + break; + } + + return err; +} + +static int netcp_ndo_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct netcp_intf *netcp = netdev_priv(ndev); + struct netcp_intf_modpriv *intf_modpriv; + struct netcp_module *module; + int ret = -1, err = -EOPNOTSUPP; + + if (!netif_running(ndev)) + return -EINVAL; + + for_each_module(netcp, intf_modpriv) { + module = intf_modpriv->netcp_module; + if (!module->hwtstamp_set) + continue; + + err = module->hwtstamp_set(intf_modpriv->module_priv, config, + extack); + if ((err < 0) && (err != -EOPNOTSUPP)) { + NL_SET_ERR_MSG_WEAK_MOD(extack, + "At least one module failed to setup HW timestamps"); + ret = err; + goto out; + } + if (err == 0) + ret = err; + } + +out: + return (ret == 0) ? 
0 : err; +} + static int netcp_ndo_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) { @@ -1952,6 +2008,8 @@ static const struct net_device_ops netcp_netdev_ops = { .ndo_tx_timeout = netcp_ndo_tx_timeout, .ndo_select_queue = dev_pick_tx_zero, .ndo_setup_tc = netcp_setup_tc, + .ndo_hwtstamp_get = netcp_ndo_hwtstamp_get, + .ndo_hwtstamp_set = netcp_ndo_hwtstamp_set, }; static int netcp_create_interface(struct netcp_device *netcp_device, diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 55a1a96cd834..8f46e9be76b1 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2591,20 +2591,26 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info) return 0; } -static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr) +static int gbe_hwtstamp_get(void *intf_priv, struct kernel_hwtstamp_config *cfg) { - struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; - struct cpts *cpts = gbe_dev->cpts; - struct hwtstamp_config cfg; + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev; + struct phy_device *phy; + + gbe_dev = gbe_intf->gbe_dev; - if (!cpts) + if (!gbe_dev->cpts) + return -EOPNOTSUPP; + + phy = gbe_intf->slave->phy; + if (phy_has_hwtstamp(phy)) return -EOPNOTSUPP; - cfg.flags = 0; - cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = gbe_dev->rx_ts_enabled; + cfg->flags = 0; + cfg->tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg->rx_filter = gbe_dev->rx_ts_enabled; - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } static void gbe_hwtstamp(struct gbe_intf *gbe_intf) @@ -2637,19 +2643,23 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf) writel(ctl, GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2)); } -static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) +static int gbe_hwtstamp_set(void *intf_priv, struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { - struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; - struct cpts *cpts = gbe_dev->cpts; - struct hwtstamp_config cfg; + struct gbe_intf *gbe_intf = intf_priv; + struct gbe_priv *gbe_dev; + struct phy_device *phy; - if (!cpts) + gbe_dev = gbe_intf->gbe_dev; + + if (!gbe_dev->cpts) return -EOPNOTSUPP; - if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) - return -EFAULT; + phy = gbe_intf->slave->phy; + if (phy_has_hwtstamp(phy)) + return phy->mii_ts->hwtstamp_set(phy->mii_ts, cfg, extack); - switch (cfg.tx_type) { + switch (cfg->tx_type) { case HWTSTAMP_TX_OFF: gbe_dev->tx_ts_enabled = 0; break; @@ -2660,7 +2670,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) return -ERANGE; } - switch (cfg.rx_filter) { + switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE; break; @@ -2668,7 +2678,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: @@ -2680,7 +2690,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT; 
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: return -ERANGE; @@ -2688,7 +2698,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) gbe_hwtstamp(gbe_intf); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + return 0; } static void gbe_register_cpts(struct gbe_priv *gbe_dev) @@ -2745,12 +2755,15 @@ static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev) { } -static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req) +static inline int gbe_hwtstamp_get(void *intf_priv, + struct kernel_hwtstamp_config *cfg) { return -EOPNOTSUPP; } -static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req) +static inline int gbe_hwtstamp_set(void *intf_priv, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } @@ -2816,15 +2829,6 @@ static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd) struct gbe_intf *gbe_intf = intf_priv; struct phy_device *phy = gbe_intf->slave->phy; - if (!phy_has_hwtstamp(phy)) { - switch (cmd) { - case SIOCGHWTSTAMP: - return gbe_hwtstamp_get(gbe_intf, req); - case SIOCSHWTSTAMP: - return gbe_hwtstamp_set(gbe_intf, req); - } - } - if (phy) return phy_mii_ioctl(phy, req, cmd); @@ -3824,6 +3828,8 @@ static struct netcp_module gbe_module = { .add_vid = gbe_add_vid, .del_vid = gbe_del_vid, .ioctl = gbe_ioctl, + .hwtstamp_get = gbe_hwtstamp_get, + .hwtstamp_set = gbe_hwtstamp_set, }; static struct netcp_module xgbe_module = { @@ -3841,6 +3847,8 @@ static struct netcp_module xgbe_module = { .add_vid = gbe_add_vid, .del_vid = gbe_del_vid, .ioctl = gbe_ioctl, + .hwtstamp_get = gbe_hwtstamp_get, + .hwtstamp_set = gbe_hwtstamp_set, }; static int __init keystone_gbe_init(void)
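
Notes on the hunks above.

The icssg XSK pool hook-up follows the standard XDP_SETUP_XSK_POOL convention: a non-NULL bpf->xsk.pool binds a zero-copy pool to the given queue, a NULL pool unbinds it. A minimal sketch of the surrounding ndo_bpf dispatch; the program-attach helper name (emac_xdp_setup) sits outside the visible hunk and is assumed here for illustration:

static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		/* regular XDP program attach/detach (helper name assumed) */
		return emac_xdp_setup(emac, bpf);
	case XDP_SETUP_XSK_POOL:
		/* non-NULL pool: bind pool to queue; NULL pool: unbind */
		return bpf->xsk.pool ?
		       prueth_xsk_pool_enable(emac, bpf->xsk.pool,
					      bpf->xsk.queue_id) :
		       prueth_xsk_pool_disable(emac, bpf->xsk.queue_id);
	default:
		return -EINVAL;
	}
}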
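prueth_xsk_wakeup() relies on the mark-missed NAPI idiom: if the poller is already scheduled or running, napi_if_scheduled_mark_missed() records the wakeup so the poll loop re-runs before re-arming interrupts; only otherwise is a fresh schedule attempted. This avoids losing a wakeup that races with an in-flight poll. The pattern in isolation (the helper name is illustrative only; the driver open-codes it twice, once for TX and once for RX):

/* One wakeup request, race-free against a concurrently running poll:
 * sets the missed flag if NAPI is already active, otherwise kicks NAPI.
 */
static void prueth_kick_napi(struct napi_struct *napi)
{
	if (!napi_if_scheduled_mark_missed(napi)) {
		if (likely(napi_schedule_prep(napi)))
			__napi_schedule(napi);
	}
}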
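For context, .ndo_xsk_wakeup is only exercised when the AF_XDP socket is bound with the XDP_USE_NEED_WAKEUP flag; user space then makes a syscall whenever the ring's need-wakeup flag is set, and the kernel routes that into the driver callback (prueth_xsk_wakeup above, with XDP_WAKEUP_TX or XDP_WAKEUP_RX). A hedged user-space fragment using the libxdp xsk helpers:

#include <sys/socket.h>
#include <xdp/xsk.h>	/* libxdp helpers; older installs ship <bpf/xsk.h> */

/* Kick the kernel TX path only when it asked for it.  With
 * XDP_USE_NEED_WAKEUP this sendto() ends up in ndo_xsk_wakeup.
 */
static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
}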
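The change repeated across icssg, prueth_sr1 and netcp is mechanical: SIOC[GS]HWTSTAMP handling moves out of .ndo_eth_ioctl into the dedicated .ndo_hwtstamp_get/.ndo_hwtstamp_set callbacks, so the copy_from_user()/copy_to_user() on struct hwtstamp_config disappears from the drivers. The core now performs the copies and hands drivers a kernel-space struct kernel_hwtstamp_config plus an extack for error reporting, while .ndo_eth_ioctl can fall back to plain phy_do_ioctl. A minimal before/after sketch with hypothetical foo_* names:

/* Before: the driver owned the ioctl payload. */
static int foo_hwtstamp_ioctl(struct net_device *ndev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;
	/* ... validate cfg and program the hardware ... */
	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

/* After: the core copies the config in and out; the driver only
 * validates, programs, and may explain failures via extack.
 */
static int foo_hwtstamp_set(struct net_device *ndev,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON) {
		NL_SET_ERR_MSG(extack, "unsupported tx_type");
		return -ERANGE;
	}
	/* ... program the hardware from cfg ... */
	return 0;
}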
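In netcp, the new netcp_ndo_hwtstamp_set() fans the request out to every registered module: modules without the hook are skipped, the first hard error (anything other than -EOPNOTSUPP) aborts the walk with an extack message, success from at least one module yields 0, and -EOPNOTSUPP is returned only when no module handled the request. The get path, by contrast, stops at the first module that implements hwtstamp_get. Per-PHY timestamping is honoured inside gbe_hwtstamp_set(), which delegates to phy->mii_ts->hwtstamp_set() when phy_has_hwtstamp() is true, replacing the old phy_has_hwtstamp() guard in gbe_ioctl().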
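User space is unaffected by the conversion; the legacy ioctls are still served, now translated by the net core into the new callbacks. A self-contained configuration example against the standard UAPI headers (interface name is a placeholder, error handling minimal):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr = {};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder ifname */
	ifr.ifr_data = (char *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
		perror("SIOCSHWTSTAMP");
	else	/* the driver may have widened/adjusted the granted filter */
		printf("rx_filter granted: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}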
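Finally, the netcp_core.c hunks adjust for knav_dma_open_channel() reporting failure as NULL rather than as an ERR_PTR(), presumably matching a change in the knav DMA layer outside this diff; since no errno is encoded in the handle any more, the caller picks the error code itself (-EINVAL here), and the error path can test the pointer directly instead of using IS_ERR_OR_NULL(). A hypothetical wrapper showing the updated contract:

/* NULL on failure, valid handle otherwise; errno chosen by the caller. */
static int foo_open_chan(struct device *dev, const char *name,
			 struct knav_dma_cfg *config, void **chan)
{
	*chan = knav_dma_open_channel(dev, name, config);
	if (!*chan) {
		dev_err(dev, "failed opening chan(%s)\n", name);
		return -EINVAL;
	}
	return 0;
}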
