Diffstat (limited to 'drivers/net/ethernet/ti/icssg/icssg_prueth.c')
-rw-r--r--   drivers/net/ethernet/ti/icssg/icssg_prueth.c   401
1 file changed, 323 insertions(+), 78 deletions(-)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index e42d0fdefee1..f65041662173 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -47,6 +47,9 @@
NETIF_F_HW_HSR_TAG_INS | \
NETIF_F_HW_HSR_TAG_RM)
+#define PRUETH_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC |\
+ DMA_ATTR_WEAK_ORDERING)
+
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
@@ -392,7 +395,11 @@ static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
container_of(timer, struct prueth_emac, rx_hrtimer);
int rx_flow = PRUETH_RX_FLOW_DATA;
- enable_irq(emac->rx_chns.irq[rx_flow]);
+ if (emac->rx_chns.irq_disabled) {
+ /* re-enable the RX IRQ */
+ emac->rx_chns.irq_disabled = false;
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ }
return HRTIMER_NORESTART;
}
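The guard above pairs with the RX IRQ handler, which must record that it masked the line before scheduling NAPI. A minimal sketch of that side, assuming the driver's existing prueth_rx_irq() handler (only the flag write is the new part):

	static irqreturn_t prueth_rx_irq(int irq, void *dev_id)
	{
		struct prueth_emac *emac = dev_id;

		/* record the mask so only a matching re-enable runs */
		emac->rx_chns.irq_disabled = true;
		disable_irq_nosync(irq);
		napi_schedule(&emac->napi_rx);

		return IRQ_HANDLED;
	}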
@@ -566,31 +573,41 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
+static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
+{
+ struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+}
+
static int prueth_create_xdp_rxqs(struct prueth_emac *emac)
{
struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
struct page_pool *pool = emac->rx_chns.pg_pool;
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
int ret;
ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id);
if (ret)
return ret;
- ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
- if (ret)
- xdp_rxq_info_unreg(rxq);
-
- return ret;
-}
-
-static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac)
-{
- struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq;
+ if (rx_chn->xsk_pool) {
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (ret)
+ goto xdp_unreg;
+ xsk_pool_set_rxq_info(rx_chn->xsk_pool, rxq);
+ } else {
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ goto xdp_unreg;
+ }
- if (!xdp_rxq_info_is_reg(rxq))
- return;
+ return 0;
- xdp_rxq_info_unreg(rxq);
+xdp_unreg:
+ prueth_destroy_xdp_rxqs(emac);
+ return ret;
}
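The two memory models registered here determine where RX buffers come from: MEM_TYPE_XSK_BUFF_POOL is registered with a NULL allocator because the XSK pool is attached separately via xsk_pool_set_rxq_info(), while MEM_TYPE_PAGE_POOL keeps the existing page_pool-backed path. A hypothetical helper showing the zero-copy allocation this enables (the driver's real refill code lives outside this hunk):

	static struct xdp_buff *prueth_rx_alloc_zc(struct prueth_rx_chn *rx_chn)
	{
		/* the xdp_buff is backed directly by a UMEM chunk from the
		 * fill ring, so a completed frame reaches userspace without
		 * a copy
		 */
		return xsk_buff_alloc(rx_chn->xsk_pool);
	}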
static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
@@ -735,6 +752,128 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
return 0;
}
+static void prueth_set_xsk_pool(struct prueth_emac *emac, u16 queue_id)
+{
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[queue_id];
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+
+ if (emac->xsk_qid != queue_id) {
+ rx_chn->xsk_pool = NULL;
+ tx_chn->xsk_pool = NULL;
+ } else {
+ rx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ tx_chn->xsk_pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ }
+}
+
+static void prueth_destroy_txq(struct prueth_emac *emac)
+{
+ int ret, i;
+
+ atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(emac->ndev, "tx teardown timeout\n");
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ napi_disable(&emac->tx_chns[i].napi_tx);
+ hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
+ k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
+ &emac->tx_chns[i],
+ prueth_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
+ }
+}
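The tdown_cnt set above is consumed in the TX completion path (in icssg_common.c, not part of this diff). Assuming it follows the usual K3 UDMA teardown pattern, the consumer looks roughly like this illustrative fragment: each channel pops a Teardown Completion Marker (TDCM), and the last channel to finish wakes the waiter:

	if (cppi5_desc_is_tdcm(desc_dma)) {
		if (atomic_dec_and_test(&emac->tdown_cnt))
			complete(&emac->tdown_complete);
		break;
	}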
+
+static void prueth_destroy_rxq(struct prueth_emac *emac)
+{
+ int i, ret;
+
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
+
+ /* When RX DMA Channel Teardown is initiated, it will result in an
+ * interrupt and a Teardown Completion Marker (TDCM) is queued into
+ * the RX Completion queue. Acknowledging the interrupt involves
+ * popping the TDCM descriptor from the RX Completion queue via the
+ * RX NAPI Handler. To avoid timing out when waiting for the TDCM to
+ * be popped, schedule the RX NAPI handler to run immediately.
+ */
+ if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
+ if (napi_schedule_prep(&emac->napi_rx))
+ __napi_schedule(&emac->napi_rx);
+ }
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(emac->ndev, "rx teardown timeout\n");
+
+ /* NAPI and the pacing timer are per-port, so stop them once
+ * before resetting each RX flow
+ */
+ napi_disable(&emac->napi_rx);
+ hrtimer_cancel(&emac->rx_hrtimer);
+
+ for (i = 0; i < PRUETH_MAX_RX_FLOWS; i++)
+ k3_udma_glue_reset_rx_chn(emac->rx_chns.rx_chn, i,
+ &emac->rx_chns,
+ prueth_rx_cleanup);
+
+ prueth_destroy_xdp_rxqs(emac);
+ k3_udma_glue_disable_rx_chn(emac->rx_chns.rx_chn);
+}
+
+static int prueth_create_txq(struct prueth_emac *emac)
+{
+ int ret, i;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
+ if (ret)
+ goto reset_tx_chan;
+ napi_enable(&emac->tx_chns[i].napi_tx);
+ }
+ return 0;
+
+reset_tx_chan:
+ /* Since the interface is not yet up, there wouldn't be
+ * any SKB for completion. So pass false for free_skb.
+ */
+ prueth_reset_tx_chan(emac, i, false);
+ return ret;
+}
+
+static int prueth_create_rxq(struct prueth_emac *emac)
+{
+ int ret;
+
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ if (ret)
+ return ret;
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto reset_rx_chn;
+
+ ret = prueth_create_xdp_rxqs(emac);
+ if (ret)
+ goto reset_rx_chn;
+
+ napi_enable(&emac->napi_rx);
+ return 0;
+
+reset_rx_chn:
+ prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false);
+ return ret;
+}
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -746,7 +885,7 @@ static int icssg_update_vlan_mcast(struct net_device *vdev, int vid,
static int emac_ndo_open(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
- int ret, i, num_data_chn = emac->tx_ch_num;
+ int ret, num_data_chn = emac->tx_ch_num;
struct icssg_flow_cfg __iomem *flow_cfg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
@@ -767,6 +906,7 @@ static int emac_ndo_open(struct net_device *ndev)
return ret;
}
+ emac->xsk_qid = -EINVAL;
init_completion(&emac->cmd_complete);
ret = prueth_init_tx_chns(emac);
if (ret) {
@@ -819,28 +959,13 @@ static int emac_ndo_open(struct net_device *ndev)
goto stop;
/* Prepare RX */
- ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ ret = prueth_create_rxq(emac);
if (ret)
goto free_tx_ts_irq;
- ret = prueth_create_xdp_rxqs(emac);
- if (ret)
- goto reset_rx_chn;
-
- ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ ret = prueth_create_txq(emac);
if (ret)
- goto destroy_xdp_rxqs;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
- if (ret)
- goto reset_tx_chan;
- }
-
- /* Enable NAPI in Tx and Rx direction */
- for (i = 0; i < emac->tx_ch_num; i++)
- napi_enable(&emac->tx_chns[i].napi_tx);
- napi_enable(&emac->napi_rx);
+ goto destroy_rxq;
/* start PHY */
phy_start(ndev->phydev);
@@ -851,15 +976,8 @@ static int emac_ndo_open(struct net_device *ndev)
return 0;
-reset_tx_chan:
- /* Since interface is not yet up, there is wouldn't be
- * any SKB for completion. So set false to free_skb
- */
- prueth_reset_tx_chan(emac, i, false);
-destroy_xdp_rxqs:
- prueth_destroy_xdp_rxqs(emac);
-reset_rx_chn:
- prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+destroy_rxq:
+ prueth_destroy_rxq(emac);
free_tx_ts_irq:
free_irq(emac->tx_ts_irq, emac);
stop:
@@ -889,9 +1007,6 @@ static int emac_ndo_stop(struct net_device *ndev)
{
struct prueth_emac *emac = netdev_priv(ndev);
struct prueth *prueth = emac->prueth;
- int rx_flow = PRUETH_RX_FLOW_DATA;
- int max_rx_flows;
- int ret, i;
/* inform the upper layers. */
netif_tx_stop_all_queues(ndev);
@@ -905,32 +1020,8 @@ static int emac_ndo_stop(struct net_device *ndev)
else
__dev_mc_unsync(ndev, icssg_prueth_del_mcast);
- atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
- /* ensure new tdown_cnt value is visible */
- smp_mb__after_atomic();
- /* tear down and disable UDMA channels */
- reinit_completion(&emac->tdown_complete);
- for (i = 0; i < emac->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
-
- ret = wait_for_completion_timeout(&emac->tdown_complete,
- msecs_to_jiffies(1000));
- if (!ret)
- netdev_err(ndev, "tx teardown timeout\n");
-
- prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
- for (i = 0; i < emac->tx_ch_num; i++) {
- napi_disable(&emac->tx_chns[i].napi_tx);
- hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
- }
-
- max_rx_flows = PRUETH_MAX_RX_FLOWS;
- k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
-
- prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
- prueth_destroy_xdp_rxqs(emac);
- napi_disable(&emac->napi_rx);
- hrtimer_cancel(&emac->rx_hrtimer);
+ prueth_destroy_txq(emac);
+ prueth_destroy_rxq(emac);
cancel_work_sync(&emac->rx_mode_work);
@@ -943,10 +1034,10 @@ static int emac_ndo_stop(struct net_device *ndev)
free_irq(emac->tx_ts_irq, emac);
- free_irq(emac->rx_chns.irq[rx_flow], emac);
+ free_irq(emac->rx_chns.irq[PRUETH_RX_FLOW_DATA], emac);
prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
- prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, PRUETH_MAX_RX_FLOWS);
prueth_cleanup_tx_chns(emac);
prueth->emacs_initialized--;
@@ -1108,7 +1199,8 @@ static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frame
__netif_tx_lock(netif_txq, cpu);
for (i = 0; i < n; i++) {
xdpf = frames[i];
- err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx);
+ err = emac_xmit_xdp_frame(emac, xdpf, q_idx,
+ PRUETH_TX_BUFF_TYPE_XDP_NDO);
if (err != ICSSG_XDP_TX) {
ndev->stats.tx_dropped++;
break;
@@ -1141,6 +1233,109 @@ static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf)
return 0;
}
+static int prueth_xsk_pool_enable(struct prueth_emac *emac,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 frame_size;
+ int ret;
+
+ if (queue_id >= PRUETH_MAX_RX_FLOWS ||
+ queue_id >= emac->tx_ch_num) {
+ netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+ if (frame_size < PRUETH_MAX_PKT_SIZE)
+ return -EOPNOTSUPP;
+
+ ret = xsk_pool_dma_map(pool, rx_chn->dma_dev, PRUETH_RX_DMA_ATTR);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to map XSK pool: %d\n", ret);
+ return ret;
+ }
+
+ if (netif_running(emac->ndev)) {
+ /* stop packets from wire for graceful teardown */
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ if (ret) {
+ /* don't leak the pool's DMA mapping on failure */
+ xsk_pool_dma_unmap(pool, PRUETH_RX_DMA_ATTR);
+ return ret;
+ }
+ prueth_destroy_rxq(emac);
+ }
+
+ emac->xsk_qid = queue_id;
+ prueth_set_xsk_pool(emac, queue_id);
+
+ if (netif_running(emac->ndev)) {
+ ret = prueth_create_rxq(emac);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
+ goto err_dma_unmap;
+ }
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ if (ret) {
+ prueth_destroy_rxq(emac);
+ goto err_dma_unmap;
+ }
+ ret = prueth_xsk_wakeup(emac->ndev, queue_id, XDP_WAKEUP_RX);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+
+err_dma_unmap:
+ xsk_pool_dma_unmap(pool, PRUETH_RX_DMA_ATTR);
+ emac->xsk_qid = -EINVAL;
+ prueth_set_xsk_pool(emac, queue_id);
+ return ret;
+}
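From userspace, this path runs when an AF_XDP socket is bound to the queue. A minimal, hypothetical libxdp snippet (interface name and queue number are placeholders; the UMEM must use a chunk size large enough that xsk_pool_get_rx_frame_size() clears PRUETH_MAX_PKT_SIZE, e.g. 4096):

	#include <xdp/xsk.h> /* libxdp */

	static struct xsk_socket *icssg_xsk_open(struct xsk_umem *umem,
						 struct xsk_ring_cons *rx,
						 struct xsk_ring_prod *tx)
	{
		struct xsk_socket_config cfg = {
			.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
			.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
			/* the driver advertises NETDEV_XDP_ACT_XSK_ZEROCOPY */
			.bind_flags = XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP,
		};
		struct xsk_socket *xsk = NULL;

		/* bind issues XDP_SETUP_XSK_POOL -> prueth_xsk_pool_enable() */
		if (xsk_socket__create(&xsk, "eth1", 0, umem, rx, tx, &cfg))
			return NULL;
		return xsk;
	}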
+
+static int prueth_xsk_pool_disable(struct prueth_emac *emac, u16 queue_id)
+{
+ struct xsk_buff_pool *pool;
+ int ret;
+
+ if (queue_id >= PRUETH_MAX_RX_FLOWS ||
+ queue_id >= emac->tx_ch_num) {
+ netdev_err(emac->ndev, "Invalid XSK queue ID %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (emac->xsk_qid != queue_id) {
+ netdev_err(emac->ndev, "XSK queue ID %d not registered\n", queue_id);
+ return -EINVAL;
+ }
+
+ pool = xsk_get_pool_from_qid(emac->ndev, queue_id);
+ if (!pool) {
+ netdev_err(emac->ndev, "No XSK pool registered for queue %d\n", queue_id);
+ return -EINVAL;
+ }
+
+ if (netif_running(emac->ndev)) {
+ /* stop packets from wire for graceful teardown */
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ if (ret)
+ return ret;
+ prueth_destroy_rxq(emac);
+ }
+
+ xsk_pool_dma_unmap(pool, PRUETH_RX_DMA_ATTR);
+ emac->xsk_qid = -EINVAL;
+ prueth_set_xsk_pool(emac, queue_id);
+
+ if (netif_running(emac->ndev)) {
+ ret = prueth_create_rxq(emac);
+ if (ret) {
+ netdev_err(emac->ndev, "Failed to create RX queue: %d\n", ret);
+ return ret;
+ }
+ ret = icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ if (ret) {
+ prueth_destroy_rxq(emac);
+ return ret;
+ }
+ }
+
+ return 0;
+}
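The disable path runs when the socket goes away; in the libxdp sketch above that is simply:

	/* unbind triggers XDP_SETUP_XSK_POOL with pool == NULL */
	xsk_socket__delete(xsk);
	xsk_umem__delete(umem);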
+
/**
* emac_ndo_bpf - implements ndo_bpf for icssg_prueth
* @ndev: network adapter device
@@ -1155,11 +1350,58 @@ static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
switch (bpf->command) {
case XDP_SETUP_PROG:
return emac_xdp_setup(emac, bpf);
+ case XDP_SETUP_XSK_POOL:
+ return bpf->xsk.pool ?
+ prueth_xsk_pool_enable(emac, bpf->xsk.pool, bpf->xsk.queue_id) :
+ prueth_xsk_pool_disable(emac, bpf->xsk.queue_id);
default:
return -EINVAL;
}
}
+int prueth_xsk_wakeup(struct net_device *ndev, u32 qid, u32 flags)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ struct prueth_tx_chn *tx_chn;
+
+ /* validate qid before using it to index tx_chns[] */
+ if (qid >= PRUETH_MAX_RX_FLOWS || qid >= emac->tx_ch_num) {
+ netdev_err(ndev, "Invalid XSK queue ID %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (emac->xsk_qid != qid) {
+ netdev_err(ndev, "XSK queue %d not registered\n", qid);
+ return -EINVAL;
+ }
+
+ tx_chn = &emac->tx_chns[qid];
+
+ if (!tx_chn->xsk_pool) {
+ netdev_err(ndev, "XSK pool not registered for queue %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (!rx_chn->xsk_pool) {
+ netdev_err(ndev, "XSK pool not registered for RX queue %d\n", qid);
+ return -EINVAL;
+ }
+
+ if (flags & XDP_WAKEUP_TX) {
+ if (!napi_if_scheduled_mark_missed(&tx_chn->napi_tx)) {
+ if (likely(napi_schedule_prep(&tx_chn->napi_tx)))
+ __napi_schedule(&tx_chn->napi_tx);
+ }
+ }
+
+ if (flags & XDP_WAKEUP_RX) {
+ if (!napi_if_scheduled_mark_missed(&emac->napi_rx)) {
+ if (likely(napi_schedule_prep(&emac->napi_rx)))
+ __napi_schedule(&emac->napi_rx);
+ }
+ }
+
+ return 0;
+}
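This ndo is what a userspace wakeup syscall lands in when the socket is bound with XDP_USE_NEED_WAKEUP. A hedged sketch of the TX kick (helper name is illustrative):

	#include <sys/socket.h>
	#include <xdp/xsk.h>

	static void icssg_xsk_kick_tx(struct xsk_socket *xsk,
				      struct xsk_ring_prod *tx_ring)
	{
		/* only poke the kernel when it asked for a kick; the dummy
		 * sendto() reaches prueth_xsk_wakeup(..., XDP_WAKEUP_TX)
		 */
		if (xsk_ring_prod__needs_wakeup(tx_ring))
			sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
			       NULL, 0);
	}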
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -1168,7 +1410,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = icssg_ndo_tx_timeout,
.ndo_set_rx_mode = emac_ndo_set_rx_mode,
- .ndo_eth_ioctl = icssg_ndo_ioctl,
+ .ndo_eth_ioctl = phy_do_ioctl,
.ndo_get_stats64 = icssg_ndo_get_stats64,
.ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
.ndo_fix_features = emac_ndo_fix_features,
@@ -1176,6 +1418,9 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid,
.ndo_bpf = emac_ndo_bpf,
.ndo_xdp_xmit = emac_xdp_xmit,
+ .ndo_hwtstamp_get = icssg_ndo_get_ts_config,
+ .ndo_hwtstamp_set = icssg_ndo_set_ts_config,
+ .ndo_xsk_wakeup = prueth_xsk_wakeup,
};
static int prueth_netdev_init(struct prueth *prueth,
@@ -1248,8 +1493,7 @@ static int prueth_netdev_init(struct prueth *prueth,
} else if (of_phy_is_fixed_link(eth_node)) {
ret = of_phy_register_fixed_link(eth_node);
if (ret) {
- ret = dev_err_probe(prueth->dev, ret,
- "failed to register fixed-link phy\n");
+ dev_err_probe(prueth->dev, ret, "failed to register fixed-link phy\n");
goto free;
}
@@ -1310,7 +1554,8 @@ static int prueth_netdev_init(struct prueth *prueth,
xdp_set_features_flag(ndev,
NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT);
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY);
netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
hrtimer_setup(&emac->rx_hrtimer, &emac_rx_timer_callback, CLOCK_MONOTONIC,