Diffstat (limited to 'drivers/net/ethernet/ti/icssg/icssg_common.c')
| -rw-r--r-- | drivers/net/ethernet/ti/icssg/icssg_common.c | 516 |
1 file changed, 412 insertions, 104 deletions
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 57e5f1c88f50..090aa74d3ce7 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -93,15 +93,91 @@ void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
 }
 EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);
 
+static int emac_xsk_xmit_zc(struct prueth_emac *emac,
+                            unsigned int q_idx)
+{
+        struct prueth_tx_chn *tx_chn = &emac->tx_chns[q_idx];
+        struct xsk_buff_pool *pool = tx_chn->xsk_pool;
+        struct net_device *ndev = emac->ndev;
+        struct cppi5_host_desc_t *host_desc;
+        dma_addr_t dma_desc, dma_buf;
+        struct prueth_swdata *swdata;
+        struct xdp_desc xdp_desc;
+        int num_tx = 0, pkt_len;
+        int descs_avail, ret;
+        u32 *epib;
+        int i;
+
+        descs_avail = k3_cppi_desc_pool_avail(tx_chn->desc_pool);
+        /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
+         * will be available for normal TX path and queue is stopped there if
+         * necessary
+         */
+        if (descs_avail <= MAX_SKB_FRAGS)
+                return 0;
+
+        descs_avail -= MAX_SKB_FRAGS;
+
+        for (i = 0; i < descs_avail; i++) {
+                if (!xsk_tx_peek_desc(pool, &xdp_desc))
+                        break;
+
+                dma_buf = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+                pkt_len = xdp_desc.len;
+                xsk_buff_raw_dma_sync_for_device(pool, dma_buf, pkt_len);
+
+                host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+                if (unlikely(!host_desc))
+                        break;
+
+                cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+                                 PRUETH_NAV_PS_DATA_SIZE);
+                cppi5_hdesc_set_pkttype(host_desc, 0);
+                epib = host_desc->epib;
+                epib[0] = 0;
+                epib[1] = 0;
+                cppi5_hdesc_set_pktlen(host_desc, pkt_len);
+                cppi5_desc_set_tags_ids(&host_desc->hdr, 0,
+                                        (emac->port_id | (q_idx << 8)));
+
+                k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
+                cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf,
+                                       pkt_len);
+
+                swdata = cppi5_hdesc_get_swdata(host_desc);
+                swdata->type = PRUETH_SWDATA_XSK;
+
+                dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+                                                      host_desc);
+                ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn,
+                                               host_desc, dma_desc);
+
+                if (ret) {
+                        ndev->stats.tx_errors++;
+                        k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
+                        break;
+                }
+
+                num_tx++;
+        }
+
+        xsk_tx_release(tx_chn->xsk_pool);
+        return num_tx;
+}
+
 void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
                       struct cppi5_host_desc_t *desc)
 {
         struct cppi5_host_desc_t *first_desc, *next_desc;
         dma_addr_t buf_dma, next_desc_dma;
+        struct prueth_swdata *swdata;
         u32 buf_dma_len;
 
         first_desc = desc;
         next_desc = first_desc;
+        swdata = cppi5_hdesc_get_swdata(first_desc);
+        if (swdata->type == PRUETH_SWDATA_XSK)
+                goto free_pool;
 
         cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
         k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
@@ -126,6 +202,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
                 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
         }
 
+free_pool:
         k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
 }
 EXPORT_SYMBOL_GPL(prueth_xmit_free);
@@ -139,7 +216,9 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
         struct prueth_swdata *swdata;
         struct prueth_tx_chn *tx_chn;
         unsigned int total_bytes = 0;
+        int xsk_frames_done = 0;
         struct xdp_frame *xdpf;
+        unsigned int pkt_len;
         struct sk_buff *skb;
         dma_addr_t desc_dma;
         int res, num_tx = 0;
@@ -176,6 +255,11 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
                         total_bytes += xdpf->len;
                         xdp_return_frame(xdpf);
                         break;
+                case PRUETH_SWDATA_XSK:
+                        pkt_len = cppi5_hdesc_get_pktlen(desc_tx);
+                        dev_sw_netstats_tx_add(ndev, 1, pkt_len);
+                        xsk_frames_done++;
+                        break;
                 default:
                         prueth_xmit_free(tx_chn, desc_tx);
                         ndev->stats.tx_dropped++;
@@ -204,6 +288,18 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
                 __netif_tx_unlock(netif_txq);
         }
 
+        if (tx_chn->xsk_pool) {
+                if (xsk_frames_done)
+                        xsk_tx_completed(tx_chn->xsk_pool, xsk_frames_done);
+
+                if (xsk_uses_need_wakeup(tx_chn->xsk_pool))
+                        xsk_set_tx_need_wakeup(tx_chn->xsk_pool);
+
+                netif_txq = netdev_get_tx_queue(ndev, chn);
+                txq_trans_cond_update(netif_txq);
+                emac_xsk_xmit_zc(emac, chn);
+        }
+
         return num_tx;
 }
 
@@ -212,7 +308,10 @@ static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
         struct prueth_tx_chn *tx_chns =
                         container_of(timer, struct prueth_tx_chn, tx_hrtimer);
 
-        enable_irq(tx_chns->irq);
+        if (tx_chns->irq_disabled) {
+                tx_chns->irq_disabled = false;
+                enable_irq(tx_chns->irq);
+        }
         return HRTIMER_NORESTART;
 }
 
@@ -235,7 +334,10 @@ static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
                                       ns_to_ktime(tx_chn->tx_pace_timeout_ns),
                                       HRTIMER_MODE_REL_PINNED);
                 } else {
-                        enable_irq(tx_chn->irq);
+                        if (tx_chn->irq_disabled) {
+                                tx_chn->irq_disabled = false;
+                                enable_irq(tx_chn->irq);
+                        }
                 }
         }
 
@@ -246,6 +348,7 @@
 static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
 {
         struct prueth_tx_chn *tx_chn = dev_id;
 
+        tx_chn->irq_disabled = true;
         disable_irq_nosync(irq);
         napi_schedule(&tx_chn->napi_tx);
 
@@ -362,6 +465,29 @@ fail:
 }
 EXPORT_SYMBOL_GPL(prueth_init_tx_chns);
 
+static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
+                                                 struct device *dma_dev,
+                                                 int size)
+{
+        struct page_pool_params pp_params = { 0 };
+        struct page_pool *pool;
+
+        pp_params.order = 0;
+        pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+        pp_params.pool_size = size;
+        pp_params.nid = dev_to_node(emac->prueth->dev);
+        pp_params.dma_dir = DMA_BIDIRECTIONAL;
+        pp_params.dev = dma_dev;
+        pp_params.napi = &emac->napi_rx;
+        pp_params.max_len = PAGE_SIZE;
+
+        pool = page_pool_create(&pp_params);
+        if (IS_ERR(pool))
+                netdev_err(emac->ndev, "cannot create rx page pool\n");
+
+        return pool;
+}
+
 int prueth_init_rx_chns(struct prueth_emac *emac,
                         struct prueth_rx_chn *rx_chn,
                         char *name, u32 max_rflows,
@@ -371,6 +497,7 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
         struct device *dev = emac->prueth->dev;
         struct net_device *ndev = emac->ndev;
         u32 fdqring_id, hdesc_size;
+        struct page_pool *pool;
         int i, ret = 0, slice;
         int flow_id_base;
 
@@ -413,6 +540,14 @@ int prueth_init_rx_chns(struct prueth_emac *emac,
                 goto fail;
         }
 
+        pool = prueth_create_page_pool(emac, rx_chn->dma_dev, rx_chn->descs_num);
+        if (IS_ERR(pool)) {
+                ret = PTR_ERR(pool);
+                goto fail;
+        }
+
+        rx_chn->pg_pool = pool;
+
         flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
         if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
                 emac->rx_mgm_flow_id_base = flow_id_base;
@@ -544,15 +679,15 @@ void emac_rx_timestamp(struct prueth_emac *emac,
  * emac_xmit_xdp_frame - transmits an XDP frame
  * @emac: emac device
  * @xdpf: data to transmit
- * @page: page from page pool if already DMA mapped
  * @q_idx: queue id
+ * @buff_type: Type of buffer to be transmitted
  *
  * Return: XDP state
  */
 u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
                         struct xdp_frame *xdpf,
-                        struct page *page,
-                        unsigned int q_idx)
+                        unsigned int q_idx,
+                        enum prueth_tx_buff_type buff_type)
 {
         struct cppi5_host_desc_t *first_desc;
         struct net_device *ndev = emac->ndev;
@@ -560,6 +695,7 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
         struct prueth_tx_chn *tx_chn;
         dma_addr_t desc_dma, buf_dma;
         struct prueth_swdata *swdata;
+        struct page *page;
         u32 *epib;
         int ret;
 
@@ -576,7 +712,12 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
                 return ICSSG_XDP_CONSUMED;      /* drop */
         }
 
-        if (page) { /* already DMA mapped by page_pool */
+        if (buff_type == PRUETH_TX_BUFF_TYPE_XDP_TX) { /* already DMA mapped by page_pool */
+                page = virt_to_head_page(xdpf->data);
+                if (unlikely(!page)) {
+                        netdev_err(ndev, "xdp tx: failed to get page from xdpf\n");
+                        goto drop_free_descs;
+                }
                 buf_dma = page_pool_get_dma_addr(page);
                 buf_dma += xdpf->headroom + sizeof(struct xdp_frame);
         } else { /* Map the linear buffer */
@@ -631,13 +772,11 @@ EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame);
  * emac_run_xdp - run an XDP program
  * @emac: emac device
  * @xdp: XDP buffer containing the frame
- * @page: page with RX data if already DMA mapped
  * @len: Rx descriptor packet length
 *
 * Return: XDP state
 */
-static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
-                        struct page *page, u32 *len)
+static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *len)
 {
         struct net_device *ndev = emac->ndev;
         struct netdev_queue *netif_txq;
@@ -664,7 +803,8 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
                 q_idx = cpu % emac->tx_ch_num;
                 netif_txq = netdev_get_tx_queue(ndev, q_idx);
                 __netif_tx_lock(netif_txq, cpu);
-                result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
+                result = emac_xmit_xdp_frame(emac, xdpf, q_idx,
+                                             PRUETH_TX_BUFF_TYPE_XDP_TX);
                 __netif_tx_unlock(netif_txq);
                 if (result == ICSSG_XDP_CONSUMED) {
                         ndev->stats.tx_dropped++;
@@ -689,11 +829,188 @@ drop:
                 fallthrough; /* handle aborts by dropping packet */
         case XDP_DROP:
                 ndev->stats.rx_dropped++;
-                page_pool_recycle_direct(emac->rx_chns.pg_pool, page);
                 return ICSSG_XDP_CONSUMED;
         }
 }
 
+static int prueth_dma_rx_push_mapped_zc(struct prueth_emac *emac,
+                                        struct prueth_rx_chn *rx_chn,
+                                        struct xdp_buff *xdp)
+{
+        struct net_device *ndev = emac->ndev;
+        struct cppi5_host_desc_t *desc_rx;
+        struct prueth_swdata *swdata;
+        dma_addr_t desc_dma;
+        dma_addr_t buf_dma;
+        int buf_len;
+
+        buf_dma = xsk_buff_xdp_get_dma(xdp);
+        desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+        if (!desc_rx) {
+                netdev_err(ndev, "rx push: failed to allocate descriptor\n");
+                return -ENOMEM;
+        }
+        desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+        cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+                         PRUETH_NAV_PS_DATA_SIZE);
+        k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+        buf_len = xsk_pool_get_rx_frame_size(rx_chn->xsk_pool);
+        cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len);
+        swdata = cppi5_hdesc_get_swdata(desc_rx);
+        swdata->type = PRUETH_SWDATA_XSK;
+        swdata->data.xdp = xdp;
+
+        return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA,
+                                        desc_rx, desc_dma);
+}
+
+static int prueth_rx_alloc_zc(struct prueth_emac *emac, int budget)
+{
+        struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+        struct xdp_buff *xdp;
+        int i, ret;
+
+        for (i = 0; i < budget; i++) {
+                xdp = xsk_buff_alloc(rx_chn->xsk_pool);
+                if (!xdp)
+                        break;
+
+                ret = prueth_dma_rx_push_mapped_zc(emac, rx_chn, xdp);
+                if (ret) {
+                        netdev_err(emac->ndev, "rx alloc: failed to map descriptors to xdp buff\n");
+                        xsk_buff_free(xdp);
+                        break;
+                }
+        }
+
+        return i;
+}
+
+static void emac_dispatch_skb_zc(struct prueth_emac *emac, struct xdp_buff *xdp, u32 *psdata)
+{
+        unsigned int headroom = xdp->data - xdp->data_hard_start;
+        unsigned int pkt_len = xdp->data_end - xdp->data;
+        struct net_device *ndev = emac->ndev;
+        struct sk_buff *skb;
+
+        skb = napi_alloc_skb(&emac->napi_rx, xdp->data_end - xdp->data_hard_start);
+        if (unlikely(!skb)) {
+                ndev->stats.rx_dropped++;
+                return;
+        }
+
+        skb_reserve(skb, headroom);
+        skb_put(skb, pkt_len);
+        skb->dev = ndev;
+
+        /* RX HW timestamp */
+        if (emac->rx_ts_enabled)
+                emac_rx_timestamp(emac, skb, psdata);
+
+        if (emac->prueth->is_switch_mode)
+                skb->offload_fwd_mark = emac->offload_fwd_mark;
+        skb->protocol = eth_type_trans(skb, ndev);
+
+        skb_mark_for_recycle(skb);
+        napi_gro_receive(&emac->napi_rx, skb);
+        ndev->stats.rx_bytes += pkt_len;
+        ndev->stats.rx_packets++;
+}
+
+static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id,
+                             int budget)
+{
+        struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+        u32 buf_dma_len, pkt_len, port_id = 0;
+        struct net_device *ndev = emac->ndev;
+        struct cppi5_host_desc_t *desc_rx;
+        struct prueth_swdata *swdata;
+        dma_addr_t desc_dma, buf_dma;
+        struct xdp_buff *xdp;
+        int xdp_status = 0;
+        int count = 0;
+        u32 *psdata;
+        int ret;
+
+        while (count < budget) {
+                ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+                if (ret) {
+                        if (ret != -ENODATA)
+                                netdev_err(ndev, "rx pop: failed: %d\n", ret);
+                        break;
+                }
+
+                if (cppi5_desc_is_tdcm(desc_dma)) {
+                        complete(&emac->tdown_complete);
+                        break;
+                }
+
+                desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+                swdata = cppi5_hdesc_get_swdata(desc_rx);
+                if (swdata->type != PRUETH_SWDATA_XSK) {
+                        netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type);
+                        k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+                        break;
+                }
+
+                xdp = swdata->data.xdp;
+                cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+                k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+                pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+                /* firmware adds 4 CRC bytes, strip them */
+                pkt_len -= 4;
+                cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+                psdata = cppi5_hdesc_get_psdata(desc_rx);
+                k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+                count++;
+                xsk_buff_set_size(xdp, pkt_len);
+                xsk_buff_dma_sync_for_cpu(xdp);
+
+                if (prueth_xdp_is_enabled(emac)) {
+                        ret = emac_run_xdp(emac, xdp, &pkt_len);
+                        switch (ret) {
+                        case ICSSG_XDP_PASS:
+                                /* prepare skb and send to n/w stack */
+                                emac_dispatch_skb_zc(emac, xdp, psdata);
+                                xsk_buff_free(xdp);
+                                break;
+                        case ICSSG_XDP_CONSUMED:
+                                xsk_buff_free(xdp);
+                                break;
+                        case ICSSG_XDP_TX:
+                        case ICSSG_XDP_REDIR:
+                                xdp_status |= ret;
+                                break;
+                        }
+                } else {
+                        /* prepare skb and send to n/w stack */
+                        emac_dispatch_skb_zc(emac, xdp, psdata);
+                        xsk_buff_free(xdp);
+                }
        }
 
+        if (xdp_status & ICSSG_XDP_REDIR)
+                xdp_do_flush();
+
+        /* Allocate xsk buffers from the pool for the "count" number of
+         * packets processed in order to be able to receive more packets.
+         */
+        ret = prueth_rx_alloc_zc(emac, count);
+
+        if (xsk_uses_need_wakeup(rx_chn->xsk_pool)) {
+                /* If the user space doesn't provide enough buffers then it must
+                 * explicitly wake up the kernel when new buffers are available
+                 */
+                if (ret < count)
+                        xsk_set_rx_need_wakeup(rx_chn->xsk_pool);
+                else
+                        xsk_clear_rx_need_wakeup(rx_chn->xsk_pool);
+        }
+
+        return count;
+}
+
 static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
 {
         struct prueth_rx_chn *rx_chn = &emac->rx_chns;
@@ -719,8 +1036,10 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
                 return ret;
         }
 
-        if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
+        if (cppi5_desc_is_tdcm(desc_dma)) {
+                complete(&emac->tdown_complete);
                 return 0;
+        }
 
         desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
         swdata = cppi5_hdesc_get_swdata(desc_rx);
@@ -738,7 +1057,6 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
         /* firmware adds 4 CRC bytes, strip them */
         pkt_len -= 4;
         cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
-        k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 
         /* if allocation fails we drop the packet but push the
@@ -752,11 +1070,11 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state)
         }
 
         pa = page_address(page);
-        if (emac->xdp_prog) {
+        if (prueth_xdp_is_enabled(emac)) {
                 xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq);
                 xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false);
 
-                *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len);
+                *xdp_state = emac_run_xdp(emac, &xdp, &pkt_len);
                 if (*xdp_state != ICSSG_XDP_PASS)
                         goto requeue;
                 headroom = xdp.data - xdp.data_hard_start;
@@ -804,24 +1122,29 @@ requeue:
         return ret;
 }
 
-static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
+void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
 {
         struct prueth_rx_chn *rx_chn = data;
         struct cppi5_host_desc_t *desc_rx;
         struct prueth_swdata *swdata;
         struct page_pool *pool;
+        struct xdp_buff *xdp;
         struct page *page;
 
         pool = rx_chn->pg_pool;
         desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
         swdata = cppi5_hdesc_get_swdata(desc_rx);
-        if (swdata->type == PRUETH_SWDATA_PAGE) {
+        if (rx_chn->xsk_pool) {
+                xdp = swdata->data.xdp;
+                xsk_buff_free(xdp);
+        } else {
                 page = swdata->data.page;
                 page_pool_recycle_direct(pool, page);
         }
 
         k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
 }
+EXPORT_SYMBOL_GPL(prueth_rx_cleanup);
 
 static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
 {
@@ -1025,10 +1348,11 @@ drop_stop_q_busy:
 }
 EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);
 
-static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
+void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
 {
         struct prueth_tx_chn *tx_chn = data;
         struct cppi5_host_desc_t *desc_tx;
+        struct xsk_buff_pool *xsk_pool;
         struct prueth_swdata *swdata;
         struct xdp_frame *xdpf;
         struct sk_buff *skb;
@@ -1045,17 +1369,23 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
                 xdpf = swdata->data.xdpf;
                 xdp_return_frame(xdpf);
                 break;
+        case PRUETH_SWDATA_XSK:
+                xsk_pool = tx_chn->xsk_pool;
+                xsk_tx_completed(xsk_pool, 1);
+                break;
         default:
                 break;
         }
 
         prueth_xmit_free(tx_chn, desc_tx);
 }
+EXPORT_SYMBOL_GPL(prueth_tx_cleanup);
 
 irqreturn_t prueth_rx_irq(int irq, void *dev_id)
 {
         struct prueth_emac *emac = dev_id;
 
+        emac->rx_chns.irq_disabled = true;
         disable_irq_nosync(irq);
         napi_schedule(&emac->napi_rx);
 
@@ -1083,6 +1413,7 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
                          PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
         int flow = emac->is_sr1 ?
                          PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
+        struct prueth_rx_chn *rx_chn = &emac->rx_chns;
         int xdp_state_or = 0;
         int num_rx = 0;
         int cur_budget;
@@ -1090,14 +1421,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
         int ret;
 
         while (flow--) {
-                cur_budget = budget - num_rx;
-
-                while (cur_budget--) {
-                        ret = emac_rx_packet(emac, flow, &xdp_state);
-                        xdp_state_or |= xdp_state;
-                        if (ret)
-                                break;
-                        num_rx++;
+                if (rx_chn->xsk_pool) {
+                        num_rx = emac_rx_packet_zc(emac, flow, budget);
+                } else {
+                        cur_budget = budget - num_rx;
+
+                        while (cur_budget--) {
+                                ret = emac_rx_packet(emac, flow, &xdp_state);
+                                xdp_state_or |= xdp_state;
+                                if (ret)
+                                        break;
+                                num_rx++;
+                        }
                 }
 
                 if (num_rx >= budget)
@@ -1113,7 +1448,11 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
                               ns_to_ktime(emac->rx_pace_timeout_ns),
                               HRTIMER_MODE_REL_PINNED);
                 } else {
-                        enable_irq(emac->rx_chns.irq[rx_flow]);
+                        if (emac->rx_chns.irq_disabled) {
+                                /* re-enable the RX IRQ */
+                                emac->rx_chns.irq_disabled = false;
+                                enable_irq(emac->rx_chns.irq[rx_flow]);
+                        }
                 }
         }
 
@@ -1121,62 +1460,48 @@
 }
 EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
 
-static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac,
-                                                 struct device *dma_dev,
-                                                 int size)
-{
-        struct page_pool_params pp_params = { 0 };
-        struct page_pool *pool;
-
-        pp_params.order = 0;
-        pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-        pp_params.pool_size = size;
-        pp_params.nid = dev_to_node(emac->prueth->dev);
-        pp_params.dma_dir = DMA_BIDIRECTIONAL;
-        pp_params.dev = dma_dev;
-        pp_params.napi = &emac->napi_rx;
-        pp_params.max_len = PAGE_SIZE;
-
-        pool = page_pool_create(&pp_params);
-        if (IS_ERR(pool))
-                netdev_err(emac->ndev, "cannot create rx page pool\n");
-
-        return pool;
-}
-
 int prueth_prepare_rx_chan(struct prueth_emac *emac,
                            struct prueth_rx_chn *chn,
                            int buf_size)
 {
-        struct page_pool *pool;
         struct page *page;
+        int desc_avail;
         int i, ret;
 
-        pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num);
-        if (IS_ERR(pool))
-                return PTR_ERR(pool);
-
-        chn->pg_pool = pool;
+        desc_avail = k3_cppi_desc_pool_avail(chn->desc_pool);
+        if (desc_avail < chn->descs_num)
+                netdev_warn(emac->ndev,
+                            "not enough RX descriptors available %d < %d\n",
+                            desc_avail, chn->descs_num);
 
-        for (i = 0; i < chn->descs_num; i++) {
-                /* NOTE: we're not using memory efficiently here.
-                 * 1 full page (4KB?) used here instead of
-                 * PRUETH_MAX_PKT_SIZE (~1.5KB?)
+        if (chn->xsk_pool) {
+                /* get pages from xsk_pool and push to RX ring
+                 * queue as much as possible
                  */
-                page = page_pool_dev_alloc_pages(pool);
-                if (!page) {
-                        netdev_err(emac->ndev, "couldn't allocate rx page\n");
-                        ret = -ENOMEM;
+                ret = prueth_rx_alloc_zc(emac, desc_avail);
+                if (!ret)
                         goto recycle_alloc_pg;
-                }
+        } else {
+                for (i = 0; i < desc_avail; i++) {
+                        /* NOTE: we're not using memory efficiently here.
+                         * 1 full page (4KB?) used here instead of
+                         * PRUETH_MAX_PKT_SIZE (~1.5KB?)
+                         */
+                        page = page_pool_dev_alloc_pages(chn->pg_pool);
+                        if (!page) {
+                                netdev_err(emac->ndev, "couldn't allocate rx page\n");
+                                ret = -ENOMEM;
+                                goto recycle_alloc_pg;
+                        }
 
-                ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
-                if (ret < 0) {
-                        netdev_err(emac->ndev,
-                                   "cannot submit page for rx chan %s ret %d\n",
-                                   chn->name, ret);
-                        page_pool_recycle_direct(pool, page);
-                        goto recycle_alloc_pg;
+                        ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size);
+                        if (ret < 0) {
+                                netdev_err(emac->ndev,
+                                           "cannot submit page for rx chan %s ret %d\n",
+                                           chn->name, ret);
+                                page_pool_recycle_direct(chn->pg_pool, page);
+                                goto recycle_alloc_pg;
+                        }
                 }
         }
 
@@ -1223,15 +1548,13 @@ void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
 }
 EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout);
 
-static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
+int icssg_ndo_set_ts_config(struct net_device *ndev,
+                            struct kernel_hwtstamp_config *config,
+                            struct netlink_ext_ack *extack)
 {
         struct prueth_emac *emac = netdev_priv(ndev);
-        struct hwtstamp_config config;
-
-        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
-                return -EFAULT;
 
-        switch (config.tx_type) {
+        switch (config->tx_type) {
         case HWTSTAMP_TX_OFF:
                 emac->tx_ts_enabled = 0;
                 break;
@@ -1242,7 +1565,7 @@ static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
                 return -ERANGE;
         }
 
-        switch (config.rx_filter) {
+        switch (config->rx_filter) {
         case HWTSTAMP_FILTER_NONE:
                 emac->rx_ts_enabled = 0;
                 break;
@@ -1262,43 +1585,28 @@ static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
         case HWTSTAMP_FILTER_NTP_ALL:
                 emac->rx_ts_enabled = 1;
-                config.rx_filter = HWTSTAMP_FILTER_ALL;
+                config->rx_filter = HWTSTAMP_FILTER_ALL;
                 break;
         default:
                 return -ERANGE;
         }
 
-        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-                -EFAULT : 0;
+        return 0;
 }
+EXPORT_SYMBOL_GPL(icssg_ndo_set_ts_config);
 
-static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
+int icssg_ndo_get_ts_config(struct net_device *ndev,
+                            struct kernel_hwtstamp_config *config)
 {
         struct prueth_emac *emac = netdev_priv(ndev);
-        struct hwtstamp_config config;
-
-        config.flags = 0;
-        config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
-        config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
-        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-                -EFAULT : 0;
-}
+        config->flags = 0;
+        config->tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+        config->rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
 
-int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-{
-        switch (cmd) {
-        case SIOCGHWTSTAMP:
-                return emac_get_ts_config(ndev, ifr);
-        case SIOCSHWTSTAMP:
-                return emac_set_ts_config(ndev, ifr);
-        default:
-                break;
-        }
-
-        return phy_do_ioctl(ndev, ifr, cmd);
+        return 0;
 }
-EXPORT_SYMBOL_GPL(icssg_ndo_ioctl);
+EXPORT_SYMBOL_GPL(icssg_ndo_get_ts_config);
 
 void icssg_ndo_get_stats64(struct net_device *ndev,
                            struct rtnl_link_stats64 *stats)
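Note: icssg_ndo_set_ts_config() and icssg_ndo_get_ts_config() above follow the kernel_hwtstamp_config prototypes used by the .ndo_hwtstamp_set/.ndo_hwtstamp_get callbacks in struct net_device_ops, replacing the SIOC[GS]HWTSTAMP handling that icssg_ndo_ioctl() used to do. The driver's net_device_ops table lives in icssg_prueth.c, outside this diff, so the snippet below is only a hedged sketch of how these exported handlers would typically be wired up; the ops-struct name is hypothetical and the declarations are assumed to come from icssg_prueth.h.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/netdevice.h>
#include "icssg_prueth.h"   /* assumed to declare the two handlers */

static const struct net_device_ops emac_hwtstamp_ops_sketch = {
        /* the driver's other callbacks (.ndo_open, .ndo_start_xmit, ...)
         * are unchanged by this patch and omitted from this sketch
         */
        .ndo_hwtstamp_get = icssg_ndo_get_ts_config,
        .ndo_hwtstamp_set = icssg_ndo_set_ts_config,
};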
