Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_txrx.c	268
1 file changed, 211 insertions(+), 57 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index d2871757ec94..73f08d02f9c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -144,6 +144,56 @@ static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
 }
 
 /**
+ * ice_clean_tstamp_ring - clean time stamp ring
+ * @tx_ring: Tx ring to clean the Time Stamp ring for
+ */
+static void ice_clean_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+	struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+	u32 size;
+
+	if (!tstamp_ring->desc)
+		return;
+
+	size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+		     PAGE_SIZE);
+	memset(tstamp_ring->desc, 0, size);
+	tstamp_ring->next_to_use = 0;
+}
+
+/**
+ * ice_free_tstamp_ring - free time stamp resources per queue
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+	struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+	u32 size;
+
+	if (!tstamp_ring->desc)
+		return;
+
+	ice_clean_tstamp_ring(tx_ring);
+	size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+		     PAGE_SIZE);
+	dmam_free_coherent(tx_ring->dev, size, tstamp_ring->desc,
+			   tstamp_ring->dma);
+	tstamp_ring->desc = NULL;
+}
+
+/**
+ * ice_free_tx_tstamp_ring - free time stamp resources per Tx ring
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+	ice_free_tstamp_ring(tx_ring);
+	kfree_rcu(tx_ring->tstamp_ring, rcu);
+	tx_ring->tstamp_ring = NULL;
+	tx_ring->flags &= ~ICE_TX_FLAGS_TXTIME;
+}
+
+/**
  * ice_clean_tx_ring - Free any empty Tx buffers
  * @tx_ring: ring to be cleaned
  */
@@ -181,6 +231,9 @@ tx_skip_free:
 
 	/* cleanup Tx queue statistics */
 	netdev_tx_reset_queue(txring_txq(tx_ring));
+
+	if (ice_is_txtime_cfg(tx_ring))
+		ice_free_tx_tstamp_ring(tx_ring);
 }
 
 /**
@@ -332,6 +385,84 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
 }
 
 /**
+ * ice_alloc_tstamp_ring - allocate the Time Stamp ring
+ * @tx_ring: Tx ring to allocate the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_alloc_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+	struct ice_tstamp_ring *tstamp_ring;
+
+	/* allocate with kzalloc(), free with kfree_rcu() */
+	tstamp_ring = kzalloc(sizeof(*tstamp_ring), GFP_KERNEL);
+	if (!tstamp_ring)
+		return -ENOMEM;
+
+	tstamp_ring->tx_ring = tx_ring;
+	tx_ring->tstamp_ring = tstamp_ring;
+	tstamp_ring->desc = NULL;
+	tstamp_ring->count = ice_calc_ts_ring_count(tx_ring);
+	tx_ring->flags |= ICE_TX_FLAGS_TXTIME;
+	return 0;
+}
+
+/**
+ * ice_setup_tstamp_ring - allocate the Time Stamp ring
+ * @tx_ring: Tx ring to set up the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+	struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+	struct device *dev = tx_ring->dev;
+	u32 size;
+
+	/* round up to nearest page */
+	size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+		     PAGE_SIZE);
+	tstamp_ring->desc = dmam_alloc_coherent(dev, size, &tstamp_ring->dma,
+						GFP_KERNEL);
+	if (!tstamp_ring->desc) {
+		dev_err(dev, "Unable to allocate memory for Time stamp Ring, size=%d\n",
+			size);
+		return -ENOMEM;
+	}
+
+	tstamp_ring->next_to_use = 0;
+	return 0;
+}
+
+/**
+ * ice_alloc_setup_tstamp_ring - Allocate and setup the Time Stamp ring
+ * @tx_ring: Tx ring to allocate and setup the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+	struct device *dev = tx_ring->dev;
+	int err;
+
+	err = ice_alloc_tstamp_ring(tx_ring);
+	if (err) {
+		dev_err(dev, "Unable to allocate Time stamp ring for Tx ring %d\n",
+			tx_ring->q_index);
+		return err;
+	}
+
+	err = ice_setup_tstamp_ring(tx_ring);
+	if (err) {
+		dev_err(dev, "Unable to setup Time stamp ring for Tx ring %d\n",
+			tx_ring->q_index);
+		ice_free_tx_tstamp_ring(tx_ring);
+		return err;
+	}
+	return 0;
+}
+
+/**
  * ice_setup_tx_ring - Allocate the Tx descriptors
  * @tx_ring: the Tx ring to set up
  *
@@ -894,10 +1025,6 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
 	__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
 				   rx_buf->page_offset, size);
 	sinfo->xdp_frags_size += size;
-	/* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
-	 * can pop off frags but driver has to handle it on its own
-	 */
-	rx_ring->nr_frags = sinfo->nr_frags;
 
 	if (page_is_pfmemalloc(rx_buf->page))
 		xdp_buff_set_frag_pfmemalloc(xdp);
@@ -968,20 +1095,20 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
 /**
  * ice_get_pgcnts - grab page_count() for gathered fragments
  * @rx_ring: Rx descriptor ring to store the page counts on
+ * @ntc: the next to clean element (not included in this frame!)
  *
  * This function is intended to be called right before running XDP
  * program so that the page recycling mechanism will be able to take
  * a correct decision regarding underlying pages; this is done in such
 * way as XDP program can change the refcount of page
  */
-static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
+static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc)
 {
-	u32 nr_frags = rx_ring->nr_frags + 1;
 	u32 idx = rx_ring->first_desc;
 	struct ice_rx_buf *rx_buf;
 	u32 cnt = rx_ring->count;
 
-	for (int i = 0; i < nr_frags; i++) {
+	while (idx != ntc) {
 		rx_buf = &rx_ring->rx_buf[idx];
 		rx_buf->pgcnt = page_count(rx_buf->page);
 
@@ -1035,10 +1162,9 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 		skb_metadata_set(skb, metasize);
 
 	if (unlikely(xdp_buff_has_frags(xdp)))
-		xdp_update_skb_shared_info(skb, nr_frags,
-					   sinfo->xdp_frags_size,
-					   nr_frags * xdp->frame_sz,
-					   xdp_buff_is_frag_pfmemalloc(xdp));
+		xdp_update_skb_frags_info(skb, nr_frags, sinfo->xdp_frags_size,
+					  nr_frags * xdp->frame_sz,
+					  xdp_buff_get_skb_flags(xdp));
 
 	return skb;
 }
@@ -1115,10 +1241,10 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 		memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
 		       sizeof(skb_frag_t) * nr_frags);
 
-		xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
-					   sinfo->xdp_frags_size,
-					   nr_frags * xdp->frame_sz,
-					   xdp_buff_is_frag_pfmemalloc(xdp));
+		xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags,
+					  sinfo->xdp_frags_size,
+					  nr_frags * xdp->frame_sz,
+					  xdp_buff_get_skb_flags(xdp));
 	}
 
 	return skb;
@@ -1154,62 +1280,51 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
 }
 
 /**
- * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
+ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame
  * @rx_ring: Rx ring with all the auxiliary data
  * @xdp: XDP buffer carrying linear + frags part
- * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
- * @ntc: a current next_to_clean value to be stored at rx_ring
+ * @ntc: the next to clean element (not included in this frame!)
  * @verdict: return code from XDP program execution
  *
- * Walk through gathered fragments and satisfy internal page
- * recycle mechanism; we take here an action related to verdict
- * returned by XDP program;
+ * Called after XDP program is completed, or on error with verdict set to
+ * ICE_XDP_CONSUMED.
+ *
+ * Walk through buffers from first_desc to the end of the frame, releasing
+ * buffers and satisfying internal page recycle mechanism. The action depends
+ * on verdict from XDP program.
  */
 static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
-			    u32 *xdp_xmit, u32 ntc, u32 verdict)
+			    u32 ntc, u32 verdict)
 {
-	u32 nr_frags = rx_ring->nr_frags + 1;
 	u32 idx = rx_ring->first_desc;
 	u32 cnt = rx_ring->count;
-	u32 post_xdp_frags = 1;
 	struct ice_rx_buf *buf;
-	int i;
+	u32 xdp_frags = 0;
+	int i = 0;
 
 	if (unlikely(xdp_buff_has_frags(xdp)))
-		post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
+		xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
 
-	for (i = 0; i < post_xdp_frags; i++) {
+	while (idx != ntc) {
 		buf = &rx_ring->rx_buf[idx];
+		if (++idx == cnt)
+			idx = 0;
 
-		if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+		/* An XDP program could release fragments from the end of the
+		 * buffer. For these, we need to keep the pagecnt_bias as-is.
+		 * To do this, only adjust pagecnt_bias for fragments up to
+		 * the total remaining after the XDP program has run.
+		 */
+		if (verdict != ICE_XDP_CONSUMED)
 			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
-			*xdp_xmit |= verdict;
-		} else if (verdict & ICE_XDP_CONSUMED) {
+		else if (i++ <= xdp_frags)
 			buf->pagecnt_bias++;
-		} else if (verdict == ICE_XDP_PASS) {
-			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
-		}
 
 		ice_put_rx_buf(rx_ring, buf);
-
-		if (++idx == cnt)
-			idx = 0;
-	}
-	/* handle buffers that represented frags released by XDP prog;
-	 * for these we keep pagecnt_bias as-is; refcount from struct page
-	 * has been decremented within XDP prog and we do not have to increase
-	 * the biased refcnt
-	 */
-	for (; i < nr_frags; i++) {
-		buf = &rx_ring->rx_buf[idx];
-		ice_put_rx_buf(rx_ring, buf);
-		if (++idx == cnt)
-			idx = 0;
 	}
 
 	xdp->data = NULL;
 	rx_ring->first_desc = ntc;
-	rx_ring->nr_frags = 0;
 }
 
 /**
@@ -1317,6 +1432,10 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 
 		/* retrieve a buffer from the ring */
 		rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
 
+		/* Increment ntc before calls to ice_put_rx_mbuf() */
+		if (++ntc == cnt)
+			ntc = 0;
+
 		if (!xdp->data) {
 			void *hard_start;
 
@@ -1325,24 +1444,23 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 			xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
 			xdp_buff_clear_frags_flag(xdp);
 		} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
-			ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
+			ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED);
 			break;
 		}
-		if (++ntc == cnt)
-			ntc = 0;
 
 		/* skip if it is NOP desc */
 		if (ice_is_non_eop(rx_ring, rx_desc))
 			continue;
 
-		ice_get_pgcnts(rx_ring);
+		ice_get_pgcnts(rx_ring, ntc);
 		xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
 		if (xdp_verdict == ICE_XDP_PASS)
 			goto construct_skb;
 		total_rx_bytes += xdp_get_buff_len(xdp);
 		total_rx_pkts++;
 
-		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+		ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
+		xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR);
 
 		continue;
 construct_skb:
@@ -1355,7 +1473,7 @@ construct_skb:
 			rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
 			xdp_verdict = ICE_XDP_CONSUMED;
 		}
-		ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+		ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
 
 		if (!skb)
 			break;
@@ -1835,10 +1953,46 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
 	/* notify HW of packet */
 	kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
 				      netdev_xmit_more());
-	if (kick)
-		/* notify HW of packet */
-		writel(i, tx_ring->tail);
+	if (!kick)
+		return;
 
+	if (ice_is_txtime_cfg(tx_ring)) {
+		struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+		u32 tstamp_count = tstamp_ring->count;
+		u32 j = tstamp_ring->next_to_use;
+		struct ice_ts_desc *ts_desc;
+		struct timespec64 ts;
+		u32 tstamp;
+
+		ts = ktime_to_timespec64(first->skb->tstamp);
+		tstamp = ts.tv_nsec >> ICE_TXTIME_CTX_RESOLUTION_128NS;
+
+		ts_desc = ICE_TS_DESC(tstamp_ring, j);
+		ts_desc->tx_desc_idx_tstamp = ice_build_tstamp_desc(i, tstamp);
+
+		j++;
+		if (j == tstamp_count) {
+			u32 fetch = tstamp_count - tx_ring->count;
+
+			j = 0;
+
+			/* To prevent an MDD, when wrapping the tstamp ring
+			 * create additional TS descriptors equal to the number
+			 * of the fetch TS descriptors value. HW will merge the
+			 * TS descriptors with the same timestamp value into a
+			 * single descriptor.
+			 */
+			for (; j < fetch; j++) {
+				ts_desc = ICE_TS_DESC(tstamp_ring, j);
+				ts_desc->tx_desc_idx_tstamp =
+					ice_build_tstamp_desc(i, tstamp);
+			}
+		}
+		tstamp_ring->next_to_use = j;
+		writel_relaxed(j, tstamp_ring->tail);
+	} else {
+		writel_relaxed(i, tx_ring->tail);
+	}
 
 	return;
 
 dma_error:
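The subtlest part of the Tx-side change is the tail bump in ice_tx_map(): the timestamp ring is sized larger than the Tx ring (via ice_calc_ts_ring_count()), the skb timestamp is reduced to 128 ns resolution (tv_nsec >> ICE_TXTIME_CTX_RESOLUTION_128NS), and when next_to_use wraps, the first tstamp_count - tx_ring->count slots are refilled with duplicates of the same descriptor so hardware merges them instead of raising an MDD event. Below is a minimal standalone sketch of just that wrap arithmetic; the ring sizes and the descriptor packing are hypothetical stand-ins, not the driver's real ice_build_tstamp_desc() layout.

#include <stdint.h>
#include <stdio.h>

#define TS_RING_COUNT	2048U	/* hypothetical timestamp ring size */
#define TX_RING_COUNT	1024U	/* hypothetical Tx ring size (always smaller) */

struct toy_ts_desc {
	uint32_t tx_desc_idx_tstamp;	/* packed Tx index + timestamp */
};

/* Toy stand-in for ice_build_tstamp_desc(): pack the Tx descriptor index
 * and the 128 ns-resolution timestamp into one word (encoding is invented
 * for illustration only).
 */
static uint32_t toy_build_tstamp_desc(uint16_t tx_idx, uint32_t tstamp)
{
	return ((uint32_t)tx_idx << 16) | (tstamp & 0xffffU);
}

/* Mirror of the tail-bump logic above: write one TS descriptor and, when the
 * ring wraps, pad slots 0..fetch-1 with copies carrying the same timestamp so
 * the hardware merges them into one descriptor instead of flagging an MDD.
 * Returns the new next_to_use, which is also the value written to tail.
 */
static uint32_t toy_ts_ring_push(struct toy_ts_desc *ring, uint32_t next_to_use,
				 uint16_t tx_idx, uint32_t tstamp)
{
	uint32_t j = next_to_use;

	ring[j].tx_desc_idx_tstamp = toy_build_tstamp_desc(tx_idx, tstamp);

	if (++j == TS_RING_COUNT) {
		uint32_t fetch = TS_RING_COUNT - TX_RING_COUNT;

		for (j = 0; j < fetch; j++)
			ring[j].tx_desc_idx_tstamp =
				toy_build_tstamp_desc(tx_idx, tstamp);
	}
	return j;
}

int main(void)
{
	static struct toy_ts_desc ring[TS_RING_COUNT];
	uint32_t ntu = TS_RING_COUNT - 1;	/* force an immediate wrap */

	ntu = toy_ts_ring_push(ring, ntu, 42, 0x1234);
	printf("next_to_use after wrap: %u\n", ntu);	/* prints 1024 */
	return 0;
}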
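On the Rx side, the refactor drops the cached rx_ring->nr_frags counter: because ntc is now advanced before ice_put_rx_mbuf() runs, every buffer of the current frame lies between first_desc and ntc (exclusive), including frags an XDP program popped off via bpf_xdp_adjust_tail(). A simplified sketch of that modular walk, assuming a hypothetical fixed-size ring and keeping only the pagecnt_bias bookkeeping:

#include <stdint.h>
#include <stdio.h>

#define RING_COUNT	64U	/* hypothetical Rx ring size */

struct toy_rx_buf {
	int pagecnt_bias;	/* references the driver still holds on the page */
};

enum toy_verdict { TOY_XDP_PASS, TOY_XDP_TX, TOY_XDP_CONSUMED };

/* Release every buffer of the current frame: first_desc .. ntc - 1, modulo
 * the ring size. On a consumed verdict, only buffers the XDP program still
 * held (index <= xdp_frags, the post-XDP frag count) get their bias bumped
 * back; buffers it released via bpf_xdp_adjust_tail() keep their bias as-is,
 * since the program already dropped the page reference. Returns the new
 * first_desc.
 */
static uint32_t toy_put_frame_bufs(struct toy_rx_buf *bufs, uint32_t first_desc,
				   uint32_t ntc, uint32_t xdp_frags,
				   enum toy_verdict verdict)
{
	uint32_t idx = first_desc;
	uint32_t i = 0;

	while (idx != ntc) {
		struct toy_rx_buf *buf = &bufs[idx];

		if (++idx == RING_COUNT)
			idx = 0;

		if (verdict == TOY_XDP_CONSUMED && i++ <= xdp_frags)
			buf->pagecnt_bias++;
		/* else: the half-page moves on to the stack or XDP_TX path */
	}
	return ntc;
}

int main(void)
{
	static struct toy_rx_buf bufs[RING_COUNT];
	/* frame spanning the wrap: buffers 62, 63, 0, 1; ntc already bumped */
	uint32_t first = toy_put_frame_bufs(bufs, 62, 2, 3, TOY_XDP_CONSUMED);

	printf("new first_desc: %u, bias[63]: %d\n", first, bufs[63].pagecnt_bias);
	return 0;
}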