Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.h')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_txrx.h	132
1 file changed, 16 insertions(+), 116 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 841a07bfba54..e440c55d9e9f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -4,6 +4,8 @@
 #ifndef _ICE_TXRX_H_
 #define _ICE_TXRX_H_
 
+#include <net/libeth/types.h>
+
 #include "ice_type.h"
 
 #define ICE_DFLT_IRQ_WORK	256
@@ -27,72 +29,6 @@
 #define ICE_MAX_TXQ_PER_TXQG	128
 
-/* Attempt to maximize the headroom available for incoming frames. We use a 2K
- * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
- * This leaves us with 512 bytes of room. From that we need to deduct the
- * space needed for the shared info and the padding needed to IP align the
- * frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- *	 up negative. In these cases we should fall back to the legacy
- *	 receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define ICE_2K_TOO_SMALL_WITH_PADDING \
-	((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
-	 SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
-
-/**
- * ice_compute_pad - compute the padding
- * @rx_buf_len: buffer length
- *
- * Figure out the size of half page based on given buffer length and
- * then subtract the skb_shared_info followed by subtraction of the
- * actual buffer length; this in turn results in the actual space that
- * is left for padding usage
- */
-static inline int ice_compute_pad(int rx_buf_len)
-{
-	int half_page_size;
-
-	half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
-	return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
-}
-
-/**
- * ice_skb_pad - determine the padding that we can supply
- *
- * Figure out the right Rx buffer size and based on that calculate the
- * padding
- */
-static inline int ice_skb_pad(void)
-{
-	int rx_buf_len;
-
-	/* If a 2K buffer cannot handle a standard Ethernet frame then
-	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
-	 *
-	 * For a 3K buffer we need to add enough padding to allow for
-	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
-	 * cache-line alignment.
-	 */
-	if (ICE_2K_TOO_SMALL_WITH_PADDING)
-		rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
-	else
-		rx_buf_len = ICE_RXBUF_1536;
-
-	/* if needed make room for NET_IP_ALIGN */
-	rx_buf_len -= NET_IP_ALIGN;
-
-	return ice_compute_pad(rx_buf_len);
-}
-
-#define ICE_SKB_PAD ice_skb_pad()
-#else
-#define ICE_2K_TOO_SMALL_WITH_PADDING false
-#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#endif
-
 /* We are assuming that the cache line is always 64 Bytes here for ice.
  * In order to make sure that is a correct assumption there is a check in probe
  * to print a warning if the read from GLPCI_CNF2 tells us that the cache line
@@ -112,10 +48,6 @@ static inline int ice_skb_pad(void)
 	(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
 	      (R)->next_to_clean - (R)->next_to_use - 1)
 
-#define ICE_RX_DESC_UNUSED(R) \
-	((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
-	 (R)->first_desc - (R)->next_to_use - 1)
-
 #define ICE_RING_QUARTER(R)	((R)->count >> 2)
 
 #define ICE_TX_FLAGS_TSO	BIT(0)
@@ -197,14 +129,6 @@ struct ice_tx_offload_params {
 	u8 header_len;
 };
 
-struct ice_rx_buf {
-	dma_addr_t dma;
-	struct page *page;
-	unsigned int page_offset;
-	unsigned int pgcnt;
-	unsigned int pagecnt_bias;
-};
-
 struct ice_q_stats {
 	u64 pkts;
 	u64 bytes;
@@ -262,15 +186,6 @@ struct ice_pkt_ctx {
 	__be16 vlan_proto;
 };
 
-struct ice_xdp_buff {
-	struct xdp_buff xdp_buff;
-	const union ice_32b_rx_flex_desc *eop_desc;
-	const struct ice_pkt_ctx *pkt_ctx;
-};
-
-/* Required for compatibility with xdp_buffs from xsk_pool */
-static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0);
-
 /* indices into GLINT_ITR registers */
 #define ICE_RX_ITR	ICE_IDX_ITR0
 #define ICE_TX_ITR	ICE_IDX_ITR1
@@ -323,7 +238,7 @@ struct ice_tstamp_ring {
 struct ice_rx_ring {
 	/* CL1 - 1st cacheline starts here */
 	void *desc;			/* Descriptor ring memory */
-	struct device *dev;		/* Used for DMA mapping */
+	struct page_pool *pp;
 	struct net_device *netdev;	/* netdev ring maps to */
 	struct ice_vsi *vsi;		/* Backreference to associated VSI */
 	struct ice_q_vector *q_vector;	/* Backreference to associated vector */
@@ -335,14 +250,19 @@ struct ice_rx_ring {
 	u16 next_to_alloc;
 
 	union {
-		struct ice_rx_buf *rx_buf;
+		struct libeth_fqe *rx_fqes;
 		struct xdp_buff **xdp_buf;
 	};
+
+	/* CL2 - 2nd cacheline starts here */
+	struct libeth_fqe *hdr_fqes;
+	struct page_pool *hdr_pp;
 
 	union {
-		struct ice_xdp_buff xdp_ext;
-		struct xdp_buff xdp;
+		struct libeth_xdp_buff_stash xdp;
+		struct libeth_xdp_buff *xsk;
 	};
 
+	/* CL3 - 3rd cacheline starts here */
 	union {
 		struct ice_pkt_ctx pkt_ctx;
@@ -352,12 +272,13 @@ struct ice_rx_ring {
 		};
 	};
 	struct bpf_prog *xdp_prog;
-	u16 rx_offset;
 
 	/* used in interrupt processing */
 	u16 next_to_use;
 	u16 next_to_clean;
-	u16 first_desc;
+
+	u32 hdr_truesize;
+	u32 truesize;
 
 	/* stats structs */
 	struct ice_ring_stats *ring_stats;
@@ -368,12 +289,11 @@ struct ice_rx_ring {
 	struct ice_tx_ring *xdp_ring;
 	struct ice_rx_ring *next;	/* pointer to next ring in q_vector */
 	struct xsk_buff_pool *xsk_pool;
-	u16 max_frame;
+	u16 rx_hdr_len;
 	u16 rx_buf_len;
 	dma_addr_t dma;			/* physical address of ring */
 	u8 dcb_tc;			/* Traffic class of ring */
 	u8 ptp_rx;
-#define ICE_RX_FLAGS_RING_BUILD_SKB	BIT(1)
 #define ICE_RX_FLAGS_CRC_STRIP_DIS	BIT(2)
 #define ICE_RX_FLAGS_MULTIDEV		BIT(3)
 #define ICE_RX_FLAGS_RING_GCS		BIT(4)
@@ -422,21 +342,6 @@ struct ice_tx_ring {
 	u16 quanta_prof_id;
 } ____cacheline_internodealigned_in_smp;
 
-static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
-{
-	return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
-}
-
-static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
-{
-	ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
-}
-
-static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
-{
-	ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
-}
-
 static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
 {
 	return !!ring->ch;
@@ -491,18 +396,13 @@ struct ice_coalesce_stored {
 
 static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
 {
-#if (PAGE_SIZE < 8192)
-	if (ring->rx_buf_len > (PAGE_SIZE / 2))
-		return 1;
-#endif
 	return 0;
 }
 
-#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
-
 union ice_32b_rx_flex_desc;
 
 void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs);
+void ice_rxq_pp_destroy(struct ice_rx_ring *rq);
 bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
 u16
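Editor's note: the ring-space arithmetic this patch keeps (the ICE_DESC_UNUSED() context lines above, while dropping the first_desc-based ICE_RX_DESC_UNUSED()) is easy to misread because of the wraparound term. Below is a minimal standalone sketch of that same arithmetic; the struct ring and desc_unused() names are illustrative only and are not part of the driver.

/* Sketch of the descriptor accounting kept in ice_txrx.h as
 * ICE_DESC_UNUSED(R); compiles with any C compiler.
 */
#include <stdint.h>
#include <stdio.h>

struct ring {
	uint16_t count;		/* total descriptors in the ring */
	uint16_t next_to_use;	/* next slot software will post */
	uint16_t next_to_clean;	/* next slot cleanup will reap */
};

/* Same expression as the macro: add count only when next_to_use has
 * not wrapped behind next_to_clean, and keep one slot in reserve so a
 * completely full ring is distinguishable from an empty one.
 */
static uint16_t desc_unused(const struct ring *r)
{
	return (uint16_t)((r->next_to_clean > r->next_to_use ?
			   0 : r->count) +
			  r->next_to_clean - r->next_to_use - 1);
}

int main(void)
{
	/* 6 descriptors in flight (slots 4..9): 512 - 6 - 1 = 505 free */
	struct ring no_wrap = { 512, 10, 4 };
	/* next_to_use wrapped past the end, 506 in flight: 5 free */
	struct ring wrapped = { 512, 4, 10 };

	printf("no wrap: %u unused\n", desc_unused(&no_wrap));
	printf("wrapped: %u unused\n", desc_unused(&wrapped));
	return 0;
}

The reserved slot is the classic one-empty-element convention for ring buffers: without it, next_to_use == next_to_clean would be ambiguous between "full" and "empty".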
