/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2025 Intel Corporation */

#ifndef _IDPF_XDP_H_
#define _IDPF_XDP_H_

#include <net/libeth/xdp.h>

#include "idpf_txrx.h"

int idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq);
int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
void idpf_xdp_rxq_info_deinit(struct idpf_rx_queue *rxq, u32 model);
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
			       struct bpf_prog *xdp_prog);

int idpf_xdpsqs_get(const struct idpf_vport *vport);
void idpf_xdpsqs_put(const struct idpf_vport *vport);

u32 idpf_xdpsq_poll(struct idpf_tx_queue *xdpsq, u32 budget);
bool idpf_xdp_tx_flush_bulk(struct libeth_xdp_tx_bulk *bq, u32 flags);

/**
 * idpf_xdp_tx_xmit - produce a single HW Tx descriptor out of XDP desc
 * @desc: XDP descriptor to pull the DMA address and length from
 * @i: descriptor index on the queue to fill
 * @sq: XDP queue to produce the HW Tx descriptor on
 * @priv: &xsk_tx_metadata_ops on XSk xmit or %NULL
 */
static inline void idpf_xdp_tx_xmit(struct libeth_xdp_tx_desc desc, u32 i,
				    const struct libeth_xdpsq *sq, u64 priv)
{
	struct idpf_flex_tx_desc *tx_desc = sq->descs;
	u32 cmd;

	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_DTYPE_M,
			 IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2);
	if (desc.flags & LIBETH_XDP_TX_LAST)
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_DESC_CMD_EOP);
	if (priv && (desc.flags & LIBETH_XDP_TX_CSUM))
		cmd |= FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M,
				  IDPF_TX_FLEX_DESC_CMD_CS_EN);

	tx_desc = &tx_desc[i];
	tx_desc->buf_addr = cpu_to_le64(desc.addr);
#ifdef __LIBETH_WORD_ACCESS
	*(u64 *)&tx_desc->qw1 = ((u64)desc.len << 48) | cmd;
#else
	tx_desc->qw1.buf_size = cpu_to_le16(desc.len);
	tx_desc->qw1.cmd_dtype = cpu_to_le16(cmd);
#endif
}
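
/*
 * Usage sketch (illustrative only, not driver API): libeth_xdp invokes this
 * callback once per buffer of a Tx bulk.  Assuming a hypothetical array
 * "descs" of @n prepared descriptors and a free ring region starting at
 * index "ntu" that does not wrap, a caller could fill the ring with:
 *
 *	for (u32 j = 0; j < n; j++)
 *		idpf_xdp_tx_xmit(descs[j], ntu + j, sq, 0);
 *
 * Passing 0 as @priv means no XSk Tx metadata, so CS_EN is never set.
 */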

/**
 * idpf_xdpsq_set_rs - set the RS bit on the last-filled Tx descriptor
 * @xdpsq: XDP Tx queue to request a completion for
 *
 * Mark the most recently filled descriptor (the one just before
 * @next_to_use, accounting for ring wrap), so that HW writes back a
 * completion for the whole batch.
 */
static inline void idpf_xdpsq_set_rs(const struct idpf_tx_queue *xdpsq)
{
	u32 ntu, cmd;

	ntu = xdpsq->next_to_use;
	if (unlikely(!ntu))
		ntu = xdpsq->desc_count;

	cmd = FIELD_PREP(IDPF_FLEX_TXD_QW1_CMD_M, IDPF_TX_DESC_CMD_RS);
#ifdef __LIBETH_WORD_ACCESS
	*(u64 *)&xdpsq->flex_tx[ntu - 1].q.qw1 |= cmd;
#else
	xdpsq->flex_tx[ntu - 1].q.qw1.cmd_dtype |= cpu_to_le16(cmd);
#endif
}

/**
 * idpf_xdpsq_update_tail - bump the XDPSQ tail register
 * @xdpsq: XDP Tx queue to notify HW about
 *
 * The dma_wmb() ensures all descriptor writes are visible to the device
 * before the doorbell, so the MMIO write itself can be relaxed.
 */
static inline void idpf_xdpsq_update_tail(const struct idpf_tx_queue *xdpsq)
{
	dma_wmb();
	writel_relaxed(xdpsq->next_to_use, xdpsq->tail);
}

/**
 * idpf_xdp_tx_finalize - finalize sending over XDPSQ
 * @_xdpsq: XDP Tx queue
 * @sent: whether any frames were sent
 * @flush: whether to update RS bit and the tail register
 *
 * Set the RS bit ("end of batch"), bump the tail, and queue the cleanup
 * timer. To be called after a NAPI polling loop, at the end of
 * .ndo_xdp_xmit() etc.
 */
static inline void idpf_xdp_tx_finalize(void *_xdpsq, bool sent, bool flush)
{
	struct idpf_tx_queue *xdpsq = _xdpsq;

	if ((!flush || unlikely(!sent)) &&
	    likely(xdpsq->desc_count - 1 != xdpsq->pending))
		return;

	libeth_xdpsq_lock(&xdpsq->xdp_lock);

	idpf_xdpsq_set_rs(xdpsq);
	idpf_xdpsq_update_tail(xdpsq);

	libeth_xdpsq_queue_timer(xdpsq->timer);

	libeth_xdpsq_unlock(&xdpsq->xdp_lock);
}
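
/*
 * Expected call pattern (a minimal sketch; the locals are hypothetical):
 *
 *	sent = <queue frames onto the XDPSQ>;
 *	idpf_xdp_tx_finalize(xdpsq, sent, true);
 *
 * With @flush false (or nothing sent), the doorbell is deferred until the
 * ring is one slot short of full, batching RS bits and MMIO tail writes
 * across iterations.
 */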

/**
 * struct idpf_xdp_rx_desc - quadword view of an Rx descriptor
 * @qw0: first quadword: buffer queue ID, generation, packet length, ptype
 * @qw1: second quadword: buffer ID, EOP bit
 * @qw2: third quadword: RSS hash
 * @qw3: fourth quadword, currently unused
 *
 * Mirrors &virtchnl2_rx_flex_desc_adv_nic_3 quadword by quadword, filled
 * by the idpf_xdp_get_qw*() helpers below and read via the field macros.
 */
struct idpf_xdp_rx_desc {
	aligned_u64		qw0;
#define IDPF_XDP_RX_BUFQ	BIT_ULL(47)
#define IDPF_XDP_RX_GEN		BIT_ULL(46)
#define IDPF_XDP_RX_LEN		GENMASK_ULL(45, 32)
#define IDPF_XDP_RX_PT		GENMASK_ULL(25, 16)
	aligned_u64		qw1;
#define IDPF_XDP_RX_BUF		GENMASK_ULL(47, 32)
#define IDPF_XDP_RX_EOP		BIT_ULL(1)
	aligned_u64		qw2;
#define IDPF_XDP_RX_HASH	GENMASK_ULL(31, 0)
	aligned_u64		qw3;
} __aligned(4 * sizeof(u64));
static_assert(sizeof(struct idpf_xdp_rx_desc) ==
	      sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));

#define idpf_xdp_rx_bufq(desc)	!!((desc)->qw0 & IDPF_XDP_RX_BUFQ)
#define idpf_xdp_rx_gen(desc)	!!((desc)->qw0 & IDPF_XDP_RX_GEN)
#define idpf_xdp_rx_len(desc)	FIELD_GET(IDPF_XDP_RX_LEN, (desc)->qw0)
#define idpf_xdp_rx_pt(desc)	FIELD_GET(IDPF_XDP_RX_PT, (desc)->qw0)
#define idpf_xdp_rx_buf(desc)	FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
#define idpf_xdp_rx_eop(desc)	!!((desc)->qw1 & IDPF_XDP_RX_EOP)
#define idpf_xdp_rx_hash(desc)	FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)

static inline void
idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw0 = ((const typeof(desc))rxd)->qw0;
#else
	desc->qw0 = ((u64)le16_to_cpu(rxd->pktlen_gen_bufq_id) << 32) |
		    ((u64)le16_to_cpu(rxd->ptype_err_fflags0) << 16);
#endif
}

static inline void
idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw1 = ((const typeof(desc))rxd)->qw1;
#else
	desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
		    rxd->status_err0_qw1;
#endif
}

static inline void
idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
		 const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
{
#ifdef __LIBETH_WORD_ACCESS
	desc->qw2 = ((const typeof(desc))rxd)->qw2;
#else
	desc->qw2 = ((u64)rxd->hash3 << 24) |
		    ((u64)rxd->ff2_mirrid_hash2.hash2 << 16) |
		    le16_to_cpu(rxd->hash1);
#endif
}
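
/*
 * Illustrative Rx parse flow (a sketch; "rxd", "xd" and the generation
 * check are hypothetical caller-side names, not driver API):
 *
 *	struct idpf_xdp_rx_desc xd;
 *
 *	idpf_xdp_get_qw0(&xd, rxd);
 *	if (idpf_xdp_rx_gen(&xd) != <expected gen bit>)
 *		break;	// HW has not written this descriptor back yet
 *
 *	idpf_xdp_get_qw1(&xd, rxd);
 *	<fetch buffer idpf_xdp_rx_buf(&xd) of length idpf_xdp_rx_len(&xd)>;
 *	if (idpf_xdp_rx_eop(&xd))
 *		<frame complete, run the XDP program>;
 *
 * Each quadword is pulled in only when needed, keeping loads to a minimum.
 */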

void idpf_xdp_set_features(const struct idpf_vport *vport);

int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int idpf_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);

#endif /* _IDPF_XDP_H_ */