summaryrefslogtreecommitdiff
path: root/net/xdp
diff options
context:
space:
mode:
Diffstat (limited to 'net/xdp')
-rw-r--r--	net/xdp/xsk.c	| 20
-rw-r--r--	net/xdp/xsk_buff_pool.c	| 21
2 files changed, 16 insertions(+), 25 deletions(-)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 69bbcca8ac75..f093c3453f64 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -541,12 +541,11 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&pool->cq_lock, flags);
+ spin_lock(&pool->cq_cached_prod_lock);
ret = xskq_prod_reserve(pool->cq);
- spin_unlock_irqrestore(&pool->cq_lock, flags);
+ spin_unlock(&pool->cq_cached_prod_lock);
return ret;
}
@@ -597,7 +596,7 @@ static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
unsigned long flags;
u32 idx, i;
- spin_lock_irqsave(&pool->cq_lock, flags);
+ spin_lock_irqsave(&pool->cq_prod_lock, flags);
idx = xskq_get_prod(pool->cq);
if (unlikely(num_descs > 1)) {
@@ -615,19 +614,18 @@ static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
descs_processed++;
}
xskq_prod_submit_n(pool->cq, descs_processed);
- spin_unlock_irqrestore(&pool->cq_lock, flags);
+ spin_unlock_irqrestore(&pool->cq_prod_lock, flags);
}
static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
{
- unsigned long flags;
-
- spin_lock_irqsave(&pool->cq_lock, flags);
+ spin_lock(&pool->cq_cached_prod_lock);
xskq_prod_cancel_n(pool->cq, n);
- spin_unlock_irqrestore(&pool->cq_lock, flags);
+ spin_unlock(&pool->cq_cached_prod_lock);
}
-static void xsk_destruct_skb(struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+void xsk_destruct_skb(struct sk_buff *skb)
{
struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
@@ -1274,7 +1272,7 @@ static bool xsk_validate_queues(struct xdp_sock *xs)
return xs->fq_tmp && xs->cq_tmp;
}
-static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int xsk_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len)
{
struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
struct sock *sk = sock->sk;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index aa9788f20d0d..51526034c42a 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -12,26 +12,22 @@
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
- unsigned long flags;
-
if (!xs->tx)
return;
- spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
+ spin_lock(&pool->xsk_tx_list_lock);
list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
- spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
+ spin_unlock(&pool->xsk_tx_list_lock);
}
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
- unsigned long flags;
-
if (!xs->tx)
return;
- spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
+ spin_lock(&pool->xsk_tx_list_lock);
list_del_rcu(&xs->tx_list);
- spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
+ spin_unlock(&pool->xsk_tx_list_lock);
}
void xp_destroy(struct xsk_buff_pool *pool)
@@ -94,7 +90,8 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
INIT_LIST_HEAD(&pool->xskb_list);
INIT_LIST_HEAD(&pool->xsk_tx_list);
spin_lock_init(&pool->xsk_tx_list_lock);
- spin_lock_init(&pool->cq_lock);
+ spin_lock_init(&pool->cq_prod_lock);
+ spin_lock_init(&pool->cq_cached_prod_lock);
refcount_set(&pool->users, 1);
pool->fq = xs->fq_tmp;
@@ -158,10 +155,6 @@ static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
}
}
-#define NETDEV_XDP_ACT_ZC (NETDEV_XDP_ACT_BASIC | \
- NETDEV_XDP_ACT_REDIRECT | \
- NETDEV_XDP_ACT_XSK_ZEROCOPY)
-
int xp_assign_dev(struct xsk_buff_pool *pool,
struct net_device *netdev, u16 queue_id, u16 flags)
{
@@ -203,7 +196,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
/* For copy-mode, we are done. */
return 0;
- if ((netdev->xdp_features & NETDEV_XDP_ACT_ZC) != NETDEV_XDP_ACT_ZC) {
+ if ((netdev->xdp_features & NETDEV_XDP_ACT_XSK) != NETDEV_XDP_ACT_XSK) {
err = -EOPNOTSUPP;
goto err_unreg_pool;
}