From b8866426411c34489b0265bf720b4d917c8d4795 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 10 Mar 2023 15:53:43 +0100 Subject: ieee802154: Add support for user active scan requests In case a passive scan could not discover any PAN, a device may decide to perform an active scan to force coordinators to send a BEACON "immediately". Allow users to request to perform an active scan. Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20230310145346.1397068-2-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- net/ieee802154/nl802154.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index d8f4379d4fa6..42e531aefe86 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -1426,6 +1426,7 @@ static int nl802154_trigger_scan(struct sk_buff *skb, struct genl_info *info) type = nla_get_u8(info->attrs[NL802154_ATTR_SCAN_TYPE]); switch (type) { + case NL802154_SCAN_ACTIVE: case NL802154_SCAN_PASSIVE: request->type = type; break; -- cgit v1.2.3 From e2c3e6f53a7a8a00ffeed127cfd1b397c3b016f8 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 10 Mar 2023 15:53:44 +0100 Subject: mac802154: Handle active scanning Active scan support is based on the current passive scan support, cheered up with beacon requests sent after every channel change. Co-developed-by: David Girault Signed-off-by: David Girault Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20230310145346.1397068-3-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- include/net/ieee802154_netdev.h | 18 +++++++++++++- net/ieee802154/header_ops.c | 23 ++++++++++++++++++ net/mac802154/ieee802154_i.h | 1 + net/mac802154/scan.c | 53 +++++++++++++++++++++++++++++++++++++++-- 4 files changed, 92 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index da8a3e648c7a..257c9b2e9c9a 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -74,6 +74,10 @@ struct ieee802154_beacon_hdr { #endif } __packed; +struct ieee802154_mac_cmd_pl { + u8 cmd_id; +} __packed; + struct ieee802154_sechdr { #if defined(__LITTLE_ENDIAN_BITFIELD) u8 level:3, @@ -149,6 +153,16 @@ struct ieee802154_beacon_frame { struct ieee802154_beacon_hdr mac_pl; }; +struct ieee802154_mac_cmd_frame { + struct ieee802154_hdr mhr; + struct ieee802154_mac_cmd_pl mac_pl; +}; + +struct ieee802154_beacon_req_frame { + struct ieee802154_hdr mhr; + struct ieee802154_mac_cmd_pl mac_pl; +}; + /* pushes hdr onto the skb. fields of hdr->fc that can be calculated from * the contents of hdr will be, and the actual value of those bits in * hdr->fc will be ignored. 
this includes the INTRA_PAN bit and the frame @@ -174,9 +188,11 @@ int ieee802154_hdr_peek_addrs(const struct sk_buff *skb, */ int ieee802154_hdr_peek(const struct sk_buff *skb, struct ieee802154_hdr *hdr); -/* pushes a beacon frame into an skb */ +/* pushes/pulls various frame types into/from an skb */ int ieee802154_beacon_push(struct sk_buff *skb, struct ieee802154_beacon_frame *beacon); +int ieee802154_mac_cmd_push(struct sk_buff *skb, void *frame, + const void *pl, unsigned int pl_len); int ieee802154_max_payload(const struct ieee802154_hdr *hdr); diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c index 35d384dfe29d..a5ff1017a4b2 100644 --- a/net/ieee802154/header_ops.c +++ b/net/ieee802154/header_ops.c @@ -120,6 +120,29 @@ ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr) } EXPORT_SYMBOL_GPL(ieee802154_hdr_push); +int ieee802154_mac_cmd_push(struct sk_buff *skb, void *f, + const void *pl, unsigned int pl_len) +{ + struct ieee802154_mac_cmd_frame *frame = f; + struct ieee802154_mac_cmd_pl *mac_pl = &frame->mac_pl; + struct ieee802154_hdr *mhr = &frame->mhr; + int ret; + + skb_reserve(skb, sizeof(*mhr)); + ret = ieee802154_hdr_push(skb, mhr); + if (ret < 0) + return ret; + + skb_reset_mac_header(skb); + skb->mac_len = ret; + + skb_put_data(skb, mac_pl, sizeof(*mac_pl)); + skb_put_data(skb, pl, pl_len); + + return 0; +} +EXPORT_SYMBOL_GPL(ieee802154_mac_cmd_push); + int ieee802154_beacon_push(struct sk_buff *skb, struct ieee802154_beacon_frame *beacon) { diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h index 63bab99ed368..7bfd5411a164 100644 --- a/net/mac802154/ieee802154_i.h +++ b/net/mac802154/ieee802154_i.h @@ -58,6 +58,7 @@ struct ieee802154_local { /* Scanning */ u8 scan_page; u8 scan_channel; + struct ieee802154_beacon_req_frame scan_beacon_req; struct cfg802154_scan_request __rcu *scan_req; struct delayed_work scan_work; diff --git a/net/mac802154/scan.c b/net/mac802154/scan.c index 9b0933a185eb..201bfc567a43 100644 --- a/net/mac802154/scan.c +++ b/net/mac802154/scan.c @@ -18,8 +18,12 @@ #define IEEE802154_BEACON_MHR_SZ 13 #define IEEE802154_BEACON_PL_SZ 4 +#define IEEE802154_MAC_CMD_MHR_SZ 23 +#define IEEE802154_MAC_CMD_PL_SZ 1 #define IEEE802154_BEACON_SKB_SZ (IEEE802154_BEACON_MHR_SZ + \ IEEE802154_BEACON_PL_SZ) +#define IEEE802154_MAC_CMD_SKB_SZ (IEEE802154_MAC_CMD_MHR_SZ + \ + IEEE802154_MAC_CMD_PL_SZ) /* mac802154_scan_cleanup_locked() must be called upon scan completion or abort. 
* - Completions are asynchronous, not locked by the rtnl and decided by the @@ -131,6 +135,42 @@ static int mac802154_scan_find_next_chan(struct ieee802154_local *local, return 0; } +static int mac802154_scan_prepare_beacon_req(struct ieee802154_local *local) +{ + memset(&local->scan_beacon_req, 0, sizeof(local->scan_beacon_req)); + local->scan_beacon_req.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD; + local->scan_beacon_req.mhr.fc.dest_addr_mode = IEEE802154_SHORT_ADDRESSING; + local->scan_beacon_req.mhr.fc.version = IEEE802154_2003_STD; + local->scan_beacon_req.mhr.fc.source_addr_mode = IEEE802154_NO_ADDRESSING; + local->scan_beacon_req.mhr.dest.mode = IEEE802154_ADDR_SHORT; + local->scan_beacon_req.mhr.dest.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST); + local->scan_beacon_req.mhr.dest.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); + local->scan_beacon_req.mac_pl.cmd_id = IEEE802154_CMD_BEACON_REQ; + + return 0; +} + +static int mac802154_transmit_beacon_req(struct ieee802154_local *local, + struct ieee802154_sub_if_data *sdata) +{ + struct sk_buff *skb; + int ret; + + skb = alloc_skb(IEEE802154_MAC_CMD_SKB_SZ, GFP_KERNEL); + if (!skb) + return -ENOBUFS; + + skb->dev = sdata->dev; + + ret = ieee802154_mac_cmd_push(skb, &local->scan_beacon_req, NULL, 0); + if (ret) { + kfree_skb(skb); + return ret; + } + + return ieee802154_mlme_tx(local, sdata, skb); +} + void mac802154_scan_worker(struct work_struct *work) { struct ieee802154_local *local = @@ -206,6 +246,13 @@ void mac802154_scan_worker(struct work_struct *work) goto end_scan; } + if (scan_req->type == NL802154_SCAN_ACTIVE) { + ret = mac802154_transmit_beacon_req(local, sdata); + if (ret) + dev_err(&sdata->dev->dev, + "Error when transmitting beacon request (%d)\n", ret); + } + ieee802154_configure_durations(wpan_phy, page, channel); scan_duration = mac802154_scan_get_channel_time(scan_req_duration, wpan_phy->symbol_duration); @@ -231,8 +278,8 @@ int mac802154_trigger_scan_locked(struct ieee802154_sub_if_data *sdata, if (mac802154_is_scanning(local)) return -EBUSY; - /* TODO: support other scanning type */ - if (request->type != NL802154_SCAN_PASSIVE) + if (request->type != NL802154_SCAN_PASSIVE && + request->type != NL802154_SCAN_ACTIVE) return -EOPNOTSUPP; /* Store scanning parameters */ @@ -247,6 +294,8 @@ int mac802154_trigger_scan_locked(struct ieee802154_sub_if_data *sdata, local->scan_page = request->page; local->scan_channel = -1; set_bit(IEEE802154_IS_SCANNING, &local->ongoing); + if (request->type == NL802154_SCAN_ACTIVE) + mac802154_scan_prepare_beacon_req(local); nl802154_scan_started(request->wpan_phy, request->wpan_dev); -- cgit v1.2.3 From 26f88e4ebd4fbe96fb4326408e2af05644716d76 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 10 Mar 2023 15:53:45 +0100 Subject: ieee802154: Add support for allowing to answer BEACON_REQ Accept beaconing configurations from the user which involve answering beacon requests rather than only passively sending beacons. This may help devices to find the PAN more quickly. 
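For orientation before the tags, a minimal sketch (not taken from the patch) of the beacon-interval semantics this series ends up with; the two helper calls are hypothetical placeholders, and the value 15 for IEEE802154_ACTIVE_SCAN_DURATION is taken from the description of the follow-up mac802154 commit further below:

        /* Sketch only: how a user-supplied beacon interval is interpreted once
         * the whole series is applied. Helper names are illustrative. */
        if (interval <= IEEE802154_MAX_SCAN_DURATION)
                start_periodic_beaconing(interval);              /* classic beaconing */
        else if (interval == IEEE802154_ACTIVE_SCAN_DURATION)   /* i.e. 15 */
                answer_beacon_requests_only();                   /* new answer-only mode */
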
Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20230310145346.1397068-4-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- net/ieee802154/nl802154.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 42e531aefe86..a20ac4bb48a4 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -233,7 +233,7 @@ static const struct nla_policy nl802154_policy[NL802154_ATTR_MAX+1] = { NLA_POLICY_RANGE(NLA_U8, NL802154_SCAN_DONE_REASON_FINISHED, NL802154_SCAN_DONE_REASON_ABORTED), [NL802154_ATTR_BEACON_INTERVAL] = - NLA_POLICY_MAX(NLA_U8, IEEE802154_MAX_SCAN_DURATION), + NLA_POLICY_MAX(NLA_U8, IEEE802154_ACTIVE_SCAN_DURATION), #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL [NL802154_ATTR_SEC_ENABLED] = { .type = NLA_U8, }, -- cgit v1.2.3 From d021d218f6d924ff5417c64b2e41d184e4bb32d3 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 10 Mar 2023 15:53:46 +0100 Subject: mac802154: Handle received BEACON_REQ When performing an active scan, devices emit BEACON_REQ which must be answered by other PANs receiving the request, unless they are already passively sending beacons. Answering a beacon request becomes a duty when the user tells us to send beacons and the request provides an interval of 15. Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20230310145346.1397068-5-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- include/net/ieee802154_netdev.h | 2 ++ net/ieee802154/header_ops.c | 13 ++++++++ net/mac802154/ieee802154_i.h | 20 ++++++++++++ net/mac802154/main.c | 2 ++ net/mac802154/rx.c | 70 ++++++++++++++++++++++++++++++++++++++++- net/mac802154/scan.c | 15 ++++++--- 6 files changed, 117 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 257c9b2e9c9a..063313df447d 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -193,6 +193,8 @@ int ieee802154_beacon_push(struct sk_buff *skb, struct ieee802154_beacon_frame *beacon); int ieee802154_mac_cmd_push(struct sk_buff *skb, void *frame, const void *pl, unsigned int pl_len); +int ieee802154_mac_cmd_pl_pull(struct sk_buff *skb, + struct ieee802154_mac_cmd_pl *mac_pl); int ieee802154_max_payload(const struct ieee802154_hdr *hdr); diff --git a/net/ieee802154/header_ops.c b/net/ieee802154/header_ops.c index a5ff1017a4b2..41a556be1017 100644 --- a/net/ieee802154/header_ops.c +++ b/net/ieee802154/header_ops.c @@ -307,6 +307,19 @@ ieee802154_hdr_pull(struct sk_buff *skb, struct ieee802154_hdr *hdr) } EXPORT_SYMBOL_GPL(ieee802154_hdr_pull); +int ieee802154_mac_cmd_pl_pull(struct sk_buff *skb, + struct ieee802154_mac_cmd_pl *mac_pl) +{ + if (!pskb_may_pull(skb, sizeof(*mac_pl))) + return -EINVAL; + + memcpy(mac_pl, skb->data, sizeof(*mac_pl)); + skb_pull(skb, sizeof(*mac_pl)); + + return 0; +} +EXPORT_SYMBOL_GPL(ieee802154_mac_cmd_pl_pull); + int ieee802154_hdr_peek_addrs(const struct sk_buff *skb, struct ieee802154_hdr *hdr) { diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h index 7bfd5411a164..c347ec9ff8c9 100644 --- a/net/mac802154/ieee802154_i.h +++ b/net/mac802154/ieee802154_i.h @@ -71,6 +71,8 @@ struct ieee802154_local { /* Asynchronous tasks */ struct list_head rx_beacon_list; struct work_struct rx_beacon_work; + struct list_head rx_mac_cmd_list; + struct work_struct rx_mac_cmd_work; bool started; bool suspended; @@ 
-155,6 +157,22 @@ ieee802154_sdata_running(struct ieee802154_sub_if_data *sdata) return test_bit(SDATA_STATE_RUNNING, &sdata->state); } +static inline int ieee802154_get_mac_cmd(struct sk_buff *skb, u8 *mac_cmd) +{ + struct ieee802154_mac_cmd_pl mac_pl; + int ret; + + if (mac_cb(skb)->type != IEEE802154_FC_TYPE_MAC_CMD) + return -EINVAL; + + ret = ieee802154_mac_cmd_pl_pull(skb, &mac_pl); + if (ret) + return ret; + + *mac_cmd = mac_pl.cmd_id; + return 0; +} + extern struct ieee802154_mlme_ops mac802154_mlme_wpan; void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb); @@ -276,6 +294,8 @@ static inline bool mac802154_is_beaconing(struct ieee802154_local *local) return test_bit(IEEE802154_IS_BEACONING, &local->ongoing); } +void mac802154_rx_mac_cmd_worker(struct work_struct *work); + /* interface handling */ int ieee802154_iface_init(void); void ieee802154_iface_exit(void); diff --git a/net/mac802154/main.c b/net/mac802154/main.c index ee23e234b998..357ece67432b 100644 --- a/net/mac802154/main.c +++ b/net/mac802154/main.c @@ -90,6 +90,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops) INIT_LIST_HEAD(&local->interfaces); INIT_LIST_HEAD(&local->rx_beacon_list); + INIT_LIST_HEAD(&local->rx_mac_cmd_list); mutex_init(&local->iflist_mtx); tasklet_setup(&local->tasklet, ieee802154_tasklet_handler); @@ -100,6 +101,7 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops) INIT_DELAYED_WORK(&local->scan_work, mac802154_scan_worker); INIT_WORK(&local->rx_beacon_work, mac802154_rx_beacon_worker); INIT_DELAYED_WORK(&local->beacon_work, mac802154_beacon_worker); + INIT_WORK(&local->rx_mac_cmd_work, mac802154_rx_mac_cmd_worker); /* init supported flags with 802.15.4 default ranges */ phy->supported.max_minbe = 8; diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c index da0628ee3c89..e2434b4fe514 100644 --- a/net/mac802154/rx.c +++ b/net/mac802154/rx.c @@ -47,6 +47,62 @@ void mac802154_rx_beacon_worker(struct work_struct *work) kfree(mac_pkt); } +static bool mac802154_should_answer_beacon_req(struct ieee802154_local *local) +{ + struct cfg802154_beacon_request *beacon_req; + unsigned int interval; + + rcu_read_lock(); + beacon_req = rcu_dereference(local->beacon_req); + if (!beacon_req) { + rcu_read_unlock(); + return false; + } + + interval = beacon_req->interval; + rcu_read_unlock(); + + if (!mac802154_is_beaconing(local)) + return false; + + return interval == IEEE802154_ACTIVE_SCAN_DURATION; +} + +void mac802154_rx_mac_cmd_worker(struct work_struct *work) +{ + struct ieee802154_local *local = + container_of(work, struct ieee802154_local, rx_mac_cmd_work); + struct cfg802154_mac_pkt *mac_pkt; + u8 mac_cmd; + int rc; + + mac_pkt = list_first_entry_or_null(&local->rx_mac_cmd_list, + struct cfg802154_mac_pkt, node); + if (!mac_pkt) + return; + + rc = ieee802154_get_mac_cmd(mac_pkt->skb, &mac_cmd); + if (rc) + goto out; + + switch (mac_cmd) { + case IEEE802154_CMD_BEACON_REQ: + dev_dbg(&mac_pkt->sdata->dev->dev, "processing BEACON REQ\n"); + if (!mac802154_should_answer_beacon_req(local)) + break; + + queue_delayed_work(local->mac_wq, &local->beacon_work, 0); + break; + default: + break; + } + +out: + list_del(&mac_pkt->node); + kfree_skb(mac_pkt->skb); + kfree(mac_pkt); +} + static int ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata, struct sk_buff *skb, const struct ieee802154_hdr *hdr) @@ -140,8 +196,20 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata, list_add_tail(&mac_pkt->node, 
&sdata->local->rx_beacon_list); queue_work(sdata->local->mac_wq, &sdata->local->rx_beacon_work); return NET_RX_SUCCESS; - case IEEE802154_FC_TYPE_ACK: + case IEEE802154_FC_TYPE_MAC_CMD: + dev_dbg(&sdata->dev->dev, "MAC COMMAND received\n"); + mac_pkt = kzalloc(sizeof(*mac_pkt), GFP_ATOMIC); + if (!mac_pkt) + goto fail; + + mac_pkt->skb = skb_get(skb); + mac_pkt->sdata = sdata; + list_add_tail(&mac_pkt->node, &sdata->local->rx_mac_cmd_list); + queue_work(sdata->local->mac_wq, &sdata->local->rx_mac_cmd_work); + return NET_RX_SUCCESS; + + case IEEE802154_FC_TYPE_ACK: goto fail; case IEEE802154_FC_TYPE_DATA: diff --git a/net/mac802154/scan.c b/net/mac802154/scan.c index 201bfc567a43..618553a2e026 100644 --- a/net/mac802154/scan.c +++ b/net/mac802154/scan.c @@ -403,6 +403,7 @@ void mac802154_beacon_worker(struct work_struct *work) struct cfg802154_beacon_request *beacon_req; struct ieee802154_sub_if_data *sdata; struct wpan_dev *wpan_dev; + u8 interval; int ret; rcu_read_lock(); @@ -423,6 +424,7 @@ void mac802154_beacon_worker(struct work_struct *work) } wpan_dev = beacon_req->wpan_dev; + interval = beacon_req->interval; rcu_read_unlock(); @@ -432,8 +434,9 @@ void mac802154_beacon_worker(struct work_struct *work) dev_err(&sdata->dev->dev, "Beacon could not be transmitted (%d)\n", ret); - queue_delayed_work(local->mac_wq, &local->beacon_work, - local->beacon_interval); + if (interval < IEEE802154_ACTIVE_SCAN_DURATION) + queue_delayed_work(local->mac_wq, &local->beacon_work, + local->beacon_interval); } int mac802154_stop_beacons_locked(struct ieee802154_local *local, @@ -488,13 +491,17 @@ int mac802154_send_beacons_locked(struct ieee802154_sub_if_data *sdata, local->beacon.mhr.source.pan_id = request->wpan_dev->pan_id; local->beacon.mhr.source.extended_addr = request->wpan_dev->extended_addr; local->beacon.mac_pl.beacon_order = request->interval; - local->beacon.mac_pl.superframe_order = request->interval; + if (request->interval <= IEEE802154_MAX_SCAN_DURATION) + local->beacon.mac_pl.superframe_order = request->interval; local->beacon.mac_pl.final_cap_slot = 0xf; local->beacon.mac_pl.battery_life_ext = 0; - /* TODO: Fill this field depending on the coordinator capacity */ + /* TODO: Fill this field with the coordinator situation in the network */ local->beacon.mac_pl.pan_coordinator = 1; local->beacon.mac_pl.assoc_permit = 1; + if (request->interval == IEEE802154_ACTIVE_SCAN_DURATION) + return 0; + /* Start the beacon work */ local->beacon_interval = mac802154_scan_get_channel_time(request->interval, -- cgit v1.2.3 From 822452fb6c696bb2331649ce6fbb49e49261cc71 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Fri, 24 Mar 2023 12:05:57 +0100 Subject: net: ieee802154: Handle limited devices with only datagram support Some devices, like HardMAC ones can be a bit limited in the way they handle mac commands. In particular, they might just not support it at all and instead only be able to transmit and receive regular data packets. In this case, they cannot be used for any of the internal management commands that we have introduced so far and must be flagged accordingly. 
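As a hedged illustration of the new flag's intended use (the driver-side line below is an assumption; only the flag name and the -EOPNOTSUPP behaviour come from this patch):

        /* Hypothetical HardMAC driver setup: declare that only plain data frames
         * are supported, so nl802154 rejects scan and beacon requests for this
         * PHY with -EOPNOTSUPP instead of attempting MAC commands. */
        phy->flags |= WPAN_PHY_FLAG_DATAGRAMS_ONLY;
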
Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20230324110558.90707-2-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- include/net/cfg802154.h | 3 +++ net/ieee802154/nl802154.c | 10 ++++++++++ 2 files changed, 13 insertions(+) (limited to 'net') diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 0c2778a836db..e00057984489 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -178,12 +178,15 @@ wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b) * setting. * @WPAN_PHY_FLAG_STATE_QUEUE_STOPPED: Indicates that the transmit queue was * temporarily stopped. + * @WPAN_PHY_FLAG_DATAGRAMS_ONLY: Indicates that transceiver is only able to + * send/receive datagrams. */ enum wpan_phy_flags { WPAN_PHY_FLAG_TXPOWER = BIT(1), WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2), WPAN_PHY_FLAG_CCA_MODE = BIT(3), WPAN_PHY_FLAG_STATE_QUEUE_STOPPED = BIT(4), + WPAN_PHY_FLAG_DATAGRAMS_ONLY = BIT(5), }; struct wpan_phy { diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index a20ac4bb48a4..cd688b51d977 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -1417,6 +1417,11 @@ static int nl802154_trigger_scan(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } + if (wpan_phy->flags & WPAN_PHY_FLAG_DATAGRAMS_ONLY) { + NL_SET_ERR_MSG(info->extack, "PHY only supports datagrams"); + return -EOPNOTSUPP; + } + request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) return -ENOMEM; @@ -1584,6 +1589,11 @@ nl802154_send_beacons(struct sk_buff *skb, struct genl_info *info) return -EPERM; } + if (wpan_phy->flags & WPAN_PHY_FLAG_DATAGRAMS_ONLY) { + NL_SET_ERR_MSG(info->extack, "PHY only supports datagrams"); + return -EOPNOTSUPP; + } + request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) return -ENOMEM; -- cgit v1.2.3 From 6ec7be9a2d2b09815f69fc2183ec31857eaa6e27 Mon Sep 17 00:00:00 2001 From: Kal Conley Date: Sun, 23 Apr 2023 20:01:56 +0200 Subject: xsk: Use pool->dma_pages to check for DMA Compare pool->dma_pages instead of pool->dma_pages_cnt to check for an active DMA mapping. pool->dma_pages needs to be read anyway to access the map so this compiles to more efficient code. 
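To make the micro-optimization concrete, here is the access pattern before and after (mirroring the hunks below; both fields already exist in struct xsk_buff_pool):

        /* Before: dma_pages_cnt is loaded only to decide whether a mapping exists. */
        if (pool->dma_pages_cnt)
                dma = pool->dma_pages[addr >> PAGE_SHIFT];

        /* After: the dma_pages pointer itself answers that question, and it has
         * to be loaded anyway to index the map, so one load is saved. */
        if (pool->dma_pages)
                dma = pool->dma_pages[addr >> PAGE_SHIFT];
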
Signed-off-by: Kal Conley Signed-off-by: Daniel Borkmann Reviewed-by: Xuan Zhuo Acked-by: Magnus Karlsson Link: https://lore.kernel.org/bpf/20230423180157.93559-1-kal.conley@dectris.com --- include/net/xsk_buff_pool.h | 2 +- net/xdp/xsk_buff_pool.c | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index d318c769b445..a8d7b8a3688a 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -180,7 +180,7 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool, if (likely(!cross_pg)) return false; - return pool->dma_pages_cnt && + return pool->dma_pages && !(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK); } diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index b2df1e0f8153..26f6d304451e 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -350,7 +350,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs) { struct xsk_dma_map *dma_map; - if (pool->dma_pages_cnt == 0) + if (!pool->dma_pages) return; dma_map = xp_find_dma_map(pool); @@ -364,6 +364,7 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs) __xp_dma_unmap(dma_map, attrs); kvfree(pool->dma_pages); + pool->dma_pages = NULL; pool->dma_pages_cnt = 0; pool->dev = NULL; } @@ -503,7 +504,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool) if (pool->unaligned) { xskb = pool->free_heads[--pool->free_heads_cnt]; xp_init_xskb_addr(xskb, pool, addr); - if (pool->dma_pages_cnt) + if (pool->dma_pages) xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr); } else { xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)]; @@ -569,7 +570,7 @@ static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xd if (pool->unaligned) { xskb = pool->free_heads[--pool->free_heads_cnt]; xp_init_xskb_addr(xskb, pool, addr); - if (pool->dma_pages_cnt) + if (pool->dma_pages) xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr); } else { xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)]; -- cgit v1.2.3 From bf6882aebd0ea67558764d44ddeffc7d253c6eb8 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 2 May 2023 11:05:43 -0700 Subject: bpf: Emit struct bpf_tcp_sock type in vmlinux BTF In one of our internal testing, we found a case where - uapi struct bpf_tcp_sock is in vmlinux.h where vmlinux.h is not generated from the testing kernel - struct bpf_tcp_sock is not in vmlinux BTF The above combination caused bpf load failure as the following memory access struct bpf_tcp_sock *tcp_sock = ...; ... tcp_sock->snd_cwnd ... needs CORE relocation but the relocation cannot be resolved since the kernel BTF does not have corresponding type. Similar to other previous cases (nf_conn___init, tcp6_sock, mctcp_sock, etc.), add the type to vmlinux BTF with BTF_EMIT_TYPE macro. 
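A hedged sketch of the access that needs the relocation, in the style of the snippet above (assuming a BPF program that obtained the pointer, e.g. via the bpf_tcp_sock() helper, and is compiled against a vmlinux.h from a different kernel; sk is assumed to be in scope):

        /* CO-RE can only resolve the snd_cwnd offset if the running kernel's BTF
         * describes struct bpf_tcp_sock, which this patch now guarantees. */
        struct bpf_tcp_sock *tp = bpf_tcp_sock(sk);
        __u32 cwnd = 0;

        if (tp)
                cwnd = tp->snd_cwnd;
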
Signed-off-by: Yonghong Song Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230502180543.1832140-1-yhs@fb.com --- net/core/filter.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/core/filter.c b/net/core/filter.c index d9ce04ca22ce..451b0ec7f242 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6916,6 +6916,8 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, FIELD)); \ } while (0) + BTF_TYPE_EMIT(struct bpf_tcp_sock); + switch (si->off) { case offsetof(struct bpf_tcp_sock, rtt_min): BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) != -- cgit v1.2.3 From 559ae55cfc334c91c62d2ea054a3345611363ee0 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Tue, 9 May 2023 09:12:07 +0200 Subject: net: skbuff: remove special handling for SLOB Commit c9929f0e344a ("mm/slob: remove CONFIG_SLOB") removes CONFIG_SLOB. Now, we can also remove special handling for socket buffers with the SLOB allocator. The code with HAVE_SKB_SMALL_HEAD_CACHE=1 is now the default behavior for all allocators. Remove an unnecessary distinction between SLOB and SLAB/SLUB allocator after the SLOB allocator is gone. Signed-off-by: Lukas Bulwahn Reviewed-by: Leon Romanovsky Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20230509071207.28942-1-lukas.bulwahn@gmail.com Signed-off-by: Jakub Kicinski --- net/core/skbuff.c | 17 ----------------- 1 file changed, 17 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 26a586007d8b..a82b33b1555e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -92,15 +92,7 @@ static struct kmem_cache *skbuff_fclone_cache __ro_after_init; static struct kmem_cache *skbuff_ext_cache __ro_after_init; #endif -/* skb_small_head_cache and related code is only supported - * for CONFIG_SLAB and CONFIG_SLUB. - * As soon as SLOB is removed from the kernel, we can clean up this. 
- */ -#if !defined(CONFIG_SLOB) -# define HAVE_SKB_SMALL_HEAD_CACHE 1 -#endif -#ifdef HAVE_SKB_SMALL_HEAD_CACHE static struct kmem_cache *skb_small_head_cache __ro_after_init; #define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER) @@ -117,7 +109,6 @@ static struct kmem_cache *skb_small_head_cache __ro_after_init; #define SKB_SMALL_HEAD_HEADROOM \ SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) -#endif /* HAVE_SKB_SMALL_HEAD_CACHE */ int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS; EXPORT_SYMBOL(sysctl_max_skb_frags); @@ -562,7 +553,6 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node, void *obj; obj_size = SKB_HEAD_ALIGN(*size); -#ifdef HAVE_SKB_SMALL_HEAD_CACHE if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE && !(flags & KMALLOC_NOT_NORMAL_BITS)) { obj = kmem_cache_alloc_node(skb_small_head_cache, @@ -576,7 +566,6 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node, obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node); goto out; } -#endif *size = obj_size = kmalloc_size_roundup(obj_size); /* * Try a regular allocation, when that fails and we're not entitled @@ -898,11 +887,9 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe) static void skb_kfree_head(void *head, unsigned int end_offset) { -#ifdef HAVE_SKB_SMALL_HEAD_CACHE if (end_offset == SKB_SMALL_HEAD_HEADROOM) kmem_cache_free(skb_small_head_cache, head); else -#endif kfree(head); } @@ -2160,7 +2147,6 @@ int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) if (likely(skb_end_offset(skb) == saved_end_offset)) return 0; -#ifdef HAVE_SKB_SMALL_HEAD_CACHE /* We can not change skb->end if the original or new value * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head(). */ @@ -2174,7 +2160,6 @@ int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) WARN_ON_ONCE(1); return 0; } -#endif shinfo = skb_shinfo(skb); @@ -4768,7 +4753,6 @@ void __init skb_init(void) 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); -#ifdef HAVE_SKB_SMALL_HEAD_CACHE /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes. * struct skb_shared_info is located at the end of skb->head, * and should not be copied to/from user. @@ -4780,7 +4764,6 @@ void __init skb_init(void) 0, SKB_SMALL_HEAD_HEADROOM, NULL); -#endif skb_extensions_init(); } -- cgit v1.2.3 From 81ac2722fa198fbfac62575259cf85163f3eeef0 Mon Sep 17 00:00:00 2001 From: Martin Wetterwald Date: Mon, 8 May 2023 19:44:47 +0200 Subject: net: ipconfig: Allow DNS to be overwritten by DHCPACK Some DHCP server implementations only send the important requested DHCP options in the final BOOTP reply (DHCPACK). One example is systemd-networkd. However, RFC2131, in section 4.3.1 states: > The server MUST return to the client: > [...] > o Parameters requested by the client, according to the following > rules: > > -- IF the server has been explicitly configured with a default > value for the parameter, the server MUST include that value > in an appropriate option in the 'option' field, ELSE I've reported the issue here: https://github.com/systemd/systemd/issues/27471 Linux PNP DHCP client implementation only takes into account the DNS servers received in the first BOOTP reply (DHCPOFFER). This usually isn't an issue as servers are required to put the same values in the DHCPOFFER and DHCPACK. However, RFC2131, in section 4.3.2 states: > Any configuration parameters in the DHCPACK message SHOULD NOT > conflict with those in the earlier DHCPOFFER message to which the > client is responding. 
The client SHOULD use the parameters in the > DHCPACK message for configuration. When making Linux PNP DHCP client (cmdline ip=dhcp) interact with systemd-networkd DHCP server, an interesting "protocol misunderstanding" happens: Because DNS servers were only specified in the DHCPACK and not in the DHCPOFFER, Linux will not catch the correct DNS servers: in the first BOOTP reply (DHCPOFFER), it sees that there is no DNS, and sets as fallback the IP of the DHCP server itself. When the second BOOTP reply comes (DHCPACK), it's already too late: the kernel will not overwrite the fallback setting it has set previously. This patch makes the kernel overwrite its DNS fallback by DNS servers specified in the DHCPACK if any. Signed-off-by: Martin Wetterwald Signed-off-by: David S. Miller --- net/ipv4/ipconfig.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index e90bc0aa85c7..202fa1943ccd 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -173,6 +173,9 @@ static int ic_proto_have_if __initdata; /* MTU for boot device */ static int ic_dev_mtu __initdata; +/* DHCPACK can overwrite DNS if fallback was set upon first BOOTP reply */ +static int ic_nameservers_fallback __initdata; + #ifdef IPCONFIG_DYNAMIC static DEFINE_SPINLOCK(ic_recv_lock); static volatile int ic_got_reply __initdata; /* Proto(s) that replied */ @@ -938,7 +941,8 @@ static void __init ic_do_bootp_ext(u8 *ext) if (servers > CONF_NAMESERVERS_MAX) servers = CONF_NAMESERVERS_MAX; for (i = 0; i < servers; i++) { - if (ic_nameservers[i] == NONE) + if (ic_nameservers[i] == NONE || + ic_nameservers_fallback) memcpy(&ic_nameservers[i], ext+1+4*i, 4); } break; @@ -1158,8 +1162,10 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str ic_addrservaddr = b->iph.saddr; if (ic_gateway == NONE && b->relay_ip) ic_gateway = b->relay_ip; - if (ic_nameservers[0] == NONE) + if (ic_nameservers[0] == NONE) { ic_nameservers[0] = ic_servaddr; + ic_nameservers_fallback = 1; + } ic_got_reply = IC_BOOTP; drop_unlock: -- cgit v1.2.3 From 059fa492027e99c167f4c53822c0900ca9bc254a Mon Sep 17 00:00:00 2001 From: "Ilia.Gavrilov" Date: Wed, 10 May 2023 09:23:40 +0000 Subject: sctp: fix a potential OOB access in sctp_sched_set_sched() The 'sched' index value must be checked before accessing an element of the 'sctp_sched_ops' array. Otherwise, it can lead to OOB access. Note that it's harmless since the 'sched' parameter is checked before calling 'sctp_sched_set_sched'. Found by InfoTeCS on behalf of Linux Verification Center (linuxtesting.org) with SVACE. Acked-by: Marcelo Ricardo Leitner Reviewed-by: Xin Long Reviewed-by: Simon Horman Signed-off-by: Ilia.Gavrilov Signed-off-by: David S. 
Miller --- net/sctp/stream_sched.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c index e843760e9aaa..54afbe4fb087 100644 --- a/net/sctp/stream_sched.c +++ b/net/sctp/stream_sched.c @@ -148,18 +148,19 @@ static void sctp_sched_free_sched(struct sctp_stream *stream) int sctp_sched_set_sched(struct sctp_association *asoc, enum sctp_sched_type sched) { - struct sctp_sched_ops *n = sctp_sched_ops[sched]; struct sctp_sched_ops *old = asoc->outqueue.sched; struct sctp_datamsg *msg = NULL; + struct sctp_sched_ops *n; struct sctp_chunk *ch; int i, ret = 0; - if (old == n) - return ret; - if (sched > SCTP_SS_MAX) return -EINVAL; + n = sctp_sched_ops[sched]; + if (old == n) + return ret; + if (old) sctp_sched_free_sched(&asoc->stream); -- cgit v1.2.3 From ccce324dabfe2143519daf50ed8b1ef1d0c542f7 Mon Sep 17 00:00:00 2001 From: David Morley Date: Tue, 9 May 2023 18:05:58 +0000 Subject: tcp: make the first N SYN RTO backoffs linear Currently the SYN RTO schedule follows an exponential backoff scheme, which can be unnecessarily conservative in cases where there are link failures. In such cases, it's better to aggressively try to retransmit packets, so it takes routers less time to find a repath with a working link. We chose a default value for this sysctl of 4, to follow the macOS and IOS backoff scheme of 1,1,1,1,1,2,4,8, ... MacOS and IOS have used this backoff schedule for over a decade, since before this 2009 IETF presentation discussed the behavior: https://www.ietf.org/proceedings/75/slides/tcpm-1.pdf This commit makes the SYN RTO schedule start with a number of linear backoffs given by the following sysctl: * tcp_syn_linear_timeouts This changes the SYN RTO scheme to be: init_rto_val for tcp_syn_linear_timeouts, exp backoff starting at init_rto_val For example if init_rto_val = 1 and tcp_syn_linear_timeouts = 2, our backoff scheme would be: 1, 1, 1, 2, 4, 8, 16, ... Signed-off-by: David Morley Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Tested-by: David Morley Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20230509180558.2541885-1-morleyd.kernel@gmail.com Signed-off-by: Paolo Abeni --- Documentation/networking/ip-sysctl.rst | 17 ++++++++++++++--- include/net/netns/ipv4.h | 1 + net/ipv4/sysctl_net_ipv4.c | 10 ++++++++++ net/ipv4/tcp_ipv4.c | 1 + net/ipv4/tcp_timer.c | 17 +++++++++++++---- 5 files changed, 39 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index 6ec06a33688a..3f6d3d5f5626 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -881,9 +881,10 @@ tcp_fastopen_key - list of comma separated 32-digit hexadecimal INTEGERs tcp_syn_retries - INTEGER Number of times initial SYNs for an active TCP connection attempt will be retransmitted. Should not be higher than 127. Default value - is 6, which corresponds to 63seconds till the last retransmission - with the current initial RTO of 1second. With this the final timeout - for an active TCP connection attempt will happen after 127seconds. + is 6, which corresponds to 67seconds (with tcp_syn_linear_timeouts = 4) + till the last retransmission with the current initial RTO of 1second. + With this the final timeout for an active TCP connection attempt + will happen after 131seconds. tcp_timestamps - INTEGER Enable timestamps as defined in RFC1323. 
@@ -946,6 +947,16 @@ tcp_pacing_ca_ratio - INTEGER Default: 120 +tcp_syn_linear_timeouts - INTEGER + The number of times for an active TCP connection to retransmit SYNs with + a linear backoff timeout before defaulting to an exponential backoff + timeout. This has no effect on SYNACK at the passive TCP side. + + With an initial RTO of 1 and tcp_syn_linear_timeouts = 4 we would + expect SYN RTOs to be: 1, 1, 1, 1, 1, 2, 4, ... (4 linear timeouts, + and the first exponential backoff using 2^0 * initial_RTO). + Default: 4 + tcp_tso_win_divisor - INTEGER This allows control over what percentage of the congestion window can be consumed by a single TSO frame. diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index db762e35aca9..a4efb7a2796c 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -194,6 +194,7 @@ struct netns_ipv4 { int sysctl_udp_rmem_min; u8 sysctl_fib_notify_on_flag_change; + u8 sysctl_tcp_syn_linear_timeouts; #ifdef CONFIG_NET_L3_MASTER_DEV u8 sysctl_udp_l3mdev_accept; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 40fe70fc2015..6ae3345a3bdf 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -34,6 +34,7 @@ static int ip_ttl_min = 1; static int ip_ttl_max = 255; static int tcp_syn_retries_min = 1; static int tcp_syn_retries_max = MAX_TCP_SYNCNT; +static int tcp_syn_linear_timeouts_max = MAX_TCP_SYNCNT; static int ip_ping_group_range_min[] = { 0, 0 }; static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; static u32 u32_max_div_HZ = UINT_MAX / HZ; @@ -1470,6 +1471,15 @@ static struct ctl_table ipv4_net_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = &tcp_plb_max_cong_thresh, }, + { + .procname = "tcp_syn_linear_timeouts", + .data = &init_net.ipv4.sysctl_tcp_syn_linear_timeouts, + .maxlen = sizeof(u8), + .mode = 0644, + .proc_handler = proc_dou8vec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &tcp_syn_linear_timeouts_max, + }, { } }; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 39bda2b1066e..db24ed8f8509 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -3275,6 +3275,7 @@ static int __net_init tcp_sk_init(struct net *net) else net->ipv4.tcp_congestion_control = &tcp_reno; + net->ipv4.sysctl_tcp_syn_linear_timeouts = 4; return 0; } diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index b839c2f91292..0d93a2573807 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -234,14 +234,19 @@ static int tcp_write_timeout(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); bool expired = false, do_reset; - int retry_until; + int retry_until, max_retransmits; if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { if (icsk->icsk_retransmits) __dst_negative_advice(sk); retry_until = icsk->icsk_syn_retries ? 
: READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); - expired = icsk->icsk_retransmits >= retry_until; + + max_retransmits = retry_until; + if (sk->sk_state == TCP_SYN_SENT) + max_retransmits += READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts); + + expired = icsk->icsk_retransmits >= max_retransmits; } else { if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) { /* Black hole detection */ @@ -577,8 +582,12 @@ out_reset_timer: icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) { icsk->icsk_backoff = 0; icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX); - } else { - /* Use normal (exponential) backoff */ + } else if (sk->sk_state != TCP_SYN_SENT || + icsk->icsk_backoff > + READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) { + /* Use normal (exponential) backoff unless linear timeouts are + * activated. + */ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); } inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, -- cgit v1.2.3 From b16d76fe9a27d337c16972a71d959bcf0c96c2e6 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:47:09 -0400 Subject: net/handshake: Remove unneeded check from handshake_dup() handshake_req_submit() now verifies that the socket has a file. Fixes: 3b3009ea8abb ("net/handshake: Create a NETLINK service for handling handshake requests") Reviewed-by: Simon Horman Signed-off-by: Chuck Lever Signed-off-by: David S. Miller --- net/handshake/netlink.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'net') diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c index 35c9c445e0b8..7ec8a76c3c8a 100644 --- a/net/handshake/netlink.c +++ b/net/handshake/netlink.c @@ -99,9 +99,6 @@ static int handshake_dup(struct socket *sock) struct file *file; int newfd; - if (!sock->file) - return -EBADF; - file = get_file(sock->file); newfd = get_unused_fd_flags(O_CLOEXEC); if (newfd < 0) { -- cgit v1.2.3 From 2200c1a87074548f0a36e5aae5ad283ce2ac43b2 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:47:40 -0400 Subject: net/handshake: Fix handshake_dup() ref counting If get_unused_fd_flags() fails, we ended up calling fput(sock->file) twice. Reported-by: Dan Carpenter Suggested-by: Paolo Abeni Fixes: 3b3009ea8abb ("net/handshake: Create a NETLINK service for handling handshake requests") Signed-off-by: Chuck Lever Signed-off-by: David S. Miller --- net/handshake/netlink.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c index 7ec8a76c3c8a..f5dc170689d9 100644 --- a/net/handshake/netlink.c +++ b/net/handshake/netlink.c @@ -139,15 +139,16 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info) goto out_complete; } err = req->hr_proto->hp_accept(req, info, fd); - if (err) + if (err) { + fput(sock->file); goto out_complete; + } trace_handshake_cmd_accept(net, req, req->hr_sk, fd); return 0; out_complete: handshake_complete(req, -EIO, NULL); - fput(sock->file); out_status: trace_handshake_cmd_accept_err(net, req, NULL, err); return err; -- cgit v1.2.3 From 7301034026d0efe0e86af0cb685405da120583d4 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:48:13 -0400 Subject: net/handshake: Fix uninitialized local variable trace_handshake_cmd_done_err() simply records the pointer in @req, so initializing it to NULL is sufficient and safe. 
Reported-by: Dan Carpenter Fixes: 3b3009ea8abb ("net/handshake: Create a NETLINK service for handling handshake requests") Reviewed-by: Simon Horman Signed-off-by: Chuck Lever Signed-off-by: David S. Miller --- net/handshake/netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c index f5dc170689d9..16a4bde648ba 100644 --- a/net/handshake/netlink.c +++ b/net/handshake/netlink.c @@ -157,8 +157,8 @@ out_status: int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info) { struct net *net = sock_net(skb->sk); + struct handshake_req *req = NULL; struct socket *sock = NULL; - struct handshake_req *req; int fd, status, err; if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_DONE_SOCKFD)) -- cgit v1.2.3 From e36a93e1723eb09c8393604ddc8ef2966f592597 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:48:45 -0400 Subject: net/handshake: handshake_genl_notify() shouldn't ignore @flags Reported-by: Dan Carpenter Fixes: 3b3009ea8abb ("net/handshake: Create a NETLINK service for handling handshake requests") Reviewed-by: Simon Horman Signed-off-by: Chuck Lever Signed-off-by: David S. Miller --- net/handshake/netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c index 16a4bde648ba..1086653e1fad 100644 --- a/net/handshake/netlink.c +++ b/net/handshake/netlink.c @@ -48,7 +48,7 @@ int handshake_genl_notify(struct net *net, const struct handshake_proto *proto, proto->hp_handler_class)) return -ESRCH; - msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); + msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, flags); if (!msg) return -ENOMEM; -- cgit v1.2.3 From f921bd41001ccff2249f5f443f2917f7ef937daf Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:49:17 -0400 Subject: net/handshake: Unpin sock->file if a handshake is cancelled If user space never calls DONE, sock->file's reference count remains elevated. Enable sock->file to be freed eventually in this case. Reported-by: Jakub Kacinski Fixes: 3b3009ea8abb ("net/handshake: Create a NETLINK service for handling handshake requests") Signed-off-by: Chuck Lever Signed-off-by: David S. 
Miller --- net/handshake/handshake.h | 1 + net/handshake/request.c | 4 ++++ 2 files changed, 5 insertions(+) (limited to 'net') diff --git a/net/handshake/handshake.h b/net/handshake/handshake.h index 4dac965c99df..8aeaadca844f 100644 --- a/net/handshake/handshake.h +++ b/net/handshake/handshake.h @@ -31,6 +31,7 @@ struct handshake_req { struct list_head hr_list; struct rhash_head hr_rhash; unsigned long hr_flags; + struct file *hr_file; const struct handshake_proto *hr_proto; struct sock *hr_sk; void (*hr_odestruct)(struct sock *sk); diff --git a/net/handshake/request.c b/net/handshake/request.c index 94d5cef3e048..d78d41abb3d9 100644 --- a/net/handshake/request.c +++ b/net/handshake/request.c @@ -239,6 +239,7 @@ int handshake_req_submit(struct socket *sock, struct handshake_req *req, } req->hr_odestruct = req->hr_sk->sk_destruct; req->hr_sk->sk_destruct = handshake_sk_destruct; + req->hr_file = sock->file; ret = -EOPNOTSUPP; net = sock_net(req->hr_sk); @@ -334,6 +335,9 @@ bool handshake_req_cancel(struct sock *sk) return false; } + /* Request accepted and waiting for DONE */ + fput(req->hr_file); + out_true: trace_handshake_cancel(net, req, sk); -- cgit v1.2.3 From eefca7ec514262aef08d0ef261552f2f604bd851 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 11 May 2023 11:49:50 -0400 Subject: net/handshake: Enable the SNI extension to work properly Enable the upper layer protocol to specify the SNI peername. This avoids the need for tlshd to use a DNS lookup, which can return a hostname that doesn't match the incoming certificate's SubjectName. Fixes: 2fd5532044a8 ("net/handshake: Add a kernel API for requesting a TLSv1.3 handshake") Reviewed-by: Simon Horman Signed-off-by: Chuck Lever Signed-off-by: David S. Miller --- Documentation/netlink/specs/handshake.yaml | 4 ++++ Documentation/networking/tls-handshake.rst | 5 +++++ include/net/handshake.h | 1 + include/uapi/linux/handshake.h | 1 + net/handshake/tlshd.c | 8 ++++++++ 5 files changed, 19 insertions(+) (limited to 'net') diff --git a/Documentation/netlink/specs/handshake.yaml b/Documentation/netlink/specs/handshake.yaml index 614f1a585511..6d89e30f5fd5 100644 --- a/Documentation/netlink/specs/handshake.yaml +++ b/Documentation/netlink/specs/handshake.yaml @@ -68,6 +68,9 @@ attribute-sets: type: nest nested-attributes: x509 multi-attr: true + - + name: peername + type: string - name: done attributes: @@ -105,6 +108,7 @@ operations: - auth-mode - peer-identity - certificate + - peername - name: done doc: Handler reports handshake completion diff --git a/Documentation/networking/tls-handshake.rst b/Documentation/networking/tls-handshake.rst index a2817a88e905..6f5ea1646a47 100644 --- a/Documentation/networking/tls-handshake.rst +++ b/Documentation/networking/tls-handshake.rst @@ -53,6 +53,7 @@ fills in a structure that contains the parameters of the request: struct socket *ta_sock; tls_done_func_t ta_done; void *ta_data; + const char *ta_peername; unsigned int ta_timeout_ms; key_serial_t ta_keyring; key_serial_t ta_my_cert; @@ -71,6 +72,10 @@ instantiated a struct file in sock->file. has completed. Further explanation of this function is in the "Handshake Completion" sesction below. +The consumer can provide a NUL-terminated hostname in the @ta_peername +field that is sent as part of ClientHello. If no peername is provided, +the DNS hostname associated with the server's IP address is used instead. + The consumer can fill in the @ta_timeout_ms field to force the servicing handshake agent to exit after a number of milliseconds. 
This enables the socket to be fully closed once both the kernel and the handshake agent diff --git a/include/net/handshake.h b/include/net/handshake.h index 3352b1ab43b3..2e26e436e85f 100644 --- a/include/net/handshake.h +++ b/include/net/handshake.h @@ -24,6 +24,7 @@ struct tls_handshake_args { struct socket *ta_sock; tls_done_func_t ta_done; void *ta_data; + const char *ta_peername; unsigned int ta_timeout_ms; key_serial_t ta_keyring; key_serial_t ta_my_cert; diff --git a/include/uapi/linux/handshake.h b/include/uapi/linux/handshake.h index 1de4d0b95325..3d7ea58778c9 100644 --- a/include/uapi/linux/handshake.h +++ b/include/uapi/linux/handshake.h @@ -44,6 +44,7 @@ enum { HANDSHAKE_A_ACCEPT_AUTH_MODE, HANDSHAKE_A_ACCEPT_PEER_IDENTITY, HANDSHAKE_A_ACCEPT_CERTIFICATE, + HANDSHAKE_A_ACCEPT_PEERNAME, __HANDSHAKE_A_ACCEPT_MAX, HANDSHAKE_A_ACCEPT_MAX = (__HANDSHAKE_A_ACCEPT_MAX - 1) diff --git a/net/handshake/tlshd.c b/net/handshake/tlshd.c index fcbeb63b4eb1..b735f5cced2f 100644 --- a/net/handshake/tlshd.c +++ b/net/handshake/tlshd.c @@ -31,6 +31,7 @@ struct tls_handshake_req { int th_type; unsigned int th_timeout_ms; int th_auth_mode; + const char *th_peername; key_serial_t th_keyring; key_serial_t th_certificate; key_serial_t th_privkey; @@ -48,6 +49,7 @@ tls_handshake_req_init(struct handshake_req *req, treq->th_timeout_ms = args->ta_timeout_ms; treq->th_consumer_done = args->ta_done; treq->th_consumer_data = args->ta_data; + treq->th_peername = args->ta_peername; treq->th_keyring = args->ta_keyring; treq->th_num_peerids = 0; treq->th_certificate = TLS_NO_CERT; @@ -214,6 +216,12 @@ static int tls_handshake_accept(struct handshake_req *req, ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_MESSAGE_TYPE, treq->th_type); if (ret < 0) goto out_cancel; + if (treq->th_peername) { + ret = nla_put_string(msg, HANDSHAKE_A_ACCEPT_PEERNAME, + treq->th_peername); + if (ret < 0) + goto out_cancel; + } if (treq->th_timeout_ms) { ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_TIMEOUT, treq->th_timeout_ms); if (ret < 0) -- cgit v1.2.3 From 2598619e012cee5273a2821441b9a051ad931249 Mon Sep 17 00:00:00 2001 From: Alexander Mikhalitsyn Date: Thu, 11 May 2023 15:25:06 +0200 Subject: sctp: add bpf_bypass_getsockopt proto callback Implement ->bpf_bypass_getsockopt proto callback and filter out SCTP_SOCKOPT_PEELOFF, SCTP_SOCKOPT_PEELOFF_FLAGS and SCTP_SOCKOPT_CONNECTX3 socket options from running eBPF hook on them. SCTP_SOCKOPT_PEELOFF and SCTP_SOCKOPT_PEELOFF_FLAGS options do fd_install(), and if BPF_CGROUP_RUN_PROG_GETSOCKOPT hook returns an error after success of the original handler sctp_getsockopt(...), userspace will receive an error from getsockopt syscall and will be not aware that fd was successfully installed into a fdtable. As pointed by Marcelo Ricardo Leitner it seems reasonable to skip bpf getsockopt hook for SCTP_SOCKOPT_CONNECTX3 sockopt too. Because internaly, it triggers connect() and if error is masked then userspace will be confused. 
This patch was born as a result of discussion around a new SCM_PIDFD interface: https://lore.kernel.org/all/20230413133355.350571-3-aleksandr.mikhalitsyn@canonical.com/ Fixes: 0d01da6afc54 ("bpf: implement getsockopt and setsockopt hooks") Cc: Daniel Borkmann Cc: Christian Brauner Cc: Stanislav Fomichev Cc: Neil Horman Cc: Marcelo Ricardo Leitner Cc: Xin Long Cc: linux-sctp@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: netdev@vger.kernel.org Suggested-by: Stanislav Fomichev Acked-by: Stanislav Fomichev Signed-off-by: Alexander Mikhalitsyn Acked-by: Xin Long Signed-off-by: David S. Miller --- net/sctp/socket.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'net') diff --git a/net/sctp/socket.c b/net/sctp/socket.c index cda8c2874691..a68e1d541b12 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -8281,6 +8281,22 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, return retval; } +static bool sctp_bpf_bypass_getsockopt(int level, int optname) +{ + if (level == SOL_SCTP) { + switch (optname) { + case SCTP_SOCKOPT_PEELOFF: + case SCTP_SOCKOPT_PEELOFF_FLAGS: + case SCTP_SOCKOPT_CONNECTX3: + return true; + default: + return false; + } + } + + return false; +} + static int sctp_hash(struct sock *sk) { /* STUB */ @@ -9650,6 +9666,7 @@ struct proto sctp_prot = { .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, + .bpf_bypass_getsockopt = sctp_bpf_bypass_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, @@ -9705,6 +9722,7 @@ struct proto sctpv6_prot = { .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, + .bpf_bypass_getsockopt = sctp_bpf_bypass_getsockopt, .sendmsg = sctp_sendmsg, .recvmsg = sctp_recvmsg, .bind = sctp_bind, -- cgit v1.2.3 From b51f4113ebb02011f0ca86abc3134b28d2071b6a Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Thu, 11 May 2023 09:12:12 +0800 Subject: net: introduce and use skb_frag_fill_page_desc() Most users use __skb_frag_set_page()/skb_frag_off_set()/ skb_frag_size_set() to fill the page desc for a skb frag. Introduce skb_frag_fill_page_desc() to do that. net/bpf/test_run.c does not call skb_frag_off_set() to set the offset, "copy_from_user(page_address(page), ...)" and 'shinfo' being part of the 'data' kzalloced in bpf_test_init() suggest that it is assuming offset to be initialized as zero, so call skb_frag_fill_page_desc() with offset being zero for this case. Also, skb_frag_set_page() is not used anymore, so remove it. Signed-off-by: Yunsheng Lin Reviewed-by: Leon Romanovsky Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 6 ++--- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 ++-- drivers/net/ethernet/chelsio/cxgb3/sge.c | 5 ++-- drivers/net/ethernet/emulex/benet/be_main.c | 32 +++++++++++++----------- drivers/net/ethernet/freescale/enetc/enetc.c | 5 ++-- drivers/net/ethernet/fungible/funeth/funeth_rx.c | 5 ++-- drivers/net/ethernet/marvell/mvneta.c | 5 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 4 +-- drivers/net/ethernet/sun/cassini.c | 8 ++---- drivers/net/virtio_net.c | 4 +-- drivers/net/vmxnet3/vmxnet3_drv.c | 4 +-- drivers/net/xen-netback/netback.c | 4 +-- include/linux/skbuff.h | 27 ++++++++------------ net/bpf/test_run.c | 3 +-- net/core/gro.c | 4 +-- net/core/pktgen.c | 13 ++++++---- net/core/skbuff.c | 7 +++--- net/tls/tls_device.c | 10 +++----- net/xfrm/xfrm_ipcomp.c | 5 +--- 19 files changed, 64 insertions(+), 92 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 7f933175cbda..4de22eed099a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -532,10 +532,10 @@ static bool aq_add_rx_fragment(struct device *dev, buff_->rxdata.pg_off, buff_->len, DMA_FROM_DEVICE); - skb_frag_off_set(frag, buff_->rxdata.pg_off); - skb_frag_size_set(frag, buff_->len); sinfo->xdp_frags_size += buff_->len; - __skb_frag_set_page(frag, buff_->rxdata.page); + skb_frag_fill_page_desc(frag, buff_->rxdata.page, + buff_->rxdata.pg_off, + buff_->len); buff_->is_cleaned = 1; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index dcd9367f05af..efaff5018af8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1085,9 +1085,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp, RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; cons_rx_buf = &rxr->rx_agg_ring[cons]; - skb_frag_off_set(frag, cons_rx_buf->offset); - skb_frag_size_set(frag, frag_len); - __skb_frag_set_page(frag, cons_rx_buf->page); + skb_frag_fill_page_desc(frag, cons_rx_buf->page, + cons_rx_buf->offset, frag_len); shinfo->nr_frags = i + 1; __clear_bit(cons, rxr->rx_agg_bmap); diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index efa7f401529e..2e9a74fe0970 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -2184,9 +2184,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, len -= offset; rx_frag += nr_frags; - __skb_frag_set_page(rx_frag, sd->pg_chunk.page); - skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset); - skb_frag_size_set(rx_frag, len); + skb_frag_fill_page_desc(rx_frag, sd->pg_chunk.page, + sd->pg_chunk.offset + offset, len); skb->len += len; skb->data_len += len; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 7e408bcc88de..3164ed205cf7 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2343,11 +2343,10 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, hdr_len = ETH_HLEN; memcpy(skb->data, start, hdr_len); skb_shinfo(skb)->nr_frags = 1; - skb_frag_set_page(skb, 0, page_info->page); - skb_frag_off_set(&skb_shinfo(skb)->frags[0], - page_info->page_offset + hdr_len); - skb_frag_size_set(&skb_shinfo(skb)->frags[0], - curr_frag_len - hdr_len); + 
skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[0], + page_info->page, + page_info->page_offset + hdr_len, + curr_frag_len - hdr_len); skb->data_len = curr_frag_len - hdr_len; skb->truesize += rx_frag_size; skb->tail += hdr_len; @@ -2369,16 +2368,17 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, if (page_info->page_offset == 0) { /* Fresh page */ j++; - skb_frag_set_page(skb, j, page_info->page); - skb_frag_off_set(&skb_shinfo(skb)->frags[j], - page_info->page_offset); - skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); + skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j], + page_info->page, + page_info->page_offset, + curr_frag_len); skb_shinfo(skb)->nr_frags++; } else { put_page(page_info->page); + skb_frag_size_add(&skb_shinfo(skb)->frags[j], + curr_frag_len); } - skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len); skb->len += curr_frag_len; skb->data_len += curr_frag_len; skb->truesize += rx_frag_size; @@ -2451,14 +2451,16 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, if (i == 0 || page_info->page_offset == 0) { /* First frag or Fresh page */ j++; - skb_frag_set_page(skb, j, page_info->page); - skb_frag_off_set(&skb_shinfo(skb)->frags[j], - page_info->page_offset); - skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); + skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j], + page_info->page, + page_info->page_offset, + curr_frag_len); } else { put_page(page_info->page); + skb_frag_size_add(&skb_shinfo(skb)->frags[j], + curr_frag_len); } - skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len); + skb->truesize += rx_frag_size; remaining -= curr_frag_len; memset(page_info, 0, sizeof(*page_info)); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 3c4fa26f0f9b..63854294ac33 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1445,9 +1445,8 @@ static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i, xdp_buff_set_frag_pfmemalloc(xdp_buff); frag = &shinfo->frags[shinfo->nr_frags]; - skb_frag_off_set(frag, rx_swbd->page_offset); - skb_frag_size_set(frag, size); - __skb_frag_set_page(frag, rx_swbd->page); + skb_frag_fill_page_desc(frag, rx_swbd->page, rx_swbd->page_offset, + size); shinfo->nr_frags++; } diff --git a/drivers/net/ethernet/fungible/funeth/funeth_rx.c b/drivers/net/ethernet/fungible/funeth/funeth_rx.c index 29a6c2ede43a..7e2584895de3 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_rx.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_rx.c @@ -323,9 +323,8 @@ static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len, if (ref_ok) ref_ok |= buf->node; - __skb_frag_set_page(frags, buf->page); - skb_frag_off_set(frags, q->buf_offset); - skb_frag_size_set(frags++, frag_len); + skb_frag_fill_page_desc(frags++, buf->page, q->buf_offset, + frag_len); tot_len -= frag_len; if (!tot_len) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 6c6b66d3ea6e..e2abc00d0472 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2376,9 +2376,8 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++]; - skb_frag_off_set(frag, pp->rx_offset_correction); - skb_frag_size_set(frag, data_len); - __skb_frag_set_page(frag, page); + skb_frag_fill_page_desc(frag, page, + pp->rx_offset_correction, data_len); if 
(!xdp_buff_has_frags(xdp)) { sinfo->xdp_frags_size = *size; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 69634829558e..704b022cd1f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -491,9 +491,7 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf } frag = &sinfo->frags[sinfo->nr_frags++]; - __skb_frag_set_page(frag, frag_page->page); - skb_frag_off_set(frag, frag_offset); - skb_frag_size_set(frag, len); + skb_frag_fill_page_desc(frag, frag_page->page, frag_offset, len); if (page_is_pfmemalloc(frag_page->page)) xdp_buff_set_frag_pfmemalloc(xdp); diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 4ef05bad4613..2d52f54ebb45 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -1998,10 +1998,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, skb->truesize += hlen - swivel; skb->len += hlen - swivel; - __skb_frag_set_page(frag, page->buffer); + skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel); __skb_frag_ref(frag); - skb_frag_off_set(frag, off); - skb_frag_size_set(frag, hlen - swivel); /* any more data? */ if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { @@ -2024,10 +2022,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, skb->len += hlen; frag++; - __skb_frag_set_page(frag, page->buffer); + skb_frag_fill_page_desc(frag, page->buffer, 0, hlen); __skb_frag_ref(frag); - skb_frag_off_set(frag, 0); - skb_frag_size_set(frag, hlen); RX_USED_ADD(page, hlen + cp->crc_size); } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 97241006d64a..29eccc8ff41f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1272,9 +1272,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev, } frag = &shinfo->frags[shinfo->nr_frags++]; - __skb_frag_set_page(frag, page); - skb_frag_off_set(frag, offset); - skb_frag_size_set(frag, len); + skb_frag_fill_page_desc(frag, page, offset, len); if (page_is_pfmemalloc(page)) xdp_buff_set_frag_pfmemalloc(xdp); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index f2b76ee866a4..7fa74b8b2100 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -686,9 +686,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); - __skb_frag_set_page(frag, rbi->page); - skb_frag_off_set(frag, 0); - skb_frag_size_set(frag, rcd->len); + skb_frag_fill_page_desc(frag, rbi->page, 0, rcd->len); skb->data_len += rcd->len; skb->truesize += PAGE_SIZE; skb_shinfo(skb)->nr_frags++; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index c1501f41e2d8..3d79b35eb577 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1128,9 +1128,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s BUG(); offset += len; - __skb_frag_set_page(&frags[i], page); - skb_frag_off_set(&frags[i], 0); - skb_frag_size_set(&frags[i], len); + skb_frag_fill_page_desc(&frags[i], page, 0, len); } /* Release all the original (foreign) frags. 
*/ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 738776ab8838..30be21c7d05f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2411,6 +2411,15 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb) return skb_headlen(skb) + __skb_pagelen(skb); } +static inline void skb_frag_fill_page_desc(skb_frag_t *frag, + struct page *page, + int off, int size) +{ + frag->bv_page = page; + frag->bv_offset = off; + skb_frag_size_set(frag, size); +} + static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo, int i, struct page *page, int off, int size) @@ -2422,9 +2431,7 @@ static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo, * that not all callers have unique ownership of the page but rely * on page_is_pfmemalloc doing the right thing(tm). */ - frag->bv_page = page; - frag->bv_offset = off; - skb_frag_size_set(frag, size); + skb_frag_fill_page_desc(frag, page, off, size); } /** @@ -3496,20 +3503,6 @@ static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) frag->bv_page = page; } -/** - * skb_frag_set_page - sets the page contained in a paged fragment of an skb - * @skb: the buffer - * @f: the fragment offset - * @page: the page to set - * - * Sets the @f'th fragment of @skb to contain @page. - */ -static inline void skb_frag_set_page(struct sk_buff *skb, int f, - struct page *page) -{ - __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page); -} - bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio); /** diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index e79e3a415ca9..98143b86a9dd 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -1415,11 +1415,10 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, } frag = &sinfo->frags[sinfo->nr_frags++]; - __skb_frag_set_page(frag, page); data_len = min_t(u32, kattr->test.data_size_in - size, PAGE_SIZE); - skb_frag_size_set(frag, data_len); + skb_frag_fill_page_desc(frag, page, 0, data_len); if (copy_from_user(page_address(page), data_in + size, data_len)) { diff --git a/net/core/gro.c b/net/core/gro.c index 2d84165cb4f1..6783a47a6136 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -239,9 +239,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags; - __skb_frag_set_page(frag, page); - skb_frag_off_set(frag, first_offset); - skb_frag_size_set(frag, first_size); + skb_frag_fill_page_desc(frag, page, first_offset, first_size); memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags); /* We dont need to clear skbinfo->nr_frags here */ diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 760238196db1..f56b8d697014 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2785,14 +2785,17 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb, break; } get_page(pkt_dev->page); - skb_frag_set_page(skb, i, pkt_dev->page); - skb_frag_off_set(&skb_shinfo(skb)->frags[i], 0); + /*last fragment, fill rest of data*/ if (i == (frags - 1)) - skb_frag_size_set(&skb_shinfo(skb)->frags[i], - (datalen < PAGE_SIZE ? datalen : PAGE_SIZE)); + skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i], + pkt_dev->page, 0, + (datalen < PAGE_SIZE ? 
+ datalen : PAGE_SIZE)); else - skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len); + skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i], + pkt_dev->page, 0, frag_len); + datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]); skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]); skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 01b48e68aca0..6724a84ebb09 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4234,10 +4234,9 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb) struct page *page; page = virt_to_head_page(frag_skb->head); - __skb_frag_set_page(&head_frag, page); - skb_frag_off_set(&head_frag, frag_skb->data - - (unsigned char *)page_address(page)); - skb_frag_size_set(&head_frag, skb_headlen(frag_skb)); + skb_frag_fill_page_desc(&head_frag, page, frag_skb->data - + (unsigned char *)page_address(page), + skb_headlen(frag_skb)); return head_frag; } diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index a7cc4f9faac2..daeff54bdbfa 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -268,9 +268,8 @@ static void tls_append_frag(struct tls_record_info *record, skb_frag_size_add(frag, size); } else { ++frag; - __skb_frag_set_page(frag, pfrag->page); - skb_frag_off_set(frag, pfrag->offset); - skb_frag_size_set(frag, size); + skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset, + size); ++record->num_frags; get_page(pfrag->page); } @@ -357,9 +356,8 @@ static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx, return -ENOMEM; frag = &record->frags[0]; - __skb_frag_set_page(frag, pfrag->page); - skb_frag_off_set(frag, pfrag->offset); - skb_frag_size_set(frag, prepend_size); + skb_frag_fill_page_desc(frag, pfrag->page, pfrag->offset, + prepend_size); get_page(pfrag->page); pfrag->offset += prepend_size; diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 80143360bf09..9c0fa0e1786a 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -74,14 +74,11 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) if (!page) return -ENOMEM; - __skb_frag_set_page(frag, page); - len = PAGE_SIZE; if (dlen < len) len = dlen; - skb_frag_off_set(frag, 0); - skb_frag_size_set(frag, len); + skb_frag_fill_page_desc(frag, page, 0, len); memcpy(skb_frag_address(frag), scratch, len); skb->truesize += len; -- cgit v1.2.3 From f1b5dfe63f6a9eb17948cbaee4da4b66f51ac794 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 10 May 2023 14:54:43 -0700 Subject: ping: Convert hlist_nulls to plain hlist. Since introduced in commit c319b4d76b9e ("net: ipv4: add IPPROTO_ICMP socket kind"), ping socket does not use SLAB_TYPESAFE_BY_RCU nor check nulls marker in loops. Signed-off-by: Kuniyuki Iwashima Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- net/ipv4/ping.c | 41 +++++++++++++++-------------------------- 1 file changed, 15 insertions(+), 26 deletions(-) (limited to 'net') diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 5178a3f3cb53..3793c81bda8a 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -49,13 +49,8 @@ #include #endif -#define ping_portaddr_for_each_entry(__sk, node, list) \ - hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node) -#define ping_portaddr_for_each_entry_rcu(__sk, node, list) \ - hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node) - struct ping_table { - struct hlist_nulls_head hash[PING_HTABLE_SIZE]; + struct hlist_head hash[PING_HTABLE_SIZE]; spinlock_t lock; }; @@ -74,17 +69,16 @@ static inline u32 ping_hashfn(const struct net *net, u32 num, u32 mask) } EXPORT_SYMBOL_GPL(ping_hash); -static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, - struct net *net, unsigned int num) +static inline struct hlist_head *ping_hashslot(struct ping_table *table, + struct net *net, unsigned int num) { return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; } int ping_get_port(struct sock *sk, unsigned short ident) { - struct hlist_nulls_node *node; - struct hlist_nulls_head *hlist; struct inet_sock *isk, *isk2; + struct hlist_head *hlist; struct sock *sk2 = NULL; isk = inet_sk(sk); @@ -98,7 +92,7 @@ int ping_get_port(struct sock *sk, unsigned short ident) result++; /* avoid zero */ hlist = ping_hashslot(&ping_table, sock_net(sk), result); - ping_portaddr_for_each_entry(sk2, node, hlist) { + sk_for_each(sk2, hlist) { isk2 = inet_sk(sk2); if (isk2->inet_num == result) @@ -115,7 +109,7 @@ next_port: goto fail; } else { hlist = ping_hashslot(&ping_table, sock_net(sk), ident); - ping_portaddr_for_each_entry(sk2, node, hlist) { + sk_for_each(sk2, hlist) { isk2 = inet_sk(sk2); /* BUG? Why is this reuse and not reuseaddr? 
ping.c @@ -133,9 +127,8 @@ next_port: isk->inet_num = ident; if (sk_unhashed(sk)) { pr_debug("was not hashed\n"); - sock_hold(sk); + sk_add_node_rcu(sk, hlist); sock_set_flag(sk, SOCK_RCU_FREE); - hlist_nulls_add_head_rcu(&sk->sk_nulls_node, hlist); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); } spin_unlock(&ping_table.lock); @@ -161,9 +154,7 @@ void ping_unhash(struct sock *sk) pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); spin_lock(&ping_table.lock); - if (sk_hashed(sk)) { - hlist_nulls_del_init_rcu(&sk->sk_nulls_node); - sock_put(sk); + if (sk_del_node_init_rcu(sk)) { isk->inet_num = 0; isk->inet_sport = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); @@ -175,10 +166,9 @@ EXPORT_SYMBOL_GPL(ping_unhash); /* Called under rcu_read_lock() */ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) { - struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident); + struct hlist_head *hslot = ping_hashslot(&ping_table, net, ident); struct sock *sk = NULL; struct inet_sock *isk; - struct hlist_nulls_node *hnode; int dif, sdif; if (skb->protocol == htons(ETH_P_IP)) { @@ -197,7 +187,7 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) return NULL; } - ping_portaddr_for_each_entry_rcu(sk, hnode, hslot) { + sk_for_each_rcu(sk, hslot) { isk = inet_sk(sk); pr_debug("iterate\n"); @@ -1045,15 +1035,14 @@ static struct sock *ping_get_first(struct seq_file *seq, int start) for (state->bucket = start; state->bucket < PING_HTABLE_SIZE; ++state->bucket) { - struct hlist_nulls_node *node; - struct hlist_nulls_head *hslot; + struct hlist_head *hslot; hslot = &ping_table.hash[state->bucket]; - if (hlist_nulls_empty(hslot)) + if (hlist_empty(hslot)) continue; - sk_nulls_for_each(sk, node, hslot) { + sk_for_each(sk, hslot) { if (net_eq(sock_net(sk), net) && sk->sk_family == state->family) goto found; @@ -1070,7 +1059,7 @@ static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk) struct net *net = seq_file_net(seq); do { - sk = sk_nulls_next(sk); + sk = sk_next(sk); } while (sk && (!net_eq(sock_net(sk), net))); if (!sk) @@ -1206,6 +1195,6 @@ void __init ping_init(void) int i; for (i = 0; i < PING_HTABLE_SIZE; i++) - INIT_HLIST_NULLS_HEAD(&ping_table.hash[i], i); + INIT_HLIST_HEAD(&ping_table.hash[i]); spin_lock_init(&ping_table.lock); } -- cgit v1.2.3 From b2cbac9b9b28730e9e53be20b6cdf979d3b9f27e Mon Sep 17 00:00:00 2001 From: Angus Chen Date: Fri, 12 May 2023 09:01:52 +0800 Subject: net: Remove low_thresh in ip defrag As low_thresh has no work in fragment reassembles,del it. And Mark it deprecated in sysctl Document. Signed-off-by: Angus Chen Signed-off-by: David S. Miller --- Documentation/networking/nf_conntrack-sysctl.rst | 1 + include/net/inet_frag.h | 1 - net/ieee802154/6lowpan/reassembly.c | 9 ++++----- net/ipv4/ip_fragment.c | 13 +++++-------- net/ipv6/netfilter/nf_conntrack_reasm.c | 9 ++++----- net/ipv6/reassembly.c | 9 ++++----- 6 files changed, 18 insertions(+), 24 deletions(-) (limited to 'net') diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst index 8b1045c3b59e..9ca356bc7217 100644 --- a/Documentation/networking/nf_conntrack-sysctl.rst +++ b/Documentation/networking/nf_conntrack-sysctl.rst @@ -55,6 +55,7 @@ nf_conntrack_frag6_high_thresh - INTEGER nf_conntrack_frag6_low_thresh is reached. 
nf_conntrack_frag6_low_thresh - INTEGER + (Obsolete since linux-4.17) default 196608 See nf_conntrack_frag6_low_thresh diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 325ad893f624..8543e740891a 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -13,7 +13,6 @@ struct fqdir { /* sysctls */ long high_thresh; - long low_thresh; int timeout; int max_dist; struct inet_frags *f; diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index a91283d1e5bf..3ba4c0f27af9 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -318,7 +318,7 @@ err: } #ifdef CONFIG_SYSCTL - +static unsigned long lowpanfrag_low_thresh_unuesd = IPV6_FRAG_LOW_THRESH; static struct ctl_table lowpan_frags_ns_ctl_table[] = { { .procname = "6lowpanfrag_high_thresh", @@ -374,9 +374,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net) } table[0].data = &ieee802154_lowpan->fqdir->high_thresh; - table[0].extra1 = &ieee802154_lowpan->fqdir->low_thresh; - table[1].data = &ieee802154_lowpan->fqdir->low_thresh; - table[1].extra2 = &ieee802154_lowpan->fqdir->high_thresh; + table[0].extra1 = &lowpanfrag_low_thresh_unuesd; + table[1].data = &lowpanfrag_low_thresh_unuesd; + table[1].extra2 = &ieee802154_lowpan->fqdir->high_thresh; table[2].data = &ieee802154_lowpan->fqdir->timeout; hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table); @@ -451,7 +451,6 @@ static int __net_init lowpan_frags_init_net(struct net *net) return res; ieee802154_lowpan->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH; - ieee802154_lowpan->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH; ieee802154_lowpan->fqdir->timeout = IPV6_FRAG_TIMEOUT; res = lowpan_frags_ns_sysctl_register(net); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 69c00ffdcf3e..0db5eb3dec83 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -553,7 +553,7 @@ EXPORT_SYMBOL(ip_check_defrag); #ifdef CONFIG_SYSCTL static int dist_min; - +static unsigned long ipfrag_low_thresh_unused; static struct ctl_table ip4_frags_ns_ctl_table[] = { { .procname = "ipfrag_high_thresh", @@ -609,9 +609,9 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net) } table[0].data = &net->ipv4.fqdir->high_thresh; - table[0].extra1 = &net->ipv4.fqdir->low_thresh; - table[1].data = &net->ipv4.fqdir->low_thresh; - table[1].extra2 = &net->ipv4.fqdir->high_thresh; + table[0].extra1 = &ipfrag_low_thresh_unused; + table[1].data = &ipfrag_low_thresh_unused; + table[1].extra2 = &net->ipv4.fqdir->high_thresh; table[2].data = &net->ipv4.fqdir->timeout; table[3].data = &net->ipv4.fqdir->max_dist; @@ -674,12 +674,9 @@ static int __net_init ipv4_frags_init_net(struct net *net) * A 64K fragment consumes 129736 bytes (44*2944)+200 * (1500 truesize == 2944, sizeof(struct ipq) == 200) * - * We will commit 4MB at one time. Should we cross that limit - * we will prune down to 3MB, making room for approx 8 big 64K - * fragments 8x128k. + * We will commit 4MB at one time. Should we cross that limit. */ net->ipv4.fqdir->high_thresh = 4 * 1024 * 1024; - net->ipv4.fqdir->low_thresh = 3 * 1024 * 1024; /* * Important NOTE! Fragment queue must be destroyed before MSL expires. 
* RFC791 is wrong proposing to prolongate timer each fragment arrival diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index d13240f13607..dc8a2854e7f3 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -42,7 +42,7 @@ static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net) } #ifdef CONFIG_SYSCTL - +static unsigned long nf_conntrack_frag6_low_thresh_unused = IPV6_FRAG_LOW_THRESH; static struct ctl_table nf_ct_frag6_sysctl_table[] = { { .procname = "nf_conntrack_frag6_timeout", @@ -82,10 +82,10 @@ static int nf_ct_frag6_sysctl_register(struct net *net) nf_frag = nf_frag_pernet(net); table[0].data = &nf_frag->fqdir->timeout; - table[1].data = &nf_frag->fqdir->low_thresh; - table[1].extra2 = &nf_frag->fqdir->high_thresh; + table[1].data = &nf_conntrack_frag6_low_thresh_unused; + table[1].extra2 = &nf_frag->fqdir->high_thresh; table[2].data = &nf_frag->fqdir->high_thresh; - table[2].extra1 = &nf_frag->fqdir->low_thresh; + table[2].extra1 = &nf_conntrack_frag6_low_thresh_unused; hdr = register_net_sysctl(net, "net/netfilter", table); if (hdr == NULL) @@ -500,7 +500,6 @@ static int nf_ct_net_init(struct net *net) return res; nf_frag->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH; - nf_frag->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH; nf_frag->fqdir->timeout = IPV6_FRAG_TIMEOUT; res = nf_ct_frag6_sysctl_register(net); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 5bc8a28e67f9..eb8373c25675 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -416,7 +416,7 @@ static const struct inet6_protocol frag_protocol = { }; #ifdef CONFIG_SYSCTL - +static unsigned long ip6_frags_low_thresh_unused = IPV6_FRAG_LOW_THRESH; static struct ctl_table ip6_frags_ns_ctl_table[] = { { .procname = "ip6frag_high_thresh", @@ -465,9 +465,9 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net) } table[0].data = &net->ipv6.fqdir->high_thresh; - table[0].extra1 = &net->ipv6.fqdir->low_thresh; - table[1].data = &net->ipv6.fqdir->low_thresh; - table[1].extra2 = &net->ipv6.fqdir->high_thresh; + table[0].extra1 = &ip6_frags_low_thresh_unused; + table[1].data = &ip6_frags_low_thresh_unused; + table[1].extra2 = &net->ipv6.fqdir->high_thresh; table[2].data = &net->ipv6.fqdir->timeout; hdr = register_net_sysctl(net, "net/ipv6", table); @@ -536,7 +536,6 @@ static int __net_init ipv6_frags_init_net(struct net *net) return res; net->ipv6.fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH; - net->ipv6.fqdir->low_thresh = IPV6_FRAG_LOW_THRESH; net->ipv6.fqdir->timeout = IPV6_FRAG_TIMEOUT; res = ip6_frags_ns_sysctl_register(net); -- cgit v1.2.3 From 12e7789ad5b476e945aba8edb1161c633cb7db31 Mon Sep 17 00:00:00 2001 From: Naveen Mamindlapalli Date: Sat, 13 May 2023 14:21:36 +0530 Subject: sch_htb: Allow HTB priority parameter in offload mode The current implementation of HTB offload returns the EINVAL error for unsupported parameters like prio and quantum. This patch removes the error returning checks for 'prio' parameter and populates its value to tc_htb_qopt_offload structure such that driver can use the same. Add prio parameter check in mlx5 driver, as mlx5 devices are not capable of supporting the prio parameter when htb offload is used. Report error if prio parameter is set to a non-default value. 
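For illustration, a driver that does support per-class priorities could consume the new field from its HTB offload handler roughly as follows (hypothetical driver and helper names, sketched only to show the intended use of the tc_htb_qopt_offload prio member, not code from this series):

	static int foo_setup_tc_htb(struct foo_priv *priv,
				    struct tc_htb_qopt_offload *opt)
	{
		switch (opt->command) {
		case TC_HTB_LEAF_ALLOC_QUEUE:
		case TC_HTB_NODE_MODIFY:
			/* opt->prio carries the class priority requested
			 * by tc; 0 means the default priority.
			 */
			return foo_set_class_prio(priv, opt->classid,
						  opt->prio);
		default:
			return -EOPNOTSUPP;
		}
	}
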
Signed-off-by: Naveen Mamindlapalli Co-developed-by: Rahul Rameshbabu Signed-off-by: Rahul Rameshbabu Signed-off-by: Hariprasad Kelam Signed-off-by: Sunil Kovvuri Goutham Reviewed-by: Simon Horman Reviewed-by: Jacob Keller Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 7 ++++++- include/net/pkt_cls.h | 1 + net/sched/sch_htb.c | 7 +++---- 3 files changed, 10 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 2842195ee548..1874c2f0587f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -379,6 +379,12 @@ int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_ if (!htb && htb_qopt->command != TC_HTB_CREATE) return -EINVAL; + if (htb_qopt->prio) { + NL_SET_ERR_MSG_MOD(htb_qopt->extack, + "prio parameter is not supported by device with HTB offload enabled."); + return -EOPNOTSUPP; + } + switch (htb_qopt->command) { case TC_HTB_CREATE: if (!mlx5_qos_is_supported(priv->mdev)) { @@ -515,4 +521,3 @@ int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_i *hw_id = rl->leaves_id[tc]; return 0; } - diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index b3b5b0b62f16..a2ea45c7b53e 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -868,6 +868,7 @@ struct tc_htb_qopt_offload { u16 qid; u64 rate; u64 ceil; + u8 prio; }; #define TC_HTB_CLASSID_ROOT U32_MAX diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 8aef7dd9fb88..325c29041c7d 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1814,10 +1814,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, NL_SET_ERR_MSG(extack, "HTB offload doesn't support the quantum parameter"); goto failure; } - if (hopt->prio) { - NL_SET_ERR_MSG(extack, "HTB offload doesn't support the prio parameter"); - goto failure; - } } /* Keeping backward compatible with rate_table based iproute2 tc */ @@ -1913,6 +1909,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, TC_HTB_CLASSID_ROOT, .rate = max_t(u64, hopt->rate.rate, rate64), .ceil = max_t(u64, hopt->ceil.rate, ceil64), + .prio = hopt->prio, .extack = extack, }; err = htb_offload(dev, &offload_opt); @@ -1933,6 +1930,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, TC_H_MIN(parent->common.classid), .rate = max_t(u64, hopt->rate.rate, rate64), .ceil = max_t(u64, hopt->ceil.rate, ceil64), + .prio = hopt->prio, .extack = extack, }; err = htb_offload(dev, &offload_opt); @@ -2018,6 +2016,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, .classid = cl->common.classid, .rate = max_t(u64, hopt->rate.rate, rate64), .ceil = max_t(u64, hopt->ceil.rate, ceil64), + .prio = hopt->prio, .extack = extack, }; err = htb_offload(dev, &offload_opt); -- cgit v1.2.3 From 0d9b41daa5907756a31772d8af8ac5ff25cf17c1 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Sat, 13 May 2023 13:52:04 +0200 Subject: nfc: llcp: fix possible use of uninitialized variable in nfc_llcp_send_connect() If sock->service_name is NULL, the local variable service_name_tlv_length will not be assigned by nfc_llcp_build_tlv(), later leading to using value frmo the stack. Smatch warning: net/nfc/llcp_commands.c:442 nfc_llcp_send_connect() error: uninitialized symbol 'service_name_tlv_length'. 
Fixes: de9e5aeb4f40 ("NFC: llcp: Fix usage of llcp_add_tlv()") Signed-off-by: Krzysztof Kozlowski Signed-off-by: David S. Miller --- net/nfc/llcp_commands.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index 41e3a20c8935..cdb001de0692 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -390,7 +390,8 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) const u8 *service_name_tlv = NULL; const u8 *miux_tlv = NULL; const u8 *rw_tlv = NULL; - u8 service_name_tlv_length, miux_tlv_length, rw_tlv_length, rw; + u8 service_name_tlv_length = 0; + u8 miux_tlv_length, rw_tlv_length, rw; int err; u16 size = 0; __be16 miux; -- cgit v1.2.3 From fe34db062b8036f72e97c2b9eaa7e9fbb725ead2 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 9 May 2023 09:19:08 -0600 Subject: net: set FMODE_NOWAIT for sockets The socket read/write functions deal with O_NONBLOCK and IOCB_NOWAIT just fine, so we can flag them as being FMODE_NOWAIT compliant. With this, we can remove socket special casing in io_uring when checking if a file type is sane for nonblocking IO, and it's also the defined way to flag file types as such in the kernel. Cc: "David S. Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: netdev@vger.kernel.org Reviewed-by: Paolo Abeni Link: https://lore.kernel.org/r/20230509151910.183637-2-axboe@kernel.dk Signed-off-by: Jens Axboe --- net/socket.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/socket.c b/net/socket.c index b7e01d0fe082..0c7fda1e5e8d 100644 --- a/net/socket.c +++ b/net/socket.c @@ -471,6 +471,7 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname) return file; } + file->f_mode |= FMODE_NOWAIT; sock->file = file; file->private_data = sock; stream_open(SOCK_INODE(sock), file); -- cgit v1.2.3 From e7480a44d7c4ce4691fa6bcdb0318f0d81fe4b12 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 16 May 2023 20:41:12 -0700 Subject: Revert "net: Remove low_thresh in ip defrag" This reverts commit b2cbac9b9b28730e9e53be20b6cdf979d3b9f27e. We have multiple reports of obvious breakage from this patch. Reported-by: Ido Schimmel Link: https://lore.kernel.org/all/ZGIRWjNcfqI8yY8W@shredder/ Link: https://lore.kernel.org/all/CADJHv_sDK=0RrMA2FTZQV5fw7UQ+qY=HG21Wu5qb0V9vvx5w6A@mail.gmail.com/ Reported-by: syzbot+a5e719ac7c268e414c95@syzkaller.appspotmail.com Reported-by: syzbot+a03fd670838d927d9cd8@syzkaller.appspotmail.com Fixes: b2cbac9b9b28 ("net: Remove low_thresh in ip defrag") Link: https://lore.kernel.org/r/20230517034112.1261835-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- Documentation/networking/nf_conntrack-sysctl.rst | 1 - include/net/inet_frag.h | 1 + net/ieee802154/6lowpan/reassembly.c | 9 +++++---- net/ipv4/ip_fragment.c | 13 ++++++++----- net/ipv6/netfilter/nf_conntrack_reasm.c | 9 +++++---- net/ipv6/reassembly.c | 9 +++++---- 6 files changed, 24 insertions(+), 18 deletions(-) (limited to 'net') diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst index 9ca356bc7217..8b1045c3b59e 100644 --- a/Documentation/networking/nf_conntrack-sysctl.rst +++ b/Documentation/networking/nf_conntrack-sysctl.rst @@ -55,7 +55,6 @@ nf_conntrack_frag6_high_thresh - INTEGER nf_conntrack_frag6_low_thresh is reached. 
nf_conntrack_frag6_low_thresh - INTEGER - (Obsolete since linux-4.17) default 196608 See nf_conntrack_frag6_low_thresh diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 8543e740891a..325ad893f624 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -13,6 +13,7 @@ struct fqdir { /* sysctls */ long high_thresh; + long low_thresh; int timeout; int max_dist; struct inet_frags *f; diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index 3ba4c0f27af9..a91283d1e5bf 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -318,7 +318,7 @@ err: } #ifdef CONFIG_SYSCTL -static unsigned long lowpanfrag_low_thresh_unuesd = IPV6_FRAG_LOW_THRESH; + static struct ctl_table lowpan_frags_ns_ctl_table[] = { { .procname = "6lowpanfrag_high_thresh", @@ -374,9 +374,9 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net) } table[0].data = &ieee802154_lowpan->fqdir->high_thresh; - table[0].extra1 = &lowpanfrag_low_thresh_unuesd; - table[1].data = &lowpanfrag_low_thresh_unuesd; - table[1].extra2 = &ieee802154_lowpan->fqdir->high_thresh; + table[0].extra1 = &ieee802154_lowpan->fqdir->low_thresh; + table[1].data = &ieee802154_lowpan->fqdir->low_thresh; + table[1].extra2 = &ieee802154_lowpan->fqdir->high_thresh; table[2].data = &ieee802154_lowpan->fqdir->timeout; hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table); @@ -451,6 +451,7 @@ static int __net_init lowpan_frags_init_net(struct net *net) return res; ieee802154_lowpan->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH; + ieee802154_lowpan->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH; ieee802154_lowpan->fqdir->timeout = IPV6_FRAG_TIMEOUT; res = lowpan_frags_ns_sysctl_register(net); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 0db5eb3dec83..69c00ffdcf3e 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -553,7 +553,7 @@ EXPORT_SYMBOL(ip_check_defrag); #ifdef CONFIG_SYSCTL static int dist_min; -static unsigned long ipfrag_low_thresh_unused; + static struct ctl_table ip4_frags_ns_ctl_table[] = { { .procname = "ipfrag_high_thresh", @@ -609,9 +609,9 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net) } table[0].data = &net->ipv4.fqdir->high_thresh; - table[0].extra1 = &ipfrag_low_thresh_unused; - table[1].data = &ipfrag_low_thresh_unused; - table[1].extra2 = &net->ipv4.fqdir->high_thresh; + table[0].extra1 = &net->ipv4.fqdir->low_thresh; + table[1].data = &net->ipv4.fqdir->low_thresh; + table[1].extra2 = &net->ipv4.fqdir->high_thresh; table[2].data = &net->ipv4.fqdir->timeout; table[3].data = &net->ipv4.fqdir->max_dist; @@ -674,9 +674,12 @@ static int __net_init ipv4_frags_init_net(struct net *net) * A 64K fragment consumes 129736 bytes (44*2944)+200 * (1500 truesize == 2944, sizeof(struct ipq) == 200) * - * We will commit 4MB at one time. Should we cross that limit. + * We will commit 4MB at one time. Should we cross that limit + * we will prune down to 3MB, making room for approx 8 big 64K + * fragments 8x128k. */ net->ipv4.fqdir->high_thresh = 4 * 1024 * 1024; + net->ipv4.fqdir->low_thresh = 3 * 1024 * 1024; /* * Important NOTE! Fragment queue must be destroyed before MSL expires. 
* RFC791 is wrong proposing to prolongate timer each fragment arrival diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index dc8a2854e7f3..d13240f13607 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -42,7 +42,7 @@ static struct nft_ct_frag6_pernet *nf_frag_pernet(struct net *net) } #ifdef CONFIG_SYSCTL -static unsigned long nf_conntrack_frag6_low_thresh_unused = IPV6_FRAG_LOW_THRESH; + static struct ctl_table nf_ct_frag6_sysctl_table[] = { { .procname = "nf_conntrack_frag6_timeout", @@ -82,10 +82,10 @@ static int nf_ct_frag6_sysctl_register(struct net *net) nf_frag = nf_frag_pernet(net); table[0].data = &nf_frag->fqdir->timeout; - table[1].data = &nf_conntrack_frag6_low_thresh_unused; - table[1].extra2 = &nf_frag->fqdir->high_thresh; + table[1].data = &nf_frag->fqdir->low_thresh; + table[1].extra2 = &nf_frag->fqdir->high_thresh; table[2].data = &nf_frag->fqdir->high_thresh; - table[2].extra1 = &nf_conntrack_frag6_low_thresh_unused; + table[2].extra1 = &nf_frag->fqdir->low_thresh; hdr = register_net_sysctl(net, "net/netfilter", table); if (hdr == NULL) @@ -500,6 +500,7 @@ static int nf_ct_net_init(struct net *net) return res; nf_frag->fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH; + nf_frag->fqdir->low_thresh = IPV6_FRAG_LOW_THRESH; nf_frag->fqdir->timeout = IPV6_FRAG_TIMEOUT; res = nf_ct_frag6_sysctl_register(net); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index eb8373c25675..5bc8a28e67f9 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -416,7 +416,7 @@ static const struct inet6_protocol frag_protocol = { }; #ifdef CONFIG_SYSCTL -static unsigned long ip6_frags_low_thresh_unused = IPV6_FRAG_LOW_THRESH; + static struct ctl_table ip6_frags_ns_ctl_table[] = { { .procname = "ip6frag_high_thresh", @@ -465,9 +465,9 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net) } table[0].data = &net->ipv6.fqdir->high_thresh; - table[0].extra1 = &ip6_frags_low_thresh_unused; - table[1].data = &ip6_frags_low_thresh_unused; - table[1].extra2 = &net->ipv6.fqdir->high_thresh; + table[0].extra1 = &net->ipv6.fqdir->low_thresh; + table[1].data = &net->ipv6.fqdir->low_thresh; + table[1].extra2 = &net->ipv6.fqdir->high_thresh; table[2].data = &net->ipv6.fqdir->timeout; hdr = register_net_sysctl(net, "net/ipv6", table); @@ -536,6 +536,7 @@ static int __net_init ipv6_frags_init_net(struct net *net) return res; net->ipv6.fqdir->high_thresh = IPV6_FRAG_HIGH_THRESH; + net->ipv6.fqdir->low_thresh = IPV6_FRAG_LOW_THRESH; net->ipv6.fqdir->timeout = IPV6_FRAG_TIMEOUT; res = ip6_frags_ns_sysctl_register(net); -- cgit v1.2.3 From 65eb006d85a2ac0b23464808099726bd826e9877 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Mon, 15 May 2023 15:37:56 +0200 Subject: bpf: Move kernel test kfuncs to bpf_testmod Moving kernel test kfuncs into bpf_testmod kernel module, and adding necessary init calls and BTF IDs records. We need to keep following structs in kernel: struct prog_test_ref_kfunc struct prog_test_member (embedded in prog_test_ref_kfunc) The reason is because they need to be marked as rcu safe (check test prog mark_ref_as_untrusted_or_null) and such objects are being required to be defined only in kernel at the moment (see rcu_safe_kptr check in kernel). We need to keep also dtor functions for both objects in kernel: bpf_kfunc_call_test_release bpf_kfunc_call_memb_release We also keep the copy of these struct in bpf_testmod_kfunc.h, because other test functions use them. 
This is unfortunate, but this is just temporary solution until we are able to these structs them to bpf_testmod completely. As suggested by David adding bpf_testmod.ko make dependency for bpf programs, so they are rebuilt if we change the bpf_testmod.ko module. Also adding missing __bpf_kfunc to bpf_kfunc_call_test4 functions. Signed-off-by: Jiri Olsa Acked-by: David Vernet Link: https://lore.kernel.org/r/20230515133756.1658301-11-jolsa@kernel.org Signed-off-by: Alexei Starovoitov --- net/bpf/test_run.c | 201 --------------------- .../selftests/bpf/bpf_testmod/bpf_testmod.c | 166 +++++++++++++++++ .../selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h | 60 ++++++ 3 files changed, 226 insertions(+), 201 deletions(-) (limited to 'net') diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 98143b86a9dd..2321bd2f9964 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -561,29 +561,6 @@ __bpf_kfunc int bpf_modify_return_test(int a, int *b) return a + *b; } -__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d) -{ - return a + b + c + d; -} - -__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b) -{ - return a + b; -} - -__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk) -{ - return sk; -} - -long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d) -{ - /* Provoke the compiler to assume that the caller has sign-extended a, - * b and c on platforms where this is required (e.g. s390x). - */ - return (long)a + (long)b + (long)c + d; -} - int noinline bpf_fentry_shadow_test(int a) { return a + 1; @@ -606,32 +583,6 @@ struct prog_test_ref_kfunc { refcount_t cnt; }; -static struct prog_test_ref_kfunc prog_test_struct = { - .a = 42, - .b = 108, - .next = &prog_test_struct, - .cnt = REFCOUNT_INIT(1), -}; - -__bpf_kfunc struct prog_test_ref_kfunc * -bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) -{ - refcount_inc(&prog_test_struct.cnt); - return &prog_test_struct; -} - -__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p) -{ - WARN_ON_ONCE(1); -} - -__bpf_kfunc struct prog_test_member * -bpf_kfunc_call_memb_acquire(void) -{ - WARN_ON_ONCE(1); - return NULL; -} - __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) { refcount_dec(&p->cnt); @@ -641,134 +592,6 @@ __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p) { } -__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p) -{ - WARN_ON_ONCE(1); -} - -static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size) -{ - if (size > 2 * sizeof(int)) - return NULL; - - return (int *)p; -} - -__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, - const int rdwr_buf_size) -{ - return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size); -} - -__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, - const int rdonly_buf_size) -{ - return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); -} - -/* the next 2 ones can't be really used for testing expect to ensure - * that the verifier rejects the call. - * Acquire functions must return struct pointers, so these ones are - * failing. 
- */ -__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, - const int rdonly_buf_size) -{ - return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); -} - -__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p) -{ -} - -struct prog_test_pass1 { - int x0; - struct { - int x1; - struct { - int x2; - struct { - int x3; - }; - }; - }; -}; - -struct prog_test_pass2 { - int len; - short arr1[4]; - struct { - char arr2[4]; - unsigned long arr3[8]; - } x; -}; - -struct prog_test_fail1 { - void *p; - int x; -}; - -struct prog_test_fail2 { - int x8; - struct prog_test_pass1 x; -}; - -struct prog_test_fail3 { - int len; - char arr1[2]; - char arr2[]; -}; - -__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) -{ -} - -__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) -{ -} - -__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) -{ -} - -__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p) -{ -} - -__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p) -{ -} - -__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p) -{ -} - -__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz) -{ -} - -__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len) -{ -} - -__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len) -{ -} - -__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) -{ - /* p != NULL, but p->cnt could be 0 */ -} - -__bpf_kfunc void bpf_kfunc_call_test_destructive(void) -{ -} - -__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) -{ - return arg; -} - __diag_pop(); BTF_SET8_START(bpf_test_modify_return_ids) @@ -782,32 +605,8 @@ static const struct btf_kfunc_id_set bpf_test_modify_return_set = { }; BTF_SET8_START(test_sk_check_kfunc_ids) -BTF_ID_FLAGS(func, bpf_kfunc_call_test1) -BTF_ID_FLAGS(func, bpf_kfunc_call_test2) -BTF_ID_FLAGS(func, bpf_kfunc_call_test3) -BTF_ID_FLAGS(func, bpf_kfunc_call_test4) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL) -BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE) -BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL) -BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset) BTF_SET8_END(test_sk_check_kfunc_ids) static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c 
b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 52785ba671e6..cf216041876c 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -9,6 +9,7 @@ #include #include #include "bpf_testmod.h" +#include "bpf_testmod_kfunc.h" #define CREATE_TRACE_POINTS #include "bpf_testmod-events.h" @@ -289,8 +290,171 @@ static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = { .set = &bpf_testmod_common_kfunc_ids, }; +__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d) +{ + return a + b + c + d; +} + +__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b) +{ + return a + b; +} + +__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk) +{ + return sk; +} + +__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d) +{ + /* Provoke the compiler to assume that the caller has sign-extended a, + * b and c on platforms where this is required (e.g. s390x). + */ + return (long)a + (long)b + (long)c + d; +} + +static struct prog_test_ref_kfunc prog_test_struct = { + .a = 42, + .b = 108, + .next = &prog_test_struct, + .cnt = REFCOUNT_INIT(1), +}; + +__bpf_kfunc struct prog_test_ref_kfunc * +bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) +{ + refcount_inc(&prog_test_struct.cnt); + return &prog_test_struct; +} + +__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p) +{ + WARN_ON_ONCE(1); +} + +__bpf_kfunc struct prog_test_member * +bpf_kfunc_call_memb_acquire(void) +{ + WARN_ON_ONCE(1); + return NULL; +} + +__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p) +{ + WARN_ON_ONCE(1); +} + +static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size) +{ + if (size > 2 * sizeof(int)) + return NULL; + + return (int *)p; +} + +__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, + const int rdwr_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size); +} + +__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, + const int rdonly_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); +} + +/* the next 2 ones can't be really used for testing expect to ensure + * that the verifier rejects the call. + * Acquire functions must return struct pointers, so these ones are + * failing. 
+ */ +__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, + const int rdonly_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); +} + +__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) +{ + /* p != NULL, but p->cnt could be 0 */ +} + +__bpf_kfunc void bpf_kfunc_call_test_destructive(void) +{ +} + +__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) +{ + return arg; +} + BTF_SET8_START(bpf_testmod_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) +BTF_ID_FLAGS(func, bpf_kfunc_call_test1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test3) +BTF_ID_FLAGS(func, bpf_kfunc_call_test4) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset) BTF_SET8_END(bpf_testmod_check_kfunc_ids) static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { @@ -312,6 +476,8 @@ static int bpf_testmod_init(void) ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); if (ret < 0) return ret; if (bpf_fentry_test1(0) < 0) diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h index 57f6166911f8..9693c626646b 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h +++ 
b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h @@ -8,8 +8,62 @@ #include #else #define __ksym +struct prog_test_member1 { + int a; +}; + +struct prog_test_member { + struct prog_test_member1 m; + int c; +}; + +struct prog_test_ref_kfunc { + int a; + int b; + struct prog_test_member memb; + struct prog_test_ref_kfunc *next; + refcount_t cnt; +}; #endif +struct prog_test_pass1 { + int x0; + struct { + int x1; + struct { + int x2; + struct { + int x3; + }; + }; + }; +}; + +struct prog_test_pass2 { + int len; + short arr1[4]; + struct { + char arr2[4]; + unsigned long arr3[8]; + } x; +}; + +struct prog_test_fail1 { + void *p; + int x; +}; + +struct prog_test_fail2 { + int x8; + struct prog_test_pass1 x; +}; + +struct prog_test_fail3 { + int len; + char arr1[2]; + char arr2[]; +}; + struct prog_test_ref_kfunc * bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym; void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; @@ -20,7 +74,13 @@ int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int r int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; void bpf_kfunc_call_int_mem_release(int *p) __ksym; + +/* The bpf_kfunc_call_test_static_unused_arg is defined as static, + * but bpf program compilation needs to see it as global symbol. + */ +#ifndef __KERNEL__ u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym; +#endif void bpf_testmod_test_mod_kfunc(int i) __ksym; -- cgit v1.2.3 From eea96a3e2c909a055005ac65dde356b36cabc4ed Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 15 May 2023 17:06:36 +0100 Subject: net/tcp: don't peek at tail for io_uring zc Move tcp_write_queue_tail() to SOCK_ZEROCOPY specific flag as zerocopy setup for msghdr->ubuf_info doesn't need to peek into the last request. Signed-off-by: Pavel Begunkov Reviewed-by: David Ahern Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4d6392c16b7a..40f591f7fce1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1229,13 +1229,12 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) flags = msg->msg_flags; if ((flags & MSG_ZEROCOPY) && size) { - skb = tcp_write_queue_tail(sk); - if (msg->msg_ubuf) { uarg = msg->msg_ubuf; net_zcopy_get(uarg); zc = sk->sk_route_caps & NETIF_F_SG; } else if (sock_flag(sk, SOCK_ZEROCOPY)) { + skb = tcp_write_queue_tail(sk); uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb)); if (!uarg) { err = -ENOBUFS; -- cgit v1.2.3 From a7533584728d366f11b210d0e8a2fad03109c669 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 15 May 2023 17:06:37 +0100 Subject: net/tcp: optimise io_uring zc ubuf refcounting io_uring keeps a reference to ubuf_info during submission, so if tcp_sendmsg_locked() sees msghdr::msg_ubuf in can be sure the buffer will be kept alive and doesn't need to additionally pin it. Signed-off-by: Pavel Begunkov Reviewed-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 40f591f7fce1..3d18e295bb2f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1231,7 +1231,6 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) if ((flags & MSG_ZEROCOPY) && size) { if (msg->msg_ubuf) { uarg = msg->msg_ubuf; - net_zcopy_get(uarg); zc = sk->sk_route_caps & NETIF_F_SG; } else if (sock_flag(sk, SOCK_ZEROCOPY)) { skb = tcp_write_queue_tail(sk); @@ -1458,7 +1457,9 @@ out: tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); } out_nopush: - net_zcopy_put(uarg); + /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ + if (uarg && !msg->msg_ubuf) + net_zcopy_put(uarg); return copied + copied_syn; do_error: @@ -1467,7 +1468,9 @@ do_error: if (copied + copied_syn) goto out; out_err: - net_zcopy_put_abort(uarg, true); + /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ + if (uarg && !msg->msg_ubuf) + net_zcopy_put_abort(uarg, true); err = sk_stream_error(sk, flags, err); /* make sure we wake any epoll edge trigger waiter */ if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { -- cgit v1.2.3 From fa0583c202433c3d359a1b0579f52da16e25e4df Mon Sep 17 00:00:00 2001 From: Yuya Tajima Date: Mon, 15 May 2023 15:34:27 +0000 Subject: seg6: Cleanup duplicates of skb_dst_drop calls In processing IPv6 segment routing header (SRH), several functions call skb_dst_drop before ip6_route_input. However, ip6_route_input calls skb_dst_drop within it, so there is no need to call skb_dst_drop in advance. Signed-off-by: Yuya Tajima Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- net/ipv6/exthdrs.c | 3 --- net/ipv6/seg6_iptunnel.c | 3 +-- 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'net') diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index a8d961d3a477..04c14fc4b14d 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -458,8 +458,6 @@ looped_back: ipv6_hdr(skb)->daddr = *addr; - skb_dst_drop(skb); - ip6_route_input(skb); if (skb_dst(skb)->error) { @@ -834,7 +832,6 @@ looped_back: *addr = ipv6_hdr(skb)->daddr; ipv6_hdr(skb)->daddr = daddr; - skb_dst_drop(skb); ip6_route_input(skb); if (skb_dst(skb)->error) { skb_push(skb, skb->data - skb_network_header(skb)); diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 34db881204d2..03b877ff4558 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -470,8 +470,6 @@ static int seg6_input_core(struct net *net, struct sock *sk, dst = dst_cache_get(&slwt->cache); preempt_enable(); - skb_dst_drop(skb); - if (!dst) { ip6_route_input(skb); dst = skb_dst(skb); @@ -482,6 +480,7 @@ static int seg6_input_core(struct net *net, struct sock *sk, preempt_enable(); } } else { + skb_dst_drop(skb); skb_dst_set(skb, dst); } -- cgit v1.2.3 From b50a8b0d57ab1ef11492171e98a030f48682eac3 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 6 May 2023 18:04:16 +0200 Subject: net: openvswitch: Use struct_size() Use struct_size() instead of hand writing it. This is less verbose and more informative. 
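struct_size() (declared in <linux/overflow.h>) evaluates to sizeof(*p) plus n times the size of the trailing flexible-array element, and saturates instead of wrapping if the multiplication overflows, so an oversized request simply fails to allocate. A minimal sketch of the pattern, using an illustrative struct rather than the actual openvswitch definition:

	#include <linux/overflow.h>

	struct foo_table {
		u32 count;
		struct foo __rcu *slots[];	/* flexible array member */
	};

	/* equivalent to sizeof(*t) + n * sizeof(t->slots[0]), but the
	 * total saturates to SIZE_MAX on overflow instead of wrapping
	 */
	t = kvzalloc(struct_size(t, slots, n), GFP_KERNEL);
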
Signed-off-by: Christophe JAILLET Acked-by: Eelco Chaudron Link: https://lore.kernel.org/r/e7746fbbd62371d286081d5266e88bbe8d3fe9f0.1683388991.git.christophe.jaillet@wanadoo.fr Signed-off-by: Jakub Kicinski --- net/openvswitch/meter.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'net') diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c index f2698d2316df..c4ebf810e4b1 100644 --- a/net/openvswitch/meter.c +++ b/net/openvswitch/meter.c @@ -69,9 +69,7 @@ static struct dp_meter_instance *dp_meter_instance_alloc(const u32 size) { struct dp_meter_instance *ti; - ti = kvzalloc(sizeof(*ti) + - sizeof(struct dp_meter *) * size, - GFP_KERNEL); + ti = kvzalloc(struct_size(ti, dp_meters, size), GFP_KERNEL); if (!ti) return NULL; -- cgit v1.2.3 From a4878eeae39048e6abe85891c714b49dc13fc08c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 12 May 2023 14:19:47 +0200 Subject: netfilter: nf_tables: relax set/map validation checks Its currently not allowed to perform queries on a map, for example: table t { map m { typeof ip saddr : meta mark .. chain c { ip saddr @m counter will fail, because kernel requires that userspace provides a destination register when the referenced set is a map. However, internally there is no real distinction between sets and maps, maps are just sets where each key is associated with a value. Relax this so that maps can be used just like sets. This allows to have rules that query if a given key exists without making use of the associated value. This also permits != checks which don't work for map lookups. When no destination reg is given for a map, then permit this for named maps. Data and dump paths need to be updated to consider priv->dreg_set instead of the 'set-is-a-map' check. Checks in reduce and validate callbacks are not changed, this can be relaxed later if a need arises. Signed-off-by: Florian Westphal --- net/netfilter/nft_lookup.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index 03ef4fdaa460..29ac48cdd6db 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c @@ -19,6 +19,7 @@ struct nft_lookup { struct nft_set *set; u8 sreg; u8 dreg; + bool dreg_set; bool invert; struct nft_set_binding binding; }; @@ -75,7 +76,7 @@ void nft_lookup_eval(const struct nft_expr *expr, } if (ext) { - if (set->flags & NFT_SET_MAP) + if (priv->dreg_set) nft_data_copy(®s->data[priv->dreg], nft_set_ext_data(ext), set->dlen); @@ -122,11 +123,8 @@ static int nft_lookup_init(const struct nft_ctx *ctx, if (flags & ~NFT_LOOKUP_F_INV) return -EINVAL; - if (flags & NFT_LOOKUP_F_INV) { - if (set->flags & NFT_SET_MAP) - return -EINVAL; + if (flags & NFT_LOOKUP_F_INV) priv->invert = true; - } } if (tb[NFTA_LOOKUP_DREG] != NULL) { @@ -140,8 +138,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx, set->dlen); if (err < 0) return err; - } else if (set->flags & NFT_SET_MAP) - return -EINVAL; + priv->dreg_set = true; + } else if (set->flags & NFT_SET_MAP) { + /* Map given, but user asks for lookup only (i.e. to + * ignore value assoicated with key). + * + * This makes no sense for anonymous maps since they are + * scoped to the rule, but for named sets this can be useful. 
+ */ + if (set->flags & NFT_SET_ANONYMOUS) + return -EINVAL; + } priv->binding.flags = set->flags & NFT_SET_MAP; @@ -188,7 +195,7 @@ static int nft_lookup_dump(struct sk_buff *skb, goto nla_put_failure; if (nft_dump_register(skb, NFTA_LOOKUP_SREG, priv->sreg)) goto nla_put_failure; - if (priv->set->flags & NFT_SET_MAP) + if (priv->dreg_set) if (nft_dump_register(skb, NFTA_LOOKUP_DREG, priv->dreg)) goto nla_put_failure; if (nla_put_be32(skb, NFTA_LOOKUP_FLAGS, htonl(flags))) -- cgit v1.2.3 From d4b7f29eb85c93893bc27388b37709efbc3c9a0e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 11 May 2023 22:45:35 +0200 Subject: netfilter: nf_tables: always increment set element count At this time, set->nelems counter only increments when the set has a maximum size. All set elements decrement the counter unconditionally, this is confusing. Increment the counter unconditionally to make this symmetrical. This would also allow changing the set maximum size after set creation in a later patch. Signed-off-by: Florian Westphal --- net/netfilter/nf_tables_api.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 59fb8320ab4d..7a61de80a8d1 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -6541,10 +6541,13 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, goto err_element_clash; } - if (!(flags & NFT_SET_ELEM_CATCHALL) && set->size && - !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) { - err = -ENFILE; - goto err_set_full; + if (!(flags & NFT_SET_ELEM_CATCHALL)) { + unsigned int max = set->size ? set->size + set->ndeact : UINT_MAX; + + if (!atomic_add_unless(&set->nelems, 1, max)) { + err = -ENFILE; + goto err_set_full; + } } nft_trans_elem(trans) = elem; -- cgit v1.2.3 From b9f9a485fb0eb80b0e2b90410b28cbb9b0e85687 Mon Sep 17 00:00:00 2001 From: Jeremy Sowden Date: Tue, 9 May 2023 22:19:45 +0100 Subject: netfilter: nft_exthdr: add boolean DCCP option matching The xt_dccp iptables module supports the matching of DCCP packets based on the presence or absence of DCCP options. Extend nft_exthdr to add this functionality to nftables. 
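For reference, a rough user-space sketch (illustrative only, not the kernel code added below) of the DCCP option encoding the presence check has to walk: option types 0-31 are single bytes, higher-numbered options are TLVs whose length byte covers the type and length themselves.

```c
/* Illustrative user-space sketch, not kernel code: walk a DCCP option area
 * and report whether option 'wanted' is present.  Types 0..31
 * (DCCPO_PADDING..DCCPO_MAX_RESERVED) are single-byte options; higher types
 * carry a length byte that must be at least 2 (type + length).
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define DCCPO_MAX_RESERVED 31

static bool dccp_option_present(const uint8_t *opts, size_t optlen,
				uint8_t wanted)
{
	size_t i = 0;

	while (i < optlen) {
		uint8_t type = opts[i];

		if (type == wanted)
			return true;

		if (type <= DCCPO_MAX_RESERVED) {
			i++;			/* single-byte option */
			continue;
		}

		if (i + 1 >= optlen || opts[i + 1] < 2)
			return false;		/* truncated or malformed TLV */

		i += opts[i + 1];		/* skip type, length and data */
	}

	return false;
}
```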
Link: https://bugzilla.netfilter.org/show_bug.cgi?id=930 Signed-off-by: Jeremy Sowden Signed-off-by: Florian Westphal --- include/uapi/linux/netfilter/nf_tables.h | 2 + net/netfilter/nft_exthdr.c | 106 +++++++++++++++++++++++++++++++ 2 files changed, 108 insertions(+) (limited to 'net') diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index c4d4d8e42dc8..e059dc2644df 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -859,12 +859,14 @@ enum nft_exthdr_flags { * @NFT_EXTHDR_OP_TCP: match against tcp options * @NFT_EXTHDR_OP_IPV4: match against ipv4 options * @NFT_EXTHDR_OP_SCTP: match against sctp chunks + * @NFT_EXTHDR_OP_DCCP: match against dccp otions */ enum nft_exthdr_op { NFT_EXTHDR_OP_IPV6, NFT_EXTHDR_OP_TCPOPT, NFT_EXTHDR_OP_IPV4, NFT_EXTHDR_OP_SCTP, + NFT_EXTHDR_OP_DCCP, __NFT_EXTHDR_OP_MAX }; #define NFT_EXTHDR_OP_MAX (__NFT_EXTHDR_OP_MAX - 1) diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index a54a7f772cec..671474e59817 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -406,6 +407,82 @@ err: regs->verdict.code = NFT_BREAK; } +static void nft_exthdr_dccp_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + struct nft_exthdr *priv = nft_expr_priv(expr); + unsigned int thoff, dataoff, optoff, optlen, i; + u32 *dest = &regs->data[priv->dreg]; + const struct dccp_hdr *dh; + struct dccp_hdr _dh; + + if (pkt->tprot != IPPROTO_DCCP || pkt->fragoff) + goto err; + + thoff = nft_thoff(pkt); + + dh = skb_header_pointer(pkt->skb, thoff, sizeof(_dh), &_dh); + if (!dh) + goto err; + + dataoff = dh->dccph_doff * sizeof(u32); + optoff = __dccp_hdr_len(dh); + if (dataoff <= optoff) + goto err; + + optlen = dataoff - optoff; + + for (i = 0; i < optlen; ) { + /* Options 0 (DCCPO_PADDING) - 31 (DCCPO_MAX_RESERVED) are 1B in + * the length; the remaining options are at least 2B long. In + * all cases, the first byte contains the option type. In + * multi-byte options, the second byte contains the option + * length, which must be at least two: 1 for the type plus 1 for + * the length plus 0-253 for any following option data. We + * aren't interested in the option data, only the type and the + * length, so we don't need to read more than two bytes at a + * time.
+ */ + unsigned int buflen = optlen - i; + u8 buf[2], *bufp; + u8 type, len; + + if (buflen > sizeof(buf)) + buflen = sizeof(buf); + + bufp = skb_header_pointer(pkt->skb, thoff + optoff + i, buflen, + &buf); + if (!bufp) + goto err; + + type = bufp[0]; + + if (type == priv->type) { + *dest = 1; + return; + } + + if (type <= DCCPO_MAX_RESERVED) { + i++; + continue; + } + + if (buflen < 2) + goto err; + + len = bufp[1]; + + if (len < 2) + goto err; + + i += len; + } + +err: + *dest = 0; +} + static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { [NFTA_EXTHDR_DREG] = { .type = NLA_U32 }, [NFTA_EXTHDR_TYPE] = { .type = NLA_U8 }, @@ -557,6 +634,22 @@ static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx, return 0; } +static int nft_exthdr_dccp_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_exthdr *priv = nft_expr_priv(expr); + int err = nft_exthdr_init(ctx, expr, tb); + + if (err < 0) + return err; + + if (!(priv->flags & NFT_EXTHDR_F_PRESENT)) + return -EOPNOTSUPP; + + return 0; +} + static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv) { if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type)) @@ -686,6 +779,15 @@ static const struct nft_expr_ops nft_exthdr_sctp_ops = { .reduce = nft_exthdr_reduce, }; +static const struct nft_expr_ops nft_exthdr_dccp_ops = { + .type = &nft_exthdr_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), + .eval = nft_exthdr_dccp_eval, + .init = nft_exthdr_dccp_init, + .dump = nft_exthdr_dump, + .reduce = nft_exthdr_reduce, +}; + static const struct nft_expr_ops * nft_exthdr_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) @@ -720,6 +822,10 @@ nft_exthdr_select_ops(const struct nft_ctx *ctx, if (tb[NFTA_EXTHDR_DREG]) return &nft_exthdr_sctp_ops; break; + case NFT_EXTHDR_OP_DCCP: + if (tb[NFTA_EXTHDR_DREG]) + return &nft_exthdr_dccp_ops; + break; } return ERR_PTR(-EOPNOTSUPP); -- cgit v1.2.3 From a2a0ffb0846895923991aa87e022dc228d53c125 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 5 May 2023 23:26:34 +0200 Subject: netfilter: nft_set_pipapo: Use struct_size() Use struct_size() instead of hand writing it. This is less verbose and more informative. Signed-off-by: Christophe JAILLET Reviewed-by: Simon Horman Signed-off-by: Florian Westphal --- net/netfilter/nft_set_pipapo.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 06d46d182634..34c684e121d3 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -1274,8 +1274,7 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old) struct nft_pipapo_match *new; int i; - new = kmalloc(sizeof(*new) + sizeof(*dst) * old->field_count, - GFP_KERNEL); + new = kmalloc(struct_size(new, f, old->field_count), GFP_KERNEL); if (!new) return ERR_PTR(-ENOMEM); @@ -2059,8 +2058,7 @@ static int nft_pipapo_init(const struct nft_set *set, if (field_count > NFT_PIPAPO_MAX_FIELDS) return -EINVAL; - m = kmalloc(sizeof(*priv->match) + sizeof(*f) * field_count, - GFP_KERNEL); + m = kmalloc(struct_size(m, f, field_count), GFP_KERNEL); if (!m) return -ENOMEM; -- cgit v1.2.3 From d671fd82eaa9bceedd48ea2b0679a9a6bbcd6532 Mon Sep 17 00:00:00 2001 From: Faicker Mo Date: Sun, 23 Apr 2023 10:29:57 +0800 Subject: netfilter: conntrack: allow insertion clash of gre protocol NVGRE tunnel is used in the VM-to-VM communications. 
The VM packets are encapsulated in NVGRE and sent from the host. For NVGRE there are two tuples(outer sip and outer dip) in the host conntrack item. Insertion clashes are more likely to happen if the concurrent connections are sent from the VM. Signed-off-by: Faicker Mo Signed-off-by: Florian Westphal --- net/netfilter/nf_conntrack_proto_gre.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index 728eeb0aea87..ad6f0ca40cd2 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -296,6 +296,7 @@ void nf_conntrack_gre_init_net(struct net *net) /* protocol helper struct */ const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre = { .l4proto = IPPROTO_GRE, + .allow_clash = true, #ifdef CONFIG_NF_CONNTRACK_PROCFS .print_conntrack = gre_print_conntrack, #endif -- cgit v1.2.3 From fa502c86566680ac62bc28ec883a069bf7a2aa5e Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 11 May 2023 07:35:33 +0200 Subject: netfilter: flowtable: simplify route logic Grab reference to dst from skbuff earlier to simplify route caching. Signed-off-by: Pablo Neira Ayuso Signed-off-by: Florian Westphal --- include/net/netfilter/nf_flow_table.h | 4 ++-- net/netfilter/nf_flow_table_core.c | 24 +++--------------------- net/netfilter/nft_flow_offload.c | 12 ++++++++---- 3 files changed, 13 insertions(+), 27 deletions(-) (limited to 'net') diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index ebb28ec5b6fa..546fc4a9b939 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -263,8 +263,8 @@ nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table, up_write(&flow_table->flow_block_lock); } -int flow_offload_route_init(struct flow_offload *flow, - const struct nf_flow_route *route); +void flow_offload_route_init(struct flow_offload *flow, + const struct nf_flow_route *route); int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow); void flow_offload_refresh(struct nf_flowtable *flow_table, diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index 04bd0ed4d2ae..b46dd897f2c5 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -125,9 +125,6 @@ static int flow_offload_fill_route(struct flow_offload *flow, break; case FLOW_OFFLOAD_XMIT_XFRM: case FLOW_OFFLOAD_XMIT_NEIGH: - if (!dst_hold_safe(route->tuple[dir].dst)) - return -1; - flow_tuple->dst_cache = dst; flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple); break; @@ -148,27 +145,12 @@ static void nft_flow_dst_release(struct flow_offload *flow, dst_release(flow->tuplehash[dir].tuple.dst_cache); } -int flow_offload_route_init(struct flow_offload *flow, +void flow_offload_route_init(struct flow_offload *flow, const struct nf_flow_route *route) { - int err; - - err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL); - if (err < 0) - return err; - - err = flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY); - if (err < 0) - goto err_route_reply; - + flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_ORIGINAL); + flow_offload_fill_route(flow, route, FLOW_OFFLOAD_DIR_REPLY); flow->type = NF_FLOW_OFFLOAD_ROUTE; - - return 0; - -err_route_reply: - nft_flow_dst_release(flow, FLOW_OFFLOAD_DIR_ORIGINAL); - - return err; } EXPORT_SYMBOL_GPL(flow_offload_route_init); diff --git a/net/netfilter/nft_flow_offload.c 
b/net/netfilter/nft_flow_offload.c index e860d8fe0e5e..5ef9146e74ad 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -250,9 +250,14 @@ static int nft_flow_route(const struct nft_pktinfo *pkt, break; } + if (!dst_hold_safe(this_dst)) + return -ENOENT; + nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt)); - if (!other_dst) + if (!other_dst) { + dst_release(this_dst); return -ENOENT; + } nft_default_forward_path(route, this_dst, dir); nft_default_forward_path(route, other_dst, !dir); @@ -349,8 +354,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, if (!flow) goto err_flow_alloc; - if (flow_offload_route_init(flow, &route) < 0) - goto err_flow_add; + flow_offload_route_init(flow, &route); if (tcph) { ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; @@ -361,12 +365,12 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, if (ret < 0) goto err_flow_add; - dst_release(route.tuple[!dir].dst); return; err_flow_add: flow_offload_free(flow); err_flow_alloc: + dst_release(route.tuple[dir].dst); dst_release(route.tuple[!dir].dst); err_flow_route: clear_bit(IPS_OFFLOAD_BIT, &ct->status); -- cgit v1.2.3 From a10fa0b489d627911895b64c6530636748dd7911 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 11 May 2023 07:35:34 +0200 Subject: netfilter: flowtable: split IPv4 datapath in helper functions Add context structure and helper functions to look up for a matching IPv4 entry in the flowtable and to forward packets. No functional changes are intended. Signed-off-by: Pablo Neira Ayuso Signed-off-by: Florian Westphal --- net/netfilter/nf_flow_table_ip.c | 119 +++++++++++++++++++++++++-------------- 1 file changed, 77 insertions(+), 42 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 19efba1e51ef..3fb476167d1d 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -163,38 +163,43 @@ static void nf_flow_tuple_encap(struct sk_buff *skb, } } -static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, - struct flow_offload_tuple *tuple, u32 *hdrsize, - u32 offset) +struct nf_flowtable_ctx { + const struct net_device *in; + u32 offset; + u32 hdrsize; +}; + +static int nf_flow_tuple_ip(struct nf_flowtable_ctx *ctx, struct sk_buff *skb, + struct flow_offload_tuple *tuple) { struct flow_ports *ports; unsigned int thoff; struct iphdr *iph; u8 ipproto; - if (!pskb_may_pull(skb, sizeof(*iph) + offset)) + if (!pskb_may_pull(skb, sizeof(*iph) + ctx->offset)) return -1; - iph = (struct iphdr *)(skb_network_header(skb) + offset); + iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset); thoff = (iph->ihl * 4); if (ip_is_fragment(iph) || unlikely(ip_has_options(thoff))) return -1; - thoff += offset; + thoff += ctx->offset; ipproto = iph->protocol; switch (ipproto) { case IPPROTO_TCP: - *hdrsize = sizeof(struct tcphdr); + ctx->hdrsize = sizeof(struct tcphdr); break; case IPPROTO_UDP: - *hdrsize = sizeof(struct udphdr); + ctx->hdrsize = sizeof(struct udphdr); break; #ifdef CONFIG_NF_CT_PROTO_GRE case IPPROTO_GRE: - *hdrsize = sizeof(struct gre_base_hdr); + ctx->hdrsize = sizeof(struct gre_base_hdr); break; #endif default: @@ -204,7 +209,7 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, if (iph->ttl <= 1) return -1; - if (!pskb_may_pull(skb, thoff + *hdrsize)) + if (!pskb_may_pull(skb, thoff + ctx->hdrsize)) return -1; switch (ipproto) { @@ -224,13 +229,13 @@ static int 
nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, } } - iph = (struct iphdr *)(skb_network_header(skb) + offset); + iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset); tuple->src_v4.s_addr = iph->saddr; tuple->dst_v4.s_addr = iph->daddr; tuple->l3proto = AF_INET; tuple->l4proto = ipproto; - tuple->iifidx = dev->ifindex; + tuple->iifidx = ctx->in->ifindex; nf_flow_tuple_encap(skb, tuple); return 0; @@ -336,58 +341,56 @@ static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb, return NF_STOLEN; } -unsigned int -nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) +static struct flow_offload_tuple_rhash * +nf_flow_offload_lookup(struct nf_flowtable_ctx *ctx, + struct nf_flowtable *flow_table, struct sk_buff *skb) { - struct flow_offload_tuple_rhash *tuplehash; - struct nf_flowtable *flow_table = priv; struct flow_offload_tuple tuple = {}; - enum flow_offload_tuple_dir dir; - struct flow_offload *flow; - struct net_device *outdev; - u32 hdrsize, offset = 0; - unsigned int thoff, mtu; - struct rtable *rt; - struct iphdr *iph; - __be32 nexthop; - int ret; if (skb->protocol != htons(ETH_P_IP) && - !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset)) - return NF_ACCEPT; + !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &ctx->offset)) + return NULL; - if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset) < 0) - return NF_ACCEPT; + if (nf_flow_tuple_ip(ctx, skb, &tuple) < 0) + return NULL; - tuplehash = flow_offload_lookup(flow_table, &tuple); - if (tuplehash == NULL) - return NF_ACCEPT; + return flow_offload_lookup(flow_table, &tuple); +} + +static int nf_flow_offload_forward(struct nf_flowtable_ctx *ctx, + struct nf_flowtable *flow_table, + struct flow_offload_tuple_rhash *tuplehash, + struct sk_buff *skb) +{ + enum flow_offload_tuple_dir dir; + struct flow_offload *flow; + unsigned int thoff, mtu; + struct iphdr *iph; dir = tuplehash->tuple.dir; flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); - mtu = flow->tuplehash[dir].tuple.mtu + offset; + mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset; if (unlikely(nf_flow_exceeds_mtu(skb, mtu))) - return NF_ACCEPT; + return 0; - iph = (struct iphdr *)(skb_network_header(skb) + offset); - thoff = (iph->ihl * 4) + offset; + iph = (struct iphdr *)(skb_network_header(skb) + ctx->offset); + thoff = (iph->ihl * 4) + ctx->offset; if (nf_flow_state_check(flow, iph->protocol, skb, thoff)) - return NF_ACCEPT; + return 0; if (!nf_flow_dst_check(&tuplehash->tuple)) { flow_offload_teardown(flow); - return NF_ACCEPT; + return 0; } - if (skb_try_make_writable(skb, thoff + hdrsize)) - return NF_DROP; + if (skb_try_make_writable(skb, thoff + ctx->hdrsize)) + return -1; flow_offload_refresh(flow_table, flow); nf_flow_encap_pop(skb, tuplehash); - thoff -= offset; + thoff -= ctx->offset; iph = ip_hdr(skb); nf_flow_nat_ip(flow, skb, thoff, dir, iph); @@ -398,6 +401,35 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, if (flow_table->flags & NF_FLOWTABLE_COUNTER) nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); + return 1; +} + +unsigned int +nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct flow_offload_tuple_rhash *tuplehash; + struct nf_flowtable *flow_table = priv; + enum flow_offload_tuple_dir dir; + struct nf_flowtable_ctx ctx = { + .in = state->in, + }; + struct flow_offload *flow; + struct net_device *outdev; + struct rtable *rt; + __be32 nexthop; + int ret; + + 
tuplehash = nf_flow_offload_lookup(&ctx, flow_table, skb); + if (!tuplehash) + return NF_ACCEPT; + + ret = nf_flow_offload_forward(&ctx, flow_table, tuplehash, skb); + if (ret < 0) + return NF_DROP; + else if (ret == 0) + return NF_ACCEPT; + if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) { rt = (struct rtable *)tuplehash->tuple.dst_cache; memset(skb->cb, 0, sizeof(struct inet_skb_parm)); @@ -406,6 +438,9 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, return nf_flow_xmit_xfrm(skb, state, &rt->dst); } + dir = tuplehash->tuple.dir; + flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); + switch (tuplehash->tuple.xmit_type) { case FLOW_OFFLOAD_XMIT_NEIGH: rt = (struct rtable *)tuplehash->tuple.dst_cache; -- cgit v1.2.3 From e05b5362166b18a224c30502e81416e4d622d3e4 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 11 May 2023 07:35:35 +0200 Subject: netfilter: flowtable: split IPv6 datapath in helper functions Add context structure and helper functions to look up for a matching IPv6 entry in the flowtable and to forward packets. No functional changes are intended. Signed-off-by: Pablo Neira Ayuso Signed-off-by: Florian Westphal --- net/netfilter/nf_flow_table_ip.c | 112 +++++++++++++++++++++++++-------------- 1 file changed, 71 insertions(+), 41 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 3fb476167d1d..d248763917ad 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -570,32 +570,31 @@ static void nf_flow_nat_ipv6(const struct flow_offload *flow, } } -static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, - struct flow_offload_tuple *tuple, u32 *hdrsize, - u32 offset) +static int nf_flow_tuple_ipv6(struct nf_flowtable_ctx *ctx, struct sk_buff *skb, + struct flow_offload_tuple *tuple) { struct flow_ports *ports; struct ipv6hdr *ip6h; unsigned int thoff; u8 nexthdr; - thoff = sizeof(*ip6h) + offset; + thoff = sizeof(*ip6h) + ctx->offset; if (!pskb_may_pull(skb, thoff)) return -1; - ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset); + ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset); nexthdr = ip6h->nexthdr; switch (nexthdr) { case IPPROTO_TCP: - *hdrsize = sizeof(struct tcphdr); + ctx->hdrsize = sizeof(struct tcphdr); break; case IPPROTO_UDP: - *hdrsize = sizeof(struct udphdr); + ctx->hdrsize = sizeof(struct udphdr); break; #ifdef CONFIG_NF_CT_PROTO_GRE case IPPROTO_GRE: - *hdrsize = sizeof(struct gre_base_hdr); + ctx->hdrsize = sizeof(struct gre_base_hdr); break; #endif default: @@ -605,7 +604,7 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, if (ip6h->hop_limit <= 1) return -1; - if (!pskb_may_pull(skb, thoff + *hdrsize)) + if (!pskb_may_pull(skb, thoff + ctx->hdrsize)) return -1; switch (nexthdr) { @@ -625,65 +624,47 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, } } - ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset); + ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset); tuple->src_v6 = ip6h->saddr; tuple->dst_v6 = ip6h->daddr; tuple->l3proto = AF_INET6; tuple->l4proto = nexthdr; - tuple->iifidx = dev->ifindex; + tuple->iifidx = ctx->in->ifindex; nf_flow_tuple_encap(skb, tuple); return 0; } -unsigned int -nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, - const struct nf_hook_state *state) +static int nf_flow_offload_ipv6_forward(struct nf_flowtable_ctx *ctx, + struct 
nf_flowtable *flow_table, + struct flow_offload_tuple_rhash *tuplehash, + struct sk_buff *skb) { - struct flow_offload_tuple_rhash *tuplehash; - struct nf_flowtable *flow_table = priv; - struct flow_offload_tuple tuple = {}; enum flow_offload_tuple_dir dir; - const struct in6_addr *nexthop; struct flow_offload *flow; - struct net_device *outdev; unsigned int thoff, mtu; - u32 hdrsize, offset = 0; struct ipv6hdr *ip6h; - struct rt6_info *rt; - int ret; - - if (skb->protocol != htons(ETH_P_IPV6) && - !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &offset)) - return NF_ACCEPT; - - if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize, offset) < 0) - return NF_ACCEPT; - - tuplehash = flow_offload_lookup(flow_table, &tuple); - if (tuplehash == NULL) - return NF_ACCEPT; dir = tuplehash->tuple.dir; flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); - mtu = flow->tuplehash[dir].tuple.mtu + offset; + mtu = flow->tuplehash[dir].tuple.mtu + ctx->offset; if (unlikely(nf_flow_exceeds_mtu(skb, mtu))) - return NF_ACCEPT; + return 0; - ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset); - thoff = sizeof(*ip6h) + offset; + ip6h = (struct ipv6hdr *)(skb_network_header(skb) + ctx->offset); + thoff = sizeof(*ip6h) + ctx->offset; if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff)) - return NF_ACCEPT; + return 0; if (!nf_flow_dst_check(&tuplehash->tuple)) { flow_offload_teardown(flow); - return NF_ACCEPT; + return 0; } - if (skb_try_make_writable(skb, thoff + hdrsize)) - return NF_DROP; + if (skb_try_make_writable(skb, thoff + ctx->hdrsize)) + return -1; flow_offload_refresh(flow_table, flow); @@ -698,6 +679,52 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, if (flow_table->flags & NF_FLOWTABLE_COUNTER) nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len); + return 1; +} + +static struct flow_offload_tuple_rhash * +nf_flow_offload_ipv6_lookup(struct nf_flowtable_ctx *ctx, + struct nf_flowtable *flow_table, + struct sk_buff *skb) +{ + struct flow_offload_tuple tuple = {}; + + if (skb->protocol != htons(ETH_P_IPV6) && + !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &ctx->offset)) + return NULL; + + if (nf_flow_tuple_ipv6(ctx, skb, &tuple) < 0) + return NULL; + + return flow_offload_lookup(flow_table, &tuple); +} + +unsigned int +nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct flow_offload_tuple_rhash *tuplehash; + struct nf_flowtable *flow_table = priv; + enum flow_offload_tuple_dir dir; + struct nf_flowtable_ctx ctx = { + .in = state->in, + }; + const struct in6_addr *nexthop; + struct flow_offload *flow; + struct net_device *outdev; + struct rt6_info *rt; + int ret; + + tuplehash = nf_flow_offload_ipv6_lookup(&ctx, flow_table, skb); + if (tuplehash == NULL) + return NF_ACCEPT; + + ret = nf_flow_offload_ipv6_forward(&ctx, flow_table, tuplehash, skb); + if (ret < 0) + return NF_DROP; + else if (ret == 0) + return NF_ACCEPT; + if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) { rt = (struct rt6_info *)tuplehash->tuple.dst_cache; memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); @@ -706,6 +733,9 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, return nf_flow_xmit_xfrm(skb, state, &rt->dst); } + dir = tuplehash->tuple.dir; + flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); + switch (tuplehash->tuple.xmit_type) { case FLOW_OFFLOAD_XMIT_NEIGH: rt = (struct rt6_info *)tuplehash->tuple.dst_cache; -- cgit v1.2.3 From 
e3d9387f002612093dbeaa272f7930ce5108033f Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 20 Apr 2023 19:17:13 +0200 Subject: security, lsm: Introduce security_mptcp_add_subflow() MPTCP can create subflows in kernel context, and later indirectly expose them to user-space, via the owning MPTCP socket. As discussed in the reported link, the above causes unexpected failures for server, MPTCP-enabled applications. Let's introduce a new LSM hook to allow the security module to relabel the subflow according to the owning user-space process, via the MPTCP socket owning the subflow. Note that the new hook requires both the MPTCP socket and the new subflow. This could allow future extensions, e.g. explicitly validating the MPTCP <-> subflow linkage. Link: https://lore.kernel.org/mptcp/CAHC9VhTNh-YwiyTds=P1e3rixEDqbRTFj22bpya=+qJqfcaMfg@mail.gmail.com/ Signed-off-by: Paolo Abeni Acked-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Signed-off-by: Paul Moore --- include/linux/lsm_hook_defs.h | 1 + include/linux/security.h | 6 ++++++ net/mptcp/subflow.c | 6 ++++++ security/security.c | 17 +++++++++++++++++ 4 files changed, 30 insertions(+) (limited to 'net') diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 6bb55e61e8e8..7308a1a7599b 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -343,6 +343,7 @@ LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc, struct sock *sk, struct sock *newsk) LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc, struct sk_buff *skb) +LSM_HOOK(int, 0, mptcp_add_subflow, struct sock *sk, struct sock *ssk) #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND diff --git a/include/linux/security.h b/include/linux/security.h index e2734e9e44d5..32828502f09e 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1465,6 +1465,7 @@ void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk, struct sock *newsk); int security_sctp_assoc_established(struct sctp_association *asoc, struct sk_buff *skb); +int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk); #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, @@ -1692,6 +1693,11 @@ static inline int security_sctp_assoc_established(struct sctp_association *asoc, { return 0; } + +static inline int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk) +{ + return 0; +} #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index ba065b66551a..76952cf74fc0 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1694,6 +1694,10 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING); + err = security_mptcp_add_subflow(sk, sf->sk); + if (err) + goto release_ssk; + /* the newly created socket has to be in the same cgroup as its parent */ mptcp_attach_cgroup(sk, sf->sk); @@ -1706,6 +1710,8 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL); sock_inuse_add(net, 1); err = tcp_set_ulp(sf->sk, "mptcp"); + +release_ssk: release_sock(sf->sk); if (err) { diff --git a/security/security.c b/security/security.c index d5ff7ff45b77..0e7be1da0b3d 100644 --- a/security/security.c +++ b/security/security.c @@ -4667,6 +4667,23 @@ int security_sctp_assoc_established(struct sctp_association *asoc, } 
EXPORT_SYMBOL(security_sctp_assoc_established); +/** + * security_mptcp_add_subflow() - Inherit the LSM label from the MPTCP socket + * @sk: the owning MPTCP socket + * @ssk: the new subflow + * + * Update the labeling for the given MPTCP subflow, to match the one of the + * owning MPTCP socket. This hook has to be called after the socket creation and + * initialization via the security_socket_create() and + * security_socket_post_create() LSM hooks. + * + * Return: Returns 0 on success or a negative error code on failure. + */ +int security_mptcp_add_subflow(struct sock *sk, struct sock *ssk) +{ + return call_int_hook(mptcp_add_subflow, 0, sk, ssk); +} + #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND -- cgit v1.2.3 From 711bdd5141d81ab21dbe0a533024d594210d5ba4 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 17 May 2023 12:16:14 -0700 Subject: inet: factor out locked section of inet_accept() in a new helper No functional changes intended. The new helper will be used by the MPTCP protocol in the next patch to avoid duplicating a few LoC. Signed-off-by: Paolo Abeni Reviewed-by: Matthieu Baerts Signed-off-by: Mat Martineau Signed-off-by: Jakub Kicinski --- include/net/inet_common.h | 2 ++ net/ipv4/af_inet.c | 32 +++++++++++++++++--------------- 2 files changed, 19 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/include/net/inet_common.h b/include/net/inet_common.h index cec453c18f1d..77f4b0ef5b92 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -31,6 +31,8 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); int inet_accept(struct socket *sock, struct socket *newsock, int flags, bool kern); +void __inet_accept(struct socket *sock, struct socket *newsock, + struct sock *newsk); int inet_send_prepare(struct sock *sk); int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index c4aab3aacbd8..946650036c7f 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -730,6 +730,20 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, } EXPORT_SYMBOL(inet_stream_connect); +void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk) +{ + sock_rps_record_flow(newsk); + WARN_ON(!((1 << newsk->sk_state) & + (TCPF_ESTABLISHED | TCPF_SYN_RECV | + TCPF_CLOSE_WAIT | TCPF_CLOSE))); + + if (test_bit(SOCK_SUPPORT_ZC, &sock->flags)) + set_bit(SOCK_SUPPORT_ZC, &newsock->flags); + sock_graft(newsk, newsock); + + newsock->state = SS_CONNECTED; +} + /* * Accept a pending connection. The TCP layer now gives BSD semantics. */ @@ -743,24 +757,12 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags, /* IPV6_ADDRFORM can change sk->sk_prot under us. 
*/ sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, flags, &err, kern); if (!sk2) - goto do_err; + return err; lock_sock(sk2); - - sock_rps_record_flow(sk2); - WARN_ON(!((1 << sk2->sk_state) & - (TCPF_ESTABLISHED | TCPF_SYN_RECV | - TCPF_CLOSE_WAIT | TCPF_CLOSE))); - - if (test_bit(SOCK_SUPPORT_ZC, &sock->flags)) - set_bit(SOCK_SUPPORT_ZC, &newsock->flags); - sock_graft(sk2, newsock); - - newsock->state = SS_CONNECTED; - err = 0; + __inet_accept(sock, newsock, sk2); release_sock(sk2); -do_err: - return err; + return 0; } EXPORT_SYMBOL(inet_accept); -- cgit v1.2.3 From e76c8ef5cc5b77debe711717f61a3fbf24904873 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 17 May 2023 12:16:15 -0700 Subject: mptcp: refactor mptcp_stream_accept() Rewrite the mptcp socket accept op, leveraging the new __inet_accept() helper. This way we can avoid acquiring the new socket lock twice and we can avoid a couple of indirect calls. Signed-off-by: Paolo Abeni Reviewed-by: Matthieu Baerts Signed-off-by: Mat Martineau Signed-off-by: Jakub Kicinski --- net/mptcp/protocol.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 08dc53f56bc2..2d331b2d62b7 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -3747,6 +3747,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, { struct mptcp_sock *msk = mptcp_sk(sock->sk); struct socket *ssock; + struct sock *newsk; int err; pr_debug("msk=%p", msk); @@ -3758,17 +3759,20 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, if (!ssock) return -EINVAL; - err = ssock->ops->accept(sock, newsock, flags, kern); - if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) { - struct mptcp_sock *msk = mptcp_sk(newsock->sk); + newsk = mptcp_accept(sock->sk, flags, &err, kern); + if (!newsk) + return err; + + lock_sock(newsk); + + __inet_accept(sock, newsock, newsk); + if (!mptcp_is_tcpsk(newsock->sk)) { + struct mptcp_sock *msk = mptcp_sk(newsk); struct mptcp_subflow_context *subflow; - struct sock *newsk = newsock->sk; set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags); msk->in_accept_queue = 0; - lock_sock(newsk); - /* set ssk->sk_socket of accept()ed flows to mptcp socket. * This is needed so NOSPACE flag can be set from tcp stack. */ @@ -3789,11 +3793,10 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, if (unlikely(list_empty(&msk->conn_list))) inet_sk_state_store(newsk, TCP_CLOSE); } - - release_sock(newsk); } + release_sock(newsk); - return err; + return 0; } static __poll_t mptcp_check_writeable(struct mptcp_sock *msk) -- cgit v1.2.3 From 45b1a1227a7aaa99254551c513406c7aa904e968 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 17 May 2023 12:16:16 -0700 Subject: mptcp: introduces more address related mibs Currently we don't track explicitly a few events related to address management suboption handling; this patch adds new mibs for ADD_ADDR and RM_ADDR options tx and for missed tx events due to internal storage exhaustion. The self-tests must be updated to properly handle different mibs with the same/shared prefix. Additionally removes a couple of warning tracking the loss event. 
Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/378 Signed-off-by: Paolo Abeni Reviewed-by: Matthieu Baerts Signed-off-by: Mat Martineau Signed-off-by: Jakub Kicinski --- net/mptcp/mib.c | 6 ++++++ net/mptcp/mib.h | 18 ++++++++++++++++++ net/mptcp/options.c | 5 ++++- net/mptcp/pm.c | 6 ++++-- tools/testing/selftests/net/mptcp/mptcp_join.sh | 4 ++-- 5 files changed, 34 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c index 0dac2863c6e1..a0990c365a2e 100644 --- a/net/mptcp/mib.c +++ b/net/mptcp/mib.c @@ -34,7 +34,11 @@ static const struct snmp_mib mptcp_snmp_list[] = { SNMP_MIB_ITEM("NoDSSInWindow", MPTCP_MIB_NODSSWINDOW), SNMP_MIB_ITEM("DuplicateData", MPTCP_MIB_DUPDATA), SNMP_MIB_ITEM("AddAddr", MPTCP_MIB_ADDADDR), + SNMP_MIB_ITEM("AddAddrTx", MPTCP_MIB_ADDADDRTX), + SNMP_MIB_ITEM("AddAddrTxDrop", MPTCP_MIB_ADDADDRTXDROP), SNMP_MIB_ITEM("EchoAdd", MPTCP_MIB_ECHOADD), + SNMP_MIB_ITEM("EchoAddTx", MPTCP_MIB_ECHOADDTX), + SNMP_MIB_ITEM("EchoAddTxDrop", MPTCP_MIB_ECHOADDTXDROP), SNMP_MIB_ITEM("PortAdd", MPTCP_MIB_PORTADD), SNMP_MIB_ITEM("AddAddrDrop", MPTCP_MIB_ADDADDRDROP), SNMP_MIB_ITEM("MPJoinPortSynRx", MPTCP_MIB_JOINPORTSYNRX), @@ -44,6 +48,8 @@ static const struct snmp_mib mptcp_snmp_list[] = { SNMP_MIB_ITEM("MismatchPortAckRx", MPTCP_MIB_MISMATCHPORTACKRX), SNMP_MIB_ITEM("RmAddr", MPTCP_MIB_RMADDR), SNMP_MIB_ITEM("RmAddrDrop", MPTCP_MIB_RMADDRDROP), + SNMP_MIB_ITEM("RmAddrTx", MPTCP_MIB_RMADDRTX), + SNMP_MIB_ITEM("RmAddrTxDrop", MPTCP_MIB_RMADDRTXDROP), SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW), SNMP_MIB_ITEM("MPPrioTx", MPTCP_MIB_MPPRIOTX), SNMP_MIB_ITEM("MPPrioRx", MPTCP_MIB_MPPRIORX), diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h index 2be3596374f4..cae71d947252 100644 --- a/net/mptcp/mib.h +++ b/net/mptcp/mib.h @@ -27,7 +27,15 @@ enum linux_mptcp_mib_field { MPTCP_MIB_NODSSWINDOW, /* Segments not in MPTCP windows */ MPTCP_MIB_DUPDATA, /* Segments discarded due to duplicate DSS */ MPTCP_MIB_ADDADDR, /* Received ADD_ADDR with echo-flag=0 */ + MPTCP_MIB_ADDADDRTX, /* Sent ADD_ADDR with echo-flag=0 */ + MPTCP_MIB_ADDADDRTXDROP, /* ADD_ADDR with echo-flag=0 not send due to + * resource exhaustion + */ MPTCP_MIB_ECHOADD, /* Received ADD_ADDR with echo-flag=1 */ + MPTCP_MIB_ECHOADDTX, /* Send ADD_ADDR with echo-flag=1 */ + MPTCP_MIB_ECHOADDTXDROP, /* ADD_ADDR with echo-flag=1 not send due + * to resource exhaustion + */ MPTCP_MIB_PORTADD, /* Received ADD_ADDR with a port-number */ MPTCP_MIB_ADDADDRDROP, /* Dropped incoming ADD_ADDR */ MPTCP_MIB_JOINPORTSYNRX, /* Received a SYN MP_JOIN with a different port-number */ @@ -37,6 +45,8 @@ enum linux_mptcp_mib_field { MPTCP_MIB_MISMATCHPORTACKRX, /* Received an ACK MP_JOIN with a mismatched port-number */ MPTCP_MIB_RMADDR, /* Received RM_ADDR */ MPTCP_MIB_RMADDRDROP, /* Dropped incoming RM_ADDR */ + MPTCP_MIB_RMADDRTX, /* Sent RM_ADDR */ + MPTCP_MIB_RMADDRTXDROP, /* RM_ADDR not sent due to resource exhaustion */ MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */ MPTCP_MIB_MPPRIOTX, /* Transmit a MP_PRIO */ MPTCP_MIB_MPPRIORX, /* Received a MP_PRIO */ @@ -63,6 +73,14 @@ struct mptcp_mib { unsigned long mibs[LINUX_MIB_MPTCP_MAX]; }; +static inline void MPTCP_ADD_STATS(struct net *net, + enum linux_mptcp_mib_field field, + int val) +{ + if (likely(net->mib.mptcp_statistics)) + SNMP_ADD_STATS(net->mib.mptcp_statistics, field, val); +} + static inline void MPTCP_INC_STATS(struct net *net, enum linux_mptcp_mib_field field) { diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 
19a01b6566f1..8a8083207be4 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -687,9 +687,12 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * } opts->suboptions |= OPTION_MPTCP_ADD_ADDR; if (!echo) { + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDRTX); opts->ahmac = add_addr_generate_hmac(msk->local_key, msk->remote_key, &opts->addr); + } else { + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADDTX); } pr_debug("addr_id=%d, ahmac=%llu, echo=%d, port=%d", opts->addr.id, opts->ahmac, echo, ntohs(opts->addr.port)); @@ -723,7 +726,7 @@ static bool mptcp_established_options_rm_addr(struct sock *sk, for (i = 0; i < opts->rm_list.nr; i++) pr_debug("rm_list_ids[%d]=%d", i, opts->rm_list.ids[i]); - + MPTCP_ADD_STATS(sock_net(sk), MPTCP_MIB_RMADDRTX, opts->rm_list.nr); return true; } diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index 78c924506e83..7d03b5fd8200 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -26,7 +26,8 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk, if (add_addr & (echo ? BIT(MPTCP_ADD_ADDR_ECHO) : BIT(MPTCP_ADD_ADDR_SIGNAL))) { - pr_warn("addr_signal error, add_addr=%d, echo=%d", add_addr, echo); + MPTCP_INC_STATS(sock_net((struct sock *)msk), + echo ? MPTCP_MIB_ECHOADDTXDROP : MPTCP_MIB_ADDADDRTXDROP); return -EINVAL; } @@ -48,7 +49,8 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_ pr_debug("msk=%p, rm_list_nr=%d", msk, rm_list->nr); if (rm_addr) { - pr_warn("addr_signal error, rm_addr=%d", rm_addr); + MPTCP_ADD_STATS(sock_net((struct sock *)msk), + MPTCP_MIB_RMADDRTXDROP, rm_list->nr); return -EINVAL; } diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index 26310c17b4c6..0886ed2c59ea 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -1492,7 +1492,7 @@ chk_add_nr() fi echo -n " - echo " - count=$(ip netns exec $ns1 nstat -as | grep MPTcpExtEchoAdd | awk '{print $2}') + count=$(ip netns exec $ns1 nstat -as MPTcpExtEchoAdd | grep MPTcpExtEchoAdd | awk '{print $2}') [ -z "$count" ] && count=0 if [ "$count" != "$echo_nr" ]; then echo "[fail] got $count ADD_ADDR echo[s] expected $echo_nr" @@ -1614,7 +1614,7 @@ chk_rm_nr() fi printf "%-${nr_blank}s %s" " " "rm " - count=$(ip netns exec $addr_ns nstat -as | grep MPTcpExtRmAddr | awk '{print $2}') + count=$(ip netns exec $addr_ns nstat -as MPTcpExtRmAddr | grep MPTcpExtRmAddr | awk '{print $2}') [ -z "$count" ] && count=0 if [ "$count" != "$rm_addr_nr" ]; then echo "[fail] got $count RM_ADDR[s] expected $rm_addr_nr" -- cgit v1.2.3 From 9378096e8a656fb5c4099b26b1370c56f056eab9 Mon Sep 17 00:00:00 2001 From: Aditi Ghag Date: Fri, 19 May 2023 22:51:49 +0000 Subject: bpf: tcp: Avoid taking fast sock lock in iterator This is a preparatory commit to replace `lock_sock_fast` with `lock_sock`,and facilitate BPF programs executed from the TCP sockets iterator to be able to destroy TCP sockets using the bpf_sock_destroy kfunc (implemented in follow-up commits). Previously, BPF TCP iterator was acquiring the sock lock with BH disabled. This led to scenarios where the sockets hash table bucket lock can be acquired with BH enabled in some path versus disabled in other. In such situation, kernel issued a warning since it thinks that in the BH enabled path the same bucket lock *might* be acquired again in the softirq context (BH disabled), which will lead to a potential dead lock. 
Since bpf_sock_destroy also happens in a process context, the potential deadlock warning is likely a false alarm. Here is a snippet of annotated stack trace that motivated this change: ``` Possible interrupt unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&h->lhash2[i].lock); local_bh_disable(); lock(&h->lhash2[i].lock); kernel imagined possible scenario: local_bh_disable(); /* Possible softirq */ lock(&h->lhash2[i].lock); *** Potential Deadlock *** process context: lock_acquire+0xcd/0x330 _raw_spin_lock+0x33/0x40 ------> Acquire (bucket) lhash2.lock with BH enabled __inet_hash+0x4b/0x210 inet_csk_listen_start+0xe6/0x100 inet_listen+0x95/0x1d0 __sys_listen+0x69/0xb0 __x64_sys_listen+0x14/0x20 do_syscall_64+0x3c/0x90 entry_SYSCALL_64_after_hwframe+0x72/0xdc bpf_sock_destroy run from iterator: lock_acquire+0xcd/0x330 _raw_spin_lock+0x33/0x40 ------> Acquire (bucket) lhash2.lock with BH disabled inet_unhash+0x9a/0x110 tcp_set_state+0x6a/0x210 tcp_abort+0x10d/0x200 bpf_prog_6793c5ca50c43c0d_iter_tcp6_server+0xa4/0xa9 bpf_iter_run_prog+0x1ff/0x340 ------> lock_sock_fast that acquires sock lock with BH disabled bpf_iter_tcp_seq_show+0xca/0x190 bpf_seq_read+0x177/0x450 ``` Also, Yonghong reported a deadlock for non-listening TCP sockets that this change resolves. Previously, `lock_sock_fast` held the sock spin lock with BH which was again being acquired in `tcp_abort`: ``` watchdog: BUG: soft lockup - CPU#0 stuck for 86s! [test_progs:2331] RIP: 0010:queued_spin_lock_slowpath+0xd8/0x500 Call Trace: _raw_spin_lock+0x84/0x90 tcp_abort+0x13c/0x1f0 bpf_prog_88539c5453a9dd47_iter_tcp6_client+0x82/0x89 bpf_iter_run_prog+0x1aa/0x2c0 ? preempt_count_sub+0x1c/0xd0 ? from_kuid_munged+0x1c8/0x210 bpf_iter_tcp_seq_show+0x14e/0x1b0 bpf_seq_read+0x36c/0x6a0 bpf_iter_tcp_seq_show lock_sock_fast __lock_sock_fast spin_lock_bh(&sk->sk_lock.slock); /* * Fast path return with bottom halves disabled and * sock::sk_lock.slock held.* */ ... 
tcp_abort local_bh_disable(); spin_lock(&((sk)->sk_lock.slock)); // from bh_lock_sock(sk) ``` With the switch to `lock_sock`, it calls `spin_unlock_bh` before returning: ``` lock_sock lock_sock_nested spin_lock_bh(&sk->sk_lock.slock); : spin_unlock_bh(&sk->sk_lock.slock); ``` Acked-by: Yonghong Song Acked-by: Stanislav Fomichev Signed-off-by: Aditi Ghag Link: https://lore.kernel.org/r/20230519225157.760788-2-aditi.ghag@isovalent.com Signed-off-by: Martin KaFai Lau --- net/ipv4/tcp_ipv4.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index db24ed8f8509..6e945199709b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2962,7 +2962,6 @@ static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v) struct bpf_iter_meta meta; struct bpf_prog *prog; struct sock *sk = v; - bool slow; uid_t uid; int ret; @@ -2970,7 +2969,7 @@ static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v) return 0; if (sk_fullsock(sk)) - slow = lock_sock_fast(sk); + lock_sock(sk); if (unlikely(sk_unhashed(sk))) { ret = SEQ_SKIP; @@ -2994,7 +2993,7 @@ static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v) unlock: if (sk_fullsock(sk)) - unlock_sock_fast(sk, slow); + release_sock(sk); return ret; } -- cgit v1.2.3 From f44b1c515833c59701c86f92d47b4edd478fb0f3 Mon Sep 17 00:00:00 2001 From: Aditi Ghag Date: Fri, 19 May 2023 22:51:50 +0000 Subject: udp: seq_file: Helper function to match socket attributes This is a preparatory commit to refactor code that matches socket attributes in iterators to a helper function, and use it in the proc fs iterator. Signed-off-by: Aditi Ghag Link: https://lore.kernel.org/r/20230519225157.760788-3-aditi.ghag@isovalent.com Signed-off-by: Martin KaFai Lau --- net/ipv4/udp.c | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index aa32afd871ee..9d56a96672c6 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2988,6 +2988,16 @@ EXPORT_SYMBOL(udp_prot); /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PROC_FS +static unsigned short seq_file_family(const struct seq_file *seq); +static bool seq_sk_match(struct seq_file *seq, const struct sock *sk) +{ + unsigned short family = seq_file_family(seq); + + /* AF_UNSPEC is used as a match all */ + return ((family == AF_UNSPEC || family == sk->sk_family) && + net_eq(sock_net(sk), seq_file_net(seq))); +} + static struct udp_table *udp_get_table_afinfo(struct udp_seq_afinfo *afinfo, struct net *net) { @@ -3018,10 +3028,7 @@ static struct sock *udp_get_first(struct seq_file *seq, int start) spin_lock_bh(&hslot->lock); sk_for_each(sk, &hslot->head) { - if (!net_eq(sock_net(sk), net)) - continue; - if (afinfo->family == AF_UNSPEC || - sk->sk_family == afinfo->family) + if (seq_sk_match(seq, sk)) goto found; } spin_unlock_bh(&hslot->lock); @@ -3045,9 +3052,7 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) do { sk = sk_next(sk); - } while (sk && (!net_eq(sock_net(sk), net) || - (afinfo->family != AF_UNSPEC && - sk->sk_family != afinfo->family))); + } while (sk && !seq_sk_match(seq, sk)); if (!sk) { udptable = udp_get_table_afinfo(afinfo, net); @@ -3210,6 +3215,21 @@ static const struct seq_operations bpf_iter_udp_seq_ops = { }; #endif +static unsigned short seq_file_family(const struct seq_file *seq) +{ + const struct udp_seq_afinfo *afinfo; + +#ifdef CONFIG_BPF_SYSCALL + /* BPF 
iterator: bpf programs to filter sockets. */ + if (seq->op == &bpf_iter_udp_seq_ops) + return AF_UNSPEC; +#endif + + /* Proc fs iterator */ + afinfo = pde_data(file_inode(seq->file)); + return afinfo->family; +} + const struct seq_operations udp_seq_ops = { .start = udp_seq_start, .next = udp_seq_next, -- cgit v1.2.3 From 7625d2e9741c1f6e08ee79c28a1e27bbb5071805 Mon Sep 17 00:00:00 2001 From: Aditi Ghag Date: Fri, 19 May 2023 22:51:51 +0000 Subject: bpf: udp: Encapsulate logic to get udp table This is a preparatory commit that encapsulates the logic to get udp table in iterator inside udp_get_table_afinfo, and renames the function to `udp_get_table_seq` accordingly. Suggested-by: Martin KaFai Lau Signed-off-by: Aditi Ghag Link: https://lore.kernel.org/r/20230519225157.760788-4-aditi.ghag@isovalent.com Signed-off-by: Martin KaFai Lau --- net/ipv4/udp.c | 35 ++++++++++++----------------------- 1 file changed, 12 insertions(+), 23 deletions(-) (limited to 'net') diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 9d56a96672c6..9dc98ac9a292 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2998,9 +2998,16 @@ static bool seq_sk_match(struct seq_file *seq, const struct sock *sk) net_eq(sock_net(sk), seq_file_net(seq))); } -static struct udp_table *udp_get_table_afinfo(struct udp_seq_afinfo *afinfo, - struct net *net) +static struct udp_table *udp_get_table_seq(struct seq_file *seq, + struct net *net) { + const struct udp_iter_state *state = seq->private; + const struct udp_seq_afinfo *afinfo; + + if (state->bpf_seq_afinfo) + return net->ipv4.udp_table; + + afinfo = pde_data(file_inode(seq->file)); return afinfo->udp_table ? : net->ipv4.udp_table; } @@ -3008,16 +3015,10 @@ static struct sock *udp_get_first(struct seq_file *seq, int start) { struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); - struct udp_seq_afinfo *afinfo; struct udp_table *udptable; struct sock *sk; - if (state->bpf_seq_afinfo) - afinfo = state->bpf_seq_afinfo; - else - afinfo = pde_data(file_inode(seq->file)); - - udptable = udp_get_table_afinfo(afinfo, net); + udptable = udp_get_table_seq(seq, net); for (state->bucket = start; state->bucket <= udptable->mask; ++state->bucket) { @@ -3042,20 +3043,14 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) { struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); - struct udp_seq_afinfo *afinfo; struct udp_table *udptable; - if (state->bpf_seq_afinfo) - afinfo = state->bpf_seq_afinfo; - else - afinfo = pde_data(file_inode(seq->file)); - do { sk = sk_next(sk); } while (sk && !seq_sk_match(seq, sk)); if (!sk) { - udptable = udp_get_table_afinfo(afinfo, net); + udptable = udp_get_table_seq(seq, net); if (state->bucket <= udptable->mask) spin_unlock_bh(&udptable->hash[state->bucket].lock); @@ -3101,15 +3096,9 @@ EXPORT_SYMBOL(udp_seq_next); void udp_seq_stop(struct seq_file *seq, void *v) { struct udp_iter_state *state = seq->private; - struct udp_seq_afinfo *afinfo; struct udp_table *udptable; - if (state->bpf_seq_afinfo) - afinfo = state->bpf_seq_afinfo; - else - afinfo = pde_data(file_inode(seq->file)); - - udptable = udp_get_table_afinfo(afinfo, seq_file_net(seq)); + udptable = udp_get_table_seq(seq, seq_file_net(seq)); if (state->bucket <= udptable->mask) spin_unlock_bh(&udptable->hash[state->bucket].lock); -- cgit v1.2.3 From e4fe1bf13e09019578b9b93b942fff3d76ed5793 Mon Sep 17 00:00:00 2001 From: Aditi Ghag Date: Fri, 19 May 2023 22:51:52 +0000 Subject: udp: seq_file: Remove bpf_seq_afinfo from 
udp_iter_state This is a preparatory commit to remove the field. The field was previously shared between proc fs and BPF UDP socket iterators. As the follow-up commits will decouple the implementation for the iterators, remove the field. As for BPF socket iterator, filtering of sockets is expected to be done in BPF programs. Suggested-by: Martin KaFai Lau Signed-off-by: Aditi Ghag Link: https://lore.kernel.org/r/20230519225157.760788-5-aditi.ghag@isovalent.com Signed-off-by: Martin KaFai Lau --- include/net/udp.h | 1 - net/ipv4/udp.c | 27 +++++++-------------------- 2 files changed, 7 insertions(+), 21 deletions(-) (limited to 'net') diff --git a/include/net/udp.h b/include/net/udp.h index de4b528522bb..5cad44318d71 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -437,7 +437,6 @@ struct udp_seq_afinfo { struct udp_iter_state { struct seq_net_private p; int bucket; - struct udp_seq_afinfo *bpf_seq_afinfo; }; void *udp_seq_start(struct seq_file *seq, loff_t *pos); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 9dc98ac9a292..0053386e938e 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2998,14 +2998,18 @@ static bool seq_sk_match(struct seq_file *seq, const struct sock *sk) net_eq(sock_net(sk), seq_file_net(seq))); } +#ifdef CONFIG_BPF_SYSCALL +static const struct seq_operations bpf_iter_udp_seq_ops; +#endif static struct udp_table *udp_get_table_seq(struct seq_file *seq, struct net *net) { - const struct udp_iter_state *state = seq->private; const struct udp_seq_afinfo *afinfo; - if (state->bpf_seq_afinfo) +#ifdef CONFIG_BPF_SYSCALL + if (seq->op == &bpf_iter_udp_seq_ops) return net->ipv4.udp_table; +#endif afinfo = pde_data(file_inode(seq->file)); return afinfo->udp_table ? : net->ipv4.udp_table; @@ -3429,28 +3433,11 @@ DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta, static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux) { - struct udp_iter_state *st = priv_data; - struct udp_seq_afinfo *afinfo; - int ret; - - afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN); - if (!afinfo) - return -ENOMEM; - - afinfo->family = AF_UNSPEC; - afinfo->udp_table = NULL; - st->bpf_seq_afinfo = afinfo; - ret = bpf_iter_init_seq_net(priv_data, aux); - if (ret) - kfree(afinfo); - return ret; + return bpf_iter_init_seq_net(priv_data, aux); } static void bpf_iter_fini_udp(void *priv_data) { - struct udp_iter_state *st = priv_data; - - kfree(st->bpf_seq_afinfo); bpf_iter_fini_seq_net(priv_data); } -- cgit v1.2.3 From c96dac8d369ffd713a45f4e5c30f23c47a1671f0 Mon Sep 17 00:00:00 2001 From: Aditi Ghag Date: Fri, 19 May 2023 22:51:53 +0000 Subject: bpf: udp: Implement batching for sockets iterator Batch UDP sockets from BPF iterator that allows for overlapping locking semantics in BPF/kernel helpers executed in BPF programs. This facilitates BPF socket destroy kfunc (introduced by follow-up commits) to execute from BPF iterator programs. Previously, BPF iterators acquired the sock lock and sockets hash table bucket lock while executing BPF programs. This prevented BPF helpers that again acquire these locks from being executed from BPF iterators. With the batching approach, we acquire a bucket lock, batch all the bucket sockets, and then release the bucket lock. This enables BPF or kernel helpers to skip sock locking when invoked in the supported BPF contexts. The batching logic is similar to the logic implemented in TCP iterator: https://lore.kernel.org/bpf/20210701200613.1036157-1-kafai@fb.com/.
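The shape of the batching step in isolation (illustrative sketch with made-up types, not the code added by this patch):

```c
/* Illustrative sketch, not the actual implementation: hold the bucket
 * spinlock only long enough to take references on the sockets in the
 * bucket, then let the iterator run the BPF program on the batch without
 * any bucket lock held.
 */
#include <linux/spinlock.h>
#include <net/sock.h>

struct example_bucket {
	spinlock_t lock;
	struct hlist_head head;
};

struct example_iter {
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	struct sock **batch;
};

static struct sock *example_batch_bucket(struct example_iter *it,
					 struct example_bucket *b)
{
	struct sock *sk;

	it->cur_sk = 0;
	it->end_sk = 0;

	spin_lock_bh(&b->lock);
	hlist_for_each_entry(sk, &b->head, sk_node) {
		if (it->end_sk < it->max_sk) {
			sock_hold(sk);	/* keeps sk valid after unlock */
			it->batch[it->end_sk++] = sk;
		}
	}
	spin_unlock_bh(&b->lock);

	/* The caller walks it->batch[] and sock_put()s each entry when done. */
	return it->end_sk ? it->batch[0] : NULL;
}
```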
Suggested-by: Martin KaFai Lau Signed-off-by: Aditi Ghag Link: https://lore.kernel.org/r/20230519225157.760788-6-aditi.ghag@isovalent.com Signed-off-by: Martin KaFai Lau --- net/ipv4/udp.c | 205 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 199 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 0053386e938e..0c999aa5ab30 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -3155,6 +3155,143 @@ struct bpf_iter__udp { int bucket __aligned(8); }; +struct bpf_udp_iter_state { + struct udp_iter_state state; + unsigned int cur_sk; + unsigned int end_sk; + unsigned int max_sk; + int offset; + struct sock **batch; + bool st_bucket_done; +}; + +static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter, + unsigned int new_batch_sz); +static struct sock *bpf_iter_udp_batch(struct seq_file *seq) +{ + struct bpf_udp_iter_state *iter = seq->private; + struct udp_iter_state *state = &iter->state; + struct net *net = seq_file_net(seq); + struct udp_table *udptable; + unsigned int batch_sks = 0; + bool resized = false; + struct sock *sk; + + /* The current batch is done, so advance the bucket. */ + if (iter->st_bucket_done) { + state->bucket++; + iter->offset = 0; + } + + udptable = udp_get_table_seq(seq, net); + +again: + /* New batch for the next bucket. + * Iterate over the hash table to find a bucket with sockets matching + * the iterator attributes, and return the first matching socket from + * the bucket. The remaining matched sockets from the bucket are batched + * before releasing the bucket lock. This allows BPF programs that are + * called in seq_show to acquire the bucket lock if needed. + */ + iter->cur_sk = 0; + iter->end_sk = 0; + iter->st_bucket_done = false; + batch_sks = 0; + + for (; state->bucket <= udptable->mask; state->bucket++) { + struct udp_hslot *hslot2 = &udptable->hash2[state->bucket]; + + if (hlist_empty(&hslot2->head)) { + iter->offset = 0; + continue; + } + + spin_lock_bh(&hslot2->lock); + udp_portaddr_for_each_entry(sk, &hslot2->head) { + if (seq_sk_match(seq, sk)) { + /* Resume from the last iterated socket at the + * offset in the bucket before iterator was stopped. + */ + if (iter->offset) { + --iter->offset; + continue; + } + if (iter->end_sk < iter->max_sk) { + sock_hold(sk); + iter->batch[iter->end_sk++] = sk; + } + batch_sks++; + } + } + spin_unlock_bh(&hslot2->lock); + + if (iter->end_sk) + break; + + /* Reset the current bucket's offset before moving to the next bucket. */ + iter->offset = 0; + } + + /* All done: no batch made. */ + if (!iter->end_sk) + return NULL; + + if (iter->end_sk == batch_sks) { + /* Batching is done for the current bucket; return the first + * socket to be iterated from the batch. + */ + iter->st_bucket_done = true; + goto done; + } + if (!resized && !bpf_iter_udp_realloc_batch(iter, batch_sks * 3 / 2)) { + resized = true; + /* After allocating a larger batch, retry one more time to grab + * the whole bucket. + */ + state->bucket--; + goto again; + } +done: + return iter->batch[0]; +} + +static void *bpf_iter_udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_udp_iter_state *iter = seq->private; + struct sock *sk; + + /* Whenever seq_next() is called, the iter->cur_sk is + * done with seq_show(), so unref the iter->cur_sk. 
+ */ + if (iter->cur_sk < iter->end_sk) { + sock_put(iter->batch[iter->cur_sk++]); + ++iter->offset; + } + + /* After updating iter->cur_sk, check if there are more sockets + * available in the current bucket batch. + */ + if (iter->cur_sk < iter->end_sk) + sk = iter->batch[iter->cur_sk]; + else + /* Prepare a new batch. */ + sk = bpf_iter_udp_batch(seq); + + ++*pos; + return sk; +} + +static void *bpf_iter_udp_seq_start(struct seq_file *seq, loff_t *pos) +{ + /* bpf iter does not support lseek, so it always + * continue from where it was stop()-ped. + */ + if (*pos) + return bpf_iter_udp_batch(seq); + + return SEQ_START_TOKEN; +} + static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, struct udp_sock *udp_sk, uid_t uid, int bucket) { @@ -3175,18 +3312,37 @@ static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v) struct bpf_prog *prog; struct sock *sk = v; uid_t uid; + int ret; if (v == SEQ_START_TOKEN) return 0; + lock_sock(sk); + + if (unlikely(sk_unhashed(sk))) { + ret = SEQ_SKIP; + goto unlock; + } + uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); meta.seq = seq; prog = bpf_iter_get_info(&meta, false); - return udp_prog_seq_show(prog, &meta, v, uid, state->bucket); + ret = udp_prog_seq_show(prog, &meta, v, uid, state->bucket); + +unlock: + release_sock(sk); + return ret; +} + +static void bpf_iter_udp_put_batch(struct bpf_udp_iter_state *iter) +{ + while (iter->cur_sk < iter->end_sk) + sock_put(iter->batch[iter->cur_sk++]); } static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v) { + struct bpf_udp_iter_state *iter = seq->private; struct bpf_iter_meta meta; struct bpf_prog *prog; @@ -3197,12 +3353,15 @@ static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v) (void)udp_prog_seq_show(prog, &meta, v, 0, 0); } - udp_seq_stop(seq, v); + if (iter->cur_sk < iter->end_sk) { + bpf_iter_udp_put_batch(iter); + iter->st_bucket_done = false; + } } static const struct seq_operations bpf_iter_udp_seq_ops = { - .start = udp_seq_start, - .next = udp_seq_next, + .start = bpf_iter_udp_seq_start, + .next = bpf_iter_udp_seq_next, .stop = bpf_iter_udp_seq_stop, .show = bpf_iter_udp_seq_show, }; @@ -3431,21 +3590,55 @@ static struct pernet_operations __net_initdata udp_sysctl_ops = { DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta, struct udp_sock *udp_sk, uid_t uid, int bucket) +static int bpf_iter_udp_realloc_batch(struct bpf_udp_iter_state *iter, + unsigned int new_batch_sz) +{ + struct sock **new_batch; + + new_batch = kvmalloc_array(new_batch_sz, sizeof(*new_batch), + GFP_USER | __GFP_NOWARN); + if (!new_batch) + return -ENOMEM; + + bpf_iter_udp_put_batch(iter); + kvfree(iter->batch); + iter->batch = new_batch; + iter->max_sk = new_batch_sz; + + return 0; +} + +#define INIT_BATCH_SZ 16 + static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux) { - return bpf_iter_init_seq_net(priv_data, aux); + struct bpf_udp_iter_state *iter = priv_data; + int ret; + + ret = bpf_iter_init_seq_net(priv_data, aux); + if (ret) + return ret; + + ret = bpf_iter_udp_realloc_batch(iter, INIT_BATCH_SZ); + if (ret) + bpf_iter_fini_seq_net(priv_data); + + return ret; } static void bpf_iter_fini_udp(void *priv_data) { + struct bpf_udp_iter_state *iter = priv_data; + bpf_iter_fini_seq_net(priv_data); + kvfree(iter->batch); } static const struct bpf_iter_seq_info udp_seq_info = { .seq_ops = &bpf_iter_udp_seq_ops, .init_seq_private = bpf_iter_init_udp, .fini_seq_private = bpf_iter_fini_udp, - .seq_priv_size = sizeof(struct udp_iter_state), + 
.seq_priv_size = sizeof(struct bpf_udp_iter_state), }; static struct bpf_iter_reg udp_reg_info = { -- cgit v1.2.3 From 4ddbcb886268af8d12a23e6640b39d1d9c652b1b Mon Sep 17 00:00:00 2001 From: Aditi Ghag Date: Fri, 19 May 2023 22:51:55 +0000 Subject: bpf: Add bpf_sock_destroy kfunc The socket destroy kfunc is used to forcefully terminate sockets from certain BPF contexts. We plan to use the capability in Cilium load-balancing to terminate client sockets that continue to connect to deleted backends. The other use case is on-the-fly policy enforcement where existing socket connections prevented by policies need to be forcefully terminated. The kfunc also allows terminating sockets that may or may not be actively sending traffic. The kfunc can currently be called only from BPF TCP and UDP iterators where users can filter, and terminate selected sockets. More specifically, it can only be called from BPF contexts that ensure socket locking in order to allow synchronous execution of protocol specific `diag_destroy` handlers. The previous commit that batches UDP sockets during iteration facilitated a synchronous invocation of the UDP destroy callback from BPF context by skipping socket locks in `udp_abort`. TCP iterator already supported batching of sockets being iterated. To that end, `tracing_iter_filter` callback filter is added so that verifier can restrict the kfunc to programs with `BPF_TRACE_ITER` attach type, and reject other programs. The kfunc takes `sock_common` type argument, even though it expects, and casts them to a `sock` pointer. This enables the verifier to allow the sock_destroy kfunc to be called for TCP with `sock_common` and UDP with `sock` structs. Furthermore, as `sock_common` only has a subset of certain fields of `sock`, casting pointer to the latter type might not always be safe for certain sockets like request sockets, but these have a special handling in the diag_destroy handlers. Additionally, the kfunc is defined with `KF_TRUSTED_ARGS` flag to avoid the cases where a `PTR_TO_BTF_ID` sk is obtained by following another pointer. eg. getting a sk pointer (may be even NULL) by following another sk pointer. The pointer socket argument passed in TCP and UDP iterators is tagged as `PTR_TRUSTED` in {tcp,udp}_reg_info. The TRUSTED arg changes are contributed by Martin KaFai Lau . Signed-off-by: Aditi Ghag Link: https://lore.kernel.org/r/20230519225157.760788-8-aditi.ghag@isovalent.com Signed-off-by: Martin KaFai Lau --- net/core/filter.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++ net/ipv4/tcp.c | 9 +++++--- net/ipv4/tcp_ipv4.c | 2 +- net/ipv4/udp.c | 8 ++++--- 4 files changed, 75 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/core/filter.c b/net/core/filter.c index 451b0ec7f242..968139f4a1ac 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -11723,3 +11723,66 @@ static int __init bpf_kfunc_init(void) return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp); } late_initcall(bpf_kfunc_init); + +/* Disables missing prototype warnings */ +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in vmlinux BTF"); + +/* bpf_sock_destroy: Destroy the given socket with ECONNABORTED error code. + * + * The function expects a non-NULL pointer to a socket, and invokes the + * protocol specific socket destroy handlers. + * + * The helper can only be called from BPF contexts that have acquired the socket + * locks. 
+ * + * Parameters: + * @sock: Pointer to socket to be destroyed + * + * Return: + * On error, may return EPROTONOSUPPORT, EINVAL. + * EPROTONOSUPPORT if protocol specific destroy handler is not supported. + * 0 otherwise + */ +__bpf_kfunc int bpf_sock_destroy(struct sock_common *sock) +{ + struct sock *sk = (struct sock *)sock; + + /* The locking semantics that allow for synchronous execution of the + * destroy handlers are only supported for TCP and UDP. + * Supporting protocols will need to acquire sock lock in the BPF context + * prior to invoking this kfunc. + */ + if (!sk->sk_prot->diag_destroy || (sk->sk_protocol != IPPROTO_TCP && + sk->sk_protocol != IPPROTO_UDP)) + return -EOPNOTSUPP; + + return sk->sk_prot->diag_destroy(sk, ECONNABORTED); +} + +__diag_pop() + +BTF_SET8_START(bpf_sk_iter_kfunc_ids) +BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS) +BTF_SET8_END(bpf_sk_iter_kfunc_ids) + +static int tracing_iter_filter(const struct bpf_prog *prog, u32 kfunc_id) +{ + if (btf_id_set8_contains(&bpf_sk_iter_kfunc_ids, kfunc_id) && + prog->expected_attach_type != BPF_TRACE_ITER) + return -EACCES; + return 0; +} + +static const struct btf_kfunc_id_set bpf_sk_iter_kfunc_set = { + .owner = THIS_MODULE, + .set = &bpf_sk_iter_kfunc_ids, + .filter = tracing_iter_filter, +}; + +static int init_subsystem(void) +{ + return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_sk_iter_kfunc_set); +} +late_initcall(init_subsystem); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4d6392c16b7a..87a04c3d9425 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4682,8 +4682,10 @@ int tcp_abort(struct sock *sk, int err) return 0; } - /* Don't race with userspace socket closes such as tcp_close. */ - lock_sock(sk); + /* BPF context ensures sock locking. */ + if (!has_current_bpf_ctx()) + /* Don't race with userspace socket closes such as tcp_close. 
*/ + lock_sock(sk); if (sk->sk_state == TCP_LISTEN) { tcp_set_state(sk, TCP_CLOSE); @@ -4707,7 +4709,8 @@ int tcp_abort(struct sock *sk, int err) bh_unlock_sock(sk); local_bh_enable(); tcp_write_queue_purge(sk); - release_sock(sk); + if (!has_current_bpf_ctx()) + release_sock(sk); return 0; } EXPORT_SYMBOL_GPL(tcp_abort); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 6e945199709b..6c2a5f1c63c4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -3355,7 +3355,7 @@ static struct bpf_iter_reg tcp_reg_info = { .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__tcp, sk_common), - PTR_TO_BTF_ID_OR_NULL }, + PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED }, }, .get_func_proto = bpf_iter_tcp_get_func_proto, .seq_info = &tcp_seq_info, diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 0c999aa5ab30..6893fb867529 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2930,7 +2930,8 @@ EXPORT_SYMBOL(udp_poll); int udp_abort(struct sock *sk, int err) { - lock_sock(sk); + if (!has_current_bpf_ctx()) + lock_sock(sk); /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing * with close() @@ -2943,7 +2944,8 @@ int udp_abort(struct sock *sk, int err) __udp_disconnect(sk, 0); out: - release_sock(sk); + if (!has_current_bpf_ctx()) + release_sock(sk); return 0; } @@ -3646,7 +3648,7 @@ static struct bpf_iter_reg udp_reg_info = { .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__udp, udp_sk), - PTR_TO_BTF_ID_OR_NULL }, + PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED }, }, .seq_info = &udp_seq_info, }; -- cgit v1.2.3 From dbb99d78522ae3d3f0d69fe7a8a0544a829f1d8e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 19 May 2023 11:32:38 +0200 Subject: net: ipconfig: move ic_nameservers_fallback into #ifdef block The new variable is only used when IPCONFIG_BOOTP is defined and otherwise causes a warning: net/ipv4/ipconfig.c:177:12: error: 'ic_nameservers_fallback' defined but not used [-Werror=unused-variable] Move it next to the user. Fixes: 81ac2722fa19 ("net: ipconfig: Allow DNS to be overwritten by DHCPACK") Signed-off-by: Arnd Bergmann Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- net/ipv4/ipconfig.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 202fa1943ccd..c56b6fe6f0d7 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -173,9 +173,6 @@ static int ic_proto_have_if __initdata; /* MTU for boot device */ static int ic_dev_mtu __initdata; -/* DHCPACK can overwrite DNS if fallback was set upon first BOOTP reply */ -static int ic_nameservers_fallback __initdata; - #ifdef IPCONFIG_DYNAMIC static DEFINE_SPINLOCK(ic_recv_lock); static volatile int ic_got_reply __initdata; /* Proto(s) that replied */ @@ -668,6 +665,9 @@ static struct packet_type bootp_packet_type __initdata = { .func = ic_bootp_recv, }; +/* DHCPACK can overwrite DNS if fallback was set upon first BOOTP reply */ +static int ic_nameservers_fallback __initdata; + /* * Initialize DHCP/BOOTP extension fields in the request. */ -- cgit v1.2.3 From fe79bd65c819cc520aa66de65caae8e4cea29c5a Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 19 May 2023 14:30:36 +0100 Subject: net/tcp: refactor tcp_inet6_sk() Don't keep hand coded offset caluclations and replace it with container_of(). It should be type safer and a bit less confusing. It also makes it with a macro instead of inline function to preserve constness, which was previously casted out like in case of tcp_v6_send_synack(). 
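For readers less familiar with the pattern, a small standalone user-space illustration of what container_of() does (the kernel's container_of_const() used by this patch is the const-preserving variant): it recovers a pointer to the enclosing structure from a pointer to one of its members, with no hand-coded offset arithmetic. Purely illustrative, not taken from the patch.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer { long pad; struct inner in; };

int main(void)
{
	struct outer o = { .pad = 1, .in = { .x = 42 } };
	struct inner *ip = &o.in;
	struct outer *op = container_of(ip, struct outer, in);

	printf("%d\n", op->in.x);	/* prints 42 */
	return 0;
}

In the patch below, container_of_const() recovers the enclosing tcp6_sock from the tcp_sock returned by tcp_sk(sk), and ->inet6 then yields the embedded ipv6_pinfo, replacing the manual "sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo)" offset computation.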
Signed-off-by: Pavel Begunkov Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- net/ipv6/tcp_ipv6.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 7132eb213a7a..d657713d1c71 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -93,12 +93,8 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk, * This avoids a dereference and allow compiler optimizations. * It is a specialized version of inet6_sk_generic(). */ -static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk) -{ - unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo); - - return (struct ipv6_pinfo *)(((u8 *)sk) + offset); -} +#define tcp_inet6_sk(sk) (&container_of_const(tcp_sk(sk), \ + struct tcp6_sock, tcp)->inet6) static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { @@ -533,7 +529,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, struct sk_buff *syn_skb) { struct inet_request_sock *ireq = inet_rsk(req); - struct ipv6_pinfo *np = tcp_inet6_sk(sk); + const struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct ipv6_txoptions *opt; struct flowi6 *fl6 = &fl->u.ip6; struct sk_buff *skb; -- cgit v1.2.3 From b841b901c452d92610f739a36e54978453528876 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:10 +0100 Subject: net: Declare MSG_SPLICE_PAGES internal sendmsg() flag Declare MSG_SPLICE_PAGES, an internal sendmsg() flag, that hints to a network protocol that it should splice pages from the source iterator rather than copying the data if it can. This flag is added to a list that is cleared by sendmsg syscalls on entry. This is intended as a replacement for the ->sendpage() op, allowing a way to splice in several multipage folios in one go. Signed-off-by: David Howells Reviewed-by: Willem de Bruijn cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- include/linux/socket.h | 3 +++ io_uring/net.c | 2 ++ net/socket.c | 2 ++ 3 files changed, 7 insertions(+) (limited to 'net') diff --git a/include/linux/socket.h b/include/linux/socket.h index 13c3a237b9c9..bd1cc3238851 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -327,6 +327,7 @@ struct ucred { */ #define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ +#define MSG_SPLICE_PAGES 0x8000000 /* Splice the pages from the iterator in sendmsg() */ #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ #define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file descriptor received through @@ -337,6 +338,8 @@ struct ucred { #define MSG_CMSG_COMPAT 0 /* We never have 32 bit fixups */ #endif +/* Flags to be cleared on entry by sendmsg and sendmmsg syscalls */ +#define MSG_INTERNAL_SENDMSG_FLAGS (MSG_SPLICE_PAGES) /* Setsockoptions(2) level. 
Thanks to BSD these must match IPPROTO_xxx */ #define SOL_IP 0 diff --git a/io_uring/net.c b/io_uring/net.c index 89e839013837..f7cbb3c7a575 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -389,6 +389,7 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags) if (flags & MSG_WAITALL) min_ret = iov_iter_count(&msg.msg_iter); + flags &= ~MSG_INTERNAL_SENDMSG_FLAGS; msg.msg_flags = flags; ret = sock_sendmsg(sock, &msg); if (ret < min_ret) { @@ -1136,6 +1137,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags) msg_flags |= MSG_DONTWAIT; if (msg_flags & MSG_WAITALL) min_ret = iov_iter_count(&msg.msg_iter); + msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS; msg.msg_flags = msg_flags; msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg; diff --git a/net/socket.c b/net/socket.c index b7e01d0fe082..3df96e9ba4e2 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2138,6 +2138,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags, msg.msg_name = (struct sockaddr *)&address; msg.msg_namelen = addr_len; } + flags &= ~MSG_INTERNAL_SENDMSG_FLAGS; if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; msg.msg_flags = flags; @@ -2483,6 +2484,7 @@ static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys, msg_sys->msg_control = ctl_buf; msg_sys->msg_control_is_user = false; } + flags &= ~MSG_INTERNAL_SENDMSG_FLAGS; msg_sys->msg_flags = flags; if (sock->file->f_flags & O_NONBLOCK) -- cgit v1.2.3 From 96449f90240713bd9bd653d6b15266a1044cfa7b Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:11 +0100 Subject: net: Pass max frags into skb_append_pagefrags() Pass the maximum number of fragments into skb_append_pagefrags() rather than using MAX_SKB_FRAGS so that it can be used from code that wants to specify sysctl_max_skb_frags. 
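A brief, hedged sketch of the resulting calling convention (the five-argument prototype is the one shown in the diff below; the sysctl_max_skb_frags usage mirrors skb_splice_from_iter() introduced in the next patch). Kernel-style fragment for illustration only, with a made-up wrapper name; not a standalone compilable unit.

/* The fragment cap is now the caller's choice. */
static int append_page_capped(struct sk_buff *skb, struct page *page,
			      int offset, size_t size, bool honour_sysctl)
{
	size_t max_frags = honour_sysctl ? READ_ONCE(sysctl_max_skb_frags)
					 : MAX_SKB_FRAGS;

	/* Fails (non-zero) once the skb already carries max_frags fragments
	 * and the new page cannot be coalesced with the last one.
	 */
	return skb_append_pagefrags(skb, page, offset, size, max_frags);
}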
Signed-off-by: David Howells cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 2 +- net/core/skbuff.c | 4 ++-- net/ipv4/ip_output.c | 3 ++- net/unix/af_unix.c | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 8cff3d817131..15011408c47c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1383,7 +1383,7 @@ static inline int skb_pad(struct sk_buff *skb, int pad) #define dev_kfree_skb(a) consume_skb(a) int skb_append_pagefrags(struct sk_buff *skb, struct page *page, - int offset, size_t size); + int offset, size_t size, size_t max_frags); struct skb_seq_state { __u32 lower_offset; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 6724a84ebb09..7f53dcb26ad3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4188,13 +4188,13 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, EXPORT_SYMBOL(skb_find_text); int skb_append_pagefrags(struct sk_buff *skb, struct page *page, - int offset, size_t size) + int offset, size_t size, size_t max_frags) { int i = skb_shinfo(skb)->nr_frags; if (skb_can_coalesce(skb, i, page, offset)) { skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); - } else if (i < MAX_SKB_FRAGS) { + } else if (i < max_frags) { skb_zcopy_downgrade_managed(skb); get_page(page); skb_fill_page_desc_noacc(skb, i, page, offset, size); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 61892268e8a6..52fc840898d8 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1450,7 +1450,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, if (len > size) len = size; - if (skb_append_pagefrags(skb, page, offset, len)) { + if (skb_append_pagefrags(skb, page, offset, len, + MAX_SKB_FRAGS)) { err = -EMSGSIZE; goto error; } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index cc695c9f09ec..dd55506b4632 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2349,7 +2349,7 @@ alloc_skb: newskb = NULL; } - if (skb_append_pagefrags(skb, page, offset, size)) { + if (skb_append_pagefrags(skb, page, offset, size, MAX_SKB_FRAGS)) { tail = skb; goto alloc_skb; } -- cgit v1.2.3 From 2e910b95329c2dc7feffbec00907f9e02d1a850a Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:12 +0100 Subject: net: Add a function to splice pages into an skbuff for MSG_SPLICE_PAGES Add a function to handle MSG_SPLICE_PAGES being passed internally to sendmsg(). Pages are spliced into the given socket buffer if possible and copied in if not (e.g. they're slab pages or have a zero refcount). 
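To make the intended use of the new helper concrete, a hedged sketch of the calling convention: wrap the page in a bio_vec-backed iov_iter and hand it to skb_splice_from_iter(), which splices where it can and copies where it must (e.g. slab pages). This mirrors the callers added later in the series; the wrapper name is made up and the fragment is not a standalone compilable unit.

static ssize_t splice_page_into_skb(struct sk_buff *skb, struct page *page,
				    size_t offset, size_t len, gfp_t gfp)
{
	struct bio_vec bvec;
	struct iov_iter iter;

	bvec_set_page(&bvec, page, len, offset);
	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, len);

	/* Returns the number of bytes spliced or copied, or -EMSGSIZE if the
	 * skb has no room left for more fragments.
	 */
	return skb_splice_from_iter(skb, &iter, len, gfp);
}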
Signed-off-by: David Howells cc: David Ahern cc: Al Viro cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 3 ++ net/core/skbuff.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) (limited to 'net') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 15011408c47c..1b2ebf6113e0 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -5097,5 +5097,8 @@ static inline void skb_mark_for_recycle(struct sk_buff *skb) #endif } +ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, + ssize_t maxsize, gfp_t gfp); + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7f53dcb26ad3..f4a5b51aed22 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -6892,3 +6892,91 @@ nodefer: __kfree_skb(skb); if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) smp_call_function_single_async(cpu, &sd->defer_csd); } + +static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, + size_t offset, size_t len) +{ + const char *kaddr; + __wsum csum; + + kaddr = kmap_local_page(page); + csum = csum_partial(kaddr + offset, len, 0); + kunmap_local(kaddr); + skb->csum = csum_block_add(skb->csum, csum, skb->len); +} + +/** + * skb_splice_from_iter - Splice (or copy) pages to skbuff + * @skb: The buffer to add pages to + * @iter: Iterator representing the pages to be added + * @maxsize: Maximum amount of pages to be added + * @gfp: Allocation flags + * + * This is a common helper function for supporting MSG_SPLICE_PAGES. It + * extracts pages from an iterator and adds them to the socket buffer if + * possible, copying them to fragments if not possible (such as if they're slab + * pages). + * + * Returns the amount of data spliced/copied or -EMSGSIZE if there's + * insufficient space in the buffer to transfer anything. + */ +ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, + ssize_t maxsize, gfp_t gfp) +{ + size_t frag_limit = READ_ONCE(sysctl_max_skb_frags); + struct page *pages[8], **ppages = pages; + ssize_t spliced = 0, ret = 0; + unsigned int i; + + while (iter->count > 0) { + ssize_t space, nr; + size_t off, len; + + ret = -EMSGSIZE; + space = frag_limit - skb_shinfo(skb)->nr_frags; + if (space < 0) + break; + + /* We might be able to coalesce without increasing nr_frags */ + nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages)); + + len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off); + if (len <= 0) { + ret = len ?: -EIO; + break; + } + + i = 0; + do { + struct page *page = pages[i++]; + size_t part = min_t(size_t, PAGE_SIZE - off, len); + + ret = -EIO; + if (WARN_ON_ONCE(!sendpage_ok(page))) + goto out; + + ret = skb_append_pagefrags(skb, page, off, part, + frag_limit); + if (ret < 0) { + iov_iter_revert(iter, len); + goto out; + } + + if (skb->ip_summed == CHECKSUM_NONE) + skb_splice_csum_page(skb, page, off, part); + + off = 0; + spliced += part; + maxsize -= part; + len -= part; + } while (len > 0); + + if (maxsize <= 0) + break; + } + +out: + skb_len_add(skb, spliced); + return spliced ?: ret; +} +EXPORT_SYMBOL(skb_splice_from_iter); -- cgit v1.2.3 From 270a1c3de47e49dd2fc18f48e46b101e48050e78 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:13 +0100 Subject: tcp: Support MSG_SPLICE_PAGES Make TCP's sendmsg() support MSG_SPLICE_PAGES. This causes pages to be spliced or copied (if it cannot be spliced) from the source iterator. 
This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. Signed-off-by: David Howells cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 43 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3d18e295bb2f..2d61150d01f1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1223,7 +1223,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) int flags, err, copied = 0; int mss_now = 0, size_goal, copied_syn = 0; int process_backlog = 0; - bool zc = false; + int zc = 0; long timeo; flags = msg->msg_flags; @@ -1231,7 +1231,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) if ((flags & MSG_ZEROCOPY) && size) { if (msg->msg_ubuf) { uarg = msg->msg_ubuf; - zc = sk->sk_route_caps & NETIF_F_SG; + if (sk->sk_route_caps & NETIF_F_SG) + zc = MSG_ZEROCOPY; } else if (sock_flag(sk, SOCK_ZEROCOPY)) { skb = tcp_write_queue_tail(sk); uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb)); @@ -1239,10 +1240,14 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) err = -ENOBUFS; goto out_err; } - zc = sk->sk_route_caps & NETIF_F_SG; - if (!zc) + if (sk->sk_route_caps & NETIF_F_SG) + zc = MSG_ZEROCOPY; + else uarg_to_msgzc(uarg)->zerocopy = 0; } + } else if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES) && size) { + if (sk->sk_route_caps & NETIF_F_SG) + zc = MSG_SPLICE_PAGES; } if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) && @@ -1305,7 +1310,7 @@ restart: goto do_error; while (msg_data_left(msg)) { - int copy = 0; + ssize_t copy = 0; skb = tcp_write_queue_tail(sk); if (skb) @@ -1346,7 +1351,7 @@ new_segment: if (copy > msg_data_left(msg)) copy = msg_data_left(msg); - if (!zc) { + if (zc == 0) { bool merge = true; int i = skb_shinfo(skb)->nr_frags; struct page_frag *pfrag = sk_page_frag(sk); @@ -1391,7 +1396,7 @@ new_segment: page_ref_inc(pfrag->page); } pfrag->offset += copy; - } else { + } else if (zc == MSG_ZEROCOPY) { /* First append to a fragless skb builds initial * pure zerocopy skb */ @@ -1412,6 +1417,30 @@ new_segment: if (err < 0) goto do_error; copy = err; + } else if (zc == MSG_SPLICE_PAGES) { + /* Splice in data if we can; copy if we can't. */ + if (tcp_downgrade_zcopy_pure(sk, skb)) + goto wait_for_space; + copy = tcp_wmem_schedule(sk, copy); + if (!copy) + goto wait_for_space; + + err = skb_splice_from_iter(skb, &msg->msg_iter, copy, + sk->sk_allocation); + if (err < 0) { + if (err == -EMSGSIZE) { + tcp_mark_push(tp, skb); + goto new_segment; + } + goto do_error; + } + copy = err; + + if (!(flags & MSG_NO_SHARED_FRAGS)) + skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; + + sk_wmem_queued_add(sk, copy); + sk_mem_charge(sk, copy); } if (!copied) -- cgit v1.2.3 From c5c37af6ecad955acad82a440b812eb9cd73f77f Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:14 +0100 Subject: tcp: Convert do_tcp_sendpages() to use MSG_SPLICE_PAGES Convert do_tcp_sendpages() to use sendmsg() with MSG_SPLICE_PAGES rather than directly splicing in the pages itself. do_tcp_sendpages() can then be inlined in subsequent patches into its callers. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. 
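Taken together with the previous patch, the caller-side pattern looks roughly like the hedged sketch below: build a bio_vec-backed msghdr, set MSG_SPLICE_PAGES (plus MSG_MORE when more data follows), and call tcp_sendmsg_locked() under the socket lock. This mirrors the converted do_tcp_sendpages()/tcp_sendpage_locked() wrappers in the diffs that follow; the helper name is made up and the fragment is not a standalone compilable unit.

static ssize_t send_page_over_tcp(struct sock *sk, struct page *page,
				  int offset, size_t size, bool more)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };
	ssize_t ret;

	if (more)
		msg.msg_flags |= MSG_MORE;

	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

	lock_sock(sk);
	ret = tcp_sendmsg_locked(sk, &msg, size);
	release_sock(sk);

	return ret;
}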
Signed-off-by: David Howells cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 158 +++------------------------------------------------------ 1 file changed, 7 insertions(+), 151 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2d61150d01f1..f3a0c02678e0 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -974,163 +974,19 @@ static int tcp_wmem_schedule(struct sock *sk, int copy) return min(copy, sk->sk_forward_alloc); } -static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags, - struct page *page, int offset, size_t *size) -{ - struct sk_buff *skb = tcp_write_queue_tail(sk); - struct tcp_sock *tp = tcp_sk(sk); - bool can_coalesce; - int copy, i; - - if (!skb || (copy = size_goal - skb->len) <= 0 || - !tcp_skb_can_collapse_to(skb)) { -new_segment: - if (!sk_stream_memory_free(sk)) - return NULL; - - skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, - tcp_rtx_and_write_queues_empty(sk)); - if (!skb) - return NULL; - -#ifdef CONFIG_TLS_DEVICE - skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); -#endif - tcp_skb_entail(sk, skb); - copy = size_goal; - } - - if (copy > *size) - copy = *size; - - i = skb_shinfo(skb)->nr_frags; - can_coalesce = skb_can_coalesce(skb, i, page, offset); - if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) { - tcp_mark_push(tp, skb); - goto new_segment; - } - if (tcp_downgrade_zcopy_pure(sk, skb)) - return NULL; - - copy = tcp_wmem_schedule(sk, copy); - if (!copy) - return NULL; - - if (can_coalesce) { - skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); - } else { - get_page(page); - skb_fill_page_desc_noacc(skb, i, page, offset, copy); - } - - if (!(flags & MSG_NO_SHARED_FRAGS)) - skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; - - skb->len += copy; - skb->data_len += copy; - skb->truesize += copy; - sk_wmem_queued_add(sk, copy); - sk_mem_charge(sk, copy); - WRITE_ONCE(tp->write_seq, tp->write_seq + copy); - TCP_SKB_CB(skb)->end_seq += copy; - tcp_skb_pcount_set(skb, 0); - - *size = copy; - return skb; -} - ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, size_t size, int flags) { - struct tcp_sock *tp = tcp_sk(sk); - int mss_now, size_goal; - int err; - ssize_t copied; - long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); - - if (IS_ENABLED(CONFIG_DEBUG_VM) && - WARN_ONCE(!sendpage_ok(page), - "page must not be a Slab one and have page_count > 0")) - return -EINVAL; - - /* Wait for a connection to finish. One exception is TCP Fast Open - * (passive side) where data is allowed to be sent before a connection - * is fully established. 
- */ - if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && - !tcp_passive_fastopen(sk)) { - err = sk_stream_wait_connect(sk, &timeo); - if (err != 0) - goto out_err; - } + struct bio_vec bvec; + struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; - sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); + bvec_set_page(&bvec, page, size, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - mss_now = tcp_send_mss(sk, &size_goal, flags); - copied = 0; + if (flags & MSG_SENDPAGE_NOTLAST) + msg.msg_flags |= MSG_MORE; - err = -EPIPE; - if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) - goto out_err; - - while (size > 0) { - struct sk_buff *skb; - size_t copy = size; - - skb = tcp_build_frag(sk, size_goal, flags, page, offset, ©); - if (!skb) - goto wait_for_space; - - if (!copied) - TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; - - copied += copy; - offset += copy; - size -= copy; - if (!size) - goto out; - - if (skb->len < size_goal || (flags & MSG_OOB)) - continue; - - if (forced_push(tp)) { - tcp_mark_push(tp, skb); - __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); - } else if (skb == tcp_send_head(sk)) - tcp_push_one(sk, mss_now); - continue; - -wait_for_space: - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - tcp_push(sk, flags & ~MSG_MORE, mss_now, - TCP_NAGLE_PUSH, size_goal); - - err = sk_stream_wait_memory(sk, &timeo); - if (err != 0) - goto do_error; - - mss_now = tcp_send_mss(sk, &size_goal, flags); - } - -out: - if (copied) { - tcp_tx_timestamp(sk, sk->sk_tsflags); - if (!(flags & MSG_SENDPAGE_NOTLAST)) - tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); - } - return copied; - -do_error: - tcp_remove_empty_skb(sk); - if (copied) - goto out; -out_err: - /* make sure we wake any epoll edge trigger waiter */ - if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { - sk->sk_write_space(sk); - tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); - } - return sk_stream_error(sk, flags, err); + return tcp_sendmsg_locked(sk, &msg, size); } EXPORT_SYMBOL_GPL(do_tcp_sendpages); -- cgit v1.2.3 From ebf2e8860eea66e2c4764316b80c6a5ee5f336ee Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:15 +0100 Subject: tcp_bpf: Inline do_tcp_sendpages as it's now a wrapper around tcp_sendmsg do_tcp_sendpages() is now just a small wrapper around tcp_sendmsg_locked(), so inline it. This is part of replacing ->sendpage() with a call to sendmsg() with MSG_SPLICE_PAGES set. 
Signed-off-by: David Howells cc: John Fastabend cc: Jakub Sitnicki cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/ipv4/tcp_bpf.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 2e9547467edb..0291d15acd19 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -72,11 +72,13 @@ static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes, { bool apply = apply_bytes; struct scatterlist *sge; + struct msghdr msghdr = { .msg_flags = flags | MSG_SPLICE_PAGES, }; struct page *page; int size, ret = 0; u32 off; while (1) { + struct bio_vec bvec; bool has_tx_ulp; sge = sk_msg_elem(msg, msg->sg.start); @@ -88,16 +90,18 @@ static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes, tcp_rate_check_app_limited(sk); retry: has_tx_ulp = tls_sw_has_ctx_tx(sk); - if (has_tx_ulp) { - flags |= MSG_SENDPAGE_NOPOLICY; - ret = kernel_sendpage_locked(sk, - page, off, size, flags); - } else { - ret = do_tcp_sendpages(sk, page, off, size, flags); - } + if (has_tx_ulp) + msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY; + if (flags & MSG_SENDPAGE_NOTLAST) + msghdr.msg_flags |= MSG_MORE; + + bvec_set_page(&bvec, page, size, off); + iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size); + ret = tcp_sendmsg_locked(sk, &msghdr, size); if (ret <= 0) return ret; + if (apply) apply_bytes -= ret; msg->sg.size -= ret; @@ -404,7 +408,7 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) long timeo; int flags; - /* Don't let internal do_tcp_sendpages() flags through */ + /* Don't let internal sendpage flags through */ flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED); flags |= MSG_NO_SHARED_FRAGS; -- cgit v1.2.3 From 7f8816ab4bae9dd42c5720fdad4b102532d4e43a Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:16 +0100 Subject: espintcp: Inline do_tcp_sendpages() do_tcp_sendpages() is now just a small wrapper around tcp_sendmsg_locked(), so inline it, allowing do_tcp_sendpages() to be removed. This is part of replacing ->sendpage() with a call to sendmsg() with MSG_SPLICE_PAGES set. 
Signed-off-by: David Howells cc: Steffen Klassert cc: Herbert Xu cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/xfrm/espintcp.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c index 872b80188e83..3504925babdb 100644 --- a/net/xfrm/espintcp.c +++ b/net/xfrm/espintcp.c @@ -205,14 +205,16 @@ static int espintcp_sendskb_locked(struct sock *sk, struct espintcp_msg *emsg, static int espintcp_sendskmsg_locked(struct sock *sk, struct espintcp_msg *emsg, int flags) { + struct msghdr msghdr = { .msg_flags = flags | MSG_SPLICE_PAGES, }; struct sk_msg *skmsg = &emsg->skmsg; struct scatterlist *sg; int done = 0; int ret; - flags |= MSG_SENDPAGE_NOTLAST; + msghdr.msg_flags |= MSG_SENDPAGE_NOTLAST; sg = &skmsg->sg.data[skmsg->sg.start]; do { + struct bio_vec bvec; size_t size = sg->length - emsg->offset; int offset = sg->offset + emsg->offset; struct page *p; @@ -220,11 +222,13 @@ static int espintcp_sendskmsg_locked(struct sock *sk, emsg->offset = 0; if (sg_is_last(sg)) - flags &= ~MSG_SENDPAGE_NOTLAST; + msghdr.msg_flags &= ~MSG_SENDPAGE_NOTLAST; p = sg_page(sg); retry: - ret = do_tcp_sendpages(sk, p, offset, size, flags); + bvec_set_page(&bvec, p, size, offset); + iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size); + ret = tcp_sendmsg_locked(sk, &msghdr, size); if (ret < 0) { emsg->offset = offset - sg->offset; skmsg->sg.start += done; -- cgit v1.2.3 From e117dcfd646e84a50999a395df3d724feb28b173 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:17 +0100 Subject: tls: Inline do_tcp_sendpages() do_tcp_sendpages() is now just a small wrapper around tcp_sendmsg_locked(), so inline it, allowing do_tcp_sendpages() to be removed. This is part of replacing ->sendpage() with a call to sendmsg() with MSG_SPLICE_PAGES set. Signed-off-by: David Howells cc: Boris Pismenny cc: John Fastabend cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- include/net/tls.h | 2 +- net/tls/tls_main.c | 24 +++++++++++++++--------- 2 files changed, 16 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/include/net/tls.h b/include/net/tls.h index 6056ce5a2aa5..5791ca7a189c 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -258,7 +258,7 @@ struct tls_context { struct scatterlist *partially_sent_record; u16 partially_sent_offset; - bool in_tcp_sendpages; + bool splicing_pages; bool pending_open_record_frags; struct mutex tx_lock; /* protects partially_sent_* fields and diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index f2e7302a4d96..3d45fdb5c4e9 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -125,7 +125,10 @@ int tls_push_sg(struct sock *sk, u16 first_offset, int flags) { - int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST; + struct bio_vec bvec; + struct msghdr msg = { + .msg_flags = MSG_SENDPAGE_NOTLAST | MSG_SPLICE_PAGES | flags, + }; int ret = 0; struct page *p; size_t size; @@ -134,16 +137,19 @@ int tls_push_sg(struct sock *sk, size = sg->length - offset; offset += sg->offset; - ctx->in_tcp_sendpages = true; + ctx->splicing_pages = true; while (1) { if (sg_is_last(sg)) - sendpage_flags = flags; + msg.msg_flags = flags; /* is sending application-limited? 
*/ tcp_rate_check_app_limited(sk); p = sg_page(sg); retry: - ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags); + bvec_set_page(&bvec, p, size, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); + + ret = tcp_sendmsg_locked(sk, &msg, size); if (ret != size) { if (ret > 0) { @@ -155,7 +161,7 @@ retry: offset -= sg->offset; ctx->partially_sent_offset = offset; ctx->partially_sent_record = (void *)sg; - ctx->in_tcp_sendpages = false; + ctx->splicing_pages = false; return ret; } @@ -169,7 +175,7 @@ retry: size = sg->length; } - ctx->in_tcp_sendpages = false; + ctx->splicing_pages = false; return 0; } @@ -247,11 +253,11 @@ static void tls_write_space(struct sock *sk) { struct tls_context *ctx = tls_get_ctx(sk); - /* If in_tcp_sendpages call lower protocol write space handler + /* If splicing_pages call lower protocol write space handler * to ensure we wake up any waiting operations there. For example - * if do_tcp_sendpages where to call sk_wait_event. + * if splicing pages where to call sk_wait_event. */ - if (ctx->in_tcp_sendpages) { + if (ctx->splicing_pages) { ctx->sk_write_space(sk); return; } -- cgit v1.2.3 From 5367f9bbb86a9fa377f8cfc673d9b8a446f3e917 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:19 +0100 Subject: tcp: Fold do_tcp_sendpages() into tcp_sendpage_locked() Fold do_tcp_sendpages() into its last remaining caller, tcp_sendpage_locked(). Signed-off-by: David Howells cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- include/net/tcp.h | 2 -- net/ipv4/tcp.c | 21 +++++++-------------- 2 files changed, 7 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/include/net/tcp.h b/include/net/tcp.h index 04a31643cda3..02a6cff1827e 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -333,8 +333,6 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, size_t size, int flags); -ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, - size_t size, int flags); int tcp_send_mss(struct sock *sk, int *size_goal, int flags); void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle, int size_goal); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f3a0c02678e0..e9506cebecce 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -974,12 +974,17 @@ static int tcp_wmem_schedule(struct sock *sk, int copy) return min(copy, sk->sk_forward_alloc); } -ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, - size_t size, int flags) +int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, + size_t size, int flags) { struct bio_vec bvec; struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; + if (!(sk->sk_route_caps & NETIF_F_SG)) + return sock_no_sendpage_locked(sk, page, offset, size, flags); + + tcp_rate_check_app_limited(sk); /* is sending application-limited? */ + bvec_set_page(&bvec, page, size, offset); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); @@ -988,18 +993,6 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, return tcp_sendmsg_locked(sk, &msg, size); } -EXPORT_SYMBOL_GPL(do_tcp_sendpages); - -int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, - size_t size, int flags) -{ - if (!(sk->sk_route_caps & NETIF_F_SG)) - return sock_no_sendpage_locked(sk, page, offset, size, flags); - - tcp_rate_check_app_limited(sk); /* is sending application-limited? 
*/ - - return do_tcp_sendpages(sk, page, offset, size, flags); -} EXPORT_SYMBOL_GPL(tcp_sendpage_locked); int tcp_sendpage(struct sock *sk, struct page *page, int offset, -- cgit v1.2.3 From 7da0dde68486b2d5bd7c689a9b327b77efecdfd0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:20 +0100 Subject: ip, udp: Support MSG_SPLICE_PAGES Make IP/UDP sendmsg() support MSG_SPLICE_PAGES. This causes pages to be spliced from the source iterator. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. Signed-off-by: David Howells cc: Willem de Bruijn cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/ipv4/ip_output.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'net') diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 52fc840898d8..c7db973b5d29 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1048,6 +1048,14 @@ static int __ip_append_data(struct sock *sk, skb_zcopy_set(skb, uarg, &extra_uref); } } + } else if ((flags & MSG_SPLICE_PAGES) && length) { + if (inet->hdrincl) + return -EPERM; + if (rt->dst.dev->features & NETIF_F_SG) + /* We need an empty buffer to attach stuff to */ + paged = true; + else + flags &= ~MSG_SPLICE_PAGES; } cork->length += length; @@ -1207,6 +1215,15 @@ alloc_new_skb: err = -EFAULT; goto error; } + } else if (flags & MSG_SPLICE_PAGES) { + struct msghdr *msg = from; + + err = skb_splice_from_iter(skb, &msg->msg_iter, copy, + sk->sk_allocation); + if (err < 0) + goto error; + copy = err; + wmem_alloc_delta += copy; } else if (!zc) { int i = skb_shinfo(skb)->nr_frags; -- cgit v1.2.3 From 6d8192bd69bb431ef6b2558be3b5746b706cd252 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:21 +0100 Subject: ip6, udp6: Support MSG_SPLICE_PAGES Make IP6/UDP6 sendmsg() support MSG_SPLICE_PAGES. This causes pages to be spliced from the source iterator if possible, copying the data if not. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. Signed-off-by: David Howells cc: Willem de Bruijn cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/ipv6/ip6_output.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'net') diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 9554cf46ed88..c722cb881b2d 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1589,6 +1589,14 @@ emsgsize: skb_zcopy_set(skb, uarg, &extra_uref); } } + } else if ((flags & MSG_SPLICE_PAGES) && length) { + if (inet_sk(sk)->hdrincl) + return -EPERM; + if (rt->dst.dev->features & NETIF_F_SG) + /* We need an empty buffer to attach stuff to */ + paged = true; + else + flags &= ~MSG_SPLICE_PAGES; } /* @@ -1778,6 +1786,15 @@ alloc_new_skb: err = -EFAULT; goto error; } + } else if (flags & MSG_SPLICE_PAGES) { + struct msghdr *msg = from; + + err = skb_splice_from_iter(skb, &msg->msg_iter, copy, + sk->sk_allocation); + if (err < 0) + goto error; + copy = err; + wmem_alloc_delta += copy; } else if (!zc) { int i = skb_shinfo(skb)->nr_frags; -- cgit v1.2.3 From 7ac7c987850c3ec617c778f7bd871804dc1c648d Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:22 +0100 Subject: udp: Convert udp_sendpage() to use MSG_SPLICE_PAGES Convert udp_sendpage() to use sendmsg() with MSG_SPLICE_PAGES rather than directly splicing in the pages itself. 
This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. Signed-off-by: David Howells cc: Willem de Bruijn cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/ipv4/udp.c | 51 ++++++--------------------------------------------- 1 file changed, 6 insertions(+), 45 deletions(-) (limited to 'net') diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index aa32afd871ee..2879dc6d66ea 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1332,54 +1332,15 @@ EXPORT_SYMBOL(udp_sendmsg); int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { - struct inet_sock *inet = inet_sk(sk); - struct udp_sock *up = udp_sk(sk); - int ret; + struct bio_vec bvec; + struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES }; if (flags & MSG_SENDPAGE_NOTLAST) - flags |= MSG_MORE; - - if (!up->pending) { - struct msghdr msg = { .msg_flags = flags|MSG_MORE }; - - /* Call udp_sendmsg to specify destination address which - * sendpage interface can't pass. - * This will succeed only when the socket is connected. - */ - ret = udp_sendmsg(sk, &msg, 0); - if (ret < 0) - return ret; - } - - lock_sock(sk); + msg.msg_flags |= MSG_MORE; - if (unlikely(!up->pending)) { - release_sock(sk); - - net_dbg_ratelimited("cork failed\n"); - return -EINVAL; - } - - ret = ip_append_page(sk, &inet->cork.fl.u.ip4, - page, offset, size, flags); - if (ret == -EOPNOTSUPP) { - release_sock(sk); - return sock_no_sendpage(sk->sk_socket, page, offset, - size, flags); - } - if (ret < 0) { - udp_flush_pending_frames(sk); - goto out; - } - - up->len += size; - if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE))) - ret = udp_push_pending_frames(sk); - if (!ret) - ret = size; -out: - release_sock(sk); - return ret; + bvec_set_page(&bvec, page, size, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); + return udp_sendmsg(sk, &msg, size); } #define UDP_SKB_IS_STATELESS 0x80000000 -- cgit v1.2.3 From c49cf26632910581ee1173d75c0fca322ccaf4bb Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:23 +0100 Subject: ip: Remove ip_append_page() ip_append_page() is no longer used with the removal of udp_sendpage(), so remove it. 
Signed-off-by: David Howells cc: Willem de Bruijn cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- include/net/ip.h | 2 - net/ipv4/ip_output.c | 148 ++------------------------------------------------- 2 files changed, 4 insertions(+), 146 deletions(-) (limited to 'net') diff --git a/include/net/ip.h b/include/net/ip.h index c3fffaa92d6e..7627a4df893b 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -220,8 +220,6 @@ int ip_append_data(struct sock *sk, struct flowi4 *fl4, unsigned int flags); int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb); -ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, - int offset, size_t size, int flags); struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4, struct sk_buff_head *queue, struct inet_cork *cork); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index c7db973b5d29..553c740a6bfb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -946,17 +946,6 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk } EXPORT_SYMBOL(ip_generic_getfrag); -static inline __wsum -csum_page(struct page *page, int offset, int copy) -{ - char *kaddr; - __wsum csum; - kaddr = kmap(page); - csum = csum_partial(kaddr + offset, copy, 0); - kunmap(page); - return csum; -} - static int __ip_append_data(struct sock *sk, struct flowi4 *fl4, struct sk_buff_head *queue, @@ -1327,10 +1316,10 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork, } /* - * ip_append_data() and ip_append_page() can make one large IP datagram - * from many pieces of data. Each pieces will be holded on the socket - * until ip_push_pending_frames() is called. Each piece can be a page - * or non-page data. + * ip_append_data() can make one large IP datagram from many pieces of + * data. Each piece will be held on the socket until + * ip_push_pending_frames() is called. Each piece can be a page or + * non-page data. * * Not only UDP, other transport protocols - e.g. raw sockets - can use * this interface potentially. @@ -1363,135 +1352,6 @@ int ip_append_data(struct sock *sk, struct flowi4 *fl4, from, length, transhdrlen, flags); } -ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, - int offset, size_t size, int flags) -{ - struct inet_sock *inet = inet_sk(sk); - struct sk_buff *skb; - struct rtable *rt; - struct ip_options *opt = NULL; - struct inet_cork *cork; - int hh_len; - int mtu; - int len; - int err; - unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize; - - if (inet->hdrincl) - return -EPERM; - - if (flags&MSG_PROBE) - return 0; - - if (skb_queue_empty(&sk->sk_write_queue)) - return -EINVAL; - - cork = &inet->cork.base; - rt = (struct rtable *)cork->dst; - if (cork->flags & IPCORK_OPT) - opt = cork->opt; - - if (!(rt->dst.dev->features & NETIF_F_SG)) - return -EOPNOTSUPP; - - hh_len = LL_RESERVED_SPACE(rt->dst.dev); - mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize; - - fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); - maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; - maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu; - - if (cork->length + size > maxnonfragsize - fragheaderlen) { - ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, - mtu - (opt ? 
opt->optlen : 0)); - return -EMSGSIZE; - } - - skb = skb_peek_tail(&sk->sk_write_queue); - if (!skb) - return -EINVAL; - - cork->length += size; - - while (size > 0) { - /* Check if the remaining data fits into current packet. */ - len = mtu - skb->len; - if (len < size) - len = maxfraglen - skb->len; - - if (len <= 0) { - struct sk_buff *skb_prev; - int alloclen; - - skb_prev = skb; - fraggap = skb_prev->len - maxfraglen; - - alloclen = fragheaderlen + hh_len + fraggap + 15; - skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); - if (unlikely(!skb)) { - err = -ENOBUFS; - goto error; - } - - /* - * Fill in the control structures - */ - skb->ip_summed = CHECKSUM_NONE; - skb->csum = 0; - skb_reserve(skb, hh_len); - - /* - * Find where to start putting bytes. - */ - skb_put(skb, fragheaderlen + fraggap); - skb_reset_network_header(skb); - skb->transport_header = (skb->network_header + - fragheaderlen); - if (fraggap) { - skb->csum = skb_copy_and_csum_bits(skb_prev, - maxfraglen, - skb_transport_header(skb), - fraggap); - skb_prev->csum = csum_sub(skb_prev->csum, - skb->csum); - pskb_trim_unique(skb_prev, maxfraglen); - } - - /* - * Put the packet on the pending queue. - */ - __skb_queue_tail(&sk->sk_write_queue, skb); - continue; - } - - if (len > size) - len = size; - - if (skb_append_pagefrags(skb, page, offset, len, - MAX_SKB_FRAGS)) { - err = -EMSGSIZE; - goto error; - } - - if (skb->ip_summed == CHECKSUM_NONE) { - __wsum csum; - csum = csum_page(page, offset, len); - skb->csum = csum_block_add(skb->csum, csum, skb->len); - } - - skb_len_add(skb, len); - refcount_add(len, &sk->sk_wmem_alloc); - offset += len; - size -= len; - } - return 0; - -error: - cork->length -= size; - IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); - return err; -} - static void ip_cork_release(struct inet_cork *cork) { cork->flags &= ~IPCORK_OPT; -- cgit v1.2.3 From a0dbf5f818f9082d2262c1f8a8c0aab68364341f Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:24 +0100 Subject: af_unix: Support MSG_SPLICE_PAGES Make AF_UNIX sendmsg() support MSG_SPLICE_PAGES, splicing in pages from the source iterator if possible and copying the data in otherwise. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. 
Signed-off-by: David Howells cc: Kuniyuki Iwashima cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/unix/af_unix.c | 49 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index dd55506b4632..976bc1c5e11b 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2200,19 +2200,25 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, while (sent < len) { size = len - sent; - /* Keep two messages in the pipe so it schedules better */ - size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64); + if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) { + skb = sock_alloc_send_pskb(sk, 0, 0, + msg->msg_flags & MSG_DONTWAIT, + &err, 0); + } else { + /* Keep two messages in the pipe so it schedules better */ + size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64); - /* allow fallback to order-0 allocations */ - size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ); + /* allow fallback to order-0 allocations */ + size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ); - data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); + data_len = max_t(int, 0, size - SKB_MAX_HEAD(0)); - data_len = min_t(size_t, size, PAGE_ALIGN(data_len)); + data_len = min_t(size_t, size, PAGE_ALIGN(data_len)); - skb = sock_alloc_send_pskb(sk, size - data_len, data_len, - msg->msg_flags & MSG_DONTWAIT, &err, - get_order(UNIX_SKB_FRAGS_SZ)); + skb = sock_alloc_send_pskb(sk, size - data_len, data_len, + msg->msg_flags & MSG_DONTWAIT, &err, + get_order(UNIX_SKB_FRAGS_SZ)); + } if (!skb) goto out_err; @@ -2224,13 +2230,24 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, } fds_sent = true; - skb_put(skb, size - data_len); - skb->data_len = data_len; - skb->len = size; - err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); - if (err) { - kfree_skb(skb); - goto out_err; + if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES)) { + err = skb_splice_from_iter(skb, &msg->msg_iter, size, + sk->sk_allocation); + if (err < 0) { + kfree_skb(skb); + goto out_err; + } + size = err; + refcount_add(size, &sk->sk_wmem_alloc); + } else { + skb_put(skb, size - data_len); + skb->data_len = data_len; + skb->len = size; + err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); + if (err) { + kfree_skb(skb); + goto out_err; + } } unix_state_lock(other); -- cgit v1.2.3 From 57d44a354a43edba4ef9963327d4657d12edbfbc Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 13:11:25 +0100 Subject: unix: Convert unix_stream_sendpage() to use MSG_SPLICE_PAGES Convert unix_stream_sendpage() to use sendmsg() with MSG_SPLICE_PAGES rather than directly splicing in the pages itself. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. 
Signed-off-by: David Howells cc: Kuniyuki Iwashima cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/unix/af_unix.c | 134 +++-------------------------------------------------- 1 file changed, 7 insertions(+), 127 deletions(-) (limited to 'net') diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 976bc1c5e11b..115436ce1f8a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1839,24 +1839,6 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock, } } -static int maybe_init_creds(struct scm_cookie *scm, - struct socket *socket, - const struct sock *other) -{ - int err; - struct msghdr msg = { .msg_controllen = 0 }; - - err = scm_send(socket, &msg, scm, false); - if (err) - return err; - - if (unix_passcred_enabled(socket, other)) { - scm->pid = get_pid(task_tgid(current)); - current_uid_gid(&scm->creds.uid, &scm->creds.gid); - } - return err; -} - static bool unix_skb_scm_eq(struct sk_buff *skb, struct scm_cookie *scm) { @@ -2292,117 +2274,15 @@ out_err: static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, int offset, size_t size, int flags) { - int err; - bool send_sigpipe = false; - bool init_scm = true; - struct scm_cookie scm; - struct sock *other, *sk = socket->sk; - struct sk_buff *skb, *newskb = NULL, *tail = NULL; - - if (flags & MSG_OOB) - return -EOPNOTSUPP; + struct bio_vec bvec; + struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES }; - other = unix_peer(sk); - if (!other || sk->sk_state != TCP_ESTABLISHED) - return -ENOTCONN; - - if (false) { -alloc_skb: - unix_state_unlock(other); - mutex_unlock(&unix_sk(other)->iolock); - newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, - &err, 0); - if (!newskb) - goto err; - } - - /* we must acquire iolock as we modify already present - * skbs in the sk_receive_queue and mess with skb->len - */ - err = mutex_lock_interruptible(&unix_sk(other)->iolock); - if (err) { - err = flags & MSG_DONTWAIT ? 
-EAGAIN : -ERESTARTSYS; - goto err; - } - - if (sk->sk_shutdown & SEND_SHUTDOWN) { - err = -EPIPE; - send_sigpipe = true; - goto err_unlock; - } - - unix_state_lock(other); + if (flags & MSG_SENDPAGE_NOTLAST) + msg.msg_flags |= MSG_MORE; - if (sock_flag(other, SOCK_DEAD) || - other->sk_shutdown & RCV_SHUTDOWN) { - err = -EPIPE; - send_sigpipe = true; - goto err_state_unlock; - } - - if (init_scm) { - err = maybe_init_creds(&scm, socket, other); - if (err) - goto err_state_unlock; - init_scm = false; - } - - skb = skb_peek_tail(&other->sk_receive_queue); - if (tail && tail == skb) { - skb = newskb; - } else if (!skb || !unix_skb_scm_eq(skb, &scm)) { - if (newskb) { - skb = newskb; - } else { - tail = skb; - goto alloc_skb; - } - } else if (newskb) { - /* this is fast path, we don't necessarily need to - * call to kfree_skb even though with newskb == NULL - * this - does no harm - */ - consume_skb(newskb); - newskb = NULL; - } - - if (skb_append_pagefrags(skb, page, offset, size, MAX_SKB_FRAGS)) { - tail = skb; - goto alloc_skb; - } - - skb->len += size; - skb->data_len += size; - skb->truesize += size; - refcount_add(size, &sk->sk_wmem_alloc); - - if (newskb) { - err = unix_scm_to_skb(&scm, skb, false); - if (err) - goto err_state_unlock; - spin_lock(&other->sk_receive_queue.lock); - __skb_queue_tail(&other->sk_receive_queue, newskb); - spin_unlock(&other->sk_receive_queue.lock); - } - - unix_state_unlock(other); - mutex_unlock(&unix_sk(other)->iolock); - - other->sk_data_ready(other); - scm_destroy(&scm); - return size; - -err_state_unlock: - unix_state_unlock(other); -err_unlock: - mutex_unlock(&unix_sk(other)->iolock); -err: - kfree_skb(newskb); - if (send_sigpipe && !(flags & MSG_NOSIGNAL)) - send_sig(SIGPIPE, current, 0); - if (!init_scm) - scm_destroy(&scm); - return err; + bvec_set_page(&bvec, page, size, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); + return unix_stream_sendmsg(socket, &msg, size); } static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg, -- cgit v1.2.3 From 726de790f66029a7654b3e748f8d3e7888a30ae5 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 22 May 2023 16:37:57 +0200 Subject: ping: Stop using RTO_ONLINK. Define a new helper to figure out the correct route scope to use on TX, depending on socket configuration, ancillary data and send flags. Use this new helper to properly initialise the scope in flowi4_init_output(), instead of overriding tos with the RTO_ONLINK flag. The objective is to eventually remove RTO_ONLINK, which will allow converting .flowi4_tos to dscp_t. Signed-off-by: Guillaume Nault Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/net/ip.h | 13 +++++++++++++ net/ipv4/ping.c | 15 +++++---------- 2 files changed, 18 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/include/net/ip.h b/include/net/ip.h index 7627a4df893b..f1cb28d649e4 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -240,6 +240,19 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base); } +/* Get the route scope that should be used when sending a packet. 
*/ +static inline u8 ip_sendmsg_scope(const struct inet_sock *inet, + const struct ipcm_cookie *ipc, + const struct msghdr *msg) +{ + if (sock_flag(&inet->sk, SOCK_LOCALROUTE) || + msg->msg_flags & MSG_DONTROUTE || + (ipc->opt && ipc->opt->opt.is_strictroute)) + return RT_SCOPE_LINK; + + return RT_SCOPE_UNIVERSE; +} + static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet) { return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos); diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 3793c81bda8a..25dd78cee179 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -705,7 +705,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct ip_options_data opt_copy; int free = 0; __be32 saddr, daddr, faddr; - u8 tos; + u8 tos, scope; int err; pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); @@ -769,11 +769,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) faddr = ipc.opt->opt.faddr; } tos = get_rttos(&ipc, inet); - if (sock_flag(sk, SOCK_LOCALROUTE) || - (msg->msg_flags & MSG_DONTROUTE) || - (ipc.opt && ipc.opt->opt.is_strictroute)) { - tos |= RTO_ONLINK; - } + scope = ip_sendmsg_scope(inet, &ipc, msg); if (ipv4_is_multicast(daddr)) { if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif)) @@ -783,10 +779,9 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } else if (!ipc.oif) ipc.oif = inet->uc_index; - flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos, - RT_SCOPE_UNIVERSE, sk->sk_protocol, - inet_sk_flowi_flags(sk), faddr, saddr, 0, 0, - sk->sk_uid); + flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos, scope, + sk->sk_protocol, inet_sk_flowi_flags(sk), faddr, + saddr, 0, 0, sk->sk_uid); fl4.fl4_icmp_type = user_icmph.type; fl4.fl4_icmp_code = user_icmph.code; -- cgit v1.2.3 From c85be08fc4fa44f07167be0377ebaa8d36b1dd58 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 22 May 2023 16:38:02 +0200 Subject: raw: Stop using RTO_ONLINK. Use ip_sendmsg_scope() to properly initialise the scope in flowi4_init_output(), instead of overriding tos with the RTO_ONLINK flag. The objective is to eventually remove RTO_ONLINK, which will allow converting .flowi4_tos to dscp_t. The MSG_DONTROUTE and SOCK_LOCALROUTE cases were already handled by raw_sendmsg() (SOCK_LOCALROUTE was handled by the RT_CONN_FLAGS*() macros called by get_rtconn_flags()). However, opt.is_strictroute wasn't taken into account. Therefore, a side effect of this patch is to now honour opt.is_strictroute, and thus align raw_sendmsg() with ping_v4_sendmsg() and udp_sendmsg(). Since raw_sendmsg() was the only user of get_rtconn_flags(), we can now remove this function. Signed-off-by: Guillaume Nault Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/net/ip.h | 5 ----- net/ipv4/raw.c | 10 ++++------ 2 files changed, 4 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/include/net/ip.h b/include/net/ip.h index f1cb28d649e4..8a3860a916dc 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -258,11 +258,6 @@ static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet) return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos); } -static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk) -{ - return (ipc->tos != -1) ? 
RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk); -} - /* datagram.c */ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index ff712bf2a98d..8b7b5c842bdd 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -476,10 +476,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct ipcm_cookie ipc; struct rtable *rt = NULL; struct flowi4 fl4; + u8 tos, scope; int free = 0; __be32 daddr; __be32 saddr; - u8 tos; int err; struct ip_options_data opt_copy; struct raw_frag_vec rfv; @@ -572,9 +572,8 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) daddr = ipc.opt->opt.faddr; } } - tos = get_rtconn_flags(&ipc, sk); - if (msg->msg_flags & MSG_DONTROUTE) - tos |= RTO_ONLINK; + tos = get_rttos(&ipc, inet); + scope = ip_sendmsg_scope(inet, &ipc, msg); if (ipv4_is_multicast(daddr)) { if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif)) @@ -597,8 +596,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } } - flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos, - RT_SCOPE_UNIVERSE, + flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos, scope, hdrincl ? IPPROTO_RAW : sk->sk_protocol, inet_sk_flowi_flags(sk) | (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), -- cgit v1.2.3 From 0e26371db548834052a8f0c3e138c5ff07e253a0 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 22 May 2023 16:38:07 +0200 Subject: udp: Stop using RTO_ONLINK. Use ip_sendmsg_scope() to properly initialise the scope in flowi4_init_output(), instead of overriding tos with the RTO_ONLINK flag. The objective is to eventually remove RTO_ONLINK, which will allow converting .flowi4_tos to dscp_t. Now that the scope is determined by ip_sendmsg_scope(), we need to check its result to set the 'connected' variable. Signed-off-by: Guillaume Nault Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv4/udp.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 2879dc6d66ea..e992dd5392e3 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1062,8 +1062,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int free = 0; int connected = 0; __be32 daddr, faddr, saddr; + u8 tos, scope; __be16 dport; - u8 tos; int err, is_udplite = IS_UDPLITE(sk); int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE; int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); @@ -1183,12 +1183,9 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) connected = 0; } tos = get_rttos(&ipc, inet); - if (sock_flag(sk, SOCK_LOCALROUTE) || - (msg->msg_flags & MSG_DONTROUTE) || - (ipc.opt && ipc.opt->opt.is_strictroute)) { - tos |= RTO_ONLINK; + scope = ip_sendmsg_scope(inet, &ipc, msg); + if (scope == RT_SCOPE_LINK) connected = 0; - } if (ipv4_is_multicast(daddr)) { if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif)) @@ -1221,11 +1218,9 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl4 = &fl4_stack; - flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos, - RT_SCOPE_UNIVERSE, sk->sk_protocol, - flow_flags, - faddr, saddr, dport, inet->inet_sport, - sk->sk_uid); + flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos, scope, + sk->sk_protocol, flow_flags, faddr, saddr, + dport, inet->inet_sport, sk->sk_uid); security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); rt = ip_route_output_flow(net, fl4, sk); -- cgit v1.2.3 From c496daeb863093a046e0bb8db7265bf45d91775a Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 23 May 2023 14:37:59 +0200 Subject: devlink: remove duplicate port notification The notification about created port is send from devl_port_register() function called from ops->port_new(). No need to send it again here, so remove the call and the helper function. Signed-off-by: Jiri Pirko Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/sf/devlink.c | 9 ++--- drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h | 3 +- include/net/devlink.h | 4 +- net/devlink/leftover.c | 45 +--------------------- 4 files changed, 6 insertions(+), 55 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 7d955a4d9f14..de15b9c85e1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -282,8 +282,7 @@ out: static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, const struct devlink_port_new_attrs *new_attr, - struct netlink_ext_ack *extack, - unsigned int *new_port_index) + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_sf *sf; @@ -297,7 +296,6 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, new_attr->controller, new_attr->sfnum); if (err) goto esw_err; - *new_port_index = sf->port_index; trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum); return 0; @@ -338,8 +336,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_ int mlx5_devlink_sf_port_new(struct devlink *devlink, const struct devlink_port_new_attrs *new_attr, - struct netlink_ext_ack *extack, - unsigned int *new_port_index) + struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_sf_table *table; @@ -355,7 +352,7 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink, "Port add is only supported in eswitch switchdev mode or SF ports are disabled."); return -EOPNOTSUPP; } - err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index); + err = mlx5_sf_add(dev, table, new_attr, extack); mlx5_sf_table_put(table); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h index 3a480e06ecc0..1f7d8cbd72e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h @@ -20,8 +20,7 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev); int mlx5_devlink_sf_port_new(struct devlink *devlink, const struct devlink_port_new_attrs *add_attr, - struct netlink_ext_ack *extack, - unsigned int *new_port_index); + struct netlink_ext_ack *extack); int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, struct netlink_ext_ack *extack); int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, diff --git a/include/net/devlink.h b/include/net/devlink.h index 6a942e70e451..ccea6e079777 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1500,7 +1500,6 @@ struct devlink_ops { * @devlink: Devlink instance * @attrs: attributes of the new port * @extack: extack for reporting error messages - * @new_port_index: index of the new port * * Devlink core will call this device driver function upon user request * to create a new port function of a specified flavor and optional @@ -1515,8 +1514,7 @@ struct devlink_ops { */ int (*port_new)(struct devlink *devlink, const struct devlink_port_new_attrs *attrs, - struct netlink_ext_ack *extack, - unsigned int *new_port_index); + struct netlink_ext_ack *extack); /** * port_del() - Delete a port function * @devlink: Devlink instance diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index cd0254968076..cb60e42b2761 100644 --- a/net/devlink/leftover.c +++ 
b/net/devlink/leftover.c @@ -1354,45 +1354,12 @@ static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb, return devlink->ops->port_unsplit(devlink, devlink_port, info->extack); } -static int devlink_port_new_notify(struct devlink *devlink, - unsigned int port_index, - struct genl_info *info) -{ - struct devlink_port *devlink_port; - struct sk_buff *msg; - int err; - - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; - - lockdep_assert_held(&devlink->lock); - devlink_port = devlink_port_get_by_index(devlink, port_index); - if (!devlink_port) { - err = -ENODEV; - goto out; - } - - err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW, - info->snd_portid, info->snd_seq, 0, NULL); - if (err) - goto out; - - return genlmsg_reply(msg, info); - -out: - nlmsg_free(msg); - return err; -} - static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, struct genl_info *info) { struct netlink_ext_ack *extack = info->extack; struct devlink_port_new_attrs new_attrs = {}; struct devlink *devlink = info->user_ptr[0]; - unsigned int new_port_index; - int err; if (!devlink->ops->port_new || !devlink->ops->port_del) return -EOPNOTSUPP; @@ -1423,17 +1390,7 @@ static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, new_attrs.sfnum_valid = true; } - err = devlink->ops->port_new(devlink, &new_attrs, extack, - &new_port_index); - if (err) - return err; - - err = devlink_port_new_notify(devlink, new_port_index, info); - if (err && err != -ENODEV) { - /* Fail to send the response; destroy newly created port. */ - devlink->ops->port_del(devlink, new_port_index, extack); - } - return err; + return devlink->ops->port_new(devlink, &new_attrs, extack); } static int devlink_nl_cmd_port_del_doit(struct sk_buff *skb, -- cgit v1.2.3 From 9277649c66fe7cb0e2f8adb09621556bcfb052c7 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 23 May 2023 14:38:01 +0200 Subject: devlink: pass devlink_port pointer to ops->port_del() instead of index Historically there was a reason why port_dev() along with for example port_split() did get port_index instead of the devlink_port pointer. With the locking changes that were done which ensured devlink instance mutex is hold for every command, the port ops could get devlink_port pointer directly. Change the forgotten port_dev() op to be as others and pass devlink_port pointer instead of port_index. Signed-off-by: Jiri Pirko Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 5 +++-- drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h | 3 ++- include/net/devlink.h | 4 ++-- net/devlink/leftover.c | 11 +++-------- 4 files changed, 10 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index de15b9c85e1b..c7d4691cb65a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -376,7 +376,8 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) } } -int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, +int mlx5_devlink_sf_port_del(struct devlink *devlink, + struct devlink_port *dl_port, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); @@ -391,7 +392,7 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, "Port del is only supported in eswitch switchdev mode or SF ports are disabled."); return -EOPNOTSUPP; } - sf = mlx5_sf_lookup_by_index(table, port_index); + sf = mlx5_sf_lookup_by_index(table, dl_port->index); if (!sf) { err = -ENODEV; goto sf_err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h index 1f7d8cbd72e8..c5430b8dcdf6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h @@ -21,7 +21,8 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev); int mlx5_devlink_sf_port_new(struct devlink *devlink, const struct devlink_port_new_attrs *add_attr, struct netlink_ext_ack *extack); -int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, +int mlx5_devlink_sf_port_del(struct devlink *devlink, + struct devlink_port *dl_port, struct netlink_ext_ack *extack); int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, enum devlink_port_fn_state *state, diff --git a/include/net/devlink.h b/include/net/devlink.h index 24a48f3d4c35..1bd56c8d6f3c 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1516,7 +1516,7 @@ struct devlink_ops { /** * port_del() - Delete a port function * @devlink: Devlink instance - * @port_index: port function index to delete + * @port: The devlink port * @extack: extack for reporting error messages * * Devlink core will call this device driver function upon user request @@ -1528,7 +1528,7 @@ struct devlink_ops { * * Return: 0 on success, negative value otherwise. 
*/ - int (*port_del)(struct devlink *devlink, unsigned int port_index, + int (*port_del)(struct devlink *devlink, struct devlink_port *port, struct netlink_ext_ack *extack); /** * port_fn_state_get() - Get the state of a port function diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index cb60e42b2761..0410137a4a31 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -1396,20 +1396,14 @@ static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, static int devlink_nl_cmd_port_del_doit(struct sk_buff *skb, struct genl_info *info) { + struct devlink_port *devlink_port = info->user_ptr[1]; struct netlink_ext_ack *extack = info->extack; struct devlink *devlink = info->user_ptr[0]; - unsigned int port_index; if (!devlink->ops->port_del) return -EOPNOTSUPP; - if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_INDEX)) { - NL_SET_ERR_MSG(extack, "Port index is not specified"); - return -EINVAL; - } - port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]); - - return devlink->ops->port_del(devlink, port_index, extack); + return devlink->ops->port_del(devlink, devlink_port, extack); } static int @@ -6341,6 +6335,7 @@ const struct genl_small_ops devlink_nl_ops[56] = { .cmd = DEVLINK_CMD_PORT_DEL, .doit = devlink_nl_cmd_port_del_doit, .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, }, { .cmd = DEVLINK_CMD_LINECARD_GET, -- cgit v1.2.3 From 67178fd066d53d5a6cada1cdc6399d02b68b708e Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 22 May 2023 14:50:00 +0100 Subject: net: Make sock_splice_read() use copy_splice_read() by default Make sock_splice_read() use copy_splice_read() by default as file_splice_read() will return immediately with 0 as a socket has no pagecache and is a zero-size file. Signed-off-by: David Howells cc: "David S. Miller" cc: Eric Dumazet cc: Jakub Kicinski cc: Paolo Abeni cc: Christoph Hellwig cc: Al Viro cc: Jens Axboe cc: netdev@vger.kernel.org cc: linux-block@vger.kernel.org cc: linux-mm@kvack.org Link: https://lore.kernel.org/r/20230522135018.2742245-14-dhowells@redhat.com Signed-off-by: Jens Axboe --- net/socket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/socket.c b/net/socket.c index b7e01d0fe082..401778380195 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1093,7 +1093,7 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct socket *sock = file->private_data; if (unlikely(!sock->ops->splice_read)) - return generic_file_splice_read(file, ppos, pipe, len, flags); + return copy_splice_read(file, ppos, pipe, len, flags); return sock->ops->splice_read(sock, ppos, pipe, len, flags); } -- cgit v1.2.3 From 4fbfde4e272065943cbcf2d016f0679456cb4f75 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 23 May 2023 18:14:51 +0200 Subject: net: tcp: make the txhash available in TIME_WAIT sockets for IPv4 too Commit c67b85558ff2 ("ipv6: tcp: send consistent autoflowlabel in TIME_WAIT state") made the socket txhash also available in TIME_WAIT sockets but for IPv6 only. Make it available for IPv4 too as we'll use it in later commits. 
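With the hash stored on the timewait socket for both address families, a reply path can pick it up uniformly regardless of socket state. An illustrative helper (the name is hypothetical; the follow-up patch below open-codes the same selection in tcp_v4_send_reset()):

#include <net/tcp.h>
#include <net/inet_timewait_sock.h>

/* Hypothetical helper: choose the flow hash for a reply generated on
 * behalf of either a full socket or a TIME_WAIT socket.
 */
static u32 example_reply_txhash(const struct sock *sk)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk(sk)->tw_txhash;

	return sk->sk_txhash;
}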
Signed-off-by: Antoine Tenart Reviewed-by: Eric Dumazet Signed-off-by: Paolo Abeni --- net/ipv4/tcp_minisocks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index dac0d62120e6..04fc328727e6 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -303,6 +303,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) tcptw->tw_ts_offset = tp->tsoffset; tcptw->tw_last_oow_ack_time = 0; tcptw->tw_tx_delay = tp->tcp_tx_delay; + tw->tw_txhash = sk->sk_txhash; #if IS_ENABLED(CONFIG_IPV6) if (tw->tw_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); @@ -311,7 +312,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr; tw->tw_tclass = np->tclass; tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK); - tw->tw_txhash = sk->sk_txhash; tw->tw_ipv6only = sk->sk_ipv6only; } #endif -- cgit v1.2.3 From c0a8966e2bc7d31f77a7246947ebc09c1ff06066 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 23 May 2023 18:14:52 +0200 Subject: net: ipv4: use consistent txhash in TIME_WAIT and SYN_RECV When using IPv4/TCP, skb->hash comes from sk->sk_txhash except in TIME_WAIT and SYN_RECV where it's not set in the reply skb from ip_send_unicast_reply. Those packets will have a mismatched hash with others from the same flow as their hashes will be 0. IPv6 does not have the same issue as the hash is set from the socket txhash in those cases. This commits sets the hash in the reply skb from ip_send_unicast_reply, which makes the IPv4 code behaving like IPv6. Signed-off-by: Antoine Tenart Reviewed-by: Eric Dumazet Signed-off-by: Paolo Abeni --- include/net/ip.h | 2 +- net/ipv4/ip_output.c | 4 +++- net/ipv4/tcp_ipv4.c | 14 +++++++++----- 3 files changed, 13 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/include/net/ip.h b/include/net/ip.h index 8a3860a916dc..2982dd13cf16 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -286,7 +286,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, - unsigned int len, u64 transmit_time); + unsigned int len, u64 transmit_time, u32 txhash); #define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field) #define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 553c740a6bfb..244fb9365d87 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1570,7 +1570,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, - unsigned int len, u64 transmit_time) + unsigned int len, u64 transmit_time, u32 txhash) { struct ip_options_data replyopts; struct ipcm_cookie ipc; @@ -1633,6 +1633,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, arg->csum)); nskb->ip_summed = CHECKSUM_NONE; nskb->mono_delivery_time = !!transmit_time; + if (txhash) + skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4); ip_push_pending_frames(sk, &fl4); } out: diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index af043f063a73..a50bd782f91f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -692,6 +692,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) u64 transmit_time = 0; struct sock *ctl_sk; struct net *net; + u32 txhash = 0; /* Never 
send a reset in response to a reset. */ if (th->rst) @@ -829,6 +830,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) inet_twsk(sk)->tw_priority : sk->sk_priority; transmit_time = tcp_transmit_time(sk); xfrm_sk_clone_policy(ctl_sk, sk); + txhash = (sk->sk_state == TCP_TIME_WAIT) ? + inet_twsk(sk)->tw_txhash : sk->sk_txhash; } else { ctl_sk->sk_mark = 0; ctl_sk->sk_priority = 0; @@ -837,7 +840,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len, - transmit_time); + transmit_time, txhash); xfrm_sk_free_policy(ctl_sk); sock_net_set(ctl_sk, &init_net); @@ -859,7 +862,7 @@ static void tcp_v4_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, struct tcp_md5sig_key *key, - int reply_flags, u8 tos) + int reply_flags, u8 tos, u32 txhash) { const struct tcphdr *th = tcp_hdr(skb); struct { @@ -935,7 +938,7 @@ static void tcp_v4_send_ack(const struct sock *sk, skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len, - transmit_time); + transmit_time, txhash); sock_net_set(ctl_sk, &init_net); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); @@ -955,7 +958,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0, - tw->tw_tos + tw->tw_tos, + tw->tw_txhash ); inet_twsk_put(tw); @@ -988,7 +992,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, 0, tcp_md5_do_lookup(sk, l3index, addr, AF_INET), inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, - ip_hdr(skb)->tos); + ip_hdr(skb)->tos, tcp_rsk(req)->txhash); } /* -- cgit v1.2.3 From 022acfa632607735833eab34487a098c1ee9a7fc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 25 May 2023 12:15:33 -1000 Subject: net: qrtr: Use alloc_ordered_workqueue() to create ordered workqueues BACKGROUND ========== When multiple work items are queued to a workqueue, their execution order doesn't match the queueing order. They may get executed in any order and simultaneously. When fully serialized execution - one by one in the queueing order - is needed, an ordered workqueue should be used which can be created with alloc_ordered_workqueue(). However, alloc_ordered_workqueue() was a later addition. Before it, an ordered workqueue could be obtained by creating an UNBOUND workqueue with @max_active==1. This originally was an implementation side-effect which was broken by 4c16bd327c74 ("workqueue: restore WQ_UNBOUND/max_active==1 to be ordered"). Because there were users that depended on the ordered execution, 5c0338c68706 ("workqueue: restore WQ_UNBOUND/max_active==1 to be ordered") made workqueue allocation path to implicitly promote UNBOUND workqueues w/ @max_active==1 to ordered workqueues. While this has worked okay, overloading the UNBOUND allocation interface this way creates other issues. It's difficult to tell whether a given workqueue actually needs to be ordered and users that legitimately want a min concurrency level wq unexpectedly gets an ordered one instead. With planned UNBOUND workqueue updates to improve execution locality and more prevalence of chiplet designs which can benefit from such improvements, this isn't a state we wanna be in forever. 
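The conversion itself is mechanical; a minimal sketch, with the queue name and flags being placeholders rather than taken from any particular driver:

#include <linux/workqueue.h>

/* before: ordering obtained only as a side effect of UNBOUND + max_active == 1
 *	wq = alloc_workqueue("example_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
 * after: ordering requested explicitly
 */
static struct workqueue_struct *example_create_ordered_wq(void)
{
	return alloc_ordered_workqueue("example_wq", WQ_MEM_RECLAIM);
}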
This patch series audits all callsites that create an UNBOUND workqueue w/ @max_active==1 and converts them to alloc_ordered_workqueue() as necessary. WHAT TO LOOK FOR ================ The conversions are from alloc_workqueue(WQ_UNBOUND | flags, 1, args..) to alloc_ordered_workqueue(flags, args...) which don't cause any functional changes. If you know that fully ordered execution is not necessary, please let me know. I'll drop the conversion and instead add a comment noting the fact to reduce confusion while conversion is in progress. If you aren't fully sure, it's completely fine to let the conversion through. The behavior will stay exactly the same and we can always reconsider later. As there are follow-up workqueue core changes, I'd really appreciate if the patch can be routed through the workqueue tree w/ your acks. Thanks. Signed-off-by: Tejun Heo Cc: Manivannan Sadhasivam Cc: "David S. Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Paolo Abeni Cc: linux-arm-msm@vger.kernel.org Cc: netdev@vger.kernel.org --- net/qrtr/ns.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c index 0f25a386138c..0f7a729f1a1f 100644 --- a/net/qrtr/ns.c +++ b/net/qrtr/ns.c @@ -783,7 +783,7 @@ int qrtr_ns_init(void) goto err_sock; } - qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1); + qrtr_ns.workqueue = alloc_ordered_workqueue("qrtr_ns_handler", 0); if (!qrtr_ns.workqueue) { ret = -ENOMEM; goto err_sock; -- cgit v1.2.3 From 78ef970385ea4d02a44af2776e4f4c74d4fce3d4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 25 May 2023 12:15:36 -1000 Subject: rxrpc: Use alloc_ordered_workqueue() to create ordered workqueues BACKGROUND ========== When multiple work items are queued to a workqueue, their execution order doesn't match the queueing order. They may get executed in any order and simultaneously. When fully serialized execution - one by one in the queueing order - is needed, an ordered workqueue should be used which can be created with alloc_ordered_workqueue(). However, alloc_ordered_workqueue() was a later addition. Before it, an ordered workqueue could be obtained by creating an UNBOUND workqueue with @max_active==1. This originally was an implementation side-effect which was broken by 4c16bd327c74 ("workqueue: restore WQ_UNBOUND/max_active==1 to be ordered"). Because there were users that depended on the ordered execution, 5c0338c68706 ("workqueue: restore WQ_UNBOUND/max_active==1 to be ordered") made workqueue allocation path to implicitly promote UNBOUND workqueues w/ @max_active==1 to ordered workqueues. While this has worked okay, overloading the UNBOUND allocation interface this way creates other issues. It's difficult to tell whether a given workqueue actually needs to be ordered and users that legitimately want a min concurrency level wq unexpectedly gets an ordered one instead. With planned UNBOUND workqueue updates to improve execution locality and more prevalence of chiplet designs which can benefit from such improvements, this isn't a state we wanna be in forever. This patch series audits all callsites that create an UNBOUND workqueue w/ @max_active==1 and converts them to alloc_ordered_workqueue() as necessary. WHAT TO LOOK FOR ================ The conversions are from alloc_workqueue(WQ_UNBOUND | flags, 1, args..) to alloc_ordered_workqueue(flags, args...) which don't cause any functional changes. If you know that fully ordered execution is not necessary, please let me know. 
I'll drop the conversion and instead add a comment noting the fact to reduce confusion while conversion is in progress. If you aren't fully sure, it's completely fine to let the conversion through. The behavior will stay exactly the same and we can always reconsider later. As there are follow-up workqueue core changes, I'd really appreciate if the patch can be routed through the workqueue tree w/ your acks. Thanks. Signed-off-by: Tejun Heo Cc: David Howells Cc: Marc Dionne Cc: "David S. Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Paolo Abeni Cc: linux-afs@lists.infradead.org Cc: netdev@vger.kernel.org --- net/rxrpc/af_rxrpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 31f738d65f1c..7eb24c25c731 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -988,7 +988,7 @@ static int __init af_rxrpc_init(void) goto error_call_jar; } - rxrpc_workqueue = alloc_workqueue("krxrpcd", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + rxrpc_workqueue = alloc_ordered_workqueue("krxrpcd", WQ_HIGHPRI | WQ_MEM_RECLAIM); if (!rxrpc_workqueue) { pr_notice("Failed to allocate work queue\n"); goto error_work_queue; -- cgit v1.2.3 From c857946a4e262e0f798fe7625fadf85bf1190fc4 Mon Sep 17 00:00:00 2001 From: Kurt Kanzenbach Date: Tue, 23 May 2023 13:15:18 +0200 Subject: net/core: Enable socket busy polling on -RT Busy polling is currently not allowed on PREEMPT_RT, because it disables preemption while invoking the NAPI callback. It is not possible to acquire sleeping locks with disabled preemption. For details see commit 20ab39d13e2e ("net/core: disable NET_RX_BUSY_POLL on PREEMPT_RT"). However, strict cyclic and/or low latency network applications may prefer busy polling e.g., using AF_XDP instead of interrupt driven communication. The preempt_disable() is used in order to prevent the poll_owner and NAPI owner to be preempted while owning the resource to ensure progress. Netpoll performs busy polling in order to acquire the lock. NAPI is locked by setting the NAPIF_STATE_SCHED flag. There is no busy polling if the flag is set and the "owner" is preempted. Worst case is that the task owning NAPI gets preempted and NAPI processing stalls. This is can be prevented by properly prioritising the tasks within the system. Allow RX_BUSY_POLL on PREEMPT_RT if NETPOLL is disabled. Don't disable preemption on PREEMPT_RT within the busy poll loop. Tested on x86 hardware with v6.1-RT and v6.3-RT on Intel i225 (igc) with AF_XDP/ZC sockets configured to run in busy polling mode. Suggested-by: Sebastian Andrzej Siewior Signed-off-by: Kurt Kanzenbach Signed-off-by: David S. 
Miller --- net/Kconfig | 2 +- net/core/dev.c | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/Kconfig b/net/Kconfig index 7d39c1773eb4..2fb25b534df5 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -324,7 +324,7 @@ config CGROUP_NET_CLASSID config NET_RX_BUSY_POLL bool - default y if !PREEMPT_RT + default y if !PREEMPT_RT || (PREEMPT_RT && !NETCONSOLE) config BQL bool diff --git a/net/core/dev.c b/net/core/dev.c index b3c13e041935..3393c2f3dbe8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6197,7 +6197,8 @@ restart: if (!napi) goto out; - preempt_disable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_disable(); for (;;) { int work = 0; @@ -6239,7 +6240,8 @@ count: if (unlikely(need_resched())) { if (napi_poll) busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); - preempt_enable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_enable(); rcu_read_unlock(); cond_resched(); if (loop_end(loop_end_arg, start_time)) @@ -6250,7 +6252,8 @@ count: } if (napi_poll) busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget); - preempt_enable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + preempt_enable(); out: rcu_read_unlock(); } -- cgit v1.2.3 From 9b66ee06e5ca2698d0ba12a7ad7188cb724279e7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 24 May 2023 10:09:01 -0700 Subject: net: ynl: prefix uAPI header include with uapi/ To keep things simple we used to include the uAPI header in the kernel in the #include format. This works well enough, most of the genl families should have headers in include/net/ so linux/$family.h ends up referring to the uAPI header, anyway. And if it doesn't no big deal, we'll just include more info than we need. Unless that is there is a naming conflict. Someone recently created include/linux/psp.h which will be a problem when supporting the PSP protocol. (I'm talking about work-in-progress patches, but it's just a proof that assuming lack of name conflicts was overly optimistic.) Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- net/core/netdev-genl-gen.c | 2 +- net/core/netdev-genl-gen.h | 2 +- net/handshake/genl.c | 2 +- net/handshake/genl.h | 2 +- net/ipv4/fou_nl.c | 2 +- net/ipv4/fou_nl.h | 2 +- tools/net/ynl/ynl-gen-c.py | 4 +++- 7 files changed, 9 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c index de17ca2f7dbf..ea9231378aa6 100644 --- a/net/core/netdev-genl-gen.c +++ b/net/core/netdev-genl-gen.c @@ -8,7 +8,7 @@ #include "netdev-genl-gen.h" -#include +#include /* NETDEV_CMD_DEV_GET - do */ static const struct nla_policy netdev_dev_get_nl_policy[NETDEV_A_DEV_IFINDEX + 1] = { diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h index 74d74fc23167..7b370c073e7d 100644 --- a/net/core/netdev-genl-gen.h +++ b/net/core/netdev-genl-gen.h @@ -9,7 +9,7 @@ #include #include -#include +#include int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info); int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb); diff --git a/net/handshake/genl.c b/net/handshake/genl.c index 9f29efb1493e..233be5cbfec9 100644 --- a/net/handshake/genl.c +++ b/net/handshake/genl.c @@ -8,7 +8,7 @@ #include "genl.h" -#include +#include /* HANDSHAKE_CMD_ACCEPT - do */ static const struct nla_policy handshake_accept_nl_policy[HANDSHAKE_A_ACCEPT_HANDLER_CLASS + 1] = { diff --git a/net/handshake/genl.h b/net/handshake/genl.h index 2c1f1aa6a02a..ae72a596f6cc 100644 --- a/net/handshake/genl.h +++ b/net/handshake/genl.h @@ -9,7 +9,7 @@ #include #include -#include +#include int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info); int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info); diff --git a/net/ipv4/fou_nl.c b/net/ipv4/fou_nl.c index 6c37c4f98cca..98b90107b5ab 100644 --- a/net/ipv4/fou_nl.c +++ b/net/ipv4/fou_nl.c @@ -8,7 +8,7 @@ #include "fou_nl.h" -#include +#include /* Global operation policy for fou */ const struct nla_policy fou_nl_policy[FOU_ATTR_IFINDEX + 1] = { diff --git a/net/ipv4/fou_nl.h b/net/ipv4/fou_nl.h index dbd0780a5d34..63a6c4ed803d 100644 --- a/net/ipv4/fou_nl.h +++ b/net/ipv4/fou_nl.h @@ -9,7 +9,7 @@ #include #include -#include +#include /* Global operation policy for fou */ extern const struct nla_policy fou_nl_policy[FOU_ATTR_IFINDEX + 1]; diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py index cc2f8c945340..be664510f484 100755 --- a/tools/net/ynl/ynl-gen-c.py +++ b/tools/net/ynl/ynl-gen-c.py @@ -2101,7 +2101,9 @@ def main(): if args.out_file: cw.p(f'#include "{os.path.basename(args.out_file[:-2])}.h"') cw.nl() - headers = [parsed.uapi_header] + headers = ['uapi/' + parsed.uapi_header] + else: + headers = [parsed.uapi_header] for definition in parsed['definitions']: if 'header' in definition: headers.append(definition['header']) -- cgit v1.2.3 From dd805cf3e80e038aeb06902399ce9bd6fafb4ff3 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Thu, 25 May 2023 11:38:44 +0100 Subject: net: dsa: add support for mac_prepare() and mac_finish() calls Add DSA support for the phylink mac_prepare() and mac_finish() calls. These were introduced as part of the PCS support to allow MACs to perform preparatory steps prior to configuration, and finalisation steps after the MAC and PCS has been configured. Introducing phylink_pcs support to the mv88e6xxx DSA driver needs some code moved out of its mac_config() stage into the mac_prepare() and mac_finish() stages, and this commit facilitates such code in DSA drivers. 
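A DSA driver opts in simply by filling the two new dsa_switch_ops callbacks. A hedged sketch follows; the example_* helpers are hypothetical stand-ins for whatever SERDES/PCS housekeeping a real switch needs, while the op names and signatures are the ones added below:

#include <net/dsa.h>

static int example_phylink_mac_prepare(struct dsa_switch *ds, int port,
				       unsigned int mode,
				       phy_interface_t interface)
{
	/* e.g. quiesce or power down the port's SERDES before mac_config() */
	return 0;
}

static int example_phylink_mac_finish(struct dsa_switch *ds, int port,
				      unsigned int mode,
				      phy_interface_t interface)
{
	/* e.g. bring the SERDES back up once MAC and PCS are configured */
	return 0;
}

static const struct dsa_switch_ops example_switch_ops = {
	/* mandatory callbacks of a real driver omitted for brevity */
	.phylink_mac_prepare	= example_phylink_mac_prepare,
	.phylink_mac_finish	= example_phylink_mac_finish,
};

Both hooks are optional: the dsa_port_phylink_mac_prepare()/_finish() wrappers added below simply return 0 when a driver does not implement them.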
Reviewed-by: Andrew Lunn Signed-off-by: Russell King (Oracle) Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- include/net/dsa.h | 6 ++++++ net/dsa/port.c | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) (limited to 'net') diff --git a/include/net/dsa.h b/include/net/dsa.h index 8903053fa5aa..75022cf771cf 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -867,9 +867,15 @@ struct dsa_switch_ops { phy_interface_t iface); int (*phylink_mac_link_state)(struct dsa_switch *ds, int port, struct phylink_link_state *state); + int (*phylink_mac_prepare)(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface); void (*phylink_mac_config)(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state); + int (*phylink_mac_finish)(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface); void (*phylink_mac_an_restart)(struct dsa_switch *ds, int port); void (*phylink_mac_link_down)(struct dsa_switch *ds, int port, unsigned int mode, diff --git a/net/dsa/port.c b/net/dsa/port.c index 71ba30538411..0ce8fd311c78 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -1603,6 +1603,21 @@ dsa_port_phylink_mac_select_pcs(struct phylink_config *config, return pcs; } +static int dsa_port_phylink_mac_prepare(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); + struct dsa_switch *ds = dp->ds; + int err = 0; + + if (ds->ops->phylink_mac_prepare) + err = ds->ops->phylink_mac_prepare(ds, dp->index, mode, + interface); + + return err; +} + static void dsa_port_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) @@ -1616,6 +1631,21 @@ static void dsa_port_phylink_mac_config(struct phylink_config *config, ds->ops->phylink_mac_config(ds, dp->index, mode, state); } +static int dsa_port_phylink_mac_finish(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); + struct dsa_switch *ds = dp->ds; + int err = 0; + + if (ds->ops->phylink_mac_finish) + err = ds->ops->phylink_mac_finish(ds, dp->index, mode, + interface); + + return err; +} + static void dsa_port_phylink_mac_an_restart(struct phylink_config *config) { struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); @@ -1671,7 +1701,9 @@ static const struct phylink_mac_ops dsa_port_phylink_mac_ops = { .validate = dsa_port_phylink_validate, .mac_select_pcs = dsa_port_phylink_mac_select_pcs, .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state, + .mac_prepare = dsa_port_phylink_mac_prepare, .mac_config = dsa_port_phylink_mac_config, + .mac_finish = dsa_port_phylink_mac_finish, .mac_an_restart = dsa_port_phylink_mac_an_restart, .mac_link_down = dsa_port_phylink_mac_link_down, .mac_link_up = dsa_port_phylink_mac_link_up, -- cgit v1.2.3 From ef1bc119ceb52d22a83f72b8dfce1dd64a3cca05 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 26 May 2023 16:39:15 +0300 Subject: net: fix signedness bug in skb_splice_from_iter() The "len" variable needs to be signed for the error handling to work correctly. 
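The pitfall is easy to see in isolation; an illustrative fragment with a hypothetical callee that may return a negative errno:

#include <linux/errno.h>
#include <linux/types.h>

static ssize_t example_produce(void)
{
	return -EFAULT;		/* hypothetical failure */
}

static int example_consume(void)
{
	ssize_t len;		/* must stay signed */

	len = example_produce();
	if (len < 0)		/* had len been size_t, this branch could never
				 * fire: the negative errno would wrap around to
				 * a huge positive value
				 */
		return len;

	return 0;
}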
Fixes: 2e910b95329c ("net: Add a function to splice pages into an skbuff for MSG_SPLICE_PAGES") Signed-off-by: Dan Carpenter Reviewed-by: David Howells Reviewed-by: Jiri Pirko Link: https://lore.kernel.org/r/366861a7-87c8-4bbf-9101-69dd41021d07@kili.mountain Signed-off-by: Jakub Kicinski --- net/core/skbuff.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 653abd8a6975..7c4338221b17 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -6931,8 +6931,8 @@ ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, unsigned int i; while (iter->count > 0) { - ssize_t space, nr; - size_t off, len; + ssize_t space, nr, len; + size_t off; ret = -EMSGSIZE; space = frag_limit - skb_shinfo(skb)->nr_frags; -- cgit v1.2.3 From 45402f04c5821a0c42c5d8b17e4abad504e598bb Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Fri, 26 May 2023 15:45:13 +0200 Subject: devlink: Spelling corrections Make some minor spelling corrections in comments. Found by inspection. Signed-off-by: Simon Horman Reviewed-by: Jiri Pirko Link: https://lore.kernel.org/r/20230526-devlink-spelling-v1-1-9a3e36cdebc8@kernel.org Signed-off-by: Jakub Kicinski --- include/net/devlink.h | 2 +- net/devlink/leftover.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/include/net/devlink.h b/include/net/devlink.h index 1bd56c8d6f3c..ec109b39c3ea 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1261,7 +1261,7 @@ struct devlink_ops { /** * @supported_flash_update_params: * mask of parameters supported by the driver's .flash_update - * implemementation. + * implementation. */ u32 supported_flash_update_params; unsigned long reload_actions; diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index 0410137a4a31..9e801b749194 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -6761,7 +6761,7 @@ static void devlink_port_type_warn_cancel(struct devlink_port *devlink_port) * @devlink: devlink * @devlink_port: devlink port * - * Initialize essencial stuff that is needed for functions + * Initialize essential stuff that is needed for functions * that may be called before devlink port registration. * Call to this function is optional and not needed * in case the driver does not use such functions. @@ -6782,7 +6782,7 @@ EXPORT_SYMBOL_GPL(devlink_port_init); * * @devlink_port: devlink port * - * Deinitialize essencial stuff that is in use for functions + * Deinitialize essential stuff that is in use for functions * that may be called after devlink port unregistration. * Call to this function is optional and not needed * in case the driver does not use such functions. -- cgit v1.2.3 From 6acdf43d8abe32082356d56a07598e513bbb59f4 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 May 2023 12:28:27 +0200 Subject: devlink: introduce port ops placeholder In devlink, some of the objects have separate ops registered alongside with the object itself. Port however have ops in devlink_ops structure. For drivers what register multiple kinds of ports with different ops this is not convenient. Introduce devlink_port_ops and a set of functions that allow drivers to pass ops pointer during port registration. 
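From a driver's point of view the change is a new registration helper that takes the per-port ops; a minimal sketch (the example_* names are placeholders, the helper name is the one introduced below):

#include <net/devlink.h>

static const struct devlink_port_ops example_port_ops = {
	/* per-port callbacks get moved here by later patches in the series */
};

static int example_register_port(struct devlink *devlink,
				 struct devlink_port *dl_port,
				 unsigned int port_index)
{
	return devlink_port_register_with_ops(devlink, dl_port, port_index,
					      &example_port_ops);
}

Passing a NULL ops pointer, or using the existing devlink_port_register() wrapper, keeps working: the core substitutes an empty dummy ops structure.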
Signed-off-by: Jiri Pirko Signed-off-by: Jakub Kicinski --- include/net/devlink.h | 41 +++++++++++++++++++++++++++++++++++------ net/devlink/leftover.c | 30 +++++++++++++++++++----------- 2 files changed, 54 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/include/net/devlink.h b/include/net/devlink.h index ec109b39c3ea..a1e230d24f05 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -123,6 +123,7 @@ struct devlink_port { struct list_head list; struct list_head region_list; struct devlink *devlink; + const struct devlink_port_ops *ops; unsigned int index; spinlock_t type_lock; /* Protects type and type_eth/ib * structures consistency. @@ -1649,15 +1650,43 @@ void devl_unregister(struct devlink *devlink); void devlink_register(struct devlink *devlink); void devlink_unregister(struct devlink *devlink); void devlink_free(struct devlink *devlink); + +/** + * struct devlink_port_ops - Port operations + */ +struct devlink_port_ops { +}; + void devlink_port_init(struct devlink *devlink, struct devlink_port *devlink_port); void devlink_port_fini(struct devlink_port *devlink_port); -int devl_port_register(struct devlink *devlink, - struct devlink_port *devlink_port, - unsigned int port_index); -int devlink_port_register(struct devlink *devlink, - struct devlink_port *devlink_port, - unsigned int port_index); + +int devl_port_register_with_ops(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index, + const struct devlink_port_ops *ops); + +static inline int devl_port_register(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index) +{ + return devl_port_register_with_ops(devlink, devlink_port, + port_index, NULL); +} + +int devlink_port_register_with_ops(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index, + const struct devlink_port_ops *ops); + +static inline int devlink_port_register(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index) +{ + return devlink_port_register_with_ops(devlink, devlink_port, + port_index, NULL); +} + void devl_port_unregister(struct devlink_port *devlink_port); void devlink_port_unregister(struct devlink_port *devlink_port); void devlink_port_type_eth_set(struct devlink_port *devlink_port); diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index 9e801b749194..2295fa542dd8 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -6793,12 +6793,15 @@ void devlink_port_fini(struct devlink_port *devlink_port) } EXPORT_SYMBOL_GPL(devlink_port_fini); +static const struct devlink_port_ops devlink_port_dummy_ops = {}; + /** - * devl_port_register() - Register devlink port + * devl_port_register_with_ops() - Register devlink port * * @devlink: devlink * @devlink_port: devlink port * @port_index: driver-specific numerical identifier of the port + * @ops: port ops * * Register devlink port with provided port index. User can use * any indexing, even hw-related one. devlink_port structure @@ -6806,9 +6809,10 @@ EXPORT_SYMBOL_GPL(devlink_port_fini); * Note that the caller should take care of zeroing the devlink_port * structure. 
*/ -int devl_port_register(struct devlink *devlink, - struct devlink_port *devlink_port, - unsigned int port_index) +int devl_port_register_with_ops(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index, + const struct devlink_port_ops *ops) { int err; @@ -6819,6 +6823,7 @@ int devl_port_register(struct devlink *devlink, devlink_port_init(devlink, devlink_port); devlink_port->registered = true; devlink_port->index = port_index; + devlink_port->ops = ops ? ops : &devlink_port_dummy_ops; spin_lock_init(&devlink_port->type_lock); INIT_LIST_HEAD(&devlink_port->reporter_list); err = xa_insert(&devlink->ports, port_index, devlink_port, GFP_KERNEL); @@ -6830,14 +6835,15 @@ int devl_port_register(struct devlink *devlink, devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW); return 0; } -EXPORT_SYMBOL_GPL(devl_port_register); +EXPORT_SYMBOL_GPL(devl_port_register_with_ops); /** - * devlink_port_register - Register devlink port + * devlink_port_register_with_ops - Register devlink port * * @devlink: devlink * @devlink_port: devlink port * @port_index: driver-specific numerical identifier of the port + * @ops: port ops * * Register devlink port with provided port index. User can use * any indexing, even hw-related one. devlink_port structure @@ -6847,18 +6853,20 @@ EXPORT_SYMBOL_GPL(devl_port_register); * * Context: Takes and release devlink->lock . */ -int devlink_port_register(struct devlink *devlink, - struct devlink_port *devlink_port, - unsigned int port_index) +int devlink_port_register_with_ops(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index, + const struct devlink_port_ops *ops) { int err; devl_lock(devlink); - err = devl_port_register(devlink, devlink_port, port_index); + err = devl_port_register_with_ops(devlink, devlink_port, + port_index, ops); devl_unlock(devlink); return err; } -EXPORT_SYMBOL_GPL(devlink_port_register); +EXPORT_SYMBOL_GPL(devlink_port_register_with_ops); /** * devl_port_unregister() - Unregister devlink port -- cgit v1.2.3 From f58a3e4dfe241393ce383363583190903f140da5 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 May 2023 12:28:31 +0200 Subject: devlink: move port_split/unsplit() ops into devlink_port_ops Move port_split/unsplit() from devlink_ops into newly introduced devlink_port_ops. 
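After the move, a driver supplies split handlers per port rather than per devlink instance; a hedged sketch with stubbed-out callbacks (the example_* names are hypothetical, the signatures match the ops added below):

#include <net/devlink.h>

static int example_port_split(struct devlink *devlink,
			      struct devlink_port *port, unsigned int count,
			      struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;	/* a real driver reconfigures the port here */
}

static int example_port_unsplit(struct devlink *devlink,
				struct devlink_port *port,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;	/* a real driver undoes the split here */
}

static const struct devlink_port_ops example_split_ops = {
	.port_split	= example_port_split,
	.port_unsplit	= example_port_unsplit,
};

The structure is then passed at port registration time through the *_with_ops() helpers introduced earlier in the series, which is what the ice, mlxsw and nfp hunks below rely on.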
Signed-off-by: Jiri Pirko Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/intel/ice/ice_devlink.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/core.c | 4 ++-- drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 4 ++-- include/net/devlink.h | 11 +++++++---- net/devlink/leftover.c | 10 +++++----- 5 files changed, 18 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 6661d12772a3..80dc5445b50d 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -1256,8 +1256,6 @@ static const struct devlink_ops ice_devlink_ops = { BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), .reload_down = ice_devlink_reload_down, .reload_up = ice_devlink_reload_up, - .port_split = ice_devlink_port_split, - .port_unsplit = ice_devlink_port_unsplit, .eswitch_mode_get = ice_eswitch_mode_get, .eswitch_mode_set = ice_eswitch_mode_set, .info_get = ice_devlink_info_get, @@ -1513,6 +1511,8 @@ ice_devlink_set_port_split_options(struct ice_pf *pf, } static const struct devlink_port_ops ice_devlink_port_ops = { + .port_split = ice_devlink_port_split, + .port_unsplit = ice_devlink_port_unsplit, }; /** diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 605881b17ccc..1ccf3b73ed72 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1723,8 +1723,6 @@ static const struct devlink_ops mlxsw_devlink_ops = { BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), .reload_down = mlxsw_devlink_core_bus_device_reload_down, .reload_up = mlxsw_devlink_core_bus_device_reload_up, - .port_split = mlxsw_devlink_port_split, - .port_unsplit = mlxsw_devlink_port_unsplit, .sb_pool_get = mlxsw_devlink_sb_pool_get, .sb_pool_set = mlxsw_devlink_sb_pool_set, .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get, @@ -3117,6 +3115,8 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core, EXPORT_SYMBOL(mlxsw_core_res_get); static const struct devlink_port_ops mlxsw_devlink_port_ops = { + .port_split = mlxsw_devlink_port_split, + .port_unsplit = mlxsw_devlink_port_unsplit, }; static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 4e4296ecae7c..8c6954c58a88 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -311,8 +311,6 @@ nfp_devlink_flash_update(struct devlink *devlink, } const struct devlink_ops nfp_devlink_ops = { - .port_split = nfp_devlink_port_split, - .port_unsplit = nfp_devlink_port_unsplit, .sb_pool_get = nfp_devlink_sb_pool_get, .sb_pool_set = nfp_devlink_sb_pool_set, .eswitch_mode_get = nfp_devlink_eswitch_mode_get, @@ -322,6 +320,8 @@ const struct devlink_ops nfp_devlink_ops = { }; static const struct devlink_port_ops nfp_devlink_port_ops = { + .port_split = nfp_devlink_port_split, + .port_unsplit = nfp_devlink_port_unsplit, }; int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) diff --git a/include/net/devlink.h b/include/net/devlink.h index a1e230d24f05..fdcb2c55f1b5 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1276,10 +1276,6 @@ struct devlink_ops { struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, enum devlink_port_type port_type); - int (*port_split)(struct devlink *devlink, struct devlink_port 
*port, - unsigned int count, struct netlink_ext_ack *extack); - int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port, - struct netlink_ext_ack *extack); int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info); @@ -1653,8 +1649,15 @@ void devlink_free(struct devlink *devlink); /** * struct devlink_port_ops - Port operations + * @port_split: Callback used to split the port into multiple ones. + * @port_unsplit: Callback used to unsplit the port group back into + * a single port. */ struct devlink_port_ops { + int (*port_split)(struct devlink *devlink, struct devlink_port *port, + unsigned int count, struct netlink_ext_ack *extack); + int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port, + struct netlink_ext_ack *extack); }; void devlink_port_init(struct devlink *devlink, diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index 2295fa542dd8..1ed95c76cf67 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -1320,7 +1320,7 @@ static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb, if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PORT_SPLIT_COUNT)) return -EINVAL; - if (!devlink->ops->port_split) + if (!devlink_port->ops->port_split) return -EOPNOTSUPP; count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]); @@ -1339,8 +1339,8 @@ static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb, return -EINVAL; } - return devlink->ops->port_split(devlink, devlink_port, count, - info->extack); + return devlink_port->ops->port_split(devlink, devlink_port, count, + info->extack); } static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb, @@ -1349,9 +1349,9 @@ static int devlink_nl_cmd_port_unsplit_doit(struct sk_buff *skb, struct devlink_port *devlink_port = info->user_ptr[1]; struct devlink *devlink = info->user_ptr[0]; - if (!devlink->ops->port_unsplit) + if (!devlink_port->ops->port_unsplit) return -EOPNOTSUPP; - return devlink->ops->port_unsplit(devlink, devlink_port, info->extack); + return devlink_port->ops->port_unsplit(devlink, devlink_port, info->extack); } static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, -- cgit v1.2.3 From 65a4c44bf9375a5d13287ee1e389b512e83f37eb Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 May 2023 12:28:33 +0200 Subject: devlink: move port_type_set() op into devlink_port_ops Move port_type_set() from devlink_ops into newly introduced devlink_port_ops. 
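The callback keeps its signature and only changes owner; a sketch of a per-port implementation (hypothetical names, with only a hint of the AUTO/ETH/IB mapping a driver like mlx4 performs):

#include <net/devlink.h>

static int example_port_type_set(struct devlink_port *devlink_port,
				 enum devlink_port_type port_type)
{
	switch (port_type) {
	case DEVLINK_PORT_TYPE_AUTO:
	case DEVLINK_PORT_TYPE_ETH:
	case DEVLINK_PORT_TYPE_IB:
		/* a real driver would reprogram the port hardware here */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct devlink_port_ops example_type_ops = {
	.port_type_set = example_port_type_set,
};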
Signed-off-by: Jiri Pirko Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx4/main.c | 52 +++++++++++++++---------------- include/net/devlink.h | 5 +-- net/devlink/leftover.c | 5 ++- 3 files changed, 31 insertions(+), 31 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 369642478fab..61286b0d9b0c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3024,7 +3024,33 @@ no_msi: } } +static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port, + enum devlink_port_type port_type) +{ + struct mlx4_port_info *info = container_of(devlink_port, + struct mlx4_port_info, + devlink_port); + enum mlx4_port_type mlx4_port_type; + + switch (port_type) { + case DEVLINK_PORT_TYPE_AUTO: + mlx4_port_type = MLX4_PORT_TYPE_AUTO; + break; + case DEVLINK_PORT_TYPE_ETH: + mlx4_port_type = MLX4_PORT_TYPE_ETH; + break; + case DEVLINK_PORT_TYPE_IB: + mlx4_port_type = MLX4_PORT_TYPE_IB; + break; + default: + return -EOPNOTSUPP; + } + + return __set_port_type(info, mlx4_port_type); +} + static const struct devlink_port_ops mlx4_devlink_port_ops = { + .port_type_set = mlx4_devlink_port_type_set, }; static int mlx4_init_port_info(struct mlx4_dev *dev, int port) @@ -3878,31 +3904,6 @@ err_disable_pdev: return err; } -static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port, - enum devlink_port_type port_type) -{ - struct mlx4_port_info *info = container_of(devlink_port, - struct mlx4_port_info, - devlink_port); - enum mlx4_port_type mlx4_port_type; - - switch (port_type) { - case DEVLINK_PORT_TYPE_AUTO: - mlx4_port_type = MLX4_PORT_TYPE_AUTO; - break; - case DEVLINK_PORT_TYPE_ETH: - mlx4_port_type = MLX4_PORT_TYPE_ETH; - break; - case DEVLINK_PORT_TYPE_IB: - mlx4_port_type = MLX4_PORT_TYPE_IB; - break; - default: - return -EOPNOTSUPP; - } - - return __set_port_type(info, mlx4_port_type); -} - static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink) { struct mlx4_priv *priv = devlink_priv(devlink); @@ -3987,7 +3988,6 @@ static int mlx4_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a } static const struct devlink_ops mlx4_devlink_ops = { - .port_type_set = mlx4_devlink_port_type_set, .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT), .reload_down = mlx4_devlink_reload_down, .reload_up = mlx4_devlink_reload_up, diff --git a/include/net/devlink.h b/include/net/devlink.h index fdcb2c55f1b5..0dfadc234b9e 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1274,8 +1274,6 @@ struct devlink_ops { int (*reload_up)(struct devlink *devlink, enum devlink_reload_action action, enum devlink_reload_limit limit, u32 *actions_performed, struct netlink_ext_ack *extack); - int (*port_type_set)(struct devlink_port *devlink_port, - enum devlink_port_type port_type); int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info); @@ -1652,12 +1650,15 @@ void devlink_free(struct devlink *devlink); * @port_split: Callback used to split the port into multiple ones. * @port_unsplit: Callback used to unsplit the port group back into * a single port. + * @port_type_set: Callback used to set a type of a port. 
*/ struct devlink_port_ops { int (*port_split)(struct devlink *devlink, struct devlink_port *port, unsigned int count, struct netlink_ext_ack *extack); int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port, struct netlink_ext_ack *extack); + int (*port_type_set)(struct devlink_port *devlink_port, + enum devlink_port_type port_type); }; void devlink_port_init(struct devlink *devlink, diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index 1ed95c76cf67..d69e278ae15a 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -1137,14 +1137,13 @@ static int devlink_port_type_set(struct devlink_port *devlink_port, { int err; - if (!devlink_port->devlink->ops->port_type_set) + if (!devlink_port->ops->port_type_set) return -EOPNOTSUPP; if (port_type == devlink_port->type) return 0; - err = devlink_port->devlink->ops->port_type_set(devlink_port, - port_type); + err = devlink_port->ops->port_type_set(devlink_port, port_type); if (err) return err; -- cgit v1.2.3 From 71c93e37cf3d0528e5d17ecc0b1b07db2086db67 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 May 2023 12:28:36 +0200 Subject: devlink: move port_fn_hw_addr_get/set() to devlink_port_ops Move port_fn_hw_addr_get/set() from devlink_ops into newly introduced devlink_port_ops. Signed-off-by: Jiri Pirko Acked-by: Martin Habets Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 2 - .../ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4 + drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 12 +-- .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 12 +-- drivers/net/ethernet/sfc/efx_devlink.c | 86 +++++++++++----------- include/net/devlink.h | 38 ++++------ net/devlink/leftover.c | 15 ++-- 7 files changed, 80 insertions(+), 89 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index bfaec67abf0d..1e96f32bd1b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -310,8 +310,6 @@ static const struct devlink_ops mlx5_devlink_ops = { .eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get, .eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set, .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get, - .port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get, - .port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set, .rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set, .rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set, .rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index d9c17481b972..78d12c377900 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -66,6 +66,8 @@ static void mlx5_esw_dl_port_free(struct devlink_port *dl_port) } static const struct devlink_port_ops mlx5_esw_dl_port_ops = { + .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get, + .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set, }; int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num) @@ -139,6 +141,8 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1 } static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = { + .port_fn_hw_addr_get = 
mlx5_devlink_port_fn_hw_addr_get, + .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set, }; int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 280dc71b032c..f70124ad71cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -506,12 +506,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, enum devlink_eswitch_encap_mode *encap); -int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port, - u8 *hw_addr, int *hw_addr_len, - struct netlink_ext_ack *extack); -int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port, - const u8 *hw_addr, int hw_addr_len, - struct netlink_ext_ack *extack); +int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack); +int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack); int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled, struct netlink_ext_ack *extack); int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 7a65dcf01dba..1b2f5e273525 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -3957,9 +3957,9 @@ is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num) mlx5_esw_is_sf_vport(esw, vport_num); } -int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port, - u8 *hw_addr, int *hw_addr_len, - struct netlink_ext_ack *extack) +int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw; struct mlx5_vport *vport; @@ -3986,9 +3986,9 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port, return 0; } -int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port, - const u8 *hw_addr, int hw_addr_len, - struct netlink_ext_ack *extack) +int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack) { struct mlx5_eswitch *esw; u16 vport_num; diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c index e74f74037405..b82dad50a5b1 100644 --- a/drivers/net/ethernet/sfc/efx_devlink.c +++ b/drivers/net/ethernet/sfc/efx_devlink.c @@ -26,46 +26,6 @@ struct efx_devlink { #ifdef CONFIG_SFC_SRIOV -static const struct devlink_port_ops sfc_devlink_port_ops = { -}; - -static void efx_devlink_del_port(struct devlink_port *dl_port) -{ - if (!dl_port) - return; - devl_port_unregister(dl_port); -} - -static int efx_devlink_add_port(struct efx_nic *efx, - struct mae_mport_desc *mport) -{ - bool external = false; - - if (!ef100_mport_on_local_intf(efx, mport)) - external = true; - - switch (mport->mport_type) { - case MAE_MPORT_DESC_MPORT_TYPE_VNIC: - if (mport->vf_idx != MAE_MPORT_DESC_VF_IDX_NULL) - devlink_port_attrs_pci_vf_set(&mport->dl_port, 0, mport->pf_idx, - mport->vf_idx, - external); - else - devlink_port_attrs_pci_pf_set(&mport->dl_port, 0, 
mport->pf_idx, - external); - break; - default: - /* MAE_MPORT_DESC_MPORT_ALIAS and UNDEFINED */ - return 0; - } - - mport->dl_port.index = mport->mport_id; - - return devl_port_register_with_ops(efx->devlink, &mport->dl_port, - mport->mport_id, - &sfc_devlink_port_ops); -} - static int efx_devlink_port_addr_get(struct devlink_port *port, u8 *hw_addr, int *hw_addr_len, struct netlink_ext_ack *extack) @@ -164,6 +124,48 @@ static int efx_devlink_port_addr_set(struct devlink_port *port, return rc; } +static const struct devlink_port_ops sfc_devlink_port_ops = { + .port_fn_hw_addr_get = efx_devlink_port_addr_get, + .port_fn_hw_addr_set = efx_devlink_port_addr_set, +}; + +static void efx_devlink_del_port(struct devlink_port *dl_port) +{ + if (!dl_port) + return; + devl_port_unregister(dl_port); +} + +static int efx_devlink_add_port(struct efx_nic *efx, + struct mae_mport_desc *mport) +{ + bool external = false; + + if (!ef100_mport_on_local_intf(efx, mport)) + external = true; + + switch (mport->mport_type) { + case MAE_MPORT_DESC_MPORT_TYPE_VNIC: + if (mport->vf_idx != MAE_MPORT_DESC_VF_IDX_NULL) + devlink_port_attrs_pci_vf_set(&mport->dl_port, 0, mport->pf_idx, + mport->vf_idx, + external); + else + devlink_port_attrs_pci_pf_set(&mport->dl_port, 0, mport->pf_idx, + external); + break; + default: + /* MAE_MPORT_DESC_MPORT_ALIAS and UNDEFINED */ + return 0; + } + + mport->dl_port.index = mport->mport_id; + + return devl_port_register_with_ops(efx->devlink, &mport->dl_port, + mport->mport_id, + &sfc_devlink_port_ops); +} + #endif static int efx_devlink_info_nvram_partition(struct efx_nic *efx, @@ -615,10 +617,6 @@ static int efx_devlink_info_get(struct devlink *devlink, static const struct devlink_ops sfc_devlink_ops = { .info_get = efx_devlink_info_get, -#ifdef CONFIG_SFC_SRIOV - .port_function_hw_addr_get = efx_devlink_port_addr_get, - .port_function_hw_addr_set = efx_devlink_port_addr_set, -#endif }; #ifdef CONFIG_SFC_SRIOV diff --git a/include/net/devlink.h b/include/net/devlink.h index 0dfadc234b9e..c580b154cfe4 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1429,28 +1429,6 @@ struct devlink_ops { int (*trap_policer_counter_get)(struct devlink *devlink, const struct devlink_trap_policer *policer, u64 *p_drops); - /** - * @port_function_hw_addr_get: Port function's hardware address get function. - * - * Should be used by device drivers to report the hardware address of a function managed - * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port - * function handling for a particular port. - * - * Note: @extack can be NULL when port notifier queries the port function. - */ - int (*port_function_hw_addr_get)(struct devlink_port *port, u8 *hw_addr, - int *hw_addr_len, - struct netlink_ext_ack *extack); - /** - * @port_function_hw_addr_set: Port function's hardware address set function. - * - * Should be used by device drivers to set the hardware address of a function managed - * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port - * function handling for a particular port. - */ - int (*port_function_hw_addr_set)(struct devlink_port *port, - const u8 *hw_addr, int hw_addr_len, - struct netlink_ext_ack *extack); /** * @port_fn_roce_get: Port function's roce get function. * @@ -1651,6 +1629,16 @@ void devlink_free(struct devlink *devlink); * @port_unsplit: Callback used to unsplit the port group back into * a single port. * @port_type_set: Callback used to set a type of a port. 
+ * @port_fn_hw_addr_get: Callback used to set port function's hardware address. + * Should be used by device drivers to report + * the hardware address of a function managed + * by the devlink port. + * @port_fn_hw_addr_set: Callback used to set port function's hardware address. + * Should be used by device drivers to set the hardware + * address of a function managed by the devlink port. + * + * Note: Driver should return -EOPNOTSUPP if it doesn't support + * port function (@port_fn_*) handling for a particular port. */ struct devlink_port_ops { int (*port_split)(struct devlink *devlink, struct devlink_port *port, @@ -1659,6 +1647,12 @@ struct devlink_port_ops { struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, enum devlink_port_type port_type); + int (*port_fn_hw_addr_get)(struct devlink_port *port, u8 *hw_addr, + int *hw_addr_len, + struct netlink_ext_ack *extack); + int (*port_fn_hw_addr_set)(struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack); }; void devlink_port_init(struct devlink *devlink, diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index d69e278ae15a..ac171ea984cc 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -691,8 +691,7 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, return 0; } -static int devlink_port_fn_hw_addr_fill(const struct devlink_ops *ops, - struct devlink_port *port, +static int devlink_port_fn_hw_addr_fill(struct devlink_port *port, struct sk_buff *msg, struct netlink_ext_ack *extack, bool *msg_updated) @@ -701,10 +700,10 @@ static int devlink_port_fn_hw_addr_fill(const struct devlink_ops *ops, int hw_addr_len; int err; - if (!ops->port_function_hw_addr_get) + if (!port->ops->port_fn_hw_addr_get) return 0; - err = ops->port_function_hw_addr_get(port, hw_addr, &hw_addr_len, + err = port->ops->port_fn_hw_addr_get(port, hw_addr, &hw_addr_len, extack); if (err) { if (err == -EOPNOTSUPP) @@ -884,8 +883,7 @@ devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *por return -EMSGSIZE; ops = port->devlink->ops; - err = devlink_port_fn_hw_addr_fill(ops, port, msg, extack, - &msg_updated); + err = devlink_port_fn_hw_addr_fill(port, msg, extack, &msg_updated); if (err) goto out; err = devlink_port_fn_caps_fill(ops, port, msg, extack, @@ -1156,7 +1154,6 @@ static int devlink_port_function_hw_addr_set(struct devlink_port *port, const struct nlattr *attr, struct netlink_ext_ack *extack) { - const struct devlink_ops *ops = port->devlink->ops; const u8 *hw_addr; int hw_addr_len; @@ -1177,7 +1174,7 @@ static int devlink_port_function_hw_addr_set(struct devlink_port *port, } } - return ops->port_function_hw_addr_set(port, hw_addr, hw_addr_len, + return port->ops->port_fn_hw_addr_set(port, hw_addr, hw_addr_len, extack); } @@ -1201,7 +1198,7 @@ static int devlink_port_function_validate(struct devlink_port *devlink_port, struct nlattr *attr; if (tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] && - !ops->port_function_hw_addr_set) { + !devlink_port->ops->port_fn_hw_addr_set) { NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR], "Port doesn't support function attributes"); return -EOPNOTSUPP; -- cgit v1.2.3 From 933c13275c4933ad68f107ed93ae9b0658b19ad0 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 May 2023 12:28:37 +0200 Subject: devlink: move port_fn_roce_get/set() to devlink_port_ops Move port_fn_roce_get/set() from devlink_ops into newly introduced devlink_port_ops. 
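For reference, a hedged sketch of what the moved RoCE callbacks can look like on the driver side, using the prototypes added to devlink_port_ops below. The foo_* names and the software-only state are illustrative assumptions, not taken from mlx5.

```c
#include <net/devlink.h>

/* Hypothetical per-port driver state. */
struct foo_port {
	struct devlink_port dl_port;
	bool roce_enabled;
};

static int foo_port_fn_roce_get(struct devlink_port *devlink_port,
				bool *is_enable,
				struct netlink_ext_ack *extack)
{
	struct foo_port *port = container_of(devlink_port, struct foo_port,
					     dl_port);

	*is_enable = port->roce_enabled;
	return 0;
}

static int foo_port_fn_roce_set(struct devlink_port *devlink_port,
				bool enable, struct netlink_ext_ack *extack)
{
	struct foo_port *port = container_of(devlink_port, struct foo_port,
					     dl_port);

	port->roce_enabled = enable;	/* a real driver would program HW */
	return 0;
}

static const struct devlink_port_ops foo_port_ops = {
	.port_fn_roce_get = foo_port_fn_roce_get,
	.port_fn_roce_set = foo_port_fn_roce_set,
};
```

If a port's ops table simply omits these callbacks, the core skips the attribute on dump and rejects sets with -EOPNOTSUPP, as the leftover.c hunks below show.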
Signed-off-by: Jiri Pirko Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 2 -- .../ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4 +++ include/net/devlink.h | 31 +++++++++------------- net/devlink/leftover.c | 17 ++++++------ 4 files changed, 25 insertions(+), 29 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 1e96f32bd1b5..d63ec466dcd6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -317,8 +317,6 @@ static const struct devlink_ops mlx5_devlink_ops = { .rate_node_new = mlx5_esw_devlink_rate_node_new, .rate_node_del = mlx5_esw_devlink_rate_node_del, .rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set, - .port_fn_roce_get = mlx5_devlink_port_fn_roce_get, - .port_fn_roce_set = mlx5_devlink_port_fn_roce_set, .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get, .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set, #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index 78d12c377900..9011619e1fdd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -68,6 +68,8 @@ static void mlx5_esw_dl_port_free(struct devlink_port *dl_port) static const struct devlink_port_ops mlx5_esw_dl_port_ops = { .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get, .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set, + .port_fn_roce_get = mlx5_devlink_port_fn_roce_get, + .port_fn_roce_set = mlx5_devlink_port_fn_roce_set, }; int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num) @@ -143,6 +145,8 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1 static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = { .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get, .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set, + .port_fn_roce_get = mlx5_devlink_port_fn_roce_get, + .port_fn_roce_set = mlx5_devlink_port_fn_roce_set, }; int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, diff --git a/include/net/devlink.h b/include/net/devlink.h index c580b154cfe4..b8e8ea850562 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1429,24 +1429,6 @@ struct devlink_ops { int (*trap_policer_counter_get)(struct devlink *devlink, const struct devlink_trap_policer *policer, u64 *p_drops); - /** - * @port_fn_roce_get: Port function's roce get function. - * - * Query RoCE state of a function managed by the devlink port. - * Return -EOPNOTSUPP if port function RoCE handling is not supported. - */ - int (*port_fn_roce_get)(struct devlink_port *devlink_port, - bool *is_enable, - struct netlink_ext_ack *extack); - /** - * @port_fn_roce_set: Port function's roce set function. - * - * Enable/Disable the RoCE state of a function managed by the devlink - * port. - * Return -EOPNOTSUPP if port function RoCE handling is not supported. - */ - int (*port_fn_roce_set)(struct devlink_port *devlink_port, - bool enable, struct netlink_ext_ack *extack); /** * @port_fn_migratable_get: Port function's migratable get function. * @@ -1636,6 +1618,14 @@ void devlink_free(struct devlink *devlink); * @port_fn_hw_addr_set: Callback used to set port function's hardware address. 
* Should be used by device drivers to set the hardware * address of a function managed by the devlink port. + * @port_fn_roce_get: Callback used to get port function's RoCE capability. + * Should be used by device drivers to report + * the current state of RoCE capability of a function + * managed by the devlink port. + * @port_fn_roce_set: Callback used to set port function's RoCE capability. + * Should be used by device drivers to enable/disable + * RoCE capability of a function managed + * by the devlink port. * * Note: Driver should return -EOPNOTSUPP if it doesn't support * port function (@port_fn_*) handling for a particular port. @@ -1653,6 +1643,11 @@ struct devlink_port_ops { int (*port_fn_hw_addr_set)(struct devlink_port *port, const u8 *hw_addr, int hw_addr_len, struct netlink_ext_ack *extack); + int (*port_fn_roce_get)(struct devlink_port *devlink_port, + bool *is_enable, + struct netlink_ext_ack *extack); + int (*port_fn_roce_set)(struct devlink_port *devlink_port, + bool enable, struct netlink_ext_ack *extack); }; void devlink_port_init(struct devlink *devlink, diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index ac171ea984cc..e87ca3933a50 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -447,18 +447,18 @@ static void devlink_port_fn_cap_fill(struct nla_bitfield32 *caps, caps->value |= cap; } -static int devlink_port_fn_roce_fill(const struct devlink_ops *ops, - struct devlink_port *devlink_port, +static int devlink_port_fn_roce_fill(struct devlink_port *devlink_port, struct nla_bitfield32 *caps, struct netlink_ext_ack *extack) { bool is_enable; int err; - if (!ops->port_fn_roce_get) + if (!devlink_port->ops->port_fn_roce_get) return 0; - err = ops->port_fn_roce_get(devlink_port, &is_enable, extack); + err = devlink_port->ops->port_fn_roce_get(devlink_port, &is_enable, + extack); if (err) { if (err == -EOPNOTSUPP) return 0; @@ -501,7 +501,7 @@ static int devlink_port_fn_caps_fill(const struct devlink_ops *ops, struct nla_bitfield32 caps = {}; int err; - err = devlink_port_fn_roce_fill(ops, devlink_port, &caps, extack); + err = devlink_port_fn_roce_fill(devlink_port, &caps, extack); if (err) return err; @@ -837,9 +837,8 @@ static int devlink_port_fn_roce_set(struct devlink_port *devlink_port, bool enable, struct netlink_ext_ack *extack) { - const struct devlink_ops *ops = devlink_port->devlink->ops; - - return ops->port_fn_roce_set(devlink_port, enable, extack); + return devlink_port->ops->port_fn_roce_set(devlink_port, enable, + extack); } static int devlink_port_fn_caps_set(struct devlink_port *devlink_port, @@ -1214,7 +1213,7 @@ static int devlink_port_function_validate(struct devlink_port *devlink_port, caps = nla_get_bitfield32(attr); if (caps.selector & DEVLINK_PORT_FN_CAP_ROCE && - !ops->port_fn_roce_set) { + !devlink_port->ops->port_fn_roce_set) { NL_SET_ERR_MSG_ATTR(extack, attr, "Port doesn't support RoCE function attribute"); return -EOPNOTSUPP; -- cgit v1.2.3 From 4a490d7154b3d84c7e451502306a53b6607b6566 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 May 2023 12:28:38 +0200 Subject: devlink: move port_fn_migratable_get/set() to devlink_port_ops Move port_fn_migratable_get/set() from devlink_ops into newly introduced devlink_port_ops. 
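A side effect worth spelling out: because the ops now hang off each devlink_port, a driver can expose a different callback set per port flavour, which is what the two mlx5 ops tables touched in this series do. A hypothetical sketch follows; the foo_* names are illustrative and the stub bodies stand in for real driver logic.

```c
#include <net/devlink.h>

static int foo_vf_migratable_get(struct devlink_port *devlink_port,
				 bool *is_enable,
				 struct netlink_ext_ack *extack)
{
	*is_enable = false;	/* query device state in a real driver */
	return 0;
}

static int foo_vf_migratable_set(struct devlink_port *devlink_port,
				 bool enable,
				 struct netlink_ext_ack *extack)
{
	return 0;		/* program device state in a real driver */
}

/* Ports without extra function attributes can share an empty table. */
static const struct devlink_port_ops foo_base_port_ops = {
};

/* Only VF ports advertise the migratable knobs moved in this patch. */
static const struct devlink_port_ops foo_vf_port_ops = {
	.port_fn_migratable_get = foo_vf_migratable_get,
	.port_fn_migratable_set = foo_vf_migratable_set,
};
```

The core side also keys off the flavour: devlink_port_fn_migratable_fill() in the hunk below only reports the capability for DEVLINK_PORT_FLAVOUR_PCI_VF ports.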
Signed-off-by: Jiri Pirko Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 2 -- .../ethernet/mellanox/mlx5/core/esw/devlink_port.c | 2 ++ include/net/devlink.h | 35 +++++++++------------- net/devlink/leftover.c | 23 +++++++------- 4 files changed, 26 insertions(+), 36 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index d63ec466dcd6..678bae618769 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -317,8 +317,6 @@ static const struct devlink_ops mlx5_devlink_ops = { .rate_node_new = mlx5_esw_devlink_rate_node_new, .rate_node_del = mlx5_esw_devlink_rate_node_del, .rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set, - .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get, - .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set, #endif #ifdef CONFIG_MLX5_SF_MANAGER .port_new = mlx5_devlink_sf_port_new, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index 9011619e1fdd..2ececd2b86c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -70,6 +70,8 @@ static const struct devlink_port_ops mlx5_esw_dl_port_ops = { .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set, .port_fn_roce_get = mlx5_devlink_port_fn_roce_get, .port_fn_roce_set = mlx5_devlink_port_fn_roce_set, + .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get, + .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set, }; int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num) diff --git a/include/net/devlink.h b/include/net/devlink.h index b8e8ea850562..25fa952a46a6 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1429,27 +1429,6 @@ struct devlink_ops { int (*trap_policer_counter_get)(struct devlink *devlink, const struct devlink_trap_policer *policer, u64 *p_drops); - /** - * @port_fn_migratable_get: Port function's migratable get function. - * - * Query migratable state of a function managed by the devlink port. - * Return -EOPNOTSUPP if port function migratable handling is not - * supported. - */ - int (*port_fn_migratable_get)(struct devlink_port *devlink_port, - bool *is_enable, - struct netlink_ext_ack *extack); - /** - * @port_fn_migratable_set: Port function's migratable set function. - * - * Enable/Disable migratable state of a function managed by the devlink - * port. - * Return -EOPNOTSUPP if port function migratable handling is not - * supported. - */ - int (*port_fn_migratable_set)(struct devlink_port *devlink_port, - bool enable, - struct netlink_ext_ack *extack); /** * port_new() - Add a new port function of a specified flavor * @devlink: Devlink instance @@ -1626,6 +1605,14 @@ void devlink_free(struct devlink *devlink); * Should be used by device drivers to enable/disable * RoCE capability of a function managed * by the devlink port. + * @port_fn_migratable_get: Callback used to get port function's migratable + * capability. Should be used by device drivers + * to report the current state of migratable capability + * of a function managed by the devlink port. + * @port_fn_migratable_set: Callback used to set port function's migratable + * capability. 
Should be used by device drivers + * to enable/disable migratable capability of + * a function managed by the devlink port. * * Note: Driver should return -EOPNOTSUPP if it doesn't support * port function (@port_fn_*) handling for a particular port. @@ -1648,6 +1635,12 @@ struct devlink_port_ops { struct netlink_ext_ack *extack); int (*port_fn_roce_set)(struct devlink_port *devlink_port, bool enable, struct netlink_ext_ack *extack); + int (*port_fn_migratable_get)(struct devlink_port *devlink_port, + bool *is_enable, + struct netlink_ext_ack *extack); + int (*port_fn_migratable_set)(struct devlink_port *devlink_port, + bool enable, + struct netlink_ext_ack *extack); }; void devlink_port_init(struct devlink *devlink, diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index e87ca3933a50..c16451ca744d 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -469,19 +469,19 @@ static int devlink_port_fn_roce_fill(struct devlink_port *devlink_port, return 0; } -static int devlink_port_fn_migratable_fill(const struct devlink_ops *ops, - struct devlink_port *devlink_port, +static int devlink_port_fn_migratable_fill(struct devlink_port *devlink_port, struct nla_bitfield32 *caps, struct netlink_ext_ack *extack) { bool is_enable; int err; - if (!ops->port_fn_migratable_get || + if (!devlink_port->ops->port_fn_migratable_get || devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_PCI_VF) return 0; - err = ops->port_fn_migratable_get(devlink_port, &is_enable, extack); + err = devlink_port->ops->port_fn_migratable_get(devlink_port, + &is_enable, extack); if (err) { if (err == -EOPNOTSUPP) return 0; @@ -492,8 +492,7 @@ static int devlink_port_fn_migratable_fill(const struct devlink_ops *ops, return 0; } -static int devlink_port_fn_caps_fill(const struct devlink_ops *ops, - struct devlink_port *devlink_port, +static int devlink_port_fn_caps_fill(struct devlink_port *devlink_port, struct sk_buff *msg, struct netlink_ext_ack *extack, bool *msg_updated) @@ -505,7 +504,7 @@ static int devlink_port_fn_caps_fill(const struct devlink_ops *ops, if (err) return err; - err = devlink_port_fn_migratable_fill(ops, devlink_port, &caps, extack); + err = devlink_port_fn_migratable_fill(devlink_port, &caps, extack); if (err) return err; @@ -828,9 +827,8 @@ static int devlink_port_fn_mig_set(struct devlink_port *devlink_port, bool enable, struct netlink_ext_ack *extack) { - const struct devlink_ops *ops = devlink_port->devlink->ops; - - return ops->port_fn_migratable_set(devlink_port, enable, extack); + return devlink_port->ops->port_fn_migratable_set(devlink_port, enable, + extack); } static int @@ -885,8 +883,7 @@ devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *por err = devlink_port_fn_hw_addr_fill(port, msg, extack, &msg_updated); if (err) goto out; - err = devlink_port_fn_caps_fill(ops, port, msg, extack, - &msg_updated); + err = devlink_port_fn_caps_fill(port, msg, extack, &msg_updated); if (err) goto out; err = devlink_port_fn_state_fill(ops, port, msg, extack, &msg_updated); @@ -1219,7 +1216,7 @@ static int devlink_port_function_validate(struct devlink_port *devlink_port, return -EOPNOTSUPP; } if (caps.selector & DEVLINK_PORT_FN_CAP_MIGRATABLE) { - if (!ops->port_fn_migratable_set) { + if (!devlink_port->ops->port_fn_migratable_set) { NL_SET_ERR_MSG_ATTR(extack, attr, "Port doesn't support migratable function attribute"); return -EOPNOTSUPP; -- cgit v1.2.3 From 216aa67f3e981a0cfb0a7c3b0d4c107823ef6c56 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 May 2023 
12:28:39 +0200 Subject: devlink: move port_fn_state_get/set() to devlink_port_ops Move port_fn_state_get/set() from devlink_ops into newly introduced devlink_port_ops. Signed-off-by: Jiri Pirko Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 2 - .../ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4 ++ include/net/devlink.h | 45 ++++++++-------------- net/devlink/leftover.c | 19 ++++----- 4 files changed, 26 insertions(+), 44 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 678bae618769..e39fd85ea2f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -321,8 +321,6 @@ static const struct devlink_ops mlx5_devlink_ops = { #ifdef CONFIG_MLX5_SF_MANAGER .port_new = mlx5_devlink_sf_port_new, .port_del = mlx5_devlink_sf_port_del, - .port_fn_state_get = mlx5_devlink_sf_port_fn_state_get, - .port_fn_state_set = mlx5_devlink_sf_port_fn_state_set, #endif .flash_update = mlx5_devlink_flash_update, .info_get = mlx5_devlink_info_get, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index 2ececd2b86c8..76c5d6e9d47f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -149,6 +149,10 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = { .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set, .port_fn_roce_get = mlx5_devlink_port_fn_roce_get, .port_fn_roce_set = mlx5_devlink_port_fn_roce_set, +#ifdef CONFIG_MLX5_SF_MANAGER + .port_fn_state_get = mlx5_devlink_sf_port_fn_state_get, + .port_fn_state_set = mlx5_devlink_sf_port_fn_state_set, +#endif }; int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port, diff --git a/include/net/devlink.h b/include/net/devlink.h index 25fa952a46a6..835989c10395 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1464,36 +1464,6 @@ struct devlink_ops { */ int (*port_del)(struct devlink *devlink, struct devlink_port *port, struct netlink_ext_ack *extack); - /** - * port_fn_state_get() - Get the state of a port function - * @devlink: Devlink instance - * @port: The devlink port - * @state: Admin configured state - * @opstate: Current operational state - * @extack: extack for reporting error messages - * - * Reports the admin and operational state of a devlink port function - * - * Return: 0 on success, negative value otherwise. - */ - int (*port_fn_state_get)(struct devlink_port *port, - enum devlink_port_fn_state *state, - enum devlink_port_fn_opstate *opstate, - struct netlink_ext_ack *extack); - /** - * port_fn_state_set() - Set the admin state of a port function - * @devlink: Devlink instance - * @port: The devlink port - * @state: Admin state - * @extack: extack for reporting error messages - * - * Set the admin state of a devlink port function - * - * Return: 0 on success, negative value otherwise. - */ - int (*port_fn_state_set)(struct devlink_port *port, - enum devlink_port_fn_state state, - struct netlink_ext_ack *extack); /** * Rate control callbacks. @@ -1613,6 +1583,14 @@ void devlink_free(struct devlink *devlink); * capability. Should be used by device drivers * to enable/disable migratable capability of * a function managed by the devlink port. + * @port_fn_state_get: Callback used to get port function's state. 
+ * Should be used by device drivers to report + * the current admin and operational state of a + * function managed by the devlink port. + * @port_fn_state_set: Callback used to get port function's state. + * Should be used by device drivers set + * the admin state of a function managed + * by the devlink port. * * Note: Driver should return -EOPNOTSUPP if it doesn't support * port function (@port_fn_*) handling for a particular port. @@ -1641,6 +1619,13 @@ struct devlink_port_ops { int (*port_fn_migratable_set)(struct devlink_port *devlink_port, bool enable, struct netlink_ext_ack *extack); + int (*port_fn_state_get)(struct devlink_port *port, + enum devlink_port_fn_state *state, + enum devlink_port_fn_opstate *opstate, + struct netlink_ext_ack *extack); + int (*port_fn_state_set)(struct devlink_port *port, + enum devlink_port_fn_state state, + struct netlink_ext_ack *extack); }; void devlink_port_init(struct devlink *devlink, diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index c16451ca744d..36acaa6a6f18 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -787,8 +787,7 @@ devlink_port_fn_opstate_valid(enum devlink_port_fn_opstate opstate) opstate == DEVLINK_PORT_FN_OPSTATE_ATTACHED; } -static int devlink_port_fn_state_fill(const struct devlink_ops *ops, - struct devlink_port *port, +static int devlink_port_fn_state_fill(struct devlink_port *port, struct sk_buff *msg, struct netlink_ext_ack *extack, bool *msg_updated) @@ -797,10 +796,10 @@ static int devlink_port_fn_state_fill(const struct devlink_ops *ops, enum devlink_port_fn_state state; int err; - if (!ops->port_fn_state_get) + if (!port->ops->port_fn_state_get) return 0; - err = ops->port_fn_state_get(port, &state, &opstate, extack); + err = port->ops->port_fn_state_get(port, &state, &opstate, extack); if (err) { if (err == -EOPNOTSUPP) return 0; @@ -870,7 +869,6 @@ static int devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *port, struct netlink_ext_ack *extack) { - const struct devlink_ops *ops; struct nlattr *function_attr; bool msg_updated = false; int err; @@ -879,14 +877,13 @@ devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *por if (!function_attr) return -EMSGSIZE; - ops = port->devlink->ops; err = devlink_port_fn_hw_addr_fill(port, msg, extack, &msg_updated); if (err) goto out; err = devlink_port_fn_caps_fill(port, msg, extack, &msg_updated); if (err) goto out; - err = devlink_port_fn_state_fill(ops, port, msg, extack, &msg_updated); + err = devlink_port_fn_state_fill(port, msg, extack, &msg_updated); out: if (err || !msg_updated) nla_nest_cancel(msg, function_attr); @@ -1179,18 +1176,15 @@ static int devlink_port_fn_state_set(struct devlink_port *port, struct netlink_ext_ack *extack) { enum devlink_port_fn_state state; - const struct devlink_ops *ops; state = nla_get_u8(attr); - ops = port->devlink->ops; - return ops->port_fn_state_set(port, state, extack); + return port->ops->port_fn_state_set(port, state, extack); } static int devlink_port_function_validate(struct devlink_port *devlink_port, struct nlattr **tb, struct netlink_ext_ack *extack) { - const struct devlink_ops *ops = devlink_port->devlink->ops; struct nlattr *attr; if (tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] && @@ -1199,7 +1193,8 @@ static int devlink_port_function_validate(struct devlink_port *devlink_port, "Port doesn't support function attributes"); return -EOPNOTSUPP; } - if (tb[DEVLINK_PORT_FN_ATTR_STATE] && !ops->port_fn_state_set) { + if (tb[DEVLINK_PORT_FN_ATTR_STATE] && 
+ !devlink_port->ops->port_fn_state_set) { NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR], "Function does not support state setting"); return -EOPNOTSUPP; -- cgit v1.2.3 From 216ba9f4adc8f2e452edb9a58d2dfbfc11608c00 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 May 2023 12:28:40 +0200 Subject: devlink: move port_del() to devlink_port_ops Move port_del() from devlink_ops into newly introduced devlink_port_ops. Signed-off-by: Jiri Pirko Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 1 - .../ethernet/mellanox/mlx5/core/esw/devlink_port.c | 3 +++ include/net/devlink.h | 22 +++++----------------- net/devlink/leftover.c | 6 +++--- 4 files changed, 11 insertions(+), 21 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index e39fd85ea2f9..63635cc44479 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -320,7 +320,6 @@ static const struct devlink_ops mlx5_devlink_ops = { #endif #ifdef CONFIG_MLX5_SF_MANAGER .port_new = mlx5_devlink_sf_port_new, - .port_del = mlx5_devlink_sf_port_del, #endif .flash_update = mlx5_devlink_flash_update, .info_get = mlx5_devlink_info_get, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index 76c5d6e9d47f..f370f67d9e33 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -145,6 +145,9 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1 } static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = { +#ifdef CONFIG_MLX5_SF_MANAGER + .port_del = mlx5_devlink_sf_port_del, +#endif .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get, .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set, .port_fn_roce_get = mlx5_devlink_port_fn_roce_get, diff --git a/include/net/devlink.h b/include/net/devlink.h index 835989c10395..fe42ad46cf3b 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1447,23 +1447,6 @@ struct devlink_ops { int (*port_new)(struct devlink *devlink, const struct devlink_port_new_attrs *attrs, struct netlink_ext_ack *extack); - /** - * port_del() - Delete a port function - * @devlink: Devlink instance - * @port: The devlink port - * @extack: extack for reporting error messages - * - * Devlink core will call this device driver function upon user request - * to delete a previously created port function - * - * Notes: - * - On success, drivers must unregister the corresponding devlink - * port - * - * Return: 0 on success, negative value otherwise. - */ - int (*port_del)(struct devlink *devlink, struct devlink_port *port, - struct netlink_ext_ack *extack); /** * Rate control callbacks. @@ -1560,6 +1543,9 @@ void devlink_free(struct devlink *devlink); * @port_unsplit: Callback used to unsplit the port group back into * a single port. * @port_type_set: Callback used to set a type of a port. + * @port_del: Callback used to delete selected port along with related function. + * Devlink core calls this upon user request to delete + * a port previously created by devlink_ops->port_new(). * @port_fn_hw_addr_get: Callback used to set port function's hardware address. 
* Should be used by device drivers to report * the hardware address of a function managed @@ -1602,6 +1588,8 @@ struct devlink_port_ops { struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, enum devlink_port_type port_type); + int (*port_del)(struct devlink *devlink, struct devlink_port *port, + struct netlink_ext_ack *extack); int (*port_fn_hw_addr_get)(struct devlink_port *port, u8 *hw_addr, int *hw_addr_len, struct netlink_ext_ack *extack); diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index 36acaa6a6f18..404313128cfb 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -1348,7 +1348,7 @@ static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, struct devlink_port_new_attrs new_attrs = {}; struct devlink *devlink = info->user_ptr[0]; - if (!devlink->ops->port_new || !devlink->ops->port_del) + if (!devlink->ops->port_new) return -EOPNOTSUPP; if (!info->attrs[DEVLINK_ATTR_PORT_FLAVOUR] || @@ -1387,10 +1387,10 @@ static int devlink_nl_cmd_port_del_doit(struct sk_buff *skb, struct netlink_ext_ack *extack = info->extack; struct devlink *devlink = info->user_ptr[0]; - if (!devlink->ops->port_del) + if (!devlink_port->ops->port_del) return -EOPNOTSUPP; - return devlink->ops->port_del(devlink, devlink_port, extack); + return devlink_port->ops->port_del(devlink, devlink_port, extack); } static int -- cgit v1.2.3 From 4b5ed2b5a145543bd901b97f33bcac55a574253b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 May 2023 12:28:41 +0200 Subject: devlink: save devlink_port_ops into a variable in devlink_port_function_validate() Now when the original ops variable is removed, introduce it again but this time for devlink_port_ops. Signed-off-by: Jiri Pirko Signed-off-by: Jakub Kicinski --- net/devlink/leftover.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index 404313128cfb..d5ca9fbe2d40 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -1185,16 +1185,16 @@ static int devlink_port_function_validate(struct devlink_port *devlink_port, struct nlattr **tb, struct netlink_ext_ack *extack) { + const struct devlink_port_ops *ops = devlink_port->ops; struct nlattr *attr; if (tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] && - !devlink_port->ops->port_fn_hw_addr_set) { + !ops->port_fn_hw_addr_set) { NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR], "Port doesn't support function attributes"); return -EOPNOTSUPP; } - if (tb[DEVLINK_PORT_FN_ATTR_STATE] && - !devlink_port->ops->port_fn_state_set) { + if (tb[DEVLINK_PORT_FN_ATTR_STATE] && !ops->port_fn_state_set) { NL_SET_ERR_MSG_ATTR(extack, tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR], "Function does not support state setting"); return -EOPNOTSUPP; @@ -1205,13 +1205,13 @@ static int devlink_port_function_validate(struct devlink_port *devlink_port, caps = nla_get_bitfield32(attr); if (caps.selector & DEVLINK_PORT_FN_CAP_ROCE && - !devlink_port->ops->port_fn_roce_set) { + !ops->port_fn_roce_set) { NL_SET_ERR_MSG_ATTR(extack, attr, "Port doesn't support RoCE function attribute"); return -EOPNOTSUPP; } if (caps.selector & DEVLINK_PORT_FN_CAP_MIGRATABLE) { - if (!devlink_port->ops->port_fn_migratable_set) { + if (!ops->port_fn_migratable_set) { NL_SET_ERR_MSG_ATTR(extack, attr, "Port doesn't support migratable function attribute"); return -EOPNOTSUPP; -- cgit v1.2.3 From 7b4858df3bf7a8d43ed6b58f411543a040c56f10 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 29 May 2023 
14:48:28 +0300 Subject: skbuff: bridge: Add layer 2 miss indication For EVPN non-DF (Designated Forwarder) filtering we need to be able to prevent decapsulated traffic from being flooded to a multi-homed host. Filtering of multicast and broadcast traffic can be achieved using the following flower filter: # tc filter add dev bond0 egress pref 1 proto all flower indev vxlan0 dst_mac 01:00:00:00:00:00/01:00:00:00:00:00 action drop Unlike broadcast and multicast traffic, it is not currently possible to filter unknown unicast traffic. The classification into unknown unicast is performed by the bridge driver, but is not visible to other layers such as tc. Solve this by adding a new 'l2_miss' bit to the tc skb extension. Clear the bit whenever a packet enters the bridge (received from a bridge port or transmitted via the bridge) and set it if the packet did not match an FDB or MDB entry. If there is no skb extension and the bit needs to be cleared, then do not allocate one as no extension is equivalent to the bit being cleared. The bit is not set for broadcast packets as they never perform a lookup and therefore never incur a miss. A bit that is set for every flooded packet would also work for the current use case, but it does not allow us to differentiate between registered and unregistered multicast traffic, which might be useful in the future. To keep the performance impact to a minimum, the marking of packets is guarded by the 'tc_skb_ext_tc' static key. When 'false', the skb is not touched and an skb extension is not allocated. Instead, only a 5 bytes nop is executed, as demonstrated below for the call site in br_handle_frame(). Before the patch: ``` memset(skb->cb, 0, sizeof(struct br_input_skb_cb)); c37b09: 49 c7 44 24 28 00 00 movq $0x0,0x28(%r12) c37b10: 00 00 p = br_port_get_rcu(skb->dev); c37b12: 49 8b 44 24 10 mov 0x10(%r12),%rax memset(skb->cb, 0, sizeof(struct br_input_skb_cb)); c37b17: 49 c7 44 24 30 00 00 movq $0x0,0x30(%r12) c37b1e: 00 00 c37b20: 49 c7 44 24 38 00 00 movq $0x0,0x38(%r12) c37b27: 00 00 ``` After the patch (when static key is disabled): ``` memset(skb->cb, 0, sizeof(struct br_input_skb_cb)); c37c29: 49 c7 44 24 28 00 00 movq $0x0,0x28(%r12) c37c30: 00 00 c37c32: 49 8d 44 24 28 lea 0x28(%r12),%rax c37c37: 48 c7 40 08 00 00 00 movq $0x0,0x8(%rax) c37c3e: 00 c37c3f: 48 c7 40 10 00 00 00 movq $0x0,0x10(%rax) c37c46: 00 #ifdef CONFIG_HAVE_JUMP_LABEL_HACK static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { asm_volatile_goto("1:" c37c47: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) br_tc_skb_miss_set(skb, false); p = br_port_get_rcu(skb->dev); c37c4c: 49 8b 44 24 10 mov 0x10(%r12),%rax ``` Subsequent patches will extend the flower classifier to be able to match on the new 'l2_miss' bit and enable / disable the static key when filters that match on it are added / deleted. 
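To make the "no extension" case explicit, here is an illustrative reader, not part of this patch: since the bridge only allocates an extension when it has to set the bit, the absence of an extension must be read as "no miss", which is how the flow dissector consumes it in the follow-up patch. The foo_ prefix is a hypothetical name.

```c
#include <linux/skbuff.h>

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline bool foo_skb_l2_miss(const struct sk_buff *skb)
{
	const struct tc_skb_ext *ext;

	/* No extension allocated means the bit was never set. */
	ext = skb_ext_find(skb, TC_SKB_EXT);

	return ext ? ext->l2_miss : false;
}
#endif
```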
Signed-off-by: Ido Schimmel Acked-by: Nikolay Aleksandrov Acked-by: Jakub Kicinski Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 1 + net/bridge/br_device.c | 1 + net/bridge/br_forward.c | 3 +++ net/bridge/br_input.c | 1 + net/bridge/br_private.h | 27 +++++++++++++++++++++++++++ 5 files changed, 33 insertions(+) (limited to 'net') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5951904413ab..e2f48ddb2f7c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -330,6 +330,7 @@ struct tc_skb_ext { u8 post_ct_snat:1; u8 post_ct_dnat:1; u8 act_miss:1; /* Set if act_miss_cookie is used */ + u8 l2_miss:1; /* Set by bridge upon FDB or MDB miss */ }; #endif diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 8eca8a5c80c6..9a5ea06236bd 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -39,6 +39,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) u16 vid = 0; memset(skb->cb, 0, sizeof(struct br_input_skb_cb)); + br_tc_skb_miss_set(skb, false); rcu_read_lock(); nf_ops = rcu_dereference(nf_br_ops); diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 84d6dd5e5b1a..6116eba1bd89 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -203,6 +203,8 @@ void br_flood(struct net_bridge *br, struct sk_buff *skb, struct net_bridge_port *prev = NULL; struct net_bridge_port *p; + br_tc_skb_miss_set(skb, pkt_type != BR_PKT_BROADCAST); + list_for_each_entry_rcu(p, &br->port_list, list) { /* Do not flood unicast traffic to ports that turn it off, nor * other traffic if flood off, except for traffic we originate @@ -295,6 +297,7 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst, allow_mode_include = false; } else { p = NULL; + br_tc_skb_miss_set(skb, true); } while (p || rp) { diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index fc17b9fd93e6..c34a0b0901b0 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -334,6 +334,7 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb) return RX_HANDLER_CONSUMED; memset(skb->cb, 0, sizeof(struct br_input_skb_cb)); + br_tc_skb_miss_set(skb, false); p = br_port_get_rcu(skb->dev); if (p->flags & BR_VLAN_TUNNEL) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 2119729ded2b..a63b32c1638e 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -754,6 +755,32 @@ void br_boolopt_multi_get(const struct net_bridge *br, struct br_boolopt_multi *bm); void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on); +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) +static inline void br_tc_skb_miss_set(struct sk_buff *skb, bool miss) +{ + struct tc_skb_ext *ext; + + if (!tc_skb_ext_tc_enabled()) + return; + + ext = skb_ext_find(skb, TC_SKB_EXT); + if (ext) { + ext->l2_miss = miss; + return; + } + if (!miss) + return; + ext = tc_skb_ext_alloc(skb); + if (!ext) + return; + ext->l2_miss = true; +} +#else +static inline void br_tc_skb_miss_set(struct sk_buff *skb, bool miss) +{ +} +#endif + /* br_device.c */ void br_dev_setup(struct net_device *dev); void br_dev_delete(struct net_device *dev, struct list_head *list); -- cgit v1.2.3 From d5ccfd90df7fd0a50038a68634c131b8fd081bac Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 29 May 2023 14:48:29 +0300 Subject: flow_dissector: Dissect layer 2 miss from tc skb extension Extend the 'FLOW_DISSECTOR_KEY_META' key with a new 'l2_miss' field and 
populate it from a field with the same name in the tc skb extension. This field is set by the bridge driver for packets that incur an FDB or MDB miss. The next patch will extend the flower classifier to be able to match on layer 2 misses. Signed-off-by: Ido Schimmel Reviewed-by: Nikolay Aleksandrov Signed-off-by: Jakub Kicinski --- include/net/flow_dissector.h | 2 ++ net/core/flow_dissector.c | 10 ++++++++++ 2 files changed, 12 insertions(+) (limited to 'net') diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 85b2281576ed..8b41668c77fc 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -243,10 +243,12 @@ struct flow_dissector_key_ip { * struct flow_dissector_key_meta: * @ingress_ifindex: ingress ifindex * @ingress_iftype: ingress interface type + * @l2_miss: packet did not match an L2 entry during forwarding */ struct flow_dissector_key_meta { int ingress_ifindex; u16 ingress_iftype; + u8 l2_miss; }; /** diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 25fb0bbc310f..481ca4080cbd 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -241,6 +242,15 @@ void skb_flow_dissect_meta(const struct sk_buff *skb, FLOW_DISSECTOR_KEY_META, target_container); meta->ingress_ifindex = skb->skb_iif; +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + if (tc_skb_ext_tc_enabled()) { + struct tc_skb_ext *ext; + + ext = skb_ext_find(skb, TC_SKB_EXT); + if (ext) + meta->l2_miss = ext->l2_miss; + } +#endif } EXPORT_SYMBOL(skb_flow_dissect_meta); -- cgit v1.2.3 From 1a432018c0cdf51a77a2e134b19ba6cab4c29c89 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 29 May 2023 14:48:30 +0300 Subject: net/sched: flower: Allow matching on layer 2 miss Add the 'TCA_FLOWER_L2_MISS' netlink attribute that allows user space to match on packets that encountered a layer 2 miss. The miss indication is set as metadata in the tc skb extension by the bridge driver upon FDB or MDB lookup miss and dissected by the flow dissector to the 'FLOW_DISSECTOR_KEY_META' key. The use of this skb extension is guarded by the 'tc_skb_ext_tc' static key. As such, enable / disable this key when filters that match on layer 2 miss are added / deleted. 
Tested: # cat tc_skb_ext_tc.py #!/usr/bin/env -S drgn -s vmlinux refcount = prog["tc_skb_ext_tc"].key.enabled.counter.value_() print(f"tc_skb_ext_tc reference count is {refcount}") # ./tc_skb_ext_tc.py tc_skb_ext_tc reference count is 0 # tc filter add dev swp1 egress proto all handle 101 pref 1 flower src_mac 00:11:22:33:44:55 action drop # tc filter add dev swp1 egress proto all handle 102 pref 2 flower src_mac 00:11:22:33:44:55 l2_miss true action drop # tc filter add dev swp1 egress proto all handle 103 pref 3 flower src_mac 00:11:22:33:44:55 l2_miss false action drop # ./tc_skb_ext_tc.py tc_skb_ext_tc reference count is 2 # tc filter replace dev swp1 egress proto all handle 102 pref 2 flower src_mac 00:01:02:03:04:05 l2_miss false action drop # ./tc_skb_ext_tc.py tc_skb_ext_tc reference count is 2 # tc filter del dev swp1 egress proto all handle 103 pref 3 flower # tc filter del dev swp1 egress proto all handle 102 pref 2 flower # tc filter del dev swp1 egress proto all handle 101 pref 1 flower # ./tc_skb_ext_tc.py tc_skb_ext_tc reference count is 0 Signed-off-by: Ido Schimmel Reviewed-by: Nikolay Aleksandrov Signed-off-by: Jakub Kicinski --- include/uapi/linux/pkt_cls.h | 2 ++ net/sched/cls_flower.c | 30 ++++++++++++++++++++++++++++-- 2 files changed, 30 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 648a82f32666..00933dda7b10 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -594,6 +594,8 @@ enum { TCA_FLOWER_KEY_L2TPV3_SID, /* be32 */ + TCA_FLOWER_L2_MISS, /* u8 */ + __TCA_FLOWER_MAX, }; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 9dbc43388e57..04adcde9eb81 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -120,6 +120,7 @@ struct cls_fl_filter { u32 handle; u32 flags; u32 in_hw_count; + u8 needs_tc_skb_ext:1; struct rcu_work rwork; struct net_device *hw_dev; /* Flower classifier is unlocked, which means that its reference counter @@ -415,6 +416,8 @@ static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp) static void __fl_destroy_filter(struct cls_fl_filter *f) { + if (f->needs_tc_skb_ext) + tc_skb_ext_tc_disable(); tcf_exts_destroy(&f->exts); tcf_exts_put_net(&f->exts); kfree(f); @@ -615,7 +618,8 @@ static void *fl_get(struct tcf_proto *tp, u32 handle) } static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { - [TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC }, + [TCA_FLOWER_UNSPEC] = { .strict_start_type = + TCA_FLOWER_L2_MISS }, [TCA_FLOWER_CLASSID] = { .type = NLA_U32 }, [TCA_FLOWER_INDEV] = { .type = NLA_STRING, .len = IFNAMSIZ }, @@ -720,7 +724,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_PPPOE_SID] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 }, - + [TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1), }; static const struct nla_policy @@ -1668,6 +1672,10 @@ static int fl_set_key(struct net *net, struct nlattr **tb, mask->meta.ingress_ifindex = 0xffffffff; } + fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS, + &mask->meta.l2_miss, TCA_FLOWER_UNSPEC, + sizeof(key->meta.l2_miss)); + fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, sizeof(key->eth.dst)); @@ -2085,6 +2093,11 @@ errout_cleanup: return ret; } +static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask) +{ + return mask->meta.l2_miss; +} + static int fl_set_parms(struct net 
*net, struct tcf_proto *tp, struct cls_fl_filter *f, struct fl_flow_mask *mask, unsigned long base, struct nlattr **tb, @@ -2121,6 +2134,14 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp, return -EINVAL; } + /* Enable tc skb extension if filter matches on data extracted from + * this extension. + */ + if (fl_needs_tc_skb_ext(&mask->key)) { + f->needs_tc_skb_ext = 1; + tc_skb_ext_tc_enable(); + } + return 0; } @@ -3074,6 +3095,11 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net, goto nla_put_failure; } + if (fl_dump_key_val(skb, &key->meta.l2_miss, + TCA_FLOWER_L2_MISS, &mask->meta.l2_miss, + TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss))) + goto nla_put_failure; + if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, sizeof(key->eth.dst)) || -- cgit v1.2.3 From 6cd8ec58c1bf65adcdf346fe241ba69e0b56e6bd Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 29 May 2023 10:52:13 -0400 Subject: tipc: delete tipc_mtu_bad from tipc_udp_enable Since commit a4dfa72d0acd ("tipc: set default MTU for UDP media"), it's been no longer using dev->mtu for b->mtu, and the issue described in commit 3de81b758853 ("tipc: check minimum bearer MTU") doesn't exist in UDP bearer any more. Besides, dev->mtu can still be changed to a too small mtu after the UDP bearer is created even with tipc_mtu_bad() check in tipc_udp_enable(). Note that NETDEV_CHANGEMTU event processing in tipc_l2_device_event() doesn't really work for UDP bearer. So this patch deletes the unnecessary tipc_mtu_bad from tipc_udp_enable. Signed-off-by: Xin Long Reviewed-by: Tung Nguyen Link: https://lore.kernel.org/r/282f1f5cc40e6cad385aa1c60569e6c5b70e2fb3.1685371933.git.lucien.xin@gmail.com Signed-off-by: Jakub Kicinski --- net/tipc/bearer.c | 4 ++-- net/tipc/bearer.h | 4 ++-- net/tipc/udp_media.c | 4 ---- 3 files changed, 4 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 53881406e200..114140c49108 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -431,7 +431,7 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, dev = dev_get_by_name(net, dev_name); if (!dev) return -ENODEV; - if (tipc_mtu_bad(dev, 0)) { + if (tipc_mtu_bad(dev)) { dev_put(dev); return -EINVAL; } @@ -708,7 +708,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, test_and_set_bit_lock(0, &b->up); break; case NETDEV_CHANGEMTU: - if (tipc_mtu_bad(dev, 0)) { + if (tipc_mtu_bad(dev)) { bearer_disable(net, b); break; } diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index bd0cc5c287ef..1ee60649bd17 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -257,9 +257,9 @@ static inline void tipc_loopback_trace(struct net *net, } /* check if device MTU is too low for tipc headers */ -static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve) +static inline bool tipc_mtu_bad(struct net_device *dev) { - if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve) + if (dev->mtu >= TIPC_MIN_BEARER_MTU) return false; netdev_warn(dev, "MTU too low for tipc bearer\n"); return true; diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 0a85244fd618..926232557e77 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -739,10 +739,6 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, udp_conf.use_udp_checksums = false; ub->ifindex = dev->ifindex; b->encap_hlen = sizeof(struct iphdr) + sizeof(struct udphdr); - if (tipc_mtu_bad(dev, b->encap_hlen)) { - err = 
-EINVAL; - goto err; - } b->mtu = b->media->mtu; #if IS_ENABLED(CONFIG_IPV6) } else if (local.proto == htons(ETH_P_IPV6)) { -- cgit v1.2.3 From b1f2abcf817d82b54764d1474424649feda6fe1b Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Mon, 29 May 2023 16:44:30 +0300 Subject: net: Make gro complete function to return void tcp_gro_complete() function only updates the skb fields related to GRO and it always returns zero. All the 3 drivers which are using it do not check for the return value either. Change it to return void instead which simplifies its callers as error handing becomes unnecessary. Signed-off-by: Parav Pandit Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- include/net/tcp.h | 2 +- net/ipv4/tcp_offload.c | 7 +++---- net/ipv6/tcpv6_offload.c | 3 ++- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/include/net/tcp.h b/include/net/tcp.h index 0b755988e20c..14fa716cac50 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2041,7 +2041,7 @@ INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff)) INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff)); INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)); -int tcp_gro_complete(struct sk_buff *skb); +void tcp_gro_complete(struct sk_buff *skb); void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 45dda7889387..88f9b0081ee7 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -296,7 +296,7 @@ out: return pp; } -int tcp_gro_complete(struct sk_buff *skb) +void tcp_gro_complete(struct sk_buff *skb) { struct tcphdr *th = tcp_hdr(skb); @@ -311,8 +311,6 @@ int tcp_gro_complete(struct sk_buff *skb) if (skb->encapsulation) skb->inner_transport_header = skb->transport_header; - - return 0; } EXPORT_SYMBOL(tcp_gro_complete); @@ -342,7 +340,8 @@ INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff) if (NAPI_GRO_CB(skb)->is_atomic) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; - return tcp_gro_complete(skb); + tcp_gro_complete(skb); + return 0; } static const struct net_offload tcpv4_offload = { diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index 39db5a226855..bf0c957e4b5e 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -36,7 +36,8 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff) &iph->daddr, 0); skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; - return tcp_gro_complete(skb); + tcp_gro_complete(skb); + return 0; } static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb, -- cgit v1.2.3 From dced11ef84fb310f4ddfa74d1c09687b8f845d1b Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 30 May 2023 12:19:44 +0300 Subject: net/sched: taprio: don't overwrite "sch" variable in taprio_dump_class_stats() In taprio_dump_class_stats() we don't need a reference to the root Qdisc once we get the reference to the child corresponding to this traffic class, so it's okay to overwrite "sch". But in a future patch we will need the root Qdisc too, so create a dedicated "child" pointer variable to hold the child reference. This also makes the code adhere to a more conventional coding style. Signed-off-by: Vladimir Oltean Acked-by: Vinicius Costa Gomes Signed-off-by: David S. 
Miller --- net/sched/sch_taprio.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 76db9a10ef50..d29e6785854d 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -2388,10 +2388,10 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, __acquires(d->lock) { struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); + struct Qdisc *child = dev_queue->qdisc_sleeping; - sch = dev_queue->qdisc_sleeping; - if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 || - qdisc_qstats_copy(d, sch) < 0) + if (gnet_stats_copy_basic(d, NULL, &child->bstats, true) < 0 || + qdisc_qstats_copy(d, child) < 0) return -1; return 0; } -- cgit v1.2.3 From 2d800bc500fb3fb07a0fb42e2d0a1356fb9e1e8f Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 30 May 2023 12:19:45 +0300 Subject: net/sched: taprio: replace tc_taprio_qopt_offload :: enable with a "cmd" enum Inspired from struct flow_cls_offload :: cmd, in order for taprio to be able to report statistics (which is future work), it seems that we need to drill one step further with the ndo_setup_tc(TC_SETUP_QDISC_TAPRIO) multiplexing, and pass the command as part of the common portion of the muxed structure. Since we already have an "enable" variable in tc_taprio_qopt_offload, refactor all drivers to check for "cmd" instead of "enable", and reject every other command except "replace" and "destroy" - to be future proof. Signed-off-by: Vladimir Oltean Reviewed-by: Horatiu Vultur # for lan966x Acked-by: Kurt Kanzenbach # hellcreek Reviewed-by: Muhammad Husaini Zulkifli Reviewed-by: Gerhard Engleder Signed-off-by: David S. Miller --- drivers/net/dsa/hirschmann/hellcreek.c | 14 +++++++++----- drivers/net/dsa/ocelot/felix_vsc9959.c | 4 +++- drivers/net/dsa/sja1105/sja1105_tas.c | 7 +++++-- drivers/net/ethernet/engleder/tsnep_selftests.c | 12 ++++++------ drivers/net/ethernet/engleder/tsnep_tc.c | 4 +++- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 6 +++++- drivers/net/ethernet/intel/igc/igc_main.c | 13 +++++++++++-- drivers/net/ethernet/microchip/lan966x/lan966x_tc.c | 10 ++++++++-- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 7 +++++-- drivers/net/ethernet/ti/am65-cpsw-qos.c | 11 ++++++++--- include/net/pkt_sched.h | 7 ++++++- net/sched/sch_taprio.c | 4 ++-- 12 files changed, 71 insertions(+), 28 deletions(-) (limited to 'net') diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c index 595a548bb0a8..af50001ccdd4 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@ -1885,13 +1885,17 @@ static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port, case TC_SETUP_QDISC_TAPRIO: { struct tc_taprio_qopt_offload *taprio = type_data; - if (!hellcreek_validate_schedule(hellcreek, taprio)) - return -EOPNOTSUPP; + switch (taprio->cmd) { + case TAPRIO_CMD_REPLACE: + if (!hellcreek_validate_schedule(hellcreek, taprio)) + return -EOPNOTSUPP; - if (taprio->enable) return hellcreek_port_set_schedule(ds, port, taprio); - - return hellcreek_port_del_schedule(ds, port); + case TAPRIO_CMD_DESTROY: + return hellcreek_port_del_schedule(ds, port); + default: + return -EOPNOTSUPP; + } } default: return -EOPNOTSUPP; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 030738fef60e..5de6a27052fc 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1411,7 +1411,7 
@@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port, mutex_lock(&ocelot->tas_lock); - if (!taprio->enable) { + if (taprio->cmd == TAPRIO_CMD_DESTROY) { ocelot_port_mqprio(ocelot, port, &taprio->mqprio); ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE, QSYS_TAG_CONFIG, port); @@ -1423,6 +1423,8 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port, mutex_unlock(&ocelot->tas_lock); return 0; + } else if (taprio->cmd != TAPRIO_CMD_REPLACE) { + return -EOPNOTSUPP; } ret = ocelot_port_mqprio(ocelot, port, &taprio->mqprio); diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c index e6153848a950..d7818710bc02 100644 --- a/drivers/net/dsa/sja1105/sja1105_tas.c +++ b/drivers/net/dsa/sja1105/sja1105_tas.c @@ -516,10 +516,11 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port, /* Can't change an already configured port (must delete qdisc first). * Can't delete the qdisc from an unconfigured port. */ - if (!!tas_data->offload[port] == admin->enable) + if ((!!tas_data->offload[port] && admin->cmd == TAPRIO_CMD_REPLACE) || + (!tas_data->offload[port] && admin->cmd == TAPRIO_CMD_DESTROY)) return -EINVAL; - if (!admin->enable) { + if (admin->cmd == TAPRIO_CMD_DESTROY) { taprio_offload_free(tas_data->offload[port]); tas_data->offload[port] = NULL; @@ -528,6 +529,8 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port, return rc; return sja1105_static_config_reload(priv, SJA1105_SCHEDULING); + } else if (admin->cmd != TAPRIO_CMD_REPLACE) { + return -EOPNOTSUPP; } /* The cycle time extension is the amount of time the last cycle from diff --git a/drivers/net/ethernet/engleder/tsnep_selftests.c b/drivers/net/ethernet/engleder/tsnep_selftests.c index 1581d6b22232..8a9145f93147 100644 --- a/drivers/net/ethernet/engleder/tsnep_selftests.c +++ b/drivers/net/ethernet/engleder/tsnep_selftests.c @@ -329,7 +329,7 @@ static bool disable_taprio(struct tsnep_adapter *adapter) int retval; memset(&qopt, 0, sizeof(qopt)); - qopt.enable = 0; + qopt.cmd = TAPRIO_CMD_DESTROY; retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, &qopt); if (retval) return false; @@ -360,7 +360,7 @@ static bool tsnep_test_taprio(struct tsnep_adapter *adapter) for (i = 0; i < 255; i++) qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES; - qopt->enable = 1; + qopt->cmd = TAPRIO_CMD_REPLACE; qopt->base_time = ktime_set(0, 0); qopt->cycle_time = 1500000; qopt->cycle_time_extension = 0; @@ -382,7 +382,7 @@ static bool tsnep_test_taprio(struct tsnep_adapter *adapter) if (!run_taprio(adapter, qopt, 100)) goto failed; - qopt->enable = 1; + qopt->cmd = TAPRIO_CMD_REPLACE; qopt->base_time = ktime_set(0, 0); qopt->cycle_time = 411854; qopt->cycle_time_extension = 0; @@ -406,7 +406,7 @@ static bool tsnep_test_taprio(struct tsnep_adapter *adapter) if (!run_taprio(adapter, qopt, 100)) goto failed; - qopt->enable = 1; + qopt->cmd = TAPRIO_CMD_REPLACE; qopt->base_time = ktime_set(0, 0); delay_base_time(adapter, qopt, 12); qopt->cycle_time = 125000; @@ -457,7 +457,7 @@ static bool tsnep_test_taprio_change(struct tsnep_adapter *adapter) for (i = 0; i < 255; i++) qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES; - qopt->enable = 1; + qopt->cmd = TAPRIO_CMD_REPLACE; qopt->base_time = ktime_set(0, 0); qopt->cycle_time = 100000; qopt->cycle_time_extension = 0; @@ -610,7 +610,7 @@ static bool tsnep_test_taprio_extension(struct tsnep_adapter *adapter) for (i = 0; i < 255; i++) qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES; - qopt->enable = 1; + qopt->cmd = 
TAPRIO_CMD_REPLACE; qopt->base_time = ktime_set(0, 0); qopt->cycle_time = 100000; qopt->cycle_time_extension = 50000; diff --git a/drivers/net/ethernet/engleder/tsnep_tc.c b/drivers/net/ethernet/engleder/tsnep_tc.c index d083e6684f12..745b191a5540 100644 --- a/drivers/net/ethernet/engleder/tsnep_tc.c +++ b/drivers/net/ethernet/engleder/tsnep_tc.c @@ -325,7 +325,7 @@ static int tsnep_taprio(struct tsnep_adapter *adapter, if (!adapter->gate_control) return -EOPNOTSUPP; - if (!qopt->enable) { + if (qopt->cmd == TAPRIO_CMD_DESTROY) { /* disable gate control if active */ mutex_lock(&adapter->gate_control_lock); @@ -337,6 +337,8 @@ static int tsnep_taprio(struct tsnep_adapter *adapter, mutex_unlock(&adapter->gate_control_lock); return 0; + } else if (qopt->cmd != TAPRIO_CMD_REPLACE) { + return -EOPNOTSUPP; } retval = tsnep_validate_gcl(qopt); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 83c27bbbc6ed..7aad824f4da7 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -65,7 +65,7 @@ static int enetc_setup_taprio(struct net_device *ndev, gcl_len = admin_conf->num_entries; tge = enetc_rd(hw, ENETC_PTGCR); - if (!admin_conf->enable) { + if (admin_conf->cmd == TAPRIO_CMD_DESTROY) { enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE); enetc_reset_ptcmsdur(hw); @@ -138,6 +138,10 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) struct enetc_ndev_priv *priv = netdev_priv(ndev); int err, i; + if (taprio->cmd != TAPRIO_CMD_REPLACE && + taprio->cmd != TAPRIO_CMD_DESTROY) + return -EOPNOTSUPP; + /* TSD and Qbv are mutually exclusive in hardware */ for (i = 0; i < priv->num_tx_rings; i++) if (priv->tx_ring[i]->tsd_enable) diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index c5ef1edcf548..88145c30c919 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6113,9 +6113,18 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, size_t n; int i; - adapter->qbv_enable = qopt->enable; + switch (qopt->cmd) { + case TAPRIO_CMD_REPLACE: + adapter->qbv_enable = true; + break; + case TAPRIO_CMD_DESTROY: + adapter->qbv_enable = false; + break; + default: + return -EOPNOTSUPP; + } - if (!qopt->enable) + if (!adapter->qbv_enable) return igc_tsn_clear_schedule(adapter); if (qopt->base_time < 0) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c index cf0cc7562d04..ee652f2d2359 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c @@ -21,8 +21,14 @@ static int lan966x_tc_setup_qdisc_mqprio(struct lan966x_port *port, static int lan966x_tc_setup_qdisc_taprio(struct lan966x_port *port, struct tc_taprio_qopt_offload *taprio) { - return taprio->enable ? 
lan966x_taprio_add(port, taprio) : - lan966x_taprio_del(port); + switch (taprio->cmd) { + case TAPRIO_CMD_REPLACE: + return lan966x_taprio_add(port, taprio); + case TAPRIO_CMD_DESTROY: + return lan966x_taprio_del(port); + default: + return -EOPNOTSUPP; + } } static int lan966x_tc_setup_qdisc_tbf(struct lan966x_port *port, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 9d55226479b4..ac41ef4cbd2f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -966,8 +966,11 @@ static int tc_setup_taprio(struct stmmac_priv *priv, return -EOPNOTSUPP; } - if (!qopt->enable) + if (qopt->cmd == TAPRIO_CMD_DESTROY) goto disable; + else if (qopt->cmd != TAPRIO_CMD_REPLACE) + return -EOPNOTSUPP; + if (qopt->num_entries >= dep) return -EINVAL; if (!qopt->cycle_time) @@ -988,7 +991,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv, mutex_lock(&priv->plat->est->lock); priv->plat->est->gcl_size = size; - priv->plat->est->enable = qopt->enable; + priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE; mutex_unlock(&priv->plat->est->lock); for (i = 0; i < size; i++) { diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c index 3a908db6e5b2..eced87fa261c 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-qos.c +++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c @@ -450,7 +450,7 @@ static int am65_cpsw_configure_taprio(struct net_device *ndev, am65_cpsw_est_update_state(ndev); - if (!est_new->taprio.enable) { + if (est_new->taprio.cmd == TAPRIO_CMD_DESTROY) { am65_cpsw_stop_est(ndev); return ret; } @@ -476,7 +476,7 @@ static int am65_cpsw_configure_taprio(struct net_device *ndev, am65_cpsw_est_set_sched_list(ndev, est_new); am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf); - am65_cpsw_est_set(ndev, est_new->taprio.enable); + am65_cpsw_est_set(ndev, est_new->taprio.cmd == TAPRIO_CMD_REPLACE); if (tact == TACT_PROG) { ret = am65_cpsw_timer_set(ndev, est_new); @@ -520,7 +520,7 @@ static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data) am65_cpsw_cp_taprio(taprio, &est_new->taprio); ret = am65_cpsw_configure_taprio(ndev, est_new); if (!ret) { - if (taprio->enable) { + if (taprio->cmd == TAPRIO_CMD_REPLACE) { devm_kfree(&ndev->dev, port->qos.est_admin); port->qos.est_admin = est_new; @@ -564,8 +564,13 @@ purge_est: static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data) { struct am65_cpsw_port *port = am65_ndev_to_port(ndev); + struct tc_taprio_qopt_offload *taprio = type_data; struct am65_cpsw_common *common = port->common; + if (taprio->cmd != TAPRIO_CMD_REPLACE && + taprio->cmd != TAPRIO_CMD_DESTROY) + return -EOPNOTSUPP; + if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS)) return -ENODEV; diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index f436688b6efc..f5fb11da357b 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -185,6 +185,11 @@ struct tc_taprio_caps { bool broken_mqprio:1; }; +enum tc_taprio_qopt_cmd { + TAPRIO_CMD_REPLACE, + TAPRIO_CMD_DESTROY, +}; + struct tc_taprio_sched_entry { u8 command; /* TC_TAPRIO_CMD_* */ @@ -196,7 +201,7 @@ struct tc_taprio_sched_entry { struct tc_taprio_qopt_offload { struct tc_mqprio_qopt_offload mqprio; struct netlink_ext_ack *extack; - u8 enable; + enum tc_taprio_qopt_cmd cmd; ktime_t base_time; u64 cycle_time; u64 cycle_time_extension; diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index d29e6785854d..06bf4c6355a5 100644 --- 
a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1524,7 +1524,7 @@ static int taprio_enable_offload(struct net_device *dev, "Not enough memory for enabling offload mode"); return -ENOMEM; } - offload->enable = 1; + offload->cmd = TAPRIO_CMD_REPLACE; offload->extack = extack; mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt); offload->mqprio.extack = extack; @@ -1572,7 +1572,7 @@ static int taprio_disable_offload(struct net_device *dev, "Not enough memory to disable offload mode"); return -ENOMEM; } - offload->enable = 0; + offload->cmd = TAPRIO_CMD_DESTROY; err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); if (err < 0) { -- cgit v1.2.3 From 6c1adb650c8d85c6cb471dbc900c2468f462995a Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 30 May 2023 12:19:46 +0300 Subject: net/sched: taprio: add netlink reporting for offload statistics counters Offloading drivers may report some additional statistics counters, some of them even suggested by 802.1Q, like TransmissionOverrun. In my opinion we don't have to limit ourselves to reporting counters only globally to the Qdisc/interface, especially if the device has more detailed reporting (per traffic class), since the more detailed info is valuable for debugging and can help identifying who is exceeding its time slot. But on the other hand, some devices may not be able to report both per TC and global stats. So we end up reporting both ways, and use the good old ethtool_put_stat() strategy to determine which statistics are supported by this NIC. Statistics which aren't set are simply not reported to netlink. For this reason, we need something dynamic (a nlattr nest) to be reported through TCA_STATS_APP, and not something daft like the fixed-size and inextensible struct tc_codel_xstats. A good model for xstats which are a nlattr nest rather than a fixed struct seems to be cake. # Global stats $ tc -s qdisc show dev eth0 root # Per-tc stats $ tc -s class show dev eth0 Signed-off-by: Vladimir Oltean Acked-by: Vinicius Costa Gomes Signed-off-by: David S. Miller --- include/net/pkt_sched.h | 47 ++++++++++++++++++++----- include/uapi/linux/pkt_sched.h | 10 ++++++ net/sched/sch_taprio.c | 78 +++++++++++++++++++++++++++++++++++++++++- 3 files changed, 126 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index f5fb11da357b..530d33adec88 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -188,6 +188,27 @@ struct tc_taprio_caps { enum tc_taprio_qopt_cmd { TAPRIO_CMD_REPLACE, TAPRIO_CMD_DESTROY, + TAPRIO_CMD_STATS, + TAPRIO_CMD_TC_STATS, +}; + +/** + * struct tc_taprio_qopt_stats - IEEE 802.1Qbv statistics + * @window_drops: Frames that were dropped because they were too large to be + * transmitted in any of the allotted time windows (open gates) for their + * traffic class. + * @tx_overruns: Frames still being transmitted by the MAC after the + * transmission gate associated with their traffic class has closed. + * Equivalent to `12.29.1.1.2 TransmissionOverrun` from 802.1Q-2018. 
+ */ +struct tc_taprio_qopt_stats { + u64 window_drops; + u64 tx_overruns; +}; + +struct tc_taprio_qopt_tc_stats { + int tc; + struct tc_taprio_qopt_stats stats; }; struct tc_taprio_sched_entry { @@ -199,16 +220,26 @@ struct tc_taprio_sched_entry { }; struct tc_taprio_qopt_offload { - struct tc_mqprio_qopt_offload mqprio; - struct netlink_ext_ack *extack; enum tc_taprio_qopt_cmd cmd; - ktime_t base_time; - u64 cycle_time; - u64 cycle_time_extension; - u32 max_sdu[TC_MAX_QUEUE]; - size_t num_entries; - struct tc_taprio_sched_entry entries[]; + union { + /* TAPRIO_CMD_STATS */ + struct tc_taprio_qopt_stats stats; + /* TAPRIO_CMD_TC_STATS */ + struct tc_taprio_qopt_tc_stats tc_stats; + /* TAPRIO_CMD_REPLACE */ + struct { + struct tc_mqprio_qopt_offload mqprio; + struct netlink_ext_ack *extack; + ktime_t base_time; + u64 cycle_time; + u64 cycle_time_extension; + u32 max_sdu[TC_MAX_QUEUE]; + + size_t num_entries; + struct tc_taprio_sched_entry entries[]; + }; + }; }; #if IS_ENABLED(CONFIG_NET_SCH_TAPRIO) diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 51a7addc56c6..00f6ff0aff1f 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -1259,6 +1259,16 @@ enum { TCA_TAPRIO_TC_ENTRY_MAX = (__TCA_TAPRIO_TC_ENTRY_CNT - 1) }; +enum { + TCA_TAPRIO_OFFLOAD_STATS_PAD = 1, /* u64 */ + TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS, /* u64 */ + TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS, /* u64 */ + + /* add new constants above here */ + __TCA_TAPRIO_OFFLOAD_STATS_CNT, + TCA_TAPRIO_OFFLOAD_STATS_MAX = (__TCA_TAPRIO_OFFLOAD_STATS_CNT - 1) +}; + enum { TCA_TAPRIO_ATTR_UNSPEC, TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */ diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 06bf4c6355a5..3c4c2c334878 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -27,6 +27,8 @@ #include #include +#define TAPRIO_STAT_NOT_SET (~0ULL) + #include "sch_mqprio_lib.h" static LIST_HEAD(taprio_list); @@ -2289,6 +2291,72 @@ nla_put_failure: return -EMSGSIZE; } +static int taprio_put_stat(struct sk_buff *skb, u64 val, u16 attrtype) +{ + if (val == TAPRIO_STAT_NOT_SET) + return 0; + if (nla_put_u64_64bit(skb, attrtype, val, TCA_TAPRIO_OFFLOAD_STATS_PAD)) + return -EMSGSIZE; + return 0; +} + +static int taprio_dump_xstats(struct Qdisc *sch, struct gnet_dump *d, + struct tc_taprio_qopt_offload *offload, + struct tc_taprio_qopt_stats *stats) +{ + struct net_device *dev = qdisc_dev(sch); + const struct net_device_ops *ops; + struct sk_buff *skb = d->skb; + struct nlattr *xstats; + int err; + + ops = qdisc_dev(sch)->netdev_ops; + + /* FIXME I could use qdisc_offload_dump_helper(), but that messes + * with sch->flags depending on whether the device reports taprio + * stats, and I'm not sure whether that's a good idea, considering + * that stats are optional to the offload itself + */ + if (!ops->ndo_setup_tc) + return 0; + + memset(stats, 0xff, sizeof(*stats)); + + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); + if (err == -EOPNOTSUPP) + return 0; + if (err) + return err; + + xstats = nla_nest_start(skb, TCA_STATS_APP); + if (!xstats) + goto err; + + if (taprio_put_stat(skb, stats->window_drops, + TCA_TAPRIO_OFFLOAD_STATS_WINDOW_DROPS) || + taprio_put_stat(skb, stats->tx_overruns, + TCA_TAPRIO_OFFLOAD_STATS_TX_OVERRUNS)) + goto err_cancel; + + nla_nest_end(skb, xstats); + + return 0; + +err_cancel: + nla_nest_cancel(skb, xstats); +err: + return -EMSGSIZE; +} + +static int taprio_dump_stats(struct Qdisc *sch, struct gnet_dump *d) +{ + 
struct tc_taprio_qopt_offload offload = { + .cmd = TAPRIO_CMD_STATS, + }; + + return taprio_dump_xstats(sch, d, &offload, &offload.stats); +} + static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb) { struct taprio_sched *q = qdisc_priv(sch); @@ -2389,11 +2457,18 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, { struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); struct Qdisc *child = dev_queue->qdisc_sleeping; + struct tc_taprio_qopt_offload offload = { + .cmd = TAPRIO_CMD_TC_STATS, + .tc_stats = { + .tc = cl - 1, + }, + }; if (gnet_stats_copy_basic(d, NULL, &child->bstats, true) < 0 || qdisc_qstats_copy(d, child) < 0) return -1; - return 0; + + return taprio_dump_xstats(sch, d, &offload, &offload.tc_stats.stats); } static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) @@ -2440,6 +2515,7 @@ static struct Qdisc_ops taprio_qdisc_ops __read_mostly = { .dequeue = taprio_dequeue, .enqueue = taprio_enqueue, .dump = taprio_dump, + .dump_stats = taprio_dump_stats, .owner = THIS_MODULE, }; -- cgit v1.2.3 From 9b68f30b68701e98abcec331a2cf3df972d910f8 Mon Sep 17 00:00:00 2001 From: Jarkko Sakkinen Date: Fri, 26 May 2023 14:21:02 +0300 Subject: net: Use umd_cleanup_helper() bpfilter_umh_cleanup() is the same function as umd_cleanup_helper(). Drop the redundant function. Signed-off-by: Jarkko Sakkinen Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230526112104.1044686-1-jarkko@kernel.org --- include/linux/bpfilter.h | 1 - net/bpfilter/bpfilter_kern.c | 2 +- net/ipv4/bpfilter/sockopt.c | 11 +---------- 3 files changed, 2 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index 2ae3c8e1d83c..736ded4905e0 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h @@ -11,7 +11,6 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen); int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen); -void bpfilter_umh_cleanup(struct umd_info *info); struct bpfilter_umh_ops { struct umd_info info; diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 422ec6e7ccff..97e129e3f31c 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -21,7 +21,7 @@ static void shutdown_umh(void) if (tgid) { kill_pid(tgid, SIGKILL, 1); wait_event(tgid->wait_pidfd, thread_group_exited(tgid)); - bpfilter_umh_cleanup(info); + umd_cleanup_helper(info); } } diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 1b34cb9a7708..193bcc2acccc 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -12,15 +12,6 @@ struct bpfilter_umh_ops bpfilter_ops; EXPORT_SYMBOL_GPL(bpfilter_ops); -void bpfilter_umh_cleanup(struct umd_info *info) -{ - fput(info->pipe_to_umh); - fput(info->pipe_from_umh); - put_pid(info->tgid); - info->tgid = NULL; -} -EXPORT_SYMBOL_GPL(bpfilter_umh_cleanup); - static int bpfilter_mbox_request(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen, bool is_set) { @@ -38,7 +29,7 @@ static int bpfilter_mbox_request(struct sock *sk, int optname, sockptr_t optval, } if (bpfilter_ops.info.tgid && thread_group_exited(bpfilter_ops.info.tgid)) - bpfilter_umh_cleanup(&bpfilter_ops.info); + umd_cleanup_helper(&bpfilter_ops.info); if (!bpfilter_ops.info.tgid) { err = bpfilter_ops.start(); -- cgit v1.2.3 From 748b442800e877a93a4fa1256418d0b66bdc55ce Mon Sep 17 00:00:00 2001 From: Heiner Kallweit 
Date: Sun, 28 May 2023 19:39:59 +0200 Subject: net: don't set sw irq coalescing defaults in case of PREEMPT_RT If PREEMPT_RT is set, then assume that the user focuses on minimum latency. Therefore don't set sw irq coalescing defaults. This affects the defaults only, users can override these settings via sysfs. Signed-off-by: Heiner Kallweit Link: https://lore.kernel.org/r/f9439c7f-c92c-4c2c-703e-110f96d841b7@gmail.com Signed-off-by: Jakub Kicinski --- net/core/dev.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index 3393c2f3dbe8..99d99b247bc9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10571,8 +10571,10 @@ void netdev_sw_irq_coalesce_default_on(struct net_device *dev) { WARN_ON(dev->reg_state == NETREG_REGISTERED); - dev->gro_flush_timeout = 20000; - dev->napi_defer_hard_irqs = 1; + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { + dev->gro_flush_timeout = 20000; + dev->napi_defer_hard_irqs = 1; + } } EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on); -- cgit v1.2.3 From 6f4b98147b8dfcabacb19b5c6abd087af66d0049 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 30 May 2023 18:55:23 -0700 Subject: devlink: make health report on unregistered instance warn just once Devlink health is involved in error recovery. Machines in bad state tend to be fairly unreliable, and occasionally get stuck in error loops. Even with a reasonable grace period devlink health may get a thousand reports in an hour. In case of reporting on an unregistered devlink instance the subsequent reports don't add much value. Switch to WARN_ON_ONCE() to avoid flooding dmesg and fleet monitoring dashboards. Reviewed-by: Jiri Pirko Link: https://lore.kernel.org/r/20230531015523.48961-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- net/devlink/health.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/devlink/health.c b/net/devlink/health.c index 0839706d5741..194340a8bb86 100644 --- a/net/devlink/health.c +++ b/net/devlink/health.c @@ -480,7 +480,7 @@ static void devlink_recover_notify(struct devlink_health_reporter *reporter, int err; WARN_ON(cmd != DEVLINK_CMD_HEALTH_REPORTER_RECOVER); - WARN_ON(!xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED)); + ASSERT_DEVLINK_REGISTERED(devlink); msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) -- cgit v1.2.3 From 8ad77e72caae22a1ddcfd0c03f2884929e93b7a4 Mon Sep 17 00:00:00 2001 From: Louis DeLosSantos Date: Wed, 31 May 2023 15:38:48 -0400 Subject: bpf: Add table ID to bpf_fib_lookup BPF helper Add ability to specify routing table ID to the `bpf_fib_lookup` BPF helper. A new field `tbid` is added to `struct bpf_fib_lookup` used as parameters to the `bpf_fib_lookup` BPF helper. When the helper is called with the `BPF_FIB_LOOKUP_DIRECT` and `BPF_FIB_LOOKUP_TBID` flags the `tbid` field in `struct bpf_fib_lookup` will be used as the table ID for the fib lookup. If the `tbid` does not exist the fib lookup will fail with `BPF_FIB_LKUP_RET_NOT_FWDED`. The `tbid` field becomes a union over the vlan related output fields in `struct bpf_fib_lookup` and will be zeroed immediately after usage. This functionality is useful in containerized environments. For instance, if a CNI wants to dictate the next-hop for traffic leaving a container it can create a container-specific routing table and perform a fib lookup against this table in a "host-net-namespace-side" TC program. 
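As an illustration only (not part of this patch), a TC classifier attached on the host side of a container's veth might use the new flag roughly as below; the table ID 200 and the program/section names are assumptions made up for the sketch, not anything defined by this series:

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch of BPF_FIB_LOOKUP_TBID usage; not taken from this patch. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#ifndef AF_INET
#define AF_INET 2
#endif

SEC("tc")
int pick_vrf_nexthop(struct __sk_buff *skb)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	struct ethhdr *eth = data;
	struct iphdr *iph = data + sizeof(*eth);
	struct bpf_fib_lookup params = {};
	long rc;

	/* Only handle plain IPv4 frames in this sketch. */
	if ((void *)(iph + 1) > data_end || eth->h_proto != bpf_htons(ETH_P_IP))
		return TC_ACT_OK;

	params.family   = AF_INET;
	params.ifindex  = skb->ifindex;
	params.ipv4_src = iph->saddr;
	params.ipv4_dst = iph->daddr;
	params.tbid     = 200;	/* hypothetical per-container/VRF table */

	rc = bpf_fib_lookup(skb, &params, sizeof(params),
			    BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID);
	if (rc != BPF_FIB_LKUP_RET_SUCCESS)
		return TC_ACT_OK;

	/* On success params.smac/dmac and params.ifindex describe the next
	 * hop chosen from table 200; a real program would rewrite the
	 * Ethernet header and bpf_redirect() to params.ifindex here.
	 */
	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";

A loader invocation along the lines of "tc filter add dev <host-side veth> egress bpf da obj vrf_fib.o sec tc" is the sort of attachment point described above (names again illustrative).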
This functionality also allows `ip rule` like functionality at the TC layer, allowing an eBPF program to pick a routing table based on some aspect of the sk_buff. As a concrete use case, this feature will be used in Cilium's SRv6 L3VPN datapath. When egress traffic leaves a Pod an eBPF program attached by Cilium will determine which VRF the egress traffic should target, and then perform a FIB lookup in a specific table representing this VRF's FIB. Signed-off-by: Louis DeLosSantos Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230505-bpf-add-tbid-fib-lookup-v2-1-0a31c22c748c@gmail.com --- include/uapi/linux/bpf.h | 21 ++++++++++++++++++--- net/core/filter.c | 14 +++++++++++++- tools/include/uapi/linux/bpf.h | 21 ++++++++++++++++++--- 3 files changed, 49 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 9273c654743c..a7b5e91dd768 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3177,6 +3177,10 @@ union bpf_attr { * **BPF_FIB_LOOKUP_DIRECT** * Do a direct table lookup vs full lookup using FIB * rules. + * **BPF_FIB_LOOKUP_TBID** + * Used with BPF_FIB_LOOKUP_DIRECT. + * Use the routing table ID present in *params*->tbid + * for the fib lookup. * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). @@ -6831,6 +6835,7 @@ enum { BPF_FIB_LOOKUP_DIRECT = (1U << 0), BPF_FIB_LOOKUP_OUTPUT = (1U << 1), BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), + BPF_FIB_LOOKUP_TBID = (1U << 3), }; enum { @@ -6891,9 +6896,19 @@ struct bpf_fib_lookup { __u32 ipv6_dst[4]; /* in6_addr; network order */ }; - /* output */ - __be16 h_vlan_proto; - __be16 h_vlan_TCI; + union { + struct { + /* output */ + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + }; + /* input: when accompanied with the + * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a + * specific routing table to use for the fib lookup. + */ + __u32 tbid; + }; + __u8 smac[6]; /* ETH_ALEN */ __u8 dmac[6]; /* ETH_ALEN */ }; diff --git a/net/core/filter.c b/net/core/filter.c index 968139f4a1ac..d25d52854c21 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5803,6 +5803,12 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; struct fib_table *tb; + if (flags & BPF_FIB_LOOKUP_TBID) { + tbid = params->tbid; + /* zero out for vlan output */ + params->tbid = 0; + } + tb = fib_get_table(net, tbid); if (unlikely(!tb)) return BPF_FIB_LKUP_RET_NOT_FWDED; @@ -5936,6 +5942,12 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, u32 tbid = l3mdev_fib_table_rcu(dev) ? 
: RT_TABLE_MAIN; struct fib6_table *tb; + if (flags & BPF_FIB_LOOKUP_TBID) { + tbid = params->tbid; + /* zero out for vlan output */ + params->tbid = 0; + } + tb = ipv6_stub->fib6_get_table(net, tbid); if (unlikely(!tb)) return BPF_FIB_LKUP_RET_NOT_FWDED; @@ -6008,7 +6020,7 @@ set_fwd_params: #endif #define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \ - BPF_FIB_LOOKUP_SKIP_NEIGH) + BPF_FIB_LOOKUP_SKIP_NEIGH | BPF_FIB_LOOKUP_TBID) BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx, struct bpf_fib_lookup *, params, int, plen, u32, flags) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9273c654743c..a7b5e91dd768 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3177,6 +3177,10 @@ union bpf_attr { * **BPF_FIB_LOOKUP_DIRECT** * Do a direct table lookup vs full lookup using FIB * rules. + * **BPF_FIB_LOOKUP_TBID** + * Used with BPF_FIB_LOOKUP_DIRECT. + * Use the routing table ID present in *params*->tbid + * for the fib lookup. * **BPF_FIB_LOOKUP_OUTPUT** * Perform lookup from an egress perspective (default is * ingress). @@ -6831,6 +6835,7 @@ enum { BPF_FIB_LOOKUP_DIRECT = (1U << 0), BPF_FIB_LOOKUP_OUTPUT = (1U << 1), BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2), + BPF_FIB_LOOKUP_TBID = (1U << 3), }; enum { @@ -6891,9 +6896,19 @@ struct bpf_fib_lookup { __u32 ipv6_dst[4]; /* in6_addr; network order */ }; - /* output */ - __be16 h_vlan_proto; - __be16 h_vlan_TCI; + union { + struct { + /* output */ + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + }; + /* input: when accompanied with the + * 'BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID` flags, a + * specific routing table to use for the fib lookup. + */ + __u32 tbid; + }; + __u8 smac[6]; /* ETH_ALEN */ __u8 dmac[6]; /* ETH_ALEN */ }; -- cgit v1.2.3 From 04292c695f82b6cf0d25dd5ae494f16ddbb621f6 Mon Sep 17 00:00:00 2001 From: Abhijeet Rastogi Date: Tue, 16 May 2023 20:08:49 -0700 Subject: ipvs: increase ip_vs_conn_tab_bits range for 64BIT Current range [8, 20] is set purely due to historical reasons because at the time, ~1M (2^20) was considered sufficient. With this change, 27 is the upper limit for 64-bit, 20 otherwise. Previous change regarding this limit is here. Link: https://lore.kernel.org/all/86eabeb9dd62aebf1e2533926fdd13fed48bab1f.1631289960.git.aclaudi@redhat.com/T/#u Signed-off-by: Abhijeet Rastogi Acked-by: Julian Anastasov Acked-by: Simon Horman Signed-off-by: Pablo Neira Ayuso --- net/netfilter/ipvs/Kconfig | 27 ++++++++++++++------------- net/netfilter/ipvs/ip_vs_conn.c | 4 ++-- 2 files changed, 16 insertions(+), 15 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index 271da8447b29..2a3017b9c001 100644 --- a/net/netfilter/ipvs/Kconfig +++ b/net/netfilter/ipvs/Kconfig @@ -44,7 +44,8 @@ config IP_VS_DEBUG config IP_VS_TAB_BITS int "IPVS connection table size (the Nth power of 2)" - range 8 20 + range 8 20 if !64BIT + range 8 27 if 64BIT default 12 help The IPVS connection hash table uses the chaining scheme to handle @@ -54,24 +55,24 @@ config IP_VS_TAB_BITS Note the table size must be power of 2. The table size will be the value of 2 to the your input number power. The number to choose is - from 8 to 20, the default number is 12, which means the table size - is 4096. Don't input the number too small, otherwise you will lose - performance on it. You can adapt the table size yourself, according - to your virtual server application. 
It is good to set the table size - not far less than the number of connections per second multiplying - average lasting time of connection in the table. For example, your - virtual server gets 200 connections per second, the connection lasts - for 200 seconds in average in the connection table, the table size - should be not far less than 200x200, it is good to set the table - size 32768 (2**15). + from 8 to 27 for 64BIT(20 otherwise), the default number is 12, + which means the table size is 4096. Don't input the number too + small, otherwise you will lose performance on it. You can adapt the + table size yourself, according to your virtual server application. + It is good to set the table size not far less than the number of + connections per second multiplying average lasting time of + connection in the table. For example, your virtual server gets 200 + connections per second, the connection lasts for 200 seconds in + average in the connection table, the table size should be not far + less than 200x200, it is good to set the table size 32768 (2**15). Another note that each connection occupies 128 bytes effectively and each hash entry uses 8 bytes, so you can estimate how much memory is needed for your box. You can overwrite this number setting conn_tab_bits module parameter - or by appending ip_vs.conn_tab_bits=? to the kernel command line - if IP VS was compiled built-in. + or by appending ip_vs.conn_tab_bits=? to the kernel command line if + IP VS was compiled built-in. comment "IPVS transport protocol load balancing support" diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 928e64653837..f4c55e65abd1 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1485,8 +1485,8 @@ int __init ip_vs_conn_init(void) int idx; /* Compute size and mask */ - if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) { - pr_info("conn_tab_bits not in [8, 20]. Using default value\n"); + if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 27) { + pr_info("conn_tab_bits not in [8, 27]. Using default value\n"); ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS; } ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; -- cgit v1.2.3 From 4f325e26277b6a1381235008ca6fa97e6cc8f43b Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Wed, 17 May 2023 15:37:31 +0300 Subject: ipvs: dynamically limit the connection hash table As we allow the hash table to be configured to rows above 2^20, we should limit it depending on the available memory to some sane values. Switch to kvmalloc allocation to better select the needed allocation type. Signed-off-by: Julian Anastasov Signed-off-by: Pablo Neira Ayuso --- net/netfilter/ipvs/ip_vs_conn.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index f4c55e65abd1..9065da3cdd12 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -26,7 +26,6 @@ #include #include #include -#include #include /* for proc_net_* */ #include #include @@ -1482,13 +1481,21 @@ void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs) int __init ip_vs_conn_init(void) { size_t tab_array_size; + int max_avail; +#if BITS_PER_LONG > 32 + int max = 27; +#else + int max = 20; +#endif + int min = 8; int idx; - /* Compute size and mask */ - if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 27) { - pr_info("conn_tab_bits not in [8, 27]. 
Using default value\n"); - ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS; - } + max_avail = order_base_2(totalram_pages()) + PAGE_SHIFT; + max_avail -= 2; /* ~4 in hash row */ + max_avail -= 1; /* IPVS up to 1/2 of mem */ + max_avail -= order_base_2(sizeof(struct ip_vs_conn)); + max = clamp(max, min, max_avail); + ip_vs_conn_tab_bits = clamp_val(ip_vs_conn_tab_bits, min, max); ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits; ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1; @@ -1497,7 +1504,8 @@ int __init ip_vs_conn_init(void) */ tab_array_size = array_size(ip_vs_conn_tab_size, sizeof(*ip_vs_conn_tab)); - ip_vs_conn_tab = vmalloc(tab_array_size); + ip_vs_conn_tab = kvmalloc_array(ip_vs_conn_tab_size, + sizeof(*ip_vs_conn_tab), GFP_KERNEL); if (!ip_vs_conn_tab) return -ENOMEM; @@ -1506,7 +1514,7 @@ int __init ip_vs_conn_init(void) sizeof(struct ip_vs_conn), 0, SLAB_HWCACHE_ALIGN, NULL); if (!ip_vs_conn_cachep) { - vfree(ip_vs_conn_tab); + kvfree(ip_vs_conn_tab); return -ENOMEM; } @@ -1534,5 +1542,5 @@ void ip_vs_conn_cleanup(void) rcu_barrier(); /* Release the empty cache */ kmem_cache_destroy(ip_vs_conn_cachep); - vfree(ip_vs_conn_tab); + kvfree(ip_vs_conn_tab); } -- cgit v1.2.3 From 5ff9424ea03a1fce2298b271eec1dad5ff4df1be Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 31 May 2023 16:20:25 +0200 Subject: devlink: bring port new reply back In the offending fixes commit I mistakenly removed the reply message of the port new command. I was under impression it is a new port notification, partly due to the "notify" in the name of the helper function. Bring the code sending reply with new port message back, this time putting it directly to devlink_nl_cmd_port_new_doit() Fixes: c496daeb8630 ("devlink: remove duplicate port notification") Signed-off-by: Jiri Pirko Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230531142025.2605001-1-jiri@resnulli.us Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlx5/core/sf/devlink.c | 9 ++++--- drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h | 3 ++- include/net/devlink.h | 4 +++- net/devlink/leftover.c | 28 +++++++++++++++++++++- 4 files changed, 38 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index c7d4691cb65a..9c02e5ea797c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -282,7 +282,8 @@ out: static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, const struct devlink_port_new_attrs *new_attr, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, + struct devlink_port **dl_port) { struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5_sf *sf; @@ -296,6 +297,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table, new_attr->controller, new_attr->sfnum); if (err) goto esw_err; + *dl_port = &sf->dl_port; trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum); return 0; @@ -336,7 +338,8 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_ int mlx5_devlink_sf_port_new(struct devlink *devlink, const struct devlink_port_new_attrs *new_attr, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, + struct devlink_port **dl_port) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_sf_table *table; @@ -352,7 +355,7 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink, "Port add is only 
supported in eswitch switchdev mode or SF ports are disabled."); return -EOPNOTSUPP; } - err = mlx5_sf_add(dev, table, new_attr, extack); + err = mlx5_sf_add(dev, table, new_attr, extack, dl_port); mlx5_sf_table_put(table); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h index c5430b8dcdf6..860f9ddb7107 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h @@ -20,7 +20,8 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev); int mlx5_devlink_sf_port_new(struct devlink *devlink, const struct devlink_port_new_attrs *add_attr, - struct netlink_ext_ack *extack); + struct netlink_ext_ack *extack, + struct devlink_port **dl_port); int mlx5_devlink_sf_port_del(struct devlink *devlink, struct devlink_port *dl_port, struct netlink_ext_ack *extack); diff --git a/include/net/devlink.h b/include/net/devlink.h index fe42ad46cf3b..9a3c51aa6e81 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1434,6 +1434,7 @@ struct devlink_ops { * @devlink: Devlink instance * @attrs: attributes of the new port * @extack: extack for reporting error messages + * @devlink_port: pointer to store new devlink port pointer * * Devlink core will call this device driver function upon user request * to create a new port function of a specified flavor and optional @@ -1446,7 +1447,8 @@ struct devlink_ops { */ int (*port_new)(struct devlink *devlink, const struct devlink_port_new_attrs *attrs, - struct netlink_ext_ack *extack); + struct netlink_ext_ack *extack, + struct devlink_port **devlink_port); /** * Rate control callbacks. diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index d5ca9fbe2d40..649a9701eb6a 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -1347,6 +1347,9 @@ static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, struct netlink_ext_ack *extack = info->extack; struct devlink_port_new_attrs new_attrs = {}; struct devlink *devlink = info->user_ptr[0]; + struct devlink_port *devlink_port; + struct sk_buff *msg; + int err; if (!devlink->ops->port_new) return -EOPNOTSUPP; @@ -1377,7 +1380,30 @@ static int devlink_nl_cmd_port_new_doit(struct sk_buff *skb, new_attrs.sfnum_valid = true; } - return devlink->ops->port_new(devlink, &new_attrs, extack); + err = devlink->ops->port_new(devlink, &new_attrs, + extack, &devlink_port); + if (err) + return err; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) { + err = -ENOMEM; + goto err_out_port_del; + } + err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW, + info->snd_portid, info->snd_seq, 0, NULL); + if (WARN_ON_ONCE(err)) + goto err_out_msg_free; + err = genlmsg_reply(msg, info); + if (err) + goto err_out_port_del; + return 0; + +err_out_msg_free: + nlmsg_free(msg); +err_out_port_del: + devlink_port->ops->port_del(devlink, devlink_port, NULL); + return err; } static int devlink_nl_cmd_port_del_doit(struct sk_buff *skb, -- cgit v1.2.3 From 121dca784fc0f6c022493a5d23d86b3cc20380f4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 31 May 2023 08:35:50 -0700 Subject: tls: suppress wakeups unless we have a full record TLS does not override .poll() so TLS-enabled socket will generate an event whenever data arrives at the TCP socket. This leads to unnecessary wakeups on slow connections. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- net/tls/tls_main.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'net') diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 3d45fdb5c4e9..e02a0d882ed3 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -358,6 +358,39 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) tls_ctx_free(sk, ctx); } +static __poll_t tls_sk_poll(struct file *file, struct socket *sock, + struct poll_table_struct *wait) +{ + struct tls_sw_context_rx *ctx; + struct tls_context *tls_ctx; + struct sock *sk = sock->sk; + struct sk_psock *psock; + __poll_t mask = 0; + u8 shutdown; + int state; + + mask = tcp_poll(file, sock, wait); + + state = inet_sk_state_load(sk); + shutdown = READ_ONCE(sk->sk_shutdown); + if (unlikely(state != TCP_ESTABLISHED || shutdown & RCV_SHUTDOWN)) + return mask; + + tls_ctx = tls_get_ctx(sk); + ctx = tls_sw_ctx_rx(tls_ctx); + psock = sk_psock_get(sk); + + if (skb_queue_empty_lockless(&ctx->rx_list) && + !tls_strp_msg_ready(ctx) && + sk_psock_queue_empty(psock)) + mask &= ~(EPOLLIN | EPOLLRDNORM); + + if (psock) + sk_psock_put(sk, psock); + + return mask; +} + static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, int __user *optlen, int tx) { @@ -928,9 +961,11 @@ static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG] ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE]; ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read; + ops[TLS_BASE][TLS_SW ].poll = tls_sk_poll; ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE]; ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read; + ops[TLS_SW ][TLS_SW ].poll = tls_sk_poll; #ifdef CONFIG_TLS_DEVICE ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE]; -- cgit v1.2.3 From a92fb5c0340411c66b48f66b6b8854f271e4ca8d Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 2 Jun 2023 13:59:25 +0800 Subject: ip_gre: clean up some inconsistent indenting No functional modification involved. net/ipv4/ip_gre.c:192 ipgre_err() warn: inconsistent indenting. Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5375 Signed-off-by: Jiapeng Chong Signed-off-by: David S. Miller --- net/ipv4/ip_gre.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index e55a20264960..81a1cce1a7d1 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -189,10 +189,10 @@ static int ipgre_err(struct sk_buff *skb, u32 info, } #if IS_ENABLED(CONFIG_IPV6) - if (tpi->proto == htons(ETH_P_IPV6) && - !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len, - type, data_len)) - return 0; + if (tpi->proto == htons(ETH_P_IPV6) && + !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len, + type, data_len)) + return 0; #endif if (t->parms.iph.daddr == 0 || -- cgit v1.2.3 From 3f06760c00f56c5fe6c7f3361c2cf64becee1174 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Thu, 1 Jun 2023 18:37:46 +0200 Subject: ipv4: Drop tos parameter from flowi4_update_output() Callers of flowi4_update_output() never try to update ->flowi4_tos: * ip_route_connect() updates ->flowi4_tos with its own current value. * ip_route_newports() has two users: tcp_v4_connect() and dccp_v4_connect. Both initialise fl4 with ip_route_connect(), which in turn sets ->flowi4_tos with RT_TOS(inet_sk(sk)->tos) and ->flowi4_scope based on SOCK_LOCALROUTE. 
Then ip_route_newports() updates ->flowi4_tos with RT_CONN_FLAGS(sk), which is the same as RT_TOS(inet_sk(sk)->tos), unless SOCK_LOCALROUTE is set on the socket. In that case, the lowest order bit is set to 1, to eventually inform ip_route_output_key_hash() to restrict the scope to RT_SCOPE_LINK. This is equivalent to properly setting ->flowi4_scope as ip_route_connect() did. * ip_vs_xmit.c initialises ->flowi4_tos with memset(0), then calls flowi4_update_output() with tos=0. * sctp_v4_get_dst() uses the same RT_CONN_FLAGS_TOS() when initialising ->flowi4_tos and when calling flowi4_update_output(). In the end, ->flowi4_tos never changes. So let's just drop the tos parameter. This will simplify the conversion of ->flowi4_tos from __u8 to dscp_t. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- include/net/flow.h | 3 +-- include/net/route.h | 6 ++---- net/netfilter/ipvs/ip_vs_xmit.c | 4 ++-- net/sctp/protocol.c | 4 +--- 4 files changed, 6 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/include/net/flow.h b/include/net/flow.h index bb8651a6eaa7..7f0adda3bf2f 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -116,11 +116,10 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif, } /* Reset some input parameters after previous lookup */ -static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos, +static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __be32 daddr, __be32 saddr) { fl4->flowi4_oif = oif; - fl4->flowi4_tos = tos; fl4->daddr = daddr; fl4->saddr = saddr; } diff --git a/include/net/route.h b/include/net/route.h index bcc367cf3aa2..5a5c726472bd 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -321,8 +321,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst, if (IS_ERR(rt)) return rt; ip_rt_put(rt); - flowi4_update_output(fl4, oif, fl4->flowi4_tos, fl4->daddr, - fl4->saddr); + flowi4_update_output(fl4, oif, fl4->daddr, fl4->saddr); } security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); return ip_route_output_flow(net, fl4, sk); @@ -337,8 +336,7 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable fl4->fl4_dport = dport; fl4->fl4_sport = sport; ip_rt_put(rt); - flowi4_update_output(fl4, sk->sk_bound_dev_if, - RT_CONN_FLAGS(sk), fl4->daddr, + flowi4_update_output(fl4, sk->sk_bound_dev_if, fl4->daddr, fl4->saddr); security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); return ip_route_output_flow(sock_net(sk), fl4, sk); diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index feb1d7fcb09f..c7652da78c88 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -139,7 +139,7 @@ retry: if (PTR_ERR(rt) == -EINVAL && *saddr && rt_mode & IP_VS_RT_MODE_CONNECT && !loop) { *saddr = 0; - flowi4_update_output(&fl4, 0, 0, daddr, 0); + flowi4_update_output(&fl4, 0, daddr, 0); goto retry; } IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr); @@ -147,7 +147,7 @@ retry: } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) { ip_rt_put(rt); *saddr = fl4.saddr; - flowi4_update_output(&fl4, 0, 0, daddr, fl4.saddr); + flowi4_update_output(&fl4, 0, daddr, fl4.saddr); loop = true; goto retry; } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index c365df24ad33..664d1f2e9121 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -500,9 +500,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, continue; fl4->fl4_sport 
= laddr->a.v4.sin_port; - flowi4_update_output(fl4, - asoc->base.sk->sk_bound_dev_if, - RT_CONN_FLAGS_TOS(asoc->base.sk, tos), + flowi4_update_output(fl4, asoc->base.sk->sk_bound_dev_if, daddr->v4.sin_addr.s_addr, laddr->a.v4.sin_addr.s_addr); -- cgit v1.2.3 From f69de8aa4752adae750892c71711a5b806ec0dff Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Fri, 2 Jun 2023 11:36:07 +0200 Subject: ipv6: lower "link become ready"'s level message The following message is printed in the console each time a network device configured with an IPv6 address is ready to be used: ADDRCONF(NETDEV_CHANGE): : link becomes ready When netns are used extensively -- e.g. by repeatedly re-creating netns instances linked with veth pairs so they can talk to each other for testing purposes, as the mptcp_join.sh selftest does -- this generates a lot of such messages: more than 700 when executing mptcp_join.sh with the latest version. It looks like this message is not that helpful after all: maybe it can serve as a hint that something is wrong, e.g. that a device is being regularly reconfigured by accident, but even then there are better ways to monitor and diagnose such issues. Looking at commit 3c21edbd1137 ("[IPV6]: Defer IPv6 device initialization until the link becomes ready."), which introduced this message, it seems it was added to verify that the new feature was working as expected. It could have used a lower level than "info" from the beginning, but that was fine back then: 17 years ago. It therefore seems reasonable today to simply lower its level, similar to commit 7c62b8dd5ca8 ("net/ipv6: lower the level of "link is not ready" messages") and as suggested by Mat [1], Stephen and David [2]. Link: https://lore.kernel.org/mptcp/614e76ac-184e-c553-af72-084f792e60b0@kernel.org/T/ [1] Link: https://lore.kernel.org/netdev/68035bad-b53e-91cb-0e4a-007f27d62b05@tessares.net/T/ [2] Suggested-by: Mat Martineau Suggested-by: Stephen Hemminger Suggested-by: David Ahern Signed-off-by: Matthieu Baerts Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3797917237d0..5479da08ef40 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3633,8 +3633,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, idev->if_flags |= IF_READY; } - pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n", - dev->name); + pr_debug("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n", + dev->name); run_pending = 1; } -- cgit v1.2.3 From fc80fc2d4e39137869da3150ee169b40bf879287 Mon Sep 17 00:00:00 2001 From: Ding Hui Date: Mon, 15 May 2023 10:13:07 +0800 Subject: SUNRPC: Fix UAF in svc_tcp_listen_data_ready() After the listener svc_sock is freed, and before svc_tcp_accept() is invoked for the established child sock, there is a window in which the newsock still retains the freed listener svc_sock in sk_user_data, cloned from its parent. In that race window, if data is received on the newsock, a use-after-free is reported in svc_tcp_listen_data_ready(). Reproduce with two tasks: 1. while :; do rpc.nfsd 0 ; rpc.nfsd; done 2.
while :; do echo "" | ncat -4 127.0.0.1 2049 ; done KASAN report: ================================================================== BUG: KASAN: slab-use-after-free in svc_tcp_listen_data_ready+0x1cf/0x1f0 [sunrpc] Read of size 8 at addr ffff888139d96228 by task nc/102553 CPU: 7 PID: 102553 Comm: nc Not tainted 6.3.0+ #18 Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020 Call Trace: dump_stack_lvl+0x33/0x50 print_address_description.constprop.0+0x27/0x310 print_report+0x3e/0x70 kasan_report+0xae/0xe0 svc_tcp_listen_data_ready+0x1cf/0x1f0 [sunrpc] tcp_data_queue+0x9f4/0x20e0 tcp_rcv_established+0x666/0x1f60 tcp_v4_do_rcv+0x51c/0x850 tcp_v4_rcv+0x23fc/0x2e80 ip_protocol_deliver_rcu+0x62/0x300 ip_local_deliver_finish+0x267/0x350 ip_local_deliver+0x18b/0x2d0 ip_rcv+0x2fb/0x370 __netif_receive_skb_one_core+0x166/0x1b0 process_backlog+0x24c/0x5e0 __napi_poll+0xa2/0x500 net_rx_action+0x854/0xc90 __do_softirq+0x1bb/0x5de do_softirq+0xcb/0x100 ... Allocated by task 102371: kasan_save_stack+0x1e/0x40 kasan_set_track+0x21/0x30 __kasan_kmalloc+0x7b/0x90 svc_setup_socket+0x52/0x4f0 [sunrpc] svc_addsock+0x20d/0x400 [sunrpc] __write_ports_addfd+0x209/0x390 [nfsd] write_ports+0x239/0x2c0 [nfsd] nfsctl_transaction_write+0xac/0x110 [nfsd] vfs_write+0x1c3/0xae0 ksys_write+0xed/0x1c0 do_syscall_64+0x38/0x90 entry_SYSCALL_64_after_hwframe+0x72/0xdc Freed by task 102551: kasan_save_stack+0x1e/0x40 kasan_set_track+0x21/0x30 kasan_save_free_info+0x2a/0x50 __kasan_slab_free+0x106/0x190 __kmem_cache_free+0x133/0x270 svc_xprt_free+0x1e2/0x350 [sunrpc] svc_xprt_destroy_all+0x25a/0x440 [sunrpc] nfsd_put+0x125/0x240 [nfsd] nfsd_svc+0x2cb/0x3c0 [nfsd] write_threads+0x1ac/0x2a0 [nfsd] nfsctl_transaction_write+0xac/0x110 [nfsd] vfs_write+0x1c3/0xae0 ksys_write+0xed/0x1c0 do_syscall_64+0x38/0x90 entry_SYSCALL_64_after_hwframe+0x72/0xdc Fix the UAF by simply doing nothing in svc_tcp_listen_data_ready() if state != TCP_LISTEN, that will avoid dereferencing svsk for all child socket. Link: https://lore.kernel.org/lkml/20230507091131.23540-1-dinghui@sangfor.com.cn/ Fixes: fa9251afc33c ("SUNRPC: Call the default socket callbacks instead of open coding") Signed-off-by: Ding Hui Cc: Signed-off-by: Chuck Lever --- net/sunrpc/svcsock.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index f77cebe2c071..15f4d0d40bdd 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -826,12 +826,6 @@ static void svc_tcp_listen_data_ready(struct sock *sk) trace_sk_data_ready(sk); - if (svsk) { - /* Refer to svc_setup_socket() for details. */ - rmb(); - svsk->sk_odata(sk); - } - /* * This callback may called twice when a new connection * is established as a child socket inherits everything @@ -840,13 +834,18 @@ static void svc_tcp_listen_data_ready(struct sock *sk) * when one of child sockets become ESTABLISHED. * 2) data_ready method of the child socket may be called * when it receives data before the socket is accepted. - * In case of 2, we should ignore it silently. + * In case of 2, we should ignore it silently and DO NOT + * dereference svsk. */ - if (sk->sk_state == TCP_LISTEN) { - if (svsk) { - set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); - svc_xprt_enqueue(&svsk->sk_xprt); - } + if (sk->sk_state != TCP_LISTEN) + return; + + if (svsk) { + /* Refer to svc_setup_socket() for details. 
*/ + rmb(); + svsk->sk_odata(sk); + set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); + svc_xprt_enqueue(&svsk->sk_xprt); } } -- cgit v1.2.3 From e8277327d74fb28bf46969ad68a225272f04c318 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 15 May 2023 09:32:41 -0400 Subject: SUNRPC: Fix an incorrect comment The correct function name is svc_tcp_listen_data_ready(). Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- net/sunrpc/svcsock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 15f4d0d40bdd..a2d0bf429347 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1463,7 +1463,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, svsk->sk_owspace = inet->sk_write_space; /* * This barrier is necessary in order to prevent race condition - * with svc_data_ready(), svc_listen_data_ready() and others + * with svc_data_ready(), svc_tcp_listen_data_ready(), and others * when calling callbacks above. */ wmb(); -- cgit v1.2.3 From cce4ee9c7834f68cf6a221d61d614c2748611c1c Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 15 May 2023 09:32:47 -0400 Subject: SUNRPC: Remove dprintk() in svc_handle_xprt() When enabled, this dprintk() fires for every incoming RPC, which is an enormous amount of log traffic. These days, after the first few hundred log messages, the system journald is just going to mute it, along with all other NFSD debug output. Let's rely on trace points for this high-traffic information instead. Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- net/sunrpc/svc_xprt.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'net') diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 13a14897bc17..42536a0b1db4 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -843,9 +843,6 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) svc_xprt_received(xprt); } else if (svc_xprt_reserve_slot(rqstp, xprt)) { /* XPT_DATA|XPT_DEFERRED case: */ - dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", - rqstp, rqstp->rq_pool->sp_id, xprt, - kref_read(&xprt->xpt_ref)); rqstp->rq_deferred = svc_deferred_dequeue(xprt); if (rqstp->rq_deferred) len = svc_deferred_recv(rqstp); -- cgit v1.2.3 From d7900daea0b9818cd1cbeb9c5bd94653c487b0e4 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 15 May 2023 09:32:53 -0400 Subject: SUNRPC: Improve observability in svc_tcp_accept() The -ENOMEM arm could fire repeatedly if the system runs low on memory, so remove it. Don't bother to trace -EAGAIN error events, since those fire after a listener is created (with no work done) and once again after an accept has been handled successfully (again, with no work done). 
Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- net/sunrpc/svcsock.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index a2d0bf429347..fbe33c922882 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -886,13 +886,8 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); err = kernel_accept(sock, &newsock, O_NONBLOCK); if (err < 0) { - if (err == -ENOMEM) - printk(KERN_WARNING "%s: no more sockets!\n", - serv->sv_name); - else if (err != -EAGAIN) - net_warn_ratelimited("%s: accept failed (err %d)!\n", - serv->sv_name, -err); - trace_svcsock_accept_err(xprt, serv->sv_name, err); + if (err != -EAGAIN) + trace_svcsock_accept_err(xprt, serv->sv_name, err); return NULL; } if (IS_ERR(sock_alloc_file(newsock, O_NONBLOCK, NULL))) -- cgit v1.2.3 From c42bebca967da88c054ccb5ab152c9822c054662 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 15 May 2023 09:33:00 -0400 Subject: SUNRPC: Trace struct svc_sock lifetime events Capture a timestamp and pointer address during the creation and destruction of struct svc_sock to record its lifetime. This helps to diagnose transport reference counting issues. Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- include/trace/events/sunrpc.h | 39 +++++++++++++++++++++++++++------------ net/sunrpc/svcsock.c | 4 +++- 2 files changed, 30 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 31bc7025cb44..69e42ef30979 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -2104,31 +2104,46 @@ DEFINE_SVC_DEFERRED_EVENT(drop); DEFINE_SVC_DEFERRED_EVENT(queue); DEFINE_SVC_DEFERRED_EVENT(recv); -TRACE_EVENT(svcsock_new_socket, +DECLARE_EVENT_CLASS(svcsock_lifetime_class, TP_PROTO( + const void *svsk, const struct socket *socket ), - - TP_ARGS(socket), - + TP_ARGS(svsk, socket), TP_STRUCT__entry( + __field(unsigned int, netns_ino) + __field(const void *, svsk) + __field(const void *, sk) __field(unsigned long, type) __field(unsigned long, family) - __field(bool, listener) + __field(unsigned long, state) ), - TP_fast_assign( + struct sock *sk = socket->sk; + + __entry->netns_ino = sock_net(sk)->ns.inum; + __entry->svsk = svsk; + __entry->sk = sk; __entry->type = socket->type; - __entry->family = socket->sk->sk_family; - __entry->listener = (socket->sk->sk_state == TCP_LISTEN); + __entry->family = sk->sk_family; + __entry->state = sk->sk_state; ), - - TP_printk("type=%s family=%s%s", - show_socket_type(__entry->type), + TP_printk("svsk=%p type=%s family=%s%s", + __entry->svsk, show_socket_type(__entry->type), rpc_show_address_family(__entry->family), - __entry->listener ? " (listener)" : "" + __entry->state == TCP_LISTEN ? 
" (listener)" : "" ) ); +#define DEFINE_SVCSOCK_LIFETIME_EVENT(name) \ + DEFINE_EVENT(svcsock_lifetime_class, name, \ + TP_PROTO( \ + const void *svsk, \ + const struct socket *socket \ + ), \ + TP_ARGS(svsk, socket)) + +DEFINE_SVCSOCK_LIFETIME_EVENT(svcsock_new); +DEFINE_SVCSOCK_LIFETIME_EVENT(svcsock_free); TRACE_EVENT(svcsock_marker, TP_PROTO( diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index fbe33c922882..5f519fc0541b 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1470,7 +1470,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv, else svc_tcp_init(svsk, serv); - trace_svcsock_new_socket(sock); + trace_svcsock_new(svsk, sock); return svsk; } @@ -1651,6 +1651,8 @@ static void svc_sock_free(struct svc_xprt *xprt) struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); struct socket *sock = svsk->sk_sock; + trace_svcsock_free(svsk, sock); + tls_handshake_cancel(sock->sk); if (sock->file) sockfd_put(sock); -- cgit v1.2.3 From 5f7fc5d69f6e92ec0b38774c387f5cf7812c5806 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 15 May 2023 09:47:29 -0400 Subject: SUNRPC: Resupply rq_pages from node-local memory svc_init_buffer() is careful to allocate the initial set of server thread buffer pages from memory on the local NUMA node. svc_alloc_arg() should also be that careful. Signed-off-by: Chuck Lever --- net/sunrpc/svc_xprt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 42536a0b1db4..9ca3393a197e 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -685,8 +685,9 @@ static int svc_alloc_arg(struct svc_rqst *rqstp) } for (filled = 0; filled < pages; filled = ret) { - ret = alloc_pages_bulk_array(GFP_KERNEL, pages, - rqstp->rq_pages); + ret = alloc_pages_bulk_array_node(GFP_KERNEL, + rqstp->rq_pool->sp_id, + pages, rqstp->rq_pages); if (ret > filled) /* Made progress, don't sleep yet */ continue; -- cgit v1.2.3 From 88e4d41a264d00fbfd344eb2485c1c59096197f4 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 15 May 2023 09:47:36 -0400 Subject: SUNRPC: Use __alloc_bulk_pages() in svc_init_buffer() Clean up: Use the bulk page allocator when filling a server thread's buffer page array. Signed-off-by: Chuck Lever --- net/sunrpc/svc.c | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 79967b6925bd..e6d4cec61e47 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -597,34 +597,25 @@ svc_destroy(struct kref *ref) } EXPORT_SYMBOL_GPL(svc_destroy); -/* - * Allocate an RPC server's buffer space. - * We allocate pages and place them in rq_pages. - */ -static int +static bool svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node) { - unsigned int pages, arghi; + unsigned long pages, ret; /* bc_xprt uses fore channel allocated buffers */ if (svc_is_backchannel(rqstp)) - return 1; + return true; pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. 
* We assume one is at most one page */ - arghi = 0; WARN_ON_ONCE(pages > RPCSVC_MAXPAGES); if (pages > RPCSVC_MAXPAGES) pages = RPCSVC_MAXPAGES; - while (pages) { - struct page *p = alloc_pages_node(node, GFP_KERNEL, 0); - if (!p) - break; - rqstp->rq_pages[arghi++] = p; - pages--; - } - return pages == 0; + + ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages, + rqstp->rq_pages); + return ret == pages; } /* -- cgit v1.2.3 From 411486626e5779bd85439282985ff3fc25a3f6d2 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 1 Jun 2023 18:21:54 +0200 Subject: bpf/xdp: optimize bpf_xdp_pointer to avoid reading sinfo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We currently observe a significant performance degradation in samples/bpf xdp1 and xdp2, due to the XDP multibuffer "xdp.frags" handling added in commit 772251742262 ("samples/bpf: fixup some tools to be able to support xdp multibuffer"). This patch reduces the overhead by avoiding reading/loading the shared_info (sinfo) memory area when the XDP packet doesn't have any frags. This improves performance because sinfo is located in another cacheline. Function bpf_xdp_pointer() is used by the BPF helpers bpf_xdp_load_bytes() and bpf_xdp_store_bytes(). As a note to reviewers, xdp_get_buff_len() can potentially access sinfo, but it uses an xdp_buff_has_frags() flags-bit check to avoid accessing sinfo in the no-frags case. The likely/unlikely instrumentation lays out the asm code such that the sinfo access isn't interleaved with the no-frags case (checked on GCC 12.2.1-4). The generated asm code is more compact towards the no-frags case. The BPF kfunc bpf_dynptr_slice() also uses bpf_xdp_pointer(), so it should benefit as well. Signed-off-by: Jesper Dangaard Brouer Acked-by: Lorenzo Bianconi Acked-by: Toke Høiland-Jørgensen Link: https://lore.kernel.org/r/168563651438.3436004.17735707525651776648.stgit@firesoul Signed-off-by: Alexei Starovoitov --- net/core/filter.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/core/filter.c b/net/core/filter.c index d25d52854c21..428df050d021 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3948,20 +3948,21 @@ void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len) { - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); u32 size = xdp->data_end - xdp->data; + struct skb_shared_info *sinfo; void *addr = xdp->data; int i; if (unlikely(offset > 0xffff || len > 0xffff)) return ERR_PTR(-EFAULT); - if (offset + len > xdp_get_buff_len(xdp)) + if (unlikely(offset + len > xdp_get_buff_len(xdp))) return ERR_PTR(-EINVAL); - if (offset < size) /* linear area */ + if (likely(offset < size)) /* linear area */ goto out; + sinfo = xdp_get_shared_info_from_buff(xdp); offset -= size; for (i = 0; i < sinfo->nr_frags; i++) { /* paged area */ u32 frag_size = skb_frag_size(&sinfo->frags[i]); -- cgit v1.2.3 From b6d7c0eb2dcbd238fa233a3a1737654e380e784a Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Fri, 2 Jun 2023 12:21:34 +0200 Subject: lib/ref_tracker: improve printing stats In case the library is tracking a busy subsystem, simply printing a stack trace for every active reference will spam the log with long, hard-to-read, redundant stack traces.
To improve readabilty following changes have been made: - reports are printed per stack_handle - log is more compact, - added display name for ref_tracker_dir - it will differentiate multiple subsystems, - stack trace is printed indented, in the same printk call, - info about dropped references is printed as well. Signed-off-by: Andrzej Hajda Reviewed-by: Andi Shyti Reviewed-by: Eric Dumazet Signed-off-by: Jakub Kicinski --- include/linux/ref_tracker.h | 9 ++++- lib/ref_tracker.c | 90 +++++++++++++++++++++++++++++++++++++++------ lib/test_ref_tracker.c | 2 +- net/core/dev.c | 2 +- net/core/net_namespace.c | 4 +- 5 files changed, 90 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h index 87a92f2bec1b..19a69e7809d6 100644 --- a/include/linux/ref_tracker.h +++ b/include/linux/ref_tracker.h @@ -17,12 +17,15 @@ struct ref_tracker_dir { bool dead; struct list_head list; /* List of active trackers */ struct list_head quarantine; /* List of dead trackers */ + char name[32]; #endif }; #ifdef CONFIG_REF_TRACKER + static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, - unsigned int quarantine_count) + unsigned int quarantine_count, + const char *name) { INIT_LIST_HEAD(&dir->list); INIT_LIST_HEAD(&dir->quarantine); @@ -31,6 +34,7 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, dir->dead = false; refcount_set(&dir->untracked, 1); refcount_set(&dir->no_tracker, 1); + strscpy(dir->name, name, sizeof(dir->name)); stack_depot_init(); } @@ -51,7 +55,8 @@ int ref_tracker_free(struct ref_tracker_dir *dir, #else /* CONFIG_REF_TRACKER */ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, - unsigned int quarantine_count) + unsigned int quarantine_count, + const char *name) { } diff --git a/lib/ref_tracker.c b/lib/ref_tracker.c index d4eb0929af8f..2ffe79c90c17 100644 --- a/lib/ref_tracker.c +++ b/lib/ref_tracker.c @@ -1,11 +1,16 @@ // SPDX-License-Identifier: GPL-2.0-or-later + +#define pr_fmt(fmt) "ref_tracker: " fmt + #include +#include #include #include #include #include #define REF_TRACKER_STACK_ENTRIES 16 +#define STACK_BUF_SIZE 1024 struct ref_tracker { struct list_head head; /* anchor into dir->list or dir->quarantine */ @@ -14,24 +19,87 @@ struct ref_tracker { depot_stack_handle_t free_stack_handle; }; -void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir, - unsigned int display_limit) +struct ref_tracker_dir_stats { + int total; + int count; + struct { + depot_stack_handle_t stack_handle; + unsigned int count; + } stacks[]; +}; + +static struct ref_tracker_dir_stats * +ref_tracker_get_stats(struct ref_tracker_dir *dir, unsigned int limit) { + struct ref_tracker_dir_stats *stats; struct ref_tracker *tracker; - unsigned int i = 0; - lockdep_assert_held(&dir->lock); + stats = kmalloc(struct_size(stats, stacks, limit), + GFP_NOWAIT | __GFP_NOWARN); + if (!stats) + return ERR_PTR(-ENOMEM); + stats->total = 0; + stats->count = 0; list_for_each_entry(tracker, &dir->list, head) { - if (i < display_limit) { - pr_err("leaked reference.\n"); - if (tracker->alloc_stack_handle) - stack_depot_print(tracker->alloc_stack_handle); - i++; - } else { - break; + depot_stack_handle_t stack = tracker->alloc_stack_handle; + int i; + + ++stats->total; + for (i = 0; i < stats->count; ++i) + if (stats->stacks[i].stack_handle == stack) + break; + if (i >= limit) + continue; + if (i >= stats->count) { + stats->stacks[i].stack_handle = stack; + stats->stacks[i].count = 0; + ++stats->count; } + 
++stats->stacks[i].count; + } + + return stats; +} + +void ref_tracker_dir_print_locked(struct ref_tracker_dir *dir, + unsigned int display_limit) +{ + struct ref_tracker_dir_stats *stats; + unsigned int i = 0, skipped; + depot_stack_handle_t stack; + char *sbuf; + + lockdep_assert_held(&dir->lock); + + if (list_empty(&dir->list)) + return; + + stats = ref_tracker_get_stats(dir, display_limit); + if (IS_ERR(stats)) { + pr_err("%s@%pK: couldn't get stats, error %pe\n", + dir->name, dir, stats); + return; } + + sbuf = kmalloc(STACK_BUF_SIZE, GFP_NOWAIT | __GFP_NOWARN); + + for (i = 0, skipped = stats->total; i < stats->count; ++i) { + stack = stats->stacks[i].stack_handle; + if (sbuf && !stack_depot_snprint(stack, sbuf, STACK_BUF_SIZE, 4)) + sbuf[0] = 0; + pr_err("%s@%pK has %d/%d users at\n%s\n", dir->name, dir, + stats->stacks[i].count, stats->total, sbuf); + skipped -= stats->stacks[i].count; + } + + if (skipped) + pr_err("%s@%pK skipped reports about %d/%d users.\n", + dir->name, dir, skipped, stats->total); + + kfree(sbuf); + + kfree(stats); } EXPORT_SYMBOL(ref_tracker_dir_print_locked); diff --git a/lib/test_ref_tracker.c b/lib/test_ref_tracker.c index 19d7dec70cc6..49970a7c96f3 100644 --- a/lib/test_ref_tracker.c +++ b/lib/test_ref_tracker.c @@ -64,7 +64,7 @@ static int __init test_ref_tracker_init(void) { int i; - ref_tracker_dir_init(&ref_dir, 100); + ref_tracker_dir_init(&ref_dir, 100, "selftest"); timer_setup(&test_ref_tracker_timer, test_ref_tracker_timer_func, 0); mod_timer(&test_ref_tracker_timer, jiffies + 1); diff --git a/net/core/dev.c b/net/core/dev.c index b3c13e041935..5ca7a133d3be 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10630,7 +10630,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev = PTR_ALIGN(p, NETDEV_ALIGN); dev->padded = (char *)dev - (char *)p; - ref_tracker_dir_init(&dev->refcnt_tracker, 128); + ref_tracker_dir_init(&dev->refcnt_tracker, 128, name); #ifdef CONFIG_PCPU_DEV_REFCNT dev->pcpu_refcnt = alloc_percpu(int); if (!dev->pcpu_refcnt) diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 3e3598cd49f2..f4183c4c1ec8 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(get_net_ns_by_id); /* init code that must occur even if setup_net() is not called. */ static __net_init void preinit_net(struct net *net) { - ref_tracker_dir_init(&net->notrefcnt_tracker, 128); + ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net notrefcnt"); } /* @@ -322,7 +322,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) LIST_HEAD(net_exit_list); refcount_set(&net->ns.count, 1); - ref_tracker_dir_init(&net->refcnt_tracker, 128); + ref_tracker_dir_init(&net->refcnt_tracker, 128, "net refcnt"); refcount_set(&net->passive, 1); get_random_bytes(&net->hash_mix, sizeof(u32)); -- cgit v1.2.3 From 2b03bcae66c7b29f151e51d3109dbe1e31272235 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 31 May 2023 12:04:21 +0100 Subject: kcm: Support MSG_SPLICE_PAGES Make AF_KCM sendmsg() support MSG_SPLICE_PAGES. This causes pages to be spliced from the source iterator if possible. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. 
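For context, the caller-side pattern that MSG_SPLICE_PAGES enables looks roughly like the sketch below. It mirrors the kcm_sendpage() conversion later in this series; the wrapper name is illustrative and not part of this patch.

/* Minimal sketch, assuming an in-kernel caller with a KCM socket: splice
 * one page into the socket via sendmsg(MSG_SPLICE_PAGES) instead of the
 * old ->sendpage() path.
 */
static ssize_t kcm_splice_one_page(struct socket *sock, struct page *page,
				   int offset, size_t size, int flags)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };

	/* Describe the page as a single bio_vec and let sendmsg() attach it
	 * to the skb as a frag instead of copying the data.
	 */
	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
	return kcm_sendmsg(sock, &msg, size);
}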
Signed-off-by: David Howells cc: Tom Herbert cc: Tom Herbert cc: Cong Wang cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/kcm/kcmsock.c | 57 ++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 40 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index cfe828bd7fc6..8555ede66333 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -989,29 +989,52 @@ start: merge = false; } - copy = min_t(int, msg_data_left(msg), - pfrag->size - pfrag->offset); + if (msg->msg_flags & MSG_SPLICE_PAGES) { + copy = msg_data_left(msg); + if (!sk_wmem_schedule(sk, copy)) + goto wait_for_memory; - if (!sk_wmem_schedule(sk, copy)) - goto wait_for_memory; + err = skb_splice_from_iter(skb, &msg->msg_iter, copy, + sk->sk_allocation); + if (err < 0) { + if (err == -EMSGSIZE) + goto wait_for_memory; + goto out_error; + } - err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, - pfrag->page, - pfrag->offset, - copy); - if (err) - goto out_error; + copy = err; + skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; + sk_wmem_queued_add(sk, copy); + sk_mem_charge(sk, copy); - /* Update the skb. */ - if (merge) { - skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); + if (head != skb) + head->truesize += copy; } else { - skb_fill_page_desc(skb, i, pfrag->page, - pfrag->offset, copy); - get_page(pfrag->page); + copy = min_t(int, msg_data_left(msg), + pfrag->size - pfrag->offset); + if (!sk_wmem_schedule(sk, copy)) + goto wait_for_memory; + + err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, + pfrag->page, + pfrag->offset, + copy); + if (err) + goto out_error; + + /* Update the skb. */ + if (merge) { + skb_frag_size_add( + &skb_shinfo(skb)->frags[i - 1], copy); + } else { + skb_fill_page_desc(skb, i, pfrag->page, + pfrag->offset, copy); + get_page(pfrag->page); + } + + pfrag->offset += copy; } - pfrag->offset += copy; copied += copy; if (head != skb) { head->len += copy; -- cgit v1.2.3 From 5bb3a5cb3e217b838e85661f527818e16ce61bec Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 31 May 2023 12:04:22 +0100 Subject: kcm: Convert kcm_sendpage() to use MSG_SPLICE_PAGES Convert kcm_sendpage() to use sendmsg() with MSG_SPLICE_PAGES rather than directly splicing in the pages itself. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. 
Signed-off-by: David Howells cc: Tom Herbert cc: Tom Herbert cc: Cong Wang cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/kcm/kcmsock.c | 161 ++++++------------------------------------------------ 1 file changed, 18 insertions(+), 143 deletions(-) (limited to 'net') diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 8555ede66333..ba22af16b96d 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -761,149 +761,6 @@ static void kcm_push(struct kcm_sock *kcm) kcm_write_msgs(kcm); } -static ssize_t kcm_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags) - -{ - struct sock *sk = sock->sk; - struct kcm_sock *kcm = kcm_sk(sk); - struct sk_buff *skb = NULL, *head = NULL; - long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); - bool eor; - int err = 0; - int i; - - if (flags & MSG_SENDPAGE_NOTLAST) - flags |= MSG_MORE; - - /* No MSG_EOR from splice, only look at MSG_MORE */ - eor = !(flags & MSG_MORE); - - lock_sock(sk); - - sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); - - err = -EPIPE; - if (sk->sk_err) - goto out_error; - - if (kcm->seq_skb) { - /* Previously opened message */ - head = kcm->seq_skb; - skb = kcm_tx_msg(head)->last_skb; - i = skb_shinfo(skb)->nr_frags; - - if (skb_can_coalesce(skb, i, page, offset)) { - skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); - skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; - goto coalesced; - } - - if (i >= MAX_SKB_FRAGS) { - struct sk_buff *tskb; - - tskb = alloc_skb(0, sk->sk_allocation); - while (!tskb) { - kcm_push(kcm); - err = sk_stream_wait_memory(sk, &timeo); - if (err) - goto out_error; - } - - if (head == skb) - skb_shinfo(head)->frag_list = tskb; - else - skb->next = tskb; - - skb = tskb; - skb->ip_summed = CHECKSUM_UNNECESSARY; - i = 0; - } - } else { - /* Call the sk_stream functions to manage the sndbuf mem. */ - if (!sk_stream_memory_free(sk)) { - kcm_push(kcm); - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - err = sk_stream_wait_memory(sk, &timeo); - if (err) - goto out_error; - } - - head = alloc_skb(0, sk->sk_allocation); - while (!head) { - kcm_push(kcm); - err = sk_stream_wait_memory(sk, &timeo); - if (err) - goto out_error; - } - - skb = head; - i = 0; - } - - get_page(page); - skb_fill_page_desc_noacc(skb, i, page, offset, size); - skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; - -coalesced: - skb->len += size; - skb->data_len += size; - skb->truesize += size; - sk->sk_wmem_queued += size; - sk_mem_charge(sk, size); - - if (head != skb) { - head->len += size; - head->data_len += size; - head->truesize += size; - } - - if (eor) { - bool not_busy = skb_queue_empty(&sk->sk_write_queue); - - /* Message complete, queue it on send buffer */ - __skb_queue_tail(&sk->sk_write_queue, head); - kcm->seq_skb = NULL; - KCM_STATS_INCR(kcm->stats.tx_msgs); - - if (flags & MSG_BATCH) { - kcm->tx_wait_more = true; - } else if (kcm->tx_wait_more || not_busy) { - err = kcm_write_msgs(kcm); - if (err < 0) { - /* We got a hard error in write_msgs but have - * already queued this message. 
Report an error - * in the socket, but don't affect return value - * from sendmsg - */ - pr_warn("KCM: Hard failure on kcm_write_msgs\n"); - report_csk_error(&kcm->sk, -err); - } - } - } else { - /* Message not complete, save state */ - kcm->seq_skb = head; - kcm_tx_msg(head)->last_skb = skb; - } - - KCM_STATS_ADD(kcm->stats.tx_bytes, size); - - release_sock(sk); - return size; - -out_error: - kcm_push(kcm); - - err = sk_stream_error(sk, flags, err); - - /* make sure we wake any epoll edge trigger waiter */ - if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN)) - sk->sk_write_space(sk); - - release_sock(sk); - return err; -} - static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; @@ -1111,6 +968,24 @@ out_error: return err; } +static ssize_t kcm_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, int flags) + +{ + struct bio_vec bvec; + struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; + + if (flags & MSG_SENDPAGE_NOTLAST) + msg.msg_flags |= MSG_MORE; + + if (flags & MSG_OOB) + return -EOPNOTSUPP; + + bvec_set_page(&bvec, page, size, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); + return kcm_sendmsg(sock, &msg, size); +} + static int kcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { -- cgit v1.2.3 From 7b355b76e2b32cc516969c01984efdf49b11fc81 Mon Sep 17 00:00:00 2001 From: Richard Gobert Date: Thu, 1 Jun 2023 18:14:09 +0200 Subject: gro: decrease size of CB The GRO control block (NAPI_GRO_CB) is currently at its maximum size. This commit reduces its size by putting two groups of fields that are used only at different times into a union. Specifically, the fields frag0 and frag0_len are the fields that make up the frag0 optimisation mechanism, which is used during the initial parsing of the SKB. The fields last and age are used after the initial parsing, while the SKB is stored in the GRO list, waiting for other packets to arrive. There was one location in dev_gro_receive that modified the frag0 fields after setting last and age. I changed this accordingly without altering the code behaviour. Signed-off-by: Richard Gobert Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20230601161407.GA9253@debian Signed-off-by: Paolo Abeni --- include/net/gro.h | 26 ++++++++++++++++---------- net/core/gro.c | 19 ++++++++++++------- 2 files changed, 28 insertions(+), 17 deletions(-) (limited to 'net') diff --git a/include/net/gro.h b/include/net/gro.h index a4fab706240d..7b47dd6ce94f 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -11,11 +11,23 @@ #include struct napi_gro_cb { - /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ - void *frag0; + union { + struct { + /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ + void *frag0; - /* Length of frag0. */ - unsigned int frag0_len; + /* Length of frag0. */ + unsigned int frag0_len; + }; + + struct { + /* used in skb_gro_receive() slow path */ + struct sk_buff *last; + + /* jiffies when first packet was created/queued */ + unsigned long age; + }; + }; /* This indicates where we are processing relative to skb->data. 
*/ int data_offset; @@ -32,9 +44,6 @@ struct napi_gro_cb { /* Used in ipv6_gro_receive() and foo-over-udp */ u16 proto; - /* jiffies when first packet was created/queued */ - unsigned long age; - /* Used in napi_gro_cb::free */ #define NAPI_GRO_FREE 1 #define NAPI_GRO_FREE_STOLEN_HEAD 2 @@ -77,9 +86,6 @@ struct napi_gro_cb { /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; - - /* used in skb_gro_receive() slow path */ - struct sk_buff *last; }; #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) diff --git a/net/core/gro.c b/net/core/gro.c index 6783a47a6136..4d45f78e2fac 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -458,6 +458,14 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow) } } +static void gro_try_pull_from_frag0(struct sk_buff *skb) +{ + int grow = skb_gro_offset(skb) - skb_headlen(skb); + + if (grow > 0) + gro_pull_from_frag0(skb, grow); +} + static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) { struct sk_buff *oldest; @@ -487,7 +495,6 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff struct sk_buff *pp = NULL; enum gro_result ret; int same_flow; - int grow; if (netif_elide_gro(skb->dev)) goto normal; @@ -562,17 +569,14 @@ found_ptype: else gro_list->count++; + /* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */ + gro_try_pull_from_frag0(skb); NAPI_GRO_CB(skb)->age = jiffies; NAPI_GRO_CB(skb)->last = skb; if (!skb_is_gso(skb)) skb_shinfo(skb)->gso_size = skb_gro_len(skb); list_add(&skb->list, &gro_list->list); ret = GRO_HELD; - -pull: - grow = skb_gro_offset(skb) - skb_headlen(skb); - if (grow > 0) - gro_pull_from_frag0(skb, grow); ok: if (gro_list->count) { if (!test_bit(bucket, &napi->gro_bitmask)) @@ -585,7 +589,8 @@ ok: normal: ret = GRO_NORMAL; - goto pull; + gro_try_pull_from_frag0(skb); + goto ok; } struct packet_offload *gro_find_receive_by_type(__be16 type) -- cgit v1.2.3 From acb8bca343f8d5b8697d027771abf8a371580d53 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 4 May 2023 16:45:05 +0300 Subject: wifi: mac80211: HW restart for MLO Implement proper reconfiguration for interfaces that are doing MLO, in order to be able to recover from HW restart correctly. 
Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230504134511.828474-6-gregory.greenman@intel.com Signed-off-by: Johannes Berg --- net/mac80211/util.c | 107 +++++++++++++++++++++++++++++++++++----------------- 1 file changed, 73 insertions(+), 34 deletions(-) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 1527d6aafc14..75c517dc8bee 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2624,21 +2624,55 @@ int ieee80211_reconfig(struct ieee80211_local *local) /* Finally also reconfigure all the BSS information */ list_for_each_entry(sdata, &local->interfaces, list) { + /* common change flags for all interface types - link only */ + u32 changed = BSS_CHANGED_ERP_CTS_PROT | + BSS_CHANGED_ERP_PREAMBLE | + BSS_CHANGED_ERP_SLOT | + BSS_CHANGED_HT | + BSS_CHANGED_BASIC_RATES | + BSS_CHANGED_BEACON_INT | + BSS_CHANGED_BSSID | + BSS_CHANGED_CQM | + BSS_CHANGED_QOS | + BSS_CHANGED_TXPOWER | + BSS_CHANGED_MCAST_RATE; + struct ieee80211_link_data *link = NULL; unsigned int link_id; - u32 changed; + u32 active_links = 0; if (!ieee80211_sdata_running(sdata)) continue; sdata_lock(sdata); + if (sdata->vif.valid_links) { + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS] = { + [0] = &sdata->vif.bss_conf, + }; + + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + /* start with a single active link */ + active_links = sdata->vif.active_links; + link_id = ffs(active_links) - 1; + sdata->vif.active_links = BIT(link_id); + } + + drv_change_vif_links(local, sdata, 0, + sdata->vif.active_links, + old); + } + for (link_id = 0; link_id < ARRAY_SIZE(sdata->vif.link_conf); link_id++) { - struct ieee80211_link_data *link; + if (sdata->vif.valid_links && + !(sdata->vif.active_links & BIT(link_id))) + continue; link = sdata_dereference(sdata->link[link_id], sdata); - if (link) - ieee80211_assign_chanctx(local, sdata, link); + if (!link) + continue; + + ieee80211_assign_chanctx(local, sdata, link); } switch (sdata->vif.type) { @@ -2658,42 +2692,42 @@ int ieee80211_reconfig(struct ieee80211_local *local) &sdata->deflink.tx_conf[i]); break; } - sdata_unlock(sdata); - - /* common change flags for all interface types */ - changed = BSS_CHANGED_ERP_CTS_PROT | - BSS_CHANGED_ERP_PREAMBLE | - BSS_CHANGED_ERP_SLOT | - BSS_CHANGED_HT | - BSS_CHANGED_BASIC_RATES | - BSS_CHANGED_BEACON_INT | - BSS_CHANGED_BSSID | - BSS_CHANGED_CQM | - BSS_CHANGED_QOS | - BSS_CHANGED_IDLE | - BSS_CHANGED_TXPOWER | - BSS_CHANGED_MCAST_RATE; if (sdata->vif.bss_conf.mu_mimo_owner) changed |= BSS_CHANGED_MU_GROUPS; + if (!sdata->vif.valid_links) + changed |= BSS_CHANGED_IDLE; + switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: - changed |= BSS_CHANGED_ASSOC | - BSS_CHANGED_ARP_FILTER | - BSS_CHANGED_PS; - - /* Re-send beacon info report to the driver */ - if (sdata->deflink.u.mgd.have_beacon) - changed |= BSS_CHANGED_BEACON_INFO; - - if (sdata->vif.bss_conf.max_idle_period || - sdata->vif.bss_conf.protected_keep_alive) - changed |= BSS_CHANGED_KEEP_ALIVE; - - sdata_lock(sdata); - ieee80211_bss_info_change_notify(sdata, changed); - sdata_unlock(sdata); + if (!sdata->vif.valid_links) { + changed |= BSS_CHANGED_ASSOC | + BSS_CHANGED_ARP_FILTER | + BSS_CHANGED_PS; + + /* Re-send beacon info report to the driver */ + if (sdata->deflink.u.mgd.have_beacon) + changed |= BSS_CHANGED_BEACON_INFO; + + if (sdata->vif.bss_conf.max_idle_period || + sdata->vif.bss_conf.protected_keep_alive) + changed |= BSS_CHANGED_KEEP_ALIVE; + + if 
(sdata->vif.bss_conf.eht_puncturing) + changed |= BSS_CHANGED_EHT_PUNCTURING; + + ieee80211_bss_info_change_notify(sdata, + changed); + } else if (!WARN_ON(!link)) { + ieee80211_link_info_change_notify(sdata, link, + changed); + changed = BSS_CHANGED_ASSOC | + BSS_CHANGED_IDLE | + BSS_CHANGED_PS | + BSS_CHANGED_ARP_FILTER; + ieee80211_vif_cfg_change_notify(sdata, changed); + } break; case NL80211_IFTYPE_OCB: changed |= BSS_CHANGED_OCB; @@ -2728,6 +2762,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) case NL80211_IFTYPE_NAN: res = ieee80211_reconfig_nan(sdata); if (res < 0) { + sdata_unlock(sdata); ieee80211_handle_reconfig_failure(local); return res; } @@ -2745,6 +2780,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) WARN_ON(1); break; } + sdata_unlock(sdata); + + if (active_links) + ieee80211_set_active_links(&sdata->vif, active_links); } ieee80211_recalc_ps(local); -- cgit v1.2.3 From 91f53ae97cb1a8c1c26f6cbcf9e2dc1cdff9b012 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:13:20 +0200 Subject: wifi: mac80211: remove element scratch_len This isn't used, and there isn't really a good way it could be used, so just remove that. Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 3 --- net/mac80211/util.c | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index a0a7839cb961..007b36c7db53 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2269,8 +2269,6 @@ static inline void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, * (or re-association) response frame if this is given * @from_ap: frame is received from an AP (currently used only * for EHT capabilities parsing) - * @scratch_len: if non zero, specifies the requested length of the scratch - * buffer; otherwise, 'len' is used. */ struct ieee80211_elems_parse_params { const u8 *start; @@ -2281,7 +2279,6 @@ struct ieee80211_elems_parse_params { struct cfg80211_bss *bss; int link_id; bool from_ap; - size_t scratch_len; }; struct ieee802_11_elems * diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 75c517dc8bee..22871e4a9f0f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1604,7 +1604,7 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) const struct element *non_inherit = NULL; u8 *nontransmitted_profile; int nontransmitted_profile_len = 0; - size_t scratch_len = params->scratch_len ?: 3 * params->len; + size_t scratch_len = 3 * params->len; elems = kzalloc(sizeof(*elems) + scratch_len, GFP_ATOMIC); if (!elems) -- cgit v1.2.3 From 08dbff230048ec2812c33e78f81855635a3c1734 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 4 May 2023 16:45:08 +0300 Subject: wifi: mac80211: skip EHT BSS membership selector Skip the EHT BSS membership selector for getting rates. While at it, add the definitions for GLK and EPS, and sort the list. 
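Membership selectors reuse the 0x80 bit that otherwise marks a basic rate in the Supported Rates element, so a rate parser has to recognise and skip them explicitly. A minimal sketch of such a check follows; the helper name is illustrative and not part of this patch.

/* Minimal sketch: return true if a Supported Rates octet is actually a
 * BSS membership selector (0x80 | selector value) rather than a rate.
 */
static bool rate_octet_is_membership_selector(u8 supp_rate)
{
	switch (supp_rate) {
	case 0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY:
	case 0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY:
	case 0x80 | BSS_MEMBERSHIP_SELECTOR_HE_PHY:
	case 0x80 | BSS_MEMBERSHIP_SELECTOR_EHT_PHY:
	case 0x80 | BSS_MEMBERSHIP_SELECTOR_SAE_H2E:
		return true;
	default:
		return false;
	}
}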
Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230504134511.828474-9-gregory.greenman@intel.com Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 5 ++++- net/mac80211/mlme.c | 5 +++-- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index c4cf296e7eaf..c271184a3968 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1349,8 +1349,11 @@ struct ieee80211_mgmt { /* Supported rates membership selectors */ #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 #define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 -#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122 +#define BSS_MEMBERSHIP_SELECTOR_GLK 125 +#define BSS_MEMBERSHIP_SELECTOR_EPS 124 #define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123 +#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122 +#define BSS_MEMBERSHIP_SELECTOR_EHT_PHY 121 /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index e13a0354c397..0020c9e41caa 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3902,8 +3902,8 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband, *have_higher_than_11mbit = true; /* - * Skip HT, VHT, HE and SAE H2E only BSS membership selectors - * since they're not rates. + * Skip HT, VHT, HE, EHT and SAE H2E only BSS membership + * selectors since they're not rates. * * Note: Even though the membership selector and the basic * rate flag share the same bit, they are not exactly @@ -3912,6 +3912,7 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband, if (supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) || supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY) || supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HE_PHY) || + supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_EHT_PHY) || supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_SAE_H2E)) continue; -- cgit v1.2.3 From ce2bb3b66273d7d122c5dbf8f1e58e8ebc82c5fb Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 4 May 2023 16:45:10 +0300 Subject: wifi: mac80211: fetch and store the EML capability information We need to teach the low level driver about the EML capability which includes information for EMLSR / EMLMR operation. Signed-off-by: Emmanuel Grumbach Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230504134511.828474-11-gregory.greenman@intel.com Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 35 +++++++++++++++++++++++++++++++++++ include/net/mac80211.h | 2 ++ net/mac80211/mlme.c | 11 +++++++++++ 3 files changed, 48 insertions(+) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index c271184a3968..fba4c44da832 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4635,6 +4635,41 @@ static inline u8 ieee80211_mle_common_size(const u8 *data) return sizeof(*mle) + common + mle->variable[0]; } +/** + * ieee80211_mle_get_eml_cap - returns the EML capability + * @data: pointer to the multi link EHT IE + * + * The element is assumed to be big enough. This must be checked by + * ieee80211_mle_size_ok(). + * If the EML capability can't be found (the type is not basic, or + * the EML capability presence bit is clear), 0 will be returned. 
+ */ +static inline u16 ieee80211_mle_get_eml_cap(const u8 *data) +{ + const struct ieee80211_multi_link_elem *mle = (const void *)data; + u16 control = le16_to_cpu(mle->control); + const u8 *common = mle->variable; + + if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) != + IEEE80211_ML_CONTROL_TYPE_BASIC) + return 0; + + /* common points now at the beginning of ieee80211_mle_basic_common_info */ + common += sizeof(struct ieee80211_mle_basic_common_info); + + if (!(control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)) + return 0; + + if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID) + common += 1; + if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT) + common += 1; + if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY) + common += 2; + + return get_unaligned_le16(common); +} + /** * ieee80211_mle_size_ok - validate multi-link element size * @data: pointer to the element data diff --git a/include/net/mac80211.h b/include/net/mac80211.h index ac0370e76874..f75d941eece8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1790,6 +1790,7 @@ enum ieee80211_offload_flags { * @ps: power-save mode (STA only). This flag is NOT affected by * offchannel/dynamic_ps operations. * @aid: association ID number, valid only when @assoc is true + * @eml_cap: EML capabilities as described in P802.11be_D2.2 Figure 9-1002k. * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The * may filter ARP queries targeted for other addresses than listed here. * The driver must allow ARP queries targeted for all address listed here @@ -1812,6 +1813,7 @@ struct ieee80211_vif_cfg { bool ibss_creator; bool ps; u16 aid; + u16 eml_cap; __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; int arp_addr_cnt; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 0020c9e41caa..4ea383aafcac 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4838,6 +4838,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, IEEE80211_CONN_DISABLE_EHT)) && he_oper) { const struct cfg80211_bss_ies *cbss_ies; + const struct element *eht_ml_elem; const u8 *eht_oper_ie; cbss_ies = rcu_dereference(cbss->ies); @@ -4848,6 +4849,16 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, eht_oper = (void *)(eht_oper_ie + 3); else eht_oper = NULL; + + eht_ml_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK, + cbss_ies->data, cbss_ies->len); + + /* data + 1 / datalen - 1 since it's an extended element */ + if (eht_ml_elem && + ieee80211_mle_size_ok(eht_ml_elem->data + 1, + eht_ml_elem->datalen - 1)) + sdata->vif.cfg.eml_cap = + ieee80211_mle_get_eml_cap(eht_ml_elem->data + 1); } /* Allow VHT if at least one channel on the sband supports 80 MHz */ -- cgit v1.2.3 From 61403414e1719f929386dda8fb954bb302628ef3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 4 May 2023 16:45:11 +0300 Subject: wifi: mac80211: implement proper AP MLD HW restart Previously, I didn't implement restarting here at all if the interface is an MLD, so it only worked for non-MLO. Add the needed code to restart an AP MLD correctly. 
Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230504134511.828474-12-gregory.greenman@intel.com Signed-off-by: Johannes Berg --- net/mac80211/util.c | 44 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 22871e4a9f0f..7a9abe8bf438 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2475,6 +2475,35 @@ static int ieee80211_reconfig_nan(struct ieee80211_sub_if_data *sdata) return 0; } +static void ieee80211_reconfig_ap_links(struct ieee80211_local *local, + struct ieee80211_sub_if_data *sdata, + u32 changed) +{ + int link_id; + + for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) { + struct ieee80211_link_data *link; + + if (!(sdata->vif.active_links & BIT(link_id))) + continue; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + continue; + + if (rcu_access_pointer(link->u.ap.beacon)) + drv_start_ap(local, sdata, link->conf); + + if (!link->conf->enable_beacon) + continue; + + changed |= BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED; + + ieee80211_link_info_change_notify(sdata, link, changed); + } +} + int ieee80211_reconfig(struct ieee80211_local *local) { struct ieee80211_hw *hw = &local->hw; @@ -2737,7 +2766,13 @@ int ieee80211_reconfig(struct ieee80211_local *local) changed |= BSS_CHANGED_IBSS; fallthrough; case NL80211_IFTYPE_AP: - changed |= BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS; + changed |= BSS_CHANGED_P2P_PS; + + if (sdata->vif.valid_links) + ieee80211_vif_cfg_change_notify(sdata, + BSS_CHANGED_SSID); + else + changed |= BSS_CHANGED_SSID; if (sdata->vif.bss_conf.ftm_responder == 1 && wiphy_ext_feature_isset(sdata->local->hw.wiphy, @@ -2747,6 +2782,13 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (sdata->vif.type == NL80211_IFTYPE_AP) { changed |= BSS_CHANGED_AP_PROBE_RESP; + if (sdata->vif.valid_links) { + ieee80211_reconfig_ap_links(local, + sdata, + changed); + break; + } + if (rcu_access_pointer(sdata->deflink.u.ap.beacon)) drv_start_ap(local, sdata, sdata->deflink.conf); -- cgit v1.2.3 From 2a5325f802863c399fe40de935ba01195d4c43c8 Mon Sep 17 00:00:00 2001 From: Mukesh Sisodiya Date: Sun, 4 Jun 2023 12:11:13 +0300 Subject: wifi: mac80211: use u64 to hold enum ieee80211_bss_change flags The size of enum ieee80211_bss_change is bigger that 32, so we need u64 to be used in a flag. Also pass u64 instead of u32 to ieee80211_reconfig_ap_links() for the same reason. 
Signed-off-by: Mukesh Sisodiya Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230604120651.d53b7018a4eb.I1adaa041de51d50d84a11226573e81ceac0fe90d@changeid Signed-off-by: Johannes Berg --- net/mac80211/util.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 7a9abe8bf438..3f24b6e59652 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2477,7 +2477,7 @@ static int ieee80211_reconfig_nan(struct ieee80211_sub_if_data *sdata) static void ieee80211_reconfig_ap_links(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, - u32 changed) + u64 changed) { int link_id; @@ -2654,7 +2654,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) /* Finally also reconfigure all the BSS information */ list_for_each_entry(sdata, &local->interfaces, list) { /* common change flags for all interface types - link only */ - u32 changed = BSS_CHANGED_ERP_CTS_PROT | + u64 changed = BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_SLOT | BSS_CHANGED_HT | -- cgit v1.2.3 From 1d10575bced5b65e138b029a35e6ef657f3b3273 Mon Sep 17 00:00:00 2001 From: Mukesh Sisodiya Date: Sun, 4 Jun 2023 12:11:14 +0300 Subject: wifi: mac80211: refactor ieee80211_select_link_key() Simplify ieee80211_select_link_key(), no functional changes are made. Signed-off-by: Mukesh Sisodiya Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230604120651.a4e332755bb0.Iff4a2b6ed767b2a329c51c29bb597ece9ebe2af8@changeid Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 1a3327407552..e809c4236f78 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -581,25 +581,9 @@ ieee80211_select_link_key(struct ieee80211_tx_data *tx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); - enum { - USE_NONE, - USE_MGMT_KEY, - USE_MCAST_KEY, - } which_key = USE_NONE; struct ieee80211_link_data *link; unsigned int link_id; - if (ieee80211_is_group_privacy_action(tx->skb)) - which_key = USE_MCAST_KEY; - else if (ieee80211_is_mgmt(hdr->frame_control) && - is_multicast_ether_addr(hdr->addr1) && - ieee80211_is_robust_mgmt_frame(tx->skb)) - which_key = USE_MGMT_KEY; - else if (is_multicast_ether_addr(hdr->addr1)) - which_key = USE_MCAST_KEY; - else - return NULL; - link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK); if (link_id == IEEE80211_LINK_UNSPECIFIED) { link = &tx->sdata->deflink; @@ -609,14 +593,14 @@ ieee80211_select_link_key(struct ieee80211_tx_data *tx) return NULL; } - switch (which_key) { - case USE_NONE: - break; - case USE_MGMT_KEY: + if (ieee80211_is_group_privacy_action(tx->skb)) + return rcu_dereference(link->default_multicast_key); + else if (ieee80211_is_mgmt(hdr->frame_control) && + is_multicast_ether_addr(hdr->addr1) && + ieee80211_is_robust_mgmt_frame(tx->skb)) return rcu_dereference(link->default_mgmt_key); - case USE_MCAST_KEY: + else if (is_multicast_ether_addr(hdr->addr1)) return rcu_dereference(link->default_multicast_key); - } return NULL; } -- cgit v1.2.3 From 29c6e2dc3d12a188a48f2a45759e8da44840546b Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 4 Jun 2023 12:11:17 +0300 Subject: wifi: mac80211: provide a helper to fetch the medium synchronization delay There are drivers which need this information. 
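A rough sketch of how a low-level driver might consume the values that this patch and the earlier EML capability patch store in the vif configuration; the function name, callback context and debug print are assumptions, not part of the patch.

/* Minimal sketch, assuming the driver is handed the vif after association:
 * read the EML capability and medium synchronization delay that mac80211
 * now stores in the vif configuration.
 */
static void drv_note_eml_params(struct ieee80211_vif *vif)
{
	u16 eml_cap = vif->cfg.eml_cap;
	u16 med_sync_delay = vif->cfg.eml_med_sync_delay;

	pr_debug("EML capa 0x%04x, medium sync delay %u\n",
		 eml_cap, med_sync_delay);
}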
Signed-off-by: Emmanuel Grumbach Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230604120651.b1043f3126e2.Iad3806f8bf8df07f52ef0a02cc3d0373c44a8c93@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 35 +++++++++++++++++++++++++++++++++++ include/net/mac80211.h | 3 +++ net/mac80211/mlme.c | 5 ++++- 3 files changed, 42 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index fba4c44da832..516cd32d6196 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4635,6 +4635,41 @@ static inline u8 ieee80211_mle_common_size(const u8 *data) return sizeof(*mle) + common + mle->variable[0]; } +/** + * ieee80211_mle_get_eml_sync_delay - returns the medium sync delay + * @data: pointer to the multi link EHT IE + * + * The element is assumed to be big enough. This must be checked by + * ieee80211_mle_size_ok(). + * If the medium synchronization can't be found (the type is not basic, or + * the medium sync presence bit is clear), 0 will be returned. + */ +static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data) +{ + const struct ieee80211_multi_link_elem *mle = (const void *)data; + u16 control = le16_to_cpu(mle->control); + const u8 *common = mle->variable; + + if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) != + IEEE80211_ML_CONTROL_TYPE_BASIC) + return 0; + + /* common points now at the beginning of + * ieee80211_mle_basic_common_info + */ + common += sizeof(struct ieee80211_mle_basic_common_info); + + if (!(control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)) + return 0; + + if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID) + common += 1; + if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT) + common += 1; + + return get_unaligned_le16(common); +} + /** * ieee80211_mle_get_eml_cap - returns the EML capability * @data: pointer to the multi link EHT IE diff --git a/include/net/mac80211.h b/include/net/mac80211.h index f75d941eece8..f4516c034da2 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1791,6 +1791,8 @@ enum ieee80211_offload_flags { * offchannel/dynamic_ps operations. * @aid: association ID number, valid only when @assoc is true * @eml_cap: EML capabilities as described in P802.11be_D2.2 Figure 9-1002k. + * @eml_med_sync_delay: Medium Synchronization delay as described in + * P802.11be_D2.2 Figure 9-1002j. * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The * may filter ARP queries targeted for other addresses than listed here. 
* The driver must allow ARP queries targeted for all address listed here @@ -1814,6 +1816,7 @@ struct ieee80211_vif_cfg { bool ps; u16 aid; u16 eml_cap; + u16 eml_med_sync_delay; __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; int arp_addr_cnt; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 4ea383aafcac..56c375213202 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4856,9 +4856,12 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, /* data + 1 / datalen - 1 since it's an extended element */ if (eht_ml_elem && ieee80211_mle_size_ok(eht_ml_elem->data + 1, - eht_ml_elem->datalen - 1)) + eht_ml_elem->datalen - 1)) { sdata->vif.cfg.eml_cap = ieee80211_mle_get_eml_cap(eht_ml_elem->data + 1); + sdata->vif.cfg.eml_med_sync_delay = + ieee80211_mle_get_eml_med_sync_delay(eht_ml_elem->data + 1); + } } /* Allow VHT if at least one channel on the sband supports 80 MHz */ -- cgit v1.2.3 From ba7af2654e3b7b810c750b3c6106f6f20b81cc88 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 4 Jun 2023 12:11:20 +0300 Subject: wifi: mac80211: recalc min chandef for new STA links When adding a new link to a station, this needs to cause a recalculation of the minimum chandef since otherwise we can have a higher bandwidth station connected on that link than the link is operating at. Do the appropriate recalc. Fixes: cb71f1d136a6 ("wifi: mac80211: add sta link addition/removal") Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230604120651.377adf3c789a.I91bf28f399e16e6ac1f83bacd1029a698b4e6685@changeid Signed-off-by: Johannes Berg --- net/mac80211/sta_info.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 1400512e0dde..a1cd5c234f47 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2913,6 +2913,8 @@ int ieee80211_sta_activate_link(struct sta_info *sta, unsigned int link_id) if (!test_sta_flag(sta, WLAN_STA_INSERTED)) goto hash; + ieee80211_recalc_min_chandef(sdata, link_id); + /* Ensure the values are updated for the driver, * redone by sta_remove_link on failure. */ -- cgit v1.2.3 From 10a7ba92c7ab6cbac3c805ab3ee0642e91f1da97 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 4 Jun 2023 12:11:21 +0300 Subject: wifi: mac80211: move sta_info_move_state() up To fix a sequencing issue, this code needs to be changed a bit. Move it up in the file to prepare for that. 
Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230604120651.05bb735d7075.I984b5c194a0f84580247d73620a4e61a5f82a774@changeid Signed-off-by: Johannes Berg --- net/mac80211/sta_info.c | 201 ++++++++++++++++++++++++------------------------ 1 file changed, 100 insertions(+), 101 deletions(-) (limited to 'net') diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a1cd5c234f47..dc76bc40350f 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -4,7 +4,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include @@ -1274,6 +1274,105 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta) return 0; } +int sta_info_move_state(struct sta_info *sta, + enum ieee80211_sta_state new_state) +{ + might_sleep(); + + if (sta->sta_state == new_state) + return 0; + + /* check allowed transitions first */ + + switch (new_state) { + case IEEE80211_STA_NONE: + if (sta->sta_state != IEEE80211_STA_AUTH) + return -EINVAL; + break; + case IEEE80211_STA_AUTH: + if (sta->sta_state != IEEE80211_STA_NONE && + sta->sta_state != IEEE80211_STA_ASSOC) + return -EINVAL; + break; + case IEEE80211_STA_ASSOC: + if (sta->sta_state != IEEE80211_STA_AUTH && + sta->sta_state != IEEE80211_STA_AUTHORIZED) + return -EINVAL; + break; + case IEEE80211_STA_AUTHORIZED: + if (sta->sta_state != IEEE80211_STA_ASSOC) + return -EINVAL; + break; + default: + WARN(1, "invalid state %d", new_state); + return -EINVAL; + } + + sta_dbg(sta->sdata, "moving STA %pM to state %d\n", + sta->sta.addr, new_state); + + /* notify the driver before the actual changes so it can + * fail the transition + */ + if (test_sta_flag(sta, WLAN_STA_INSERTED)) { + int err = drv_sta_state(sta->local, sta->sdata, sta, + sta->sta_state, new_state); + if (err) + return err; + } + + /* reflect the change in all state variables */ + + switch (new_state) { + case IEEE80211_STA_NONE: + if (sta->sta_state == IEEE80211_STA_AUTH) + clear_bit(WLAN_STA_AUTH, &sta->_flags); + break; + case IEEE80211_STA_AUTH: + if (sta->sta_state == IEEE80211_STA_NONE) { + set_bit(WLAN_STA_AUTH, &sta->_flags); + } else if (sta->sta_state == IEEE80211_STA_ASSOC) { + clear_bit(WLAN_STA_ASSOC, &sta->_flags); + ieee80211_recalc_min_chandef(sta->sdata, -1); + if (!sta->sta.support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); + } + break; + case IEEE80211_STA_ASSOC: + if (sta->sta_state == IEEE80211_STA_AUTH) { + set_bit(WLAN_STA_ASSOC, &sta->_flags); + sta->assoc_at = ktime_get_boottime_ns(); + ieee80211_recalc_min_chandef(sta->sdata, -1); + if (!sta->sta.support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); + } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { + ieee80211_vif_dec_num_mcast(sta->sdata); + clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); + ieee80211_clear_fast_xmit(sta); + ieee80211_clear_fast_rx(sta); + } + break; + case IEEE80211_STA_AUTHORIZED: + if (sta->sta_state == IEEE80211_STA_ASSOC) { + ieee80211_vif_inc_num_mcast(sta->sdata); + set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); + ieee80211_check_fast_xmit(sta); + ieee80211_check_fast_rx(sta); + } + if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sta->sdata->vif.type == NL80211_IFTYPE_AP) + cfg80211_send_layer2_update(sta->sdata->dev, + sta->sta.addr); + break; + default: + break; + } + + sta->sta_state = new_state; + + 
return 0; +} + static void __sta_info_destroy_part2(struct sta_info *sta) { struct ieee80211_local *local = sta->local; @@ -2252,106 +2351,6 @@ void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, } } -int sta_info_move_state(struct sta_info *sta, - enum ieee80211_sta_state new_state) -{ - might_sleep(); - - if (sta->sta_state == new_state) - return 0; - - /* check allowed transitions first */ - - switch (new_state) { - case IEEE80211_STA_NONE: - if (sta->sta_state != IEEE80211_STA_AUTH) - return -EINVAL; - break; - case IEEE80211_STA_AUTH: - if (sta->sta_state != IEEE80211_STA_NONE && - sta->sta_state != IEEE80211_STA_ASSOC) - return -EINVAL; - break; - case IEEE80211_STA_ASSOC: - if (sta->sta_state != IEEE80211_STA_AUTH && - sta->sta_state != IEEE80211_STA_AUTHORIZED) - return -EINVAL; - break; - case IEEE80211_STA_AUTHORIZED: - if (sta->sta_state != IEEE80211_STA_ASSOC) - return -EINVAL; - break; - default: - WARN(1, "invalid state %d", new_state); - return -EINVAL; - } - - sta_dbg(sta->sdata, "moving STA %pM to state %d\n", - sta->sta.addr, new_state); - - /* - * notify the driver before the actual changes so it can - * fail the transition - */ - if (test_sta_flag(sta, WLAN_STA_INSERTED)) { - int err = drv_sta_state(sta->local, sta->sdata, sta, - sta->sta_state, new_state); - if (err) - return err; - } - - /* reflect the change in all state variables */ - - switch (new_state) { - case IEEE80211_STA_NONE: - if (sta->sta_state == IEEE80211_STA_AUTH) - clear_bit(WLAN_STA_AUTH, &sta->_flags); - break; - case IEEE80211_STA_AUTH: - if (sta->sta_state == IEEE80211_STA_NONE) { - set_bit(WLAN_STA_AUTH, &sta->_flags); - } else if (sta->sta_state == IEEE80211_STA_ASSOC) { - clear_bit(WLAN_STA_ASSOC, &sta->_flags); - ieee80211_recalc_min_chandef(sta->sdata, -1); - if (!sta->sta.support_p2p_ps) - ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); - } - break; - case IEEE80211_STA_ASSOC: - if (sta->sta_state == IEEE80211_STA_AUTH) { - set_bit(WLAN_STA_ASSOC, &sta->_flags); - sta->assoc_at = ktime_get_boottime_ns(); - ieee80211_recalc_min_chandef(sta->sdata, -1); - if (!sta->sta.support_p2p_ps) - ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); - } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { - ieee80211_vif_dec_num_mcast(sta->sdata); - clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); - ieee80211_clear_fast_xmit(sta); - ieee80211_clear_fast_rx(sta); - } - break; - case IEEE80211_STA_AUTHORIZED: - if (sta->sta_state == IEEE80211_STA_ASSOC) { - ieee80211_vif_inc_num_mcast(sta->sdata); - set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); - ieee80211_check_fast_xmit(sta); - ieee80211_check_fast_rx(sta); - } - if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN || - sta->sdata->vif.type == NL80211_IFTYPE_AP) - cfg80211_send_layer2_update(sta->sdata->dev, - sta->sta.addr); - break; - default: - break; - } - - sta->sta_state = new_state; - - return 0; -} - static struct ieee80211_sta_rx_stats * sta_get_last_rx_stats(struct sta_info *sta) { -- cgit v1.2.3 From 92747f17c431a75967b461bedbb36ff259acead1 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 4 Jun 2023 12:11:22 +0300 Subject: wifi: mac80211: batch recalc during STA flush When we flush stations, we first take them off the list and then destroy them one by one. 
If we do the different mode recalculations while destroying them, we cause the following scenario: - STA 1 has 80 MHz - min chanctx width is now 80 MHz - STA 2 has 80 MHz - empty STA list - destroy STA 2 - recalc min chanctx width -> results in 20 MHz as the STA list is already empty This is broken, since as far as the driver is concerned STA 1 still exists at this point, and this causes issues at least with iwlwifi. Fix - and also optimize - this by doing the recalc of min chanctx width (and also P2P PS) only after all the stations were removed. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230604120651.48d262b6b42d.Ia15532657c17535c28ec0c5df263b65f0f80663c@changeid Signed-off-by: Johannes Berg --- net/mac80211/sta_info.c | 48 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 34 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index dc76bc40350f..731b832b257c 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1274,8 +1274,9 @@ static int __must_check __sta_info_destroy_part1(struct sta_info *sta) return 0; } -int sta_info_move_state(struct sta_info *sta, - enum ieee80211_sta_state new_state) +static int _sta_info_move_state(struct sta_info *sta, + enum ieee80211_sta_state new_state, + bool recalc) { might_sleep(); @@ -1333,18 +1334,22 @@ int sta_info_move_state(struct sta_info *sta, set_bit(WLAN_STA_AUTH, &sta->_flags); } else if (sta->sta_state == IEEE80211_STA_ASSOC) { clear_bit(WLAN_STA_ASSOC, &sta->_flags); - ieee80211_recalc_min_chandef(sta->sdata, -1); - if (!sta->sta.support_p2p_ps) - ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); + if (recalc) { + ieee80211_recalc_min_chandef(sta->sdata, -1); + if (!sta->sta.support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); + } } break; case IEEE80211_STA_ASSOC: if (sta->sta_state == IEEE80211_STA_AUTH) { set_bit(WLAN_STA_ASSOC, &sta->_flags); sta->assoc_at = ktime_get_boottime_ns(); - ieee80211_recalc_min_chandef(sta->sdata, -1); - if (!sta->sta.support_p2p_ps) - ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); + if (recalc) { + ieee80211_recalc_min_chandef(sta->sdata, -1); + if (!sta->sta.support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sta->sdata); + } } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { ieee80211_vif_dec_num_mcast(sta->sdata); clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); @@ -1373,7 +1378,13 @@ int sta_info_move_state(struct sta_info *sta, return 0; } -static void __sta_info_destroy_part2(struct sta_info *sta) +int sta_info_move_state(struct sta_info *sta, + enum ieee80211_sta_state new_state) +{ + return _sta_info_move_state(sta, new_state, true); +} + +static void __sta_info_destroy_part2(struct sta_info *sta, bool recalc) { struct ieee80211_local *local = sta->local; struct ieee80211_sub_if_data *sdata = sta->sdata; @@ -1389,7 +1400,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta) lockdep_assert_held(&local->sta_mtx); if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { - ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); + ret = _sta_info_move_state(sta, IEEE80211_STA_ASSOC, recalc); WARN_ON_ONCE(ret); } @@ -1417,7 +1428,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta) local->sta_generation++; while (sta->sta_state > IEEE80211_STA_NONE) { - ret = sta_info_move_state(sta, sta->sta_state - 1); + ret = _sta_info_move_state(sta, sta->sta_state - 1, recalc); if (ret) { WARN_ON_ONCE(1); break; @@ -1454,7 +1465,7 @@ int __must_check 
__sta_info_destroy(struct sta_info *sta) synchronize_net(); - __sta_info_destroy_part2(sta); + __sta_info_destroy_part2(sta, true); return 0; } @@ -1561,9 +1572,18 @@ int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans) } if (!list_empty(&free_list)) { + bool support_p2p_ps = true; + synchronize_net(); - list_for_each_entry_safe(sta, tmp, &free_list, free_list) - __sta_info_destroy_part2(sta); + list_for_each_entry_safe(sta, tmp, &free_list, free_list) { + if (!sta->sta.support_p2p_ps) + support_p2p_ps = false; + __sta_info_destroy_part2(sta, false); + } + + ieee80211_recalc_min_chandef(sdata, -1); + if (!support_p2p_ps) + ieee80211_recalc_p2p_go_ps_allowed(sdata); } mutex_unlock(&local->sta_mtx); -- cgit v1.2.3 From 15ddba5f43114c1fd9cd83676e04a9e1acf8e37f Mon Sep 17 00:00:00 2001 From: Anjaneyulu Date: Sun, 4 Jun 2023 12:11:26 +0300 Subject: wifi: mac80211: consistently use u64 for BSS changes Currently, enum ieee80211_bss_change has more than 32 flags. Change the type of the corresponding variables from u32 to u64. Signed-off-by: Anjaneyulu Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230604120651.10354a05eaf1.If19359262fe2728dd523ea6d7c3aa7dc50940411@changeid Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 79 ++++++++++++++++++++++------------------------ net/mac80211/chan.c | 4 +-- net/mac80211/ibss.c | 16 ++++++---- net/mac80211/ieee80211_i.h | 14 +++++--- net/mac80211/iface.c | 4 +-- net/mac80211/main.c | 4 +-- net/mac80211/mesh.c | 30 ++++++++++-------- net/mac80211/mesh.h | 19 +++++------ net/mac80211/mesh_plink.c | 37 +++++++++++----------- net/mac80211/mesh_ps.c | 7 ++-- net/mac80211/mlme.c | 12 +++---- net/mac80211/ocb.c | 4 +-- 12 files changed, 119 insertions(+), 111 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 7317e4a5d1ff..801aaa62b68f 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1101,18 +1101,20 @@ ieee80211_copy_rnr_beacon(u8 *pos, struct cfg80211_rnr_elems *dst, return offset; } -static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, - struct ieee80211_link_data *link, - struct cfg80211_beacon_data *params, - const struct ieee80211_csa_settings *csa, - const struct ieee80211_color_change_settings *cca) +static int +ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, + struct ieee80211_link_data *link, + struct cfg80211_beacon_data *params, + const struct ieee80211_csa_settings *csa, + const struct ieee80211_color_change_settings *cca, + u64 *changed) { struct cfg80211_mbssid_elems *mbssid = NULL; struct cfg80211_rnr_elems *rnr = NULL; struct beacon_data *new, *old; int new_head_len, new_tail_len; int size, err; - u32 changed = BSS_CHANGED_BEACON; + u64 _changed = BSS_CHANGED_BEACON; struct ieee80211_bss_conf *link_conf = link->conf; old = sdata_dereference(link->u.ap.beacon, sdata); @@ -1219,7 +1221,7 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, return err; } if (err == 0) - changed |= BSS_CHANGED_AP_PROBE_RESP; + _changed |= BSS_CHANGED_AP_PROBE_RESP; if (params->ftm_responder != -1) { link_conf->ftm_responder = params->ftm_responder; @@ -1235,7 +1237,7 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, return err; } - changed |= BSS_CHANGED_FTM_RESPONDER; + _changed |= BSS_CHANGED_FTM_RESPONDER; } rcu_assign_pointer(link->u.ap.beacon, new); @@ -1244,7 +1246,8 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata, if (old) kfree_rcu(old, rcu_head); - return changed; + 
*changed |= _changed; + return 0; } static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, @@ -1446,10 +1449,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) link_conf->beacon_tx_rate = params->beacon_rate; - err = ieee80211_assign_beacon(sdata, link, ¶ms->beacon, NULL, NULL); + err = ieee80211_assign_beacon(sdata, link, ¶ms->beacon, NULL, NULL, + &changed); if (err < 0) goto error; - changed |= err; if (params->fils_discovery.max_interval) { err = ieee80211_set_fils_discovery(sdata, @@ -1506,6 +1509,7 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, struct beacon_data *old; int err; struct ieee80211_bss_conf *link_conf; + u64 changed = 0; sdata_assert_lock(sdata); @@ -1525,17 +1529,18 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev, if (!old) return -ENOENT; - err = ieee80211_assign_beacon(sdata, link, params, NULL, NULL); + err = ieee80211_assign_beacon(sdata, link, params, NULL, NULL, + &changed); if (err < 0) return err; if (params->he_bss_color_valid && params->he_bss_color.enabled != link_conf->he_bss_color.enabled) { link_conf->he_bss_color.enabled = params->he_bss_color.enabled; - err |= BSS_CHANGED_HE_BSS_COLOR; + changed |= BSS_CHANGED_HE_BSS_COLOR; } - ieee80211_link_info_change_notify(sdata, link, err); + ieee80211_link_info_change_notify(sdata, link, changed); return 0; } @@ -1717,7 +1722,7 @@ static void sta_apply_mesh_params(struct ieee80211_local *local, { #ifdef CONFIG_MAC80211_MESH struct ieee80211_sub_if_data *sdata = sta->sdata; - u32 changed = 0; + u64 changed = 0; if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) { switch (params->plink_state) { @@ -2664,7 +2669,7 @@ static int ieee80211_change_bss(struct wiphy *wiphy, struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_link_data *link; struct ieee80211_supported_band *sband; - u32 changed = 0; + u64 changed = 0; link = ieee80211_link_or_deflink(sdata, params->link_id, true); if (IS_ERR(link)) @@ -3589,7 +3594,7 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_t EXPORT_SYMBOL(ieee80211_channel_switch_disconnect); static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata, - u32 *changed) + u64 *changed) { int err; @@ -3600,25 +3605,22 @@ static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata, err = ieee80211_assign_beacon(sdata, &sdata->deflink, sdata->deflink.u.ap.next_beacon, - NULL, NULL); + NULL, NULL, changed); ieee80211_free_next_beacon(&sdata->deflink); if (err < 0) return err; - *changed |= err; break; case NL80211_IFTYPE_ADHOC: - err = ieee80211_ibss_finish_csa(sdata); + err = ieee80211_ibss_finish_csa(sdata, changed); if (err < 0) return err; - *changed |= err; break; #ifdef CONFIG_MAC80211_MESH case NL80211_IFTYPE_MESH_POINT: - err = ieee80211_mesh_finish_csa(sdata); + err = ieee80211_mesh_finish_csa(sdata, changed); if (err < 0) return err; - *changed |= err; break; #endif default: @@ -3632,7 +3634,7 @@ static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata, static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; - u32 changed = 0; + u64 changed = 0; int err; sdata_assert_lock(sdata); @@ -3729,7 +3731,7 @@ unlock: static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *params, - u32 
*changed) + u64 *changed) { struct ieee80211_csa_settings csa = {}; int err; @@ -3776,12 +3778,11 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata, err = ieee80211_assign_beacon(sdata, &sdata->deflink, ¶ms->beacon_csa, &csa, - NULL); + NULL, changed); if (err < 0) { ieee80211_free_next_beacon(&sdata->deflink); return err; } - *changed |= err; break; case NL80211_IFTYPE_ADHOC: @@ -3813,10 +3814,9 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata, /* see comments in the NL80211_IFTYPE_AP block */ if (params->count > 1) { - err = ieee80211_ibss_csa_beacon(sdata, params); + err = ieee80211_ibss_csa_beacon(sdata, params, changed); if (err < 0) return err; - *changed |= err; } ieee80211_send_action_csa(sdata, params); @@ -3841,12 +3841,11 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata, /* see comments in the NL80211_IFTYPE_AP block */ if (params->count > 1) { - err = ieee80211_mesh_csa_beacon(sdata, params); + err = ieee80211_mesh_csa_beacon(sdata, params, changed); if (err < 0) { ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE; return err; } - *changed |= err; } if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT) @@ -3880,7 +3879,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel_switch ch_switch; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *chanctx; - u32 changed = 0; + u64 changed = 0; int err; sdata_assert_lock(sdata); @@ -4613,7 +4612,7 @@ static int ieee80211_set_sar_specs(struct wiphy *wiphy, static int ieee80211_set_after_color_change_beacon(struct ieee80211_sub_if_data *sdata, - u32 *changed) + u64 *changed) { switch (sdata->vif.type) { case NL80211_IFTYPE_AP: { @@ -4624,13 +4623,12 @@ ieee80211_set_after_color_change_beacon(struct ieee80211_sub_if_data *sdata, ret = ieee80211_assign_beacon(sdata, &sdata->deflink, sdata->deflink.u.ap.next_beacon, - NULL, NULL); + NULL, NULL, changed); ieee80211_free_next_beacon(&sdata->deflink); if (ret < 0) return ret; - *changed |= ret; break; } default: @@ -4644,7 +4642,7 @@ ieee80211_set_after_color_change_beacon(struct ieee80211_sub_if_data *sdata, static int ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_color_change_settings *params, - u32 *changed) + u64 *changed) { struct ieee80211_color_change_settings color_change = {}; int err; @@ -4667,12 +4665,11 @@ ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata, err = ieee80211_assign_beacon(sdata, &sdata->deflink, ¶ms->beacon_color_change, - NULL, &color_change); + NULL, &color_change, changed); if (err < 0) { ieee80211_free_next_beacon(&sdata->deflink); return err; } - *changed |= err; break; default: return -EOPNOTSUPP; @@ -4683,7 +4680,7 @@ ieee80211_set_color_change_beacon(struct ieee80211_sub_if_data *sdata, static void ieee80211_color_change_bss_config_notify(struct ieee80211_sub_if_data *sdata, - u8 color, int enable, u32 changed) + u8 color, int enable, u64 changed) { sdata->vif.bss_conf.he_bss_color.color = color; sdata->vif.bss_conf.he_bss_color.enabled = enable; @@ -4711,7 +4708,7 @@ ieee80211_color_change_bss_config_notify(struct ieee80211_sub_if_data *sdata, static int ieee80211_color_change_finalize(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; - u32 changed = 0; + u64 changed = 0; int err; sdata_assert_lock(sdata); @@ -4808,7 +4805,7 @@ ieee80211_color_change(struct wiphy *wiphy, struct net_device *dev, { struct ieee80211_sub_if_data *sdata = 
IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; - u32 changed = 0; + u64 changed = 0; int err; sdata_assert_lock(sdata); diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index dbc34fbe7c8f..88b24a69b891 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -1238,7 +1238,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link) struct ieee80211_vif_chanctx_switch vif_chsw[1] = {}; struct ieee80211_chanctx *old_ctx, *new_ctx; const struct cfg80211_chan_def *chandef; - u32 changed = 0; + u64 changed = 0; int err; lockdep_assert_held(&local->mtx); @@ -1634,7 +1634,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) reserved_chanctx_list) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_bss_conf *link_conf = link->conf; - u32 changed = 0; + u64 changed = 0; if (!ieee80211_link_has_in_place_reservation(link)) continue; diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 9dffc3079588..faa01ee11d32 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -9,7 +9,7 @@ * Copyright 2009, Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2018-2022 Intel Corporation + * Copyright(c) 2018-2023 Intel Corporation */ #include @@ -226,7 +226,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct ieee80211_mgmt *mgmt; struct cfg80211_bss *bss; - u32 bss_change; + u64 bss_change; struct cfg80211_chan_def chandef; struct ieee80211_channel *chan; struct beacon_data *presp; @@ -478,7 +478,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, } int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, - struct cfg80211_csa_settings *csa_settings) + struct cfg80211_csa_settings *csa_settings, + u64 *changed) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct beacon_data *presp, *old_presp; @@ -520,10 +521,11 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, if (old_presp) kfree_rcu(old_presp, rcu_head); - return BSS_CHANGED_BEACON; + *changed |= BSS_CHANGED_BEACON; + return 0; } -int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata) +int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; struct cfg80211_bss *cbss; @@ -552,7 +554,7 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata) ifibss->chandef = sdata->deflink.csa_chandef; /* generate the beacon */ - return ieee80211_ibss_csa_beacon(sdata, NULL); + return ieee80211_ibss_csa_beacon(sdata, NULL, changed); } void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata) @@ -1754,7 +1756,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, struct cfg80211_ibss_params *params) { - u32 changed = 0; + u64 changed = 0; u32 rate_flags; struct ieee80211_supported_band *sband; enum ieee80211_chanctx_mode chanmode; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 007b36c7db53..c80e552b6197 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1827,7 +1827,7 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, u64 changed); void ieee80211_configure_filter(struct ieee80211_local *local); -u32 ieee80211_reset_erp_info(struct 
ieee80211_sub_if_data *sdata); +u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata); u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local); int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb, @@ -1887,8 +1887,10 @@ void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata); void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, - struct cfg80211_csa_settings *csa_settings); -int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata); + struct cfg80211_csa_settings *csa_settings, + u64 *changed); +int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, + u64 *changed); void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata); /* OCB code */ @@ -1905,8 +1907,10 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata); void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, - struct cfg80211_csa_settings *csa_settings); -int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata); + struct cfg80211_csa_settings *csa_settings, + u64 *changed); +int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, + u64 *changed); /* scan/BSS handling */ void ieee80211_scan_work(struct work_struct *work); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index bd2c48870add..f1e01f7a9b2e 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -8,7 +8,7 @@ * Copyright 2008, Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (c) 2016 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include #include @@ -1221,7 +1221,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); struct net_device *dev = wdev->netdev; struct ieee80211_local *local = sdata->local; - u32 changed = 0; + u64 changed = 0; int res; u32 hw_reconf_flags = 0; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 55cdfaef0f5d..3db178b1d50c 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -5,7 +5,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include @@ -291,7 +291,7 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata, drv_link_info_changed(local, sdata, link->conf, link->link_id, changed); } -u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) +u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata) { sdata->vif.bss_conf.use_cts_prot = false; sdata->vif.bss_conf.use_short_preamble = false; diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index f72333201903..a4d8764073bf 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008, 2009 open80211s Ltd. - * Copyright (C) 2018 - 2022 Intel Corporation + * Copyright (C) 2018 - 2023 Intel Corporation * Authors: Luis Carlos Cobo * Javier Cardona */ @@ -133,10 +133,10 @@ bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie) * * Returns: beacon changed flag if the beacon content changed. 
*/ -u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) +u64 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) { bool free_plinks; - u32 changed = 0; + u64 changed = 0; /* In case mesh_plink_free_count > 0 and mesh_plinktbl_capacity == 0, * the mesh interface might be able to establish plinks with peers that @@ -162,7 +162,7 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata) void mesh_sta_cleanup(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - u32 changed = mesh_plink_deactivate(sta); + u64 changed = mesh_plink_deactivate(sta); if (changed) ieee80211_mbss_info_change_notify(sdata, changed); @@ -923,7 +923,7 @@ unsigned int ieee80211_new_mesh_header(struct ieee80211_sub_if_data *sdata, static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - u32 changed; + u64 changed; if (ifmsh->mshcfg.plink_timeout > 0) ieee80211_sta_expire(sdata, ifmsh->mshcfg.plink_timeout * HZ); @@ -1164,7 +1164,7 @@ ieee80211_mesh_rebuild_beacon(struct ieee80211_sub_if_data *sdata) } void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata, - u32 changed) + u64 changed) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; unsigned long bits = changed; @@ -1184,7 +1184,7 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct ieee80211_local *local = sdata->local; - u32 changed = BSS_CHANGED_BEACON | + u64 changed = BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_HT | BSS_CHANGED_BASIC_RATES | @@ -1525,12 +1525,11 @@ free: kfree(elems); } -int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata) +int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_csa_settings *tmp_csa_settings; int ret = 0; - int changed = 0; /* Reset the TTL value and Initiator flag */ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE; @@ -1545,15 +1544,16 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata) if (ret) return -EINVAL; - changed |= BSS_CHANGED_BEACON; + *changed |= BSS_CHANGED_BEACON; mcsa_dbg(sdata, "complete switching to center freq %d MHz", sdata->vif.bss_conf.chandef.chan->center_freq); - return changed; + return 0; } int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, - struct cfg80211_csa_settings *csa_settings) + struct cfg80211_csa_settings *csa_settings, + u64 *changed) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_csa_settings *tmp_csa_settings; @@ -1579,7 +1579,8 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata, return ret; } - return BSS_CHANGED_BEACON; + *changed |= BSS_CHANGED_BEACON; + return 0; } static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata, @@ -1720,7 +1721,8 @@ out: static void mesh_bss_info_changed(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - u32 bit, changed = 0; + u32 bit; + u64 changed = 0; for_each_set_bit(bit, &ifmsh->mbss_changed, sizeof(changed) * BITS_PER_BYTE) { diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 022f41292a05..6c94222a9df5 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2008, 2009 open80211s Ltd. 
+ * Copyright (C) 2023 Intel Corporation * Authors: Luis Carlos Cobo * Javier Cardona */ @@ -252,11 +253,11 @@ void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); const struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method); /* wrapper for ieee80211_bss_info_change_notify() */ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata, - u32 changed); + u64 changed); /* mesh power save */ -u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata); -u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta, +u64 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata); +u64 ieee80211_mps_set_sta_local_pm(struct sta_info *sta, enum nl80211_mesh_power_mode pm); void ieee80211_mps_set_frame_flags(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, @@ -303,12 +304,12 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, u8 *hw_addr, struct ieee802_11_elems *ie, struct ieee80211_rx_status *rx_status); bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); -u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); +u64 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); void mesh_plink_timer(struct timer_list *t); void mesh_plink_broken(struct sta_info *sta); -u32 mesh_plink_deactivate(struct sta_info *sta); -u32 mesh_plink_open(struct sta_info *sta); -u32 mesh_plink_block(struct sta_info *sta); +u64 mesh_plink_deactivate(struct sta_info *sta); +u64 mesh_plink_open(struct sta_info *sta); +u64 mesh_plink_block(struct sta_info *sta); void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len, struct ieee80211_rx_status *rx_status); @@ -349,14 +350,14 @@ void mesh_path_refresh(struct ieee80211_sub_if_data *sdata, #ifdef CONFIG_MAC80211_MESH static inline -u32 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) +u64 mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_inc(&sdata->u.mesh.estab_plinks); return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; } static inline -u32 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) +u64 mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_dec(&sdata->u.mesh.estab_plinks); return mesh_accept_plinks_update(sdata) | BSS_CHANGED_BEACON; diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 8f168bc4e4b8..f3d5bb0a59f1 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008, 2009 open80211s Ltd. - * Copyright (C) 2019, 2021-2022 Intel Corporation + * Copyright (C) 2019, 2021-2023 Intel Corporation * Author: Luis Carlos Cobo */ #include @@ -90,12 +90,13 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta) * * Returns BSS_CHANGED_ERP_SLOT or 0 for no change. */ -static u32 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata) +static u64 mesh_set_short_slot_time(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_supported_band *sband; struct sta_info *sta; - u32 erp_rates = 0, changed = 0; + u32 erp_rates = 0; + u64 changed = 0; int i; bool short_slot = false; @@ -153,7 +154,7 @@ out: * is selected if all peers in our 20/40MHz MBSS support HT and at least one * HT20 peer is present. Otherwise no-protection mode is selected. 
*/ -static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) +static u64 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct sta_info *sta; @@ -365,10 +366,10 @@ free: * * Locking: the caller must hold sta->mesh->plink_lock */ -static u32 __mesh_plink_deactivate(struct sta_info *sta) +static u64 __mesh_plink_deactivate(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - u32 changed = 0; + u64 changed = 0; lockdep_assert_held(&sta->mesh->plink_lock); @@ -390,10 +391,10 @@ static u32 __mesh_plink_deactivate(struct sta_info *sta) * * All mesh paths with this peer as next hop will be flushed */ -u32 mesh_plink_deactivate(struct sta_info *sta) +u64 mesh_plink_deactivate(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - u32 changed; + u64 changed; spin_lock_bh(&sta->mesh->plink_lock); changed = __mesh_plink_deactivate(sta); @@ -622,7 +623,7 @@ void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata, struct ieee80211_rx_status *rx_status) { struct sta_info *sta; - u32 changed = 0; + u64 changed = 0; sta = mesh_sta_info_get(sdata, hw_addr, elems, rx_status); if (!sta) @@ -775,10 +776,10 @@ static u16 mesh_get_new_llid(struct ieee80211_sub_if_data *sdata) return llid; } -u32 mesh_plink_open(struct sta_info *sta) +u64 mesh_plink_open(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; - u32 changed; + u64 changed; if (!test_sta_flag(sta, WLAN_STA_AUTH)) return 0; @@ -805,9 +806,9 @@ u32 mesh_plink_open(struct sta_info *sta) return changed; } -u32 mesh_plink_block(struct sta_info *sta) +u64 mesh_plink_block(struct sta_info *sta) { - u32 changed; + u64 changed; spin_lock_bh(&sta->mesh->plink_lock); changed = __mesh_plink_deactivate(sta); @@ -831,11 +832,11 @@ static void mesh_plink_close(struct ieee80211_sub_if_data *sdata, mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout); } -static u32 mesh_plink_establish(struct ieee80211_sub_if_data *sdata, +static u64 mesh_plink_establish(struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; - u32 changed = 0; + u64 changed = 0; del_timer(&sta->mesh->plink_timer); sta->mesh->plink_state = NL80211_PLINK_ESTAB; @@ -857,12 +858,12 @@ static u32 mesh_plink_establish(struct ieee80211_sub_if_data *sdata, * * Return: changed MBSS flags */ -static u32 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata, +static u64 mesh_plink_fsm(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, enum plink_event event) { struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg; enum ieee80211_self_protected_actioncode action = 0; - u32 changed = 0; + u64 changed = 0; bool flush = false; mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr, @@ -1117,7 +1118,7 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata, struct sta_info *sta; enum plink_event event; enum ieee80211_self_protected_actioncode ftype; - u32 changed = 0; + u64 changed = 0; u8 ie_len = elems->peering_len; u16 plid, llid = 0; diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c index 3fbd0b9ff913..35eacca43e49 100644 --- a/net/mac80211/mesh_ps.c +++ b/net/mac80211/mesh_ps.c @@ -3,6 +3,7 @@ * Copyright 2012-2013, Marco Porsch * Copyright 2012-2013, cozybit Inc. 
* Copyright (C) 2021 Intel Corporation + * Copyright (C) 2023 Intel Corporation */ #include "mesh.h" @@ -77,14 +78,14 @@ static void mps_qos_null_tx(struct sta_info *sta) * sets the non-peer power mode and triggers the driver PS (re-)configuration * Return BSS_CHANGED_BEACON if a beacon update is necessary. */ -u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata) +u64 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct sta_info *sta; bool peering = false; int light_sleep_cnt = 0; int deep_sleep_cnt = 0; - u32 changed = 0; + u64 changed = 0; enum nl80211_mesh_power_mode nonpeer_pm; rcu_read_lock(); @@ -148,7 +149,7 @@ u32 ieee80211_mps_local_status_update(struct ieee80211_sub_if_data *sdata) * @pm: the power mode to set * Return BSS_CHANGED_BEACON if a beacon update is in order. */ -u32 ieee80211_mps_set_sta_local_pm(struct sta_info *sta, +u64 ieee80211_mps_set_sta_local_pm(struct sta_info *sta, enum nl80211_mesh_power_mode pm) { struct ieee80211_sub_if_data *sdata = sta->sdata; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 56c375213202..9af755388171 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2109,7 +2109,7 @@ static void ieee80211_find_cisco_dtpc(struct ieee80211_sub_if_data *sdata, *pwr_level = (__s8)cisco_dtpc_ie[4]; } -static u32 ieee80211_handle_pwr_constr(struct ieee80211_link_data *link, +static u64 ieee80211_handle_pwr_constr(struct ieee80211_link_data *link, struct ieee80211_channel *channel, struct ieee80211_mgmt *mgmt, const u8 *country_ie, u8 country_ie_len, @@ -2699,12 +2699,12 @@ static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata) mutex_unlock(&sdata->local->mtx); } -static u32 ieee80211_handle_bss_capability(struct ieee80211_link_data *link, +static u64 ieee80211_handle_bss_capability(struct ieee80211_link_data *link, u16 capab, bool erp_valid, u8 erp) { struct ieee80211_bss_conf *bss_conf = link->conf; struct ieee80211_supported_band *sband; - u32 changed = 0; + u64 changed = 0; bool use_protection; bool use_short_preamble; bool use_short_slot; @@ -2750,7 +2750,7 @@ static u64 ieee80211_link_set_associated(struct ieee80211_link_data *link, struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_bss_conf *bss_conf = link->conf; struct ieee80211_bss *bss = (void *)cbss->priv; - u32 changed = BSS_CHANGED_QOS; + u64 changed = BSS_CHANGED_QOS; /* not really used in MLO */ sdata->u.mgd.beacon_timeout = @@ -2888,7 +2888,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_local *local = sdata->local; unsigned int link_id; - u32 changed = 0; + u64 changed = 0; struct ieee80211_prep_tx_info info = { .subtype = stype, }; @@ -3959,7 +3959,7 @@ static bool ieee80211_twt_req_supported(struct ieee80211_sub_if_data *sdata, IEEE80211_HE_MAC_CAP0_TWT_REQ); } -static int ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata, +static u64 ieee80211_recalc_twt_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband, struct ieee80211_link_data *link, struct link_sta_info *link_sta, diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c index a57dcbe99a0d..cf205762ab96 100644 --- a/net/mac80211/ocb.c +++ b/net/mac80211/ocb.c @@ -4,7 +4,7 @@ * * Copyright: (c) 2014 Czech Technical University in Prague * (c) 2014 Volkswagen Group Research - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022 - 2023 Intel 
Corporation * Author: Rostislav Lisovy * Funded by: Volkswagen Group Research */ @@ -175,7 +175,7 @@ int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_if_ocb *ifocb = &sdata->u.ocb; - u32 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID; + u64 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID; int err; if (ifocb->joined == true) -- cgit v1.2.3 From c4fdb0818d38fbe1ea7021fb51346df70bbca284 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 4 Jun 2023 12:11:28 +0300 Subject: wifi: mac80211: stop warning after reconfig failures If we have a reconfig failure in the driver, then we need to shut down the network interface(s) at the network stack level through cfg80211, which can result in a lot of those "Failed check-sdata-in-driver check, ..." warnings, since interfaces are considered to not be in the driver when the reconfiguration fails, but we still need to go through all the shutdown flow. Avoid many of these warnings by storing the fact that the stack experienced a reconfiguration failure and not doing the warning in that case. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230604120651.3750c4ae6e76.I9e80d6026f59263c008a1a68f6cd6891ca0b93b0@changeid Signed-off-by: Johannes Berg --- net/mac80211/driver-ops.h | 10 ++++++---- net/mac80211/ieee80211_i.h | 3 +++ net/mac80211/iface.c | 3 +++ net/mac80211/util.c | 1 + 4 files changed, 13 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 45d3e53c7383..c4505593ba7a 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -2,7 +2,7 @@ /* * Portions of this file * Copyright(c) 2016 Intel Deutschland GmbH -* Copyright (C) 2018 - 2019, 2021 Intel Corporation +* Copyright (C) 2018 - 2019, 2021 - 2023 Intel Corporation */ #ifndef __MAC80211_DRIVER_OPS @@ -13,9 +13,11 @@ #include "trace.h" #define check_sdata_in_driver(sdata) ({ \ - !WARN_ONCE(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER), \ - "%s: Failed check-sdata-in-driver check, flags: 0x%x\n", \ - sdata->dev ? sdata->dev->name : sdata->name, sdata->flags); \ + WARN_ONCE(!sdata->local->reconfig_failure && \ + !(sdata->flags & IEEE80211_SDATA_IN_DRIVER), \ + "%s: Failed check-sdata-in-driver check, flags: 0x%x\n", \ + sdata->dev ? sdata->dev->name : sdata->name, sdata->flags); \ + !!(sdata->flags & IEEE80211_SDATA_IN_DRIVER); \ }) static inline struct ieee80211_sub_if_data * diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c80e552b6197..865210726d54 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1394,6 +1394,9 @@ struct ieee80211_local { /* device is during a HW reconfig */ bool in_reconfig; + /* reconfiguration failed ... suppress some warnings etc. 
*/ + bool reconfig_failure; + /* wowlan is enabled -- don't reconfig on resume */ bool wowlan; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index f1e01f7a9b2e..2e2115af38f5 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1281,6 +1281,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) } if (local->open_count == 0) { + /* here we can consider everything in good order (again) */ + local->reconfig_failure = false; + res = drv_start(local); if (res) goto err_del_bss; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 3f24b6e59652..a027b9e24160 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2373,6 +2373,7 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) local->resuming = false; local->suspended = false; local->in_reconfig = false; + local->reconfig_failure = true; ieee80211_flush_completed_scan(local, true); -- cgit v1.2.3 From 4b095281cacab0b62e7f11d6d8c4a6d49ecaae42 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 5 Jun 2023 23:55:25 +0200 Subject: ipv4: Set correct scope in inet_csk_route_*(). RT_CONN_FLAGS(sk) overloads the tos parameter with the RTO_ONLINK bit when sk has the SOCK_LOCALROUTE flag set. This is only useful for ip_route_output_key_hash() to eventually adjust the route scope. Let's drop RTO_ONLINK and set the correct scope directly to avoid this special case in the future and to allow converting ->flowi4_tos to dscp_t. Signed-off-by: Guillaume Nault Reviewed-by: David Ahern Signed-off-by: Jakub Kicinski --- net/ipv4/inet_connection_sock.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 1386787eaf1a..15424dec4584 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -792,7 +792,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk, opt = rcu_dereference(ireq->ireq_opt); flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, - RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, + ip_sock_rt_tos(sk), ip_sock_rt_scope(sk), sk->sk_protocol, inet_sk_flowi_flags(sk), (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, ireq->ir_loc_addr, ireq->ir_rmt_port, @@ -830,7 +830,7 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, fl4 = &newinet->cork.fl.u.ip4; flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, - RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, + ip_sock_rt_tos(sk), ip_sock_rt_scope(sk), sk->sk_protocol, inet_sk_flowi_flags(sk), (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, ireq->ir_loc_addr, ireq->ir_rmt_port, -- cgit v1.2.3 From 6f8a76f8022121f7e4dc9cc29da7fb716b7db45f Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 5 Jun 2023 23:55:30 +0200 Subject: tcp: Set route scope properly in cookie_v4_check(). RT_CONN_FLAGS(sk) overloads flowi4_tos with the RTO_ONLINK bit when sk has the SOCK_LOCALROUTE flag set. This allows ip_route_output_key_hash() to eventually adjust flowi4_scope. Instead of relying on special handling of the RTO_ONLINK bit, we can just set the route scope correctly. This will eventually allow to avoid special interpretation of tos variables and to convert ->flowi4_tos to dscp_t. 
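For context, an editorial sketch (not part of this patch) of the semantics that ip_sock_rt_scope() makes explicit; example_rt_scope is a hypothetical name and its body only paraphrases the include/net/route.h helper.

static inline u8 example_rt_scope(const struct sock *sk)
{
	/* SOCK_LOCALROUTE selects the link scope directly instead of
	 * being smuggled into flowi4_tos via the RTO_ONLINK bit
	 */
	return sock_flag(sk, SOCK_LOCALROUTE) ? RT_SCOPE_LINK
					      : RT_SCOPE_UNIVERSE;
}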
Signed-off-by: Guillaume Nault Reviewed-by: David Ahern Signed-off-by: Jakub Kicinski --- net/ipv4/syncookies.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 26fb97d1d4d9..dc478a0574cb 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -418,8 +418,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) * no easy way to do this. */ flowi4_init_output(&fl4, ireq->ir_iif, ireq->ir_mark, - RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, - inet_sk_flowi_flags(sk), + ip_sock_rt_tos(sk), ip_sock_rt_scope(sk), + IPPROTO_TCP, inet_sk_flowi_flags(sk), opt->srr ? opt->faddr : ireq->ir_rmt_addr, ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid); security_req_classify_flow(req, flowi4_to_flowi_common(&fl4)); -- cgit v1.2.3 From ae28ea5cbdee9956464223b304d6c767238b2506 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 5 Jun 2023 10:40:44 -0400 Subject: tipc: replace open-code bearer rcu_dereference access in bearer.c Replace these open-code bearer rcu_dereference access with bearer_get(), like other places in bearer.c. While at it, also use tipc_net() instead of net_generic(net, tipc_net_id) to get "tn" in bearer.c. Signed-off-by: Xin Long Reviewed-by: Larysa Zaremba Reviewed-by: Tung Nguyen Link: https://lore.kernel.org/r/1072588a8691f970bda950c7e2834d1f2983f58e.1685976044.git.lucien.xin@gmail.com Signed-off-by: Jakub Kicinski --- net/tipc/bearer.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 114140c49108..1d5d3677bdaf 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -176,7 +176,7 @@ static int bearer_name_validate(const char *name, */ struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name) { - struct tipc_net *tn = net_generic(net, tipc_net_id); + struct tipc_net *tn = tipc_net(net); struct tipc_bearer *b; u32 i; @@ -211,11 +211,10 @@ int tipc_bearer_get_name(struct net *net, char *name, u32 bearer_id) void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest) { - struct tipc_net *tn = net_generic(net, tipc_net_id); struct tipc_bearer *b; rcu_read_lock(); - b = rcu_dereference(tn->bearer_list[bearer_id]); + b = bearer_get(net, bearer_id); if (b) tipc_disc_add_dest(b->disc); rcu_read_unlock(); @@ -223,11 +222,10 @@ void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest) void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest) { - struct tipc_net *tn = net_generic(net, tipc_net_id); struct tipc_bearer *b; rcu_read_lock(); - b = rcu_dereference(tn->bearer_list[bearer_id]); + b = bearer_get(net, bearer_id); if (b) tipc_disc_remove_dest(b->disc); rcu_read_unlock(); @@ -534,7 +532,7 @@ int tipc_bearer_mtu(struct net *net, u32 bearer_id) struct tipc_bearer *b; rcu_read_lock(); - b = rcu_dereference(tipc_net(net)->bearer_list[bearer_id]); + b = bearer_get(net, bearer_id); if (b) mtu = b->mtu; rcu_read_unlock(); @@ -745,7 +743,7 @@ void tipc_bearer_cleanup(void) void tipc_bearer_stop(struct net *net) { - struct tipc_net *tn = net_generic(net, tipc_net_id); + struct tipc_net *tn = tipc_net(net); struct tipc_bearer *b; u32 i; @@ -881,7 +879,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb) struct tipc_bearer *bearer; struct tipc_nl_msg msg; struct net *net = sock_net(skb->sk); - struct tipc_net *tn = net_generic(net, tipc_net_id); + struct tipc_net *tn = tipc_net(net); if (i == MAX_BEARERS) return 
0; -- cgit v1.2.3 From 0824a987a58070470351906590a89fa5e0f88ba1 Mon Sep 17 00:00:00 2001 From: David Morley Date: Tue, 6 Jun 2023 18:12:33 +0000 Subject: tcp: fix formatting in sysctl_net_ipv4.c Fix incorrectly formatted tcp_syn_linear_timeouts sysctl in the ipv4_net_table. Fixes: ccce324dabfe ("tcp: make the first N SYN RTO backoffs linear") Signed-off-by: David Morley Signed-off-by: Neal Cardwell Tested-by: David Morley Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/sysctl_net_ipv4.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 6ae3345a3bdf..ef26f9013a0f 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -1472,13 +1472,13 @@ static struct ctl_table ipv4_net_table[] = { .extra2 = &tcp_plb_max_cong_thresh, }, { - .procname = "tcp_syn_linear_timeouts", - .data = &init_net.ipv4.sysctl_tcp_syn_linear_timeouts, - .maxlen = sizeof(u8), - .mode = 0644, - .proc_handler = proc_dou8vec_minmax, - .extra1 = SYSCTL_ZERO, - .extra2 = &tcp_syn_linear_timeouts_max, + .procname = "tcp_syn_linear_timeouts", + .data = &init_net.ipv4.sysctl_tcp_syn_linear_timeouts, + .maxlen = sizeof(u8), + .mode = 0644, + .proc_handler = proc_dou8vec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &tcp_syn_linear_timeouts_max, }, { } }; -- cgit v1.2.3 From e9da6df7492a981b071bafd169fb4c35b45f5ebf Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:20 +0200 Subject: wifi: cfg80211: hold wiphy lock in auto-disconnect Most code paths in cfg80211 already hold the wiphy lock, mostly by virtue of being called from nl80211, so make the auto-disconnect worker also hold it, aligning the locking promises between different parts of cfg80211. Signed-off-by: Johannes Berg --- net/wireless/core.c | 6 ++---- net/wireless/sme.c | 4 +++- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/wireless/core.c b/net/wireless/core.c index b3ec9eaec36b..061f7a6dd279 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1178,10 +1178,6 @@ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev, kfree_sensitive(wdev->wext.keys); wdev->wext.keys = NULL; #endif - /* only initialized if we have a netdev */ - if (wdev->netdev) - flush_work(&wdev->disconnect_wk); - cfg80211_cqm_config_free(wdev); /* @@ -1455,6 +1451,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, cfg80211_leave(rdev, wdev); cfg80211_remove_links(wdev); wiphy_unlock(&rdev->wiphy); + /* since we just did cfg80211_leave() nothing to do there */ + cancel_work_sync(&wdev->disconnect_wk); break; case NETDEV_DOWN: wiphy_lock(&rdev->wiphy); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 7bdeb8eea92d..247369004aaa 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -5,7 +5,7 @@ * (for nl80211's connect() and wext) * * Copyright 2009 Johannes Berg - * Copyright (C) 2009, 2020, 2022 Intel Corporation. All rights reserved. + * Copyright (C) 2009, 2020, 2022-2023 Intel Corporation. All rights reserved. 
* Copyright 2017 Intel Deutschland GmbH */ @@ -1569,6 +1569,7 @@ void cfg80211_autodisconnect_wk(struct work_struct *work) container_of(work, struct wireless_dev, disconnect_wk); struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); + wiphy_lock(wdev->wiphy); wdev_lock(wdev); if (wdev->conn_owner_nlportid) { @@ -1607,4 +1608,5 @@ void cfg80211_autodisconnect_wk(struct work_struct *work) } wdev_unlock(wdev); + wiphy_unlock(wdev->wiphy); } -- cgit v1.2.3 From 0dcb84ede5b0a99fc125df2f82ca1f539d41446d Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:21 +0200 Subject: wifi: cfg80211: hold wiphy lock in pmsr work Most code paths in cfg80211 already hold the wiphy lock, mostly by virtue of being called from nl80211, so make the pmsr cleanup worker also hold it, aligning the locking promises between different parts of cfg80211. Signed-off-by: Johannes Berg --- net/wireless/core.c | 3 +-- net/wireless/pmsr.c | 4 +++- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/wireless/core.c b/net/wireless/core.c index 061f7a6dd279..2e12b166c6fc 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1145,8 +1145,6 @@ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev, ASSERT_RTNL(); lockdep_assert_held(&rdev->wiphy.mtx); - flush_work(&wdev->pmsr_free_wk); - nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); wdev->registered = false; @@ -1453,6 +1451,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, wiphy_unlock(&rdev->wiphy); /* since we just did cfg80211_leave() nothing to do there */ cancel_work_sync(&wdev->disconnect_wk); + cancel_work_sync(&wdev->pmsr_free_wk); break; case NETDEV_DOWN: wiphy_lock(&rdev->wiphy); diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c index 2bc647720cda..77000a264855 100644 --- a/net/wireless/pmsr.c +++ b/net/wireless/pmsr.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2018 - 2021 Intel Corporation + * Copyright (C) 2018 - 2021, 2023 Intel Corporation */ #include #include "core.h" @@ -623,9 +623,11 @@ void cfg80211_pmsr_free_wk(struct work_struct *work) struct wireless_dev *wdev = container_of(work, struct wireless_dev, pmsr_free_wk); + wiphy_lock(wdev->wiphy); wdev_lock(wdev); cfg80211_pmsr_process_abort(wdev); wdev_unlock(wdev); + wiphy_unlock(wdev->wiphy); } void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) -- cgit v1.2.3 From a993df0f9143e63eca38c96a30daf08db99a98a3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:22 +0200 Subject: wifi: cfg80211: move wowlan disable under locks This is a driver callback, and the driver should be able to assume that it's called with the wiphy lock held. Move the call up so that's true, it has no other effect since the device is already unregistering and we cannot reach this function through other paths. 
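As an aside, a minimal driver-side sketch (not part of the patch below) of what this stronger contract permits: the callback name my_set_wakeup is made up for the example; only the cfg80211 set_wakeup() op and lockdep_assert_wiphy() come from the tree.

  #include <net/cfg80211.h>

  /* Hypothetical implementation of the cfg80211 set_wakeup() driver op.
   * With this change it is invoked with the wiphy mutex held, so the
   * driver may rely on that and assert it.
   */
  static void my_set_wakeup(struct wiphy *wiphy, bool enabled)
  {
          lockdep_assert_wiphy(wiphy);

          /* program the device's wake-on-wireless state here */
  }
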
Signed-off-by: Johannes Berg --- net/wireless/core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/wireless/core.c b/net/wireless/core.c index 2e12b166c6fc..a04b6eeb55f8 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1065,6 +1065,10 @@ void wiphy_unregister(struct wiphy *wiphy) cfg80211_rdev_list_generation++; device_del(&rdev->wiphy.dev); +#ifdef CONFIG_PM + if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) + rdev_set_wakeup(rdev, false); +#endif wiphy_unlock(&rdev->wiphy); rtnl_unlock(); @@ -1080,10 +1084,6 @@ void wiphy_unregister(struct wiphy *wiphy) flush_work(&rdev->mgmt_registrations_update_wk); flush_work(&rdev->background_cac_abort_wk); -#ifdef CONFIG_PM - if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) - rdev_set_wakeup(rdev, false); -#endif cfg80211_rdev_free_wowlan(rdev); cfg80211_rdev_free_coalesce(rdev); } -- cgit v1.2.3 From 7d2d0ff49dfdb38ad37baa9a27c0132d5e51a923 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:23 +0200 Subject: wifi: cfg80211: wext: hold wiphy lock in siwgenie This ioctl was missed since it's in wext-sme.c, which we usually reach via a front-level ioctl handler in the other files, but it should also hold the wiphy lock to align the locking contract towards the driver. Signed-off-by: Johannes Berg --- net/wireless/wext-sme.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index f231207ca210..f3eaa3388694 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -3,7 +3,7 @@ * cfg80211 wext compat for managed mode. * * Copyright 2009 Johannes Berg - * Copyright (C) 2009, 2020-2022 Intel Corporation + * Copyright (C) 2009, 2020-2023 Intel Corporation */ #include @@ -338,6 +338,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev, if (!ie_len) ie = NULL; + wiphy_lock(wdev->wiphy); wdev_lock(wdev); /* no change */ @@ -370,6 +371,7 @@ int cfg80211_wext_siwgenie(struct net_device *dev, err = 0; out: wdev_unlock(wdev); + wiphy_unlock(wdev->wiphy); return err; } -- cgit v1.2.3 From 4d45145ba6e2e1c0eba4ebda7d6273319191f4e8 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:24 +0200 Subject: wifi: cfg80211: hold wiphy lock when sending wiphy Sending the wiphy out might cause calls to the driver, notably get_txq_stats() and get_antenna(). These aren't very important, since they normally have their own locks and/or just send out static data, but if the contract should be that the wiphy lock is always held, these are also affected. Fix that.
Signed-off-by: Johannes Berg --- net/wireless/core.c | 8 ++++++++ net/wireless/nl80211.c | 3 +++ 2 files changed, 11 insertions(+) (limited to 'net') diff --git a/net/wireless/core.c b/net/wireless/core.c index a04b6eeb55f8..151a8567d2a5 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -129,6 +129,7 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev, int result; ASSERT_RTNL(); + lockdep_assert_wiphy(&rdev->wiphy); /* Ignore nop renames */ if (strcmp(newname, wiphy_name(&rdev->wiphy)) == 0) @@ -195,6 +196,8 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, continue; nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); } + + wiphy_lock(&rdev->wiphy); nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY); wiphy_net_set(&rdev->wiphy, net); @@ -203,6 +206,8 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, WARN_ON(err); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); + wiphy_unlock(&rdev->wiphy); + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; @@ -941,8 +946,10 @@ int wiphy_register(struct wiphy *wiphy) rdev->wiphy.features |= NL80211_FEATURE_SCAN_FLUSH; rtnl_lock(); + wiphy_lock(&rdev->wiphy); res = device_add(&rdev->wiphy.dev); if (res) { + wiphy_unlock(&rdev->wiphy); rtnl_unlock(); return res; } @@ -956,6 +963,7 @@ int wiphy_register(struct wiphy *wiphy) cfg80211_debugfs_rdev_add(rdev); nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY); + wiphy_unlock(&rdev->wiphy); /* set up regulatory info */ wiphy_regulatory_register(wiphy); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 087d60c0f6e4..a52bd739cf84 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3081,6 +3081,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) if (state->filter_wiphy != -1 && state->filter_wiphy != rdev->wiphy_idx) continue; + wiphy_lock(&rdev->wiphy); /* attempt to fit multiple wiphy data chunks into the skb */ do { ret = nl80211_send_wiphy(rdev, NL80211_CMD_NEW_WIPHY, @@ -3107,6 +3108,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) cb->min_dump_alloc < 4096) { cb->min_dump_alloc = 4096; state->split_start = 0; + wiphy_unlock(&rdev->wiphy); rtnl_unlock(); return 1; } @@ -3114,6 +3116,7 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) break; } } while (state->split_start > 0); + wiphy_unlock(&rdev->wiphy); break; } rtnl_unlock(); -- cgit v1.2.3 From a3ee4dc84c4e9d14cb34dad095fd678127aca5b6 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:25 +0200 Subject: wifi: cfg80211: add a work abstraction with special semantics Add a work abstraction at the cfg80211 level that will always hold the wiphy_lock() for any work executed and therefore also can be canceled safely (without waiting) while holding that. This improves on what we do now as with the new wiphy works we don't have to worry about locking while cancelling them safely. Also, don't let such works run while the device is suspended, since they'll likely need to interact with the device. Flush them before suspend though. 
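For illustration only, a minimal usage sketch of the new API under the semantics described above (the handler runs with the wiphy mutex held, queueing may happen from any context); the my_* names are hypothetical and not part of this patch:

  #include <net/cfg80211.h>

  struct my_priv {
          struct wiphy *wiphy;
          struct wiphy_work conn_work;
          struct wiphy_delayed_work poll_work;
  };

  /* runs with the wiphy mutex held, as if wiphy_lock() had been called */
  static void my_conn_work_fn(struct wiphy *wiphy, struct wiphy_work *work)
  {
          /* may call code that expects the wiphy lock, no extra locking */
  }

  static void my_poll_work_fn(struct wiphy *wiphy, struct wiphy_work *work)
  {
          struct my_priv *priv = container_of(work, struct my_priv,
                                              poll_work.work);

          /* do periodic processing under the wiphy lock, then re-arm */
          wiphy_delayed_work_queue(wiphy, &priv->poll_work, HZ);
  }

  static void my_setup(struct my_priv *priv)
  {
          wiphy_work_init(&priv->conn_work, my_conn_work_fn);
          wiphy_delayed_work_init(&priv->poll_work, my_poll_work_fn);
          /* first poll after one second */
          wiphy_delayed_work_queue(priv->wiphy, &priv->poll_work, HZ);
  }

  /* queueing is allowed from any context, e.g. an RX path */
  static void my_event(struct my_priv *priv)
  {
          wiphy_work_queue(priv->wiphy, &priv->conn_work);
  }

Cancellation under the wiphy lock is shown in a separate sketch further down, next to the mac80211 conversions that rely on it.
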
Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 95 ++++++++++++++++++++++++++++++++++++-- net/wireless/core.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++++ net/wireless/core.h | 7 +++ net/wireless/sysfs.c | 8 +++- 4 files changed, 227 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9e04f69712b1..1b8619685bf6 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -5724,12 +5724,17 @@ struct cfg80211_cqm_config; * wiphy_lock - lock the wiphy * @wiphy: the wiphy to lock * - * This is mostly exposed so it can be done around registering and - * unregistering netdevs that aren't created through cfg80211 calls, - * since that requires locking in cfg80211 when the notifiers is - * called, but that cannot differentiate which way it's called. + * This is needed around registering and unregistering netdevs that + * aren't created through cfg80211 calls, since that requires locking + * in cfg80211 when the notifiers is called, but that cannot + * differentiate which way it's called. + * + * It can also be used by drivers for their own purposes. * * When cfg80211 ops are called, the wiphy is already locked. + * + * Note that this makes sure that no workers that have been queued + * with wiphy_queue_work() are running. */ static inline void wiphy_lock(struct wiphy *wiphy) __acquires(&wiphy->mtx) @@ -5749,6 +5754,88 @@ static inline void wiphy_unlock(struct wiphy *wiphy) mutex_unlock(&wiphy->mtx); } +struct wiphy_work; +typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *); + +struct wiphy_work { + struct list_head entry; + wiphy_work_func_t func; +}; + +static inline void wiphy_work_init(struct wiphy_work *work, + wiphy_work_func_t func) +{ + INIT_LIST_HEAD(&work->entry); + work->func = func; +} + +/** + * wiphy_work_queue - queue work for the wiphy + * @wiphy: the wiphy to queue for + * @work: the work item + * + * This is useful for work that must be done asynchronously, and work + * queued here has the special property that the wiphy mutex will be + * held as if wiphy_lock() was called, and that it cannot be running + * after wiphy_lock() was called. Therefore, wiphy_cancel_work() can + * use just cancel_work() instead of cancel_work_sync(), it requires + * being in a section protected by wiphy_lock(). + */ +void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work); + +/** + * wiphy_work_cancel - cancel previously queued work + * @wiphy: the wiphy, for debug purposes + * @work: the work to cancel + * + * Cancel the work *without* waiting for it, this assumes being + * called under the wiphy mutex acquired by wiphy_lock(). 
+ */ +void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work); + +struct wiphy_delayed_work { + struct wiphy_work work; + struct wiphy *wiphy; + struct timer_list timer; +}; + +void wiphy_delayed_work_timer(struct timer_list *t); + +static inline void wiphy_delayed_work_init(struct wiphy_delayed_work *dwork, + wiphy_work_func_t func) +{ + timer_setup(&dwork->timer, wiphy_delayed_work_timer, 0); + wiphy_work_init(&dwork->work, func); +} + +/** + * wiphy_delayed_work_queue - queue delayed work for the wiphy + * @wiphy: the wiphy to queue for + * @dwork: the delayable worker + * @delay: number of jiffies to wait before queueing + * + * This is useful for work that must be done asynchronously, and work + * queued here has the special property that the wiphy mutex will be + * held as if wiphy_lock() was called, and that it cannot be running + * after wiphy_lock() was called. Therefore, wiphy_cancel_work() can + * use just cancel_work() instead of cancel_work_sync(), it requires + * being in a section protected by wiphy_lock(). + */ +void wiphy_delayed_work_queue(struct wiphy *wiphy, + struct wiphy_delayed_work *dwork, + unsigned long delay); + +/** + * wiphy_delayed_work_cancel - cancel previously queued delayed work + * @wiphy: the wiphy, for debug purposes + * @dwork: the delayed work to cancel + * + * Cancel the work *without* waiting for it, this assumes being + * called under the wiphy mutex acquired by wiphy_lock(). + */ +void wiphy_delayed_work_cancel(struct wiphy *wiphy, + struct wiphy_delayed_work *dwork); + /** * struct wireless_dev - wireless device state * diff --git a/net/wireless/core.c b/net/wireless/core.c index 151a8567d2a5..ca585e96d535 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -413,6 +413,34 @@ static void cfg80211_propagate_cac_done_wk(struct work_struct *work) rtnl_unlock(); } +static void cfg80211_wiphy_work(struct work_struct *work) +{ + struct cfg80211_registered_device *rdev; + struct wiphy_work *wk; + + rdev = container_of(work, struct cfg80211_registered_device, wiphy_work); + + wiphy_lock(&rdev->wiphy); + if (rdev->suspended) + goto out; + + spin_lock_irq(&rdev->wiphy_work_lock); + wk = list_first_entry_or_null(&rdev->wiphy_work_list, + struct wiphy_work, entry); + if (wk) { + list_del_init(&wk->entry); + if (!list_empty(&rdev->wiphy_work_list)) + schedule_work(work); + spin_unlock_irq(&rdev->wiphy_work_lock); + + wk->func(&rdev->wiphy, wk); + } else { + spin_unlock_irq(&rdev->wiphy_work_lock); + } +out: + wiphy_unlock(&rdev->wiphy); +} + /* exported functions */ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, @@ -538,6 +566,9 @@ use_default_name: return NULL; } + INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work); + INIT_LIST_HEAD(&rdev->wiphy_work_list); + spin_lock_init(&rdev->wiphy_work_lock); INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work); INIT_WORK(&rdev->conn_work, cfg80211_conn_work); INIT_WORK(&rdev->event_work, cfg80211_event_work); @@ -1035,6 +1066,31 @@ void wiphy_rfkill_start_polling(struct wiphy *wiphy) } EXPORT_SYMBOL(wiphy_rfkill_start_polling); +void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev) +{ + unsigned int runaway_limit = 100; + unsigned long flags; + + lockdep_assert_held(&rdev->wiphy.mtx); + + spin_lock_irqsave(&rdev->wiphy_work_lock, flags); + while (!list_empty(&rdev->wiphy_work_list)) { + struct wiphy_work *wk; + + wk = list_first_entry(&rdev->wiphy_work_list, + struct wiphy_work, entry); + list_del_init(&wk->entry); + 
spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); + + wk->func(&rdev->wiphy, wk); + + spin_lock_irqsave(&rdev->wiphy_work_lock, flags); + if (WARN_ON(--runaway_limit == 0)) + INIT_LIST_HEAD(&rdev->wiphy_work_list); + } + spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); +} + void wiphy_unregister(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); @@ -1077,9 +1133,15 @@ void wiphy_unregister(struct wiphy *wiphy) if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup) rdev_set_wakeup(rdev, false); #endif + + /* surely nothing is reachable now, clean up work */ + cfg80211_process_wiphy_works(rdev); wiphy_unlock(&rdev->wiphy); rtnl_unlock(); + /* this has nothing to do now but make sure it's gone */ + cancel_work_sync(&rdev->wiphy_work); + flush_work(&rdev->scan_done_wk); cancel_work_sync(&rdev->conn_work); flush_work(&rdev->event_work); @@ -1569,6 +1631,66 @@ static struct pernet_operations cfg80211_pernet_ops = { .exit = cfg80211_pernet_exit, }; +void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + unsigned long flags; + + spin_lock_irqsave(&rdev->wiphy_work_lock, flags); + if (list_empty(&work->entry)) + list_add_tail(&work->entry, &rdev->wiphy_work_list); + spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); + + schedule_work(&rdev->wiphy_work); +} +EXPORT_SYMBOL_GPL(wiphy_work_queue); + +void wiphy_work_cancel(struct wiphy *wiphy, struct wiphy_work *work) +{ + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + unsigned long flags; + + lockdep_assert_held(&wiphy->mtx); + + spin_lock_irqsave(&rdev->wiphy_work_lock, flags); + if (!list_empty(&work->entry)) + list_del_init(&work->entry); + spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); +} +EXPORT_SYMBOL_GPL(wiphy_work_cancel); + +void wiphy_delayed_work_timer(struct timer_list *t) +{ + struct wiphy_delayed_work *dwork = from_timer(dwork, t, timer); + + wiphy_work_queue(dwork->wiphy, &dwork->work); +} +EXPORT_SYMBOL(wiphy_delayed_work_timer); + +void wiphy_delayed_work_queue(struct wiphy *wiphy, + struct wiphy_delayed_work *dwork, + unsigned long delay) +{ + if (!delay) { + wiphy_work_queue(wiphy, &dwork->work); + return; + } + + dwork->wiphy = wiphy; + mod_timer(&dwork->timer, jiffies + delay); +} +EXPORT_SYMBOL_GPL(wiphy_delayed_work_queue); + +void wiphy_delayed_work_cancel(struct wiphy *wiphy, + struct wiphy_delayed_work *dwork) +{ + lockdep_assert_held(&wiphy->mtx); + + del_timer_sync(&dwork->timer); + wiphy_work_cancel(wiphy, &dwork->work); +} +EXPORT_SYMBOL_GPL(wiphy_delayed_work_cancel); + static int __init cfg80211_init(void) { int err; diff --git a/net/wireless/core.h b/net/wireless/core.h index 7c61752f6d83..435060dad81e 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -108,6 +108,12 @@ struct cfg80211_registered_device { /* lock for all wdev lists */ spinlock_t mgmt_registrations_lock; + struct work_struct wiphy_work; + struct list_head wiphy_work_list; + /* protects the list above */ + spinlock_t wiphy_work_lock; + bool suspended; + /* must be last because of the way we do wiphy_priv(), * and it should at least be aligned to NETDEV_ALIGN */ struct wiphy wiphy __aligned(NETDEV_ALIGN); @@ -453,6 +459,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype ntype, struct vif_params *params); void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); +void cfg80211_process_wiphy_works(struct 
cfg80211_registered_device *rdev); void cfg80211_process_wdev_events(struct wireless_dev *wdev); bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c index 268f670835e9..c629bac3f298 100644 --- a/net/wireless/sysfs.c +++ b/net/wireless/sysfs.c @@ -5,7 +5,7 @@ * * Copyright 2005-2006 Jiri Benc * Copyright 2006 Johannes Berg - * Copyright (C) 2020-2021 Intel Corporation + * Copyright (C) 2020-2021, 2023 Intel Corporation */ #include @@ -105,14 +105,18 @@ static int wiphy_suspend(struct device *dev) cfg80211_leave_all(rdev); cfg80211_process_rdev_events(rdev); } + cfg80211_process_wiphy_works(rdev); if (rdev->ops->suspend) ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config); if (ret == 1) { /* Driver refuse to configure wowlan */ cfg80211_leave_all(rdev); cfg80211_process_rdev_events(rdev); + cfg80211_process_wiphy_works(rdev); ret = rdev_suspend(rdev, NULL); } + if (ret == 0) + rdev->suspended = true; } wiphy_unlock(&rdev->wiphy); rtnl_unlock(); @@ -132,6 +136,8 @@ static int wiphy_resume(struct device *dev) wiphy_lock(&rdev->wiphy); if (rdev->wiphy.registered && rdev->ops->resume) ret = rdev_resume(rdev); + rdev->suspended = false; + schedule_work(&rdev->wiphy_work); wiphy_unlock(&rdev->wiphy); if (ret) -- cgit v1.2.3 From 16114496d684a3df4ce09f7c6b7557a8b2922795 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:26 +0200 Subject: wifi: mac80211: use wiphy work for sdata->work We'll need this later to convert other works that might be cancelled from here, so convert this one first. Signed-off-by: Johannes Berg --- net/mac80211/ibss.c | 8 ++++---- net/mac80211/ieee80211_i.h | 2 +- net/mac80211/iface.c | 10 +++++----- net/mac80211/mesh.c | 10 +++++----- net/mac80211/mesh_hwmp.c | 6 +++--- net/mac80211/mlme.c | 6 +++--- net/mac80211/ocb.c | 6 +++--- net/mac80211/rx.c | 2 +- net/mac80211/scan.c | 2 +- net/mac80211/status.c | 6 +++--- net/mac80211/util.c | 2 +- 11 files changed, 30 insertions(+), 30 deletions(-) (limited to 'net') diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index faa01ee11d32..19017810024b 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -743,7 +743,7 @@ static void ieee80211_csa_connection_drop_work(struct work_struct *work) skb_queue_purge(&sdata->skb_queue); /* trigger a scan to find another IBSS network to join */ - ieee80211_queue_work(&sdata->local->hw, &sdata->work); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); sdata_unlock(sdata); } @@ -1244,7 +1244,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata, spin_lock(&ifibss->incomplete_lock); list_add(&sta->list, &ifibss->incomplete_stations); spin_unlock(&ifibss->incomplete_lock); - ieee80211_queue_work(&local->hw, &sdata->work); + wiphy_work_queue(local->hw.wiphy, &sdata->work); } static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata) @@ -1723,7 +1723,7 @@ static void ieee80211_ibss_timer(struct timer_list *t) struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.ibss.timer); - ieee80211_queue_work(&sdata->local->hw, &sdata->work); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) @@ -1858,7 +1858,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, sdata->deflink.needed_rx_chains = local->rx_chains; sdata->control_port_over_nl80211 = params->control_port_over_nl80211; - ieee80211_queue_work(&local->hw, &sdata->work); + 
wiphy_work_queue(local->hw.wiphy, &sdata->work); return 0; } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 037040787194..1cefe69706ef 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1061,7 +1061,7 @@ struct ieee80211_sub_if_data { /* used to reconfigure hardware SM PS */ struct work_struct recalc_smps; - struct work_struct work; + struct wiphy_work work; struct sk_buff_head skb_queue; struct sk_buff_head status_queue; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 2e2115af38f5..f820098e6a70 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -43,7 +43,7 @@ * by either the RTNL, the iflist_mtx or RCU. */ -static void ieee80211_iface_work(struct work_struct *work); +static void ieee80211_iface_work(struct wiphy *wiphy, struct wiphy_work *work); bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) { @@ -614,7 +614,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do RCU_INIT_POINTER(local->p2p_sdata, NULL); fallthrough; default: - cancel_work_sync(&sdata->work); + wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->work); /* * When we get here, the interface is marked down. * Free the remaining keys, if there are any @@ -1173,7 +1173,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local) skb_queue_head_init(&sdata->skb_queue); skb_queue_head_init(&sdata->status_queue); - INIT_WORK(&sdata->work, ieee80211_iface_work); + wiphy_work_init(&sdata->work, ieee80211_iface_work); return 0; } @@ -1625,7 +1625,7 @@ static void ieee80211_iface_process_status(struct ieee80211_sub_if_data *sdata, } } -static void ieee80211_iface_work(struct work_struct *work) +static void ieee80211_iface_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, work); @@ -1737,7 +1737,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, skb_queue_head_init(&sdata->skb_queue); skb_queue_head_init(&sdata->status_queue); - INIT_WORK(&sdata->work, ieee80211_iface_work); + wiphy_work_init(&sdata->work, ieee80211_iface_work); INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work); INIT_WORK(&sdata->activate_links_work, ieee80211_activate_links_work); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index a4d8764073bf..af8c5fc2db14 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -45,7 +45,7 @@ static void ieee80211_mesh_housekeeping_timer(struct timer_list *t) set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); - ieee80211_queue_work(&local->hw, &sdata->work); + wiphy_work_queue(local->hw.wiphy, &sdata->work); } /** @@ -703,7 +703,7 @@ static void ieee80211_mesh_path_timer(struct timer_list *t) struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mesh.mesh_path_timer); - ieee80211_queue_work(&sdata->local->hw, &sdata->work); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } static void ieee80211_mesh_path_root_timer(struct timer_list *t) @@ -714,7 +714,7 @@ static void ieee80211_mesh_path_root_timer(struct timer_list *t) set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags); - ieee80211_queue_work(&sdata->local->hw, &sdata->work); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) @@ -1177,7 +1177,7 @@ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata, for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE) set_bit(bit, &ifmsh->mbss_changed); 
set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags); - ieee80211_queue_work(&sdata->local->hw, &sdata->work); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) @@ -1202,7 +1202,7 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) ifmsh->sync_offset_clockdrift_max = 0; set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); ieee80211_mesh_root_setup(ifmsh); - ieee80211_queue_work(&local->hw, &sdata->work); + wiphy_work_queue(local->hw.wiphy, &sdata->work); sdata->vif.bss_conf.ht_operation_mode = ifmsh->mshcfg.ht_opmode; sdata->vif.bss_conf.enable_beacon = true; diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 5217e1d97dd6..51369072984e 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008, 2009 open80211s Ltd. - * Copyright (C) 2019, 2021-2022 Intel Corporation + * Copyright (C) 2019, 2021-2023 Intel Corporation * Author: Luis Carlos Cobo */ @@ -1026,14 +1026,14 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) spin_unlock_bh(&ifmsh->mesh_preq_queue_lock); if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) - ieee80211_queue_work(&sdata->local->hw, &sdata->work); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); else if (time_before(jiffies, ifmsh->last_preq)) { /* avoid long wait if did not send preqs for a long time * and jiffies wrapped around */ ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; - ieee80211_queue_work(&sdata->local->hw, &sdata->work); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } else mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + min_preq_int_jiff(sdata)); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 7ec0ad29b8c9..03251a725d65 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3161,7 +3161,7 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, sdata->u.mgd.probe_send_count = 0; else sdata->u.mgd.nullfunc_failed = true; - ieee80211_queue_work(&sdata->local->hw, &sdata->work); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata, @@ -6074,7 +6074,7 @@ static void ieee80211_sta_timer(struct timer_list *t) struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mgd.timer); - ieee80211_queue_work(&sdata->local->hw, &sdata->work); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, @@ -6218,7 +6218,7 @@ void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata, sdata->u.mgd.status_acked = acked; sdata->u.mgd.status_received = true; - ieee80211_queue_work(&local->hw, &sdata->work); + wiphy_work_queue(local->hw.wiphy, &sdata->work); } void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c index cf205762ab96..b44896e14522 100644 --- a/net/mac80211/ocb.c +++ b/net/mac80211/ocb.c @@ -81,7 +81,7 @@ void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata, spin_lock(&ifocb->incomplete_lock); list_add(&sta->list, &ifocb->incomplete_stations); spin_unlock(&ifocb->incomplete_lock); - ieee80211_queue_work(&local->hw, &sdata->work); + wiphy_work_queue(local->hw.wiphy, &sdata->work); } static struct sta_info *ieee80211_ocb_finish_sta(struct sta_info *sta) @@ -157,7 +157,7 @@ static void ieee80211_ocb_housekeeping_timer(struct 
timer_list *t) set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); - ieee80211_queue_work(&local->hw, &sdata->work); + wiphy_work_queue(local->hw.wiphy, &sdata->work); } void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata) @@ -197,7 +197,7 @@ int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata, ifocb->joined = true; set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags); - ieee80211_queue_work(&local->hw, &sdata->work); + wiphy_work_queue(local->hw.wiphy, &sdata->work); netif_carrier_on(sdata->dev); return 0; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index d996aa2579df..ed9939466198 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -229,7 +229,7 @@ static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, } skb_queue_tail(&sdata->skb_queue, skb); - ieee80211_queue_work(&sdata->local->hw, &sdata->work); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); if (sta) sta->deflink.rx_stats.packets++; } diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 32fa8aca7005..ea5383136fff 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -502,7 +502,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) */ list_for_each_entry_rcu(sdata, &local->interfaces, list) { if (ieee80211_sdata_running(sdata)) - ieee80211_queue_work(&sdata->local->hw, &sdata->work); + wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); } if (was_scanning) diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 2b13a52ce96c..44d83da60aee 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -5,7 +5,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2008-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright 2021-2022 Intel Corporation + * Copyright 2021-2023 Intel Corporation */ #include @@ -747,8 +747,8 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local, if (qskb) { skb_queue_tail(&sdata->status_queue, qskb); - ieee80211_queue_work(&local->hw, - &sdata->work); + wiphy_work_queue(local->hw.wiphy, + &sdata->work); } } } else { diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 4f29c307b3f6..083ad56d08d9 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2942,7 +2942,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) /* Requeue all works */ list_for_each_entry(sdata, &local->interfaces, list) - ieee80211_queue_work(&local->hw, &sdata->work); + wiphy_work_queue(local->hw.wiphy, &sdata->work); } ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, -- cgit v1.2.3 From a3df43b16fc49213ab4b925711d4725e1e2f2305 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:27 +0200 Subject: wifi: mac80211: unregister netdevs through cfg80211 Since we want to have wiphy_lock() for the unregistration in the future, unregister also netdevs via cfg80211 now to be able to hold the wiphy_lock() for it. 
Signed-off-by: Johannes Berg --- net/mac80211/iface.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index f820098e6a70..5b67b44e3f89 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -2258,7 +2258,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *tmp; LIST_HEAD(unreg_list); - LIST_HEAD(wdev_list); ASSERT_RTNL(); @@ -2281,23 +2280,18 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) ieee80211_txq_teardown_flows(local); mutex_lock(&local->iflist_mtx); - list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { - list_del(&sdata->list); - - if (sdata->dev) - unregister_netdevice_queue(sdata->dev, &unreg_list); - else - list_add(&sdata->list, &wdev_list); - } + list_splice_init(&local->interfaces, &unreg_list); mutex_unlock(&local->iflist_mtx); - unregister_netdevice_many(&unreg_list); - wiphy_lock(local->hw.wiphy); - list_for_each_entry_safe(sdata, tmp, &wdev_list, list) { + list_for_each_entry_safe(sdata, tmp, &unreg_list, list) { + bool netdev = sdata->dev; + list_del(&sdata->list); cfg80211_unregister_wdev(&sdata->wdev); - kfree(sdata); + + if (!netdev) + kfree(sdata); } wiphy_unlock(local->hw.wiphy); } -- cgit v1.2.3 From 1444f58931ef5227532cf5436bb55c1dd511d9a2 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:28 +0200 Subject: wifi: mac80211: use wiphy work for SMPS SMPS requests are per link, and currently there's a potential deadlock with canceling. Use the new wiphy work to handle SMPS instead, so that the cancel cannot deadlock. Signed-off-by: Johannes Berg --- net/mac80211/ht.c | 5 +++-- net/mac80211/ieee80211_i.h | 2 +- net/mac80211/mlme.c | 10 ++++++---- net/mac80211/tdls.c | 4 ++-- 4 files changed, 12 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 5315ab750280..33729870ad8a 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -9,7 +9,7 @@ * Copyright 2007, Michael Wu * Copyright 2007-2010, Intel Corporation * Copyright 2017 Intel Deutschland GmbH - * Copyright(c) 2020-2022 Intel Corporation + * Copyright(c) 2020-2023 Intel Corporation */ #include @@ -602,7 +602,8 @@ void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id, goto out; link->u.mgd.driver_smps_mode = smps_mode; - ieee80211_queue_work(&sdata->local->hw, &link->u.mgd.request_smps_work); + wiphy_work_queue(sdata->local->hw.wiphy, + &link->u.mgd.request_smps_work); out: rcu_read_unlock(); } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 1cefe69706ef..0ecb725f5307 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -921,7 +921,7 @@ struct ieee80211_link_data_managed { struct timer_list chswitch_timer; struct work_struct chswitch_work; - struct work_struct request_smps_work; + struct wiphy_work request_smps_work; bool beacon_crc_valid; u32 beacon_crc; struct ewma_beacon_signal ave_beacon_signal; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 03251a725d65..c4a1bf6b6fb4 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -6538,7 +6538,8 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) sdata_unlock(sdata); } -static void ieee80211_request_smps_mgd_work(struct work_struct *work) +static void ieee80211_request_smps_mgd_work(struct wiphy *wiphy, + struct wiphy_work *work) { struct ieee80211_link_data *link = container_of(work, struct 
ieee80211_link_data, @@ -6588,8 +6589,8 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link) link->u.mgd.conn_flags = 0; link->conf->bssid = link->u.mgd.bssid; - INIT_WORK(&link->u.mgd.request_smps_work, - ieee80211_request_smps_mgd_work); + wiphy_work_init(&link->u.mgd.request_smps_work, + ieee80211_request_smps_mgd_work); if (local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS) link->u.mgd.req_smps = IEEE80211_SMPS_AUTOMATIC; else @@ -7552,7 +7553,8 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, void ieee80211_mgd_stop_link(struct ieee80211_link_data *link) { - cancel_work_sync(&link->u.mgd.request_smps_work); + wiphy_work_cancel(link->sdata->local->hw.wiphy, + &link->u.mgd.request_smps_work); cancel_work_sync(&link->u.mgd.chswitch_work); } diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index b255f3b5bf01..52c47674a554 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -1431,8 +1431,8 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, } if (ret == 0) - ieee80211_queue_work(&sdata->local->hw, - &sdata->deflink.u.mgd.request_smps_work); + wiphy_work_queue(sdata->local->hw.wiphy, + &sdata->deflink.u.mgd.request_smps_work); mutex_unlock(&local->mtx); sdata_unlock(sdata); -- cgit v1.2.3 From ec3252bff7b60fd2ee1a51a11054c54d63435ed2 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:29 +0200 Subject: wifi: mac80211: use wiphy work for channel switch Channel switch obviously must be handled per link, and we have a (potential) deadlock when canceling that work. Use the new delayed wiphy work to handle this instead and get rid of the explicit timer that way too. Signed-off-by: Johannes Berg --- net/mac80211/chan.c | 4 ++-- net/mac80211/ieee80211_i.h | 3 +-- net/mac80211/mlme.c | 40 +++++++++++++++++----------------------- 3 files changed, 20 insertions(+), 27 deletions(-) (limited to 'net') diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 4d50e95dd75f..168bf3edd4b4 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -1205,8 +1205,8 @@ ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link) &link->csa_finalize_work); break; case NL80211_IFTYPE_STATION: - ieee80211_queue_work(&sdata->local->hw, - &link->u.mgd.chswitch_work); + wiphy_delayed_work_queue(sdata->local->hw.wiphy, + &link->u.mgd.chswitch_work, 0); break; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_AP_VLAN: diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 0ecb725f5307..b39a923cdd0e 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -918,8 +918,7 @@ struct ieee80211_link_data_managed { bool csa_waiting_bcn; bool csa_ignored_same_chan; - struct timer_list chswitch_timer; - struct work_struct chswitch_work; + struct wiphy_delayed_work chswitch_work; struct wiphy_work request_smps_work; bool beacon_crc_valid; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index c4a1bf6b6fb4..0e68ede2a9f3 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1679,10 +1679,12 @@ void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, } /* spectrum management related things */ -static void ieee80211_chswitch_work(struct work_struct *work) +static void ieee80211_chswitch_work(struct wiphy *wiphy, + struct wiphy_work *work) { struct ieee80211_link_data *link = - container_of(work, struct ieee80211_link_data, u.mgd.chswitch_work); + container_of(work, struct ieee80211_link_data, + u.mgd.chswitch_work.work); struct ieee80211_sub_if_data 
*sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; @@ -1802,21 +1804,13 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) ieee80211_queue_work(&sdata->local->hw, &ifmgd->csa_connection_drop_work); } else { - ieee80211_queue_work(&sdata->local->hw, - &sdata->deflink.u.mgd.chswitch_work); + wiphy_delayed_work_queue(sdata->local->hw.wiphy, + &sdata->deflink.u.mgd.chswitch_work, + 0); } } EXPORT_SYMBOL(ieee80211_chswitch_done); -static void ieee80211_chswitch_timer(struct timer_list *t) -{ - struct ieee80211_link_data *link = - from_timer(link, t, u.mgd.chswitch_timer); - - ieee80211_queue_work(&link->sdata->local->hw, - &link->u.mgd.chswitch_work); -} - static void ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link) { @@ -1860,6 +1854,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link, struct ieee80211_csa_ie csa_ie; struct ieee80211_channel_switch ch_switch; struct ieee80211_bss *bss; + unsigned long timeout; int res; sdata_assert_lock(sdata); @@ -2003,12 +1998,11 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link, } /* channel switch handled in software */ - if (csa_ie.count <= 1) - ieee80211_queue_work(&local->hw, &link->u.mgd.chswitch_work); - else - mod_timer(&link->u.mgd.chswitch_timer, - TU_TO_EXP_TIME((csa_ie.count - 1) * - cbss->beacon_interval)); + timeout = TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) * + cbss->beacon_interval); + wiphy_delayed_work_queue(local->hw.wiphy, + &link->u.mgd.chswitch_work, + timeout); return; lock_and_drop_connection: mutex_lock(&local->mtx); @@ -3030,7 +3024,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, del_timer_sync(&sdata->u.mgd.conn_mon_timer); del_timer_sync(&sdata->u.mgd.bcn_mon_timer); del_timer_sync(&sdata->u.mgd.timer); - del_timer_sync(&sdata->deflink.u.mgd.chswitch_timer); sdata->vif.bss_conf.dtim_period = 0; sdata->vif.bss_conf.beacon_rate = NULL; @@ -6596,8 +6589,8 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link) else link->u.mgd.req_smps = IEEE80211_SMPS_OFF; - INIT_WORK(&link->u.mgd.chswitch_work, ieee80211_chswitch_work); - timer_setup(&link->u.mgd.chswitch_timer, ieee80211_chswitch_timer, 0); + wiphy_delayed_work_init(&link->u.mgd.chswitch_work, + ieee80211_chswitch_work); if (sdata->u.mgd.assoc_data) ether_addr_copy(link->conf->addr, @@ -7555,7 +7548,8 @@ void ieee80211_mgd_stop_link(struct ieee80211_link_data *link) { wiphy_work_cancel(link->sdata->local->hw.wiphy, &link->u.mgd.request_smps_work); - cancel_work_sync(&link->u.mgd.chswitch_work); + wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy, + &link->u.mgd.chswitch_work); } void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) -- cgit v1.2.3 From 87351d09261308472ddb6d700e182c34db4fce7a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:30 +0200 Subject: wifi: mac80211: ibss: move disconnect to wiphy work Move the IBSS disconnect work to be a wiphy work. 
Signed-off-by: Johannes Berg --- net/mac80211/ibss.c | 14 ++++++++------ net/mac80211/ieee80211_i.h | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 19017810024b..e1900077bc4b 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -561,7 +561,8 @@ void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; - cancel_work_sync(&ifibss->csa_connection_drop_work); + wiphy_work_cancel(sdata->local->hw.wiphy, + &ifibss->csa_connection_drop_work); } static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta) @@ -730,7 +731,8 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata) mutex_unlock(&local->mtx); } -static void ieee80211_csa_connection_drop_work(struct work_struct *work) +static void ieee80211_csa_connection_drop_work(struct wiphy *wiphy, + struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, @@ -896,8 +898,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, return true; disconnect: ibss_dbg(sdata, "Can't handle channel switch, disconnect\n"); - ieee80211_queue_work(&sdata->local->hw, - &ifibss->csa_connection_drop_work); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifibss->csa_connection_drop_work); ieee80211_ibss_csa_mark_radar(sdata); @@ -1733,8 +1735,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) timer_setup(&ifibss->timer, ieee80211_ibss_timer, 0); INIT_LIST_HEAD(&ifibss->incomplete_stations); spin_lock_init(&ifibss->incomplete_lock); - INIT_WORK(&ifibss->csa_connection_drop_work, - ieee80211_csa_connection_drop_work); + wiphy_work_init(&ifibss->csa_connection_drop_work, + ieee80211_csa_connection_drop_work); } /* scan finished notification */ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index b39a923cdd0e..78ac71b66fce 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -553,7 +553,7 @@ struct ieee80211_if_managed { struct ieee80211_if_ibss { struct timer_list timer; - struct work_struct csa_connection_drop_work; + struct wiphy_work csa_connection_drop_work; unsigned long last_scan_completed; -- cgit v1.2.3 From 4b8d43f1137cb9f6e8bb3b77251ad9429ab7ef34 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:31 +0200 Subject: wifi: mac80211: mlme: move disconnects to wiphy work Move the beacon loss work that might cause a disconnect and the CSA disconnect work to be wiphy work, so we hold the wiphy lock for them. 
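A sketch (not part of any of these patches) of the cancellation semantics these mac80211 conversions rely on, and that the SMPS and channel-switch patches above describe as avoiding a deadlock; my_teardown is a hypothetical helper:

  #include <net/cfg80211.h>

  static void my_teardown(struct wiphy *wiphy, struct wiphy_work *work)
  {
          wiphy_lock(wiphy);
          /*
           * With a plain work_struct whose handler itself takes
           * wiphy_lock(), calling cancel_work_sync() here could deadlock:
           * we would wait for a handler that is blocked on the lock we
           * hold. A wiphy_work handler only ever runs with this lock
           * held, so a non-waiting cancel under the lock is sufficient
           * and safe.
           */
          wiphy_work_cancel(wiphy, work);
          wiphy_unlock(wiphy);
  }
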
Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 2 +- net/mac80211/ieee80211_i.h | 4 ++-- net/mac80211/main.c | 3 ++- net/mac80211/mlme.c | 49 +++++++++++++++++++++++++--------------------- 4 files changed, 32 insertions(+), 26 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index cf090abda400..1b78a2ae7a83 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -3590,7 +3590,7 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_t sdata->deflink.csa_block_tx = block_tx; sdata_info(sdata, "channel switch failed, disconnecting\n"); - ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); + wiphy_work_queue(local->hw.wiphy, &ifmgd->csa_connection_drop_work); } EXPORT_SYMBOL(ieee80211_channel_switch_disconnect); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 78ac71b66fce..f918e73469a7 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -466,8 +466,8 @@ struct ieee80211_if_managed { struct timer_list conn_mon_timer; struct timer_list bcn_mon_timer; struct work_struct monitor_work; - struct work_struct beacon_connection_loss_work; - struct work_struct csa_connection_drop_work; + struct wiphy_work beacon_connection_loss_work; + struct wiphy_work csa_connection_drop_work; unsigned long beacon_timeout; unsigned long probe_timeout; diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 3db178b1d50c..24315d7b3126 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -364,7 +364,8 @@ static void ieee80211_restart_work(struct work_struct *work) * The exception is ieee80211_chswitch_done. * Then we can have a race... */ - cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work); + wiphy_work_cancel(local->hw.wiphy, + &sdata->u.mgd.csa_connection_drop_work); if (sdata->vif.bss_conf.csa_active) { sdata_lock(sdata); ieee80211_sta_connection_lost(sdata, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 0e68ede2a9f3..3827b5dcdb03 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1724,8 +1724,8 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy, sdata_info(sdata, "failed to use reserved channel context, disconnecting (err=%d)\n", ret); - ieee80211_queue_work(&sdata->local->hw, - &ifmgd->csa_connection_drop_work); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); goto out; } @@ -1736,8 +1736,8 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy, &link->csa_chandef)) { sdata_info(sdata, "failed to finalize channel switch, disconnecting\n"); - ieee80211_queue_work(&sdata->local->hw, - &ifmgd->csa_connection_drop_work); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); goto out; } @@ -1781,8 +1781,8 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link) if (ret) { sdata_info(sdata, "driver post channel switch failed, disconnecting\n"); - ieee80211_queue_work(&local->hw, - &ifmgd->csa_connection_drop_work); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); return; } @@ -1801,8 +1801,8 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) if (!success) { sdata_info(sdata, "driver channel switch failed, disconnecting\n"); - ieee80211_queue_work(&sdata->local->hw, - &ifmgd->csa_connection_drop_work); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); } else { wiphy_delayed_work_queue(sdata->local->hw.wiphy, &sdata->deflink.u.mgd.chswitch_work, @@ -2018,7 +2018,8 @@ 
ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link, link->conf->csa_active = true; link->csa_block_tx = csa_ie.mode; - ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); mutex_unlock(&local->chanctx_mtx); mutex_unlock(&local->mtx); } @@ -3415,7 +3416,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) sdata_unlock(sdata); } -static void ieee80211_beacon_connection_loss_work(struct work_struct *work) +static void ieee80211_beacon_connection_loss_work(struct wiphy *wiphy, + struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, @@ -3440,7 +3442,8 @@ static void ieee80211_beacon_connection_loss_work(struct work_struct *work) } } -static void ieee80211_csa_connection_drop_work(struct work_struct *work) +static void ieee80211_csa_connection_drop_work(struct wiphy *wiphy, + struct wiphy_work *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, @@ -3457,7 +3460,7 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif) trace_api_beacon_loss(sdata); sdata->u.mgd.connection_loss = false; - ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); + wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work); } EXPORT_SYMBOL(ieee80211_beacon_loss); @@ -3469,7 +3472,7 @@ void ieee80211_connection_loss(struct ieee80211_vif *vif) trace_api_connection_loss(sdata); sdata->u.mgd.connection_loss = true; - ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); + wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work); } EXPORT_SYMBOL(ieee80211_connection_loss); @@ -3485,7 +3488,7 @@ void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect) sdata->u.mgd.driver_disconnect = true; sdata->u.mgd.reconnect = reconnect; - ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work); + wiphy_work_queue(hw->wiphy, &sdata->u.mgd.beacon_connection_loss_work); } EXPORT_SYMBOL(ieee80211_disconnect); @@ -6374,8 +6377,8 @@ static void ieee80211_sta_bcn_mon_timer(struct timer_list *t) return; sdata->u.mgd.connection_loss = false; - ieee80211_queue_work(&sdata->local->hw, - &sdata->u.mgd.beacon_connection_loss_work); + wiphy_work_queue(sdata->local->hw.wiphy, + &sdata->u.mgd.beacon_connection_loss_work); } static void ieee80211_sta_conn_mon_timer(struct timer_list *t) @@ -6550,10 +6553,10 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); - INIT_WORK(&ifmgd->beacon_connection_loss_work, - ieee80211_beacon_connection_loss_work); - INIT_WORK(&ifmgd->csa_connection_drop_work, - ieee80211_csa_connection_drop_work); + wiphy_work_init(&ifmgd->beacon_connection_loss_work, + ieee80211_beacon_connection_loss_work); + wiphy_work_init(&ifmgd->csa_connection_drop_work, + ieee80211_csa_connection_drop_work); INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work, ieee80211_tdls_peer_del_work); timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0); @@ -7562,8 +7565,10 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) * cancelled when disconnecting. 
*/ cancel_work_sync(&ifmgd->monitor_work); - cancel_work_sync(&ifmgd->beacon_connection_loss_work); - cancel_work_sync(&ifmgd->csa_connection_drop_work); + wiphy_work_cancel(sdata->local->hw.wiphy, + &ifmgd->beacon_connection_loss_work); + wiphy_work_cancel(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); cancel_delayed_work_sync(&ifmgd->tdls_peer_del_work); sdata_lock(sdata); -- cgit v1.2.3 From c88d7178229b7b9482ab4cc0b781aef0f20c3dfb Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:32 +0200 Subject: wifi: cfg80211: move sched scan stop to wiphy work This work can now trivially be converted, it behaves identical either way. Signed-off-by: Johannes Berg --- net/wireless/core.c | 8 +++----- net/wireless/core.h | 2 +- net/wireless/nl80211.c | 3 ++- 3 files changed, 6 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/wireless/core.c b/net/wireless/core.c index ca585e96d535..f59624f008fb 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -365,7 +365,8 @@ static void cfg80211_destroy_iface_wk(struct work_struct *work) rtnl_unlock(); } -static void cfg80211_sched_scan_stop_wk(struct work_struct *work) +static void cfg80211_sched_scan_stop_wk(struct wiphy *wiphy, + struct wiphy_work *work) { struct cfg80211_registered_device *rdev; struct cfg80211_sched_scan_request *req, *tmp; @@ -373,12 +374,10 @@ static void cfg80211_sched_scan_stop_wk(struct work_struct *work) rdev = container_of(work, struct cfg80211_registered_device, sched_scan_stop_wk); - wiphy_lock(&rdev->wiphy); list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) { if (req->nl_owner_dead) cfg80211_stop_sched_scan_req(rdev, req, false); } - wiphy_unlock(&rdev->wiphy); } static void cfg80211_propagate_radar_detect_wk(struct work_struct *work) @@ -541,7 +540,7 @@ use_default_name: device_enable_async_suspend(&rdev->wiphy.dev); INIT_WORK(&rdev->destroy_work, cfg80211_destroy_iface_wk); - INIT_WORK(&rdev->sched_scan_stop_wk, cfg80211_sched_scan_stop_wk); + wiphy_work_init(&rdev->sched_scan_stop_wk, cfg80211_sched_scan_stop_wk); INIT_WORK(&rdev->sched_scan_res_wk, cfg80211_sched_scan_results_wk); INIT_WORK(&rdev->propagate_radar_detect_wk, cfg80211_propagate_radar_detect_wk); @@ -1148,7 +1147,6 @@ void wiphy_unregister(struct wiphy *wiphy) cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); cancel_delayed_work_sync(&rdev->background_cac_done_wk); flush_work(&rdev->destroy_work); - flush_work(&rdev->sched_scan_stop_wk); flush_work(&rdev->propagate_radar_detect_wk); flush_work(&rdev->propagate_cac_done_wk); flush_work(&rdev->mgmt_registrations_update_wk); diff --git a/net/wireless/core.h b/net/wireless/core.h index 435060dad81e..468957a0be24 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -95,7 +95,7 @@ struct cfg80211_registered_device { struct cfg80211_coalesce *coalesce; struct work_struct destroy_work; - struct work_struct sched_scan_stop_wk; + struct wiphy_work sched_scan_stop_wk; struct work_struct sched_scan_res_wk; struct cfg80211_chan_def radar_chandef; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index a52bd739cf84..772671b9bc42 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -19777,7 +19777,8 @@ static int nl80211_netlink_notify(struct notifier_block * nb, list) { if (sched_scan_req->owner_nlportid == notify->portid) { sched_scan_req->nl_owner_dead = true; - schedule_work(&rdev->sched_scan_stop_wk); + wiphy_work_queue(&rdev->wiphy, + &rdev->sched_scan_stop_wk); } } -- cgit v1.2.3 From 
fe0af9fe54d0ff53aa49eef390c8962355b274e2 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2023 14:49:33 +0200 Subject: wifi: cfg80211: move scan done work to wiphy work Move the scan done work to the new wiphy work to simplify the code a bit. Signed-off-by: Johannes Berg --- net/wireless/core.c | 3 +-- net/wireless/core.h | 4 ++-- net/wireless/scan.c | 14 ++++---------- 3 files changed, 7 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/wireless/core.c b/net/wireless/core.c index f59624f008fb..fc449bea39a1 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -527,7 +527,7 @@ use_default_name: spin_lock_init(&rdev->bss_lock); INIT_LIST_HEAD(&rdev->bss_list); INIT_LIST_HEAD(&rdev->sched_scan_req_list); - INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); + wiphy_work_init(&rdev->scan_done_wk, __cfg80211_scan_done); INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk, cfg80211_dfs_channels_update_work); #ifdef CONFIG_CFG80211_WEXT @@ -1141,7 +1141,6 @@ void wiphy_unregister(struct wiphy *wiphy) /* this has nothing to do now but make sure it's gone */ cancel_work_sync(&rdev->wiphy_work); - flush_work(&rdev->scan_done_wk); cancel_work_sync(&rdev->conn_work); flush_work(&rdev->event_work); cancel_delayed_work_sync(&rdev->dfs_update_channels_wk); diff --git a/net/wireless/core.h b/net/wireless/core.h index 468957a0be24..291c6d83d56f 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -75,7 +75,7 @@ struct cfg80211_registered_device { struct sk_buff *scan_msg; struct list_head sched_scan_req_list; time64_t suspend_at; - struct work_struct scan_done_wk; + struct wiphy_work scan_done_wk; struct genl_info *cur_cmd_info; @@ -441,7 +441,7 @@ bool cfg80211_valid_key_idx(struct cfg80211_registered_device *rdev, int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev, struct key_params *params, int key_idx, bool pairwise, const u8 *mac_addr); -void __cfg80211_scan_done(struct work_struct *wk); +void __cfg80211_scan_done(struct wiphy *wiphy, struct wiphy_work *wk); void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool send_message); void cfg80211_add_sched_scan_req(struct cfg80211_registered_device *rdev, diff --git a/net/wireless/scan.c b/net/wireless/scan.c index c501db7bbdb3..ce2104dc05c6 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1004,16 +1004,9 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, nl80211_send_scan_msg(rdev, msg); } -void __cfg80211_scan_done(struct work_struct *wk) +void __cfg80211_scan_done(struct wiphy *wiphy, struct wiphy_work *wk) { - struct cfg80211_registered_device *rdev; - - rdev = container_of(wk, struct cfg80211_registered_device, - scan_done_wk); - - wiphy_lock(&rdev->wiphy); - ___cfg80211_scan_done(rdev, true); - wiphy_unlock(&rdev->wiphy); + ___cfg80211_scan_done(wiphy_to_rdev(wiphy), true); } void cfg80211_scan_done(struct cfg80211_scan_request *request, @@ -1039,7 +1032,8 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, } request->notified = true; - queue_work(cfg80211_wq, &wiphy_to_rdev(request->wiphy)->scan_done_wk); + wiphy_work_queue(request->wiphy, + &wiphy_to_rdev(request->wiphy)->scan_done_wk); } EXPORT_SYMBOL(cfg80211_scan_done); -- cgit v1.2.3 From 55b24334c0f2db55ca059848121939f59133bdb6 Mon Sep 17 00:00:00 2001 From: Justin Chen Date: Wed, 7 Jun 2023 16:14:11 -0700 Subject: ethtool: ioctl: improve error checking for set_wol The netlink version of set_wol checks for not supported wolopts and avoids setting wol when the 
correct wolopt is already set. If we do the same with the ioctl version then we can remove these checks from the driver layer. Reviewed-by: Simon Horman Signed-off-by: Justin Chen Reviewed-by: Florian Fainelli Link: https://lore.kernel.org/r/1686179653-29750-1-git-send-email-justin.chen@broadcom.com Signed-off-by: Jakub Kicinski --- net/ethtool/ioctl.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 6bb778e10461..37b582225854 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1436,15 +1436,25 @@ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) { - struct ethtool_wolinfo wol; + struct ethtool_wolinfo wol, cur_wol; int ret; - if (!dev->ethtool_ops->set_wol) + if (!dev->ethtool_ops->get_wol || !dev->ethtool_ops->set_wol) return -EOPNOTSUPP; + memset(&cur_wol, 0, sizeof(struct ethtool_wolinfo)); + cur_wol.cmd = ETHTOOL_GWOL; + dev->ethtool_ops->get_wol(dev, &cur_wol); + if (copy_from_user(&wol, useraddr, sizeof(wol))) return -EFAULT; + if (wol.wolopts & ~cur_wol.supported) + return -EINVAL; + + if (wol.wolopts == cur_wol.wolopts) + return 0; + ret = dev->ethtool_ops->set_wol(dev, &wol); if (ret) return ret; -- cgit v1.2.3 From 736013292e3ca5ec2aabb32daf72a73b1256ac57 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 7 Jun 2023 21:41:13 +0000 Subject: tcp: let tcp_mtu_probe() build headless packets tcp_mtu_probe() is still copying payload from skbs in the write queue, using skb_copy_bits(), ignoring potential errors. Modern TCP stack wants to only deal with payload found in page frags, as this is a prereq for TCPDirect (host stack might not have access to the payload) Signed-off-by: Eric Dumazet Link: https://lore.kernel.org/r/20230607214113.1992947-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp_output.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index cfe128b81a01..f8ce77ce7c3e 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2319,6 +2319,57 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len) return true; } +static int tcp_clone_payload(struct sock *sk, struct sk_buff *to, + int probe_size) +{ + skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags; + int i, todo, len = 0, nr_frags = 0; + const struct sk_buff *skb; + + if (!sk_wmem_schedule(sk, to->truesize + probe_size)) + return -ENOMEM; + + skb_queue_walk(&sk->sk_write_queue, skb) { + const skb_frag_t *fragfrom = skb_shinfo(skb)->frags; + + if (skb_headlen(skb)) + return -EINVAL; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) { + if (len >= probe_size) + goto commit; + todo = min_t(int, skb_frag_size(fragfrom), + probe_size - len); + len += todo; + if (lastfrag && + skb_frag_page(fragfrom) == skb_frag_page(lastfrag) && + skb_frag_off(fragfrom) == skb_frag_off(lastfrag) + + skb_frag_size(lastfrag)) { + skb_frag_size_add(lastfrag, todo); + continue; + } + if (unlikely(nr_frags == MAX_SKB_FRAGS)) + return -E2BIG; + skb_frag_page_copy(fragto, fragfrom); + skb_frag_off_copy(fragto, fragfrom); + skb_frag_size_set(fragto, todo); + nr_frags++; + lastfrag = fragto++; + } + } +commit: + WARN_ON_ONCE(len != probe_size); + for (i = 0; i < nr_frags; i++) + skb_frag_ref(to, i); + + skb_shinfo(to)->nr_frags = nr_frags; 
+ to->truesize += probe_size; + to->len += probe_size; + to->data_len += probe_size; + __skb_header_release(to); + return 0; +} + /* Create a new MTU probe if we are ready. * MTU probe is regularly attempting to increase the path MTU by * deliberately sending larger packets. This discovers routing @@ -2395,9 +2446,15 @@ static int tcp_mtu_probe(struct sock *sk) return -1; /* We're allowed to probe. Build it now. */ - nskb = tcp_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); + nskb = tcp_stream_alloc_skb(sk, 0, GFP_ATOMIC, false); if (!nskb) return -1; + + /* build the payload, and be prepared to abort if this fails. */ + if (tcp_clone_payload(sk, nskb, probe_size)) { + consume_skb(nskb); + return -1; + } sk_wmem_queued_add(sk, nskb->truesize); sk_mem_charge(sk, nskb->truesize); @@ -2415,7 +2472,6 @@ static int tcp_mtu_probe(struct sock *sk) len = 0; tcp_for_write_queue_from_safe(skb, next, sk) { copy = min_t(int, skb->len, probe_size - len); - skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); if (skb->len <= copy) { /* We've eaten all the data from this skb. -- cgit v1.2.3 From 81840b3b91aad06053ad2712f3da5d0448eeb0e8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 7 Jun 2023 19:19:08 +0100 Subject: tls: Allow MSG_SPLICE_PAGES but treat it as normal sendmsg Allow MSG_SPLICE_PAGES to be specified to sendmsg() but treat it as normal sendmsg for now. This means the data will just be copied until MSG_SPLICE_PAGES is handled. Signed-off-by: David Howells cc: Chuck Lever cc: Boris Pismenny cc: John Fastabend cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/tls/tls_device.c | 3 ++- net/tls/tls_sw.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index a959572a816f..9ef766e41c7a 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -447,7 +447,8 @@ static int tls_push_data(struct sock *sk, long timeo; if (flags & - ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST)) + ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST | + MSG_SPLICE_PAGES)) return -EOPNOTSUPP; if (unlikely(sk->sk_err)) diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 1a53c8f481e9..38acc27a0dd0 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -955,7 +955,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) int pending; if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | - MSG_CMSG_COMPAT)) + MSG_CMSG_COMPAT | MSG_SPLICE_PAGES)) return -EOPNOTSUPP; ret = mutex_lock_interruptible(&tls_ctx->tx_lock); -- cgit v1.2.3 From 2dc334f1a63a8839b88483a3e73c0f27c9c1791c Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 7 Jun 2023 19:19:09 +0100 Subject: splice, net: Use sendmsg(MSG_SPLICE_PAGES) rather than ->sendpage() Replace generic_splice_sendpage() + splice_from_pipe + pipe_to_sendpage() with a net-specific handler, splice_to_socket(), that calls sendmsg() with MSG_SPLICE_PAGES set instead of calling ->sendpage(). MSG_MORE is used to indicate if the sendmsg() is expected to be followed with more data. This allows multiple pipe-buffer pages to be passed in a single call in a BVEC iterator, allowing the processing to be pushed down to a loop in the protocol driver. This helps pave the way for passing multipage folios down too. Protocols that haven't been converted to handle MSG_SPLICE_PAGES yet should just ignore it and do a normal sendmsg() for now - although that may be a bit slower as it may copy everything. 
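A rough userspace illustration (not part of the patch; the 127.0.0.1:9000 listener is an assumption and error handling is trimmed): an ordinary splice() from a pipe to a connected socket is exactly the path that now ends up in splice_to_socket() and sendmsg(MSG_SPLICE_PAGES).

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
        struct sockaddr_in sa = {
                .sin_family = AF_INET,
                .sin_port = htons(9000),
                .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
        };
        int pfd[2];
        int sk = socket(AF_INET, SOCK_STREAM, 0);

        connect(sk, (struct sockaddr *)&sa, sizeof(sa));
        pipe(pfd);
        write(pfd[1], "hello splice\n", 13);

        /* The pipe's buffer pages are handed to the protocol via
         * sendmsg(MSG_SPLICE_PAGES) in splice_to_socket() instead of
         * going through ->sendpage() one page at a time.
         */
        splice(pfd[0], NULL, sk, NULL, 13, 0);

        close(sk);
        return 0;
}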
Signed-off-by: David Howells Reviewed-by: Jakub Kicinski cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- fs/splice.c | 158 +++++++++++++++++++++++++++++++++++++++---------- include/linux/fs.h | 2 - include/linux/splice.h | 2 + net/socket.c | 26 +------- 4 files changed, 131 insertions(+), 57 deletions(-) (limited to 'net') diff --git a/fs/splice.c b/fs/splice.c index 3e06611d19ae..e337630aed64 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -448,30 +449,6 @@ const struct pipe_buf_operations nosteal_pipe_buf_ops = { }; EXPORT_SYMBOL(nosteal_pipe_buf_ops); -/* - * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' - * using sendpage(). Return the number of bytes sent. - */ -static int pipe_to_sendpage(struct pipe_inode_info *pipe, - struct pipe_buffer *buf, struct splice_desc *sd) -{ - struct file *file = sd->u.file; - loff_t pos = sd->pos; - int more; - - if (!likely(file->f_op->sendpage)) - return -EINVAL; - - more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0; - - if (sd->len < sd->total_len && - pipe_occupancy(pipe->head, pipe->tail) > 1) - more |= MSG_SENDPAGE_NOTLAST; - - return file->f_op->sendpage(file, buf->page, buf->offset, - sd->len, &pos, more); -} - static void wakeup_pipe_writers(struct pipe_inode_info *pipe) { smp_mb(); @@ -652,7 +629,7 @@ static void splice_from_pipe_end(struct pipe_inode_info *pipe, struct splice_des * Description: * This function does little more than loop over the pipe and call * @actor to do the actual moving of a single struct pipe_buffer to - * the desired destination. See pipe_to_file, pipe_to_sendpage, or + * the desired destination. See pipe_to_file, pipe_to_sendmsg, or * pipe_to_user. * */ @@ -833,8 +810,9 @@ done: EXPORT_SYMBOL(iter_file_splice_write); +#ifdef CONFIG_NET /** - * generic_splice_sendpage - splice data from a pipe to a socket + * splice_to_socket - splice data from a pipe to a socket * @pipe: pipe to splice from * @out: socket to write to * @ppos: position in @out @@ -846,13 +824,131 @@ EXPORT_SYMBOL(iter_file_splice_write); * is involved. 
* */ -ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out, - loff_t *ppos, size_t len, unsigned int flags) +ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out, + loff_t *ppos, size_t len, unsigned int flags) { - return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage); -} + struct socket *sock = sock_from_file(out); + struct bio_vec bvec[16]; + struct msghdr msg = {}; + ssize_t ret = 0; + size_t spliced = 0; + bool need_wakeup = false; + + pipe_lock(pipe); + + while (len > 0) { + unsigned int head, tail, mask, bc = 0; + size_t remain = len; + + /* + * Check for signal early to make process killable when there + * are always buffers available + */ + ret = -ERESTARTSYS; + if (signal_pending(current)) + break; -EXPORT_SYMBOL(generic_splice_sendpage); + while (pipe_empty(pipe->head, pipe->tail)) { + ret = 0; + if (!pipe->writers) + goto out; + + if (spliced) + goto out; + + ret = -EAGAIN; + if (flags & SPLICE_F_NONBLOCK) + goto out; + + ret = -ERESTARTSYS; + if (signal_pending(current)) + goto out; + + if (need_wakeup) { + wakeup_pipe_writers(pipe); + need_wakeup = false; + } + + pipe_wait_readable(pipe); + } + + head = pipe->head; + tail = pipe->tail; + mask = pipe->ring_size - 1; + + while (!pipe_empty(head, tail)) { + struct pipe_buffer *buf = &pipe->bufs[tail & mask]; + size_t seg; + + if (!buf->len) { + tail++; + continue; + } + + seg = min_t(size_t, remain, buf->len); + seg = min_t(size_t, seg, PAGE_SIZE); + + ret = pipe_buf_confirm(pipe, buf); + if (unlikely(ret)) { + if (ret == -ENODATA) + ret = 0; + break; + } + + bvec_set_page(&bvec[bc++], buf->page, seg, buf->offset); + remain -= seg; + if (seg >= buf->len) + tail++; + if (bc >= ARRAY_SIZE(bvec)) + break; + } + + if (!bc) + break; + + msg.msg_flags = MSG_SPLICE_PAGES; + if (flags & SPLICE_F_MORE) + msg.msg_flags |= MSG_MORE; + if (remain && pipe_occupancy(pipe->head, tail) > 0) + msg.msg_flags |= MSG_MORE; + + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, bc, + len - remain); + ret = sock_sendmsg(sock, &msg); + if (ret <= 0) + break; + + spliced += ret; + len -= ret; + tail = pipe->tail; + while (ret > 0) { + struct pipe_buffer *buf = &pipe->bufs[tail & mask]; + size_t seg = min_t(size_t, ret, buf->len); + + buf->offset += seg; + buf->len -= seg; + ret -= seg; + + if (!buf->len) { + pipe_buf_release(pipe, buf); + tail++; + } + } + + if (tail != pipe->tail) { + pipe->tail = tail; + if (pipe->files) + need_wakeup = true; + } + } + +out: + pipe_unlock(pipe); + if (need_wakeup) + wakeup_pipe_writers(pipe); + return spliced ?: ret; +} +#endif static int warn_unsupported(struct file *file, const char *op) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 133f0640fb24..df92f4b3d122 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2759,8 +2759,6 @@ extern ssize_t generic_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); extern ssize_t iter_file_splice_write(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); -extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, - struct file *out, loff_t *, size_t len, unsigned int flags); extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, loff_t *opos, size_t len, unsigned int flags); diff --git a/include/linux/splice.h b/include/linux/splice.h index a55179fd60fc..991ae318b6eb 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -84,6 +84,8 @@ extern long do_splice(struct file *in, loff_t *off_in, 
extern long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags); +extern ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out, + loff_t *ppos, size_t len, unsigned int flags); /* * for dynamic pipe sizing diff --git a/net/socket.c b/net/socket.c index 3df96e9ba4e2..c4d9104418c8 100644 --- a/net/socket.c +++ b/net/socket.c @@ -57,6 +57,7 @@ #include #include #include +#include #include #include #include @@ -126,8 +127,6 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #endif static int sock_fasync(int fd, struct file *filp, int on); -static ssize_t sock_sendpage(struct file *file, struct page *page, - int offset, size_t size, loff_t *ppos, int more); static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); @@ -162,8 +161,7 @@ static const struct file_operations socket_file_ops = { .mmap = sock_mmap, .release = sock_close, .fasync = sock_fasync, - .sendpage = sock_sendpage, - .splice_write = generic_splice_sendpage, + .splice_write = splice_to_socket, .splice_read = sock_splice_read, .show_fdinfo = sock_show_fdinfo, }; @@ -1066,26 +1064,6 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, } EXPORT_SYMBOL(kernel_recvmsg); -static ssize_t sock_sendpage(struct file *file, struct page *page, - int offset, size_t size, loff_t *ppos, int more) -{ - struct socket *sock; - int flags; - int ret; - - sock = file->private_data; - - flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0; - /* more is a combination of MSG_MORE and MSG_SENDPAGE_NOTLAST */ - flags |= more; - - ret = kernel_sendpage(sock, page, offset, size, flags); - - if (trace_sock_send_length_enabled()) - call_trace_sock_send_length(sock->sk, ret, 0); - return ret; -} - static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) -- cgit v1.2.3 From 2bfc66850952b6921b2033b09729ec59eabbc81d Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 7 Jun 2023 19:19:10 +0100 Subject: splice, net: Add a splice_eof op to file-ops and socket-ops Add an optional method, ->splice_eof(), to allow splice to indicate the premature termination of a splice to struct file_operations and struct proto_ops. This is called if sendfile() or splice() encounters all of the following conditions inside splice_direct_to_actor(): (1) the user did not set SPLICE_F_MORE (splice only), and (2) an EOF condition occurred (->splice_read() returned 0), and (3) we haven't read enough to fulfill the request (ie. len > 0 still), and (4) we have already spliced at least one byte. A further patch will modify the behaviour of SPLICE_F_MORE to always be passed to the actor if either the user set it or we haven't yet read sufficient data to fulfill the request. 
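A hedged userspace sketch of the situation this hook exists for (the socket fd and file name are assumptions; error handling trimmed): sendfile() asks for more bytes than the source file holds, so the internal splice sees EOF before the request is fulfilled.

#include <sys/sendfile.h>
#include <fcntl.h>
#include <unistd.h>

/* 'sk' is assumed to be a connected socket (e.g. kTLS or UDP) and
 * payload.bin is assumed to be shorter than the 1 MiB requested.
 */
static void send_short_file(int sk)
{
        int fd = open("payload.bin", O_RDONLY);
        off_t off = 0;

        /* ->splice_read() returns 0 early (condition 2), len is still > 0
         * (condition 3), some bytes were already spliced (condition 4) and
         * the caller never set SPLICE_F_MORE (condition 1), so
         * splice_direct_to_actor() will now invoke ->splice_eof() to flush
         * the tail that was buffered under an implied MSG_MORE.
         */
        sendfile(sk, fd, &off, 1024 * 1024);
        close(fd);
}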
Suggested-by: Linus Torvalds Link: https://lore.kernel.org/r/CAHk-=wh=V579PDYvkpnTobCLGczbgxpMgGmmhqiTyE34Cpi5Gg@mail.gmail.com/ Signed-off-by: David Howells Reviewed-by: Jakub Kicinski cc: Jens Axboe cc: Christoph Hellwig cc: Al Viro cc: Matthew Wilcox cc: Jan Kara cc: Jeff Layton cc: David Hildenbrand cc: Christian Brauner cc: Chuck Lever cc: Boris Pismenny cc: John Fastabend cc: linux-mm@kvack.org Signed-off-by: Jakub Kicinski --- fs/splice.c | 31 ++++++++++++++++++++++++++++++- include/linux/fs.h | 1 + include/linux/net.h | 1 + include/linux/splice.h | 1 + include/net/sock.h | 1 + net/socket.c | 10 ++++++++++ 6 files changed, 44 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/fs/splice.c b/fs/splice.c index e337630aed64..67dbd85db207 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -969,6 +969,17 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, return out->f_op->splice_write(pipe, out, ppos, len, flags); } +/* + * Indicate to the caller that there was a premature EOF when reading from the + * source and the caller didn't indicate they would be sending more data after + * this. + */ +static void do_splice_eof(struct splice_desc *sd) +{ + if (sd->splice_eof) + sd->splice_eof(sd); +} + /* * Attempt to initiate a splice from a file to a pipe. */ @@ -1068,7 +1079,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, ret = do_splice_to(in, &pos, pipe, len, flags); if (unlikely(ret <= 0)) - goto out_release; + goto read_failure; read_len = ret; sd->total_len = read_len; @@ -1108,6 +1119,15 @@ done: file_accessed(in); return bytes; +read_failure: + /* + * If the user did *not* set SPLICE_F_MORE *and* we didn't hit that + * "use all of len" case that cleared SPLICE_F_MORE, *and* we did a + * "->splice_in()" that returned EOF (ie zero) *and* we have sent at + * least 1 byte *then* we will also do the ->splice_eof() call. 
+ */ + if (ret == 0 && !more && len > 0 && bytes) + do_splice_eof(sd); out_release: /* * If we did an incomplete transfer we must release @@ -1136,6 +1156,14 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, sd->flags); } +static void direct_file_splice_eof(struct splice_desc *sd) +{ + struct file *file = sd->u.file; + + if (file->f_op->splice_eof) + file->f_op->splice_eof(file); +} + /** * do_splice_direct - splices data directly between two files * @in: file to splice from @@ -1161,6 +1189,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, .flags = flags, .pos = *ppos, .u.file = out, + .splice_eof = direct_file_splice_eof, .opos = opos, }; long ret; diff --git a/include/linux/fs.h b/include/linux/fs.h index df92f4b3d122..de2cb1132f07 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1796,6 +1796,7 @@ struct file_operations { int (*flock) (struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); + void (*splice_eof)(struct file *file); int (*setlease)(struct file *, long, struct file_lock **, void **); long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len); diff --git a/include/linux/net.h b/include/linux/net.h index b73ad8e3c212..8defc8f1d82e 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -210,6 +210,7 @@ struct proto_ops { int offset, size_t size, int flags); ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); + void (*splice_eof)(struct socket *sock); int (*set_peek_off)(struct sock *sk, int val); int (*peek_len)(struct socket *sock); diff --git a/include/linux/splice.h b/include/linux/splice.h index 991ae318b6eb..4fab18a6e371 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -38,6 +38,7 @@ struct splice_desc { struct file *file; /* file to read/write */ void *data; /* cookie */ } u; + void (*splice_eof)(struct splice_desc *sd); /* Unexpected EOF handler */ loff_t pos; /* file position */ loff_t *opos; /* sendfile: output position */ size_t num_spliced; /* number of bytes already spliced */ diff --git a/include/net/sock.h b/include/net/sock.h index 6f428a7f3567..2790133b4b76 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1279,6 +1279,7 @@ struct proto { size_t len, int flags, int *addr_len); int (*sendpage)(struct sock *sk, struct page *page, int offset, size_t size, int flags); + void (*splice_eof)(struct socket *sock); int (*bind)(struct sock *sk, struct sockaddr *addr, int addr_len); int (*bind_add)(struct sock *sk, diff --git a/net/socket.c b/net/socket.c index c4d9104418c8..b778fc03c6e0 100644 --- a/net/socket.c +++ b/net/socket.c @@ -130,6 +130,7 @@ static int sock_fasync(int fd, struct file *filp, int on); static ssize_t sock_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); +static void sock_splice_eof(struct file *file); #ifdef CONFIG_PROC_FS static void sock_show_fdinfo(struct seq_file *m, struct file *f) @@ -163,6 +164,7 @@ static const struct file_operations socket_file_ops = { .fasync = sock_fasync, .splice_write = splice_to_socket, .splice_read = sock_splice_read, + .splice_eof = sock_splice_eof, .show_fdinfo = sock_show_fdinfo, }; @@ -1076,6 +1078,14 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos, return sock->ops->splice_read(sock, 
ppos, pipe, len, flags); } +static void sock_splice_eof(struct file *file) +{ + struct socket *sock = file->private_data; + + if (sock->ops->splice_eof) + sock->ops->splice_eof(sock); +} + static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; -- cgit v1.2.3 From df720d288dbb1793e82b6ccbfc670ec871e9def4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 7 Jun 2023 19:19:11 +0100 Subject: tls/sw: Use splice_eof() to flush Allow splice to end a TLS record after prematurely ending a splice/sendfile due to getting an EOF condition (->splice_read() returned 0) after splice had called TLS with a sendmsg() with MSG_MORE set when the user didn't set MSG_MORE. Suggested-by: Linus Torvalds Link: https://lore.kernel.org/r/CAHk-=wh=V579PDYvkpnTobCLGczbgxpMgGmmhqiTyE34Cpi5Gg@mail.gmail.com/ Signed-off-by: David Howells Reviewed-by: Jakub Kicinski cc: Chuck Lever cc: Boris Pismenny cc: John Fastabend cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/tls/tls.h | 1 + net/tls/tls_main.c | 2 ++ net/tls/tls_sw.c | 74 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 77 insertions(+) (limited to 'net') diff --git a/net/tls/tls.h b/net/tls/tls.h index 0672acab2773..4922668fefaa 100644 --- a/net/tls/tls.h +++ b/net/tls/tls.h @@ -97,6 +97,7 @@ void tls_update_rx_zc_capable(struct tls_context *tls_ctx); void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx); void tls_sw_strparser_done(struct tls_context *tls_ctx); int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); +void tls_sw_splice_eof(struct socket *sock); int tls_sw_sendpage_locked(struct sock *sk, struct page *page, int offset, size_t size, int flags); int tls_sw_sendpage(struct sock *sk, struct page *page, diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index e02a0d882ed3..82ec5c654f32 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -957,6 +957,7 @@ static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG] ops[TLS_BASE][TLS_BASE] = *base; ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE]; + ops[TLS_SW ][TLS_BASE].splice_eof = tls_sw_splice_eof; ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked; ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE]; @@ -1027,6 +1028,7 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE]; prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg; + prot[TLS_SW][TLS_BASE].splice_eof = tls_sw_splice_eof; prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage; prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE]; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 38acc27a0dd0..a2fb0256ff1c 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1157,6 +1157,80 @@ send_end: return copied > 0 ? copied : ret; } +/* + * Handle unexpected EOF during splice without SPLICE_F_MORE set. + */ +void tls_sw_splice_eof(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + struct tls_rec *rec; + struct sk_msg *msg_pl; + ssize_t copied = 0; + bool retrying = false; + int ret = 0; + int pending; + + if (!ctx->open_rec) + return; + + mutex_lock(&tls_ctx->tx_lock); + lock_sock(sk); + +retry: + rec = ctx->open_rec; + if (!rec) + goto unlock; + + msg_pl = &rec->msg_plaintext; + + /* Check the BPF advisor and perform transmission. 
*/ + ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA, + &copied, 0); + switch (ret) { + case 0: + case -EAGAIN: + if (retrying) + goto unlock; + retrying = true; + goto retry; + case -EINPROGRESS: + break; + default: + goto unlock; + } + + /* Wait for pending encryptions to get completed */ + spin_lock_bh(&ctx->encrypt_compl_lock); + ctx->async_notify = true; + + pending = atomic_read(&ctx->encrypt_pending); + spin_unlock_bh(&ctx->encrypt_compl_lock); + if (pending) + crypto_wait_req(-EINPROGRESS, &ctx->async_wait); + else + reinit_completion(&ctx->async_wait.completion); + + /* There can be no concurrent accesses, since we have no pending + * encrypt operations + */ + WRITE_ONCE(ctx->async_notify, false); + + if (ctx->async_wait.err) + goto unlock; + + /* Transmit if any encryptions have completed */ + if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { + cancel_delayed_work(&ctx->tx_work.work); + tls_tx_records(sk, 0); + } + +unlock: + release_sock(sk); + mutex_unlock(&tls_ctx->tx_lock); +} + static int tls_sw_do_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { -- cgit v1.2.3 From d4c1e80b0d1bacbbca5184f122b9893c5920c598 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 7 Jun 2023 19:19:12 +0100 Subject: tls/device: Use splice_eof() to flush Allow splice to end a TLS record after prematurely ending a splice/sendfile due to getting an EOF condition (->splice_read() returned 0) after splice had called TLS with a sendmsg() with MSG_MORE set when the user didn't set MSG_MORE. Suggested-by: Linus Torvalds Link: https://lore.kernel.org/r/CAHk-=wh=V579PDYvkpnTobCLGczbgxpMgGmmhqiTyE34Cpi5Gg@mail.gmail.com/ Signed-off-by: David Howells Reviewed-by: Jakub Kicinski cc: Chuck Lever cc: Boris Pismenny cc: John Fastabend cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/tls/tls.h | 1 + net/tls/tls_device.c | 23 +++++++++++++++++++++++ net/tls/tls_main.c | 2 ++ 3 files changed, 26 insertions(+) (limited to 'net') diff --git a/net/tls/tls.h b/net/tls/tls.h index 4922668fefaa..d002c3af1966 100644 --- a/net/tls/tls.h +++ b/net/tls/tls.h @@ -116,6 +116,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, size_t len, unsigned int flags); int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); +void tls_device_splice_eof(struct socket *sock); int tls_device_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); int tls_tx_records(struct sock *sk, int flags); diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 9ef766e41c7a..439be833dcf9 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -590,6 +590,29 @@ out: return rc; } +void tls_device_splice_eof(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct tls_context *tls_ctx = tls_get_ctx(sk); + union tls_iter_offset iter; + struct iov_iter iov_iter = {}; + + if (!tls_is_partially_sent_record(tls_ctx)) + return; + + mutex_lock(&tls_ctx->tx_lock); + lock_sock(sk); + + if (tls_is_partially_sent_record(tls_ctx)) { + iov_iter_bvec(&iov_iter, ITER_SOURCE, NULL, 0, 0); + iter.msg_iter = &iov_iter; + tls_push_data(sk, iter, 0, 0, TLS_RECORD_TYPE_DATA, NULL); + } + + release_sock(sk); + mutex_unlock(&tls_ctx->tx_lock); +} + int tls_device_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 82ec5c654f32..7b9c83dd7de2 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -1044,10 +1044,12 @@ static 
void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], #ifdef CONFIG_TLS_DEVICE prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE]; prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg; + prot[TLS_HW][TLS_BASE].splice_eof = tls_device_splice_eof; prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage; prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW]; prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg; + prot[TLS_HW][TLS_SW].splice_eof = tls_device_splice_eof; prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage; prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW]; -- cgit v1.2.3 From 1d7e4538a5463faa0b0e26a7a7b6bd68c7dfdd78 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 7 Jun 2023 19:19:13 +0100 Subject: ipv4, ipv6: Use splice_eof() to flush Allow splice to undo the effects of MSG_MORE after prematurely ending a splice/sendfile due to getting an EOF condition (->splice_read() returned 0) after splice had called sendmsg() with MSG_MORE set when the user didn't set MSG_MORE. For UDP, a pending packet will not be emitted if the socket is closed before it is flushed; with this change, it be flushed by ->splice_eof(). For TCP, it's not clear that MSG_MORE is actually effective. Suggested-by: Linus Torvalds Link: https://lore.kernel.org/r/CAHk-=wh=V579PDYvkpnTobCLGczbgxpMgGmmhqiTyE34Cpi5Gg@mail.gmail.com/ Signed-off-by: David Howells cc: Kuniyuki Iwashima cc: Willem de Bruijn cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- include/net/inet_common.h | 1 + include/net/tcp.h | 1 + include/net/udp.h | 1 + net/ipv4/af_inet.c | 18 ++++++++++++++++++ net/ipv4/tcp.c | 16 ++++++++++++++++ net/ipv4/tcp_ipv4.c | 1 + net/ipv4/udp.c | 16 ++++++++++++++++ net/ipv6/af_inet6.c | 1 + net/ipv6/tcp_ipv6.c | 1 + net/ipv6/udp.c | 15 +++++++++++++++ 10 files changed, 71 insertions(+) (limited to 'net') diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 77f4b0ef5b92..a75333342c4e 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -35,6 +35,7 @@ void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk); int inet_send_prepare(struct sock *sk); int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); +void inet_splice_eof(struct socket *sock); ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, diff --git a/include/net/tcp.h b/include/net/tcp.h index 68990a8f556a..49611af31bb7 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -327,6 +327,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size); int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied, size_t size, struct ubuf_info *uarg); +void tcp_splice_eof(struct socket *sock); int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, diff --git a/include/net/udp.h b/include/net/udp.h index 5cad44318d71..4ed0b47c5582 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -278,6 +278,7 @@ int udp_get_port(struct sock *sk, unsigned short snum, int udp_err(struct sk_buff *, u32); int udp_abort(struct sock *sk, int err); int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); +void udp_splice_eof(struct socket *sock); int udp_push_pending_frames(struct sock *sk); void udp_flush_pending_frames(struct 
sock *sk); int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index b5735b3551cf..fd233c4195ac 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -831,6 +831,21 @@ int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) } EXPORT_SYMBOL(inet_sendmsg); +void inet_splice_eof(struct socket *sock) +{ + const struct proto *prot; + struct sock *sk = sock->sk; + + if (unlikely(inet_send_prepare(sk))) + return; + + /* IPV6_ADDRFORM can change sk->sk_prot under us. */ + prot = READ_ONCE(sk->sk_prot); + if (prot->splice_eof) + prot->splice_eof(sock); +} +EXPORT_SYMBOL_GPL(inet_splice_eof); + ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) { @@ -1050,6 +1065,7 @@ const struct proto_ops inet_stream_ops = { #ifdef CONFIG_MMU .mmap = tcp_mmap, #endif + .splice_eof = inet_splice_eof, .sendpage = inet_sendpage, .splice_read = tcp_splice_read, .read_sock = tcp_read_sock, @@ -1084,6 +1100,7 @@ const struct proto_ops inet_dgram_ops = { .read_skb = udp_read_skb, .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, + .splice_eof = inet_splice_eof, .sendpage = inet_sendpage, .set_peek_off = sk_set_peek_off, #ifdef CONFIG_COMPAT @@ -1115,6 +1132,7 @@ static const struct proto_ops inet_sockraw_ops = { .sendmsg = inet_sendmsg, .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, + .splice_eof = inet_splice_eof, .sendpage = inet_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet_compat_ioctl, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 53b7751b68e1..09f03221a6f1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1371,6 +1371,22 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) } EXPORT_SYMBOL(tcp_sendmsg); +void tcp_splice_eof(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct tcp_sock *tp = tcp_sk(sk); + int mss_now, size_goal; + + if (!tcp_write_queue_tail(sk)) + return; + + lock_sock(sk); + mss_now = tcp_send_mss(sk, &size_goal, 0); + tcp_push(sk, 0, mss_now, tp->nonagle, size_goal); + release_sock(sk); +} +EXPORT_SYMBOL_GPL(tcp_splice_eof); + /* * Handle reading urgent data. 
BSD has very simple semantics for * this, no blocking and very strange errors 8) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 53e9ce2f05bb..84a5d557dc1a 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -3116,6 +3116,7 @@ struct proto tcp_prot = { .keepalive = tcp_set_keepalive, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, + .splice_eof = tcp_splice_eof, .sendpage = tcp_sendpage, .backlog_rcv = tcp_v4_do_rcv, .release_cb = tcp_release_cb, diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index fd3dae081f3a..df5e407286d7 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1324,6 +1324,21 @@ do_confirm: } EXPORT_SYMBOL(udp_sendmsg); +void udp_splice_eof(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct udp_sock *up = udp_sk(sk); + + if (!up->pending || READ_ONCE(up->corkflag)) + return; + + lock_sock(sk); + if (up->pending && !READ_ONCE(up->corkflag)) + udp_push_pending_frames(sk); + release_sock(sk); +} +EXPORT_SYMBOL_GPL(udp_splice_eof); + int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { @@ -2918,6 +2933,7 @@ struct proto udp_prot = { .getsockopt = udp_getsockopt, .sendmsg = udp_sendmsg, .recvmsg = udp_recvmsg, + .splice_eof = udp_splice_eof, .sendpage = udp_sendpage, .release_cb = ip4_datagram_release_cb, .hash = udp_lib_hash, diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 2bbf13216a3d..564942bee067 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -695,6 +695,7 @@ const struct proto_ops inet6_stream_ops = { #ifdef CONFIG_MMU .mmap = tcp_mmap, #endif + .splice_eof = inet_splice_eof, .sendpage = inet_sendpage, .sendmsg_locked = tcp_sendmsg_locked, .sendpage_locked = tcp_sendpage_locked, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d657713d1c71..c17c8ff94b79 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -2150,6 +2150,7 @@ struct proto tcpv6_prot = { .keepalive = tcp_set_keepalive, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, + .splice_eof = tcp_splice_eof, .sendpage = tcp_sendpage, .backlog_rcv = tcp_v6_do_rcv, .release_cb = tcp_release_cb, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index e5a337e6b970..317b01c9bc39 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1653,6 +1653,20 @@ do_confirm: } EXPORT_SYMBOL(udpv6_sendmsg); +static void udpv6_splice_eof(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct udp_sock *up = udp_sk(sk); + + if (!up->pending || READ_ONCE(up->corkflag)) + return; + + lock_sock(sk); + if (up->pending && !READ_ONCE(up->corkflag)) + udp_v6_push_pending_frames(sk); + release_sock(sk); +} + void udpv6_destroy_sock(struct sock *sk) { struct udp_sock *up = udp_sk(sk); @@ -1764,6 +1778,7 @@ struct proto udpv6_prot = { .getsockopt = udpv6_getsockopt, .sendmsg = udpv6_sendmsg, .recvmsg = udpv6_recvmsg, + .splice_eof = udpv6_splice_eof, .release_cb = ip6_datagram_release_cb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, -- cgit v1.2.3 From 951ace9951382a90a56e98590e0b9b651e7579b4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 7 Jun 2023 19:19:15 +0100 Subject: kcm: Use splice_eof() to flush Allow splice to undo the effects of MSG_MORE after prematurely ending a splice/sendfile due to getting an EOF condition (->splice_read() returned 0) after splice had called sendmsg() with MSG_MORE set when the user didn't set MSG_MORE. 
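The ->splice_eof() handlers added across this series all share the same shape; condensed into one sketch (proto_flush() is only a placeholder for the protocol's real flush primitive, e.g. kcm_write_msgs() or udp_push_pending_frames()):

static void example_splice_eof(struct socket *sock)
{
        struct sock *sk = sock->sk;

        /* Nothing queued means nothing to flush. */
        if (skb_queue_empty_lockless(&sk->sk_write_queue))
                return;

        lock_sock(sk);
        proto_flush(sk);        /* placeholder: the protocol's flush call */
        release_sock(sk);
}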
Suggested-by: Linus Torvalds Link: https://lore.kernel.org/r/CAHk-=wh=V579PDYvkpnTobCLGczbgxpMgGmmhqiTyE34Cpi5Gg@mail.gmail.com/ Signed-off-by: David Howells cc: Tom Herbert cc: Tom Herbert cc: Cong Wang cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/kcm/kcmsock.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'net') diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index ba22af16b96d..7dee74430b59 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -968,6 +968,19 @@ out_error: return err; } +static void kcm_splice_eof(struct socket *sock) +{ + struct sock *sk = sock->sk; + struct kcm_sock *kcm = kcm_sk(sk); + + if (skb_queue_empty_lockless(&sk->sk_write_queue)) + return; + + lock_sock(sk); + kcm_write_msgs(kcm); + release_sock(sk); +} + static ssize_t kcm_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) @@ -1773,6 +1786,7 @@ static const struct proto_ops kcm_dgram_ops = { .sendmsg = kcm_sendmsg, .recvmsg = kcm_recvmsg, .mmap = sock_no_mmap, + .splice_eof = kcm_splice_eof, .sendpage = kcm_sendpage, }; @@ -1794,6 +1808,7 @@ static const struct proto_ops kcm_seqpacket_ops = { .sendmsg = kcm_sendmsg, .recvmsg = kcm_recvmsg, .mmap = sock_no_mmap, + .splice_eof = kcm_splice_eof, .sendpage = kcm_sendpage, .splice_read = kcm_splice_read, }; -- cgit v1.2.3 From fe1e81d4f73b6cbaed4fcc476960d26770642842 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 7 Jun 2023 19:19:17 +0100 Subject: tls/sw: Support MSG_SPLICE_PAGES Make TLS's sendmsg() support MSG_SPLICE_PAGES. This causes pages to be spliced from the source iterator if possible. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. Signed-off-by: David Howells cc: Chuck Lever cc: Boris Pismenny cc: John Fastabend cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/tls/tls_sw.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) (limited to 'net') diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index a2fb0256ff1c..2d2bb933d2a6 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -931,6 +931,35 @@ static int tls_sw_push_pending_record(struct sock *sk, int flags) &copied, flags); } +static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg, + struct sk_msg *msg_pl, size_t try_to_copy, + ssize_t *copied) +{ + struct page *page = NULL, **pages = &page; + + do { + ssize_t part; + size_t off; + + part = iov_iter_extract_pages(&msg->msg_iter, &pages, + try_to_copy, 1, 0, &off); + if (part <= 0) + return part ?: -EIO; + + if (WARN_ON_ONCE(!sendpage_ok(page))) { + iov_iter_revert(&msg->msg_iter, part); + return -EIO; + } + + sk_msg_page_add(msg_pl, page, part, off); + sk_mem_charge(sk, part); + *copied += part; + try_to_copy -= part; + } while (try_to_copy && !sk_msg_full(msg_pl)); + + return 0; +} + int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); @@ -1020,6 +1049,17 @@ alloc_encrypted: full_record = true; } + if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) { + ret = tls_sw_sendmsg_splice(sk, msg, msg_pl, + try_to_copy, &copied); + if (ret < 0) + goto send_end; + tls_ctx->pending_open_record_frags = true; + if (full_record || eor || sk_msg_full(msg_pl)) + goto copied; + continue; + } + if (!is_kvec && (full_record || eor) && !async_capable) { u32 first = msg_pl->sg.end; @@ -1084,6 +1124,7 @@ fallback_to_reg_send: */ 
tls_ctx->pending_open_record_frags = true; copied += try_to_copy; +copied: if (full_record || eor) { ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, record_type, &copied, -- cgit v1.2.3 From 45e5be844ab6b2845b6d2935b61278c5ecae7d38 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 7 Jun 2023 19:19:18 +0100 Subject: tls/sw: Convert tls_sw_sendpage() to use MSG_SPLICE_PAGES Convert tls_sw_sendpage() and tls_sw_sendpage_locked() to use sendmsg() with MSG_SPLICE_PAGES rather than directly splicing in the pages itself. [!] Note that tls_sw_sendpage_locked() appears to have the wrong locking upstream. I think the caller will only hold the socket lock, but it should hold tls_ctx->tx_lock too. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. Signed-off-by: David Howells Reviewed-by: Jakub Kicinski cc: Chuck Lever cc: Boris Pismenny cc: John Fastabend cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/tls/tls_sw.c | 173 +++++++++++-------------------------------------------- 1 file changed, 35 insertions(+), 138 deletions(-) (limited to 'net') diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 2d2bb933d2a6..319f61590d2c 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -960,7 +960,8 @@ static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg, return 0; } -int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) +static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg, + size_t size) { long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); struct tls_context *tls_ctx = tls_get_ctx(sk); @@ -983,15 +984,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) int ret = 0; int pending; - if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | - MSG_CMSG_COMPAT | MSG_SPLICE_PAGES)) - return -EOPNOTSUPP; - - ret = mutex_lock_interruptible(&tls_ctx->tx_lock); - if (ret) - return ret; - lock_sock(sk); - if (unlikely(msg->msg_controllen)) { ret = tls_process_cmsg(sk, msg, &record_type); if (ret) { @@ -1192,10 +1184,27 @@ trim_sgl: send_end: ret = sk_stream_error(sk, msg->msg_flags, ret); + return copied > 0 ? copied : ret; +} +int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + int ret; + + if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | + MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | + MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) + return -EOPNOTSUPP; + + ret = mutex_lock_interruptible(&tls_ctx->tx_lock); + if (ret) + return ret; + lock_sock(sk); + ret = tls_sw_sendmsg_locked(sk, msg, size); release_sock(sk); mutex_unlock(&tls_ctx->tx_lock); - return copied > 0 ? copied : ret; + return ret; } /* @@ -1272,151 +1281,39 @@ unlock: mutex_unlock(&tls_ctx->tx_lock); } -static int tls_sw_do_sendpage(struct sock *sk, struct page *page, - int offset, size_t size, int flags) -{ - long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); - struct tls_prot_info *prot = &tls_ctx->prot_info; - unsigned char record_type = TLS_RECORD_TYPE_DATA; - struct sk_msg *msg_pl; - struct tls_rec *rec; - int num_async = 0; - ssize_t copied = 0; - bool full_record; - int record_room; - int ret = 0; - bool eor; - - eor = !(flags & MSG_SENDPAGE_NOTLAST); - sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); - - /* Call the sk_stream functions to manage the sndbuf mem. 
*/ - while (size > 0) { - size_t copy, required_size; - - if (sk->sk_err) { - ret = -sk->sk_err; - goto sendpage_end; - } - - if (ctx->open_rec) - rec = ctx->open_rec; - else - rec = ctx->open_rec = tls_get_rec(sk); - if (!rec) { - ret = -ENOMEM; - goto sendpage_end; - } - - msg_pl = &rec->msg_plaintext; - - full_record = false; - record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size; - copy = size; - if (copy >= record_room) { - copy = record_room; - full_record = true; - } - - required_size = msg_pl->sg.size + copy + prot->overhead_size; - - if (!sk_stream_memory_free(sk)) - goto wait_for_sndbuf; -alloc_payload: - ret = tls_alloc_encrypted_msg(sk, required_size); - if (ret) { - if (ret != -ENOSPC) - goto wait_for_memory; - - /* Adjust copy according to the amount that was - * actually allocated. The difference is due - * to max sg elements limit - */ - copy -= required_size - msg_pl->sg.size; - full_record = true; - } - - sk_msg_page_add(msg_pl, page, copy, offset); - sk_mem_charge(sk, copy); - - offset += copy; - size -= copy; - copied += copy; - - tls_ctx->pending_open_record_frags = true; - if (full_record || eor || sk_msg_full(msg_pl)) { - ret = bpf_exec_tx_verdict(msg_pl, sk, full_record, - record_type, &copied, flags); - if (ret) { - if (ret == -EINPROGRESS) - num_async++; - else if (ret == -ENOMEM) - goto wait_for_memory; - else if (ret != -EAGAIN) { - if (ret == -ENOSPC) - ret = 0; - goto sendpage_end; - } - } - } - continue; -wait_for_sndbuf: - set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); -wait_for_memory: - ret = sk_stream_wait_memory(sk, &timeo); - if (ret) { - if (ctx->open_rec) - tls_trim_both_msgs(sk, msg_pl->sg.size); - goto sendpage_end; - } - - if (ctx->open_rec) - goto alloc_payload; - } - - if (num_async) { - /* Transmit if any encryptions have completed */ - if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { - cancel_delayed_work(&ctx->tx_work.work); - tls_tx_records(sk, flags); - } - } -sendpage_end: - ret = sk_stream_error(sk, flags, ret); - return copied > 0 ? 
copied : ret; -} - int tls_sw_sendpage_locked(struct sock *sk, struct page *page, int offset, size_t size, int flags) { + struct bio_vec bvec; + struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; + if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY | MSG_NO_SHARED_FRAGS)) return -EOPNOTSUPP; + if (flags & MSG_SENDPAGE_NOTLAST) + msg.msg_flags |= MSG_MORE; - return tls_sw_do_sendpage(sk, page, offset, size, flags); + bvec_set_page(&bvec, page, size, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); + return tls_sw_sendmsg_locked(sk, &msg, size); } int tls_sw_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { - struct tls_context *tls_ctx = tls_get_ctx(sk); - int ret; + struct bio_vec bvec; + struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) return -EOPNOTSUPP; + if (flags & MSG_SENDPAGE_NOTLAST) + msg.msg_flags |= MSG_MORE; - ret = mutex_lock_interruptible(&tls_ctx->tx_lock); - if (ret) - return ret; - lock_sock(sk); - ret = tls_sw_do_sendpage(sk, page, offset, size, flags); - release_sock(sk); - mutex_unlock(&tls_ctx->tx_lock); - return ret; + bvec_set_page(&bvec, page, size, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); + return tls_sw_sendmsg(sk, &msg, size); } static int -- cgit v1.2.3 From 24763c9c09804db9b178f41358034144b87a937a Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 7 Jun 2023 19:19:19 +0100 Subject: tls/device: Support MSG_SPLICE_PAGES Make TLS's device sendmsg() support MSG_SPLICE_PAGES. This causes pages to be spliced from the source iterator if possible. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. 
Signed-off-by: David Howells Reviewed-by: Jakub Kicinski cc: Chuck Lever cc: Boris Pismenny cc: John Fastabend cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/tls/tls_device.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'net') diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 439be833dcf9..bb3bb523544e 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -509,6 +509,29 @@ handle_error: tls_append_frag(record, &zc_pfrag, copy); iter_offset.offset += copy; + } else if (copy && (flags & MSG_SPLICE_PAGES)) { + struct page_frag zc_pfrag; + struct page **pages = &zc_pfrag.page; + size_t off; + + rc = iov_iter_extract_pages(iter_offset.msg_iter, + &pages, copy, 1, 0, &off); + if (rc <= 0) { + if (rc == 0) + rc = -EIO; + goto handle_error; + } + copy = rc; + + if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) { + iov_iter_revert(iter_offset.msg_iter, copy); + rc = -EIO; + goto handle_error; + } + + zc_pfrag.offset = off; + zc_pfrag.size = copy; + tls_append_frag(record, &zc_pfrag, copy); } else if (copy) { copy = min_t(size_t, copy, pfrag->size - pfrag->offset); @@ -572,6 +595,9 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) union tls_iter_offset iter; int rc; + if (!tls_ctx->zerocopy_sendfile) + msg->msg_flags &= ~MSG_SPLICE_PAGES; + mutex_lock(&tls_ctx->tx_lock); lock_sock(sk); -- cgit v1.2.3 From 3dc8976c7ad6ec46954eb99076551e2a2c4114da Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 7 Jun 2023 19:19:20 +0100 Subject: tls/device: Convert tls_device_sendpage() to use MSG_SPLICE_PAGES Convert tls_device_sendpage() to use sendmsg() with MSG_SPLICE_PAGES rather than directly splicing in the pages itself. With that, the tls_iter_offset union is no longer necessary and can be replaced with an iov_iter pointer and the zc_page argument to tls_push_data() can also be removed. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. 
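The caller side of the conversion is the same small pattern throughout the series: wrap the page in a bio_vec, point a BVEC iterator at it and call the protocol's sendmsg() with MSG_SPLICE_PAGES. A sketch mirroring the tls_device_sendpage() wrapper below:

static int sendpage_via_sendmsg(struct sock *sk, struct page *page,
                                int offset, size_t size, int flags)
{
        struct bio_vec bvec;
        struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };

        if (flags & MSG_SENDPAGE_NOTLAST)
                msg.msg_flags |= MSG_MORE;

        bvec_set_page(&bvec, page, size, offset);
        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
        return tls_device_sendmsg(sk, &msg, size);      /* the protocol's sendmsg() */
}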
Signed-off-by: David Howells Acked-by: Jakub Kicinski cc: Chuck Lever cc: Boris Pismenny cc: John Fastabend cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/tls/tls_device.c | 92 +++++++++++++--------------------------------------- 1 file changed, 23 insertions(+), 69 deletions(-) (limited to 'net') diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index bb3bb523544e..b4864d55900f 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -422,16 +422,10 @@ static int tls_device_copy_data(void *addr, size_t bytes, struct iov_iter *i) return 0; } -union tls_iter_offset { - struct iov_iter *msg_iter; - int offset; -}; - static int tls_push_data(struct sock *sk, - union tls_iter_offset iter_offset, + struct iov_iter *iter, size_t size, int flags, - unsigned char record_type, - struct page *zc_page) + unsigned char record_type) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_prot_info *prot = &tls_ctx->prot_info; @@ -500,22 +494,13 @@ handle_error: record = ctx->open_record; copy = min_t(size_t, size, max_open_record_len - record->len); - if (copy && zc_page) { - struct page_frag zc_pfrag; - - zc_pfrag.page = zc_page; - zc_pfrag.offset = iter_offset.offset; - zc_pfrag.size = copy; - tls_append_frag(record, &zc_pfrag, copy); - - iter_offset.offset += copy; - } else if (copy && (flags & MSG_SPLICE_PAGES)) { + if (copy && (flags & MSG_SPLICE_PAGES)) { struct page_frag zc_pfrag; struct page **pages = &zc_pfrag.page; size_t off; - rc = iov_iter_extract_pages(iter_offset.msg_iter, - &pages, copy, 1, 0, &off); + rc = iov_iter_extract_pages(iter, &pages, + copy, 1, 0, &off); if (rc <= 0) { if (rc == 0) rc = -EIO; @@ -524,7 +509,7 @@ handle_error: copy = rc; if (WARN_ON_ONCE(!sendpage_ok(zc_pfrag.page))) { - iov_iter_revert(iter_offset.msg_iter, copy); + iov_iter_revert(iter, copy); rc = -EIO; goto handle_error; } @@ -537,7 +522,7 @@ handle_error: rc = tls_device_copy_data(page_address(pfrag->page) + pfrag->offset, copy, - iter_offset.msg_iter); + iter); if (rc) goto handle_error; tls_append_frag(record, pfrag, copy); @@ -592,7 +577,6 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { unsigned char record_type = TLS_RECORD_TYPE_DATA; struct tls_context *tls_ctx = tls_get_ctx(sk); - union tls_iter_offset iter; int rc; if (!tls_ctx->zerocopy_sendfile) @@ -607,8 +591,8 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) goto out; } - iter.msg_iter = &msg->msg_iter; - rc = tls_push_data(sk, iter, size, msg->msg_flags, record_type, NULL); + rc = tls_push_data(sk, &msg->msg_iter, size, msg->msg_flags, + record_type); out: release_sock(sk); @@ -620,8 +604,7 @@ void tls_device_splice_eof(struct socket *sock) { struct sock *sk = sock->sk; struct tls_context *tls_ctx = tls_get_ctx(sk); - union tls_iter_offset iter; - struct iov_iter iov_iter = {}; + struct iov_iter iter = {}; if (!tls_is_partially_sent_record(tls_ctx)) return; @@ -630,9 +613,8 @@ void tls_device_splice_eof(struct socket *sock) lock_sock(sk); if (tls_is_partially_sent_record(tls_ctx)) { - iov_iter_bvec(&iov_iter, ITER_SOURCE, NULL, 0, 0); - iter.msg_iter = &iov_iter; - tls_push_data(sk, iter, 0, 0, TLS_RECORD_TYPE_DATA, NULL); + iov_iter_bvec(&iter, ITER_SOURCE, NULL, 0, 0); + tls_push_data(sk, &iter, 0, 0, TLS_RECORD_TYPE_DATA); } release_sock(sk); @@ -642,44 +624,18 @@ void tls_device_splice_eof(struct socket *sock) int tls_device_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { - struct tls_context *tls_ctx = 
tls_get_ctx(sk); - union tls_iter_offset iter_offset; - struct iov_iter msg_iter; - char *kaddr; - struct kvec iov; - int rc; + struct bio_vec bvec; + struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; if (flags & MSG_SENDPAGE_NOTLAST) - flags |= MSG_MORE; - - mutex_lock(&tls_ctx->tx_lock); - lock_sock(sk); + msg.msg_flags |= MSG_MORE; - if (flags & MSG_OOB) { - rc = -EOPNOTSUPP; - goto out; - } - - if (tls_ctx->zerocopy_sendfile) { - iter_offset.offset = offset; - rc = tls_push_data(sk, iter_offset, size, - flags, TLS_RECORD_TYPE_DATA, page); - goto out; - } - - kaddr = kmap(page); - iov.iov_base = kaddr + offset; - iov.iov_len = size; - iov_iter_kvec(&msg_iter, ITER_SOURCE, &iov, 1, size); - iter_offset.msg_iter = &msg_iter; - rc = tls_push_data(sk, iter_offset, size, flags, TLS_RECORD_TYPE_DATA, - NULL); - kunmap(page); + if (flags & MSG_OOB) + return -EOPNOTSUPP; -out: - release_sock(sk); - mutex_unlock(&tls_ctx->tx_lock); - return rc; + bvec_set_page(&bvec, page, size, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); + return tls_device_sendmsg(sk, &msg, size); } struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, @@ -744,12 +700,10 @@ EXPORT_SYMBOL(tls_get_record); static int tls_device_push_pending_record(struct sock *sk, int flags) { - union tls_iter_offset iter; - struct iov_iter msg_iter; + struct iov_iter iter; - iov_iter_kvec(&msg_iter, ITER_SOURCE, NULL, 0, 0); - iter.msg_iter = &msg_iter; - return tls_push_data(sk, iter, 0, flags, TLS_RECORD_TYPE_DATA, NULL); + iov_iter_kvec(&iter, ITER_SOURCE, NULL, 0, 0); + return tls_push_data(sk, &iter, 0, flags, TLS_RECORD_TYPE_DATA); } void tls_device_write_space(struct sock *sk, struct tls_context *ctx) -- cgit v1.2.3 From 74b449b98dccdf24288d562f9d207fa066da793d Mon Sep 17 00:00:00 2001 From: Ivan Mikhaylov Date: Wed, 7 Jun 2023 18:17:41 +0300 Subject: net/ncsi: make one oem_gma function for all mfr id Make the one Get Mac Address function for all manufacturers and change this call in handlers accordingly. Reviewed-by: Simon Horman Signed-off-by: Ivan Mikhaylov Signed-off-by: David S. 
Miller --- net/ncsi/ncsi-rsp.c | 88 ++++++++++++----------------------------------------- 1 file changed, 19 insertions(+), 69 deletions(-) (limited to 'net') diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index 6447a09932f5..91c42253a711 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -611,14 +611,15 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr) return 0; } -/* Response handler for Mellanox command Get Mac Address */ -static int ncsi_rsp_handler_oem_mlx_gma(struct ncsi_request *nr) +/* Response handler for Get Mac Address command */ +static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id) { struct ncsi_dev_priv *ndp = nr->ndp; struct net_device *ndev = ndp->ndev.dev; const struct net_device_ops *ops = ndev->netdev_ops; struct ncsi_rsp_oem_pkt *rsp; struct sockaddr saddr; + u32 mac_addr_off = 0; int ret = 0; /* Get the response header */ @@ -626,7 +627,19 @@ static int ncsi_rsp_handler_oem_mlx_gma(struct ncsi_request *nr) saddr.sa_family = ndev->type; ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - memcpy(saddr.sa_data, &rsp->data[MLX_MAC_ADDR_OFFSET], ETH_ALEN); + if (mfr_id == NCSI_OEM_MFR_BCM_ID) + mac_addr_off = BCM_MAC_ADDR_OFFSET; + else if (mfr_id == NCSI_OEM_MFR_MLX_ID) + mac_addr_off = MLX_MAC_ADDR_OFFSET; + else if (mfr_id == NCSI_OEM_MFR_INTEL_ID) + mac_addr_off = INTEL_MAC_ADDR_OFFSET; + + memcpy(saddr.sa_data, &rsp->data[mac_addr_off], ETH_ALEN); + if (mfr_id == NCSI_OEM_MFR_BCM_ID || mfr_id == NCSI_OEM_MFR_INTEL_ID) + eth_addr_inc((u8 *)saddr.sa_data); + if (!is_valid_ether_addr((const u8 *)saddr.sa_data)) + return -ENXIO; + /* Set the flag for GMA command which should only be called once */ ndp->gma_flag = 1; @@ -649,41 +662,10 @@ static int ncsi_rsp_handler_oem_mlx(struct ncsi_request *nr) if (mlx->cmd == NCSI_OEM_MLX_CMD_GMA && mlx->param == NCSI_OEM_MLX_CMD_GMA_PARAM) - return ncsi_rsp_handler_oem_mlx_gma(nr); + return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_MLX_ID); return 0; } -/* Response handler for Broadcom command Get Mac Address */ -static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr) -{ - struct ncsi_dev_priv *ndp = nr->ndp; - struct net_device *ndev = ndp->ndev.dev; - const struct net_device_ops *ops = ndev->netdev_ops; - struct ncsi_rsp_oem_pkt *rsp; - struct sockaddr saddr; - int ret = 0; - - /* Get the response header */ - rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp); - - saddr.sa_family = ndev->type; - ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN); - /* Increase mac address by 1 for BMC's address */ - eth_addr_inc((u8 *)saddr.sa_data); - if (!is_valid_ether_addr((const u8 *)saddr.sa_data)) - return -ENXIO; - - /* Set the flag for GMA command which should only be called once */ - ndp->gma_flag = 1; - - ret = ops->ndo_set_mac_address(ndev, &saddr); - if (ret < 0) - netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n"); - - return ret; -} - /* Response handler for Broadcom card */ static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr) { @@ -695,42 +677,10 @@ static int ncsi_rsp_handler_oem_bcm(struct ncsi_request *nr) bcm = (struct ncsi_rsp_oem_bcm_pkt *)(rsp->data); if (bcm->type == NCSI_OEM_BCM_CMD_GMA) - return ncsi_rsp_handler_oem_bcm_gma(nr); + return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_BCM_ID); return 0; } -/* Response handler for Intel command Get Mac Address */ -static int ncsi_rsp_handler_oem_intel_gma(struct ncsi_request *nr) -{ - struct ncsi_dev_priv *ndp = nr->ndp; - struct net_device 
*ndev = ndp->ndev.dev; - const struct net_device_ops *ops = ndev->netdev_ops; - struct ncsi_rsp_oem_pkt *rsp; - struct sockaddr saddr; - int ret = 0; - - /* Get the response header */ - rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp); - - saddr.sa_family = ndev->type; - ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; - memcpy(saddr.sa_data, &rsp->data[INTEL_MAC_ADDR_OFFSET], ETH_ALEN); - /* Increase mac address by 1 for BMC's address */ - eth_addr_inc((u8 *)saddr.sa_data); - if (!is_valid_ether_addr((const u8 *)saddr.sa_data)) - return -ENXIO; - - /* Set the flag for GMA command which should only be called once */ - ndp->gma_flag = 1; - - ret = ops->ndo_set_mac_address(ndev, &saddr); - if (ret < 0) - netdev_warn(ndev, - "NCSI: 'Writing mac address to device failed\n"); - - return ret; -} - /* Response handler for Intel card */ static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr) { @@ -742,7 +692,7 @@ static int ncsi_rsp_handler_oem_intel(struct ncsi_request *nr) intel = (struct ncsi_rsp_oem_intel_pkt *)(rsp->data); if (intel->cmd == NCSI_OEM_INTEL_CMD_GMA) - return ncsi_rsp_handler_oem_intel_gma(nr); + return ncsi_rsp_handler_oem_gma(nr, NCSI_OEM_MFR_INTEL_ID); return 0; } -- cgit v1.2.3 From 790071347a0a1a89e618eedcd51c687ea783aeb3 Mon Sep 17 00:00:00 2001 From: Ivan Mikhaylov Date: Wed, 7 Jun 2023 18:17:42 +0300 Subject: net/ncsi: change from ndo_set_mac_address to dev_set_mac_address Change ndo_set_mac_address to dev_set_mac_address because dev_set_mac_address provides a way to notify the network layer about the MAC change. Otherwise, services may not be aware of the MAC change and keep using the old address set by the network adapter driver. For example, the systemd DHCP client does not update the MAC address without a notification from the net subsystem, which leads to problems acquiring the right address from the DHCP server. Fixes: cb10c7c0dfd9e ("net/ncsi: Add NCSI Broadcom OEM command") Cc: stable@vger.kernel.org # v6.0+ 2f38e84 net/ncsi: make one oem_gma function for all mfr id Signed-off-by: Paul Fertser Signed-off-by: Ivan Mikhaylov Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- net/ncsi/ncsi-rsp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index 91c42253a711..069c2659074b 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -616,7 +616,6 @@ static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id) { struct ncsi_dev_priv *ndp = nr->ndp; struct net_device *ndev = ndp->ndev.dev; - const struct net_device_ops *ops = ndev->netdev_ops; struct ncsi_rsp_oem_pkt *rsp; struct sockaddr saddr; u32 mac_addr_off = 0; @@ -643,7 +642,9 @@ static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id) /* Set the flag for GMA command which should only be called once */ ndp->gma_flag = 1; - ret = ops->ndo_set_mac_address(ndev, &saddr); + rtnl_lock(); + ret = dev_set_mac_address(ndev, &saddr, NULL); + rtnl_unlock(); if (ret < 0) netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n"); -- cgit v1.2.3 From 4c630f307455c06f99bdeca7f7a1ab5318604fe0 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 17 May 2023 20:25:45 +0100 Subject: mm/gup: remove vmas parameter from pin_user_pages() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are now in a position where no caller of pin_user_pages() requires the vmas parameter at all, so eliminate this parameter from the function and all callers.
This clears the way to removing the vmas parameter from GUP altogether. Link: https://lkml.kernel.org/r/195a99ae949c9f5cb589d2222b736ced96ec199a.1684350871.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: David Hildenbrand Acked-by: Dennis Dalessandro [qib] Reviewed-by: Christoph Hellwig Acked-by: Sakari Ailus [drivers/media] Cc: Catalin Marinas Cc: Christian König Cc: Greg Kroah-Hartman Cc: Janosch Frank Cc: Jarkko Sakkinen Cc: Jason Gunthorpe Cc: Jens Axboe Cc: Matthew Wilcox (Oracle) Cc: Sean Christopherson Signed-off-by: Andrew Morton --- arch/powerpc/mm/book3s64/iommu_api.c | 2 +- drivers/infiniband/hw/qib/qib_user_pages.c | 2 +- drivers/infiniband/hw/usnic/usnic_uiom.c | 2 +- drivers/infiniband/sw/siw/siw_mem.c | 2 +- drivers/media/v4l2-core/videobuf-dma-sg.c | 2 +- drivers/vdpa/vdpa_user/vduse_dev.c | 2 +- drivers/vhost/vdpa.c | 2 +- include/linux/mm.h | 3 +-- io_uring/rsrc.c | 2 +- mm/gup.c | 9 +++------ mm/gup_test.c | 9 ++++----- net/xdp/xdp_umem.c | 2 +- 12 files changed, 17 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c index 81d7185e2ae8..d19fb1f3007d 100644 --- a/arch/powerpc/mm/book3s64/iommu_api.c +++ b/arch/powerpc/mm/book3s64/iommu_api.c @@ -105,7 +105,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n, FOLL_WRITE | FOLL_LONGTERM, - mem->hpages + entry, NULL); + mem->hpages + entry); if (ret == n) { pinned += n; continue; diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index f693bc753b6b..1bb7507325bc 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -111,7 +111,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages, ret = pin_user_pages(start_page + got * PAGE_SIZE, num_pages - got, FOLL_LONGTERM | FOLL_WRITE, - p + got, NULL); + p + got); if (ret < 0) { mmap_read_unlock(current->mm); goto bail_release; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 2a5cac2658ec..84e0f41e7dfa 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -140,7 +140,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, ret = pin_user_pages(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof(struct page *)), - gup_flags, page_list, NULL); + gup_flags, page_list); if (ret < 0) goto out; diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c index f51ab2ccf151..e6e25f15567d 100644 --- a/drivers/infiniband/sw/siw/siw_mem.c +++ b/drivers/infiniband/sw/siw/siw_mem.c @@ -422,7 +422,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable) umem->page_chunk[i].plist = plist; while (nents) { rv = pin_user_pages(first_page_va, nents, foll_flags, - plist, NULL); + plist); if (rv < 0) goto out_sem_up; diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 53001532e8e3..405b89ea1054 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -180,7 +180,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, data, size, dma->nr_pages); err = pin_user_pages(data & PAGE_MASK, dma->nr_pages, gup_flags, - dma->pages, NULL); + dma->pages); if (err != dma->nr_pages) { dma->nr_pages = (err >= 0) ? 
err : 0; diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index de97e38c3b82..4d4405f058e8 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -1052,7 +1052,7 @@ static int vduse_dev_reg_umem(struct vduse_dev *dev, goto out; pinned = pin_user_pages(uaddr, npages, FOLL_LONGTERM | FOLL_WRITE, - page_list, NULL); + page_list); if (pinned != npages) { ret = pinned < 0 ? pinned : -ENOMEM; goto out; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 8c1aefc865f0..61223fcbe82b 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -983,7 +983,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v, while (npages) { sz2pin = min_t(unsigned long, npages, list_size); pinned = pin_user_pages(cur_base, sz2pin, - gup_flags, page_list, NULL); + gup_flags, page_list); if (sz2pin != pinned) { if (pinned < 0) { ret = pinned; diff --git a/include/linux/mm.h b/include/linux/mm.h index fcbfb961b49f..280429ffa91d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2399,8 +2399,7 @@ static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages); long pin_user_pages(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas); + unsigned int gup_flags, struct page **pages); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c index b6451f8bc5d5..b56bda46a9eb 100644 --- a/io_uring/rsrc.c +++ b/io_uring/rsrc.c @@ -1044,7 +1044,7 @@ struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages) ret = 0; mmap_read_lock(current->mm); pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM, - pages, NULL); + pages); if (pret == nr_pages) *npages = nr_pages; else diff --git a/mm/gup.c b/mm/gup.c index 764bf0c20827..18e3bc2ee3f1 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -3131,8 +3131,6 @@ EXPORT_SYMBOL(pin_user_pages_remote); * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. - * @vmas: array of pointers to vmas corresponding to each page. - * Or NULL if the caller does not require them. * * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and * FOLL_PIN is set. @@ -3141,15 +3139,14 @@ EXPORT_SYMBOL(pin_user_pages_remote); * see Documentation/core-api/pin_user_pages.rst for details. 
*/ long pin_user_pages(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas) + unsigned int gup_flags, struct page **pages) { int locked = 1; - if (!is_valid_gup_args(pages, vmas, NULL, &gup_flags, FOLL_PIN)) + if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_PIN)) return 0; return __gup_longterm_locked(current->mm, start, nr_pages, - pages, vmas, &locked, gup_flags); + pages, NULL, &locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages); diff --git a/mm/gup_test.c b/mm/gup_test.c index 9ba8ea23f84e..1668ce0e0783 100644 --- a/mm/gup_test.c +++ b/mm/gup_test.c @@ -146,18 +146,17 @@ static int __gup_test_ioctl(unsigned int cmd, pages + i); break; case PIN_BASIC_TEST: - nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i, - NULL); + nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i); break; case PIN_LONGTERM_BENCHMARK: nr = pin_user_pages(addr, nr, gup->gup_flags | FOLL_LONGTERM, - pages + i, NULL); + pages + i); break; case DUMP_USER_PAGES_TEST: if (gup->test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) nr = pin_user_pages(addr, nr, gup->gup_flags, - pages + i, NULL); + pages + i); else nr = get_user_pages(addr, nr, gup->gup_flags, pages + i); @@ -270,7 +269,7 @@ static inline int pin_longterm_test_start(unsigned long arg) gup_flags, pages); else cur_pages = pin_user_pages(addr, remaining_pages, - gup_flags, pages, NULL); + gup_flags, pages); if (cur_pages < 0) { pin_longterm_test_stop(); ret = cur_pages; diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 02207e852d79..06cead2b8e34 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -103,7 +103,7 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address) mmap_read_lock(current->mm); npgs = pin_user_pages(address, umem->npgs, - gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL); + gup_flags | FOLL_LONGTERM, &umem->pgs[0]); mmap_read_unlock(current->mm); if (npgs != umem->npgs) { -- cgit v1.2.3 From dc886bce753cc2cf3c88ec5c7a6880a4e17d65ba Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Thu, 8 Jun 2023 15:20:49 +0200 Subject: mptcp: export local_address Rename local_address() with "mptcp_" prefix and export it in protocol.h. This function will be re-used in the common PM code (pm.c) in the following commit. 
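For orientation, only the first two assignments of the helper body appear in the hunk below. A minimal sketch of what such a helper does follows; the family and port lines match the diff, while the IPv4 address copy and the IPv6 branch are assumptions about the unchanged remainder of the function and are not part of this patch.

/* Sketch only, not the patch itself (kernel context: needs net/mptcp/protocol.h
 * and the sock_common definitions): fill an mptcp_addr_info from a sock_common
 * so PM code can compare endpoints in one common format.
 */
void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr)
{
	addr->family = skc->skc_family;			/* as in the hunk below */
	addr->port = htons(skc->skc_num);		/* as in the hunk below */
	if (addr->family == AF_INET)			/* assumed remainder */
		addr->addr.s_addr = skc->skc_rcv_saddr;
	/* an equivalent IPv6 branch copying skc->skc_v6_rcv_saddr is assumed */
}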
Signed-off-by: Geliang Tang Reviewed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Reviewed-by: Larysa Zaremba Signed-off-by: Jakub Kicinski --- net/mptcp/pm_netlink.c | 17 ++++++++--------- net/mptcp/protocol.h | 1 + 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 59f8f3124855..0bf09c45febd 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -86,8 +86,7 @@ bool mptcp_addresses_equal(const struct mptcp_addr_info *a, return a->port == b->port; } -static void local_address(const struct sock_common *skc, - struct mptcp_addr_info *addr) +void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr) { addr->family = skc->skc_family; addr->port = htons(skc->skc_num); @@ -122,7 +121,7 @@ static bool lookup_subflow_by_saddr(const struct list_head *list, list_for_each_entry(subflow, list, node) { skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow); - local_address(skc, &cur); + mptcp_local_address(skc, &cur); if (mptcp_addresses_equal(&cur, saddr, saddr->port)) return true; } @@ -263,7 +262,7 @@ bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk) struct mptcp_addr_info saddr; bool ret = false; - local_address((struct sock_common *)sk, &saddr); + mptcp_local_address((struct sock_common *)sk, &saddr); spin_lock_bh(&msk->pm.lock); list_for_each_entry(entry, &msk->pm.anno_list, list) { @@ -541,7 +540,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) struct mptcp_addr_info mpc_addr; bool backup = false; - local_address((struct sock_common *)msk->first, &mpc_addr); + mptcp_local_address((struct sock_common *)msk->first, &mpc_addr); rcu_read_lock(); entry = __lookup_addr(pernet, &mpc_addr, false); if (entry) { @@ -752,7 +751,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, struct sock *ssk = mptcp_subflow_tcp_sock(subflow); struct mptcp_addr_info local, remote; - local_address((struct sock_common *)ssk, &local); + mptcp_local_address((struct sock_common *)ssk, &local); if (!mptcp_addresses_equal(&local, addr, addr->port)) continue; @@ -1070,8 +1069,8 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) /* The 0 ID mapping is defined by the first subflow, copied into the msk * addr */ - local_address((struct sock_common *)msk, &msk_local); - local_address((struct sock_common *)skc, &skc_local); + mptcp_local_address((struct sock_common *)msk, &msk_local); + mptcp_local_address((struct sock_common *)skc, &skc_local); if (mptcp_addresses_equal(&msk_local, &skc_local, false)) return 0; @@ -1491,7 +1490,7 @@ static int mptcp_nl_remove_id_zero_address(struct net *net, if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) goto next; - local_address((struct sock_common *)msk, &msk_local); + mptcp_local_address((struct sock_common *)msk, &msk_local); if (!mptcp_addresses_equal(&msk_local, addr, addr->port)) goto next; diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 70c957bc56a8..3580c7fc39c3 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -638,6 +638,7 @@ void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk); bool mptcp_addresses_equal(const struct mptcp_addr_info *a, const struct mptcp_addr_info *b, bool use_port); +void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr); /* called with sk socket lock held */ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, -- cgit v1.2.3 
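With the helper exported next to mptcp_addresses_equal() in protocol.h, code outside pm_netlink.c can reuse the same address-matching idiom as the call sites updated above. A hedged usage sketch follows: mptcp_local_address(), mptcp_addresses_equal() and struct mptcp_addr_info come from the patch, while the wrapper name ssk_uses_local_addr() is hypothetical. The following patches rely on exactly this kind of comparison from the common PM code in pm.c.

/* Hypothetical caller (kernel context, net/mptcp/protocol.h):
 * does the subflow socket @ssk use the local address @addr?
 */
static bool ssk_uses_local_addr(const struct sock *ssk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_addr_info cur;

	/* Extract @ssk's local address in mptcp_addr_info form ... */
	mptcp_local_address((struct sock_common *)ssk, &cur);
	/* ... and compare it, including the port when one is set. */
	return mptcp_addresses_equal(&cur, addr, addr->port);
}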
From 9bbec87ecfe8a5c06710100a93e6b7e66f2cbbaf Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Thu, 8 Jun 2023 15:20:50 +0200 Subject: mptcp: unify pm get_local_id interfaces This patch unifies the PM get_local_id() interfaces. mptcp_pm_nl_get_local_id() in mptcp/pm_netlink.c (in-kernel PM) and mptcp_userspace_pm_get_local_id() in mptcp/pm_userspace.c (userspace PM) are now dispatched from the common PM interface mptcp_pm_get_local_id() in mptcp/pm.c, depending on whether mptcp_pm_is_userspace() is true. The declarations of these three functions are also grouped together in protocol.h. Signed-off-by: Geliang Tang Reviewed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Reviewed-by: Larysa Zaremba Signed-off-by: Jakub Kicinski --- net/mptcp/pm.c | 18 +++++++++++++++++- net/mptcp/pm_netlink.c | 22 +++------------------- net/mptcp/protocol.h | 2 +- 3 files changed, 21 insertions(+), 21 deletions(-) (limited to 'net') diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index 92d540e527a2..300fa9bea047 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -415,7 +415,23 @@ out_unlock: int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) { - return mptcp_pm_nl_get_local_id(msk, skc); + struct mptcp_addr_info skc_local; + struct mptcp_addr_info msk_local; + + if (WARN_ON_ONCE(!msk)) + return -1; + + /* The 0 ID mapping is defined by the first subflow, copied into the msk + * addr + */ + mptcp_local_address((struct sock_common *)msk, &msk_local); + mptcp_local_address((struct sock_common *)skc, &skc_local); + if (mptcp_addresses_equal(&msk_local, &skc_local, false)) + return 0; + + if (mptcp_pm_is_userspace(msk)) + return mptcp_userspace_pm_get_local_id(msk, &skc_local); + return mptcp_pm_nl_get_local_id(msk, &skc_local); } void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 0bf09c45febd..e51d98877485 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -1055,33 +1055,17 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk, return 0; } -int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) +int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc) { struct mptcp_pm_addr_entry *entry; - struct mptcp_addr_info skc_local; - struct mptcp_addr_info msk_local; struct pm_nl_pernet *pernet; int ret = -1; - if (WARN_ON_ONCE(!msk)) - return -1; - - /* The 0 ID mapping is defined by the first subflow, copied into the msk - * addr - */ - mptcp_local_address((struct sock_common *)msk, &msk_local); - mptcp_local_address((struct sock_common *)skc, &skc_local); - if (mptcp_addresses_equal(&msk_local, &skc_local, false)) - return 0; - - if (mptcp_pm_is_userspace(msk)) - return mptcp_userspace_pm_get_local_id(msk, &skc_local); - pernet = pm_nl_get_pernet_from_msk(msk); rcu_read_lock(); list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { - if (mptcp_addresses_equal(&entry->addr, &skc_local, entry->addr.port)) { + if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) { ret = entry->addr.id; break; } @@ -1095,7 +1079,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) if (!entry) return -ENOMEM; - entry->addr = skc_local; + entry->addr = *skc; entry->addr.id = 0; entry->addr.port = 0; entry->ifindex = 0; diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 3580c7fc39c3..1ac799a6b959 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -917,13 +917,13 @@ bool
mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb, bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, struct mptcp_rm_list *rm_list); int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc); +int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc); int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc); void __init mptcp_pm_nl_init(void); void mptcp_pm_nl_work(struct mptcp_sock *msk); void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); -int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc); unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk); unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk); unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk); -- cgit v1.2.3 From f40be0db0b7680c2e9f0b1289788542813ba0f00 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Thu, 8 Jun 2023 15:20:51 +0200 Subject: mptcp: unify pm get_flags_and_ifindex_by_id This patch unifies the PM get_flags_and_ifindex_by_id() interfaces. mptcp_pm_nl_get_flags_and_ifindex_by_id() in mptcp/pm_netlink.c (in-kernel PM) and mptcp_userspace_pm_get_flags_and_ifindex_by_id() in mptcp/pm_userspace.c (userspace PM) are now dispatched from the common PM interface mptcp_pm_get_flags_and_ifindex_by_id() in mptcp/pm.c, depending on whether mptcp_pm_is_userspace() is true. Signed-off-by: Geliang Tang Reviewed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Reviewed-by: Larysa Zaremba Signed-off-by: Jakub Kicinski --- net/mptcp/pm.c | 14 ++++++++++++++ net/mptcp/pm_netlink.c | 27 ++++++++------------------- net/mptcp/pm_userspace.c | 3 --- net/mptcp/protocol.h | 2 ++ 4 files changed, 24 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index 300fa9bea047..b4a1277b4bb5 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -434,6 +434,20 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) return mptcp_pm_nl_get_local_id(msk, &skc_local); } +int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, + u8 *flags, int *ifindex) +{ + *flags = 0; + *ifindex = 0; + + if (!id) + return 0; + + if (mptcp_pm_is_userspace(msk)) + return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk, id, flags, ifindex); + return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex); +} + void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index e51d98877485..0fd938933373 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -1356,31 +1356,20 @@ out_free: return ret; } -int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, - u8 *flags, int *ifindex) +int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, + u8 *flags, int *ifindex) { struct mptcp_pm_addr_entry *entry; struct sock *sk = (struct sock *)msk; struct net *net = sock_net(sk); - *flags = 0; - *ifindex = 0; - - if (id) { - if (mptcp_pm_is_userspace(msk)) - return mptcp_userspace_pm_get_flags_and_ifindex_by_id(msk, - id, - flags, - ifindex); - - rcu_read_lock(); - entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id); - if (entry) { - *flags = entry->flags; - *ifindex = entry->ifindex; - } -
rcu_read_unlock(); + rcu_read_lock(); + entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id); + if (entry) { + *flags = entry->flags; + *ifindex = entry->ifindex; } + rcu_read_unlock(); return 0; } diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c index b06aa58dfcf2..47a883a16c11 100644 --- a/net/mptcp/pm_userspace.c +++ b/net/mptcp/pm_userspace.c @@ -111,9 +111,6 @@ int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, { struct mptcp_pm_addr_entry *entry, *match = NULL; - *flags = 0; - *ifindex = 0; - spin_lock_bh(&msk->pm.lock); list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) { if (id == entry->addr.id) { diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 1ac799a6b959..0a0a36fd0310 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -822,6 +822,8 @@ mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk, int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, u8 *flags, int *ifindex); +int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, + u8 *flags, int *ifindex); int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, u8 *flags, int *ifindex); -- cgit v1.2.3 From 6ba7ce89905c5d5cdb4ff9ff7c763a6a1d31f48d Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Thu, 8 Jun 2023 15:20:52 +0200 Subject: mptcp: unify pm set_flags interfaces This patch unifies the PM set_flags() interfaces. mptcp_pm_nl_set_flags() in mptcp/pm_netlink.c (in-kernel PM) and mptcp_userspace_pm_set_flags() in mptcp/pm_userspace.c (userspace PM) are now dispatched from the common PM interface mptcp_pm_set_flags() in mptcp/pm.c, depending on whether token is NULL. Signed-off-by: Geliang Tang Reviewed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Reviewed-by: Larysa Zaremba Signed-off-by: Jakub Kicinski --- net/mptcp/pm.c | 9 +++++++ net/mptcp/pm_netlink.c | 70 +++++++++++++++++++++++++++----------------------- net/mptcp/protocol.h | 4 +++ 3 files changed, 51 insertions(+), 32 deletions(-) (limited to 'net') diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index b4a1277b4bb5..7dbbad1e4f55 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -448,6 +448,15 @@ int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id return mptcp_pm_nl_get_flags_and_ifindex_by_id(msk, id, flags, ifindex); } +int mptcp_pm_set_flags(struct net *net, struct nlattr *token, + struct mptcp_pm_addr_entry *loc, + struct mptcp_pm_addr_entry *rem, u8 bkup) +{ + if (token) + return mptcp_userspace_pm_set_flags(net, token, loc, rem, bkup); + return mptcp_pm_nl_set_flags(net, loc, bkup); +} + void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 0fd938933373..a12a87b780f6 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -1882,18 +1882,50 @@ next: return ret; } +int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8 bkup) +{ + struct pm_nl_pernet *pernet = pm_nl_get_pernet(net); + u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP | + MPTCP_PM_ADDR_FLAG_FULLMESH; + struct mptcp_pm_addr_entry *entry; + u8 lookup_by_id = 0; + + if (addr->addr.family == AF_UNSPEC) { + lookup_by_id = 1; + if (!addr->addr.id) + return -EOPNOTSUPP; + } + + spin_lock_bh(&pernet->lock); + entry = __lookup_addr(pernet, &addr->addr, lookup_by_id); + if (!entry) {
+ spin_unlock_bh(&pernet->lock); + return -EINVAL; + } + if ((addr->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) && + (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { + spin_unlock_bh(&pernet->lock); + return -EINVAL; + } + + changed = (addr->flags ^ entry->flags) & mask; + entry->flags = (entry->flags & ~mask) | (addr->flags & mask); + *addr = *entry; + spin_unlock_bh(&pernet->lock); + + mptcp_nl_set_flags(net, &addr->addr, bkup, changed); + return 0; +} + static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info) { - struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }, *entry; struct mptcp_pm_addr_entry remote = { .addr = { .family = AF_UNSPEC }, }; + struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }; struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR]; - struct pm_nl_pernet *pernet = genl_info_pm_nl(info); - u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP | - MPTCP_PM_ADDR_FLAG_FULLMESH; struct net *net = sock_net(skb->sk); - u8 bkup = 0, lookup_by_id = 0; + u8 bkup = 0; int ret; ret = mptcp_pm_parse_entry(attr, info, false, &addr); @@ -1908,34 +1940,8 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info) if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP) bkup = 1; - if (addr.addr.family == AF_UNSPEC) { - lookup_by_id = 1; - if (!addr.addr.id) - return -EOPNOTSUPP; - } - - if (token) - return mptcp_userspace_pm_set_flags(net, token, &addr, &remote, bkup); - - spin_lock_bh(&pernet->lock); - entry = __lookup_addr(pernet, &addr.addr, lookup_by_id); - if (!entry) { - spin_unlock_bh(&pernet->lock); - return -EINVAL; - } - if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) && - (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { - spin_unlock_bh(&pernet->lock); - return -EINVAL; - } - changed = (addr.flags ^ entry->flags) & mask; - entry->flags = (entry->flags & ~mask) | (addr.flags & mask); - addr = *entry; - spin_unlock_bh(&pernet->lock); - - mptcp_nl_set_flags(net, &addr.addr, bkup, changed); - return 0; + return mptcp_pm_set_flags(net, token, &addr, &remote, bkup); } static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 0a0a36fd0310..47b46602870e 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -827,6 +827,10 @@ int mptcp_pm_nl_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, u8 *flags, int *ifindex); +int mptcp_pm_set_flags(struct net *net, struct nlattr *token, + struct mptcp_pm_addr_entry *loc, + struct mptcp_pm_addr_entry *rem, u8 bkup); +int mptcp_pm_nl_set_flags(struct net *net, struct mptcp_pm_addr_entry *addr, u8 bkup); int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token, struct mptcp_pm_addr_entry *loc, struct mptcp_pm_addr_entry *rem, u8 bkup); -- cgit v1.2.3 From d457a0e329b0bfd3a1450e0b1a18cd2b47a25a08 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 8 Jun 2023 19:17:37 +0000 Subject: net: move gso declarations and functions to their own files Move declarations into include/net/gso.h and code into net/core/gso.c Signed-off-by: Eric Dumazet Cc: Stanislav Fomichev Reviewed-by: Simon Horman Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20230608191738.3947077-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/broadcom/tg3.c | 1 + 
drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 1 + drivers/net/ethernet/sfc/siena/tx_common.c | 1 + drivers/net/ethernet/sfc/tx_common.c | 1 + drivers/net/tap.c | 1 + drivers/net/usb/r8152.c | 1 + drivers/net/wireguard/device.c | 1 + drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 1 + include/linux/netdevice.h | 26 +-- include/linux/skbuff.h | 71 ------ include/net/gro.h | 1 + include/net/gso.h | 109 +++++++++ include/net/udp.h | 1 + net/core/Makefile | 2 +- net/core/dev.c | 70 +----- net/core/gro.c | 59 +---- net/core/gso.c | 273 +++++++++++++++++++++++ net/core/skbuff.c | 142 +----------- net/ipv4/af_inet.c | 1 + net/ipv4/esp4_offload.c | 1 + net/ipv4/gre_offload.c | 1 + net/ipv4/ip_output.c | 1 + net/ipv4/tcp_offload.c | 1 + net/ipv4/udp.c | 1 + net/ipv4/udp_offload.c | 1 + net/ipv6/esp6_offload.c | 1 + net/ipv6/ip6_offload.c | 1 + net/ipv6/ip6_output.c | 1 + net/ipv6/udp_offload.c | 1 + net/mac80211/tx.c | 1 + net/mpls/af_mpls.c | 1 + net/mpls/mpls_gso.c | 1 + net/netfilter/nf_flow_table_ip.c | 1 + net/netfilter/nfnetlink_queue.c | 1 + net/nsh/nsh.c | 1 + net/openvswitch/actions.c | 1 + net/openvswitch/datapath.c | 1 + net/sched/act_police.c | 1 + net/sched/sch_cake.c | 1 + net/sched/sch_netem.c | 1 + net/sched/sch_taprio.c | 1 + net/sched/sch_tbf.c | 1 + net/sctp/offload.c | 1 + net/xfrm/xfrm_device.c | 1 + net/xfrm/xfrm_interface_core.c | 1 + net/xfrm/xfrm_output.c | 1 + 46 files changed, 425 insertions(+), 365 deletions(-) create mode 100644 include/net/gso.h create mode 100644 net/core/gso.c (limited to 'net') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 58747292521d..5e68a6a4b2af 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -57,6 +57,7 @@ #include #include +#include #include #include diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index c5687d94ea88..7b7e1c5b00f4 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -66,6 +66,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c index 93a32d61944f..a7a9ab304e13 100644 --- a/drivers/net/ethernet/sfc/siena/tx_common.c +++ b/drivers/net/ethernet/sfc/siena/tx_common.c @@ -12,6 +12,7 @@ #include "efx.h" #include "nic_common.h" #include "tx_common.h" +#include static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) { diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index 67e789b96c43..4ce7d00e697d 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -12,6 +12,7 @@ #include "efx.h" #include "nic_common.h" #include "tx_common.h" +#include static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) { diff --git a/drivers/net/tap.c b/drivers/net/tap.c index d30d730ed5a7..9137fb8c1c42 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -18,6 +18,7 @@ #include #include +#include #include #include #include diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 0999a58ca9d2..0738baa5b82e 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -27,6 +27,7 @@ #include #include #include +#include /* Information for net-next */ #define NETNEXT_VERSION "12" diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index d58e9f818d3b..258dcc103921 
100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 5fa6f98b8e55..ef0f53b3b89f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c2f0c6002a84..2d6cb2bf2f05 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4827,13 +4827,6 @@ int skb_crc32c_csum_help(struct sk_buff *skb); int skb_csum_hwoffload_help(struct sk_buff *skb, const netdev_features_t features); -struct sk_buff *__skb_gso_segment(struct sk_buff *skb, - netdev_features_t features, bool tx_path); -struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb, - netdev_features_t features, __be16 type); -struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, - netdev_features_t features); - struct netdev_bonding_info { ifslave slave; ifbond master; @@ -4856,11 +4849,6 @@ static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, } #endif -static inline -struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) -{ - return __skb_gso_segment(skb, features, true); -} __be16 skb_network_protocol(struct sk_buff *skb, int *depth); static inline bool can_checksum_protocol(netdev_features_t features, @@ -4987,6 +4975,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features); netdev_features_t netif_skb_features(struct sk_buff *skb); +void skb_warn_bad_offload(const struct sk_buff *skb); static inline bool net_gso_ok(netdev_features_t features, int gso_type) { @@ -5035,19 +5024,6 @@ void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs); void netif_inherit_tso_max(struct net_device *to, const struct net_device *from); -static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, - int pulled_hlen, u16 mac_offset, - int mac_len) -{ - skb->protocol = protocol; - skb->encapsulation = 1; - skb_push(skb, pulled_hlen); - skb_reset_transport_header(skb); - skb->mac_header = mac_offset; - skb->network_header = skb->mac_header + mac_len; - skb->mac_len = mac_len; -} - static inline bool netif_is_macsec(const struct net_device *dev) { return dev->priv_flags & IFF_MACSEC; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index e2f48ddb2f7c..91ed66952580 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3974,8 +3974,6 @@ int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); void skb_scrub_packet(struct sk_buff *skb, bool xnet); -bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu); -bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features, unsigned int offset); @@ -4841,75 +4839,6 @@ static inline struct sec_path *skb_sec_path(const struct sk_buff *skb) #endif } -/* Keeps track of mac header offset relative to skb->head. - * It is useful for TSO of Tunneling protocol. e.g. GRE. 
- * For non-tunnel skb it points to skb_mac_header() and for - * tunnel skb it points to outer mac header. - * Keeps track of level of encapsulation of network headers. - */ -struct skb_gso_cb { - union { - int mac_offset; - int data_offset; - }; - int encap_level; - __wsum csum; - __u16 csum_start; -}; -#define SKB_GSO_CB_OFFSET 32 -#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET)) - -static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) -{ - return (skb_mac_header(inner_skb) - inner_skb->head) - - SKB_GSO_CB(inner_skb)->mac_offset; -} - -static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) -{ - int new_headroom, headroom; - int ret; - - headroom = skb_headroom(skb); - ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC); - if (ret) - return ret; - - new_headroom = skb_headroom(skb); - SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom); - return 0; -} - -static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res) -{ - /* Do not update partial checksums if remote checksum is enabled. */ - if (skb->remcsum_offload) - return; - - SKB_GSO_CB(skb)->csum = res; - SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head; -} - -/* Compute the checksum for a gso segment. First compute the checksum value - * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and - * then add in skb->csum (checksum from csum_start to end of packet). - * skb->csum and csum_start are then updated to reflect the checksum of the - * resultant packet starting from the transport header-- the resultant checksum - * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo - * header. - */ -static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) -{ - unsigned char *csum_start = skb_transport_header(skb); - int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start; - __wsum partial = SKB_GSO_CB(skb)->csum; - - SKB_GSO_CB(skb)->csum = res; - SKB_GSO_CB(skb)->csum_start = csum_start - skb->head; - - return csum_fold(csum_partial(csum_start, plen, partial)); -} - static inline bool skb_is_gso(const struct sk_buff *skb) { return skb_shinfo(skb)->gso_size; diff --git a/include/net/gro.h b/include/net/gro.h index 7b47dd6ce94f..75efa6fb8441 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -452,5 +452,6 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, gro_normal_list(napi); } +extern struct list_head offload_base; #endif /* _NET_IPV6_GRO_H */ diff --git a/include/net/gso.h b/include/net/gso.h new file mode 100644 index 000000000000..29975440cad5 --- /dev/null +++ b/include/net/gso.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _NET_GSO_H +#define _NET_GSO_H + +#include + +/* Keeps track of mac header offset relative to skb->head. + * It is useful for TSO of Tunneling protocol. e.g. GRE. + * For non-tunnel skb it points to skb_mac_header() and for + * tunnel skb it points to outer mac header. + * Keeps track of level of encapsulation of network headers. 
+ */ +struct skb_gso_cb { + union { + int mac_offset; + int data_offset; + }; + int encap_level; + __wsum csum; + __u16 csum_start; +}; +#define SKB_GSO_CB_OFFSET 32 +#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET)) + +static inline int skb_tnl_header_len(const struct sk_buff *inner_skb) +{ + return (skb_mac_header(inner_skb) - inner_skb->head) - + SKB_GSO_CB(inner_skb)->mac_offset; +} + +static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra) +{ + int new_headroom, headroom; + int ret; + + headroom = skb_headroom(skb); + ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC); + if (ret) + return ret; + + new_headroom = skb_headroom(skb); + SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom); + return 0; +} + +static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res) +{ + /* Do not update partial checksums if remote checksum is enabled. */ + if (skb->remcsum_offload) + return; + + SKB_GSO_CB(skb)->csum = res; + SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head; +} + +/* Compute the checksum for a gso segment. First compute the checksum value + * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and + * then add in skb->csum (checksum from csum_start to end of packet). + * skb->csum and csum_start are then updated to reflect the checksum of the + * resultant packet starting from the transport header-- the resultant checksum + * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo + * header. + */ +static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res) +{ + unsigned char *csum_start = skb_transport_header(skb); + int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start; + __wsum partial = SKB_GSO_CB(skb)->csum; + + SKB_GSO_CB(skb)->csum = res; + SKB_GSO_CB(skb)->csum_start = csum_start - skb->head; + + return csum_fold(csum_partial(csum_start, plen, partial)); +} + +struct sk_buff *__skb_gso_segment(struct sk_buff *skb, + netdev_features_t features, bool tx_path); + +static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + return __skb_gso_segment(skb, features, true); +} + +struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb, + netdev_features_t features, __be16 type); + +struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, + netdev_features_t features); + +bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu); + +bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); + +static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, + int pulled_hlen, u16 mac_offset, + int mac_len) +{ + skb->protocol = protocol; + skb->encapsulation = 1; + skb_push(skb, pulled_hlen); + skb_reset_transport_header(skb); + skb->mac_header = mac_offset; + skb->network_header = skb->mac_header + mac_len; + skb->mac_len = mac_len; +} + +#endif /* _NET_GSO_H */ diff --git a/include/net/udp.h b/include/net/udp.h index 4ed0b47c5582..e01340a27155 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/net/core/Makefile b/net/core/Makefile index 8f367813bc68..731db2eaa610 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -13,7 +13,7 @@ obj-y += dev.o dev_addr_lists.o dst.o netevent.o \ neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \ fib_notifier.o xdp.o flow_offload.o gro.o \ - netdev-genl.o netdev-genl-gen.o + 
netdev-genl.o netdev-genl-gen.o gso.o obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o diff --git a/net/core/dev.c b/net/core/dev.c index 6d6f8a7fe6b4..c2456b3667fe 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3209,7 +3209,7 @@ static u16 skb_tx_hash(const struct net_device *dev, return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; } -static void skb_warn_bad_offload(const struct sk_buff *skb) +void skb_warn_bad_offload(const struct sk_buff *skb) { static const netdev_features_t null_features; struct net_device *dev = skb->dev; @@ -3338,74 +3338,6 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) return vlan_get_protocol_and_depth(skb, type, depth); } -/* openvswitch calls this on rx path, so we need a different check. - */ -static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) -{ - if (tx_path) - return skb->ip_summed != CHECKSUM_PARTIAL && - skb->ip_summed != CHECKSUM_UNNECESSARY; - - return skb->ip_summed == CHECKSUM_NONE; -} - -/** - * __skb_gso_segment - Perform segmentation on skb. - * @skb: buffer to segment - * @features: features for the output path (see dev->features) - * @tx_path: whether it is called in TX path - * - * This function segments the given skb and returns a list of segments. - * - * It may return NULL if the skb requires no segmentation. This is - * only possible when GSO is used for verifying header integrity. - * - * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb. - */ -struct sk_buff *__skb_gso_segment(struct sk_buff *skb, - netdev_features_t features, bool tx_path) -{ - struct sk_buff *segs; - - if (unlikely(skb_needs_check(skb, tx_path))) { - int err; - - /* We're going to init ->check field in TCP or UDP header */ - err = skb_cow_head(skb, 0); - if (err < 0) - return ERR_PTR(err); - } - - /* Only report GSO partial support if it will enable us to - * support segmentation on this frame without needing additional - * work. - */ - if (features & NETIF_F_GSO_PARTIAL) { - netdev_features_t partial_features = NETIF_F_GSO_ROBUST; - struct net_device *dev = skb->dev; - - partial_features |= dev->features & dev->gso_partial_features; - if (!skb_gso_ok(skb, features | partial_features)) - features &= ~NETIF_F_GSO_PARTIAL; - } - - BUILD_BUG_ON(SKB_GSO_CB_OFFSET + - sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); - - SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); - SKB_GSO_CB(skb)->encap_level = 0; - - skb_reset_mac_header(skb); - skb_reset_mac_len(skb); - - segs = skb_mac_gso_segment(skb, features); - - if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) - skb_warn_bad_offload(skb); - - return segs; -} -EXPORT_SYMBOL(__skb_gso_segment); /* Take action when hardware reception checksum errors are detected. */ #ifdef CONFIG_BUG diff --git a/net/core/gro.c b/net/core/gro.c index 4d45f78e2fac..dca800068e41 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -10,7 +10,7 @@ #define GRO_MAX_HEAD (MAX_HEADER + 128) static DEFINE_SPINLOCK(offload_lock); -static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base); +struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base); /* Maximum number of GRO_NORMAL skbs to batch up for list-RX */ int gro_normal_batch __read_mostly = 8; @@ -92,63 +92,6 @@ void dev_remove_offload(struct packet_offload *po) } EXPORT_SYMBOL(dev_remove_offload); -/** - * skb_eth_gso_segment - segmentation handler for ethernet protocols. 
- * @skb: buffer to segment - * @features: features for the output path (see dev->features) - * @type: Ethernet Protocol ID - */ -struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb, - netdev_features_t features, __be16 type) -{ - struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); - struct packet_offload *ptype; - - rcu_read_lock(); - list_for_each_entry_rcu(ptype, &offload_base, list) { - if (ptype->type == type && ptype->callbacks.gso_segment) { - segs = ptype->callbacks.gso_segment(skb, features); - break; - } - } - rcu_read_unlock(); - - return segs; -} -EXPORT_SYMBOL(skb_eth_gso_segment); - -/** - * skb_mac_gso_segment - mac layer segmentation handler. - * @skb: buffer to segment - * @features: features for the output path (see dev->features) - */ -struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, - netdev_features_t features) -{ - struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); - struct packet_offload *ptype; - int vlan_depth = skb->mac_len; - __be16 type = skb_network_protocol(skb, &vlan_depth); - - if (unlikely(!type)) - return ERR_PTR(-EINVAL); - - __skb_pull(skb, vlan_depth); - - rcu_read_lock(); - list_for_each_entry_rcu(ptype, &offload_base, list) { - if (ptype->type == type && ptype->callbacks.gso_segment) { - segs = ptype->callbacks.gso_segment(skb, features); - break; - } - } - rcu_read_unlock(); - - __skb_push(skb, skb->data - skb_mac_header(skb)); - - return segs; -} -EXPORT_SYMBOL(skb_mac_gso_segment); int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) { diff --git a/net/core/gso.c b/net/core/gso.c new file mode 100644 index 000000000000..9e1803bfc9c6 --- /dev/null +++ b/net/core/gso.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include +#include +#include + +/** + * skb_eth_gso_segment - segmentation handler for ethernet protocols. + * @skb: buffer to segment + * @features: features for the output path (see dev->features) + * @type: Ethernet Protocol ID + */ +struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb, + netdev_features_t features, __be16 type) +{ + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); + struct packet_offload *ptype; + + rcu_read_lock(); + list_for_each_entry_rcu(ptype, &offload_base, list) { + if (ptype->type == type && ptype->callbacks.gso_segment) { + segs = ptype->callbacks.gso_segment(skb, features); + break; + } + } + rcu_read_unlock(); + + return segs; +} +EXPORT_SYMBOL(skb_eth_gso_segment); + +/** + * skb_mac_gso_segment - mac layer segmentation handler. + * @skb: buffer to segment + * @features: features for the output path (see dev->features) + */ +struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, + netdev_features_t features) +{ + struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); + struct packet_offload *ptype; + int vlan_depth = skb->mac_len; + __be16 type = skb_network_protocol(skb, &vlan_depth); + + if (unlikely(!type)) + return ERR_PTR(-EINVAL); + + __skb_pull(skb, vlan_depth); + + rcu_read_lock(); + list_for_each_entry_rcu(ptype, &offload_base, list) { + if (ptype->type == type && ptype->callbacks.gso_segment) { + segs = ptype->callbacks.gso_segment(skb, features); + break; + } + } + rcu_read_unlock(); + + __skb_push(skb, skb->data - skb_mac_header(skb)); + + return segs; +} +EXPORT_SYMBOL(skb_mac_gso_segment); +/* openvswitch calls this on rx path, so we need a different check. 
+ */ +static bool skb_needs_check(const struct sk_buff *skb, bool tx_path) +{ + if (tx_path) + return skb->ip_summed != CHECKSUM_PARTIAL && + skb->ip_summed != CHECKSUM_UNNECESSARY; + + return skb->ip_summed == CHECKSUM_NONE; +} + +/** + * __skb_gso_segment - Perform segmentation on skb. + * @skb: buffer to segment + * @features: features for the output path (see dev->features) + * @tx_path: whether it is called in TX path + * + * This function segments the given skb and returns a list of segments. + * + * It may return NULL if the skb requires no segmentation. This is + * only possible when GSO is used for verifying header integrity. + * + * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb. + */ +struct sk_buff *__skb_gso_segment(struct sk_buff *skb, + netdev_features_t features, bool tx_path) +{ + struct sk_buff *segs; + + if (unlikely(skb_needs_check(skb, tx_path))) { + int err; + + /* We're going to init ->check field in TCP or UDP header */ + err = skb_cow_head(skb, 0); + if (err < 0) + return ERR_PTR(err); + } + + /* Only report GSO partial support if it will enable us to + * support segmentation on this frame without needing additional + * work. + */ + if (features & NETIF_F_GSO_PARTIAL) { + netdev_features_t partial_features = NETIF_F_GSO_ROBUST; + struct net_device *dev = skb->dev; + + partial_features |= dev->features & dev->gso_partial_features; + if (!skb_gso_ok(skb, features | partial_features)) + features &= ~NETIF_F_GSO_PARTIAL; + } + + BUILD_BUG_ON(SKB_GSO_CB_OFFSET + + sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); + + SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); + SKB_GSO_CB(skb)->encap_level = 0; + + skb_reset_mac_header(skb); + skb_reset_mac_len(skb); + + segs = skb_mac_gso_segment(skb, features); + + if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) + skb_warn_bad_offload(skb); + + return segs; +} +EXPORT_SYMBOL(__skb_gso_segment); + +/** + * skb_gso_transport_seglen - Return length of individual segments of a gso packet + * + * @skb: GSO skb + * + * skb_gso_transport_seglen is used to determine the real size of the + * individual segments, including Layer4 headers (TCP/UDP). + * + * The MAC/L2 or network (IP, IPv6) headers are not accounted for. + */ +static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) +{ + const struct skb_shared_info *shinfo = skb_shinfo(skb); + unsigned int thlen = 0; + + if (skb->encapsulation) { + thlen = skb_inner_transport_header(skb) - + skb_transport_header(skb); + + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) + thlen += inner_tcp_hdrlen(skb); + } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { + thlen = tcp_hdrlen(skb); + } else if (unlikely(skb_is_gso_sctp(skb))) { + thlen = sizeof(struct sctphdr); + } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { + thlen = sizeof(struct udphdr); + } + /* UFO sets gso_size to the size of the fragmentation + * payload, i.e. the size of the L4 (UDP) header is already + * accounted for. + */ + return thlen + shinfo->gso_size; +} + +/** + * skb_gso_network_seglen - Return length of individual segments of a gso packet + * + * @skb: GSO skb + * + * skb_gso_network_seglen is used to determine the real size of the + * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). + * + * The MAC/L2 header is not accounted for. 
+ */ +static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) +{ + unsigned int hdr_len = skb_transport_header(skb) - + skb_network_header(skb); + + return hdr_len + skb_gso_transport_seglen(skb); +} + +/** + * skb_gso_mac_seglen - Return length of individual segments of a gso packet + * + * @skb: GSO skb + * + * skb_gso_mac_seglen is used to determine the real size of the + * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 + * headers (TCP/UDP). + */ +static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) +{ + unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); + + return hdr_len + skb_gso_transport_seglen(skb); +} + +/** + * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS + * + * There are a couple of instances where we have a GSO skb, and we + * want to determine what size it would be after it is segmented. + * + * We might want to check: + * - L3+L4+payload size (e.g. IP forwarding) + * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) + * + * This is a helper to do that correctly considering GSO_BY_FRAGS. + * + * @skb: GSO skb + * + * @seg_len: The segmented length (from skb_gso_*_seglen). In the + * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. + * + * @max_len: The maximum permissible length. + * + * Returns true if the segmented length <= max length. + */ +static inline bool skb_gso_size_check(const struct sk_buff *skb, + unsigned int seg_len, + unsigned int max_len) { + const struct skb_shared_info *shinfo = skb_shinfo(skb); + const struct sk_buff *iter; + + if (shinfo->gso_size != GSO_BY_FRAGS) + return seg_len <= max_len; + + /* Undo this so we can re-use header sizes */ + seg_len -= GSO_BY_FRAGS; + + skb_walk_frags(skb, iter) { + if (seg_len + skb_headlen(iter) > max_len) + return false; + } + + return true; +} + +/** + * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? + * + * @skb: GSO skb + * @mtu: MTU to validate against + * + * skb_gso_validate_network_len validates if a given skb will fit a + * wanted MTU once split. It considers L3 headers, L4 headers, and the + * payload. + */ +bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) +{ + return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); +} +EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); + +/** + * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? + * + * @skb: GSO skb + * @len: length to validate against + * + * skb_gso_validate_mac_len validates if a given skb will fit a wanted + * length once split, including L2, L3 and L4 headers and the payload. + */ +bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) +{ + return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); +} +EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); + diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7c4338221b17..fee2b1c105fe 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -67,6 +67,7 @@ #include #include #include +#include #include #include #include @@ -5766,147 +5767,6 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) } EXPORT_SYMBOL_GPL(skb_scrub_packet); -/** - * skb_gso_transport_seglen - Return length of individual segments of a gso packet - * - * @skb: GSO skb - * - * skb_gso_transport_seglen is used to determine the real size of the - * individual segments, including Layer4 headers (TCP/UDP). - * - * The MAC/L2 or network (IP, IPv6) headers are not accounted for. 
- */ -static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) -{ - const struct skb_shared_info *shinfo = skb_shinfo(skb); - unsigned int thlen = 0; - - if (skb->encapsulation) { - thlen = skb_inner_transport_header(skb) - - skb_transport_header(skb); - - if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) - thlen += inner_tcp_hdrlen(skb); - } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { - thlen = tcp_hdrlen(skb); - } else if (unlikely(skb_is_gso_sctp(skb))) { - thlen = sizeof(struct sctphdr); - } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { - thlen = sizeof(struct udphdr); - } - /* UFO sets gso_size to the size of the fragmentation - * payload, i.e. the size of the L4 (UDP) header is already - * accounted for. - */ - return thlen + shinfo->gso_size; -} - -/** - * skb_gso_network_seglen - Return length of individual segments of a gso packet - * - * @skb: GSO skb - * - * skb_gso_network_seglen is used to determine the real size of the - * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP). - * - * The MAC/L2 header is not accounted for. - */ -static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) -{ - unsigned int hdr_len = skb_transport_header(skb) - - skb_network_header(skb); - - return hdr_len + skb_gso_transport_seglen(skb); -} - -/** - * skb_gso_mac_seglen - Return length of individual segments of a gso packet - * - * @skb: GSO skb - * - * skb_gso_mac_seglen is used to determine the real size of the - * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 - * headers (TCP/UDP). - */ -static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) -{ - unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); - - return hdr_len + skb_gso_transport_seglen(skb); -} - -/** - * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS - * - * There are a couple of instances where we have a GSO skb, and we - * want to determine what size it would be after it is segmented. - * - * We might want to check: - * - L3+L4+payload size (e.g. IP forwarding) - * - L2+L3+L4+payload size (e.g. sanity check before passing to driver) - * - * This is a helper to do that correctly considering GSO_BY_FRAGS. - * - * @skb: GSO skb - * - * @seg_len: The segmented length (from skb_gso_*_seglen). In the - * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS]. - * - * @max_len: The maximum permissible length. - * - * Returns true if the segmented length <= max length. - */ -static inline bool skb_gso_size_check(const struct sk_buff *skb, - unsigned int seg_len, - unsigned int max_len) { - const struct skb_shared_info *shinfo = skb_shinfo(skb); - const struct sk_buff *iter; - - if (shinfo->gso_size != GSO_BY_FRAGS) - return seg_len <= max_len; - - /* Undo this so we can re-use header sizes */ - seg_len -= GSO_BY_FRAGS; - - skb_walk_frags(skb, iter) { - if (seg_len + skb_headlen(iter) > max_len) - return false; - } - - return true; -} - -/** - * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU? - * - * @skb: GSO skb - * @mtu: MTU to validate against - * - * skb_gso_validate_network_len validates if a given skb will fit a - * wanted MTU once split. It considers L3 headers, L4 headers, and the - * payload. 
- */ -bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) -{ - return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); -} -EXPORT_SYMBOL_GPL(skb_gso_validate_network_len); - -/** - * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length? - * - * @skb: GSO skb - * @len: length to validate against - * - * skb_gso_validate_mac_len validates if a given skb will fit a wanted - * length once split, including L2, L3 and L4 headers and the payload. - */ -bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) -{ - return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); -} -EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len); - static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) { int mac_len, meta_len; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index fd233c4195ac..0e16ac8282c5 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -100,6 +100,7 @@ #include #include #include +#include #include #include #include diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 3969fa805679..12c5fb3c6e1e 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 2b9cb5398335..311e70bfce40 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -11,6 +11,7 @@ #include #include #include +#include static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 244fb9365d87..457598dfa128 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -73,6 +73,7 @@ #include #include #include +#include #include #include #include diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 05b38f58b404..8311c38267b5 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index df5e407286d7..7e0542c10471 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -103,6 +103,7 @@ #include #include #include +#include #include #include #include diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 1f01e15ca24f..75aa4de5b731 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 75c02992c520..b33c7de5bdbc 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 00dc2e3b0184..d6314287338d 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "ip6_offload.h" diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index c722cb881b2d..c06ff7519f19 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -42,6 +42,7 @@ #include #include +#include #include #include #include diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index c39c1e32f980..ad3b8726873e 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -14,6 +14,7 @@ #include #include "ip6_offload.h" #include +#include static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, netdev_features_t features) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c 
index 148a0e2aa740..cfbe4beb8f1c 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "ieee80211_i.h" #include "driver-ops.h" diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index dc5165d3eec4..bf6e81d56263 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index 1482259de9b5..533d082f0701 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c @@ -14,6 +14,7 @@ #include #include #include +#include #include static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index d248763917ad..d885d34edfe1 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index e311462f6d98..556bc902af00 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c index 0f23e5e8e03e..f4a38bd6a7e0 100644 --- a/net/nsh/nsh.c +++ b/net/nsh/nsh.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index a8cf9a88758e..8074ea00d577 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 58f530f60172..a6d2a0b1aa21 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 2e9dce03d1ec..f3121c5a85e9 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 891e007d5c0b..9cff99558694 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -65,6 +65,7 @@ #include #include #include +#include #include #include #include diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 6ef3021e1169..0c9e93d66c50 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -21,6 +21,7 @@ #include #include +#include #include #include #include diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 5076da103f63..4a4e6ff894c1 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 277ad11f4d61..17d2d00ddb18 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/net/sctp/offload.c b/net/sctp/offload.c index eb874e3c399a..502095173d88 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c @@ -22,6 +22,7 @@ #include #include #include +#include static __le32 sctp_gso_make_checksum(struct sk_buff *skb) { diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 408f5e55744e..533697e2488f 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -15,6 +15,7 @@ 
#include #include #include +#include #include #include diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c index 1f99dc469027..0ee864a76579 100644 --- a/net/xfrm/xfrm_interface_core.c +++ b/net/xfrm/xfrm_interface_core.c @@ -33,6 +33,7 @@ #include #include +#include #include #include #include diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 369e5de8558f..662c83beb345 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3 From 703d7521555504b3a316b105b4806d641b7ebc76 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 18 May 2023 13:46:03 -0400 Subject: NFSD: Hoist rq_vec preparation into nfsd_read() [step two] Now that the preparation of an rq_vec has been removed from the generic read path, nfsd_splice_read() no longer needs to reset rq_next_page. nfsd4_encode_read() calls nfsd_splice_read() directly. As far as I can ascertain, resetting rq_next_page for NFSv4 splice reads is unnecessary because rq_next_page is already set correctly. Moreover, resetting it might even be incorrect if previous operations in the COMPOUND have already consumed at least a page of the send buffer. I would expect that the result would be encoding the READ payload over previously-encoded results. Signed-off-by: Chuck Lever --- fs/nfsd/nfs4xdr.c | 10 +++++----- fs/nfsd/vfs.c | 13 ++++++++++++- include/linux/sunrpc/xdr.h | 3 +-- net/sunrpc/xdr.c | 26 ++++++++++++-------------- 4 files changed, 30 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 3887da048232..c67bc904c7b7 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -4103,13 +4103,13 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp, __be32 zero = xdr_zero; __be32 nfserr; - read->rd_vlen = xdr_reserve_space_vec(xdr, resp->rqstp->rq_vec, maxcount); - if (read->rd_vlen < 0) + if (xdr_reserve_space_vec(xdr, maxcount) < 0) return nfserr_resource; - nfserr = nfsd_readv(resp->rqstp, read->rd_fhp, file, read->rd_offset, - resp->rqstp->rq_vec, read->rd_vlen, &maxcount, - &read->rd_eof); + nfserr = nfsd_iter_read(resp->rqstp, read->rd_fhp, file, + read->rd_offset, &maxcount, + xdr->buf->page_len & ~PAGE_MASK, + &read->rd_eof); read->rd_length = maxcount; if (nfserr) return nfserr; diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index d9055b5c496b..37febe1ff039 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1003,6 +1003,18 @@ static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp, } } +/** + * nfsd_splice_read - Perform a VFS read using a splice pipe + * @rqstp: RPC transaction context + * @fhp: file handle of file to be read + * @file: opened struct file of file to be read + * @offset: starting byte offset + * @count: IN: requested number of bytes; OUT: number of bytes read + * @eof: OUT: set non-zero if operation reached the end of the file + * + * Returns nfs_ok on success, otherwise an nfserr stat value is + * returned. 
+ */ __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, loff_t offset, unsigned long *count, u32 *eof) @@ -1016,7 +1028,6 @@ __be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp, ssize_t host_err; trace_nfsd_read_splice(rqstp, fhp, offset, *count); - rqstp->rq_next_page = rqstp->rq_respages + 1; host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor); return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err); } diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 72014c9216fc..f89ec4b5ea16 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -242,8 +242,7 @@ extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, extern void xdr_init_encode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, struct page **pages, struct rpc_rqst *rqst); extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); -extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, - size_t nbytes); +extern int xdr_reserve_space_vec(struct xdr_stream *xdr, size_t nbytes); extern void __xdr_commit_encode(struct xdr_stream *xdr); extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len); extern void xdr_truncate_decode(struct xdr_stream *xdr, size_t len); diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 36835b2f5446..2a22e78af116 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -1070,22 +1070,22 @@ __be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes) } EXPORT_SYMBOL_GPL(xdr_reserve_space); - /** * xdr_reserve_space_vec - Reserves a large amount of buffer space for sending * @xdr: pointer to xdr_stream - * @vec: pointer to a kvec array * @nbytes: number of bytes to reserve * - * Reserves enough buffer space to encode 'nbytes' of data and stores the - * pointers in 'vec'. The size argument passed to xdr_reserve_space() is - * determined based on the number of bytes remaining in the current page to - * avoid invalidating iov_base pointers when xdr_commit_encode() is called. + * The size argument passed to xdr_reserve_space() is determined based + * on the number of bytes remaining in the current page to avoid + * invalidating iov_base pointers when xdr_commit_encode() is called. + * + * Return values: + * %0: success + * %-EMSGSIZE: not enough space is available in @xdr */ -int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbytes) +int xdr_reserve_space_vec(struct xdr_stream *xdr, size_t nbytes) { - int thislen; - int v = 0; + size_t thislen; __be32 *p; /* @@ -1097,21 +1097,19 @@ int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec, size_t nbyte xdr->end = xdr->p; } + /* XXX: Let's find a way to make this more efficient */ while (nbytes) { thislen = xdr->buf->page_len % PAGE_SIZE; thislen = min_t(size_t, nbytes, PAGE_SIZE - thislen); p = xdr_reserve_space(xdr, thislen); if (!p) - return -EIO; + return -EMSGSIZE; - vec[v].iov_base = p; - vec[v].iov_len = thislen; - v++; nbytes -= thislen; } - return v; + return 0; } EXPORT_SYMBOL_GPL(xdr_reserve_space_vec); -- cgit v1.2.3 From 26e35370b9766914364409626035fda2c63fef05 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 9 Jun 2023 15:01:17 +0800 Subject: net/sched: act_pedit: Use kmemdup() to replace kmalloc + memcpy ./net/sched/act_pedit.c:245:21-28: WARNING opportunity for kmemdup. 
Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=5478 Signed-off-by: Jiapeng Chong Reviewed-by: Pedro Tammela Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/act_pedit.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'net') diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index fc945c7e4123..8c4e7fddddbf 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -242,14 +242,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, nparms->tcfp_flags = parm->flags; nparms->tcfp_nkeys = parm->nkeys; - nparms->tcfp_keys = kmalloc(ksize, GFP_KERNEL); + nparms->tcfp_keys = kmemdup(parm->keys, ksize, GFP_KERNEL); if (!nparms->tcfp_keys) { ret = -ENOMEM; goto put_chain; } - memcpy(nparms->tcfp_keys, parm->keys, ksize); - for (i = 0; i < nparms->tcfp_nkeys; ++i) { u32 offmask = nparms->tcfp_keys[i].offmask; u32 cur = nparms->tcfp_keys[i].off; -- cgit v1.2.3 From 2b84960fc5dd38a19241388fb33f20936cb217e2 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 9 Jun 2023 16:59:16 +0300 Subject: net/sched: taprio: report class offload stats per TXQ, not per TC The taprio Qdisc creates child classes per netdev TX queue, but taprio_dump_class_stats() currently reports offload statistics per traffic class. Traffic classes are groups of TXQs sharing the same dequeue priority, so this is incorrect and we shouldn't be bundling up the TXQ stats when reporting them, as we currently do in enetc. Modify the API from taprio to drivers such that they report TXQ offload stats and not TC offload stats. There is no change in the UAPI or in the global Qdisc stats. Fixes: 6c1adb650c8d ("net/sched: taprio: add netlink reporting for offload statistics counters") Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 20 +++++++------------- include/net/pkt_sched.h | 10 +++++----- net/sched/sch_taprio.c | 8 ++++---- 3 files changed, 16 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 71157eba1fbe..58cdd67bb573 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -160,20 +160,14 @@ static void enetc_taprio_stats(struct net_device *ndev, stats->window_drops = window_drops; } -static void enetc_taprio_tc_stats(struct net_device *ndev, - struct tc_taprio_qopt_tc_stats *tc_stats) +static void enetc_taprio_queue_stats(struct net_device *ndev, + struct tc_taprio_qopt_queue_stats *queue_stats) { - struct tc_taprio_qopt_stats *stats = &tc_stats->stats; + struct tc_taprio_qopt_stats *stats = &queue_stats->stats; struct enetc_ndev_priv *priv = netdev_priv(ndev); - int tc = tc_stats->tc; - u64 window_drops = 0; - int i; + int queue = queue_stats->queue; - for (i = 0; i < priv->num_tx_rings; i++) - if (priv->tx_ring[i]->prio == tc) - window_drops += priv->tx_ring[i]->stats.win_drop; - - stats->window_drops = window_drops; + stats->window_drops = priv->tx_ring[queue]->stats.win_drop; } static int enetc_taprio_replace(struct net_device *ndev, @@ -208,8 +202,8 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data) case TAPRIO_CMD_STATS: enetc_taprio_stats(ndev, &offload->stats); break; - case TAPRIO_CMD_TC_STATS: - enetc_taprio_tc_stats(ndev, &offload->tc_stats); + case TAPRIO_CMD_QUEUE_STATS: + enetc_taprio_queue_stats(ndev, &offload->queue_stats); break; default: err = -EOPNOTSUPP; diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 518febb91c9f..e98aac9d5ad5 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -191,7 +191,7 @@ enum tc_taprio_qopt_cmd { TAPRIO_CMD_REPLACE, TAPRIO_CMD_DESTROY, TAPRIO_CMD_STATS, - TAPRIO_CMD_TC_STATS, + TAPRIO_CMD_QUEUE_STATS, }; /** @@ -208,8 +208,8 @@ struct tc_taprio_qopt_stats { u64 tx_overruns; }; -struct tc_taprio_qopt_tc_stats { - int tc; +struct tc_taprio_qopt_queue_stats { + int queue; struct tc_taprio_qopt_stats stats; }; @@ -227,8 +227,8 @@ struct tc_taprio_qopt_offload { union { /* TAPRIO_CMD_STATS */ struct tc_taprio_qopt_stats stats; - /* TAPRIO_CMD_TC_STATS */ - struct tc_taprio_qopt_tc_stats tc_stats; + /* TAPRIO_CMD_QUEUE_STATS */ + struct tc_taprio_qopt_queue_stats queue_stats; /* TAPRIO_CMD_REPLACE */ struct { struct tc_mqprio_qopt_offload mqprio; diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 4a4e6ff894c1..c6627f5abdfa 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -2458,9 +2458,9 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, { struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); struct tc_taprio_qopt_offload offload = { - .cmd = TAPRIO_CMD_TC_STATS, - .tc_stats = { - .tc = cl - 1, + .cmd = TAPRIO_CMD_QUEUE_STATS, + .queue_stats = { + .queue = cl - 1, }, }; struct Qdisc *child; @@ -2470,7 +2470,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl, qdisc_qstats_copy(d, child) < 0) return -1; - return taprio_dump_xstats(sch, d, &offload, &offload.tc_stats.stats); + return taprio_dump_xstats(sch, d, &offload, &offload.queue_stats.stats); } static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg) -- cgit v1.2.3 From e069ba07e6c7af69e119316bc87ff44869095f49 Mon Sep 17 
00:00:00 2001 From: Aaron Conole Date: Fri, 9 Jun 2023 09:59:55 -0400 Subject: net: openvswitch: add support for l4 symmetric hashing Since its introduction, the ovs module execute_hash action allowed hash algorithms other than the skb->l4_hash to be used. However, additional hash algorithms were not implemented. This means flows requiring different hash distributions weren't able to use the kernel datapath. Now, introduce support for symmetric hashing algorithm as an alternative hash supported by the ovs module using the flow dissector. Output of flow using l4_sym hash: recirc_id(0),in_port(3),eth(),eth_type(0x0800), ipv4(dst=64.0.0.0/192.0.0.0,proto=6,frag=no), packets:30473425, bytes:45902883702, used:0.000s, flags:SP., actions:hash(sym_l4(0)),recirc(0xd) Some performance testing with no GRO/GSO, two veths, single flow: hash(l4(0)): 4.35 GBits/s hash(l4_sym(0)): 4.24 GBits/s Signed-off-by: Aaron Conole Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 1 + net/openvswitch/actions.c | 12 ++++++++++-- net/openvswitch/flow_netlink.c | 2 ++ 3 files changed, 13 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index c5d62ee82567..e94870e77ee9 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -765,6 +765,7 @@ struct ovs_action_push_vlan { */ enum ovs_hash_alg { OVS_HASH_ALG_L4, + OVS_HASH_ALG_SYM_L4, }; /* diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 8074ea00d577..cab1e02b63e0 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -1073,8 +1073,16 @@ static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key, struct ovs_action_hash *hash_act = nla_data(attr); u32 hash = 0; - /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */ - hash = skb_get_hash(skb); + if (hash_act->hash_alg == OVS_HASH_ALG_L4) { + /* OVS_HASH_ALG_L4 hasing type. */ + hash = skb_get_hash(skb); + } else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) { + /* OVS_HASH_ALG_SYM_L4 hashing type. NOTE: this doesn't + * extend past an encapsulated header. + */ + hash = __skb_get_hash_symmetric(skb); + } + hash = jhash_1word(hash, hash_act->hash_basis); if (!hash) hash = 0x1; diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index ead5418c126e..41116361433d 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -3221,6 +3221,8 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, switch (act_hash->hash_alg) { case OVS_HASH_ALG_L4: + fallthrough; + case OVS_HASH_ALG_SYM_L4: break; default: return -EINVAL; -- cgit v1.2.3 From 5e2ff6704a275be009be8979af17c52361b79b89 Mon Sep 17 00:00:00 2001 From: Alexander Mikhalitsyn Date: Thu, 8 Jun 2023 22:26:25 +0200 Subject: scm: add SO_PASSPIDFD and SCM_PIDFD Implement SCM_PIDFD, a new type of CMSG type analogical to SCM_CREDENTIALS, but it contains pidfd instead of plain pid, which allows programmers not to care about PID reuse problem. We mask SO_PASSPIDFD feature if CONFIG_UNIX is not builtin because it depends on a pidfd_prepare() API which is not exported to the kernel modules. Idea comes from UAPI kernel group: https://uapi-group.org/kernel-features/ Big thanks to Christian Brauner and Lennart Poettering for productive discussions about this. Cc: "David S. 
Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Paolo Abeni Cc: Leon Romanovsky Cc: David Ahern Cc: Arnd Bergmann Cc: Kees Cook Cc: Christian Brauner Cc: Kuniyuki Iwashima Cc: Lennart Poettering Cc: Luca Boccassi Cc: linux-kernel@vger.kernel.org Cc: netdev@vger.kernel.org Cc: linux-arch@vger.kernel.org Tested-by: Luca Boccassi Reviewed-by: Kuniyuki Iwashima Reviewed-by: Christian Brauner Signed-off-by: Alexander Mikhalitsyn Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- arch/alpha/include/uapi/asm/socket.h | 2 ++ arch/mips/include/uapi/asm/socket.h | 2 ++ arch/parisc/include/uapi/asm/socket.h | 2 ++ arch/sparc/include/uapi/asm/socket.h | 2 ++ include/linux/net.h | 1 + include/linux/socket.h | 1 + include/net/scm.h | 39 +++++++++++++++++++++++++++++++-- include/uapi/asm-generic/socket.h | 2 ++ net/core/sock.c | 11 ++++++++++ net/mptcp/sockopt.c | 1 + net/unix/af_unix.c | 18 ++++++++++----- tools/include/uapi/asm-generic/socket.h | 2 ++ 12 files changed, 76 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 739891b94136..ff310613ae64 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -137,6 +137,8 @@ #define SO_RCVMARK 75 +#define SO_PASSPIDFD 76 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 18f3d95ecfec..762dcb80e4ec 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -148,6 +148,8 @@ #define SO_RCVMARK 75 +#define SO_PASSPIDFD 76 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index f486d3dfb6bb..df16a3e16d64 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -129,6 +129,8 @@ #define SO_RCVMARK 0x4049 +#define SO_PASSPIDFD 0x404A + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 2fda57a3ea86..6e2847804fea 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -130,6 +130,8 @@ #define SO_RCVMARK 0x0054 +#define SO_PASSPIDFD 0x0055 + #if !defined(__KERNEL__) diff --git a/include/linux/net.h b/include/linux/net.h index 8defc8f1d82e..23324e9a2b3d 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -43,6 +43,7 @@ struct net; #define SOCK_PASSSEC 4 #define SOCK_SUPPORT_ZC 5 #define SOCK_CUSTOM_SOCKOPT 6 +#define SOCK_PASSPIDFD 7 #ifndef ARCH_HAS_SOCKET_TYPES /** diff --git a/include/linux/socket.h b/include/linux/socket.h index 3fd3436bc09f..58204700018a 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -177,6 +177,7 @@ static inline size_t msg_data_left(struct msghdr *msg) #define SCM_RIGHTS 0x01 /* rw: access rights (array of int) */ #define SCM_CREDENTIALS 0x02 /* rw: struct ucred */ #define SCM_SECURITY 0x03 /* rw: security label */ +#define SCM_PIDFD 0x04 /* ro: pidfd (int) */ struct ucred { __u32 pid; diff --git a/include/net/scm.h b/include/net/scm.h index 585adc1346bd..c67f765a165b 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -120,12 +120,44 @@ static inline bool scm_has_secdata(struct socket *sock) } #endif /* CONFIG_SECURITY_NETWORK */ +static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm) +{ + struct file *pidfd_file = NULL; + int pidfd; + + /* + * 
put_cmsg() doesn't return an error if CMSG is truncated, + * that's why we need to opencode these checks here. + */ + if ((msg->msg_controllen <= sizeof(struct cmsghdr)) || + (msg->msg_controllen - sizeof(struct cmsghdr)) < sizeof(int)) { + msg->msg_flags |= MSG_CTRUNC; + return; + } + + WARN_ON_ONCE(!scm->pid); + pidfd = pidfd_prepare(scm->pid, 0, &pidfd_file); + + if (put_cmsg(msg, SOL_SOCKET, SCM_PIDFD, sizeof(int), &pidfd)) { + if (pidfd_file) { + put_unused_fd(pidfd); + fput(pidfd_file); + } + + return; + } + + if (pidfd_file) + fd_install(pidfd, pidfd_file); +} + static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm, int flags) { if (!msg->msg_control) { - if (test_bit(SOCK_PASSCRED, &sock->flags) || scm->fp || - scm_has_secdata(sock)) + if (test_bit(SOCK_PASSCRED, &sock->flags) || + test_bit(SOCK_PASSPIDFD, &sock->flags) || + scm->fp || scm_has_secdata(sock)) msg->msg_flags |= MSG_CTRUNC; scm_destroy(scm); return; @@ -141,6 +173,9 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds); } + if (test_bit(SOCK_PASSPIDFD, &sock->flags)) + scm_pidfd_recv(msg, scm); + scm_destroy_cred(scm); scm_passec(sock, msg, scm); diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 638230899e98..b76169fdb80b 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -132,6 +132,8 @@ #define SO_RCVMARK 75 +#define SO_PASSPIDFD 76 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/net/core/sock.c b/net/core/sock.c index 24f2761bdb1d..ed4eb4ba738b 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1246,6 +1246,13 @@ set_sndbuf: clear_bit(SOCK_PASSCRED, &sock->flags); break; + case SO_PASSPIDFD: + if (valbool) + set_bit(SOCK_PASSPIDFD, &sock->flags); + else + clear_bit(SOCK_PASSPIDFD, &sock->flags); + break; + case SO_TIMESTAMP_OLD: case SO_TIMESTAMP_NEW: case SO_TIMESTAMPNS_OLD: @@ -1732,6 +1739,10 @@ int sk_getsockopt(struct sock *sk, int level, int optname, v.val = !!test_bit(SOCK_PASSCRED, &sock->flags); break; + case SO_PASSPIDFD: + v.val = !!test_bit(SOCK_PASSPIDFD, &sock->flags); + break; + case SO_PEERCRED: { struct ucred peercred; diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index d4258869ac48..e172a5848b0d 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -355,6 +355,7 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, case SO_BROADCAST: case SO_BSDCOMPAT: case SO_PASSCRED: + case SO_PASSPIDFD: case SO_PASSSEC: case SO_RXQ_OVFL: case SO_WIFI_STATUS: diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 653136d68b32..c46c2f5d860c 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1361,7 +1361,8 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, if (err) goto out; - if (test_bit(SOCK_PASSCRED, &sock->flags) && + if ((test_bit(SOCK_PASSCRED, &sock->flags) || + test_bit(SOCK_PASSPIDFD, &sock->flags)) && !unix_sk(sk)->addr) { err = unix_autobind(sk); if (err) @@ -1469,7 +1470,8 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr, if (err) goto out; - if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) { + if ((test_bit(SOCK_PASSCRED, &sock->flags) || + test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) { err = unix_autobind(sk); if (err) goto out; @@ -1670,6 +1672,8 @@ static void unix_sock_inherit_flags(const struct 
socket *old, { if (test_bit(SOCK_PASSCRED, &old->flags)) set_bit(SOCK_PASSCRED, &new->flags); + if (test_bit(SOCK_PASSPIDFD, &old->flags)) + set_bit(SOCK_PASSPIDFD, &new->flags); if (test_bit(SOCK_PASSSEC, &old->flags)) set_bit(SOCK_PASSSEC, &new->flags); } @@ -1819,8 +1823,10 @@ static bool unix_passcred_enabled(const struct socket *sock, const struct sock *other) { return test_bit(SOCK_PASSCRED, &sock->flags) || + test_bit(SOCK_PASSPIDFD, &sock->flags) || !other->sk_socket || - test_bit(SOCK_PASSCRED, &other->sk_socket->flags); + test_bit(SOCK_PASSCRED, &other->sk_socket->flags) || + test_bit(SOCK_PASSPIDFD, &other->sk_socket->flags); } /* @@ -1904,7 +1910,8 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, goto out; } - if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) { + if ((test_bit(SOCK_PASSCRED, &sock->flags) || + test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) { err = unix_autobind(sk); if (err) goto out; @@ -2718,7 +2725,8 @@ unlock: /* Never glue messages from different writers */ if (!unix_skb_scm_eq(skb, &scm)) break; - } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { + } else if (test_bit(SOCK_PASSCRED, &sock->flags) || + test_bit(SOCK_PASSPIDFD, &sock->flags)) { /* Copy credentials */ scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); unix_set_secdata(&scm, skb); diff --git a/tools/include/uapi/asm-generic/socket.h b/tools/include/uapi/asm-generic/socket.h index 8756df13be50..fbbc4bf53ee3 100644 --- a/tools/include/uapi/asm-generic/socket.h +++ b/tools/include/uapi/asm-generic/socket.h @@ -121,6 +121,8 @@ #define SO_RCVMARK 75 +#define SO_PASSPIDFD 76 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) -- cgit v1.2.3 From 7b26952a91cf65ff1cc867a2382a8964d8c0ee7d Mon Sep 17 00:00:00 2001 From: Alexander Mikhalitsyn Date: Thu, 8 Jun 2023 22:26:26 +0200 Subject: net: core: add getsockopt SO_PEERPIDFD Add SO_PEERPIDFD which allows to get pidfd of peer socket holder pidfd. This thing is direct analog of SO_PEERCRED which allows to get plain PID. Cc: "David S. Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Paolo Abeni Cc: Leon Romanovsky Cc: David Ahern Cc: Arnd Bergmann Cc: Kees Cook Cc: Christian Brauner Cc: Kuniyuki Iwashima Cc: Lennart Poettering Cc: Luca Boccassi Cc: Daniel Borkmann Cc: Stanislav Fomichev Cc: bpf@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: netdev@vger.kernel.org Cc: linux-arch@vger.kernel.org Reviewed-by: Christian Brauner Acked-by: Stanislav Fomichev Tested-by: Luca Boccassi Signed-off-by: Alexander Mikhalitsyn Reviewed-by: Eric Dumazet Signed-off-by: David S. 
Miller --- arch/alpha/include/uapi/asm/socket.h | 1 + arch/mips/include/uapi/asm/socket.h | 1 + arch/parisc/include/uapi/asm/socket.h | 1 + arch/sparc/include/uapi/asm/socket.h | 1 + include/uapi/asm-generic/socket.h | 1 + net/core/sock.c | 33 +++++++++++++++++++++++++++++++++ net/unix/af_unix.c | 16 ++++++++++++++++ tools/include/uapi/asm-generic/socket.h | 1 + 8 files changed, 55 insertions(+) (limited to 'net') diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index ff310613ae64..e94f621903fe 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -138,6 +138,7 @@ #define SO_RCVMARK 75 #define SO_PASSPIDFD 76 +#define SO_PEERPIDFD 77 #if !defined(__KERNEL__) diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 762dcb80e4ec..60ebaed28a4c 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -149,6 +149,7 @@ #define SO_RCVMARK 75 #define SO_PASSPIDFD 76 +#define SO_PEERPIDFD 77 #if !defined(__KERNEL__) diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index df16a3e16d64..be264c2b1a11 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -130,6 +130,7 @@ #define SO_RCVMARK 0x4049 #define SO_PASSPIDFD 0x404A +#define SO_PEERPIDFD 0x404B #if !defined(__KERNEL__) diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 6e2847804fea..682da3714686 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -131,6 +131,7 @@ #define SO_RCVMARK 0x0054 #define SO_PASSPIDFD 0x0055 +#define SO_PEERPIDFD 0x0056 #if !defined(__KERNEL__) diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index b76169fdb80b..8ce8a39a1e5f 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -133,6 +133,7 @@ #define SO_RCVMARK 75 #define SO_PASSPIDFD 76 +#define SO_PEERPIDFD 77 #if !defined(__KERNEL__) diff --git a/net/core/sock.c b/net/core/sock.c index ed4eb4ba738b..ea66b1afadd0 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1758,6 +1758,39 @@ int sk_getsockopt(struct sock *sk, int level, int optname, goto lenout; } + case SO_PEERPIDFD: + { + struct pid *peer_pid; + struct file *pidfd_file = NULL; + int pidfd; + + if (len > sizeof(pidfd)) + len = sizeof(pidfd); + + spin_lock(&sk->sk_peer_lock); + peer_pid = get_pid(sk->sk_peer_pid); + spin_unlock(&sk->sk_peer_lock); + + if (!peer_pid) + return -ESRCH; + + pidfd = pidfd_prepare(peer_pid, 0, &pidfd_file); + put_pid(peer_pid); + if (pidfd < 0) + return pidfd; + + if (copy_to_sockptr(optval, &pidfd, len) || + copy_to_sockptr(optlen, &len, sizeof(int))) { + put_unused_fd(pidfd); + fput(pidfd_file); + + return -EFAULT; + } + + fd_install(pidfd, pidfd_file); + return 0; + } + case SO_PEERGROUPS: { const struct cred *cred; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index c46c2f5d860c..73c61a010b01 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -921,11 +921,26 @@ static void unix_unhash(struct sock *sk) */ } +static bool unix_bpf_bypass_getsockopt(int level, int optname) +{ + if (level == SOL_SOCKET) { + switch (optname) { + case SO_PEERPIDFD: + return true; + default: + return false; + } + } + + return false; +} + struct proto unix_dgram_proto = { .name = "UNIX", .owner = THIS_MODULE, .obj_size = sizeof(struct unix_sock), .close = unix_close, + .bpf_bypass_getsockopt = 
unix_bpf_bypass_getsockopt, #ifdef CONFIG_BPF_SYSCALL .psock_update_sk_prot = unix_dgram_bpf_update_proto, #endif @@ -937,6 +952,7 @@ struct proto unix_stream_proto = { .obj_size = sizeof(struct unix_sock), .close = unix_close, .unhash = unix_unhash, + .bpf_bypass_getsockopt = unix_bpf_bypass_getsockopt, #ifdef CONFIG_BPF_SYSCALL .psock_update_sk_prot = unix_stream_bpf_update_proto, #endif diff --git a/tools/include/uapi/asm-generic/socket.h b/tools/include/uapi/asm-generic/socket.h index fbbc4bf53ee3..54d9c8bf7c55 100644 --- a/tools/include/uapi/asm-generic/socket.h +++ b/tools/include/uapi/asm-generic/socket.h @@ -122,6 +122,7 @@ #define SO_RCVMARK 75 #define SO_PASSPIDFD 76 +#define SO_PEERPIDFD 77 #if !defined(__KERNEL__) -- cgit v1.2.3 From 97154bcf4d1b7cabefec8a72cff5fbb91d5afb7b Mon Sep 17 00:00:00 2001 From: Alexander Mikhalitsyn Date: Thu, 8 Jun 2023 22:26:28 +0200 Subject: af_unix: Kconfig: make CONFIG_UNIX bool Let's make CONFIG_UNIX a bool instead of a tristate. We've decided to do that during discussion about SCM_PIDFD patchset [1]. [1] https://lore.kernel.org/lkml/20230524081933.44dc8bea@kernel.org/ Cc: "David S. Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Paolo Abeni Cc: Leon Romanovsky Cc: David Ahern Cc: Arnd Bergmann Cc: Kees Cook Cc: Christian Brauner Cc: Kuniyuki Iwashima Cc: Lennart Poettering Cc: Luca Boccassi Cc: linux-kernel@vger.kernel.org Cc: netdev@vger.kernel.org Cc: linux-arch@vger.kernel.org Suggested-by: Jakub Kicinski Signed-off-by: Alexander Mikhalitsyn Acked-by: Christian Brauner Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- net/unix/Kconfig | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'net') diff --git a/net/unix/Kconfig b/net/unix/Kconfig index b7f811216820..28b232f281ab 100644 --- a/net/unix/Kconfig +++ b/net/unix/Kconfig @@ -4,7 +4,7 @@ # config UNIX - tristate "Unix domain sockets" + bool "Unix domain sockets" help If you say Y here, you will include support for Unix domain sockets; sockets are the standard Unix mechanism for establishing and @@ -14,10 +14,6 @@ config UNIX an embedded system or something similar, you therefore definitely want to say Y here. - To compile this driver as a module, choose M here: the module will be - called unix. Note that several important services won't work - correctly if you say M here and then neglect to load the module. - Say Y unless you know what you are doing. config UNIX_SCM -- cgit v1.2.3 From 5ab8c41cef30d8b6160a80b69d2eb39d570491ac Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 9 Jun 2023 14:53:30 -0700 Subject: netlink: support extack in dump ->start() Commit 4a19edb60d02 ("netlink: Pass extack to dump handlers") added extack support to netlink dumps. It was focused on rtnl and since rtnl does not use ->start(), ->done() callbacks it ignored those. Genetlink on the other hand uses ->start() extensively, for parsing and input validation. Pass the extack in via struct netlink_dump_control and link it to cb for the time of ->start(). Both struct netlink_dump_control and extack itself live on the stack so we can't keep the same extack for the duration of the dump. This means that the extack visible in ->start() and each ->dump() callback will be different. Corner cases like reporting a warning message in DONE across dump calls are still not supported. We could put the extack (for dumps) in the socket struct, but layering makes it slightly awkward (extack pointer is decided before the DO / DUMP split).
The genetlink dump error extacks are now surfaced: $ cli.py --spec netlink/specs/ethtool.yaml --dump channels-get lib.ynl.NlError: Netlink error: Invalid argument nl_len = 64 (48) nl_flags = 0x300 nl_type = 2 error: -22 extack: {'msg': 'request header missing'} Previously extack was missing: $ cli.py --spec netlink/specs/ethtool.yaml --dump channels-get lib.ynl.NlError: Netlink error: Invalid argument nl_len = 36 (20) nl_flags = 0x100 nl_type = 2 error: -22 Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- include/linux/netlink.h | 1 + net/netlink/af_netlink.c | 2 ++ net/netlink/genetlink.c | 2 ++ 3 files changed, 5 insertions(+) (limited to 'net') diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 19c0791ed9d5..9eec3f4f5351 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -311,6 +311,7 @@ struct netlink_dump_control { int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *skb, struct netlink_callback *); int (*done)(struct netlink_callback *); + struct netlink_ext_ack *extack; void *data; struct module *module; u32 min_dump_alloc; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 3a1e0fd5bf14..cbd9aa7ee24a 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2360,7 +2360,9 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, cb->strict_check = !!(nlk2->flags & NETLINK_F_STRICT_CHK); if (control->start) { + cb->extack = control->extack; ret = control->start(cb); + cb->extack = NULL; if (ret) goto error_put; } diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 04c4036bf406..a157247a1e45 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -912,6 +912,7 @@ static int genl_family_rcv_msg_dumpit(const struct genl_family *family, .start = genl_start, .dump = genl_lock_dumpit, .done = genl_lock_done, + .extack = extack, }; genl_unlock(); @@ -924,6 +925,7 @@ static int genl_family_rcv_msg_dumpit(const struct genl_family *family, .start = genl_start, .dump = ops->dumpit, .done = genl_parallel_done, + .extack = extack, }; err = __netlink_dump_start(net->genl_sock, skb, nlh, &c); -- cgit v1.2.3 From 500e1340d1d2695de3f15fc0b3781f593a77acc2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 9 Jun 2023 14:53:31 -0700 Subject: net: ethtool: don't require empty header nests Ethtool currently requires a header nest (which is used to carry the common family options) in all requests including dumps. $ cli.py --spec netlink/specs/ethtool.yaml --dump channels-get lib.ynl.NlError: Netlink error: Invalid argument nl_len = 64 (48) nl_flags = 0x300 nl_type = 2 error: -22 extack: {'msg': 'request header missing'} $ cli.py --spec netlink/specs/ethtool.yaml --dump channels-get \ --json '{"header":{}}'; ) [{'combined-count': 1, 'combined-max': 1, 'header': {'dev-index': 2, 'dev-name': 'enp1s0'}}] Requiring the header nest to always be there may seem nice from the consistency perspective, but it's not serving any practical purpose. We shouldn't burden the user like this. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- net/ethtool/netlink.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 08120095cc68..5dd5e8222c45 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -96,6 +96,8 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, int ret; if (!header) { + if (!require_dev) + return 0; NL_SET_ERR_MSG(extack, "request header missing"); return -EINVAL; } -- cgit v1.2.3 From fbf934068f6b93a8c4ddda293fc02b4eb3747323 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 9 Jun 2023 20:42:44 +0000 Subject: tcp: let tcp_send_syn_data() build headless packets tcp_send_syn_data() is the last component in TCP transmit path to put payload in skb->head. Switch it to use page frags, so that we can remove dead code later. This allows to put more payload than previous implementation. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 1 + net/ipv4/tcp.c | 2 +- net/ipv4/tcp_output.c | 31 +++++++++++++++++++------------ 3 files changed, 21 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/include/net/tcp.h b/include/net/tcp.h index 49611af31bb7..f718ed258f07 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -333,6 +333,7 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, size_t size, int flags); int tcp_send_mss(struct sock *sk, int *size_goal, int flags); +int tcp_wmem_schedule(struct sock *sk, int copy); void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle, int size_goal); void tcp_release_cb(struct sock *sk); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 09f03221a6f1..da7f156d9fad 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -957,7 +957,7 @@ static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb) } -static int tcp_wmem_schedule(struct sock *sk, int copy) +int tcp_wmem_schedule(struct sock *sk, int copy) { int left; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index f8ce77ce7c3e..d03966337359 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3802,8 +3802,9 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_request *fo = tp->fastopen_req; - int space, err = 0; + struct page_frag *pfrag = sk_page_frag(sk); struct sk_buff *syn_data; + int space, err = 0; tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) @@ -3822,25 +3823,31 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) space = min_t(size_t, space, fo->size); - /* limit to order-0 allocations */ - space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER)); - - syn_data = tcp_stream_alloc_skb(sk, space, sk->sk_allocation, false); + if (space && + !skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE), + pfrag, sk->sk_allocation)) + goto fallback; + syn_data = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, false); if (!syn_data) goto fallback; memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); if (space) { - int copied = copy_from_iter(skb_put(syn_data, space), space, - &fo->data->msg_iter); - if (unlikely(!copied)) { + space = min_t(size_t, space, pfrag->size - pfrag->offset); + space = tcp_wmem_schedule(sk, space); + } + if (space) { + space = copy_page_from_iter(pfrag->page, pfrag->offset, + space, 
&fo->data->msg_iter); + if (unlikely(!space)) { tcp_skb_tsorted_anchor_cleanup(syn_data); kfree_skb(syn_data); goto fallback; } - if (copied != space) { - skb_trim(syn_data, copied); - space = copied; - } + skb_fill_page_desc(syn_data, 0, pfrag->page, + pfrag->offset, space); + page_ref_inc(pfrag->page); + pfrag->offset += space; + skb_len_add(syn_data, space); skb_zcopy_set(syn_data, fo->uarg, NULL); } /* No more data pending in inet_wait_for_connect() */ -- cgit v1.2.3 From b4a24397139cd943c8fa057c372f8a019e07168d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 9 Jun 2023 20:42:45 +0000 Subject: tcp: remove some dead code Now all skbs in write queue do not contain any payload in skb->head, we can remove some dead code. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_output.c | 40 ++++++++++++---------------------------- 1 file changed, 12 insertions(+), 28 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d03966337359..fedbe842abdd 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1530,7 +1530,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *buff; - int nsize, old_factor; + int old_factor; long limit; int nlen; u8 flags; @@ -1538,9 +1538,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, if (WARN_ON(len > skb->len)) return -EINVAL; - nsize = skb_headlen(skb) - len; - if (nsize < 0) - nsize = 0; + DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb)); /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb. * We need some allowance to not penalize applications setting small @@ -1560,7 +1558,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, return -ENOMEM; /* Get a new skb... force flag on. */ - buff = tcp_stream_alloc_skb(sk, nsize, gfp, true); + buff = tcp_stream_alloc_skb(sk, 0, gfp, true); if (!buff) return -ENOMEM; /* We'll just try again later. */ skb_copy_decrypted(buff, skb); @@ -1568,7 +1566,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, sk_wmem_queued_add(sk, buff->truesize); sk_mem_charge(sk, buff->truesize); - nlen = skb->len - len - nsize; + nlen = skb->len - len; buff->truesize += nlen; skb->truesize -= nlen; @@ -1626,13 +1624,7 @@ static int __pskb_trim_head(struct sk_buff *skb, int len) struct skb_shared_info *shinfo; int i, k, eat; - eat = min_t(int, len, skb_headlen(skb)); - if (eat) { - __skb_pull(skb, eat); - len -= eat; - if (!len) - return 0; - } + DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb)); eat = len; k = 0; shinfo = skb_shinfo(skb); @@ -1671,12 +1663,10 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) TCP_SKB_CB(skb)->seq += len; - if (delta_truesize) { - skb->truesize -= delta_truesize; - sk_wmem_queued_add(sk, -delta_truesize); - if (!skb_zcopy_pure(skb)) - sk_mem_uncharge(sk, delta_truesize); - } + skb->truesize -= delta_truesize; + sk_wmem_queued_add(sk, -delta_truesize); + if (!skb_zcopy_pure(skb)) + sk_mem_uncharge(sk, delta_truesize); /* Any change of skb->len requires recalculation of tso factor. */ if (tcp_skb_pcount(skb) > 1) @@ -2126,9 +2116,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, u8 flags; /* All of a TSO frame must be composed of paged data. 
*/ - if (skb->len != skb->data_len) - return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE, - skb, len, mss_now, gfp); + DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len); buff = tcp_stream_alloc_skb(sk, 0, gfp, true); if (unlikely(!buff)) @@ -2487,12 +2475,8 @@ static int tcp_mtu_probe(struct sock *sk) } else { TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & ~(TCPHDR_FIN|TCPHDR_PSH); - if (!skb_shinfo(skb)->nr_frags) { - skb_pull(skb, copy); - } else { - __pskb_trim_head(skb, copy); - tcp_set_skb_tso_segs(skb, mss_now); - } + __pskb_trim_head(skb, copy); + tcp_set_skb_tso_segs(skb, mss_now); TCP_SKB_CB(skb)->seq += copy; } -- cgit v1.2.3 From 5882efff88aa7063ebdecd4ee92cc2cd1d0b3a8f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 9 Jun 2023 20:42:46 +0000 Subject: tcp: remove size parameter from tcp_stream_alloc_skb() Now all tcp_stream_alloc_skb() callers pass @size == 0, we can remove this parameter. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 2 +- net/ipv4/tcp.c | 6 +++--- net/ipv4/tcp_output.c | 10 +++++----- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/include/net/tcp.h b/include/net/tcp.h index f718ed258f07..bf9f56225821 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -350,7 +350,7 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family); ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); -struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, +struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp, bool force_schedule); void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index da7f156d9fad..fba6578bc98f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -858,12 +858,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, } EXPORT_SYMBOL(tcp_splice_read); -struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, +struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp, bool force_schedule) { struct sk_buff *skb; - skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp); + skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp); if (likely(skb)) { bool mem_scheduled; @@ -1178,7 +1178,7 @@ new_segment: goto restart; } first_skb = tcp_rtx_and_write_queues_empty(sk); - skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, + skb = tcp_stream_alloc_skb(sk, sk->sk_allocation, first_skb); if (!skb) goto wait_for_space; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index fedbe842abdd..660eac4bf2a7 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1558,7 +1558,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, return -ENOMEM; /* Get a new skb... force flag on. */ - buff = tcp_stream_alloc_skb(sk, 0, gfp, true); + buff = tcp_stream_alloc_skb(sk, gfp, true); if (!buff) return -ENOMEM; /* We'll just try again later. */ skb_copy_decrypted(buff, skb); @@ -2118,7 +2118,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, /* All of a TSO frame must be composed of paged data. */ DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len); - buff = tcp_stream_alloc_skb(sk, 0, gfp, true); + buff = tcp_stream_alloc_skb(sk, gfp, true); if (unlikely(!buff)) return -ENOMEM; skb_copy_decrypted(buff, skb); @@ -2434,7 +2434,7 @@ static int tcp_mtu_probe(struct sock *sk) return -1; /* We're allowed to probe. Build it now. 
*/ - nskb = tcp_stream_alloc_skb(sk, 0, GFP_ATOMIC, false); + nskb = tcp_stream_alloc_skb(sk, GFP_ATOMIC, false); if (!nskb) return -1; @@ -3811,7 +3811,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) !skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE), pfrag, sk->sk_allocation)) goto fallback; - syn_data = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, false); + syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false); if (!syn_data) goto fallback; memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); @@ -3896,7 +3896,7 @@ int tcp_connect(struct sock *sk) return 0; } - buff = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, true); + buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true); if (unlikely(!buff)) return -ENOBUFS; -- cgit v1.2.3 From fe2b401e55482cf90a0056209c8a232b2d39056c Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 5 Jun 2023 09:11:24 -0400 Subject: svcrdma: Allocate new transports on device's NUMA node The physical device's NUMA node ID is available when allocating an svc_xprt for an incoming connection. Use that value to ensure the svc_xprt structure is allocated on the NUMA node closest to the device. Signed-off-by: Chuck Lever --- net/sunrpc/xprtrdma/svc_rdma_transport.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index ca04f7a6a085..2abd895046ee 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -64,7 +64,7 @@ #define RPCDBG_FACILITY RPCDBG_SVCXPRT static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv, - struct net *net); + struct net *net, int node); static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, struct net *net, struct sockaddr *sa, int salen, @@ -123,14 +123,14 @@ static void qp_event_handler(struct ib_event *event, void *context) } static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv, - struct net *net) + struct net *net, int node) { - struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL); + struct svcxprt_rdma *cma_xprt; - if (!cma_xprt) { - dprintk("svcrdma: failed to create new transport\n"); + cma_xprt = kzalloc_node(sizeof(*cma_xprt), GFP_KERNEL, node); + if (!cma_xprt) return NULL; - } + svc_xprt_init(net, &svc_rdma_class, &cma_xprt->sc_xprt, serv); INIT_LIST_HEAD(&cma_xprt->sc_accept_q); INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q); @@ -193,9 +193,9 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id, struct svcxprt_rdma *newxprt; struct sockaddr *sa; - /* Create a new transport */ newxprt = svc_rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, - listen_xprt->sc_xprt.xpt_net); + listen_xprt->sc_xprt.xpt_net, + ibdev_to_node(new_cma_id->device)); if (!newxprt) return; newxprt->sc_cm_id = new_cma_id; @@ -304,7 +304,7 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, if (sa->sa_family != AF_INET && sa->sa_family != AF_INET6) return ERR_PTR(-EAFNOSUPPORT); - cma_xprt = svc_rdma_create_xprt(serv, net); + cma_xprt = svc_rdma_create_xprt(serv, net, NUMA_NO_NODE); if (!cma_xprt) return ERR_PTR(-ENOMEM); set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags); -- cgit v1.2.3 From c5d68d25bd6b5798bf0eb96661e1b26748e970d7 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 5 Jun 2023 09:11:30 -0400 Subject: svcrdma: Clean up allocation of svc_rdma_recv_ctxt The physical device's favored NUMA node ID is available when allocating a recv_ctxt. 
Use that value instead of relying on the assumption that the memory allocation happens to be running on a node close to the device. This clean up eliminates the hack of destroying recv_ctxts that were not created by the receive CQ thread -- recv_ctxts are now always allocated on a "good" node. Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 1 - net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 18 +++++++----------- 2 files changed, 7 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index fbc4bd423b35..a0f3ea357977 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -135,7 +135,6 @@ struct svc_rdma_recv_ctxt { struct ib_sge rc_recv_sge; void *rc_recv_buf; struct xdr_stream rc_stream; - bool rc_temp; u32 rc_byte_len; unsigned int rc_page_count; u32 rc_inv_rkey; diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index a22fe7587fa6..46a719ba4917 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -125,14 +125,15 @@ static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma, static struct svc_rdma_recv_ctxt * svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma) { + int node = ibdev_to_node(rdma->sc_cm_id->device); struct svc_rdma_recv_ctxt *ctxt; dma_addr_t addr; void *buffer; - ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL); + ctxt = kmalloc_node(sizeof(*ctxt), GFP_KERNEL, node); if (!ctxt) goto fail0; - buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL); + buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node); if (!buffer) goto fail1; addr = ib_dma_map_single(rdma->sc_pd->device, buffer, @@ -155,7 +156,6 @@ svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma) ctxt->rc_recv_sge.length = rdma->sc_max_req_size; ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey; ctxt->rc_recv_buf = buffer; - ctxt->rc_temp = false; return ctxt; fail2: @@ -232,10 +232,7 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, pcl_free(&ctxt->rc_write_pcl); pcl_free(&ctxt->rc_reply_pcl); - if (!ctxt->rc_temp) - llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts); - else - svc_rdma_recv_ctxt_destroy(rdma, ctxt); + llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts); } /** @@ -258,7 +255,7 @@ void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt) } static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma, - unsigned int wanted, bool temp) + unsigned int wanted) { const struct ib_recv_wr *bad_wr = NULL; struct svc_rdma_recv_ctxt *ctxt; @@ -275,7 +272,6 @@ static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma, break; trace_svcrdma_post_recv(ctxt); - ctxt->rc_temp = temp; ctxt->rc_recv_wr.next = recv_chain; recv_chain = &ctxt->rc_recv_wr; rdma->sc_pending_recvs++; @@ -309,7 +305,7 @@ err_free: */ bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma) { - return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true); + return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests); } /** @@ -343,7 +339,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) * client reconnects. 
*/ if (rdma->sc_pending_recvs < rdma->sc_max_requests) - if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch, false)) + if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch)) goto dropped; /* All wc fields are now known to be valid */ -- cgit v1.2.3 From ed51b426101427c90c14696f073ef8fe71a806f9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 5 Jun 2023 09:11:37 -0400 Subject: svcrdma: Clean up allocation of svc_rdma_send_ctxt The physical device's favored NUMA node ID is available when allocating a send_ctxt. Use that value instead of relying on the assumption that the memory allocation happens to be running on a node close to the device. Signed-off-by: Chuck Lever --- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 22a871e6fe4d..a35d1e055b1a 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -123,18 +123,17 @@ static void svc_rdma_send_cid_init(struct svcxprt_rdma *rdma, static struct svc_rdma_send_ctxt * svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma) { + int node = ibdev_to_node(rdma->sc_cm_id->device); struct svc_rdma_send_ctxt *ctxt; dma_addr_t addr; void *buffer; - size_t size; int i; - size = sizeof(*ctxt); - size += rdma->sc_max_send_sges * sizeof(struct ib_sge); - ctxt = kmalloc(size, GFP_KERNEL); + ctxt = kmalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges), + GFP_KERNEL, node); if (!ctxt) goto fail0; - buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL); + buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node); if (!buffer) goto fail1; addr = ib_dma_map_single(rdma->sc_pd->device, buffer, -- cgit v1.2.3 From ac3c32bbdb0e3f787333ae67d650868b6ef5c9a7 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 5 Jun 2023 09:11:43 -0400 Subject: svcrdma: Clean up allocation of svc_rdma_rw_ctxt The physical device's favored NUMA node ID is available when allocating a rw_ctxt. Use that value instead of relying on the assumption that the memory allocation happens to be running on a node close to the device. 
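The three svcrdma allocation clean-ups above (recv_ctxt, send_ctxt and rw_ctxt) share one pattern: resolve the device's preferred NUMA node once with ibdev_to_node() and pass it to kmalloc_node() for every allocation tied to that device. A minimal sketch of the pattern, for illustration only; the foo_ctxt structure and foo_ctxt_alloc() helper are invented names, while ibdev_to_node(), kmalloc_node() and the rdma->sc_cm_id->device dereference come straight from the patches:

#include <linux/slab.h>
#include <linux/sunrpc/svc_rdma.h>
#include <rdma/ib_verbs.h>

/* Illustrative only: foo_ctxt stands in for the real context structs. */
struct foo_ctxt {
        void *buffer;
};

static struct foo_ctxt *foo_ctxt_alloc(struct svcxprt_rdma *rdma, size_t buflen)
{
        /* Favor the NUMA node the RDMA device hangs off, not whichever
         * node the allocating thread happens to be running on.
         */
        int node = ibdev_to_node(rdma->sc_cm_id->device);
        struct foo_ctxt *ctxt;

        ctxt = kmalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
        if (!ctxt)
                return NULL;

        ctxt->buffer = kmalloc_node(buflen, GFP_KERNEL, node);
        if (!ctxt->buffer) {
                kfree(ctxt);
                return NULL;
        }
        return ctxt;
}

The listening endpoint is the one place where no device is known yet, which is why svc_rdma_create() above passes NUMA_NO_NODE instead.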
Signed-off-by: Chuck Lever --- net/sunrpc/xprtrdma/svc_rdma_rw.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 11cf7c646644..068c365e7812 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -62,8 +62,8 @@ svc_rdma_get_rw_ctxt(struct svcxprt_rdma *rdma, unsigned int sges) if (node) { ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node); } else { - ctxt = kmalloc(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE), - GFP_KERNEL); + ctxt = kmalloc_node(struct_size(ctxt, rw_first_sgl, SG_CHUNK_SIZE), + GFP_KERNEL, ibdev_to_node(rdma->sc_cm_id->device)); if (!ctxt) goto out_noctx; @@ -234,7 +234,8 @@ svc_rdma_write_info_alloc(struct svcxprt_rdma *rdma, { struct svc_rdma_write_info *info; - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = kmalloc_node(sizeof(*info), GFP_KERNEL, + ibdev_to_node(rdma->sc_cm_id->device)); if (!info) return info; @@ -304,7 +305,8 @@ svc_rdma_read_info_alloc(struct svcxprt_rdma *rdma) { struct svc_rdma_read_info *info; - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = kmalloc_node(sizeof(*info), GFP_KERNEL, + ibdev_to_node(rdma->sc_cm_id->device)); if (!info) return info; -- cgit v1.2.3 From 6be7afcd92cd426428b94dd7e258b2472a45cde7 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 12 Jun 2023 10:10:01 -0400 Subject: SUNRPC: Revert cc93ce9529a6 ("svcrdma: Retain the page backing rq_res.head[0].iov_base") Pre-requisite for releasing pages in the send completion handler. Reverted by hand: patch -R would not apply cleanly. Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index a35d1e055b1a..8e7ccef74207 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -975,11 +975,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp); if (ret < 0) goto put_ctxt; - - /* Prevent svc_xprt_release() from releasing the page backing - * rq_res.head[0].iov_base. It's no longer being accessed by - * the I/O device. */ - rqstp->rq_respages++; return 0; reply_chunk: -- cgit v1.2.3 From a944209c11aff9a5c9b7987fc958cc2344dca51f Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 12 Jun 2023 10:10:07 -0400 Subject: SUNRPC: Revert 579900670ac7 ("svcrdma: Remove unused sc_pages field") Pre-requisite for releasing pages in the send completion handler. Reverted by hand: patch -R would not apply cleanly. 
Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 3 ++- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index a0f3ea357977..8e654da55170 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -158,8 +158,9 @@ struct svc_rdma_send_ctxt { struct xdr_buf sc_hdrbuf; struct xdr_stream sc_stream; void *sc_xprt_buf; + int sc_page_count; int sc_cur_sge_no; - + struct page *sc_pages[RPCSVC_MAXPAGES]; struct ib_sge sc_sges[]; }; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 8e7ccef74207..4c62bc41ea40 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -213,6 +213,7 @@ out: ctxt->sc_send_wr.num_sge = 0; ctxt->sc_cur_sge_no = 0; + ctxt->sc_page_count = 0; return ctxt; out_empty: @@ -227,6 +228,8 @@ out_empty: * svc_rdma_send_ctxt_put - Return send_ctxt to free list * @rdma: controlling svcxprt_rdma * @ctxt: object to return to the free list + * + * Pages left in sc_pages are DMA unmapped and released. */ void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt) @@ -234,6 +237,9 @@ void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct ib_device *device = rdma->sc_cm_id->device; unsigned int i; + for (i = 0; i < ctxt->sc_page_count; ++i) + put_page(ctxt->sc_pages[i]); + /* The first SGE contains the transport header, which * remains mapped until @ctxt is destroyed. */ @@ -798,6 +804,25 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, svc_rdma_xb_dma_map, &args); } +/* The svc_rqst and all resources it owns are released as soon as + * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt + * so they are released by the Send completion handler. + */ +static inline void svc_rdma_save_io_pages(struct svc_rqst *rqstp, + struct svc_rdma_send_ctxt *ctxt) +{ + int i, pages = rqstp->rq_next_page - rqstp->rq_respages; + + ctxt->sc_page_count += pages; + for (i = 0; i < pages; i++) { + ctxt->sc_pages[i] = rqstp->rq_respages[i]; + rqstp->rq_respages[i] = NULL; + } + + /* Prevent svc_xprt_release from releasing pages in rq_pages */ + rqstp->rq_next_page = rqstp->rq_respages; +} + /* Prepare the portion of the RPC Reply that will be transmitted * via RDMA Send. The RPC-over-RDMA transport header is prepared * in sc_sges[0], and the RPC xdr_buf is prepared in following sges. -- cgit v1.2.3 From c4b50cdf9d9d7962d58ece5efba865f56ec40398 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 12 Jun 2023 10:10:14 -0400 Subject: svcrdma: Revert 2a1e4f21d841 ("svcrdma: Normalize Send page handling") Get rid of the completion wait in svc_rdma_sendto(), and release pages in the send completion handler again. A subsequent patch will handle releasing those pages more efficiently. Reverted by hand: patch -R would not apply cleanly. 
Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- include/linux/sunrpc/svc_rdma.h | 1 - net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 8 +------- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 27 ++++++++++++--------------- 3 files changed, 13 insertions(+), 23 deletions(-) (limited to 'net') diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 8e654da55170..a5ee0af2a310 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -154,7 +154,6 @@ struct svc_rdma_send_ctxt { struct ib_send_wr sc_send_wr; struct ib_cqe sc_cqe; - struct completion sc_done; struct xdr_buf sc_hdrbuf; struct xdr_stream sc_stream; void *sc_xprt_buf; diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index aa2227a7e552..7420a2c990c7 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -93,13 +93,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, */ get_page(virt_to_page(rqst->rq_buffer)); sctxt->sc_send_wr.opcode = IB_WR_SEND; - ret = svc_rdma_send(rdma, sctxt); - if (ret < 0) - return ret; - - ret = wait_for_completion_killable(&sctxt->sc_done); - svc_rdma_send_ctxt_put(rdma, sctxt); - return ret; + return svc_rdma_send(rdma, sctxt); } /* Server-side transport endpoint wants a whole page for its send diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 4c62bc41ea40..1ae4236d04a3 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -147,7 +147,6 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma) ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe; ctxt->sc_send_wr.sg_list = ctxt->sc_sges; ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED; - init_completion(&ctxt->sc_done); ctxt->sc_cqe.done = svc_rdma_wc_send; ctxt->sc_xprt_buf = buffer; xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf, @@ -286,12 +285,12 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe); svc_rdma_wake_send_waiters(rdma, 1); - complete(&ctxt->sc_done); if (unlikely(wc->status != IB_WC_SUCCESS)) goto flushed; trace_svcrdma_wc_send(wc, &ctxt->sc_cid); + svc_rdma_send_ctxt_put(rdma, ctxt); return; flushed: @@ -299,6 +298,7 @@ flushed: trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid); else trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid); + svc_rdma_send_ctxt_put(rdma, ctxt); svc_xprt_deferred_close(&rdma->sc_xprt); } @@ -315,8 +315,6 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt) struct ib_send_wr *wr = &ctxt->sc_send_wr; int ret; - reinit_completion(&ctxt->sc_done); - /* Sync the transport header buffer */ ib_dma_sync_single_for_device(rdma->sc_pd->device, wr->sg_list[0].addr, @@ -808,8 +806,8 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt * so they are released by the Send completion handler. 
*/ -static inline void svc_rdma_save_io_pages(struct svc_rqst *rqstp, - struct svc_rdma_send_ctxt *ctxt) +static void svc_rdma_save_io_pages(struct svc_rqst *rqstp, + struct svc_rdma_send_ctxt *ctxt) { int i, pages = rqstp->rq_next_page - rqstp->rq_respages; @@ -852,6 +850,8 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, if (ret < 0) return ret; + svc_rdma_save_io_pages(rqstp, sctxt); + if (rctxt->rc_inv_rkey) { sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV; sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey; @@ -859,13 +859,7 @@ static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma, sctxt->sc_send_wr.opcode = IB_WR_SEND; } - ret = svc_rdma_send(rdma, sctxt); - if (ret < 0) - return ret; - - ret = wait_for_completion_killable(&sctxt->sc_done); - svc_rdma_send_ctxt_put(rdma, sctxt); - return ret; + return svc_rdma_send(rdma, sctxt); } /** @@ -931,8 +925,7 @@ void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, sctxt->sc_sges[0].length = sctxt->sc_hdrbuf.len; if (svc_rdma_send(rdma, sctxt)) goto put_ctxt; - - wait_for_completion_killable(&sctxt->sc_done); + return; put_ctxt: svc_rdma_send_ctxt_put(rdma, sctxt); @@ -1006,6 +999,10 @@ reply_chunk: if (ret != -E2BIG && ret != -EINVAL) goto put_ctxt; + /* Send completion releases payload pages that were part + * of previously posted RDMA Writes. + */ + svc_rdma_save_io_pages(rqstp, sctxt); svc_rdma_send_error_msg(rdma, sctxt, rctxt, ret); return 0; -- cgit v1.2.3 From d7ad70b5ef5ab8dedaa403e0e5c711ca1aa8cb14 Mon Sep 17 00:00:00 2001 From: Zahari Doychev Date: Thu, 8 Jun 2023 12:56:46 +0200 Subject: net: flow_dissector: add support for cfm packets Add support for dissecting cfm packets. The cfm packet header fields maintenance domain level and opcode can be dissected. Signed-off-by: Zahari Doychev Reviewed-by: Simon Horman Reviewed-by: Ido Schimmel Signed-off-by: Jakub Kicinski --- include/net/flow_dissector.h | 21 +++++++++++++++++++++ net/core/flow_dissector.c | 30 ++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) (limited to 'net') diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 8b41668c77fc..8664ed4fbbdf 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -301,6 +301,26 @@ struct flow_dissector_key_l2tpv3 { __be32 session_id; }; +/** + * struct flow_dissector_key_cfm + * @mdl_ver: maintenance domain level (mdl) and cfm protocol version + * @opcode: code specifying a type of cfm protocol packet + * + * See 802.1ag, ITU-T G.8013/Y.1731 + * 1 2 + * |7 6 5 4 3 2 1 0|7 6 5 4 3 2 1 0| + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | mdl | version | opcode | + * +-----+---------+-+-+-+-+-+-+-+-+ + */ +struct flow_dissector_key_cfm { + u8 mdl_ver; + u8 opcode; +}; + +#define FLOW_DIS_CFM_MDL_MASK GENMASK(7, 5) +#define FLOW_DIS_CFM_MDL_MAX 7 + enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ @@ -333,6 +353,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_NUM_OF_VLANS, /* struct flow_dissector_key_num_of_vlans */ FLOW_DISSECTOR_KEY_PPPOE, /* struct flow_dissector_key_pppoe */ FLOW_DISSECTOR_KEY_L2TPV3, /* struct flow_dissector_key_l2tpv3 */ + FLOW_DISSECTOR_KEY_CFM, /* struct flow_dissector_key_cfm */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 481ca4080cbd..85a2d0d9bd39 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -557,6 +557,30 @@ 
__skb_flow_dissect_arp(const struct sk_buff *skb, return FLOW_DISSECT_RET_OUT_GOOD; } +static enum flow_dissect_ret +__skb_flow_dissect_cfm(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, const void *data, + int nhoff, int hlen) +{ + struct flow_dissector_key_cfm *key, *hdr, _hdr; + + if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_CFM)) + return FLOW_DISSECT_RET_OUT_GOOD; + + hdr = __skb_header_pointer(skb, nhoff, sizeof(*key), data, hlen, &_hdr); + if (!hdr) + return FLOW_DISSECT_RET_OUT_BAD; + + key = skb_flow_dissector_target(flow_dissector, FLOW_DISSECTOR_KEY_CFM, + target_container); + + key->mdl_ver = hdr->mdl_ver; + key->opcode = hdr->opcode; + + return FLOW_DISSECT_RET_OUT_GOOD; +} + static enum flow_dissect_ret __skb_flow_dissect_gre(const struct sk_buff *skb, struct flow_dissector_key_control *key_control, @@ -1400,6 +1424,12 @@ proto_again: break; } + case htons(ETH_P_CFM): + fdret = __skb_flow_dissect_cfm(skb, flow_dissector, + target_container, data, + nhoff, hlen); + break; + default: fdret = FLOW_DISSECT_RET_OUT_BAD; break; -- cgit v1.2.3 From 7cfffd5fed3e385010583840402f0bf66c4ed147 Mon Sep 17 00:00:00 2001 From: Zahari Doychev Date: Thu, 8 Jun 2023 12:56:47 +0200 Subject: net: flower: add support for matching cfm fields Add support to the tc flower classifier to match based on fields in CFM information elements like level and opcode. tc filter add dev ens6 ingress protocol 802.1q \ flower vlan_id 698 vlan_ethtype 0x8902 cfm mdl 5 op 46 \ action drop Signed-off-by: Zahari Doychev Reviewed-by: Simon Horman Reviewed-by: Ido Schimmel Signed-off-by: Jakub Kicinski --- include/uapi/linux/pkt_cls.h | 9 ++++ net/sched/cls_flower.c | 102 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+) (limited to 'net') diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 00933dda7b10..7865f5a9885b 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -596,6 +596,8 @@ enum { TCA_FLOWER_L2_MISS, /* u8 */ + TCA_FLOWER_KEY_CFM, /* nested */ + __TCA_FLOWER_MAX, }; @@ -704,6 +706,13 @@ enum { TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1), }; +enum { + TCA_FLOWER_KEY_CFM_OPT_UNSPEC, + TCA_FLOWER_KEY_CFM_MD_LEVEL, + TCA_FLOWER_KEY_CFM_OPCODE, + TCA_FLOWER_KEY_CFM_OPT_MAX, +}; + #define TCA_FLOWER_MASK_FLAGS_RANGE (1 << 0) /* Range-based match */ /* Match-all classifier */ diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index e02ecabbb75c..56065cc5a661 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -71,6 +72,7 @@ struct fl_flow_key { struct flow_dissector_key_num_of_vlans num_of_vlans; struct flow_dissector_key_pppoe pppoe; struct flow_dissector_key_l2tpv3 l2tpv3; + struct flow_dissector_key_cfm cfm; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. 
*/ struct fl_flow_mask_range { @@ -725,6 +727,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 }, [TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1), + [TCA_FLOWER_KEY_CFM] = { .type = NLA_NESTED }, }; static const struct nla_policy @@ -773,6 +776,12 @@ mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = { [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 }, }; +static const struct nla_policy cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX] = { + [TCA_FLOWER_KEY_CFM_MD_LEVEL] = NLA_POLICY_MAX(NLA_U8, + FLOW_DIS_CFM_MDL_MAX), + [TCA_FLOWER_KEY_CFM_OPCODE] = { .type = NLA_U8 }, +}; + static void fl_set_key_val(struct nlattr **tb, void *val, int val_type, void *mask, int mask_type, int len) @@ -1660,6 +1669,53 @@ static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype, return false; } +static void fl_set_key_cfm_md_level(struct nlattr **tb, + struct fl_flow_key *key, + struct fl_flow_key *mask, + struct netlink_ext_ack *extack) +{ + u8 level; + + if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]) + return; + + level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]); + key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level); + mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK; +} + +static void fl_set_key_cfm_opcode(struct nlattr **tb, + struct fl_flow_key *key, + struct fl_flow_key *mask, + struct netlink_ext_ack *extack) +{ + fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE, + &mask->cfm.opcode, TCA_FLOWER_UNSPEC, + sizeof(key->cfm.opcode)); +} + +static int fl_set_key_cfm(struct nlattr **tb, + struct fl_flow_key *key, + struct fl_flow_key *mask, + struct netlink_ext_ack *extack) +{ + struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX]; + int err; + + if (!tb[TCA_FLOWER_KEY_CFM]) + return 0; + + err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX, + tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack); + if (err < 0) + return err; + + fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack); + fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack); + + return 0; +} + static int fl_set_key(struct net *net, struct nlattr **tb, struct fl_flow_key *key, struct fl_flow_key *mask, struct netlink_ext_ack *extack) @@ -1814,6 +1870,10 @@ static int fl_set_key(struct net *net, struct nlattr **tb, TCA_FLOWER_KEY_L2TPV3_SID, &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC, sizeof(key->l2tpv3.session_id)); + } else if (key->basic.n_proto == htons(ETH_P_CFM)) { + ret = fl_set_key_cfm(tb, key, mask, extack); + if (ret) + return ret; } if (key->basic.ip_proto == IPPROTO_TCP || @@ -1996,6 +2056,8 @@ static void fl_init_dissector(struct flow_dissector *dissector, FLOW_DISSECTOR_KEY_PPPOE, pppoe); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3); + FL_KEY_SET_IF_MASKED(mask, keys, cnt, + FLOW_DISSECTOR_KEY_CFM, cfm); skb_flow_dissector_init(dissector, keys, cnt); } @@ -3029,6 +3091,43 @@ nla_put_failure: return -EMSGSIZE; } +static int fl_dump_key_cfm(struct sk_buff *skb, + struct flow_dissector_key_cfm *key, + struct flow_dissector_key_cfm *mask) +{ + struct nlattr *opts; + int err; + u8 mdl; + + if (!memchr_inv(mask, 0, sizeof(*mask))) + return 0; + + opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM); + if (!opts) + return -EMSGSIZE; + + if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) { + mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver); + err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl); + if (err) + goto err_cfm_opts; + } + + if (mask->opcode) 
{ + err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, key->opcode); + if (err) + goto err_cfm_opts; + } + + nla_nest_end(skb, opts); + + return 0; + +err_cfm_opts: + nla_nest_cancel(skb, opts); + return err; +} + static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, struct flow_dissector_key_enc_opts *enc_opts) { @@ -3316,6 +3415,9 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net, sizeof(key->hash.hash))) goto nla_put_failure; + if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm)) + goto nla_put_failure; + return 0; nla_put_failure: -- cgit v1.2.3 From 5df5dd03a8f71ca9640f208d8f523856e1069ee7 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 9 Jun 2023 11:02:18 +0100 Subject: sunrpc: Use sendmsg(MSG_SPLICE_PAGES) rather then sendpage When transmitting data, call down into TCP using sendmsg with MSG_SPLICE_PAGES to indicate that content should be spliced rather than performing sendpage calls to transmit header, data pages and trailer. Signed-off-by: David Howells Acked-by: Chuck Lever cc: Trond Myklebust cc: Anna Schumaker cc: Jeff Layton cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- include/linux/sunrpc/svc.h | 11 +++++------ net/sunrpc/svcsock.c | 38 ++++++++++++-------------------------- 2 files changed, 17 insertions(+), 32 deletions(-) (limited to 'net') diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 762d7231e574..f66ec8fdb331 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -161,16 +161,15 @@ static inline bool svc_put_not_last(struct svc_serv *serv) extern u32 svc_max_payload(const struct svc_rqst *rqstp); /* - * RPC Requsts and replies are stored in one or more pages. + * RPC Requests and replies are stored in one or more pages. * We maintain an array of pages for each server thread. * Requests are copied into these pages as they arrive. Remaining * pages are available to write the reply into. * - * Pages are sent using ->sendpage so each server thread needs to - * allocate more to replace those used in sending. To help keep track - * of these pages we have a receive list where all pages initialy live, - * and a send list where pages are moved to when there are to be part - * of a reply. + * Pages are sent using ->sendmsg with MSG_SPLICE_PAGES so each server thread + * needs to allocate more to replace those used in sending. To help keep track + * of these pages we have a receive list where all pages initialy live, and a + * send list where pages are moved to when there are to be part of a reply. * * We use xdr_buf for holding responses as it fits well with NFS * read responses (that have a header, and some data pages, and possibly diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index f77cebe2c071..9d9f522e3ae1 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1203,13 +1203,14 @@ err_noclose: static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec, int flags) { - return kernel_sendpage(sock, virt_to_page(vec->iov_base), - offset_in_page(vec->iov_base), - vec->iov_len, flags); + struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES | flags, }; + + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 1, vec->iov_len); + return sock_sendmsg(sock, &msg); } /* - * kernel_sendpage() is used exclusively to reduce the number of + * MSG_SPLICE_PAGES is used exclusively to reduce the number of * copy operations in this path. Therefore the caller must ensure * that the pages backing @xdr are unchanging. 
* @@ -1249,28 +1250,13 @@ static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr, if (ret != head->iov_len) goto out; - if (xdr->page_len) { - unsigned int offset, len, remaining; - struct bio_vec *bvec; - - bvec = xdr->bvec + (xdr->page_base >> PAGE_SHIFT); - offset = offset_in_page(xdr->page_base); - remaining = xdr->page_len; - while (remaining > 0) { - len = min(remaining, bvec->bv_len - offset); - ret = kernel_sendpage(sock, bvec->bv_page, - bvec->bv_offset + offset, - len, 0); - if (ret < 0) - return ret; - *sentp += ret; - if (ret != len) - goto out; - remaining -= len; - offset = 0; - bvec++; - } - } + msg.msg_flags = MSG_SPLICE_PAGES; + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, xdr->bvec, + xdr_buf_pagecount(xdr), xdr->page_len); + ret = sock_sendmsg(sock, &msg); + if (ret < 0) + return ret; + *sentp += ret; if (tail->iov_len) { ret = svc_tcp_send_kvec(sock, tail, 0); -- cgit v1.2.3 From de17c6857301f1f5b0e71dd064bb816d6628a91d Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 9 Jun 2023 11:02:19 +0100 Subject: tcp_bpf: Make tcp_bpf_sendpage() go through tcp_bpf_sendmsg(MSG_SPLICE_PAGES) Make tcp_bpf_sendpage() a wrapper around tcp_bpf_sendmsg(MSG_SPLICE_PAGES) rather than a loop calling tcp_sendpage(). sendpage() will be removed in the future. Signed-off-by: David Howells cc: John Fastabend cc: Jakub Sitnicki cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/ipv4/tcp_bpf.c | 49 +++++++++---------------------------------------- 1 file changed, 9 insertions(+), 40 deletions(-) (limited to 'net') diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index e75023ea052f..5a84053ac62b 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -568,49 +568,18 @@ out_err: static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { - struct sk_msg tmp, *msg = NULL; - int err = 0, copied = 0; - struct sk_psock *psock; - bool enospc = false; - - psock = sk_psock_get(sk); - if (unlikely(!psock)) - return tcp_sendpage(sk, page, offset, size, flags); + struct bio_vec bvec; + struct msghdr msg = { + .msg_flags = flags | MSG_SPLICE_PAGES, + }; - lock_sock(sk); - if (psock->cork) { - msg = psock->cork; - } else { - msg = &tmp; - sk_msg_init(msg); - } + bvec_set_page(&bvec, page, size, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - /* Catch case where ring is full and sendpage is stalled. */ - if (unlikely(sk_msg_full(msg))) - goto out_err; - - sk_msg_page_add(msg, page, size, offset); - sk_mem_charge(sk, size); - copied = size; - if (sk_msg_full(msg)) - enospc = true; - if (psock->cork_bytes) { - if (size > psock->cork_bytes) - psock->cork_bytes = 0; - else - psock->cork_bytes -= size; - if (psock->cork_bytes && !enospc) - goto out_err; - /* All cork bytes are accounted, rerun the prog. */ - psock->eval = __SK_NONE; - psock->cork_bytes = 0; - } + if (flags & MSG_SENDPAGE_NOTLAST) + msg.msg_flags |= MSG_MORE; - err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags); -out_err: - release_sock(sk); - sk_psock_put(sk, psock); - return copied ? copied : err; + return tcp_bpf_sendmsg(sk, &msg, size); } enum { -- cgit v1.2.3 From 264ba53fac79b03ff754bce62da5027ee3c57b8f Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 9 Jun 2023 11:02:20 +0100 Subject: kcm: Use sendmsg(MSG_SPLICE_PAGES) rather then sendpage When transmitting data, call down into the transport socket using sendmsg with MSG_SPLICE_PAGES to indicate that content should be spliced rather than using sendpage. 
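The conversion follows the same shape as the svcsock and tcp_bpf changes above: describe the page with a bio_vec, point an ITER_SOURCE bvec iterator at it, and hand the result to sock_sendmsg() with MSG_SPLICE_PAGES set. A hedged sketch of that shape; xmit_one_page() is an invented helper name, but every call in it appears in the diffs in this series:

#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Sketch only: splice one page fragment into a socket instead of
 * handing it to kernel_sendpage().
 */
static int xmit_one_page(struct socket *sock, struct page *page,
                         unsigned int offset, size_t len)
{
        struct bio_vec bvec;
        struct msghdr msg = {
                .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
        };

        bvec_set_page(&bvec, page, len, offset);
        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
        return sock_sendmsg(sock, &msg);
}

Whether MSG_MORE also needs to be set depends on the caller; the tcp_bpf wrapper above adds it when the old MSG_SENDPAGE_NOTLAST flag was passed.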
Signed-off-by: David Howells cc: Tom Herbert cc: Tom Herbert cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- net/kcm/kcmsock.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 7dee74430b59..3bcac1453f10 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -641,6 +641,10 @@ do_frag_list: for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { + struct bio_vec bvec; + struct msghdr msg = { + .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, + }; skb_frag_t *frag; frag_offset = 0; @@ -651,11 +655,13 @@ do_frag: goto out; } - ret = kernel_sendpage(psock->sk->sk_socket, - skb_frag_page(frag), - skb_frag_off(frag) + frag_offset, - skb_frag_size(frag) - frag_offset, - MSG_DONTWAIT); + bvec_set_page(&bvec, + skb_frag_page(frag), + skb_frag_size(frag) - frag_offset, + skb_frag_off(frag) + frag_offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, + bvec.bv_len); + ret = sock_sendmsg(psock->sk->sk_socket, &msg); if (ret <= 0) { if (ret == -EAGAIN) { /* Save state to try again when there's -- cgit v1.2.3 From c31a25e1db486f36a0ffe3c849b0a82cda3db7db Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 9 Jun 2023 11:02:21 +0100 Subject: kcm: Send multiple frags in one sendmsg() Rewrite the AF_KCM transmission loop to send all the fragments in a single skb or frag_list-skb in one sendmsg() with MSG_SPLICE_PAGES set. The list of fragments in each skb is conveniently a bio_vec[] that can just be attached to a BVEC iter. Note: I'm working out the size of each fragment-skb by adding up bv_len for all the bio_vecs in skb->frags[] - but surely this information is recorded somewhere? For the skbs in head->frag_list, this is equal to skb->data_len, but not for the head. head->data_len includes all the tail frags too. 
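On the msize question above: each entry of skb_shinfo(skb)->frags[] is a bio_vec, so the total is simply the sum of bv_len over that array; the rewrite then aims a single bvec iterator at the whole array and advances it past what was already sent. A stripped-down sketch of just that step; frag_iter_init() is an invented name, the calls and fields are the ones used in the patch:

#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Sketch only: point one ITER_SOURCE bvec iterator at every fragment
 * of @skb and skip the bytes that have already been transmitted.
 */
static void frag_iter_init(struct msghdr *msg, struct sk_buff *skb,
                           unsigned int already_sent)
{
        unsigned int size = 0;
        int i;

        /* Total payload: the sum of bv_len over the frag array. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                size += skb_frag_size(&skb_shinfo(skb)->frags[i]);

        iov_iter_bvec(&msg->msg_iter, ITER_SOURCE, skb_shinfo(skb)->frags,
                      skb_shinfo(skb)->nr_frags, size);
        iov_iter_advance(&msg->msg_iter, already_sent);
}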
Signed-off-by: David Howells cc: Tom Herbert cc: Tom Herbert cc: Jens Axboe cc: Matthew Wilcox Signed-off-by: Jakub Kicinski --- include/net/kcm.h | 2 +- net/kcm/kcmsock.c | 126 ++++++++++++++++++++++-------------------------------- 2 files changed, 51 insertions(+), 77 deletions(-) (limited to 'net') diff --git a/include/net/kcm.h b/include/net/kcm.h index 2d704f8f4905..90279e5e09a5 100644 --- a/include/net/kcm.h +++ b/include/net/kcm.h @@ -47,9 +47,9 @@ struct kcm_stats { struct kcm_tx_msg { unsigned int sent; - unsigned int fragidx; unsigned int frag_offset; unsigned int msg_flags; + bool started_tx; struct sk_buff *frag_skb; struct sk_buff *last_skb; }; diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 3bcac1453f10..d75d775e9462 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -581,12 +581,10 @@ static void kcm_report_tx_retry(struct kcm_sock *kcm) */ static int kcm_write_msgs(struct kcm_sock *kcm) { + unsigned int total_sent = 0; struct sock *sk = &kcm->sk; struct kcm_psock *psock; - struct sk_buff *skb, *head; - struct kcm_tx_msg *txm; - unsigned short fragidx, frag_offset; - unsigned int sent, total_sent = 0; + struct sk_buff *head; int ret = 0; kcm->tx_wait_more = false; @@ -600,78 +598,57 @@ static int kcm_write_msgs(struct kcm_sock *kcm) if (skb_queue_empty(&sk->sk_write_queue)) return 0; - kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0; - - } else if (skb_queue_empty(&sk->sk_write_queue)) { - return 0; + kcm_tx_msg(skb_peek(&sk->sk_write_queue))->started_tx = false; } - head = skb_peek(&sk->sk_write_queue); - txm = kcm_tx_msg(head); - - if (txm->sent) { - /* Send of first skbuff in queue already in progress */ - if (WARN_ON(!psock)) { - ret = -EINVAL; - goto out; +retry: + while ((head = skb_peek(&sk->sk_write_queue))) { + struct msghdr msg = { + .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, + }; + struct kcm_tx_msg *txm = kcm_tx_msg(head); + struct sk_buff *skb; + unsigned int msize; + int i; + + if (!txm->started_tx) { + psock = reserve_psock(kcm); + if (!psock) + goto out; + skb = head; + txm->frag_offset = 0; + txm->sent = 0; + txm->started_tx = true; + } else { + if (WARN_ON(!psock)) { + ret = -EINVAL; + goto out; + } + skb = txm->frag_skb; } - sent = txm->sent; - frag_offset = txm->frag_offset; - fragidx = txm->fragidx; - skb = txm->frag_skb; - - goto do_frag; - } - -try_again: - psock = reserve_psock(kcm); - if (!psock) - goto out; - - do { - skb = head; - txm = kcm_tx_msg(head); - sent = 0; -do_frag_list: if (WARN_ON(!skb_shinfo(skb)->nr_frags)) { ret = -EINVAL; goto out; } - for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; - fragidx++) { - struct bio_vec bvec; - struct msghdr msg = { - .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, - }; - skb_frag_t *frag; - - frag_offset = 0; -do_frag: - frag = &skb_shinfo(skb)->frags[fragidx]; - if (WARN_ON(!skb_frag_size(frag))) { - ret = -EINVAL; - goto out; - } + msize = 0; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) + msize += skb_shinfo(skb)->frags[i].bv_len; + + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, + skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags, + msize); + iov_iter_advance(&msg.msg_iter, txm->frag_offset); - bvec_set_page(&bvec, - skb_frag_page(frag), - skb_frag_size(frag) - frag_offset, - skb_frag_off(frag) + frag_offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, - bvec.bv_len); + do { ret = sock_sendmsg(psock->sk->sk_socket, &msg); if (ret <= 0) { if (ret == -EAGAIN) { /* Save state to try again when there's * write space on the socket */ - txm->sent = sent; - 
txm->frag_offset = frag_offset; - txm->fragidx = fragidx; txm->frag_skb = skb; - ret = 0; goto out; } @@ -685,39 +662,36 @@ do_frag: true); unreserve_psock(kcm); - txm->sent = 0; + txm->started_tx = false; kcm_report_tx_retry(kcm); ret = 0; - - goto try_again; + goto retry; } - sent += ret; - frag_offset += ret; + txm->sent += ret; + txm->frag_offset += ret; KCM_STATS_ADD(psock->stats.tx_bytes, ret); - if (frag_offset < skb_frag_size(frag)) { - /* Not finished with this frag */ - goto do_frag; - } - } + } while (msg.msg_iter.count > 0); if (skb == head) { if (skb_has_frag_list(skb)) { - skb = skb_shinfo(skb)->frag_list; - goto do_frag_list; + txm->frag_skb = skb_shinfo(skb)->frag_list; + txm->frag_offset = 0; + continue; } } else if (skb->next) { - skb = skb->next; - goto do_frag_list; + txm->frag_skb = skb->next; + txm->frag_offset = 0; + continue; } /* Successfully sent the whole packet, account for it. */ + sk->sk_wmem_queued -= txm->sent; + total_sent += txm->sent; skb_dequeue(&sk->sk_write_queue); kfree_skb(head); - sk->sk_wmem_queued -= sent; - total_sent += sent; KCM_STATS_INCR(psock->stats.tx_msgs); - } while ((head = skb_peek(&sk->sk_write_queue))); + } out: if (!head) { /* Done with all queued messages. */ -- cgit v1.2.3 From 2bddad9ec65a925da4ab4f73a9377d80540ca67b Mon Sep 17 00:00:00 2001 From: Justin Chen Date: Mon, 12 Jun 2023 14:37:00 -0700 Subject: ethtool: ioctl: account for sopass diff in set_wol sopass won't be set if wolopt doesn't change. This means the following will fail to set the correct sopass. ethtool -s eth0 wol s sopass 11:22:33:44:55:66 ethtool -s eth0 wol s sopass 22:44:55:66:77:88 Make sure we call into the driver layer set_wol if sopass is different. Fixes: 55b24334c0f2 ("ethtool: ioctl: improve error checking for set_wol") Signed-off-by: Justin Chen Link: https://lore.kernel.org/r/1686605822-34544-1-git-send-email-justin.chen@broadcom.com Signed-off-by: Jakub Kicinski --- net/ethtool/ioctl.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 37b582225854..4a51e0ec295c 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1452,7 +1452,8 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) if (wol.wolopts & ~cur_wol.supported) return -EINVAL; - if (wol.wolopts == cur_wol.wolopts) + if (wol.wolopts == cur_wol.wolopts && + !memcmp(wol.sopass, cur_wol.sopass, sizeof(wol.sopass))) return 0; ret = dev->ethtool_ops->set_wol(dev, &wol); -- cgit v1.2.3 From 2ad66fcb2fded5359a676f7146cf442641d28307 Mon Sep 17 00:00:00 2001 From: Gilad Itzkovitch Date: Thu, 18 May 2023 12:07:23 +1200 Subject: wifi: cfg80211: S1G rate information and calculations Increase the size of S1G rate_info flags to support S1G and add flags for new S1G MCS and the supported bandwidths. Also, include S1G rate information to netlink STA rate message. Lastly, add rate calculation function for S1G MCS. 
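For a concrete feel of the new calculation: with the rate table added below, MCS 2 on a 2 MHz channel with one spatial stream and long GI maps to 1,950,000, so cfg80211_calculate_bitrate() returns (1950000 + 50000) / 100000 = 20, i.e. 2.0 Mbit/s in the usual 100 kbit/s units. A small usage sketch; example_s1g_bitrate() is an invented name, the rate_info fields and flag values are the ones this patch introduces:

#include <net/cfg80211.h>

/* Sketch only: fill in an S1G rate the way a driver would and let the
 * new helper convert it (returns 20, i.e. 2.0 Mbit/s, for this input).
 */
static u32 example_s1g_bitrate(void)
{
        struct rate_info rate = {
                .flags = RATE_INFO_FLAGS_S1G_MCS,
                .mcs = 2,
                .nss = 1,
                .bw = RATE_INFO_BW_2,
        };

        return cfg80211_calculate_bitrate(&rate);
}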
Signed-off-by: Gilad Itzkovitch Link: https://lore.kernel.org/r/20230518000723.991912-1-gilad.itzkovitch@morsemicro.com Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 18 +++++-- include/uapi/linux/nl80211.h | 14 ++++++ net/wireless/nl80211.c | 23 +++++++++ net/wireless/util.c | 110 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 162 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 1b8619685bf6..5d04e7eed43d 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1702,6 +1702,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy, * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode * @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS * @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information + * @RATE_INFO_FLAGS_S1G_MCS: MCS field filled with S1G MCS */ enum rate_info_flags { RATE_INFO_FLAGS_MCS = BIT(0), @@ -1712,6 +1713,7 @@ enum rate_info_flags { RATE_INFO_FLAGS_EDMG = BIT(5), RATE_INFO_FLAGS_EXTENDED_SC_DMG = BIT(6), RATE_INFO_FLAGS_EHT_MCS = BIT(7), + RATE_INFO_FLAGS_S1G_MCS = BIT(8), }; /** @@ -1728,6 +1730,11 @@ enum rate_info_flags { * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation * @RATE_INFO_BW_320: 320 MHz bandwidth * @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation + * @RATE_INFO_BW_1: 1 MHz bandwidth + * @RATE_INFO_BW_2: 2 MHz bandwidth + * @RATE_INFO_BW_4: 4 MHz bandwidth + * @RATE_INFO_BW_8: 8 MHz bandwidth + * @RATE_INFO_BW_16: 16 MHz bandwidth */ enum rate_info_bw { RATE_INFO_BW_20 = 0, @@ -1739,6 +1746,11 @@ enum rate_info_bw { RATE_INFO_BW_HE_RU, RATE_INFO_BW_320, RATE_INFO_BW_EHT_RU, + RATE_INFO_BW_1, + RATE_INFO_BW_2, + RATE_INFO_BW_4, + RATE_INFO_BW_8, + RATE_INFO_BW_16, }; /** @@ -1747,8 +1759,8 @@ enum rate_info_bw { * Information about a receiving or transmitting bitrate * * @flags: bitflag of flags from &enum rate_info_flags - * @mcs: mcs index if struct describes an HT/VHT/HE rate * @legacy: bitrate in 100kbit/s for 802.11abg + * @mcs: mcs index if struct describes an HT/VHT/HE/EHT/S1G rate * @nss: number of streams (VHT & HE only) * @bw: bandwidth (from &enum rate_info_bw) * @he_gi: HE guard interval (from &enum nl80211_he_gi) @@ -1761,9 +1773,9 @@ enum rate_info_bw { * only valid if bw is %RATE_INFO_BW_EHT_RU) */ struct rate_info { - u8 flags; - u8 mcs; + u16 flags; u16 legacy; + u8 mcs; u8 nss; u8 bw; u8 he_gi; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index c59fec406da5..435c4ac5d9bf 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3667,6 +3667,13 @@ enum nl80211_eht_ru_alloc { * (u8, see &enum nl80211_eht_gi) * @NL80211_RATE_INFO_EHT_RU_ALLOC: EHT RU allocation, if not present then * non-OFDMA was used (u8, see &enum nl80211_eht_ru_alloc) + * @NL80211_RATE_INFO_S1G_MCS: S1G MCS index (u8, 0-10) + * @NL80211_RATE_INFO_S1G_NSS: S1G NSS value (u8, 1-4) + * @NL80211_RATE_INFO_1_MHZ_WIDTH: 1 MHz S1G rate + * @NL80211_RATE_INFO_2_MHZ_WIDTH: 2 MHz S1G rate + * @NL80211_RATE_INFO_4_MHZ_WIDTH: 4 MHz S1G rate + * @NL80211_RATE_INFO_8_MHZ_WIDTH: 8 MHz S1G rate + * @NL80211_RATE_INFO_16_MHZ_WIDTH: 16 MHz S1G rate * @__NL80211_RATE_INFO_AFTER_LAST: internal use */ enum nl80211_rate_info { @@ -3693,6 +3700,13 @@ enum nl80211_rate_info { NL80211_RATE_INFO_EHT_NSS, NL80211_RATE_INFO_EHT_GI, NL80211_RATE_INFO_EHT_RU_ALLOC, + NL80211_RATE_INFO_S1G_MCS, + NL80211_RATE_INFO_S1G_NSS, + NL80211_RATE_INFO_1_MHZ_WIDTH, + NL80211_RATE_INFO_2_MHZ_WIDTH, + NL80211_RATE_INFO_4_MHZ_WIDTH, + 
NL80211_RATE_INFO_8_MHZ_WIDTH, + NL80211_RATE_INFO_16_MHZ_WIDTH, /* keep last */ __NL80211_RATE_INFO_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 772671b9bc42..f962765f7e0f 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6368,12 +6368,27 @@ bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr) return false; switch (info->bw) { + case RATE_INFO_BW_1: + rate_flg = NL80211_RATE_INFO_1_MHZ_WIDTH; + break; + case RATE_INFO_BW_2: + rate_flg = NL80211_RATE_INFO_2_MHZ_WIDTH; + break; + case RATE_INFO_BW_4: + rate_flg = NL80211_RATE_INFO_4_MHZ_WIDTH; + break; case RATE_INFO_BW_5: rate_flg = NL80211_RATE_INFO_5_MHZ_WIDTH; break; + case RATE_INFO_BW_8: + rate_flg = NL80211_RATE_INFO_8_MHZ_WIDTH; + break; case RATE_INFO_BW_10: rate_flg = NL80211_RATE_INFO_10_MHZ_WIDTH; break; + case RATE_INFO_BW_16: + rate_flg = NL80211_RATE_INFO_16_MHZ_WIDTH; + break; default: WARN_ON(1); fallthrough; @@ -6432,6 +6447,14 @@ bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr) nla_put_u8(msg, NL80211_RATE_INFO_HE_RU_ALLOC, info->he_ru_alloc)) return false; + } else if (info->flags & RATE_INFO_FLAGS_S1G_MCS) { + if (nla_put_u8(msg, NL80211_RATE_INFO_S1G_MCS, info->mcs)) + return false; + if (nla_put_u8(msg, NL80211_RATE_INFO_S1G_NSS, info->nss)) + return false; + if (info->flags & RATE_INFO_FLAGS_SHORT_GI && + nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)) + return false; } else if (info->flags & RATE_INFO_FLAGS_EHT_MCS) { if (nla_put_u8(msg, NL80211_RATE_INFO_EHT_MCS, info->mcs)) return false; diff --git a/net/wireless/util.c b/net/wireless/util.c index 3bc0c3072e78..610a867c14f7 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1646,6 +1646,114 @@ static u32 cfg80211_calculate_bitrate_eht(struct rate_info *rate) return result / 10000; } +static u32 cfg80211_calculate_bitrate_s1g(struct rate_info *rate) +{ + /* For 1, 2, 4, 8 and 16 MHz channels */ + static const u32 base[5][11] = { + { 300000, + 600000, + 900000, + 1200000, + 1800000, + 2400000, + 2700000, + 3000000, + 3600000, + 4000000, + /* MCS 10 supported in 1 MHz only */ + 150000, + }, + { 650000, + 1300000, + 1950000, + 2600000, + 3900000, + 5200000, + 5850000, + 6500000, + 7800000, + /* MCS 9 not valid */ + }, + { 1350000, + 2700000, + 4050000, + 5400000, + 8100000, + 10800000, + 12150000, + 13500000, + 16200000, + 18000000, + }, + { 2925000, + 5850000, + 8775000, + 11700000, + 17550000, + 23400000, + 26325000, + 29250000, + 35100000, + 39000000, + }, + { 8580000, + 11700000, + 17550000, + 23400000, + 35100000, + 46800000, + 52650000, + 58500000, + 70200000, + 78000000, + }, + }; + u32 bitrate; + /* default is 1 MHz index */ + int idx = 0; + + if (rate->mcs >= 11) + goto warn; + + switch (rate->bw) { + case RATE_INFO_BW_16: + idx = 4; + break; + case RATE_INFO_BW_8: + idx = 3; + break; + case RATE_INFO_BW_4: + idx = 2; + break; + case RATE_INFO_BW_2: + idx = 1; + break; + case RATE_INFO_BW_1: + idx = 0; + break; + case RATE_INFO_BW_5: + case RATE_INFO_BW_10: + case RATE_INFO_BW_20: + case RATE_INFO_BW_40: + case RATE_INFO_BW_80: + case RATE_INFO_BW_160: + default: + goto warn; + } + + bitrate = base[idx][rate->mcs]; + bitrate *= rate->nss; + + if (rate->flags & RATE_INFO_FLAGS_SHORT_GI) + bitrate = (bitrate / 9) * 10; + /* do NOT round down here */ + return (bitrate + 50000) / 100000; +warn: + WARN_ONCE(1, "invalid rate bw=%d, mcs=%d, nss=%d\n", + rate->bw, rate->mcs, rate->nss); + return 0; +} + u32 cfg80211_calculate_bitrate(struct 
rate_info *rate) { if (rate->flags & RATE_INFO_FLAGS_MCS) @@ -1662,6 +1770,8 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate) return cfg80211_calculate_bitrate_he(rate); if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) return cfg80211_calculate_bitrate_eht(rate); + if (rate->flags & RATE_INFO_FLAGS_S1G_MCS) + return cfg80211_calculate_bitrate_s1g(rate); return rate->legacy; } -- cgit v1.2.3 From 1ec7291e247055fab3a088e1a333a31e7c06e2dd Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 4 Jun 2023 12:11:24 +0300 Subject: wifi: mac80211: add helpers to access sband iftype data There's quite a bit of code accessing sband iftype data (HE, HE 6 GHz, EHT) and we always need to remember to use the ieee80211_vif_type_p2p() helper. Add new helpers to directly get it from the sband/vif rather than having to call ieee80211_vif_type_p2p(). Convert most code with the following spatch: @@ expression vif, sband; @@ -ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif)) +ieee80211_get_he_iftype_cap_vif(sband, vif) @@ expression vif, sband; @@ -ieee80211_get_eht_iftype_cap(sband, ieee80211_vif_type_p2p(vif)) +ieee80211_get_eht_iftype_cap_vif(sband, vif) @@ expression vif, sband; @@ -ieee80211_get_he_6ghz_capa(sband, ieee80211_vif_type_p2p(vif)) +ieee80211_get_he_6ghz_capa_vif(sband, vif) Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230604120651.db099f49e764.Ie892966c49e22c7b7ee1073bc684f142debfdc84@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 6 ++-- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 5 ++- drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c | 5 ++- include/net/mac80211.h | 44 ++++++++++++++++++++++- net/mac80211/eht.c | 5 ++- net/mac80211/he.c | 3 +- net/mac80211/mlme.c | 30 ++++++---------- net/mac80211/util.c | 11 +++--- 8 files changed, 66 insertions(+), 43 deletions(-) (limited to 'net') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 1ade1f003420..91c38d42d034 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2269,8 +2269,7 @@ bool iwl_mvm_is_nic_ack_enabled(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * so take it from one of them. 
*/ sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; - own_he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(vif)); + own_he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); return (own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_ACK_EN)); @@ -3452,8 +3451,7 @@ static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(vif)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); if (he_cap) { /* we know that ours is writable */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 8cc2e2145cf9..430642576f5d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -192,8 +192,7 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm, WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)); sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; - he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(vif)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); if (he_cap) { /* we know that ours is writable */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index c3a00bfbeef2..f72d1ca3cfed 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include "rs.h" #include "fw-api.h" @@ -94,8 +94,7 @@ static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm, IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; - sband_he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(vif)); + sband_he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); if (sband_he_cap && !(sband_he_cap->he_cap_elem.phy_cap_info[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index f4516c034da2..8ea23884a583 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -7,7 +7,7 @@ * Copyright 2007-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2022 Intel Corporation + * Copyright (C) 2018 - 2023 Intel Corporation */ #ifndef MAC80211_H @@ -6866,6 +6866,48 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif) return ieee80211_iftype_p2p(vif->type, vif->p2p); } +/** + * ieee80211_get_he_iftype_cap_vif - return HE capabilities for sband/vif + * @sband: the sband to search for the iftype on + * @vif: the vif to get the iftype from + * + * Return: pointer to the struct ieee80211_sta_he_cap, or %NULL is none found + */ +static inline const struct ieee80211_sta_he_cap * +ieee80211_get_he_iftype_cap_vif(const struct ieee80211_supported_band *sband, + struct ieee80211_vif *vif) +{ + return ieee80211_get_he_iftype_cap(sband, ieee80211_vif_type_p2p(vif)); +} + +/** + * 
ieee80211_get_he_6ghz_capa_vif - return HE 6 GHz capabilities + * @sband: the sband to search for the STA on + * @vif: the vif to get the iftype from + * + * Return: the 6GHz capabilities + */ +static inline __le16 +ieee80211_get_he_6ghz_capa_vif(const struct ieee80211_supported_band *sband, + struct ieee80211_vif *vif) +{ + return ieee80211_get_he_6ghz_capa(sband, ieee80211_vif_type_p2p(vif)); +} + +/** + * ieee80211_get_eht_iftype_cap_vif - return ETH capabilities for sband/vif + * @sband: the sband to search for the iftype on + * @vif: the vif to get the iftype from + * + * Return: pointer to the struct ieee80211_sta_eht_cap, or %NULL is none found + */ +static inline const struct ieee80211_sta_eht_cap * +ieee80211_get_eht_iftype_cap_vif(const struct ieee80211_supported_band *sband, + struct ieee80211_vif *vif) +{ + return ieee80211_get_eht_iftype_cap(sband, ieee80211_vif_type_p2p(vif)); +} + /** * ieee80211_update_mu_groups - set the VHT MU-MIMO groud data * diff --git a/net/mac80211/eht.c b/net/mac80211/eht.c index 18bc6b78b267..ddc7acc68335 100644 --- a/net/mac80211/eht.c +++ b/net/mac80211/eht.c @@ -2,7 +2,7 @@ /* * EHT handling * - * Copyright(c) 2021-2022 Intel Corporation + * Copyright(c) 2021-2023 Intel Corporation */ #include "ieee80211_i.h" @@ -25,8 +25,7 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata, memset(eht_cap, 0, sizeof(*eht_cap)); if (!eht_cap_ie_elem || - !ieee80211_get_eht_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif))) + !ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) return; mcs_nss_size = ieee80211_eht_mcs_nss_size(he_cap_ie_elem, diff --git a/net/mac80211/he.c b/net/mac80211/he.c index 0322abae0825..9f5ffdc9db28 100644 --- a/net/mac80211/he.c +++ b/net/mac80211/he.c @@ -128,8 +128,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata, return; own_he_cap_ptr = - ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif)); + ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); if (!own_he_cap_ptr) return; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 3827b5dcdb03..270133883882 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -511,16 +511,14 @@ static int ieee80211_config_bw(struct ieee80211_link_data *link, /* don't check HE if we associated as non-HE station */ if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE || - !ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif))) { + !ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) { he_oper = NULL; eht_oper = NULL; } /* don't check EHT if we associated as non-EHT station */ if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT || - !ieee80211_get_eht_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif))) + !ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) eht_oper = NULL; /* @@ -776,8 +774,7 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata, const struct ieee80211_sta_he_cap *he_cap; u8 he_cap_size; - he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); if (WARN_ON(!he_cap)) return; @@ -806,10 +803,8 @@ static void ieee80211_add_eht_ie(struct ieee80211_sub_if_data *sdata, const struct ieee80211_sta_eht_cap *eht_cap; u8 eht_cap_size; - he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif)); - eht_cap = ieee80211_get_eht_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, 
&sdata->vif); + eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); /* * EHT capabilities element is only added if the HE capabilities element @@ -3945,8 +3940,7 @@ static bool ieee80211_twt_req_supported(struct ieee80211_sub_if_data *sdata, const struct ieee802_11_elems *elems) { const struct ieee80211_sta_he_cap *own_he_cap = - ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif)); + ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); if (elems->ext_capab_len < 10) return false; @@ -3982,8 +3976,7 @@ static bool ieee80211_twt_bcast_support(struct ieee80211_sub_if_data *sdata, struct link_sta_info *link_sta) { const struct ieee80211_sta_he_cap *own_he_cap = - ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif)); + ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); return bss_conf->he_support && (link_sta->pub->he_cap.he_cap_elem.mac_cap_info[2] & @@ -4620,8 +4613,7 @@ ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata, const struct ieee80211_he_operation *he_op) { const struct ieee80211_sta_he_cap *sta_he_cap = - ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif)); + ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); u16 ap_min_req_set; int i; @@ -4755,15 +4747,13 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, *conn_flags |= IEEE80211_CONN_DISABLE_EHT; } - if (!ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif))) { + if (!ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif)) { mlme_dbg(sdata, "HE not supported, disabling HE and EHT\n"); *conn_flags |= IEEE80211_CONN_DISABLE_HE; *conn_flags |= IEEE80211_CONN_DISABLE_EHT; } - if (!ieee80211_get_eht_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif))) { + if (!ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif)) { mlme_dbg(sdata, "EHT not supported, disabling EHT\n"); *conn_flags |= IEEE80211_CONN_DISABLE_EHT; } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 083ad56d08d9..a5be07a4dbe3 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -6,7 +6,7 @@ * Copyright 2007 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * utilities for mac80211 */ @@ -2121,8 +2121,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata, *offset = noffset; } - he_cap = ieee80211_get_he_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); if (he_cap && cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band), IEEE80211_CHAN_NO_HE)) { @@ -2131,8 +2130,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata, goto out_err; } - eht_cap = ieee80211_get_eht_iftype_cap(sband, - ieee80211_vif_type_p2p(&sdata->vif)); + eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); if (eht_cap && cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band), @@ -2150,8 +2148,7 @@ static int ieee80211_build_preq_ies_band(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband6; sband6 = local->hw.wiphy->bands[NL80211_BAND_6GHZ]; - he_cap = ieee80211_get_he_iftype_cap(sband6, - ieee80211_vif_type_p2p(&sdata->vif)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband6, &sdata->vif); if (he_cap) { enum nl80211_iftype iftype = -- cgit v1.2.3 From 4c2d68f7981d7794b03fb5035208e6dd3014c9bc Mon Sep 17 
00:00:00 2001 From: Johannes Berg Date: Thu, 8 Jun 2023 16:35:57 +0300 Subject: wifi: mac80211: include key action/command in tracing We trace the key information and all, but not whether the key is added or removed - add that information. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230608163202.546e86e216df.Ie3bf9009926f8fa154dde52b0c02537ff7edae36@changeid Signed-off-by: Johannes Berg --- net/mac80211/trace.h | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index db0d0132c58c..e5edf6fe576f 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -2,7 +2,7 @@ /* * Portions of this file * Copyright(c) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2022 Intel Corporation + * Copyright (C) 2018 - 2023 Intel Corporation */ #if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ) @@ -634,6 +634,7 @@ TRACE_EVENT(drv_set_key, LOCAL_ENTRY VIF_ENTRY STA_ENTRY + __field(u32, cmd) KEY_ENTRY ), @@ -641,12 +642,13 @@ TRACE_EVENT(drv_set_key, LOCAL_ASSIGN; VIF_ASSIGN; STA_ASSIGN; + __entry->cmd = cmd; KEY_ASSIGN(key); ), TP_printk( - LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT KEY_PR_FMT, - LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, KEY_PR_ARG + LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " cmd: %d" KEY_PR_FMT, + LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->cmd, KEY_PR_ARG ) ); -- cgit v1.2.3 From c6968d4fc9aac110d607722d92fc17dfa65f0716 Mon Sep 17 00:00:00 2001 From: Anjaneyulu Date: Thu, 8 Jun 2023 16:35:58 +0300 Subject: wifi: mac80211: pass roc->sdata to drv_cancel_remain_on_channel() In the suspend flow, "sdata" is NULL while all started ROCs are being destroyed. Pass "roc->sdata" to drv_cancel_remain_on_channel() instead, to avoid a NULL dereference and still destroy that ROC. Signed-off-by: Anjaneyulu Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230608163202.c678187a308c.Ic11578778655e273931efc5355d570a16465d1be@changeid Signed-off-by: Johannes Berg --- net/mac80211/offchannel.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index d78c82d6b696..cdf991e74ab9 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -8,7 +8,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu * Copyright 2009 Johannes Berg - * Copyright (C) 2019, 2022 Intel Corporation + * Copyright (C) 2019, 2022-2023 Intel Corporation */ #include #include @@ -1014,7 +1014,7 @@ void ieee80211_roc_purge(struct ieee80211_local *local, if (roc->started) { if (local->ops->remain_on_channel) { /* can race, so ignore return value */ - drv_cancel_remain_on_channel(local, sdata); + drv_cancel_remain_on_channel(local, roc->sdata); ieee80211_roc_notify_destroy(roc); } else { roc->abort = true; -- cgit v1.2.3 From b580a372b84fa4895f9c8c96585f0cb9986a921c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 8 Jun 2023 16:36:00 +0300 Subject: wifi: mac80211: mlme: clarify WMM messages These messages apply to a single link only; use link_info() to indicate that.
Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230608163202.21a6bece4313.I08118e5e851fae2f9e43f8a58d3b6217709bf578@changeid Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 270133883882..f13792994b4f 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2639,9 +2639,9 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local, params[ac].aifs = pos[0] & 0x0f; if (params[ac].aifs < 2) { - sdata_info(sdata, - "AP has invalid WMM params (AIFSN=%d for ACI %d), will use 2\n", - params[ac].aifs, aci); + link_info(link, + "AP has invalid WMM params (AIFSN=%d for ACI %d), will use 2\n", + params[ac].aifs, aci); params[ac].aifs = 2; } params[ac].cw_max = ecw2cw((pos[1] & 0xf0) >> 4); @@ -2652,9 +2652,9 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local, if (params[ac].cw_min == 0 || params[ac].cw_min > params[ac].cw_max) { - sdata_info(sdata, - "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n", - params[ac].cw_min, params[ac].cw_max, aci); + link_info(link, + "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n", + params[ac].cw_min, params[ac].cw_max, aci); return false; } ieee80211_regulatory_limit_wmm_params(sdata, ¶ms[ac], ac); @@ -2663,9 +2663,9 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local, /* WMM specification requires all 4 ACIs. */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { if (params[ac].cw_min == 0) { - sdata_info(sdata, - "AP has invalid WMM params (missing AC %d), using defaults\n", - ac); + link_info(link, + "AP has invalid WMM params (missing AC %d), using defaults\n", + ac); return false; } } -- cgit v1.2.3 From 556f16b83459590a2d8d1394fd3af4d6d8ccabc7 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Thu, 8 Jun 2023 16:36:03 +0300 Subject: wifi: mac80211: fix CSA processing while scanning The channel switch parsing code would simply return if a scan is in-progress. Supposedly, this was because channel switch announcements from other APs should be ignored. For the beacon case, the function is already only called if we are associated with the sender. For the action frame cases, add the appropriate check whether the frame is coming from the AP we are associated with. Finally, drop the scanning check from ieee80211_sta_process_chanswitch. 
Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230608163202.3366e9302468.I6c7e0b58c33b7fb4c675374cfe8c3a5cddcec416@changeid Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f13792994b4f..d1e30ff54c1f 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1857,9 +1857,6 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link, if (!cbss) return; - if (local->scanning) - return; - current_band = cbss->channel->band; bss = (void *)cbss->priv; res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band, @@ -5997,6 +5994,10 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: + if (!sdata->u.mgd.associated || + !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) + break; + if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { struct ieee802_11_elems *elems; -- cgit v1.2.3 From 0e966d9a35fa5a3967a4900654fa925b8fb8e71b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 8 Jun 2023 16:36:05 +0300 Subject: wifi: mac80211: don't update rx_stats.last_rate for NDP If we get an NDP (null data packet), there's reason to believe the peer is just sending it to probe, and that would happen at a low rate. Don't track this packet for purposes of last RX rate reporting. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230608163202.8af46c4ac094.I13d9d5019addeaa4aff3c8a05f56c9f5a86b1ebd@changeid Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index ed9939466198..77c7dac760ba 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -6,7 +6,7 @@ * Copyright 2007-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include @@ -1732,7 +1732,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) && test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { link_sta->rx_stats.last_rx = jiffies; - if (ieee80211_is_data(hdr->frame_control) && + if (ieee80211_is_data_present(hdr->frame_control) && !is_multicast_ether_addr(hdr->addr1)) link_sta->rx_stats.last_rate = sta_stats_encode_rate(status); @@ -1746,7 +1746,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) * match the current local configuration when processed. */ link_sta->rx_stats.last_rx = jiffies; - if (ieee80211_is_data(hdr->frame_control)) + if (ieee80211_is_data_present(hdr->frame_control)) link_sta->rx_stats.last_rate = sta_stats_encode_rate(status); } -- cgit v1.2.3 From bc1be54d7eb4ab4c6bd44e9f73d5b44b6c8e761c Mon Sep 17 00:00:00 2001 From: Miri Korenblit Date: Thu, 8 Jun 2023 16:36:06 +0300 Subject: wifi: mac80211: allow disabling SMPS debugfs controls There are cases in which we don't want the user to override the smps mode, e.g. when SMPS should be disabled due to EMLSR. Add a driver flag to disable SMPS overriding and don't override if it is set. 
Signed-off-by: Miri Korenblit Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230608163202.ef129e80556c.I74a298fdc86b87074c95228d3916739de1400597@changeid Signed-off-by: Johannes Berg --- include/net/mac80211.h | 3 +++ net/mac80211/debugfs_netdev.c | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 8ea23884a583..98cb2c532025 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1755,12 +1755,15 @@ struct ieee80211_channel_switch { * @IEEE80211_VIF_GET_NOA_UPDATE: request to handle NOA attributes * and send P2P_PS notification to the driver if NOA changed, even * this is not pure P2P vif. + * @IEEE80211_VIF_DISABLE_SMPS_OVERRIDE: disable user configuration of + * SMPS mode via debugfs. */ enum ieee80211_vif_flags { IEEE80211_VIF_BEACON_FILTER = BIT(0), IEEE80211_VIF_SUPPORTS_CQM_RSSI = BIT(1), IEEE80211_VIF_SUPPORTS_UAPSD = BIT(2), IEEE80211_VIF_GET_NOA_UPDATE = BIT(3), + IEEE80211_VIF_DISABLE_SMPS_OVERRIDE = BIT(4), }; diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index b0cef37eb394..3fea86c9a276 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -2,7 +2,7 @@ /* * Copyright (c) 2006 Jiri Benc * Copyright 2007 Johannes Berg - * Copyright (C) 2020-2022 Intel Corporation + * Copyright (C) 2020-2023 Intel Corporation */ #include @@ -267,6 +267,9 @@ static int ieee80211_set_smps(struct ieee80211_link_data *link, struct ieee80211_local *local = sdata->local; int err; + if (sdata->vif.driver_flags & IEEE80211_VIF_DISABLE_SMPS_OVERRIDE) + return -EOPNOTSUPP; + if (!(local->hw.wiphy->features & NL80211_FEATURE_STATIC_SMPS) && smps_mode == IEEE80211_SMPS_STATIC) return -EINVAL; -- cgit v1.2.3 From f1871abd27641c020298b5c7654e1d8341f22e5f Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Thu, 8 Jun 2023 16:36:08 +0300 Subject: wifi: mac80211: Add getter functions for vif MLD state As a preparation to support disabled/dormant links, add the following function: - ieee80211_vif_usable_links(): returns the bitmap of the links that can be activated. Use this function in all the places that the bitmap of the usable links is needed. - ieee80211_vif_is_mld(): returns true iff the vif is an MLD. Use this function in all the places where an indication that the connection is a MLD is needed. 
Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230608163202.86e3351da1fc.If6fe3a339fda2019f13f57ff768ecffb711b710a@changeid Signed-off-by: Johannes Berg --- include/net/mac80211.h | 21 +++++++++++++++++++++ net/mac80211/cfg.c | 4 ++-- net/mac80211/debug.h | 8 ++++---- net/mac80211/ieee80211_i.h | 2 +- net/mac80211/iface.c | 4 ++-- net/mac80211/link.c | 4 ++-- net/mac80211/mlme.c | 43 ++++++++++++++++++++++--------------------- net/mac80211/rx.c | 2 +- net/mac80211/tx.c | 22 +++++++++++----------- net/mac80211/util.c | 14 +++++++------- 10 files changed, 73 insertions(+), 51 deletions(-) (limited to 'net') diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 98cb2c532025..5424301df3c8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1909,6 +1909,27 @@ struct ieee80211_vif { u8 drv_priv[] __aligned(sizeof(void *)); }; +/** + * ieee80211_vif_usable_links - Return the usable links for the vif + * @vif: the vif for which the usable links are requested + * Return: the usable link bitmap + */ +static inline u16 ieee80211_vif_usable_links(const struct ieee80211_vif *vif) +{ + return vif->valid_links; +} + +/** + * ieee80211_vif_is_mld - Returns true iff the vif is an MLD one + * @vif: the vif + * Return: %true if the vif is an MLD, %false otherwise. + */ +static inline bool ieee80211_vif_is_mld(const struct ieee80211_vif *vif) +{ + /* valid_links != 0 indicates this vif is an MLD */ + return vif->valid_links != 0; +} + #define for_each_vif_active_link(vif, link, link_id) \ for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \ if ((!(vif)->active_links || \ diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 1b78a2ae7a83..d31b66723ccc 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -35,7 +35,7 @@ ieee80211_link_or_deflink(struct ieee80211_sub_if_data *sdata, int link_id, * the return value at all (if it's not a pairwise key), * so in that case (require_valid==false) don't error. */ - if (require_valid && sdata->vif.valid_links) + if (require_valid && ieee80211_vif_is_mld(&sdata->vif)) return ERR_PTR(-EINVAL); return &sdata->deflink; @@ -228,7 +228,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy, return 0; /* FIXME: no support for 4-addr MLO yet */ - if (sdata->vif.valid_links) + if (ieee80211_vif_is_mld(&sdata->vif)) return -EOPNOTSUPP; sdata->u.mgd.use_4addr = params->use_4addr; diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h index b4c20f5e778e..d49894df2351 100644 --- a/net/mac80211/debug.h +++ b/net/mac80211/debug.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Portions - * Copyright (C) 2022 Intel Corporation + * Copyright (C) 2022 - 2023 Intel Corporation */ #ifndef __MAC80211_DEBUG_H #define __MAC80211_DEBUG_H @@ -136,7 +136,7 @@ do { \ #define link_info(link, fmt, ...) \ do { \ - if ((link)->sdata->vif.valid_links) \ + if (ieee80211_vif_is_mld(&(link)->sdata->vif)) \ _sdata_info((link)->sdata, "[link %d] " fmt, \ (link)->link_id, \ ##__VA_ARGS__); \ @@ -145,7 +145,7 @@ do { \ } while (0) #define link_err(link, fmt, ...) \ do { \ - if ((link)->sdata->vif.valid_links) \ + if (ieee80211_vif_is_mld(&(link)->sdata->vif)) \ _sdata_err((link)->sdata, "[link %d] " fmt, \ (link)->link_id, \ ##__VA_ARGS__); \ @@ -154,7 +154,7 @@ do { \ } while (0) #define link_dbg(link, fmt, ...) 
\ do { \ - if ((link)->sdata->vif.valid_links) \ + if (ieee80211_vif_is_mld(&(link)->sdata->vif)) \ _sdata_dbg(1, (link)->sdata, "[link %d] " fmt, \ (link)->link_id, \ ##__VA_ARGS__); \ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index f918e73469a7..f55cec0e6b2d 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1614,7 +1614,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata) struct ieee80211_chanctx_conf *chanctx_conf; enum nl80211_band band; - WARN_ON(sdata->vif.valid_links); + WARN_ON(ieee80211_vif_is_mld(&sdata->vif)); rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 5b67b44e3f89..9518acf9643b 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -521,7 +521,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do cancel_work_sync(&sdata->recalc_smps); sdata_lock(sdata); - WARN(sdata->vif.valid_links, + WARN(ieee80211_vif_is_mld(&sdata->vif), "destroying interface with valid links 0x%04x\n", sdata->vif.valid_links); @@ -1815,7 +1815,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, return -EBUSY; /* for now, don't support changing while links exist */ - if (sdata->vif.valid_links) + if (ieee80211_vif_is_mld(&sdata->vif)) return -EBUSY; switch (sdata->vif.type) { diff --git a/net/mac80211/link.c b/net/mac80211/link.c index e82db88a47f8..5ec60f0d821c 100644 --- a/net/mac80211/link.c +++ b/net/mac80211/link.c @@ -329,7 +329,7 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata, return -EINVAL; /* cannot activate links that don't exist */ - if (active_links & ~sdata->vif.valid_links) + if (active_links & ~ieee80211_vif_usable_links(&sdata->vif)) return -EINVAL; /* nothing to do */ @@ -485,7 +485,7 @@ void ieee80211_set_active_links_async(struct ieee80211_vif *vif, return; /* cannot activate links that don't exist */ - if (active_links & ~sdata->vif.valid_links) + if (active_links & ~ieee80211_vif_usable_links(&sdata->vif)) return; /* nothing to do */ diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index d1e30ff54c1f..fdd80f73e6cb 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1282,7 +1282,7 @@ static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata, u8 *ml_elem_len; void *capab_pos; - if (!sdata->vif.valid_links) + if (!ieee80211_vif_is_mld(&sdata->vif)) return; ift_ext_capa = cfg80211_get_iftype_ext_capa(local->hw.wiphy, @@ -1456,7 +1456,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) capab |= WLAN_CAPABILITY_PRIVACY; } - if (sdata->vif.valid_links) { + if (ieee80211_vif_is_mld(&sdata->vif)) { /* consider the multi-link element with STA profile */ size += sizeof(struct ieee80211_multi_link_elem); /* max common info field in basic multi-link element */ @@ -1789,7 +1789,7 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - if (WARN_ON(sdata->vif.valid_links)) + if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) success = false; trace_api_chswitch_done(sdata, success); @@ -2835,7 +2835,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, if (vif_cfg->arp_addr_cnt) vif_changed |= BSS_CHANGED_ARP_FILTER; - if (sdata->vif.valid_links) { + if (ieee80211_vif_is_mld(&sdata->vif)) { for (link_id = 0; link_id < 
IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { @@ -2867,7 +2867,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, mutex_unlock(&local->iflist_mtx); /* leave this here to not change ordering in non-MLO cases */ - if (!sdata->vif.valid_links) + if (!ieee80211_vif_is_mld(&sdata->vif)) ieee80211_recalc_smps(sdata, &sdata->deflink); ieee80211_recalc_ps_vif(sdata); @@ -2963,7 +2963,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, sta_info_flush(sdata); /* finally reset all BSS / config parameters */ - if (!sdata->vif.valid_links) + if (!ieee80211_vif_is_mld(&sdata->vif)) changed |= ieee80211_reset_erp_info(sdata); ieee80211_led_assoc(local, 0); @@ -2988,7 +2988,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, sizeof(sdata->vif.bss_conf.mu_group.membership)); memset(sdata->vif.bss_conf.mu_group.position, 0, sizeof(sdata->vif.bss_conf.mu_group.position)); - if (!sdata->vif.valid_links) + if (!ieee80211_vif_is_mld(&sdata->vif)) changed |= BSS_CHANGED_MU_GROUPS; sdata->vif.bss_conf.mu_mimo_owner = false; @@ -3002,7 +3002,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, changed |= BSS_CHANGED_ARP_FILTER; sdata->vif.bss_conf.qos = false; - if (!sdata->vif.valid_links) { + if (!ieee80211_vif_is_mld(&sdata->vif)) { changed |= BSS_CHANGED_QOS; /* The BSSID (not really interesting) and HT changed */ changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; @@ -3171,7 +3171,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) u8 unicast_limit = max(1, max_probe_tries - 3); struct sta_info *sta; - if (WARN_ON(sdata->vif.valid_links)) + if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) return; /* @@ -3219,7 +3219,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool already = false; - if (WARN_ON_ONCE(sdata->vif.valid_links)) + if (WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif))) return; if (!ieee80211_sdata_running(sdata)) @@ -3294,7 +3294,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, int ssid_len; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION || - sdata->vif.valid_links)) + ieee80211_vif_is_mld(&sdata->vif))) return NULL; sdata_assert_lock(sdata); @@ -3359,7 +3359,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) } /* in MLO assume we have a link where we can TX the frame */ - tx = sdata->vif.valid_links || !sdata->deflink.csa_block_tx; + tx = ieee80211_vif_is_mld(&sdata->vif) || + !sdata->deflink.csa_block_tx; if (!ifmgd->driver_disconnect) { unsigned int link_id; @@ -3560,7 +3561,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata, for (i = 0; i < ARRAY_SIZE(data.bss); i++) data.bss[i] = assoc_data->link[i].bss; - if (sdata->vif.valid_links) + if (ieee80211_vif_is_mld(&sdata->vif)) data.ap_mld_addr = assoc_data->ap_addr; cfg80211_assoc_failure(sdata->dev, &data); @@ -4989,7 +4990,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, if (WARN_ON(!sta)) goto out_err; - if (sdata->vif.valid_links) { + if (ieee80211_vif_is_mld(&sdata->vif)) { for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { if (!assoc_data->link[link_id].bss) continue; @@ -5017,7 +5018,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, if (WARN_ON(!link)) goto out_err; - if (sdata->vif.valid_links) + if (ieee80211_vif_is_mld(&sdata->vif)) link_info(link, "local address %pM, AP link 
address %pM%s\n", link->conf->addr, @@ -5266,7 +5267,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, ifmgd->broken_ap = true; } - if (sdata->vif.valid_links) { + if (ieee80211_vif_is_mld(&sdata->vif)) { if (!elems->multi_link) { sdata_info(sdata, "MLO association with %pM but no multi-link element in response!\n", @@ -5333,7 +5334,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, resp.uapsd_queues |= ieee80211_ac_to_qos_mask[ac]; } - if (sdata->vif.valid_links) { + if (ieee80211_vif_is_mld(&sdata->vif)) { ether_addr_copy(ap_mld_addr, sdata->vif.cfg.ap_addr); resp.ap_mld_addr = ap_mld_addr; } @@ -5659,7 +5660,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, rcu_read_unlock(); if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon && - !WARN_ON(sdata->vif.valid_links) && + !WARN_ON(ieee80211_vif_is_mld(&sdata->vif)) && ieee80211_rx_our_beacon(bssid, ifmgd->assoc_data->link[0].bss)) { parse_params.bss = ifmgd->assoc_data->link[0].bss; elems = ieee802_11_parse_elems_full(&parse_params); @@ -6357,7 +6358,7 @@ static void ieee80211_sta_bcn_mon_timer(struct timer_list *t) struct ieee80211_sub_if_data *sdata = from_timer(sdata, t, u.mgd.bcn_mon_timer); - if (WARN_ON(sdata->vif.valid_links)) + if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) return; if (sdata->vif.bss_conf.csa_active && @@ -6381,7 +6382,7 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t) struct sta_info *sta; unsigned long timeout; - if (WARN_ON(sdata->vif.valid_links)) + if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif))) return; if (sdata->vif.bss_conf.csa_active && @@ -6937,7 +6938,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, return 0; err_clear: - if (!sdata->vif.valid_links) { + if (!ieee80211_vif_is_mld(&sdata->vif)) { eth_zero_addr(sdata->deflink.u.mgd.bssid); ieee80211_link_info_change_notify(sdata, &sdata->deflink, BSS_CHANGED_BSSID); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 77c7dac760ba..6ebec32b4ebc 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2505,7 +2505,7 @@ bool ieee80211_is_our_addr(struct ieee80211_sub_if_data *sdata, if (ether_addr_equal(sdata->vif.addr, addr)) return true; - if (!sdata->vif.valid_links) + if (!ieee80211_vif_is_mld(&sdata->vif)) return false; for (link_id = 0; link_id < ARRAY_SIZE(sdata->vif.link_conf); link_id++) { diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 148a0e2aa740..bdb09c8ccc96 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -844,7 +844,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) /* SNS11 from 802.11be 10.3.2.14 */ if (unlikely(is_multicast_ether_addr(hdr->addr1) && - info->control.vif->valid_links && + ieee80211_vif_is_mld(info->control.vif) && info->control.vif->type == NL80211_IFTYPE_AP)) { if (info->control.flags & IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX) tx->sdata->mld_mcast_seq += 0x10; @@ -2610,7 +2610,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, ethertype = (skb->data[12] << 8) | skb->data[13]; fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA); - if (!sdata->vif.valid_links) + if (!ieee80211_vif_is_mld(&sdata->vif)) chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); @@ -2627,7 +2627,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED); wme_sta = sta->sta.wme; } - if (!sdata->vif.valid_links) { + if (!ieee80211_vif_is_mld(&sdata->vif)) { struct 
ieee80211_sub_if_data *ap_sdata; /* override chanctx_conf from AP (we don't have one) */ @@ -2645,7 +2645,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, /* DA BSSID SA */ memcpy(hdr.addr1, skb->data, ETH_ALEN); - if (sdata->vif.valid_links && sta && !sta->sta.mlo) { + if (ieee80211_vif_is_mld(&sdata->vif) && sta && !sta->sta.mlo) { struct ieee80211_link_data *link; link_id = sta->deflink.link_id; @@ -2797,7 +2797,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, } if (!chanctx_conf) { - if (!sdata->vif.valid_links) { + if (!ieee80211_vif_is_mld(&sdata->vif)) { ret = -ENOTCONN; goto free; } @@ -3039,7 +3039,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta) !ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG)) goto out; - if (!sdata->vif.valid_links) { + if (!ieee80211_vif_is_mld(&sdata->vif)) { rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); @@ -3110,7 +3110,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta) fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ build.da_offs = offsetof(struct ieee80211_hdr, addr1); - if (sta->sta.mlo || !sdata->vif.valid_links) { + if (sta->sta.mlo || !ieee80211_vif_is_mld(&sdata->vif)) { memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); } else { unsigned int link_id = sta->deflink.link_id; @@ -4479,7 +4479,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, __ieee80211_subif_start_xmit(skb, dev, 0, IEEE80211_TX_CTRL_MLO_LINK_UNSPEC, NULL); - } else if (sdata->vif.valid_links && + } else if (ieee80211_vif_is_mld(&sdata->vif) && sdata->vif.type == NL80211_IFTYPE_AP && !ieee80211_hw_check(&sdata->local->hw, MLO_MCAST_MULTI_LINK_TX)) { ieee80211_mlo_multicast_tx(dev, skb); @@ -4755,7 +4755,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, if (info->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING) { /* update band only for non-MLD */ - if (!sdata->vif.valid_links) { + if (!ieee80211_vif_is_mld(&sdata->vif)) { chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (unlikely(!chanctx_conf)) { @@ -6002,7 +6002,7 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, BUILD_BUG_ON(!FIELD_FIT(IEEE80211_TX_CTRL_MLO_LINK, IEEE80211_LINK_UNSPECIFIED)); - if (!sdata->vif.valid_links) { + if (!ieee80211_vif_is_mld(&sdata->vif)) { link = 0; } else if (link_id >= 0) { link = link_id; @@ -6048,7 +6048,7 @@ void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, enum nl80211_band band; rcu_read_lock(); - if (!sdata->vif.valid_links) { + if (!ieee80211_vif_is_mld(&sdata->vif)) { WARN_ON(link_id >= 0); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index a5be07a4dbe3..3aa363bdb6e0 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1824,7 +1824,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct ieee80211_mgmt *mgmt; - bool multi_link = sdata->vif.valid_links; + bool multi_link = ieee80211_vif_is_mld(&sdata->vif); struct { u8 id; u8 len; @@ -2671,7 +2671,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) continue; sdata_lock(sdata); - if (sdata->vif.valid_links) { + if (ieee80211_vif_is_mld(&sdata->vif)) { struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS] = { [0] = &sdata->vif.bss_conf, }; @@ -2691,7 +2691,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) for (link_id = 0; link_id 
< ARRAY_SIZE(sdata->vif.link_conf); link_id++) { - if (sdata->vif.valid_links && + if (ieee80211_vif_is_mld(&sdata->vif) && !(sdata->vif.active_links & BIT(link_id))) continue; @@ -2723,12 +2723,12 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (sdata->vif.bss_conf.mu_mimo_owner) changed |= BSS_CHANGED_MU_GROUPS; - if (!sdata->vif.valid_links) + if (!ieee80211_vif_is_mld(&sdata->vif)) changed |= BSS_CHANGED_IDLE; switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: - if (!sdata->vif.valid_links) { + if (!ieee80211_vif_is_mld(&sdata->vif)) { changed |= BSS_CHANGED_ASSOC | BSS_CHANGED_ARP_FILTER | BSS_CHANGED_PS; @@ -2766,7 +2766,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) case NL80211_IFTYPE_AP: changed |= BSS_CHANGED_P2P_PS; - if (sdata->vif.valid_links) + if (ieee80211_vif_is_mld(&sdata->vif)) ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_SSID); else @@ -2780,7 +2780,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (sdata->vif.type == NL80211_IFTYPE_AP) { changed |= BSS_CHANGED_AP_PROBE_RESP; - if (sdata->vif.valid_links) { + if (ieee80211_vif_is_mld(&sdata->vif)) { ieee80211_reconfig_ap_links(local, sdata, changed); -- cgit v1.2.3 From 6cf963edbbd3b279185e28e4864c9698ffaa23c3 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Thu, 8 Jun 2023 16:36:10 +0300 Subject: wifi: cfg80211: Support association to AP MLD with disabled links An AP part of an AP MLD might be temporarily disabled, and might be enabled later. Such a link should be included in the association exchange, but should not be used until enabled. Extend the NL80211_CMD_ASSOCIATE to also indicate disabled links. Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230608163202.c4c61ee4c4a5.I784ef4a0d619fc9120514b5615458fbef3b3684a@changeid Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 5 ++++- include/uapi/linux/nl80211.h | 7 ++++++- net/wireless/nl80211.c | 13 ++++++++++++- 3 files changed, 22 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5d04e7eed43d..388f2a3851a2 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -7,7 +7,7 @@ * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2021, 2023 Intel Corporation */ #include @@ -2882,11 +2882,14 @@ struct cfg80211_auth_request { * if this is %NULL for a link, that link is not requested * @elems: extra elements for the per-STA profile for this link * @elems_len: length of the elements + * @disabled: If set this link should be included during association etc. but it + * should not be used until enabled by the AP MLD. */ struct cfg80211_assoc_link { struct cfg80211_bss *bss; const u8 *elems; size_t elems_len; + bool disabled; }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 435c4ac5d9bf..03939bdb0e48 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -11,7 +11,7 @@ * Copyright 2008 Jouni Malinen * Copyright 2008 Colin McCabe * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -2805,6 +2805,9 @@ enum nl80211_commands { * index. 
If the userspace includes more RNR elements than number of * MBSSID elements then these will be added in every EMA beacon. * + * @NL80211_ATTR_MLO_LINK_DISABLED: Flag attribute indicating that the link is + * disabled. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3341,6 +3344,8 @@ enum nl80211_attrs { NL80211_ATTR_EMA_RNR_ELEMS, + NL80211_ATTR_MLO_LINK_DISABLED, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index f962765f7e0f..ec7d467cb096 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5,7 +5,7 @@ * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include @@ -816,6 +816,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS] = { .type = NLA_U16 }, [NL80211_ATTR_HW_TIMESTAMP_ENABLED] = { .type = NLA_FLAG }, [NL80211_ATTR_EMA_RNR_ELEMS] = { .type = NLA_NESTED }, + [NL80211_ATTR_MLO_LINK_DISABLED] = { .type = NLA_FLAG }, }; /* policy for the key attributes */ @@ -11138,6 +11139,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) goto free; } } + + req.links[link_id].disabled = + nla_get_flag(attrs[NL80211_ATTR_MLO_LINK_DISABLED]); } if (!req.links[req.link_id].bss) { @@ -11152,6 +11156,13 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) goto free; } + if (req.links[req.link_id].disabled) { + GENL_SET_ERR_MSG(info, + "cannot have assoc link disabled"); + err = -EINVAL; + goto free; + } + kfree(attrs); attrs = NULL; } else { -- cgit v1.2.3 From 4cacadc0dbd8013e6161aa8843d8e9d8ad435b47 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Sun, 11 Jun 2023 12:14:28 +0300 Subject: wifi: mac80211: Fix permissions for valid_links debugfs entry The entry should be a read only one and not a write only one. Fix it. Fixes: 3d9011029227 ("wifi: mac80211: implement link switching") Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230611121219.c75316990411.I1565a7fcba8a37f83efffb0cc6b71c572b896e94@changeid [remove x16 change since it doesn't work yet] Signed-off-by: Johannes Berg --- net/mac80211/debugfs_netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 3fea86c9a276..1b9293379c1e 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -720,7 +720,7 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) DEBUGFS_ADD_MODE(uapsd_queues, 0600); DEBUGFS_ADD_MODE(uapsd_max_sp_len, 0600); DEBUGFS_ADD_MODE(tdls_wider_bw, 0600); - DEBUGFS_ADD_MODE(valid_links, 0200); + DEBUGFS_ADD_MODE(valid_links, 0400); DEBUGFS_ADD_MODE(active_links, 0600); } -- cgit v1.2.3 From 0ffe85885b31ac0308bb13a31eec6a441e2a2d77 Mon Sep 17 00:00:00 2001 From: Azeem Shaikh Date: Mon, 12 Jun 2023 23:23:01 +0000 Subject: wifi: cfg80211: replace strlcpy() with strscpy() strlcpy() reads the entire source buffer first. This read may exceed the destination size limit. This is both inefficient and can lead to linear read overflows if a source string is not NUL-terminated [1]. 
In an effort to remove strlcpy() completely [2], replace strlcpy() here with strscpy(). Direct replacement is safe here since WIPHY_ASSIGN is only used by TRACE macros and the return values are ignored. [1] https://www.kernel.org/doc/html/latest/process/deprecated.html#strlcpy [2] https://github.com/KSPP/linux/issues/89 Signed-off-by: Azeem Shaikh Reviewed-by: Kees Cook Link: https://lore.kernel.org/r/20230612232301.2572316-1-azeemshaikh38@gmail.com Signed-off-by: Johannes Berg --- net/wireless/trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 716a1fa70069..a00da3ebfed5 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -22,7 +22,7 @@ #define MAXNAME 32 #define WIPHY_ENTRY __array(char, wiphy_name, 32) -#define WIPHY_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(wiphy), MAXNAME) +#define WIPHY_ASSIGN strscpy(__entry->wiphy_name, wiphy_name(wiphy), MAXNAME) #define WIPHY_PR_FMT "%s" #define WIPHY_PR_ARG __entry->wiphy_name -- cgit v1.2.3 From f3c21ed9ce17438b9b6fb4a959640c486cabda24 Mon Sep 17 00:00:00 2001 From: Azeem Shaikh Date: Tue, 13 Jun 2023 00:34:04 +0000 Subject: wifi: mac80211: Replace strlcpy with strscpy strlcpy() reads the entire source buffer first. This read may exceed the destination size limit. This is both inefficient and can lead to linear read overflows if a source string is not NUL-terminated [1]. In an effort to remove strlcpy() completely [2], replace strlcpy() here with strscpy(). Direct replacement is safe here since LOCAL_ASSIGN is only used by TRACE macros and the return values are ignored. [1] https://www.kernel.org/doc/html/latest/process/deprecated.html#strlcpy [2] https://github.com/KSPP/linux/issues/89 Signed-off-by: Azeem Shaikh Reviewed-by: Kees Cook Link: https://lore.kernel.org/r/20230613003404.3538524-1-azeemshaikh38@gmail.com Signed-off-by: Johannes Berg --- net/mac80211/trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index e5edf6fe576f..b8c53b4a710b 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -17,7 +17,7 @@ #define MAXNAME 32 #define LOCAL_ENTRY __array(char, wiphy_name, 32) -#define LOCAL_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(local->hw.wiphy), MAXNAME) +#define LOCAL_ASSIGN strscpy(__entry->wiphy_name, wiphy_name(local->hw.wiphy), MAXNAME) #define LOCAL_PR_FMT "%s" #define LOCAL_PR_ARG __entry->wiphy_name -- cgit v1.2.3 From fa0e21fa44438a0e856d42224bfa24641d37b979 Mon Sep 17 00:00:00 2001 From: Edwin Peer Date: Sun, 11 Jun 2023 13:51:08 +0300 Subject: rtnetlink: extend RTEXT_FILTER_SKIP_STATS to IFLA_VF_INFO This filter already exists for excluding IPv6 SNMP stats. Extend its definition to also exclude IFLA_VF_INFO stats in RTM_GETLINK. This patch constitutes a partial fix for a netlink attribute nesting overflow bug in IFLA_VFINFO_LIST. By excluding the stats when the requester doesn't need them, the truncation of the VF list is avoided. While it was technically only the stats added in commit c5a9f6f0ab40 ("net/core: Add drop counters to VF statistics") breaking the camel's back, the appreciable size of the stats data should never have been included without due consideration for the maximum number of VFs supported by PCI. 
Fixes: 3b766cd83232 ("net/core: Add reading VF statistics through the PF netdevice") Fixes: c5a9f6f0ab40 ("net/core: Add drop counters to VF statistics") Signed-off-by: Edwin Peer Cc: Edwin Peer Signed-off-by: Gal Pressman Link: https://lore.kernel.org/r/20230611105108.122586-1-gal@nvidia.com Signed-off-by: Paolo Abeni --- net/core/rtnetlink.c | 96 ++++++++++++++++++++++++++++------------------------ 1 file changed, 51 insertions(+), 45 deletions(-) (limited to 'net') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 41de3a2f29e1..d1815122298d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -961,24 +961,27 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, nla_total_size(sizeof(struct ifla_vf_rate)) + nla_total_size(sizeof(struct ifla_vf_link_state)) + nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + - nla_total_size(0) + /* nest IFLA_VF_STATS */ - /* IFLA_VF_STATS_RX_PACKETS */ - nla_total_size_64bit(sizeof(__u64)) + - /* IFLA_VF_STATS_TX_PACKETS */ - nla_total_size_64bit(sizeof(__u64)) + - /* IFLA_VF_STATS_RX_BYTES */ - nla_total_size_64bit(sizeof(__u64)) + - /* IFLA_VF_STATS_TX_BYTES */ - nla_total_size_64bit(sizeof(__u64)) + - /* IFLA_VF_STATS_BROADCAST */ - nla_total_size_64bit(sizeof(__u64)) + - /* IFLA_VF_STATS_MULTICAST */ - nla_total_size_64bit(sizeof(__u64)) + - /* IFLA_VF_STATS_RX_DROPPED */ - nla_total_size_64bit(sizeof(__u64)) + - /* IFLA_VF_STATS_TX_DROPPED */ - nla_total_size_64bit(sizeof(__u64)) + nla_total_size(sizeof(struct ifla_vf_trust))); + if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) { + size += num_vfs * + (nla_total_size(0) + /* nest IFLA_VF_STATS */ + /* IFLA_VF_STATS_RX_PACKETS */ + nla_total_size_64bit(sizeof(__u64)) + + /* IFLA_VF_STATS_TX_PACKETS */ + nla_total_size_64bit(sizeof(__u64)) + + /* IFLA_VF_STATS_RX_BYTES */ + nla_total_size_64bit(sizeof(__u64)) + + /* IFLA_VF_STATS_TX_BYTES */ + nla_total_size_64bit(sizeof(__u64)) + + /* IFLA_VF_STATS_BROADCAST */ + nla_total_size_64bit(sizeof(__u64)) + + /* IFLA_VF_STATS_MULTICAST */ + nla_total_size_64bit(sizeof(__u64)) + + /* IFLA_VF_STATS_RX_DROPPED */ + nla_total_size_64bit(sizeof(__u64)) + + /* IFLA_VF_STATS_TX_DROPPED */ + nla_total_size_64bit(sizeof(__u64))); + } return size; } else return 0; @@ -1270,7 +1273,8 @@ static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb, static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, struct net_device *dev, int vfs_num, - struct nlattr *vfinfo) + struct nlattr *vfinfo, + u32 ext_filter_mask) { struct ifla_vf_rss_query_en vf_rss_query_en; struct nlattr *vf, *vfstats, *vfvlanlist; @@ -1376,33 +1380,35 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, goto nla_put_vf_failure; } nla_nest_end(skb, vfvlanlist); - memset(&vf_stats, 0, sizeof(vf_stats)); - if (dev->netdev_ops->ndo_get_vf_stats) - dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num, - &vf_stats); - vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS); - if (!vfstats) - goto nla_put_vf_failure; - if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS, - vf_stats.rx_packets, IFLA_VF_STATS_PAD) || - nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS, - vf_stats.tx_packets, IFLA_VF_STATS_PAD) || - nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES, - vf_stats.rx_bytes, IFLA_VF_STATS_PAD) || - nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES, - vf_stats.tx_bytes, IFLA_VF_STATS_PAD) || - nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST, - vf_stats.broadcast, IFLA_VF_STATS_PAD) || - nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST, - 
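For illustration only, a hedged userspace sketch (libmnl-based; the function name and buffer handling are made up and nothing here comes from this patch) of a dump request that sets the extended filter mask so the kernel omits the per-VF statistics nests:

#include <sys/socket.h>
#include <libmnl/libmnl.h>
#include <linux/rtnetlink.h>

static struct nlmsghdr *build_getlink_skip_stats(char *buf, unsigned int seq)
{
        struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
        struct ifinfomsg *ifi;

        nlh->nlmsg_type = RTM_GETLINK;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        nlh->nlmsg_seq = seq;

        ifi = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifi));
        ifi->ifi_family = AF_UNSPEC;

        /* with this patch the flag also drops the IFLA_VF_STATS nests,
         * keeping IFLA_VFINFO_LIST within the attribute size limit
         */
        mnl_attr_put_u32(nlh, IFLA_EXT_MASK, RTEXT_FILTER_SKIP_STATS);

        return nlh;
}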
vf_stats.multicast, IFLA_VF_STATS_PAD) || - nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED, - vf_stats.rx_dropped, IFLA_VF_STATS_PAD) || - nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED, - vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) { - nla_nest_cancel(skb, vfstats); - goto nla_put_vf_failure; + if (~ext_filter_mask & RTEXT_FILTER_SKIP_STATS) { + memset(&vf_stats, 0, sizeof(vf_stats)); + if (dev->netdev_ops->ndo_get_vf_stats) + dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num, + &vf_stats); + vfstats = nla_nest_start_noflag(skb, IFLA_VF_STATS); + if (!vfstats) + goto nla_put_vf_failure; + if (nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_PACKETS, + vf_stats.rx_packets, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_PACKETS, + vf_stats.tx_packets, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_BYTES, + vf_stats.rx_bytes, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_BYTES, + vf_stats.tx_bytes, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST, + vf_stats.broadcast, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST, + vf_stats.multicast, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED, + vf_stats.rx_dropped, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED, + vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) { + nla_nest_cancel(skb, vfstats); + goto nla_put_vf_failure; + } + nla_nest_end(skb, vfstats); } - nla_nest_end(skb, vfstats); nla_nest_end(skb, vf); return 0; @@ -1435,7 +1441,7 @@ static noinline_for_stack int rtnl_fill_vf(struct sk_buff *skb, return -EMSGSIZE; for (i = 0; i < num_vfs; i++) { - if (rtnl_fill_vfinfo(skb, dev, i, vfinfo)) + if (rtnl_fill_vfinfo(skb, dev, i, vfinfo, ext_filter_mask)) return -EMSGSIZE; } -- cgit v1.2.3 From 89da780aa4c7667c8b1a144959206262aebdeabe Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 12 Jun 2023 11:55:34 -0400 Subject: rtnetlink: move validate_linkmsg out of do_setlink This patch moves validate_linkmsg() out of do_setlink() to its callers and deletes the early validate_linkmsg() call in __rtnl_newlink(), so that it will not call validate_linkmsg() twice in either of the paths: - __rtnl_newlink() -> do_setlink() - __rtnl_newlink() -> rtnl_newlink_create() -> rtnl_create_link() Additionally, as validate_linkmsg() is now only called with a real dev, we can remove the NULL check for dev in validate_linkmsg(). Note that we moved validate_linkmsg() check to the places where it has not done any changes to the dev, as Jakub suggested. 
Signed-off-by: Xin Long Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/cf2ef061e08251faf9e8be25ff0d61150c030475.1686585334.git.lucien.xin@gmail.com Signed-off-by: Jakub Kicinski --- net/core/rtnetlink.c | 83 ++++++++++++++++++++++++++-------------------------- 1 file changed, 42 insertions(+), 41 deletions(-) (limited to 'net') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d1815122298d..2c61fb912772 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2383,45 +2383,43 @@ static int rtnl_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { - if (dev) { - if (tb[IFLA_ADDRESS] && - nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) - return -EINVAL; + if (tb[IFLA_ADDRESS] && + nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) + return -EINVAL; - if (tb[IFLA_BROADCAST] && - nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) - return -EINVAL; + if (tb[IFLA_BROADCAST] && + nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) + return -EINVAL; - if (tb[IFLA_GSO_MAX_SIZE] && - nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) { - NL_SET_ERR_MSG(extack, "too big gso_max_size"); - return -EINVAL; - } + if (tb[IFLA_GSO_MAX_SIZE] && + nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) { + NL_SET_ERR_MSG(extack, "too big gso_max_size"); + return -EINVAL; + } - if (tb[IFLA_GSO_MAX_SEGS] && - (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS || - nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) { - NL_SET_ERR_MSG(extack, "too big gso_max_segs"); - return -EINVAL; - } + if (tb[IFLA_GSO_MAX_SEGS] && + (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS || + nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) { + NL_SET_ERR_MSG(extack, "too big gso_max_segs"); + return -EINVAL; + } - if (tb[IFLA_GRO_MAX_SIZE] && - nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) { - NL_SET_ERR_MSG(extack, "too big gro_max_size"); - return -EINVAL; - } + if (tb[IFLA_GRO_MAX_SIZE] && + nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) { + NL_SET_ERR_MSG(extack, "too big gro_max_size"); + return -EINVAL; + } - if (tb[IFLA_GSO_IPV4_MAX_SIZE] && - nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) { - NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size"); - return -EINVAL; - } + if (tb[IFLA_GSO_IPV4_MAX_SIZE] && + nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) { + NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size"); + return -EINVAL; + } - if (tb[IFLA_GRO_IPV4_MAX_SIZE] && - nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) { - NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size"); - return -EINVAL; - } + if (tb[IFLA_GRO_IPV4_MAX_SIZE] && + nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) { + NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size"); + return -EINVAL; } if (tb[IFLA_AF_SPEC]) { @@ -2742,10 +2740,6 @@ static int do_setlink(const struct sk_buff *skb, char ifname[IFNAMSIZ]; int err; - err = validate_linkmsg(dev, tb, extack); - if (err < 0) - return err; - if (tb[IFLA_IFNAME]) nla_strscpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ); else @@ -3162,6 +3156,10 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, goto errout; } + err = validate_linkmsg(dev, tb, extack); + if (err < 0) + goto errout; + err = do_setlink(skb, dev, ifm, extack, tb, 0); errout: return err; @@ -3405,6 +3403,9 @@ static int rtnl_group_changelink(const struct sk_buff *skb, for_each_netdev_safe(net, dev, aux) { if (dev->group == group) { + err = 
validate_linkmsg(dev, tb, extack); + if (err < 0) + return err; err = do_setlink(skb, dev, ifm, extack, tb, 0); if (err < 0) return err; @@ -3562,10 +3563,6 @@ replay: m_ops = master_dev->rtnl_link_ops; } - err = validate_linkmsg(dev, tb, extack); - if (err < 0) - return err; - if (tb[IFLA_LINKINFO]) { err = nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO], @@ -3629,6 +3626,10 @@ replay: if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; + err = validate_linkmsg(dev, tb, extack); + if (err < 0) + return err; + if (linkinfo[IFLA_INFO_DATA]) { if (!ops || ops != dev->rtnl_link_ops || !ops->changelink) -- cgit v1.2.3 From 70f7457ad6d655e65f1b93cbba2a519e4b11c946 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 12 Jun 2023 14:49:43 -0700 Subject: net: create device lookup API with reference tracking New users of dev_get_by_index() and dev_get_by_name() keep getting added and it would be nice to steer them towards the APIs with reference tracking. Add variants of those calls which allocate the reference tracker and use them in a couple of places. Signed-off-by: Jakub Kicinski Reviewed-by: Eric Dumazet Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/linux/netdevice.h | 4 +++ net/core/dev.c | 63 +++++++++++++++++++++++++++++++++-------------- net/ethtool/netlink.c | 10 ++++---- net/ipv6/route.c | 12 ++++----- 4 files changed, 60 insertions(+), 29 deletions(-) (limited to 'net') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2d6cb2bf2f05..acf706d49c2b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3124,6 +3124,10 @@ struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, struct sock *sk); struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); +struct net_device *netdev_get_by_index(struct net *net, int ifindex, + netdevice_tracker *tracker, gfp_t gfp); +struct net_device *netdev_get_by_name(struct net *net, const char *name, + netdevice_tracker *tracker, gfp_t gfp); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); struct net_device *dev_get_by_napi_id(unsigned int napi_id); int dev_restart(struct net_device *dev); diff --git a/net/core/dev.c b/net/core/dev.c index c2456b3667fe..63abb0463c24 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -758,29 +758,43 @@ struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) } EXPORT_SYMBOL(dev_get_by_name_rcu); +/* Deprecated for new users, call netdev_get_by_name() instead */ +struct net_device *dev_get_by_name(struct net *net, const char *name) +{ + struct net_device *dev; + + rcu_read_lock(); + dev = dev_get_by_name_rcu(net, name); + dev_hold(dev); + rcu_read_unlock(); + return dev; +} +EXPORT_SYMBOL(dev_get_by_name); + /** - * dev_get_by_name - find a device by its name + * netdev_get_by_name() - find a device by its name * @net: the applicable net namespace * @name: name to find + * @tracker: tracking object for the acquired reference + * @gfp: allocation flags for the tracker * * Find an interface by name. This can be called from any * context and does its own locking. The returned handle has - * the usage count incremented and the caller must use dev_put() to + * the usage count incremented and the caller must use netdev_put() to * release it when it is no longer needed. %NULL is returned if no * matching device is found. 
*/ - -struct net_device *dev_get_by_name(struct net *net, const char *name) +struct net_device *netdev_get_by_name(struct net *net, const char *name, + netdevice_tracker *tracker, gfp_t gfp) { struct net_device *dev; - rcu_read_lock(); - dev = dev_get_by_name_rcu(net, name); - dev_hold(dev); - rcu_read_unlock(); + dev = dev_get_by_name(net, name); + if (dev) + netdev_tracker_alloc(dev, tracker, gfp); return dev; } -EXPORT_SYMBOL(dev_get_by_name); +EXPORT_SYMBOL(netdev_get_by_name); /** * __dev_get_by_index - find a device by its ifindex @@ -831,29 +845,42 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) } EXPORT_SYMBOL(dev_get_by_index_rcu); +/* Deprecated for new users, call netdev_get_by_index() instead */ +struct net_device *dev_get_by_index(struct net *net, int ifindex) +{ + struct net_device *dev; + + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, ifindex); + dev_hold(dev); + rcu_read_unlock(); + return dev; +} +EXPORT_SYMBOL(dev_get_by_index); /** - * dev_get_by_index - find a device by its ifindex + * netdev_get_by_index() - find a device by its ifindex * @net: the applicable net namespace * @ifindex: index of device + * @tracker: tracking object for the acquired reference + * @gfp: allocation flags for the tracker * * Search for an interface by index. Returns NULL if the device * is not found or a pointer to the device. The device returned has * had a reference added and the pointer is safe until the user calls - * dev_put to indicate they have finished with it. + * netdev_put() to indicate they have finished with it. */ - -struct net_device *dev_get_by_index(struct net *net, int ifindex) +struct net_device *netdev_get_by_index(struct net *net, int ifindex, + netdevice_tracker *tracker, gfp_t gfp) { struct net_device *dev; - rcu_read_lock(); - dev = dev_get_by_index_rcu(net, ifindex); - dev_hold(dev); - rcu_read_unlock(); + dev = dev_get_by_index(net, ifindex); + if (dev) + netdev_tracker_alloc(dev, tracker, gfp); return dev; } -EXPORT_SYMBOL(dev_get_by_index); +EXPORT_SYMBOL(netdev_get_by_index); /** * dev_get_by_napi_id - find a device by napi_id diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 5dd5e8222c45..39a459b0111b 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -115,7 +115,8 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, if (tb[ETHTOOL_A_HEADER_DEV_INDEX]) { u32 ifindex = nla_get_u32(tb[ETHTOOL_A_HEADER_DEV_INDEX]); - dev = dev_get_by_index(net, ifindex); + dev = netdev_get_by_index(net, ifindex, &req_info->dev_tracker, + GFP_KERNEL); if (!dev) { NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_HEADER_DEV_INDEX], @@ -125,13 +126,14 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, /* if both ifindex and ifname are passed, they must match */ if (devname_attr && strncmp(dev->name, nla_data(devname_attr), IFNAMSIZ)) { - dev_put(dev); + netdev_put(dev, &req_info->dev_tracker); NL_SET_ERR_MSG_ATTR(extack, header, "ifindex and name do not match"); return -ENODEV; } } else if (devname_attr) { - dev = dev_get_by_name(net, nla_data(devname_attr)); + dev = netdev_get_by_name(net, nla_data(devname_attr), + &req_info->dev_tracker, GFP_KERNEL); if (!dev) { NL_SET_ERR_MSG_ATTR(extack, devname_attr, "no device matches name"); @@ -144,8 +146,6 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, } req_info->dev = dev; - if (dev) - netdev_tracker_alloc(dev, &req_info->dev_tracker, GFP_KERNEL); req_info->flags = flags; return 0; } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 
392aaa373b66..e510a4162ef8 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3503,6 +3503,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack) { + netdevice_tracker *dev_tracker = &fib6_nh->fib_nh_dev_tracker; struct net_device *dev = NULL; struct inet6_dev *idev = NULL; int addr_type; @@ -3520,7 +3521,8 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, err = -ENODEV; if (cfg->fc_ifindex) { - dev = dev_get_by_index(net, cfg->fc_ifindex); + dev = netdev_get_by_index(net, cfg->fc_ifindex, + dev_tracker, gfp_flags); if (!dev) goto out; idev = in6_dev_get(dev); @@ -3554,11 +3556,11 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, /* hold loopback dev/idev if we haven't done so. */ if (dev != net->loopback_dev) { if (dev) { - dev_put(dev); + netdev_put(dev, dev_tracker); in6_dev_put(idev); } dev = net->loopback_dev; - dev_hold(dev); + netdev_hold(dev, dev_tracker, gfp_flags); idev = in6_dev_get(dev); if (!idev) { err = -ENODEV; @@ -3610,8 +3612,6 @@ pcpu_alloc: } fib6_nh->fib_nh_dev = dev; - netdev_tracker_alloc(dev, &fib6_nh->fib_nh_dev_tracker, gfp_flags); - fib6_nh->fib_nh_oif = dev->ifindex; err = 0; out: @@ -3621,7 +3621,7 @@ out: if (err) { lwtstate_put(fib6_nh->fib_nh_lws); fib6_nh->fib_nh_lws = NULL; - dev_put(dev); + netdev_put(dev, dev_tracker); } return err; -- cgit v1.2.3 From 48eed027d310326052655c5ac14588855663087b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 12 Jun 2023 14:49:44 -0700 Subject: netpoll: allocate netdev tracker right away Commit 5fa5ae605821 ("netpoll: add net device refcount tracker to struct netpoll") was part of one of the initial netdev tracker introduction patches. It added an explicit netdev_tracker_alloc() for netpoll, presumably because the flow of the function is somewhat odd. After most of the core networking stack was converted to use the tracking hold() variants, netpoll's call to old dev_hold() stands out a bit. np is allocated by the caller and ready to use, we can use netdev_hold() here, even tho np->ndev will only be set to ndev inside __netpoll_setup(). Reviewed-by: Eric Dumazet Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- net/core/netpoll.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/core/netpoll.c b/net/core/netpoll.c index e6a739b1afa9..543007f159f9 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -690,7 +690,7 @@ int netpoll_setup(struct netpoll *np) err = -ENODEV; goto unlock; } - dev_hold(ndev); + netdev_hold(ndev, &np->dev_tracker, GFP_KERNEL); if (netdev_master_upper_dev_get(ndev)) { np_err(np, "%s is a slave device, aborting\n", np->dev_name); @@ -783,12 +783,11 @@ put_noaddr: err = __netpoll_setup(np, ndev); if (err) goto put; - netdev_tracker_alloc(ndev, &np->dev_tracker, GFP_KERNEL); rtnl_unlock(); return 0; put: - dev_put(ndev); + netdev_put(ndev, &np->dev_tracker); unlock: rtnl_unlock(); return err; -- cgit v1.2.3 From ed3c9a2fcab3b60b0766eb5d7566fd3b10df9a8e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 13 Jun 2023 13:50:06 -0700 Subject: net: tls: make the offload check helper take skb not socket All callers of tls_is_sk_tx_device_offloaded() currently do an equivalent of: if (skb->sk && tls_is_skb_tx_device_offloaded(skb->sk)) Have the helper accept skb and do the skb->sk check locally. Two drivers have local static inlines with similar wrappers already. While at it change the ifdef condition to TLS_DEVICE. 
Only TLS_DEVICE selects SOCK_VALIDATE_XMIT, so the two are equivalent. This makes removing the duplicated IS_ENABLED() check in funeth more obviously correct. Signed-off-by: Jakub Kicinski Acked-by: Maxim Mikityanskiy Reviewed-by: Simon Horman Acked-by: Tariq Toukan Acked-by: Dimitris Michailidis Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 4 ++-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 5 ----- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2 +- drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 2 +- drivers/net/ethernet/fungible/funeth/funeth_tx.c | 3 +-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 5 ----- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 4 ++-- include/net/tls.h | 8 +++++--- net/tls/tls_device.c | 4 ++-- 12 files changed, 17 insertions(+), 26 deletions(-) (limited to 'net') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 007cec23a92f..16405b84dc2f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5442,7 +5442,7 @@ static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *sk { struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev); - /* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded + /* tls_netdev might become NULL, even if tls_is_skb_tx_device_offloaded * was true, if tls_device_down is running in parallel, but it's OK, * because bond_get_slave_by_dev has a NULL check. */ @@ -5461,7 +5461,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev return NETDEV_TX_OK; #if IS_ENABLED(CONFIG_TLS_DEVICE) - if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk)) + if (tls_is_skb_tx_device_offloaded(skb)) return bond_tls_device_xmit(bond, skb, dev); #endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index f0bc7396ce2b..2eb33a727bba 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1175,7 +1175,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb, txq = netdev_pick_tx(dev, skb, sb_dev); if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) || skb->encapsulation || - cxgb4_is_ktls_skb(skb) || + tls_is_skb_tx_device_offloaded(skb) || (proto != IPPROTO_TCP && proto != IPPROTO_UDP)) txq = txq % pi->nqsets; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 34546f5312ee..a9599ba26975 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -497,11 +497,6 @@ struct cxgb4_uld_info { #endif }; -static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb) -{ - return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk); -} - void cxgb4_uld_enable(struct adapter *adap); void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); int cxgb4_unregister_uld(enum cxgb4_uld type); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 46809e2d94ee..98dd78551d89 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1530,7 +1530,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct 
net_device *dev) #endif /* CHELSIO_IPSEC_INLINE */ #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) - if (cxgb4_is_ktls_skb(skb) && + if (tls_is_skb_tx_device_offloaded(skb) && (skb->len - skb_tcp_all_headers(skb))) return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev); #endif /* CHELSIO_TLS_DEVICE */ diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index 1a5fdd755e9e..bcdc7fc2f427 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -1946,7 +1946,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) tls_ctx = tls_get_ctx(skb->sk); tls_netdev = rcu_dereference_bh(tls_ctx->netdev); /* Don't quit on NULL: if tls_device_down is running in parallel, - * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was + * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was * true. Rather continue processing this packet. */ if (unlikely(tls_netdev && tls_netdev != dev)) diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c index 706d81e39a54..8ddefd3ec15b 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_tx.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c @@ -348,8 +348,7 @@ netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned int tls_len = 0; unsigned int ndesc; - if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk && - tls_is_sk_tx_device_offloaded(skb->sk)) { + if (tls_is_skb_tx_device_offloaded(skb)) { skb = fun_tls_tx(skb, q, &tls_len); if (unlikely(!skb)) goto dropped; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index c964644ee866..bac4717548c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -125,7 +125,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev, #ifdef CONFIG_MLX5_EN_TLS /* May send WQEs. */ - if (mlx5e_ktls_skb_offloaded(skb)) + if (tls_is_skb_tx_device_offloaded(skb)) if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb, &state->tls))) return false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 0e4c0a093293..efb2cf74ad6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -846,7 +846,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, tls_ctx = tls_get_ctx(skb->sk); tls_netdev = rcu_dereference_bh(tls_ctx->netdev); /* Don't WARN on NULL: if tls_device_down is running in parallel, - * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was + * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was * true. Rather continue processing this packet. 
*/ if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h index 2dd78dd4ad65..f87b65c560ea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -49,11 +49,6 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget) return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state); } -static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb) -{ - return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk); -} - static inline void mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg, struct mlx5e_accel_tx_tls_state *state) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index b7cce746b5c0..49f2f081ebb5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -598,7 +598,7 @@ nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, if (likely(!dp->ktls_tx)) return skb; - if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)) + if (!tls_is_skb_tx_device_offloaded(skb)) return skb; datalen = skb->len - skb_tcp_all_headers(skb); @@ -666,7 +666,7 @@ void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle) if (!tls_handle) return; - if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))) + if (WARN_ON_ONCE(!tls_is_skb_tx_device_offloaded(skb))) return; datalen = skb->len - skb_tcp_all_headers(skb); diff --git a/include/net/tls.h b/include/net/tls.h index b7d0f1e3058b..5e71dd3df8ca 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -370,10 +370,12 @@ struct sk_buff * tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev, struct sk_buff *skb); -static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) +static inline bool tls_is_skb_tx_device_offloaded(const struct sk_buff *skb) { -#ifdef CONFIG_SOCK_VALIDATE_XMIT - return sk_fullsock(sk) && +#ifdef CONFIG_TLS_DEVICE + struct sock *sk = skb->sk; + + return sk && sk_fullsock(sk) && (smp_load_acquire(&sk->sk_validate_xmit_skb) == &tls_validate_xmit_skb); #else diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index b4864d55900f..b82770f68807 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -1219,7 +1219,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) tls_device_attach(ctx, sk, netdev); up_read(&device_offload_lock); - /* following this assignment tls_is_sk_tx_device_offloaded + /* following this assignment tls_is_skb_tx_device_offloaded * will return true and the context might be accessed * by the netdev's xmit function. */ @@ -1372,7 +1372,7 @@ static int tls_device_down(struct net_device *netdev) list_for_each_entry_safe(ctx, tmp, &list, list) { /* Stop offloaded TX and switch to the fallback. - * tls_is_sk_tx_device_offloaded will return false. + * tls_is_skb_tx_device_offloaded will return false. */ WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw); -- cgit v1.2.3 From e1d001fa5b477c4da46a29be1fcece91db7c7c6f Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Fri, 9 Jun 2023 08:27:42 -0700 Subject: net: ioctl: Use kernel memory on protocol ioctl callbacks Most of the ioctls to net protocols operates directly on userspace argument (arg). Usually doing get_user()/put_user() directly in the ioctl callback. 
This is not flexible, because it is hard to reuse these functions without passing userspace buffers. Change the "struct proto" ioctls to avoid touching userspace memory and operate on kernel buffers, i.e., all protocol ioctl callbacks are adapted to operate on kernel memory instead of userspace memory (so, no more {put,get}_user() and friends being called in the ioctl callbacks). This changes the "struct proto" ioctl format in the following way:

    int (*ioctl)(struct sock *sk, int cmd,
-                unsigned long arg);
+                int *karg);

(Important to say that this patch does not touch the "struct proto_ops" protocols.) So, the "karg" argument, which is passed to the ioctl callback, is a pointer to kernel memory allocated inside a wrapper function. This buffer (karg) may contain an input argument (copied from userspace in a prep function) and it may return a value/buffer, which is copied back to userspace if necessary. There is no one-size-fits-all format (that is why I used 'may' above); basically, there are three types of ioctls:

1) Do not read from userspace; return a result to userspace
2) Read an input parameter from userspace; do not return anything to userspace
3) Read an input from userspace; return a buffer to userspace.

The default case (1) (where no input parameter is given, and an "int" is returned to userspace) covers more than 90% of the cases. The exceptions are:

* Protocol RAW:
  * cmd = SIOCGETVIFCNT:
    * input and output = struct sioc_vif_req
  * cmd = SIOCGETSGCNT
    * input and output = struct sioc_sg_req
  * Explanation: for the SIOCGETVIFCNT case, userspace passes the input argument, which is struct sioc_vif_req. Then the callback populates the struct, which is copied back to userspace.
* Protocol RAW6:
  * cmd = SIOCGETMIFCNT_IN6
    * input and output = struct sioc_mif_req6
  * cmd = SIOCGETSGCNT_IN6
    * input and output = struct sioc_sg_req6
* Protocol PHONET:
  * cmd == SIOCPNADDRESOURCE | SIOCPNDELRESOURCE
    * input: int (4 bytes)
    * Nothing is copied back to userspace.

For the exception cases, sock_ioctl_inout() copies the userspace input into kernel space and copies the result back to userspace. The wrapper that prepares the buffer and copies the result back to userspace is sk_ioctl(), so, instead of calling sk->sk_prot->ioctl(), callers now call sk_ioctl(), which handles all cases.
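To make the new convention concrete, a minimal sketch of a "type 1" callback (illustrative only, not part of the patch; foo_ioctl is a made-up name):

    /* hypothetical callback using the new struct proto signature: it fills
     * *karg and never touches userspace memory itself
     */
    static int foo_ioctl(struct sock *sk, int cmd, int *karg)
    {
            switch (cmd) {
            case SIOCOUTQ:
                    *karg = sk_wmem_alloc_get(sk);
                    return 0;
            default:
                    return -ENOIOCTLCMD;
            }
    }

    /* callers then go through sk_ioctl(), which for the default case copies
     * *karg back to userspace with a single put_user()
     */
    err = sk_ioctl(sk, cmd, (void __user *)arg);

The SIOCGETVIFCNT-style exceptions instead dispatch to sock_ioctl_inout(), which copies the request structure in before the callback and back out after it.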
Signed-off-by: Breno Leitao Reviewed-by: Willem de Bruijn Reviewed-by: David Ahern Reviewed-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20230609152800.830401-1-leitao@debian.org Signed-off-by: Jakub Kicinski --- include/linux/icmpv6.h | 6 +++++ include/linux/mroute.h | 22 ++++++++++++++-- include/linux/mroute6.h | 31 ++++++++++++++++++++-- include/net/phonet/phonet.h | 21 +++++++++++++++ include/net/sock.h | 5 +++- include/net/tcp.h | 2 +- include/net/udp.h | 2 +- net/core/sock.c | 64 +++++++++++++++++++++++++++++++++++++++++++++ net/dccp/dccp.h | 2 +- net/dccp/proto.c | 12 ++++----- net/ieee802154/socket.c | 15 +++++------ net/ipv4/af_inet.c | 2 +- net/ipv4/ipmr.c | 63 +++++++++++++++++++++++++++----------------- net/ipv4/raw.c | 16 ++++++------ net/ipv4/tcp.c | 5 ++-- net/ipv4/udp.c | 12 ++++----- net/ipv6/af_inet6.c | 2 +- net/ipv6/ip6mr.c | 44 +++++++++++++------------------ net/ipv6/raw.c | 16 ++++++------ net/l2tp/l2tp_core.h | 2 +- net/l2tp/l2tp_ip.c | 9 +++---- net/mptcp/protocol.c | 11 ++++---- net/phonet/datagram.c | 11 +++----- net/phonet/pep.c | 11 ++++---- net/phonet/socket.c | 2 +- net/sctp/socket.c | 8 +++--- 26 files changed, 267 insertions(+), 129 deletions(-) (limited to 'net') diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index db0f4fcfdaf4..1fe33e6741cc 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -111,4 +111,10 @@ static inline bool icmpv6_is_err(int type) return false; } +static inline int sk_is_icmpv6(struct sock *sk) +{ + return sk->sk_family == AF_INET6 && + inet_sk(sk)->inet_num == IPPROTO_ICMPV6; +} + #endif diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 80b8400ab8b2..94c6e6f549f0 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -16,12 +16,19 @@ static inline int ip_mroute_opt(int opt) return opt >= MRT_BASE && opt <= MRT_MAX; } +static inline int sk_is_ipmr(struct sock *sk) +{ + return sk->sk_family == AF_INET && + inet_sk(sk)->inet_num == IPPROTO_IGMP; +} + int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); int ip_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t); -int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); +int ipmr_ioctl(struct sock *sk, int cmd, void *arg); int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); int ip_mr_init(void); bool ipmr_rule_default(const struct fib_rule *rule); +int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); #else static inline int ip_mroute_setsockopt(struct sock *sock, int optname, sockptr_t optval, unsigned int optlen) @@ -35,7 +42,7 @@ static inline int ip_mroute_getsockopt(struct sock *sk, int optname, return -ENOPROTOOPT; } -static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) +static inline int ipmr_ioctl(struct sock *sk, int cmd, void *arg) { return -ENOIOCTLCMD; } @@ -50,10 +57,21 @@ static inline int ip_mroute_opt(int opt) return 0; } +static inline int sk_is_ipmr(struct sock *sk) +{ + return 0; +} + static inline bool ipmr_rule_default(const struct fib_rule *rule) { return true; } + +static inline int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd, + void __user *arg) +{ + return 1; +} #endif #define VIFF_STATIC 0x8000 diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index 8f2b307fb124..2f95d5b4e47a 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -29,10 +29,10 @@ struct sock; extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); extern int 
ip6_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t); extern int ip6_mr_input(struct sk_buff *skb); -extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); extern int ip6_mr_init(void); extern void ip6_mr_cleanup(void); +int ip6mr_ioctl(struct sock *sk, int cmd, void *arg); #else static inline int ip6_mroute_setsockopt(struct sock *sock, int optname, sockptr_t optval, unsigned int optlen) @@ -48,7 +48,7 @@ int ip6_mroute_getsockopt(struct sock *sock, } static inline -int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) +int ip6mr_ioctl(struct sock *sk, int cmd, void *arg) { return -ENOIOCTLCMD; } @@ -100,6 +100,27 @@ extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, #ifdef CONFIG_IPV6_MROUTE bool mroute6_is_socket(struct net *net, struct sk_buff *skb); extern int ip6mr_sk_done(struct sock *sk); +static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd, + void __user *arg) +{ + switch (cmd) { + /* These userspace buffers will be consumed by ip6mr_ioctl() */ + case SIOCGETMIFCNT_IN6: { + struct sioc_mif_req6 buffer; + + return sock_ioctl_inout(sk, cmd, arg, &buffer, + sizeof(buffer)); + } + case SIOCGETSGCNT_IN6: { + struct sioc_mif_req6 buffer; + + return sock_ioctl_inout(sk, cmd, arg, &buffer, + sizeof(buffer)); + } + } + + return 1; +} #else static inline bool mroute6_is_socket(struct net *net, struct sk_buff *skb) { @@ -109,5 +130,11 @@ static inline int ip6mr_sk_done(struct sock *sk) { return 0; } + +static inline int ip6mr_sk_ioctl(struct sock *sk, unsigned int cmd, + void __user *arg) +{ + return 1; +} #endif #endif diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h index 862f1719b523..cf5ecae4a2fc 100644 --- a/include/net/phonet/phonet.h +++ b/include/net/phonet/phonet.h @@ -109,4 +109,25 @@ void phonet_sysctl_exit(void); int isi_register(void); void isi_unregister(void); +static inline bool sk_is_phonet(struct sock *sk) +{ + return sk->sk_family == PF_PHONET; +} + +static inline int phonet_sk_ioctl(struct sock *sk, unsigned int cmd, + void __user *arg) +{ + int karg; + + switch (cmd) { + case SIOCPNADDRESOURCE: + case SIOCPNDELRESOURCE: + if (get_user(karg, (int __user *)arg)) + return -EFAULT; + + return sk->sk_prot->ioctl(sk, cmd, &karg); + } + /* A positive return value means that the ioctl was not processed */ + return 1; +} #endif diff --git a/include/net/sock.h b/include/net/sock.h index 2790133b4b76..62a1b99da349 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1258,7 +1258,7 @@ struct proto { bool kern); int (*ioctl)(struct sock *sk, int cmd, - unsigned long arg); + int *karg); int (*init)(struct sock *sk); void (*destroy)(struct sock *sk); void (*shutdown)(struct sock *sk, int how); @@ -2974,6 +2974,9 @@ int sock_get_timeout(long timeo, void *optval, bool old_timeval); int sock_copy_user_timeval(struct __kernel_sock_timeval *tv, sockptr_t optval, int optlen, bool old_timeval); +int sock_ioctl_inout(struct sock *sk, unsigned int cmd, + void __user *arg, void *karg, size_t size); +int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); static inline bool sk_is_readable(struct sock *sk) { if (sk->sk_prot->sock_is_readable) diff --git a/include/net/tcp.h b/include/net/tcp.h index bf9f56225821..9c08eab647a2 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -340,7 +340,7 @@ void tcp_release_cb(struct sock *sk); void tcp_wfree(struct sk_buff *skb); void tcp_write_timer_handler(struct sock 
*sk); void tcp_delack_timer_handler(struct sock *sk); -int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); +int tcp_ioctl(struct sock *sk, int cmd, int *karg); int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb); void tcp_rcv_established(struct sock *sk, struct sk_buff *skb); void tcp_rcv_space_adjust(struct sock *sk); diff --git a/include/net/udp.h b/include/net/udp.h index e01340a27155..4d13424f8f72 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -285,7 +285,7 @@ void udp_flush_pending_frames(struct sock *sk); int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size); void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst); int udp_rcv(struct sk_buff *skb); -int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); +int udp_ioctl(struct sock *sk, int cmd, int *karg); int udp_init_sock(struct sock *sk); int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int __udp_disconnect(struct sock *sk, int flags); diff --git a/net/core/sock.c b/net/core/sock.c index ea66b1afadd0..cff3e82514d1 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -114,6 +114,9 @@ #include #include #include +#include +#include +#include #include @@ -138,6 +141,7 @@ #include #include +#include #include @@ -4150,3 +4154,63 @@ int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len) return sk->sk_prot->bind_add(sk, addr, addr_len); } EXPORT_SYMBOL(sock_bind_add); + +/* Copy 'size' bytes from userspace and return `size` back to userspace */ +int sock_ioctl_inout(struct sock *sk, unsigned int cmd, + void __user *arg, void *karg, size_t size) +{ + int ret; + + if (copy_from_user(karg, arg, size)) + return -EFAULT; + + ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, karg); + if (ret) + return ret; + + if (copy_to_user(arg, karg, size)) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL(sock_ioctl_inout); + +/* This is the most common ioctl prep function, where the result (4 bytes) is + * copied back to userspace if the ioctl() returns successfully. No input is + * copied from userspace as input argument. + */ +static int sock_ioctl_out(struct sock *sk, unsigned int cmd, void __user *arg) +{ + int ret, karg = 0; + + ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, &karg); + if (ret) + return ret; + + return put_user(karg, (int __user *)arg); +} + +/* A wrapper around sock ioctls, which copies the data from userspace + * (depending on the protocol/ioctl), and copies back the result to userspace. + * The main motivation for this function is to pass kernel memory to the + * protocol ioctl callbacks, instead of userspace memory. 
+ */ +int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) +{ + int rc = 1; + + if (sk_is_ipmr(sk)) + rc = ipmr_sk_ioctl(sk, cmd, arg); + else if (sk_is_icmpv6(sk)) + rc = ip6mr_sk_ioctl(sk, cmd, arg); + else if (sk_is_phonet(sk)) + rc = phonet_sk_ioctl(sk, cmd, arg); + + /* If ioctl was processed, returns its value */ + if (rc <= 0) + return rc; + + /* Otherwise call the default handler */ + return sock_ioctl_out(sk, cmd, arg); +} +EXPORT_SYMBOL(sk_ioctl); diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 9ddc3a9e89e4..1f748ed1279d 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -292,7 +292,7 @@ int dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int dccp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); -int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); +int dccp_ioctl(struct sock *sk, int cmd, int *karg); int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len); diff --git a/net/dccp/proto.c b/net/dccp/proto.c index b0ebf853cb07..f331e5977a84 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -362,7 +362,7 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, EXPORT_SYMBOL_GPL(dccp_poll); -int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) +int dccp_ioctl(struct sock *sk, int cmd, int *karg) { int rc = -ENOTCONN; @@ -373,17 +373,17 @@ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) switch (cmd) { case SIOCOUTQ: { - int amount = sk_wmem_alloc_get(sk); + *karg = sk_wmem_alloc_get(sk); /* Using sk_wmem_alloc here because sk_wmem_queued is not used by DCCP and * always 0, comparably to UDP. */ - rc = put_user(amount, (int __user *)arg); + rc = 0; } break; case SIOCINQ: { struct sk_buff *skb; - unsigned long amount = 0; + *karg = 0; skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) { @@ -391,9 +391,9 @@ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) * We will only return the amount of this packet since * that is all that will be read. */ - amount = skb->len; + *karg = skb->len; } - rc = put_user(amount, (int __user *)arg); + rc = 0; } break; default: diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 1fa2fe041ec0..9c124705120d 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -162,7 +162,7 @@ static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd, default: if (!sk->sk_prot->ioctl) return -ENOIOCTLCMD; - return sk->sk_prot->ioctl(sk, cmd, arg); + return sk_ioctl(sk, cmd, (void __user *)arg); } } @@ -531,22 +531,21 @@ out: return err; } -static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg) +static int dgram_ioctl(struct sock *sk, int cmd, int *karg) { switch (cmd) { case SIOCOUTQ: { - int amount = sk_wmem_alloc_get(sk); + *karg = sk_wmem_alloc_get(sk); - return put_user(amount, (int __user *)arg); + return 0; } case SIOCINQ: { struct sk_buff *skb; - unsigned long amount; - amount = 0; + *karg = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) { @@ -554,10 +553,10 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg) * of this packet since that is all * that will be read. 
*/ - amount = skb->len - ieee802154_hdr_length(skb); + *karg = skb->len - ieee802154_hdr_length(skb); } spin_unlock_bh(&sk->sk_receive_queue.lock); - return put_user(amount, (int __user *)arg); + return 0; } } diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 0e16ac8282c5..38e649fb4474 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -998,7 +998,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) break; default: if (sk->sk_prot->ioctl) - err = sk->sk_prot->ioctl(sk, cmd, arg); + err = sk_ioctl(sk, cmd, (void __user *)arg); else err = -ENOIOCTLCMD; break; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index eec1f6df80d8..3f0c6d602fb7 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1547,6 +1547,28 @@ out: return ret; } +/* Execute if this ioctl is a special mroute ioctl */ +int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) +{ + switch (cmd) { + /* These userspace buffers will be consumed by ipmr_ioctl() */ + case SIOCGETVIFCNT: { + struct sioc_vif_req buffer; + + return sock_ioctl_inout(sk, cmd, arg, &buffer, + sizeof(buffer)); + } + case SIOCGETSGCNT: { + struct sioc_sg_req buffer; + + return sock_ioctl_inout(sk, cmd, arg, &buffer, + sizeof(buffer)); + } + } + /* return code > 0 means that the ioctl was not executed */ + return 1; +} + /* Getsock opt support for the multicast routing system. */ int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval, sockptr_t optlen) @@ -1593,13 +1615,13 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval, } /* The IP multicast ioctl support routines. */ -int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) +int ipmr_ioctl(struct sock *sk, int cmd, void *arg) { - struct sioc_sg_req sr; - struct sioc_vif_req vr; struct vif_device *vif; struct mfc_cache *c; struct net *net = sock_net(sk); + struct sioc_vif_req *vr; + struct sioc_sg_req *sr; struct mr_table *mrt; mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? 
: RT_TABLE_DEFAULT); @@ -1608,40 +1630,33 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg) switch (cmd) { case SIOCGETVIFCNT: - if (copy_from_user(&vr, arg, sizeof(vr))) - return -EFAULT; - if (vr.vifi >= mrt->maxvif) + vr = (struct sioc_vif_req *)arg; + if (vr->vifi >= mrt->maxvif) return -EINVAL; - vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif); + vr->vifi = array_index_nospec(vr->vifi, mrt->maxvif); rcu_read_lock(); - vif = &mrt->vif_table[vr.vifi]; - if (VIF_EXISTS(mrt, vr.vifi)) { - vr.icount = READ_ONCE(vif->pkt_in); - vr.ocount = READ_ONCE(vif->pkt_out); - vr.ibytes = READ_ONCE(vif->bytes_in); - vr.obytes = READ_ONCE(vif->bytes_out); + vif = &mrt->vif_table[vr->vifi]; + if (VIF_EXISTS(mrt, vr->vifi)) { + vr->icount = READ_ONCE(vif->pkt_in); + vr->ocount = READ_ONCE(vif->pkt_out); + vr->ibytes = READ_ONCE(vif->bytes_in); + vr->obytes = READ_ONCE(vif->bytes_out); rcu_read_unlock(); - if (copy_to_user(arg, &vr, sizeof(vr))) - return -EFAULT; return 0; } rcu_read_unlock(); return -EADDRNOTAVAIL; case SIOCGETSGCNT: - if (copy_from_user(&sr, arg, sizeof(sr))) - return -EFAULT; + sr = (struct sioc_sg_req *)arg; rcu_read_lock(); - c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr); + c = ipmr_cache_find(mrt, sr->src.s_addr, sr->grp.s_addr); if (c) { - sr.pktcnt = c->_c.mfc_un.res.pkt; - sr.bytecnt = c->_c.mfc_un.res.bytes; - sr.wrong_if = c->_c.mfc_un.res.wrong_if; + sr->pktcnt = c->_c.mfc_un.res.pkt; + sr->bytecnt = c->_c.mfc_un.res.bytes; + sr->wrong_if = c->_c.mfc_un.res.wrong_if; rcu_read_unlock(); - - if (copy_to_user(arg, &sr, sizeof(sr))) - return -EFAULT; return 0; } rcu_read_unlock(); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 9aacce9db7b9..7782ff5e6539 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -856,29 +856,29 @@ static int raw_getsockopt(struct sock *sk, int level, int optname, return do_raw_getsockopt(sk, level, optname, optval, optlen); } -static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) +static int raw_ioctl(struct sock *sk, int cmd, int *karg) { switch (cmd) { case SIOCOUTQ: { - int amount = sk_wmem_alloc_get(sk); - - return put_user(amount, (int __user *)arg); + *karg = sk_wmem_alloc_get(sk); + return 0; } case SIOCINQ: { struct sk_buff *skb; - int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) - amount = skb->len; + *karg = skb->len; + else + *karg = 0; spin_unlock_bh(&sk->sk_receive_queue.lock); - return put_user(amount, (int __user *)arg); + return 0; } default: #ifdef CONFIG_IP_MROUTE - return ipmr_ioctl(sk, cmd, (void __user *)arg); + return ipmr_ioctl(sk, cmd, karg); #else return -ENOIOCTLCMD; #endif diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index fba6578bc98f..0e21ea92dc1d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -599,7 +599,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) } EXPORT_SYMBOL(tcp_poll); -int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) +int tcp_ioctl(struct sock *sk, int cmd, int *karg) { struct tcp_sock *tp = tcp_sk(sk); int answ; @@ -641,7 +641,8 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) return -ENOIOCTLCMD; } - return put_user(answ, (int __user *)arg); + *karg = answ; + return 0; } EXPORT_SYMBOL(tcp_ioctl); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 7e0542c10471..48fdcd3cad9c 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1692,21 +1692,19 @@ static int first_packet_length(struct sock *sk) * IOCTL requests applicable to the UDP protocol */ -int 
udp_ioctl(struct sock *sk, int cmd, unsigned long arg) +int udp_ioctl(struct sock *sk, int cmd, int *karg) { switch (cmd) { case SIOCOUTQ: { - int amount = sk_wmem_alloc_get(sk); - - return put_user(amount, (int __user *)arg); + *karg = sk_wmem_alloc_get(sk); + return 0; } case SIOCINQ: { - int amount = max_t(int, 0, first_packet_length(sk)); - - return put_user(amount, (int __user *)arg); + *karg = max_t(int, 0, first_packet_length(sk)); + return 0; } default: diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 564942bee067..b3451cf47d29 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -579,7 +579,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) prot = READ_ONCE(sk->sk_prot); if (!prot->ioctl) return -ENOIOCTLCMD; - return prot->ioctl(sk, cmd, arg); + return sk_ioctl(sk, cmd, (void __user *)arg); } /*NOTREACHED*/ return 0; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 51cf37abd142..cc3d5ad17257 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1879,11 +1879,10 @@ int ip6_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval, /* * The IP multicast ioctl support routines. */ - -int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) +int ip6mr_ioctl(struct sock *sk, int cmd, void *arg) { - struct sioc_sg_req6 sr; - struct sioc_mif_req6 vr; + struct sioc_sg_req6 *sr; + struct sioc_mif_req6 *vr; struct vif_device *vif; struct mfc6_cache *c; struct net *net = sock_net(sk); @@ -1895,40 +1894,33 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg) switch (cmd) { case SIOCGETMIFCNT_IN6: - if (copy_from_user(&vr, arg, sizeof(vr))) - return -EFAULT; - if (vr.mifi >= mrt->maxvif) + vr = (struct sioc_mif_req6 *)arg; + if (vr->mifi >= mrt->maxvif) return -EINVAL; - vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif); + vr->mifi = array_index_nospec(vr->mifi, mrt->maxvif); rcu_read_lock(); - vif = &mrt->vif_table[vr.mifi]; - if (VIF_EXISTS(mrt, vr.mifi)) { - vr.icount = READ_ONCE(vif->pkt_in); - vr.ocount = READ_ONCE(vif->pkt_out); - vr.ibytes = READ_ONCE(vif->bytes_in); - vr.obytes = READ_ONCE(vif->bytes_out); + vif = &mrt->vif_table[vr->mifi]; + if (VIF_EXISTS(mrt, vr->mifi)) { + vr->icount = READ_ONCE(vif->pkt_in); + vr->ocount = READ_ONCE(vif->pkt_out); + vr->ibytes = READ_ONCE(vif->bytes_in); + vr->obytes = READ_ONCE(vif->bytes_out); rcu_read_unlock(); - - if (copy_to_user(arg, &vr, sizeof(vr))) - return -EFAULT; return 0; } rcu_read_unlock(); return -EADDRNOTAVAIL; case SIOCGETSGCNT_IN6: - if (copy_from_user(&sr, arg, sizeof(sr))) - return -EFAULT; + sr = (struct sioc_sg_req6 *)arg; rcu_read_lock(); - c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr); + c = ip6mr_cache_find(mrt, &sr->src.sin6_addr, + &sr->grp.sin6_addr); if (c) { - sr.pktcnt = c->_c.mfc_un.res.pkt; - sr.bytecnt = c->_c.mfc_un.res.bytes; - sr.wrong_if = c->_c.mfc_un.res.wrong_if; + sr->pktcnt = c->_c.mfc_un.res.pkt; + sr->bytecnt = c->_c.mfc_un.res.bytes; + sr->wrong_if = c->_c.mfc_un.res.wrong_if; rcu_read_unlock(); - - if (copy_to_user(arg, &sr, sizeof(sr))) - return -EFAULT; return 0; } rcu_read_unlock(); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 44ee7a2e72ac..c9caeb5a43ed 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1118,29 +1118,29 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname, return do_rawv6_getsockopt(sk, level, optname, optval, optlen); } -static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) +static int rawv6_ioctl(struct sock *sk, int cmd, int *karg) { switch 
(cmd) { case SIOCOUTQ: { - int amount = sk_wmem_alloc_get(sk); - - return put_user(amount, (int __user *)arg); + *karg = sk_wmem_alloc_get(sk); + return 0; } case SIOCINQ: { struct sk_buff *skb; - int amount = 0; spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); if (skb) - amount = skb->len; + *karg = skb->len; + else + *karg = 0; spin_unlock_bh(&sk->sk_receive_queue.lock); - return put_user(amount, (int __user *)arg); + return 0; } default: #ifdef CONFIG_IPV6_MROUTE - return ip6mr_ioctl(sk, cmd, (void __user *)arg); + return ip6mr_ioctl(sk, cmd, karg); #else return -ENOIOCTLCMD; #endif diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index a88e070b431d..91ebf0a3f499 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -272,7 +272,7 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); /* IOCTL helper for IP encap modules. */ -int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg); +int l2tp_ioctl(struct sock *sk, int cmd, int *karg); /* Extract the tunnel structure from a socket's sk_user_data pointer, * validating the tunnel magic feather. diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 41a74fc84ca1..2b795c1064f5 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -562,19 +562,18 @@ out: return err ? err : copied; } -int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg) +int l2tp_ioctl(struct sock *sk, int cmd, int *karg) { struct sk_buff *skb; - int amount; switch (cmd) { case SIOCOUTQ: - amount = sk_wmem_alloc_get(sk); + *karg = sk_wmem_alloc_get(sk); break; case SIOCINQ: spin_lock_bh(&sk->sk_receive_queue.lock); skb = skb_peek(&sk->sk_receive_queue); - amount = skb ? skb->len : 0; + *karg = skb ? 
skb->len : 0; spin_unlock_bh(&sk->sk_receive_queue.lock); break; @@ -582,7 +581,7 @@ int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg) return -ENOIOCTLCMD; } - return put_user(amount, (int __user *)arg); + return 0; } EXPORT_SYMBOL_GPL(l2tp_ioctl); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 5df5cc0ffedc..992b89c75631 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -3570,11 +3570,10 @@ static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) return (int)delta; } -static int mptcp_ioctl(struct sock *sk, int cmd, unsigned long arg) +static int mptcp_ioctl(struct sock *sk, int cmd, int *karg) { struct mptcp_sock *msk = mptcp_sk(sk); bool slow; - int answ; switch (cmd) { case SIOCINQ: @@ -3583,24 +3582,24 @@ static int mptcp_ioctl(struct sock *sk, int cmd, unsigned long arg) lock_sock(sk); __mptcp_move_skbs(msk); - answ = mptcp_inq_hint(sk); + *karg = mptcp_inq_hint(sk); release_sock(sk); break; case SIOCOUTQ: slow = lock_sock_fast(sk); - answ = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una)); + *karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una)); unlock_sock_fast(sk, slow); break; case SIOCOUTQNSD: slow = lock_sock_fast(sk); - answ = mptcp_ioctl_outq(msk, msk->snd_nxt); + *karg = mptcp_ioctl_outq(msk, msk->snd_nxt); unlock_sock_fast(sk, slow); break; default: return -ENOIOCTLCMD; } - return put_user(answ, (int __user *)arg); + return 0; } static void mptcp_subflow_early_fallback(struct mptcp_sock *msk, diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c index ff5f49ab236e..3aa50dc7535b 100644 --- a/net/phonet/datagram.c +++ b/net/phonet/datagram.c @@ -28,24 +28,21 @@ static void pn_sock_close(struct sock *sk, long timeout) sk_common_release(sk); } -static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg) +static int pn_ioctl(struct sock *sk, int cmd, int *karg) { struct sk_buff *skb; - int answ; switch (cmd) { case SIOCINQ: lock_sock(sk); skb = skb_peek(&sk->sk_receive_queue); - answ = skb ? skb->len : 0; + *karg = skb ? 
skb->len : 0; release_sock(sk); - return put_user(answ, (int __user *)arg); + return 0; case SIOCPNADDRESOURCE: case SIOCPNDELRESOURCE: { - u32 res; - if (get_user(res, (u32 __user *)arg)) - return -EFAULT; + u32 res = *karg; if (res >= 256) return -EINVAL; if (cmd == SIOCPNADDRESOURCE) diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 83ea13a50690..faba31f2eff2 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -917,10 +917,9 @@ static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len) return 0; } -static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg) +static int pep_ioctl(struct sock *sk, int cmd, int *karg) { struct pep_sock *pn = pep_sk(sk); - int answ; int ret = -ENOIOCTLCMD; switch (cmd) { @@ -933,13 +932,13 @@ static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg) lock_sock(sk); if (sock_flag(sk, SOCK_URGINLINE) && !skb_queue_empty(&pn->ctrlreq_queue)) - answ = skb_peek(&pn->ctrlreq_queue)->len; + *karg = skb_peek(&pn->ctrlreq_queue)->len; else if (!skb_queue_empty(&sk->sk_receive_queue)) - answ = skb_peek(&sk->sk_receive_queue)->len; + *karg = skb_peek(&sk->sk_receive_queue)->len; else - answ = 0; + *karg = 0; release_sock(sk); - ret = put_user(answ, (int __user *)arg); + ret = 0; break; case SIOCPNENABLEPIPE: diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 71e2caf6ab85..967f9b4dc026 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -387,7 +387,7 @@ static int pn_socket_ioctl(struct socket *sock, unsigned int cmd, return put_user(handle, (__u16 __user *)arg); } - return sk->sk_prot->ioctl(sk, cmd, arg); + return sk_ioctl(sk, cmd, (void __user *)arg); } static int pn_socket_listen(struct socket *sock, int backlog) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a68e1d541b12..6554a357fe33 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4895,7 +4895,7 @@ out: } /* The SCTP ioctl handler. */ -static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) +static int sctp_ioctl(struct sock *sk, int cmd, int *karg) { int rc = -ENOTCONN; @@ -4911,7 +4911,7 @@ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) switch (cmd) { case SIOCINQ: { struct sk_buff *skb; - unsigned int amount = 0; + *karg = 0; skb = skb_peek(&sk->sk_receive_queue); if (skb != NULL) { @@ -4919,9 +4919,9 @@ static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) * We will only return the amount of this packet since * that is all that will be read. */ - amount = skb->len; + *karg = skb->len; } - rc = put_user(amount, (int __user *)arg); + rc = 0; break; } default: -- cgit v1.2.3 From ad72c4a06acc6762e84994ac2f722da7a07df34e Mon Sep 17 00:00:00 2001 From: Piotr Gardocki Date: Wed, 14 Jun 2023 16:53:00 +0200 Subject: net: add check for current MAC address in dev_set_mac_address In some cases it is possible for kernel to come with request to change primary MAC address to the address that is already set on the given interface. Add proper check to return fast from the function in these cases. 
An example of such case is adding an interface to bonding channel in balance-alb mode: modprobe bonding mode=balance-alb miimon=100 max_bonds=1 ip link set bond0 up ifenslave bond0 Signed-off-by: Piotr Gardocki Reviewed-by: Simon Horman Signed-off-by: Jakub Kicinski --- net/core/dev.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index 63abb0463c24..e4ff0adf5523 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8781,6 +8781,8 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; + if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) + return 0; err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); if (err) return err; -- cgit v1.2.3 From 5a6f6873606e03a0a95afe40ba5e84bb6e28a26f Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 14 Jun 2023 09:04:16 +0100 Subject: ip, ip6: Fix splice to raw and ping sockets Splicing to SOCK_RAW sockets may set MSG_SPLICE_PAGES, but in such a case, __ip_append_data() will call skb_splice_from_iter() to access the 'from' data, assuming it to point to a msghdr struct with an iter, instead of using the provided getfrag function to access it. In the case of raw_sendmsg(), however, this is not the case and 'from' will point to a raw_frag_vec struct and raw_getfrag() will be the frag-getting function. A similar issue may occur with rawv6_sendmsg(). Fix this by ignoring MSG_SPLICE_PAGES if getfrag != ip_generic_getfrag as ip_generic_getfrag() expects "from" to be a msghdr*, but the other getfrags don't. Note that this will prevent MSG_SPLICE_PAGES from being effective for udplite. This likely affects ping sockets too. udplite looks like it should be okay as it expects "from" to be a msghdr. 
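For context, a simplified sketch of the guard this patch adds (the real hunks follow below; the comments are editorial): the splice path is only safe when the opaque 'from' cookie really is a struct msghdr, which is true for ip_generic_getfrag() but not for raw_getfrag(), whose 'from' is a struct raw_frag_vec.

    /* __ip_append_data(), simplified: only allow the page-splice path when
     * the caller's getfrag is the generic one, i.e. 'from' is a msghdr
     */
    if (rt->dst.dev->features & NETIF_F_SG &&
        getfrag == ip_generic_getfrag)
            paged = true;   /* skb_splice_from_iter() may consume the msghdr iter */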
Signed-off-by: David Howells Reported-by: syzbot+d8486855ef44506fd675@syzkaller.appspotmail.com Link: https://lore.kernel.org/r/000000000000ae4cbf05fdeb8349@google.com/ Fixes: 2dc334f1a63a ("splice, net: Use sendmsg(MSG_SPLICE_PAGES) rather than ->sendpage()") Tested-by: syzbot+d8486855ef44506fd675@syzkaller.appspotmail.com cc: David Ahern cc: Jens Axboe cc: Matthew Wilcox Reviewed-by: Willem de Bruijn Link: https://lore.kernel.org/r/1410156.1686729856@warthog.procyon.org.uk Signed-off-by: Jakub Kicinski --- net/ipv4/ip_output.c | 3 ++- net/ipv6/ip6_output.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 457598dfa128..6e70839257f7 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1041,7 +1041,8 @@ static int __ip_append_data(struct sock *sk, } else if ((flags & MSG_SPLICE_PAGES) && length) { if (inet->hdrincl) return -EPERM; - if (rt->dst.dev->features & NETIF_F_SG) + if (rt->dst.dev->features & NETIF_F_SG && + getfrag == ip_generic_getfrag) /* We need an empty buffer to attach stuff to */ paged = true; else diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index c06ff7519f19..1e8c90e97608 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1593,7 +1593,8 @@ emsgsize: } else if ((flags & MSG_SPLICE_PAGES) && length) { if (inet_sk(sk)->hdrincl) return -EPERM; - if (rt->dst.dev->features & NETIF_F_SG) + if (rt->dst.dev->features & NETIF_F_SG && + getfrag == ip_generic_getfrag) /* We need an empty buffer to attach stuff to */ paged = true; else -- cgit v1.2.3 From 9f8d0dc0ec4a4b5b78c0be83238c4511ff5f1ae0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 15 Jun 2023 12:32:02 +0100 Subject: kcm: Fix unnecessary psock unreservation. kcm_write_msgs() calls unreserve_psock() to release its hold on the underlying TCP socket if it has run out of things to transmit, but if we have nothing in the write queue on entry (e.g. because someone did a zero-length sendmsg), we don't actually go into the transmission loop and as a consequence don't call reserve_psock(). Fix this by skipping the call to unreserve_psock() if we didn't reserve a psock. Fixes: c31a25e1db48 ("kcm: Send multiple frags in one sendmsg()") Reported-by: syzbot+dd1339599f1840e4cc65@syzkaller.appspotmail.com Link: https://lore.kernel.org/r/000000000000a61ffe05fe0c3d08@google.com/ Signed-off-by: David Howells Tested-by: syzbot+dd1339599f1840e4cc65@syzkaller.appspotmail.com cc: Tom Herbert cc: Tom Herbert cc: Jens Axboe cc: Matthew Wilcox Link: https://lore.kernel.org/r/20787.1686828722@warthog.procyon.org.uk Signed-off-by: Jakub Kicinski --- net/kcm/kcmsock.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index d75d775e9462..d0537c1c8cd7 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -661,6 +661,7 @@ retry: kcm_abort_tx_psock(psock, ret ? -ret : EPIPE, true); unreserve_psock(kcm); + psock = NULL; txm->started_tx = false; kcm_report_tx_retry(kcm); @@ -696,7 +697,8 @@ out: if (!head) { /* Done with all queued messages. 
*/ WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); - unreserve_psock(kcm); + if (psock) + unreserve_psock(kcm); } /* Check if write space is available */ -- cgit v1.2.3 From f60ce8a48b97eb970bfbec1f26c914b9017c4a46 Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Thu, 15 Jun 2023 23:22:40 +0800 Subject: net: mctp: remove redundant RTN_UNICAST check Current mctp_newroute() contains two exactly same check against rtm->rtm_type static int mctp_newroute(...) { ... if (rtm->rtm_type != RTN_UNICAST) { // (1) NL_SET_ERR_MSG(extack, "rtm_type must be RTN_UNICAST"); return -EINVAL; } ... if (rtm->rtm_type != RTN_UNICAST) // (2) return -EINVAL; ... } This commits removes the (2) check as it is redundant. Signed-off-by: Lin Ma Reviewed-by: Pavan Chebbi Acked-by: Jeremy Kerr Link: https://lore.kernel.org/r/20230615152240.1749428-1-linma@zju.edu.cn Signed-off-by: Jakub Kicinski --- net/mctp/route.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'net') diff --git a/net/mctp/route.c b/net/mctp/route.c index f51a05ec7162..ab62fe447038 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -1249,9 +1249,6 @@ static int mctp_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, mtu = nla_get_u32(tbx[RTAX_MTU]); } - if (rtm->rtm_type != RTN_UNICAST) - return -EINVAL; - rc = mctp_route_add(mdev, daddr_start, rtm->rtm_dst_len, mtu, rtm->rtm_type); return rc; -- cgit v1.2.3 From a52305a81d6bb74b90b400dfa56455d37872fe4b Mon Sep 17 00:00:00 2001 From: Petr Oros Date: Thu, 15 Jun 2023 11:54:47 +0200 Subject: devlink: report devlink_port_type_warn source device devlink_port_type_warn is scheduled for port devlink and warning when the port type is not set. But from this warning it is not easy found out which device (driver) has no devlink port set. [ 3709.975552] Type was not set for devlink port. [ 3709.975579] WARNING: CPU: 1 PID: 13092 at net/devlink/leftover.c:6775 devlink_port_type_warn+0x11/0x20 [ 3709.993967] Modules linked in: openvswitch nf_conncount nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 nfnetlink bluetooth rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd grace fscache netfs vhost_net vhost vhost_iotlb tap tun bridge stp llc qrtr intel_rapl_msr intel_rapl_common i10nm_edac nfit libnvdimm x86_pkg_temp_thermal mlx5_ib intel_powerclamp coretemp dell_wmi ledtrig_audio sparse_keymap ipmi_ssif kvm_intel ib_uverbs rfkill ib_core video kvm iTCO_wdt acpi_ipmi intel_vsec irqbypass ipmi_si iTCO_vendor_support dcdbas ipmi_devintf mei_me ipmi_msghandler rapl mei intel_cstate isst_if_mmio isst_if_mbox_pci dell_smbios intel_uncore isst_if_common i2c_i801 dell_wmi_descriptor wmi_bmof i2c_smbus intel_pch_thermal pcspkr acpi_power_meter xfs libcrc32c sd_mod sg nvme_tcp mgag200 i2c_algo_bit nvme_fabrics drm_shmem_helper drm_kms_helper nvme syscopyarea ahci sysfillrect sysimgblt nvme_core fb_sys_fops crct10dif_pclmul libahci mlx5_core sfc crc32_pclmul nvme_common drm [ 3709.994030] crc32c_intel mtd t10_pi mlxfw libata tg3 mdio megaraid_sas psample ghash_clmulni_intel pci_hyperv_intf wmi dm_multipath sunrpc dm_mirror dm_region_hash dm_log dm_mod be2iscsi bnx2i cnic uio cxgb4i cxgb4 tls libcxgbi libcxgb qla4xxx iscsi_boot_sysfs iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi fuse [ 3710.108431] CPU: 1 PID: 13092 Comm: kworker/1:1 Kdump: loaded Not tainted 5.14.0-319.el9.x86_64 #1 [ 3710.108435] Hardware name: Dell Inc. 
PowerEdge R750/0PJ80M, BIOS 1.8.2 09/14/2022 [ 3710.108437] Workqueue: events devlink_port_type_warn [ 3710.108440] RIP: 0010:devlink_port_type_warn+0x11/0x20 [ 3710.108443] Code: 84 76 fe ff ff 48 c7 03 20 0e 1a ad 31 c0 e9 96 fd ff ff 66 0f 1f 44 00 00 0f 1f 44 00 00 48 c7 c7 18 24 4e ad e8 ef 71 62 ff <0f> 0b c3 cc cc cc cc 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 f6 87 [ 3710.108445] RSP: 0018:ff3b6d2e8b3c7e90 EFLAGS: 00010282 [ 3710.108447] RAX: 0000000000000000 RBX: ff366d6580127080 RCX: 0000000000000027 [ 3710.108448] RDX: 0000000000000027 RSI: 00000000ffff86de RDI: ff366d753f41f8c8 [ 3710.108449] RBP: ff366d658ff5a0c0 R08: ff366d753f41f8c0 R09: ff3b6d2e8b3c7e18 [ 3710.108450] R10: 0000000000000001 R11: 0000000000000023 R12: ff366d753f430600 [ 3710.108451] R13: ff366d753f436900 R14: 0000000000000000 R15: ff366d753f436905 [ 3710.108452] FS: 0000000000000000(0000) GS:ff366d753f400000(0000) knlGS:0000000000000000 [ 3710.108453] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 3710.108454] CR2: 00007f1c57bc74e0 CR3: 000000111d26a001 CR4: 0000000000773ee0 [ 3710.108456] PKRU: 55555554 [ 3710.108457] Call Trace: [ 3710.108458] [ 3710.108459] process_one_work+0x1e2/0x3b0 [ 3710.108466] ? rescuer_thread+0x390/0x390 [ 3710.108468] worker_thread+0x50/0x3a0 [ 3710.108471] ? rescuer_thread+0x390/0x390 [ 3710.108473] kthread+0xdd/0x100 [ 3710.108477] ? kthread_complete_and_exit+0x20/0x20 [ 3710.108479] ret_from_fork+0x1f/0x30 [ 3710.108485] [ 3710.108486] ---[ end trace 1b4b23cd0c65d6a0 ]--- After patch: [ 402.473064] ice 0000:41:00.0: Type was not set for devlink port. [ 402.473064] ice 0000:41:00.1: Type was not set for devlink port. Signed-off-by: Petr Oros Reviewed-by: Pavan Chebbi Reviewed-by: Jakub Kicinski Link: https://lore.kernel.org/r/20230615095447.8259-1-poros@redhat.com Signed-off-by: Jakub Kicinski --- net/devlink/leftover.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index 649a9701eb6a..1f00f874471f 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -6737,7 +6737,10 @@ void devlink_notify_unregister(struct devlink *devlink) static void devlink_port_type_warn(struct work_struct *work) { - WARN(true, "Type was not set for devlink port."); + struct devlink_port *port = container_of(to_delayed_work(work), + struct devlink_port, + type_warn_dw); + dev_warn(port->devlink->dev, "Type was not set for devlink port."); } static bool devlink_port_type_should_warn(struct devlink_port *devlink_port) -- cgit v1.2.3 From b650d953cd391595e536153ce30b4aab385643ac Mon Sep 17 00:00:00 2001 From: "mfreemon@cloudflare.com" Date: Sun, 11 Jun 2023 22:05:24 -0500 Subject: tcp: enforce receive buffer memory limits by allowing the tcp window to shrink Under certain circumstances, the tcp receive buffer memory limit set by autotuning (sk_rcvbuf) is increased due to incoming data packets as a result of the window not closing when it should be. This can result in the receive buffer growing all the way up to tcp_rmem[2], even for tcp sessions with a low BDP. To reproduce: Connect a TCP session with the receiver doing nothing and the sender sending small packets (an infinite loop of socket send() with 4 bytes of payload with a sleep of 1 ms in between each send()). This will cause the tcp receive buffer to grow all the way up to tcp_rmem[2]. 
As a result, a host can have individual tcp sessions with receive buffers of size tcp_rmem[2], and the host itself can reach tcp_mem limits, causing the host to go into tcp memory pressure mode. The fundamental issue is the relationship between the granularity of the window scaling factor and the number of byte ACKed back to the sender. This problem has previously been identified in RFC 7323, appendix F [1]. The Linux kernel currently adheres to never shrinking the window. In addition to the overallocation of memory mentioned above, the current behavior is functionally incorrect, because once tcp_rmem[2] is reached when no remediations remain (i.e. tcp collapse fails to free up any more memory and there are no packets to prune from the out-of-order queue), the receiver will drop in-window packets resulting in retransmissions and an eventual timeout of the tcp session. A receive buffer full condition should instead result in a zero window and an indefinite wait. In practice, this problem is largely hidden for most flows. It is not applicable to mice flows. Elephant flows can send data fast enough to "overrun" the sk_rcvbuf limit (in a single ACK), triggering a zero window. But this problem does show up for other types of flows. Examples are websockets and other type of flows that send small amounts of data spaced apart slightly in time. In these cases, we directly encounter the problem described in [1]. RFC 7323, section 2.4 [2], says there are instances when a retracted window can be offered, and that TCP implementations MUST ensure that they handle a shrinking window, as specified in RFC 1122, section 4.2.2.16 [3]. All prior RFCs on the topic of tcp window management have made clear that sender must accept a shrunk window from the receiver, including RFC 793 [4] and RFC 1323 [5]. This patch implements the functionality to shrink the tcp window when necessary to keep the right edge within the memory limit by autotuning (sk_rcvbuf). This new functionality is enabled with the new sysctl: net.ipv4.tcp_shrink_window Additional information can be found at: https://blog.cloudflare.com/unbounded-memory-usage-by-tcp-for-receive-buffers-and-how-we-fixed-it/ [1] https://www.rfc-editor.org/rfc/rfc7323#appendix-F [2] https://www.rfc-editor.org/rfc/rfc7323#section-2.4 [3] https://www.rfc-editor.org/rfc/rfc1122#page-91 [4] https://www.rfc-editor.org/rfc/rfc793 [5] https://www.rfc-editor.org/rfc/rfc1323 Signed-off-by: Mike Freemon Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.rst | 15 +++++++++ include/net/netns/ipv4.h | 1 + net/ipv4/sysctl_net_ipv4.c | 9 +++++ net/ipv4/tcp_ipv4.c | 2 ++ net/ipv4/tcp_output.c | 60 +++++++++++++++++++++++++++++----- 5 files changed, 78 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index 366e2a5097d9..4a010a7cde7f 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -981,6 +981,21 @@ tcp_tw_reuse - INTEGER tcp_window_scaling - BOOLEAN Enable window scaling as defined in RFC1323. +tcp_shrink_window - BOOLEAN + This changes how the TCP receive window is calculated. + + RFC 7323, section 2.4, says there are instances when a retracted + window can be offered, and that TCP implementations MUST ensure + that they handle a shrinking window, as specified in RFC 1122. + + - 0 - Disabled. The window is never shrunk. + - 1 - Enabled. 
The window is shrunk when necessary to remain within + the memory limit set by autotuning (sk_rcvbuf). + This only occurs if a non-zero receive window + scaling factor is also in effect. + + Default: 0 + tcp_wmem - vector of 3 INTEGERs: min, default, max min: Amount of memory reserved for send buffers for TCP sockets. Each TCP socket has rights to use it due to fact of its birth. diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index a4efb7a2796c..f00374718159 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -65,6 +65,7 @@ struct netns_ipv4 { #endif bool fib_has_custom_local_routes; bool fib_offload_disabled; + u8 sysctl_tcp_shrink_window; #ifdef CONFIG_IP_ROUTE_CLASSID atomic_t fib_num_tclassid_users; #endif diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 356afe54951c..2afb0870648b 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -1480,6 +1480,15 @@ static struct ctl_table ipv4_net_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = &tcp_syn_linear_timeouts_max, }, + { + .procname = "tcp_shrink_window", + .data = &init_net.ipv4.sysctl_tcp_shrink_window, + .maxlen = sizeof(u8), + .mode = 0644, + .proc_handler = proc_dou8vec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, { } }; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 84a5d557dc1a..9213804b034f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -3281,6 +3281,8 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.tcp_congestion_control = &tcp_reno; net->ipv4.sysctl_tcp_syn_linear_timeouts = 4; + net->ipv4.sysctl_tcp_shrink_window = 0; + return 0; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 660eac4bf2a7..2cb39b6dad02 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -260,8 +260,8 @@ static u16 tcp_select_window(struct sock *sk) u32 old_win = tp->rcv_wnd; u32 cur_win = tcp_receive_window(tp); u32 new_win = __tcp_select_window(sk); + struct net *net = sock_net(sk); - /* Never shrink the offered window */ if (new_win < cur_win) { /* Danger Will Robinson! * Don't update rcv_wup/rcv_wnd here or else @@ -270,11 +270,14 @@ static u16 tcp_select_window(struct sock *sk) * * Relax Will Robinson. */ - if (new_win == 0) - NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPWANTZEROWINDOWADV); - new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); + if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) { + /* Never shrink the offered window */ + if (new_win == 0) + NET_INC_STATS(net, LINUX_MIB_TCPWANTZEROWINDOWADV); + new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); + } } + tp->rcv_wnd = new_win; tp->rcv_wup = tp->rcv_nxt; @@ -282,7 +285,7 @@ static u16 tcp_select_window(struct sock *sk) * scaled window. 
*/ if (!tp->rx_opt.rcv_wscale && - READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)) + READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows)) new_win = min(new_win, MAX_TCP_WINDOW); else new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); @@ -294,10 +297,9 @@ static u16 tcp_select_window(struct sock *sk) if (new_win == 0) { tp->pred_flags = 0; if (old_win) - NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPTOZEROWINDOWADV); + NET_INC_STATS(net, LINUX_MIB_TCPTOZEROWINDOWADV); } else if (old_win == 0) { - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV); + NET_INC_STATS(net, LINUX_MIB_TCPFROMZEROWINDOWADV); } return new_win; @@ -2987,6 +2989,7 @@ u32 __tcp_select_window(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); + struct net *net = sock_net(sk); /* MSS for the peer's data. Previous versions used mss_clamp * here. I don't know if the value based on our guesses * of peer's MSS is better for the performance. It's more correct @@ -3008,6 +3011,15 @@ u32 __tcp_select_window(struct sock *sk) if (mss <= 0) return 0; } + + /* Only allow window shrink if the sysctl is enabled and we have + * a non-zero scaling factor in effect. + */ + if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale) + goto shrink_window_allowed; + + /* do not allow window to shrink */ + if (free_space < (full_space >> 1)) { icsk->icsk_ack.quick = 0; @@ -3062,6 +3074,36 @@ u32 __tcp_select_window(struct sock *sk) } return window; + +shrink_window_allowed: + /* new window should always be an exact multiple of scaling factor */ + free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); + + if (free_space < (full_space >> 1)) { + icsk->icsk_ack.quick = 0; + + if (tcp_under_memory_pressure(sk)) + tcp_adjust_rcv_ssthresh(sk); + + /* if free space is too low, return a zero window */ + if (free_space < (allowed_space >> 4) || free_space < mss || + free_space < (1 << tp->rx_opt.rcv_wscale)) + return 0; + } + + if (free_space > tp->rcv_ssthresh) { + free_space = tp->rcv_ssthresh; + /* new window should always be an exact multiple of scaling factor + * + * For this case, we ALIGN "up" (increase free_space) because + * we know free_space is not zero here, it has been reduced from + * the memory-based limit, and rcv_ssthresh is not a hard limit + * (unlike sk_rcvbuf). + */ + free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale)); + } + + return free_space; } void tcp_skb_collapse_tstamp(struct sk_buff *skb, -- cgit v1.2.3 From baf6d18b116b7dc84ed5e212c3a89f17cdc3f28c Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 12 Jun 2023 10:10:20 -0400 Subject: svcrdma: Prevent page release when nothing was received I noticed that svc_rqst_release_pages() was still unnecessarily releasing a page when svc_rdma_recvfrom() returns zero. Fixes: a53d5cb0646a ("svcrdma: Avoid releasing a page in svc_xprt_release()") Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 46a719ba4917..16b673a7d280 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -792,6 +792,12 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) struct svc_rdma_recv_ctxt *ctxt; int ret; + /* Prevent svc_xprt_release() from releasing pages in rq_pages + * when returning 0 or an error. 
+ */ + rqstp->rq_respages = rqstp->rq_pages; + rqstp->rq_next_page = rqstp->rq_respages; + rqstp->rq_xprt_ctxt = NULL; ctxt = NULL; @@ -815,12 +821,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) DMA_FROM_DEVICE); svc_rdma_build_arg_xdr(rqstp, ctxt); - /* Prevent svc_xprt_release from releasing pages in rq_pages - * if we return 0 or an error. - */ - rqstp->rq_respages = rqstp->rq_pages; - rqstp->rq_next_page = rqstp->rq_respages; - ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt); if (ret < 0) goto out_err; -- cgit v1.2.3 From 5581cf8efc3863e3831a3ee50854e823ec618df8 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 12 Jun 2023 10:10:27 -0400 Subject: SUNRPC: Optimize page release in svc_rdma_sendto() Now that we have bulk page allocation and release APIs, it's more efficient to use those than it is for nfsd threads to wait for send completions. Previous patches have eliminated the calls to wait_for_completion() and complete(), in order to avoid scheduler overhead. Now release pages-under-I/O in the send completion handler using the efficient bulk release API. I've measured a 7% reduction in cumulative CPU utilization in svc_rdma_sendto(), svc_rdma_wc_send(), and svc_xprt_release(). In particular, using release_pages() instead of complete() cuts the time per svc_rdma_wc_send() call by two-thirds. This helps improve scalability because svc_rdma_wc_send() is single-threaded per connection. Reviewed-by: Tom Talpey Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever --- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 1ae4236d04a3..24228f3611e8 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -236,8 +236,8 @@ void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct ib_device *device = rdma->sc_cm_id->device; unsigned int i; - for (i = 0; i < ctxt->sc_page_count; ++i) - put_page(ctxt->sc_pages[i]); + if (ctxt->sc_page_count) + release_pages(ctxt->sc_pages, ctxt->sc_page_count); /* The first SGE contains the transport header, which * remains mapped until @ctxt is destroyed. -- cgit v1.2.3 From f8335a212ac1495da3e9d98ff46c4bcd6a359011 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 12 Jun 2023 10:13:33 -0400 Subject: SUNRPC: Move initialization of rq_stime Micro-optimization: Call ktime_get() only when ->xpo_recvfrom() has given us a full RPC message to process. rq_stime isn't used otherwise, so this avoids pointless work. 
Reviewed-by: Jeff Layton Acked-by: Tom Talpey Signed-off-by: Chuck Lever --- net/sunrpc/svc_xprt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 9ca3393a197e..79b88889f073 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -849,7 +849,6 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt) len = svc_deferred_recv(rqstp); else len = xprt->xpt_ops->xpo_recvfrom(rqstp); - rqstp->rq_stime = ktime_get(); rqstp->rq_reserved = serv->sv_max_mesg; atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); } else @@ -892,6 +891,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) err = -EAGAIN; if (len <= 0) goto out_release; + trace_svc_xdr_recvfrom(&rqstp->rq_arg); clear_bit(XPT_OLD, &xprt->xpt_flags); @@ -900,6 +900,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) if (serv->sv_stats) serv->sv_stats->netcnt++; + rqstp->rq_stime = ktime_get(); return len; out_release: rqstp->rq_res.len = 0; -- cgit v1.2.3 From 91f8ce28466e480fd9ef27e028ac89dbfd264c24 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 12 Jun 2023 10:13:45 -0400 Subject: svcrdma: Convert "might sleep" comment into a code annotation Try to catch incorrect calling contexts mechanically rather than by code review. Reviewed-by: Jeff Layton Acked-by: Tom Talpey Signed-off-by: Chuck Lever --- net/sunrpc/xprtrdma/svc_rdma_rw.c | 5 +++-- net/sunrpc/xprtrdma/svc_rdma_sendto.c | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 068c365e7812..59ea87b5f458 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -353,8 +353,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc) return; } -/* This function sleeps when the transport's Send Queue is congested. - * +/* * Assumptions: * - If ib_post_send() succeeds, only one completion is expected, * even if one or more WRs are flushed. This is true when posting @@ -369,6 +368,8 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc) struct ib_cqe *cqe; int ret; + might_sleep(); + if (cc->cc_sqecount > rdma->sc_sq_depth) return -EINVAL; diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 24228f3611e8..c6644cca52c5 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -315,6 +315,8 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt) struct ib_send_wr *wr = &ctxt->sc_send_wr; int ret; + might_sleep(); + /* Sync the transport header buffer */ ib_dma_sync_single_for_device(rdma->sc_pd->device, wr->sg_list[0].addr, -- cgit v1.2.3 From a23c76e92d8215456ca2fbd5cf6c1ff71b744c1d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 12 Jun 2023 10:13:51 -0400 Subject: svcrdma: trace cc_release calls This event brackets the svcrdma_post_* trace points. If this trace event is enabled but does not appear as expected, that indicates a chunk_ctxt leak. 
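As a rough illustration of that diagnostic (a toy userspace sketch, not kernel code, with made-up counts): every svcrdma_post_{read,write,reply} event should eventually be matched by an svcrdma_cc_release event for the same chunk_ctxt, so comparing the two counts from a trace capture is enough to spot a leak.

#include <stdio.h>

int main(void)
{
	unsigned long posts = 1042;	/* svcrdma_post_* events seen in the capture */
	unsigned long releases = 1040;	/* svcrdma_cc_release events seen */

	if (posts != releases)
		printf("possible chunk_ctxt leak: %lu post(s) without a release\n",
		       posts - releases);
	else
		printf("all chunk contexts released\n");
	return 0;
}
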
Reviewed-by: Jeff Layton Acked-by: Tom Talpey Signed-off-by: Chuck Lever --- include/trace/events/rpcrdma.h | 8 ++++++++ net/sunrpc/xprtrdma/svc_rdma_rw.c | 2 ++ 2 files changed, 10 insertions(+) (limited to 'net') diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 8f461e04e5f0..f8069ef2ee0f 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -2112,6 +2112,14 @@ DEFINE_POST_CHUNK_EVENT(read); DEFINE_POST_CHUNK_EVENT(write); DEFINE_POST_CHUNK_EVENT(reply); +DEFINE_EVENT(svcrdma_post_chunk_class, svcrdma_cc_release, + TP_PROTO( + const struct rpc_rdma_cid *cid, + int sqecount + ), + TP_ARGS(cid, sqecount) +); + TRACE_EVENT(svcrdma_wc_read, TP_PROTO( const struct ib_wc *wc, diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 59ea87b5f458..9836406cc41e 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -191,6 +191,8 @@ static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc, struct svc_rdma_rw_ctxt *ctxt; LLIST_HEAD(free); + trace_svcrdma_cc_release(&cc->cc_cid, cc->cc_sqecount); + first = last = NULL; while ((ctxt = svc_rdma_next_ctxt(&cc->cc_rwctxts)) != NULL) { list_del(&ctxt->rw_list); -- cgit v1.2.3 From b55c63332e9a33833d3daedb83bc1fbbcfc00e1c Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 12 Jun 2023 10:13:58 -0400 Subject: svcrdma: Remove an unused argument from __svc_rdma_put_rw_ctxt() Clean up. Reviewed-by: Jeff Layton Acked-by: Tom Talpey Signed-off-by: Chuck Lever --- net/sunrpc/xprtrdma/svc_rdma_rw.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 9836406cc41e..e460e25a1d6d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c @@ -84,8 +84,7 @@ out_noctx: return NULL; } -static void __svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, - struct svc_rdma_rw_ctxt *ctxt, +static void __svc_rdma_put_rw_ctxt(struct svc_rdma_rw_ctxt *ctxt, struct llist_head *list) { sg_free_table_chained(&ctxt->rw_sg_table, SG_CHUNK_SIZE); @@ -95,7 +94,7 @@ static void __svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, static void svc_rdma_put_rw_ctxt(struct svcxprt_rdma *rdma, struct svc_rdma_rw_ctxt *ctxt) { - __svc_rdma_put_rw_ctxt(rdma, ctxt, &rdma->sc_rw_ctxts); + __svc_rdma_put_rw_ctxt(ctxt, &rdma->sc_rw_ctxts); } /** @@ -200,7 +199,7 @@ static void svc_rdma_cc_release(struct svc_rdma_chunk_ctxt *cc, rdma_rw_ctx_destroy(&ctxt->rw_ctx, rdma->sc_qp, rdma->sc_port_num, ctxt->rw_sg_table.sgl, ctxt->rw_nents, dir); - __svc_rdma_put_rw_ctxt(rdma, ctxt, &free); + __svc_rdma_put_rw_ctxt(ctxt, &free); ctxt->rw_node.next = first; first = &ctxt->rw_node; -- cgit v1.2.3 From 02cea33f56241819f72edbd8f470a45e720d9ca3 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 12 Jun 2023 10:14:04 -0400 Subject: SUNRPC: Fix comments for transport class registration The preceding block comment before svc_register_xprt_class() is not related to that function. While we're here, add proper documenting comments for these two publicly-visible functions. 
Reviewed-by: Jeff Layton Acked-by: Tom Talpey Signed-off-by: Chuck Lever --- net/sunrpc/svc_xprt.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'net') diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 79b88889f073..5a3512bdfdce 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -74,6 +74,13 @@ static LIST_HEAD(svc_xprt_class_list); * that no other thread will be using the transport or will * try to set XPT_DEAD. */ + +/** + * svc_reg_xprt_class - Register a server-side RPC transport class + * @xcl: New transport class to be registered + * + * Returns zero on success; otherwise a negative errno is returned. + */ int svc_reg_xprt_class(struct svc_xprt_class *xcl) { struct svc_xprt_class *cl; @@ -96,6 +103,11 @@ out: } EXPORT_SYMBOL_GPL(svc_reg_xprt_class); +/** + * svc_unreg_xprt_class - Unregister a server-side RPC transport class + * @xcl: Transport class to be unregistered + * + */ void svc_unreg_xprt_class(struct svc_xprt_class *xcl) { dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name); -- cgit v1.2.3 From 6c53da5d66b1f0d30b1039e14eed943efe377805 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 12 Jun 2023 10:14:10 -0400 Subject: SUNRPC: Remove transport class dprintk call sites Remove a couple of dprintk call sites that are of little value. Reviewed-by: Jeff Layton Acked-by: Tom Talpey Signed-off-by: Chuck Lever --- net/sunrpc/svc_xprt.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'net') diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 5a3512bdfdce..62c7919ea610 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -86,8 +86,6 @@ int svc_reg_xprt_class(struct svc_xprt_class *xcl) struct svc_xprt_class *cl; int res = -EEXIST; - dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name); - INIT_LIST_HEAD(&xcl->xcl_list); spin_lock(&svc_xprt_class_lock); /* Make sure there isn't already a class with the same name */ @@ -110,7 +108,6 @@ EXPORT_SYMBOL_GPL(svc_reg_xprt_class); */ void svc_unreg_xprt_class(struct svc_xprt_class *xcl) { - dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name); spin_lock(&svc_xprt_class_lock); list_del_init(&xcl->xcl_list); spin_unlock(&svc_xprt_class_lock); -- cgit v1.2.3 From a9156d7e7d6a811a0ac1a2bc9ab6006bb928e871 Mon Sep 17 00:00:00 2001 From: Azeem Shaikh Date: Wed, 14 Jun 2023 13:37:57 +0000 Subject: SUNRPC: Use sysfs_emit in place of strlcpy/sprintf Part of an effort to remove strlcpy() tree-wide [1]. Direct replacement is safe here since the getter in kernel_params_ops handles -errno return [2]. 
[1] https://github.com/KSPP/linux/issues/89 [2] https://elixir.bootlin.com/linux/v6.4-rc6/source/include/linux/moduleparam.h#L52 Signed-off-by: Azeem Shaikh Signed-off-by: Chuck Lever --- net/sunrpc/svc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index e6d4cec61e47..b011c318fef1 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -109,15 +109,15 @@ param_get_pool_mode(char *buf, const struct kernel_param *kp) switch (*ip) { case SVC_POOL_AUTO: - return strlcpy(buf, "auto\n", 20); + return sysfs_emit(buf, "auto\n"); case SVC_POOL_GLOBAL: - return strlcpy(buf, "global\n", 20); + return sysfs_emit(buf, "global\n"); case SVC_POOL_PERCPU: - return strlcpy(buf, "percpu\n", 20); + return sysfs_emit(buf, "percpu\n"); case SVC_POOL_PERNODE: - return strlcpy(buf, "pernode\n", 20); + return sysfs_emit(buf, "pernode\n"); default: - return sprintf(buf, "%d\n", *ip); + return sysfs_emit(buf, "%d\n", *ip); } } -- cgit v1.2.3 From 00a87e5d1d67ada9fc2d3a1f6407ae339b425bce Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 16 Jun 2023 09:19:45 -0400 Subject: SUNRPC: Address RCU warning in net/sunrpc/svc.c $ make C=1 W=1 net/sunrpc/svc.o make[1]: Entering directory 'linux/obj/manet.1015granger.net' GEN Makefile CALL linux/server-development/scripts/checksyscalls.sh DESCEND objtool INSTALL libsubcmd_headers DESCEND bpf/resolve_btfids INSTALL libsubcmd_headers CC [M] net/sunrpc/svc.o CHECK linux/server-development/net/sunrpc/svc.c linux/server-development/net/sunrpc/svc.c:1225:9: warning: incorrect type in argument 1 (different address spaces) linux/server-development/net/sunrpc/svc.c:1225:9: expected struct spinlock [usertype] *lock linux/server-development/net/sunrpc/svc.c:1225:9: got struct spinlock [noderef] __rcu * linux/server-development/net/sunrpc/svc.c:1227:40: warning: incorrect type in argument 1 (different address spaces) linux/server-development/net/sunrpc/svc.c:1227:40: expected struct spinlock [usertype] *lock linux/server-development/net/sunrpc/svc.c:1227:40: got struct spinlock [noderef] __rcu * make[1]: Leaving directory 'linux/obj/manet.1015granger.net' Warning introduced by commit 913292c97d75 ("sched.h: Annotate sighand_struct with __rcu"). Signed-off-by: Chuck Lever --- net/sunrpc/svc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index b011c318fef1..e7c101290425 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1164,6 +1164,7 @@ static void __svc_unregister(struct net *net, const u32 program, const u32 versi */ static void svc_unregister(const struct svc_serv *serv, struct net *net) { + struct sighand_struct *sighand; struct svc_program *progp; unsigned long flags; unsigned int i; @@ -1180,9 +1181,12 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net) } } - spin_lock_irqsave(¤t->sighand->siglock, flags); + rcu_read_lock(); + sighand = rcu_dereference(current->sighand); + spin_lock_irqsave(&sighand->siglock, flags); recalc_sigpending(); - spin_unlock_irqrestore(¤t->sighand->siglock, flags); + spin_unlock_irqrestore(&sighand->siglock, flags); + rcu_read_unlock(); } /* -- cgit v1.2.3 From 7a7f094635349a7d0314364ad50bdeb770b6df4f Mon Sep 17 00:00:00 2001 From: Arjun Roy Date: Fri, 16 Jun 2023 12:34:27 -0700 Subject: tcp: Use per-vma locking for receive zerocopy Per-VMA locking allows us to lock a struct vm_area_struct without taking the process-wide mmap lock in read mode. 
Consider a process workload where the mmap lock is taken constantly in write mode. In this scenario, all zerocopy receives are periodically blocked during that period of time - though in principle, the memory ranges being used by TCP are not touched by the operations that need the mmap write lock. This results in performance degradation. Now consider another workload where the mmap lock is never taken in write mode, but there are many TCP connections using receive zerocopy that are concurrently receiving. These connections all take the mmap lock in read mode, but this does induce a lot of contention and atomic ops for this process-wide lock. This results in additional CPU overhead caused by contending on the cache line for this lock. However, with per-vma locking, both of these problems can be avoided. As a test, I ran an RPC-style request/response workload with 4KB payloads and receive zerocopy enabled, with 100 simultaneous TCP connections. I measured perf cycles within the find_tcp_vma/mmap_read_lock/mmap_read_unlock codepath, with and without per-vma locking enabled. When using process-wide mmap semaphore read locking, about 1% of measured perf cycles were within this path. With per-VMA locking, this value dropped to about 0.45%. Signed-off-by: Arjun Roy Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- MAINTAINERS | 1 + include/linux/net_mm.h | 17 +++++++++++++++++ include/net/tcp.h | 1 + mm/memory.c | 7 ++++--- net/ipv4/tcp.c | 45 +++++++++++++++++++++++++++++++++++++-------- 5 files changed, 60 insertions(+), 11 deletions(-) create mode 100644 include/linux/net_mm.h (limited to 'net') diff --git a/MAINTAINERS b/MAINTAINERS index 7322963b0670..cb14589d14ab 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14743,6 +14743,7 @@ NETWORKING [TCP] M: Eric Dumazet L: netdev@vger.kernel.org S: Maintained +F: include/linux/net_mm.h F: include/linux/tcp.h F: include/net/tcp.h F: include/trace/events/tcp.h diff --git a/include/linux/net_mm.h b/include/linux/net_mm.h new file mode 100644 index 000000000000..b298998bd5a0 --- /dev/null +++ b/include/linux/net_mm.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifdef CONFIG_MMU + +#ifdef CONFIG_INET +extern const struct vm_operations_struct tcp_vm_ops; +static inline bool vma_is_tcp(const struct vm_area_struct *vma) +{ + return vma->vm_ops == &tcp_vm_ops; +} +#else +static inline bool vma_is_tcp(const struct vm_area_struct *vma) +{ + return false; +} +#endif /* CONFIG_INET*/ + +#endif /* CONFIG_MMU */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 9c08eab647a2..31b534370787 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -45,6 +45,7 @@ #include #include #include +#include extern struct inet_hashinfo tcp_hashinfo; diff --git a/mm/memory.c b/mm/memory.c index f69fbc251198..3e46b4d881dc 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -77,6 +77,7 @@ #include #include #include +#include #include @@ -5280,12 +5281,12 @@ retry: if (!vma) goto inval; - /* Only anonymous vmas are supported for now */ - if (!vma_is_anonymous(vma)) + /* Only anonymous and tcp vmas are supported for now */ + if (!vma_is_anonymous(vma) && !vma_is_tcp(vma)) goto inval; /* find_mergeable_anon_vma uses adjacent vmas which are not locked */ - if (!vma->anon_vma) + if (!vma->anon_vma && !vma_is_tcp(vma)) goto inval; if (!vma_start_read(vma)) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0e21ea92dc1d..71b42eef9dbf 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1774,7 +1774,7 @@ void tcp_update_recv_tstamps(struct sk_buff *skb, 
} #ifdef CONFIG_MMU -static const struct vm_operations_struct tcp_vm_ops = { +const struct vm_operations_struct tcp_vm_ops = { }; int tcp_mmap(struct file *file, struct socket *sock, @@ -2073,6 +2073,34 @@ static void tcp_zc_finalize_rx_tstamp(struct sock *sk, } } +static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm, + unsigned long address, + bool *mmap_locked) +{ + struct vm_area_struct *vma = NULL; + +#ifdef CONFIG_PER_VMA_LOCK + vma = lock_vma_under_rcu(mm, address); +#endif + if (vma) { + if (!vma_is_tcp(vma)) { + vma_end_read(vma); + return NULL; + } + *mmap_locked = false; + return vma; + } + + mmap_read_lock(mm); + vma = vma_lookup(mm, address); + if (!vma || !vma_is_tcp(vma)) { + mmap_read_unlock(mm); + return NULL; + } + *mmap_locked = true; + return vma; +} + #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 static int tcp_zerocopy_receive(struct sock *sk, struct tcp_zerocopy_receive *zc, @@ -2090,6 +2118,7 @@ static int tcp_zerocopy_receive(struct sock *sk, u32 seq = tp->copied_seq; u32 total_bytes_to_map; int inq = tcp_inq(sk); + bool mmap_locked; int ret; zc->copybuf_len = 0; @@ -2114,13 +2143,10 @@ static int tcp_zerocopy_receive(struct sock *sk, return 0; } - mmap_read_lock(current->mm); - - vma = vma_lookup(current->mm, address); - if (!vma || vma->vm_ops != &tcp_vm_ops) { - mmap_read_unlock(current->mm); + vma = find_tcp_vma(current->mm, address, &mmap_locked); + if (!vma) return -EINVAL; - } + vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); avail_len = min_t(u32, vma_len, inq); total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); @@ -2194,7 +2220,10 @@ static int tcp_zerocopy_receive(struct sock *sk, zc, total_bytes_to_map); } out: - mmap_read_unlock(current->mm); + if (mmap_locked) + mmap_read_unlock(current->mm); + else + vma_end_read(vma); /* Try to copy straggler data. */ if (!ret) copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss); -- cgit v1.2.3 From 3515440df461359762d447b5068f148611bb4d42 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 16 Jun 2023 08:57:52 +0000 Subject: ipv6: also use netdev_hold() in ip6_route_check_nh() In blamed commit, we missed the fact that ip6_validate_gw() could change dev under us from ip6_route_check_nh() In this fix, I use GFP_ATOMIC in order to not pass too many additional arguments to ip6_validate_gw() and ip6_route_check_nh() only for a rarely used debug feature. syzbot reported: refcount_t: decrement hit 0; leaking memory. 
WARNING: CPU: 0 PID: 5006 at lib/refcount.c:31 refcount_warn_saturate+0x1d7/0x1f0 lib/refcount.c:31 Modules linked in: CPU: 0 PID: 5006 Comm: syz-executor403 Not tainted 6.4.0-rc5-syzkaller-01229-g97c5209b3d37 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/27/2023 RIP: 0010:refcount_warn_saturate+0x1d7/0x1f0 lib/refcount.c:31 Code: 05 fb 8e 51 0a 01 e8 98 95 38 fd 0f 0b e9 d3 fe ff ff e8 ac d9 70 fd 48 c7 c7 00 d3 a6 8a c6 05 d8 8e 51 0a 01 e8 79 95 38 fd <0f> 0b e9 b4 fe ff ff 48 89 ef e8 1a d7 c3 fd e9 5c fe ff ff 0f 1f RSP: 0018:ffffc900039df6b8 EFLAGS: 00010282 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 RDX: ffff888026d71dc0 RSI: ffffffff814c03b7 RDI: 0000000000000001 RBP: ffff888146a505fc R08: 0000000000000001 R09: 0000000000000000 R10: 0000000000000001 R11: 0000000000000001 R12: 1ffff9200073bedc R13: 00000000ffffffef R14: ffff888146a505fc R15: ffff8880284eb5a8 FS: 0000555556c88300(0000) GS:ffff8880b9800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000004585c0 CR3: 000000002b1b1000 CR4: 00000000003506f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: __refcount_dec include/linux/refcount.h:344 [inline] refcount_dec include/linux/refcount.h:359 [inline] ref_tracker_free+0x539/0x820 lib/ref_tracker.c:236 netdev_tracker_free include/linux/netdevice.h:4097 [inline] netdev_put include/linux/netdevice.h:4114 [inline] netdev_put include/linux/netdevice.h:4110 [inline] fib6_nh_init+0xb96/0x1bd0 net/ipv6/route.c:3624 ip6_route_info_create+0x10f3/0x1980 net/ipv6/route.c:3791 ip6_route_add+0x28/0x150 net/ipv6/route.c:3835 ipv6_route_ioctl+0x3fc/0x570 net/ipv6/route.c:4459 inet6_ioctl+0x246/0x290 net/ipv6/af_inet6.c:569 sock_do_ioctl+0xcc/0x230 net/socket.c:1189 sock_ioctl+0x1f8/0x680 net/socket.c:1306 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:870 [inline] __se_sys_ioctl fs/ioctl.c:856 [inline] __x64_sys_ioctl+0x197/0x210 fs/ioctl.c:856 do_syscall_x64 arch/x86/entry/common.c:50 [inline] Fixes: 70f7457ad6d6 ("net: create device lookup API with reference tracking") Reported-by: syzbot Signed-off-by: Eric Dumazet Cc: Jakub Kicinski Cc: David Ahern Reviewed-by: Jakub Kicinski Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/ipv6/route.c b/net/ipv6/route.c index e510a4162ef8..64e873f5895f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3360,6 +3360,7 @@ static int ip6_route_check_nh_onlink(struct net *net, static int ip6_route_check_nh(struct net *net, struct fib6_config *cfg, struct net_device **_dev, + netdevice_tracker *dev_tracker, struct inet6_dev **idev) { const struct in6_addr *gw_addr = &cfg->fc_gateway; @@ -3404,7 +3405,7 @@ static int ip6_route_check_nh(struct net *net, err = -EHOSTUNREACH; } else { *_dev = dev = res.nh->fib_nh_dev; - dev_hold(dev); + netdev_hold(dev, dev_tracker, GFP_ATOMIC); *idev = in6_dev_get(dev); } @@ -3412,7 +3413,9 @@ static int ip6_route_check_nh(struct net *net, } static int ip6_validate_gw(struct net *net, struct fib6_config *cfg, - struct net_device **_dev, struct inet6_dev **idev, + struct net_device **_dev, + netdevice_tracker *dev_tracker, + struct inet6_dev **idev, struct netlink_ext_ack *extack) { const struct in6_addr *gw_addr = &cfg->fc_gateway; @@ -3453,7 +3456,8 @@ static int ip6_validate_gw(struct net *net, struct fib6_config *cfg, if (cfg->fc_flags & RTNH_F_ONLINK) err = ip6_route_check_nh_onlink(net, cfg, dev, extack); else - err = ip6_route_check_nh(net, cfg, _dev, idev); + err = ip6_route_check_nh(net, cfg, _dev, dev_tracker, + idev); rcu_read_unlock(); @@ -3571,7 +3575,8 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, } if (cfg->fc_flags & RTF_GATEWAY) { - err = ip6_validate_gw(net, cfg, &dev, &idev, extack); + err = ip6_validate_gw(net, cfg, &dev, dev_tracker, + &idev, extack); if (err) goto out; -- cgit v1.2.3 From 88770b8de38eeaf093c877ed78a7e6e1660df8df Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Sun, 18 Jun 2023 12:04:01 -0400 Subject: svcrdma: Fix stale comment Commit 7d81ee8722d6 ("svcrdma: Single-stage RDMA Read") changed the behavior of svc_rdma_recvfrom() but neglected to update the documenting comment. Signed-off-by: Chuck Lever --- net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 16b673a7d280..85c8bcaebb80 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -771,9 +771,6 @@ static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt, * * The next ctxt is removed from the "receive" lists. * - * - If the ctxt completes a Read, then finish assembling the Call - * message and return the number of bytes in the message. - * * - If the ctxt completes a Receive, then construct the Call * message from the contents of the Receive buffer. * @@ -782,7 +779,8 @@ static bool svc_rdma_is_reverse_direction_reply(struct svc_xprt *xprt, * in the message. * * - If there are Read chunks in this message, post Read WRs to - * pull that payload and return 0. + * pull that payload. When the Read WRs complete, build the + * full message and return the number of bytes in it. */ int svc_rdma_recvfrom(struct svc_rqst *rqstp) { -- cgit v1.2.3 From 2dc6af8be002d16715485372ce1e02b65faf9283 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 16 Jun 2023 13:49:39 -0700 Subject: gro: move the tc_ext comparison to a helper The double ifdefs (one for the variable declaration and one around the code) are quite aesthetically displeasing. Factor this code out into a helper for easier wrapping. 
This will become even more ugly when another skb ext comparison is added in the future. The resulting machine code looks the same, the compiler seems to try to use %rax more and some blocks more around but I haven't spotted minor differences. Reviewed-by: Simon Horman Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- net/core/gro.c | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/net/core/gro.c b/net/core/gro.c index dca800068e41..0759277dc14e 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -304,6 +304,24 @@ void napi_gro_flush(struct napi_struct *napi, bool flush_old) } EXPORT_SYMBOL(napi_gro_flush); +static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb, + const struct sk_buff *p, + unsigned long diffs) +{ +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + struct tc_skb_ext *skb_ext; + struct tc_skb_ext *p_ext; + + skb_ext = skb_ext_find(skb, TC_SKB_EXT); + p_ext = skb_ext_find(p, TC_SKB_EXT); + + diffs |= (!!p_ext) ^ (!!skb_ext); + if (!diffs && unlikely(skb_ext)) + diffs |= p_ext->chain ^ skb_ext->chain; +#endif + return diffs; +} + static void gro_list_prepare(const struct list_head *head, const struct sk_buff *skb) { @@ -338,23 +356,11 @@ static void gro_list_prepare(const struct list_head *head, * avoid trying too hard to skip each of them individually */ if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) { -#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - struct tc_skb_ext *skb_ext; - struct tc_skb_ext *p_ext; -#endif - diffs |= p->sk != skb->sk; diffs |= skb_metadata_dst_cmp(p, skb); diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb); -#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - skb_ext = skb_ext_find(skb, TC_SKB_EXT); - p_ext = skb_ext_find(p, TC_SKB_EXT); - - diffs |= (!!p_ext) ^ (!!skb_ext); - if (!diffs && unlikely(skb_ext)) - diffs |= p_ext->chain ^ skb_ext->chain; -#endif + diffs |= gro_list_prepare_tc_ext(skb, p, diffs); } NAPI_GRO_CB(p)->same_flow = !diffs; -- cgit v1.2.3 From 6d543b34dbcf6ec30064ae62a88faf60dbff4f8a Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Thu, 8 Jun 2023 16:36:11 +0300 Subject: wifi: mac80211: Support disabled links during association When the association is complete, do not configure disabled links, and track them as part of the interface data. Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230608163202.c194fabeb81a.Iaefdef5ba0492afe9a5ede14c68060a4af36e444@changeid Signed-off-by: Johannes Berg --- include/net/mac80211.h | 6 ++++-- net/mac80211/cfg.c | 4 ++-- net/mac80211/ieee80211_i.h | 4 +++- net/mac80211/link.c | 38 +++++++++++++++++++++++--------------- net/mac80211/mlme.c | 35 ++++++++++++++++++++++------------- 5 files changed, 54 insertions(+), 33 deletions(-) (limited to 'net') diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 2ecc8d0c6ef4..914448cb0ecf 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1846,6 +1846,8 @@ struct ieee80211_vif_cfg { * @active_links: The bitmap of active links, or 0 for non-MLO. * The driver shouldn't change this directly, but use the * API calls meant for that purpose. + * @dormant_links: bitmap of valid but disabled links, or 0 for non-MLO. + * Must be a subset of valid_links. * @addr: address of this interface * @p2p: indicates whether this AP or STA interface is a p2p * interface, i.e. 
a GO or p2p-sta respectively @@ -1883,7 +1885,7 @@ struct ieee80211_vif { struct ieee80211_vif_cfg cfg; struct ieee80211_bss_conf bss_conf; struct ieee80211_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS]; - u16 valid_links, active_links; + u16 valid_links, active_links, dormant_links; u8 addr[ETH_ALEN] __aligned(2); bool p2p; @@ -1916,7 +1918,7 @@ struct ieee80211_vif { */ static inline u16 ieee80211_vif_usable_links(const struct ieee80211_vif *vif) { - return vif->valid_links; + return vif->valid_links & ~vif->dormant_links; } /** diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index ca6d53d5751d..359589d525d5 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -4868,7 +4868,7 @@ static int ieee80211_add_intf_link(struct wiphy *wiphy, return -EOPNOTSUPP; mutex_lock(&sdata->local->mtx); - res = ieee80211_vif_set_links(sdata, wdev->valid_links); + res = ieee80211_vif_set_links(sdata, wdev->valid_links, 0); mutex_unlock(&sdata->local->mtx); return res; @@ -4881,7 +4881,7 @@ static void ieee80211_del_intf_link(struct wiphy *wiphy, struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); mutex_lock(&sdata->local->mtx); - ieee80211_vif_set_links(sdata, wdev->valid_links); + ieee80211_vif_set_links(sdata, wdev->valid_links, 0); mutex_unlock(&sdata->local->mtx); } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 7a98fa26ec92..4afd5095366c 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -410,6 +410,8 @@ struct ieee80211_mgd_assoc_data { ieee80211_conn_flags_t conn_flags; u16 status; + + bool disabled; } link[IEEE80211_MLD_MAX_NUM_LINKS]; u8 ap_addr[ETH_ALEN] __aligned(2); @@ -2019,7 +2021,7 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *link_conf); void ieee80211_link_stop(struct ieee80211_link_data *link); int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, - u16 new_links); + u16 new_links, u16 dormant_links); void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata); /* tx handling */ diff --git a/net/mac80211/link.c b/net/mac80211/link.c index 55cba3760ef5..d090ecc41ea2 100644 --- a/net/mac80211/link.c +++ b/net/mac80211/link.c @@ -142,25 +142,34 @@ static int ieee80211_check_dup_link_addrs(struct ieee80211_sub_if_data *sdata) } static void ieee80211_set_vif_links_bitmaps(struct ieee80211_sub_if_data *sdata, - u16 links) + u16 valid_links, u16 dormant_links) { - sdata->vif.valid_links = links; - - if (!links) { + sdata->vif.valid_links = valid_links; + sdata->vif.dormant_links = dormant_links; + + if (!valid_links || + WARN((~valid_links & dormant_links) || + !(valid_links & ~dormant_links), + "Invalid links: valid=0x%x, dormant=0x%x", + valid_links, dormant_links)) { sdata->vif.active_links = 0; + sdata->vif.dormant_links = 0; return; } switch (sdata->vif.type) { case NL80211_IFTYPE_AP: /* in an AP all links are always active */ - sdata->vif.active_links = links; + sdata->vif.active_links = valid_links; + + /* AP links are not expected to be disabled */ + WARN_ON(dormant_links); break; case NL80211_IFTYPE_STATION: if (sdata->vif.active_links) break; - WARN_ON(hweight16(links) > 1); - sdata->vif.active_links = links; + sdata->vif.active_links = valid_links & ~dormant_links; + WARN_ON(hweight16(sdata->vif.active_links) > 1); break; default: WARN_ON(1); @@ -169,7 +178,7 @@ static void ieee80211_set_vif_links_bitmaps(struct ieee80211_sub_if_data *sdata, static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata, struct 
link_container **to_free, - u16 new_links) + u16 new_links, u16 dormant_links) { u16 old_links = sdata->vif.valid_links; u16 old_active = sdata->vif.active_links; @@ -245,7 +254,7 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata, /* for keys we will not be able to undo this */ ieee80211_tear_down_links(sdata, to_free, rem); - ieee80211_set_vif_links_bitmaps(sdata, new_links); + ieee80211_set_vif_links_bitmaps(sdata, new_links, dormant_links); /* tell the driver */ ret = drv_change_vif_links(sdata->local, sdata, @@ -258,7 +267,7 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata, /* restore config */ memcpy(sdata->link, old_data, sizeof(old_data)); memcpy(sdata->vif.link_conf, old, sizeof(old)); - ieee80211_set_vif_links_bitmaps(sdata, old_links); + ieee80211_set_vif_links_bitmaps(sdata, old_links, dormant_links); /* and free (only) the newly allocated links */ memset(to_free, 0, sizeof(links)); goto free; @@ -282,12 +291,13 @@ deinit: } int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, - u16 new_links) + u16 new_links, u16 dormant_links) { struct link_container *links[IEEE80211_MLD_MAX_NUM_LINKS]; int ret; - ret = ieee80211_vif_update_links(sdata, links, new_links); + ret = ieee80211_vif_update_links(sdata, links, new_links, + dormant_links); ieee80211_free_links(sdata, links); return ret; @@ -304,7 +314,7 @@ void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata) */ sdata_lock(sdata); - ieee80211_vif_update_links(sdata, links, 0); + ieee80211_vif_update_links(sdata, links, 0, 0); sdata_unlock(sdata); ieee80211_free_links(sdata, links); @@ -328,7 +338,6 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata, if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EINVAL; - /* cannot activate links that don't exist */ if (active_links & ~ieee80211_vif_usable_links(&sdata->vif)) return -EINVAL; @@ -484,7 +493,6 @@ void ieee80211_set_active_links_async(struct ieee80211_vif *vif, if (sdata->vif.type != NL80211_IFTYPE_STATION) return; - /* cannot activate links that don't exist */ if (active_links & ~ieee80211_vif_usable_links(&sdata->vif)) return; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 959695ed7649..738822b82d3e 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2818,6 +2818,10 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) continue; + if (ieee80211_vif_is_mld(&sdata->vif) && + !(ieee80211_vif_usable_links(&sdata->vif) & BIT(link_id))) + continue; + link = sdata_dereference(sdata->link[link_id], sdata); if (WARN_ON(!link)) return; @@ -2844,6 +2848,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, struct cfg80211_bss *cbss = assoc_data->link[link_id].bss; if (!cbss || + !(BIT(link_id) & + ieee80211_vif_usable_links(&sdata->vif)) || assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) continue; @@ -3058,7 +3064,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, memset(sdata->vif.bss_conf.tx_pwr_env, 0, sizeof(sdata->vif.bss_conf.tx_pwr_env)); - ieee80211_vif_set_links(sdata, 0); + ieee80211_vif_set_links(sdata, 0, 0); } static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) @@ -3511,7 +3517,7 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata, mutex_lock(&sdata->local->mtx); ieee80211_link_release_channel(&sdata->deflink); - ieee80211_vif_set_links(sdata, 0); + 
ieee80211_vif_set_links(sdata, 0, 0); mutex_unlock(&sdata->local->mtx); } @@ -3570,7 +3576,7 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata, mutex_lock(&sdata->local->mtx); ieee80211_link_release_channel(&sdata->deflink); - ieee80211_vif_set_links(sdata, 0); + ieee80211_vif_set_links(sdata, 0, 0); mutex_unlock(&sdata->local->mtx); } @@ -4979,7 +4985,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, unsigned int link_id; struct sta_info *sta; u64 changed[IEEE80211_MLD_MAX_NUM_LINKS] = {}; - u16 valid_links = 0; + u16 valid_links = 0, dormant_links = 0; int err; mutex_lock(&sdata->local->sta_mtx); @@ -4995,16 +5001,18 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { if (!assoc_data->link[link_id].bss) continue; - valid_links |= BIT(link_id); - if (link_id != assoc_data->assoc_link_id) { + valid_links |= BIT(link_id); + if (assoc_data->link[link_id].disabled) { + dormant_links |= BIT(link_id); + } else if (link_id != assoc_data->assoc_link_id) { err = ieee80211_sta_allocate_link(sta, link_id); if (err) goto out_err; } } - ieee80211_vif_set_links(sdata, valid_links); + ieee80211_vif_set_links(sdata, valid_links, dormant_links); } for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { @@ -5012,7 +5020,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link; struct link_sta_info *link_sta; - if (!cbss) + if (!cbss || assoc_data->link[link_id].disabled) continue; link = sdata_dereference(sdata->link[link_id], sdata); @@ -5084,7 +5092,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, } /* links might have changed due to rejected ones, set them again */ - ieee80211_vif_set_links(sdata, valid_links); + ieee80211_vif_set_links(sdata, valid_links, dormant_links); rate_control_rate_init(sta); @@ -6627,12 +6635,12 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, mlo = true; if (WARN_ON(!ap_mld_addr)) return -EINVAL; - err = ieee80211_vif_set_links(sdata, BIT(link_id)); + err = ieee80211_vif_set_links(sdata, BIT(link_id), 0); } else { if (WARN_ON(ap_mld_addr)) return -EINVAL; ap_mld_addr = cbss->bssid; - err = ieee80211_vif_set_links(sdata, 0); + err = ieee80211_vif_set_links(sdata, 0, 0); link_id = 0; mlo = false; } @@ -6784,7 +6792,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, out_err: ieee80211_link_release_channel(&sdata->deflink); - ieee80211_vif_set_links(sdata, 0); + ieee80211_vif_set_links(sdata, 0, 0); return err; } @@ -7324,10 +7332,11 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, for (i = 0; i < ARRAY_SIZE(assoc_data->link); i++) { assoc_data->link[i].conn_flags = conn_flags; assoc_data->link[i].bss = req->links[i].bss; + assoc_data->link[i].disabled = req->links[i].disabled; } /* if there was no authentication, set up the link */ - err = ieee80211_vif_set_links(sdata, BIT(assoc_link_id)); + err = ieee80211_vif_set_links(sdata, BIT(assoc_link_id), 0); if (err) goto err_clear; } else { -- cgit v1.2.3 From a8df1f580ff24d2d00bf7839551697a0235d16dc Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Sun, 11 Jun 2023 12:14:26 +0300 Subject: wifi: mac80211: Add debugfs entry to report dormant links Add debugfs entry to report dormant (valid but disabled) links. 
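For context on what this entry reports, a minimal self-contained sketch (ordinary userspace C, not kernel code, with BIT() redefined locally) of the bitmap relationship established by the previous patch: dormant links are valid but disabled, and the usable set is valid_links with the dormant bits masked out, mirroring ieee80211_vif_usable_links().

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	uint16_t valid_links   = BIT(0) | BIT(1) | BIT(2);	/* links that exist */
	uint16_t dormant_links = BIT(2);			/* valid but disabled */
	uint16_t usable_links  = valid_links & ~dormant_links;	/* what may be activated */

	printf("valid=0x%04x dormant=0x%04x usable=0x%04x\n",
	       (unsigned int)valid_links, (unsigned int)dormant_links,
	       (unsigned int)usable_links);
	return 0;
}
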
Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230611121219.7fa5f022adfb.Iff6fa3e1a3b00ae726612f9d5a31f7fe2fcbfc68@changeid Signed-off-by: Johannes Berg --- net/mac80211/debugfs_netdev.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'net') diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 1b9293379c1e..63250286dc8b 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -693,6 +693,19 @@ IEEE80211_IF_FILE(dot11MeshConnectedToAuthServer, debugfs_create_file(#name, mode, sdata->vif.debugfs_dir, \ sdata, &name##_ops) +#define DEBUGFS_ADD_X(_bits, _name, _mode) \ + debugfs_create_x##_bits(#_name, _mode, sdata->vif.debugfs_dir, \ + &sdata->vif._name) + +#define DEBUGFS_ADD_X8(_name, _mode) \ + DEBUGFS_ADD_X(8, _name, _mode) + +#define DEBUGFS_ADD_X16(_name, _mode) \ + DEBUGFS_ADD_X(16, _name, _mode) + +#define DEBUGFS_ADD_X32(_name, _mode) \ + DEBUGFS_ADD_X(32, _name, _mode) + #define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400) static void add_common_files(struct ieee80211_sub_if_data *sdata) @@ -722,6 +735,7 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata) DEBUGFS_ADD_MODE(tdls_wider_bw, 0600); DEBUGFS_ADD_MODE(valid_links, 0400); DEBUGFS_ADD_MODE(active_links, 0600); + DEBUGFS_ADD_X16(dormant_links, 0400); } static void add_ap_files(struct ieee80211_sub_if_data *sdata) -- cgit v1.2.3 From 6e21e7b8cd897193cee3c2649640efceb3004ba5 Mon Sep 17 00:00:00 2001 From: Nicolas Cavallari Date: Wed, 14 Jun 2023 15:26:48 +0200 Subject: wifi: mac80211: Remove "Missing iftype sband data/EHT cap" spam In mesh mode, ieee80211_chandef_he_6ghz_oper() is called by mesh_matches_local() for every received mesh beacon. On a 6 GHz mesh of a HE-only phy, this spams that the hardware does not have EHT capabilities, even if the received mesh beacon does not have an EHT element. Unlike HE, not supporting EHT in the 6 GHz band is not an error so do not print anything in this case. Fixes: 5dca295dd767 ("mac80211: Add initial support for EHT and 320 MHz channels") Signed-off-by: Nicolas Cavallari Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230614132648.28995-1-nicolas.cavallari@green-communications.fr Signed-off-by: Johannes Berg --- net/mac80211/util.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 2fc07717bcad..ab60a530198b 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3880,10 +3880,8 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata, } eht_cap = ieee80211_get_eht_iftype_cap(sband, iftype); - if (!eht_cap) { - sdata_info(sdata, "Missing iftype sband data/EHT cap"); + if (!eht_cap) eht_oper = NULL; - } he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper); -- cgit v1.2.3 From 71e7552c90db2a2767f5c17c7ec72296b0d92061 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 15 Jun 2023 12:04:07 -0600 Subject: wifi: wext-core: Fix -Wstringop-overflow warning in ioctl_standard_iw_point() -Wstringop-overflow is legitimately warning us about extra_size pontentially being zero at some point, hence potenially ending up _allocating_ zero bytes of memory for extra pointer and then trying to access such object in a call to copy_from_user(). Fix this by adding a sanity check to ensure we never end up trying to allocate zero bytes of data for extra pointer, before continue executing the rest of the code in the function. 
Address the following -Wstringop-overflow warning seen when built m68k architecture with allyesconfig configuration: from net/wireless/wext-core.c:11: In function '_copy_from_user', inlined from 'copy_from_user' at include/linux/uaccess.h:183:7, inlined from 'ioctl_standard_iw_point' at net/wireless/wext-core.c:825:7: arch/m68k/include/asm/string.h:48:25: warning: '__builtin_memset' writing 1 or more bytes into a region of size 0 overflows the destination [-Wstringop-overflow=] 48 | #define memset(d, c, n) __builtin_memset(d, c, n) | ^~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/uaccess.h:153:17: note: in expansion of macro 'memset' 153 | memset(to + (n - res), 0, res); | ^~~~~~ In function 'kmalloc', inlined from 'kzalloc' at include/linux/slab.h:694:9, inlined from 'ioctl_standard_iw_point' at net/wireless/wext-core.c:819:10: include/linux/slab.h:577:16: note: at offset 1 into destination object of size 0 allocated by '__kmalloc' 577 | return __kmalloc(size, flags); | ^~~~~~~~~~~~~~~~~~~~~~ This help with the ongoing efforts to globally enable -Wstringop-overflow. Link: https://github.com/KSPP/linux/issues/315 Signed-off-by: Gustavo A. R. Silva Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/ZItSlzvIpjdjNfd8@work Signed-off-by: Johannes Berg --- net/wireless/wext-core.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'net') diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index a125fd1fa134..a161c64d1765 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -815,6 +815,12 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, } } + /* Sanity-check to ensure we never end up _allocating_ zero + * bytes of data for extra. + */ + if (extra_size <= 0) + return -EFAULT; + /* kzalloc() ensures NULL-termination for essid_compat. */ extra = kzalloc(extra_size, GFP_KERNEL); if (!extra) -- cgit v1.2.3 From c6112046b1a9c130c455ab0bbe74c3f41138693c Mon Sep 17 00:00:00 2001 From: Mukesh Sisodiya Date: Fri, 16 Jun 2023 09:53:50 +0300 Subject: wifi: cfg80211: make TDLS management link-aware For multi-link operation(MLO) TDLS management frames need to be transmitted on a specific link. The TDLS setup request will add BSSID along with peer address and userspace will pass the link-id based on BSSID value to the driver(or mac80211). 
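A toy sketch of that userspace-side lookup (plain C with made-up addresses; not wpa_supplicant, cfg80211 or driver code): resolve the BSSID named in the TDLS setup request to a link-id, then pass that link-id down alongside the management frame.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN  6
#define MAX_LINKS 3

/* Return the index of the link whose BSSID matches, or -1 if none does
 * (i.e. no specific link is selected).
 */
static int link_id_from_bssid(const unsigned char bssids[][ETH_ALEN],
			      int n_links, const unsigned char *bssid)
{
	for (int i = 0; i < n_links; i++)
		if (!memcmp(bssids[i], bssid, ETH_ALEN))
			return i;
	return -1;
}

int main(void)
{
	/* Made-up per-link BSSIDs of an MLO association */
	const unsigned char bssids[MAX_LINKS][ETH_ALEN] = {
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x03 },
	};
	const unsigned char peer_bssid[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x02
	};

	printf("TDLS frame goes out on link_id %d\n",
	       link_id_from_bssid(bssids, MAX_LINKS, peer_bssid));
	return 0;
}
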
Signed-off-by: Mukesh Sisodiya Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094948.cb3d87c22812.Ia3d15ac4a9a182145bf2d418bcb3ddf4539cd0a7@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 8 ++++---- include/net/cfg80211.h | 7 ++++--- net/mac80211/ieee80211_i.h | 8 ++++---- net/mac80211/tdls.c | 10 +++++----- net/wireless/nl80211.c | 7 +++++-- net/wireless/rdev-ops.h | 15 ++++++++------- net/wireless/trace.h | 13 ++++++++----- 7 files changed, 38 insertions(+), 30 deletions(-) (limited to 'net') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index bcd564dc3554..1ef89cdcaa59 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3753,10 +3753,10 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy, */ static int mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, - const u8 *peer, u8 action_code, u8 dialog_token, - u16 status_code, u32 peer_capability, - bool initiator, const u8 *extra_ies, - size_t extra_ies_len) + const u8 *peer, int link_id, u8 action_code, + u8 dialog_token, u16 status_code, + u32 peer_capability, bool initiator, + const u8 *extra_ies, size_t extra_ies_len) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); int ret; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 388f2a3851a2..3ca581845206 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4607,9 +4607,10 @@ struct cfg80211_ops { struct cfg80211_gtk_rekey_data *data); int (*tdls_mgmt)(struct wiphy *wiphy, struct net_device *dev, - const u8 *peer, u8 action_code, u8 dialog_token, - u16 status_code, u32 peer_capability, - bool initiator, const u8 *buf, size_t len); + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, bool initiator, + const u8 *buf, size_t len); int (*tdls_oper)(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 4afd5095366c..ca8a1e1c8bbd 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2565,10 +2565,10 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, /* TDLS */ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, - const u8 *peer, u8 action_code, u8 dialog_token, - u16 status_code, u32 peer_capability, - bool initiator, const u8 *extra_ies, - size_t extra_ies_len); + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, bool initiator, + const u8 *extra_ies, size_t extra_ies_len); int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, enum nl80211_tdls_operation oper); void ieee80211_tdls_peer_del_work(struct work_struct *wk); diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 52c47674a554..6575b2801676 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -6,7 +6,7 @@ * Copyright 2014, Intel Corporation * Copyright 2014 Intel Mobile Communications GmbH * Copyright 2015 - 2016 Intel Deutschland GmbH - * Copyright (C) 2019, 2021-2022 Intel Corporation + * Copyright (C) 2019, 2021-2023 Intel Corporation */ #include @@ -1185,10 +1185,10 @@ ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev, } int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, - const u8 *peer, u8 
action_code, u8 dialog_token, - u16 status_code, u32 peer_capability, - bool initiator, const u8 *extra_ies, - size_t extra_ies_len) + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, u16 status_code, + u32 peer_capability, bool initiator, + const u8 *extra_ies, size_t extra_ies_len) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int ret; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ec7d467cb096..7b547aeb52f1 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -12262,6 +12262,7 @@ static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info) u32 peer_capability = 0; u16 status_code; u8 *peer; + int link_id; bool initiator; if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) || @@ -12283,8 +12284,9 @@ static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY]) peer_capability = nla_get_u32(info->attrs[NL80211_ATTR_TDLS_PEER_CAPABILITY]); + link_id = nl80211_link_id_or_invalid(info->attrs); - return rdev_tdls_mgmt(rdev, dev, peer, action_code, + return rdev_tdls_mgmt(rdev, dev, peer, link_id, action_code, dialog_token, status_code, peer_capability, initiator, nla_data(info->attrs[NL80211_ATTR_IE]), @@ -17151,7 +17153,8 @@ static const struct genl_small_ops nl80211_small_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = nl80211_tdls_mgmt, .flags = GENL_UNS_ADMIN_PERM, - .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), + .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP | + NL80211_FLAG_MLO_VALID_LINK_ID), }, { .cmd = NL80211_CMD_TDLS_OPER, diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 69b508743e57..f8c310849438 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -899,17 +899,18 @@ static inline int rdev_set_rekey_data(struct cfg80211_registered_device *rdev, static inline int rdev_tdls_mgmt(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *peer, - u8 action_code, u8 dialog_token, - u16 status_code, u32 peer_capability, - bool initiator, const u8 *buf, size_t len) + int link_id, u8 action_code, + u8 dialog_token, u16 status_code, + u32 peer_capability, bool initiator, + const u8 *buf, size_t len) { int ret; - trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, action_code, + trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, link_id, action_code, dialog_token, status_code, peer_capability, initiator, buf, len); - ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code, - dialog_token, status_code, peer_capability, - initiator, buf, len); + ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, link_id, + action_code, dialog_token, status_code, + peer_capability, initiator, buf, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } diff --git a/net/wireless/trace.h b/net/wireless/trace.h index a00da3ebfed5..73a4f8f57438 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -1779,15 +1779,16 @@ DEFINE_EVENT(wiphy_netdev_id_evt, rdev_sched_scan_stop, TRACE_EVENT(rdev_tdls_mgmt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - u8 *peer, u8 action_code, u8 dialog_token, + u8 *peer, int link_id, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *buf, size_t len), - TP_ARGS(wiphy, netdev, peer, action_code, dialog_token, status_code, - peer_capability, initiator, buf, len), + TP_ARGS(wiphy, netdev, peer, link_id, action_code, dialog_token, + status_code, peer_capability, initiator, 
buf, len), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) + __field(int, link_id) __field(u8, action_code) __field(u8, dialog_token) __field(u16, status_code) @@ -1799,6 +1800,7 @@ TRACE_EVENT(rdev_tdls_mgmt, WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); + __entry->link_id = link_id; __entry->action_code = action_code; __entry->dialog_token = dialog_token; __entry->status_code = status_code; @@ -1806,11 +1808,12 @@ TRACE_EVENT(rdev_tdls_mgmt, __entry->initiator = initiator; memcpy(__get_dynamic_array(buf), buf, len); ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM, action_code: %u, " + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM" + ", link_id: %d, action_code: %u " "dialog_token: %u, status_code: %u, peer_capability: %u " "initiator: %s buf: %#.2x ", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, - __entry->action_code, __entry->dialog_token, + __entry->link_id, __entry->action_code, __entry->dialog_token, __entry->status_code, __entry->peer_capability, BOOL_TO_STR(__entry->initiator), ((u8 *)__get_dynamic_array(buf))[0]) -- cgit v1.2.3 From 78a7ea370d5f0eb6f3e774e7f6afece1c3a6860f Mon Sep 17 00:00:00 2001 From: Mukesh Sisodiya Date: Fri, 16 Jun 2023 09:53:51 +0300 Subject: wifi: mac80211: handle TDLS negotiation with MLO Userspace can now select the link to use for TDLS management frames (indicating e.g. which BSSID should be used), use the link_id received from cfg80211 to build the frames. Signed-off-by: Mukesh Sisodiya Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094948.ce1fc230b505.Ie773c5679805001f5a52680d68d9ce0232c57648@changeid [Benjamin fixed some locking] Co-developed-by: Benjamin Berg Signed-off-by: Benjamin Berg [fix sta mutex locking too] Signed-off-by: Johannes Berg --- net/mac80211/tdls.c | 163 +++++++++++++++++++++++++++++----------------------- 1 file changed, 91 insertions(+), 72 deletions(-) (limited to 'net') diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 6575b2801676..a920e2a7a978 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -39,9 +39,10 @@ void ieee80211_tdls_peer_del_work(struct work_struct *wk) mutex_unlock(&local->mtx); } -static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata, +static void ieee80211_tdls_add_ext_capab(struct ieee80211_link_data *link, struct sk_buff *skb) { + struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; bool chan_switch = local->hw.wiphy->features & @@ -50,7 +51,7 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata, !ifmgd->tdls_wider_bw_prohibited; bool buffer_sta = ieee80211_hw_check(&local->hw, SUPPORTS_TDLS_BUFFER_STA); - struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata); + struct ieee80211_supported_band *sband = ieee80211_get_link_sband(link); bool vht = sband && sband->vht_cap.vht_supported; u8 *pos = skb_put(skb, 10); @@ -152,13 +153,13 @@ ieee80211_tdls_add_supp_channels(struct ieee80211_sub_if_data *sdata, *pos = 2 * subband_cnt; } -static void ieee80211_tdls_add_oper_classes(struct ieee80211_sub_if_data *sdata, +static void ieee80211_tdls_add_oper_classes(struct ieee80211_link_data *link, struct sk_buff *skb) { u8 *pos; u8 op_class; - if (!ieee80211_chandef_to_operating_class(&sdata->vif.bss_conf.chandef, + if (!ieee80211_chandef_to_operating_class(&link->conf->chandef, &op_class)) return; @@ -180,7 +181,7 @@ static void ieee80211_tdls_add_bss_coex_ie(struct sk_buff *skb) *pos++ = 
WLAN_BSS_COEX_INFORMATION_REQUEST; } -static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata, +static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_link_data *link, u16 status_code) { struct ieee80211_supported_band *sband; @@ -189,7 +190,8 @@ static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata, if (status_code != 0) return 0; - sband = ieee80211_get_sband(sdata); + sband = ieee80211_get_link_sband(link); + if (sband && sband->band == NL80211_BAND_2GHZ) { return WLAN_CAPABILITY_SHORT_SLOT_TIME | WLAN_CAPABILITY_SHORT_PREAMBLE; @@ -198,10 +200,11 @@ static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata, return 0; } -static void ieee80211_tdls_add_link_ie(struct ieee80211_sub_if_data *sdata, +static void ieee80211_tdls_add_link_ie(struct ieee80211_link_data *link, struct sk_buff *skb, const u8 *peer, bool initiator) { + struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_tdls_lnkie *lnkid; const u8 *init_addr, *rsp_addr; @@ -218,7 +221,7 @@ static void ieee80211_tdls_add_link_ie(struct ieee80211_sub_if_data *sdata, lnkid->ie_type = WLAN_EID_LINK_ID; lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2; - memcpy(lnkid->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN); + memcpy(lnkid->bssid, link->u.mgd.bssid, ETH_ALEN); memcpy(lnkid->init_sta, init_addr, ETH_ALEN); memcpy(lnkid->resp_sta, rsp_addr, ETH_ALEN); } @@ -359,11 +362,12 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, } static void -ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, +ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link, struct sk_buff *skb, const u8 *peer, u8 action_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { + struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_supported_band *sband; struct ieee80211_local *local = sdata->local; struct ieee80211_sta_ht_cap ht_cap; @@ -372,8 +376,8 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, size_t offset = 0, noffset; u8 *pos; - sband = ieee80211_get_sband(sdata); - if (!sband) + sband = ieee80211_get_link_sband(link); + if (WARN_ON_ONCE(!sband)) return; ieee80211_add_srates_ie(sdata, skb, false, sband->band); @@ -397,7 +401,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, offset = noffset; } - ieee80211_tdls_add_ext_capab(sdata, skb); + ieee80211_tdls_add_ext_capab(link, skb); /* add the QoS element if we support it */ if (local->hw.queues >= IEEE80211_NUM_ACS && @@ -426,20 +430,16 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, offset = noffset; } - mutex_lock(&local->sta_mtx); - /* we should have the peer STA if we're already responding */ if (action_code == WLAN_TDLS_SETUP_RESPONSE) { sta = sta_info_get(sdata, peer); - if (WARN_ON_ONCE(!sta)) { - mutex_unlock(&local->sta_mtx); + if (WARN_ON_ONCE(!sta)) return; - } - sta->tdls_chandef = sdata->vif.bss_conf.chandef; + sta->tdls_chandef = link->conf->chandef; } - ieee80211_tdls_add_oper_classes(sdata, skb); + ieee80211_tdls_add_oper_classes(link, skb); /* * with TDLS we can switch channels, and HT-caps are not necessarily @@ -472,7 +472,7 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, (ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) ieee80211_tdls_add_bss_coex_ie(skb); - ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); + ieee80211_tdls_add_link_ie(link, skb, peer, initiator); /* add any custom IEs that go before VHT 
capabilities */ if (extra_ies_len) { @@ -529,8 +529,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, ieee80211_tdls_chandef_vht_upgrade(sdata, sta); } - mutex_unlock(&local->sta_mtx); - /* add any remaining IEs */ if (extra_ies_len) { noffset = extra_ies_len; @@ -540,31 +538,29 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata, } static void -ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, +ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_link_data *link, struct sk_buff *skb, const u8 *peer, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { + struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; size_t offset = 0, noffset; struct sta_info *sta, *ap_sta; struct ieee80211_supported_band *sband; u8 *pos; - sband = ieee80211_get_sband(sdata); - if (!sband) + sband = ieee80211_get_link_sband(link); + if (WARN_ON_ONCE(!sband)) return; - mutex_lock(&local->sta_mtx); - sta = sta_info_get(sdata, peer); - ap_sta = sta_info_get(sdata, sdata->deflink.u.mgd.bssid); - if (WARN_ON_ONCE(!sta || !ap_sta)) { - mutex_unlock(&local->sta_mtx); + ap_sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); + + if (WARN_ON_ONCE(!sta || !ap_sta)) return; - } - sta->tdls_chandef = sdata->vif.bss_conf.chandef; + sta->tdls_chandef = link->conf->chandef; /* add any custom IEs that go before the QoS IE */ if (extra_ies_len) { @@ -610,11 +606,11 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation)); ieee80211_ie_build_ht_oper(pos, &sta->sta.deflink.ht_cap, - &sdata->vif.bss_conf.chandef, prot, + &link->conf->chandef, prot, true); } - ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); + ieee80211_tdls_add_link_ie(link, skb, peer, initiator); /* only include VHT-operation if not on the 2.4GHz band */ if (sband->band != NL80211_BAND_2GHZ && @@ -631,8 +627,6 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, &sta->tdls_chandef); } - mutex_unlock(&local->sta_mtx); - /* add any remaining IEs */ if (extra_ies_len) { noffset = extra_ies_len; @@ -641,7 +635,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata, } static void -ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_sub_if_data *sdata, +ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_link_data *link, struct sk_buff *skb, const u8 *peer, bool initiator, const u8 *extra_ies, size_t extra_ies_len, u8 oper_class, @@ -670,7 +664,7 @@ ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_sub_if_data *sdata, offset = noffset; } - ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); + ieee80211_tdls_add_link_ie(link, skb, peer, initiator); /* add any remaining IEs */ if (extra_ies_len) { @@ -680,20 +674,20 @@ ieee80211_tdls_add_chan_switch_req_ies(struct ieee80211_sub_if_data *sdata, } static void -ieee80211_tdls_add_chan_switch_resp_ies(struct ieee80211_sub_if_data *sdata, +ieee80211_tdls_add_chan_switch_resp_ies(struct ieee80211_link_data *link, struct sk_buff *skb, const u8 *peer, u16 status_code, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { if (status_code == 0) - ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); + ieee80211_tdls_add_link_ie(link, skb, peer, initiator); if (extra_ies_len) skb_put_data(skb, extra_ies, extra_ies_len); } -static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata, +static void ieee80211_tdls_add_ies(struct ieee80211_link_data *link, struct 
sk_buff *skb, const u8 *peer, u8 action_code, u16 status_code, bool initiator, const u8 *extra_ies, @@ -705,7 +699,8 @@ static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata, case WLAN_TDLS_SETUP_RESPONSE: case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: if (status_code == 0) - ieee80211_tdls_add_setup_start_ies(sdata, skb, peer, + ieee80211_tdls_add_setup_start_ies(link, + skb, peer, action_code, initiator, extra_ies, @@ -713,7 +708,7 @@ static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata, break; case WLAN_TDLS_SETUP_CONFIRM: if (status_code == 0) - ieee80211_tdls_add_setup_cfm_ies(sdata, skb, peer, + ieee80211_tdls_add_setup_cfm_ies(link, skb, peer, initiator, extra_ies, extra_ies_len); break; @@ -722,16 +717,17 @@ static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata, if (extra_ies_len) skb_put_data(skb, extra_ies, extra_ies_len); if (status_code == 0 || action_code == WLAN_TDLS_TEARDOWN) - ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator); + ieee80211_tdls_add_link_ie(link, skb, + peer, initiator); break; case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: - ieee80211_tdls_add_chan_switch_req_ies(sdata, skb, peer, + ieee80211_tdls_add_chan_switch_req_ies(link, skb, peer, initiator, extra_ies, extra_ies_len, oper_class, chandef); break; case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: - ieee80211_tdls_add_chan_switch_resp_ies(sdata, skb, peer, + ieee80211_tdls_add_chan_switch_resp_ies(link, skb, peer, status_code, initiator, extra_ies, extra_ies_len); @@ -742,6 +738,7 @@ static void ieee80211_tdls_add_ies(struct ieee80211_sub_if_data *sdata, static int ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, + struct ieee80211_link_data *link, const u8 *peer, u8 action_code, u8 dialog_token, u16 status_code, struct sk_buff *skb) { @@ -766,7 +763,7 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, skb_put(skb, sizeof(tf->u.setup_req)); tf->u.setup_req.dialog_token = dialog_token; tf->u.setup_req.capability = - cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata, + cpu_to_le16(ieee80211_get_tdls_sta_capab(link, status_code)); break; case WLAN_TDLS_SETUP_RESPONSE: @@ -777,7 +774,7 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, tf->u.setup_resp.status_code = cpu_to_le16(status_code); tf->u.setup_resp.dialog_token = dialog_token; tf->u.setup_resp.capability = - cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata, + cpu_to_le16(ieee80211_get_tdls_sta_capab(link, status_code)); break; case WLAN_TDLS_SETUP_CONFIRM: @@ -824,7 +821,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev, static int ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, - const u8 *peer, u8 action_code, u8 dialog_token, + const u8 *peer, struct ieee80211_link_data *link, + u8 action_code, u8 dialog_token, u16 status_code, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); @@ -833,8 +831,7 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, mgmt = skb_put_zero(skb, 24); memcpy(mgmt->da, peer, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); - memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN); - + memcpy(mgmt->bssid, link->u.mgd.bssid, ETH_ALEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); @@ -847,7 +844,7 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, mgmt->u.action.u.tdls_discover_resp.dialog_token = dialog_token; 
mgmt->u.action.u.tdls_discover_resp.capability = - cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata, + cpu_to_le16(ieee80211_get_tdls_sta_capab(link, status_code)); break; default: @@ -859,15 +856,23 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev, static struct sk_buff * ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata, - const u8 *peer, u8 action_code, - u8 dialog_token, u16 status_code, - bool initiator, const u8 *extra_ies, - size_t extra_ies_len, u8 oper_class, + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, + u16 status_code, bool initiator, + const u8 *extra_ies, size_t extra_ies_len, + u8 oper_class, struct cfg80211_chan_def *chandef) { struct ieee80211_local *local = sdata->local; struct sk_buff *skb; int ret; + struct ieee80211_link_data *link; + + link_id = link_id >= 0 ? link_id : 0; + rcu_read_lock(); + link = rcu_dereference(sdata->link[link_id]); + if (WARN_ON(!link)) + goto unlock; skb = netdev_alloc_skb(sdata->dev, local->hw.extra_tx_headroom + @@ -887,7 +892,7 @@ ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata, extra_ies_len + sizeof(struct ieee80211_tdls_lnkie)); if (!skb) - return NULL; + goto unlock; skb_reserve(skb, local->hw.extra_tx_headroom); @@ -900,13 +905,13 @@ ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata, case WLAN_TDLS_CHANNEL_SWITCH_REQUEST: case WLAN_TDLS_CHANNEL_SWITCH_RESPONSE: ret = ieee80211_prep_tdls_encap_data(local->hw.wiphy, - sdata->dev, peer, + sdata->dev, link, peer, action_code, dialog_token, status_code, skb); break; case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: ret = ieee80211_prep_tdls_direct(local->hw.wiphy, sdata->dev, - peer, action_code, + peer, link, action_code, dialog_token, status_code, skb); break; @@ -918,19 +923,23 @@ ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata, if (ret < 0) goto fail; - ieee80211_tdls_add_ies(sdata, skb, peer, action_code, status_code, + ieee80211_tdls_add_ies(link, skb, peer, action_code, status_code, initiator, extra_ies, extra_ies_len, oper_class, chandef); + rcu_read_unlock(); return skb; fail: dev_kfree_skb(skb); +unlock: + rcu_read_unlock(); return NULL; } static int ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, - const u8 *peer, u8 action_code, u8 dialog_token, + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len, u8 oper_class, @@ -988,7 +997,8 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, if (ret < 0) goto fail; - skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer, action_code, + skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer, + link_id, action_code, dialog_token, status_code, initiator, extra_ies, extra_ies_len, oper_class, @@ -999,7 +1009,7 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, } if (action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) { - ieee80211_tx_skb(sdata, skb); + ieee80211_tx_skb_tid(sdata, skb, 7, link_id); return 0; } @@ -1066,7 +1076,8 @@ fail: static int ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev, - const u8 *peer, u8 action_code, u8 dialog_token, + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len) { @@ -1115,7 +1126,8 @@ ieee80211_tdls_mgmt_setup(struct wiphy *wiphy, struct net_device *dev, 
mutex_unlock(&local->mtx); /* we cannot take the mutex while preparing the setup packet */ - ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code, + ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, + link_id, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len, 0, @@ -1139,7 +1151,8 @@ out_unlock: static int ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev, - const u8 *peer, u8 action_code, u8 dialog_token, + const u8 *peer, int link_id, + u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *extra_ies, size_t extra_ies_len) @@ -1159,7 +1172,8 @@ ieee80211_tdls_mgmt_teardown(struct wiphy *wiphy, struct net_device *dev, IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN); ieee80211_flush_queues(local, sdata, false); - ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code, + ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, + link_id, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len, 0, @@ -1204,13 +1218,14 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: case WLAN_TDLS_SETUP_RESPONSE: - ret = ieee80211_tdls_mgmt_setup(wiphy, dev, peer, action_code, + ret = ieee80211_tdls_mgmt_setup(wiphy, dev, peer, + link_id, action_code, dialog_token, status_code, peer_capability, initiator, extra_ies, extra_ies_len); break; case WLAN_TDLS_TEARDOWN: - ret = ieee80211_tdls_mgmt_teardown(wiphy, dev, peer, + ret = ieee80211_tdls_mgmt_teardown(wiphy, dev, peer, link_id, action_code, dialog_token, status_code, peer_capability, initiator, @@ -1228,7 +1243,7 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: /* no special handling */ ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, - action_code, + link_id, action_code, dialog_token, status_code, peer_capability, @@ -1240,8 +1255,8 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, break; } - tdls_dbg(sdata, "TDLS mgmt action %d peer %pM status %d\n", - action_code, peer, ret); + tdls_dbg(sdata, "TDLS mgmt action %d peer %pM link_id %d status %d\n", + action_code, peer, link_id, ret); return ret; } @@ -1497,6 +1512,7 @@ ieee80211_tdls_ch_sw_tmpl_get(struct sta_info *sta, u8 oper_class, int extra_ies_len = 2 + sizeof(struct ieee80211_ch_switch_timing); u8 *pos = extra_ies; struct sk_buff *skb; + int link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0; /* * if chandef points to a wide channel add a Secondary-Channel @@ -1524,6 +1540,7 @@ ieee80211_tdls_ch_sw_tmpl_get(struct sta_info *sta, u8 oper_class, iee80211_tdls_add_ch_switch_timing(pos, 0, 0); skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr, + link_id, WLAN_TDLS_CHANNEL_SWITCH_REQUEST, 0, 0, !sta->sta.tdls_initiator, extra_ies, extra_ies_len, @@ -1644,11 +1661,13 @@ ieee80211_tdls_ch_sw_resp_tmpl_get(struct sta_info *sta, struct ieee80211_sub_if_data *sdata = sta->sdata; struct sk_buff *skb; u8 extra_ies[2 + sizeof(struct ieee80211_ch_switch_timing)]; + int link_id = sta->sta.valid_links ? 
ffs(sta->sta.valid_links) - 1 : 0; /* initial timing are always zero in the template */ iee80211_tdls_add_ch_switch_timing(extra_ies, 0, 0); skb = ieee80211_tdls_build_mgmt_packet_data(sdata, sta->sta.addr, + link_id, WLAN_TDLS_CHANNEL_SWITCH_RESPONSE, 0, 0, !sta->sta.tdls_initiator, extra_ies, sizeof(extra_ies), 0, NULL); -- cgit v1.2.3 From 8cc07265b69141f8ed9597d0f27185239c241c80 Mon Sep 17 00:00:00 2001 From: Abhishek Naik Date: Fri, 16 Jun 2023 09:53:52 +0300 Subject: wifi: mac80211: handle TDLS data frames with MLO If the device is associated with an AP MLD, then TDLS data frames should have - A1 = peer address, - A2 = own MLD address (since the peer may now know about MLO), and - A3 = BSSID. Change the code to do that. Signed-off-by: Abhishek Naik Signed-off-by: Mukesh Sisodiya Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094948.4bf648b63dfd.I98ef1dabd14b74a92120750f7746a7a512011701@changeid Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index d8460a14b6bd..b5183f6365aa 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2753,10 +2753,20 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, tdls_peer = test_sta_flag(sta, WLAN_STA_TDLS_PEER); if (tdls_peer) { + /* For TDLS only one link can be valid with peer STA */ + int tdls_link_id = sta->sta.valid_links ? + __ffs(sta->sta.valid_links) : 0; + struct ieee80211_link_data *link; + /* DA SA BSSID */ memcpy(hdr.addr1, skb->data, ETH_ALEN); memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN); - memcpy(hdr.addr3, sdata->deflink.u.mgd.bssid, ETH_ALEN); + link = rcu_dereference(sdata->link[tdls_link_id]); + if (WARN_ON_ONCE(!link)) { + ret = -EINVAL; + goto free; + } + memcpy(hdr.addr3, link->u.mgd.bssid, ETH_ALEN); hdrlen = 24; } else if (sdata->u.mgd.use_4addr && cpu_to_be16(ethertype) != sdata->control_port_protocol) { @@ -3066,10 +3076,18 @@ void ieee80211_check_fast_xmit(struct sta_info *sta) break; case NL80211_IFTYPE_STATION: if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { + /* For TDLS only one link can be valid with peer STA */ + int tdls_link_id = sta->sta.valid_links ? + __ffs(sta->sta.valid_links) : 0; + struct ieee80211_link_data *link; + /* DA SA BSSID */ build.da_offs = offsetof(struct ieee80211_hdr, addr1); build.sa_offs = offsetof(struct ieee80211_hdr, addr2); - memcpy(hdr->addr3, sdata->deflink.u.mgd.bssid, ETH_ALEN); + link = rcu_dereference(sdata->link[tdls_link_id]); + if (WARN_ON_ONCE(!link)) + break; + memcpy(hdr->addr3, link->u.mgd.bssid, ETH_ALEN); build.hdr_len = 24; break; } -- cgit v1.2.3 From 71b3b7ac3eb8ceae582608dda0566e95c1108708 Mon Sep 17 00:00:00 2001 From: Abhishek Naik Date: Fri, 16 Jun 2023 09:53:53 +0300 Subject: wifi: mac80211: Add HE and EHT capa elements in TDLS frames Add HE and EHT capabilities IE in TDLS setup request, response, confirm and discovery response frames. 
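For illustration, a minimal standalone sketch of how a receiver could check whether a TDLS setup frame now carries these elements by walking the IE buffer. This is not a mac80211 helper; the extension IDs (HE = 35, EHT = 108) are assumed values matching WLAN_EID_EXT_HE_CAPABILITY / WLAN_EID_EXT_EHT_CAPABILITY.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  #define EID_EXTENSION    255
  #define EXT_EID_HE_CAPA   35   /* assumed: HE Capabilities extension ID */
  #define EXT_EID_EHT_CAPA 108   /* assumed: EHT Capabilities extension ID */

  /* Walk the id/len element stream and report whether HE and EHT
   * capability elements are present. */
  static void scan_tdls_ies(const uint8_t *ies, size_t len,
                            bool *has_he, bool *has_eht)
  {
          size_t pos = 0;

          *has_he = false;
          *has_eht = false;
          while (pos + 2 <= len && pos + 2 + ies[pos + 1] <= len) {
                  if (ies[pos] == EID_EXTENSION && ies[pos + 1] >= 1) {
                          if (ies[pos + 2] == EXT_EID_HE_CAPA)
                                  *has_he = true;
                          else if (ies[pos + 2] == EXT_EID_EHT_CAPA)
                                  *has_eht = true;
                  }
                  pos += 2 + ies[pos + 1];
          }
  }
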
Signed-off-by: Abhishek Naik Signed-off-by: Mukesh Sisodiya Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094948.c77128828b0d.Ied2d8800847c759718c2c35e8f6c0902afd6bca1@changeid Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 1 + net/mac80211/tdls.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++ net/mac80211/util.c | 2 +- 3 files changed, 88 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ca8a1e1c8bbd..f8d5f37ebe9a 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2426,6 +2426,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, const u8 *da, const u8 *bssid, u16 stype, u16 reason, bool send_frame, u8 *frame_buf); +u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end); enum { IEEE80211_PROBE_FLAG_DIRECTED = BIT(0), diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index a920e2a7a978..a90404d0cd78 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -372,6 +372,8 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link, struct ieee80211_local *local = sdata->local; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; + const struct ieee80211_sta_he_cap *he_cap; + const struct ieee80211_sta_eht_cap *eht_cap; struct sta_info *sta = NULL; size_t offset = 0, noffset; u8 *pos; @@ -529,6 +531,83 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link, ieee80211_tdls_chandef_vht_upgrade(sdata, sta); } + /* add any custom IEs that go before HE capabilities */ + if (extra_ies_len) { + static const u8 before_he_cap[] = { + WLAN_EID_EXTENSION, + WLAN_EID_EXT_FILS_REQ_PARAMS, + WLAN_EID_AP_CSN, + }; + noffset = ieee80211_ie_split(extra_ies, extra_ies_len, + before_he_cap, + ARRAY_SIZE(before_he_cap), + offset); + skb_put_data(skb, extra_ies + offset, noffset - offset); + offset = noffset; + } + + /* build the HE-cap from sband */ + he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + if (he_cap && + (action_code == WLAN_TDLS_SETUP_REQUEST || + action_code == WLAN_TDLS_SETUP_RESPONSE || + action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) { + __le16 he_6ghz_capa; + u8 cap_size; + + cap_size = + 2 + 1 + sizeof(he_cap->he_cap_elem) + + ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) + + ieee80211_he_ppe_size(he_cap->ppe_thres[0], + he_cap->he_cap_elem.phy_cap_info); + pos = skb_put(skb, cap_size); + pos = ieee80211_ie_build_he_cap(0, pos, he_cap, pos + cap_size); + + /* Build HE 6Ghz capa IE from sband */ + if (sband->band == NL80211_BAND_6GHZ) { + cap_size = 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa); + pos = skb_put(skb, cap_size); + he_6ghz_capa = + ieee80211_get_he_6ghz_capa_vif(sband, &sdata->vif); + pos = ieee80211_write_he_6ghz_cap(pos, he_6ghz_capa, + pos + cap_size); + } + } + + /* add any custom IEs that go before EHT capabilities */ + if (extra_ies_len) { + static const u8 before_he_cap[] = { + WLAN_EID_EXTENSION, + WLAN_EID_EXT_FILS_REQ_PARAMS, + WLAN_EID_AP_CSN, + }; + + noffset = ieee80211_ie_split(extra_ies, extra_ies_len, + before_he_cap, + ARRAY_SIZE(before_he_cap), + offset); + skb_put_data(skb, extra_ies + offset, noffset - offset); + offset = noffset; + } + + /* build the EHT-cap from sband */ + eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); + if (he_cap && eht_cap && + (action_code == WLAN_TDLS_SETUP_REQUEST || + action_code == WLAN_TDLS_SETUP_RESPONSE || + action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES)) { + u8 
cap_size; + + cap_size = + 2 + 1 + sizeof(eht_cap->eht_cap_elem) + + ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, + &eht_cap->eht_cap_elem, false) + + ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0], + eht_cap->eht_cap_elem.phy_cap_info); + pos = skb_put(skb, cap_size); + ieee80211_ie_build_eht_cap(pos, he_cap, eht_cap, pos + cap_size, false); + } + /* add any remaining IEs */ if (extra_ies_len) { noffset = extra_ies_len; @@ -885,6 +964,13 @@ ieee80211_tdls_build_mgmt_packet_data(struct ieee80211_sub_if_data *sdata, sizeof(struct ieee80211_ht_operation)) + 2 + max(sizeof(struct ieee80211_vht_cap), sizeof(struct ieee80211_vht_operation)) + + 2 + 1 + sizeof(struct ieee80211_he_cap_elem) + + sizeof(struct ieee80211_he_mcs_nss_supp) + + IEEE80211_HE_PPE_THRES_MAX_LEN + + 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa) + + 2 + 1 + sizeof(struct ieee80211_eht_cap_elem) + + sizeof(struct ieee80211_eht_mcs_nss_supp) + + IEEE80211_EHT_PPE_THRES_MAX_LEN + 50 + /* supported channels */ 3 + /* 40/20 BSS coex */ 4 + /* AID */ diff --git a/net/mac80211/util.c b/net/mac80211/util.c index ab60a530198b..a0407baa6cd3 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1918,7 +1918,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata, } } -static u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end) +u8 *ieee80211_write_he_6ghz_cap(u8 *pos, __le16 cap, u8 *end) { if ((end - pos) < 5) return pos; -- cgit v1.2.3 From 05995d05aab399fcb1fba579397bff216dbf3e86 Mon Sep 17 00:00:00 2001 From: Mukesh Sisodiya Date: Fri, 16 Jun 2023 09:53:54 +0300 Subject: wifi: mac80211: Extend AID element addition for TDLS frames Extend AID element addition in TDLS setup request and response frames to add it when HE or EHT capabilities are supported. 
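Reduced to a predicate, the new rule looks like the hedged sketch below; the enum values stand in for WLAN_TDLS_SETUP_REQUEST/RESPONSE and the names are illustrative only.

  #include <stdbool.h>

  enum tdls_action {
          TDLS_SETUP_REQUEST,
          TDLS_SETUP_RESPONSE,
          TDLS_OTHER,
  };

  /* After this patch the AID element is added whenever VHT, HE or EHT is
   * supported and the frame is a setup request or response; previously it
   * was tied to the VHT case only. */
  static bool tdls_needs_aid(bool vht, bool he, bool eht, enum tdls_action a)
  {
          return (vht || he || eht) &&
                 (a == TDLS_SETUP_REQUEST || a == TDLS_SETUP_RESPONSE);
  }
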
Signed-off-by: Mukesh Sisodiya Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094948.483bf44ce684.Ia2387eb24c06fa41febc213923160bedafce2085@changeid Signed-off-by: Johannes Berg --- net/mac80211/tdls.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'net') diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index a90404d0cd78..a4af3b7675ef 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -499,17 +499,21 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link, offset = noffset; } - /* build the VHT-cap similarly to the HT-cap */ + /* add AID if VHT, HE or EHT capabilities supported */ memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap)); + he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); + if ((vht_cap.vht_supported || he_cap || eht_cap) && + (action_code == WLAN_TDLS_SETUP_REQUEST || + action_code == WLAN_TDLS_SETUP_RESPONSE)) + ieee80211_tdls_add_aid(sdata, skb); + + /* build the VHT-cap similarly to the HT-cap */ if ((action_code == WLAN_TDLS_SETUP_REQUEST || action_code == WLAN_PUB_ACTION_TDLS_DISCOVER_RES) && vht_cap.vht_supported) { ieee80211_apply_vhtcap_overrides(sdata, &vht_cap); - /* the AID is present only when VHT is implemented */ - if (action_code == WLAN_TDLS_SETUP_REQUEST) - ieee80211_tdls_add_aid(sdata, skb); - pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap); } else if (action_code == WLAN_TDLS_SETUP_RESPONSE && @@ -517,9 +521,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link, /* the peer caps are already intersected with our own */ memcpy(&vht_cap, &sta->sta.deflink.vht_cap, sizeof(vht_cap)); - /* the AID is present only when VHT is implemented */ - ieee80211_tdls_add_aid(sdata, skb); - pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2); ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap); @@ -547,7 +548,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link, } /* build the HE-cap from sband */ - he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); if (he_cap && (action_code == WLAN_TDLS_SETUP_REQUEST || action_code == WLAN_TDLS_SETUP_RESPONSE || @@ -591,7 +591,6 @@ ieee80211_tdls_add_setup_start_ies(struct ieee80211_link_data *link, } /* build the EHT-cap from sband */ - eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); if (he_cap && eht_cap && (action_code == WLAN_TDLS_SETUP_REQUEST || action_code == WLAN_TDLS_SETUP_RESPONSE || -- cgit v1.2.3 From 276311d5814f98b113b68302c96500fd316c2cbf Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Fri, 16 Jun 2023 09:53:55 +0300 Subject: wifi: mac80211: stop passing cbss to parser In both of these cases (config_link, prep_channel) it is not needed to parse the MBSSID data for a nontransmitted BSS. In the config_link case the frame does not contain any MBSSID element and inheritance rules are only needed for the ML STA profile. While in the prep_channel case the IEs have already been processed by cfg80211 and are already exploded. 
Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094948.66d2605ff0ad.I7cdd1d390e7b0735c46204231a9e636d45b7f1e4@changeid Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 1 - 1 file changed, 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 738822b82d3e..171ba9d237c2 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4709,7 +4709,6 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ; struct ieee80211_bss *bss = (void *)cbss->priv; struct ieee80211_elems_parse_params parse_params = { - .bss = cbss, .link_id = -1, .from_ap = true, }; -- cgit v1.2.3 From 05050a2bc0c1599e95ef15a6ae34cb68ceabb06a Mon Sep 17 00:00:00 2001 From: Anjaneyulu Date: Fri, 16 Jun 2023 09:53:56 +0300 Subject: wifi: mac80211: add consistency check for compat chandef Add NULL check for compat variable to avoid crash in cfg80211_chandef_compatible() if it got called with some mixed up channel context where not all the users compatible with each other, which shouldn't happen. Signed-off-by: Anjaneyulu Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094948.ae0f10dfd36b.Iea98c74aeb87bf6ef49f6d0c8687bba0dbea2abd@changeid Signed-off-by: Johannes Berg --- net/mac80211/chan.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'net') diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 168bf3edd4b4..68952752b599 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -802,6 +802,11 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, } } + if (WARN_ON_ONCE(!compat)) { + rcu_read_unlock(); + return; + } + /* TDLS peers can sometimes affect the chandef width */ list_for_each_entry_rcu(sta, &local->sta_list, list) { if (!sta->uploaded || -- cgit v1.2.3 From 40e38c8dfce1cf543c8bd06e2c1c03a36bc22fe4 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Fri, 16 Jun 2023 09:53:57 +0300 Subject: wifi: mac80211: feed the link_id to cfg80211_ch_switch_started_notify For now, fix this only in station mode. We'll need to fix the AP mode later. Signed-off-by: Emmanuel Grumbach Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094948.41e662ba1d68.I8faae5acb45c58cfeeb6bc6247aedbdaf9249d32@changeid Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 171ba9d237c2..1fc66f09cbb8 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1981,8 +1981,9 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link, IEEE80211_QUEUE_STOP_REASON_CSA); mutex_unlock(&local->mtx); - cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, 0, - csa_ie.count, csa_ie.mode, 0); + cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chandef, + link->link_id, csa_ie.count, + csa_ie.mode, 0); if (local->ops->channel_switch) { /* use driver's channel switch callback */ -- cgit v1.2.3 From c2edd3013266801d9c8595433c6eea462511f872 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Fri, 16 Jun 2023 09:53:58 +0300 Subject: wifi: cfg80211: move regulatory_hint_found_beacon to be earlier These calls do not require any locking, so move them in preparation for the next patches. 
A minor change/bugfix is to not hint a beacon for nontransmitted BSSes Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094948.a5bf3558eae9.I33c7465d983c8bef19deb7a533ee475a16f91774@changeid Signed-off-by: Johannes Berg --- net/wireless/scan.c | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index ce2104dc05c6..19e7014f8bc3 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1971,6 +1971,18 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator; } else { ts = jiffies; + + if (channel->band == NL80211_BAND_60GHZ) { + bss_type = capability & WLAN_CAPABILITY_DMG_TYPE_MASK; + if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP || + bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS) + regulatory_hint_found_beacon(wiphy, channel, + gfp); + } else { + if (capability & WLAN_CAPABILITY_ESS) + regulatory_hint_found_beacon(wiphy, channel, + gfp); + } } /* @@ -2007,16 +2019,6 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, if (!res) return NULL; - if (channel->band == NL80211_BAND_60GHZ) { - bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK; - if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP || - bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS) - regulatory_hint_found_beacon(wiphy, channel, gfp); - } else { - if (res->pub.capability & WLAN_CAPABILITY_ESS) - regulatory_hint_found_beacon(wiphy, channel, gfp); - } - if (non_tx_data) { /* this is a nontransmitting bss, we need to add it to * transmitting bss' list if it is not there @@ -2445,6 +2447,16 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); } + if (channel->band == NL80211_BAND_60GHZ) { + bss_type = capability & WLAN_CAPABILITY_DMG_TYPE_MASK; + if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP || + bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS) + regulatory_hint_found_beacon(wiphy, channel, gfp); + } else { + if (capability & WLAN_CAPABILITY_ESS) + regulatory_hint_found_beacon(wiphy, channel, gfp); + } + ies = kzalloc(sizeof(*ies) + ielen, gfp); if (!ies) return NULL; @@ -2478,16 +2490,6 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, if (!res) return NULL; - if (channel->band == NL80211_BAND_60GHZ) { - bss_type = res->pub.capability & WLAN_CAPABILITY_DMG_TYPE_MASK; - if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP || - bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS) - regulatory_hint_found_beacon(wiphy, channel, gfp); - } else { - if (res->pub.capability & WLAN_CAPABILITY_ESS) - regulatory_hint_found_beacon(wiphy, channel, gfp); - } - trace_cfg80211_return_bss(&res->pub); /* cfg80211_bss_update gives us a referenced result */ return &res->pub; -- cgit v1.2.3 From 6b7c93c1439c8833ada9068f612df2de0571fd00 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Fri, 16 Jun 2023 09:53:59 +0300 Subject: wifi: cfg80211: keep bss_lock held when informing It is reasonable to hold bss_lock for a little bit longer after cfg80211_bss_update is done. Right now, this does not make any big difference, but doing so in preparation for the next patch which adds a call to the driver. 
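Reduced to its locking pattern, the change looks like the sketch below (a userspace analogy with a pthread mutex standing in for bss_lock, not the cfg80211 code): the update and the upcoming driver notification share one critical section instead of dropping the lock in between, so the notification always sees the freshly updated entry.

  #include <pthread.h>

  static pthread_mutex_t bss_lock = PTHREAD_MUTEX_INITIALIZER;

  struct bss_entry {
          unsigned long generation;
  };

  static struct bss_entry *bss_update_locked(struct bss_entry *e)
  {
          e->generation++;        /* stands in for __cfg80211_bss_update() */
          return e;
  }

  static void notify_driver(struct bss_entry *e)
  {
          (void)e;                /* stands in for the future driver call */
  }

  static void update_and_notify(struct bss_entry *e)
  {
          pthread_mutex_lock(&bss_lock);
          e = bss_update_locked(e);
          if (e)
                  notify_driver(e);       /* still under the lock */
          pthread_mutex_unlock(&bss_lock);
  }
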
Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094948.61701884ff0d.I3358228209eb6766202aff04d1bae0b8fdff611f@changeid Signed-off-by: Johannes Berg --- net/wireless/scan.c | 66 ++++++++++++++++++++++++++++++++++------------------- 1 file changed, 42 insertions(+), 24 deletions(-) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 19e7014f8bc3..8984f74da891 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1701,10 +1701,10 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, } /* Returned bss is reference counted and must be cleaned up appropriately. */ -struct cfg80211_internal_bss * -cfg80211_bss_update(struct cfg80211_registered_device *rdev, - struct cfg80211_internal_bss *tmp, - bool signal_valid, unsigned long ts) +static struct cfg80211_internal_bss * +__cfg80211_bss_update(struct cfg80211_registered_device *rdev, + struct cfg80211_internal_bss *tmp, + bool signal_valid, unsigned long ts) { struct cfg80211_internal_bss *found = NULL; @@ -1713,10 +1713,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, tmp->ts = ts; - spin_lock_bh(&rdev->bss_lock); - if (WARN_ON(!rcu_access_pointer(tmp->pub.ies))) { - spin_unlock_bh(&rdev->bss_lock); return NULL; } @@ -1724,7 +1721,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, if (found) { if (!cfg80211_update_known_bss(rdev, found, tmp, signal_valid)) - goto drop; + return NULL; } else { struct cfg80211_internal_bss *new; struct cfg80211_internal_bss *hidden; @@ -1744,7 +1741,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, ies = (void *)rcu_dereference(tmp->pub.proberesp_ies); if (ies) kfree_rcu(ies, rcu_head); - goto drop; + return NULL; } memcpy(new, tmp, sizeof(*new)); new->refcount = 1; @@ -1775,14 +1772,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, */ if (!cfg80211_combine_bsses(rdev, new)) { bss_ref_put(rdev, new); - goto drop; + return NULL; } } if (rdev->bss_entries >= bss_entries_limit && !cfg80211_bss_expire_oldest(rdev)) { bss_ref_put(rdev, new); - goto drop; + return NULL; } /* This must be before the call to bss_ref_get */ @@ -1799,12 +1796,22 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, rdev->bss_generation++; bss_ref_get(rdev, found); - spin_unlock_bh(&rdev->bss_lock); return found; - drop: +} + +struct cfg80211_internal_bss * +cfg80211_bss_update(struct cfg80211_registered_device *rdev, + struct cfg80211_internal_bss *tmp, + bool signal_valid, unsigned long ts) +{ + struct cfg80211_internal_bss *res; + + spin_lock_bh(&rdev->bss_lock); + res = __cfg80211_bss_update(rdev, tmp, signal_valid, ts); spin_unlock_bh(&rdev->bss_lock); - return NULL; + + return res; } int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen, @@ -2015,15 +2022,15 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, rcu_assign_pointer(tmp.pub.ies, ies); signal_valid = data->chan == channel; - res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid, ts); + spin_lock_bh(&rdev->bss_lock); + res = __cfg80211_bss_update(rdev, &tmp, signal_valid, ts); if (!res) - return NULL; + goto drop; if (non_tx_data) { /* this is a nontransmitting bss, we need to add it to * transmitting bss' list if it is not there */ - spin_lock_bh(&rdev->bss_lock); if (cfg80211_add_nontrans_list(non_tx_data->tx_bss, &res->pub)) { if (__cfg80211_unlink_bss(rdev, res)) { @@ -2031,15 +2038,19 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, res = NULL; } } - 
spin_unlock_bh(&rdev->bss_lock); if (!res) - return NULL; + goto drop; } + spin_unlock_bh(&rdev->bss_lock); trace_cfg80211_return_bss(&res->pub); - /* cfg80211_bss_update gives us a referenced result */ + /* __cfg80211_bss_update gives us a referenced result */ return &res->pub; + +drop: + spin_unlock_bh(&rdev->bss_lock); + return NULL; } static const struct element @@ -2376,6 +2387,7 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, struct ieee80211_mgmt *mgmt, size_t len, gfp_t gfp) { + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); struct cfg80211_internal_bss tmp = {}, *res; struct cfg80211_bss_ies *ies; struct ieee80211_channel *channel; @@ -2485,14 +2497,20 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, ether_addr_copy(tmp.parent_bssid, data->parent_bssid); signal_valid = data->chan == channel; - res = cfg80211_bss_update(wiphy_to_rdev(wiphy), &tmp, signal_valid, - jiffies); + spin_lock_bh(&rdev->bss_lock); + res = __cfg80211_bss_update(rdev, &tmp, signal_valid, jiffies); if (!res) - return NULL; + goto drop; + + spin_unlock_bh(&rdev->bss_lock); trace_cfg80211_return_bss(&res->pub); - /* cfg80211_bss_update gives us a referenced result */ + /* __cfg80211_bss_update gives us a referenced result */ return &res->pub; + +drop: + spin_unlock_bh(&rdev->bss_lock); + return NULL; } struct cfg80211_bss * -- cgit v1.2.3 From 5db25290b77b4efcf26c2b25f288ca3f13ff2fc5 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Fri, 16 Jun 2023 09:54:00 +0300 Subject: wifi: cfg80211: add inform_bss op to update BSS This new function is called from within the inform_bss(_frame)_data functions in order for the driver to update data that it is tracking. Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094949.8d7781b0f965.I80041183072b75c081996a1a5a230b34aff5c668@changeid Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 13 +++++++++++++ net/wireless/rdev-ops.h | 12 ++++++++++++ net/wireless/scan.c | 4 ++++ net/wireless/trace.h | 17 +++++++++++++++++ 4 files changed, 46 insertions(+) (limited to 'net') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 3ca581845206..e912b7cd3093 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2720,6 +2720,7 @@ enum cfg80211_signal_type { * the BSS that requested the scan in which the beacon/probe was received. * @chains: bitmask for filled values in @chain_signal. * @chain_signal: per-chain signal strength of last received BSS in dBm. + * @drv_data: Data to be passed through to @inform_bss */ struct cfg80211_inform_bss { struct ieee80211_channel *chan; @@ -2730,6 +2731,8 @@ struct cfg80211_inform_bss { u8 parent_bssid[ETH_ALEN] __aligned(2); u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; + + void *drv_data; }; /** @@ -4101,6 +4104,13 @@ struct mgmt_frame_regs { * * @change_bss: Modify parameters for a given BSS. * + * @inform_bss: Called by cfg80211 while being informed about new BSS data + * for every BSS found within the reported data or frame. This is called + * from within the cfg8011 inform_bss handlers while holding the bss_lock. + * The data parameter is passed through from drv_data inside + * struct cfg80211_inform_bss. + * The new IE data for the BSS is explicitly passed. 
+ * * @set_txq_params: Set TX queue parameters * * @libertas_set_mesh_channel: Only for backward compatibility for libertas, @@ -4488,6 +4498,9 @@ struct cfg80211_ops { int (*change_bss)(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params); + void (*inform_bss)(struct wiphy *wiphy, struct cfg80211_bss *bss, + const struct cfg80211_bss_ies *ies, void *data); + int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_txq_params *params); diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index f8c310849438..90bb7ac4b930 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -407,6 +407,18 @@ static inline int rdev_change_bss(struct cfg80211_registered_device *rdev, return ret; } +static inline void rdev_inform_bss(struct cfg80211_registered_device *rdev, + struct cfg80211_bss *bss, + const struct cfg80211_bss_ies *ies, + void *drv_data) + +{ + trace_rdev_inform_bss(&rdev->wiphy, bss); + if (rdev->ops->inform_bss) + rdev->ops->inform_bss(&rdev->wiphy, bss, ies, drv_data); + trace_rdev_return_void(&rdev->wiphy); +} + static inline int rdev_set_txq_params(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_txq_params *params) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 8984f74da891..d9abbf123ad1 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -2027,6 +2027,8 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, if (!res) goto drop; + rdev_inform_bss(rdev, &res->pub, ies, data->drv_data); + if (non_tx_data) { /* this is a nontransmitting bss, we need to add it to * transmitting bss' list if it is not there @@ -2502,6 +2504,8 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, if (!res) goto drop; + rdev_inform_bss(rdev, &res->pub, ies, data->drv_data); + spin_unlock_bh(&rdev->bss_lock); trace_cfg80211_return_bss(&res->pub); diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 73a4f8f57438..e63990b81249 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -1159,6 +1159,23 @@ TRACE_EVENT(rdev_change_bss, __entry->ap_isolate, __entry->ht_opmode) ); +TRACE_EVENT(rdev_inform_bss, + TP_PROTO(struct wiphy *wiphy, struct cfg80211_bss *bss), + TP_ARGS(wiphy, bss), + TP_STRUCT__entry( + WIPHY_ENTRY + MAC_ENTRY(bssid) + CHAN_ENTRY + ), + TP_fast_assign( + WIPHY_ASSIGN; + MAC_ASSIGN(bssid, bss->bssid); + CHAN_ASSIGN(bss->channel); + ), + TP_printk(WIPHY_PR_FMT ", %pM, " CHAN_PR_FMT, + WIPHY_PR_ARG, __entry->bssid, CHAN_PR_ARG) +); + TRACE_EVENT(rdev_set_txq_params, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct ieee80211_txq_params *params), -- cgit v1.2.3 From 108d202298bf03ce8d7e28bb94d23555c8385582 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Fri, 16 Jun 2023 09:54:01 +0300 Subject: wifi: mac80211: use new inform_bss callback Doing this simplifies the code somewhat, as iteration over the nontransmitted BSSs is not required anymore. Also, mac80211 should not be iterating over the nontrans_list as it should only be accessed while the bss_lock is held. It also simplifies parsing of the IEs somewhat, as cfg80211 already extracts the IEs and passes them to the callback. Note that the only user left requiring parsing a specific BSS is the association code if a beacon is required by the hardware. 
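For context, a hedged sketch of what a driver-side implementation of the op can look like, using the signature introduced above; the private struct and the bookkeeping are placeholders, not mac80211 code.

  struct my_bss_priv {
          unsigned long last_seen;
  };

  /* cfg80211 calls this from its inform_bss handlers, under bss_lock, for
   * every reported BSS (nontransmitted ones included), with the IEs
   * already extracted into ies->data / ies->len. */
  static void drv_inform_bss(struct wiphy *wiphy, struct cfg80211_bss *cbss,
                             const struct cfg80211_bss_ies *ies, void *drv_data)
  {
          struct my_bss_priv *priv = (void *)cbss->priv;

          priv->last_seen = jiffies;
          /* parse ies->data / ies->len as needed; must not sleep here */
  }

A driver hooks this up by setting .inform_bss in its cfg80211_ops, as the mac80211 hunks in the next patch show.
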
Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094949.39ebfe2f9e59.Ia012b08e0feed8ec431b666888b459f6366f7bd1@changeid Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 1 + net/mac80211/ieee80211_i.h | 3 ++ net/mac80211/scan.c | 93 +++++++++++++++++++--------------------------- 3 files changed, 42 insertions(+), 55 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 359589d525d5..eea7028a46a7 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -5050,6 +5050,7 @@ const struct cfg80211_ops mac80211_config_ops = { .join_ocb = ieee80211_join_ocb, .leave_ocb = ieee80211_leave_ocb, .change_bss = ieee80211_change_bss, + .inform_bss = ieee80211_inform_bss, .set_txq_params = ieee80211_set_txq_params, .set_monitor_channel = ieee80211_set_monitor_channel, .suspend = ieee80211_suspend, diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index f8d5f37ebe9a..b05dfdcfff11 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1929,6 +1929,9 @@ void ieee80211_scan_cancel(struct ieee80211_local *local); void ieee80211_run_deferred_scan(struct ieee80211_local *local); void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb); +void ieee80211_inform_bss(struct wiphy *wiphy, struct cfg80211_bss *bss, + const struct cfg80211_bss_ies *ies, void *data); + void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); struct ieee80211_bss * ieee80211_bss_info_update(struct ieee80211_local *local, diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index ea5383136fff..0805aa8603c6 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -9,7 +9,7 @@ * Copyright 2007, Michael Wu * Copyright 2013-2015 Intel Mobile Communications GmbH * Copyright 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2018-2023 Intel Corporation */ #include @@ -55,27 +55,45 @@ static bool is_uapsd_supported(struct ieee802_11_elems *elems) return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD; } -static void -ieee80211_update_bss_from_elems(struct ieee80211_local *local, - struct ieee80211_bss *bss, - struct ieee802_11_elems *elems, - struct ieee80211_rx_status *rx_status, - bool beacon) +struct inform_bss_update_data { + struct ieee80211_rx_status *rx_status; + bool beacon; +}; + +void ieee80211_inform_bss(struct wiphy *wiphy, + struct cfg80211_bss *cbss, + const struct cfg80211_bss_ies *ies, + void *data) { + struct ieee80211_local *local = wiphy_priv(wiphy); + struct inform_bss_update_data *update_data = data; + struct ieee80211_bss *bss = (void *)cbss->priv; + struct ieee80211_rx_status *rx_status; + struct ieee802_11_elems *elems; int clen, srlen; - if (beacon) + /* This happens while joining an IBSS */ + if (!update_data) + return; + + elems = ieee802_11_parse_elems(ies->data, ies->len, false, NULL); + if (!elems) + return; + + rx_status = update_data->rx_status; + + if (update_data->beacon) bss->device_ts_beacon = rx_status->device_timestamp; else bss->device_ts_presp = rx_status->device_timestamp; if (elems->parse_error) { - if (beacon) + if (update_data->beacon) bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON; else bss->corrupt_data |= IEEE80211_BSS_CORRUPT_PROBE_RESP; } else { - if (beacon) + if (update_data->beacon) bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_BEACON; else bss->corrupt_data &= ~IEEE80211_BSS_CORRUPT_PROBE_RESP; @@ -124,7 +142,7 @@ ieee80211_update_bss_from_elems(struct ieee80211_local *local, 
bss->valid_data |= IEEE80211_BSS_VALID_WMM; } - if (beacon) { + if (update_data->beacon) { struct ieee80211_supported_band *sband = local->hw.wiphy->bands[rx_status->band]; if (!(rx_status->encoding == RX_ENC_HT) && @@ -138,6 +156,8 @@ ieee80211_update_bss_from_elems(struct ieee80211_local *local, le32_to_cpu(elems->vht_cap_elem->vht_cap_info); else bss->vht_cap_info = 0; + + kfree(elems); } struct ieee80211_bss * @@ -148,16 +168,17 @@ ieee80211_bss_info_update(struct ieee80211_local *local, { bool beacon = ieee80211_is_beacon(mgmt->frame_control) || ieee80211_is_s1g_beacon(mgmt->frame_control); - struct cfg80211_bss *cbss, *non_tx_cbss; - struct ieee80211_bss *bss, *non_tx_bss; + struct cfg80211_bss *cbss; + struct inform_bss_update_data update_data = { + .rx_status = rx_status, + .beacon = beacon, + }; struct cfg80211_inform_bss bss_meta = { .boottime_ns = rx_status->boottime_ns, + .drv_data = (void *)&update_data, }; bool signal_valid; struct ieee80211_sub_if_data *scan_sdata; - struct ieee802_11_elems *elems; - size_t baselen; - u8 *elements; if (rx_status->flag & RX_FLAG_NO_SIGNAL_VAL) bss_meta.signal = 0; /* invalid signal indication */ @@ -192,50 +213,12 @@ ieee80211_bss_info_update(struct ieee80211_local *local, if (!cbss) return NULL; - if (ieee80211_is_probe_resp(mgmt->frame_control)) { - elements = mgmt->u.probe_resp.variable; - baselen = offsetof(struct ieee80211_mgmt, - u.probe_resp.variable); - } else if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { - struct ieee80211_ext *ext = (void *) mgmt; - - baselen = offsetof(struct ieee80211_ext, u.s1g_beacon.variable); - elements = ext->u.s1g_beacon.variable; - } else { - baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable); - elements = mgmt->u.beacon.variable; - } - - if (baselen > len) - return NULL; - - elems = ieee802_11_parse_elems(elements, len - baselen, false, cbss); - if (!elems) - return NULL; - /* In case the signal is invalid update the status */ signal_valid = channel == cbss->channel; if (!signal_valid) rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; - bss = (void *)cbss->priv; - ieee80211_update_bss_from_elems(local, bss, elems, rx_status, beacon); - kfree(elems); - - list_for_each_entry(non_tx_cbss, &cbss->nontrans_list, nontrans_list) { - non_tx_bss = (void *)non_tx_cbss->priv; - - elems = ieee802_11_parse_elems(elements, len - baselen, false, - non_tx_cbss); - if (!elems) - continue; - - ieee80211_update_bss_from_elems(local, non_tx_bss, elems, - rx_status, beacon); - kfree(elems); - } - - return bss; + return (void *)cbss->priv; } static bool ieee80211_scan_accept_presp(struct ieee80211_sub_if_data *sdata, -- cgit v1.2.3 From 03e7e493f1a3697eba115f3f69e296f7e47500ee Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Fri, 16 Jun 2023 09:54:02 +0300 Subject: wifi: cfg80211: ignore invalid TBTT info field types The TBTT information field type must be zero. This is only changed in the 802.11be draft specification where the value 1 is used to indicate that only the MLD parameters are included. 
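The added check in standalone form, for illustration; the type values come from the new defines in this patch, and the 0x3 mask is assumed to be what IEEE80211_AP_INFO_TBTT_HDR_TYPE covers (the low two bits of the TBTT Information Header).

  #include <stdbool.h>
  #include <stdint.h>

  #define TBTT_INFO_TYPE_TBTT 0   /* IEEE80211_TBTT_INFO_TYPE_TBTT */
  #define TBTT_INFO_TYPE_MLD  1   /* IEEE80211_TBTT_INFO_TYPE_MLD */

  /* Entries with any other field type (e.g. MLD-parameters-only, type 1)
   * carry no usable TBTT data for this parser and are skipped. */
  static bool tbtt_entry_is_plain_tbtt(uint8_t tbtt_info_hdr)
  {
          return (tbtt_info_hdr & 0x3) == TBTT_INFO_TYPE_TBTT;
  }
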
Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094949.7865606ffe94.I7ff28afb875d1b4c39acd497df8490a7d3628e3f@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 2 ++ net/wireless/scan.c | 7 +++++++ 2 files changed, 9 insertions(+) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 5dfed1a6625c..47ddc65b443b 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4481,6 +4481,8 @@ static inline bool for_each_element_completed(const struct element *element, #define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04 #define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08 #define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0 +#define IEEE80211_TBTT_INFO_TYPE_TBTT 0 +#define IEEE80211_TBTT_INFO_TYPE_MLD 1 #define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 9 #define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 13 diff --git a/net/wireless/scan.c b/net/wireless/scan.c index d9abbf123ad1..2212e6d24204 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -629,6 +629,13 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies, if (end - pos < count * length) break; + if (u8_get_bits(ap_info->tbtt_info_hdr, + IEEE80211_AP_INFO_TBTT_HDR_TYPE) != + IEEE80211_TBTT_INFO_TYPE_TBTT) { + pos += count * length; + continue; + } + /* * TBTT info must include bss param + BSSID + * (short SSID or same_ssid bit to be set). -- cgit v1.2.3 From dfd9aa3e7a456d57b18021d66472ab7ff8373ab7 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Fri, 16 Jun 2023 09:54:03 +0300 Subject: wifi: cfg80211: rewrite merging of inherited elements The cfg80211_gen_new_ie function merges the IEs using inheritance rules. Rewrite this function to fix issues around inheritance rules. In particular, vendor elements do not require any special handling, as they are either all inherited or overridden by the subprofile. Also, add fragmentation handling as this may be needed in some cases. This also changes the function to not require making a copy. The new version could be optimized a bit by explicitly tracking which IEs have been handled already rather than looking that up again every time. Note that a small behavioural change is the removal of the SSID special handling. This should be fine for the MBSSID element, as the SSID must be included in the subelement. 
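The fragmentation rule the rewrite has to respect can be shown in isolation: an element whose length byte is 255 is continued by Fragment elements immediately following it, and a copy must carry those fragments along. A standalone sketch (Fragment element ID 242 assumed, as for WLAN_EID_FRAGMENT):

  #include <stddef.h>
  #include <stdint.h>

  #define EID_FRAGMENT 242        /* assumed value of WLAN_EID_FRAGMENT */

  /* Return the number of bytes occupied by the element starting at
   * ies[pos], including any Fragment elements that continue it, or 0 if
   * the buffer is malformed. */
  static size_t element_span(const uint8_t *ies, size_t len, size_t pos)
  {
          size_t span = 0;

          for (;;) {
                  if (pos + 2 > len || pos + 2 + ies[pos + 1] > len)
                          return 0;
                  span += 2 + ies[pos + 1];
                  if (ies[pos + 1] != 255)
                          return span;
                  pos += 2 + ies[pos + 1];
                  if (pos + 2 > len || ies[pos] != EID_FRAGMENT)
                          return span;
          }
  }

The merge itself then walks the parent elements once, copying each (with its fragments) unless the subprofile provides the same element type or the Non-Inheritance element excludes it.
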
Fixes: 0b8fb8235be8 ("cfg80211: Parsing of Multiple BSSID information in scanning") Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094949.bc6152e146db.I2b5f3bc45085e1901e5b5192a674436adaf94748@changeid Signed-off-by: Johannes Berg --- net/wireless/scan.c | 213 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 124 insertions(+), 89 deletions(-) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 2212e6d24204..b9eb91f485f8 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -259,117 +259,152 @@ bool cfg80211_is_element_inherited(const struct element *elem, } EXPORT_SYMBOL(cfg80211_is_element_inherited); -static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, - const u8 *subelement, size_t subie_len, - u8 *new_ie, gfp_t gfp) +static size_t cfg80211_copy_elem_with_frags(const struct element *elem, + const u8 *ie, size_t ie_len, + u8 **pos, u8 *buf, size_t buf_len) { - u8 *pos, *tmp; - const u8 *tmp_old, *tmp_new; - const struct element *non_inherit_elem; - u8 *sub_copy; + if (WARN_ON((u8 *)elem < ie || elem->data > ie + ie_len || + elem->data + elem->datalen > ie + ie_len)) + return 0; - /* copy subelement as we need to change its content to - * mark an ie after it is processed. - */ - sub_copy = kmemdup(subelement, subie_len, gfp); - if (!sub_copy) + if (elem->datalen + 2 > buf + buf_len - *pos) return 0; - pos = &new_ie[0]; + memcpy(*pos, elem, elem->datalen + 2); + *pos += elem->datalen + 2; - /* set new ssid */ - tmp_new = cfg80211_find_ie(WLAN_EID_SSID, sub_copy, subie_len); - if (tmp_new) { - memcpy(pos, tmp_new, tmp_new[1] + 2); - pos += (tmp_new[1] + 2); + /* Finish if it is not fragmented */ + if (elem->datalen != 255) + return *pos - buf; + + ie_len = ie + ie_len - elem->data - elem->datalen; + ie = (const u8 *)elem->data + elem->datalen; + + for_each_element(elem, ie, ie_len) { + if (elem->id != WLAN_EID_FRAGMENT) + break; + + if (elem->datalen + 2 > buf + buf_len - *pos) + return 0; + + memcpy(*pos, elem, elem->datalen + 2); + *pos += elem->datalen + 2; + + if (elem->datalen != 255) + break; } - /* get non inheritance list if exists */ - non_inherit_elem = - cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, - sub_copy, subie_len); + return *pos - buf; +} - /* go through IEs in ie (skip SSID) and subelement, - * merge them into new_ie +static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen, + const u8 *subie, size_t subie_len, + u8 *new_ie, size_t new_ie_len) +{ + const struct element *non_inherit_elem, *parent, *sub; + u8 *pos = new_ie; + u8 id, ext_id; + unsigned int match_len; + + non_inherit_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, + subie, subie_len); + + /* We copy the elements one by one from the parent to the generated + * elements. + * If they are not inherited (included in subie or in the non + * inheritance element), then we copy all occurrences the first time + * we see this element type. */ - tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen); - tmp_old = (tmp_old) ? 
tmp_old + tmp_old[1] + 2 : ie; - - while (tmp_old + 2 - ie <= ielen && - tmp_old + tmp_old[1] + 2 - ie <= ielen) { - if (tmp_old[0] == 0) { - tmp_old++; + for_each_element(parent, ie, ielen) { + if (parent->id == WLAN_EID_FRAGMENT) continue; + + if (parent->id == WLAN_EID_EXTENSION) { + if (parent->datalen < 1) + continue; + + id = WLAN_EID_EXTENSION; + ext_id = parent->data[0]; + match_len = 1; + } else { + id = parent->id; + match_len = 0; } - if (tmp_old[0] == WLAN_EID_EXTENSION) - tmp = (u8 *)cfg80211_find_ext_ie(tmp_old[2], sub_copy, - subie_len); - else - tmp = (u8 *)cfg80211_find_ie(tmp_old[0], sub_copy, - subie_len); + /* Find first occurrence in subie */ + sub = cfg80211_find_elem_match(id, subie, subie_len, + &ext_id, match_len, 0); - if (!tmp) { - const struct element *old_elem = (void *)tmp_old; + /* Copy from parent if not in subie and inherited */ + if (!sub && + cfg80211_is_element_inherited(parent, non_inherit_elem)) { + if (!cfg80211_copy_elem_with_frags(parent, + ie, ielen, + &pos, new_ie, + new_ie_len)) + return 0; - /* ie in old ie but not in subelement */ - if (cfg80211_is_element_inherited(old_elem, - non_inherit_elem)) { - memcpy(pos, tmp_old, tmp_old[1] + 2); - pos += tmp_old[1] + 2; - } - } else { - /* ie in transmitting ie also in subelement, - * copy from subelement and flag the ie in subelement - * as copied (by setting eid field to WLAN_EID_SSID, - * which is skipped anyway). - * For vendor ie, compare OUI + type + subType to - * determine if they are the same ie. - */ - if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) { - if (tmp_old[1] >= 5 && tmp[1] >= 5 && - !memcmp(tmp_old + 2, tmp + 2, 5)) { - /* same vendor ie, copy from - * subelement - */ - memcpy(pos, tmp, tmp[1] + 2); - pos += tmp[1] + 2; - tmp[0] = WLAN_EID_SSID; - } else { - memcpy(pos, tmp_old, tmp_old[1] + 2); - pos += tmp_old[1] + 2; - } - } else { - /* copy ie from subelement into new ie */ - memcpy(pos, tmp, tmp[1] + 2); - pos += tmp[1] + 2; - tmp[0] = WLAN_EID_SSID; - } + continue; } - if (tmp_old + tmp_old[1] + 2 - ie == ielen) - break; + /* Already copied if an earlier element had the same type */ + if (cfg80211_find_elem_match(id, ie, (u8 *)parent - ie, + &ext_id, match_len, 0)) + continue; - tmp_old += tmp_old[1] + 2; + /* Not inheriting, copy all similar elements from subie */ + while (sub) { + if (!cfg80211_copy_elem_with_frags(sub, + subie, subie_len, + &pos, new_ie, + new_ie_len)) + return 0; + + sub = cfg80211_find_elem_match(id, + sub->data + sub->datalen, + subie_len + subie - + (sub->data + + sub->datalen), + &ext_id, match_len, 0); + } } - /* go through subelement again to check if there is any ie not - * copied to new ie, skip ssid, capability, bssid-index ie + /* The above misses elements that are included in subie but not in the + * parent, so do a pass over subie and append those. + * Skip the non-tx BSSID caps and non-inheritance element. 
*/ - tmp_new = sub_copy; - while (tmp_new + 2 - sub_copy <= subie_len && - tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) { - if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP || - tmp_new[0] == WLAN_EID_SSID)) { - memcpy(pos, tmp_new, tmp_new[1] + 2); - pos += tmp_new[1] + 2; + for_each_element(sub, subie, subie_len) { + if (sub->id == WLAN_EID_NON_TX_BSSID_CAP) + continue; + + if (sub->id == WLAN_EID_FRAGMENT) + continue; + + if (sub->id == WLAN_EID_EXTENSION) { + if (sub->datalen < 1) + continue; + + id = WLAN_EID_EXTENSION; + ext_id = sub->data[0]; + match_len = 1; + + if (ext_id == WLAN_EID_EXT_NON_INHERITANCE) + continue; + } else { + id = sub->id; + match_len = 0; } - if (tmp_new + tmp_new[1] + 2 - sub_copy == subie_len) - break; - tmp_new += tmp_new[1] + 2; + + /* Processed if one was included in the parent */ + if (cfg80211_find_elem_match(id, ie, ielen, + &ext_id, match_len, 0)) + continue; + + if (!cfg80211_copy_elem_with_frags(sub, subie, subie_len, + &pos, new_ie, new_ie_len)) + return 0; } - kfree(sub_copy); return pos - new_ie; } @@ -2228,7 +2263,7 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy, new_ie_len = cfg80211_gen_new_ie(ie, ielen, profile, profile_len, new_ie, - gfp); + IEEE80211_MAX_DATA_LEN); if (!new_ie_len) continue; -- cgit v1.2.3 From 39432f8a3752a87a53fd8d5e51824a43aaae5cab Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Fri, 16 Jun 2023 09:54:04 +0300 Subject: wifi: cfg80211: drop incorrect nontransmitted BSS update code The removed code ran for any BSS that was not included in the MBSSID element in order to update it. However, instead of using the correct inheritance rules, it would simply copy the elements from the transmitting AP. The result is that we would report incorrect elements in this case. After some discussions, it seems that there are likely not even APs actually using this feature. Either way, removing the code decreases complexity and makes the cfg80211 behaviour more correct. Fixes: 0b8fb8235be8 ("cfg80211: Parsing of Multiple BSSID information in scanning") Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094949.cfd6d8db1f26.Ia1044902b86cd7d366400a4bfb93691b8f05d68c@changeid Signed-off-by: Johannes Berg --- net/wireless/scan.c | 154 ++++------------------------------------------------ 1 file changed, 11 insertions(+), 143 deletions(-) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index b9eb91f485f8..75e6e032bb3a 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -2312,118 +2312,6 @@ cfg80211_inform_bss_data(struct wiphy *wiphy, } EXPORT_SYMBOL(cfg80211_inform_bss_data); -static void -cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy, - struct cfg80211_inform_bss *data, - struct ieee80211_mgmt *mgmt, size_t len, - struct cfg80211_non_tx_bss *non_tx_data, - gfp_t gfp) -{ - enum cfg80211_bss_frame_type ftype; - const u8 *ie = mgmt->u.probe_resp.variable; - size_t ielen = len - offsetof(struct ieee80211_mgmt, - u.probe_resp.variable); - - ftype = ieee80211_is_beacon(mgmt->frame_control) ? 
- CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP; - - cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid, - le64_to_cpu(mgmt->u.probe_resp.timestamp), - le16_to_cpu(mgmt->u.probe_resp.beacon_int), - ie, ielen, non_tx_data, gfp); -} - -static void -cfg80211_update_notlisted_nontrans(struct wiphy *wiphy, - struct cfg80211_bss *nontrans_bss, - struct ieee80211_mgmt *mgmt, size_t len) -{ - u8 *ie, *new_ie, *pos; - const struct element *nontrans_ssid; - const u8 *trans_ssid, *mbssid; - size_t ielen = len - offsetof(struct ieee80211_mgmt, - u.probe_resp.variable); - size_t new_ie_len; - struct cfg80211_bss_ies *new_ies; - const struct cfg80211_bss_ies *old; - size_t cpy_len; - - lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock); - - ie = mgmt->u.probe_resp.variable; - - new_ie_len = ielen; - trans_ssid = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen); - if (!trans_ssid) - return; - new_ie_len -= trans_ssid[1]; - mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen); - /* - * It's not valid to have the MBSSID element before SSID - * ignore if that happens - the code below assumes it is - * after (while copying things inbetween). - */ - if (!mbssid || mbssid < trans_ssid) - return; - new_ie_len -= mbssid[1]; - - nontrans_ssid = ieee80211_bss_get_elem(nontrans_bss, WLAN_EID_SSID); - if (!nontrans_ssid) - return; - - new_ie_len += nontrans_ssid->datalen; - - /* generate new ie for nontrans BSS - * 1. replace SSID with nontrans BSS' SSID - * 2. skip MBSSID IE - */ - new_ie = kzalloc(new_ie_len, GFP_ATOMIC); - if (!new_ie) - return; - - new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, GFP_ATOMIC); - if (!new_ies) - goto out_free; - - pos = new_ie; - - /* copy the nontransmitted SSID */ - cpy_len = nontrans_ssid->datalen + 2; - memcpy(pos, nontrans_ssid, cpy_len); - pos += cpy_len; - /* copy the IEs between SSID and MBSSID */ - cpy_len = trans_ssid[1] + 2; - memcpy(pos, (trans_ssid + cpy_len), (mbssid - (trans_ssid + cpy_len))); - pos += (mbssid - (trans_ssid + cpy_len)); - /* copy the IEs after MBSSID */ - cpy_len = mbssid[1] + 2; - memcpy(pos, mbssid + cpy_len, ((ie + ielen) - (mbssid + cpy_len))); - - /* update ie */ - new_ies->len = new_ie_len; - new_ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); - new_ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control); - memcpy(new_ies->data, new_ie, new_ie_len); - if (ieee80211_is_probe_resp(mgmt->frame_control)) { - old = rcu_access_pointer(nontrans_bss->proberesp_ies); - rcu_assign_pointer(nontrans_bss->proberesp_ies, new_ies); - rcu_assign_pointer(nontrans_bss->ies, new_ies); - if (old) - kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); - } else { - old = rcu_access_pointer(nontrans_bss->beacon_ies); - rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies); - cfg80211_update_hidden_bsses(bss_from_pub(nontrans_bss), - new_ies, old); - rcu_assign_pointer(nontrans_bss->ies, new_ies); - if (old) - kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head); - } - -out_free: - kfree(new_ie); -} - /* cfg80211_inform_bss_width_frame helper */ static struct cfg80211_bss * cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy, @@ -2565,51 +2453,31 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, struct ieee80211_mgmt *mgmt, size_t len, gfp_t gfp) { - struct cfg80211_bss *res, *tmp_bss; + struct cfg80211_bss *res; const u8 *ie = mgmt->u.probe_resp.variable; - const struct cfg80211_bss_ies *ies1, *ies2; size_t ielen = len - offsetof(struct ieee80211_mgmt, u.probe_resp.variable); + enum cfg80211_bss_frame_type ftype; struct 
cfg80211_non_tx_bss non_tx_data = {}; res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt, len, gfp); + if (!res) + return NULL; /* don't do any further MBSSID handling for S1G */ if (ieee80211_is_s1g_beacon(mgmt->frame_control)) return res; - if (!res || !wiphy->support_mbssid || - !cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) - return res; - if (wiphy->support_only_he_mbssid && - !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) - return res; - + ftype = ieee80211_is_beacon(mgmt->frame_control) ? + CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP; non_tx_data.tx_bss = res; - /* process each non-transmitting bss */ - cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len, - &non_tx_data, gfp); - - spin_lock_bh(&wiphy_to_rdev(wiphy)->bss_lock); - /* check if the res has other nontransmitting bss which is not - * in MBSSID IE - */ - ies1 = rcu_access_pointer(res->ies); - - /* go through nontrans_list, if the timestamp of the BSS is - * earlier than the timestamp of the transmitting BSS then - * update it - */ - list_for_each_entry(tmp_bss, &res->nontrans_list, - nontrans_list) { - ies2 = rcu_access_pointer(tmp_bss->ies); - if (ies2->tsf < ies1->tsf) - cfg80211_update_notlisted_nontrans(wiphy, tmp_bss, - mgmt, len); - } - spin_unlock_bh(&wiphy_to_rdev(wiphy)->bss_lock); + /* process each non-transmitting bss */ + cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid, + le64_to_cpu(mgmt->u.probe_resp.timestamp), + le16_to_cpu(mgmt->u.probe_resp.beacon_int), + ie, ielen, &non_tx_data, gfp); return res; } -- cgit v1.2.3 From f837a653a09700daa136a3db49c0c97d7295ca30 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Fri, 16 Jun 2023 09:54:05 +0300 Subject: wifi: cfg80211: add element defragmentation helper This is already needed within mac80211 and support is also needed by cfg80211 to parse ML elements. Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094949.29c3ebeed10d.I009c049289dd0162c2e858ed8b68d2875a672ed6@changeid Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 22 ++++++++++++++++++ net/wireless/scan.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) (limited to 'net') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index e912b7cd3093..9972de114d73 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -6676,6 +6676,28 @@ cfg80211_find_vendor_ie(unsigned int oui, int oui_type, return (const void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len); } +/** + * cfg80211_defragment_element - Defrag the given element data into a buffer + * + * @elem: the element to defragment + * @ies: elements where @elem is contained + * @ieslen: length of @ies + * @data: buffer to store element data + * @data_len: length of @data + * @frag_id: the element ID of fragments + * + * Return: length of @data, or -EINVAL on error + * + * Copy out all data from an element that may be fragmented into @data, while + * skipping all headers. + * + * The function uses memmove() internally. It is acceptable to defragment an + * element in-place. 
+ */ +ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies, + size_t ieslen, u8 *data, size_t data_len, + u8 frag_id); + /** * cfg80211_send_layer2_update - send layer 2 update frame * diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 75e6e032bb3a..dc71c6ac5bf5 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -2288,6 +2288,66 @@ out: kfree(profile); } +ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies, + size_t ieslen, u8 *data, size_t data_len, + u8 frag_id) +{ + const struct element *next; + ssize_t copied; + u8 elem_datalen; + + if (!elem) + return -EINVAL; + + /* elem might be invalid after the memmove */ + next = (void *)(elem->data + elem->datalen); + + elem_datalen = elem->datalen; + if (elem->id == WLAN_EID_EXTENSION) { + copied = elem->datalen - 1; + if (copied > data_len) + return -ENOSPC; + + memmove(data, elem->data + 1, copied); + } else { + copied = elem->datalen; + if (copied > data_len) + return -ENOSPC; + + memmove(data, elem->data, copied); + } + + /* Fragmented elements must have 255 bytes */ + if (elem_datalen < 255) + return copied; + + for (elem = next; + elem->data < ies + ieslen && + elem->data + elem->datalen < ies + ieslen; + elem = next) { + /* elem might be invalid after the memmove */ + next = (void *)(elem->data + elem->datalen); + + if (elem->id != frag_id) + break; + + elem_datalen = elem->datalen; + + if (copied + elem_datalen > data_len) + return -ENOSPC; + + memmove(data + copied, elem->data, elem_datalen); + copied += elem_datalen; + + /* Only the last fragment may be short */ + if (elem_datalen != 255) + break; + } + + return copied; +} +EXPORT_SYMBOL(cfg80211_defragment_element); + struct cfg80211_bss * cfg80211_inform_bss_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, -- cgit v1.2.3 From a76236de584ac15b2e495a613034a9e031449f7e Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Fri, 16 Jun 2023 09:54:06 +0300 Subject: wifi: mac80211: use cfg80211 defragmentation helper Use the shared functionality rather than copying it into mac80211. Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094949.7dcbf82baade.Ic68d1f547cb75d66037abdbb0f066db20ff41ba3@changeid Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 2 + net/mac80211/util.c | 93 +++++++++++++++++----------------------------- 2 files changed, 37 insertions(+), 58 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index b05dfdcfff11..4ae9c58d6a12 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1752,6 +1752,8 @@ struct ieee802_11_elems { /* mult-link element can be de-fragmented and thus u8 is not sufficient */ size_t multi_link_len; + /* The element in the original IEs */ + const struct element *multi_link_elem; /* * store the per station profile pointer and length in case that the diff --git a/net/mac80211/util.c b/net/mac80211/util.c index a0407baa6cd3..3752e90cda54 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -984,6 +984,7 @@ ieee80211_parse_extension_element(u32 *crc, break; case WLAN_EID_EXT_EHT_MULTI_LINK: if (ieee80211_mle_size_ok(data, len)) { + elems->multi_link_elem = (void *)elem; elems->multi_link = (void *)data; elems->multi_link_len = len; } @@ -1458,56 +1459,11 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, return found ? 
profile_len : 0; } -static void ieee80211_defragment_element(struct ieee802_11_elems *elems, - void **elem_ptr, size_t *len, - size_t total_len, u8 frag_id) -{ - u8 *data = *elem_ptr, *pos, *start; - const struct element *elem; - - /* - * Since 'data' points to the data of the element, not the element - * itself, allow 254 in case it was an extended element where the - * extended ID isn't part of the data we see here and thus not part of - * 'len' either. - */ - if (!data || (*len != 254 && *len != 255)) - return; - - start = elems->scratch_pos; - - if (WARN_ON(*len > (elems->scratch + elems->scratch_len - - elems->scratch_pos))) - return; - - memcpy(elems->scratch_pos, data, *len); - elems->scratch_pos += *len; - - pos = data + *len; - total_len -= *len; - for_each_element(elem, pos, total_len) { - if (elem->id != frag_id) - break; - - if (WARN_ON(elem->datalen > - (elems->scratch + elems->scratch_len - - elems->scratch_pos))) - return; - - memcpy(elems->scratch_pos, elem->data, elem->datalen); - elems->scratch_pos += elem->datalen; - - *len += elem->datalen; - } - - *elem_ptr = start; -} - static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems, u8 link_id) { const struct ieee80211_multi_link_elem *ml = elems->multi_link; - size_t ml_len = elems->multi_link_len; + ssize_t ml_len = elems->multi_link_len; const struct element *sub; if (!ml || !ml_len) @@ -1519,6 +1475,7 @@ static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems, for_each_mle_subelement(sub, (u8 *)ml, ml_len) { struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data; + ssize_t sta_prof_len; u16 control; if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE) @@ -1536,14 +1493,23 @@ static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems, if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE)) return; - elems->prof = prof; - elems->sta_prof_len = sub->datalen; - /* the sub element can be fragmented */ - ieee80211_defragment_element(elems, (void **)&elems->prof, - &elems->sta_prof_len, - ml_len - (sub->data - (u8 *)ml), - IEEE80211_MLE_SUBELEM_FRAGMENT); + sta_prof_len = + cfg80211_defragment_element(sub, + (u8 *)ml, ml_len, + elems->scratch_pos, + elems->scratch + + elems->scratch_len - + elems->scratch_pos, + IEEE80211_MLE_SUBELEM_FRAGMENT); + + if (sta_prof_len < 0) + return; + + elems->prof = (void *)elems->scratch_pos; + elems->sta_prof_len = sta_prof_len; + elems->scratch_pos += sta_prof_len; + return; } } @@ -1557,17 +1523,28 @@ static void ieee80211_mle_parse_link(struct ieee802_11_elems *elems, .from_ap = params->from_ap, .link_id = -1, }; + ssize_t ml_len = elems->multi_link_len; const struct element *non_inherit = NULL; const u8 *end; if (params->link_id == -1) return; - ieee80211_defragment_element(elems, (void **)&elems->multi_link, - &elems->multi_link_len, - elems->total_len - ((u8 *)elems->multi_link - - elems->ie_start), - WLAN_EID_FRAGMENT); + ml_len = cfg80211_defragment_element(elems->multi_link_elem, + elems->ie_start, + elems->total_len, + elems->scratch_pos, + elems->scratch + + elems->scratch_len - + elems->scratch_pos, + WLAN_EID_FRAGMENT); + + if (ml_len < 0) + return; + + elems->multi_link = (const void *)elems->scratch_pos; + elems->multi_link_len = ml_len; + elems->scratch_pos += ml_len; ieee80211_mle_get_sta_prof(elems, params->link_id); prof = elems->prof; -- cgit v1.2.3 From a286de1aa38f7ea6605c770278a1ebf4096c03a2 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Fri, 16 Jun 2023 09:54:07 +0300 Subject: wifi: mac80211: Rename multi_link As a 
preparation to support Reconfiguration Multi Link element, rename 'multi_link' and 'multi_link_len' fields in 'struct ieee802_11_elems' to 'ml_basic' and 'ml_basic_len'. Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094949.b11370d3066a.I34280ae3728597056a6a2f313063962206c0d581@changeid Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 9 +++++---- net/mac80211/mlme.c | 8 ++++---- net/mac80211/util.c | 19 +++++++++---------- 3 files changed, 18 insertions(+), 18 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 4ae9c58d6a12..554aeb2e20d9 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1726,7 +1726,7 @@ struct ieee802_11_elems { const struct ieee80211_aid_response_ie *aid_resp; const struct ieee80211_eht_cap_elem *eht_cap; const struct ieee80211_eht_operation *eht_operation; - const struct ieee80211_multi_link_elem *multi_link; + const struct ieee80211_multi_link_elem *ml_basic; /* length of them, respectively */ u8 ext_capab_len; @@ -1751,9 +1751,10 @@ struct ieee802_11_elems { u8 eht_cap_len; /* mult-link element can be de-fragmented and thus u8 is not sufficient */ - size_t multi_link_len; - /* The element in the original IEs */ - const struct element *multi_link_elem; + size_t ml_basic_len; + + /* The basic Multi-Link element in the original IEs */ + const struct element *ml_basic_elem; /* * store the per station profile pointer and length in case that the diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 1fc66f09cbb8..b8f8220cd9ff 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -5277,24 +5277,24 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, } if (ieee80211_vif_is_mld(&sdata->vif)) { - if (!elems->multi_link) { + if (!elems->ml_basic) { sdata_info(sdata, "MLO association with %pM but no multi-link element in response!\n", assoc_data->ap_addr); goto abandon_assoc; } - if (le16_get_bits(elems->multi_link->control, + if (le16_get_bits(elems->ml_basic->control, IEEE80211_ML_CONTROL_TYPE) != IEEE80211_ML_CONTROL_TYPE_BASIC) { sdata_info(sdata, "bad multi-link element (control=0x%x)\n", - le16_to_cpu(elems->multi_link->control)); + le16_to_cpu(elems->ml_basic->control)); goto abandon_assoc; } else { struct ieee80211_mle_basic_common_info *common; - common = (void *)elems->multi_link->variable; + common = (void *)elems->ml_basic->variable; if (memcmp(assoc_data->ap_addr, common->mld_mac_addr, ETH_ALEN)) { diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 3752e90cda54..8658c28d684f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -984,9 +984,9 @@ ieee80211_parse_extension_element(u32 *crc, break; case WLAN_EID_EXT_EHT_MULTI_LINK: if (ieee80211_mle_size_ok(data, len)) { - elems->multi_link_elem = (void *)elem; - elems->multi_link = (void *)data; - elems->multi_link_len = len; + elems->ml_basic_elem = (void *)elem; + elems->ml_basic = (void *)data; + elems->ml_basic_len = len; } break; } @@ -1462,8 +1462,8 @@ static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems, u8 link_id) { - const struct ieee80211_multi_link_elem *ml = elems->multi_link; - ssize_t ml_len = elems->multi_link_len; + const struct ieee80211_multi_link_elem *ml = elems->ml_basic; + ssize_t ml_len = elems->ml_basic_len; const struct element *sub; if (!ml || !ml_len) @@ -1523,14 +1523,14 @@ static void 
ieee80211_mle_parse_link(struct ieee802_11_elems *elems, .from_ap = params->from_ap, .link_id = -1, }; - ssize_t ml_len = elems->multi_link_len; + ssize_t ml_len = elems->ml_basic_len; const struct element *non_inherit = NULL; const u8 *end; if (params->link_id == -1) return; - ml_len = cfg80211_defragment_element(elems->multi_link_elem, + ml_len = cfg80211_defragment_element(elems->ml_basic_elem, elems->ie_start, elems->total_len, elems->scratch_pos, @@ -1542,9 +1542,8 @@ static void ieee80211_mle_parse_link(struct ieee802_11_elems *elems, if (ml_len < 0) return; - elems->multi_link = (const void *)elems->scratch_pos; - elems->multi_link_len = ml_len; - elems->scratch_pos += ml_len; + elems->ml_basic = (const void *)elems->scratch_pos; + elems->ml_basic_len = ml_len; ieee80211_mle_get_sta_prof(elems, params->link_id); prof = elems->prof; -- cgit v1.2.3 From cf36cdef10e28df52d500a4829263d1b4d24bc44 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Fri, 16 Jun 2023 09:54:08 +0300 Subject: wifi: mac80211: Add support for parsing Reconfiguration Multi Link element Parse Reconfiguration Multi Link IE. Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094949.6eeb6c9a4a6e.I1cb137da9b3c712fc7c7949a6dec9e314b5d7f63@changeid Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 5 +++++ net/mac80211/util.c | 21 ++++++++++++++++++--- 2 files changed, 23 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 554aeb2e20d9..be3294719cb4 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1727,6 +1727,7 @@ struct ieee802_11_elems { const struct ieee80211_eht_cap_elem *eht_cap; const struct ieee80211_eht_operation *eht_operation; const struct ieee80211_multi_link_elem *ml_basic; + const struct ieee80211_multi_link_elem *ml_reconf; /* length of them, respectively */ u8 ext_capab_len; @@ -1752,10 +1753,14 @@ struct ieee802_11_elems { /* mult-link element can be de-fragmented and thus u8 is not sufficient */ size_t ml_basic_len; + size_t ml_reconf_len; /* The basic Multi-Link element in the original IEs */ const struct element *ml_basic_elem; + /* The reconfiguration Multi-Link element in the original IEs */ + const struct element *ml_reconf_elem; + /* * store the per station profile pointer and length in case that the * parsing also handled Multi-Link element parsing for a specific link diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 8658c28d684f..8da6bc43735a 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -984,9 +984,24 @@ ieee80211_parse_extension_element(u32 *crc, break; case WLAN_EID_EXT_EHT_MULTI_LINK: if (ieee80211_mle_size_ok(data, len)) { - elems->ml_basic_elem = (void *)elem; - elems->ml_basic = (void *)data; - elems->ml_basic_len = len; + const struct ieee80211_multi_link_elem *mle = + (void *)data; + + switch (le16_get_bits(mle->control, + IEEE80211_ML_CONTROL_TYPE)) { + case IEEE80211_ML_CONTROL_TYPE_BASIC: + elems->ml_basic_elem = (void *)elem; + elems->ml_basic = data; + elems->ml_basic_len = len; + break; + case IEEE80211_ML_CONTROL_TYPE_RECONF: + elems->ml_reconf_elem = (void *)elem; + elems->ml_reconf = data; + elems->ml_reconf_len = len; + break; + default: + break; + } } break; } -- cgit v1.2.3 From e2efec97c3ad503042db27baaf7c8cb5d1348a83 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Fri, 16 Jun 2023 09:54:09 +0300 Subject: wifi: mac80211: Rename ieee80211_mle_sta_prof_size_ok() Rename it to 
ieee80211_mle_basic_sta_prof_size_ok() as it validates the size of the station profile included in Basic Multi-Link element. Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230616094949.9bdfd263974f.I7bebd26894f33716e93cc7da576ef3215e0ba727@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 6 ++++-- net/mac80211/util.c | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 47ddc65b443b..aeedd49e5101 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4798,11 +4798,13 @@ struct ieee80211_mle_per_sta_profile { } __packed; /** - * ieee80211_mle_sta_prof_size_ok - validate multi-link element sta profile size + * ieee80211_mle_basic_sta_prof_size_ok - validate basic multi-link element sta + * profile size * @data: pointer to the sub element data * @len: length of the containing sub element */ -static inline bool ieee80211_mle_sta_prof_size_ok(const u8 *data, size_t len) +static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data, + size_t len) { const struct ieee80211_mle_per_sta_profile *prof = (const void *)data; u16 control; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 8da6bc43735a..ef53d3baece7 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1496,7 +1496,8 @@ static void ieee80211_mle_get_sta_prof(struct ieee802_11_elems *elems, if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE) continue; - if (!ieee80211_mle_sta_prof_size_ok(sub->data, sub->datalen)) + if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data, + sub->datalen)) return; control = le16_to_cpu(prof->control); -- cgit v1.2.3 From b22552fcaf1970360005c805d7fba4046cf2ab4a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 16 Jun 2023 22:28:44 +0200 Subject: wifi: cfg80211: fix regulatory disconnect for non-MLO The multi-link loop here broke disconnect when multi-link operation (MLO) isn't active for a given interface, since in that case valid_links is 0 (indicating no links, i.e. no MLO.) Fix this by taking that into account properly and skipping the link only if there are valid_links in the first place. Cc: stable@vger.kernel.org Fixes: 7b0a0e3c3a88 ("wifi: cfg80211: do some rework towards MLO link APIs") Signed-off-by: Johannes Berg Link: https://lore.kernel.org/r/20230616222844.eb073d650c75.I72739923ef80919889ea9b50de9e4ba4baa836ae@changeid Signed-off-by: Johannes Berg --- net/wireless/reg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 26f11e4746c0..f5ea1f373ab7 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2352,7 +2352,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) if (!wdev->valid_links && link > 0) break; - if (!(wdev->valid_links & BIT(link))) + if (wdev->valid_links && !(wdev->valid_links & BIT(link))) continue; switch (iftype) { case NL80211_IFTYPE_AP: -- cgit v1.2.3 From e8c2af660ba0790afd14d5cbc2fd05c6dc85e207 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 16 Jun 2023 22:28:45 +0200 Subject: wifi: cfg80211: fix regulatory disconnect with OCB/NAN Since regulatory disconnect was added, OCB and NAN interface types were added, which made it completely unusable for any driver that allowed OCB/NAN. 
Add OCB/NAN (though NAN doesn't do anything, we don't have any info) and also remove all the logic that opts out, so it won't be broken again if/when new interface types are added. Fixes: 6e0bd6c35b02 ("cfg80211: 802.11p OCB mode handling") Fixes: cb3b7d87652a ("cfg80211: add start / stop NAN commands") Signed-off-by: Johannes Berg Link: https://lore.kernel.org/r/20230616222844.2794d1625a26.I8e78a3789a29e6149447b3139df724a6f1b46fc3@changeid Signed-off-by: Johannes Berg --- include/net/regulatory.h | 13 +------------ net/wireless/core.c | 16 ---------------- net/wireless/reg.c | 14 ++++++++++---- 3 files changed, 11 insertions(+), 32 deletions(-) (limited to 'net') diff --git a/include/net/regulatory.h b/include/net/regulatory.h index 896191f420d5..b2cb4a9eb04d 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -140,17 +140,6 @@ struct regulatory_request { * otherwise initiating radiation is not allowed. This will enable the * relaxations enabled under the CFG80211_REG_RELAX_NO_IR configuration * option - * @REGULATORY_IGNORE_STALE_KICKOFF: the regulatory core will _not_ make sure - * all interfaces on this wiphy reside on allowed channels. If this flag - * is not set, upon a regdomain change, the interfaces are given a grace - * period (currently 60 seconds) to disconnect or move to an allowed - * channel. Interfaces on forbidden channels are forcibly disconnected. - * Currently these types of interfaces are supported for enforcement: - * NL80211_IFTYPE_ADHOC, NL80211_IFTYPE_STATION, NL80211_IFTYPE_AP, - * NL80211_IFTYPE_AP_VLAN, NL80211_IFTYPE_MONITOR, - * NL80211_IFTYPE_P2P_CLIENT, NL80211_IFTYPE_P2P_GO, - * NL80211_IFTYPE_P2P_DEVICE. The flag will be set by default if a device - * includes any modes unsupported for enforcement checking. * @REGULATORY_WIPHY_SELF_MANAGED: for devices that employ wiphy-specific * regdom management. These devices will ignore all regdom changes not * originating from their own wiphy. 
@@ -177,7 +166,7 @@ enum ieee80211_regulatory_flags { REGULATORY_COUNTRY_IE_FOLLOW_POWER = BIT(3), REGULATORY_COUNTRY_IE_IGNORE = BIT(4), REGULATORY_ENABLE_RELAX_NO_IR = BIT(5), - REGULATORY_IGNORE_STALE_KICKOFF = BIT(6), + /* reuse bit 6 next time */ REGULATORY_WIPHY_SELF_MANAGED = BIT(7), }; diff --git a/net/wireless/core.c b/net/wireless/core.c index fc449bea39a1..25bc2e50a061 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -756,22 +756,6 @@ int wiphy_register(struct wiphy *wiphy) return -EINVAL; } - /* - * if a wiphy has unsupported modes for regulatory channel enforcement, - * opt-out of enforcement checking - */ - if (wiphy->interface_modes & ~(BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_MESH_POINT) | - BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_P2P_DEVICE) | - BIT(NL80211_IFTYPE_NAN) | - BIT(NL80211_IFTYPE_AP_VLAN) | - BIT(NL80211_IFTYPE_MONITOR))) - wiphy->regulatory_flags |= REGULATORY_IGNORE_STALE_KICKOFF; - if (WARN_ON((wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) && (wiphy->regulatory_flags & (REGULATORY_CUSTOM_REG | diff --git a/net/wireless/reg.c b/net/wireless/reg.c index f5ea1f373ab7..f9e03850d71b 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2391,9 +2391,17 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) case NL80211_IFTYPE_P2P_DEVICE: /* no enforcement required */ break; + case NL80211_IFTYPE_OCB: + if (!wdev->u.ocb.chandef.chan) + continue; + chandef = wdev->u.ocb.chandef; + break; + case NL80211_IFTYPE_NAN: + /* we have no info, but NAN is also pretty universal */ + continue; default: /* others not implemented for now */ - WARN_ON(1); + WARN_ON_ONCE(1); break; } @@ -2452,9 +2460,7 @@ static void reg_check_chans_work(struct work_struct *work) rtnl_lock(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) - if (!(rdev->wiphy.regulatory_flags & - REGULATORY_IGNORE_STALE_KICKOFF)) - reg_leave_invalid_chans(&rdev->wiphy); + reg_leave_invalid_chans(&rdev->wiphy); rtnl_unlock(); } -- cgit v1.2.3 From dbd396636870b30c2c11c098bfc30e124198d29f Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Sun, 18 Jun 2023 21:49:44 +0300 Subject: wifi: mac80211: Include Multi-Link in CRC calculation Include the Multi-Link elements found in beacon frames in the CRC calculation, as these elements are intended to reflect changes in the AP MLD state. Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214435.ae8246b93d85.Ia64b45198de90ff7f70abcc997841157f148ea40@changeid Signed-off-by: Johannes Berg --- net/mac80211/util.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index ef53d3baece7..6c52c597c187 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -987,6 +987,10 @@ ieee80211_parse_extension_element(u32 *crc, const struct ieee80211_multi_link_elem *mle = (void *)data; + if (crc) + *crc = crc32_be(*crc, (void *)elem, + elem->datalen + 2); + switch (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE)) { case IEEE80211_ML_CONTROL_TYPE_BASIC: -- cgit v1.2.3 From eeec7574ec3c03c69adc99492df74dc1cc0ebd63 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Sun, 18 Jun 2023 21:49:46 +0300 Subject: wifi: ieee80211: add helper to validate ML element type and size The helper functions to retrieve the EML capabilities and medium synchronization delay both assume that the type is correct. 
Instead of assuming the length is correct and still checking the type, add a new helper to check both and don't do any verification. Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214435.1b50e7a3b3cf.I9385514d8eb6d6d3c82479a6fa732ef65313e554@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 50 +++++++++++++++++++++++++++++------------------ net/mac80211/mlme.c | 3 ++- 2 files changed, 33 insertions(+), 20 deletions(-) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 97edc3b404dd..b107f21e1233 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4639,10 +4639,10 @@ static inline u8 ieee80211_mle_common_size(const u8 *data) * ieee80211_mle_get_eml_sync_delay - returns the medium sync delay * @data: pointer to the multi link EHT IE * - * The element is assumed to be big enough. This must be checked by - * ieee80211_mle_size_ok(). - * If the medium synchronization can't be found (the type is not basic, or - * the medium sync presence bit is clear), 0 will be returned. + * The element is assumed to be of the correct type (BASIC) and big enough, + * this must be checked using ieee80211_mle_type_ok(). + * + * If the medium synchronization is not present, then 0 is returned. */ static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data) { @@ -4650,13 +4650,7 @@ static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data) u16 control = le16_to_cpu(mle->control); const u8 *common = mle->variable; - if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) != - IEEE80211_ML_CONTROL_TYPE_BASIC) - return 0; - - /* common points now at the beginning of - * ieee80211_mle_basic_common_info - */ + /* common points now at the beginning of ieee80211_mle_basic_common_info */ common += sizeof(struct ieee80211_mle_basic_common_info); if (!(control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)) @@ -4674,10 +4668,10 @@ static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data) * ieee80211_mle_get_eml_cap - returns the EML capability * @data: pointer to the multi link EHT IE * - * The element is assumed to be big enough. This must be checked by - * ieee80211_mle_size_ok(). - * If the EML capability can't be found (the type is not basic, or - * the EML capability presence bit is clear), 0 will be returned. + * The element is assumed to be of the correct type (BASIC) and big enough, + * this must be checked using ieee80211_mle_type_ok(). + * + * If the EML capability is not present, 0 will be returned. 
*/ static inline u16 ieee80211_mle_get_eml_cap(const u8 *data) { @@ -4685,10 +4679,6 @@ static inline u16 ieee80211_mle_get_eml_cap(const u8 *data) u16 control = le16_to_cpu(mle->control); const u8 *common = mle->variable; - if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) != - IEEE80211_ML_CONTROL_TYPE_BASIC) - return 0; - /* common points now at the beginning of ieee80211_mle_basic_common_info */ common += sizeof(struct ieee80211_mle_basic_common_info); @@ -4773,6 +4763,28 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len) return mle->variable[0] >= common; } +/** + * ieee80211_mle_type_ok - validate multi-link element type and size + * @data: pointer to the element data + * @type: expected type of the element + * @len: length of the containing element + */ +static inline bool ieee80211_mle_type_ok(const u8 *data, u8 type, size_t len) +{ + const struct ieee80211_multi_link_elem *mle = (const void *)data; + u16 control; + + if (!ieee80211_mle_size_ok(data, len)) + return false; + + control = le16_to_cpu(mle->control); + + if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) == type) + return true; + + return false; +} + enum ieee80211_mle_subelems { IEEE80211_MLE_SUBELEM_PER_STA_PROFILE = 0, IEEE80211_MLE_SUBELEM_FRAGMENT = 254, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b8f8220cd9ff..30588703ffd3 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4852,7 +4852,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, /* data + 1 / datalen - 1 since it's an extended element */ if (eht_ml_elem && - ieee80211_mle_size_ok(eht_ml_elem->data + 1, + ieee80211_mle_type_ok(eht_ml_elem->data + 1, + IEEE80211_ML_CONTROL_TYPE_BASIC, eht_ml_elem->datalen - 1)) { sdata->vif.cfg.eml_cap = ieee80211_mle_get_eml_cap(eht_ml_elem->data + 1); -- cgit v1.2.3 From 891d4d5831ee4c5e45a3ccba11577cdc6e57a726 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Sun, 18 Jun 2023 21:49:48 +0300 Subject: wifi: cfg80211: Always ignore ML element The element should never be inherited, so always exclude it. Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214435.c0e17989b4ed.I7cecb5ab7cd6919e61839b50ce5156904b41d7d8@changeid Signed-off-by: Johannes Berg --- net/wireless/scan.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index dc71c6ac5bf5..095dc9db8750 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -218,6 +218,10 @@ bool cfg80211_is_element_inherited(const struct element *elem, if (elem->id == WLAN_EID_MULTIPLE_BSSID) return false; + if (elem->id == WLAN_EID_EXTENSION && elem->datalen > 1 && + elem->data[0] == WLAN_EID_EXT_EHT_MULTI_LINK) + return false; + if (!non_inherit_elem || non_inherit_elem->datalen < 2) return true; -- cgit v1.2.3 From eb142608e2c4ea0acefefb00025af523195d30d3 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Sun, 18 Jun 2023 21:49:50 +0300 Subject: wifi: cfg80211: use a struct for inform_single_bss data The argument is getting quite large, so use a struct internally to pass around the information. 
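Concretely, the long parameter list of cfg80211_inform_single_bss_data() becomes a cfg80211_inform_single_bss_data context struct that callers fill with designated initializers, so later patches can add fields without touching every call site. A trimmed illustration of the resulting call pattern (the wrapper name and locals are placeholders; cfg80211_inform_bss_data() and cfg80211_parse_mbssid_data() do exactly this in the diff below):

	static struct cfg80211_bss *
	inform_presp_example(struct wiphy *wiphy,
			     struct cfg80211_inform_bss *drv_data,
			     const u8 *bssid, u64 tsf, u16 capability,
			     u16 beacon_int, const u8 *ie, size_t ielen)
	{
		struct cfg80211_inform_single_bss_data data = {
			.drv_data = drv_data,
			.ftype = CFG80211_BSS_FTYPE_PRESP,
			.tsf = tsf,
			.capability = capability,
			.beacon_interval = beacon_int,
			.ie = ie,
			.ielen = ielen,
		};

		memcpy(data.bssid, bssid, ETH_ALEN);

		return cfg80211_inform_single_bss_data(wiphy, &data, GFP_KERNEL);
	}
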
Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214436.831ab8a87b6f.I3bcc83d90f41d6f8a47b39528575dad0a9ec3564@changeid Signed-off-by: Johannes Berg --- net/wireless/scan.c | 209 ++++++++++++++++++++++++++++------------------------ 1 file changed, 112 insertions(+), 97 deletions(-) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 095dc9db8750..974a6a8240dd 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1644,12 +1644,6 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, return true; } -struct cfg80211_non_tx_bss { - struct cfg80211_bss *tx_bss; - u8 max_bssid_indicator; - u8 bssid_index; -}; - static void cfg80211_update_hidden_bsses(struct cfg80211_internal_bss *known, const struct cfg80211_bss_ies *new_ies, const struct cfg80211_bss_ies *old_ies) @@ -1977,17 +1971,30 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, return alt_channel; } +struct cfg80211_inform_single_bss_data { + struct cfg80211_inform_bss *drv_data; + enum cfg80211_bss_frame_type ftype; + u8 bssid[ETH_ALEN]; + u64 tsf; + u16 capability; + u16 beacon_interval; + const u8 *ie; + size_t ielen; + + /* Set for nontransmitted BSSIDs */ + struct cfg80211_bss *source_bss; + u8 max_bssid_indicator; + u8 bssid_index; +}; + /* Returned bss is reference counted and must be cleaned up appropriately. */ static struct cfg80211_bss * cfg80211_inform_single_bss_data(struct wiphy *wiphy, - struct cfg80211_inform_bss *data, - enum cfg80211_bss_frame_type ftype, - const u8 *bssid, u64 tsf, u16 capability, - u16 beacon_interval, const u8 *ie, size_t ielen, - struct cfg80211_non_tx_bss *non_tx_data, + struct cfg80211_inform_single_bss_data *data, gfp_t gfp) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + struct cfg80211_inform_bss *drv_data = data->drv_data; struct cfg80211_bss_ies *ies; struct ieee80211_channel *channel; struct cfg80211_internal_bss tmp = {}, *res; @@ -1999,40 +2006,41 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, return NULL; if (WARN_ON(wiphy->signal_type == CFG80211_SIGNAL_TYPE_UNSPEC && - (data->signal < 0 || data->signal > 100))) + (drv_data->signal < 0 || drv_data->signal > 100))) return NULL; - channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan, - data->scan_width); + channel = cfg80211_get_bss_channel(wiphy, data->ie, data->ielen, + drv_data->chan, drv_data->scan_width); if (!channel) return NULL; - memcpy(tmp.pub.bssid, bssid, ETH_ALEN); + memcpy(tmp.pub.bssid, data->bssid, ETH_ALEN); tmp.pub.channel = channel; - tmp.pub.scan_width = data->scan_width; - tmp.pub.signal = data->signal; - tmp.pub.beacon_interval = beacon_interval; - tmp.pub.capability = capability; - tmp.ts_boottime = data->boottime_ns; - tmp.parent_tsf = data->parent_tsf; - ether_addr_copy(tmp.parent_bssid, data->parent_bssid); - - if (non_tx_data) { - tmp.pub.transmitted_bss = non_tx_data->tx_bss; - ts = bss_from_pub(non_tx_data->tx_bss)->ts; - tmp.pub.bssid_index = non_tx_data->bssid_index; - tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator; + tmp.pub.scan_width = drv_data->scan_width; + tmp.pub.signal = drv_data->signal; + tmp.pub.beacon_interval = data->beacon_interval; + tmp.pub.capability = data->capability; + tmp.ts_boottime = drv_data->boottime_ns; + tmp.parent_tsf = drv_data->parent_tsf; + ether_addr_copy(tmp.parent_bssid, drv_data->parent_bssid); + + if (data->source_bss) { + tmp.pub.transmitted_bss = data->source_bss; + ts = 
bss_from_pub(data->source_bss)->ts; + tmp.pub.bssid_index = data->bssid_index; + tmp.pub.max_bssid_indicator = data->max_bssid_indicator; } else { ts = jiffies; if (channel->band == NL80211_BAND_60GHZ) { - bss_type = capability & WLAN_CAPABILITY_DMG_TYPE_MASK; + bss_type = data->capability & + WLAN_CAPABILITY_DMG_TYPE_MASK; if (bss_type == WLAN_CAPABILITY_DMG_TYPE_AP || bss_type == WLAN_CAPABILITY_DMG_TYPE_PBSS) regulatory_hint_found_beacon(wiphy, channel, gfp); } else { - if (capability & WLAN_CAPABILITY_ESS) + if (data->capability & WLAN_CAPABILITY_ESS) regulatory_hint_found_beacon(wiphy, channel, gfp); } @@ -2046,15 +2054,15 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, * override the IEs pointer should we have received an earlier * indication of Probe Response data. */ - ies = kzalloc(sizeof(*ies) + ielen, gfp); + ies = kzalloc(sizeof(*ies) + data->ielen, gfp); if (!ies) return NULL; - ies->len = ielen; - ies->tsf = tsf; + ies->len = data->ielen; + ies->tsf = data->tsf; ies->from_beacon = false; - memcpy(ies->data, ie, ielen); + memcpy(ies->data, data->ie, data->ielen); - switch (ftype) { + switch (data->ftype) { case CFG80211_BSS_FTYPE_BEACON: ies->from_beacon = true; fallthrough; @@ -2067,7 +2075,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, } rcu_assign_pointer(tmp.pub.ies, ies); - signal_valid = data->chan == channel; + signal_valid = drv_data->chan == channel; spin_lock_bh(&rdev->bss_lock); res = __cfg80211_bss_update(rdev, &tmp, signal_valid, ts); if (!res) @@ -2075,12 +2083,11 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, rdev_inform_bss(rdev, &res->pub, ies, data->drv_data); - if (non_tx_data) { + if (data->source_bss) { /* this is a nontransmitting bss, we need to add it to * transmitting bss' list if it is not there */ - if (cfg80211_add_nontrans_list(non_tx_data->tx_bss, - &res->pub)) { + if (cfg80211_add_nontrans_list(data->source_bss, &res->pub)) { if (__cfg80211_unlink_bss(rdev, res)) { rdev->bss_generation++; res = NULL; @@ -2173,43 +2180,47 @@ size_t cfg80211_merge_profile(const u8 *ie, size_t ielen, } EXPORT_SYMBOL(cfg80211_merge_profile); -static void cfg80211_parse_mbssid_data(struct wiphy *wiphy, - struct cfg80211_inform_bss *data, - enum cfg80211_bss_frame_type ftype, - const u8 *bssid, u64 tsf, - u16 beacon_interval, const u8 *ie, - size_t ielen, - struct cfg80211_non_tx_bss *non_tx_data, - gfp_t gfp) -{ +static void +cfg80211_parse_mbssid_data(struct wiphy *wiphy, + struct cfg80211_inform_single_bss_data *tx_data, + struct cfg80211_bss *source_bss, + gfp_t gfp) +{ + struct cfg80211_inform_single_bss_data data = { + .drv_data = tx_data->drv_data, + .ftype = tx_data->ftype, + .tsf = tx_data->tsf, + .beacon_interval = tx_data->beacon_interval, + .source_bss = source_bss, + }; const u8 *mbssid_index_ie; const struct element *elem, *sub; - size_t new_ie_len; - u8 new_bssid[ETH_ALEN]; u8 *new_ie, *profile; u64 seen_indices = 0; - u16 capability; struct cfg80211_bss *bss; - if (!non_tx_data) + if (!source_bss) return; - if (!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) + if (!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, + tx_data->ie, tx_data->ielen)) return; if (!wiphy->support_mbssid) return; if (wiphy->support_only_he_mbssid && - !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) + !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, + tx_data->ie, tx_data->ielen)) return; new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp); if (!new_ie) return; - profile = kmalloc(ielen, gfp); + profile = kmalloc(tx_data->ielen, gfp); 
if (!profile) goto out; - for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) { + for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, + tx_data->ie, tx_data->ielen) { if (elem->datalen < 4) continue; if (elem->data[0] < 1 || (int)elem->data[0] > 8) @@ -2231,12 +2242,13 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy, continue; } - memset(profile, 0, ielen); - profile_len = cfg80211_merge_profile(ie, ielen, + memset(profile, 0, tx_data->ielen); + profile_len = cfg80211_merge_profile(tx_data->ie, + tx_data->ielen, elem, sub, profile, - ielen); + tx_data->ielen); /* found a Nontransmitted BSSID Profile */ mbssid_index_ie = cfg80211_find_ie @@ -2256,31 +2268,27 @@ static void cfg80211_parse_mbssid_data(struct wiphy *wiphy, seen_indices |= BIT_ULL(mbssid_index_ie[2]); - non_tx_data->bssid_index = mbssid_index_ie[2]; - non_tx_data->max_bssid_indicator = elem->data[0]; + data.bssid_index = mbssid_index_ie[2]; + data.max_bssid_indicator = elem->data[0]; + + cfg80211_gen_new_bssid(tx_data->bssid, + data.max_bssid_indicator, + data.bssid_index, + data.bssid); - cfg80211_gen_new_bssid(bssid, - non_tx_data->max_bssid_indicator, - non_tx_data->bssid_index, - new_bssid); memset(new_ie, 0, IEEE80211_MAX_DATA_LEN); - new_ie_len = cfg80211_gen_new_ie(ie, ielen, + data.ie = new_ie; + data.ielen = cfg80211_gen_new_ie(tx_data->ie, + tx_data->ielen, profile, - profile_len, new_ie, + profile_len, + new_ie, IEEE80211_MAX_DATA_LEN); - if (!new_ie_len) + if (!data.ielen) continue; - capability = get_unaligned_le16(profile + 2); - bss = cfg80211_inform_single_bss_data(wiphy, data, - ftype, - new_bssid, tsf, - capability, - beacon_interval, - new_ie, - new_ie_len, - non_tx_data, - gfp); + data.capability = get_unaligned_le16(profile + 2); + bss = cfg80211_inform_single_bss_data(wiphy, &data, gfp); if (!bss) break; cfg80211_put_bss(wiphy, bss); @@ -2360,18 +2368,24 @@ cfg80211_inform_bss_data(struct wiphy *wiphy, u16 beacon_interval, const u8 *ie, size_t ielen, gfp_t gfp) { + struct cfg80211_inform_single_bss_data inform_data = { + .drv_data = data, + .ftype = ftype, + .tsf = tsf, + .capability = capability, + .beacon_interval = beacon_interval, + .ie = ie, + .ielen = ielen, + }; struct cfg80211_bss *res; - struct cfg80211_non_tx_bss non_tx_data; - res = cfg80211_inform_single_bss_data(wiphy, data, ftype, bssid, tsf, - capability, beacon_interval, ie, - ielen, NULL, gfp); + memcpy(inform_data.bssid, bssid, ETH_ALEN); + + res = cfg80211_inform_single_bss_data(wiphy, &inform_data, gfp); if (!res) return NULL; - non_tx_data.tx_bss = res; - cfg80211_parse_mbssid_data(wiphy, data, ftype, bssid, tsf, - beacon_interval, ie, ielen, &non_tx_data, - gfp); + + cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp); return res; } EXPORT_SYMBOL(cfg80211_inform_bss_data); @@ -2517,12 +2531,13 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, struct ieee80211_mgmt *mgmt, size_t len, gfp_t gfp) { + struct cfg80211_inform_single_bss_data inform_data = { + .drv_data = data, + .ie = mgmt->u.probe_resp.variable, + .ielen = len - offsetof(struct ieee80211_mgmt, + u.probe_resp.variable), + }; struct cfg80211_bss *res; - const u8 *ie = mgmt->u.probe_resp.variable; - size_t ielen = len - offsetof(struct ieee80211_mgmt, - u.probe_resp.variable); - enum cfg80211_bss_frame_type ftype; - struct cfg80211_non_tx_bss non_tx_data = {}; res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt, len, gfp); @@ -2533,15 +2548,15 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, if 
(ieee80211_is_s1g_beacon(mgmt->frame_control)) return res; - ftype = ieee80211_is_beacon(mgmt->frame_control) ? + inform_data.ftype = ieee80211_is_beacon(mgmt->frame_control) ? CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP; - non_tx_data.tx_bss = res; + memcpy(inform_data.bssid, mgmt->bssid, ETH_ALEN); + inform_data.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp); + inform_data.beacon_interval = + le16_to_cpu(mgmt->u.probe_resp.beacon_int); /* process each non-transmitting bss */ - cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid, - le64_to_cpu(mgmt->u.probe_resp.timestamp), - le16_to_cpu(mgmt->u.probe_resp.beacon_int), - ie, ielen, &non_tx_data, gfp); + cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp); return res; } -- cgit v1.2.3 From dc92e54c30c4bc9d30e674a445dfe1afdca991cf Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Sun, 18 Jun 2023 21:49:52 +0300 Subject: wifi: cfg80211: use structs for TBTT information access Make the data access a bit nicer overall by using structs. There is a small change here to also accept a TBTT information length of eight bytes as we do not require the 20 MHz PSD information. This also fixes a bug reading the short SSID on big endian machines. Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214436.4c3f8901c1bc.Ic3e94fd6e1bccff7948a252ad3bb87e322690a17@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 3 --- net/wireless/scan.c | 61 +++++++++++++++++++++++++---------------------- 2 files changed, 33 insertions(+), 31 deletions(-) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 5a27c232afdb..e145af7448a3 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4483,9 +4483,6 @@ static inline bool for_each_element_completed(const struct element *element, #define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0 #define IEEE80211_TBTT_INFO_TYPE_TBTT 0 #define IEEE80211_TBTT_INFO_TYPE_MLD 1 -#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 9 -#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 13 -#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM_MLD_PARAM 16 #define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01 #define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02 diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 974a6a8240dd..f0b4d7671d17 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -574,39 +574,41 @@ static void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list) static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry, const u8 *pos, u8 length, const struct element *ssid_elem, - int s_ssid_tmp) + u32 s_ssid_tmp) { - /* skip the TBTT offset */ - pos++; + u8 bss_params; - /* ignore entries with invalid BSSID */ - if (!is_valid_ether_addr(pos)) - return -EINVAL; - - memcpy(entry->bssid, pos, ETH_ALEN); - pos += ETH_ALEN; + /* The length is already verified by the caller to contain bss_params */ + if (length > sizeof(struct ieee80211_tbtt_info_7_8_9)) { + struct ieee80211_tbtt_info_ge_11 *tbtt_info = (void *)pos; - if (length >= IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM) { - memcpy(&entry->short_ssid, pos, - sizeof(entry->short_ssid)); + memcpy(entry->bssid, tbtt_info->bssid, ETH_ALEN); + entry->short_ssid = le32_to_cpu(tbtt_info->short_ssid); entry->short_ssid_valid = true; - pos += 4; + + bss_params = tbtt_info->bss_params; + } else { + struct ieee80211_tbtt_info_7_8_9 *tbtt_info = (void *)pos; + + memcpy(entry->bssid, tbtt_info->bssid, 
ETH_ALEN); + + bss_params = tbtt_info->bss_params; } + /* ignore entries with invalid BSSID */ + if (!is_valid_ether_addr(entry->bssid)) + return -EINVAL; + /* skip non colocated APs */ - if (!cfg80211_parse_bss_param(*pos, entry)) + if (!cfg80211_parse_bss_param(bss_params, entry)) return -EINVAL; - pos++; - if (length == IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM) { - /* - * no information about the short ssid. Consider the entry valid - * for now. It would later be dropped in case there are explicit - * SSIDs that need to be matched - */ - if (!entry->same_ssid) - return 0; - } + /* no information about the short ssid. Consider the entry valid + * for now. It would later be dropped in case there are explicit + * SSIDs that need to be matched + */ + if (!entry->same_ssid && !entry->short_ssid_valid) + return 0; if (entry->same_ssid) { entry->short_ssid = s_ssid_tmp; @@ -617,10 +619,10 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry, * cfg80211_parse_colocated_ap(), before calling this * function. */ - memcpy(&entry->ssid, &ssid_elem->data, - ssid_elem->datalen); + memcpy(&entry->ssid, &ssid_elem->data, ssid_elem->datalen); entry->ssid_len = ssid_elem->datalen; } + return 0; } @@ -682,8 +684,11 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies, * next AP info */ if (band != NL80211_BAND_6GHZ || - (length != IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM && - length < IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM)) { + !(length == offsetofend(struct ieee80211_tbtt_info_7_8_9, + bss_params) || + length == sizeof(struct ieee80211_tbtt_info_7_8_9) || + length >= offsetofend(struct ieee80211_tbtt_info_ge_11, + bss_params))) { pos += count * length; continue; } -- cgit v1.2.3 From 2481b5da9c6b2ee1fde55a1c29eb2ca377145a10 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Sun, 18 Jun 2023 21:49:53 +0300 Subject: wifi: cfg80211: handle BSS data contained in ML probe responses The basic multi-link element within an multi-link probe response will contain full information about BSSes that are part of an MLD AP. This BSS information may be used to associate with a link of an MLD AP without having received a beacon from the BSS itself. This patch adds parsing of the data and adding/updating the BSS using the received elements. Doing this means that userspace can discover the BSSes using an ML probe request and request association on these links. 
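As a side note on the mechanics this relies on: reassembling an element that was split across Fragment elements, which is what the defragmentation helpers below are used for, conceptually comes down to copying the element's payload and then appending the payloads of the Fragment elements that directly follow it. The standalone sketch below illustrates only that idea; it is not the kernel implementation, and the buffer handling and the EID_FRAGMENT name are simplifications made for the sketch.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define EID_FRAGMENT 242	/* Fragment element ID in IEEE 802.11 */

/*
 * Reassemble one possibly fragmented element: copy the payload of the
 * element starting at 'pos', then the payloads of any Fragment elements
 * that immediately follow it.  'end' marks the end of the element buffer.
 * Returns the defragmented length, or -1 on malformed or oversized input.
 */
static int defragment_element(const uint8_t *pos, const uint8_t *end,
			      uint8_t *out, size_t out_len)
{
	size_t copied = 0;

	if (!pos || pos + 2 > end)
		return -1;

	do {
		uint8_t len = pos[1];

		if (pos + 2 + len > end)
			return -1;		/* element runs past the buffer */
		if (copied + len > out_len)
			return -1;		/* output buffer too small */

		memcpy(out + copied, pos + 2, len);
		copied += len;
		pos += 2 + len;			/* advance to the next element */
	} while (pos + 2 <= end && pos[0] == EID_FRAGMENT);

	return (int)copied;
}

The patch additionally defragments the per-STA profile sub-elements in place, which follows the same pattern applied to the sub-element fragment ID.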
Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214436.29593bd0ae1f.Ic9a67b8f022360aa202b870a932897a389171b14@changeid [swap loop conditions smatch complained about] Signed-off-by: Johannes Berg --- net/wireless/scan.c | 361 +++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 354 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index f0b4d7671d17..2e7787b0828b 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1979,6 +1979,7 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, struct cfg80211_inform_single_bss_data { struct cfg80211_inform_bss *drv_data; enum cfg80211_bss_frame_type ftype; + struct ieee80211_channel *channel; u8 bssid[ETH_ALEN]; u64 tsf; u16 capability; @@ -1986,7 +1987,12 @@ struct cfg80211_inform_single_bss_data { const u8 *ie; size_t ielen; - /* Set for nontransmitted BSSIDs */ + enum { + BSS_SOURCE_DIRECT = 0, + BSS_SOURCE_MBSSID, + BSS_SOURCE_STA_PROFILE, + } bss_source; + /* Set if reporting bss_source != BSS_SOURCE_DIRECT */ struct cfg80211_bss *source_bss; u8 max_bssid_indicator; u8 bssid_index; @@ -2014,22 +2020,31 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, (drv_data->signal < 0 || drv_data->signal > 100))) return NULL; - channel = cfg80211_get_bss_channel(wiphy, data->ie, data->ielen, - drv_data->chan, drv_data->scan_width); + if (WARN_ON(data->bss_source != BSS_SOURCE_DIRECT && !data->source_bss)) + return NULL; + + channel = data->channel; + if (!channel) + channel = cfg80211_get_bss_channel(wiphy, data->ie, data->ielen, + drv_data->chan, + drv_data->scan_width); if (!channel) return NULL; memcpy(tmp.pub.bssid, data->bssid, ETH_ALEN); tmp.pub.channel = channel; tmp.pub.scan_width = drv_data->scan_width; - tmp.pub.signal = drv_data->signal; + if (data->bss_source != BSS_SOURCE_STA_PROFILE) + tmp.pub.signal = drv_data->signal; + else + tmp.pub.signal = 0; tmp.pub.beacon_interval = data->beacon_interval; tmp.pub.capability = data->capability; tmp.ts_boottime = drv_data->boottime_ns; tmp.parent_tsf = drv_data->parent_tsf; ether_addr_copy(tmp.parent_bssid, drv_data->parent_bssid); - if (data->source_bss) { + if (data->bss_source != BSS_SOURCE_DIRECT) { tmp.pub.transmitted_bss = data->source_bss; ts = bss_from_pub(data->source_bss)->ts; tmp.pub.bssid_index = data->bssid_index; @@ -2088,7 +2103,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, rdev_inform_bss(rdev, &res->pub, ies, data->drv_data); - if (data->source_bss) { + if (data->bss_source == BSS_SOURCE_MBSSID) { /* this is a nontransmitting bss, we need to add it to * transmitting bss' list if it is not there */ @@ -2197,6 +2212,7 @@ cfg80211_parse_mbssid_data(struct wiphy *wiphy, .tsf = tx_data->tsf, .beacon_interval = tx_data->beacon_interval, .source_bss = source_bss, + .bss_source = BSS_SOURCE_MBSSID, }; const u8 *mbssid_index_ie; const struct element *elem, *sub; @@ -2365,6 +2381,332 @@ ssize_t cfg80211_defragment_element(const struct element *elem, const u8 *ies, } EXPORT_SYMBOL(cfg80211_defragment_element); +struct cfg80211_mle { + struct ieee80211_multi_link_elem *mle; + struct ieee80211_mle_per_sta_profile + *sta_prof[IEEE80211_MLD_MAX_NUM_LINKS]; + ssize_t sta_prof_len[IEEE80211_MLD_MAX_NUM_LINKS]; + + u8 data[]; +}; + +static struct cfg80211_mle * +cfg80211_defrag_mle(const struct element *mle, const u8 *ie, size_t ielen, + gfp_t gfp) +{ + const struct element *elem; + struct cfg80211_mle *res; + size_t buf_len; + ssize_t 
mle_len; + u8 common_size, idx; + + if (!mle || !ieee80211_mle_size_ok(mle->data + 1, mle->datalen - 1)) + return NULL; + + /* Required length for first defragmentation */ + buf_len = mle->datalen - 1; + for_each_element(elem, mle->data + mle->datalen, + ielen - sizeof(*mle) + mle->datalen) { + if (elem->id != WLAN_EID_FRAGMENT) + break; + + buf_len += elem->datalen; + } + + res = kzalloc(struct_size(res, data, buf_len), gfp); + if (!res) + return NULL; + + mle_len = cfg80211_defragment_element(mle, ie, ielen, + res->data, buf_len, + WLAN_EID_FRAGMENT); + if (mle_len < 0) + goto error; + + res->mle = (void *)res->data; + + /* Find the sub-element area in the buffer */ + common_size = ieee80211_mle_common_size((u8 *)res->mle); + ie = res->data + common_size; + ielen = mle_len - common_size; + + idx = 0; + for_each_element_id(elem, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE, + ie, ielen) { + res->sta_prof[idx] = (void *)elem->data; + res->sta_prof_len[idx] = elem->datalen; + + idx++; + if (idx >= IEEE80211_MLD_MAX_NUM_LINKS) + break; + } + if (!for_each_element_completed(elem, ie, ielen)) + goto error; + + /* Defragment sta_info in-place */ + for (idx = 0; idx < IEEE80211_MLD_MAX_NUM_LINKS && res->sta_prof[idx]; + idx++) { + if (res->sta_prof_len[idx] < 255) + continue; + + elem = (void *)res->sta_prof[idx] - 2; + + if (idx + 1 < ARRAY_SIZE(res->sta_prof) && + res->sta_prof[idx + 1]) + buf_len = (u8 *)res->sta_prof[idx + 1] - + (u8 *)res->sta_prof[idx]; + else + buf_len = ielen + ie - (u8 *)elem; + + res->sta_prof_len[idx] = + cfg80211_defragment_element(elem, + (u8 *)elem, buf_len, + (u8 *)res->sta_prof[idx], + buf_len, + IEEE80211_MLE_SUBELEM_FRAGMENT); + if (res->sta_prof_len[idx] < 0) + goto error; + } + + return res; + +error: + kfree(res); + return NULL; +} + +static bool +cfg80211_tbtt_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id, + const struct ieee80211_neighbor_ap_info **ap_info, + const u8 **tbtt_info) +{ + const struct ieee80211_neighbor_ap_info *info; + const struct element *rnr; + const u8 *pos, *end; + + for_each_element_id(rnr, WLAN_EID_REDUCED_NEIGHBOR_REPORT, ie, ielen) { + pos = rnr->data; + end = rnr->data + rnr->datalen; + + /* RNR IE may contain more than one NEIGHBOR_AP_INFO */ + while (sizeof(*info) <= end - pos) { + const struct ieee80211_rnr_mld_params *mld_params; + u16 params; + u8 length, i, count, mld_params_offset; + u8 type, lid; + + info = (void *)pos; + count = u8_get_bits(info->tbtt_info_hdr, + IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1; + length = info->tbtt_info_len; + + pos += sizeof(*info); + + if (count * length > end - pos) + return false; + + type = u8_get_bits(info->tbtt_info_hdr, + IEEE80211_AP_INFO_TBTT_HDR_TYPE); + + /* Only accept full TBTT information. NSTR mobile APs + * use the shortened version, but we ignore them here. 
+ */ + if (type == IEEE80211_TBTT_INFO_TYPE_TBTT && + length >= + offsetofend(struct ieee80211_tbtt_info_ge_11, + mld_params)) { + mld_params_offset = + offsetof(struct ieee80211_tbtt_info_ge_11, mld_params); + } else { + pos += count * length; + continue; + } + + for (i = 0; i < count; i++) { + mld_params = (void *)pos + mld_params_offset; + params = le16_to_cpu(mld_params->params); + + lid = u16_get_bits(params, + IEEE80211_RNR_MLD_PARAMS_LINK_ID); + + if (mld_id == mld_params->mld_id && + link_id == lid) { + *ap_info = info; + *tbtt_info = pos; + + return true; + } + + pos += length; + } + } + } + + return false; +} + +static void cfg80211_parse_ml_sta_data(struct wiphy *wiphy, + struct cfg80211_inform_single_bss_data *tx_data, + struct cfg80211_bss *source_bss, + gfp_t gfp) +{ + struct cfg80211_inform_single_bss_data data = { + .drv_data = tx_data->drv_data, + .ftype = tx_data->ftype, + .source_bss = source_bss, + .bss_source = BSS_SOURCE_STA_PROFILE, + }; + struct ieee80211_multi_link_elem *ml_elem; + const struct element *elem; + struct cfg80211_mle *mle; + u16 control; + u8 *new_ie; + struct cfg80211_bss *bss; + int mld_id; + u16 seen_links = 0; + const u8 *pos; + u8 i; + + if (!source_bss) + return; + + if (tx_data->ftype != CFG80211_BSS_FTYPE_PRESP) + return; + + elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK, + tx_data->ie, tx_data->ielen); + if (!elem || !ieee80211_mle_size_ok(elem->data + 1, elem->datalen - 1)) + return; + + ml_elem = (void *)elem->data + 1; + control = le16_to_cpu(ml_elem->control); + if (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE) != + IEEE80211_ML_CONTROL_TYPE_BASIC) + return; + + /* Must be present when transmitted by an AP (in a probe response) */ + if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT) || + !(control & IEEE80211_MLC_BASIC_PRES_LINK_ID) || + !(control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)) + return; + + /* length + MLD MAC address + link ID info + BSS Params Change Count */ + pos = ml_elem->variable + 1 + 6 + 1 + 1; + + if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)) + pos += 2; + if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_EML_CAPA)) + pos += 2; + + /* MLD capabilities and operations */ + pos += 2; + + /* Not included when the (nontransmitted) AP is responding itself, + * but defined to zero then (Draft P802.11be_D3.0, 9.4.2.170.2) + */ + if (u16_get_bits(control, IEEE80211_MLC_BASIC_PRES_MLD_ID)) { + mld_id = *pos; + pos += 1; + } else { + mld_id = 0; + } + + /* Extended MLD capabilities and operations */ + pos += 2; + + /* Fully defrag the ML element for sta information/profile iteration */ + mle = cfg80211_defrag_mle(elem, tx_data->ie, tx_data->ielen, gfp); + if (!mle) + return; + + new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp); + if (!new_ie) + goto out; + + for (i = 0; i < ARRAY_SIZE(mle->sta_prof) && mle->sta_prof[i]; i++) { + const struct ieee80211_neighbor_ap_info *ap_info; + enum nl80211_band band; + u32 freq; + const u8 *profile; + const u8 *tbtt_info; + ssize_t profile_len; + u8 link_id; + + if (!ieee80211_mle_basic_sta_prof_size_ok((u8 *)mle->sta_prof[i], + mle->sta_prof_len[i])) + continue; + + control = le16_to_cpu(mle->sta_prof[i]->control); + + if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE)) + continue; + + link_id = u16_get_bits(control, + IEEE80211_MLE_STA_CONTROL_LINK_ID); + if (seen_links & BIT(link_id)) + break; + seen_links |= BIT(link_id); + + if (!(control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT) || + !(control & 
IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT) || + !(control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT)) + continue; + + memcpy(data.bssid, mle->sta_prof[i]->variable, ETH_ALEN); + data.beacon_interval = + get_unaligned_le16(mle->sta_prof[i]->variable + 6); + data.tsf = tx_data->tsf + + get_unaligned_le64(mle->sta_prof[i]->variable + 8); + + /* sta_info_len counts itself */ + profile = mle->sta_prof[i]->variable + + mle->sta_prof[i]->sta_info_len - 1; + profile_len = (u8 *)mle->sta_prof[i] + mle->sta_prof_len[i] - + profile; + + if (profile_len < 2) + continue; + + data.capability = get_unaligned_le16(profile); + profile += 2; + profile_len -= 2; + + /* Find in RNR to look up channel information */ + if (!cfg80211_tbtt_info_for_mld_ap(tx_data->ie, tx_data->ielen, + mld_id, link_id, + &ap_info, &tbtt_info)) + continue; + + /* We could sanity check the BSSID is included */ + + if (!ieee80211_operating_class_to_band(ap_info->op_class, + &band)) + continue; + + freq = ieee80211_channel_to_freq_khz(ap_info->channel, band); + data.channel = ieee80211_get_channel_khz(wiphy, freq); + + /* Generate new elements */ + memset(new_ie, 0, IEEE80211_MAX_DATA_LEN); + data.ie = new_ie; + data.ielen = cfg80211_gen_new_ie(tx_data->ie, tx_data->ielen, + profile, profile_len, + new_ie, + IEEE80211_MAX_DATA_LEN); + if (!data.ielen) + continue; + + bss = cfg80211_inform_single_bss_data(wiphy, &data, gfp); + if (!bss) + break; + cfg80211_put_bss(wiphy, bss); + } + +out: + kfree(new_ie); + kfree(mle); +} + struct cfg80211_bss * cfg80211_inform_bss_data(struct wiphy *wiphy, struct cfg80211_inform_bss *data, @@ -2391,6 +2733,9 @@ cfg80211_inform_bss_data(struct wiphy *wiphy, return NULL; cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp); + + cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp); + return res; } EXPORT_SYMBOL(cfg80211_inform_bss_data); @@ -2549,7 +2894,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, if (!res) return NULL; - /* don't do any further MBSSID handling for S1G */ + /* don't do any further MBSSID/ML handling for S1G */ if (ieee80211_is_s1g_beacon(mgmt->frame_control)) return res; @@ -2563,6 +2908,8 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, /* process each non-transmitting bss */ cfg80211_parse_mbssid_data(wiphy, &inform_data, res, gfp); + cfg80211_parse_ml_sta_data(wiphy, &inform_data, res, gfp); + return res; } EXPORT_SYMBOL(cfg80211_inform_bss_frame_data); -- cgit v1.2.3 From a0ed50112b98fa8e9bc85dbeafc82fd97ee06716 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Sun, 18 Jun 2023 21:49:54 +0300 Subject: wifi: cfg80211: do not scan disabled links on 6GHz If a link is disabled on 6GHz, we should not send a probe request on the channel to resolve it. Simply skip such RNR entries so that the link is ignored. Userspace can still see the link in the RNR and may generate an ML probe request in order to associate to the (currently) disabled link. 
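For context, the check added below is a single bit test on the little-endian MLD parameters field of a TBTT entry. The following minimal, standalone illustration shows that kind of test; the mask value and field layout here are placeholders for the sketch, while the real definitions are IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK and the MLD parameters structure in the kernel headers.

#include <stdbool.h>
#include <stdint.h>

/*
 * Placeholder for the "disabled link" bit in the MLD parameters field;
 * the real bit position is defined by 802.11be and represented in the
 * kernel by IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK.
 */
#define MLD_PARAMS_DISABLED_LINK 0x0800

/*
 * Read a 16-bit little-endian value from an unaligned byte buffer.
 * Assembling it byte by byte keeps the result correct on big-endian CPUs.
 */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/*
 * Return true if the TBTT entry's MLD parameters mark the link as disabled,
 * in which case the scan code skips the entry instead of probing it.
 */
static bool link_is_disabled(const uint8_t *mld_params)
{
	return get_le16(mld_params) & MLD_PARAMS_DISABLED_LINK;
}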
Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214436.4f7384006471.Iff8f1081e76a298bd25f9468abb3a586372cddaa@changeid Signed-off-by: Johannes Berg --- net/wireless/scan.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 2e7787b0828b..df868662e1e0 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -587,6 +587,13 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry, entry->short_ssid_valid = true; bss_params = tbtt_info->bss_params; + + /* Ignore disabled links */ + if (length >= offsetofend(typeof(*tbtt_info), mld_params)) { + if (le16_get_bits(tbtt_info->mld_params.params, + IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK)) + return -EINVAL; + } } else { struct ieee80211_tbtt_info_7_8_9 *tbtt_info = (void *)pos; -- cgit v1.2.3 From 065563b20a664a6575dc158688dfb0e121c25b38 Mon Sep 17 00:00:00 2001 From: Veerendranath Jakkam Date: Fri, 17 Mar 2023 19:51:53 +0530 Subject: wifi: cfg80211/nl80211: Add support to indicate STA MLD setup links removal STA MLD setup links may get removed if AP MLD remove the corresponding affiliated APs with Multi-Link reconfiguration as described in P802.11be_D3.0, section 35.3.6.2.2 Removing affiliated APs. Currently, there is no support to notify such operation to cfg80211 and userspace. Add support for the drivers to indicate STA MLD setup links removal to cfg80211 and notify the same to userspace. Upon receiving such indication from the driver, clear the MLO links information of the removed links in the WDEV. Signed-off-by: Veerendranath Jakkam Link: https://lore.kernel.org/r/20230317142153.237900-1-quic_vjakkam@quicinc.com [rename function and attribute, fix kernel-doc] Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 13 ++++++++ include/uapi/linux/nl80211.h | 7 +++++ net/wireless/core.h | 1 + net/wireless/nl80211.c | 70 ++++++++++++++++++++++++++++++++++++++++++++ net/wireless/sme.c | 15 ++++++++++ net/wireless/trace.h | 15 ++++++++++ 6 files changed, 121 insertions(+) (limited to 'net') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9972de114d73..3a736f9286b0 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -9205,4 +9205,17 @@ static inline int cfg80211_color_change_notify(struct net_device *dev) bool cfg80211_valid_disable_subchannel_bitmap(u16 *bitmap, const struct cfg80211_chan_def *chandef); +/** + * cfg80211_links_removed - Notify about removed STA MLD setup links. + * @dev: network device. + * @link_mask: BIT mask of removed STA MLD setup link IDs. + * + * Inform cfg80211 and the userspace about removed STA MLD setup links due to + * AP MLD removing the corresponding affiliated APs with Multi-Link + * reconfiguration. Note that it's not valid to remove all links, in this + * case disconnect instead. + * Also note that the wdev mutex must be held. + */ +void cfg80211_links_removed(struct net_device *dev, u16 link_mask); + #endif /* __NET_CFG80211_H */ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 03939bdb0e48..3190d34269ef 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1309,6 +1309,11 @@ * The number of peers that HW timestamping can be enabled for concurrently * is indicated by %NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS. 
* + * @NL80211_CMD_LINKS_REMOVED: Notify userspace about the removal of STA MLD + * setup links due to AP MLD removing the corresponding affiliated APs with + * Multi-Link reconfiguration. %NL80211_ATTR_MLO_LINKS is used to provide + * information about the removed STA MLD setup links. + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1562,6 +1567,8 @@ enum nl80211_commands { NL80211_CMD_SET_HW_TIMESTAMP, + NL80211_CMD_LINKS_REMOVED, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ diff --git a/net/wireless/core.h b/net/wireless/core.h index 291c6d83d56f..8a807b609ef7 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -576,5 +576,6 @@ void cfg80211_remove_link(struct wireless_dev *wdev, unsigned int link_id); void cfg80211_remove_links(struct wireless_dev *wdev); int cfg80211_remove_virtual_intf(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); +void cfg80211_wdev_release_link_bsses(struct wireless_dev *wdev, u16 link_mask); #endif /* __NET_WIRELESS_CORE_H */ diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 7b547aeb52f1..0da2e6a2a7ea 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -18288,6 +18288,76 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, nlmsg_free(msg); } +void cfg80211_links_removed(struct net_device *dev, u16 link_mask) +{ + struct wireless_dev *wdev = dev->ieee80211_ptr; + struct wiphy *wiphy = wdev->wiphy; + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + struct sk_buff *msg; + struct nlattr *links; + void *hdr; + + ASSERT_WDEV_LOCK(wdev); + trace_cfg80211_links_removed(dev, link_mask); + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && + wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)) + return; + + if (WARN_ON(!wdev->valid_links || !link_mask || + (wdev->valid_links & link_mask) != link_mask || + wdev->valid_links == link_mask)) + return; + + cfg80211_wdev_release_link_bsses(wdev, link_mask); + wdev->valid_links &= ~link_mask; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_LINKS_REMOVED); + if (!hdr) { + nlmsg_free(msg); + return; + } + + if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || + nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex)) + goto nla_put_failure; + + links = nla_nest_start(msg, NL80211_ATTR_MLO_LINKS); + if (!links) + goto nla_put_failure; + + while (link_mask) { + struct nlattr *link; + int link_id = __ffs(link_mask); + + link = nla_nest_start(msg, link_id + 1); + if (!link) + goto nla_put_failure; + + if (nla_put_u8(msg, NL80211_ATTR_MLO_LINK_ID, link_id)) + goto nla_put_failure; + + nla_nest_end(msg, link); + link_mask &= ~(1 << link_id); + } + + nla_nest_end(msg, links); + + genlmsg_end(msg, hdr); + + genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0, + NL80211_MCGRP_MLME, GFP_KERNEL); + return; + + nla_put_failure: + nlmsg_free(msg); +} +EXPORT_SYMBOL(cfg80211_links_removed); + void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, struct net_device *netdev, const u8 *bssid, gfp_t gfp) diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 247369004aaa..9bba233b5a6e 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -491,6 +491,21 @@ static void cfg80211_wdev_release_bsses(struct wireless_dev *wdev) } } +void cfg80211_wdev_release_link_bsses(struct wireless_dev *wdev, u16 link_mask) +{ + unsigned int link; + + 
for_each_valid_link(wdev, link) { + if (!wdev->links[link].client.current_bss || + !(link_mask & BIT(link))) + continue; + cfg80211_unhold_bss(wdev->links[link].client.current_bss); + cfg80211_put_bss(wdev->wiphy, + &wdev->links[link].client.current_bss->pub); + wdev->links[link].client.current_bss = NULL; + } +} + static int cfg80211_sme_get_conn_ies(struct wireless_dev *wdev, const u8 *ies, size_t ies_len, const u8 **out_ies, size_t *out_ies_len) diff --git a/net/wireless/trace.h b/net/wireless/trace.h index e63990b81249..617c0d0dfa96 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -3966,6 +3966,21 @@ TRACE_EVENT(rdev_set_hw_timestamp, __entry->enable) ); +TRACE_EVENT(cfg80211_links_removed, + TP_PROTO(struct net_device *netdev, u16 link_mask), + TP_ARGS(netdev, link_mask), + TP_STRUCT__entry( + NETDEV_ENTRY + __field(u16, link_mask) + ), + TP_fast_assign( + NETDEV_ASSIGN; + __entry->link_mask = link_mask; + ), + TP_printk(NETDEV_PR_FMT ", link_mask:%u", NETDEV_PR_ARG, + __entry->link_mask) +); + #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH -- cgit v1.2.3 From ff32b4506f3ef2228aab601f6d4b37840d05ffaf Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Tue, 6 Jun 2023 17:43:26 +0200 Subject: wifi: mac80211: add ___ieee80211_disconnect variant not locking sdata There are cases where keeping sdata locked for an operation. Add a variant that does not take sdata lock to permit these usecases. Signed-off-by: Benjamin Berg Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 30588703ffd3..5c881d14b1a2 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3353,18 +3353,15 @@ static void ieee80211_report_disconnect(struct ieee80211_sub_if_data *sdata, drv_event_callback(sdata->local, sdata, &event); } -static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) +static void ___ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; bool tx; - sdata_lock(sdata); - if (!ifmgd->associated) { - sdata_unlock(sdata); + if (!ifmgd->associated) return; - } /* in MLO assume we have a link where we can TX the frame */ tx = ieee80211_vif_is_mld(&sdata->vif) || @@ -3413,7 +3410,12 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, ifmgd->reconnect); ifmgd->reconnect = false; +} +static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) +{ + sdata_lock(sdata); + ___ieee80211_disconnect(sdata); sdata_unlock(sdata); } -- cgit v1.2.3 From 79973d5cfdc15134d08036796a372d7ec8a1d51e Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Thu, 11 May 2023 13:13:21 +0200 Subject: wifi: mac80211: add set_active_links variant not locking sdata There are cases where keeping sdata locked for an operation. Add a variant that does not take sdata lock to permit these usecases. 
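In other words, some callers already hold the sdata lock when they need to perform the operation, so the function is split into an unlocked core (the kernel's leading-underscore convention) plus a thin wrapper that takes the lock around it. A generic, standalone illustration of this pattern, using pthreads and invented names rather than the mac80211 code:

#include <pthread.h>

struct ctx {
	pthread_mutex_t lock;
	int value;
};

/*
 * Core operation: the caller must already hold ctx->lock.  In mac80211 the
 * equivalent requirement is expressed with sdata_assert_lock() or
 * lockdep_assert_held(); plain pthreads has no portable way to assert it.
 */
static void __update_value(struct ctx *c, int v)
{
	c->value = v;
}

/* Convenience wrapper for callers that do not hold the lock yet. */
static void update_value(struct ctx *c, int v)
{
	pthread_mutex_lock(&c->lock);
	__update_value(c, v);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct ctx c = { .value = 0 };

	pthread_mutex_init(&c.lock, NULL);

	update_value(&c, 42);		/* wrapper takes the lock itself */

	pthread_mutex_lock(&c.lock);	/* caller already holds the lock */
	__update_value(&c, 43);
	pthread_mutex_unlock(&c.lock);

	pthread_mutex_destroy(&c.lock);
	return 0;
}

The ML reconfiguration work added later in this series calls the unlocked ___ieee80211_disconnect() and __ieee80211_set_active_links() variants directly because it already runs with the sdata lock held.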
Signed-off-by: Benjamin Berg Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 1 + net/mac80211/link.c | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index be3294719cb4..49461350f909 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2034,6 +2034,7 @@ void ieee80211_link_stop(struct ieee80211_link_data *link); int ieee80211_vif_set_links(struct ieee80211_sub_if_data *sdata, u16 new_links, u16 dormant_links); void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata); +int __ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links); /* tx handling */ void ieee80211_clear_tx_pending(struct ieee80211_local *local); diff --git a/net/mac80211/link.c b/net/mac80211/link.c index d090ecc41ea2..6148208b320e 100644 --- a/net/mac80211/link.c +++ b/net/mac80211/link.c @@ -447,14 +447,14 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata, return 0; } -int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links) +int __ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); struct ieee80211_local *local = sdata->local; u16 old_active; int ret; - sdata_lock(sdata); + sdata_assert_lock(sdata); mutex_lock(&local->sta_mtx); mutex_lock(&local->mtx); mutex_lock(&local->key_mtx); @@ -476,6 +476,17 @@ int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links) mutex_unlock(&local->key_mtx); mutex_unlock(&local->mtx); mutex_unlock(&local->sta_mtx); + + return ret; +} + +int ieee80211_set_active_links(struct ieee80211_vif *vif, u16 active_links) +{ + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + int ret; + + sdata_lock(sdata); + ret = __ieee80211_set_active_links(vif, active_links); sdata_unlock(sdata); return ret; -- cgit v1.2.3 From 8eb8dd2ffbbb6b0b8843b66754ee9f129f1b2d6c Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Sun, 18 Jun 2023 21:49:55 +0300 Subject: wifi: mac80211: Support link removal using Reconfiguration ML element Add support for handling link removal indicated by the Reconfiguration Multi-Link element. Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214436.d8a046dc0c1a.I4dcf794da2a2d9f4e5f63a4b32158075d27c0660@changeid [use cfg80211_links_removed() API instead] Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 33 +++++++++ net/mac80211/ieee80211_i.h | 3 + net/mac80211/mlme.c | 169 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 205 insertions(+) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index e145af7448a3..98223b665456 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4891,6 +4891,39 @@ static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data, fixed + prof->sta_info_len <= len; } +#define IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID 0x000f +#define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE 0x0010 +#define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT 0x0020 +#define IEEE80211_MLE_STA_RECONF_CONTROL_DELETE_TIMER_PRESENT 0x0040 + +/** + * ieee80211_mle_reconf_sta_prof_size_ok - validate reconfiguration multi-link + * element sta profile size. 
+ * @data: pointer to the sub element data + * @len: length of the containing sub element + */ +static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data, + size_t len) +{ + const struct ieee80211_mle_per_sta_profile *prof = (const void *)data; + u16 control; + u8 fixed = sizeof(*prof); + u8 info_len = 1; + + if (len < fixed) + return false; + + control = le16_to_cpu(prof->control); + + if (control & IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT) + info_len += ETH_ALEN; + if (control & IEEE80211_MLE_STA_RECONF_CONTROL_DELETE_TIMER_PRESENT) + info_len += 2; + + return prof->sta_info_len >= info_len && + fixed + prof->sta_info_len - 1 <= len; +} + #define for_each_mle_subelement(_elem, _data, _len) \ if (ieee80211_mle_size_ok(_data, _len)) \ for_each_element(_elem, \ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 49461350f909..2f7665998da0 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -551,6 +551,9 @@ struct ieee80211_if_managed { */ u8 *assoc_req_ies; size_t assoc_req_ies_len; + + struct wiphy_delayed_work ml_reconf_work; + u16 removed_links; }; struct ieee80211_if_ibss { diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 5c881d14b1a2..b60f99cf1be0 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -5608,6 +5608,169 @@ static bool ieee80211_config_puncturing(struct ieee80211_link_data *link, return true; } +static void ieee80211_ml_reconf_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + u.mgd.ml_reconf_work.work); + u16 new_valid_links, new_active_links, new_dormant_links; + int ret; + + sdata_lock(sdata); + if (!sdata->u.mgd.removed_links) { + sdata_unlock(sdata); + return; + } + + sdata_info(sdata, + "MLO Reconfiguration: work: valid=0x%x, removed=0x%x\n", + sdata->vif.valid_links, sdata->u.mgd.removed_links); + + new_valid_links = sdata->vif.valid_links & ~sdata->u.mgd.removed_links; + if (new_valid_links == sdata->vif.valid_links) { + sdata_unlock(sdata); + return; + } + + if (!new_valid_links || + !(new_valid_links & ~sdata->vif.dormant_links)) { + sdata_info(sdata, "No valid links after reconfiguration\n"); + ret = -EINVAL; + goto out; + } + + new_active_links = sdata->vif.active_links & ~sdata->u.mgd.removed_links; + if (new_active_links != sdata->vif.active_links) { + if (!new_active_links) + new_active_links = + BIT(ffs(new_valid_links & + ~sdata->vif.dormant_links) - 1); + + ret = __ieee80211_set_active_links(&sdata->vif, + new_active_links); + if (ret) { + sdata_info(sdata, + "Failed setting active links\n"); + goto out; + } + } + + new_dormant_links = sdata->vif.dormant_links & ~sdata->u.mgd.removed_links; + + ret = ieee80211_vif_set_links(sdata, new_valid_links, + new_dormant_links); + if (ret) + sdata_info(sdata, "Failed setting valid links\n"); + +out: + if (!ret) + cfg80211_links_removed(sdata->dev, sdata->u.mgd.removed_links); + else + ___ieee80211_disconnect(sdata); + + sdata->u.mgd.removed_links = 0; + + sdata_unlock(sdata); +} + +static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *elems) +{ + const struct ieee80211_multi_link_elem *ml; + const struct element *sub; + size_t ml_len; + unsigned long removed_links = 0; + u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {}; + u8 link_id; + u32 delay; + + if (!ieee80211_vif_is_mld(&sdata->vif) || !elems->ml_reconf) + return; + + ml_len = 
cfg80211_defragment_element(elems->ml_reconf_elem, + elems->ie_start, + elems->total_len, + elems->scratch_pos, + elems->scratch + elems->scratch_len - + elems->scratch_pos, + WLAN_EID_FRAGMENT); + + elems->ml_reconf = (const void *)elems->scratch_pos; + elems->ml_reconf_len = ml_len; + ml = elems->ml_reconf; + + /* Directly parse the sub elements as the common information doesn't + * hold any useful information. + */ + for_each_mle_subelement(sub, (u8 *)ml, ml_len) { + struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data; + u8 *pos = prof->variable; + u16 control; + + if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE) + continue; + + if (!ieee80211_mle_reconf_sta_prof_size_ok(sub->data, + sub->datalen)) + return; + + control = le16_to_cpu(prof->control); + link_id = control & IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID; + + removed_links |= BIT(link_id); + + /* the MAC address should not be included, but handle it */ + if (control & + IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT) + pos += 6; + + /* According to Draft P802.11be_D3.0, the control should + * include the AP Removal Timer present. If the AP Removal Timer + * is not present assume immediate removal. + */ + if (control & + IEEE80211_MLE_STA_RECONF_CONTROL_DELETE_TIMER_PRESENT) + link_removal_timeout[link_id] = le16_to_cpu(*(__le16 *)pos); + } + + removed_links &= sdata->vif.valid_links; + if (!removed_links) { + /* In case the removal was cancelled, abort it */ + if (sdata->u.mgd.removed_links) { + sdata->u.mgd.removed_links = 0; + wiphy_delayed_work_cancel(sdata->local->hw.wiphy, + &sdata->u.mgd.ml_reconf_work); + } + return; + } + + delay = 0; + for_each_set_bit(link_id, &removed_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + sdata_dereference(sdata->vif.link_conf[link_id], sdata); + u32 link_delay; + + if (!link_conf) { + removed_links &= ~BIT(link_id); + continue; + } + + link_delay = link_conf->beacon_int * + link_removal_timeout[link_id]; + + if (!delay) + delay = link_delay; + else + delay = min(delay, link_delay); + } + + sdata->u.mgd.removed_links = removed_links; + wiphy_delayed_work_queue(sdata->local->hw.wiphy, + &sdata->u.mgd.ml_reconf_work, + TU_TO_JIFFIES(delay)); +} + static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, struct ieee80211_hdr *hdr, size_t len, struct ieee80211_rx_status *rx_status) @@ -5937,6 +6100,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, } } + ieee80211_ml_reconfiguration(sdata, elems); + ieee80211_link_info_change_notify(sdata, link, changed); free: kfree(elems); @@ -6563,6 +6728,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) ieee80211_csa_connection_drop_work); INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work, ieee80211_tdls_peer_del_work); + wiphy_delayed_work_init(&ifmgd->ml_reconf_work, + ieee80211_ml_reconf_work); timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0); timer_setup(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 0); timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0); @@ -7575,6 +7742,8 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) wiphy_work_cancel(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); cancel_delayed_work_sync(&ifmgd->tdls_peer_del_work); + wiphy_delayed_work_cancel(sdata->local->hw.wiphy, + &ifmgd->ml_reconf_work); sdata_lock(sdata); if (ifmgd->assoc_data) -- cgit v1.2.3 From 6f2db6588b8189caeeb8249727b8344eba991250 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 18 Jun 2023 21:49:57 
+0300 Subject: wifi: mac80211: agg-tx: add a few locking assertions This is all true today, but difficult to understand since the callers are in other files etc. Add two new lockdep assertions to make things easier to read. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214436.7f03dec6a90b.I762c11e95da005b80fa0184cb1173b99ec362acf@changeid Signed-off-by: Johannes Berg --- net/mac80211/agg-tx.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 3b651e7f5a73..118ad2e24dbb 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -9,7 +9,7 @@ * Copyright 2007, Michael Wu * Copyright 2007-2010, Intel Corporation * Copyright(c) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2022 Intel Corporation + * Copyright (C) 2018 - 2023 Intel Corporation */ #include @@ -457,6 +457,8 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta, u8 tid = tid_tx->tid; u16 buf_size; + lockdep_assert_held(&sta->ampdu_mlme.mtx); + /* activate the timer for the recipient's addBA response */ mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n", @@ -795,6 +797,8 @@ void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid, struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_local *local = sdata->local; + lockdep_assert_held(&sta->ampdu_mlme.mtx); + if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) return; -- cgit v1.2.3 From 92bf4dd35801b4e3e73d3cc4beb5c929f75e0da6 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 18 Jun 2023 21:49:58 +0300 Subject: wifi: mac80211: agg-tx: prevent start/stop race There were crashes reported in this code, and the timer_shutdown() warning in one of the previous patches indicates that the timeout timer for the AP response (addba_resp_timer) is still armed while we're stopping the aggregation session. After a very long deliberation of the code, so far the only way I could find that might cause this would be the following sequence: - session start requested - session start indicated to driver, but driver returns IEEE80211_AMPDU_TX_START_DELAY_ADDBA - session stop requested, sets HT_AGG_STATE_WANT_STOP - session stop worker runs ___ieee80211_stop_tx_ba_session(), sets HT_AGG_STATE_STOPPING From here on, the order doesn't matter exactly, but: 1. driver calls ieee80211_start_tx_ba_cb_irqsafe(), setting HT_AGG_STATE_START_CB 2. driver calls ieee80211_stop_tx_ba_cb_irqsafe(), setting HT_AGG_STATE_STOP_CB 3. the worker will run ieee80211_start_tx_ba_cb() for HT_AGG_STATE_START_CB 4. the worker will run ieee80211_stop_tx_ba_cb() for HT_AGG_STATE_STOP_CB (the order could also be 1./3./2./4.) This will cause ieee80211_start_tx_ba_cb() to send out the AddBA request frame to the AP and arm the timer, but we're already in the middle of stopping and so the ieee80211_stop_tx_ba_cb() will no longer assume it needs to stop anything. Prevent this by checking for WANT_STOP/STOPPING in the start CB, and warn if we're sending a frame on a stopping session. 
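The resulting guard is to re-check the session's stop flags in the deferred start path before the AddBA request is sent and its response timer armed, since a stop request may have raced in after the start was queued. A stripped-down userspace sketch of that guard follows; the flag names and the use of C11 atomics are choices made for the sketch, whereas the kernel code uses test_bit() on tid_tx->state.

#include <stdatomic.h>
#include <stdio.h>

enum {
	AGG_STATE_WANT_STOP = 1u << 0,
	AGG_STATE_STOPPING  = 1u << 1,
};

struct agg_session {
	atomic_uint state;
};

/*
 * Deferred "start" callback: bail out if a stop was requested in the
 * meantime, so no AddBA request is sent and no response timer armed for a
 * session that is already being torn down.
 */
static void session_start_cb(struct agg_session *s)
{
	unsigned int state = atomic_load(&s->state);

	if (state & (AGG_STATE_WANT_STOP | AGG_STATE_STOPPING))
		return;

	printf("sending AddBA request, arming response timer\n");
}

int main(void)
{
	struct agg_session s;

	atomic_init(&s.state, AGG_STATE_STOPPING);
	session_start_cb(&s);		/* stop already requested: no-op */

	atomic_store(&s.state, 0);
	session_start_cb(&s);		/* actually starts the session */
	return 0;
}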
Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214436.e5b52777462a.I0b2ed6658e81804279f5d7c9c1918cb1f6626bf2@changeid Signed-off-by: Johannes Berg --- net/mac80211/agg-tx.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'net') diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 118ad2e24dbb..b6b772685881 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -457,6 +457,10 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta, u8 tid = tid_tx->tid; u16 buf_size; + if (WARN_ON_ONCE(test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state) || + test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))) + return; + lockdep_assert_held(&sta->ampdu_mlme.mtx); /* activate the timer for the recipient's addBA response */ @@ -802,6 +806,10 @@ void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid, if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) return; + if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state) || + test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state)) + return; + if (!test_bit(HT_AGG_STATE_SENT_ADDBA, &tid_tx->state)) { ieee80211_send_addba_with_timeout(sta, tid_tx); /* RESPONSE_RECEIVED state whould trigger the flow again */ -- cgit v1.2.3 From c870d66f1b7f51fa3401771ff6c41fd78adb869e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 18 Jun 2023 21:49:59 +0300 Subject: wifi: update multi-link element STA reconfig Update the MLE STA reconfig sub-type to 802.11be D3.0 format, which includes the operation update field. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214436.2e1383b31f07.I8055a111c8fcf22e833e60f5587a4d8d21caca5b@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 8 ++++++-- net/mac80211/mlme.c | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index fc3c26f1b718..d2025c986b0f 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4893,7 +4893,9 @@ static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data, #define IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID 0x000f #define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE 0x0010 #define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT 0x0020 -#define IEEE80211_MLE_STA_RECONF_CONTROL_DELETE_TIMER_PRESENT 0x0040 +#define IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT 0x0040 +#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_UPDATE_TYPE 0x0780 +#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT 0x0800 /** * ieee80211_mle_reconf_sta_prof_size_ok - validate reconfiguration multi-link @@ -4916,7 +4918,9 @@ static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data, if (control & IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT) info_len += ETH_ALEN; - if (control & IEEE80211_MLE_STA_RECONF_CONTROL_DELETE_TIMER_PRESENT) + if (control & IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT) + info_len += 2; + if (control & IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT) info_len += 2; return prof->sta_info_len >= info_len && diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b60f99cf1be0..15e3decc59db 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -5730,7 +5730,7 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata, * is not present assume immediate removal. 
*/ if (control & - IEEE80211_MLE_STA_RECONF_CONTROL_DELETE_TIMER_PRESENT) + IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT) link_removal_timeout[link_id] = le16_to_cpu(*(__le16 *)pos); } -- cgit v1.2.3 From 8dcc91c446687727f88997a2e177cdab740ef092 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Sun, 18 Jun 2023 21:50:00 +0300 Subject: wifi: cfg80211: stop parsing after allocation failure The error handling code would break out of the loop incorrectly, causing the rest of the message to be misinterpreted. Fix this by also jumping out of the surrounding while loop, which will trigger the error detection code. Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214436.0ffac98475cf.I6f5c08a09f5c9fced01497b95a9841ffd1b039f8@changeid Signed-off-by: Johannes Berg --- net/wireless/scan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index df868662e1e0..91671698aaec 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -707,7 +707,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies, GFP_ATOMIC); if (!entry) - break; + goto error; entry->center_freq = freq; @@ -723,6 +723,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies, } } +error: if (pos != end) { cfg80211_free_coloc_ap_list(&ap_list); return 0; -- cgit v1.2.3 From 5461707a529c94f6f556847c25c21da5990488ba Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Sun, 18 Jun 2023 21:50:01 +0300 Subject: wifi: cfg80211: search all RNR elements for colocated APs An AP reporting colocated APs may send more than one reduced neighbor report element. As such, iterate all elements instead of only parsing the first one when looking for colocated APs. 
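The change is essentially to keep walking the element buffer and handle every RNR element rather than returning after the first match, since the colocated AP information may be spread over several elements of the same ID. Below is a standalone sketch of iterating all elements with a given ID over 802.11-style ID/length/value encoding; the sample buffer contents are made up for the illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EID_REDUCED_NEIGHBOR_REPORT 201	/* RNR element ID in IEEE 802.11 */

struct element {
	uint8_t id;
	uint8_t len;
	uint8_t data[];
};

/*
 * Visit every well-formed element with the given ID in buf[0..buflen),
 * instead of returning after the first hit.
 */
static void for_each_element_with_id(const uint8_t *buf, size_t buflen,
				     uint8_t id,
				     void (*cb)(const struct element *))
{
	size_t off = 0;

	while (off + 2 <= buflen) {
		const struct element *elem = (const void *)(buf + off);

		if (off + 2 + elem->len > buflen)
			break;			/* truncated element */
		if (elem->id == id)
			cb(elem);		/* handle this occurrence */
		off += 2 + (size_t)elem->len;	/* and keep going */
	}
}

static void print_rnr(const struct element *elem)
{
	printf("RNR element with %u bytes of TBTT information\n",
	       (unsigned int)elem->len);
}

int main(void)
{
	/* Two RNR elements with an unrelated vendor element in between. */
	const uint8_t ies[] = {
		EID_REDUCED_NEIGHBOR_REPORT, 3, 0x00, 0x0d, 0xaa,
		0xdd,                        2, 0x01, 0x02,
		EID_REDUCED_NEIGHBOR_REPORT, 4, 0x00, 0x0d, 0xbb, 0xcc,
	};

	for_each_element_with_id(ies, sizeof(ies), EID_REDUCED_NEIGHBOR_REPORT,
				 print_rnr);
	return 0;
}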
Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214436.ffe2c014f478.I372a4f96c88f7ea28ac39e94e0abfc465b5330d4@changeid Signed-off-by: Johannes Berg --- net/wireless/scan.c | 127 ++++++++++++++++++++++++++-------------------------- 1 file changed, 63 insertions(+), 64 deletions(-) (limited to 'net') diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 91671698aaec..ede95caecb34 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -643,90 +643,89 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies, int n_coloc = 0, ret; LIST_HEAD(ap_list); - elem = cfg80211_find_elem(WLAN_EID_REDUCED_NEIGHBOR_REPORT, ies->data, - ies->len); - if (!elem) - return 0; - - pos = elem->data; - end = pos + elem->datalen; - ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp); if (ret) return ret; - /* RNR IE may contain more than one NEIGHBOR_AP_INFO */ - while (pos + sizeof(*ap_info) <= end) { - enum nl80211_band band; - int freq; - u8 length, i, count; + for_each_element_id(elem, WLAN_EID_REDUCED_NEIGHBOR_REPORT, + ies->data, ies->len) { + pos = elem->data; + end = elem->data + elem->datalen; - ap_info = (void *)pos; - count = u8_get_bits(ap_info->tbtt_info_hdr, - IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1; - length = ap_info->tbtt_info_len; + /* RNR IE may contain more than one NEIGHBOR_AP_INFO */ + while (pos + sizeof(*ap_info) <= end) { + enum nl80211_band band; + int freq; + u8 length, i, count; - pos += sizeof(*ap_info); + ap_info = (void *)pos; + count = u8_get_bits(ap_info->tbtt_info_hdr, + IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1; + length = ap_info->tbtt_info_len; - if (!ieee80211_operating_class_to_band(ap_info->op_class, - &band)) - break; + pos += sizeof(*ap_info); - freq = ieee80211_channel_to_frequency(ap_info->channel, band); + if (!ieee80211_operating_class_to_band(ap_info->op_class, + &band)) + break; - if (end - pos < count * length) - break; + freq = ieee80211_channel_to_frequency(ap_info->channel, + band); - if (u8_get_bits(ap_info->tbtt_info_hdr, - IEEE80211_AP_INFO_TBTT_HDR_TYPE) != - IEEE80211_TBTT_INFO_TYPE_TBTT) { - pos += count * length; - continue; - } + if (end - pos < count * length) + break; - /* - * TBTT info must include bss param + BSSID + - * (short SSID or same_ssid bit to be set). - * ignore other options, and move to the - * next AP info - */ - if (band != NL80211_BAND_6GHZ || - !(length == offsetofend(struct ieee80211_tbtt_info_7_8_9, - bss_params) || - length == sizeof(struct ieee80211_tbtt_info_7_8_9) || - length >= offsetofend(struct ieee80211_tbtt_info_ge_11, - bss_params))) { - pos += count * length; - continue; - } + if (u8_get_bits(ap_info->tbtt_info_hdr, + IEEE80211_AP_INFO_TBTT_HDR_TYPE) != + IEEE80211_TBTT_INFO_TYPE_TBTT) { + pos += count * length; + continue; + } - for (i = 0; i < count; i++) { - struct cfg80211_colocated_ap *entry; + /* TBTT info must include bss param + BSSID + + * (short SSID or same_ssid bit to be set). 
+ * ignore other options, and move to the + * next AP info + */ + if (band != NL80211_BAND_6GHZ || + !(length == offsetofend(struct ieee80211_tbtt_info_7_8_9, + bss_params) || + length == sizeof(struct ieee80211_tbtt_info_7_8_9) || + length >= offsetofend(struct ieee80211_tbtt_info_ge_11, + bss_params))) { + pos += count * length; + continue; + } - entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN, - GFP_ATOMIC); + for (i = 0; i < count; i++) { + struct cfg80211_colocated_ap *entry; - if (!entry) - goto error; + entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN, + GFP_ATOMIC); - entry->center_freq = freq; + if (!entry) + goto error; - if (!cfg80211_parse_ap_info(entry, pos, length, - ssid_elem, s_ssid_tmp)) { - n_coloc++; - list_add_tail(&entry->list, &ap_list); - } else { - kfree(entry); - } + entry->center_freq = freq; + + if (!cfg80211_parse_ap_info(entry, pos, length, + ssid_elem, + s_ssid_tmp)) { + n_coloc++; + list_add_tail(&entry->list, &ap_list); + } else { + kfree(entry); + } - pos += length; + pos += length; + } } - } error: - if (pos != end) { - cfg80211_free_coloc_ap_list(&ap_list); - return 0; + if (pos != end) { + cfg80211_free_coloc_ap_list(&ap_list); + return 0; + } } list_splice_tail(&ap_list, list); -- cgit v1.2.3 From cf0b045ebf6bba7764151e1759c874c43964445a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 18 Jun 2023 21:50:02 +0300 Subject: wifi: mac80211: check EHT basic MCS/NSS set Check that all the NSS in the EHT basic MCS/NSS set are actually supported, otherwise disable EHT for the connection. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230618214436.737827c906c9.I0c11a3cd46ab4dcb774c11a5bbc30aecfb6fce11@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 28 ++++++++++----- net/mac80211/mlme.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 108 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index d2025c986b0f..fa679613c562 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1996,12 +1996,18 @@ struct ieee80211_mu_edca_param_set { * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams * supported for reception and the maximum number of spatial streams * supported for transmission for MCS 12 - 13. + * @rx_tx_max_nss: array of the previous fields for easier loop access */ struct ieee80211_eht_mcs_nss_supp_20mhz_only { - u8 rx_tx_mcs7_max_nss; - u8 rx_tx_mcs9_max_nss; - u8 rx_tx_mcs11_max_nss; - u8 rx_tx_mcs13_max_nss; + union { + struct { + u8 rx_tx_mcs7_max_nss; + u8 rx_tx_mcs9_max_nss; + u8 rx_tx_mcs11_max_nss; + u8 rx_tx_mcs13_max_nss; + }; + u8 rx_tx_max_nss[4]; + }; }; /** @@ -2021,11 +2027,17 @@ struct ieee80211_eht_mcs_nss_supp_20mhz_only { * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams * supported for reception and the maximum number of spatial streams * supported for transmission for MCS 12 - 13. 
+ * @rx_tx_max_nss: array of the previous fields for easier loop access */ struct ieee80211_eht_mcs_nss_supp_bw { - u8 rx_tx_mcs9_max_nss; - u8 rx_tx_mcs11_max_nss; - u8 rx_tx_mcs13_max_nss; + union { + struct { + u8 rx_tx_mcs9_max_nss; + u8 rx_tx_mcs11_max_nss; + u8 rx_tx_mcs13_max_nss; + }; + u8 rx_tx_max_nss[3]; + }; }; /** @@ -2078,7 +2090,7 @@ struct ieee80211_eht_cap_elem { */ struct ieee80211_eht_operation { u8 params; - __le32 basic_mcs_nss; + struct ieee80211_eht_mcs_nss_supp_20mhz_only basic_mcs_nss; u8 optional[]; } __packed; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 15e3decc59db..cf15089e95f1 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4694,6 +4694,89 @@ ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata, return false; } +static u8 +ieee80211_get_eht_cap_mcs_nss(const struct ieee80211_sta_he_cap *sta_he_cap, + const struct ieee80211_sta_eht_cap *sta_eht_cap, + unsigned int idx, int bw) +{ + u8 he_phy_cap0 = sta_he_cap->he_cap_elem.phy_cap_info[0]; + u8 eht_phy_cap0 = sta_eht_cap->eht_cap_elem.phy_cap_info[0]; + + /* handle us being a 20 MHz-only EHT STA - with four values + * for MCS 0-7, 8-9, 10-11, 12-13. + */ + if (!(he_phy_cap0 & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) + return sta_eht_cap->eht_mcs_nss_supp.only_20mhz.rx_tx_max_nss[idx]; + + /* the others have MCS 0-9 together, rather than separately from 0-7 */ + if (idx > 0) + idx--; + + switch (bw) { + case 0: + return sta_eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_max_nss[idx]; + case 1: + if (!(he_phy_cap0 & + (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G))) + return 0xff; /* pass check */ + return sta_eht_cap->eht_mcs_nss_supp.bw._160.rx_tx_max_nss[idx]; + case 2: + if (!(eht_phy_cap0 & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)) + return 0xff; /* pass check */ + return sta_eht_cap->eht_mcs_nss_supp.bw._320.rx_tx_max_nss[idx]; + } + + WARN_ON(1); + return 0; +} + +static bool +ieee80211_verify_sta_eht_mcs_support(struct ieee80211_sub_if_data *sdata, + struct ieee80211_supported_band *sband, + const struct ieee80211_eht_operation *eht_op) +{ + const struct ieee80211_sta_he_cap *sta_he_cap = + ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); + const struct ieee80211_sta_eht_cap *sta_eht_cap = + ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); + const struct ieee80211_eht_mcs_nss_supp_20mhz_only *req; + unsigned int i; + + if (!sta_he_cap || !sta_eht_cap || !eht_op) + return false; + + req = &eht_op->basic_mcs_nss; + + for (i = 0; i < ARRAY_SIZE(req->rx_tx_max_nss); i++) { + u8 req_rx_nss, req_tx_nss; + unsigned int bw; + + req_rx_nss = u8_get_bits(req->rx_tx_max_nss[i], + IEEE80211_EHT_MCS_NSS_RX); + req_tx_nss = u8_get_bits(req->rx_tx_max_nss[i], + IEEE80211_EHT_MCS_NSS_TX); + + for (bw = 0; bw < 3; bw++) { + u8 have, have_rx_nss, have_tx_nss; + + have = ieee80211_get_eht_cap_mcs_nss(sta_he_cap, + sta_eht_cap, + i, bw); + have_rx_nss = u8_get_bits(have, + IEEE80211_EHT_MCS_NSS_RX); + have_tx_nss = u8_get_bits(have, + IEEE80211_EHT_MCS_NSS_TX); + + if (req_rx_nss > have_rx_nss || + req_tx_nss > have_tx_nss) + return false; + } + } + + return true; +} + static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, struct cfg80211_bss *cbss, @@ -4849,11 +4932,15 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, else eht_oper = NULL; + if (!ieee80211_verify_sta_eht_mcs_support(sdata, sband, eht_oper)) + *conn_flags |= 
IEEE80211_CONN_DISABLE_EHT; + eht_ml_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK, cbss_ies->data, cbss_ies->len); /* data + 1 / datalen - 1 since it's an extended element */ - if (eht_ml_elem && + if (!(*conn_flags & IEEE80211_CONN_DISABLE_EHT) && + eht_ml_elem && ieee80211_mle_type_ok(eht_ml_elem->data + 1, IEEE80211_ML_CONTROL_TYPE_BASIC, eht_ml_elem->datalen - 1)) { -- cgit v1.2.3 From 6c3eba1c5e283fd2bb1c076dbfcb47f569c3bfde Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 13 Jun 2023 15:35:32 -0700 Subject: bpf: Centralize permissions checks for all BPF map types This allows to do more centralized decisions later on, and generally makes it very explicit which maps are privileged and which are not (e.g., LRU_HASH and LRU_PERCPU_HASH, which are privileged HASH variants, as opposed to unprivileged HASH and HASH_PERCPU; now this is explicit and easy to verify). Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/bpf/20230613223533.3689589-4-andrii@kernel.org --- kernel/bpf/bloom_filter.c | 3 -- kernel/bpf/bpf_local_storage.c | 3 -- kernel/bpf/bpf_struct_ops.c | 3 -- kernel/bpf/cpumap.c | 4 -- kernel/bpf/devmap.c | 3 -- kernel/bpf/hashtab.c | 6 --- kernel/bpf/lpm_trie.c | 3 -- kernel/bpf/queue_stack_maps.c | 4 -- kernel/bpf/reuseport_array.c | 3 -- kernel/bpf/stackmap.c | 3 -- kernel/bpf/syscall.c | 47 ++++++++++++++++++++++ net/core/sock_map.c | 4 -- net/xdp/xskmap.c | 4 -- .../selftests/bpf/prog_tests/unpriv_bpf_disabled.c | 6 ++- 14 files changed, 52 insertions(+), 44 deletions(-) (limited to 'net') diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c index 540331b610a9..addf3dd57b59 100644 --- a/kernel/bpf/bloom_filter.c +++ b/kernel/bpf/bloom_filter.c @@ -86,9 +86,6 @@ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr) int numa_node = bpf_map_attr_numa_node(attr); struct bpf_bloom_filter *bloom; - if (!bpf_capable()) - return ERR_PTR(-EPERM); - if (attr->key_size != 0 || attr->value_size == 0 || attr->max_entries == 0 || attr->map_flags & ~BLOOM_CREATE_FLAG_MASK || diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c index 47d9948d768f..b5149cfce7d4 100644 --- a/kernel/bpf/bpf_local_storage.c +++ b/kernel/bpf/bpf_local_storage.c @@ -723,9 +723,6 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr) !attr->btf_key_type_id || !attr->btf_value_type_id) return -EINVAL; - if (!bpf_capable()) - return -EPERM; - if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE) return -E2BIG; diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index d3f0a4825fa6..116a0ce378ec 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -655,9 +655,6 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) const struct btf_type *t, *vt; struct bpf_map *map; - if (!bpf_capable()) - return ERR_PTR(-EPERM); - st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id); if (!st_ops) return ERR_PTR(-ENOTSUPP); diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 8ec18faa74ac..8a33e8747a0e 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -89,9 +88,6 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) u32 value_size = attr->value_size; struct bpf_cpu_map *cmap; - if (!bpf_capable()) - return ERR_PTR(-EPERM); - /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || 
(value_size != offsetofend(struct bpf_cpumap_val, qsize) && diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 802692fa3905..49cc0b5671c6 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -160,9 +160,6 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) struct bpf_dtab *dtab; int err; - if (!capable(CAP_NET_ADMIN)) - return ERR_PTR(-EPERM); - dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE); if (!dtab) return ERR_PTR(-ENOMEM); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 9901efee4339..56d3da7d0bc6 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -422,12 +422,6 @@ static int htab_map_alloc_check(union bpf_attr *attr) BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) != offsetof(struct htab_elem, hash_node.pprev)); - if (lru && !bpf_capable()) - /* LRU implementation is much complicated than other - * maps. Hence, limit to CAP_BPF. - */ - return -EPERM; - if (zero_seed && !capable(CAP_SYS_ADMIN)) /* Guard against local DoS, and discourage production use. */ return -EPERM; diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index e0d3ddf2037a..17c7e7782a1f 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -544,9 +544,6 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) { struct lpm_trie *trie; - if (!bpf_capable()) - return ERR_PTR(-EPERM); - /* check sanity of attributes */ if (attr->max_entries == 0 || !(attr->map_flags & BPF_F_NO_PREALLOC) || diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c index 601609164ef3..8d2ddcb7566b 100644 --- a/kernel/bpf/queue_stack_maps.c +++ b/kernel/bpf/queue_stack_maps.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include "percpu_freelist.h" @@ -46,9 +45,6 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs) /* Called from syscall */ static int queue_stack_map_alloc_check(union bpf_attr *attr) { - if (!bpf_capable()) - return -EPERM; - /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 0 || attr->value_size == 0 || diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c index cbf2d8d784b8..4b4f9670f1a9 100644 --- a/kernel/bpf/reuseport_array.c +++ b/kernel/bpf/reuseport_array.c @@ -151,9 +151,6 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr) int numa_node = bpf_map_attr_numa_node(attr); struct reuseport_array *array; - if (!bpf_capable()) - return ERR_PTR(-EPERM); - /* allocate all map elements and zero-initialize them */ array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries), numa_node); if (!array) diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index b25fce425b2c..458bb80b14d5 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -74,9 +74,6 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) u64 cost, n_buckets; int err; - if (!bpf_capable()) - return ERR_PTR(-EPERM); - if (attr->map_flags & ~STACK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 6ef302709ab0..658d1154f221 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1156,6 +1156,53 @@ static int map_create(union bpf_attr *attr) if (sysctl_unprivileged_bpf_disabled && !bpf_capable()) return -EPERM; + /* check privileged map type permissions */ + switch (map_type) { + case BPF_MAP_TYPE_ARRAY: + case BPF_MAP_TYPE_PERCPU_ARRAY: + case BPF_MAP_TYPE_PROG_ARRAY: + case BPF_MAP_TYPE_PERF_EVENT_ARRAY: + case BPF_MAP_TYPE_CGROUP_ARRAY: + case 
BPF_MAP_TYPE_ARRAY_OF_MAPS: + case BPF_MAP_TYPE_HASH: + case BPF_MAP_TYPE_PERCPU_HASH: + case BPF_MAP_TYPE_HASH_OF_MAPS: + case BPF_MAP_TYPE_RINGBUF: + case BPF_MAP_TYPE_USER_RINGBUF: + case BPF_MAP_TYPE_CGROUP_STORAGE: + case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: + /* unprivileged */ + break; + case BPF_MAP_TYPE_SK_STORAGE: + case BPF_MAP_TYPE_INODE_STORAGE: + case BPF_MAP_TYPE_TASK_STORAGE: + case BPF_MAP_TYPE_CGRP_STORAGE: + case BPF_MAP_TYPE_BLOOM_FILTER: + case BPF_MAP_TYPE_LPM_TRIE: + case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: + case BPF_MAP_TYPE_STACK_TRACE: + case BPF_MAP_TYPE_QUEUE: + case BPF_MAP_TYPE_STACK: + case BPF_MAP_TYPE_LRU_HASH: + case BPF_MAP_TYPE_LRU_PERCPU_HASH: + case BPF_MAP_TYPE_STRUCT_OPS: + case BPF_MAP_TYPE_CPUMAP: + if (!bpf_capable()) + return -EPERM; + break; + case BPF_MAP_TYPE_SOCKMAP: + case BPF_MAP_TYPE_SOCKHASH: + case BPF_MAP_TYPE_DEVMAP: + case BPF_MAP_TYPE_DEVMAP_HASH: + case BPF_MAP_TYPE_XSKMAP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + break; + default: + WARN(1, "unsupported map type %d", map_type); + return -EPERM; + } + map = ops->map_alloc(attr); if (IS_ERR(map)) return PTR_ERR(map); diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 00afb66cd095..19538d628714 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -32,8 +32,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) { struct bpf_stab *stab; - if (!capable(CAP_NET_ADMIN)) - return ERR_PTR(-EPERM); if (attr->max_entries == 0 || attr->key_size != 4 || (attr->value_size != sizeof(u32) && @@ -1085,8 +1083,6 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) struct bpf_shtab *htab; int i, err; - if (!capable(CAP_NET_ADMIN)) - return ERR_PTR(-EPERM); if (attr->max_entries == 0 || attr->key_size == 0 || (attr->value_size != sizeof(u32) && diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c index 2c1427074a3b..e1c526f97ce3 100644 --- a/net/xdp/xskmap.c +++ b/net/xdp/xskmap.c @@ -5,7 +5,6 @@ #include #include -#include #include #include #include @@ -68,9 +67,6 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) int numa_node; u64 size; - if (!capable(CAP_NET_ADMIN)) - return ERR_PTR(-EPERM); - if (attr->max_entries == 0 || attr->key_size != 4 || attr->value_size != 4 || attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)) diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c index 8383a99f610f..0adf8d9475cb 100644 --- a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c +++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c @@ -171,7 +171,11 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s prog_insns, prog_insn_cnt, &load_opts), -EPERM, "prog_load_fails"); - for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++) + /* some map types require particular correct parameters which could be + * sanity-checked before enforcing -EPERM, so only validate that + * the simple ARRAY and HASH maps are failing with -EPERM + */ + for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++) ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL), -EPERM, "map_create_fails"); -- cgit v1.2.3 From 5a0702aac020b2e81f778e3477095d8ed39ce523 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Wed, 17 May 2023 11:44:28 -0700 Subject: wifi: mac80211: add eht_capa debugfs field Output looks like this: [root@ct523c-0b29 ~]# cat /debug/ieee80211/wiphy6/netdev\:wlan6/stations/50\:28\:4a\:bd\:f4\:a7/eht_capa 
EHT supported MAC-CAP: 0x82 0x00 PHY-CAP: 0x0c 0x00 0x00 0x00 0x00 0x48 0x00 0x00 0x00 OM-CONTROL MAX-MPDU-LEN: 11454 242-TONE-RU-GT20MHZ NDP-4-EHT-LFT-32-GI BEAMFORMEE-80-NSS: 0 BEAMFORMEE-160-NSS: 0 BEAMFORMEE-320-NSS: 0 SOUNDING-DIM-80-NSS: 0 SOUNDING-DIM-160-NSS: 0 SOUNDING-DIM-320-NSS: 0 MAX_NC: 0 PPE_THRESHOLD_PRESENT NOMINAL_PKT_PAD: 0us MAX-NUM-SUPP-EHT-LTF: 1 SUPP-EXTRA-EHT-LTF MCS15-SUPP-MASK: 0 EHT bw <= 80 MHz, max NSS for MCS 8-9: Rx=2, Tx=2 EHT bw <= 80 MHz, max NSS for MCS 10-11: Rx=2, Tx=2 EHT bw <= 80 MHz, max NSS for MCS 12-13: Rx=2, Tx=2 EHT bw <= 160 MHz, max NSS for MCS 8-9: Rx=0, Tx=0 EHT bw <= 160 MHz, max NSS for MCS 10-11: Rx=0, Tx=0 EHT bw <= 160 MHz, max NSS for MCS 12-13: Rx=0, Tx=0 EHT bw <= 320 MHz, max NSS for MCS 8-9: Rx=0, Tx=0 EHT bw <= 320 MHz, max NSS for MCS 10-11: Rx=0, Tx=0 EHT bw <= 320 MHz, max NSS for MCS 12-13: Rx=0, Tx=0 EHT PPE Thresholds: 0xc1 0x0e 0xe0 0x00 0x00 Signed-off-by: Ben Greear Link: https://lore.kernel.org/r/20230517184428.999384-1-greearb@candelatech.com Signed-off-by: Johannes Berg --- net/mac80211/debugfs_sta.c | 185 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) (limited to 'net') diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index f1914bf39f0e..5a97fb248c85 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c @@ -1035,6 +1035,190 @@ out: } LINK_STA_OPS(he_capa); +static ssize_t link_sta_eht_capa_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) +{ + char *buf, *p; + size_t buf_sz = PAGE_SIZE; + struct link_sta_info *link_sta = file->private_data; + struct ieee80211_sta_eht_cap *bec = &link_sta->pub->eht_cap; + struct ieee80211_eht_cap_elem_fixed *fixed = &bec->eht_cap_elem; + struct ieee80211_eht_mcs_nss_supp *nss = &bec->eht_mcs_nss_supp; + u8 *cap; + int i; + ssize_t ret; + static const char *mcs_desc[] = { "0-7", "8-9", "10-11", "12-13"}; + + buf = kmalloc(buf_sz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + p = buf; + + p += scnprintf(p, buf_sz + buf - p, "EHT %ssupported\n", + bec->has_eht ? "" : "not "); + if (!bec->has_eht) + goto out; + + p += scnprintf(p, buf_sz + buf - p, + "MAC-CAP: %#.2x %#.2x\n", + fixed->mac_cap_info[0], fixed->mac_cap_info[1]); + p += scnprintf(p, buf_sz + buf - p, + "PHY-CAP: %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x %#.2x\n", + fixed->phy_cap_info[0], fixed->phy_cap_info[1], + fixed->phy_cap_info[2], fixed->phy_cap_info[3], + fixed->phy_cap_info[4], fixed->phy_cap_info[5], + fixed->phy_cap_info[6], fixed->phy_cap_info[7], + fixed->phy_cap_info[8]); + +#define PRINT(fmt, ...) 
\ + p += scnprintf(p, buf_sz + buf - p, "\t\t" fmt "\n", \ + ##__VA_ARGS__) + +#define PFLAG(t, n, a, b) \ + do { \ + if (cap[n] & IEEE80211_EHT_##t##_CAP##n##_##a) \ + PRINT("%s", b); \ + } while (0) + + cap = fixed->mac_cap_info; + PFLAG(MAC, 0, EPCS_PRIO_ACCESS, "EPCS-PRIO-ACCESS"); + PFLAG(MAC, 0, OM_CONTROL, "OM-CONTROL"); + PFLAG(MAC, 0, TRIG_TXOP_SHARING_MODE1, "TRIG-TXOP-SHARING-MODE1"); + PFLAG(MAC, 0, TRIG_TXOP_SHARING_MODE2, "TRIG-TXOP-SHARING-MODE2"); + PFLAG(MAC, 0, RESTRICTED_TWT, "RESTRICTED-TWT"); + PFLAG(MAC, 0, SCS_TRAFFIC_DESC, "SCS-TRAFFIC-DESC"); + switch ((cap[0] & 0xc0) >> 6) { + case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895: + PRINT("MAX-MPDU-LEN: 3985"); + break; + case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991: + PRINT("MAX-MPDU-LEN: 7991"); + break; + case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454: + PRINT("MAX-MPDU-LEN: 11454"); + break; + } + + cap = fixed->phy_cap_info; + PFLAG(PHY, 0, 320MHZ_IN_6GHZ, "320MHZ-IN-6GHZ"); + PFLAG(PHY, 0, 242_TONE_RU_GT20MHZ, "242-TONE-RU-GT20MHZ"); + PFLAG(PHY, 0, NDP_4_EHT_LFT_32_GI, "NDP-4-EHT-LFT-32-GI"); + PFLAG(PHY, 0, PARTIAL_BW_UL_MU_MIMO, "PARTIAL-BW-UL-MU-MIMO"); + PFLAG(PHY, 0, SU_BEAMFORMER, "SU-BEAMFORMER"); + PFLAG(PHY, 0, SU_BEAMFORMEE, "SU-BEAMFORMEE"); + i = cap[0] >> 7; + i |= (cap[1] & 0x3) << 1; + PRINT("BEAMFORMEE-80-NSS: %i", i); + PRINT("BEAMFORMEE-160-NSS: %i", (cap[1] >> 2) & 0x7); + PRINT("BEAMFORMEE-320-NSS: %i", (cap[1] >> 5) & 0x7); + PRINT("SOUNDING-DIM-80-NSS: %i", (cap[2] & 0x7)); + PRINT("SOUNDING-DIM-160-NSS: %i", (cap[2] >> 3) & 0x7); + i = cap[2] >> 6; + i |= (cap[3] & 0x1) << 3; + PRINT("SOUNDING-DIM-320-NSS: %i", i); + + PFLAG(PHY, 3, NG_16_SU_FEEDBACK, "NG-16-SU-FEEDBACK"); + PFLAG(PHY, 3, NG_16_MU_FEEDBACK, "NG-16-MU-FEEDBACK"); + PFLAG(PHY, 3, CODEBOOK_4_2_SU_FDBK, "CODEBOOK-4-2-SU-FDBK"); + PFLAG(PHY, 3, CODEBOOK_7_5_MU_FDBK, "CODEBOOK-7-5-MU-FDBK"); + PFLAG(PHY, 3, TRIG_SU_BF_FDBK, "TRIG-SU-BF-FDBK"); + PFLAG(PHY, 3, TRIG_MU_BF_PART_BW_FDBK, "TRIG-MU-BF-PART-BW-FDBK"); + PFLAG(PHY, 3, TRIG_CQI_FDBK, "TRIG-CQI-FDBK"); + + PFLAG(PHY, 4, PART_BW_DL_MU_MIMO, "PART-BW-DL-MU-MIMO"); + PFLAG(PHY, 4, PSR_SR_SUPP, "PSR-SR-SUPP"); + PFLAG(PHY, 4, POWER_BOOST_FACT_SUPP, "POWER-BOOST-FACT-SUPP"); + PFLAG(PHY, 4, EHT_MU_PPDU_4_EHT_LTF_08_GI, "EHT-MU-PPDU-4-EHT-LTF-08-GI"); + PRINT("MAX_NC: %i", cap[4] >> 4); + + PFLAG(PHY, 5, NON_TRIG_CQI_FEEDBACK, "NON-TRIG-CQI-FEEDBACK"); + PFLAG(PHY, 5, TX_LESS_242_TONE_RU_SUPP, "TX-LESS-242-TONE-RU-SUPP"); + PFLAG(PHY, 5, RX_LESS_242_TONE_RU_SUPP, "RX-LESS-242-TONE-RU-SUPP"); + PFLAG(PHY, 5, PPE_THRESHOLD_PRESENT, "PPE_THRESHOLD_PRESENT"); + switch (cap[5] >> 4 & 0x3) { + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US: + PRINT("NOMINAL_PKT_PAD: 0us"); + break; + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US: + PRINT("NOMINAL_PKT_PAD: 8us"); + break; + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US: + PRINT("NOMINAL_PKT_PAD: 16us"); + break; + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US: + PRINT("NOMINAL_PKT_PAD: 20us"); + break; + } + i = cap[5] >> 6; + i |= cap[6] & 0x7; + PRINT("MAX-NUM-SUPP-EHT-LTF: %i", i); + PFLAG(PHY, 5, SUPP_EXTRA_EHT_LTF, "SUPP-EXTRA-EHT-LTF"); + + i = (cap[6] >> 3) & 0xf; + PRINT("MCS15-SUPP-MASK: %i", i); + PFLAG(PHY, 6, EHT_DUP_6GHZ_SUPP, "EHT-DUP-6GHZ-SUPP"); + + PFLAG(PHY, 7, 20MHZ_STA_RX_NDP_WIDER_BW, "20MHZ-STA-RX-NDP-WIDER-BW"); + PFLAG(PHY, 7, NON_OFDMA_UL_MU_MIMO_80MHZ, "NON-OFDMA-UL-MU-MIMO-80MHZ"); + PFLAG(PHY, 7, NON_OFDMA_UL_MU_MIMO_160MHZ, "NON-OFDMA-UL-MU-MIMO-160MHZ"); + PFLAG(PHY, 7, 
NON_OFDMA_UL_MU_MIMO_320MHZ, "NON-OFDMA-UL-MU-MIMO-320MHZ"); + PFLAG(PHY, 7, MU_BEAMFORMER_80MHZ, "MU-BEAMFORMER-80MHZ"); + PFLAG(PHY, 7, MU_BEAMFORMER_160MHZ, "MU-BEAMFORMER-160MHZ"); + PFLAG(PHY, 7, MU_BEAMFORMER_320MHZ, "MU-BEAMFORMER-320MHZ"); + PFLAG(PHY, 7, TB_SOUNDING_FDBK_RATE_LIMIT, "TB-SOUNDING-FDBK-RATE-LIMIT"); + + PFLAG(PHY, 8, RX_1024QAM_WIDER_BW_DL_OFDMA, "RX-1024QAM-WIDER-BW-DL-OFDMA"); + PFLAG(PHY, 8, RX_4096QAM_WIDER_BW_DL_OFDMA, "RX-4096QAM-WIDER-BW-DL-OFDMA"); + +#undef PFLAG + + PRINT(""); /* newline */ + if (!(link_sta->pub->he_cap.he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) { + u8 *mcs_vals = (u8 *)(&nss->only_20mhz); + + for (i = 0; i < 4; i++) + PRINT("EHT bw=20 MHz, max NSS for MCS %s: Rx=%u, Tx=%u", + mcs_desc[i], + mcs_vals[i] & 0xf, mcs_vals[i] >> 4); + } else { + u8 *mcs_vals = (u8 *)(&nss->bw._80); + + for (i = 0; i < 3; i++) + PRINT("EHT bw <= 80 MHz, max NSS for MCS %s: Rx=%u, Tx=%u", + mcs_desc[i + 1], + mcs_vals[i] & 0xf, mcs_vals[i] >> 4); + + mcs_vals = (u8 *)(&nss->bw._160); + for (i = 0; i < 3; i++) + PRINT("EHT bw <= 160 MHz, max NSS for MCS %s: Rx=%u, Tx=%u", + mcs_desc[i + 1], + mcs_vals[i] & 0xf, mcs_vals[i] >> 4); + + mcs_vals = (u8 *)(&nss->bw._320); + for (i = 0; i < 3; i++) + PRINT("EHT bw <= 320 MHz, max NSS for MCS %s: Rx=%u, Tx=%u", + mcs_desc[i + 1], + mcs_vals[i] & 0xf, mcs_vals[i] >> 4); + } + + if (cap[5] & IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) { + u8 ppe_size = ieee80211_eht_ppe_size(bec->eht_ppe_thres[0], cap); + + p += scnprintf(p, buf_sz + buf - p, "EHT PPE Thresholds: "); + for (i = 0; i < ppe_size; i++) + p += scnprintf(p, buf_sz + buf - p, "0x%02x ", + bec->eht_ppe_thres[i]); + PRINT(""); /* newline */ + } + +out: + ret = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); + return ret; +} +LINK_STA_OPS(eht_capa); + #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, \ sta->debugfs_dir, sta, &sta_ ##name## _ops) @@ -1128,6 +1312,7 @@ void ieee80211_link_sta_debugfs_add(struct link_sta_info *link_sta) DEBUGFS_ADD(ht_capa); DEBUGFS_ADD(vht_capa); DEBUGFS_ADD(he_capa); + DEBUGFS_ADD(eht_capa); DEBUGFS_ADD_COUNTER(rx_duplicates, rx_stats.num_duplicates); DEBUGFS_ADD_COUNTER(rx_fragments, rx_stats.fragments); -- cgit v1.2.3 From ac9d8a66e41d553bf57fdffe88f59f1b1443191b Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 14 Jun 2023 16:01:03 -0700 Subject: ipv6: rpl: Remove pskb(_may)?_pull() in ipv6_rpl_srh_rcv(). As Eric Dumazet pointed out [0], ipv6_rthdr_rcv() pulls these data - Segment Routing Header : 8 - Hdr Ext Len : skb_transport_header(skb)[1] << 3 needed by ipv6_rpl_srh_rcv(). We can remove pskb_may_pull() and replace pskb_pull() with skb_pull() in ipv6_rpl_srh_rcv(). 
Link: https://lore.kernel.org/netdev/CANn89iLboLwLrHXeHJucAqBkEL_S0rJFog68t7wwwXO-aNf5Mg@mail.gmail.com/ [0] Signed-off-by: Kuniyuki Iwashima Signed-off-by: Jakub Kicinski --- include/net/rpl.h | 3 --- net/ipv6/exthdrs.c | 17 +---------------- net/ipv6/rpl.c | 7 ------- 3 files changed, 1 insertion(+), 26 deletions(-) (limited to 'net') diff --git a/include/net/rpl.h b/include/net/rpl.h index 30fe780d1e7c..74734191c458 100644 --- a/include/net/rpl.h +++ b/include/net/rpl.h @@ -23,9 +23,6 @@ static inline int rpl_init(void) static inline void rpl_exit(void) {} #endif -size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri, - unsigned char cmpre); - void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr, const struct ipv6_rpl_sr_hdr *inhdr, const struct in6_addr *daddr, unsigned char n); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index a543df57801f..65adc11b59aa 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -517,11 +517,7 @@ looped_back: skb_postpull_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); - - if (!pskb_pull(skb, offset)) { - kfree_skb(skb); - return -1; - } + skb_pull(skb, offset); skb_postpull_rcsum(skb, skb_transport_header(skb), offset); @@ -543,11 +539,6 @@ looped_back: return 1; } - if (!pskb_may_pull(skb, sizeof(*hdr))) { - kfree_skb(skb); - return -1; - } - n = (hdr->hdrlen << 3) - hdr->pad - (16 - hdr->cmpre); r = do_div(n, (16 - hdr->cmpri)); /* checks if calculation was without remainder and n fits into @@ -567,12 +558,6 @@ looped_back: return -1; } - if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri, - hdr->cmpre))) { - kfree_skb(skb); - return -1; - } - hdr->segments_left--; i = n - hdr->segments_left; diff --git a/net/ipv6/rpl.c b/net/ipv6/rpl.c index d1876f192225..e186998bfbf7 100644 --- a/net/ipv6/rpl.c +++ b/net/ipv6/rpl.c @@ -29,13 +29,6 @@ static void *ipv6_rpl_segdata_pos(const struct ipv6_rpl_sr_hdr *hdr, int i) return (void *)&hdr->rpl_segdata[i * IPV6_PFXTAIL_LEN(hdr->cmpri)]; } -size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri, - unsigned char cmpre) -{ - return sizeof(struct ipv6_rpl_sr_hdr) + (n * IPV6_PFXTAIL_LEN(cmpri)) + - IPV6_PFXTAIL_LEN(cmpre); -} - void ipv6_rpl_srh_decompress(struct ipv6_rpl_sr_hdr *outhdr, const struct ipv6_rpl_sr_hdr *inhdr, const struct in6_addr *daddr, unsigned char n) -- cgit v1.2.3 From 6facbca52da2c209ddffa0c9547769cbcaa268ef Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 14 Jun 2023 16:01:04 -0700 Subject: ipv6: rpl: Remove redundant multicast tests in ipv6_rpl_srh_rcv(). ipv6_rpl_srh_rcv() checks if ipv6_hdr(skb)->daddr or ohdr->rpl_segaddr[i] is the multicast address with ipv6_addr_type(). We have the same check for ipv6_hdr(skb)->daddr in ipv6_rthdr_rcv(), so we need not recheck it in ipv6_rpl_srh_rcv(). Also, we should use ipv6_addr_is_multicast() for ohdr->rpl_segaddr[i] instead of ipv6_addr_type(). 
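As a purely illustrative aside (seg_is_multicast() below is a hypothetical wrapper, not part of the patch): for this check the dedicated helper is sufficient, since ipv6_addr_is_multicast() only inspects the leading 0xff octet, whereas ipv6_addr_type() performs a full address classification whose scope/type result is unused here.

static bool seg_is_multicast(const struct in6_addr *addr)
{
	/* equivalent to: ipv6_addr_type(addr) & IPV6_ADDR_MULTICAST,
	 * but without computing the unused scope/type information
	 */
	return ipv6_addr_is_multicast(addr);	/* first octet == 0xff */
}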
Signed-off-by: Kuniyuki Iwashima Signed-off-by: Jakub Kicinski --- net/ipv6/exthdrs.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 65adc11b59aa..6259e907f0d9 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -571,8 +571,7 @@ looped_back: ipv6_rpl_srh_decompress(ohdr, hdr, &ipv6_hdr(skb)->daddr, n); chdr = (struct ipv6_rpl_sr_hdr *)(buf + ((ohdr->hdrlen + 1) << 3)); - if ((ipv6_addr_type(&ipv6_hdr(skb)->daddr) & IPV6_ADDR_MULTICAST) || - (ipv6_addr_type(&ohdr->rpl_segaddr[i]) & IPV6_ADDR_MULTICAST)) { + if (ipv6_addr_is_multicast(&ohdr->rpl_segaddr[i])) { kfree_skb(skb); kfree(buf); return -1; -- cgit v1.2.3 From 0d2e27b858503472f8ed66910498205824b02f8a Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 14 Jun 2023 16:01:05 -0700 Subject: ipv6: exthdrs: Replace pskb_pull() with skb_pull() in ipv6_srh_rcv(). ipv6_rthdr_rcv() pulls these data - Segment Routing Header : 8 - Hdr Ext Len : skb_transport_header(skb)[1] << 3 needed by ipv6_srh_rcv(), so pskb_pull() in ipv6_srh_rcv() never fails and can be replaced with skb_pull(). Signed-off-by: Kuniyuki Iwashima Signed-off-by: Jakub Kicinski --- net/ipv6/exthdrs.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'net') diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 6259e907f0d9..e62e26ac99ef 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -402,11 +402,7 @@ looped_back: skb_postpull_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); - - if (!pskb_pull(skb, offset)) { - kfree_skb(skb); - return -1; - } + skb_pull(skb, offset); skb_postpull_rcsum(skb, skb_transport_header(skb), offset); -- cgit v1.2.3 From b83d50f43165e8c21d580f40c57d8f6fe85adb59 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 14 Jun 2023 16:01:06 -0700 Subject: ipv6: exthdrs: Reload hdr only when needed in ipv6_srh_rcv(). We need not reload hdr in ipv6_srh_rcv() unless we call pskb_expand_head(). Signed-off-by: Kuniyuki Iwashima Signed-off-by: Jakub Kicinski --- net/ipv6/exthdrs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index e62e26ac99ef..dd23531387e3 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -440,9 +440,9 @@ looped_back: kfree_skb(skb); return -1; } - } - hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); + hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb); + } hdr->segments_left--; addr = hdr->segments + hdr->segments_left; -- cgit v1.2.3 From 6db5dd2bf4817a415daaf6a0226beadef1473d30 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 14 Jun 2023 16:01:07 -0700 Subject: ipv6: exthdrs: Remove redundant skb_headlen() check in ip6_parse_tlv(). ipv6_destopt_rcv() and ipv6_parse_hopopts() pulls these data - Hop-by-Hop/Destination Options Header : 8 - Hdr Ext Len : skb_transport_header(skb)[1] << 3 and calls ip6_parse_tlv(), so it need not check if skb_headlen() is less than skb_transport_offset(skb) + (skb_transport_header(skb)[1] << 3). 
Signed-off-by: Kuniyuki Iwashima Signed-off-by: Jakub Kicinski --- net/ipv6/exthdrs.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'net') diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index dd23531387e3..202fc3aaa83c 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -126,9 +126,6 @@ static bool ip6_parse_tlv(bool hopbyhop, max_count = -max_count; } - if (skb_transport_offset(skb) + len > skb_headlen(skb)) - goto bad; - off += 2; len -= 2; -- cgit v1.2.3 From acf15e07eb06507c69f92394c36052677029b0a8 Mon Sep 17 00:00:00 2001 From: Azeem Shaikh Date: Tue, 13 Jun 2023 00:34:37 +0000 Subject: netfilter: ipset: Replace strlcpy with strscpy strlcpy() reads the entire source buffer first. This read may exceed the destination size limit. This is both inefficient and can lead to linear read overflows if a source string is not NUL-terminated [1]. In an effort to remove strlcpy() completely [2], replace strlcpy() here with strscpy(). Direct replacement is safe here since return value from all callers of STRLCPY macro were ignored. [1] https://www.kernel.org/doc/html/latest/process/deprecated.html#strlcpy [2] https://github.com/KSPP/linux/issues/89 Signed-off-by: Azeem Shaikh Acked-by: Jozsef Kadlecsik Reviewed-by: Kees Cook Reviewed-by: Simon Horman Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20230613003437.3538694-1-azeemshaikh38@gmail.com --- net/netfilter/ipset/ip_set_hash_netiface.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 031073286236..95aeb31c60e0 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -40,7 +40,7 @@ MODULE_ALIAS("ip_set_hash:net,iface"); #define IP_SET_HASH_WITH_MULTI #define IP_SET_HASH_WITH_NET0 -#define STRLCPY(a, b) strlcpy(a, b, IFNAMSIZ) +#define STRSCPY(a, b) strscpy(a, b, IFNAMSIZ) /* IPv4 variant */ @@ -182,11 +182,11 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, if (!eiface) return -EINVAL; - STRLCPY(e.iface, eiface); + STRSCPY(e.iface, eiface); e.physdev = 1; #endif } else { - STRLCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out)); + STRSCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out)); } if (strlen(e.iface) == 0) @@ -400,11 +400,11 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, if (!eiface) return -EINVAL; - STRLCPY(e.iface, eiface); + STRSCPY(e.iface, eiface); e.physdev = 1; #endif } else { - STRLCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out)); + STRSCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out)); } if (strlen(e.iface) == 0) -- cgit v1.2.3 From 92b08290859b09a2ead2dc553aaaadb015748536 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 18 Jun 2023 11:46:46 +0200 Subject: mptcp: Reorder fields in 'struct mptcp_pm_add_entry' Group some variables based on their sizes to reduce hole and avoid padding. On x86_64, this shrinks the size of 'struct mptcp_pm_add_entry' from 136 to 128 bytes. It saves a few bytes of memory and is more cache-line friendly. 
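For illustration only (both structures below are hypothetical, not taken from MPTCP), this is the kind of hole such a reordering removes on x86_64, where pointers require 8-byte alignment:

struct before_reorder {		/* 32 bytes: each u8 is followed by a 7-byte hole */
	u8 flag_a;
	void *ptr_a;
	u8 flag_b;
	void *ptr_b;
};

struct after_reorder {		/* 24 bytes: the two u8s share a single tail pad */
	void *ptr_a;
	void *ptr_b;
	u8 flag_a;
	u8 flag_b;
};

A tool such as pahole(1) can confirm the holes before and after such a reordering.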
Signed-off-by: Christophe JAILLET Reviewed-by: Matthieu Baerts Reviewed-by: Jiri Pirko Link: https://lore.kernel.org/r/e47b71de54fd3e580544be56fc1bb2985c77b0f4.1687081558.git.christophe.jaillet@wanadoo.fr Signed-off-by: Jakub Kicinski --- net/mptcp/pm_netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index a12a87b780f6..a56718ffdd02 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -25,9 +25,9 @@ static int pm_nl_pernet_id; struct mptcp_pm_add_entry { struct list_head list; struct mptcp_addr_info addr; + u8 retrans_times; struct timer_list add_timer; struct mptcp_sock *sock; - u8 retrans_times; }; struct pm_nl_pernet { -- cgit v1.2.3 From f0d952646bcf186d6d1bea6ec89f96b7e57f3b83 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 18 Jun 2023 12:16:41 +0200 Subject: netlabel: Reorder fields in 'struct netlbl_domaddr6_map' Group some variables based on their sizes to reduce hole and avoid padding. On x86_64, this shrinks the size of 'struct netlbl_domaddr6_map' from 72 to 64 bytes. It saves a few bytes of memory and is more cache-line friendly. Signed-off-by: Christophe JAILLET Reviewed-by: Simon Horman Reviewed-by: Jiri Pirko Acked-by: Paul Moore Link: https://lore.kernel.org/r/aa109847260e51e174c823b6d1441f75be370f01.1687083361.git.christophe.jaillet@wanadoo.fr Signed-off-by: Jakub Kicinski --- net/netlabel/netlabel_domainhash.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h index 9f80972ae39b..7eaa35fdd9bd 100644 --- a/net/netlabel/netlabel_domainhash.h +++ b/net/netlabel/netlabel_domainhash.h @@ -57,8 +57,8 @@ struct netlbl_domaddr6_map { struct netlbl_dom_map { char *domain; - u16 family; struct netlbl_dommap_def def; + u16 family; u32 valid; struct list_head list; -- cgit v1.2.3 From 634236b34d7a8c9e11c12b0746b83b8942fc8f2e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 19 Jun 2023 12:43:35 +0000 Subject: net: remove sk_is_ipmr() and sk_is_icmpv6() helpers Blamed commit added these helpers for sake of detecting RAW sockets specific ioctl. syzbot complained about it [1]. Issue here is that RAW sockets could pretend there was no need to call ipmr_sk_ioctl() Regardless of inet_sk(sk)->inet_num, we must be prepared for ipmr_ioctl() being called later. This must happen from ipmr_sk_ioctl() context only. We could add a safety check in ipmr_ioctl() at the risk of breaking applications. Instead, remove sk_is_ipmr() and sk_is_icmpv6() because their name would be misleading, once we change their implementation. 
[1] BUG: KASAN: stack-out-of-bounds in ipmr_ioctl+0xb12/0xbd0 net/ipv4/ipmr.c:1654 Read of size 4 at addr ffffc90003aefae4 by task syz-executor105/5004 CPU: 0 PID: 5004 Comm: syz-executor105 Not tainted 6.4.0-rc6-syzkaller-01304-gc08afcdcf952 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/27/2023 Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xd9/0x150 lib/dump_stack.c:106 print_address_description.constprop.0+0x2c/0x3c0 mm/kasan/report.c:351 print_report mm/kasan/report.c:462 [inline] kasan_report+0x11c/0x130 mm/kasan/report.c:572 ipmr_ioctl+0xb12/0xbd0 net/ipv4/ipmr.c:1654 raw_ioctl+0x4e/0x1e0 net/ipv4/raw.c:881 sock_ioctl_out net/core/sock.c:4186 [inline] sk_ioctl+0x151/0x440 net/core/sock.c:4214 inet_ioctl+0x18c/0x380 net/ipv4/af_inet.c:1001 sock_do_ioctl+0xcc/0x230 net/socket.c:1189 sock_ioctl+0x1f8/0x680 net/socket.c:1306 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:870 [inline] __se_sys_ioctl fs/ioctl.c:856 [inline] __x64_sys_ioctl+0x197/0x210 fs/ioctl.c:856 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x39/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd RIP: 0033:0x7f2944bf6ad9 Code: 28 c3 e8 2a 14 00 00 66 2e 0f 1f 84 00 00 00 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 c0 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007ffd8897a028 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f2944bf6ad9 RDX: 0000000000000000 RSI: 00000000000089e1 RDI: 0000000000000003 RBP: 00007f2944bbac80 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00007f2944bbad10 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 The buggy address belongs to stack of task syz-executor105/5004 and is located at offset 36 in frame: sk_ioctl+0x0/0x440 net/core/sock.c:4172 This frame has 2 objects: [32, 36) 'karg' [48, 88) 'buffer' Fixes: e1d001fa5b47 ("net: ioctl: Use kernel memory on protocol ioctl callbacks") Reported-by: syzbot Signed-off-by: Eric Dumazet Cc: Breno Leitao Cc: Kuniyuki Iwashima Reviewed-by: Jiri Pirko Reviewed-by: David Ahern Reviewed-by: Willem de Bruijn Link: https://lore.kernel.org/r/20230619124336.651528-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- include/linux/icmpv6.h | 6 ------ include/linux/mroute.h | 11 ----------- net/core/sock.c | 4 ++-- 3 files changed, 2 insertions(+), 19 deletions(-) (limited to 'net') diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 1fe33e6741cc..db0f4fcfdaf4 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -111,10 +111,4 @@ static inline bool icmpv6_is_err(int type) return false; } -static inline int sk_is_icmpv6(struct sock *sk) -{ - return sk->sk_family == AF_INET6 && - inet_sk(sk)->inet_num == IPPROTO_ICMPV6; -} - #endif diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 94c6e6f549f0..4c5003afee6c 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -16,12 +16,6 @@ static inline int ip_mroute_opt(int opt) return opt >= MRT_BASE && opt <= MRT_MAX; } -static inline int sk_is_ipmr(struct sock *sk) -{ - return sk->sk_family == AF_INET && - inet_sk(sk)->inet_num == IPPROTO_IGMP; -} - int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); int ip_mroute_getsockopt(struct sock *, int, sockptr_t, sockptr_t); int ipmr_ioctl(struct sock *sk, int cmd, void *arg); @@ -57,11 +51,6 @@ static 
inline int ip_mroute_opt(int opt) return 0; } -static inline int sk_is_ipmr(struct sock *sk) -{ - return 0; -} - static inline bool ipmr_rule_default(const struct fib_rule *rule) { return true; diff --git a/net/core/sock.c b/net/core/sock.c index cff3e82514d1..8ec8f4c9911f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -4199,9 +4199,9 @@ int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) { int rc = 1; - if (sk_is_ipmr(sk)) + if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET) rc = ipmr_sk_ioctl(sk, cmd, arg); - else if (sk_is_icmpv6(sk)) + else if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET6) rc = ip6mr_sk_ioctl(sk, cmd, arg); else if (sk_is_phonet(sk)) rc = phonet_sk_ioctl(sk, cmd, arg); -- cgit v1.2.3 From 7339e0f2e1bcb732b922a1c40a01b6002bec1ee5 Mon Sep 17 00:00:00 2001 From: Alon Giladi Date: Mon, 19 Jun 2023 18:37:38 +0300 Subject: wifi: mac80211: drop unprotected robust mgmt before 4-way-HS When MFP is used, drop unprotected robust management frames also before the 4-way handshake has been completed, i.e. no key has been installed yet. Signed-off-by: Alon Giladi Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230619183718.cfbefddccd0c.Ife369dbb61c87e311ce15739d5b2b4763bfdfbae@changeid Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6ebec32b4ebc..1d2e7a6dd2a1 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2418,13 +2418,20 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { if (unlikely(!ieee80211_has_protected(fc) && - ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && - rx->key)) { + ieee80211_is_unicast_robust_mgmt_frame(rx->skb))) { if (ieee80211_is_deauth(fc) || - ieee80211_is_disassoc(fc)) + ieee80211_is_disassoc(fc)) { + /* + * Permit unprotected deauth/disassoc frames + * during 4-way-HS (key is installed after HS). + */ + if (!rx->key) + return 0; + cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, rx->skb->data, rx->skb->len); + } return -EACCES; } /* BIP does not use Protected field, so need to check MMIE */ -- cgit v1.2.3 From 2cc7add345ea0e3d28a2fae29b93884909753c63 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 19 Jun 2023 16:26:46 +0300 Subject: wifi: mac80211: move action length check up We'd like to add more checks to the function here for action frames, so move up the length check from the action processing. 
Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230619161906.e799254e923f.I0a1de5f6bbdc1b2ef5efaa0ac80c7c3f39415538@changeid Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 1d2e7a6dd2a1..e35d6ba8521b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3363,6 +3363,11 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) if (!ieee80211_is_mgmt(mgmt->frame_control)) return RX_DROP_MONITOR; + /* drop too small action frames */ + if (ieee80211_is_action(mgmt->frame_control) && + rx->skb->len < IEEE80211_MIN_ACTION_SIZE) + return RX_DROP_UNUSABLE; + if (rx->sdata->vif.type == NL80211_IFTYPE_AP && ieee80211_is_beacon(mgmt->frame_control) && !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { @@ -3452,10 +3457,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) if (!ieee80211_is_action(mgmt->frame_control)) return RX_CONTINUE; - /* drop too small frames */ - if (len < IEEE80211_MIN_ACTION_SIZE) - return RX_DROP_UNUSABLE; - if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) -- cgit v1.2.3 From 76a3059cf1246a71f242822c6d605e5baa8924a3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 19 Jun 2023 16:26:47 +0300 Subject: wifi: mac80211: drop some unprotected action frames We should not receive/handle unicast protected dual or public action frames that aren't protected, so drop them - in the latter case of course only if MFP is used. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230619161906.eb4461108129.I3c2223cf29d8a3586dfc74b2dda3f6fa2a4eea7c@changeid Signed-off-by: Johannes Berg --- net/mac80211/rx.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index e35d6ba8521b..e579581441de 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2405,9 +2405,9 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); - __le16 fc = hdr->frame_control; + struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; + __le16 fc = mgmt->frame_control; /* * Pass through unencrypted frames if the hardware has @@ -2416,6 +2416,11 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) if (status->flag & RX_FLAG_DECRYPTED) return 0; + /* drop unicast protected dual (that wasn't protected) */ + if (ieee80211_is_action(fc) && + mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION) + return -EACCES; + if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { if (unlikely(!ieee80211_has_protected(fc) && ieee80211_is_unicast_robust_mgmt_frame(rx->skb))) { @@ -2458,6 +2463,12 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) if (unlikely(ieee80211_is_action(fc) && !rx->key && ieee80211_is_robust_mgmt_frame(rx->skb))) return -EACCES; + + /* drop unicast public action frames when using MPF */ + if (is_unicast_ether_addr(mgmt->da) && + ieee80211_is_public_action((void *)rx->skb->data, + rx->skb->len)) + return -EACCES; } return 0; -- cgit v1.2.3 From 
5c1f97537bfb9f358e0ecc88c3c8a913c51944cb Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 19 Jun 2023 16:26:48 +0300 Subject: wifi: mac80211: store BSS param change count from assoc response When receiving a multi-link association response, make sure to track the BSS parameter change count for each link, including the assoc link. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230619161906.1799c164e7e9.I8e2c1f5eec6eec3fab525ae2dead9f6f099a2427@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 64 ++++++++++++++++++++++++++++++++++++++++++++++ net/mac80211/ieee80211_i.h | 2 ++ net/mac80211/mlme.c | 16 +++++++++++- 3 files changed, 81 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index fa679613c562..15c4e12b6fc7 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4689,6 +4689,34 @@ static inline u8 ieee80211_mle_common_size(const u8 *data) return sizeof(*mle) + common + mle->variable[0]; } +/** + * ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count + * @mle: the basic multi link element + * + * The element is assumed to be of the correct type (BASIC) and big enough, + * this must be checked using ieee80211_mle_type_ok(). + * + * If the BSS parameter change count value can't be found (the presence bit + * for it is clear), 0 will be returned. + */ +static inline u8 +ieee80211_mle_get_bss_param_ch_cnt(const struct ieee80211_multi_link_elem *mle) +{ + u16 control = le16_to_cpu(mle->control); + const u8 *common = mle->variable; + + /* common points now at the beginning of ieee80211_mle_basic_common_info */ + common += sizeof(struct ieee80211_mle_basic_common_info); + + if (!(control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)) + return 0; + + if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID) + common += 1; + + return *common; +} + /** * ieee80211_mle_get_eml_sync_delay - returns the medium sync delay * @data: pointer to the multi link EHT IE @@ -4902,6 +4930,42 @@ static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data, fixed + prof->sta_info_len <= len; } +/** + * ieee80211_mle_basic_sta_prof_bss_param_ch_cnt - get per-STA profile BSS + * parameter change count + * @prof: the per-STA profile, having been checked with + * ieee80211_mle_basic_sta_prof_size_ok() for the correct length + * + * Return: The BSS parameter change count value if present, 0 otherwise. 
+ */ +static inline u8 +ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta_profile *prof) +{ + u16 control = le16_to_cpu(prof->control); + const u8 *pos = prof->variable; + + if (!(control & IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT)) + return 0; + + if (control & IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT) + pos += 6; + if (control & IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT) + pos += 2; + if (control & IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT) + pos += 8; + if (control & IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT) + pos += 2; + if (control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE && + control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE) { + if (control & IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE) + pos += 2; + else + pos += 1; + } + + return *pos; +} + #define IEEE80211_MLE_STA_RECONF_CONTROL_LINK_ID 0x000f #define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE 0x0010 #define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT 0x0020 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 2f7665998da0..91633a0b723e 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -951,6 +951,8 @@ struct ieee80211_link_data_managed { int wmm_last_param_set; int mu_edca_last_param_set; + u8 bss_param_ch_cnt; + struct cfg80211_bss *bss; }; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index cf15089e95f1..f93eb38ae0b8 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4018,6 +4018,8 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, const struct cfg80211_bss_ies *bss_ies = NULL; struct ieee80211_supported_band *sband; struct ieee802_11_elems *elems; + const __le16 prof_bss_param_ch_present = + cpu_to_le16(IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT); u16 capab_info; bool ret; @@ -4033,7 +4035,17 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, * successful, so set the status directly to success */ assoc_data->link[link_id].status = WLAN_STATUS_SUCCESS; - } else if (!elems->prof) { + if (elems->ml_basic) { + if (!(elems->ml_basic->control & + cpu_to_le16(IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT))) { + ret = false; + goto out; + } + link->u.mgd.bss_param_ch_cnt = + ieee80211_mle_get_bss_param_ch_cnt(elems->ml_basic); + } + } else if (!elems->prof || + !(elems->prof->control & prof_bss_param_ch_present)) { ret = false; goto out; } else { @@ -4046,6 +4058,8 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, */ capab_info = get_unaligned_le16(ptr); assoc_data->link[link_id].status = get_unaligned_le16(ptr + 2); + link->u.mgd.bss_param_ch_cnt = + ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(elems->prof); if (assoc_data->link[link_id].status != WLAN_STATUS_SUCCESS) { link_info(link, "association response status code=%u\n", -- cgit v1.2.3 From 4484de23ba22e3023e9cbe4246a927ffb00db56f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 19 Jun 2023 16:26:49 +0300 Subject: wifi: mac80211: always hold sdata lock in chanctx assign/unassign Due to all the multi-link handling, we now expose the fact that the sdata/vif is locked to drivers, e.g. when the driver uses ieee80211_set_monitor_channel(). This was true when a chanctx is added to or removed from a link, _except_ in monitor mode with the virtual sdata/vif. Change that, so that drivers can make that assumption. 
Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230619161906.a5cf7534beda.I5b51664231abee27e02f222083df7ccf88722929@changeid Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 16 +++++++++++----- net/mac80211/iface.c | 7 +++++++ 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index eea7028a46a7..e7ac24603892 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -913,24 +913,30 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy, if (cfg80211_chandef_identical(&local->monitor_chandef, chandef)) return 0; - mutex_lock(&local->mtx); if (local->use_chanctx) { sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata); if (sdata) { + sdata_lock(sdata); + mutex_lock(&local->mtx); ieee80211_link_release_channel(&sdata->deflink); ret = ieee80211_link_use_channel(&sdata->deflink, chandef, IEEE80211_CHANCTX_EXCLUSIVE); + mutex_unlock(&local->mtx); + sdata_unlock(sdata); + } + } else { + mutex_lock(&local->mtx); + if (local->open_count == local->monitors) { + local->_oper_chandef = *chandef; + ieee80211_hw_config(local, 0); } - } else if (local->open_count == local->monitors) { - local->_oper_chandef = *chandef; - ieee80211_hw_config(local, 0); + mutex_unlock(&local->mtx); } if (ret == 0) local->monitor_chandef = *chandef; - mutex_unlock(&local->mtx); return ret; } diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 9518acf9643b..be586bc0b5b7 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1133,6 +1133,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local) snprintf(sdata->name, IFNAMSIZ, "%s-monitor", wiphy_name(local->hw.wiphy)); sdata->wdev.iftype = NL80211_IFTYPE_MONITOR; + mutex_init(&sdata->wdev.mtx); ieee80211_sdata_init(local, sdata); @@ -1157,16 +1158,19 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local) rcu_assign_pointer(local->monitor_sdata, sdata); mutex_unlock(&local->iflist_mtx); + sdata_lock(sdata); mutex_lock(&local->mtx); ret = ieee80211_link_use_channel(&sdata->deflink, &local->monitor_chandef, IEEE80211_CHANCTX_EXCLUSIVE); mutex_unlock(&local->mtx); + sdata_unlock(sdata); if (ret) { mutex_lock(&local->iflist_mtx); RCU_INIT_POINTER(local->monitor_sdata, NULL); mutex_unlock(&local->iflist_mtx); synchronize_net(); drv_remove_interface(local, sdata); + mutex_destroy(&sdata->wdev.mtx); kfree(sdata); return ret; } @@ -1202,12 +1206,15 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local) synchronize_net(); + sdata_lock(sdata); mutex_lock(&local->mtx); ieee80211_link_release_channel(&sdata->deflink); mutex_unlock(&local->mtx); + sdata_unlock(sdata); drv_remove_interface(local, sdata); + mutex_destroy(&sdata->wdev.mtx); kfree(sdata); } -- cgit v1.2.3 From b8b80770b26c4591f20f1cde3328e5f1489c4488 Mon Sep 17 00:00:00 2001 From: Benjamin Berg Date: Mon, 19 Jun 2023 16:26:50 +0300 Subject: wifi: mac80211: avoid lockdep checking when removing deflink struct sta_info may be removed without holding sta_mtx if it has not yet been inserted. To support this, only assert that the lock is held for links other than the deflink. This fixes lockdep issues that may be triggered in error cases. 
Signed-off-by: Benjamin Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230619161906.cdd81377dea0.If5a6734b4b85608a2275a09b4f99b5564d82997f@changeid Signed-off-by: Johannes Berg --- net/mac80211/sta_info.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 731b832b257c..7751f8ba960e 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -355,8 +355,9 @@ static void sta_remove_link(struct sta_info *sta, unsigned int link_id, struct sta_link_alloc *alloc = NULL; struct link_sta_info *link_sta; - link_sta = rcu_dereference_protected(sta->link[link_id], - lockdep_is_held(&sta->local->sta_mtx)); + link_sta = rcu_access_pointer(sta->link[link_id]); + if (link_sta != &sta->deflink) + lockdep_assert_held(&sta->local->sta_mtx); if (WARN_ON(!link_sta)) return; -- cgit v1.2.3 From 2829b2fc8910984daa89be8005adbd9fb6205024 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 19 Jun 2023 16:26:51 +0300 Subject: wifi: mac80211: fix CRC calculation for extended elems For extended elements, we currently only calculate the CRC for some of them, but really we should do it also for the rest that we care about, such as EHT operation and multi- link. Also, while at it, it seems we should do it even if they aren't well-formed, so we notice if that changes. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230619161906.93235d5c8651.I6615cb3c1244bc9618066baa2bdad7982e9abd1f@changeid Signed-off-by: Johannes Berg --- net/mac80211/util.c | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) (limited to 'net') diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 6c52c597c187..8a6917cf63cf 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -918,6 +918,7 @@ ieee80211_parse_extension_element(u32 *crc, struct ieee80211_elems_parse_params *params) { const void *data = elem->data + 1; + bool calc_crc = false; u8 len; if (!elem->datalen) @@ -927,12 +928,9 @@ ieee80211_parse_extension_element(u32 *crc, switch (elem->data[0]) { case WLAN_EID_EXT_HE_MU_EDCA: - if (len >= sizeof(*elems->mu_edca_param_set)) { + calc_crc = true; + if (len >= sizeof(*elems->mu_edca_param_set)) elems->mu_edca_param_set = data; - if (crc) - *crc = crc32_be(*crc, (void *)elem, - elem->datalen + 2); - } break; case WLAN_EID_EXT_HE_CAPABILITY: if (ieee80211_he_capa_size_ok(data, len)) { @@ -941,13 +939,10 @@ ieee80211_parse_extension_element(u32 *crc, } break; case WLAN_EID_EXT_HE_OPERATION: + calc_crc = true; if (len >= sizeof(*elems->he_operation) && - len >= ieee80211_he_oper_size(data) - 1) { - if (crc) - *crc = crc32_be(*crc, (void *)elem, - elem->datalen + 2); + len >= ieee80211_he_oper_size(data) - 1) elems->he_operation = data; - } break; case WLAN_EID_EXT_UORA: if (len >= 1) @@ -981,16 +976,15 @@ ieee80211_parse_extension_element(u32 *crc, case WLAN_EID_EXT_EHT_OPERATION: if (ieee80211_eht_oper_size_ok(data, len)) elems->eht_operation = data; + calc_crc = true; break; case WLAN_EID_EXT_EHT_MULTI_LINK: + calc_crc = true; + if (ieee80211_mle_size_ok(data, len)) { const struct ieee80211_multi_link_elem *mle = (void *)data; - if (crc) - *crc = crc32_be(*crc, (void *)elem, - elem->datalen + 2); - switch (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE)) { case IEEE80211_ML_CONTROL_TYPE_BASIC: @@ -1009,6 +1003,9 @@ ieee80211_parse_extension_element(u32 *crc, } break; } + + if (crc && calc_crc) + *crc = crc32_be(*crc, 
(void *)elem, elem->datalen + 2); } static u32 -- cgit v1.2.3 From 4ef2f53e50cba9780057b51357ef45cb5f49859d Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Mon, 19 Jun 2023 16:26:52 +0300 Subject: wifi: cfg80211: Retrieve PSD information from RNR AP information Retrieve the Power Spectral Density (PSD) value from RNR AP information entry and store it so it could be used by the drivers. PSD value is explained in Section 9.4.2.170 of Draft P802.11Revme_D2.0. Signed-off-by: Ilan Peer Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230619161906.067ded2b8fc3.I9f407ab5800cbb07045a0537a513012960ced740@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 7 +++++-- include/net/cfg80211.h | 2 ++ net/wireless/scan.c | 13 +++++++++++++ 3 files changed, 20 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 15c4e12b6fc7..6f1747a9c106 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4504,6 +4504,9 @@ static inline bool for_each_element_completed(const struct element *element, #define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20 #define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40 +#define IEEE80211_RNR_TBTT_PARAMS_PSD_NO_LIMIT 127 +#define IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED -128 + struct ieee80211_neighbor_ap_info { u8 tbtt_info_hdr; u8 tbtt_info_len; @@ -4539,7 +4542,7 @@ struct ieee80211_tbtt_info_7_8_9 { /* The following element is optional, structure may not grow */ u8 bss_params; - u8 psd_20; + s8 psd_20; } __packed; /* Format of the TBTT information element if it has >= 11 bytes */ @@ -4550,7 +4553,7 @@ struct ieee80211_tbtt_info_ge_11 { /* The following elements are optional, structure may grow */ u8 bss_params; - u8 psd_20; + s8 psd_20; struct ieee80211_rnr_mld_params mld_params; } __packed; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 3a736f9286b0..7c7d03aa9d06 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -2466,6 +2466,7 @@ struct cfg80211_scan_info { * @short_ssid_valid: @short_ssid is valid and can be used * @psc_no_listen: when set, and the channel is a PSC channel, no need to wait * 20 TUs before starting to send probe requests. + * @psd_20: The AP's 20 MHz PSD value. */ struct cfg80211_scan_6ghz_params { u32 short_ssid; @@ -2474,6 +2475,7 @@ struct cfg80211_scan_6ghz_params { bool unsolicited_probe; bool short_ssid_valid; bool psc_no_listen; + s8 psd_20; }; /** diff --git a/net/wireless/scan.c b/net/wireless/scan.c index ede95caecb34..8bf00caf5d29 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -96,6 +96,7 @@ MODULE_PARM_DESC(bss_entries_limit, * colocated and can be discovered via legacy bands. 
* @short_ssid_valid: short_ssid is valid and can be used * @short_ssid: the short SSID for this SSID + * @psd_20: The 20MHz PSD EIRP of the primary 20MHz channel for the reported AP */ struct cfg80211_colocated_ap { struct list_head list; @@ -111,6 +112,7 @@ struct cfg80211_colocated_ap { transmitted_bssid:1, colocated_ess:1, short_ssid_valid:1; + s8 psd_20; }; static void bss_free(struct cfg80211_internal_bss *bss) @@ -578,6 +580,8 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry, { u8 bss_params; + entry->psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED; + /* The length is already verified by the caller to contain bss_params */ if (length > sizeof(struct ieee80211_tbtt_info_7_8_9)) { struct ieee80211_tbtt_info_ge_11 *tbtt_info = (void *)pos; @@ -594,12 +598,20 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry, IEEE80211_RNR_MLD_PARAMS_DISABLED_LINK)) return -EINVAL; } + + if (length >= offsetofend(struct ieee80211_tbtt_info_ge_11, + psd_20)) + entry->psd_20 = tbtt_info->psd_20; } else { struct ieee80211_tbtt_info_7_8_9 *tbtt_info = (void *)pos; memcpy(entry->bssid, tbtt_info->bssid, ETH_ALEN); bss_params = tbtt_info->bss_params; + + if (length == offsetofend(struct ieee80211_tbtt_info_7_8_9, + psd_20)) + entry->psd_20 = tbtt_info->psd_20; } /* ignore entries with invalid BSSID */ @@ -904,6 +916,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev) scan_6ghz_params->short_ssid = ap->short_ssid; scan_6ghz_params->short_ssid_valid = ap->short_ssid_valid; scan_6ghz_params->unsolicited_probe = ap->unsolicited_probe; + scan_6ghz_params->psd_20 = ap->psd_20; /* * If a PSC channel is added to the scan and 'need_scan_psc' is -- cgit v1.2.3 From 6c5b9a3296e146cc74b1d006c6a546ea92534ade Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 19 Jun 2023 16:26:53 +0300 Subject: wifi: nl80211/reg: add no-EHT regulatory flag This just propagates to the channel flags, like no-HE and similar other flags before it. Signed-off-by: Johannes Berg Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230619161906.74ce2983aed8.Ifa343ba89c11760491daad5aee5a81209d5735a7@changeid Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 2 ++ net/wireless/reg.c | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 3190d34269ef..88eb85c63029 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4450,6 +4450,7 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed * @NL80211_RRF_NO_HE: HE operation not allowed * @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed + * @NL80211_RRF_NO_EHT: EHT operation not allowed */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, @@ -4469,6 +4470,7 @@ enum nl80211_reg_rule_flags { NL80211_RRF_NO_160MHZ = 1<<16, NL80211_RRF_NO_HE = 1<<17, NL80211_RRF_NO_320MHZ = 1<<18, + NL80211_RRF_NO_EHT = 1<<19, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR diff --git a/net/wireless/reg.c b/net/wireless/reg.c index f9e03850d71b..0317cf9da307 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -5,7 +5,7 @@ * Copyright 2008-2011 Luis R. 
Rodriguez * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2022 Intel Corporation + * Copyright (C) 2018 - 2023 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -1587,6 +1587,8 @@ static u32 map_regdom_flags(u32 rd_flags) channel_flags |= IEEE80211_CHAN_NO_HE; if (rd_flags & NL80211_RRF_NO_320MHZ) channel_flags |= IEEE80211_CHAN_NO_320MHZ; + if (rd_flags & NL80211_RRF_NO_EHT) + channel_flags |= IEEE80211_CHAN_NO_EHT; return channel_flags; } -- cgit v1.2.3 From f52a0b408ed144afa42b45f078a28542e711551b Mon Sep 17 00:00:00 2001 From: Yedidya Benshimol Date: Wed, 21 Jun 2023 14:44:36 +0300 Subject: wifi: mac80211: mark keys as uploaded when added by the driver When the driver has some form of GTK rekeying offload, e.g. during WoWLAN, mac80211 can assume that keys that the driver adds for that are already present in the hardware acceleration. Mark them accordingly. Signed-off-by: Yedidya Benshimol Signed-off-by: Gregory Greenman Link: https://lore.kernel.org/r/20230621144414.bc78c7ff2a3d.I5e313d69e2b6a7a4766ef82d0faa122dd4c1c46d@changeid Signed-off-by: Johannes Berg --- net/mac80211/key.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/mac80211/key.c b/net/mac80211/key.c index e8f6c1e5eabf..21cf5a208910 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -6,7 +6,7 @@ * Copyright 2007-2008 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright 2018-2020, 2022 Intel Corporation + * Copyright 2018-2020, 2022-2023 Intel Corporation */ #include @@ -510,8 +510,12 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, ret = ieee80211_key_enable_hw_accel(new); } } else { - if (!new->local->wowlan) + if (!new->local->wowlan) { ret = ieee80211_key_enable_hw_accel(new); + } else { + assert_key_lock(new->local); + new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; + } } if (ret) -- cgit v1.2.3 From 6e98730bc0b44acaf86eccc75f823128aa9c9e79 Mon Sep 17 00:00:00 2001 From: Gilad Sever Date: Wed, 21 Jun 2023 13:42:08 +0300 Subject: bpf: Factor out socket lookup functions for the TC hookpoint. Change BPF helper socket lookup functions to use TC specific variants: bpf_tc_sk_lookup_tcp() / bpf_tc_sk_lookup_udp() / bpf_tc_skc_lookup_tcp() instead of sharing implementation with the cg / sk_skb hooking points. This allows introducing a separate logic for the TC flow. The tc functions are identical to the original code. 
Signed-off-by: Gilad Sever Signed-off-by: Daniel Borkmann Reviewed-by: Shmulik Ladkani Reviewed-by: Eyal Birger Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/bpf/20230621104211.301902-2-gilad9366@gmail.com --- net/core/filter.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/core/filter.c b/net/core/filter.c index 428df050d021..11e2afbfdce9 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6740,6 +6740,63 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { .arg5_type = ARG_ANYTHING, }; +BPF_CALL_5(bpf_tc_skc_lookup_tcp, struct sk_buff *, skb, + struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) +{ + return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP, + netns_id, flags); +} + +static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = { + .func = bpf_tc_skc_lookup_tcp, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +}; + +BPF_CALL_5(bpf_tc_sk_lookup_tcp, struct sk_buff *, skb, + struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) +{ + return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, + netns_id, flags); +} + +static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = { + .func = bpf_tc_sk_lookup_tcp, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +}; + +BPF_CALL_5(bpf_tc_sk_lookup_udp, struct sk_buff *, skb, + struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) +{ + return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, + netns_id, flags); +} + +static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = { + .func = bpf_tc_sk_lookup_udp, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_PTR_TO_SOCKET_OR_NULL, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +}; + BPF_CALL_1(bpf_sk_release, struct sock *, sk) { if (sk && sk_is_refcounted(sk)) @@ -7995,9 +8052,9 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) #endif #ifdef CONFIG_INET case BPF_FUNC_sk_lookup_tcp: - return &bpf_sk_lookup_tcp_proto; + return &bpf_tc_sk_lookup_tcp_proto; case BPF_FUNC_sk_lookup_udp: - return &bpf_sk_lookup_udp_proto; + return &bpf_tc_sk_lookup_udp_proto; case BPF_FUNC_sk_release: return &bpf_sk_release_proto; case BPF_FUNC_tcp_sock: @@ -8005,7 +8062,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_get_listener_sock: return &bpf_get_listener_sock_proto; case BPF_FUNC_skc_lookup_tcp: - return &bpf_skc_lookup_tcp_proto; + return &bpf_tc_skc_lookup_tcp_proto; case BPF_FUNC_tcp_check_syncookie: return &bpf_tcp_check_syncookie_proto; case BPF_FUNC_skb_ecn_set_ce: -- cgit v1.2.3 From 97fbfeb86917bdbe9c41d5143e335a929147f405 Mon Sep 17 00:00:00 2001 From: Gilad Sever Date: Wed, 21 Jun 2023 13:42:09 +0300 Subject: bpf: Call __bpf_sk_lookup()/__bpf_skc_lookup() directly via TC hookpoint skb->dev always exists in the tc flow. 
There is no need to use bpf_skc_lookup(), bpf_sk_lookup() from this code path. This change facilitates fixing the tc flow to be VRF aware. Signed-off-by: Gilad Sever Signed-off-by: Daniel Borkmann Reviewed-by: Shmulik Ladkani Reviewed-by: Eyal Birger Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/bpf/20230621104211.301902-3-gilad9366@gmail.com --- net/core/filter.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/core/filter.c b/net/core/filter.c index 11e2afbfdce9..a9fb897822b2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6743,8 +6743,12 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { BPF_CALL_5(bpf_tc_skc_lookup_tcp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { - return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP, - netns_id, flags); + struct net *caller_net = dev_net(skb->dev); + int ifindex = skb->dev->ifindex; + + return (unsigned long)__bpf_skc_lookup(skb, tuple, len, caller_net, + ifindex, IPPROTO_TCP, netns_id, + flags); } static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = { @@ -6762,8 +6766,12 @@ static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = { BPF_CALL_5(bpf_tc_sk_lookup_tcp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { - return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, - netns_id, flags); + struct net *caller_net = dev_net(skb->dev); + int ifindex = skb->dev->ifindex; + + return (unsigned long)__bpf_sk_lookup(skb, tuple, len, caller_net, + ifindex, IPPROTO_TCP, netns_id, + flags); } static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = { @@ -6781,8 +6789,12 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = { BPF_CALL_5(bpf_tc_sk_lookup_udp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { - return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, - netns_id, flags); + struct net *caller_net = dev_net(skb->dev); + int ifindex = skb->dev->ifindex; + + return (unsigned long)__bpf_sk_lookup(skb, tuple, len, caller_net, + ifindex, IPPROTO_UDP, netns_id, + flags); } static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = { -- cgit v1.2.3 From 9a5cb79762e0eda17ca15c2a6eaca4622383c21c Mon Sep 17 00:00:00 2001 From: Gilad Sever Date: Wed, 21 Jun 2023 13:42:10 +0300 Subject: bpf: Fix bpf socket lookup from tc/xdp to respect socket VRF bindings When calling bpf_sk_lookup_tcp(), bpf_sk_lookup_udp() or bpf_skc_lookup_tcp() from tc/xdp ingress, VRF socket bindings aren't respected, i.e. unbound sockets are returned, and bound sockets aren't found. VRF binding is determined by the sdif argument to sk_lookup(), however when called from tc the IP SKB control block isn't initialized and thus inet{,6}_sdif() always returns 0. Fix by calculating sdif for the tc/xdp flows by observing the device's l3 enslaved state. The cg/sk_skb hooking points which are expected to support inet{,6}_sdif() pass sdif=-1 which makes __bpf_skc_lookup() use the existing logic.
Fixes: 6acc9b432e67 ("bpf: Add helper to retrieve socket in BPF") Signed-off-by: Gilad Sever Signed-off-by: Daniel Borkmann Reviewed-by: Shmulik Ladkani Reviewed-by: Eyal Birger Acked-by: Stanislav Fomichev Cc: David Ahern Link: https://lore.kernel.org/bpf/20230621104211.301902-4-gilad9366@gmail.com --- include/linux/netdevice.h | 9 +++++++ net/core/filter.c | 69 ++++++++++++++++++++++++++--------------------- 2 files changed, 48 insertions(+), 30 deletions(-) (limited to 'net') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 08fbd4622ccf..8c95ebbcf203 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -5090,6 +5090,15 @@ static inline bool netif_is_l3_slave(const struct net_device *dev) return dev->priv_flags & IFF_L3MDEV_SLAVE; } +static inline int dev_sdif(const struct net_device *dev) +{ +#ifdef CONFIG_NET_L3_MASTER_DEV + if (netif_is_l3_slave(dev)) + return dev->ifindex; +#endif + return 0; +} + static inline bool netif_is_bridge_master(const struct net_device *dev) { return dev->priv_flags & IFF_EBRIDGE; diff --git a/net/core/filter.c b/net/core/filter.c index a9fb897822b2..06ba0e56e369 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6568,12 +6568,11 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple, static struct sock * __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, - u64 flags) + u64 flags, int sdif) { struct sock *sk = NULL; struct net *net; u8 family; - int sdif; if (len == sizeof(tuple->ipv4)) family = AF_INET; @@ -6585,10 +6584,12 @@ __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, if (unlikely(flags || !((s32)netns_id < 0 || netns_id <= S32_MAX))) goto out; - if (family == AF_INET) - sdif = inet_sdif(skb); - else - sdif = inet6_sdif(skb); + if (sdif < 0) { + if (family == AF_INET) + sdif = inet_sdif(skb); + else + sdif = inet6_sdif(skb); + } if ((s32)netns_id < 0) { net = caller_net; @@ -6608,10 +6609,11 @@ out: static struct sock * __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, - u64 flags) + u64 flags, int sdif) { struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net, - ifindex, proto, netns_id, flags); + ifindex, proto, netns_id, flags, + sdif); if (sk) { struct sock *sk2 = sk_to_full_sk(sk); @@ -6651,7 +6653,7 @@ bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, } return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto, - netns_id, flags); + netns_id, flags, -1); } static struct sock * @@ -6743,12 +6745,13 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { BPF_CALL_5(bpf_tc_skc_lookup_tcp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { - struct net *caller_net = dev_net(skb->dev); - int ifindex = skb->dev->ifindex; + struct net_device *dev = skb->dev; + int ifindex = dev->ifindex, sdif = dev_sdif(dev); + struct net *caller_net = dev_net(dev); return (unsigned long)__bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, IPPROTO_TCP, netns_id, - flags); + flags, sdif); } static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = { @@ -6766,12 +6769,13 @@ static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = { BPF_CALL_5(bpf_tc_sk_lookup_tcp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { - struct net *caller_net = 
dev_net(skb->dev); - int ifindex = skb->dev->ifindex; + struct net_device *dev = skb->dev; + int ifindex = dev->ifindex, sdif = dev_sdif(dev); + struct net *caller_net = dev_net(dev); return (unsigned long)__bpf_sk_lookup(skb, tuple, len, caller_net, ifindex, IPPROTO_TCP, netns_id, - flags); + flags, sdif); } static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = { @@ -6789,12 +6793,13 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = { BPF_CALL_5(bpf_tc_sk_lookup_udp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { - struct net *caller_net = dev_net(skb->dev); - int ifindex = skb->dev->ifindex; + struct net_device *dev = skb->dev; + int ifindex = dev->ifindex, sdif = dev_sdif(dev); + struct net *caller_net = dev_net(dev); return (unsigned long)__bpf_sk_lookup(skb, tuple, len, caller_net, ifindex, IPPROTO_UDP, netns_id, - flags); + flags, sdif); } static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = { @@ -6826,12 +6831,13 @@ static const struct bpf_func_proto bpf_sk_release_proto = { BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) { - struct net *caller_net = dev_net(ctx->rxq->dev); - int ifindex = ctx->rxq->dev->ifindex; + struct net_device *dev = ctx->rxq->dev; + int ifindex = dev->ifindex, sdif = dev_sdif(dev); + struct net *caller_net = dev_net(dev); return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex, IPPROTO_UDP, netns_id, - flags); + flags, sdif); } static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = { @@ -6849,12 +6855,13 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = { BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) { - struct net *caller_net = dev_net(ctx->rxq->dev); - int ifindex = ctx->rxq->dev->ifindex; + struct net_device *dev = ctx->rxq->dev; + int ifindex = dev->ifindex, sdif = dev_sdif(dev); + struct net *caller_net = dev_net(dev); return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net, ifindex, IPPROTO_TCP, netns_id, - flags); + flags, sdif); } static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = { @@ -6872,12 +6879,13 @@ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = { BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) { - struct net *caller_net = dev_net(ctx->rxq->dev); - int ifindex = ctx->rxq->dev->ifindex; + struct net_device *dev = ctx->rxq->dev; + int ifindex = dev->ifindex, sdif = dev_sdif(dev); + struct net *caller_net = dev_net(dev); return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex, IPPROTO_TCP, netns_id, - flags); + flags, sdif); } static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = { @@ -6897,7 +6905,8 @@ BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx, { return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, sock_net(ctx->sk), 0, - IPPROTO_TCP, netns_id, flags); + IPPROTO_TCP, netns_id, flags, + -1); } static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = { @@ -6916,7 +6925,7 @@ BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx, { return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0, IPPROTO_TCP, - netns_id, flags); + netns_id, flags, -1); } static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = { @@ 
-6935,7 +6944,7 @@ BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx, { return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0, IPPROTO_UDP, - netns_id, flags); + netns_id, flags, -1); } static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = { -- cgit v1.2.3 From e748d0fd66abc4b1c136022e4e053004fce2b792 Mon Sep 17 00:00:00 2001 From: Ravi Gunasekaran Date: Wed, 14 Jun 2023 17:17:10 +0530 Subject: net: hsr: Disable promiscuous mode in offload mode When port-to-port forwarding for interfaces in HSR node is enabled, disable promiscuous mode since L2 frame forward happens at the offloaded hardware. Signed-off-by: Ravi Gunasekaran Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230614114710.31400-1-r-gunasekaran@ti.com Signed-off-by: Jakub Kicinski --- net/hsr/hsr_device.c | 5 +++++ net/hsr/hsr_main.h | 1 + net/hsr/hsr_slave.c | 15 +++++++++++---- 3 files changed, 17 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 5a236aae2366..306f942c3b28 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -531,6 +531,11 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], if (res) goto err_add_master; + /* HSR forwarding offload supported in lower device? */ + if ((slave[0]->features & NETIF_F_HW_HSR_FWD) && + (slave[1]->features & NETIF_F_HW_HSR_FWD)) + hsr->fwd_offloaded = true; + res = register_netdevice(hsr_dev); if (res) goto err_unregister; diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 5584c80a5c79..6851e33df7d1 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -208,6 +208,7 @@ struct hsr_priv { u8 net_id; /* for PRP, it occupies most significant 3 bits * of lan_id */ + bool fwd_offloaded; /* Forwarding offloaded to HW */ unsigned char sup_multicast_addr[ETH_ALEN] __aligned(sizeof(u16)); /* Align to u16 boundary to avoid unaligned access * in ether_addr_equal diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index b70e6bbf6021..e5742f2a2d52 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -131,9 +131,14 @@ static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev, struct hsr_port *master; int res; - res = dev_set_promiscuity(dev, 1); - if (res) - return res; + /* Don't use promiscuous mode for offload since L2 frame forward + * happens at the offloaded hardware. + */ + if (!port->hsr->fwd_offloaded) { + res = dev_set_promiscuity(dev, 1); + if (res) + return res; + } master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); hsr_dev = master->dev; @@ -152,7 +157,9 @@ static int hsr_portdev_setup(struct hsr_priv *hsr, struct net_device *dev, fail_rx_handler: netdev_upper_dev_unlink(dev, hsr_dev); fail_upper_dev_link: - dev_set_promiscuity(dev, -1); + if (!port->hsr->fwd_offloaded) + dev_set_promiscuity(dev, -1); + return res; } -- cgit v1.2.3 From 53bf91641ae19fe51fb24422de6d07565a3536d0 Mon Sep 17 00:00:00 2001 From: Abel Wu Date: Tue, 20 Jun 2023 17:27:11 +0800 Subject: inet: Cleanup on charging memory for newly accepted sockets If there is no net-memcg associated with the sock, don't bother calculating its memory usage for charge. 
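For reference, the amount passed to mem_cgroup_charge_skmem() is expressed in whole pages; sk_mem_pages() rounds the byte count up, roughly as in this simplified sketch of the helper in include/net/sock.h (shown only for illustration):

/* simplified: round a byte amount up to a number of pages */
static inline int sk_mem_pages(int amt)
{
	return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;
}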
Signed-off-by: Abel Wu Link: https://lore.kernel.org/r/20230620092712.16217-1-wuyun.abel@bytedance.com Signed-off-by: Jakub Kicinski --- net/ipv4/inet_connection_sock.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'net') diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 15424dec4584..0cc19cfbb673 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -706,20 +706,23 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern) out: release_sock(sk); if (newsk && mem_cgroup_sockets_enabled) { - int amt; + int amt = 0; /* atomically get the memory usage, set and charge the * newsk->sk_memcg. */ lock_sock(newsk); - /* The socket has not been accepted yet, no need to look at - * newsk->sk_wmem_queued. - */ - amt = sk_mem_pages(newsk->sk_forward_alloc + - atomic_read(&newsk->sk_rmem_alloc)); mem_cgroup_sk_alloc(newsk); - if (newsk->sk_memcg && amt) + if (newsk->sk_memcg) { + /* The socket has not been accepted yet, no need + * to look at newsk->sk_wmem_queued. + */ + amt = sk_mem_pages(newsk->sk_forward_alloc + + atomic_read(&newsk->sk_rmem_alloc)); + } + + if (amt) mem_cgroup_charge_skmem(newsk->sk_memcg, amt, GFP_KERNEL | __GFP_NOFAIL); -- cgit v1.2.3 From c026d33b8f5081969503e322ea30d7c1a6b17dda Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Tue, 20 Jun 2023 18:30:14 +0200 Subject: mptcp: move snd_una update earlier for fallback socket That will avoid an unneeded conditional in both the fast-path and in the fallback case and will simplify a bit the next patch. Signed-off-by: Paolo Abeni Reviewed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Signed-off-by: Jakub Kicinski --- net/mptcp/options.c | 6 ++++++ net/mptcp/protocol.c | 6 ------ 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 8a8083207be4..4bdcd2b326bd 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -1119,6 +1119,12 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) mptcp_data_lock(subflow->conn); if (sk_stream_memory_free(sk)) __mptcp_check_push(subflow->conn, sk); + + /* on fallback we just need to ignore the msk-level snd_una, as + * this is really plain TCP + */ + msk->snd_una = READ_ONCE(msk->snd_nxt); + __mptcp_data_acked(subflow->conn); mptcp_data_unlock(subflow->conn); return true; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 992b89c75631..9c756d675d4d 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1004,12 +1004,6 @@ static void __mptcp_clean_una(struct sock *sk) struct mptcp_data_frag *dtmp, *dfrag; u64 snd_una; - /* on fallback we just need to ignore snd_una, as this is really - * plain TCP - */ - if (__mptcp_check_fallback(msk)) - msk->snd_una = READ_ONCE(msk->snd_nxt); - snd_una = msk->snd_una; list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) { if (after64(dfrag->data_seq + dfrag->data_len, snd_una)) -- cgit v1.2.3 From 38967f424b5be79c4c676712e5640d846efd07e3 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Tue, 20 Jun 2023 18:30:15 +0200 Subject: mptcp: track some aggregate data counters Currently there are no data transfer counters accounting for all the subflows used by a given MPTCP socket. The user-space can compute such figures aggregating the subflow info, but that is inaccurate if any subflow is closed before the MPTCP socket itself. 
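With the aggregate counters added below, a monitoring application could read consistent totals straight from the MPTCP socket via the existing MPTCP_INFO getsockopt; a hypothetical user-space sketch (assumes fd is a connected MPTCP socket, field names follow the struct mptcp_info additions in this patch):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/mptcp.h>

#ifndef SOL_MPTCP
#define SOL_MPTCP 284
#endif

static void print_mptcp_byte_counters(int fd)
{
	struct mptcp_info info;
	socklen_t len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (getsockopt(fd, SOL_MPTCP, MPTCP_INFO, &info, &len) < 0) {
		perror("getsockopt(MPTCP_INFO)");
		return;
	}

	printf("bytes sent %llu received %llu retrans %llu acked %llu\n",
	       (unsigned long long)info.mptcpi_bytes_sent,
	       (unsigned long long)info.mptcpi_bytes_received,
	       (unsigned long long)info.mptcpi_bytes_retrans,
	       (unsigned long long)info.mptcpi_bytes_acked);
}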
Add the new counters in the MPTCP socket itself and expose them via the existing diag and sockopt. While touching mptcp_diag_fill_info(), acquire the relevant locks before fetching the msk data, to ensure better data consistency Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/385 Signed-off-by: Paolo Abeni Reviewed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Signed-off-by: Jakub Kicinski --- include/uapi/linux/mptcp.h | 5 +++++ net/mptcp/options.c | 10 ++++++++-- net/mptcp/protocol.c | 11 ++++++++++- net/mptcp/protocol.h | 4 ++++ net/mptcp/sockopt.c | 25 ++++++++++++++++++++----- 5 files changed, 47 insertions(+), 8 deletions(-) (limited to 'net') diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 32af2d278cb4..a124be6ebbba 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -123,6 +123,11 @@ struct mptcp_info { __u8 mptcpi_local_addr_used; __u8 mptcpi_local_addr_max; __u8 mptcpi_csum_enabled; + __u32 mptcpi_retransmits; + __u64 mptcpi_bytes_retrans; + __u64 mptcpi_bytes_sent; + __u64 mptcpi_bytes_received; + __u64 mptcpi_bytes_acked; }; /* diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 4bdcd2b326bd..c254accb14de 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -1026,6 +1026,12 @@ u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq) return cur_seq; } +static void __mptcp_snd_una_update(struct mptcp_sock *msk, u64 new_snd_una) +{ + msk->bytes_acked += new_snd_una - msk->snd_una; + msk->snd_una = new_snd_una; +} + static void ack_update_msk(struct mptcp_sock *msk, struct sock *ssk, struct mptcp_options_received *mp_opt) @@ -1057,7 +1063,7 @@ static void ack_update_msk(struct mptcp_sock *msk, __mptcp_check_push(sk, ssk); if (after64(new_snd_una, old_snd_una)) { - msk->snd_una = new_snd_una; + __mptcp_snd_una_update(msk, new_snd_una); __mptcp_data_acked(sk); } mptcp_data_unlock(sk); @@ -1123,7 +1129,7 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb) /* on fallback we just need to ignore the msk-level snd_una, as * this is really plain TCP */ - msk->snd_una = READ_ONCE(msk->snd_nxt); + __mptcp_snd_una_update(msk, READ_ONCE(msk->snd_nxt)); __mptcp_data_acked(subflow->conn); mptcp_data_unlock(subflow->conn); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 9c756d675d4d..d5b8e488bce1 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -377,6 +377,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { /* in sequence */ + msk->bytes_received += copy_len; WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len); tail = skb_peek_tail(&sk->sk_receive_queue); if (tail && mptcp_try_coalesce(sk, tail, skb)) @@ -760,6 +761,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk) MPTCP_SKB_CB(skb)->map_seq += delta; __skb_queue_tail(&sk->sk_receive_queue, skb); } + msk->bytes_received += end_seq - msk->ack_seq; msk->ack_seq = end_seq; moved = true; } @@ -1531,8 +1533,10 @@ static void mptcp_update_post_push(struct mptcp_sock *msk, * that has been handed to the subflow for transmission * and skip update in case it was old dfrag. 
*/ - if (likely(after64(snd_nxt_new, msk->snd_nxt))) + if (likely(after64(snd_nxt_new, msk->snd_nxt))) { + msk->bytes_sent += snd_nxt_new - msk->snd_nxt; msk->snd_nxt = snd_nxt_new; + } } void mptcp_check_and_set_pending(struct sock *sk) @@ -2590,6 +2594,7 @@ static void __mptcp_retrans(struct sock *sk) } if (copied) { dfrag->already_sent = max(dfrag->already_sent, info.sent); + msk->bytes_retrans += copied; tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, info.size_goal); WRITE_ONCE(msk->allow_infinite_fallback, false); @@ -3102,6 +3107,10 @@ static int mptcp_disconnect(struct sock *sk, int flags) WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); mptcp_pm_data_reset(msk); mptcp_ca_reset(sk); + msk->bytes_acked = 0; + msk->bytes_received = 0; + msk->bytes_sent = 0; + msk->bytes_retrans = 0; WRITE_ONCE(sk->sk_shutdown, 0); sk_error_report(sk); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 47b46602870e..27adfcc5aaa2 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -262,10 +262,13 @@ struct mptcp_sock { u64 local_key; u64 remote_key; u64 write_seq; + u64 bytes_sent; u64 snd_nxt; + u64 bytes_received; u64 ack_seq; atomic64_t rcv_wnd_sent; u64 rcv_data_fin_seq; + u64 bytes_retrans; int rmem_fwd_alloc; struct sock *last_snd; int snd_burst; @@ -274,6 +277,7 @@ struct mptcp_sock { * recovery related fields are under data_lock * protection */ + u64 bytes_acked; u64 snd_una; u64 wnd_end; unsigned long timer_ival; diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index e172a5848b0d..fa5055d5b029 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -889,7 +889,9 @@ out: void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info) { + struct sock *sk = (struct sock *)msk; u32 flags = 0; + bool slow; memset(info, 0, sizeof(*info)); @@ -898,6 +900,9 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info) info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted); info->mptcpi_local_addr_used = READ_ONCE(msk->pm.local_addr_used); + if (inet_sk_state_load(sk) == TCP_LISTEN) + return; + /* The following limits only make sense for the in-kernel PM */ if (mptcp_pm_is_kernel(msk)) { info->mptcpi_subflows_max = @@ -915,11 +920,21 @@ void mptcp_diag_fill_info(struct mptcp_sock *msk, struct mptcp_info *info) if (READ_ONCE(msk->can_ack)) flags |= MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED; info->mptcpi_flags = flags; - info->mptcpi_token = READ_ONCE(msk->token); - info->mptcpi_write_seq = READ_ONCE(msk->write_seq); - info->mptcpi_snd_una = READ_ONCE(msk->snd_una); - info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq); - info->mptcpi_csum_enabled = READ_ONCE(msk->csum_enabled); + mptcp_data_lock(sk); + info->mptcpi_snd_una = msk->snd_una; + info->mptcpi_rcv_nxt = msk->ack_seq; + info->mptcpi_bytes_acked = msk->bytes_acked; + mptcp_data_unlock(sk); + + slow = lock_sock_fast(sk); + info->mptcpi_csum_enabled = msk->csum_enabled; + info->mptcpi_token = msk->token; + info->mptcpi_write_seq = msk->write_seq; + info->mptcpi_retransmits = inet_csk(sk)->icsk_retransmits; + info->mptcpi_bytes_sent = msk->bytes_sent; + info->mptcpi_bytes_received = msk->bytes_received; + info->mptcpi_bytes_retrans = msk->bytes_retrans; + unlock_sock_fast(sk, slow); } EXPORT_SYMBOL_GPL(mptcp_diag_fill_info); -- cgit v1.2.3 From 6f06b4d4d1cc676a3f9d947f931ec3866b6c4f6c Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Tue, 20 Jun 2023 18:30:17 +0200 Subject: mptcp: add subflow unique id The user-space need to properly account the data 
received/sent by individual subflows. When additional subflows are created and/or closed during the MPTCP socket lifetime, the information currently exposed via MPTCP_TCPINFO are not enough: subflows are identified only by the sequential position inside the info dumps, and that will change with the above mentioned events. To solve the above problem, this patch introduces a new subflow identifier that is unique inside the given MPTCP socket scope. The initial subflow get the id 1 and the other subflows get incremental values at join time. Link: https://github.com/multipath-tcp/mptcp_net-next/issues/388 Signed-off-by: Paolo Abeni Reviewed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Signed-off-by: Jakub Kicinski --- net/mptcp/protocol.c | 6 ++++++ net/mptcp/protocol.h | 5 ++++- net/mptcp/subflow.c | 2 ++ 3 files changed, 12 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index d5b8e488bce1..4ebd6e9aa949 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -96,6 +96,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk) list_add(&subflow->node, &msk->conn_list); sock_hold(ssock->sk); subflow->request_mptcp = 1; + subflow->subflow_id = msk->subflow_id++; /* This is the first subflow, always with id 0 */ subflow->local_id_valid = 1; @@ -847,6 +848,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) if (sk->sk_socket && !ssk->sk_socket) mptcp_sock_graft(ssk, sk->sk_socket); + mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++; mptcp_sockopt_sync_locked(msk, ssk); mptcp_subflow_joined(msk, ssk); return true; @@ -2732,6 +2734,7 @@ static int __mptcp_init_sock(struct sock *sk) WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); WRITE_ONCE(msk->allow_infinite_fallback, true); msk->recovery = false; + msk->subflow_id = 1; mptcp_pm_data_init(msk); @@ -3160,6 +3163,9 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk, msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd; msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; + /* passive msk is created after the first/MPC subflow */ + msk->subflow_id = 2; + sock_reset_flag(nsk, SOCK_RCU_FREE); security_inet_csk_clone(nsk, req); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 27adfcc5aaa2..bb4cacd92778 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -323,7 +323,8 @@ struct mptcp_sock { u64 rtt_us; /* last maximum rtt of subflows */ } rcvq_space; - u32 setsockopt_seq; + u32 subflow_id; + u32 setsockopt_seq; char ca_name[TCP_CA_NAME_MAX]; struct mptcp_sock *dl_next; }; @@ -504,6 +505,8 @@ struct mptcp_subflow_context { u8 reset_reason:4; u8 stale_count; + u32 subflow_id; + long delegated_status; unsigned long fail_tout; diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 4688daa6b38b..222dfcdadf2e 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -819,6 +819,7 @@ create_child: if (!ctx->conn) goto fallback; + ctx->subflow_id = 1; owner = mptcp_sk(ctx->conn); mptcp_pm_new_connection(owner, child, 1); @@ -1574,6 +1575,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc, subflow->remote_id = remote_id; subflow->request_join = 1; subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP); + subflow->subflow_id = msk->subflow_id++; mptcp_info2sockaddr(remote, &addr, ssk->sk_family); sock_hold(ssk); -- cgit v1.2.3 From 492432074e4fce4f8880213bf009b47adbf94a3a Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Tue, 20 Jun 2023 18:30:18 +0200 Subject: 
mptcp: introduce MPTCP_FULL_INFO getsockopt Some user-space applications want to monitor the subflows utilization. Dumping the per subflow tcp_info is not enough, as the PM could close and re-create the subflows under-the-hood, fooling the accounting. Even checking the src/dst addresses used by each subflow could not be enough, because new subflows could re-use the same address/port of the just closed one. This patch introduces a new socket option, allow dumping all the relevant information all-at-once (everything, everywhere...), in a consistent manner. Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/388 Signed-off-by: Paolo Abeni Reviewed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Signed-off-by: Jakub Kicinski --- include/uapi/linux/mptcp.h | 24 +++++++++ net/mptcp/sockopt.c | 127 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 149 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index a124be6ebbba..ee9c49f949a2 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -249,9 +249,33 @@ struct mptcp_subflow_addrs { }; }; +struct mptcp_subflow_info { + __u32 id; + struct mptcp_subflow_addrs addrs; +}; + +struct mptcp_full_info { + __u32 size_tcpinfo_kernel; /* must be 0, set by kernel */ + __u32 size_tcpinfo_user; + __u32 size_sfinfo_kernel; /* must be 0, set by kernel */ + __u32 size_sfinfo_user; + __u32 num_subflows; /* must be 0, set by kernel (real subflow count) */ + __u32 size_arrays_user; /* max subflows that userspace is interested in; + * the buffers at subflow_info/tcp_info + * are respectively at least: + * size_arrays * size_sfinfo_user + * size_arrays * size_tcpinfo_user + * bytes wide + */ + __aligned_u64 subflow_info; + __aligned_u64 tcp_info; + struct mptcp_info mptcp_info; +}; + /* MPTCP socket options */ #define MPTCP_INFO 1 #define MPTCP_TCPINFO 2 #define MPTCP_SUBFLOW_ADDRS 3 +#define MPTCP_FULL_INFO 4 #endif /* _UAPI_MPTCP_H */ diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index fa5055d5b029..63f7a09335c5 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -14,7 +14,8 @@ #include #include "protocol.h" -#define MIN_INFO_OPTLEN_SIZE 16 +#define MIN_INFO_OPTLEN_SIZE 16 +#define MIN_FULL_INFO_OPTLEN_SIZE 40 static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk) { @@ -981,7 +982,8 @@ static int mptcp_put_subflow_data(struct mptcp_subflow_data *sfd, } static int mptcp_get_subflow_data(struct mptcp_subflow_data *sfd, - char __user *optval, int __user *optlen) + char __user *optval, + int __user *optlen) { int len, copylen; @@ -1162,6 +1164,125 @@ static int mptcp_getsockopt_subflow_addrs(struct mptcp_sock *msk, char __user *o return 0; } +static int mptcp_get_full_info(struct mptcp_full_info *mfi, + char __user *optval, + int __user *optlen) +{ + int len; + + BUILD_BUG_ON(offsetof(struct mptcp_full_info, mptcp_info) != + MIN_FULL_INFO_OPTLEN_SIZE); + + if (get_user(len, optlen)) + return -EFAULT; + + if (len < MIN_FULL_INFO_OPTLEN_SIZE) + return -EINVAL; + + memset(mfi, 0, sizeof(*mfi)); + if (copy_from_user(mfi, optval, MIN_FULL_INFO_OPTLEN_SIZE)) + return -EFAULT; + + if (mfi->size_tcpinfo_kernel || + mfi->size_sfinfo_kernel || + mfi->num_subflows) + return -EINVAL; + + if (mfi->size_sfinfo_user > INT_MAX || + mfi->size_tcpinfo_user > INT_MAX) + return -EINVAL; + + return len - MIN_FULL_INFO_OPTLEN_SIZE; +} + +static int mptcp_put_full_info(struct mptcp_full_info *mfi, + char __user *optval, + u32 copylen, + int __user 
*optlen) +{ + copylen += MIN_FULL_INFO_OPTLEN_SIZE; + if (put_user(copylen, optlen)) + return -EFAULT; + + if (copy_to_user(optval, mfi, copylen)) + return -EFAULT; + return 0; +} + +static int mptcp_getsockopt_full_info(struct mptcp_sock *msk, char __user *optval, + int __user *optlen) +{ + unsigned int sfcount = 0, copylen = 0; + struct mptcp_subflow_context *subflow; + struct sock *sk = (struct sock *)msk; + void __user *tcpinfoptr, *sfinfoptr; + struct mptcp_full_info mfi; + int len; + + len = mptcp_get_full_info(&mfi, optval, optlen); + if (len < 0) + return len; + + /* don't bother filling the mptcp info if there is not enough + * user-space-provided storage + */ + if (len > 0) { + mptcp_diag_fill_info(msk, &mfi.mptcp_info); + copylen += min_t(unsigned int, len, sizeof(struct mptcp_info)); + } + + mfi.size_tcpinfo_kernel = sizeof(struct tcp_info); + mfi.size_tcpinfo_user = min_t(unsigned int, mfi.size_tcpinfo_user, + sizeof(struct tcp_info)); + sfinfoptr = u64_to_user_ptr(mfi.subflow_info); + mfi.size_sfinfo_kernel = sizeof(struct mptcp_subflow_info); + mfi.size_sfinfo_user = min_t(unsigned int, mfi.size_sfinfo_user, + sizeof(struct mptcp_subflow_info)); + tcpinfoptr = u64_to_user_ptr(mfi.tcp_info); + + lock_sock(sk); + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + struct mptcp_subflow_info sfinfo; + struct tcp_info tcp_info; + + if (sfcount++ >= mfi.size_arrays_user) + continue; + + /* fetch addr/tcp_info only if the user space buffers + * are wide enough + */ + memset(&sfinfo, 0, sizeof(sfinfo)); + sfinfo.id = subflow->subflow_id; + if (mfi.size_sfinfo_user > + offsetof(struct mptcp_subflow_info, addrs)) + mptcp_get_sub_addrs(ssk, &sfinfo.addrs); + if (copy_to_user(sfinfoptr, &sfinfo, mfi.size_sfinfo_user)) + goto fail_release; + + if (mfi.size_tcpinfo_user) { + tcp_get_info(ssk, &tcp_info); + if (copy_to_user(tcpinfoptr, &tcp_info, + mfi.size_tcpinfo_user)) + goto fail_release; + } + + tcpinfoptr += mfi.size_tcpinfo_user; + sfinfoptr += mfi.size_sfinfo_user; + } + release_sock(sk); + + mfi.num_subflows = sfcount; + if (mptcp_put_full_info(&mfi, optval, copylen, optlen)) + return -EFAULT; + + return 0; + +fail_release: + release_sock(sk); + return -EFAULT; +} + static int mptcp_put_int_option(struct mptcp_sock *msk, char __user *optval, int __user *optlen, int val) { @@ -1235,6 +1356,8 @@ static int mptcp_getsockopt_sol_mptcp(struct mptcp_sock *msk, int optname, switch (optname) { case MPTCP_INFO: return mptcp_getsockopt_info(msk, optval, optlen); + case MPTCP_FULL_INFO: + return mptcp_getsockopt_full_info(msk, optval, optlen); case MPTCP_TCPINFO: return mptcp_getsockopt_tcpinfo(msk, optval, optlen); case MPTCP_SUBFLOW_ADDRS: -- cgit v1.2.3 From bbd49d114d5790a2dc3f67926cec3fc48b2d5d81 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Tue, 20 Jun 2023 18:30:21 +0200 Subject: mptcp: consolidate transition to TCP_CLOSE in mptcp_do_fastclose() The MPTCP code always set the msk state to TCP_CLOSE before calling performing the fast-close. Move such state transition in mptcp_do_fastclose() to avoid some code duplication. 
Signed-off-by: Paolo Abeni Reviewed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Signed-off-by: Jakub Kicinski --- net/mptcp/protocol.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 4ebd6e9aa949..f65eec3e0d22 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2655,6 +2655,7 @@ static void mptcp_do_fastclose(struct sock *sk) struct mptcp_subflow_context *subflow, *tmp; struct mptcp_sock *msk = mptcp_sk(sk); + inet_sk_state_store(sk, TCP_CLOSE); mptcp_for_each_subflow_safe(msk, subflow, tmp) __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, MPTCP_CF_FASTCLOSE); @@ -2692,10 +2693,9 @@ static void mptcp_worker(struct work_struct *work) * even if it is orphaned and in FIN_WAIT2 state */ if (sock_flag(sk, SOCK_DEAD)) { - if (mptcp_should_close(sk)) { - inet_sk_state_store(sk, TCP_CLOSE); + if (mptcp_should_close(sk)) mptcp_do_fastclose(sk); - } + if (sk->sk_state == TCP_CLOSE) { __mptcp_destroy_sock(sk); goto unlock; @@ -2938,7 +2938,6 @@ static void __mptcp_destroy_sock(struct sock *sk) void __mptcp_unaccepted_force_close(struct sock *sk) { sock_set_flag(sk, SOCK_DEAD); - inet_sk_state_store(sk, TCP_CLOSE); mptcp_do_fastclose(sk); __mptcp_destroy_sock(sk); } @@ -2980,7 +2979,6 @@ bool __mptcp_close(struct sock *sk, long timeout) /* If the msk has read data, or the caller explicitly ask it, * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose */ - inet_sk_state_store(sk, TCP_CLOSE); mptcp_do_fastclose(sk); timeout = 0; } else if (mptcp_close_state(sk)) { -- cgit v1.2.3 From 528cb5f2a1e859522f36f091f29f5c81ec6d4a4c Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Tue, 20 Jun 2023 18:30:22 +0200 Subject: mptcp: pass addr to mptcp_pm_alloc_anno_list Pass addr parameter to mptcp_pm_alloc_anno_list() instead of entry. We can reduce the scope, e.g. in mptcp_pm_alloc_anno_list(), we only access "entry->addr", we can then restrict to the pointer to "addr" then. 
Signed-off-by: Geliang Tang Reviewed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Signed-off-by: Jakub Kicinski --- net/mptcp/pm_netlink.c | 8 ++++---- net/mptcp/pm_userspace.c | 2 +- net/mptcp/protocol.h | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index a56718ffdd02..547e51c65480 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -341,7 +341,7 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk, } bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, - const struct mptcp_pm_addr_entry *entry) + const struct mptcp_addr_info *addr) { struct mptcp_pm_add_entry *add_entry = NULL; struct sock *sk = (struct sock *)msk; @@ -349,7 +349,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, lockdep_assert_held(&msk->pm.lock); - add_entry = mptcp_lookup_anno_list_by_saddr(msk, &entry->addr); + add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr); if (add_entry) { if (mptcp_pm_is_kernel(msk)) @@ -366,7 +366,7 @@ bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, list_add(&add_entry->list, &msk->pm.anno_list); - add_entry->addr = entry->addr; + add_entry->addr = *addr; add_entry->sock = msk; add_entry->retrans_times = 0; @@ -576,7 +576,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) return; if (local) { - if (mptcp_pm_alloc_anno_list(msk, local)) { + if (mptcp_pm_alloc_anno_list(msk, &local->addr)) { __clear_bit(local->addr.id, msk->pm.id_avail_bitmap); msk->pm.add_addr_signaled++; mptcp_pm_announce_addr(msk, &local->addr, false); diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c index 47a883a16c11..b5a8aa4c1ebd 100644 --- a/net/mptcp/pm_userspace.c +++ b/net/mptcp/pm_userspace.c @@ -193,7 +193,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info) lock_sock((struct sock *)msk); spin_lock_bh(&msk->pm.lock); - if (mptcp_pm_alloc_anno_list(msk, &addr_val)) { + if (mptcp_pm_alloc_anno_list(msk, &addr_val.addr)) { msk->pm.add_addr_signaled++; mptcp_pm_announce_addr(msk, &addr_val.addr, false); mptcp_pm_nl_addr_send_ack(msk); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index bb4cacd92778..3a1a64cdeba6 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -817,7 +817,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, struct mptcp_addr_info *rem, u8 bkup); bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, - const struct mptcp_pm_addr_entry *entry); + const struct mptcp_addr_info *addr); void mptcp_pm_free_anno_list(struct mptcp_sock *msk); bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk); struct mptcp_pm_add_entry * -- cgit v1.2.3 From e38910c0072b541a91954682c8b074a93e57c09b Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Wed, 7 Jun 2023 09:27:08 +0200 Subject: can: isotp: isotp_sendmsg(): fix return error fix on TX path With commit d674a8f123b4 ("can: isotp: isotp_sendmsg(): fix return error on FC timeout on TX path") the missing correct return value in the case of a protocol error was introduced. But the way the error value has been read and sent to the user space does not follow the common scheme to clear the error after reading which is provided by the sock_error() function. This leads to an error report at the following write() attempt although everything should be working. 
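The point of using sock_error() is its read-and-clear semantics: once the pending error has been reported it is consumed, so the next operation starts clean. A simplified sketch of what the helper in include/net/sock.h does (illustrative only):

static inline int sock_error(struct sock *sk)
{
	int err;

	if (likely(!sk->sk_err))
		return 0;

	/* read and clear the pending error in a single step */
	err = xchg(&sk->sk_err, 0);
	return -err;
}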
Fixes: d674a8f123b4 ("can: isotp: isotp_sendmsg(): fix return error on FC timeout on TX path") Reported-by: Carsten Schmidt Signed-off-by: Oliver Hartkopp Link: https://lore.kernel.org/all/20230607072708.38809-1-socketcan@hartkopp.net Cc: stable@vger.kernel.org Signed-off-by: Marc Kleine-Budde --- net/can/isotp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/can/isotp.c b/net/can/isotp.c index 84f9aba02901..ca9d728d6d72 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -1112,8 +1112,9 @@ wait_free_buffer: if (err) goto err_event_drop; - if (sk->sk_err) - return -sk->sk_err; + err = sock_error(sk); + if (err) + return err; } return size; -- cgit v1.2.3 From 8d61f926d42045961e6b65191c09e3678d86a9cf Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 21 Jun 2023 15:43:37 +0000 Subject: netlink: fix potential deadlock in netlink_set_err() syzbot reported a possible deadlock in netlink_set_err() [1] A similar issue was fixed in commit 1d482e666b8e ("netlink: disable IRQs for netlink_lock_table()") in netlink_lock_table() This patch adds IRQ safety to netlink_set_err() and __netlink_diag_dump() which were not covered by cited commit. [1] WARNING: possible irq lock inversion dependency detected 6.4.0-rc6-syzkaller-00240-g4e9f0ec38852 #0 Not tainted syz-executor.2/23011 just changed the state of lock: ffffffff8e1a7a58 (nl_table_lock){.+.?}-{2:2}, at: netlink_set_err+0x2e/0x3a0 net/netlink/af_netlink.c:1612 but this lock was taken by another, SOFTIRQ-safe lock in the past: (&local->queue_stop_reason_lock){..-.}-{2:2} and interrupts could create inverse lock ordering between them. other info that might help us debug this: Possible interrupt unsafe locking scenario: CPU0 CPU1 ---- ---- lock(nl_table_lock); local_irq_disable(); lock(&local->queue_stop_reason_lock); lock(nl_table_lock); lock(&local->queue_stop_reason_lock); *** DEADLOCK *** Fixes: 1d482e666b8e ("netlink: disable IRQs for netlink_lock_table()") Reported-by: syzbot+a7d200a347f912723e5c@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?extid=a7d200a347f912723e5c Link: https://lore.kernel.org/netdev/000000000000e38d1605fea5747e@google.com/T/#u Signed-off-by: Eric Dumazet Cc: Johannes Berg Link: https://lore.kernel.org/r/20230621154337.1668594-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/netlink/af_netlink.c | 5 +++-- net/netlink/diag.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 3a1e0fd5bf14..5968b6450d82 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1600,6 +1600,7 @@ out: int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code) { struct netlink_set_err_data info; + unsigned long flags; struct sock *sk; int ret = 0; @@ -1609,12 +1610,12 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code) /* sk->sk_err wants a positive error value */ info.code = -code; - read_lock(&nl_table_lock); + read_lock_irqsave(&nl_table_lock, flags); sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list) ret += do_one_set_err(sk, &info); - read_unlock(&nl_table_lock); + read_unlock_irqrestore(&nl_table_lock, flags); return ret; } EXPORT_SYMBOL(netlink_set_err); diff --git a/net/netlink/diag.c b/net/netlink/diag.c index c6255eac305c..4143b2ea4195 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c @@ -94,6 +94,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net *net 
= sock_net(skb->sk); struct netlink_diag_req *req; struct netlink_sock *nlsk; + unsigned long flags; struct sock *sk; int num = 2; int ret = 0; @@ -152,7 +153,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, num++; mc_list: - read_lock(&nl_table_lock); + read_lock_irqsave(&nl_table_lock, flags); sk_for_each_bound(sk, &tbl->mc_list) { if (sk_hashed(sk)) continue; @@ -173,7 +174,7 @@ mc_list: } num++; } - read_unlock(&nl_table_lock); + read_unlock_irqrestore(&nl_table_lock, flags); done: cb->args[0] = num; -- cgit v1.2.3 From aa5406950726e336c5c9585b09799a734b6e77bf Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 21 Jun 2023 17:47:20 +0000 Subject: netlink: do not hard code device address lenth in fdb dumps syzbot reports that some netdev devices do not have a six bytes address [1] Replace ETH_ALEN by dev->addr_len. [1] (Case of a device where dev->addr_len = 4) BUG: KMSAN: kernel-infoleak in instrument_copy_to_user include/linux/instrumented.h:114 [inline] BUG: KMSAN: kernel-infoleak in copyout+0xb8/0x100 lib/iov_iter.c:169 instrument_copy_to_user include/linux/instrumented.h:114 [inline] copyout+0xb8/0x100 lib/iov_iter.c:169 _copy_to_iter+0x6d8/0x1d00 lib/iov_iter.c:536 copy_to_iter include/linux/uio.h:206 [inline] simple_copy_to_iter+0x68/0xa0 net/core/datagram.c:513 __skb_datagram_iter+0x123/0xdc0 net/core/datagram.c:419 skb_copy_datagram_iter+0x5c/0x200 net/core/datagram.c:527 skb_copy_datagram_msg include/linux/skbuff.h:3960 [inline] netlink_recvmsg+0x4ae/0x15a0 net/netlink/af_netlink.c:1970 sock_recvmsg_nosec net/socket.c:1019 [inline] sock_recvmsg net/socket.c:1040 [inline] ____sys_recvmsg+0x283/0x7f0 net/socket.c:2722 ___sys_recvmsg+0x223/0x840 net/socket.c:2764 do_recvmmsg+0x4f9/0xfd0 net/socket.c:2858 __sys_recvmmsg net/socket.c:2937 [inline] __do_sys_recvmmsg net/socket.c:2960 [inline] __se_sys_recvmmsg net/socket.c:2953 [inline] __x64_sys_recvmmsg+0x397/0x490 net/socket.c:2953 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd Uninit was stored to memory at: __nla_put lib/nlattr.c:1009 [inline] nla_put+0x1c6/0x230 lib/nlattr.c:1067 nlmsg_populate_fdb_fill+0x2b8/0x600 net/core/rtnetlink.c:4071 nlmsg_populate_fdb net/core/rtnetlink.c:4418 [inline] ndo_dflt_fdb_dump+0x616/0x840 net/core/rtnetlink.c:4456 rtnl_fdb_dump+0x14ff/0x1fc0 net/core/rtnetlink.c:4629 netlink_dump+0x9d1/0x1310 net/netlink/af_netlink.c:2268 netlink_recvmsg+0xc5c/0x15a0 net/netlink/af_netlink.c:1995 sock_recvmsg_nosec+0x7a/0x120 net/socket.c:1019 ____sys_recvmsg+0x664/0x7f0 net/socket.c:2720 ___sys_recvmsg+0x223/0x840 net/socket.c:2764 do_recvmmsg+0x4f9/0xfd0 net/socket.c:2858 __sys_recvmmsg net/socket.c:2937 [inline] __do_sys_recvmmsg net/socket.c:2960 [inline] __se_sys_recvmmsg net/socket.c:2953 [inline] __x64_sys_recvmmsg+0x397/0x490 net/socket.c:2953 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd Uninit was created at: slab_post_alloc_hook+0x12d/0xb60 mm/slab.h:716 slab_alloc_node mm/slub.c:3451 [inline] __kmem_cache_alloc_node+0x4ff/0x8b0 mm/slub.c:3490 kmalloc_trace+0x51/0x200 mm/slab_common.c:1057 kmalloc include/linux/slab.h:559 [inline] __hw_addr_create net/core/dev_addr_lists.c:60 [inline] __hw_addr_add_ex+0x2e5/0x9e0 net/core/dev_addr_lists.c:118 __dev_mc_add net/core/dev_addr_lists.c:867 [inline] dev_mc_add+0x9a/0x130 net/core/dev_addr_lists.c:885 
igmp6_group_added+0x267/0xbc0 net/ipv6/mcast.c:680 ipv6_mc_up+0x296/0x3b0 net/ipv6/mcast.c:2754 ipv6_mc_remap+0x1e/0x30 net/ipv6/mcast.c:2708 addrconf_type_change net/ipv6/addrconf.c:3731 [inline] addrconf_notify+0x4d3/0x1d90 net/ipv6/addrconf.c:3699 notifier_call_chain kernel/notifier.c:93 [inline] raw_notifier_call_chain+0xe4/0x430 kernel/notifier.c:461 call_netdevice_notifiers_info net/core/dev.c:1935 [inline] call_netdevice_notifiers_extack net/core/dev.c:1973 [inline] call_netdevice_notifiers+0x1ee/0x2d0 net/core/dev.c:1987 bond_enslave+0xccd/0x53f0 drivers/net/bonding/bond_main.c:1906 do_set_master net/core/rtnetlink.c:2626 [inline] rtnl_newlink_create net/core/rtnetlink.c:3460 [inline] __rtnl_newlink net/core/rtnetlink.c:3660 [inline] rtnl_newlink+0x378c/0x40e0 net/core/rtnetlink.c:3673 rtnetlink_rcv_msg+0x16a6/0x1840 net/core/rtnetlink.c:6395 netlink_rcv_skb+0x371/0x650 net/netlink/af_netlink.c:2546 rtnetlink_rcv+0x34/0x40 net/core/rtnetlink.c:6413 netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline] netlink_unicast+0xf28/0x1230 net/netlink/af_netlink.c:1365 netlink_sendmsg+0x122f/0x13d0 net/netlink/af_netlink.c:1913 sock_sendmsg_nosec net/socket.c:724 [inline] sock_sendmsg net/socket.c:747 [inline] ____sys_sendmsg+0x999/0xd50 net/socket.c:2503 ___sys_sendmsg+0x28d/0x3c0 net/socket.c:2557 __sys_sendmsg net/socket.c:2586 [inline] __do_sys_sendmsg net/socket.c:2595 [inline] __se_sys_sendmsg net/socket.c:2593 [inline] __x64_sys_sendmsg+0x304/0x490 net/socket.c:2593 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd Bytes 2856-2857 of 3500 are uninitialized Memory access of size 3500 starts at ffff888018d99104 Data copied to user address 0000000020000480 Fixes: d83b06036048 ("net: add fdb generic dump routine") Reported-by: syzbot Signed-off-by: Eric Dumazet Reviewed-by: Jiri Pirko Link: https://lore.kernel.org/r/20230621174720.1845040-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/core/rtnetlink.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 41de3a2f29e1..7776611a14ae 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -4090,7 +4090,7 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb, ndm->ndm_ifindex = dev->ifindex; ndm->ndm_state = ndm_state; - if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) + if (nla_put(skb, NDA_LLADDR, dev->addr_len, addr)) goto nla_put_failure; if (vid) if (nla_put(skb, NDA_VLAN, sizeof(u16), &vid)) @@ -4104,10 +4104,10 @@ nla_put_failure: return -EMSGSIZE; } -static inline size_t rtnl_fdb_nlmsg_size(void) +static inline size_t rtnl_fdb_nlmsg_size(const struct net_device *dev) { return NLMSG_ALIGN(sizeof(struct ndmsg)) + - nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ + nla_total_size(dev->addr_len) + /* NDA_LLADDR */ nla_total_size(sizeof(u16)) + /* NDA_VLAN */ 0; } @@ -4119,7 +4119,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, struct sk_buff *skb; int err = -ENOBUFS; - skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC); + skb = nlmsg_new(rtnl_fdb_nlmsg_size(dev), GFP_ATOMIC); if (!skb) goto errout; -- cgit v1.2.3 From 304b1875ba02022f2504952a32e5e90482bbdb39 Mon Sep 17 00:00:00 2001 From: Yueh-Shun Li Date: Thu, 22 Jun 2023 01:26:31 +0000 Subject: tcp: fix comment typo Spell "transmissions" properly. Found by searching for keyword "tranm". 
Signed-off-by: Yueh-Shun Li Link: https://lore.kernel.org/r/20230622012627.15050-6-shamrocklee@posteo.net Signed-off-by: Jakub Kicinski --- net/ipv4/tcp_input.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index bf8b22218dd4..6f072095211e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2867,7 +2867,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack, } if (tcp_is_reno(tp)) { /* A Reno DUPACK means new data in F-RTO step 2.b above are - * delivered. Lower inflight to clock out (re)tranmissions. + * delivered. Lower inflight to clock out (re)transmissions. */ if (after(tp->snd_nxt, tp->high_seq) && num_dupack) tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE); -- cgit v1.2.3 From a0e128ef88e4a033a52963ec4ad94d96a17f8179 Mon Sep 17 00:00:00 2001 From: Yueh-Shun Li Date: Thu, 22 Jun 2023 01:26:33 +0000 Subject: net/tls: fix comment typo Spell "retransmit" properly. Found by searching for keyword "tranm". Signed-off-by: Yueh-Shun Li Link: https://lore.kernel.org/r/20230622012627.15050-7-shamrocklee@posteo.net Signed-off-by: Jakub Kicinski --- net/tls/tls_device_fallback.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 7fbb1d0b69b3..b28c5e296dfd 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -271,7 +271,7 @@ static int fill_sg_in(struct scatterlist *sg_in, * There is a corner case where the packet contains * both an acked and a non-acked record. * We currently don't handle that case and rely - * on TCP to retranmit a packet that doesn't contain + * on TCP to retransmit a packet that doesn't contain * already acked payload. */ if (!is_start_marker) -- cgit v1.2.3 From 0ec92a8f56ff07237dbe8af7c7a72aba7f957baf Mon Sep 17 00:00:00 2001 From: Piotr Gardocki Date: Wed, 21 Jun 2023 15:21:06 +0200 Subject: net: fix net device address assign type Commit ad72c4a06acc introduced optimization to return from function quickly if the MAC address is not changing at all. It was reported that such change causes dev->addr_assign_type to not change to NET_ADDR_SET from _PERM or _RANDOM. Restore the old behavior and skip only call to ndo_set_mac_address. 
Fixes: ad72c4a06acc ("net: add check for current MAC address in dev_set_mac_address") Reported-by: Gal Pressman Signed-off-by: Piotr Gardocki Reviewed-by: Simon Horman Reviewed-by: Jiri Pirko Link: https://lore.kernel.org/r/20230621132106.991342-1-piotrx.gardocki@intel.com Signed-off-by: Jakub Kicinski --- net/core/dev.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/core/dev.c b/net/core/dev.c index e4ff0adf5523..69a3e544676c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8781,14 +8781,14 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; - if (!memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) - return 0; err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack); if (err) return err; - err = ops->ndo_set_mac_address(dev, sa); - if (err) - return err; + if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) { + err = ops->ndo_set_mac_address(dev, sa); + if (err) + return err; + } dev->addr_assign_type = NET_ADDR_SET; call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); add_device_randomness(dev->dev_addr, dev->addr_len); -- cgit v1.2.3 From 76fa88429075667fe76d4905f2f471e0ac3d543c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 21 Jun 2023 17:45:53 +0100 Subject: net: convert sunrpc from pagevec to folio_batch Remove the last usage of pagevecs. There is a slight change here; we now free the folio_batch as soon as it fills up instead of freeing the folio_batch when we try to add a page to a full batch. This should have no effect in practice. Link: https://lkml.kernel.org/r/20230621164557.3510324-10-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: Chuck Lever Signed-off-by: Andrew Morton --- include/linux/sunrpc/svc.h | 2 +- net/sunrpc/svc.c | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'net') diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 762d7231e574..a3a64fb4053c 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -223,7 +223,7 @@ struct svc_rqst { struct page * *rq_next_page; /* next reply page to use */ struct page * *rq_page_end; /* one past the last page */ - struct pagevec rq_pvec; + struct folio_batch rq_fbatch; struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. 
*/ struct bio_vec rq_bvec[RPCSVC_MAXPAGES]; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 79967b6925bd..8b9011bbece7 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -649,7 +649,7 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node) if (!rqstp) return rqstp; - pagevec_init(&rqstp->rq_pvec); + folio_batch_init(&rqstp->rq_fbatch); __set_bit(RQ_BUSY, &rqstp->rq_flags); rqstp->rq_server = serv; @@ -860,9 +860,9 @@ bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page) } if (*rqstp->rq_next_page) { - if (!pagevec_space(&rqstp->rq_pvec)) - __pagevec_release(&rqstp->rq_pvec); - pagevec_add(&rqstp->rq_pvec, *rqstp->rq_next_page); + if (!folio_batch_add(&rqstp->rq_fbatch, + page_folio(*rqstp->rq_next_page))) + __folio_batch_release(&rqstp->rq_fbatch); } get_page(page); @@ -896,7 +896,7 @@ void svc_rqst_release_pages(struct svc_rqst *rqstp) void svc_rqst_free(struct svc_rqst *rqstp) { - pagevec_release(&rqstp->rq_pvec); + folio_batch_release(&rqstp->rq_fbatch); svc_release_buffer(rqstp); if (rqstp->rq_scratch_page) put_page(rqstp->rq_scratch_page); -- cgit v1.2.3 From 11b73313c12403f617b47752db0ab3deef201af7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 22 Jun 2023 18:15:03 +0000 Subject: sch_netem: fix issues in netem_change() vs get_dist_table() In blamed commit, I missed that get_dist_table() was allocating memory using GFP_KERNEL, and acquiring qdisc lock to perform the swap of newly allocated table with current one. In this patch, get_dist_table() is allocating memory and copy user data before we acquire the qdisc lock. Then we perform swap operations while being protected by the lock. Note that after this patch netem_change() no longer can do partial changes. If an error is returned, qdisc conf is left unchanged. Fixes: 2174a08db80d ("sch_netem: acquire qdisc lock in netem_change()") Reported-by: syzbot Signed-off-by: Eric Dumazet Cc: Stephen Hemminger Acked-by: Jamal Hadi Salim Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230622181503.2327695-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- net/sched/sch_netem.c | 59 ++++++++++++++++++++++----------------------------- 1 file changed, 25 insertions(+), 34 deletions(-) (limited to 'net') diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index e79be1b3e74d..b93ec2a3454e 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -773,12 +773,10 @@ static void dist_free(struct disttable *d) * signed 16 bit values. 
*/ -static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, - const struct nlattr *attr) +static int get_dist_table(struct disttable **tbl, const struct nlattr *attr) { size_t n = nla_len(attr)/sizeof(__s16); const __s16 *data = nla_data(attr); - spinlock_t *root_lock; struct disttable *d; int i; @@ -793,13 +791,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, for (i = 0; i < n; i++) d->table[i] = data[i]; - root_lock = qdisc_root_sleeping_lock(sch); - - spin_lock_bh(root_lock); - swap(*tbl, d); - spin_unlock_bh(root_lock); - - dist_free(d); + *tbl = d; return 0; } @@ -956,6 +948,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, { struct netem_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_NETEM_MAX + 1]; + struct disttable *delay_dist = NULL; + struct disttable *slot_dist = NULL; struct tc_netem_qopt *qopt; struct clgstate old_clg; int old_loss_model = CLG_RANDOM; @@ -966,6 +960,18 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, if (ret < 0) return ret; + if (tb[TCA_NETEM_DELAY_DIST]) { + ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]); + if (ret) + goto table_free; + } + + if (tb[TCA_NETEM_SLOT_DIST]) { + ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]); + if (ret) + goto table_free; + } + sch_tree_lock(sch); /* backup q->clg and q->loss_model */ old_clg = q->clg; @@ -975,26 +981,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); if (ret) { q->loss_model = old_loss_model; + q->clg = old_clg; goto unlock; } } else { q->loss_model = CLG_RANDOM; } - if (tb[TCA_NETEM_DELAY_DIST]) { - ret = get_dist_table(sch, &q->delay_dist, - tb[TCA_NETEM_DELAY_DIST]); - if (ret) - goto get_table_failure; - } - - if (tb[TCA_NETEM_SLOT_DIST]) { - ret = get_dist_table(sch, &q->slot_dist, - tb[TCA_NETEM_SLOT_DIST]); - if (ret) - goto get_table_failure; - } - + if (delay_dist) + swap(q->delay_dist, delay_dist); + if (slot_dist) + swap(q->slot_dist, slot_dist); sch->limit = qopt->limit; q->latency = PSCHED_TICKS2NS(qopt->latency); @@ -1044,17 +1041,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, unlock: sch_tree_unlock(sch); - return ret; -get_table_failure: - /* recover clg and loss_model, in case of - * q->clg and q->loss_model were modified - * in get_loss_clg() - */ - q->clg = old_clg; - q->loss_model = old_loss_model; - - goto unlock; +table_free: + dist_free(delay_dist); + dist_free(slot_dist); + return ret; } static int netem_init(struct Qdisc *sch, struct nlattr *opt, -- cgit v1.2.3 From 3f5f118bb657f94641ea383c7c1b8c09a5d46ea2 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Thu, 22 Jun 2023 11:43:51 -0700 Subject: af_unix: Call scm_recv() only after scm_set_cred(). syzkaller hit a WARN_ON_ONCE(!scm->pid) in scm_pidfd_recv(). In unix_stream_read_generic(), if there is no skb in the queue, we could bail out the do-while loop without calling scm_set_cred(): 1. No skb in the queue 2. sk is non-blocking or shutdown(sk, RCV_SHUTDOWN) is called concurrently or peer calls close() If the socket is configured with SO_PASSCRED or SO_PASSPIDFD, scm_recv() would populate cmsg with garbage. Let's not call scm_recv() unless there is skb to receive. 
WARNING: CPU: 1 PID: 3245 at include/net/scm.h:138 scm_pidfd_recv include/net/scm.h:138 [inline] WARNING: CPU: 1 PID: 3245 at include/net/scm.h:138 scm_recv.constprop.0+0x754/0x850 include/net/scm.h:177 Modules linked in: CPU: 1 PID: 3245 Comm: syz-executor.1 Not tainted 6.4.0-rc5-01219-gfa0e21fa4443 #2 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 RIP: 0010:scm_pidfd_recv include/net/scm.h:138 [inline] RIP: 0010:scm_recv.constprop.0+0x754/0x850 include/net/scm.h:177 Code: 67 fd e9 55 fd ff ff e8 4a 70 67 fd e9 7f fd ff ff e8 40 70 67 fd e9 3e fb ff ff e8 36 70 67 fd e9 02 fd ff ff e8 8c 3a 20 fd <0f> 0b e9 fe fb ff ff e8 50 70 67 fd e9 2e f9 ff ff e8 46 70 67 fd RSP: 0018:ffffc90009af7660 EFLAGS: 00010216 RAX: 00000000000000a1 RBX: ffff888041e58a80 RCX: ffffc90003852000 RDX: 0000000000040000 RSI: ffffffff842675b4 RDI: 0000000000000007 RBP: ffffc90009af7810 R08: 0000000000000007 R09: 0000000000000013 R10: 00000000000000f8 R11: 0000000000000001 R12: ffffc90009af7db0 R13: 0000000000000000 R14: ffff888041e58a88 R15: 1ffff9200135eecc FS: 00007f6b7113f640(0000) GS:ffff88806cf00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f6b7111de38 CR3: 0000000012a6e002 CR4: 0000000000770ee0 PKRU: 55555554 Call Trace: unix_stream_read_generic+0x5fe/0x1f50 net/unix/af_unix.c:2830 unix_stream_recvmsg+0x194/0x1c0 net/unix/af_unix.c:2880 sock_recvmsg_nosec net/socket.c:1019 [inline] sock_recvmsg+0x188/0x1d0 net/socket.c:1040 ____sys_recvmsg+0x210/0x610 net/socket.c:2712 ___sys_recvmsg+0xff/0x190 net/socket.c:2754 do_recvmmsg+0x25d/0x6c0 net/socket.c:2848 __sys_recvmmsg net/socket.c:2927 [inline] __do_sys_recvmmsg net/socket.c:2950 [inline] __se_sys_recvmmsg net/socket.c:2943 [inline] __x64_sys_recvmmsg+0x224/0x290 net/socket.c:2943 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3f/0x90 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x72/0xdc RIP: 0033:0x7f6b71da2e5d Code: ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 73 9f 1b 00 f7 d8 64 89 01 48 RSP: 002b:00007f6b7113ecc8 EFLAGS: 00000246 ORIG_RAX: 000000000000012b RAX: ffffffffffffffda RBX: 00000000004bc050 RCX: 00007f6b71da2e5d RDX: 0000000000000007 RSI: 0000000020006600 RDI: 000000000000000b RBP: 00000000004bc050 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000120 R11: 0000000000000246 R12: 0000000000000000 R13: 000000000000006e R14: 00007f6b71e03530 R15: 0000000000000000 Fixes: 5e2ff6704a27 ("scm: add SO_PASSPIDFD and SCM_PIDFD") Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: syzkaller Signed-off-by: Kuniyuki Iwashima Reviewed-by: Alexander Mikhalitsyn Link: https://lore.kernel.org/r/20230622184351.91544-1-kuniyu@amazon.com Signed-off-by: Jakub Kicinski --- net/unix/af_unix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 73c61a010b01..f9d196439b49 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2826,7 +2826,7 @@ unlock: } while (size); mutex_unlock(&u->iolock); - if (state->msg) + if (state->msg && check_creds) scm_recv(sock, state->msg, &scm, flags); else scm_destroy(&scm); -- cgit v1.2.3 From 2fe11c9d36ee2f3dc3c642588c5d9a22190674c9 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 23 Jun 2023 13:38:55 +0100 Subject: net/tcp: optimise locking for blocking splice Even when 
tcp_splice_read() reads all it was asked for, for blocking sockets it'll release and immediately regrab the socket lock, loop around and break on the while check. Check tss.len right after we adjust it, and return if we're done. That saves us one release_sock(); lock_sock(); pair per successful blocking splice read. Signed-off-by: Pavel Begunkov Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/80736a2cc6d478c383ea565ba825eaf4d1abd876.1687523671.git.asml.silence@gmail.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 71b42eef9dbf..d56edc2c885f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -839,7 +839,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, tss.len -= ret; spliced += ret; - if (!timeo) + if (!tss.len || !timeo) break; release_sock(sk); lock_sock(sk); -- cgit v1.2.3 From f8dd95b29d7ef08c19ec9720564acf72243ddcf6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 23 Jun 2023 23:54:58 +0100 Subject: tcp_bpf, smc, tls, espintcp, siw: Reduce MSG_SENDPAGE_NOTLAST usage As MSG_SENDPAGE_NOTLAST is being phased out along with sendpage(), don't use it further in than the sendpage methods, but rather translate it to MSG_MORE and use that instead. Signed-off-by: David Howells cc: Willem de Bruijn cc: Bernard Metzler cc: Jason Gunthorpe cc: Leon Romanovsky cc: John Fastabend cc: Jakub Sitnicki cc: David Ahern cc: Karsten Graul cc: Wenjia Zhang cc: Jan Karcher cc: "D. Wythe" cc: Tony Lu cc: Wen Gu cc: Boris Pismenny cc: Steffen Klassert cc: Herbert Xu Link: https://lore.kernel.org/r/20230623225513.2732256-2-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- drivers/infiniband/sw/siw/siw_qp_tx.c | 5 ++--- net/ipv4/tcp_bpf.c | 5 +++-- net/smc/smc_tx.c | 6 ++++-- net/tls/tls_device.c | 4 ++-- net/xfrm/espintcp.c | 10 ++++++---- 5 files changed, 17 insertions(+), 13 deletions(-) (limited to 'net') diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c index ffb16beb6c30..7c7a51d36d0c 100644 --- a/drivers/infiniband/sw/siw/siw_qp_tx.c +++ b/drivers/infiniband/sw/siw/siw_qp_tx.c @@ -325,8 +325,7 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset, { struct bio_vec bvec; struct msghdr msg = { - .msg_flags = (MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST | - MSG_SPLICE_PAGES), + .msg_flags = (MSG_MORE | MSG_DONTWAIT | MSG_SPLICE_PAGES), }; struct sock *sk = s->sk; int i = 0, rv = 0, sent = 0; @@ -335,7 +334,7 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset, size_t bytes = min_t(size_t, PAGE_SIZE - offset, size); if (size + offset <= PAGE_SIZE) - msg.msg_flags &= ~MSG_SENDPAGE_NOTLAST; + msg.msg_flags &= ~MSG_MORE; tcp_rate_check_app_limited(sk); bvec_set_page(&bvec, page[i], bytes, offset); diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 5a84053ac62b..31d6005cea9b 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -88,9 +88,9 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes, int flags, bool uncharge) { + struct msghdr msghdr = {}; bool apply = apply_bytes; struct scatterlist *sge; - struct msghdr msghdr = { .msg_flags = flags | MSG_SPLICE_PAGES, }; struct page *page; int size, ret = 0; u32 off; @@ -107,11 +107,12 @@ static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes, tcp_rate_check_app_limited(sk); retry: 
+ msghdr.msg_flags = flags | MSG_SPLICE_PAGES; has_tx_ulp = tls_sw_has_ctx_tx(sk); if (has_tx_ulp) msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY; - if (flags & MSG_SENDPAGE_NOTLAST) + if (size < sge->length && msg->sg.start != msg->sg.end) msghdr.msg_flags |= MSG_MORE; bvec_set_page(&bvec, page, size, off); diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 45128443f1f1..9b9e0a190734 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -168,8 +168,7 @@ static bool smc_tx_should_cork(struct smc_sock *smc, struct msghdr *msg) * should known how/when to uncork it. */ if ((msg->msg_flags & MSG_MORE || - smc_tx_is_corked(smc) || - msg->msg_flags & MSG_SENDPAGE_NOTLAST) && + smc_tx_is_corked(smc)) && atomic_read(&conn->sndbuf_space)) return true; @@ -306,6 +305,9 @@ int smc_tx_sendpage(struct smc_sock *smc, struct page *page, int offset, struct kvec iov; int rc; + if (flags & MSG_SENDPAGE_NOTLAST) + msg.msg_flags |= MSG_MORE; + iov.iov_base = kaddr + offset; iov.iov_len = size; iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size); diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index b82770f68807..975299d7213b 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -449,7 +449,7 @@ static int tls_push_data(struct sock *sk, return -sk->sk_err; flags |= MSG_SENDPAGE_DECRYPTED; - tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST; + tls_push_record_flags = flags | MSG_MORE; timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); if (tls_is_partially_sent_record(tls_ctx)) { @@ -532,7 +532,7 @@ handle_error: if (!size) { last_record: tls_push_record_flags = flags; - if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) { + if (flags & MSG_MORE) { more = true; break; } diff --git a/net/xfrm/espintcp.c b/net/xfrm/espintcp.c index 3504925babdb..d3b3f9e720b3 100644 --- a/net/xfrm/espintcp.c +++ b/net/xfrm/espintcp.c @@ -205,13 +205,15 @@ static int espintcp_sendskb_locked(struct sock *sk, struct espintcp_msg *emsg, static int espintcp_sendskmsg_locked(struct sock *sk, struct espintcp_msg *emsg, int flags) { - struct msghdr msghdr = { .msg_flags = flags | MSG_SPLICE_PAGES, }; + struct msghdr msghdr = { + .msg_flags = flags | MSG_SPLICE_PAGES | MSG_MORE, + }; struct sk_msg *skmsg = &emsg->skmsg; + bool more = flags & MSG_MORE; struct scatterlist *sg; int done = 0; int ret; - msghdr.msg_flags |= MSG_SENDPAGE_NOTLAST; sg = &skmsg->sg.data[skmsg->sg.start]; do { struct bio_vec bvec; @@ -221,8 +223,8 @@ static int espintcp_sendskmsg_locked(struct sock *sk, emsg->offset = 0; - if (sg_is_last(sg)) - msghdr.msg_flags &= ~MSG_SENDPAGE_NOTLAST; + if (sg_is_last(sg) && !more) + msghdr.msg_flags &= ~MSG_MORE; p = sg_page(sg); retry: -- cgit v1.2.3 From c729ed6f5be57b4d164dbbc415554c066e29306b Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 23 Jun 2023 23:54:59 +0100 Subject: net: Use sendmsg(MSG_SPLICE_PAGES) not sendpage in skb_send_sock() Use sendmsg() with MSG_SPLICE_PAGES rather than sendpage in skb_send_sock(). This causes pages to be spliced from the source iterator if possible. This allows ->sendpage() to be replaced by something that can handle multiple multipage folios in a single transaction. Note that this could perhaps be improved to fill out a bvec array with all the frags and then make a single sendmsg call, possibly sticking the header on the front also. 
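In outline (a simplified sketch of the per-fragment step, with the locked/unlocked indirection of __skb_send_sock() left out and an illustrative helper name), each page fragment is now described by a bio_vec and handed to sendmsg():

/* Sketch only: splice one page fragment to a socket via sendmsg().
 * MSG_SPLICE_PAGES lets the transport take a reference on the page instead
 * of copying it, where the underlying socket supports that.
 */
static int send_frag_spliced(struct sock *sk, struct page *page,
                             unsigned int offset, size_t len)
{
        struct bio_vec bvec;
        struct msghdr msg = {
                .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT,
        };

        bvec_set_page(&bvec, page, len, offset);
        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);

        return sock_sendmsg(sk->sk_socket, &msg);
}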
Signed-off-by: David Howells cc: Jens Axboe cc: Matthew Wilcox Link: https://lore.kernel.org/r/20230623225513.2732256-3-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/core/skbuff.c | 50 ++++++++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 22 deletions(-) (limited to 'net') diff --git a/net/core/skbuff.c b/net/core/skbuff.c index fee2b1c105fe..6c5915efbc17 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2989,32 +2989,32 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, } EXPORT_SYMBOL_GPL(skb_splice_bits); -static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg, - struct kvec *vec, size_t num, size_t size) +static int sendmsg_locked(struct sock *sk, struct msghdr *msg) { struct socket *sock = sk->sk_socket; + size_t size = msg_data_left(msg); if (!sock) return -EINVAL; - return kernel_sendmsg(sock, msg, vec, num, size); + + if (!sock->ops->sendmsg_locked) + return sock_no_sendmsg_locked(sk, msg, size); + + return sock->ops->sendmsg_locked(sk, msg, size); } -static int sendpage_unlocked(struct sock *sk, struct page *page, int offset, - size_t size, int flags) +static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) { struct socket *sock = sk->sk_socket; if (!sock) return -EINVAL; - return kernel_sendpage(sock, page, offset, size, flags); + return sock_sendmsg(sock, msg); } -typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg, - struct kvec *vec, size_t num, size_t size); -typedef int (*sendpage_func)(struct sock *sk, struct page *page, int offset, - size_t size, int flags); +typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, - int len, sendmsg_func sendmsg, sendpage_func sendpage) + int len, sendmsg_func sendmsg) { unsigned int orig_len = len; struct sk_buff *head = skb; @@ -3034,8 +3034,9 @@ do_frag_list: memset(&msg, 0, sizeof(msg)); msg.msg_flags = MSG_DONTWAIT; - ret = INDIRECT_CALL_2(sendmsg, kernel_sendmsg_locked, - sendmsg_unlocked, sk, &msg, &kv, 1, slen); + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); + ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, + sendmsg_unlocked, sk, &msg); if (ret <= 0) goto error; @@ -3066,11 +3067,18 @@ do_frag_list: slen = min_t(size_t, len, skb_frag_size(frag) - offset); while (slen) { - ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked, - sendpage_unlocked, sk, - skb_frag_page(frag), - skb_frag_off(frag) + offset, - slen, MSG_DONTWAIT); + struct bio_vec bvec; + struct msghdr msg = { + .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT, + }; + + bvec_set_page(&bvec, skb_frag_page(frag), slen, + skb_frag_off(frag) + offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, + slen); + + ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, + sendmsg_unlocked, sk, &msg); if (ret <= 0) goto error; @@ -3107,16 +3115,14 @@ error: int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int len) { - return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked, - kernel_sendpage_locked); + return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); } EXPORT_SYMBOL_GPL(skb_send_sock_locked); /* Send skb data on a socket. Socket must be unlocked. 
*/ int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) { - return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, - sendpage_unlocked); + return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); } /** -- cgit v1.2.3 From 40a8c17aa7709caaeb9c6945297a98fe8699ca1b Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 23 Jun 2023 23:55:00 +0100 Subject: ceph: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage Use sendmsg() and MSG_SPLICE_PAGES rather than sendpage in ceph when transmitting data. For the moment, this can only transmit one page at a time because of the architecture of net/ceph/, but if write_partial_message_data() can be given a bvec[] at a time by the iteration code, this would allow pages to be sent in a batch. Signed-off-by: David Howells cc: Ilya Dryomov cc: Xiubo Li cc: Jeff Layton cc: Jens Axboe cc: Matthew Wilcox Link: https://lore.kernel.org/r/20230623225513.2732256-4-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/ceph/messenger_v1.c | 60 +++++++++++++++++-------------------------------- 1 file changed, 20 insertions(+), 40 deletions(-) (limited to 'net') diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c index d664cb1593a7..814579f27f04 100644 --- a/net/ceph/messenger_v1.c +++ b/net/ceph/messenger_v1.c @@ -74,37 +74,6 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, return r; } -/* - * @more: either or both of MSG_MORE and MSG_SENDPAGE_NOTLAST - */ -static int ceph_tcp_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int more) -{ - ssize_t (*sendpage)(struct socket *sock, struct page *page, - int offset, size_t size, int flags); - int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more; - int ret; - - /* - * sendpage cannot properly handle pages with page_count == 0, - * we need to fall back to sendmsg if that's the case. - * - * Same goes for slab pages: skb_can_coalesce() allows - * coalescing neighboring slab objects into a single frag which - * triggers one of hardened usercopy checks. - */ - if (sendpage_ok(page)) - sendpage = sock->ops->sendpage; - else - sendpage = sock_no_sendpage; - - ret = sendpage(sock, page, offset, size, flags); - if (ret == -EAGAIN) - ret = 0; - - return ret; -} - static void con_out_kvec_reset(struct ceph_connection *con) { BUG_ON(con->v1.out_skip); @@ -464,7 +433,6 @@ static int write_partial_message_data(struct ceph_connection *con) struct ceph_msg *msg = con->out_msg; struct ceph_msg_data_cursor *cursor = &msg->cursor; bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC); - int more = MSG_MORE | MSG_SENDPAGE_NOTLAST; u32 crc; dout("%s %p msg %p\n", __func__, con, msg); @@ -482,6 +450,10 @@ static int write_partial_message_data(struct ceph_connection *con) */ crc = do_datacrc ? 
le32_to_cpu(msg->footer.data_crc) : 0; while (cursor->total_resid) { + struct bio_vec bvec; + struct msghdr msghdr = { + .msg_flags = MSG_SPLICE_PAGES, + }; struct page *page; size_t page_offset; size_t length; @@ -493,10 +465,13 @@ static int write_partial_message_data(struct ceph_connection *con) } page = ceph_msg_data_next(cursor, &page_offset, &length); - if (length == cursor->total_resid) - more = MSG_MORE; - ret = ceph_tcp_sendpage(con->sock, page, page_offset, length, - more); + if (length != cursor->total_resid) + msghdr.msg_flags |= MSG_MORE; + + bvec_set_page(&bvec, page, length, page_offset); + iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, length); + + ret = sock_sendmsg(con->sock, &msghdr); if (ret <= 0) { if (do_datacrc) msg->footer.data_crc = cpu_to_le32(crc); @@ -526,7 +501,10 @@ static int write_partial_message_data(struct ceph_connection *con) */ static int write_partial_skip(struct ceph_connection *con) { - int more = MSG_MORE | MSG_SENDPAGE_NOTLAST; + struct bio_vec bvec; + struct msghdr msghdr = { + .msg_flags = MSG_SPLICE_PAGES | MSG_MORE, + }; int ret; dout("%s %p %d left\n", __func__, con, con->v1.out_skip); @@ -534,9 +512,11 @@ static int write_partial_skip(struct ceph_connection *con) size_t size = min(con->v1.out_skip, (int)PAGE_SIZE); if (size == con->v1.out_skip) - more = MSG_MORE; - ret = ceph_tcp_sendpage(con->sock, ceph_zero_page, 0, size, - more); + msghdr.msg_flags &= ~MSG_MORE; + bvec_set_page(&bvec, ZERO_PAGE(0), size, 0); + iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size); + + ret = sock_sendmsg(con->sock, &msghdr); if (ret <= 0) goto out; con->v1.out_skip -= ret; -- cgit v1.2.3 From fa094ccae1e7904d9fa7bb1bc44c9873eec7baf4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 23 Jun 2023 23:55:01 +0100 Subject: ceph: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage() Use sendmsg() and MSG_SPLICE_PAGES rather than sendpage in ceph when transmitting data. For the moment, this can only transmit one page at a time because of the architecture of net/ceph/, but if write_partial_message_data() can be given a bvec[] at a time by the iteration code, this would allow pages to be sent in a batch. 
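Reduced to its core (a hedged sketch, not the exact function the diff below introduces), the v2 transmit step becomes a single sock_sendmsg() over the connection's BVEC iterator, with MSG_SPLICE_PAGES set only when the data is known to be splice-safe:

/* Sketch only: send whatever the iterator describes, splicing pages when the
 * caller says that is safe, and fold -EAGAIN into "wrote nothing, retry later".
 */
static int send_iter_sketch(struct socket *sock, struct iov_iter *it,
                            bool may_splice)
{
        struct msghdr msg = {
                .msg_iter  = *it,
                .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
        };
        int ret;

        if (may_splice)
                msg.msg_flags |= MSG_SPLICE_PAGES;

        ret = sock_sendmsg(sock, &msg);
        if (ret > 0)
                iov_iter_advance(it, ret);      /* keep the caller's cursor in step */
        else if (ret == -EAGAIN)
                ret = 0;
        return ret;
}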
Signed-off-by: David Howells cc: Ilya Dryomov cc: Xiubo Li cc: Jeff Layton cc: Jens Axboe cc: Matthew Wilcox Link: https://lore.kernel.org/r/20230623225513.2732256-5-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/ceph/messenger_v2.c | 91 +++++++++++-------------------------------------- 1 file changed, 19 insertions(+), 72 deletions(-) (limited to 'net') diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c index 301a991dc6a6..87ac97073e75 100644 --- a/net/ceph/messenger_v2.c +++ b/net/ceph/messenger_v2.c @@ -117,91 +117,38 @@ static int ceph_tcp_recv(struct ceph_connection *con) return ret; } -static int do_sendmsg(struct socket *sock, struct iov_iter *it) -{ - struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS }; - int ret; - - msg.msg_iter = *it; - while (iov_iter_count(it)) { - ret = sock_sendmsg(sock, &msg); - if (ret <= 0) { - if (ret == -EAGAIN) - ret = 0; - return ret; - } - - iov_iter_advance(it, ret); - } - - WARN_ON(msg_data_left(&msg)); - return 1; -} - -static int do_try_sendpage(struct socket *sock, struct iov_iter *it) -{ - struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS }; - struct bio_vec bv; - int ret; - - if (WARN_ON(!iov_iter_is_bvec(it))) - return -EINVAL; - - while (iov_iter_count(it)) { - /* iov_iter_iovec() for ITER_BVEC */ - bvec_set_page(&bv, it->bvec->bv_page, - min(iov_iter_count(it), - it->bvec->bv_len - it->iov_offset), - it->bvec->bv_offset + it->iov_offset); - - /* - * sendpage cannot properly handle pages with - * page_count == 0, we need to fall back to sendmsg if - * that's the case. - * - * Same goes for slab pages: skb_can_coalesce() allows - * coalescing neighboring slab objects into a single frag - * which triggers one of hardened usercopy checks. - */ - if (sendpage_ok(bv.bv_page)) { - ret = sock->ops->sendpage(sock, bv.bv_page, - bv.bv_offset, bv.bv_len, - CEPH_MSG_FLAGS); - } else { - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len); - ret = sock_sendmsg(sock, &msg); - } - if (ret <= 0) { - if (ret == -EAGAIN) - ret = 0; - return ret; - } - - iov_iter_advance(it, ret); - } - - return 1; -} - /* * Write as much as possible. The socket is expected to be corked, - * so we don't bother with MSG_MORE/MSG_SENDPAGE_NOTLAST here. + * so we don't bother with MSG_MORE here. 
* * Return: - * 1 - done, nothing (else) to write + * >0 - done, nothing (else) to write * 0 - socket is full, need to wait * <0 - error */ static int ceph_tcp_send(struct ceph_connection *con) { + struct msghdr msg = { + .msg_iter = con->v2.out_iter, + .msg_flags = CEPH_MSG_FLAGS, + }; int ret; + if (WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter))) + return -EINVAL; + + if (con->v2.out_iter_sendpage) + msg.msg_flags |= MSG_SPLICE_PAGES; + dout("%s con %p have %zu try_sendpage %d\n", __func__, con, iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage); - if (con->v2.out_iter_sendpage) - ret = do_try_sendpage(con->sock, &con->v2.out_iter); - else - ret = do_sendmsg(con->sock, &con->v2.out_iter); + + ret = sock_sendmsg(con->sock, &msg); + if (ret > 0) + iov_iter_advance(&con->v2.out_iter, ret); + else if (ret == -EAGAIN) + ret = 0; + dout("%s con %p ret %d left %zu\n", __func__, con, ret, iov_iter_count(&con->v2.out_iter)); return ret; -- cgit v1.2.3 From 572efade27c55f11436ed10bbb59ef10c6d83d93 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 23 Jun 2023 23:55:02 +0100 Subject: rds: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage When transmitting data, call down into TCP using a single sendmsg with MSG_SPLICE_PAGES to indicate that content should be spliced. To make this work, the data is assembled in a bio_vec array and attached to a BVEC-type iterator. Signed-off-by: David Howells cc: Santosh Shilimkar cc: Jens Axboe cc: Matthew Wilcox cc: rds-devel@oss.oracle.com Link: https://lore.kernel.org/r/20230623225513.2732256-6-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/rds/tcp_send.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index 8c4d1d6e9249..7d284ac7e81a 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c @@ -72,9 +72,10 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, { struct rds_conn_path *cp = rm->m_inc.i_conn_path; struct rds_tcp_connection *tc = cp->cp_transport_data; + struct msghdr msg = {}; + struct bio_vec bvec; int done = 0; int ret = 0; - int more; if (hdr_off == 0) { /* @@ -111,15 +112,17 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, goto out; } - more = rm->data.op_nents > 1 ? 
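The diff below still maps one scatterlist entry per sendmsg() call; the same interface also allows a whole run of entries to be pushed at once, sketched here on the assumption that the entry count fits a small on-stack array (illustrative only, not code from this patch):

/* Sketch only: describe several sg entries in one bio_vec array and push them
 * with a single sendmsg(MSG_SPLICE_PAGES) call. "total" must equal the sum of
 * the entry lengths.
 */
static int send_sg_batch(struct socket *sock, struct scatterlist *sg,
                         unsigned int nents, size_t total)
{
        struct bio_vec bv[MAX_SKB_FRAGS];
        struct msghdr msg = {
                .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL,
        };
        unsigned int i;

        if (WARN_ON_ONCE(nents > ARRAY_SIZE(bv)))
                return -EINVAL;

        for (i = 0; i < nents; i++, sg = sg_next(sg))
                bvec_set_page(&bv[i], sg_page(sg), sg->length, sg->offset);

        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bv, nents, total);
        return sock_sendmsg(sock, &msg);
}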
(MSG_MORE | MSG_SENDPAGE_NOTLAST) : 0; while (sg < rm->data.op_nents) { - int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more; - - ret = tc->t_sock->ops->sendpage(tc->t_sock, - sg_page(&rm->data.op_sg[sg]), - rm->data.op_sg[sg].offset + off, - rm->data.op_sg[sg].length - off, - flags); + msg.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL; + if (sg + 1 < rm->data.op_nents) + msg.msg_flags |= MSG_MORE; + + bvec_set_page(&bvec, sg_page(&rm->data.op_sg[sg]), + rm->data.op_sg[sg].length - off, + rm->data.op_sg[sg].offset + off); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, + rm->data.op_sg[sg].length - off); + ret = sock_sendmsg(tc->t_sock, &msg); rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]), rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off, ret); @@ -132,8 +135,6 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, off = 0; sg++; } - if (sg == rm->data.op_nents - 1) - more = 0; } out: -- cgit v1.2.3 From 2f8bc2bbb0fa87bcf7fb9eeb65eb6d79c5a08895 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 23 Jun 2023 23:55:06 +0100 Subject: smc: Drop smc_sendpage() in favour of smc_sendmsg() + MSG_SPLICE_PAGES Drop the smc_sendpage() code as smc_sendmsg() just passes the call down to the underlying TCP socket and smc_tx_sendpage() is just a wrapper around its sendmsg implementation. Signed-off-by: David Howells cc: Karsten Graul cc: Wenjia Zhang cc: Jan Karcher cc: "D. Wythe" cc: Tony Lu cc: Wen Gu cc: Jens Axboe cc: Matthew Wilcox Link: https://lore.kernel.org/r/20230623225513.2732256-10-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- net/smc/af_smc.c | 29 ----------------------------- net/smc/smc_stats.c | 2 +- net/smc/smc_stats.h | 1 - net/smc/smc_tx.c | 19 ------------------- net/smc/smc_tx.h | 2 -- 5 files changed, 1 insertion(+), 52 deletions(-) (limited to 'net') diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 538e9c6ec8c9..a7f887d91d89 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -3133,34 +3133,6 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, return put_user(answ, (int __user *)arg); } -static ssize_t smc_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags) -{ - struct sock *sk = sock->sk; - struct smc_sock *smc; - int rc = -EPIPE; - - smc = smc_sk(sk); - lock_sock(sk); - if (sk->sk_state != SMC_ACTIVE) { - release_sock(sk); - goto out; - } - release_sock(sk); - if (smc->use_fallback) { - rc = kernel_sendpage(smc->clcsock, page, offset, - size, flags); - } else { - lock_sock(sk); - rc = smc_tx_sendpage(smc, page, offset, size, flags); - release_sock(sk); - SMC_STAT_INC(smc, sendpage_cnt); - } - -out: - return rc; -} - /* Map the affected portions of the rmbe into an spd, note the number of bytes * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor * updates till whenever a respective page has been fully processed. 
@@ -3232,7 +3204,6 @@ static const struct proto_ops smc_sock_ops = { .sendmsg = smc_sendmsg, .recvmsg = smc_recvmsg, .mmap = sock_no_mmap, - .sendpage = smc_sendpage, .splice_read = smc_splice_read, }; diff --git a/net/smc/smc_stats.c b/net/smc/smc_stats.c index e80e34f7ac15..ca14c0f3a07d 100644 --- a/net/smc/smc_stats.c +++ b/net/smc/smc_stats.c @@ -227,7 +227,7 @@ static int smc_nl_fill_stats_tech_data(struct sk_buff *skb, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_SENDPAGE_CNT, - smc_tech->sendpage_cnt, + 0, SMC_NLA_STATS_PAD)) goto errattr; if (nla_put_u64_64bit(skb, SMC_NLA_STATS_T_CORK_CNT, diff --git a/net/smc/smc_stats.h b/net/smc/smc_stats.h index 84b7ecd8c05c..b60fe1eb37ab 100644 --- a/net/smc/smc_stats.h +++ b/net/smc/smc_stats.h @@ -71,7 +71,6 @@ struct smc_stats_tech { u64 clnt_v2_succ_cnt; u64 srv_v1_succ_cnt; u64 srv_v2_succ_cnt; - u64 sendpage_cnt; u64 urg_data_cnt; u64 splice_cnt; u64 cork_cnt; diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 9b9e0a190734..3b0ff3b589c7 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -297,25 +297,6 @@ out_err: return rc; } -int smc_tx_sendpage(struct smc_sock *smc, struct page *page, int offset, - size_t size, int flags) -{ - struct msghdr msg = {.msg_flags = flags}; - char *kaddr = kmap(page); - struct kvec iov; - int rc; - - if (flags & MSG_SENDPAGE_NOTLAST) - msg.msg_flags |= MSG_MORE; - - iov.iov_base = kaddr + offset; - iov.iov_len = size; - iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size); - rc = smc_tx_sendmsg(smc, &msg, size); - kunmap(page); - return rc; -} - /***************************** sndbuf consumer *******************************/ /* sndbuf consumer: actual data transfer of one target chunk with ISM write */ diff --git a/net/smc/smc_tx.h b/net/smc/smc_tx.h index 34b578498b1f..a59f370b8b43 100644 --- a/net/smc/smc_tx.h +++ b/net/smc/smc_tx.h @@ -31,8 +31,6 @@ void smc_tx_pending(struct smc_connection *conn); void smc_tx_work(struct work_struct *work); void smc_tx_init(struct smc_sock *smc); int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len); -int smc_tx_sendpage(struct smc_sock *smc, struct page *page, int offset, - size_t size, int flags); int smc_tx_sndbuf_nonempty(struct smc_connection *conn); void smc_tx_sndbuf_nonfull(struct smc_sock *smc); void smc_tx_consumer_update(struct smc_connection *conn, bool force); -- cgit v1.2.3 From dc97391e661009eab46783030d2404c9b6e6f2e7 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 23 Jun 2023 23:55:12 +0100 Subject: sock: Remove ->sendpage*() in favour of sendmsg(MSG_SPLICE_PAGES) Remove ->sendpage() and ->sendpage_locked(). sendmsg() with MSG_SPLICE_PAGES should be used instead. This allows multiple pages and multipage folios to be passed through. 
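After this change there is simply no .sendpage slot left for a protocol to fill in; a hedged sketch of an ops table for an imaginary protocol (names illustrative, not taken from the patch) shows the shape that remains:

/* Sketch only: post-removal proto_ops for a hypothetical protocol. Spliced
 * transfers arrive through .sendmsg with MSG_SPLICE_PAGES set; the stubs used
 * here are the generic no-op helpers.
 */
static const struct proto_ops foo_proto_ops = {
        .family   = PF_UNSPEC,
        .owner    = THIS_MODULE,
        .sendmsg  = sock_no_sendmsg,    /* a real protocol honours MSG_SPLICE_PAGES here */
        .recvmsg  = sock_no_recvmsg,
        .mmap     = sock_no_mmap,
        /* no .sendpage or .sendpage_locked members exist any more */
};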
Signed-off-by: David Howells Acked-by: Marc Kleine-Budde # for net/can cc: Jens Axboe cc: Matthew Wilcox cc: linux-afs@lists.infradead.org cc: mptcp@lists.linux.dev cc: rds-devel@oss.oracle.com cc: tipc-discussion@lists.sourceforge.net cc: virtualization@lists.linux-foundation.org Link: https://lore.kernel.org/r/20230623225513.2732256-16-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- Documentation/bpf/map_sockmap.rst | 10 ++--- Documentation/filesystems/locking.rst | 2 - Documentation/filesystems/vfs.rst | 1 - Documentation/networking/scaling.rst | 4 +- crypto/af_alg.c | 28 ------------- crypto/algif_aead.c | 22 ++-------- crypto/algif_rng.c | 2 - crypto/algif_skcipher.c | 14 ------- .../ethernet/chelsio/inline_crypto/chtls/chtls.h | 2 - .../chelsio/inline_crypto/chtls/chtls_io.c | 14 ------- .../chelsio/inline_crypto/chtls/chtls_main.c | 1 - fs/nfsd/vfs.c | 2 +- include/crypto/if_alg.h | 2 - include/linux/net.h | 8 ---- include/net/inet_common.h | 2 - include/net/sock.h | 6 --- include/net/tcp.h | 4 -- net/appletalk/ddp.c | 1 - net/atm/pvc.c | 1 - net/atm/svc.c | 1 - net/ax25/af_ax25.c | 1 - net/caif/caif_socket.c | 2 - net/can/bcm.c | 1 - net/can/isotp.c | 1 - net/can/j1939/socket.c | 1 - net/can/raw.c | 1 - net/core/sock.c | 35 +--------------- net/dccp/ipv4.c | 1 - net/dccp/ipv6.c | 1 - net/ieee802154/socket.c | 2 - net/ipv4/af_inet.c | 21 ---------- net/ipv4/tcp.c | 43 ++----------------- net/ipv4/tcp_bpf.c | 23 +---------- net/ipv4/tcp_ipv4.c | 1 - net/ipv4/udp.c | 15 ------- net/ipv4/udp_impl.h | 2 - net/ipv4/udplite.c | 1 - net/ipv6/af_inet6.c | 3 -- net/ipv6/raw.c | 1 - net/ipv6/tcp_ipv6.c | 1 - net/kcm/kcmsock.c | 20 --------- net/key/af_key.c | 1 - net/l2tp/l2tp_ip.c | 1 - net/l2tp/l2tp_ip6.c | 1 - net/llc/af_llc.c | 1 - net/mctp/af_mctp.c | 1 - net/mptcp/protocol.c | 2 - net/netlink/af_netlink.c | 1 - net/netrom/af_netrom.c | 1 - net/packet/af_packet.c | 2 - net/phonet/socket.c | 2 - net/qrtr/af_qrtr.c | 1 - net/rds/af_rds.c | 1 - net/rose/af_rose.c | 1 - net/rxrpc/af_rxrpc.c | 1 - net/sctp/protocol.c | 1 - net/socket.c | 48 ---------------------- net/tipc/socket.c | 3 -- net/tls/tls.h | 6 --- net/tls/tls_device.c | 17 -------- net/tls/tls_main.c | 7 ---- net/tls/tls_sw.c | 35 ---------------- net/unix/af_unix.c | 19 --------- net/vmw_vsock/af_vsock.c | 3 -- net/x25/af_x25.c | 1 - net/xdp/xsk.c | 1 - 66 files changed, 20 insertions(+), 442 deletions(-) (limited to 'net') diff --git a/Documentation/bpf/map_sockmap.rst b/Documentation/bpf/map_sockmap.rst index cc92047c6630..2d630686a00b 100644 --- a/Documentation/bpf/map_sockmap.rst +++ b/Documentation/bpf/map_sockmap.rst @@ -240,11 +240,11 @@ offsets into ``msg``, respectively. If a program of type ``BPF_PROG_TYPE_SK_MSG`` is run on a ``msg`` it can only parse data that the (``data``, ``data_end``) pointers have already consumed. For ``sendmsg()`` hooks this is likely the first scatterlist element. But for -calls relying on the ``sendpage`` handler (e.g., ``sendfile()``) this will be -the range (**0**, **0**) because the data is shared with user space and by -default the objective is to avoid allowing user space to modify data while (or -after) BPF verdict is being decided. This helper can be used to pull in data -and to set the start and end pointers to given values. 
Data will be copied if +calls relying on MSG_SPLICE_PAGES (e.g., ``sendfile()``) this will be the +range (**0**, **0**) because the data is shared with user space and by default +the objective is to avoid allowing user space to modify data while (or after) +BPF verdict is being decided. This helper can be used to pull in data and to +set the start and end pointers to given values. Data will be copied if necessary (i.e., if data was not linear and if start and end pointers do not point to the same chunk). diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst index aa1a233b0fa8..ed148919e11a 100644 --- a/Documentation/filesystems/locking.rst +++ b/Documentation/filesystems/locking.rst @@ -521,8 +521,6 @@ prototypes:: int (*fsync) (struct file *, loff_t start, loff_t end, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); - ssize_t (*sendpage) (struct file *, struct page *, int, size_t, - loff_t *, int); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index 769be5230210..cb2a97e49872 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -1086,7 +1086,6 @@ This describes how the VFS can manipulate an open file. As of kernel int (*fsync) (struct file *, loff_t, loff_t, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); - ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); int (*flock) (struct file *, int, struct file_lock *); diff --git a/Documentation/networking/scaling.rst b/Documentation/networking/scaling.rst index 3d435caa3ef2..92c9fb46d6a2 100644 --- a/Documentation/networking/scaling.rst +++ b/Documentation/networking/scaling.rst @@ -269,8 +269,8 @@ a single application thread handles flows with many different flow hashes. rps_sock_flow_table is a global flow table that contains the *desired* CPU for flows: the CPU that is currently processing the flow in userspace. Each table value is a CPU index that is updated during calls to recvmsg -and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage() -and tcp_splice_read()). +and sendmsg (specifically, inet_recvmsg(), inet_sendmsg() and +tcp_splice_read()). When the scheduler moves a thread to a new CPU while it has outstanding receive packets on the old CPU, packets may arrive out of order. To diff --git a/crypto/af_alg.c b/crypto/af_alg.c index cdb1dcc5dd1a..6218c773d71c 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -482,7 +482,6 @@ static const struct proto_ops alg_proto_ops = { .listen = sock_no_listen, .shutdown = sock_no_shutdown, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, @@ -1106,33 +1105,6 @@ unlock: } EXPORT_SYMBOL_GPL(af_alg_sendmsg); -/** - * af_alg_sendpage - sendpage system call handler - * @sock: socket of connection to user space to write to - * @page: data to send - * @offset: offset into page to begin sending - * @size: length of data - * @flags: message send/receive flags - * - * This is a generic implementation of sendpage to fill ctx->tsgl_list. 
- */ -ssize_t af_alg_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags) -{ - struct bio_vec bvec; - struct msghdr msg = { - .msg_flags = flags | MSG_SPLICE_PAGES, - }; - - if (flags & MSG_SENDPAGE_NOTLAST) - msg.msg_flags |= MSG_MORE; - - bvec_set_page(&bvec, page, size, offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - return sock_sendmsg(sock, &msg); -} -EXPORT_SYMBOL_GPL(af_alg_sendpage); - /** * af_alg_free_resources - release resources required for crypto request * @areq: Request holding the TX and RX SGL diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 35bfa283748d..7d58cbbce4af 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -9,10 +9,10 @@ * The following concept of the memory management is used: * * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is - * filled by user space with the data submitted via sendpage. Filling up - * the TX SGL does not cause a crypto operation -- the data will only be - * tracked by the kernel. Upon receipt of one recvmsg call, the caller must - * provide a buffer which is tracked with the RX SGL. + * filled by user space with the data submitted via sendmsg (maybe with + * MSG_SPLICE_PAGES). Filling up the TX SGL does not cause a crypto operation + * -- the data will only be tracked by the kernel. Upon receipt of one recvmsg + * call, the caller must provide a buffer which is tracked with the RX SGL. * * During the processing of the recvmsg operation, the cipher request is * allocated and prepared. As part of the recvmsg operation, the processed @@ -370,7 +370,6 @@ static struct proto_ops algif_aead_ops = { .release = af_alg_release, .sendmsg = aead_sendmsg, - .sendpage = af_alg_sendpage, .recvmsg = aead_recvmsg, .poll = af_alg_poll, }; @@ -422,18 +421,6 @@ static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg, return aead_sendmsg(sock, msg, size); } -static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page, - int offset, size_t size, int flags) -{ - int err; - - err = aead_check_key(sock); - if (err) - return err; - - return af_alg_sendpage(sock, page, offset, size, flags); -} - static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { @@ -461,7 +448,6 @@ static struct proto_ops algif_aead_ops_nokey = { .release = af_alg_release, .sendmsg = aead_sendmsg_nokey, - .sendpage = aead_sendpage_nokey, .recvmsg = aead_recvmsg_nokey, .poll = af_alg_poll, }; diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 407408c43730..10c41adac3b1 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c @@ -174,7 +174,6 @@ static struct proto_ops algif_rng_ops = { .bind = sock_no_bind, .accept = sock_no_accept, .sendmsg = sock_no_sendmsg, - .sendpage = sock_no_sendpage, .release = af_alg_release, .recvmsg = rng_recvmsg, @@ -192,7 +191,6 @@ static struct proto_ops __maybe_unused algif_rng_test_ops = { .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .sendpage = sock_no_sendpage, .release = af_alg_release, .recvmsg = rng_test_recvmsg, diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index b1f321b9f846..9ada9b741af8 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -194,7 +194,6 @@ static struct proto_ops algif_skcipher_ops = { .release = af_alg_release, .sendmsg = skcipher_sendmsg, - .sendpage = af_alg_sendpage, .recvmsg = skcipher_recvmsg, .poll = af_alg_poll, }; @@ -246,18 +245,6 @@ static int skcipher_sendmsg_nokey(struct socket *sock, 
struct msghdr *msg, return skcipher_sendmsg(sock, msg, size); } -static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page, - int offset, size_t size, int flags) -{ - int err; - - err = skcipher_check_key(sock); - if (err) - return err; - - return af_alg_sendpage(sock, page, offset, size, flags); -} - static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg, size_t ignored, int flags) { @@ -285,7 +272,6 @@ static struct proto_ops algif_skcipher_ops_nokey = { .release = af_alg_release, .sendmsg = skcipher_sendmsg_nokey, - .sendpage = skcipher_sendpage_nokey, .recvmsg = skcipher_recvmsg_nokey, .poll = af_alg_poll, }; diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h index da4818d2c856..68562a82d036 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h @@ -569,8 +569,6 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len); void chtls_splice_eof(struct socket *sock); -int chtls_sendpage(struct sock *sk, struct page *page, - int offset, size_t size, int flags); int send_tx_flowc_wr(struct sock *sk, int compl, u32 snd_nxt, u32 rcv_nxt); void chtls_tcp_push(struct sock *sk, int flags); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c index e08ac960c967..5fc64e47568a 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c @@ -1246,20 +1246,6 @@ void chtls_splice_eof(struct socket *sock) release_sock(sk); } -int chtls_sendpage(struct sock *sk, struct page *page, - int offset, size_t size, int flags) -{ - struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; - struct bio_vec bvec; - - if (flags & MSG_SENDPAGE_NOTLAST) - msg.msg_flags |= MSG_MORE; - - bvec_set_page(&bvec, page, size, offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - return chtls_sendmsg(sk, &msg, size); -} - static void chtls_select_window(struct sock *sk) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c index 6b6787eafd2f..455a54708be4 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c @@ -607,7 +607,6 @@ static void __init chtls_init_ulp_ops(void) chtls_cpl_prot.shutdown = chtls_shutdown; chtls_cpl_prot.sendmsg = chtls_sendmsg; chtls_cpl_prot.splice_eof = chtls_splice_eof; - chtls_cpl_prot.sendpage = chtls_sendpage; chtls_cpl_prot.recvmsg = chtls_recvmsg; chtls_cpl_prot.setsockopt = chtls_setsockopt; chtls_cpl_prot.getsockopt = chtls_getsockopt; diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index db67f8e19344..8879e207ff5a 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -936,7 +936,7 @@ nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags, /* * Grab and keep cached pages associated with a file in the svc_rqst - * so that they can be passed to the network sendmsg/sendpage routines + * so that they can be passed to the network sendmsg routines * directly. They will be released after the sending has completed. 
* * Return values: Number of bytes consumed, or -EIO if there are no diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 34224e77f5a2..ef8ce86b1f78 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -229,8 +229,6 @@ void af_alg_wmem_wakeup(struct sock *sk); int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min); int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unsigned int ivsize); -ssize_t af_alg_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags); void af_alg_free_resources(struct af_alg_async_req *areq); void af_alg_async_cb(void *data, int err); __poll_t af_alg_poll(struct file *file, struct socket *sock, diff --git a/include/linux/net.h b/include/linux/net.h index 23324e9a2b3d..41c608c1b02c 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -207,8 +207,6 @@ struct proto_ops { size_t total_len, int flags); int (*mmap) (struct file *file, struct socket *sock, struct vm_area_struct * vma); - ssize_t (*sendpage) (struct socket *sock, struct page *page, - int offset, size_t size, int flags); ssize_t (*splice_read)(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); void (*splice_eof)(struct socket *sock); @@ -222,8 +220,6 @@ struct proto_ops { sk_read_actor_t recv_actor); /* This is different from read_sock(), it reads an entire skb at a time. */ int (*read_skb)(struct sock *sk, skb_read_actor_t recv_actor); - int (*sendpage_locked)(struct sock *sk, struct page *page, - int offset, size_t size, int flags); int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg, size_t size); int (*set_rcvlowat)(struct sock *sk, int val); @@ -341,10 +337,6 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags); int kernel_getsockname(struct socket *sock, struct sockaddr *addr); int kernel_getpeername(struct socket *sock, struct sockaddr *addr); -int kernel_sendpage(struct socket *sock, struct page *page, int offset, - size_t size, int flags); -int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, - size_t size, int flags); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); /* Routine returns the IP overhead imposed by a (caller-protected) socket. 
*/ diff --git a/include/net/inet_common.h b/include/net/inet_common.h index a75333342c4e..b86b8e21de7f 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -36,8 +36,6 @@ void __inet_accept(struct socket *sock, struct socket *newsock, int inet_send_prepare(struct sock *sk); int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); void inet_splice_eof(struct socket *sock); -ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, - size_t size, int flags); int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); int inet_shutdown(struct socket *sock, int how); diff --git a/include/net/sock.h b/include/net/sock.h index 62a1b99da349..121284f455a8 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1277,8 +1277,6 @@ struct proto { size_t len); int (*recvmsg)(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len); - int (*sendpage)(struct sock *sk, struct page *page, - int offset, size_t size, int flags); void (*splice_eof)(struct socket *sock); int (*bind)(struct sock *sk, struct sockaddr *addr, int addr_len); @@ -1919,10 +1917,6 @@ int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len); int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int); int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma); -ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, - size_t size, int flags); -ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page, - int offset, size_t size, int flags); /* * Functions to fill in entries in struct proto_ops when a protocol diff --git a/include/net/tcp.h b/include/net/tcp.h index 31b534370787..226bce6d1e8c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -329,10 +329,6 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size); int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied, size_t size, struct ubuf_info *uarg); void tcp_splice_eof(struct socket *sock); -int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, - int flags); -int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, - size_t size, int flags); int tcp_send_mss(struct sock *sk, int *size_goal, int flags); int tcp_wmem_schedule(struct sock *sk, int copy); void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle, diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index a06f4d4a6f47..8978fb6212ff 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1929,7 +1929,6 @@ static const struct proto_ops atalk_dgram_ops = { .sendmsg = atalk_sendmsg, .recvmsg = atalk_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct notifier_block ddp_notifier = { diff --git a/net/atm/pvc.c b/net/atm/pvc.c index 53e7d3f39e26..66d9a9bd5896 100644 --- a/net/atm/pvc.c +++ b/net/atm/pvc.c @@ -126,7 +126,6 @@ static const struct proto_ops pvc_proto_ops = { .sendmsg = vcc_sendmsg, .recvmsg = vcc_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; diff --git a/net/atm/svc.c b/net/atm/svc.c index d83556d8beb9..36a814f1fbd1 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c @@ -654,7 +654,6 @@ static const struct proto_ops svc_proto_ops = { .sendmsg = vcc_sendmsg, .recvmsg = vcc_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index d8da400cb4de..5db805d5f74d 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -2022,7 
+2022,6 @@ static const struct proto_ops ax25_proto_ops = { .sendmsg = ax25_sendmsg, .recvmsg = ax25_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; /* diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 4eebcc66c19a..9c82698da4f5 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -976,7 +976,6 @@ static const struct proto_ops caif_seqpacket_ops = { .sendmsg = caif_seqpkt_sendmsg, .recvmsg = caif_seqpkt_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static const struct proto_ops caif_stream_ops = { @@ -996,7 +995,6 @@ static const struct proto_ops caif_stream_ops = { .sendmsg = caif_stream_sendmsg, .recvmsg = caif_stream_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; /* This function is called when a socket is finally destroyed. */ diff --git a/net/can/bcm.c b/net/can/bcm.c index a962ec2b8ba5..9ba35685b043 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1703,7 +1703,6 @@ static const struct proto_ops bcm_ops = { .sendmsg = bcm_sendmsg, .recvmsg = bcm_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct proto bcm_proto __read_mostly = { diff --git a/net/can/isotp.c b/net/can/isotp.c index 84f9aba02901..1f25b45868cf 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -1699,7 +1699,6 @@ static const struct proto_ops isotp_ops = { .sendmsg = isotp_sendmsg, .recvmsg = isotp_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct proto isotp_proto __read_mostly = { diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index 35970c25496a..feaec4ad6d16 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -1306,7 +1306,6 @@ static const struct proto_ops j1939_ops = { .sendmsg = j1939_sk_sendmsg, .recvmsg = j1939_sk_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct proto j1939_proto __read_mostly = { diff --git a/net/can/raw.c b/net/can/raw.c index f64469b98260..15c79b079184 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -962,7 +962,6 @@ static const struct proto_ops raw_ops = { .sendmsg = raw_sendmsg, .recvmsg = raw_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct proto raw_proto __read_mostly = { diff --git a/net/core/sock.c b/net/core/sock.c index 5f1747c12004..de719094b804 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3261,36 +3261,6 @@ void __receive_sock(struct file *file) } } -ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) -{ - ssize_t res; - struct msghdr msg = {.msg_flags = flags}; - struct kvec iov; - char *kaddr = kmap(page); - iov.iov_base = kaddr + offset; - iov.iov_len = size; - res = kernel_sendmsg(sock, &msg, &iov, 1, size); - kunmap(page); - return res; -} -EXPORT_SYMBOL(sock_no_sendpage); - -ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page, - int offset, size_t size, int flags) -{ - ssize_t res; - struct msghdr msg = {.msg_flags = flags}; - struct kvec iov; - char *kaddr = kmap(page); - - iov.iov_base = kaddr + offset; - iov.iov_len = size; - res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size); - kunmap(page); - return res; -} -EXPORT_SYMBOL(sock_no_sendpage_locked); - /* * Default Socket Callbacks */ @@ -4046,7 +4016,7 @@ static void proto_seq_printf(struct seq_file *seq, struct proto *proto) { seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " - "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", + "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c 
%2c %2c %2c %2c %2c %2c %2c %2c\n", proto->name, proto->obj_size, sock_prot_inuse_get(seq_file_net(seq), proto), @@ -4067,7 +4037,6 @@ static void proto_seq_printf(struct seq_file *seq, struct proto *proto) proto_method_implemented(proto->getsockopt), proto_method_implemented(proto->sendmsg), proto_method_implemented(proto->recvmsg), - proto_method_implemented(proto->sendpage), proto_method_implemented(proto->bind), proto_method_implemented(proto->backlog_rcv), proto_method_implemented(proto->hash), @@ -4088,7 +4057,7 @@ static int proto_seq_show(struct seq_file *seq, void *v) "maxhdr", "slab", "module", - "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n"); + "cl co di ac io in de sh ss gs se re bi br ha uh gp em\n"); else proto_seq_printf(seq, list_entry(v, struct proto, node)); return 0; diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 3ab68415d121..fa8079303cb0 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -1010,7 +1010,6 @@ static const struct proto_ops inet_dccp_ops = { .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct inet_protosw dccp_v4_protosw = { diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 93c98990d726..7249ef218178 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -1087,7 +1087,6 @@ static const struct proto_ops inet6_dccp_ops = { .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, #endif diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 9c124705120d..00302e8b9615 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -426,7 +426,6 @@ static const struct proto_ops ieee802154_raw_ops = { .sendmsg = ieee802154_sock_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; /* DGRAM Sockets (802.15.4 dataframes) */ @@ -989,7 +988,6 @@ static const struct proto_ops ieee802154_dgram_ops = { .sendmsg = ieee802154_sock_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static void ieee802154_sock_destruct(struct sock *sk) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 38e649fb4474..9b2ca2fcc5a1 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -847,23 +847,6 @@ void inet_splice_eof(struct socket *sock) } EXPORT_SYMBOL_GPL(inet_splice_eof); -ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, - size_t size, int flags) -{ - struct sock *sk = sock->sk; - const struct proto *prot; - - if (unlikely(inet_send_prepare(sk))) - return -EAGAIN; - - /* IPV6_ADDRFORM can change sk->sk_prot under us. 
*/ - prot = READ_ONCE(sk->sk_prot); - if (prot->sendpage) - return prot->sendpage(sk, page, offset, size, flags); - return sock_no_sendpage(sock, page, offset, size, flags); -} -EXPORT_SYMBOL(inet_sendpage); - INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *, size_t, int, int *)); int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, @@ -1067,12 +1050,10 @@ const struct proto_ops inet_stream_ops = { .mmap = tcp_mmap, #endif .splice_eof = inet_splice_eof, - .sendpage = inet_sendpage, .splice_read = tcp_splice_read, .read_sock = tcp_read_sock, .read_skb = tcp_read_skb, .sendmsg_locked = tcp_sendmsg_locked, - .sendpage_locked = tcp_sendpage_locked, .peek_len = tcp_peek_len, #ifdef CONFIG_COMPAT .compat_ioctl = inet_compat_ioctl, @@ -1102,7 +1083,6 @@ const struct proto_ops inet_dgram_ops = { .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, .splice_eof = inet_splice_eof, - .sendpage = inet_sendpage, .set_peek_off = sk_set_peek_off, #ifdef CONFIG_COMPAT .compat_ioctl = inet_compat_ioctl, @@ -1134,7 +1114,6 @@ static const struct proto_ops inet_sockraw_ops = { .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, .splice_eof = inet_splice_eof, - .sendpage = inet_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet_compat_ioctl, #endif diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d56edc2c885f..e03e08745308 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -923,11 +923,10 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags) return mss_now; } -/* In some cases, both sendpage() and sendmsg() could have added - * an skb to the write queue, but failed adding payload on it. - * We need to remove it to consume less memory, but more - * importantly be able to generate EPOLLOUT for Edge Trigger epoll() - * users. +/* In some cases, both sendmsg() could have added an skb to the write queue, + * but failed adding payload on it. We need to remove it to consume less + * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger + * epoll() users. */ void tcp_remove_empty_skb(struct sock *sk) { @@ -975,40 +974,6 @@ int tcp_wmem_schedule(struct sock *sk, int copy) return min(copy, sk->sk_forward_alloc); } -int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, - size_t size, int flags) -{ - struct bio_vec bvec; - struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; - - if (!(sk->sk_route_caps & NETIF_F_SG)) - return sock_no_sendpage_locked(sk, page, offset, size, flags); - - tcp_rate_check_app_limited(sk); /* is sending application-limited? 
*/ - - bvec_set_page(&bvec, page, size, offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - - if (flags & MSG_SENDPAGE_NOTLAST) - msg.msg_flags |= MSG_MORE; - - return tcp_sendmsg_locked(sk, &msg, size); -} -EXPORT_SYMBOL_GPL(tcp_sendpage_locked); - -int tcp_sendpage(struct sock *sk, struct page *page, int offset, - size_t size, int flags) -{ - int ret; - - lock_sock(sk); - ret = tcp_sendpage_locked(sk, page, offset, size, flags); - release_sock(sk); - - return ret; -} -EXPORT_SYMBOL(tcp_sendpage); - void tcp_free_fastopen_req(struct tcp_sock *tp) { if (tp->fastopen_req) { diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 31d6005cea9b..81f0dff69e0b 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -486,7 +486,7 @@ static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) long timeo; int flags; - /* Don't let internal sendpage flags through */ + /* Don't let internal flags through */ flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED); flags |= MSG_NO_SHARED_FRAGS; @@ -566,23 +566,6 @@ out_err: return copied ? copied : err; } -static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset, - size_t size, int flags) -{ - struct bio_vec bvec; - struct msghdr msg = { - .msg_flags = flags | MSG_SPLICE_PAGES, - }; - - bvec_set_page(&bvec, page, size, offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - - if (flags & MSG_SENDPAGE_NOTLAST) - msg.msg_flags |= MSG_MORE; - - return tcp_bpf_sendmsg(sk, &msg, size); -} - enum { TCP_BPF_IPV4, TCP_BPF_IPV6, @@ -612,7 +595,6 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS], prot[TCP_BPF_TX] = prot[TCP_BPF_BASE]; prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg; - prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage; prot[TCP_BPF_RX] = prot[TCP_BPF_BASE]; prot[TCP_BPF_RX].recvmsg = tcp_bpf_recvmsg_parser; @@ -647,8 +629,7 @@ static int tcp_bpf_assert_proto_ops(struct proto *ops) * indeed valid assumptions. */ return ops->recvmsg == tcp_recvmsg && - ops->sendmsg == tcp_sendmsg && - ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP; + ops->sendmsg == tcp_sendmsg ? 
0 : -ENOTSUPP; } int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9213804b034f..fd365de4d5ff 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -3117,7 +3117,6 @@ struct proto tcp_prot = { .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .splice_eof = tcp_splice_eof, - .sendpage = tcp_sendpage, .backlog_rcv = tcp_v4_do_rcv, .release_cb = tcp_release_cb, .hash = inet_hash, diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 48fdcd3cad9c..42a96b3547c9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1340,20 +1340,6 @@ void udp_splice_eof(struct socket *sock) } EXPORT_SYMBOL_GPL(udp_splice_eof); -int udp_sendpage(struct sock *sk, struct page *page, int offset, - size_t size, int flags) -{ - struct bio_vec bvec; - struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES }; - - if (flags & MSG_SENDPAGE_NOTLAST) - msg.msg_flags |= MSG_MORE; - - bvec_set_page(&bvec, page, size, offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - return udp_sendmsg(sk, &msg, size); -} - #define UDP_SKB_IS_STATELESS 0x80000000 /* all head states (dst, sk, nf conntrack) except skb extensions are @@ -2933,7 +2919,6 @@ struct proto udp_prot = { .sendmsg = udp_sendmsg, .recvmsg = udp_recvmsg, .splice_eof = udp_splice_eof, - .sendpage = udp_sendpage, .release_cb = ip4_datagram_release_cb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 4ba7a88a1b1d..e1ff3a375996 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -19,8 +19,6 @@ int udp_getsockopt(struct sock *sk, int level, int optname, int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len); -int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, - int flags); void udp_destroy_sock(struct sock *sk); #ifdef CONFIG_PROC_FS diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 143f93a12f25..39ecdad1b50c 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -56,7 +56,6 @@ struct proto udplite_prot = { .getsockopt = udp_getsockopt, .sendmsg = udp_sendmsg, .recvmsg = udp_recvmsg, - .sendpage = udp_sendpage, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .rehash = udp_v4_rehash, diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index b3451cf47d29..5d593ddc0347 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -696,9 +696,7 @@ const struct proto_ops inet6_stream_ops = { .mmap = tcp_mmap, #endif .splice_eof = inet_splice_eof, - .sendpage = inet_sendpage, .sendmsg_locked = tcp_sendmsg_locked, - .sendpage_locked = tcp_sendpage_locked, .splice_read = tcp_splice_read, .read_sock = tcp_read_sock, .read_skb = tcp_read_skb, @@ -729,7 +727,6 @@ const struct proto_ops inet6_dgram_ops = { .recvmsg = inet6_recvmsg, /* retpoline's sake */ .read_skb = udp_read_skb, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, .set_peek_off = sk_set_peek_off, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index c9caeb5a43ed..ac1cef094c5f 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1296,7 +1296,6 @@ const struct proto_ops inet6_sockraw_ops = { .sendmsg = inet_sendmsg, /* ok */ .recvmsg = sock_common_recvmsg, /* ok */ .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, #endif diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c17c8ff94b79..40dd92a2f480 100644 --- a/net/ipv6/tcp_ipv6.c +++ 
b/net/ipv6/tcp_ipv6.c @@ -2151,7 +2151,6 @@ struct proto tcpv6_prot = { .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .splice_eof = tcp_splice_eof, - .sendpage = tcp_sendpage, .backlog_rcv = tcp_v6_do_rcv, .release_cb = tcp_release_cb, .hash = inet6_hash, diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index d0537c1c8cd7..393f01b2a7e6 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -963,24 +963,6 @@ static void kcm_splice_eof(struct socket *sock) release_sock(sk); } -static ssize_t kcm_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int flags) - -{ - struct bio_vec bvec; - struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; - - if (flags & MSG_SENDPAGE_NOTLAST) - msg.msg_flags |= MSG_MORE; - - if (flags & MSG_OOB) - return -EOPNOTSUPP; - - bvec_set_page(&bvec, page, size, offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - return kcm_sendmsg(sock, &msg, size); -} - static int kcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { @@ -1769,7 +1751,6 @@ static const struct proto_ops kcm_dgram_ops = { .recvmsg = kcm_recvmsg, .mmap = sock_no_mmap, .splice_eof = kcm_splice_eof, - .sendpage = kcm_sendpage, }; static const struct proto_ops kcm_seqpacket_ops = { @@ -1791,7 +1772,6 @@ static const struct proto_ops kcm_seqpacket_ops = { .recvmsg = kcm_recvmsg, .mmap = sock_no_mmap, .splice_eof = kcm_splice_eof, - .sendpage = kcm_sendpage, .splice_read = kcm_splice_read, }; diff --git a/net/key/af_key.c b/net/key/af_key.c index 31ab12fd720a..ede3c6a60353 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3761,7 +3761,6 @@ static const struct proto_ops pfkey_ops = { .listen = sock_no_listen, .shutdown = sock_no_shutdown, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, /* Now the operations that really occur. 
*/ .release = pfkey_release, diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 2b795c1064f5..f9073bc7281f 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -624,7 +624,6 @@ static const struct proto_ops l2tp_ip_ops = { .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct inet_protosw l2tp_ip_protosw = { diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 5137ea1861ce..b1623f9c4f92 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -751,7 +751,6 @@ static const struct proto_ops l2tp_ip6_ops = { .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, #endif diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 9ffbc667be6c..57c35c960b2c 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -1232,7 +1232,6 @@ static const struct proto_ops llc_ui_ops = { .sendmsg = llc_ui_sendmsg, .recvmsg = llc_ui_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static const char llc_proc_err_msg[] __initconst = diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index bb4bd0b6a4f7..f6be58b68c6f 100644 --- a/net/mctp/af_mctp.c +++ b/net/mctp/af_mctp.c @@ -485,7 +485,6 @@ static const struct proto_ops mctp_dgram_ops = { .sendmsg = mctp_sendmsg, .recvmsg = mctp_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = mctp_compat_ioctl, #endif diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index bd023debedc8..e892673deb73 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -3866,7 +3866,6 @@ static const struct proto_ops mptcp_stream_ops = { .sendmsg = inet_sendmsg, .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, - .sendpage = inet_sendpage, }; static struct inet_protosw mptcp_protosw = { @@ -3961,7 +3960,6 @@ static const struct proto_ops mptcp_v6_stream_ops = { .sendmsg = inet6_sendmsg, .recvmsg = inet6_recvmsg, .mmap = sock_no_mmap, - .sendpage = inet_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, #endif diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index cbd9aa7ee24a..39cfb778ebc5 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2815,7 +2815,6 @@ static const struct proto_ops netlink_ops = { .sendmsg = netlink_sendmsg, .recvmsg = netlink_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static const struct net_proto_family netlink_family_ops = { diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 5a4cb796150f..eb8ccbd58df7 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1364,7 +1364,6 @@ static const struct proto_ops nr_proto_ops = { .sendmsg = nr_sendmsg, .recvmsg = nr_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct notifier_block nr_dev_notifier = { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index a2dbeb264f26..85ff90a03b0c 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -4621,7 +4621,6 @@ static const struct proto_ops packet_ops_spkt = { .sendmsg = packet_sendmsg_spkt, .recvmsg = packet_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static const struct proto_ops packet_ops = { @@ -4643,7 +4642,6 @@ static const struct proto_ops packet_ops = { .sendmsg = packet_sendmsg, .recvmsg = packet_recvmsg, .mmap = packet_mmap, - .sendpage = sock_no_sendpage, }; static const struct net_proto_family 
packet_family_ops = { diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 967f9b4dc026..1018340d89a7 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -441,7 +441,6 @@ const struct proto_ops phonet_dgram_ops = { .sendmsg = pn_socket_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; const struct proto_ops phonet_stream_ops = { @@ -462,7 +461,6 @@ const struct proto_ops phonet_stream_ops = { .sendmsg = pn_socket_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; EXPORT_SYMBOL(phonet_stream_ops); diff --git a/net/qrtr/af_qrtr.c b/net/qrtr/af_qrtr.c index 76f0434d3d06..78beb74146e7 100644 --- a/net/qrtr/af_qrtr.c +++ b/net/qrtr/af_qrtr.c @@ -1244,7 +1244,6 @@ static const struct proto_ops qrtr_proto_ops = { .shutdown = sock_no_shutdown, .release = qrtr_release, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct proto qrtr_proto = { diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 3ff6995244e5..01c4cdfef45d 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -653,7 +653,6 @@ static const struct proto_ops rds_proto_ops = { .sendmsg = rds_sendmsg, .recvmsg = rds_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static void rds_sock_destruct(struct sock *sk) diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index ca2b17f32670..49dafe9ac72f 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1496,7 +1496,6 @@ static const struct proto_ops rose_proto_ops = { .sendmsg = rose_sendmsg, .recvmsg = rose_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct notifier_block rose_dev_notifier = { diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index da0b3b5157d5..f2cf4aa99db2 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -954,7 +954,6 @@ static const struct proto_ops rxrpc_rpc_ops = { .sendmsg = rxrpc_sendmsg, .recvmsg = rxrpc_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct proto rxrpc_proto = { diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 664d1f2e9121..274d07bd774f 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1133,7 +1133,6 @@ static const struct proto_ops inet_seqpacket_ops = { .sendmsg = inet_sendmsg, .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; /* Registration with AF_INET family. */ diff --git a/net/socket.c b/net/socket.c index b778fc03c6e0..8c3c8b29995a 100644 --- a/net/socket.c +++ b/net/socket.c @@ -3552,54 +3552,6 @@ int kernel_getpeername(struct socket *sock, struct sockaddr *addr) } EXPORT_SYMBOL(kernel_getpeername); -/** - * kernel_sendpage - send a &page through a socket (kernel space) - * @sock: socket - * @page: page - * @offset: page offset - * @size: total size in bytes - * @flags: flags (MSG_DONTWAIT, ...) - * - * Returns the total amount sent in bytes or an error. 
- */ - -int kernel_sendpage(struct socket *sock, struct page *page, int offset, - size_t size, int flags) -{ - if (sock->ops->sendpage) { - /* Warn in case the improper page to zero-copy send */ - WARN_ONCE(!sendpage_ok(page), "improper page for zero-copy send"); - return sock->ops->sendpage(sock, page, offset, size, flags); - } - return sock_no_sendpage(sock, page, offset, size, flags); -} -EXPORT_SYMBOL(kernel_sendpage); - -/** - * kernel_sendpage_locked - send a &page through the locked sock (kernel space) - * @sk: sock - * @page: page - * @offset: page offset - * @size: total size in bytes - * @flags: flags (MSG_DONTWAIT, ...) - * - * Returns the total amount sent in bytes or an error. - * Caller must hold @sk. - */ - -int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, - size_t size, int flags) -{ - struct socket *sock = sk->sk_socket; - - if (sock->ops->sendpage_locked) - return sock->ops->sendpage_locked(sk, page, offset, size, - flags); - - return sock_no_sendpage_locked(sk, page, offset, size, flags); -} -EXPORT_SYMBOL(kernel_sendpage_locked); - /** * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space) * @sock: socket diff --git a/net/tipc/socket.c b/net/tipc/socket.c index dd73d71c02a9..ef8e5139a873 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -3375,7 +3375,6 @@ static const struct proto_ops msg_ops = { .sendmsg = tipc_sendmsg, .recvmsg = tipc_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage }; static const struct proto_ops packet_ops = { @@ -3396,7 +3395,6 @@ static const struct proto_ops packet_ops = { .sendmsg = tipc_send_packet, .recvmsg = tipc_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage }; static const struct proto_ops stream_ops = { @@ -3417,7 +3415,6 @@ static const struct proto_ops stream_ops = { .sendmsg = tipc_sendstream, .recvmsg = tipc_recvstream, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage }; static const struct net_proto_family tipc_family_ops = { diff --git a/net/tls/tls.h b/net/tls/tls.h index d002c3af1966..86cef1c68e03 100644 --- a/net/tls/tls.h +++ b/net/tls/tls.h @@ -98,10 +98,6 @@ void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx); void tls_sw_strparser_done(struct tls_context *tls_ctx); int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); void tls_sw_splice_eof(struct socket *sock); -int tls_sw_sendpage_locked(struct sock *sk, struct page *page, - int offset, size_t size, int flags); -int tls_sw_sendpage(struct sock *sk, struct page *page, - int offset, size_t size, int flags); void tls_sw_cancel_work_tx(struct tls_context *tls_ctx); void tls_sw_release_resources_tx(struct sock *sk); void tls_sw_free_ctx_tx(struct tls_context *tls_ctx); @@ -117,8 +113,6 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); void tls_device_splice_eof(struct socket *sock); -int tls_device_sendpage(struct sock *sk, struct page *page, - int offset, size_t size, int flags); int tls_tx_records(struct sock *sk, int flags); void tls_sw_write_space(struct sock *sk, struct tls_context *ctx); diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 975299d7213b..840ee06f1708 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -621,23 +621,6 @@ void tls_device_splice_eof(struct socket *sock) mutex_unlock(&tls_ctx->tx_lock); } -int tls_device_sendpage(struct sock *sk, struct page *page, - int offset, size_t size, int flags) -{ - struct bio_vec bvec; - 
struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; - - if (flags & MSG_SENDPAGE_NOTLAST) - msg.msg_flags |= MSG_MORE; - - if (flags & MSG_OOB) - return -EOPNOTSUPP; - - bvec_set_page(&bvec, page, size, offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - return tls_device_sendmsg(sk, &msg, size); -} - struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, u32 seq, u64 *p_record_sn) { diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 7b9c83dd7de2..d5ed4d47b16e 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -958,7 +958,6 @@ static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG] ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE]; ops[TLS_SW ][TLS_BASE].splice_eof = tls_sw_splice_eof; - ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked; ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE]; ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read; @@ -970,17 +969,14 @@ static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG] #ifdef CONFIG_TLS_DEVICE ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE]; - ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL; ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ]; - ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL; ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ]; ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ]; ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ]; - ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL; #endif #ifdef CONFIG_TLS_TOE ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base; @@ -1029,7 +1025,6 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE]; prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg; prot[TLS_SW][TLS_BASE].splice_eof = tls_sw_splice_eof; - prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage; prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE]; prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg; @@ -1045,12 +1040,10 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE]; prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg; prot[TLS_HW][TLS_BASE].splice_eof = tls_device_splice_eof; - prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage; prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW]; prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg; prot[TLS_HW][TLS_SW].splice_eof = tls_device_splice_eof; - prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage; prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW]; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 319f61590d2c..9b3aa89a4292 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1281,41 +1281,6 @@ unlock: mutex_unlock(&tls_ctx->tx_lock); } -int tls_sw_sendpage_locked(struct sock *sk, struct page *page, - int offset, size_t size, int flags) -{ - struct bio_vec bvec; - struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; - - if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | - MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY | - MSG_NO_SHARED_FRAGS)) - return -EOPNOTSUPP; - if (flags & MSG_SENDPAGE_NOTLAST) - msg.msg_flags |= MSG_MORE; - - bvec_set_page(&bvec, page, size, offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - return tls_sw_sendmsg_locked(sk, &msg, size); -} - -int tls_sw_sendpage(struct sock *sk, struct page *page, - int offset, size_t size, int flags) -{ - struct bio_vec bvec; - struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, }; - - if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL 
| - MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) - return -EOPNOTSUPP; - if (flags & MSG_SENDPAGE_NOTLAST) - msg.msg_flags |= MSG_MORE; - - bvec_set_page(&bvec, page, size, offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - return tls_sw_sendmsg(sk, &msg, size); -} - static int tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock, bool released) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index f9d196439b49..f2f234f0b92c 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -758,8 +758,6 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon static int unix_shutdown(struct socket *, int); static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t); static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int); -static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset, - size_t size, int flags); static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos, struct pipe_inode_info *, size_t size, unsigned int flags); @@ -852,7 +850,6 @@ static const struct proto_ops unix_stream_ops = { .recvmsg = unix_stream_recvmsg, .read_skb = unix_stream_read_skb, .mmap = sock_no_mmap, - .sendpage = unix_stream_sendpage, .splice_read = unix_stream_splice_read, .set_peek_off = unix_set_peek_off, .show_fdinfo = unix_show_fdinfo, @@ -878,7 +875,6 @@ static const struct proto_ops unix_dgram_ops = { .read_skb = unix_read_skb, .recvmsg = unix_dgram_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, .set_peek_off = unix_set_peek_off, .show_fdinfo = unix_show_fdinfo, }; @@ -902,7 +898,6 @@ static const struct proto_ops unix_seqpacket_ops = { .sendmsg = unix_seqpacket_sendmsg, .recvmsg = unix_seqpacket_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, .set_peek_off = unix_set_peek_off, .show_fdinfo = unix_show_fdinfo, }; @@ -2294,20 +2289,6 @@ out_err: return sent ? 
: err; } -static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, - int offset, size_t size, int flags) -{ - struct bio_vec bvec; - struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES }; - - if (flags & MSG_SENDPAGE_NOTLAST) - msg.msg_flags |= MSG_MORE; - - bvec_set_page(&bvec, page, size, offset); - iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); - return unix_stream_sendmsg(socket, &msg, size); -} - static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index efb8a0937a13..020cf17ab7e4 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1306,7 +1306,6 @@ static const struct proto_ops vsock_dgram_ops = { .sendmsg = vsock_dgram_sendmsg, .recvmsg = vsock_dgram_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, .read_skb = vsock_read_skb, }; @@ -2234,7 +2233,6 @@ static const struct proto_ops vsock_stream_ops = { .sendmsg = vsock_connectible_sendmsg, .recvmsg = vsock_connectible_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, .set_rcvlowat = vsock_set_rcvlowat, .read_skb = vsock_read_skb, }; @@ -2257,7 +2255,6 @@ static const struct proto_ops vsock_seqpacket_ops = { .sendmsg = vsock_connectible_sendmsg, .recvmsg = vsock_connectible_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, .read_skb = vsock_read_skb, }; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 5c7ad301d742..0fb5143bec7a 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1757,7 +1757,6 @@ static const struct proto_ops x25_proto_ops = { .sendmsg = x25_sendmsg, .recvmsg = x25_recvmsg, .mmap = sock_no_mmap, - .sendpage = sock_no_sendpage, }; static struct packet_type x25_packet_type __read_mostly = { diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index cc1e7f15fa73..5a8c0dd250af 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -1389,7 +1389,6 @@ static const struct proto_ops xsk_proto_ops = { .sendmsg = xsk_sendmsg, .recvmsg = xsk_recvmsg, .mmap = xsk_mmap, - .sendpage = sock_no_sendpage, }; static void xsk_destruct(struct sock *sk) -- cgit v1.2.3 From b848b26c6672c9b977890ba85f5a155e5eb221f0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 23 Jun 2023 23:55:13 +0100 Subject: net: Kill MSG_SENDPAGE_NOTLAST Now that ->sendpage() has been removed, MSG_SENDPAGE_NOTLAST can be cleaned up. Things were converted to use MSG_MORE instead, but the protocol sendpage stubs still convert MSG_SENDPAGE_NOTLAST to MSG_MORE, which is now unnecessary. 
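For illustration only (this sketch is not part of the patch, and the helper name is made up): with ->sendpage() gone, a kernel caller that used to hand over a single page now builds a bio_vec-backed msghdr and calls sock_sendmsg() with MSG_SPLICE_PAGES, using MSG_MORE where MSG_SENDPAGE_NOTLAST used to be passed, just as the removed stubs above did internally.

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/bvec.h>
#include <linux/uio.h>

/* Hypothetical helper showing the sendmsg-based replacement pattern. */
static int example_send_page(struct socket *sock, struct page *page,
			     int offset, size_t size, bool more_to_come)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };

	if (more_to_come)
		msg.msg_flags |= MSG_MORE;	/* takes the role MSG_SENDPAGE_NOTLAST had */

	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
	return sock_sendmsg(sock, &msg);
}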
Signed-off-by: David Howells cc: Jens Axboe cc: Matthew Wilcox cc: linux-afs@lists.infradead.org cc: mptcp@lists.linux.dev cc: rds-devel@oss.oracle.com cc: tipc-discussion@lists.sourceforge.net cc: virtualization@lists.linux-foundation.org Link: https://lore.kernel.org/r/20230623225513.2732256-17-dhowells@redhat.com Signed-off-by: Jakub Kicinski --- include/linux/socket.h | 4 +--- net/tls/tls_device.c | 3 +-- net/tls/tls_main.c | 2 +- net/tls/tls_sw.c | 2 +- tools/perf/trace/beauty/include/linux/socket.h | 1 - tools/perf/trace/beauty/msg_flags.c | 5 +---- 6 files changed, 5 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/include/linux/socket.h b/include/linux/socket.h index 58204700018a..39b74d83c7c4 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -319,7 +319,6 @@ struct ucred { #define MSG_MORE 0x8000 /* Sender will send more */ #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ #define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */ -#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */ #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */ @@ -341,8 +340,7 @@ struct ucred { /* Flags to be cleared on entry by sendmsg and sendmmsg syscalls */ #define MSG_INTERNAL_SENDMSG_FLAGS \ - (MSG_SPLICE_PAGES | MSG_SENDPAGE_NOPOLICY | MSG_SENDPAGE_NOTLAST | \ - MSG_SENDPAGE_DECRYPTED) + (MSG_SPLICE_PAGES | MSG_SENDPAGE_NOPOLICY | MSG_SENDPAGE_DECRYPTED) /* Setsockoptions(2) level. Thanks to BSD these must match IPPROTO_xxx */ #define SOL_IP 0 diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 840ee06f1708..2021fe557e50 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -441,8 +441,7 @@ static int tls_push_data(struct sock *sk, long timeo; if (flags & - ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST | - MSG_SPLICE_PAGES)) + ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SPLICE_PAGES)) return -EOPNOTSUPP; if (unlikely(sk->sk_err)) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index d5ed4d47b16e..b6896126bb92 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -127,7 +127,7 @@ int tls_push_sg(struct sock *sk, { struct bio_vec bvec; struct msghdr msg = { - .msg_flags = MSG_SENDPAGE_NOTLAST | MSG_SPLICE_PAGES | flags, + .msg_flags = MSG_SPLICE_PAGES | flags, }; int ret = 0; struct page *p; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 9b3aa89a4292..53f944e6d8ef 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -1194,7 +1194,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | - MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) + MSG_SENDPAGE_NOPOLICY)) return -EOPNOTSUPP; ret = mutex_lock_interruptible(&tls_ctx->tx_lock); diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h index 13c3a237b9c9..3bef212a24d7 100644 --- a/tools/perf/trace/beauty/include/linux/socket.h +++ b/tools/perf/trace/beauty/include/linux/socket.h @@ -318,7 +318,6 @@ struct ucred { #define MSG_MORE 0x8000 /* Sender will send more */ #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ #define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */ -#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal 
: not the last page */ #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */ diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c index ea68db08b8e7..5cdebd7ece7e 100644 --- a/tools/perf/trace/beauty/msg_flags.c +++ b/tools/perf/trace/beauty/msg_flags.c @@ -8,9 +8,6 @@ #ifndef MSG_WAITFORONE #define MSG_WAITFORONE 0x10000 #endif -#ifndef MSG_SENDPAGE_NOTLAST -#define MSG_SENDPAGE_NOTLAST 0x20000 -#endif #ifndef MSG_FASTOPEN #define MSG_FASTOPEN 0x20000000 #endif @@ -50,7 +47,7 @@ static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size, P_MSG_FLAG(NOSIGNAL); P_MSG_FLAG(MORE); P_MSG_FLAG(WAITFORONE); - P_MSG_FLAG(SENDPAGE_NOTLAST); + P_MSG_FLAG(SPLICE_PAGES); P_MSG_FLAG(FASTOPEN); P_MSG_FLAG(CMSG_CLOEXEC); #undef P_MSG_FLAG -- cgit v1.2.3 From de6843be3082d416eaf2a00b72dad95c784ca980 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 6 Jun 2023 09:38:42 +0200 Subject: netfilter: nft_payload: rebuild vlan header when needed Skip rebuilding the vlan header when accessing destination and source mac address. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_payload.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index 3a3c7746e88f..8cb800989947 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -171,7 +171,8 @@ void nft_payload_eval(const struct nft_expr *expr, if (!skb_mac_header_was_set(skb)) goto err; - if (skb_vlan_tag_present(skb)) { + if (skb_vlan_tag_present(skb) && + priv->offset >= offsetof(struct ethhdr, h_proto)) { if (!nft_payload_copy_vlan(dest, skb, priv->offset, priv->len)) goto err; -- cgit v1.2.3 From 78aa23d0081b2029a5763c4f7d396bf2666c0c87 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 6 Jun 2023 13:58:27 +0200 Subject: netfilter: ipset: remove rcu_read_lock_bh pair from ip_set_test Callers already hold rcu_read_lock. Prior to RCU conversion this used to be a read_lock_bh(), but now the bh-disable isn't needed anymore. Cc: Jozsef Kadlecsik Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/ipset/ip_set_core.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'net') diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 46ebee9400da..a77c75f6dfb9 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -739,9 +739,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb, !(opt->family == set->family || set->family == NFPROTO_UNSPEC)) return 0; - rcu_read_lock_bh(); ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt); - rcu_read_unlock_bh(); if (ret == -EAGAIN) { /* Type requests element to be completed */ -- cgit v1.2.3 From 96b2ef9b16cb302d0b47c5670d30a05963e0e1e3 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 6 Jun 2023 14:08:49 +0200 Subject: netfilter: nf_tables: permit update of set size Now that set->nelems is always updated permit update of the sets max size. 
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 3 +++ net/netfilter/nf_tables_api.c | 4 ++++ 2 files changed, 7 insertions(+) (limited to 'net') diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 2e24ea1d744c..89b1ac4e6d4a 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1589,6 +1589,7 @@ struct nft_trans_set { u64 timeout; bool update; bool bound; + u32 size; }; #define nft_trans_set(trans) \ @@ -1603,6 +1604,8 @@ struct nft_trans_set { (((struct nft_trans_set *)trans->data)->timeout) #define nft_trans_set_gc_int(trans) \ (((struct nft_trans_set *)trans->data)->gc_int) +#define nft_trans_set_size(trans) \ + (((struct nft_trans_set *)trans->data)->size) struct nft_trans_chain { bool update; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 0396fd8f4e71..dfd441ff1e3e 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -483,6 +483,7 @@ static int __nft_trans_set_add(const struct nft_ctx *ctx, int msg_type, nft_trans_set_update(trans) = true; nft_trans_set_gc_int(trans) = desc->gc_int; nft_trans_set_timeout(trans) = desc->timeout; + nft_trans_set_size(trans) = desc->size; } nft_trans_commit_list_add_tail(ctx->net, trans); @@ -9428,6 +9429,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) WRITE_ONCE(set->timeout, nft_trans_set_timeout(trans)); WRITE_ONCE(set->gc_int, nft_trans_set_gc_int(trans)); + + if (nft_trans_set_size(trans)) + WRITE_ONCE(set->size, nft_trans_set_size(trans)); } else { nft_clear(net, nft_trans_set(trans)); /* This avoids hitting -EBUSY when deleting the table -- cgit v1.2.3 From 4589725502871e77d06464f731f92fd9173e2be6 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 6 Jun 2023 22:59:30 +0200 Subject: netfilter: snat: evict closing tcp entries on reply tuple collision When all tried source tuples are in use, the connection request (skb) and the new conntrack will be dropped in nf_confirm() due to the non-recoverable clash. Make it so that the last 32 attempts are allowed to evict a colliding entry if this connection is already closing and the new sequence number has advanced past the old one. Such "all tuples taken" scenario can happen with tcp-rpc workloads where the same dst:dport gets queried repeatedly. 
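A condensed sketch of the rule described above (illustrative only, not part of the patch; the constants mirror NF_NAT_MAX_ATTEMPTS and NF_NAT_HARDER_THRESH from the diff below, the helper itself is hypothetical): eviction is only considered during the final 128 / 4 == 32 candidate ports, and only against an entry that is already closing and whose old sequence numbers the new flow has moved past.

#include <linux/types.h>

#define EXAMPLE_MAX_ATTEMPTS	128
#define EXAMPLE_HARDER_THRESH	(EXAMPLE_MAX_ATTEMPTS / 4)	/* == 32, the "last 32 attempts" */

/* Hypothetical summary of when the destructive path may run. */
static bool example_may_evict(unsigned int attempts_left,
			      bool colliding_entry_is_closing,
			      bool new_seq_has_advanced)
{
	if (attempts_left > EXAMPLE_HARDER_THRESH)
		return false;		/* plenty of ports left to try */
	if (!colliding_entry_is_closing)
		return false;		/* never evict a live connection */
	return new_seq_has_advanced;	/* replies to the old flow must now look invalid */
}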
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_nat_core.c | 92 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 88 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index ce829d434f13..fadbd4ed3dc0 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -27,6 +27,9 @@ #include "nf_internals.h" +#define NF_NAT_MAX_ATTEMPTS 128 +#define NF_NAT_HARDER_THRESH (NF_NAT_MAX_ATTEMPTS / 4) + static spinlock_t nf_nat_locks[CONNTRACK_LOCKS]; static DEFINE_MUTEX(nf_nat_proto_mutex); @@ -197,6 +200,88 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple, return nf_conntrack_tuple_taken(&reply, ignored_conntrack); } +static bool nf_nat_may_kill(struct nf_conn *ct, unsigned long flags) +{ + static const unsigned long flags_refuse = IPS_FIXED_TIMEOUT | + IPS_DYING; + static const unsigned long flags_needed = IPS_SRC_NAT; + enum tcp_conntrack old_state; + + old_state = READ_ONCE(ct->proto.tcp.state); + if (old_state < TCP_CONNTRACK_TIME_WAIT) + return false; + + if (flags & flags_refuse) + return false; + + return (flags & flags_needed) == flags_needed; +} + +/* reverse direction will send packets to new source, so + * make sure such packets are invalid. + */ +static bool nf_seq_has_advanced(const struct nf_conn *old, const struct nf_conn *new) +{ + return (__s32)(new->proto.tcp.seen[0].td_end - + old->proto.tcp.seen[0].td_end) > 0; +} + +static int +nf_nat_used_tuple_harder(const struct nf_conntrack_tuple *tuple, + const struct nf_conn *ignored_conntrack, + unsigned int attempts_left) +{ + static const unsigned long flags_offload = IPS_OFFLOAD | IPS_HW_OFFLOAD; + struct nf_conntrack_tuple_hash *thash; + const struct nf_conntrack_zone *zone; + struct nf_conntrack_tuple reply; + unsigned long flags; + struct nf_conn *ct; + bool taken = true; + struct net *net; + + nf_ct_invert_tuple(&reply, tuple); + + if (attempts_left > NF_NAT_HARDER_THRESH || + tuple->dst.protonum != IPPROTO_TCP || + ignored_conntrack->proto.tcp.state != TCP_CONNTRACK_SYN_SENT) + return nf_conntrack_tuple_taken(&reply, ignored_conntrack); + + /* Last few attempts to find a free tcp port. Destructive + * action: evict colliding if it's in timewait state and the + * tcp sequence number has advanced past the one used by the + * old entry. + */ + net = nf_ct_net(ignored_conntrack); + zone = nf_ct_zone(ignored_conntrack); + + thash = nf_conntrack_find_get(net, zone, &reply); + if (!thash) + return false; + + ct = nf_ct_tuplehash_to_ctrack(thash); + + if (thash->tuple.dst.dir == IP_CT_DIR_ORIGINAL) + goto out; + + if (WARN_ON_ONCE(ct == ignored_conntrack)) + goto out; + + flags = READ_ONCE(ct->status); + if (!nf_nat_may_kill(ct, flags)) + goto out; + + if (!nf_seq_has_advanced(ct, ignored_conntrack)) + goto out; + + /* Even if we can evict do not reuse if entry is offloaded. 
*/ + if (nf_ct_kill(ct)) + taken = flags & flags_offload; +out: + nf_ct_put(ct); + return taken; +} + static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t, const struct nf_nat_range2 *range) { @@ -385,7 +470,6 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple, unsigned int range_size, min, max, i, attempts; __be16 *keyptr; u16 off; - static const unsigned int max_attempts = 128; switch (tuple->dst.protonum) { case IPPROTO_ICMP: @@ -471,8 +555,8 @@ find_free_id: off = get_random_u16(); attempts = range_size; - if (attempts > max_attempts) - attempts = max_attempts; + if (attempts > NF_NAT_MAX_ATTEMPTS) + attempts = NF_NAT_MAX_ATTEMPTS; /* We are in softirq; doing a search of the entire range risks * soft lockup when all tuples are already used. @@ -483,7 +567,7 @@ find_free_id: another_round: for (i = 0; i < attempts; i++, off++) { *keyptr = htons(min + off % range_size); - if (!nf_nat_used_tuple(tuple, ct)) + if (!nf_nat_used_tuple_harder(tuple, ct, attempts - i)) return; } -- cgit v1.2.3 From 079cd633219d7298d087cd115c17682264244c18 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 15 Jun 2023 16:31:40 +0200 Subject: netfilter: nf_tables: Introduce NFT_MSG_GETSETELEM_RESET Analogous to NFT_MSG_GETOBJ_RESET, but for set elements with a timeout or attached stateful expressions like counters or quotas - reset them all at once. Respect a per element timeout value if present to reset the 'expires' value to. Signed-off-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 2 + net/netfilter/nf_tables_api.c | 68 ++++++++++++++++++++++---------- 2 files changed, 50 insertions(+), 20 deletions(-) (limited to 'net') diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index e059dc2644df..8466c2a9938f 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -105,6 +105,7 @@ enum nft_verdicts { * @NFT_MSG_DESTROYSETELEM: destroy a set element (enum nft_set_elem_attributes) * @NFT_MSG_DESTROYOBJ: destroy a stateful object (enum nft_object_attributes) * @NFT_MSG_DESTROYFLOWTABLE: destroy flow table (enum nft_flowtable_attributes) + * @NFT_MSG_GETSETELEM_RESET: get set elements and reset attached stateful expressions (enum nft_set_elem_attributes) */ enum nf_tables_msg_types { NFT_MSG_NEWTABLE, @@ -140,6 +141,7 @@ enum nf_tables_msg_types { NFT_MSG_DESTROYSETELEM, NFT_MSG_DESTROYOBJ, NFT_MSG_DESTROYFLOWTABLE, + NFT_MSG_GETSETELEM_RESET, NFT_MSG_MAX, }; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index dfd441ff1e3e..30fd62224df9 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5229,7 +5229,8 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + static int nft_set_elem_expr_dump(struct sk_buff *skb, const struct nft_set *set, - const struct nft_set_ext *ext) + const struct nft_set_ext *ext, + bool reset) { struct nft_set_elem_expr *elem_expr; u32 size, num_exprs = 0; @@ -5242,7 +5243,7 @@ static int nft_set_elem_expr_dump(struct sk_buff *skb, if (num_exprs == 1) { expr = nft_setelem_expr_at(elem_expr, 0); - if (nft_expr_dump(skb, NFTA_SET_ELEM_EXPR, expr, false) < 0) + if (nft_expr_dump(skb, NFTA_SET_ELEM_EXPR, expr, reset) < 0) return -1; return 0; @@ -5253,7 +5254,7 @@ static int nft_set_elem_expr_dump(struct sk_buff *skb, nft_setelem_expr_foreach(expr, elem_expr, size) { expr = nft_setelem_expr_at(elem_expr, size); - if 
(nft_expr_dump(skb, NFTA_LIST_ELEM, expr, false) < 0) + if (nft_expr_dump(skb, NFTA_LIST_ELEM, expr, reset) < 0) goto nla_put_failure; } nla_nest_end(skb, nest); @@ -5266,11 +5267,13 @@ nla_put_failure: static int nf_tables_fill_setelem(struct sk_buff *skb, const struct nft_set *set, - const struct nft_set_elem *elem) + const struct nft_set_elem *elem, + bool reset) { const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv); unsigned char *b = skb_tail_pointer(skb); struct nlattr *nest; + u64 timeout = 0; nest = nla_nest_start_noflag(skb, NFTA_LIST_ELEM); if (nest == NULL) @@ -5293,7 +5296,7 @@ static int nf_tables_fill_setelem(struct sk_buff *skb, goto nla_put_failure; if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPRESSIONS) && - nft_set_elem_expr_dump(skb, set, ext)) + nft_set_elem_expr_dump(skb, set, ext, reset)) goto nla_put_failure; if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) && @@ -5306,11 +5309,15 @@ static int nf_tables_fill_setelem(struct sk_buff *skb, htonl(*nft_set_ext_flags(ext)))) goto nla_put_failure; - if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) && - nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT, - nf_jiffies64_to_msecs(*nft_set_ext_timeout(ext)), - NFTA_SET_ELEM_PAD)) - goto nla_put_failure; + if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) { + timeout = *nft_set_ext_timeout(ext); + if (nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT, + nf_jiffies64_to_msecs(timeout), + NFTA_SET_ELEM_PAD)) + goto nla_put_failure; + } else if (set->flags & NFT_SET_TIMEOUT) { + timeout = READ_ONCE(set->timeout); + } if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) { u64 expires, now = get_jiffies_64(); @@ -5325,6 +5332,9 @@ static int nf_tables_fill_setelem(struct sk_buff *skb, nf_jiffies64_to_msecs(expires), NFTA_SET_ELEM_PAD)) goto nla_put_failure; + + if (reset) + *nft_set_ext_expiration(ext) = now + timeout; } if (nft_set_ext_exists(ext, NFT_SET_EXT_USERDATA)) { @@ -5348,6 +5358,7 @@ struct nft_set_dump_args { const struct netlink_callback *cb; struct nft_set_iter iter; struct sk_buff *skb; + bool reset; }; static int nf_tables_dump_setelem(const struct nft_ctx *ctx, @@ -5358,7 +5369,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx, struct nft_set_dump_args *args; args = container_of(iter, struct nft_set_dump_args, iter); - return nf_tables_fill_setelem(args->skb, set, elem); + return nf_tables_fill_setelem(args->skb, set, elem, args->reset); } struct nft_set_dump_ctx { @@ -5367,7 +5378,7 @@ struct nft_set_dump_ctx { }; static int nft_set_catchall_dump(struct net *net, struct sk_buff *skb, - const struct nft_set *set) + const struct nft_set *set, bool reset) { struct nft_set_elem_catchall *catchall; u8 genmask = nft_genmask_cur(net); @@ -5382,7 +5393,7 @@ static int nft_set_catchall_dump(struct net *net, struct sk_buff *skb, continue; elem.priv = catchall->elem; - ret = nf_tables_fill_setelem(skb, set, &elem); + ret = nf_tables_fill_setelem(skb, set, &elem, reset); break; } @@ -5400,6 +5411,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) bool set_found = false; struct nlmsghdr *nlh; struct nlattr *nest; + bool reset = false; u32 portid, seq; int event; @@ -5447,8 +5459,12 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) if (nest == NULL) goto nla_put_failure; + if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETSETELEM_RESET) + reset = true; + args.cb = cb; args.skb = skb; + args.reset = reset; args.iter.genmask = nft_genmask_cur(net); args.iter.skip = cb->args[0]; args.iter.count = 0; @@ 
-5457,7 +5473,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) set->ops->walk(&dump_ctx->ctx, set, &args.iter); if (!args.iter.err && args.iter.count == cb->args[0]) - args.iter.err = nft_set_catchall_dump(net, skb, set); + args.iter.err = nft_set_catchall_dump(net, skb, set, reset); rcu_read_unlock(); nla_nest_end(skb, nest); @@ -5495,7 +5511,8 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb, const struct nft_ctx *ctx, u32 seq, u32 portid, int event, u16 flags, const struct nft_set *set, - const struct nft_set_elem *elem) + const struct nft_set_elem *elem, + bool reset) { struct nlmsghdr *nlh; struct nlattr *nest; @@ -5516,7 +5533,7 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb, if (nest == NULL) goto nla_put_failure; - err = nf_tables_fill_setelem(skb, set, elem); + err = nf_tables_fill_setelem(skb, set, elem, reset); if (err < 0) goto nla_put_failure; @@ -5622,7 +5639,7 @@ static int nft_setelem_get(struct nft_ctx *ctx, struct nft_set *set, } static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, - const struct nlattr *attr) + const struct nlattr *attr, bool reset) { struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; struct nft_set_elem elem; @@ -5666,7 +5683,8 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, return err; err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid, - NFT_MSG_NEWSETELEM, 0, set, &elem); + NFT_MSG_NEWSETELEM, 0, set, &elem, + reset); if (err < 0) goto err_fill_setelem; @@ -5690,6 +5708,7 @@ static int nf_tables_getsetelem(struct sk_buff *skb, struct nft_set *set; struct nlattr *attr; struct nft_ctx ctx; + bool reset = false; int rem, err = 0; table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family, @@ -5724,8 +5743,11 @@ static int nf_tables_getsetelem(struct sk_buff *skb, if (!nla[NFTA_SET_ELEM_LIST_ELEMENTS]) return -EINVAL; + if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_GETSETELEM_RESET) + reset = true; + nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { - err = nft_get_set_elem(&ctx, set, attr); + err = nft_get_set_elem(&ctx, set, attr, reset); if (err < 0) { NL_SET_BAD_ATTR(extack, attr); break; @@ -5758,7 +5780,7 @@ static void nf_tables_setelem_notify(const struct nft_ctx *ctx, flags |= ctx->flags & (NLM_F_CREATE | NLM_F_EXCL); err = nf_tables_fill_setelem_info(skb, ctx, 0, portid, event, flags, - set, elem); + set, elem, false); if (err < 0) { kfree_skb(skb); goto err; @@ -8715,6 +8737,12 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = { .attr_count = NFTA_SET_ELEM_LIST_MAX, .policy = nft_set_elem_list_policy, }, + [NFT_MSG_GETSETELEM_RESET] = { + .call = nf_tables_getsetelem, + .type = NFNL_CB_RCU, + .attr_count = NFTA_SET_ELEM_LIST_MAX, + .policy = nft_set_elem_list_policy, + }, [NFT_MSG_DELSETELEM] = { .call = nf_tables_delsetelem, .type = NFNL_CB_BATCH, -- cgit v1.2.3 From a412dbf40ff37515acca4bba666f5386aa37246e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 21 Jun 2023 21:11:03 +0200 Subject: netfilter: nf_tables: limit allowed range via nla_policy These NLA_U32 types get stored in u8 fields, reject invalid values instead of silently casting to u8. 
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_bitwise.c | 2 +- net/netfilter/nft_byteorder.c | 6 +++--- net/netfilter/nft_ct.c | 2 +- net/netfilter/nft_dynset.c | 2 +- net/netfilter/nft_exthdr.c | 4 ++-- net/netfilter/nft_fwd_netdev.c | 2 +- net/netfilter/nft_hash.c | 2 +- net/netfilter/nft_meta.c | 2 +- net/netfilter/nft_range.c | 2 +- net/netfilter/nft_reject.c | 2 +- net/netfilter/nft_rt.c | 2 +- net/netfilter/nft_socket.c | 4 ++-- net/netfilter/nft_tproxy.c | 2 +- net/netfilter/nft_tunnel.c | 4 ++-- net/netfilter/nft_xfrm.c | 4 ++-- 15 files changed, 21 insertions(+), 21 deletions(-) (limited to 'net') diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c index 84eae7cabc67..14e3c44ef959 100644 --- a/net/netfilter/nft_bitwise.c +++ b/net/netfilter/nft_bitwise.c @@ -86,7 +86,7 @@ static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = { [NFTA_BITWISE_LEN] = { .type = NLA_U32 }, [NFTA_BITWISE_MASK] = { .type = NLA_NESTED }, [NFTA_BITWISE_XOR] = { .type = NLA_NESTED }, - [NFTA_BITWISE_OP] = { .type = NLA_U32 }, + [NFTA_BITWISE_OP] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_BITWISE_DATA] = { .type = NLA_NESTED }, }; diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c index b66647a5a171..9a85e797ed58 100644 --- a/net/netfilter/nft_byteorder.c +++ b/net/netfilter/nft_byteorder.c @@ -88,9 +88,9 @@ void nft_byteorder_eval(const struct nft_expr *expr, static const struct nla_policy nft_byteorder_policy[NFTA_BYTEORDER_MAX + 1] = { [NFTA_BYTEORDER_SREG] = { .type = NLA_U32 }, [NFTA_BYTEORDER_DREG] = { .type = NLA_U32 }, - [NFTA_BYTEORDER_OP] = { .type = NLA_U32 }, - [NFTA_BYTEORDER_LEN] = { .type = NLA_U32 }, - [NFTA_BYTEORDER_SIZE] = { .type = NLA_U32 }, + [NFTA_BYTEORDER_OP] = NLA_POLICY_MAX(NLA_BE32, 255), + [NFTA_BYTEORDER_LEN] = NLA_POLICY_MAX(NLA_BE32, 255), + [NFTA_BYTEORDER_SIZE] = NLA_POLICY_MAX(NLA_BE32, 255), }; static int nft_byteorder_init(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index b9c84499438b..38958e067aa8 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -332,7 +332,7 @@ static void nft_ct_set_eval(const struct nft_expr *expr, static const struct nla_policy nft_ct_policy[NFTA_CT_MAX + 1] = { [NFTA_CT_DREG] = { .type = NLA_U32 }, - [NFTA_CT_KEY] = { .type = NLA_U32 }, + [NFTA_CT_KEY] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_CT_DIRECTION] = { .type = NLA_U8 }, [NFTA_CT_SREG] = { .type = NLA_U32 }, }; diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index bd19c7aec92e..4fb34d76dbea 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -148,7 +148,7 @@ static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = { [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING, .len = NFT_SET_MAXNAMELEN - 1 }, [NFTA_DYNSET_SET_ID] = { .type = NLA_U32 }, - [NFTA_DYNSET_OP] = { .type = NLA_U32 }, + [NFTA_DYNSET_OP] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 }, [NFTA_DYNSET_SREG_DATA] = { .type = NLA_U32 }, [NFTA_DYNSET_TIMEOUT] = { .type = NLA_U64 }, diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index 671474e59817..7f856ceb3a66 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -487,9 +487,9 @@ static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { [NFTA_EXTHDR_DREG] = { .type = NLA_U32 }, [NFTA_EXTHDR_TYPE] = { .type = NLA_U8 }, [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 }, - [NFTA_EXTHDR_LEN] = 
{ .type = NLA_U32 }, + [NFTA_EXTHDR_LEN] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 }, - [NFTA_EXTHDR_OP] = { .type = NLA_U32 }, + [NFTA_EXTHDR_OP] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_EXTHDR_SREG] = { .type = NLA_U32 }, }; diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c index 7b9d4d1bd17c..a5268e6dd32f 100644 --- a/net/netfilter/nft_fwd_netdev.c +++ b/net/netfilter/nft_fwd_netdev.c @@ -40,7 +40,7 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr, static const struct nla_policy nft_fwd_netdev_policy[NFTA_FWD_MAX + 1] = { [NFTA_FWD_SREG_DEV] = { .type = NLA_U32 }, [NFTA_FWD_SREG_ADDR] = { .type = NLA_U32 }, - [NFTA_FWD_NFPROTO] = { .type = NLA_U32 }, + [NFTA_FWD_NFPROTO] = NLA_POLICY_MAX(NLA_BE32, 255), }; static int nft_fwd_netdev_init(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index ee8d487b69c0..92d47e469204 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -59,7 +59,7 @@ static void nft_symhash_eval(const struct nft_expr *expr, static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = { [NFTA_HASH_SREG] = { .type = NLA_U32 }, [NFTA_HASH_DREG] = { .type = NLA_U32 }, - [NFTA_HASH_LEN] = { .type = NLA_U32 }, + [NFTA_HASH_LEN] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_HASH_MODULUS] = { .type = NLA_U32 }, [NFTA_HASH_SEED] = { .type = NLA_U32 }, [NFTA_HASH_OFFSET] = { .type = NLA_U32 }, diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index e384e0de7a54..8fdc7318c03c 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -458,7 +458,7 @@ EXPORT_SYMBOL_GPL(nft_meta_set_eval); const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = { [NFTA_META_DREG] = { .type = NLA_U32 }, - [NFTA_META_KEY] = { .type = NLA_U32 }, + [NFTA_META_KEY] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_META_SREG] = { .type = NLA_U32 }, }; EXPORT_SYMBOL_GPL(nft_meta_policy); diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c index 0566d6aaf1e5..51ae64cd268f 100644 --- a/net/netfilter/nft_range.c +++ b/net/netfilter/nft_range.c @@ -42,7 +42,7 @@ void nft_range_eval(const struct nft_expr *expr, static const struct nla_policy nft_range_policy[NFTA_RANGE_MAX + 1] = { [NFTA_RANGE_SREG] = { .type = NLA_U32 }, - [NFTA_RANGE_OP] = { .type = NLA_U32 }, + [NFTA_RANGE_OP] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_RANGE_FROM_DATA] = { .type = NLA_NESTED }, [NFTA_RANGE_TO_DATA] = { .type = NLA_NESTED }, }; diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c index f2addc844dd2..ed2e668474d6 100644 --- a/net/netfilter/nft_reject.c +++ b/net/netfilter/nft_reject.c @@ -18,7 +18,7 @@ #include const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = { - [NFTA_REJECT_TYPE] = { .type = NLA_U32 }, + [NFTA_REJECT_TYPE] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_REJECT_ICMP_CODE] = { .type = NLA_U8 }, }; EXPORT_SYMBOL_GPL(nft_reject_policy); diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c index 5990fdd7b3cc..35a2c28caa60 100644 --- a/net/netfilter/nft_rt.c +++ b/net/netfilter/nft_rt.c @@ -104,7 +104,7 @@ err: static const struct nla_policy nft_rt_policy[NFTA_RT_MAX + 1] = { [NFTA_RT_DREG] = { .type = NLA_U32 }, - [NFTA_RT_KEY] = { .type = NLA_U32 }, + [NFTA_RT_KEY] = NLA_POLICY_MAX(NLA_BE32, 255), }; static int nft_rt_get_init(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index 85f8df87efda..84def74698b7 100644 --- a/net/netfilter/nft_socket.c +++ 
b/net/netfilter/nft_socket.c @@ -138,9 +138,9 @@ static void nft_socket_eval(const struct nft_expr *expr, } static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = { - [NFTA_SOCKET_KEY] = { .type = NLA_U32 }, + [NFTA_SOCKET_KEY] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_SOCKET_DREG] = { .type = NLA_U32 }, - [NFTA_SOCKET_LEVEL] = { .type = NLA_U32 }, + [NFTA_SOCKET_LEVEL] = NLA_POLICY_MAX(NLA_BE32, 255), }; static int nft_socket_init(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_tproxy.c b/net/netfilter/nft_tproxy.c index ea83f661417e..ae15cd693f0e 100644 --- a/net/netfilter/nft_tproxy.c +++ b/net/netfilter/nft_tproxy.c @@ -183,7 +183,7 @@ static void nft_tproxy_eval(const struct nft_expr *expr, } static const struct nla_policy nft_tproxy_policy[NFTA_TPROXY_MAX + 1] = { - [NFTA_TPROXY_FAMILY] = { .type = NLA_U32 }, + [NFTA_TPROXY_FAMILY] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_TPROXY_REG_ADDR] = { .type = NLA_U32 }, [NFTA_TPROXY_REG_PORT] = { .type = NLA_U32 }, }; diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index b059aa541798..9f21953c7433 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -66,9 +66,9 @@ static void nft_tunnel_get_eval(const struct nft_expr *expr, } static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = { - [NFTA_TUNNEL_KEY] = { .type = NLA_U32 }, + [NFTA_TUNNEL_KEY] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_TUNNEL_DREG] = { .type = NLA_U32 }, - [NFTA_TUNNEL_MODE] = { .type = NLA_U32 }, + [NFTA_TUNNEL_MODE] = NLA_POLICY_MAX(NLA_BE32, 255), }; static int nft_tunnel_get_init(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c index c88fd078a9ae..452f8587adda 100644 --- a/net/netfilter/nft_xfrm.c +++ b/net/netfilter/nft_xfrm.c @@ -16,9 +16,9 @@ #include static const struct nla_policy nft_xfrm_policy[NFTA_XFRM_MAX + 1] = { - [NFTA_XFRM_KEY] = { .type = NLA_U32 }, + [NFTA_XFRM_KEY] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_XFRM_DIR] = { .type = NLA_U8 }, - [NFTA_XFRM_SPNUM] = { .type = NLA_U32 }, + [NFTA_XFRM_SPNUM] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_XFRM_DREG] = { .type = NLA_U32 }, }; -- cgit v1.2.3 From 6709d4b7bc2e079241fdef15d1160581c5261c10 Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Sun, 25 Jun 2023 17:10:07 +0800 Subject: net: nfc: Fix use-after-free caused by nfc_llcp_find_local This commit fixes several use-after-free that caused by function nfc_llcp_find_local(). For example, one UAF can happen when below buggy time window occurs. // nfc_genl_llc_get_params | // nfc_unregister_device | dev = nfc_get_device(idx); | device_lock(...) if (!dev) | dev->shutting_down = true; return -ENODEV; | device_unlock(...); | device_lock(...); | // nfc_llcp_unregister_device | nfc_llcp_find_local() nfc_llcp_find_local(...); | | local_cleanup() if (!local) { | rc = -ENODEV; | // nfc_llcp_local_put goto exit; | kref_put(.., local_release) } | | // local_release | list_del(&local->list) // nfc_genl_send_params | kfree() local->dev->idx !!!UAF!!! 
| | and the crash trace for the one of the discussed UAF like: BUG: KASAN: slab-use-after-free in nfc_genl_llc_get_params+0x72f/0x780 net/nfc/netlink.c:1045 Read of size 8 at addr ffff888105b0e410 by task 20114 Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x72/0xa0 lib/dump_stack.c:106 print_address_description mm/kasan/report.c:319 [inline] print_report+0xcc/0x620 mm/kasan/report.c:430 kasan_report+0xb2/0xe0 mm/kasan/report.c:536 nfc_genl_send_params net/nfc/netlink.c:999 [inline] nfc_genl_llc_get_params+0x72f/0x780 net/nfc/netlink.c:1045 genl_family_rcv_msg_doit.isra.0+0x1ee/0x2e0 net/netlink/genetlink.c:968 genl_family_rcv_msg net/netlink/genetlink.c:1048 [inline] genl_rcv_msg+0x503/0x7d0 net/netlink/genetlink.c:1065 netlink_rcv_skb+0x161/0x430 net/netlink/af_netlink.c:2548 genl_rcv+0x28/0x40 net/netlink/genetlink.c:1076 netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline] netlink_unicast+0x644/0x900 net/netlink/af_netlink.c:1365 netlink_sendmsg+0x934/0xe70 net/netlink/af_netlink.c:1913 sock_sendmsg_nosec net/socket.c:724 [inline] sock_sendmsg+0x1b6/0x200 net/socket.c:747 ____sys_sendmsg+0x6e9/0x890 net/socket.c:2501 ___sys_sendmsg+0x110/0x1b0 net/socket.c:2555 __sys_sendmsg+0xf7/0x1d0 net/socket.c:2584 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3f/0x90 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x72/0xdc RIP: 0033:0x7f34640a2389 RSP: 002b:00007f3463415168 EFLAGS: 00000246 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 00007f34641c1f80 RCX: 00007f34640a2389 RDX: 0000000000000000 RSI: 0000000020000240 RDI: 0000000000000006 RBP: 00007f34640ed493 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 R13: 00007ffe38449ecf R14: 00007f3463415300 R15: 0000000000022000 Allocated by task 20116: kasan_save_stack+0x22/0x50 mm/kasan/common.c:45 kasan_set_track+0x25/0x30 mm/kasan/common.c:52 ____kasan_kmalloc mm/kasan/common.c:374 [inline] __kasan_kmalloc+0x7f/0x90 mm/kasan/common.c:383 kmalloc include/linux/slab.h:580 [inline] kzalloc include/linux/slab.h:720 [inline] nfc_llcp_register_device+0x49/0xa40 net/nfc/llcp_core.c:1567 nfc_register_device+0x61/0x260 net/nfc/core.c:1124 nci_register_device+0x776/0xb20 net/nfc/nci/core.c:1257 virtual_ncidev_open+0x147/0x230 drivers/nfc/virtual_ncidev.c:148 misc_open+0x379/0x4a0 drivers/char/misc.c:165 chrdev_open+0x26c/0x780 fs/char_dev.c:414 do_dentry_open+0x6c4/0x12a0 fs/open.c:920 do_open fs/namei.c:3560 [inline] path_openat+0x24fe/0x37e0 fs/namei.c:3715 do_filp_open+0x1ba/0x410 fs/namei.c:3742 do_sys_openat2+0x171/0x4c0 fs/open.c:1356 do_sys_open fs/open.c:1372 [inline] __do_sys_openat fs/open.c:1388 [inline] __se_sys_openat fs/open.c:1383 [inline] __x64_sys_openat+0x143/0x200 fs/open.c:1383 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3f/0x90 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x72/0xdc Freed by task 20115: kasan_save_stack+0x22/0x50 mm/kasan/common.c:45 kasan_set_track+0x25/0x30 mm/kasan/common.c:52 kasan_save_free_info+0x2e/0x50 mm/kasan/generic.c:521 ____kasan_slab_free mm/kasan/common.c:236 [inline] ____kasan_slab_free mm/kasan/common.c:200 [inline] __kasan_slab_free+0x10a/0x190 mm/kasan/common.c:244 kasan_slab_free include/linux/kasan.h:162 [inline] slab_free_hook mm/slub.c:1781 [inline] slab_free_freelist_hook mm/slub.c:1807 [inline] slab_free mm/slub.c:3787 [inline] __kmem_cache_free+0x7a/0x190 mm/slub.c:3800 local_release net/nfc/llcp_core.c:174 [inline] kref_put 
include/linux/kref.h:65 [inline] nfc_llcp_local_put net/nfc/llcp_core.c:182 [inline] nfc_llcp_local_put net/nfc/llcp_core.c:177 [inline] nfc_llcp_unregister_device+0x206/0x290 net/nfc/llcp_core.c:1620 nfc_unregister_device+0x160/0x1d0 net/nfc/core.c:1179 virtual_ncidev_close+0x52/0xa0 drivers/nfc/virtual_ncidev.c:163 __fput+0x252/0xa20 fs/file_table.c:321 task_work_run+0x174/0x270 kernel/task_work.c:179 resume_user_mode_work include/linux/resume_user_mode.h:49 [inline] exit_to_user_mode_loop kernel/entry/common.c:171 [inline] exit_to_user_mode_prepare+0x108/0x110 kernel/entry/common.c:204 __syscall_exit_to_user_mode_work kernel/entry/common.c:286 [inline] syscall_exit_to_user_mode+0x21/0x50 kernel/entry/common.c:297 do_syscall_64+0x4c/0x90 arch/x86/entry/common.c:86 entry_SYSCALL_64_after_hwframe+0x72/0xdc Last potentially related work creation: kasan_save_stack+0x22/0x50 mm/kasan/common.c:45 __kasan_record_aux_stack+0x95/0xb0 mm/kasan/generic.c:491 kvfree_call_rcu+0x29/0xa80 kernel/rcu/tree.c:3328 drop_sysctl_table+0x3be/0x4e0 fs/proc/proc_sysctl.c:1735 unregister_sysctl_table.part.0+0x9c/0x190 fs/proc/proc_sysctl.c:1773 unregister_sysctl_table+0x24/0x30 fs/proc/proc_sysctl.c:1753 neigh_sysctl_unregister+0x5f/0x80 net/core/neighbour.c:3895 addrconf_notify+0x140/0x17b0 net/ipv6/addrconf.c:3684 notifier_call_chain+0xbe/0x210 kernel/notifier.c:87 call_netdevice_notifiers_info+0xb5/0x150 net/core/dev.c:1937 call_netdevice_notifiers_extack net/core/dev.c:1975 [inline] call_netdevice_notifiers net/core/dev.c:1989 [inline] dev_change_name+0x3c3/0x870 net/core/dev.c:1211 dev_ifsioc+0x800/0xf70 net/core/dev_ioctl.c:376 dev_ioctl+0x3d9/0xf80 net/core/dev_ioctl.c:542 sock_do_ioctl+0x160/0x260 net/socket.c:1213 sock_ioctl+0x3f9/0x670 net/socket.c:1316 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:870 [inline] __se_sys_ioctl fs/ioctl.c:856 [inline] __x64_sys_ioctl+0x19e/0x210 fs/ioctl.c:856 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3f/0x90 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x72/0xdc The buggy address belongs to the object at ffff888105b0e400 which belongs to the cache kmalloc-1k of size 1024 The buggy address is located 16 bytes inside of freed 1024-byte region [ffff888105b0e400, ffff888105b0e800) The buggy address belongs to the physical page: head:ffffea000416c200 order:3 entire_mapcount:0 nr_pages_mapped:0 pincount:0 flags: 0x200000000010200(slab|head|node=0|zone=2) raw: 0200000000010200 ffff8881000430c0 ffffea00044c7010 ffffea0004510e10 raw: 0000000000000000 00000000000a000a 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff888105b0e300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffff888105b0e380: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc >ffff888105b0e400: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff888105b0e480: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff888105b0e500: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb In summary, this patch solves those use-after-free by 1. Re-implement the nfc_llcp_find_local(). The current version does not grab the reference when getting the local from the linked list. For example, the llcp_sock_bind() gets the reference like below: // llcp_sock_bind() local = nfc_llcp_find_local(dev); // A ..... \ | raceable ..... 
/ llcp_sock->local = nfc_llcp_local_get(local); // B There is an apparent race window that one can drop the reference and free the local object fetched in (A) before (B) gets the reference. 2. Some callers of the nfc_llcp_find_local() do not grab the reference at all. For example, the nfc_genl_llc_{{get/set}_params/sdreq} functions. We add the nfc_llcp_local_put() for them. Moreover, we add the necessary error handling function to put the reference. 3. Add the nfc_llcp_remove_local() helper. The local object is removed from the linked list in local_release() when all reference is gone. This patch removes it when nfc_llcp_unregister_device() is called. Therefore, every caller of nfc_llcp_find_local() will get a reference even when the nfc_llcp_unregister_device() is called. This promises no use-after-free for the local object is ever possible. Fixes: 52feb444a903 ("NFC: Extend netlink interface for LTO, RW, and MIUX parameters support") Fixes: c7aa12252f51 ("NFC: Take a reference on the LLCP local pointer when creating a socket") Signed-off-by: Lin Ma Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- net/nfc/llcp.h | 1 - net/nfc/llcp_commands.c | 12 +++++++++--- net/nfc/llcp_core.c | 49 ++++++++++++++++++++++++++++++++++++++++++------- net/nfc/llcp_sock.c | 18 ++++++++++-------- net/nfc/netlink.c | 20 +++++++++++++++----- net/nfc/nfc.h | 1 + 6 files changed, 77 insertions(+), 24 deletions(-) (limited to 'net') diff --git a/net/nfc/llcp.h b/net/nfc/llcp.h index c1d9be636933..d8345ed57c95 100644 --- a/net/nfc/llcp.h +++ b/net/nfc/llcp.h @@ -201,7 +201,6 @@ void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s); void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s); void nfc_llcp_socket_remote_param_init(struct nfc_llcp_sock *sock); struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev); -struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local); int nfc_llcp_local_put(struct nfc_llcp_local *local); u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local, struct nfc_llcp_sock *sock); diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index 41e3a20c8935..5d2d4bc26ef9 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -359,6 +359,7 @@ int nfc_llcp_send_symm(struct nfc_dev *dev) struct sk_buff *skb; struct nfc_llcp_local *local; u16 size = 0; + int err; local = nfc_llcp_find_local(dev); if (local == NULL) @@ -368,8 +369,10 @@ int nfc_llcp_send_symm(struct nfc_dev *dev) size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE; skb = alloc_skb(size, GFP_KERNEL); - if (skb == NULL) - return -ENOMEM; + if (skb == NULL) { + err = -ENOMEM; + goto out; + } skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); @@ -379,8 +382,11 @@ int nfc_llcp_send_symm(struct nfc_dev *dev) nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX); - return nfc_data_exchange(dev, local->target_idx, skb, + err = nfc_data_exchange(dev, local->target_idx, skb, nfc_llcp_recv, local); +out: + nfc_llcp_local_put(local); + return err; } int nfc_llcp_send_connect(struct nfc_llcp_sock *sock) diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index a27e1842b2a0..f60e424e0607 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c @@ -17,6 +17,8 @@ static u8 llcp_magic[3] = {0x46, 0x66, 0x6d}; static LIST_HEAD(llcp_devices); +/* Protects llcp_devices list */ +static DEFINE_SPINLOCK(llcp_devices_lock); static void nfc_llcp_rx_skb(struct nfc_llcp_local *local, struct sk_buff *skb); @@ -141,7 +143,7 @@ static void 
nfc_llcp_socket_release(struct nfc_llcp_local *local, bool device, write_unlock(&local->raw_sockets.lock); } -struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) +static struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) { kref_get(&local->ref); @@ -169,7 +171,6 @@ static void local_release(struct kref *ref) local = container_of(ref, struct nfc_llcp_local, ref); - list_del(&local->list); local_cleanup(local); kfree(local); } @@ -282,12 +283,33 @@ static void nfc_llcp_sdreq_timer(struct timer_list *t) struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev) { struct nfc_llcp_local *local; + struct nfc_llcp_local *res = NULL; + spin_lock(&llcp_devices_lock); list_for_each_entry(local, &llcp_devices, list) - if (local->dev == dev) + if (local->dev == dev) { + res = nfc_llcp_local_get(local); + break; + } + spin_unlock(&llcp_devices_lock); + + return res; +} + +static struct nfc_llcp_local *nfc_llcp_remove_local(struct nfc_dev *dev) +{ + struct nfc_llcp_local *local, *tmp; + + spin_lock(&llcp_devices_lock); + list_for_each_entry_safe(local, tmp, &llcp_devices, list) + if (local->dev == dev) { + list_del(&local->list); + spin_unlock(&llcp_devices_lock); return local; + } + spin_unlock(&llcp_devices_lock); - pr_debug("No device found\n"); + pr_warn("Shutting down device not found\n"); return NULL; } @@ -608,12 +630,15 @@ u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len) *general_bytes_len = local->gb_len; + nfc_llcp_local_put(local); + return local->gb; } int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len) { struct nfc_llcp_local *local; + int err; if (gb_len < 3 || gb_len > NFC_MAX_GT_LEN) return -EINVAL; @@ -630,12 +655,16 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len) if (memcmp(local->remote_gb, llcp_magic, 3)) { pr_err("MAC does not support LLCP\n"); - return -EINVAL; + err = -EINVAL; + goto out; } - return nfc_llcp_parse_gb_tlv(local, + err = nfc_llcp_parse_gb_tlv(local, &local->remote_gb[3], local->remote_gb_len - 3); +out: + nfc_llcp_local_put(local); + return err; } static u8 nfc_llcp_dsap(const struct sk_buff *pdu) @@ -1517,6 +1546,8 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb) __nfc_llcp_recv(local, skb); + nfc_llcp_local_put(local); + return 0; } @@ -1533,6 +1564,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev) /* Close and purge all existing sockets */ nfc_llcp_socket_release(local, true, 0); + + nfc_llcp_local_put(local); } void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, @@ -1558,6 +1591,8 @@ void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, mod_timer(&local->link_timer, jiffies + msecs_to_jiffies(local->remote_lto)); } + + nfc_llcp_local_put(local); } int nfc_llcp_register_device(struct nfc_dev *ndev) @@ -1608,7 +1643,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev) void nfc_llcp_unregister_device(struct nfc_dev *dev) { - struct nfc_llcp_local *local = nfc_llcp_find_local(dev); + struct nfc_llcp_local *local = nfc_llcp_remove_local(dev); if (local == NULL) { pr_debug("No such device\n"); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 77642d18a3b4..645677f84dba 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -99,7 +99,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) } llcp_sock->dev = dev; - llcp_sock->local = nfc_llcp_local_get(local); + llcp_sock->local = local; llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; 
llcp_sock->service_name_len = min_t(unsigned int, llcp_addr.service_name_len, @@ -186,7 +186,7 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr, } llcp_sock->dev = dev; - llcp_sock->local = nfc_llcp_local_get(local); + llcp_sock->local = local; llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; nfc_llcp_sock_link(&local->raw_sockets, sk); @@ -696,22 +696,22 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, if (dev->dep_link_up == false) { ret = -ENOLINK; device_unlock(&dev->dev); - goto put_dev; + goto sock_llcp_put_local; } device_unlock(&dev->dev); if (local->rf_mode == NFC_RF_INITIATOR && addr->target_idx != local->target_idx) { ret = -ENOLINK; - goto put_dev; + goto sock_llcp_put_local; } llcp_sock->dev = dev; - llcp_sock->local = nfc_llcp_local_get(local); + llcp_sock->local = local; llcp_sock->ssap = nfc_llcp_get_local_ssap(local); if (llcp_sock->ssap == LLCP_SAP_MAX) { ret = -ENOMEM; - goto sock_llcp_put_local; + goto sock_llcp_nullify; } llcp_sock->reserved_ssap = llcp_sock->ssap; @@ -757,11 +757,13 @@ sock_unlink: sock_llcp_release: nfc_llcp_put_ssap(local, llcp_sock->ssap); -sock_llcp_put_local: - nfc_llcp_local_put(llcp_sock->local); +sock_llcp_nullify: llcp_sock->local = NULL; llcp_sock->dev = NULL; +sock_llcp_put_local: + nfc_llcp_local_put(local); + put_dev: nfc_put_device(dev); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index b9264e730fd9..e9ac6a6f934e 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1039,11 +1039,14 @@ static int nfc_genl_llc_get_params(struct sk_buff *skb, struct genl_info *info) msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { rc = -ENOMEM; - goto exit; + goto put_local; } rc = nfc_genl_send_params(msg, local, info->snd_portid, info->snd_seq); +put_local: + nfc_llcp_local_put(local); + exit: device_unlock(&dev->dev); @@ -1105,7 +1108,7 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NFC_ATTR_LLC_PARAM_LTO]) { if (dev->dep_link_up) { rc = -EINPROGRESS; - goto exit; + goto put_local; } local->lto = nla_get_u8(info->attrs[NFC_ATTR_LLC_PARAM_LTO]); @@ -1117,6 +1120,9 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info) if (info->attrs[NFC_ATTR_LLC_PARAM_MIUX]) local->miux = cpu_to_be16(miux); +put_local: + nfc_llcp_local_put(local); + exit: device_unlock(&dev->dev); @@ -1172,7 +1178,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info) if (rc != 0) { rc = -EINVAL; - goto exit; + goto put_local; } if (!sdp_attrs[NFC_SDP_ATTR_URI]) @@ -1191,7 +1197,7 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info) sdreq = nfc_llcp_build_sdreq_tlv(tid, uri, uri_len); if (sdreq == NULL) { rc = -ENOMEM; - goto exit; + goto put_local; } tlvs_len += sdreq->tlv_len; @@ -1201,10 +1207,14 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info) if (hlist_empty(&sdreq_list)) { rc = -EINVAL; - goto exit; + goto put_local; } rc = nfc_llcp_send_snl_sdreq(local, &sdreq_list, tlvs_len); + +put_local: + nfc_llcp_local_put(local); + exit: device_unlock(&dev->dev); diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h index de2ec66d7e83..0b1e6466f4fb 100644 --- a/net/nfc/nfc.h +++ b/net/nfc/nfc.h @@ -52,6 +52,7 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, const u8 *gb, u8 gb_len); u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len); int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb); struct nfc_llcp_local 
*nfc_llcp_find_local(struct nfc_dev *dev); +int nfc_llcp_local_put(struct nfc_llcp_local *local); int __init nfc_llcp_init(void); void nfc_llcp_exit(void); void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp); -- cgit v1.2.3 From ff0a3a7d52ff7282dbd183e7fc29a1fe386b0c30 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 21 Jun 2023 17:56:53 +0200 Subject: netfilter: conntrack: dccp: copy entire header to stack buffer, not just basic one Eric Dumazet says: nf_conntrack_dccp_packet() has an unique: dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); And nothing more is 'pulled' from the packet, depending on the content. dh->dccph_doff, and/or dh->dccph_x ...) So dccp_ack_seq() is happily reading stuff past the _dh buffer. BUG: KASAN: stack-out-of-bounds in nf_conntrack_dccp_packet+0x1134/0x11c0 Read of size 4 at addr ffff000128f66e0c by task syz-executor.2/29371 [..] Fix this by increasing the stack buffer to also include room for the extra sequence numbers and all the known dccp packet type headers, then pull again after the initial validation of the basic header. While at it, mark packets invalid that lack 48bit sequence bit but where RFC says the type MUST use them. Compile tested only. v2: first skb_header_pointer() now needs to adjust the size to only pull the generic header. (Eric) Heads-up: I intend to remove dccp conntrack support later this year. Fixes: 2bc780499aa3 ("[NETFILTER]: nf_conntrack: add DCCP protocol support") Reported-by: Eric Dumazet Signed-off-by: Florian Westphal Reviewed-by: Eric Dumazet Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_proto_dccp.c | 52 +++++++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index c1557d47ccd1..d4fd626d2b8c 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -432,9 +432,19 @@ static bool dccp_error(const struct dccp_hdr *dh, struct sk_buff *skb, unsigned int dataoff, const struct nf_hook_state *state) { + static const unsigned long require_seq48 = 1 << DCCP_PKT_REQUEST | + 1 << DCCP_PKT_RESPONSE | + 1 << DCCP_PKT_CLOSEREQ | + 1 << DCCP_PKT_CLOSE | + 1 << DCCP_PKT_RESET | + 1 << DCCP_PKT_SYNC | + 1 << DCCP_PKT_SYNCACK; unsigned int dccp_len = skb->len - dataoff; unsigned int cscov; const char *msg; + u8 type; + + BUILD_BUG_ON(DCCP_PKT_INVALID >= BITS_PER_LONG); if (dh->dccph_doff * 4 < sizeof(struct dccp_hdr) || dh->dccph_doff * 4 > dccp_len) { @@ -459,34 +469,70 @@ static bool dccp_error(const struct dccp_hdr *dh, goto out_invalid; } - if (dh->dccph_type >= DCCP_PKT_INVALID) { + type = dh->dccph_type; + if (type >= DCCP_PKT_INVALID) { msg = "nf_ct_dccp: reserved packet type "; goto out_invalid; } + + if (test_bit(type, &require_seq48) && !dh->dccph_x) { + msg = "nf_ct_dccp: type lacks 48bit sequence numbers"; + goto out_invalid; + } + return false; out_invalid: nf_l4proto_log_invalid(skb, state, IPPROTO_DCCP, "%s", msg); return true; } +struct nf_conntrack_dccp_buf { + struct dccp_hdr dh; /* generic header part */ + struct dccp_hdr_ext ext; /* optional depending dh->dccph_x */ + union { /* depends on header type */ + struct dccp_hdr_ack_bits ack; + struct dccp_hdr_request req; + struct dccp_hdr_response response; + struct dccp_hdr_reset rst; + } u; +}; + +static struct dccp_hdr * +dccp_header_pointer(const struct sk_buff *skb, int offset, const struct dccp_hdr *dh, + struct nf_conntrack_dccp_buf *buf) +{ + 
unsigned int hdrlen = __dccp_hdr_len(dh); + + if (hdrlen > sizeof(*buf)) + return NULL; + + return skb_header_pointer(skb, offset, hdrlen, buf); +} + int nf_conntrack_dccp_packet(struct nf_conn *ct, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state) { enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - struct dccp_hdr _dh, *dh; + struct nf_conntrack_dccp_buf _dh; u_int8_t type, old_state, new_state; enum ct_dccp_roles role; unsigned int *timeouts; + struct dccp_hdr *dh; - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh); + dh = skb_header_pointer(skb, dataoff, sizeof(*dh), &_dh.dh); if (!dh) return NF_DROP; if (dccp_error(dh, skb, dataoff, state)) return -NF_ACCEPT; + /* pull again, including possible 48 bit sequences and subtype header */ + dh = dccp_header_pointer(skb, dataoff, dh, &_dh); + if (!dh) + return NF_DROP; + type = dh->dccph_type; if (!nf_ct_is_confirmed(ct) && !dccp_new(ct, skb, dh, state)) return -NF_ACCEPT; -- cgit v1.2.3 From f188d30087480eab421cd8ca552fb15f55d57f4d Mon Sep 17 00:00:00 2001 From: "Ilia.Gavrilov" Date: Fri, 23 Jun 2023 11:23:46 +0000 Subject: netfilter: nf_conntrack_sip: fix the ct_sip_parse_numerical_param() return value. ct_sip_parse_numerical_param() returns only 0 or 1 now. But process_register_request() and process_register_response() imply checking for a negative value if parsing of a numerical header parameter failed. The invocation in nf_nat_sip() looks correct: if (ct_sip_parse_numerical_param(...) > 0 && ...) { ... } Make the return value of the function ct_sip_parse_numerical_param() a tristate to fix all the cases a) return 1 if value is found; *val is set b) return 0 if value is not found; *val is unchanged c) return -1 on error; *val is undefined Found by InfoTeCS on behalf of Linux Verification Center (linuxtesting.org) with SVACE. Fixes: 0f32a40fc91a ("[NETFILTER]: nf_conntrack_sip: create signalling expectations") Signed-off-by: Ilia.Gavrilov Reviewed-by: Simon Horman Reviewed-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_sip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 77f5e82d8e3f..d0eac27f6ba0 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -611,7 +611,7 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, start += strlen(name); *val = simple_strtoul(start, &end, 0); if (start == end) - return 0; + return -1; if (matchoff && matchlen) { *matchoff = start - dptr; *matchlen = end - start; -- cgit v1.2.3 From 3e70489721b6c870252c9082c496703677240f53 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 26 Jun 2023 00:42:18 +0200 Subject: netfilter: nf_tables: unbind non-anonymous set if rule construction fails Otherwise a dangling reference to a rule object that is gone remains in the set binding list. 
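For context, the resulting error-path handling, reassembled from the two-line hunk below together with its surrounding context (the enclosing switch case label is not visible in the hunk, so it is omitted here), is roughly:

	nft_set_trans_unbind(ctx, set);
	if (nft_set_is_anonymous(set))
		nft_deactivate_next(ctx->net, set);
	else
		/* named sets outlive the failed rule, so the stale
		 * binding has to be unlinked explicitly
		 */
		list_del_rcu(&binding->list);

	set->use--;
	break;

An anonymous set is torn down along with the failed rule anyway; a non-anonymous set is not, which is why its entry in the binding list must be removed on this path.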
Fixes: 26b5a5712eb8 ("netfilter: nf_tables: add NFT_TRANS_PREPARE_ERROR to deal with bound set/chain") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net') diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 4c7937fd803f..1d64c163076a 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5343,6 +5343,8 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, nft_set_trans_unbind(ctx, set); if (nft_set_is_anonymous(set)) nft_deactivate_next(ctx->net, set); + else + list_del_rcu(&binding->list); set->use--; break; -- cgit v1.2.3 From b389139f12f287b8ed2e2628b72df89a081f0b59 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Mon, 26 Jun 2023 00:42:19 +0200 Subject: netfilter: nf_tables: fix underflow in chain reference counter Set element addition error path decrements reference counter on chains twice: once on element release and again via nft_data_release(). Then, d6b478666ffa ("netfilter: nf_tables: fix underflow in object reference counter") incorrectly fixed this by removing the stateful object reference count decrement. Restore the stateful object decrement as in b91d90368837 ("netfilter: nf_tables: fix leaking object reference count") and let nft_data_release() decrement the chain reference counter, so this is done only once. Fixes: d6b478666ffa ("netfilter: nf_tables: fix underflow in object reference counter") Fixes: 628bd3e49cba ("netfilter: nf_tables: drop map element references from preparation phase") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 1d64c163076a..c742918f22a4 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -6771,7 +6771,9 @@ err_set_full: err_element_clash: kfree(trans); err_elem_free: - nft_set_elem_destroy(set, elem.priv, true); + nf_tables_set_elem_destroy(ctx, set, elem.priv); + if (obj) + obj->use--; err_parse_data: if (nla[NFTA_SET_ELEM_DATA] != NULL) nft_data_release(&elem.data.val, desc.type); -- cgit v1.2.3 From 5da4d7b8e6dfd5bd7d61f7fe1338b1e5d5b4f76c Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 27 Jun 2023 14:49:48 +0100 Subject: libceph: Partially revert changes to support MSG_SPLICE_PAGES Fix the mishandling of MSG_DONTWAIT and also reinstates the per-page checking of the source pages (which might have come from a DIO write by userspace) by partially reverting the changes to support MSG_SPLICE_PAGES and doing things a little differently. In messenger_v1: (1) The ceph_tcp_sendpage() is resurrected and the callers reverted to use that. (2) The callers now pass MSG_MORE unconditionally. Previously, they were passing in MSG_MORE|MSG_SENDPAGE_NOTLAST and then degrading that to just MSG_MORE on the last call to ->sendpage(). (3) Make ceph_tcp_sendpage() a wrapper around sendmsg() rather than sendpage(), setting MSG_SPLICE_PAGES if sendpage_ok() returns true on the page. In messenger_v2: (4) Bring back do_try_sendpage() and make the callers use that. (5) Make do_try_sendpage() use sendmsg() for both cases and set MSG_SPLICE_PAGES if sendpage_ok() is set. 
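Condensed from the messenger_v1.c hunk below, the resurrected helper described in points (1)-(3) amounts to the following sketch (nothing here is new relative to the diff; the in-tree version carries a longer comment):

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int more)
{
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL | more,
	};
	struct bio_vec bvec;
	int ret;

	/* MSG_SPLICE_PAGES cannot handle pages with page_count == 0 or
	 * slab pages, so only request it when sendpage_ok() agrees and
	 * let an ordinary copying sendmsg() handle everything else.
	 */
	if (sendpage_ok(page))
		msg.msg_flags |= MSG_SPLICE_PAGES;

	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

	ret = sock_sendmsg(sock, &msg);
	if (ret == -EAGAIN)
		ret = 0;	/* callers treat 0 as "socket full, retry later" */

	return ret;
}

Per point (2), callers now pass MSG_MORE unconditionally instead of clearing it on the final chunk.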
Fixes: 40a8c17aa770 ("ceph: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage") Fixes: fa094ccae1e7 ("ceph: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage()") Reported-by: Ilya Dryomov Link: https://lore.kernel.org/r/CAOi1vP9vjLfk3W+AJFeexC93jqPaPUn2dD_4NrzxwoZTbYfOnw@mail.gmail.com/ Link: https://lore.kernel.org/r/CAOi1vP_Bn918j24S94MuGyn+Gxk212btw7yWeDrRcW1U8pc_BA@mail.gmail.com/ Signed-off-by: David Howells cc: Xiubo Li cc: Jeff Layton cc: Jens Axboe cc: Matthew Wilcox Link: https://lore.kernel.org/r/3101881.1687801973@warthog.procyon.org.uk/ # v1 Link: https://lore.kernel.org/r/3111635.1687813501@warthog.procyon.org.uk/ # v2 Reviewed-by: Ilya Dryomov Link: https://lore.kernel.org/r/3199652.1687873788@warthog.procyon.org.uk Signed-off-by: Jakub Kicinski --- net/ceph/messenger_v1.c | 58 ++++++++++++++++++++------------ net/ceph/messenger_v2.c | 88 +++++++++++++++++++++++++++++++++++++++---------- 2 files changed, 107 insertions(+), 39 deletions(-) (limited to 'net') diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c index 814579f27f04..3d57bb48a2b4 100644 --- a/net/ceph/messenger_v1.c +++ b/net/ceph/messenger_v1.c @@ -74,6 +74,39 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, return r; } +/* + * @more: MSG_MORE or 0. + */ +static int ceph_tcp_sendpage(struct socket *sock, struct page *page, + int offset, size_t size, int more) +{ + struct msghdr msg = { + .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL | more, + }; + struct bio_vec bvec; + int ret; + + /* + * MSG_SPLICE_PAGES cannot properly handle pages with page_count == 0, + * we need to fall back to sendmsg if that's the case. + * + * Same goes for slab pages: skb_can_coalesce() allows + * coalescing neighboring slab objects into a single frag which + * triggers one of hardened usercopy checks. + */ + if (sendpage_ok(page)) + msg.msg_flags |= MSG_SPLICE_PAGES; + + bvec_set_page(&bvec, page, size, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size); + + ret = sock_sendmsg(sock, &msg); + if (ret == -EAGAIN) + ret = 0; + + return ret; +} + static void con_out_kvec_reset(struct ceph_connection *con) { BUG_ON(con->v1.out_skip); @@ -450,10 +483,6 @@ static int write_partial_message_data(struct ceph_connection *con) */ crc = do_datacrc ? 
le32_to_cpu(msg->footer.data_crc) : 0; while (cursor->total_resid) { - struct bio_vec bvec; - struct msghdr msghdr = { - .msg_flags = MSG_SPLICE_PAGES, - }; struct page *page; size_t page_offset; size_t length; @@ -465,13 +494,8 @@ static int write_partial_message_data(struct ceph_connection *con) } page = ceph_msg_data_next(cursor, &page_offset, &length); - if (length != cursor->total_resid) - msghdr.msg_flags |= MSG_MORE; - - bvec_set_page(&bvec, page, length, page_offset); - iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, length); - - ret = sock_sendmsg(con->sock, &msghdr); + ret = ceph_tcp_sendpage(con->sock, page, page_offset, length, + MSG_MORE); if (ret <= 0) { if (do_datacrc) msg->footer.data_crc = cpu_to_le32(crc); @@ -501,22 +525,14 @@ static int write_partial_message_data(struct ceph_connection *con) */ static int write_partial_skip(struct ceph_connection *con) { - struct bio_vec bvec; - struct msghdr msghdr = { - .msg_flags = MSG_SPLICE_PAGES | MSG_MORE, - }; int ret; dout("%s %p %d left\n", __func__, con, con->v1.out_skip); while (con->v1.out_skip > 0) { size_t size = min(con->v1.out_skip, (int)PAGE_SIZE); - if (size == con->v1.out_skip) - msghdr.msg_flags &= ~MSG_MORE; - bvec_set_page(&bvec, ZERO_PAGE(0), size, 0); - iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size); - - ret = sock_sendmsg(con->sock, &msghdr); + ret = ceph_tcp_sendpage(con->sock, ceph_zero_page, 0, size, + MSG_MORE); if (ret <= 0) goto out; con->v1.out_skip -= ret; diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c index 87ac97073e75..1a888b86a494 100644 --- a/net/ceph/messenger_v2.c +++ b/net/ceph/messenger_v2.c @@ -117,38 +117,90 @@ static int ceph_tcp_recv(struct ceph_connection *con) return ret; } +static int do_sendmsg(struct socket *sock, struct iov_iter *it) +{ + struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS }; + int ret; + + msg.msg_iter = *it; + while (iov_iter_count(it)) { + ret = sock_sendmsg(sock, &msg); + if (ret <= 0) { + if (ret == -EAGAIN) + ret = 0; + return ret; + } + + iov_iter_advance(it, ret); + } + + WARN_ON(msg_data_left(&msg)); + return 1; +} + +static int do_try_sendpage(struct socket *sock, struct iov_iter *it) +{ + struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS }; + struct bio_vec bv; + int ret; + + if (WARN_ON(!iov_iter_is_bvec(it))) + return -EINVAL; + + while (iov_iter_count(it)) { + /* iov_iter_iovec() for ITER_BVEC */ + bvec_set_page(&bv, it->bvec->bv_page, + min(iov_iter_count(it), + it->bvec->bv_len - it->iov_offset), + it->bvec->bv_offset + it->iov_offset); + + /* + * MSG_SPLICE_PAGES cannot properly handle pages with + * page_count == 0, we need to fall back to sendmsg if + * that's the case. + * + * Same goes for slab pages: skb_can_coalesce() allows + * coalescing neighboring slab objects into a single frag + * which triggers one of hardened usercopy checks. + */ + if (sendpage_ok(bv.bv_page)) + msg.msg_flags |= MSG_SPLICE_PAGES; + else + msg.msg_flags &= ~MSG_SPLICE_PAGES; + + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len); + ret = sock_sendmsg(sock, &msg); + if (ret <= 0) { + if (ret == -EAGAIN) + ret = 0; + return ret; + } + + iov_iter_advance(it, ret); + } + + return 1; +} + /* * Write as much as possible. The socket is expected to be corked, * so we don't bother with MSG_MORE here. 
* * Return: - * >0 - done, nothing (else) to write + * 1 - done, nothing (else) to write * 0 - socket is full, need to wait * <0 - error */ static int ceph_tcp_send(struct ceph_connection *con) { - struct msghdr msg = { - .msg_iter = con->v2.out_iter, - .msg_flags = CEPH_MSG_FLAGS, - }; int ret; - if (WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter))) - return -EINVAL; - - if (con->v2.out_iter_sendpage) - msg.msg_flags |= MSG_SPLICE_PAGES; - dout("%s con %p have %zu try_sendpage %d\n", __func__, con, iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage); - - ret = sock_sendmsg(con->sock, &msg); - if (ret > 0) - iov_iter_advance(&con->v2.out_iter, ret); - else if (ret == -EAGAIN) - ret = 0; - + if (con->v2.out_iter_sendpage) + ret = do_try_sendpage(con->sock, &con->v2.out_iter); + else + ret = do_sendmsg(con->sock, &con->v2.out_iter); dout("%s con %p ret %d left %zu\n", __func__, con, ret, iov_iter_count(&con->v2.out_iter)); return ret; -- cgit v1.2.3 From 9d797ee2dce1e3e243bcc18dad7728df72fd11a4 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Mon, 26 Jun 2023 13:58:37 -0700 Subject: Revert "af_unix: Call scm_recv() only after scm_set_cred()." This reverts commit 3f5f118bb657f94641ea383c7c1b8c09a5d46ea2. Konrad reported that desktop environment below cannot be reached after commit 3f5f118bb657 ("af_unix: Call scm_recv() only after scm_set_cred().") - postmarketOS (Alpine Linux w/ musl 1.2.4) - busybox 1.36.1 - GNOME 44.1 - networkmanager 1.42.6 - openrc 0.47 Regarding to the warning of SO_PASSPIDFD, I'll post another patch to suppress it by skipping SCM_PIDFD if scm->pid == NULL in scm_pidfd_recv(). Reported-by: Konrad Dybcio Link: https://lore.kernel.org/netdev/8c7f9abd-4f84-7296-2788-1e130d6304a0@kernel.org/ Signed-off-by: Kuniyuki Iwashima Tested-by: Ido Schimmel Tested-by: Gal Pressman Link: https://lore.kernel.org/r/20230626205837.82086-1-kuniyu@amazon.com Signed-off-by: Jakub Kicinski --- net/unix/af_unix.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index f2f234f0b92c..3953daa2e1d0 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2807,7 +2807,7 @@ unlock: } while (size); mutex_unlock(&u->iolock); - if (state->msg && check_creds) + if (state->msg) scm_recv(sock, state->msg, &scm, flags); else scm_destroy(&scm); -- cgit v1.2.3 From d06f925f13976ab82167c93467c70a337a0a3cda Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 26 Jun 2023 18:44:02 +0300 Subject: net: dsa: avoid suspicious RCU usage for synced VLAN-aware MAC addresses When using the felix driver (the only one which supports UC filtering and MC filtering) as a DSA master for a random other DSA switch, one can see the following stack trace when the downstream switch ports join a VLAN-aware bridge: ============================= WARNING: suspicious RCU usage ----------------------------- net/8021q/vlan_core.c:238 suspicious rcu_dereference_protected() usage! stack backtrace: Workqueue: dsa_ordered dsa_slave_switchdev_event_work Call trace: lockdep_rcu_suspicious+0x170/0x210 vlan_for_each+0x8c/0x188 dsa_slave_sync_uc+0x128/0x178 __hw_addr_sync_dev+0x138/0x158 dsa_slave_set_rx_mode+0x58/0x70 __dev_set_rx_mode+0x88/0xa8 dev_uc_add+0x74/0xa0 dsa_port_bridge_host_fdb_add+0xec/0x180 dsa_slave_switchdev_event_work+0x7c/0x1c8 process_one_work+0x290/0x568 What it's saying is that vlan_for_each() expects rtnl_lock() context and it's not getting it, when it's called from the DSA master's ndo_set_rx_mode(). 
The caller of that - dsa_slave_set_rx_mode() - is the slave DSA interface's dsa_port_bridge_host_fdb_add() which comes from the deferred dsa_slave_switchdev_event_work(). We went to great lengths to avoid the rtnl_lock() context in that call path in commit 0faf890fc519 ("net: dsa: drop rtnl_lock from dsa_slave_switchdev_event_work"), and calling rtnl_lock() is simply not an option due to the possibility of deadlocking when calling dsa_flush_workqueue() from the call paths that do hold rtnl_lock() - basically all of them. So, when the DSA master calls vlan_for_each() from its ndo_set_rx_mode(), the state of the 8021q driver on this device is really not protected from concurrent access by anything. Looking at net/8021q/, I don't think that vlan_info->vid_list was particularly designed with RCU traversal in mind, so introducing an RCU read-side form of vlan_for_each() - vlan_for_each_rcu() - won't be so easy, and it also wouldn't be exactly what we need anyway. In general I believe that the solution isn't in net/8021q/ anyway; vlan_for_each() is not cut out for this task. DSA doesn't need rtnl_lock() to be held per se - since it's not a netdev state change that we're blocking, but rather, just concurrent additions/removals to a VLAN list. We don't even need sleepable context - the callback of vlan_for_each() just schedules deferred work. The proposed escape is to remove the dependency on vlan_for_each() and to open-code a non-sleepable, rtnl-free alternative to that, based on copies of the VLAN list modified from .ndo_vlan_rx_add_vid() and .ndo_vlan_rx_kill_vid(). Fixes: 64fdc5f341db ("net: dsa: sync unicast and multicast addresses for VLAN filters too") Signed-off-by: Vladimir Oltean Link: https://lore.kernel.org/r/20230626154402.3154454-1-vladimir.oltean@nxp.com Signed-off-by: Jakub Kicinski --- include/net/dsa.h | 12 ++++++-- net/dsa/dsa.c | 2 +- net/dsa/slave.c | 84 ++++++++++++++++++++++++++++++++++++++----------------- net/dsa/switch.c | 4 +-- net/dsa/switch.h | 3 ++ 5 files changed, 74 insertions(+), 31 deletions(-) (limited to 'net') diff --git a/include/net/dsa.h b/include/net/dsa.h index ab0f0a5b0860..197c5a6ca8f7 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -314,9 +314,17 @@ struct dsa_port { struct list_head fdbs; struct list_head mdbs; - /* List of VLANs that CPU and DSA ports are members of. */ struct mutex vlans_lock; - struct list_head vlans; + union { + /* List of VLANs that CPU and DSA ports are members of. + * Access to this is serialized by the sleepable @vlans_lock. + */ + struct list_head vlans; + /* List of VLANs that user ports are members of. + * Access to this is serialized by netif_addr_lock_bh(). 
+ */ + struct list_head user_vlans; + }; }; /* TODO: ideally DSA ports would have a single dp->link_dp member, diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 1afed89e03c0..ccbdb98109f8 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -1106,7 +1106,7 @@ static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index) mutex_init(&dp->vlans_lock); INIT_LIST_HEAD(&dp->fdbs); INIT_LIST_HEAD(&dp->mdbs); - INIT_LIST_HEAD(&dp->vlans); + INIT_LIST_HEAD(&dp->vlans); /* also initializes &dp->user_vlans */ INIT_LIST_HEAD(&dp->list); list_add_tail(&dp->list, &dst->ports); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 165bb2cb8431..527b1d576460 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -27,6 +27,7 @@ #include "master.h" #include "netlink.h" #include "slave.h" +#include "switch.h" #include "tag.h" struct dsa_switchdev_event_work { @@ -161,8 +162,7 @@ static int dsa_slave_schedule_standalone_work(struct net_device *dev, return 0; } -static int dsa_slave_host_vlan_rx_filtering(struct net_device *vdev, int vid, - void *arg) +static int dsa_slave_host_vlan_rx_filtering(void *arg, int vid) { struct dsa_host_vlan_rx_filtering_ctx *ctx = arg; @@ -170,6 +170,28 @@ static int dsa_slave_host_vlan_rx_filtering(struct net_device *vdev, int vid, ctx->addr, vid); } +static int dsa_slave_vlan_for_each(struct net_device *dev, + int (*cb)(void *arg, int vid), void *arg) +{ + struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_vlan *v; + int err; + + lockdep_assert_held(&dev->addr_list_lock); + + err = cb(arg, 0); + if (err) + return err; + + list_for_each_entry(v, &dp->user_vlans, list) { + err = cb(arg, v->vid); + if (err) + return err; + } + + return 0; +} + static int dsa_slave_sync_uc(struct net_device *dev, const unsigned char *addr) { @@ -180,18 +202,14 @@ static int dsa_slave_sync_uc(struct net_device *dev, .addr = addr, .event = DSA_UC_ADD, }; - int err; dev_uc_add(master, addr); if (!dsa_switch_supports_uc_filtering(dp->ds)) return 0; - err = dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, addr, 0); - if (err) - return err; - - return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx); + return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, + &ctx); } static int dsa_slave_unsync_uc(struct net_device *dev, @@ -204,18 +222,14 @@ static int dsa_slave_unsync_uc(struct net_device *dev, .addr = addr, .event = DSA_UC_DEL, }; - int err; dev_uc_del(master, addr); if (!dsa_switch_supports_uc_filtering(dp->ds)) return 0; - err = dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, addr, 0); - if (err) - return err; - - return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx); + return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, + &ctx); } static int dsa_slave_sync_mc(struct net_device *dev, @@ -228,18 +242,14 @@ static int dsa_slave_sync_mc(struct net_device *dev, .addr = addr, .event = DSA_MC_ADD, }; - int err; dev_mc_add(master, addr); if (!dsa_switch_supports_mc_filtering(dp->ds)) return 0; - err = dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, addr, 0); - if (err) - return err; - - return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx); + return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, + &ctx); } static int dsa_slave_unsync_mc(struct net_device *dev, @@ -252,18 +262,14 @@ static int dsa_slave_unsync_mc(struct net_device *dev, .addr = addr, .event = DSA_MC_DEL, }; - int err; dev_mc_del(master, addr); if (!dsa_switch_supports_mc_filtering(dp->ds)) return 0; - err = 
dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, addr, 0); - if (err) - return err; - - return vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, &ctx); + return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, + &ctx); } void dsa_slave_sync_ha(struct net_device *dev) @@ -1759,6 +1765,7 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto, struct netlink_ext_ack extack = {0}; struct dsa_switch *ds = dp->ds; struct netdev_hw_addr *ha; + struct dsa_vlan *v; int ret; /* User port... */ @@ -1782,8 +1789,17 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto, !dsa_switch_supports_mc_filtering(ds)) return 0; + v = kzalloc(sizeof(*v), GFP_KERNEL); + if (!v) { + ret = -ENOMEM; + goto rollback; + } + netif_addr_lock_bh(dev); + v->vid = vid; + list_add_tail(&v->list, &dp->user_vlans); + if (dsa_switch_supports_mc_filtering(ds)) { netdev_for_each_synced_mc_addr(ha, dev) { dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, @@ -1803,6 +1819,12 @@ static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto, dsa_flush_workqueue(); return 0; + +rollback: + dsa_port_host_vlan_del(dp, &vlan); + dsa_port_vlan_del(dp, &vlan); + + return ret; } static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, @@ -1816,6 +1838,7 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, }; struct dsa_switch *ds = dp->ds; struct netdev_hw_addr *ha; + struct dsa_vlan *v; int err; err = dsa_port_vlan_del(dp, &vlan); @@ -1832,6 +1855,15 @@ static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, netif_addr_lock_bh(dev); + v = dsa_vlan_find(&dp->user_vlans, &vlan); + if (!v) { + netif_addr_unlock_bh(dev); + return -ENOENT; + } + + list_del(&v->list); + kfree(v); + if (dsa_switch_supports_mc_filtering(ds)) { netdev_for_each_synced_mc_addr(ha, dev) { dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, diff --git a/net/dsa/switch.c b/net/dsa/switch.c index 8c9a9f94b756..1a42f9317334 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -673,8 +673,8 @@ static bool dsa_port_host_vlan_match(struct dsa_port *dp, return false; } -static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list, - const struct switchdev_obj_port_vlan *vlan) +struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list, + const struct switchdev_obj_port_vlan *vlan) { struct dsa_vlan *v; diff --git a/net/dsa/switch.h b/net/dsa/switch.h index 15e67b95eb6e..ea034677da15 100644 --- a/net/dsa/switch.h +++ b/net/dsa/switch.h @@ -111,6 +111,9 @@ struct dsa_notifier_master_state_info { bool operational; }; +struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list, + const struct switchdev_obj_port_vlan *vlan); + int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v); int dsa_broadcast(unsigned long e, void *v); -- cgit v1.2.3 From 25a9c8a4431c364f97f75558cb346d2ad3f53fbb Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Mon, 26 Jun 2023 09:43:13 -0700 Subject: netlink: Add __sock_i_ino() for __netlink_diag_dump(). syzbot reported a warning in __local_bh_enable_ip(). [0] Commit 8d61f926d420 ("netlink: fix potential deadlock in netlink_set_err()") converted read_lock(&nl_table_lock) to read_lock_irqsave() in __netlink_diag_dump() to prevent a deadlock. However, __netlink_diag_dump() calls sock_i_ino() that uses read_lock_bh() and read_unlock_bh(). If CONFIG_TRACE_IRQFLAGS=y, read_unlock_bh() finally enables IRQ even though it should stay disabled until the following read_unlock_irqrestore(). 
Using read_lock() in sock_i_ino() would trigger a lockdep splat in another place that was fixed in commit f064af1e500a ("net: fix a lockdep splat"), so let's add __sock_i_ino() that would be safe to use under BH disabled. [0]: WARNING: CPU: 0 PID: 5012 at kernel/softirq.c:376 __local_bh_enable_ip+0xbe/0x130 kernel/softirq.c:376 Modules linked in: CPU: 0 PID: 5012 Comm: syz-executor487 Not tainted 6.4.0-rc7-syzkaller-00202-g6f68fc395f49 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/27/2023 RIP: 0010:__local_bh_enable_ip+0xbe/0x130 kernel/softirq.c:376 Code: 45 bf 01 00 00 00 e8 91 5b 0a 00 e8 3c 15 3d 00 fb 65 8b 05 ec e9 b5 7e 85 c0 74 58 5b 5d c3 65 8b 05 b2 b6 b4 7e 85 c0 75 a2 <0f> 0b eb 9e e8 89 15 3d 00 eb 9f 48 89 ef e8 6f 49 18 00 eb a8 0f RSP: 0018:ffffc90003a1f3d0 EFLAGS: 00010046 RAX: 0000000000000000 RBX: 0000000000000201 RCX: 1ffffffff1cf5996 RDX: 0000000000000000 RSI: 0000000000000201 RDI: ffffffff8805c6f3 RBP: ffffffff8805c6f3 R08: 0000000000000001 R09: ffff8880152b03a3 R10: ffffed1002a56074 R11: 0000000000000005 R12: 00000000000073e4 R13: dffffc0000000000 R14: 0000000000000002 R15: 0000000000000000 FS: 0000555556726300(0000) GS:ffff8880b9800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000000000045ad50 CR3: 000000007c646000 CR4: 00000000003506f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: sock_i_ino+0x83/0xa0 net/core/sock.c:2559 __netlink_diag_dump+0x45c/0x790 net/netlink/diag.c:171 netlink_diag_dump+0xd6/0x230 net/netlink/diag.c:207 netlink_dump+0x570/0xc50 net/netlink/af_netlink.c:2269 __netlink_dump_start+0x64b/0x910 net/netlink/af_netlink.c:2374 netlink_dump_start include/linux/netlink.h:329 [inline] netlink_diag_handler_dump+0x1ae/0x250 net/netlink/diag.c:238 __sock_diag_cmd net/core/sock_diag.c:238 [inline] sock_diag_rcv_msg+0x31e/0x440 net/core/sock_diag.c:269 netlink_rcv_skb+0x165/0x440 net/netlink/af_netlink.c:2547 sock_diag_rcv+0x2a/0x40 net/core/sock_diag.c:280 netlink_unicast_kernel net/netlink/af_netlink.c:1339 [inline] netlink_unicast+0x547/0x7f0 net/netlink/af_netlink.c:1365 netlink_sendmsg+0x925/0xe30 net/netlink/af_netlink.c:1914 sock_sendmsg_nosec net/socket.c:724 [inline] sock_sendmsg+0xde/0x190 net/socket.c:747 ____sys_sendmsg+0x71c/0x900 net/socket.c:2503 ___sys_sendmsg+0x110/0x1b0 net/socket.c:2557 __sys_sendmsg+0xf7/0x1c0 net/socket.c:2586 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x39/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd RIP: 0033:0x7f5303aaabb9 Code: 28 c3 e8 2a 14 00 00 66 2e 0f 1f 84 00 00 00 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 c0 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007ffc7506e548 EFLAGS: 00000246 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f5303aaabb9 RDX: 0000000000000000 RSI: 0000000020000180 RDI: 0000000000000003 RBP: 00007f5303a6ed60 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 00007f5303a6edf0 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 Fixes: 8d61f926d420 ("netlink: fix potential deadlock in netlink_set_err()") Reported-by: syzbot+5da61cf6a9bc1902d422@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?extid=5da61cf6a9bc1902d422 Suggested-by: Eric Dumazet Signed-off-by: Kuniyuki Iwashima Reviewed-by: 
Eric Dumazet Link: https://lore.kernel.org/r/20230626164313.52528-1-kuniyu@amazon.com Signed-off-by: Jakub Kicinski --- include/net/sock.h | 1 + net/core/sock.c | 17 ++++++++++++++--- net/netlink/diag.c | 2 +- 3 files changed, 16 insertions(+), 4 deletions(-) (limited to 'net') diff --git a/include/net/sock.h b/include/net/sock.h index 6f428a7f3567..ad468fe71413 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2100,6 +2100,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent) } kuid_t sock_i_uid(struct sock *sk); +unsigned long __sock_i_ino(struct sock *sk); unsigned long sock_i_ino(struct sock *sk); static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk) diff --git a/net/core/sock.c b/net/core/sock.c index 6e5662ca00fe..4a0edccf8606 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2550,13 +2550,24 @@ kuid_t sock_i_uid(struct sock *sk) } EXPORT_SYMBOL(sock_i_uid); -unsigned long sock_i_ino(struct sock *sk) +unsigned long __sock_i_ino(struct sock *sk) { unsigned long ino; - read_lock_bh(&sk->sk_callback_lock); + read_lock(&sk->sk_callback_lock); ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; - read_unlock_bh(&sk->sk_callback_lock); + read_unlock(&sk->sk_callback_lock); + return ino; +} +EXPORT_SYMBOL(__sock_i_ino); + +unsigned long sock_i_ino(struct sock *sk) +{ + unsigned long ino; + + local_bh_disable(); + ino = __sock_i_ino(sk); + local_bh_enable(); return ino; } EXPORT_SYMBOL(sock_i_ino); diff --git a/net/netlink/diag.c b/net/netlink/diag.c index 4143b2ea4195..e4f21b1067bc 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c @@ -168,7 +168,7 @@ mc_list: NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - sock_i_ino(sk)) < 0) { + __sock_i_ino(sk)) < 0) { ret = 1; break; } -- cgit v1.2.3 From a9c49cc2f5b578c4ffa0ee135aa552d06dec0e82 Mon Sep 17 00:00:00 2001 From: Alexander Mikhalitsyn Date: Tue, 27 Jun 2023 10:43:14 -0700 Subject: net: scm: introduce and use scm_recv_unix helper Recently, our friends from bluetooth subsystem reported [1] that after commit 5e2ff6704a27 ("scm: add SO_PASSPIDFD and SCM_PIDFD") scm_recv() helper become unusable in kernel modules (because it uses unexported pidfd_prepare() API). We were aware of this issue and workarounded it in a hard way by commit 97154bcf4d1b ("af_unix: Kconfig: make CONFIG_UNIX bool"). But recently a new functionality was added in the scope of commit 817efd3cad74 ("Bluetooth: hci_sock: Forward credentials to monitor") and after that bluetooth can't be compiled as a kernel module. After some discussion in [1] we decided to split scm_recv() into two helpers, one won't support SCM_PIDFD (used for unix sockets), and another one will be completely the same as it was before commit 5e2ff6704a27 ("scm: add SO_PASSPIDFD and SCM_PIDFD"). 
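Reduced to their post-patch shape (taken from the include/net/scm.h hunk below), the two helpers look like this:

static inline void scm_recv(struct socket *sock, struct msghdr *msg,
			    struct scm_cookie *scm, int flags)
{
	if (!__scm_recv_common(sock, msg, scm, flags))
		return;

	scm_destroy_cred(scm);
}

static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg,
				 struct scm_cookie *scm, int flags)
{
	if (!__scm_recv_common(sock, msg, scm, flags))
		return;

	/* only the AF_UNIX variant emits SCM_PIDFD */
	if (test_bit(SOCK_PASSPIDFD, &sock->flags))
		scm_pidfd_recv(msg, scm);

	scm_destroy_cred(scm);
}

scm_recv() therefore behaves exactly as it did before commit 5e2ff6704a27 and no longer drags in the unexported pidfd helpers, so modules such as bluetooth can keep using it, while AF_UNIX switches its receive paths to scm_recv_unix().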
Link: https://lore.kernel.org/lkml/CAJqdLrpFcga4n7wxBhsFqPQiN8PKFVr6U10fKcJ9W7AcZn+o6Q@mail.gmail.com/ [1] Fixes: 5e2ff6704a27 ("scm: add SO_PASSPIDFD and SCM_PIDFD") Signed-off-by: Alexander Mikhalitsyn Reviewed-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20230627174314.67688-3-kuniyu@amazon.com Signed-off-by: Jakub Kicinski --- include/net/scm.h | 35 +++++++++++++++++++++++++---------- net/unix/af_unix.c | 4 ++-- 2 files changed, 27 insertions(+), 12 deletions(-) (limited to 'net') diff --git a/include/net/scm.h b/include/net/scm.h index d456fc41b8bf..c5bcdf65f55c 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -153,8 +153,8 @@ static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm fd_install(pidfd, pidfd_file); } -static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, - struct scm_cookie *scm, int flags) +static inline bool __scm_recv_common(struct socket *sock, struct msghdr *msg, + struct scm_cookie *scm, int flags) { if (!msg->msg_control) { if (test_bit(SOCK_PASSCRED, &sock->flags) || @@ -162,7 +162,7 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, scm->fp || scm_has_secdata(sock)) msg->msg_flags |= MSG_CTRUNC; scm_destroy(scm); - return; + return false; } if (test_bit(SOCK_PASSCRED, &sock->flags)) { @@ -175,19 +175,34 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(ucreds), &ucreds); } - if (test_bit(SOCK_PASSPIDFD, &sock->flags)) - scm_pidfd_recv(msg, scm); + scm_passec(sock, msg, scm); - scm_destroy_cred(scm); + if (scm->fp) + scm_detach_fds(msg, scm); - scm_passec(sock, msg, scm); + return true; +} - if (!scm->fp) +static inline void scm_recv(struct socket *sock, struct msghdr *msg, + struct scm_cookie *scm, int flags) +{ + if (!__scm_recv_common(sock, msg, scm, flags)) return; - - scm_detach_fds(msg, scm); + + scm_destroy_cred(scm); } +static inline void scm_recv_unix(struct socket *sock, struct msghdr *msg, + struct scm_cookie *scm, int flags) +{ + if (!__scm_recv_common(sock, msg, scm, flags)) + return; + + if (test_bit(SOCK_PASSPIDFD, &sock->flags)) + scm_pidfd_recv(msg, scm); + + scm_destroy_cred(scm); +} #endif /* __LINUX_NET_SCM_H */ diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 3953daa2e1d0..123b35ddfd71 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2427,7 +2427,7 @@ int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, } err = (flags & MSG_TRUNC) ? skb->len - skip : size; - scm_recv(sock, msg, &scm, flags); + scm_recv_unix(sock, msg, &scm, flags); out_free: skb_free_datagram(sk, skb); @@ -2808,7 +2808,7 @@ unlock: mutex_unlock(&u->iolock); if (state->msg) - scm_recv(sock, state->msg, &scm, flags); + scm_recv_unix(sock, state->msg, &scm, flags); else scm_destroy(&scm); out: -- cgit v1.2.3