Diffstat (limited to 'net/mptcp/protocol.c')
-rw-r--r--  net/mptcp/protocol.c  235
1 file changed, 136 insertions(+), 99 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 9a287b75c1b3..0292162a14ee 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -12,6 +12,7 @@
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/aligned_data.h>
+#include <net/rps.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
@@ -137,26 +138,37 @@ struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
{
- sk_drops_add(sk, skb);
+ sk_drops_skbadd(sk, skb);
__kfree_skb(skb);
}
-static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
- struct sk_buff *from)
+static bool __mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+ struct sk_buff *from, bool *fragstolen,
+ int *delta)
{
- bool fragstolen;
- int delta;
+ int limit = READ_ONCE(sk->sk_rcvbuf);
if (unlikely(MPTCP_SKB_CB(to)->cant_coalesce) ||
MPTCP_SKB_CB(from)->offset ||
- ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) ||
- !skb_try_coalesce(to, from, &fragstolen, &delta))
+ ((to->len + from->len) > (limit >> 3)) ||
+ !skb_try_coalesce(to, from, fragstolen, delta))
return false;
pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n",
MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
to->len, MPTCP_SKB_CB(from)->end_seq);
MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
+ return true;
+}
+
+static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+ struct sk_buff *from)
+{
+ bool fragstolen;
+ int delta;
+
+ if (!__mptcp_try_coalesce(sk, to, from, &fragstolen, &delta))
+ return false;
/* note the fwd memory can reach a negative value after accounting
* for the delta, but the later skb free will restore a non
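
The size gate above is easy to miss in the refactor: coalescing is refused when the merged data would exceed one eighth of the receive buffer, now sampled once with READ_ONCE(). A minimal userspace sketch of that check, with illustrative names and values (not kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool can_coalesce(int to_len, int from_len, int from_offset,
			 bool cant_coalesce, int rcvbuf)
{
	if (cant_coalesce || from_offset)
		return false;
	/* refuse to build segments larger than 1/8 of the receive buffer */
	return to_len + from_len <= (rcvbuf >> 3);
}

int main(void)
{
	printf("%d\n", can_coalesce(1400, 1400, 0, false, 131072));  /* 1 */
	printf("%d\n", can_coalesce(20000, 1400, 0, false, 131072)); /* 0: past rcvbuf/8 */
	return 0;
}
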
@@ -178,6 +190,35 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
return mptcp_try_coalesce((struct sock *)msk, to, from);
}
+/* "inspired" by tcp_rcvbuf_grow(), main difference:
+ * - mptcp does not maintain a msk-level window clamp
+ * - returns true when the receive buffer is actually updated
+ */
+static bool mptcp_rcvbuf_grow(struct sock *sk)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ const struct net *net = sock_net(sk);
+ int rcvwin, rcvbuf, cap;
+
+ if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
+ (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
+ return false;
+
+ rcvwin = msk->rcvq_space.space << 1;
+
+ if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
+ rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq;
+
+ cap = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
+
+ rcvbuf = min_t(u32, mptcp_space_from_win(sk, rcvwin), cap);
+ if (rcvbuf > sk->sk_rcvbuf) {
+ WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+ return true;
+ }
+ return false;
+}
+
/* "inspired" by tcp_data_queue_ofo(), main differences:
* - use mptcp seqs
* - don't cope with sacks
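
mptcp_rcvbuf_grow() keeps the doubling-plus-out-of-order sizing of tcp_rcvbuf_grow() but without a window clamp. To make the arithmetic concrete, here is a self-contained userspace sketch; mptcp_space_from_win() is approximated by a fixed 4/3 overhead factor, and all numbers are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* crude stand-in for mptcp_space_from_win(): window plus ~1/3 overhead */
static uint32_t space_from_win(uint32_t win)
{
	return win + win / 3;
}

static bool rcvbuf_grow(uint32_t *rcvbuf, uint32_t space,
			uint32_t ooo_bytes, uint32_t rmem_max)
{
	uint32_t rcvwin = space << 1;	/* advertise twice what the reader consumed */
	uint32_t newbuf;

	rcvwin += ooo_bytes;		/* cover data already queued out of order */
	newbuf = space_from_win(rcvwin);
	if (newbuf > rmem_max)
		newbuf = rmem_max;	/* never exceed tcp_rmem[2] */
	if (newbuf <= *rcvbuf)
		return false;		/* the buffer is only ever grown */
	*rcvbuf = newbuf;
	return true;
}

int main(void)
{
	uint32_t rcvbuf = 131072;

	if (rcvbuf_grow(&rcvbuf, 262144, 65536, 6291456))
		printf("rcvbuf grown to %u\n", (unsigned)rcvbuf);
	return 0;
}
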
@@ -291,29 +332,16 @@ merge_right:
end:
skb_condense(skb);
skb_set_owner_r(skb, sk);
+ /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
+ if (sk->sk_socket)
+ mptcp_rcvbuf_grow(sk);
}
-static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
- struct sk_buff *skb, unsigned int offset,
- size_t copy_len)
+static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
+ int copy_len)
{
- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
- struct sock *sk = (struct sock *)msk;
- struct sk_buff *tail;
- bool has_rxtstamp;
-
- __skb_unlink(skb, &ssk->sk_receive_queue);
-
- skb_ext_reset(skb);
- skb_orphan(skb);
-
- /* try to fetch required memory from subflow */
- if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
- goto drop;
- }
-
- has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
+ const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ bool has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
/* the skb map_seq accounts for the skb offset:
* mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
@@ -325,6 +353,24 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;
MPTCP_SKB_CB(skb)->cant_coalesce = 0;
+ __skb_unlink(skb, &ssk->sk_receive_queue);
+
+ skb_ext_reset(skb);
+ skb_dst_drop(skb);
+}
+
+static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb)
+{
+ u64 copy_len = MPTCP_SKB_CB(skb)->end_seq - MPTCP_SKB_CB(skb)->map_seq;
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ struct sk_buff *tail;
+
+ /* try to fetch required memory from subflow */
+ if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
+ goto drop;
+ }
+
if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
/* in sequence */
msk->bytes_received += copy_len;
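
__mptcp_move_skb() now works purely on the msk and on what mptcp_init_skb() already stashed in MPTCP_SKB_CB(); the caller (see the __mptcp_move_skbs_from_subflow hunk below) orphans the skb in between. The in-sequence decision it makes boils down to a 64-bit data-sequence comparison; an illustrative userspace sketch, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rx_state {
	uint64_t ack_seq;		/* next in-order data sequence expected */
	uint64_t bytes_received;
};

/* true when the mapping lands exactly at ack_seq and can be appended to
 * (or coalesced into) the receive queue; anything else goes to the
 * out-of-order rb-tree
 */
static bool rx_in_sequence(struct rx_state *rx, uint64_t map_seq, uint64_t end_seq)
{
	if (map_seq != rx->ack_seq)
		return false;
	rx->bytes_received += end_seq - map_seq;
	rx->ack_seq = end_seq;
	return true;
}

int main(void)
{
	struct rx_state rx = { .ack_seq = 1000 };

	printf("%d\n", rx_in_sequence(&rx, 1000, 2460));	/* 1: in order */
	printf("%d\n", rx_in_sequence(&rx, 3920, 5380));	/* 0: out of order */
	return 0;
}
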
@@ -371,6 +417,20 @@ static void mptcp_close_wake_up(struct sock *sk)
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
+static void mptcp_shutdown_subflows(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow;
+
+ slow = lock_sock_fast(ssk);
+ tcp_shutdown(ssk, SEND_SHUTDOWN);
+ unlock_sock_fast(ssk, slow);
+ }
+}
+
/* called under the msk socket lock */
static bool mptcp_pending_data_fin_ack(struct sock *sk)
{
@@ -395,6 +455,7 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
break;
case TCP_CLOSING:
case TCP_LAST_ACK:
+ mptcp_shutdown_subflows(msk);
mptcp_set_state(sk, TCP_CLOSE);
break;
}
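
With mptcp_shutdown_subflows() in place, the peer acking our DATA_FIN while the msk sits in TCP_CLOSING or TCP_LAST_ACK (and, later in this patch, a DATA_FIN received in TCP_FIN_WAIT2) now explicitly shuts down the subflows in the send direction before the msk moves to TCP_CLOSE. A compressed stand-in for that transition, with simplified enum names and helpers rather than the kernel's:

#include <stdio.h>

enum mstate { M_ESTABLISHED, M_FIN_WAIT1, M_FIN_WAIT2, M_CLOSING, M_LAST_ACK, M_CLOSE };

/* stand-in for mptcp_shutdown_subflows(): tcp_shutdown(ssk, SEND_SHUTDOWN)
 * on every subflow under its fast socket lock
 */
static void shutdown_subflows(void)
{
	puts("subflows shut down");
}

/* transition taken once the peer has acked our DATA_FIN */
static enum mstate data_fin_acked(enum mstate st)
{
	switch (st) {
	case M_FIN_WAIT1:
		return M_FIN_WAIT2;
	case M_CLOSING:
	case M_LAST_ACK:
		shutdown_subflows();	/* the new step added by this patch */
		return M_CLOSE;
	default:
		return st;
	}
}

int main(void)
{
	printf("LAST_ACK -> %d (M_CLOSE=%d)\n", data_fin_acked(M_LAST_ACK), M_CLOSE);
	return 0;
}
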
@@ -529,11 +590,10 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
}
}
-static bool mptcp_check_data_fin(struct sock *sk)
+static void mptcp_check_data_fin(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
u64 rcv_data_fin_seq;
- bool ret = false;
/* Need to ack a DATA_FIN received from a peer while this side
* of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
@@ -563,6 +623,7 @@ static bool mptcp_check_data_fin(struct sock *sk)
mptcp_set_state(sk, TCP_CLOSING);
break;
case TCP_FIN_WAIT2:
+ mptcp_shutdown_subflows(msk);
mptcp_set_state(sk, TCP_CLOSE);
break;
default:
@@ -571,12 +632,10 @@ static bool mptcp_check_data_fin(struct sock *sk)
break;
}
- ret = true;
if (!__mptcp_check_fallback(msk))
mptcp_send_ack(msk);
mptcp_close_wake_up(sk);
}
- return ret;
}
static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
@@ -632,7 +691,9 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
if (offset < skb->len) {
size_t len = skb->len - offset;
- ret = __mptcp_move_skb(msk, ssk, skb, offset, len) || ret;
+ mptcp_init_skb(ssk, skb, offset, len);
+ skb_orphan(skb);
+ ret = __mptcp_move_skb(sk, skb) || ret;
seq += len;
if (unlikely(map_remaining < len)) {
@@ -753,12 +814,8 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
moved = __mptcp_move_skbs_from_subflow(msk, ssk);
__mptcp_ofo_queue(msk);
- if (unlikely(ssk->sk_err)) {
- if (!sock_owned_by_user(sk))
- __mptcp_error_report(sk);
- else
- __set_bit(MPTCP_ERROR_REPORT, &msk->cb_flags);
- }
+ if (unlikely(ssk->sk_err))
+ __mptcp_subflow_error_report(sk, ssk);
/* If the moves have caught up with the DATA_FIN sequence number
* it's time to ack the DATA_FIN and change socket state, but
@@ -770,18 +827,10 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
return moved;
}
-static void __mptcp_rcvbuf_update(struct sock *sk, struct sock *ssk)
-{
- if (unlikely(ssk->sk_rcvbuf > sk->sk_rcvbuf))
- WRITE_ONCE(sk->sk_rcvbuf, ssk->sk_rcvbuf);
-}
-
static void __mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- __mptcp_rcvbuf_update(sk, ssk);
-
/* Wake-up the reader only for in-sequence data */
if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
sk->sk_data_ready(sk);
@@ -1740,6 +1789,20 @@ static u32 mptcp_send_limit(const struct sock *sk)
return limit - not_sent;
}
+static void mptcp_rps_record_subflows(const struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+
+ if (!rfs_is_needed())
+ return;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ sock_rps_record_flow(ssk);
+ }
+}
+
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct mptcp_sock *msk = mptcp_sk(sk);
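
mptcp_rps_record_subflows() hooks MPTCP into Receive Flow Steering: the application only ever touches the msk, so the subflow TCP sockets never record which CPU consumes their data unless the msk does it for them on sendmsg/recvmsg/accept. A simplified userspace sketch of the idea behind the recording; the real table is the kernel's rps_sock_flow_table, everything below is a stand-in:

#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdio.h>

#define FLOW_TABLE_SIZE 4096

static uint16_t flow_table[FLOW_TABLE_SIZE];	/* flow hash -> CPU that last consumed it */

static void record_flow(uint32_t flow_hash)
{
	flow_table[flow_hash & (FLOW_TABLE_SIZE - 1)] = (uint16_t)sched_getcpu();
}

int main(void)
{
	/* an MPTCP socket records every subflow's hash, not just one flow */
	uint32_t subflow_hashes[] = { 0x1a2b3c4d, 0x99aa55ee };

	for (unsigned i = 0; i < sizeof(subflow_hashes) / sizeof(subflow_hashes[0]); i++)
		record_flow(subflow_hashes[i]);
	printf("flows recorded on CPU %d\n", sched_getcpu());
	return 0;
}
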
@@ -1753,6 +1816,8 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
lock_sock(sk);
+ mptcp_rps_record_subflows(msk);
+
if (unlikely(inet_test_bit(DEFER_CONNECT, sk) ||
msg->msg_flags & MSG_FASTOPEN)) {
int copied_syn = 0;
@@ -1913,12 +1978,13 @@ static int __mptcp_recvmsg_mskq(struct sock *sk,
}
if (!(flags & MSG_PEEK)) {
- /* avoid the indirect call, we know the destructor is sock_wfree */
+ /* avoid the indirect call, we know the destructor is sock_rfree */
skb->destructor = NULL;
+ skb->sk = NULL;
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
sk_mem_uncharge(sk, skb->truesize);
__skb_unlink(skb, &sk->sk_receive_queue);
- __kfree_skb(skb);
+ skb_attempt_defer_free(skb);
msk->bytes_consumed += count;
}
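
Because the destructor is known to be sock_rfree, the receive path can detach the skb from the msk and uncharge its truesize inline, which is what makes it safe to hand the buffer to skb_attempt_defer_free() and let it be freed later on the CPU that allocated it, possibly after the socket is gone. A simplified userspace stand-in for that hand-off; struct names are illustrative:

#include <stddef.h>
#include <stdio.h>

struct rsock {
	long rmem_alloc;	/* bytes charged against the receive buffer */
	long fwd_alloc;		/* forward-allocated memory returned for reuse */
};

struct buf {
	struct rsock *owner;
	void (*destructor)(struct rsock *sk, size_t truesize);
	size_t truesize;
};

/* what sock_rfree would have done from the destructor */
static void uncharge(struct rsock *sk, size_t truesize)
{
	sk->rmem_alloc -= (long)truesize;
	sk->fwd_alloc += (long)truesize;
}

int main(void)
{
	struct rsock sk = { .rmem_alloc = 4096 };
	struct buf skb = { .owner = &sk, .destructor = uncharge, .truesize = 2048 };

	/* consume: detach from the socket, uncharge inline, then the buffer
	 * itself can be freed later on another CPU
	 */
	skb.destructor = NULL;
	skb.owner = NULL;
	uncharge(&sk, skb.truesize);
	printf("rmem_alloc now %ld\n", sk.rmem_alloc);
	return 0;
}
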
@@ -1983,48 +2049,26 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
if (msk->rcvq_space.copied <= msk->rcvq_space.space)
goto new_measure;
- if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
- !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
- u64 rcvwin, grow;
- int rcvbuf;
-
- rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;
-
- grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);
-
- do_div(grow, msk->rcvq_space.space);
- rcvwin += (grow << 1);
-
- rcvbuf = min_t(u64, mptcp_space_from_win(sk, rcvwin),
- READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
-
- if (rcvbuf > sk->sk_rcvbuf) {
- u32 window_clamp;
-
- window_clamp = mptcp_win_from_space(sk, rcvbuf);
- WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+ msk->rcvq_space.space = msk->rcvq_space.copied;
+ if (mptcp_rcvbuf_grow(sk)) {
- /* Make subflows follow along. If we do not do this, we
- * get drops at subflow level if skbs can't be moved to
- * the mptcp rx queue fast enough (announced rcv_win can
- * exceed ssk->sk_rcvbuf).
- */
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk;
- bool slow;
+ /* Make subflows follow along. If we do not do this, we
+ * get drops at subflow level if skbs can't be moved to
+ * the mptcp rx queue fast enough (announced rcv_win can
+ * exceed ssk->sk_rcvbuf).
+ */
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk;
+ bool slow;
- ssk = mptcp_subflow_tcp_sock(subflow);
- slow = lock_sock_fast(ssk);
- WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
- WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp);
- if (tcp_can_send_ack(ssk))
- tcp_cleanup_rbuf(ssk, 1);
- unlock_sock_fast(ssk, slow);
- }
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ slow = lock_sock_fast(ssk);
+ tcp_sk(ssk)->rcvq_space.space = msk->rcvq_space.copied;
+ tcp_rcvbuf_grow(ssk);
+ unlock_sock_fast(ssk, slow);
}
}
- msk->rcvq_space.space = msk->rcvq_space.copied;
new_measure:
msk->rcvq_space.copied = 0;
msk->rcvq_space.time = mstamp;
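
Receive autotuning now works per measurement window: whenever the bytes copied by the reader within one round trip exceed the previous estimate, rcvq_space.space is bumped to the new value and mptcp_rcvbuf_grow() is retried, with every subflow updated the same way via tcp_rcvbuf_grow(). A small userspace sketch of that cycle, reusing the space_from_win() approximation from the earlier sketch and ignoring the out-of-order term and the tcp_rmem cap for brevity:

#include <stdint.h>
#include <stdio.h>

/* same 4/3 approximation of mptcp_space_from_win() as in the earlier sketch */
static uint32_t space_from_win(uint32_t win)
{
	return win + win / 3;
}

int main(void)
{
	uint32_t space = 65536, rcvbuf = 131072;
	uint32_t copied_per_rtt[] = { 70000, 120000, 110000, 250000 };
	unsigned i;

	for (i = 0; i < sizeof(copied_per_rtt) / sizeof(copied_per_rtt[0]); i++) {
		uint32_t copied = copied_per_rtt[i], want;

		if (copied <= space)
			continue;	/* the reader kept up; nothing to relearn */

		/* msk->rcvq_space.space = msk->rcvq_space.copied, then try to grow */
		space = copied;
		want = space_from_win(space << 1);
		if (want > rcvbuf) {
			rcvbuf = want;
			/* the real code now also updates every subflow here:
			 * tcp_sk(ssk)->rcvq_space.space = copied; tcp_rcvbuf_grow(ssk);
			 */
		}
		printf("rtt %u: space=%u rcvbuf=%u\n", i, (unsigned)space, (unsigned)rcvbuf);
	}
	return 0;
}
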
@@ -2053,11 +2097,6 @@ static bool __mptcp_move_skbs(struct sock *sk)
if (list_empty(&msk->conn_list))
return false;
- /* verify we can move any data from the subflow, eventually updating */
- if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
- mptcp_for_each_subflow(msk, subflow)
- __mptcp_rcvbuf_update(sk, subflow->tcp_sock);
-
subflow = list_first_entry(&msk->conn_list,
struct mptcp_subflow_context, node);
for (;;) {
@@ -2131,6 +2170,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
goto out_err;
}
+ mptcp_rps_record_subflows(msk);
+
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
len = min_t(size_t, len, INT_MAX);
@@ -2173,14 +2214,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
break;
}
- if (sk->sk_shutdown & RCV_SHUTDOWN) {
- /* race breaker: the shutdown could be after the
- * previous receive queue check
- */
- if (__mptcp_move_skbs(sk))
- continue;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
- }
if (sk->sk_state == TCP_CLOSE) {
copied = -ENOTCONN;
@@ -2587,7 +2622,8 @@ static void __mptcp_retrans(struct sock *sk)
if (mptcp_data_fin_enabled(msk)) {
struct inet_connection_sock *icsk = inet_csk(sk);
- icsk->icsk_retransmits++;
+ WRITE_ONCE(icsk->icsk_retransmits,
+ icsk->icsk_retransmits + 1);
mptcp_set_datafin_timeout(sk);
mptcp_send_ack(msk);
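
Switching icsk_retransmits to WRITE_ONCE() pairs the writer, which runs under the msk socket lock, with lockless readers elsewhere in the stack, avoiding load/store tearing. A userspace illustration of the pairing, with simplified stand-ins for the kernel macros:

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static uint8_t icsk_retransmits;

int main(void)
{
	/* writer side, under the socket lock */
	WRITE_ONCE(icsk_retransmits, READ_ONCE(icsk_retransmits) + 1);

	/* a lockless reader observes a whole value, never a torn one */
	printf("retransmits = %u\n", (unsigned)READ_ONCE(icsk_retransmits));
	return 0;
}
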
@@ -3554,7 +3590,6 @@ void mptcp_sock_graft(struct sock *sk, struct socket *parent)
write_lock_bh(&sk->sk_callback_lock);
rcu_assign_pointer(sk->sk_wq, &parent->wq);
sk_set_socket(sk, parent);
- WRITE_ONCE(sk->sk_uid, SOCK_INODE(parent)->i_uid);
write_unlock_bh(&sk->sk_callback_lock);
}
@@ -3921,6 +3956,8 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
mptcp_sock_graft(ssk, newsock);
}
+ mptcp_rps_record_subflows(msk);
+
/* Do late cleanup for the first subflow as necessary. Also
* deal with bad peers not doing a complete shutdown.
*/