Diffstat (limited to 'net/mptcp/protocol.c')
| -rw-r--r-- | net/mptcp/protocol.c | 461 |
1 file changed, 323 insertions(+), 138 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 1e413426deee..e212c1374bd0 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -358,7 +358,7 @@ end:
 static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
			    int copy_len)
 {
-	const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	bool has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
 
	/* the skb map_seq accounts for the skb offset:
@@ -383,11 +383,7 @@ static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb)
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sk_buff *tail;
 
-	/* try to fetch required memory from subflow */
-	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
-		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
-		goto drop;
-	}
+	mptcp_borrow_fwdmem(sk, skb);
 
	if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
		/* in sequence */
@@ -409,16 +405,13 @@ static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb)
	 * will retransmit as needed, if needed.
	 */
	MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
-drop:
	mptcp_drop(sk, skb);
	return false;
 }
 
 static void mptcp_stop_rtx_timer(struct sock *sk)
 {
-	struct inet_connection_sock *icsk = inet_csk(sk);
-
-	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+	sk_stop_timer(sk, &sk->mptcp_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
 }
 
@@ -524,7 +517,7 @@ static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow)
	const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
	return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
-	       icsk_timeout(inet_csk(ssk)) - jiffies : 0;
+	       tcp_timeout_expires(ssk) - jiffies : 0;
 }
 
 static void mptcp_set_timeout(struct sock *sk)
@@ -664,8 +657,50 @@ static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
	}
 }
 
+static void __mptcp_add_backlog(struct sock *sk,
+				struct mptcp_subflow_context *subflow,
+				struct sk_buff *skb)
+{
+	struct mptcp_sock *msk = mptcp_sk(sk);
+	struct sk_buff *tail = NULL;
+	struct sock *ssk = skb->sk;
+	bool fragstolen;
+	int delta;
+
+	if (unlikely(sk->sk_state == TCP_CLOSE)) {
+		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
+		return;
+	}
+
+	/* Try to coalesce with the last skb in our backlog */
+	if (!list_empty(&msk->backlog_list))
+		tail = list_last_entry(&msk->backlog_list, struct sk_buff, list);
+
+	if (tail && MPTCP_SKB_CB(skb)->map_seq == MPTCP_SKB_CB(tail)->end_seq &&
+	    ssk == tail->sk &&
+	    __mptcp_try_coalesce(sk, tail, skb, &fragstolen, &delta)) {
+		skb->truesize -= delta;
+		kfree_skb_partial(skb, fragstolen);
+		__mptcp_subflow_lend_fwdmem(subflow, delta);
+		goto account;
+	}
+
+	list_add_tail(&skb->list, &msk->backlog_list);
+	mptcp_subflow_lend_fwdmem(subflow, skb);
+	delta = skb->truesize;
+
+account:
+	WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta);
+
+	/* Possibly not accept()ed yet, keep track of memory not CG
+	 * accounted, mptcp_graft_subflows() will handle it.
+	 */
+	if (!mem_cgroup_from_sk(ssk))
+		msk->backlog_unaccounted += delta;
+}
+
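The coalescing path above merges a new skb into the backlog tail only when its data is contiguous at the MPTCP sequence level and comes from the same subflow. A minimal user-space model of that adjacency test follows; the struct, field names, and helper are illustrative stand-ins, not the kernel's types:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the MPTCP_SKB_CB() control-block fields. */
struct mptcp_cb {
	uint64_t map_seq;	/* first data-sequence byte carried */
	uint64_t end_seq;	/* one past the last byte carried */
	const void *ssk;	/* source subflow socket */
};

/* Merging is allowed only for contiguous data from the same subflow,
 * mirroring the tail/end_seq/ssk checks in __mptcp_add_backlog().
 */
static bool can_coalesce(const struct mptcp_cb *tail,
			 const struct mptcp_cb *skb)
{
	return tail && skb->map_seq == tail->end_seq && skb->ssk == tail->ssk;
}

int main(void)
{
	int sf1, sf2;
	struct mptcp_cb tail  = { .map_seq = 100, .end_seq = 200, .ssk = &sf1 };
	struct mptcp_cb next  = { .map_seq = 200, .end_seq = 300, .ssk = &sf1 };
	struct mptcp_cb gap   = { .map_seq = 250, .end_seq = 300, .ssk = &sf1 };
	struct mptcp_cb other = { .map_seq = 200, .end_seq = 300, .ssk = &sf2 };

	assert(can_coalesce(&tail, &next));	/* contiguous, same subflow */
	assert(!can_coalesce(&tail, &gap));	/* hole in the data stream */
	assert(!can_coalesce(&tail, &other));	/* different source subflow */
	return 0;
}
```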
 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
-					   struct sock *ssk)
+					   struct sock *ssk, bool own_msk)
 {
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
@@ -681,9 +716,6 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
		struct sk_buff *skb;
		bool fin;
 
-		if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
-			break;
-
		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);
@@ -710,8 +742,13 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
			size_t len = skb->len - offset;
 
			mptcp_init_skb(ssk, skb, offset, len);
-			skb_orphan(skb);
-			ret = __mptcp_move_skb(sk, skb) || ret;
+
+			if (own_msk && sk_rmem_alloc_get(sk) < sk->sk_rcvbuf) {
+				mptcp_subflow_lend_fwdmem(subflow, skb);
+				ret |= __mptcp_move_skb(sk, skb);
+			} else {
+				__mptcp_add_backlog(sk, subflow, skb);
+			}
			seq += len;
 
			if (unlikely(map_remaining < len)) {
@@ -830,7 +867,7 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
	struct sock *sk = (struct sock *)msk;
	bool moved;
 
-	moved = __mptcp_move_skbs_from_subflow(msk, ssk);
+	moved = __mptcp_move_skbs_from_subflow(msk, ssk, true);
	__mptcp_ofo_queue(msk);
	if (unlikely(ssk->sk_err))
		__mptcp_subflow_error_report(sk, ssk);
@@ -845,31 +882,26 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
	return moved;
 }
 
-static void __mptcp_data_ready(struct sock *sk, struct sock *ssk)
-{
-	struct mptcp_sock *msk = mptcp_sk(sk);
-
-	/* Wake-up the reader only for in-sequence data */
-	if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
-		sk->sk_data_ready(sk);
-}
-
 void mptcp_data_ready(struct sock *sk, struct sock *ssk)
 {
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+	struct mptcp_sock *msk = mptcp_sk(sk);
 
	/* The peer can send data while we are shutting down this
-	 * subflow at msk destruction time, but we must avoid enqueuing
+	 * subflow at subflow destruction time, but we must avoid enqueuing
	 * more data to the msk receive queue
	 */
-	if (unlikely(subflow->disposable))
+	if (unlikely(subflow->closing))
		return;
 
	mptcp_data_lock(sk);
-	if (!sock_owned_by_user(sk))
-		__mptcp_data_ready(sk, ssk);
-	else
-		__set_bit(MPTCP_DEQUEUE, &mptcp_sk(sk)->cb_flags);
+	if (!sock_owned_by_user(sk)) {
+		/* Wake-up the reader only for in-sequence data */
+		if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
+			sk->sk_data_ready(sk);
+	} else {
+		__mptcp_move_skbs_from_subflow(msk, ssk, false);
+	}
	mptcp_data_unlock(sk);
 }
 
@@ -895,12 +927,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
	mptcp_subflow_joined(msk, ssk);
	spin_unlock_bh(&msk->fallback_lock);
 
-	/* attach to msk socket only after we are sure we will deal with it
-	 * at close time
-	 */
-	if (sk->sk_socket && !ssk->sk_socket)
-		mptcp_sock_graft(ssk, sk->sk_socket);
-
	mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
	mptcp_sockopt_sync_locked(msk, ssk);
	mptcp_stop_tout_timer(sk);
@@ -926,12 +952,11 @@ static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list)
 
 static bool mptcp_rtx_timer_pending(struct sock *sk)
 {
-	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
+	return timer_pending(&sk->mptcp_retransmit_timer);
 }
 
 static void mptcp_reset_rtx_timer(struct sock *sk)
 {
-	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long tout;
 
	/* prevent rescheduling on close */
@@ -939,7 +964,7 @@ static void mptcp_reset_rtx_timer(struct sock *sk)
		return;
 
	tout = mptcp_sk(sk)->timer_ival;
-	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
+	sk_reset_timer(sk, &sk->mptcp_retransmit_timer, jiffies + tout);
 }
 
 bool mptcp_schedule_work(struct sock *sk)
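The new own_msk flag tells __mptcp_move_skbs_from_subflow() whether the caller owns the msk socket lock: only then may skbs go straight to the msk receive queue, otherwise they are parked on the backlog for the lock owner to spool later. A user-space model of that dispatch decision; all names and types below are illustrative:

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative model of the two ingest paths: direct receive-queue
 * insertion versus deferral to the msk backlog.
 */
struct msk_model {
	bool owned_by_user;	/* sock_owned_by_user() stand-in */
	long rmem_alloc;	/* sk_rmem_alloc stand-in */
	long rcvbuf;		/* sk->sk_rcvbuf stand-in */
	int rxq_len;
	long backlog_len;
};

static void ingest(struct msk_model *msk, bool own_msk, int truesize)
{
	if (own_msk && msk->rmem_alloc < msk->rcvbuf) {
		msk->rxq_len++;			/* __mptcp_move_skb() path */
		msk->rmem_alloc += truesize;
	} else {
		msk->backlog_len += truesize;	/* __mptcp_add_backlog() path */
	}
}

int main(void)
{
	struct msk_model msk = { .rcvbuf = 4096 };

	ingest(&msk, true, 1024);		 /* lock free: straight to rx queue */
	msk.owned_by_user = true;
	ingest(&msk, !msk.owned_by_user, 1024);	 /* user owns lock: backlog */
	printf("rxq=%d backlog=%ld\n", msk.rxq_len, msk.backlog_len);
	return 0;
}
```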
@@ -1088,11 +1113,12 @@ static void mptcp_enter_memory_pressure(struct sock *sk)
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
-		if (first)
+		if (first && !ssk->sk_bypass_prot_mem) {
			tcp_enter_memory_pressure(ssk);
-		sk_stream_moderate_sndbuf(ssk);
+			first = false;
+		}
 
-		first = false;
+		sk_stream_moderate_sndbuf(ssk);
	}
	__mptcp_sync_sndbuf(sk);
 }
@@ -2113,60 +2139,80 @@ new_measure:
	msk->rcvq_space.time = mstamp;
 }
 
-static struct mptcp_subflow_context *
-__mptcp_first_ready_from(struct mptcp_sock *msk,
-			 struct mptcp_subflow_context *subflow)
+static bool __mptcp_move_skbs(struct sock *sk, struct list_head *skbs, u32 *delta)
 {
-	struct mptcp_subflow_context *start_subflow = subflow;
+	struct sk_buff *skb = list_first_entry(skbs, struct sk_buff, list);
+	struct mptcp_sock *msk = mptcp_sk(sk);
+	bool moved = false;
+
+	*delta = 0;
+	while (1) {
+		/* If the msk rcvbuf is full, stop; don't drop */
+		if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
+			break;
+
+		prefetch(skb->next);
+		list_del(&skb->list);
+		*delta += skb->truesize;
 
-	while (!READ_ONCE(subflow->data_avail)) {
-		subflow = mptcp_next_subflow(msk, subflow);
-		if (subflow == start_subflow)
-			return NULL;
+		moved |= __mptcp_move_skb(sk, skb);
+		if (list_empty(skbs))
+			break;
+
+		skb = list_first_entry(skbs, struct sk_buff, list);
	}
-	return subflow;
+
+	__mptcp_ofo_queue(msk);
+	if (moved)
+		mptcp_check_data_fin((struct sock *)msk);
+	return moved;
 }
 
-static bool __mptcp_move_skbs(struct sock *sk)
+static bool mptcp_can_spool_backlog(struct sock *sk, struct list_head *skbs)
 {
-	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
-	bool ret = false;
 
-	if (list_empty(&msk->conn_list))
+	/* After CG initialization, subflows should never add skbs before
+	 * gaining the CG themselves.
+	 */
+	DEBUG_NET_WARN_ON_ONCE(msk->backlog_unaccounted && sk->sk_socket &&
+			       mem_cgroup_from_sk(sk));
+
+	/* Don't spool the backlog if the rcvbuf is full. */
+	if (list_empty(&msk->backlog_list) ||
+	    sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
		return false;
 
-	subflow = list_first_entry(&msk->conn_list,
-				   struct mptcp_subflow_context, node);
-	for (;;) {
-		struct sock *ssk;
-		bool slowpath;
+	INIT_LIST_HEAD(skbs);
+	list_splice_init(&msk->backlog_list, skbs);
+	return true;
+}
 
-		/*
-		 * As an optimization avoid traversing the subflows list
-		 * and ev. acquiring the subflow socket lock before baling out
-		 */
-		if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
-			break;
+static void mptcp_backlog_spooled(struct sock *sk, u32 moved,
+				  struct list_head *skbs)
+{
+	struct mptcp_sock *msk = mptcp_sk(sk);
 
-		subflow = __mptcp_first_ready_from(msk, subflow);
-		if (!subflow)
-			break;
+	WRITE_ONCE(msk->backlog_len, msk->backlog_len - moved);
+	list_splice(skbs, &msk->backlog_list);
+}
 
-		ssk = mptcp_subflow_tcp_sock(subflow);
-		slowpath = lock_sock_fast(ssk);
-		ret = __mptcp_move_skbs_from_subflow(msk, ssk) || ret;
-		if (unlikely(ssk->sk_err))
-			__mptcp_error_report(sk);
-		unlock_sock_fast(ssk, slowpath);
+static bool mptcp_move_skbs(struct sock *sk)
+{
+	struct list_head skbs;
+	bool enqueued = false;
+	u32 moved;
 
-		subflow = mptcp_next_subflow(msk, subflow);
-	}
+	mptcp_data_lock(sk);
+	while (mptcp_can_spool_backlog(sk, &skbs)) {
+		mptcp_data_unlock(sk);
+		enqueued |= __mptcp_move_skbs(sk, &skbs, &moved);
 
-	__mptcp_ofo_queue(msk);
-	if (ret)
-		mptcp_check_data_fin((struct sock *)msk);
-	return ret;
+		mptcp_data_lock(sk);
+		mptcp_backlog_spooled(sk, moved, &skbs);
	}
+	mptcp_data_unlock(sk);
+	return enqueued;
 }
 
 static unsigned int mptcp_inq_hint(const struct sock *sk)
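mptcp_move_skbs() follows a splice-and-process pattern: detach the whole backlog under the data lock, move skbs with the lock dropped, then retake the lock to adjust backlog_len and requeue any leftovers. A compact user-space rendition of the same pattern; the node type, helpers, and counters are illustrative:

```c
#include <pthread.h>
#include <stdio.h>

/* Minimal intrusive list node, standing in for the kernel's sk_buff list. */
struct node { struct node *next; int truesize; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *backlog;	/* msk->backlog_list stand-in */
static long backlog_len;	/* msk->backlog_len stand-in */

/* Splice the backlog out under the lock, as mptcp_can_spool_backlog() does. */
static struct node *spool_start(void)
{
	pthread_mutex_lock(&lock);
	struct node *batch = backlog;
	backlog = NULL;
	pthread_mutex_unlock(&lock);
	return batch;
}

/* Re-account and requeue leftovers, as mptcp_backlog_spooled() does. */
static void spool_end(struct node *leftover, long moved)
{
	pthread_mutex_lock(&lock);
	backlog_len -= moved;
	while (leftover) {		/* real code splices back at the head */
		struct node *n = leftover;
		leftover = n->next;
		n->next = backlog;
		backlog = n;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct node b = { NULL, 512 }, a = { &b, 512 };
	backlog = &a;
	backlog_len = 1024;

	struct node *batch = spool_start();
	long moved = batch->truesize;	/* pretend only 'a' fit in the rcvbuf */
	spool_end(batch->next, moved);
	printf("backlog_len=%ld\n", backlog_len);	/* 512 */
	return 0;
}
```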
@@ -2232,7 +2278,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 
		copied += bytes_read;
 
-		if (skb_queue_empty(&sk->sk_receive_queue) && __mptcp_move_skbs(sk))
+		if (!list_empty(&msk->backlog_list) && mptcp_move_skbs(sk))
			continue;
 
		/* only the MPTCP socket status is relevant here. The exit
@@ -2305,9 +2351,7 @@ out_err:
 
 static void mptcp_retransmit_timer(struct timer_list *t)
 {
-	struct inet_connection_sock *icsk = timer_container_of(icsk, t,
-								icsk_retransmit_timer);
-	struct sock *sk = &icsk->icsk_inet.sk;
+	struct sock *sk = timer_container_of(sk, t, mptcp_retransmit_timer);
	struct mptcp_sock *msk = mptcp_sk(sk);
 
	bh_lock_sock(sk);
@@ -2325,7 +2369,9 @@ static void mptcp_retransmit_timer(struct timer_list *t)
 
 static void mptcp_tout_timer(struct timer_list *t)
 {
-	struct sock *sk = timer_container_of(sk, t, sk_timer);
+	struct inet_connection_sock *icsk =
+		timer_container_of(icsk, t, mptcp_tout_timer);
+	struct sock *sk = &icsk->icsk_inet.sk;
 
	mptcp_schedule_work(sk);
	sock_put(sk);
@@ -2446,6 +2492,25 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 {
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool dispose_it, need_push = false;
+	int fwd_remaining;
+
+	/* Do not pass RX data to the msk, even if the subflow socket is not
+	 * going to be freed (i.e. even for the first subflow on graceful
+	 * subflow close).
+	 */
+	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+	subflow->closing = 1;
+
+	/* Borrow the fwd allocated page left-over; fwd memory for the subflow
+	 * could be negative at this point, but will reach zero soon, when
+	 * the data allocated using such fragment is freed.
+	 */
+	if (subflow->lent_mem_frag) {
+		fwd_remaining = PAGE_SIZE - subflow->lent_mem_frag;
+		sk_forward_alloc_add(sk, fwd_remaining);
+		sk_forward_alloc_add(ssk, -fwd_remaining);
+		subflow->lent_mem_frag = 0;
+	}
 
	/* If the first subflow moved to a close state before accept, e.g. due
	 * to an incoming reset or listener shutdown, the subflow socket is
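The reclaim above assumes forward memory is lent to a subflow in page-sized chunks: whatever part of the current page the subflow did not consume (PAGE_SIZE - lent_mem_frag) flows back to the msk at close time. The arithmetic, in an illustrative user-space form:

```c
#include <assert.h>

#define PAGE_SIZE 4096

/* Forward-memory accounting stand-ins for the msk and ssk sockets. */
struct sk_model { long forward_alloc; };

/* Return the unused tail of the subflow's lent page to the msk,
 * mirroring the lent_mem_frag handling in __mptcp_close_ssk().
 */
static void reclaim_fwd_leftover(struct sk_model *msk, struct sk_model *ssk,
				 int *lent_mem_frag)
{
	if (*lent_mem_frag) {
		int fwd_remaining = PAGE_SIZE - *lent_mem_frag;

		msk->forward_alloc += fwd_remaining;
		ssk->forward_alloc -= fwd_remaining;	/* may go negative */
		*lent_mem_frag = 0;
	}
}

int main(void)
{
	struct sk_model msk = { .forward_alloc = 0 };
	struct sk_model ssk = { .forward_alloc = 1024 };
	int lent_mem_frag = 3072;	/* 3072 bytes of the page consumed */

	reclaim_fwd_leftover(&msk, &ssk, &lent_mem_frag);
	assert(msk.forward_alloc == 1024);	/* 4096 - 3072 */
	assert(ssk.forward_alloc == 0);
	assert(lent_mem_frag == 0);
	return 0;
}
```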
@@ -2457,7 +2522,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		/* ensure later check in mptcp_worker() will dispose the msk */
		sock_set_flag(sk, SOCK_DEAD);
		mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1));
-		lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
		mptcp_subflow_drop_ctx(ssk);
		goto out_release;
	}
@@ -2466,8 +2530,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
	if (dispose_it)
		list_del(&subflow->node);
 
-	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
-
	if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE)
		tcp_set_state(ssk, TCP_CLOSE);
 
@@ -2530,6 +2592,9 @@ out:
 void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		      struct mptcp_subflow_context *subflow)
 {
+	struct mptcp_sock *msk = mptcp_sk(sk);
+	struct sk_buff *skb;
+
	/* The first subflow can already be closed and still in the list */
	if (subflow->close_event_done)
		return;
@@ -2539,6 +2604,17 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
	if (sk->sk_state == TCP_ESTABLISHED)
		mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
 
+	/* Remove any reference from the backlog to this ssk; backlog skbs
+	 * consume space in the msk receive queue, no need to touch
+	 * sk->sk_rmem_alloc.
+	 */
+	list_for_each_entry(skb, &msk->backlog_list, list) {
+		if (skb->sk != ssk)
+			continue;
+
+		atomic_sub(skb->truesize, &skb->sk->sk_rmem_alloc);
+		skb->sk = NULL;
+	}
+
	/* subflow aborted before reaching the fully_established status
	 * attempt the creation of the next subflow
	 */
@@ -2758,7 +2834,7 @@ void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
	 */
	timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;
 
-	sk_reset_timer(sk, &sk->sk_timer, timeout);
+	sk_reset_timer(sk, &inet_csk(sk)->mptcp_tout_timer, timeout);
 }
 
 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
@@ -2777,12 +2853,31 @@ static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
	unlock_sock_fast(ssk, slow);
 }
 
+static void mptcp_backlog_purge(struct sock *sk)
+{
+	struct mptcp_sock *msk = mptcp_sk(sk);
+	struct sk_buff *tmp, *skb;
+	LIST_HEAD(backlog);
+
+	mptcp_data_lock(sk);
+	list_splice_init(&msk->backlog_list, &backlog);
+	msk->backlog_len = 0;
+	mptcp_data_unlock(sk);
+
+	list_for_each_entry_safe(skb, tmp, &backlog, list) {
+		mptcp_borrow_fwdmem(sk, skb);
+		kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE);
+	}
+	sk_mem_reclaim(sk);
+}
+
 static void mptcp_do_fastclose(struct sock *sk)
 {
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);
 
	mptcp_set_state(sk, TCP_CLOSE);
+	mptcp_backlog_purge(sk);
 
	/* Explicitly send the fastclose reset as need */
	if (__mptcp_check_fallback(msk))
@@ -2867,11 +2962,13 @@ static void __mptcp_init_sock(struct sock *sk)
	INIT_LIST_HEAD(&msk->conn_list);
	INIT_LIST_HEAD(&msk->join_list);
	INIT_LIST_HEAD(&msk->rtx_queue);
+	INIT_LIST_HEAD(&msk->backlog_list);
	INIT_WORK(&msk->work, mptcp_worker);
	msk->out_of_order_queue = RB_ROOT;
	msk->first_pending = NULL;
	msk->timer_ival = TCP_RTO_MIN;
	msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
+	msk->backlog_len = 0;
 
	WRITE_ONCE(msk->first, NULL);
	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
@@ -2888,8 +2985,8 @@ static void __mptcp_init_sock(struct sock *sk)
	spin_lock_init(&msk->fallback_lock);
 
	/* re-use the csk retrans timer for MPTCP-level retrans */
-	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
-	timer_setup(&sk->sk_timer, mptcp_tout_timer, 0);
+	timer_setup(&sk->mptcp_retransmit_timer, mptcp_retransmit_timer, 0);
+	timer_setup(&msk->sk.mptcp_tout_timer, mptcp_tout_timer, 0);
 }
 
 static void mptcp_ca_reset(struct sock *sk)
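When a subflow is closed, backlog skbs that still reference it must drop that reference while staying queued for the msk, uncharging the ssk's receive memory in the process. A user-space model of that detach loop; the struct fields are illustrative:

```c
#include <assert.h>
#include <stddef.h>

struct ssk_model { long rmem_alloc; };

struct skb_model {
	struct skb_model *next;
	struct ssk_model *sk;	/* owning subflow, if any */
	int truesize;
};

/* Detach a dying subflow from every backlog skb that references it,
 * uncharging its rmem, as mptcp_close_ssk() does before the ssk goes away.
 */
static void backlog_detach_ssk(struct skb_model *head, struct ssk_model *ssk)
{
	for (struct skb_model *skb = head; skb; skb = skb->next) {
		if (skb->sk != ssk)
			continue;
		skb->sk->rmem_alloc -= skb->truesize;
		skb->sk = NULL;	/* skb now owned by the msk backlog only */
	}
}

int main(void)
{
	struct ssk_model ssk1 = { .rmem_alloc = 700 };
	struct ssk_model ssk2 = { .rmem_alloc = 300 };
	struct skb_model b = { NULL, &ssk2, 300 };
	struct skb_model a = { &b, &ssk1, 700 };

	backlog_detach_ssk(&a, &ssk1);
	assert(a.sk == NULL && ssk1.rmem_alloc == 0);
	assert(b.sk == &ssk2 && ssk2.rmem_alloc == 300);	/* untouched */
	return 0;
}
```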
@@ -3091,7 +3188,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
	might_sleep();
 
	mptcp_stop_rtx_timer(sk);
-	sk_stop_timer(sk, &sk->sk_timer);
+	sk_stop_timer(sk, &inet_csk(sk)->mptcp_tout_timer);
	msk->pm.status = 0;
	mptcp_release_sched(msk);
 
@@ -3242,6 +3339,28 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
 }
 
+static void mptcp_destroy_common(struct mptcp_sock *msk)
+{
+	struct mptcp_subflow_context *subflow, *tmp;
+	struct sock *sk = (struct sock *)msk;
+
+	__mptcp_clear_xmit(sk);
+	mptcp_backlog_purge(sk);
+
+	/* join list will be eventually flushed (with rst) at sock lock release time */
+	mptcp_for_each_subflow_safe(msk, subflow, tmp)
+		__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0);
+
+	__skb_queue_purge(&sk->sk_receive_queue);
+	skb_rbtree_purge(&msk->out_of_order_queue);
+
+	/* move all the rx fwd alloc into the sk_mem_reclaim_final in
+	 * inet_sock_destruct() will dispose it
+	 */
+	mptcp_token_destroy(msk);
+	mptcp_pm_destroy(msk);
+}
+
 static int mptcp_disconnect(struct sock *sk, int flags)
 {
	struct mptcp_sock *msk = mptcp_sk(sk);
@@ -3294,6 +3413,9 @@ static int mptcp_disconnect(struct sock *sk, int flags)
	msk->bytes_retrans = 0;
	msk->rcvspace_init = 0;
 
+	/* for fallback's sake */
+	WRITE_ONCE(msk->ack_seq, 0);
+
	WRITE_ONCE(sk->sk_shutdown, 0);
	sk_error_report(sk);
	return 0;
@@ -3444,27 +3566,6 @@ void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
	msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
 }
 
-void mptcp_destroy_common(struct mptcp_sock *msk)
-{
-	struct mptcp_subflow_context *subflow, *tmp;
-	struct sock *sk = (struct sock *)msk;
-
-	__mptcp_clear_xmit(sk);
-
-	/* join list will be eventually flushed (with rst) at sock lock release time */
-	mptcp_for_each_subflow_safe(msk, subflow, tmp)
-		__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0);
-
-	__skb_queue_purge(&sk->sk_receive_queue);
-	skb_rbtree_purge(&msk->out_of_order_queue);
-
-	/* move all the rx fwd alloc into the sk_mem_reclaim_final in
-	 * inet_sock_destruct() will dispose it
-	 */
-	mptcp_token_destroy(msk);
-	mptcp_pm_destroy(msk);
-}
-
 static void mptcp_destroy(struct sock *sk)
 {
	struct mptcp_sock *msk = mptcp_sk(sk);
@@ -3493,8 +3594,7 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
 
 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
				       BIT(MPTCP_RETRANSMIT) | \
-				      BIT(MPTCP_FLUSH_JOIN_LIST) | \
-				      BIT(MPTCP_DEQUEUE))
+				      BIT(MPTCP_FLUSH_JOIN_LIST))
 
 /* processes deferred events and flush wmem */
 static void mptcp_release_cb(struct sock *sk)
@@ -3504,9 +3604,12 @@ static void mptcp_release_cb(struct sock *sk)
	for (;;) {
		unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
-		struct list_head join_list;
+		struct list_head join_list, skbs;
+		bool spool_bl;
+		u32 moved;
 
-		if (!flags)
+		spool_bl = mptcp_can_spool_backlog(sk, &skbs);
+		if (!flags && !spool_bl)
			break;
 
		INIT_LIST_HEAD(&join_list);
@@ -3528,7 +3631,7 @@ static void mptcp_release_cb(struct sock *sk)
			__mptcp_push_pending(sk, 0);
		if (flags & BIT(MPTCP_RETRANSMIT))
			__mptcp_retrans(sk);
-		if ((flags & BIT(MPTCP_DEQUEUE)) && __mptcp_move_skbs(sk)) {
+		if (spool_bl && __mptcp_move_skbs(sk, &skbs, &moved)) {
			/* notify ack seq update */
			mptcp_cleanup_rbuf(msk, 0);
			sk->sk_data_ready(sk);
@@ -3536,6 +3639,8 @@ static void mptcp_release_cb(struct sock *sk)
 
		cond_resched();
		spin_lock_bh(&sk->sk_lock.slock);
+		if (spool_bl)
+			mptcp_backlog_spooled(sk, moved, &skbs);
	}
 
	if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
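mptcp_release_cb() drains deferred work in a loop: snapshot the pending-event state, do the work with the socket spinlock dropped, retake it, and re-check, so events raised while the lock was owned are never lost. A user-space sketch of that re-check loop, with the locking elided and illustrative event names and handlers:

```c
#include <stdio.h>

enum { EV_PUSH = 1 << 0, EV_RETRANS = 1 << 1 };

static unsigned long cb_flags;	/* msk->cb_flags stand-in */

/* Handlers run "unlocked"; in the kernel, the socket spinlock is
 * released before this work and re-acquired afterwards.
 */
static void handle(unsigned long flags)
{
	if (flags & EV_PUSH)
		printf("push pending data\n");
	if (flags & EV_RETRANS)
		printf("retransmit\n");
	/* pretend a new event arrived while we were busy */
	if (flags & EV_PUSH)
		cb_flags |= EV_RETRANS;
}

int main(void)
{
	cb_flags = EV_PUSH;

	for (;;) {
		/* snapshot-and-clear, as release_cb does under the lock */
		unsigned long flags = cb_flags;

		cb_flags = 0;
		if (!flags)
			break;	/* nothing left: really release the socket */
		handle(flags);
	}
	return 0;
}
```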
@@ -3661,6 +3766,23 @@ void mptcp_sock_graft(struct sock *sk, struct socket *parent)
	write_unlock_bh(&sk->sk_callback_lock);
 }
 
+/* Can be called without holding the msk socket lock; use the callback lock
+ * to avoid {READ_,WRITE_}ONCE annotations on sk_socket.
+ */
+static void mptcp_sock_check_graft(struct sock *sk, struct sock *ssk)
+{
+	struct socket *sock;
+
+	write_lock_bh(&sk->sk_callback_lock);
+	sock = sk->sk_socket;
+	write_unlock_bh(&sk->sk_callback_lock);
+	if (sock) {
+		mptcp_sock_graft(ssk, sock);
+		__mptcp_inherit_cgrp_data(sk, ssk);
+		__mptcp_inherit_memcg(sk, ssk, GFP_ATOMIC);
+	}
+}
+
 bool mptcp_finish_join(struct sock *ssk)
 {
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@ -3676,7 +3798,9 @@ bool mptcp_finish_join(struct sock *ssk)
		return false;
	}
 
-	/* active subflow, already present inside the conn_list */
+	/* Active subflow, already present inside the conn_list; it is grafted
+	 * either by __mptcp_subflow_connect() or accept.
+	 */
	if (!list_empty(&subflow->node)) {
		spin_lock_bh(&msk->fallback_lock);
		if (!msk->allow_subflows) {
@@ -3703,11 +3827,17 @@ bool mptcp_finish_join(struct sock *ssk)
		if (ret) {
			sock_hold(ssk);
			list_add_tail(&subflow->node, &msk->conn_list);
+			mptcp_sock_check_graft(parent, ssk);
		}
	} else {
		sock_hold(ssk);
		list_add_tail(&subflow->node, &msk->join_list);
		__set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
+
+		/* In case of later failures, __mptcp_flush_join_list() will
+		 * properly orphan the ssk via mptcp_close_ssk().
+		 */
+		mptcp_sock_check_graft(parent, ssk);
	}
	mptcp_data_unlock(parent);
@@ -3768,7 +3898,7 @@ static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
			return -EINVAL;
 
		lock_sock(sk);
-		if (__mptcp_move_skbs(sk))
+		if (mptcp_move_skbs(sk))
			mptcp_cleanup_rbuf(msk, 0);
		*karg = mptcp_inq_hint(sk);
		release_sock(sk);
@@ -3790,7 +3920,8 @@ static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
	return 0;
 }
 
-static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+static int mptcp_connect(struct sock *sk, struct sockaddr_unsized *uaddr,
+			 int addr_len)
 {
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
@@ -3900,7 +4031,7 @@ static struct proto mptcp_prot = {
	.no_autobind	= true,
 };
 
-static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+static int mptcp_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len)
 {
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *ssk, *sk = sock->sk;
@@ -3967,6 +4098,69 @@ unlock:
	return err;
 }
 
+static void mptcp_graft_subflows(struct sock *sk)
+{
+	struct mptcp_subflow_context *subflow;
+	struct mptcp_sock *msk = mptcp_sk(sk);
+
+	if (mem_cgroup_sockets_enabled) {
+		LIST_HEAD(join_list);
+
+		/* Subflows joining after __inet_accept() will get the
+		 * mem CG properly initialized at mptcp_finish_join() time,
+		 * but subflows pending in join_list need explicit
+		 * initialization before flushing `backlog_unaccounted`,
+		 * or MPTCP can later unexpectedly observe unaccounted memory.
+		 */
+		mptcp_data_lock(sk);
+		list_splice_init(&msk->join_list, &join_list);
+		mptcp_data_unlock(sk);
+
+		__mptcp_flush_join_list(sk, &join_list);
+	}
+
+	mptcp_for_each_subflow(msk, subflow) {
+		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+		lock_sock(ssk);
+
+		/* Set ssk->sk_socket of accept()ed flows to mptcp socket.
+		 * This is needed so NOSPACE flag can be set from tcp stack.
+		 */
+		if (!ssk->sk_socket)
+			mptcp_sock_graft(ssk, sk->sk_socket);
+
+		if (!mem_cgroup_sk_enabled(sk))
+			goto unlock;
+
+		__mptcp_inherit_cgrp_data(sk, ssk);
+		__mptcp_inherit_memcg(sk, ssk, GFP_KERNEL);
+
+unlock:
+		release_sock(ssk);
+	}
+
+	if (mem_cgroup_sk_enabled(sk)) {
+		gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
+		int amt;
+
+		/* Account the backlog memory; prior accept() is aware of
+		 * fwd and rmem only.
+		 */
+		mptcp_data_lock(sk);
+		amt = sk_mem_pages(sk->sk_forward_alloc +
+				   msk->backlog_unaccounted +
+				   atomic_read(&sk->sk_rmem_alloc)) -
+		      sk_mem_pages(sk->sk_forward_alloc +
+				   atomic_read(&sk->sk_rmem_alloc));
+		msk->backlog_unaccounted = 0;
+		mptcp_data_unlock(sk);
+
+		if (amt)
+			mem_cgroup_sk_charge(sk, amt, gfp);
+	}
+}
+
 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       struct proto_accept_arg *arg)
 {
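The charge above is computed as a difference of page-rounded totals rather than by rounding the backlog size in isolation: since sk_mem_pages() rounds up, pages(fwd + backlog + rmem) - pages(fwd + rmem) charges exactly the pages the backlog adds on top of what is already accounted. A small demonstration with an illustrative page size and a simplified stand-in for the kernel helper:

```c
#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1 << PAGE_SHIFT)

/* Round a byte amount up to whole pages, like the kernel's sk_mem_pages(). */
static long sk_mem_pages(long amt)
{
	return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	long fwd = 1000, rmem = 5000, backlog = 3000;

	/* Delta of rounded totals: what mptcp_graft_subflows() charges. */
	long amt = sk_mem_pages(fwd + backlog + rmem) - sk_mem_pages(fwd + rmem);

	assert(amt == 1);	/* 9000 -> 3 pages, 6000 -> 2 pages */

	/* With small existing totals, the backlog may fit in the partial
	 * page already accounted, so the delta is zero; rounding the
	 * backlog in isolation would over-charge a full page here.
	 */
	fwd = 100; rmem = 100;
	amt = sk_mem_pages(fwd + backlog + rmem) - sk_mem_pages(fwd + rmem);
	assert(amt == 0);			/* 3200 -> 1 page, 200 -> 1 page */
	assert(sk_mem_pages(backlog) == 1);	/* naive rounding charges 1 */
	return 0;
}
```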
@@ -4014,26 +4208,17 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
		msk = mptcp_sk(newsk);
		msk->in_accept_queue = 0;
 
-		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
-		 * This is needed so NOSPACE flag can be set from tcp stack.
-		 */
-		mptcp_for_each_subflow(msk, subflow) {
-			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
-			if (!ssk->sk_socket)
-				mptcp_sock_graft(ssk, newsock);
-		}
-
+		mptcp_graft_subflows(newsk);
		mptcp_rps_record_subflows(msk);
 
		/* Do late cleanup for the first subflow as necessary. Also
		 * deal with bad peers not doing a complete shutdown.
		 */
		if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
-			__mptcp_close_ssk(newsk, msk->first,
-					  mptcp_subflow_ctx(msk->first), 0);
			if (unlikely(list_is_singular(&msk->conn_list)))
				mptcp_set_state(newsk, TCP_CLOSE);
+			mptcp_close_ssk(newsk, msk->first,
+					mptcp_subflow_ctx(msk->first));
		}
	} else {
 tcpfallback:
