
Commit 1d2fbaa

edumazet authored and kuba-moo committed
tcp: stronger sk_rcvbuf checks
Currently, the TCP stack accepts an incoming packet if the sizes of its receive queues are below the sk->sk_rcvbuf limit. This can cause memory overshoot if the packet is big, like a 1/2 MB BIG TCP one.

Refine the check to take into account the incoming skb truesize.

Note that we still accept the packet if the receive queue is empty, to not completely freeze TCP flows in pathological conditions.

Signed-off-by: Eric Dumazet <[email protected]>
Reviewed-by: Kuniyuki Iwashima <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 75dff05 commit 1d2fbaa
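
For illustration only, here is a minimal userspace C sketch (not part of the patch) contrasting the old acceptance check with the refined one this commit introduces. The buffer size, allocated memory, and truesize values below are hypothetical, chosen to show how a large BIG TCP skb can overshoot sk_rcvbuf under the old check.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values, for illustration only. */
	unsigned int sk_rcvbuf    = 131072;  /* 128 KB receive buffer limit */
	unsigned int rmem_alloc   = 131000;  /* memory already charged to the socket */
	unsigned int skb_truesize = 524288;  /* ~1/2 MB BIG TCP skb */

	/* Old check: only the already-allocated memory is compared to the
	 * limit, so this oversized skb would still be accepted and memory
	 * would overshoot by roughly skb_truesize bytes. */
	bool old_ok = rmem_alloc <= sk_rcvbuf;

	/* New check (what tcp_can_ingest() does in the patch): account for
	 * the incoming skb truesize before accepting the packet. */
	bool new_ok = rmem_alloc + skb_truesize <= sk_rcvbuf;

	printf("old check accepts: %d, new check accepts: %d\n", old_ok, new_ok);
	return 0;
}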

File tree

1 file changed, +16 −6 lines changed


net/ipv4/tcp_input.c

Lines changed: 16 additions & 6 deletions
@@ -4888,10 +4888,20 @@ static void tcp_ofo_queue(struct sock *sk)
 static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb);
 static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb);
 
+/* Check if this incoming skb can be added to socket receive queues
+ * while satisfying sk->sk_rcvbuf limit.
+ */
+static bool tcp_can_ingest(const struct sock *sk, const struct sk_buff *skb)
+{
+	unsigned int new_mem = atomic_read(&sk->sk_rmem_alloc) + skb->truesize;
+
+	return new_mem <= sk->sk_rcvbuf;
+}
+
 static int tcp_try_rmem_schedule(struct sock *sk, const struct sk_buff *skb,
 				 unsigned int size)
 {
-	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
+	if (!tcp_can_ingest(sk, skb) ||
 	    !sk_rmem_schedule(sk, skb, size)) {
 
 		if (tcp_prune_queue(sk, skb) < 0)
@@ -5507,7 +5517,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb)
 		tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
 		tp->ooo_last_skb = rb_to_skb(prev);
 		if (!prev || goal <= 0) {
-			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+			if (tcp_can_ingest(sk, skb) &&
 			    !tcp_under_memory_pressure(sk))
 				break;
 			goal = sk->sk_rcvbuf >> 3;
@@ -5541,12 +5551,12 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
 
 	NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
 
-	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
+	if (!tcp_can_ingest(sk, in_skb))
 		tcp_clamp_window(sk);
 	else if (tcp_under_memory_pressure(sk))
 		tcp_adjust_rcv_ssthresh(sk);
 
-	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+	if (tcp_can_ingest(sk, in_skb))
 		return 0;
 
 	tcp_collapse_ofo_queue(sk);
@@ -5556,15 +5566,15 @@ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
 			     NULL,
 			     tp->copied_seq, tp->rcv_nxt);
 
-	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+	if (tcp_can_ingest(sk, in_skb))
 		return 0;
 
 	/* Collapsing did not help, destructive actions follow.
 	 * This must not ever occur. */
 
 	tcp_prune_ofo_queue(sk, in_skb);
 
-	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+	if (tcp_can_ingest(sk, in_skb))
 		return 0;
 
 	/* If we are really being abused, tell the caller to silently
