tcp: fix tcp_cleanup_rbuf() for tcp_read_skb()
tcp_cleanup_rbuf() retrieves the skb from sk_receive_queue and assumes the skb has not yet been dequeued. This is no longer true for the tcp_read_skb() case, where we dequeue the skb first.

Fix this by introducing a helper, __tcp_cleanup_rbuf(), which does not require any skb, and calling it from tcp_read_skb().

Fixes: 04919bed948d ("tcp: Introduce tcp_read_skb()")
Cc: Eric Dumazet <edumazet@google.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Jakub Sitnicki <jakub@cloudflare.com>
Signed-off-by: Cong Wang <cong.wang@bytedance.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit c457985aaa
parent e9c6e79760
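To make the broken assumption concrete, here is a minimal user-space sketch, not kernel code: toy_skb, rxq, toy_cleanup_rbuf() and __toy_cleanup_rbuf() are hypothetical, simplified stand-ins for sk_buff, sk_receive_queue, tcp_cleanup_rbuf() and the new helper. It only illustrates that a cleanup routine which peeks the queue head no longer looks at the skb that was just consumed once the caller dequeues first, which is why the patch splits out a helper that does not touch the queue.

/* Toy model of the ordering issue; all names here are illustrative
 * stand-ins, not the real kernel structures or functions. */
#include <stdio.h>

struct toy_skb {
	unsigned int end_seq;
	struct toy_skb *next;
};

static struct toy_skb *rxq;	/* stand-in for sk->sk_receive_queue */

/* Old-style cleanup: peeks the queue head and assumes the skb whose
 * data was just copied is still queued there. */
static void toy_cleanup_rbuf(unsigned int copied_seq)
{
	struct toy_skb *skb = rxq;	/* like skb_peek() */

	if (skb)
		printf("peek saw end_seq %u, copied_seq %u\n",
		       skb->end_seq, copied_seq);
	else
		printf("queue empty, check skipped, copied_seq %u\n",
		       copied_seq);
	/* ACK-scheduling decisions would follow here. */
}

/* New-style helper: no queue access, only the ACK decision, so it is
 * safe to call after the skb has already been dequeued. */
static void __toy_cleanup_rbuf(unsigned int copied_seq)
{
	printf("ACK decision only, no peek, copied_seq %u\n", copied_seq);
}

int main(void)
{
	struct toy_skb b = { 2000, NULL };
	struct toy_skb a = { 1000, &b };
	unsigned int copied_seq;

	rxq = &a;

	/* tcp_read_skb() style: dequeue 'a' first, then account for it. */
	rxq = a.next;			/* skb already unlinked ...         */
	copied_seq = a.end_seq;		/* ... and its payload consumed     */

	toy_cleanup_rbuf(copied_seq);	/* peeks 'b', not the consumed 'a'  */
	__toy_cleanup_rbuf(copied_seq);	/* what the patched path calls      */
	return 0;
}

Compiled and run (for example with gcc), the first call reports the end_seq of the still-queued 'b' rather than the consumed 'a', which is the stale assumption the dequeue-first path would feed into the old peek-based check; the second call makes no such assumption.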
@@ -1567,17 +1567,11 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
  * calculation of whether or not we must ACK for the sake of
  * a window update.
  */
-void tcp_cleanup_rbuf(struct sock *sk, int copied)
+static void __tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	bool time_to_ack = false;
 
-	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
-
-	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
-	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
-	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
-
 	if (inet_csk_ack_scheduled(sk)) {
 		const struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -1623,6 +1617,17 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 		tcp_send_ack(sk);
 }
 
+void tcp_cleanup_rbuf(struct sock *sk, int copied)
+{
+	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
+	__tcp_cleanup_rbuf(sk, copied);
+}
+
 static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
@@ -1771,20 +1776,19 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 		copied += used;
 
 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
-			consume_skb(skb);
 			++seq;
 			break;
 		}
-		consume_skb(skb);
 		break;
 	}
+	consume_skb(skb);
 	WRITE_ONCE(tp->copied_seq, seq);
 
 	tcp_rcv_space_adjust(sk);
 
 	/* Clean up data we have read: This will do ACK frames. */
 	if (copied > 0)
-		tcp_cleanup_rbuf(sk, copied);
+		__tcp_cleanup_rbuf(sk, copied);
 
 	return copied;
 }