tcp: Call tcp_syn_ack_timeout() directly.

Since DCCP has been removed, we do not need to use
request_sock_ops.syn_ack_timeout().

Let's call tcp_syn_ack_timeout() directly.

With this removed, every remaining function pointer in
request_sock_ops is protocol-dependent.

Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20251106003357.273403-2-kuniyu@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Kuniyuki Iwashima 2025-11-06 00:32:40 +00:00 committed by Jakub Kicinski
parent c6934c4e04
commit be88c549e9
5 changed files with 4 additions and 6 deletions

View File

@@ -36,7 +36,6 @@ struct request_sock_ops {
 				       struct sk_buff *skb,
 				       enum sk_rst_reason reason);
 	void		(*destructor)(struct request_sock *req);
-	void		(*syn_ack_timeout)(const struct request_sock *req);
 };

 struct saved_syn {

View File

@@ -1096,9 +1096,11 @@ static void reqsk_timer_handler(struct timer_list *t)
 			young <<= 1;
 		}
 	}
+
 	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
 		       &expire, &resend);
-	req->rsk_ops->syn_ack_timeout(req);
+	tcp_syn_ack_timeout(req);
+
 	if (!expire &&
 	    (!resend ||
 	     !tcp_rtx_synack(sk_listener, req) ||

View File

@@ -1660,7 +1660,6 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 	.send_ack	=	tcp_v4_reqsk_send_ack,
 	.destructor	=	tcp_v4_reqsk_destructor,
 	.send_reset	=	tcp_v4_send_reset,
-	.syn_ack_timeout =	tcp_syn_ack_timeout,
 };

 const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {

View File

@@ -458,7 +458,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
 	struct tcp_sock *tp = tcp_sk(sk);
 	int max_retries;

-	req->rsk_ops->syn_ack_timeout(req);
+	tcp_syn_ack_timeout(req);

 	/* Add one more retry for fastopen.
 	 * Paired with WRITE_ONCE() in tcp_sock_set_syncnt()
@@ -752,7 +752,6 @@ void tcp_syn_ack_timeout(const struct request_sock *req)

 	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
 }
-EXPORT_IPV6_MOD(tcp_syn_ack_timeout);

 void tcp_reset_keepalive_timer(struct sock *sk, unsigned long len)
 {

View File

@@ -796,7 +796,6 @@ struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 	.send_ack	=	tcp_v6_reqsk_send_ack,
 	.destructor	=	tcp_v6_reqsk_destructor,
 	.send_reset	=	tcp_v6_send_reset,
-	.syn_ack_timeout =	tcp_syn_ack_timeout,
 };

 const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {