mirror of https://github.com/torvalds/linux.git
mptcp: track fallbacks accurately via mibs
Add the mibs required to cover the few possible fallback causes still lacking such info. Move the relevant mib increment into the fallback helper, so that no eventual future fallback operation will miss a paired mib increment. Additionally track failed fallback via its own mib, such mib is incremented only when a fallback mandated by the protocol fails - due to racing subflow creation. While at the above, rename an existing helper to reduce long lines problems all along. Signed-off-by: Paolo Abeni <pabeni@redhat.com> Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org> Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org> Link: https://patch.msgid.link/20250723-net-next-mptcp-track-fallbacks-v1-1-a83cce08f2d5@kernel.org Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
5ec9b15d8d
commit
c65c2e3bae
|
|
@ -533,9 +533,9 @@ void mptcp_active_detect_blackhole(struct sock *ssk, bool expired)
|
|||
to_max = mptcp_get_pernet(net)->syn_retrans_before_tcp_fallback;
|
||||
|
||||
if (timeouts == to_max || (timeouts < to_max && expired)) {
|
||||
MPTCP_INC_STATS(net, MPTCP_MIB_MPCAPABLEACTIVEDROP);
|
||||
subflow->mpc_drop = 1;
|
||||
mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow);
|
||||
mptcp_early_fallback(mptcp_sk(subflow->conn), subflow,
|
||||
MPTCP_MIB_MPCAPABLEACTIVEDROP);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -80,6 +80,11 @@ static const struct snmp_mib mptcp_snmp_list[] = {
|
|||
SNMP_MIB_ITEM("RcvWndConflict", MPTCP_MIB_RCVWNDCONFLICT),
|
||||
SNMP_MIB_ITEM("MPCurrEstab", MPTCP_MIB_CURRESTAB),
|
||||
SNMP_MIB_ITEM("Blackhole", MPTCP_MIB_BLACKHOLE),
|
||||
SNMP_MIB_ITEM("MPCapableDataFallback", MPTCP_MIB_MPCAPABLEDATAFALLBACK),
|
||||
SNMP_MIB_ITEM("MD5SigFallback", MPTCP_MIB_MD5SIGFALLBACK),
|
||||
SNMP_MIB_ITEM("DssFallback", MPTCP_MIB_DSSFALLBACK),
|
||||
SNMP_MIB_ITEM("SimultConnectFallback", MPTCP_MIB_SIMULTCONNFALLBACK),
|
||||
SNMP_MIB_ITEM("FallbackFailed", MPTCP_MIB_FALLBACKFAILED),
|
||||
SNMP_MIB_SENTINEL
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -81,6 +81,13 @@ enum linux_mptcp_mib_field {
|
|||
MPTCP_MIB_RCVWNDCONFLICT, /* Conflict with while updating msk rcv wnd */
|
||||
MPTCP_MIB_CURRESTAB, /* Current established MPTCP connections */
|
||||
MPTCP_MIB_BLACKHOLE, /* A blackhole has been detected */
|
||||
MPTCP_MIB_MPCAPABLEDATAFALLBACK, /* Missing DSS/MPC+data on first
|
||||
* established packet
|
||||
*/
|
||||
MPTCP_MIB_MD5SIGFALLBACK, /* Conflicting TCP option enabled */
|
||||
MPTCP_MIB_DSSFALLBACK, /* Bad or missing DSS */
|
||||
MPTCP_MIB_SIMULTCONNFALLBACK, /* Simultaneous connect */
|
||||
MPTCP_MIB_FALLBACKFAILED, /* Can't fallback due to msk status */
|
||||
__MPTCP_MIB_MAX
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -978,8 +978,10 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
|
|||
if (subflow->mp_join)
|
||||
goto reset;
|
||||
subflow->mp_capable = 0;
|
||||
if (!mptcp_try_fallback(ssk))
|
||||
if (!mptcp_try_fallback(ssk, MPTCP_MIB_MPCAPABLEDATAFALLBACK)) {
|
||||
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED);
|
||||
goto reset;
|
||||
}
|
||||
pr_fallback(msk);
|
||||
return false;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -68,6 +68,26 @@ static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk)
|
|||
return &inet_stream_ops;
|
||||
}
|
||||
|
||||
bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
|
||||
{
|
||||
struct net *net = sock_net((struct sock *)msk);
|
||||
|
||||
if (__mptcp_check_fallback(msk))
|
||||
return true;
|
||||
|
||||
spin_lock_bh(&msk->fallback_lock);
|
||||
if (!msk->allow_infinite_fallback) {
|
||||
spin_unlock_bh(&msk->fallback_lock);
|
||||
return false;
|
||||
}
|
||||
|
||||
msk->allow_subflows = false;
|
||||
set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
|
||||
__MPTCP_INC_STATS(net, fb_mib);
|
||||
spin_unlock_bh(&msk->fallback_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
static int __mptcp_socket_create(struct mptcp_sock *msk)
|
||||
{
|
||||
struct mptcp_subflow_context *subflow;
|
||||
|
|
@ -561,10 +581,7 @@ static bool mptcp_check_data_fin(struct sock *sk)
|
|||
|
||||
static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
|
||||
{
|
||||
if (mptcp_try_fallback(ssk)) {
|
||||
MPTCP_INC_STATS(sock_net(ssk),
|
||||
MPTCP_MIB_DSSCORRUPTIONFALLBACK);
|
||||
} else {
|
||||
if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSCORRUPTIONFALLBACK)) {
|
||||
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
|
||||
mptcp_subflow_reset(ssk);
|
||||
}
|
||||
|
|
@ -1143,12 +1160,12 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
|
|||
mpext->infinite_map = 1;
|
||||
mpext->data_len = 0;
|
||||
|
||||
if (!mptcp_try_fallback(ssk)) {
|
||||
if (!mptcp_try_fallback(ssk, MPTCP_MIB_INFINITEMAPTX)) {
|
||||
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED);
|
||||
mptcp_subflow_reset(ssk);
|
||||
return;
|
||||
}
|
||||
|
||||
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
|
||||
mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
|
||||
pr_fallback(msk);
|
||||
}
|
||||
|
|
@ -3689,16 +3706,15 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
|
|||
* TCP option space.
|
||||
*/
|
||||
if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
|
||||
mptcp_subflow_early_fallback(msk, subflow);
|
||||
mptcp_early_fallback(msk, subflow, MPTCP_MIB_MD5SIGFALLBACK);
|
||||
#endif
|
||||
if (subflow->request_mptcp) {
|
||||
if (mptcp_active_should_disable(sk)) {
|
||||
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
|
||||
mptcp_subflow_early_fallback(msk, subflow);
|
||||
} else if (mptcp_token_new_connect(ssk) < 0) {
|
||||
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
|
||||
mptcp_subflow_early_fallback(msk, subflow);
|
||||
}
|
||||
if (mptcp_active_should_disable(sk))
|
||||
mptcp_early_fallback(msk, subflow,
|
||||
MPTCP_MIB_MPCAPABLEACTIVEDISABLED);
|
||||
else if (mptcp_token_new_connect(ssk) < 0)
|
||||
mptcp_early_fallback(msk, subflow,
|
||||
MPTCP_MIB_TOKENFALLBACKINIT);
|
||||
}
|
||||
|
||||
WRITE_ONCE(msk->write_seq, subflow->idsn);
|
||||
|
|
|
|||
|
|
@ -1223,24 +1223,6 @@ static inline bool mptcp_check_fallback(const struct sock *sk)
|
|||
return __mptcp_check_fallback(msk);
|
||||
}
|
||||
|
||||
static inline bool __mptcp_try_fallback(struct mptcp_sock *msk)
|
||||
{
|
||||
if (__mptcp_check_fallback(msk)) {
|
||||
pr_debug("TCP fallback already done (msk=%p)\n", msk);
|
||||
return true;
|
||||
}
|
||||
spin_lock_bh(&msk->fallback_lock);
|
||||
if (!msk->allow_infinite_fallback) {
|
||||
spin_unlock_bh(&msk->fallback_lock);
|
||||
return false;
|
||||
}
|
||||
|
||||
msk->allow_subflows = false;
|
||||
set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
|
||||
spin_unlock_bh(&msk->fallback_lock);
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool __mptcp_has_initial_subflow(const struct mptcp_sock *msk)
|
||||
{
|
||||
struct sock *ssk = READ_ONCE(msk->first);
|
||||
|
|
@ -1250,14 +1232,16 @@ static inline bool __mptcp_has_initial_subflow(const struct mptcp_sock *msk)
|
|||
TCPF_SYN_RECV | TCPF_LISTEN));
|
||||
}
|
||||
|
||||
static inline bool mptcp_try_fallback(struct sock *ssk)
|
||||
bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib);
|
||||
|
||||
static inline bool mptcp_try_fallback(struct sock *ssk, int fb_mib)
|
||||
{
|
||||
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
|
||||
struct sock *sk = subflow->conn;
|
||||
struct mptcp_sock *msk;
|
||||
|
||||
msk = mptcp_sk(sk);
|
||||
if (!__mptcp_try_fallback(msk))
|
||||
if (!__mptcp_try_fallback(msk, fb_mib))
|
||||
return false;
|
||||
if (READ_ONCE(msk->snd_data_fin_enable) && !(ssk->sk_shutdown & SEND_SHUTDOWN)) {
|
||||
gfp_t saved_allocation = ssk->sk_allocation;
|
||||
|
|
@ -1275,12 +1259,13 @@ static inline bool mptcp_try_fallback(struct sock *ssk)
|
|||
|
||||
#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
|
||||
|
||||
static inline void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
|
||||
struct mptcp_subflow_context *subflow)
|
||||
static inline void mptcp_early_fallback(struct mptcp_sock *msk,
|
||||
struct mptcp_subflow_context *subflow,
|
||||
int fb_mib)
|
||||
{
|
||||
pr_fallback(msk);
|
||||
subflow->request_mptcp = 0;
|
||||
WARN_ON_ONCE(!__mptcp_try_fallback(msk));
|
||||
WARN_ON_ONCE(!__mptcp_try_fallback(msk, fb_mib));
|
||||
}
|
||||
|
||||
static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
|
||||
|
|
|
|||
|
|
@ -544,11 +544,13 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
|
|||
mptcp_get_options(skb, &mp_opt);
|
||||
if (subflow->request_mptcp) {
|
||||
if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
|
||||
if (!mptcp_try_fallback(sk))
|
||||
if (!mptcp_try_fallback(sk,
|
||||
MPTCP_MIB_MPCAPABLEACTIVEFALLBACK)) {
|
||||
MPTCP_INC_STATS(sock_net(sk),
|
||||
MPTCP_MIB_FALLBACKFAILED);
|
||||
goto do_reset;
|
||||
}
|
||||
|
||||
MPTCP_INC_STATS(sock_net(sk),
|
||||
MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
|
||||
pr_fallback(msk);
|
||||
goto fallback;
|
||||
}
|
||||
|
|
@ -1406,7 +1408,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
|
|||
return true;
|
||||
}
|
||||
|
||||
if (!mptcp_try_fallback(ssk)) {
|
||||
if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSFALLBACK)) {
|
||||
/* fatal protocol error, close the socket.
|
||||
* subflow_error_report() will introduce the appropriate barriers
|
||||
*/
|
||||
|
|
@ -1859,7 +1861,7 @@ static void subflow_state_change(struct sock *sk)
|
|||
|
||||
msk = mptcp_sk(parent);
|
||||
if (subflow_simultaneous_connect(sk)) {
|
||||
WARN_ON_ONCE(!mptcp_try_fallback(sk));
|
||||
WARN_ON_ONCE(!mptcp_try_fallback(sk, MPTCP_MIB_SIMULTCONNFALLBACK));
|
||||
pr_fallback(msk);
|
||||
subflow->conn_finished = 1;
|
||||
mptcp_propagate_state(parent, sk, subflow, NULL);
|
||||
|
|
|
|||
Loading…
Reference in New Issue