mirror of https://github.com/torvalds/linux.git
net: replace use of system_wq with system_percpu_wq
Currently, if a user enqueues a work item using schedule_delayed_work(), the
workqueue used is system_wq (a per-cpu wq), while queue_delayed_work() uses
WORK_CPU_UNBOUND (used when a CPU is not specified). The same applies to
schedule_work(), which uses system_wq, and queue_work(), which again makes
use of WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the API.

system_unbound_wq should be the default workqueue so as not to enforce
locality constraints for random work whenever it's not required. Add
system_dfl_wq to encourage its use when unbound work should be used. The
old system_unbound_wq will be kept for a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://patch.msgid.link/20250918142427.309519-3-marco.crivellari@suse.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
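For reference, a minimal sketch (not part of this patch) of the caller-visible difference the message describes; my_work, my_work_fn and queueing_example are hypothetical names, and the system_percpu_wq/system_dfl_wq identifiers are the aliases this series introduces:

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* hypothetical work handler, for illustration only */
}
static DECLARE_WORK(my_work, my_work_fn);

static void queueing_example(void)
{
	/* schedule_work() is hard-wired to system_wq, a per-cpu workqueue,
	 * so with no CPU specified the item normally runs on the local CPU.
	 */
	schedule_work(&my_work);

	/* queue_work() takes the workqueue explicitly and queues with
	 * WORK_CPU_UNBOUND when no CPU is given; on an unbound workqueue
	 * the worker may run on any CPU.
	 */
	queue_work(system_unbound_wq, &my_work);

	/* After this series, callers that really want the per-cpu default
	 * name it explicitly (system_percpu_wq); system_dfl_wq becomes the
	 * preferred unbound default.
	 */
	queue_work(system_percpu_wq, &my_work);
}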
commit 5fd8bb982e
parent 9870d350e4
@@ -1281,7 +1281,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 		time = jiffies - channel->rfs_last_expiry;
 		/* Would our quota be >= 20? */
 		if (channel->rfs_filter_count * time >= 600 * HZ)
-			mod_delayed_work(system_wq, &channel->filter_work, 0);
+			mod_delayed_work(system_percpu_wq, &channel->filter_work, 0);
 #endif
 
 		/* There is no race here; although napi_disable() will
@@ -1300,7 +1300,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
 		time = jiffies - channel->rfs_last_expiry;
 		/* Would our quota be >= 20? */
 		if (channel->rfs_filter_count * time >= 600 * HZ)
-			mod_delayed_work(system_wq, &channel->filter_work, 0);
+			mod_delayed_work(system_percpu_wq, &channel->filter_work, 0);
 #endif
 
 		/* There is no race here; although napi_disable() will
@@ -911,7 +911,7 @@ static void sfp_soft_start_poll(struct sfp *sfp)
 
 	if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) &&
 	    !sfp->need_poll)
-		mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+		mod_delayed_work(system_percpu_wq, &sfp->poll, poll_jiffies);
 	mutex_unlock(&sfp->st_mutex);
 }
 
@@ -1682,7 +1682,7 @@ static void sfp_hwmon_probe(struct work_struct *work)
 	err = sfp_read(sfp, true, 0, &sfp->diag, sizeof(sfp->diag));
 	if (err < 0) {
 		if (sfp->hwmon_tries--) {
-			mod_delayed_work(system_wq, &sfp->hwmon_probe,
+			mod_delayed_work(system_percpu_wq, &sfp->hwmon_probe,
 					 T_PROBE_RETRY_SLOW);
 		} else {
 			dev_warn(sfp->dev, "hwmon probe failed: %pe\n",
@@ -1709,7 +1709,7 @@ static void sfp_hwmon_probe(struct work_struct *work)
 static int sfp_hwmon_insert(struct sfp *sfp)
 {
 	if (sfp->have_a2 && sfp->id.ext.diagmon & SFP_DIAGMON_DDM) {
-		mod_delayed_work(system_wq, &sfp->hwmon_probe, 1);
+		mod_delayed_work(system_percpu_wq, &sfp->hwmon_probe, 1);
 		sfp->hwmon_tries = R_PROBE_RETRY_SLOW;
 	}
 
@@ -2563,7 +2563,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
 		/* Force a poll to re-read the hardware signal state after
 		 * sfp_sm_mod_probe() changed state_hw_mask.
 		 */
-		mod_delayed_work(system_wq, &sfp->poll, 1);
+		mod_delayed_work(system_percpu_wq, &sfp->poll, 1);
 
 		err = sfp_hwmon_insert(sfp);
 		if (err)
@@ -3008,7 +3008,7 @@ static void sfp_poll(struct work_struct *work)
 	// it's unimportant if we race while reading this.
 	if (sfp->state_soft_mask & (SFP_F_LOS | SFP_F_TX_FAULT) ||
 	    sfp->need_poll)
-		mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+		mod_delayed_work(system_percpu_wq, &sfp->poll, poll_jiffies);
 }
 
 static struct sfp *sfp_alloc(struct device *dev)
@@ -3178,7 +3178,7 @@ static int sfp_probe(struct platform_device *pdev)
 	}
 
 	if (sfp->need_poll)
-		mod_delayed_work(system_wq, &sfp->poll, poll_jiffies);
+		mod_delayed_work(system_percpu_wq, &sfp->poll, poll_jiffies);
 
 	/* We could have an issue in cases no Tx disable pin is available or
 	 * wired as modules using a laser as their light source will continue to
@@ -134,7 +134,7 @@ static void ccm_rx_timer_start(struct br_cfm_peer_mep *peer_mep)
 	 * of the configured CC 'expected_interval'
 	 * in order to detect CCM defect after 3.25 interval.
 	 */
-	queue_delayed_work(system_wq, &peer_mep->ccm_rx_dwork,
+	queue_delayed_work(system_percpu_wq, &peer_mep->ccm_rx_dwork,
 			   usecs_to_jiffies(interval_us / 4));
 }
 
@@ -285,7 +285,7 @@ static void ccm_tx_work_expired(struct work_struct *work)
 	ccm_frame_tx(skb);
 
 	interval_us = interval_to_us(mep->cc_config.exp_interval);
-	queue_delayed_work(system_wq, &mep->ccm_tx_dwork,
+	queue_delayed_work(system_percpu_wq, &mep->ccm_tx_dwork,
 			   usecs_to_jiffies(interval_us));
 }
 
@@ -809,7 +809,7 @@ int br_cfm_cc_ccm_tx(struct net_bridge *br, const u32 instance,
 	 * to send first frame immediately
 	 */
 	mep->ccm_tx_end = jiffies + usecs_to_jiffies(tx_info->period * 1000000);
-	queue_delayed_work(system_wq, &mep->ccm_tx_dwork, 0);
+	queue_delayed_work(system_percpu_wq, &mep->ccm_tx_dwork, 0);
 
 save:
 	mep->cc_ccm_tx_info = *tx_info;
@@ -341,7 +341,7 @@ static void br_mrp_test_work_expired(struct work_struct *work)
 out:
 	rcu_read_unlock();
 
-	queue_delayed_work(system_wq, &mrp->test_work,
+	queue_delayed_work(system_percpu_wq, &mrp->test_work,
 			   usecs_to_jiffies(mrp->test_interval));
 }
 
@@ -418,7 +418,7 @@ static void br_mrp_in_test_work_expired(struct work_struct *work)
 out:
 	rcu_read_unlock();
 
-	queue_delayed_work(system_wq, &mrp->in_test_work,
+	queue_delayed_work(system_percpu_wq, &mrp->in_test_work,
 			   usecs_to_jiffies(mrp->in_test_interval));
 }
 
@@ -725,7 +725,7 @@ int br_mrp_start_test(struct net_bridge *br,
 	mrp->test_max_miss = test->max_miss;
 	mrp->test_monitor = test->monitor;
 	mrp->test_count_miss = 0;
-	queue_delayed_work(system_wq, &mrp->test_work,
+	queue_delayed_work(system_percpu_wq, &mrp->test_work,
 			   usecs_to_jiffies(test->interval));
 
 	return 0;
@@ -865,7 +865,7 @@ int br_mrp_start_in_test(struct net_bridge *br,
 	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
 	mrp->in_test_max_miss = in_test->max_miss;
 	mrp->in_test_count_miss = 0;
-	queue_delayed_work(system_wq, &mrp->in_test_work,
+	queue_delayed_work(system_percpu_wq, &mrp->in_test_work,
 			   usecs_to_jiffies(in_test->interval));
 
 	return 0;
@@ -314,7 +314,7 @@ static void __schedule_delayed(struct ceph_mon_client *monc)
 		delay = CEPH_MONC_PING_INTERVAL;
 
 	dout("__schedule_delayed after %lu\n", delay);
-	mod_delayed_work(system_wq, &monc->delayed_work,
+	mod_delayed_work(system_percpu_wq, &monc->delayed_work,
 			 round_jiffies_relative(delay));
 }
 
@@ -876,7 +876,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 	sk_psock_stop(psock);
 
 	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
-	queue_rcu_work(system_wq, &psock->rwork);
+	queue_rcu_work(system_percpu_wq, &psock->rwork);
 }
 EXPORT_SYMBOL_GPL(sk_psock_drop);
 
@@ -320,7 +320,7 @@ static void devlink_release(struct work_struct *work)
 void devlink_put(struct devlink *devlink)
 {
 	if (refcount_dec_and_test(&devlink->refcount))
-		queue_rcu_work(system_wq, &devlink->rwork);
+		queue_rcu_work(system_percpu_wq, &devlink->rwork);
 }
 
 struct devlink *devlinks_xa_find_get(struct net *net, unsigned long *indexp)
@@ -183,7 +183,7 @@ static void fqdir_work_fn(struct work_struct *work)
 	rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);
 
 	if (llist_add(&fqdir->free_list, &fqdir_free_list))
-		queue_delayed_work(system_wq, &fqdir_free_work, HZ);
+		queue_delayed_work(system_percpu_wq, &fqdir_free_work, HZ);
 }
 
 int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
@@ -301,7 +301,7 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state)
 		net->ct.ecache_dwork_pending = true;
 	} else if (state == NFCT_ECACHE_DESTROY_SENT) {
 		if (!hlist_nulls_empty(&cnet->ecache.dying_list))
-			mod_delayed_work(system_wq, &cnet->ecache.dwork, 0);
+			mod_delayed_work(system_percpu_wq, &cnet->ecache.dwork, 0);
 		else
 			net->ct.ecache_dwork_pending = false;
 	}
@@ -75,7 +75,7 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
 
 		/* schedule vport destroy, dev_put and genl notification */
 		ovs_net = net_generic(dev_net(dev), ovs_net_id);
-		queue_work(system_wq, &ovs_net->dp_notify_work);
+		queue_work(system_percpu_wq, &ovs_net->dp_notify_work);
 	}
 
 	return NOTIFY_DONE;
@@ -159,7 +159,7 @@ static void rfkill_schedule_global_op(enum rfkill_sched_op op)
 	rfkill_op_pending = true;
 	if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
 		/* bypass the limiter for EPO */
-		mod_delayed_work(system_wq, &rfkill_op_work, 0);
+		mod_delayed_work(system_percpu_wq, &rfkill_op_work, 0);
 		rfkill_last_scheduled = jiffies;
 	} else
 		rfkill_schedule_ratelimited();
@@ -85,7 +85,7 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
 	 * otherwise there is a risk of out-of-sync link groups.
 	 */
 	if (!lgr->freeing) {
-		mod_delayed_work(system_wq, &lgr->free_work,
+		mod_delayed_work(system_percpu_wq, &lgr->free_work,
 				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
 				 SMC_LGR_FREE_DELAY_CLNT :
 				 SMC_LGR_FREE_DELAY_SERV);
@@ -1649,7 +1649,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
 			 * reschedule it, then ungrab the socket refcount to
 			 * keep it balanced.
 			 */
-			if (mod_delayed_work(system_wq, &vsk->connect_work,
+			if (mod_delayed_work(system_percpu_wq, &vsk->connect_work,
 					     timeout))
 				sock_put(sk);
 