fs: replace use of system_wq with system_percpu_wq
Currently, if a user enqueues a work item with schedule_delayed_work(), the workqueue used is system_wq (a per-CPU workqueue), while queue_delayed_work() uses WORK_CPU_UNBOUND (used when no CPU is specified). The same applies to schedule_work(), which uses system_wq, versus queue_work(), which again uses WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API.

system_wq is a per-CPU workqueue, yet nothing in its name conveys that CPU affinity constraint, which very often is not what users need. Make the constraint explicit by switching the fs subsystem to system_percpu_wq. The old workqueue will be kept for a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://lore.kernel.org/20250916082906.77439-3-marco.crivellari@suse.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
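For readers unfamiliar with the naming change, here is a minimal, hypothetical C sketch of what the mechanical conversion in the hunks below looks like from a caller's point of view. example_timer_fn, example_dwork and example_requeue are invented for illustration, and system_percpu_wq is assumed to alias the existing per-CPU system_wq, as the commit message states; this is not code from the patch.

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Hypothetical deferred work; a real user would typically requeue itself. */
static void example_timer_fn(struct work_struct *work)
{
}

static DECLARE_DELAYED_WORK(example_dwork, example_timer_fn);

static void example_requeue(void)
{
        /* Old spelling: the per-CPU nature of the target wq is implicit. */
        queue_delayed_work(system_wq, &example_dwork, HZ);

        /* New spelling: assumed to be the same per-CPU workqueue, but the
         * name now documents the affinity constraint. */
        queue_delayed_work(system_percpu_wq, &example_dwork, HZ);
}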
commit 4ef64db060
parent 7a4f92d39f
fs/aio.c | 2 +-

fs/aio.c
@@ -636,7 +636,7 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 
         /* Synchronize against RCU protected table->table[] dereferences */
         INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
-        queue_rcu_work(system_wq, &ctx->free_rwork);
+        queue_rcu_work(system_percpu_wq, &ctx->free_rwork);
 }
 
 /*
@@ -2442,7 +2442,7 @@ static int dirtytime_interval_handler(const struct ctl_table *table, int write,
 
         ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
         if (ret == 0 && write)
-                mod_delayed_work(system_wq, &dirtytime_work, 0);
+                mod_delayed_work(system_percpu_wq, &dirtytime_work, 0);
         return ret;
 }
 
@@ -119,7 +119,7 @@ void fuse_check_timeout(struct work_struct *work)
                 goto abort_conn;
 
 out:
-        queue_delayed_work(system_wq, &fc->timeout.work,
+        queue_delayed_work(system_percpu_wq, &fc->timeout.work,
                            fuse_timeout_timer_freq);
         return;
 
@@ -1273,7 +1273,7 @@ static void set_request_timeout(struct fuse_conn *fc, unsigned int timeout)
 {
         fc->timeout.req_timeout = secs_to_jiffies(timeout);
         INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout);
-        queue_delayed_work(system_wq, &fc->timeout.work,
+        queue_delayed_work(system_percpu_wq, &fc->timeout.work,
                            fuse_timeout_timer_freq);
 }
 
@@ -335,7 +335,7 @@ static int param_set_nfs_timeout(const char *val, const struct kernel_param *kp)
                         num *= HZ;
                 *((int *)kp->arg) = num;
                 if (!list_empty(&nfs_automount_list))
-                        mod_delayed_work(system_wq, &nfs_automount_task, num);
+                        mod_delayed_work(system_percpu_wq, &nfs_automount_task, num);
         } else {
                 *((int *)kp->arg) = -1*HZ;
                 cancel_delayed_work(&nfs_automount_task);
@@ -122,7 +122,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
                 timeout = 5 * HZ;
         dprintk("%s: requeueing work. Lease period = %ld\n",
                         __func__, (timeout + HZ - 1) / HZ);
-        mod_delayed_work(system_wq, &clp->cl_renewd, timeout);
+        mod_delayed_work(system_percpu_wq, &clp->cl_renewd, timeout);
         set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
         spin_unlock(&clp->cl_lock);
 }