fs: replace use of system_wq with system_percpu_wq

Currently, when a user enqueues a work item with schedule_delayed_work(),
the workqueue used is "system_wq" (a per-CPU wq), while queue_delayed_work()
uses WORK_CPU_UNBOUND (used when a CPU is not specified). The same applies
to schedule_work(), which uses system_wq, and queue_work(), which again
uses WORK_CPU_UNBOUND.
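
For reference, the schedule_*() helpers are thin wrappers around the
queue_*() API. A simplified sketch of the current wrappers in
include/linux/workqueue.h (illustrative, not quoted verbatim):

	static inline bool schedule_work(struct work_struct *work)
	{
		/* always targets the per-CPU system_wq */
		return queue_work(system_wq, work);
	}

	static inline bool schedule_delayed_work(struct delayed_work *dwork,
						 unsigned long delay)
	{
		/*
		 * queue_work()/queue_delayed_work() themselves resolve to
		 * queue_work_on(WORK_CPU_UNBOUND, wq, ...) internally.
		 */
		return queue_delayed_work(system_wq, dwork, delay);
	}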

This lack of consistency cannot be addressed without refactoring the API.

system_wq is a per-CPU workqueue, yet nothing in its name indicates that
CPU affinity constraint, which is very often not required by users. Make
this explicit by switching the fs subsystem to system_percpu_wq.

The old wq will be kept for a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://lore.kernel.org/20250916082906.77439-3-marco.crivellari@suse.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
 6 files changed, 6 insertions(+), 6 deletions(-)

@@ -636,7 +636,7 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 	/* Synchronize against RCU protected table->table[] dereferences */
 	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
-	queue_rcu_work(system_wq, &ctx->free_rwork);
+	queue_rcu_work(system_percpu_wq, &ctx->free_rwork);
 }
 
 /*

@@ -2442,7 +2442,7 @@ static int dirtytime_interval_handler(const struct ctl_table *table, int write,
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (ret == 0 && write)
-		mod_delayed_work(system_wq, &dirtytime_work, 0);
+		mod_delayed_work(system_percpu_wq, &dirtytime_work, 0);
 	return ret;
 }

@@ -119,7 +119,7 @@ void fuse_check_timeout(struct work_struct *work)
 		goto abort_conn;
 
 out:
-	queue_delayed_work(system_wq, &fc->timeout.work,
+	queue_delayed_work(system_percpu_wq, &fc->timeout.work,
 			   fuse_timeout_timer_freq);
 	return;

@@ -1273,7 +1273,7 @@ static void set_request_timeout(struct fuse_conn *fc, unsigned int timeout)
 {
 	fc->timeout.req_timeout = secs_to_jiffies(timeout);
 	INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout);
-	queue_delayed_work(system_wq, &fc->timeout.work,
+	queue_delayed_work(system_percpu_wq, &fc->timeout.work,
 			   fuse_timeout_timer_freq);
 }

@@ -335,7 +335,7 @@ static int param_set_nfs_timeout(const char *val, const struct kernel_param *kp)
 		num *= HZ;
 		*((int *)kp->arg) = num;
 		if (!list_empty(&nfs_automount_list))
-			mod_delayed_work(system_wq, &nfs_automount_task, num);
+			mod_delayed_work(system_percpu_wq, &nfs_automount_task, num);
 	} else {
 		*((int *)kp->arg) = -1*HZ;
 		cancel_delayed_work(&nfs_automount_task);

@@ -122,7 +122,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
 		timeout = 5 * HZ;
 	dprintk("%s: requeueing work. Lease period = %ld\n",
 			__func__, (timeout + HZ - 1) / HZ);
-	mod_delayed_work(system_wq, &clp->cl_renewd, timeout);
+	mod_delayed_work(system_percpu_wq, &clp->cl_renewd, timeout);
 	set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
 	spin_unlock(&clp->cl_lock);
 }