mirror of https://github.com/torvalds/linux.git
drivers/block: WQ_PERCPU added to alloc_workqueue users
Currently, if a user enqueues a work item using schedule_delayed_work(), the workqueue used is system_wq (a per-CPU workqueue), while queue_delayed_work() uses WORK_CPU_UNBOUND (used when no CPU is specified). The same applies to schedule_work(), which uses system_wq, and queue_work(), which again uses WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API.

alloc_workqueue() treats all queues as per-CPU by default, while unbound workqueues must opt in via WQ_UNBOUND. This default is suboptimal: most workloads benefit from unbound queues, allowing the scheduler to place worker threads where they're needed and reducing noise when CPUs are isolated.

This patch adds a new WQ_PERCPU flag to explicitly request per-CPU behavior. Both flags coexist for one release cycle to allow callers to transition their calls. Once the migration is complete, WQ_UNBOUND can be removed and unbound will become the implicit default.

With the introduction of the WQ_PERCPU flag (equivalent to !WQ_UNBOUND), any alloc_workqueue() caller that doesn't explicitly specify WQ_UNBOUND must now use WQ_PERCPU. All existing users have been updated accordingly.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 456cefcb31
commit d7b1cdc910
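For context, the inconsistency described in the commit message comes from the enqueue helpers in include/linux/workqueue.h. The snippet below is a simplified paraphrase of those helpers, not the verbatim kernel source:

	/* Simplified from include/linux/workqueue.h. */
	static inline bool schedule_work(struct work_struct *work)
	{
		/* Always targets system_wq, a per-CPU workqueue. */
		return queue_work(system_wq, work);
	}

	static inline bool queue_work(struct workqueue_struct *wq,
				      struct work_struct *work)
	{
		/* WORK_CPU_UNBOUND means "no specific CPU requested". */
		return queue_work_on(WORK_CPU_UNBOUND, wq, work);
	}

So schedule_work() pins work to the per-CPU system_wq, while the lower-level queue_work() path leaves CPU placement open, which is the API inconsistency this series starts to untangle.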
@@ -44,7 +44,7 @@ aoe_init(void)
 {
 	int ret;
 
-	aoe_wq = alloc_workqueue("aoe_wq", 0, 0);
+	aoe_wq = alloc_workqueue("aoe_wq", WQ_PERCPU, 0);
 	if (!aoe_wq)
 		return -ENOMEM;
 
@@ -7389,7 +7389,7 @@ static int __init rbd_init(void)
 	 * The number of active work items is limited by the number of
 	 * rbd devices * queue depth, so leave @max_active at default.
 	 */
-	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM | WQ_PERCPU, 0);
 	if (!rbd_wq) {
 		rc = -ENOMEM;
 		goto err_out_slab;
@@ -1809,7 +1809,7 @@ static int __init rnbd_client_init(void)
 		unregister_blkdev(rnbd_client_major, "rnbd");
 		return err;
 	}
-	rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", 0, 0);
+	rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", WQ_PERCPU, 0);
 	if (!rnbd_clt_wq) {
 		pr_err("Failed to load module, alloc_workqueue failed.\n");
 		rnbd_clt_destroy_sysfs_files();
@@ -1216,7 +1216,7 @@ static int __init vdc_init(void)
 {
 	int err;
 
-	sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+	sunvdc_wq = alloc_workqueue("sunvdc", WQ_PERCPU, 0);
 	if (!sunvdc_wq)
 		return -ENOMEM;
 
@@ -1682,7 +1682,7 @@ static int __init virtio_blk_init(void)
 {
 	int error;
 
-	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+	virtblk_wq = alloc_workqueue("virtio-blk", WQ_PERCPU, 0);
 	if (!virtblk_wq)
 		return -ENOMEM;
 
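For new callers after this series, the choice of pooling behavior becomes explicit rather than implied by the absence of a flag. A minimal sketch of the intended pattern (hypothetical driver, names invented for illustration):

	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int __init example_init(void)
	{
		/* Per-CPU pool: work items run on the CPU that queued them. */
		example_wq = alloc_workqueue("example_wq", WQ_PERCPU, 0);

		/* Unbound alternative: let the scheduler place the workers:
		 * example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
		 */
		if (!example_wq)
			return -ENOMEM;
		return 0;
	}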