author	Sebastian Ott <sebott@linux.vnet.ibm.com>	2010-02-26 22:37:24 +0100
committer	Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>	2010-02-26 22:37:29 +0100
commit	be5d3823f29c09676abd2eeea4f9767bc4a1a531 (patch)
tree	0a89be6c2b36e1db6f1118cf2b483c84d1ac8def /drivers/s390/cio/css.c
parent	6f5d09a0e9731a39a4d52a5902daec72c1e43692 (diff)
[S390] cio: consolidate workqueues
We used to maintain two single-threaded workqueues for synchronization and
to trigger work from interrupt context. Since our latest cio changes we only
use one of these workqueues, so get rid of the unused workqueue, rename the
remaining one to "cio_work_q", and move its ownership to the channel
subsystem driver.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
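For background, the pattern the patch converges on is the standard kernel
workqueue lifecycle: create one single-threaded queue at initialization,
queue work onto it (queue_work() is safe from interrupt context), flush it
when callers need to wait for pending work, and destroy it when its owner
goes away. The sketch below illustrates only that lifecycle; demo_work_q,
demo_fn and the "demo" queue name are hypothetical placeholders, not taken
from the driver.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical single-threaded queue playing the role of cio_work_q. */
static struct workqueue_struct *demo_work_q;

static void demo_fn(struct work_struct *work)
{
	/* Runs in process context; a single-threaded queue executes its
	 * work items one at a time, which is what makes it usable as a
	 * synchronization point. */
}
static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	demo_work_q = create_singlethread_workqueue("demo");
	if (!demo_work_q)
		return -ENOMEM;
	queue_work(demo_work_q, &demo_work);	/* also legal from IRQ context */
	flush_workqueue(demo_work_q);		/* sleep until queued work ran */
	return 0;
}

static void __exit demo_exit(void)
{
	/* The queue's owner is responsible for tearing it down. */
	destroy_workqueue(demo_work_q);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");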
Diffstat (limited to 'drivers/s390/cio/css.c')
-rw-r--r--	drivers/s390/cio/css.c | 26 ++++++++++++++++++--------
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7679aee6fa14..99fcf9d0ea14 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -232,7 +232,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
 	if (!get_device(&sch->dev))
 		return;
 	sch->todo = todo;
-	if (!queue_work(slow_path_wq, &sch->todo_work)) {
+	if (!queue_work(cio_work_q, &sch->todo_work)) {
 		/* Already queued, release workqueue ref. */
 		put_device(&sch->dev);
 	}
@@ -543,7 +543,7 @@ static void css_slow_path_func(struct work_struct *unused)
 }
 
 static DECLARE_WORK(slow_path_work, css_slow_path_func);
-struct workqueue_struct *slow_path_wq;
+struct workqueue_struct *cio_work_q;
 
 void css_schedule_eval(struct subchannel_id schid)
 {
@@ -552,7 +552,7 @@ void css_schedule_eval(struct subchannel_id schid)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_sch_add(slow_subchannel_set, schid);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -563,7 +563,7 @@ void css_schedule_eval_all(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_fill(slow_subchannel_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -594,14 +594,14 @@ void css_schedule_eval_all_unreg(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_add_set(slow_subchannel_set, unreg_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 	idset_free(unreg_set);
 }
 
 void css_wait_for_slow_path(void)
 {
-	flush_workqueue(slow_path_wq);
+	flush_workqueue(cio_work_q);
 }
 
 /* Schedule reprobing of all unregistered subchannels. */
@@ -992,12 +992,21 @@ static int __init channel_subsystem_init(void)
 	ret = css_bus_init();
 	if (ret)
 		return ret;
-
+	cio_work_q = create_singlethread_workqueue("cio");
+	if (!cio_work_q) {
+		ret = -ENOMEM;
+		goto out_bus;
+	}
 	ret = io_subchannel_init();
 	if (ret)
-		css_bus_cleanup();
+		goto out_wq;
 
 	return ret;
+out_wq:
+	destroy_workqueue(cio_work_q);
+out_bus:
+	css_bus_cleanup();
+	return ret;
 }
 
 subsys_initcall(channel_subsystem_init);
@@ -1020,6 +1029,7 @@ static int __init channel_subsystem_init_sync(void)
 	css_schedule_eval_all();
 	/* Wait for the evaluation of subchannels to finish. */
 	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
+	flush_workqueue(cio_work_q);
 	/* Wait for the subchannel type specific initialization to finish */
 	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
 }
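
A side note on the first hunk above: css_sched_sch_todo() takes a device
reference before queueing, and a zero return from queue_work() means the
work item was already pending, in which case the reference just taken must
be dropped again to keep the count balanced. A minimal sketch of that
get/queue/put idiom follows, reusing the hypothetical demo_work_q from the
earlier sketch rather than the driver's actual structures:

#include <linux/device.h>
#include <linux/workqueue.h>

extern struct workqueue_struct *demo_work_q;	/* as in the earlier sketch */

static void demo_sched_todo(struct device *dev, struct work_struct *todo_work)
{
	if (!get_device(dev))	/* pin the device for the work function */
		return;
	if (!queue_work(demo_work_q, todo_work)) {
		/* Already queued: that earlier queueing still holds its
		 * reference, so release the one taken above. */
		put_device(dev);
	}
	/* On success, the work function must call put_device() when done. */
}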