author    Bart Van Assche <bvanassche@acm.org>    2012-11-28 13:42:38 +0100
committer Jens Axboe <axboe@kernel.dk>            2012-12-06 14:30:58 +0100
commit    3f3299d5c0268d6cc3f47b446e8aca436e4a5651 (patch)
tree      5c962e7f6c6f1e8f17b612bb4c144ec610abfd8e /block
parent    8fa72d234da9b6b473bbb1f74d533663e4996e6b (diff)
block: Rename queue dead flag
QUEUE_FLAG_DEAD is used to indicate that queuing new requests must
stop. After this flag has been set queue draining starts. However,
during the queue draining phase it is still safe to invoke the
queue's request_fn, so QUEUE_FLAG_DYING is a better name for this
flag.

This patch has been generated by running the following command
over the kernel source tree:

git grep -lEw 'blk_queue_dead|QUEUE_FLAG_DEAD' |
xargs sed -i.tmp -e 's/blk_queue_dead/blk_queue_dying/g'            \
    -e 's/QUEUE_FLAG_DEAD/QUEUE_FLAG_DYING/g';                      \
sed -i.tmp -e "s/QUEUE_FLAG_DYING$(printf \\t)*5/QUEUE_FLAG_DYING$(printf \\t)5/g" \
    include/linux/blkdev.h;                                         \
sed -i.tmp -e 's/ DEAD/ DYING/g' -e 's/dead queue/a dying queue/'   \
    -e 's/Dead queue/A dying queue/' block/blk-core.c

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: James Bottomley <JBottomley@Parallels.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Chanho Min <chanho.min@lge.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
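For context, here is a minimal sketch (not part of the diff below) of what the renamed flag and its test helper look like in include/linux/blkdev.h after this patch. The bit value 5 follows from the sed fix-up in the commit message, and the test_bit-based helper is assumed to keep the shape of the old blk_queue_dead() definition, only renamed; treat the exact surrounding lines as an assumption rather than part of this commit.

    #define QUEUE_FLAG_DYING	5	/* queue is being torn down */

    /* Assumed shape of the renamed helper, used throughout the hunks below. */
    #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)

    /* Typical caller pattern, as seen in blk-core.c below: once the queue
     * has been marked dying, new work is failed instead of being queued. */
    if (unlikely(blk_queue_dying(q)))
    	return -ENODEV;

The rename matters because, while the flag is set, the queue is not yet fully dead: request_fn may still be called to drain requests that were queued before the flag was raised, so callers only refuse new work rather than assuming the queue is gone.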
Diffstat (limited to 'block')
-rw-r--r--block/blk-cgroup.c2
-rw-r--r--block/blk-core.c26
-rw-r--r--block/blk-exec.c2
-rw-r--r--block/blk-sysfs.c4
-rw-r--r--block/blk-throttle.c2
-rw-r--r--block/blk.h2
6 files changed, 19 insertions, 19 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d0b770391ad4..5dea4e8dbc55 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -231,7 +231,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
* we shouldn't allow anything to go through for a bypassing queue.
*/
if (unlikely(blk_queue_bypass(q)))
- return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
+ return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
diff --git a/block/blk-core.c b/block/blk-core.c
index ee0e5cafa859..1a95272cca50 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -473,20 +473,20 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
*
- * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * Mark @q DYING, drain all pending requests, destroy and put it. All
* future requests will be failed immediately with -ENODEV.
*/
void blk_cleanup_queue(struct request_queue *q)
{
spinlock_t *lock = q->queue_lock;
- /* mark @q DEAD, no new request or merges will be allowed afterwards */
+ /* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
- queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
+ queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
spin_lock_irq(lock);
/*
- * Dead queue is permanently in bypass mode till released. Note
+ * A dying queue is permanently in bypass mode till released. Note
* that, unlike blk_queue_bypass_start(), we aren't performing
* synchronize_rcu() after entering bypass mode to avoid the delay
* as some drivers create and destroy a lot of queues while
@@ -499,11 +499,11 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- queue_flag_set(QUEUE_FLAG_DEAD, q);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
- /* drain all requests queued before DEAD marking */
+ /* drain all requests queued before DYING marking */
blk_drain_queue(q, true);
/* @q won't process any more request, flush async actions */
@@ -716,7 +716,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue);
bool blk_get_queue(struct request_queue *q)
{
- if (likely(!blk_queue_dead(q))) {
+ if (likely(!blk_queue_dying(q))) {
__blk_get_queue(q);
return true;
}
@@ -870,7 +870,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
const bool is_sync = rw_is_sync(rw_flags) != 0;
int may_queue;
- if (unlikely(blk_queue_dead(q)))
+ if (unlikely(blk_queue_dying(q)))
return NULL;
may_queue = elv_may_queue(q, rw_flags);
@@ -1050,7 +1050,7 @@ retry:
if (rq)
return rq;
- if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
+ if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
blk_put_rl(rl);
return NULL;
}
@@ -1910,7 +1910,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
return -EIO;
spin_lock_irqsave(q->queue_lock, flags);
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
spin_unlock_irqrestore(q->queue_lock, flags);
return -ENODEV;
}
@@ -2885,9 +2885,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
trace_block_unplug(q, depth, !from_schedule);
/*
- * Don't mess with dead queue.
+ * Don't mess with a dying queue.
*/
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
spin_unlock(q->queue_lock);
return;
}
@@ -2996,7 +2996,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
/*
* Short-circuit if @q is dead
*/
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
__blk_end_request_all(rq, -ENODEV);
continue;
}
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 8b6dc5bd4dd0..4aec98df7ba5 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -60,7 +60,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
spin_lock_irq(q->queue_lock);
- if (unlikely(blk_queue_dead(q))) {
+ if (unlikely(blk_queue_dying(q))) {
rq->errors = -ENXIO;
if (rq->end_io)
rq->end_io(rq, rq->errors);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index ce6204608822..788147797a79 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -466,7 +466,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
if (!entry->show)
return -EIO;
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dead(q)) {
+ if (blk_queue_dying(q)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
@@ -488,7 +488,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
q = container_of(kobj, struct request_queue, kobj);
mutex_lock(&q->sysfs_lock);
- if (blk_queue_dead(q)) {
+ if (blk_queue_dying(q)) {
mutex_unlock(&q->sysfs_lock);
return -ENOENT;
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a9664fa0b609..31146225f3d0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -302,7 +302,7 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
/* if %NULL and @q is alive, fall back to root_tg */
if (!IS_ERR(blkg))
tg = blkg_to_tg(blkg);
- else if (!blk_queue_dead(q))
+ else if (!blk_queue_dying(q))
tg = td_root_tg(td);
}
diff --git a/block/blk.h b/block/blk.h
index ca51543b248c..2218a8a78292 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -96,7 +96,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
q->flush_queue_delayed = 1;
return NULL;
}
- if (unlikely(blk_queue_dead(q)) ||
+ if (unlikely(blk_queue_dying(q)) ||
!q->elevator->type->ops.elevator_dispatch_fn(q, 0))
return NULL;
}