Diffstat (limited to 'block')
 block/blk-core.c    |  20
 block/blk-exec.c    |   2
 block/blk-flush.c   | 101
 block/blk-lib.c     |   8
 block/blk-merge.c   |  91
 block/blk-mq-tag.c  |   2
 block/blk-mq.c      | 143
 block/blk-mq.h      |   4
 block/blk-sysfs.c   |   2
 block/blk-timeout.c |   2
 block/blk.h         |   2
 11 files changed, 219 insertions(+), 158 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index c00e0bdeab4a..853f92749202 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
+ uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!uninit_q->flush_rq)
+ goto out_cleanup_queue;
+
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
- blk_cleanup_queue(uninit_q);
-
+ goto out_free_flush_rq;
return q;
+
+out_free_flush_rq:
+ kfree(uninit_q->flush_rq);
+out_cleanup_queue:
+ blk_cleanup_queue(uninit_q);
+ return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);
@@ -1127,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
if (q->mq_ops)
- return blk_mq_alloc_request(q, rw, gfp_mask, false);
+ return blk_mq_alloc_request(q, rw, gfp_mask);
else
return blk_old_get_request(q, rw, gfp_mask);
}
@@ -1278,6 +1287,11 @@ void __blk_put_request(struct request_queue *q, struct request *req)
if (unlikely(!q))
return;
+ if (q->mq_ops) {
+ blk_mq_free_request(req);
+ return;
+ }
+
blk_pm_put_request(req);
elv_completed_request(q, req);
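
With blk_init_queue_node() now preallocating q->flush_rq, the error path gains the usual goto-unwind labels: free what was allocated last, then fall through to the earlier cleanup, in strict reverse order of acquisition. Below is a standalone C sketch of that pattern; setup_queue() and struct queue are invented stand-ins, not kernel interfaces.

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for the request queue and its preallocated flush request. */
struct queue {
	void *flush_rq;
};

static struct queue *setup_queue(int fail_late)
{
	struct queue *q = calloc(1, sizeof(*q));

	if (!q)
		return NULL;

	q->flush_rq = calloc(1, 64);		/* like the kzalloc() of q->flush_rq */
	if (!q->flush_rq)
		goto out_free_queue;

	if (fail_late)				/* models blk_init_allocated_queue() failing */
		goto out_free_flush_rq;

	return q;

out_free_flush_rq:
	free(q->flush_rq);
out_free_queue:					/* fall through: reverse order of acquisition */
	free(q);
	return NULL;
}

int main(void)
{
	struct queue *ok = setup_queue(0);
	struct queue *bad = setup_queue(1);

	printf("normal init: %s, failed init: %s\n",
	       ok ? "queue allocated" : "NULL", bad ? "queue allocated" : "NULL");
	if (ok) {
		free(ok->flush_rq);
		free(ok);
	}
	return 0;
}
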
diff --git a/block/blk-exec.c b/block/blk-exec.c
index bbfc072a79c2..c68613bb4c79 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
* be reused after dying flag is set
*/
if (q->mq_ops) {
- blk_mq_insert_request(q, rq, true);
+ blk_mq_insert_request(q, rq, at_head, true);
return;
}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9288aaf35c21..66e2b697f5db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq)
blk_clear_rq_complete(rq);
}
-static void mq_flush_data_run(struct work_struct *work)
+static void mq_flush_run(struct work_struct *work)
{
struct request *rq;
- rq = container_of(work, struct request, mq_flush_data);
+ rq = container_of(work, struct request, mq_flush_work);
memset(&rq->csd, 0, sizeof(rq->csd));
blk_mq_run_request(rq, true, false);
}
-static void blk_mq_flush_data_insert(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq)
{
- INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
- kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+ if (rq->q->mq_ops) {
+ INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+ kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+ return false;
+ } else {
+ list_add_tail(&rq->queuelist, &rq->q->queue_head);
+ return true;
+ }
}
/**
@@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- if (q->mq_ops)
- blk_mq_flush_data_insert(rq);
- else {
- list_add(&rq->queuelist, &q->queue_head);
- queued = true;
- }
+ queued = blk_flush_queue_rq(rq);
break;
case REQ_FSEQ_DONE:
@@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
}
kicked = blk_kick_flush(q);
- /* blk_mq_run_flush will run queue */
- if (q->mq_ops)
- return queued;
return kicked | queued;
}
@@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error)
struct request *rq, *n;
unsigned long flags = 0;
- if (q->mq_ops) {
- blk_mq_free_request(flush_rq);
+ if (q->mq_ops)
spin_lock_irqsave(&q->mq_flush_lock, flags);
- }
+
running = &q->flush_queue[q->flush_running_idx];
BUG_ON(q->flush_pending_idx == q->flush_running_idx);
@@ -263,49 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error)
* kblockd.
*/
if (queued || q->flush_queue_delayed) {
- if (!q->mq_ops)
- blk_run_queue_async(q);
- else
- /*
- * This can be optimized to only run queues with requests
- * queued if necessary.
- */
- blk_mq_run_queues(q, true);
+ WARN_ON(q->mq_ops);
+ blk_run_queue_async(q);
}
q->flush_queue_delayed = 0;
if (q->mq_ops)
spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}
-static void mq_flush_work(struct work_struct *work)
-{
- struct request_queue *q;
- struct request *rq;
-
- q = container_of(work, struct request_queue, mq_flush_work);
-
- /* We don't need set REQ_FLUSH_SEQ, it's for consistency */
- rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
- __GFP_WAIT|GFP_ATOMIC, true);
- rq->cmd_type = REQ_TYPE_FS;
- rq->end_io = flush_end_io;
-
- blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
- kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
/**
* blk_kick_flush - consider issuing flush request
* @q: request_queue being kicked
@@ -340,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q)
* different from running_idx, which means flush is in flight.
*/
q->flush_pending_idx ^= 1;
+
if (q->mq_ops) {
- mq_run_flush(q);
- return true;
+ struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+ struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ blk_mq_rq_init(hctx, q->flush_rq);
+ q->flush_rq->mq_ctx = ctx;
+
+ /*
+ * Reuse the tag value from the first waiting request;
+ * with blk-mq the tag is generated during request
+ * allocation and drivers can rely on it being inside
+ * the range they asked for.
+ */
+ q->flush_rq->tag = first_rq->tag;
+ } else {
+ blk_rq_init(q, q->flush_rq);
}
- blk_rq_init(q, &q->flush_rq);
- q->flush_rq.cmd_type = REQ_TYPE_FS;
- q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
- q->flush_rq.rq_disk = first_rq->rq_disk;
- q->flush_rq.end_io = flush_end_io;
+ q->flush_rq->cmd_type = REQ_TYPE_FS;
+ q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+ q->flush_rq->rq_disk = first_rq->rq_disk;
+ q->flush_rq->end_io = flush_end_io;
- list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
- return true;
+ return blk_flush_queue_rq(q->flush_rq);
}
static void flush_data_end_io(struct request *rq, int error)
@@ -558,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush);
void blk_mq_init_flush(struct request_queue *q)
{
spin_lock_init(&q->mq_flush_lock);
- INIT_WORK(&q->mq_flush_work, mq_flush_work);
}
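
The new blk_flush_queue_rq() helper centralizes the split that used to be open-coded: for blk-mq the flush request is handed to a kblockd work item and the caller does not need to run the queue, while the legacy path appends it to q->queue_head and reports that the queue still has to be kicked. The boolean feeds the queued/kicked accounting visible in blk_flush_complete_seq() and blk_kick_flush() above. A rough userspace model of that decision, using invented toy_* names:

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of blk_flush_queue_rq(): blk-mq flushes are handed to a deferred
 * worker, legacy flushes go straight onto the dispatch list and the caller is
 * told it still has to run the queue.  All names here are invented.
 */
struct toy_request {
	const char *name;
	bool mq;
};

static void defer_to_worker(struct toy_request *rq)
{
	printf("%s: scheduled on workqueue\n", rq->name);
}

static void add_to_dispatch_list(struct toy_request *rq)
{
	printf("%s: added to queue head\n", rq->name);
}

/* Returns true if the caller must run the queue itself. */
static bool toy_flush_queue_rq(struct toy_request *rq)
{
	if (rq->mq) {
		defer_to_worker(rq);
		return false;
	}
	add_to_dispatch_list(rq);
	return true;
}

int main(void)
{
	struct toy_request mq_flush = { "mq flush", true };
	struct toy_request sq_flush = { "legacy flush", false };

	printf("caller must run queue: %d\n", toy_flush_queue_rq(&mq_flush));
	printf("caller must run queue: %d\n", toy_flush_queue_rq(&sq_flush));
	return 0;
}
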
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2da76c999ef3..97a733cf3d5f 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -119,6 +119,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
atomic_inc(&bb.done);
submit_bio(type, bio);
+
+ /*
+ * We can loop for a long time in here, if someone does
+ * full device discards (like mkfs). Be nice and allow
+ * us to schedule out to avoid softlocking if preempt
+ * is disabled.
+ */
+ cond_resched();
}
blk_finish_plug(&plug);
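
The cond_resched() added to blkdev_issue_discard() is the classic guard for long submission loops: yield voluntarily so a full-device discard cannot monopolize a CPU with preemption disabled. The sketch below shows the shape of the loop in plain userspace C, with sched_yield() standing in for cond_resched() and the sector math made up for illustration.

#include <sched.h>
#include <stdio.h>

/*
 * Shape of the blkdev_issue_discard() fix: a potentially very long submission
 * loop yields voluntarily on every iteration so it cannot hog the CPU.
 */
int main(void)
{
	long nr_sects = 1 << 20;		/* pretend device size in sectors */
	long granularity = 1 << 12;		/* pretend per-bio discard size */
	long issued = 0;

	for (long sector = 0; sector < nr_sects; sector += granularity) {
		/* submit_bio(type, bio) would go here */
		issued++;
		sched_yield();			/* cond_resched() in the kernel */
	}
	printf("issued %ld discard bios\n", issued);
	return 0;
}
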
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8f8adaa95466..6c583f9c5b65 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,6 +21,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
if (!bio)
return 0;
+ /*
+ * This should probably be returning 0, but blk_add_request_payload()
+ * (Christoph!!!!)
+ */
+ if (bio->bi_rw & REQ_DISCARD)
+ return 1;
+
+ if (bio->bi_rw & REQ_WRITE_SAME)
+ return 1;
+
fbio = bio;
cluster = blk_queue_cluster(q);
seg_size = 0;
@@ -161,30 +171,60 @@ new_segment:
*bvprv = *bvec;
}
-/*
- * map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_phys_segments entries
- */
-int blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist)
+static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
+ struct scatterlist *sglist,
+ struct scatterlist **sg)
{
struct bio_vec bvec, bvprv = { NULL };
- struct req_iterator iter;
- struct scatterlist *sg;
+ struct bvec_iter iter;
int nsegs, cluster;
nsegs = 0;
cluster = blk_queue_cluster(q);
- /*
- * for each bio in rq
- */
- sg = NULL;
- rq_for_each_segment(bvec, rq, iter) {
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
- &nsegs, &cluster);
- } /* segments in rq */
+ if (bio->bi_rw & REQ_DISCARD) {
+ /*
+ * This is a hack - drivers should be neither modifying the
+ * biovec, nor relying on bi_vcnt - but because of
+ * blk_add_request_payload(), a discard bio may or may not have
+ * a payload we need to set up here (thank you Christoph) and
+ * bi_vcnt is really the only way of telling if we need to.
+ */
+
+ if (bio->bi_vcnt)
+ goto single_segment;
+
+ return 0;
+ }
+
+ if (bio->bi_rw & REQ_WRITE_SAME) {
+single_segment:
+ *sg = sglist;
+ bvec = bio_iovec(bio);
+ sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
+ return 1;
+ }
+
+ for_each_bio(bio)
+ bio_for_each_segment(bvec, bio, iter)
+ __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
+ &nsegs, &cluster);
+ return nsegs;
+}
+
+/*
+ * map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries
+ */
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *sg = NULL;
+ int nsegs = 0;
+
+ if (rq->bio)
+ nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
(blk_rq_bytes(rq) & q->dma_pad_mask)) {
@@ -230,20 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg);
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist)
{
- struct bio_vec bvec, bvprv = { NULL };
- struct scatterlist *sg;
- int nsegs, cluster;
- struct bvec_iter iter;
-
- nsegs = 0;
- cluster = blk_queue_cluster(q);
-
- sg = NULL;
- bio_for_each_segment(bvec, bio, iter) {
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
- &nsegs, &cluster);
- } /* segments in bio */
+ struct scatterlist *sg = NULL;
+ int nsegs;
+ struct bio *next = bio->bi_next;
+ bio->bi_next = NULL;
+ nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
+ bio->bi_next = next;
if (sg)
sg_mark_end(sg);
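
__blk_bios_map_sg() now handles three cases: discard bios, whose optional payload is detected via bi_vcnt; write-same bios, which always map to a single segment; and ordinary data bios, whose segments are walked as before. A self-contained sketch of that classification, with invented TOY_* flags and struct toy_bio rather than the real bio structures:

#include <stdio.h>

/*
 * Classification mirroring __blk_bios_map_sg(): discards may or may not carry
 * a payload (vcnt tells), write-same always maps one segment, and normal data
 * bios contribute one entry per physical segment.
 */
#define TOY_DISCARD	(1u << 0)
#define TOY_WRITE_SAME	(1u << 1)

struct toy_bio {
	unsigned int flags;
	int vcnt;	/* payload pages attached, cf. bio->bi_vcnt */
	int nr_segs;	/* physical segments of a normal data bio */
};

static int toy_map_sg(const struct toy_bio *bio)
{
	if (bio->flags & TOY_DISCARD)
		return bio->vcnt ? 1 : 0;	/* payload is optional */
	if (bio->flags & TOY_WRITE_SAME)
		return 1;			/* always a single segment */
	return bio->nr_segs;			/* walk the segments as before */
}

int main(void)
{
	struct toy_bio discard = { TOY_DISCARD, 0, 0 };
	struct toy_bio wsame   = { TOY_WRITE_SAME, 1, 1 };
	struct toy_bio data    = { 0, 4, 4 };

	printf("discard: %d, write-same: %d, data: %d\n",
	       toy_map_sg(&discard), toy_map_sg(&wsame), toy_map_sg(&data));
	return 0;
}
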
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 5d70edc9855f..83ae96c51a27 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -184,7 +184,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
- int cpu;
+ unsigned int cpu;
if (!tags)
return 0;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 57039fcd9c93..1fa9dd153fde 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -226,15 +226,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
return rq;
}
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
- gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
{
struct request *rq;
if (blk_mq_queue_enter(q))
return NULL;
- rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+ rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
if (rq)
blk_mq_put_ctx(rq->mq_ctx);
return rq;
@@ -258,7 +257,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
/*
* Re-init and set pdu, if we have it
*/
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
blk_rq_init(hctx->queue, rq);
@@ -305,7 +304,7 @@ static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
bio_endio(bio, error);
}
-void blk_mq_complete_request(struct request *rq, int error)
+void blk_mq_end_io(struct request *rq, int error)
{
struct bio *bio = rq->bio;
unsigned int bytes = 0;
@@ -330,48 +329,55 @@ void blk_mq_complete_request(struct request *rq, int error)
else
blk_mq_free_request(rq);
}
+EXPORT_SYMBOL(blk_mq_end_io);
-void __blk_mq_end_io(struct request *rq, int error)
-{
- if (!blk_mark_rq_complete(rq))
- blk_mq_complete_request(rq, error);
-}
-
-static void blk_mq_end_io_remote(void *data)
+static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
- __blk_mq_end_io(rq, rq->errors);
+ rq->q->softirq_done_fn(rq);
}
-/*
- * End IO on this request on a multiqueue enabled driver. We'll either do
- * it directly inline, or punt to a local IPI handler on the matching
- * remote CPU.
- */
-void blk_mq_end_io(struct request *rq, int error)
+void __blk_mq_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
int cpu;
- if (!ctx->ipi_redirect)
- return __blk_mq_end_io(rq, error);
+ if (!ctx->ipi_redirect) {
+ rq->q->softirq_done_fn(rq);
+ return;
+ }
cpu = get_cpu();
if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
- rq->errors = error;
- rq->csd.func = blk_mq_end_io_remote;
+ rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
__smp_call_function_single(ctx->cpu, &rq->csd, 0);
} else {
- __blk_mq_end_io(rq, error);
+ rq->q->softirq_done_fn(rq);
}
put_cpu();
}
-EXPORT_SYMBOL(blk_mq_end_io);
-static void blk_mq_start_request(struct request *rq)
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq: the request being processed
+ *
+ * Description:
+ * Ends all I/O on a request. It does not handle partial completions.
+ * The actual completion happens out-of-order, through an IPI handler.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+ if (unlikely(blk_should_fake_timeout(rq->q)))
+ return;
+ if (!blk_mark_rq_complete(rq))
+ __blk_mq_complete_request(rq);
+}
+EXPORT_SYMBOL(blk_mq_complete_request);
+
+static void blk_mq_start_request(struct request *rq, bool last)
{
struct request_queue *q = rq->q;
@@ -384,6 +390,25 @@ static void blk_mq_start_request(struct request *rq)
*/
rq->deadline = jiffies + q->rq_timeout;
set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+ if (q->dma_drain_size && blk_rq_bytes(rq)) {
+ /*
+ * Make sure space for the drain appears. We know we can do
+ * this because max_hw_segments has been adjusted to be one
+ * fewer than the device can handle.
+ */
+ rq->nr_phys_segments++;
+ }
+
+ /*
+ * Flag the last request in the series so that drivers know when IO
+ * should be kicked off, if they don't do it on a per-request basis.
+ *
+ * Note: the flag isn't the only condition on which drivers should kick
+ * off IO; if the drive is busy, the last request might not have the bit set.
+ */
+ if (last)
+ rq->cmd_flags |= REQ_END;
}
static void blk_mq_requeue_request(struct request *rq)
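
blk_mq_start_request() now takes a "last" flag so the REQ_END marking happens where the request is started rather than in the dispatch loop; the loop simply passes list_empty(&rq_list). A toy drain loop showing the idea (names invented):

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy version of the __blk_mq_run_hw_queue() drain loop: the driver is told,
 * via a "last" flag on the final request of the batch, when it may kick the
 * hardware once instead of once per request.
 */
int main(void)
{
	const char *batch[] = { "rq1", "rq2", "rq3" };
	int remaining = 3;

	for (int i = 0; i < 3; i++) {
		bool last = (--remaining == 0);	/* list_empty(&rq_list) in the kernel */

		printf("start %s%s\n", batch[i], last ? " [REQ_END]" : "");
	}
	return 0;
}
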
@@ -392,6 +417,11 @@ static void blk_mq_requeue_request(struct request *rq)
trace_block_rq_requeue(q, rq);
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+ rq->cmd_flags &= ~REQ_END;
+
+ if (q->dma_drain_size && blk_rq_bytes(rq))
+ rq->nr_phys_segments--;
}
struct blk_mq_timeout_data {
@@ -559,19 +589,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
rq = list_first_entry(&rq_list, struct request, queuelist);
list_del_init(&rq->queuelist);
- blk_mq_start_request(rq);
- /*
- * Last request in the series. Flag it as such, this
- * enables drivers to know when IO should be kicked off,
- * if they don't do it on a per-request basis.
- *
- * Note: the flag isn't the only condition drivers
- * should do kick off. If drive is busy, the last
- * request might not have the bit set.
- */
- if (list_empty(&rq_list))
- rq->cmd_flags |= REQ_END;
+ blk_mq_start_request(rq, list_empty(&rq_list));
ret = q->mq_ops->queue_rq(hctx, rq);
switch (ret) {
@@ -589,8 +608,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
break;
default:
pr_err("blk-mq: bad return on queue: %d\n", ret);
- rq->errors = -EIO;
case BLK_MQ_RQ_QUEUE_ERROR:
+ rq->errors = -EIO;
blk_mq_end_io(rq, rq->errors);
break;
}
@@ -693,13 +712,16 @@ static void blk_mq_work_fn(struct work_struct *work)
}
static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
+ struct request *rq, bool at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
trace_block_rq_insert(hctx->queue, rq);
- list_add_tail(&rq->queuelist, &ctx->rq_list);
+ if (at_head)
+ list_add(&rq->queuelist, &ctx->rq_list);
+ else
+ list_add_tail(&rq->queuelist, &ctx->rq_list);
blk_mq_hctx_mark_pending(hctx, ctx);
/*
@@ -709,7 +731,7 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
}
void blk_mq_insert_request(struct request_queue *q, struct request *rq,
- bool run_queue)
+ bool at_head, bool run_queue)
{
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx, *current_ctx;
@@ -728,7 +750,7 @@ void blk_mq_insert_request(struct request_queue *q, struct request *rq,
rq->mq_ctx = ctx;
}
spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, at_head);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(current_ctx);
@@ -760,7 +782,7 @@ void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
/* ctx->cpu might be offline */
spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
spin_unlock(&ctx->lock);
blk_mq_put_ctx(current_ctx);
@@ -798,7 +820,7 @@ static void blk_mq_insert_requests(struct request_queue *q,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
rq->mq_ctx = ctx;
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
}
spin_unlock(&ctx->lock);
@@ -888,6 +910,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
return;
@@ -950,7 +977,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
__blk_mq_free_request(hctx, ctx, rq);
else {
blk_mq_bio_to_request(rq, bio);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
}
spin_unlock(&ctx->lock);
@@ -1309,15 +1336,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
reg->queue_depth = BLK_MQ_MAX_DEPTH;
}
- /*
- * Set aside a tag for flush requests. It will only be used while
- * another flush request is in progress but outside the driver.
- *
- * TODO: only allocate if flushes are supported
- */
- reg->queue_depth++;
- reg->reserved_tags++;
-
if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
return ERR_PTR(-EINVAL);
@@ -1360,17 +1378,27 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
q->mq_ops = reg->ops;
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
+ q->sg_reserved_size = INT_MAX;
+
blk_queue_make_request(q, blk_mq_make_request);
blk_queue_rq_timed_out(q, reg->ops->timeout);
if (reg->timeout)
blk_queue_rq_timeout(q, reg->timeout);
+ if (reg->ops->complete)
+ blk_queue_softirq_done(q, reg->ops->complete);
+
blk_mq_init_flush(q);
blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
- if (blk_mq_init_hw_queues(q, reg, driver_data))
+ q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+ cache_line_size()), GFP_KERNEL);
+ if (!q->flush_rq)
goto err_hw;
+ if (blk_mq_init_hw_queues(q, reg, driver_data))
+ goto err_flush_rq;
+
blk_mq_map_swqueue(q);
mutex_lock(&all_q_mutex);
@@ -1378,6 +1406,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
mutex_unlock(&all_q_mutex);
return q;
+
+err_flush_rq:
+ kfree(q->flush_rq);
err_hw:
kfree(q->mq_map);
err_map:
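
The completion rework separates marking a request complete (blk_mq_complete_request(), which also bails out when a fake timeout is being injected) from actually finishing it (__blk_mq_complete_request(), which runs the driver's softirq_done_fn either locally or via an IPI to the submitting CPU when ipi_redirect is set). A minimal sketch of that routing decision, written as a plain function that reports which path would be taken:

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of __blk_mq_complete_request(): complete on the local CPU unless
 * IPI redirection is enabled and the request was submitted on a different,
 * still-online CPU.
 */
static const char *toy_complete_path(bool ipi_redirect, int submit_cpu,
				     int current_cpu, bool submit_cpu_online)
{
	if (!ipi_redirect)
		return "softirq_done_fn on local CPU";
	if (current_cpu != submit_cpu && submit_cpu_online)
		return "IPI to submitting CPU";
	return "softirq_done_fn on local CPU";
}

int main(void)
{
	printf("%s\n", toy_complete_path(false, 0, 3, true));
	printf("%s\n", toy_complete_path(true, 0, 3, true));
	printf("%s\n", toy_complete_path(true, 2, 2, true));
	printf("%s\n", toy_complete_path(true, 0, 3, false));
	return 0;
}
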
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5c3917984b00..ed0035cd458e 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -22,13 +22,13 @@ struct blk_mq_ctx {
struct kobject kobj;
};
-void __blk_mq_end_io(struct request *rq, int error);
-void blk_mq_complete_request(struct request *rq, int error);
+void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
/*
* CPU hotplug helpers
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8095c4a21fc0..7500f876dae4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -549,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj)
if (q->mq_ops)
blk_mq_free_queue(q);
+ kfree(q->flush_rq);
+
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index bba81c9348e1..d96f7061c6fd 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -91,7 +91,7 @@ static void blk_rq_timed_out(struct request *req)
case BLK_EH_HANDLED:
/* Can we use req->errors here? */
if (q->mq_ops)
- blk_mq_complete_request(req, req->errors);
+ __blk_mq_complete_request(req);
else
__blk_complete_request(req);
break;
diff --git a/block/blk.h b/block/blk.h
index c90e1d8f7a2b..d23b415b8a28 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -113,7 +113,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
q->flush_queue_delayed = 1;
return NULL;
}
- if (unlikely(blk_queue_dying(q)) ||
+ if (unlikely(blk_queue_bypass(q)) ||
!q->elevator->type->ops.elevator_dispatch_fn(q, 0))
return NULL;
}