Diffstat (limited to 'block')
-rw-r--r--   block/Kconfig                11
-rw-r--r--   block/as-iosched.c           24
-rw-r--r--   block/blk-barrier.c          19
-rw-r--r--   block/blk-core.c            740
-rw-r--r--   block/blk-exec.c              1
-rw-r--r--   block/blk-map.c              21
-rw-r--r--   block/blk-merge.c            46
-rw-r--r--   block/blk-tag.c              17
-rw-r--r--   block/blk-timeout.c          22
-rw-r--r--   block/blk.h                  51
-rw-r--r--   block/bsg.c                   8
-rw-r--r--   block/cfq-iosched.c          36
-rw-r--r--   block/deadline-iosched.c      2
-rw-r--r--   block/elevator.c            167
-rw-r--r--   block/scsi_ioctl.c            5
15 files changed, 599 insertions, 571 deletions
diff --git a/block/Kconfig b/block/Kconfig
index e7d12782bcfb..2c39527aa7db 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -26,6 +26,7 @@ if BLOCK
config LBD
bool "Support for large block devices and files"
depends on !64BIT
+ default y
help
Enable block devices or files of size 2TB and larger.
@@ -38,11 +39,13 @@ config LBD
The ext4 filesystem requires that this feature be enabled in
order to support filesystems that have the huge_file feature
- enabled. Otherwise, it will refuse to mount any filesystems
- that use the huge_file feature, which is enabled by default
- by mke2fs.ext4. The GFS2 filesystem also requires this feature.
+ enabled. Otherwise, it will refuse to mount in the read-write
+ mode any filesystems that use the huge_file feature, which is
+ enabled by default by mke2fs.ext4.
- If unsure, say N.
+ The GFS2 filesystem also requires this feature.
+
+ If unsure, say Y.
config BLK_DEV_BSG
bool "Block layer SG support v4 (EXPERIMENTAL)"
diff --git a/block/as-iosched.c b/block/as-iosched.c
index c48fa670d221..7a12cf6ee1d3 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -306,8 +306,8 @@ as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
data_dir = rq_is_sync(rq1);
last = ad->last_sector[data_dir];
- s1 = rq1->sector;
- s2 = rq2->sector;
+ s1 = blk_rq_pos(rq1);
+ s2 = blk_rq_pos(rq2);
BUG_ON(data_dir != rq_is_sync(rq2));
@@ -566,13 +566,15 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
as_update_thinktime(ad, aic, thinktime);
/* Calculate read -> read seek distance */
- if (aic->last_request_pos < rq->sector)
- seek_dist = rq->sector - aic->last_request_pos;
+ if (aic->last_request_pos < blk_rq_pos(rq))
+ seek_dist = blk_rq_pos(rq) -
+ aic->last_request_pos;
else
- seek_dist = aic->last_request_pos - rq->sector;
+ seek_dist = aic->last_request_pos -
+ blk_rq_pos(rq);
as_update_seekdist(ad, aic, seek_dist);
}
- aic->last_request_pos = rq->sector + rq->nr_sectors;
+ aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
set_bit(AS_TASK_IOSTARTED, &aic->state);
spin_unlock(&aic->lock);
}
@@ -587,7 +589,7 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic,
{
unsigned long delay; /* jiffies */
sector_t last = ad->last_sector[ad->batch_data_dir];
- sector_t next = rq->sector;
+ sector_t next = blk_rq_pos(rq);
sector_t delta; /* acceptable close offset (in sectors) */
sector_t s;
@@ -981,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
* This has to be set in order to be correctly updated by
* as_find_next_rq
*/
- ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
+ ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (data_dir == BLK_RW_SYNC) {
struct io_context *ioc = RQ_IOC(rq);
@@ -1312,12 +1314,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
static void as_work_handler(struct work_struct *work)
{
struct as_data *ad = container_of(work, struct as_data, antic_work);
- struct request_queue *q = ad->q;
- unsigned long flags;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queueing(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_run_queue(ad->q);
}
static int as_may_queue(struct request_queue *q, int rw)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 20b4111fa050..0d98054cdbd7 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
*/
q->ordseq = 0;
rq = q->orig_bar_rq;
-
- if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
- BUG();
-
+ __blk_end_request_all(rq, q->orderr);
return true;
}
@@ -166,7 +163,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
* For an empty barrier, there's no actual BAR request, which
* in turn makes POSTFLUSH unnecessary. Mask them off.
*/
- if (!rq->hard_nr_sectors) {
+ if (!blk_rq_sectors(rq)) {
q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
QUEUE_ORDERED_DO_POSTFLUSH);
/*
@@ -183,7 +180,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
}
/* stash away the original request */
- elv_dequeue_request(q, rq);
+ blk_dequeue_request(rq);
q->orig_bar_rq = rq;
rq = NULL;
@@ -221,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
} else
skip |= QUEUE_ORDSEQ_PREFLUSH;
- if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
+ if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
rq = NULL;
else
skip |= QUEUE_ORDSEQ_DRAIN;
@@ -251,10 +248,8 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
* Queue ordering not supported. Terminate
* with prejudice.
*/
- elv_dequeue_request(q, rq);
- if (__blk_end_request(rq, -EOPNOTSUPP,
- blk_rq_bytes(rq)))
- BUG();
+ blk_dequeue_request(rq);
+ __blk_end_request_all(rq, -EOPNOTSUPP);
*rqp = NULL;
return false;
}
@@ -329,7 +324,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
/*
* The driver must store the error location in ->bi_sector, if
* it supports it. For non-stacked drivers, this should be copied
- * from rq->sector.
+ * from blk_rq_pos(rq).
*/
if (error_sector)
*error_sector = bio->bi_sector;
diff --git a/block/blk-core.c b/block/blk-core.c
index c89883be8737..59c4af523112 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -68,11 +68,11 @@ static void drive_stat_acct(struct request *rq, int new_io)
int rw = rq_data_dir(rq);
int cpu;
- if (!blk_fs_request(rq) || !blk_do_io_stat(rq))
+ if (!blk_do_io_stat(rq))
return;
cpu = part_stat_lock();
- part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
+ part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
if (!new_io)
part_stat_inc(cpu, part, merges[rw]);
@@ -127,13 +127,14 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
- rq->sector = rq->hard_sector = (sector_t) -1;
+ rq->__sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
rq->cmd = rq->__cmd;
rq->cmd_len = BLK_MAX_CDB;
rq->tag = -1;
rq->ref_count = 1;
+ rq->start_time = jiffies;
}
EXPORT_SYMBOL(blk_rq_init);
@@ -184,14 +185,11 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
rq->cmd_flags);
- printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n",
- (unsigned long long)rq->sector,
- rq->nr_sectors,
- rq->current_nr_sectors);
- printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n",
- rq->bio, rq->biotail,
- rq->buffer, rq->data,
- rq->data_len);
+ printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
+ (unsigned long long)blk_rq_pos(rq),
+ blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
+ printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
+ rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
if (blk_pc_request(rq)) {
printk(KERN_INFO " cdb: ");
@@ -333,24 +331,6 @@ void blk_unplug(struct request_queue *q)
}
EXPORT_SYMBOL(blk_unplug);
-static void blk_invoke_request_fn(struct request_queue *q)
-{
- if (unlikely(blk_queue_stopped(q)))
- return;
-
- /*
- * one level of recursion is ok and is much faster than kicking
- * the unplug handling
- */
- if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
- q->request_fn(q);
- queue_flag_clear(QUEUE_FLAG_REENTER, q);
- } else {
- queue_flag_set(QUEUE_FLAG_PLUGGED, q);
- kblockd_schedule_work(q, &q->unplug_work);
- }
-}
-
/**
* blk_start_queue - restart a previously stopped queue
* @q: The &struct request_queue in question
@@ -365,7 +345,7 @@ void blk_start_queue(struct request_queue *q)
WARN_ON(!irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- blk_invoke_request_fn(q);
+ __blk_run_queue(q);
}
EXPORT_SYMBOL(blk_start_queue);
@@ -425,12 +405,23 @@ void __blk_run_queue(struct request_queue *q)
{
blk_remove_plug(q);
+ if (unlikely(blk_queue_stopped(q)))
+ return;
+
+ if (elv_queue_empty(q))
+ return;
+
/*
* Only recurse once to avoid overrunning the stack, let the unplug
* handling reinvoke the handler shortly if we already got there.
*/
- if (!elv_queue_empty(q))
- blk_invoke_request_fn(q);
+ if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+ q->request_fn(q);
+ queue_flag_clear(QUEUE_FLAG_REENTER, q);
+ } else {
+ queue_flag_set(QUEUE_FLAG_PLUGGED, q);
+ kblockd_schedule_work(q, &q->unplug_work);
+ }
}
EXPORT_SYMBOL(__blk_run_queue);
@@ -440,9 +431,7 @@ EXPORT_SYMBOL(__blk_run_queue);
*
* Description:
* Invoke request handling on this queue, if it has pending work to do.
- * May be used to restart queueing when a request has completed. Also
- * See @blk_start_queueing.
- *
+ * May be used to restart queueing when a request has completed.
*/
void blk_run_queue(struct request_queue *q)
{
@@ -902,26 +891,58 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
EXPORT_SYMBOL(blk_get_request);
/**
- * blk_start_queueing - initiate dispatch of requests to device
- * @q: request queue to kick into gear
+ * blk_make_request - given a bio, allocate a corresponding struct request.
+ * @q: request queue the new request is allocated from
+ * @bio: The bio describing the memory mappings that will be submitted for IO.
+ * It may be a chained-bio properly constructed by the block/bio layer.
+ * @gfp_mask: gfp flags to be used for the request allocation
+ *
+ * blk_make_request is the parallel of generic_make_request for BLOCK_PC
+ * type commands, where the struct request needs to be further initialized
+ * by the caller. It is passed a &struct bio, which describes the memory info of
+ * the I/O transfer.
*
- * This is basically a helper to remove the need to know whether a queue
- * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue. Should be used to start queueing on a device outside
- * of ->request_fn() context. Also see @blk_run_queue.
+ * The caller of blk_make_request must make sure that bi_io_vec
+ * are set to describe the memory buffers, and that bio_data_dir() will
+ * return the needed direction of the request (and that all bios in the
+ * passed bio-chain are properly set accordingly).
*
- * The queue lock must be held with interrupts disabled.
+ * If called under non-sleepable conditions, mapped bio buffers must not
+ * need bouncing, i.e. they must be allocated with the appropriate mask or
+ * flags suitable for the target device; otherwise the call to
+ * blk_queue_bounce will BUG.
+ *
+ * WARNING: When allocating/cloning a bio-chain, careful consideration should be
+ * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
+ * anything but the first bio in the chain. Otherwise you risk waiting for IO
+ * completion of a bio that hasn't been submitted yet, thus resulting in a
+ * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
+ * of bio_alloc(), as that avoids the mempool deadlock.
+ * If possible a big IO should be split into smaller parts when allocation
+ * fails. Partial allocation should not be an error, or you risk a live-lock.
*/
-void blk_start_queueing(struct request_queue *q)
+struct request *blk_make_request(struct request_queue *q, struct bio *bio,
+ gfp_t gfp_mask)
{
- if (!blk_queue_plugged(q)) {
- if (unlikely(blk_queue_stopped(q)))
- return;
- q->request_fn(q);
- } else
- __generic_unplug_device(q);
+ struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
+
+ if (unlikely(!rq))
+ return ERR_PTR(-ENOMEM);
+
+ for_each_bio(bio) {
+ struct bio *bounce_bio = bio;
+ int ret;
+
+ blk_queue_bounce(q, &bounce_bio);
+ ret = blk_rq_append_bio(q, rq, bounce_bio);
+ if (unlikely(ret)) {
+ blk_put_request(rq);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return rq;
}
-EXPORT_SYMBOL(blk_start_queueing);
+EXPORT_SYMBOL(blk_make_request);
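For illustration only (not part of the patch), a caller might drive the new helper roughly as follows; mydev_send_pc, the cdb buffer and the pre-built bio chain are assumptions of this sketch, and the usual <linux/blkdev.h> environment is assumed here and in the later examples:

#include <linux/blkdev.h>

static int mydev_send_pc(struct request_queue *q, struct bio *bio,
                         unsigned char *cdb, unsigned int cdb_len)
{
        struct request *rq;
        int err;

        rq = blk_make_request(q, bio, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* the caller finishes the initialization of the request */
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd_len = cdb_len;                /* must fit in BLK_MAX_CDB */
        memcpy(rq->cmd, cdb, cdb_len);
        rq->timeout = 60 * HZ;

        blk_execute_rq(q, NULL, rq, 0);       /* submit and wait for completion */
        err = rq->errors ? -EIO : 0;
        blk_put_request(rq);
        return err;
}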
/**
* blk_requeue_request - put a request back on queue
@@ -935,6 +956,8 @@ EXPORT_SYMBOL(blk_start_queueing);
*/
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
+ BUG_ON(blk_queued_rq(rq));
+
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
@@ -977,7 +1000,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
* barrier
*/
rq->cmd_type = REQ_TYPE_SPECIAL;
- rq->cmd_flags |= REQ_SOFTBARRIER;
rq->special = data;
@@ -991,7 +1013,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
drive_stat_acct(rq, 1);
__elv_add_request(q, rq, where, 0);
- blk_start_queueing(q);
+ __blk_run_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
@@ -1113,16 +1135,13 @@ void init_request_from_bio(struct request *req, struct bio *bio)
if (bio_failfast_driver(bio))
req->cmd_flags |= REQ_FAILFAST_DRIVER;
- /*
- * REQ_BARRIER implies no merging, but lets make it explicit
- */
if (unlikely(bio_discard(bio))) {
req->cmd_flags |= REQ_DISCARD;
if (bio_barrier(bio))
req->cmd_flags |= REQ_SOFTBARRIER;
req->q->prepare_discard_fn(req->q, req);
} else if (unlikely(bio_barrier(bio)))
- req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+ req->cmd_flags |= REQ_HARDBARRIER;
if (bio_sync(bio))
req->cmd_flags |= REQ_RW_SYNC;
@@ -1132,9 +1151,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_flags |= REQ_NOIDLE;
req->errors = 0;
- req->hard_sector = req->sector = bio->bi_sector;
+ req->__sector = bio->bi_sector;
req->ioprio = bio_prio(bio);
- req->start_time = jiffies;
blk_rq_bio_prep(req->q, req, bio);
}
@@ -1150,14 +1168,13 @@ static inline bool queue_should_plug(struct request_queue *q)
static int __make_request(struct request_queue *q, struct bio *bio)
{
struct request *req;
- int el_ret, nr_sectors;
+ int el_ret;
+ unsigned int bytes = bio->bi_size;
const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio);
const int unplug = bio_unplug(bio);
int rw_flags;
- nr_sectors = bio_sectors(bio);
-
/*
* low level driver can indicate that it wants pages above a
* certain limit bounced to low memory (ie for highmem, or even
@@ -1182,7 +1199,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
req->biotail->bi_next = bio;
req->biotail = bio;
- req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->__data_len += bytes;
req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
@@ -1208,10 +1225,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
* not touch req->buffer either...
*/
req->buffer = bio_data(bio);
- req->current_nr_sectors = bio_cur_sectors(bio);
- req->hard_cur_sectors = req->current_nr_sectors;
- req->sector = req->hard_sector = bio->bi_sector;
- req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+ req->__sector = bio->bi_sector;
+ req->__data_len += bytes;
req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu;
@@ -1593,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
- if (rq->nr_sectors > q->max_sectors ||
- rq->data_len > q->max_hw_sectors << 9) {
+ if (blk_rq_sectors(rq) > q->max_sectors ||
+ blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO;
}
@@ -1651,40 +1666,15 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
-/**
- * blkdev_dequeue_request - dequeue request and start timeout timer
- * @req: request to dequeue
- *
- * Dequeue @req and start timeout timer on it. This hands off the
- * request to the driver.
- *
- * Block internal functions which don't want to start timer should
- * call elv_dequeue_request().
- */
-void blkdev_dequeue_request(struct request *req)
-{
- elv_dequeue_request(req->q, req);
-
- /*
- * We are now handing the request to the hardware, add the
- * timeout handler.
- */
- blk_add_timer(req);
-}
-EXPORT_SYMBOL(blkdev_dequeue_request);
-
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
- if (!blk_do_io_stat(req))
- return;
-
- if (blk_fs_request(req)) {
+ if (blk_do_io_stat(req)) {
const int rw = rq_data_dir(req);
struct hd_struct *part;
int cpu;
cpu = part_stat_lock();
- part = disk_map_sector_rcu(req->rq_disk, req->sector);
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_stat_add(cpu, part, sectors[rw], bytes >> 9);
part_stat_unlock();
}
@@ -1692,22 +1682,19 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
static void blk_account_io_done(struct request *req)
{
- if (!blk_do_io_stat(req))
- return;
-
/*
* Account IO completion. bar_rq isn't accounted as a normal
* IO on queueing nor completion. Accounting the containing
* request is enough.
*/
- if (blk_fs_request(req) && req != &req->q->bar_rq) {
+ if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
unsigned long duration = jiffies - req->start_time;
const int rw = rq_data_dir(req);
struct hd_struct *part;
int cpu;
cpu = part_stat_lock();
- part = disk_map_sector_rcu(req->rq_disk, req->sector);
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, ticks[rw], duration);
@@ -1719,38 +1706,218 @@ static void blk_account_io_done(struct request *req)
}
/**
- * __end_that_request_first - end I/O on a request
- * @req: the request being processed
+ * blk_peek_request - peek at the top of a request queue
+ * @q: request queue to peek at
+ *
+ * Description:
+ * Return the request at the top of @q. The returned request
+ * should be started using blk_start_request() before LLD starts
+ * processing it.
+ *
+ * Return:
+ * Pointer to the request at the top of @q if available. Null
+ * otherwise.
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+struct request *blk_peek_request(struct request_queue *q)
+{
+ struct request *rq;
+ int ret;
+
+ while ((rq = __elv_next_request(q)) != NULL) {
+ if (!(rq->cmd_flags & REQ_STARTED)) {
+ /*
+ * This is the first time the device driver
+ * sees this request (possibly after
+ * requeueing). Notify IO scheduler.
+ */
+ if (blk_sorted_rq(rq))
+ elv_activate_rq(q, rq);
+
+ /*
+ * just mark as started even if we don't start
+ * it, a request that has been delayed should
+ * not be passed by new incoming requests
+ */
+ rq->cmd_flags |= REQ_STARTED;
+ trace_block_rq_issue(q, rq);
+ }
+
+ if (!q->boundary_rq || q->boundary_rq == rq) {
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = NULL;
+ }
+
+ if (rq->cmd_flags & REQ_DONTPREP)
+ break;
+
+ if (q->dma_drain_size && blk_rq_bytes(rq)) {
+ /*
+ * make sure space for the drain appears we
+ * know we can do this because max_hw_segments
+ * has been adjusted to be one fewer than the
+ * device can handle
+ */
+ rq->nr_phys_segments++;
+ }
+
+ if (!q->prep_rq_fn)
+ break;
+
+ ret = q->prep_rq_fn(q, rq);
+ if (ret == BLKPREP_OK) {
+ break;
+ } else if (ret == BLKPREP_DEFER) {
+ /*
+ * the request may have been (partially) prepped.
+ * we need to keep this request in the front to
+ * avoid resource deadlock. REQ_STARTED will
+ * prevent other fs requests from passing this one.
+ */
+ if (q->dma_drain_size && blk_rq_bytes(rq) &&
+ !(rq->cmd_flags & REQ_DONTPREP)) {
+ /*
+ * remove the space for the drain we added
+ * so that we don't add it again
+ */
+ --rq->nr_phys_segments;
+ }
+
+ rq = NULL;
+ break;
+ } else if (ret == BLKPREP_KILL) {
+ rq->cmd_flags |= REQ_QUIET;
+ __blk_end_request_all(rq, -EIO);
+ } else {
+ printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
+ break;
+ }
+ }
+
+ return rq;
+}
+EXPORT_SYMBOL(blk_peek_request);
+
+void blk_dequeue_request(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ BUG_ON(list_empty(&rq->queuelist));
+ BUG_ON(ELV_ON_HASH(rq));
+
+ list_del_init(&rq->queuelist);
+
+ /*
+ * the time frame between a request being removed from the lists
+ * and to it is freed is accounted as io that is in progress at
+ * the driver side.
+ */
+ if (blk_account_rq(rq))
+ q->in_flight[rq_is_sync(rq)]++;
+}
+
+/**
+ * blk_start_request - start request processing on the driver
+ * @req: request to dequeue
+ *
+ * Description:
+ * Dequeue @req and start timeout timer on it. This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start timer should
+ * call blk_dequeue_request().
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+void blk_start_request(struct request *req)
+{
+ blk_dequeue_request(req);
+
+ /*
+ * We are now handing the request to the hardware, initialize
+ * resid_len to full count and add the timeout handler.
+ */
+ req->resid_len = blk_rq_bytes(req);
+ blk_add_timer(req);
+}
+EXPORT_SYMBOL(blk_start_request);
+
+/**
+ * blk_fetch_request - fetch a request from a request queue
+ * @q: request queue to fetch a request from
+ *
+ * Description:
+ * Return the request at the top of @q. The request is started on
+ * return and LLD can start processing it immediately.
+ *
+ * Return:
+ * Pointer to the request at the top of @q if available. Null
+ * otherwise.
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+struct request *blk_fetch_request(struct request_queue *q)
+{
+ struct request *rq;
+
+ rq = blk_peek_request(q);
+ if (rq)
+ blk_start_request(rq);
+ return rq;
+}
+EXPORT_SYMBOL(blk_fetch_request);
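As a hedged sketch of the intended conversion, a simple driver's ->request_fn that used elv_next_request() plus blkdev_dequeue_request() could now look roughly like this; mydev_xfer is a hypothetical transfer routine, and since the queue lock is held in ->request_fn the __blk_* completion variant is used:

static void mydev_request_fn(struct request_queue *q)
{
        struct request *rq;

        /* blk_fetch_request() == blk_peek_request() + blk_start_request() */
        while ((rq = blk_fetch_request(q)) != NULL) {
                /* mydev_xfer() is a hypothetical synchronous transfer routine */
                int error = mydev_xfer(rq, blk_rq_pos(rq),
                                       blk_rq_sectors(rq));

                __blk_end_request_all(rq, error);
        }
}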
+
+/**
+ * blk_update_request - Special helper function for request stacking drivers
+ * @rq: the request being processed
* @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
+ * @nr_bytes: number of bytes to complete @rq
*
* Description:
- * Ends I/O on a number of bytes attached to @req, and sets it up
- * for the next range of segments (if any) in the cluster.
+ * Ends I/O on a number of bytes attached to @rq, but doesn't complete
+ * the request structure even if @rq doesn't have leftover.
+ * If @rq has leftover, sets it up for the next range of segments.
+ *
+ * This special helper function is only for request stacking drivers
+ * (e.g. request-based dm) so that they can handle partial completion.
+ * Actual device drivers should use blk_end_request instead.
+ *
+ * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
+ * %false return from this function.
*
* Return:
- * %0 - we are done with this request, call end_that_request_last()
- * %1 - still buffers pending for this request
+ * %false - this request doesn't have any more data
+ * %true - this request has more data
**/
-static int __end_that_request_first(struct request *req, int error,
- int nr_bytes)
+bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
int total_bytes, bio_nbytes, next_idx = 0;
struct bio *bio;
+ if (!req->bio)
+ return false;
+
trace_block_rq_complete(req->q, req);
/*
- * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
- * sense key with us all the way through
+ * For fs requests, rq is just a carrier of independent bios
+ * and each partial completion should be handled separately.
+ * Reset per-request error on each partial completion.
+ *
+ * TODO: tj: This is too subtle. It would be better to let
+ * low level drivers do what they see fit.
*/
- if (!blk_pc_request(req))
+ if (blk_fs_request(req))
req->errors = 0;
if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
req->rq_disk ? req->rq_disk->disk_name : "?",
- (unsigned long long)req->sector);
+ (unsigned long long)blk_rq_pos(req));
}
blk_account_io_completion(req, nr_bytes);
@@ -1810,8 +1977,15 @@ static int __end_that_request_first(struct request *req, int error,
/*
* completely done
*/
- if (!req->bio)
- return 0;
+ if (!req->bio) {
+ /*
+ * Reset counters so that the request stacking driver
+ * can find how many bytes remain in the request
+ * later.
+ */
+ req->__data_len = 0;
+ return false;
+ }
/*
* if the request wasn't completed, update state
@@ -1823,22 +1997,56 @@ static int __end_that_request_first(struct request *req, int error,
bio_iovec(bio)->bv_len -= nr_bytes;
}
- blk_recalc_rq_sectors(req, total_bytes >> 9);
+ req->__data_len -= total_bytes;
+ req->buffer = bio_data(req->bio);
+
+ /* update sector only for requests with clear definition of sector */
+ if (blk_fs_request(req) || blk_discard_rq(req))
+ req->__sector += total_bytes >> 9;
+
+ /*
+ * If total number of sectors is less than the first segment
+ * size, something has gone terribly wrong.
+ */
+ if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
+ printk(KERN_ERR "blk: request botched\n");
+ req->__data_len = blk_rq_cur_bytes(req);
+ }
+
+ /* recalculate the number of segments */
blk_recalc_rq_segments(req);
- return 1;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(blk_update_request);
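A minimal, hypothetical sketch of how a request stacking driver might use it: blk_update_request() only advances the byte counters and never finishes the request, so the final completion still has to go through the blk_end_request helpers (or equivalent bookkeeping of the stacking driver):

static void my_stacked_complete(struct request *orig, int error,
                                unsigned int nr_bytes)
{
        if (blk_update_request(orig, error, nr_bytes))
                return;         /* leftover data, more completions to come */

        /* all bios are done; now really finish the original request */
        blk_end_request_all(orig, error);
}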
+
+static bool blk_update_bidi_request(struct request *rq, int error,
+ unsigned int nr_bytes,
+ unsigned int bidi_bytes)
+{
+ if (blk_update_request(rq, error, nr_bytes))
+ return true;
+
+ /* Bidi request must be completed as a whole */
+ if (unlikely(blk_bidi_rq(rq)) &&
+ blk_update_request(rq->next_rq, error, bidi_bytes))
+ return true;
+
+ add_disk_randomness(rq->rq_disk);
+
+ return false;
}
/*
* queue lock must be held
*/
-static void end_that_request_last(struct request *req, int error)
+static void blk_finish_request(struct request *req, int error)
{
+ BUG_ON(blk_queued_rq(req));
+
if (blk_rq_tagged(req))
blk_queue_end_tag(req->q, req);
- if (blk_queued_rq(req))
- elv_dequeue_request(req->q, req);
-
if (unlikely(laptop_mode) && blk_fs_request(req))
laptop_io_completion();
@@ -1857,117 +2065,62 @@ static void end_that_request_last(struct request *req, int error)
}
/**
- * blk_rq_bytes - Returns bytes left to complete in the entire request
- * @rq: the request being processed
- **/
-unsigned int blk_rq_bytes(struct request *rq)
-{
- if (blk_fs_request(rq))
- return rq->hard_nr_sectors << 9;
-
- return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_bytes);
-
-/**
- * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
- * @rq: the request being processed
- **/
-unsigned int blk_rq_cur_bytes(struct request *rq)
-{
- if (blk_fs_request(rq))
- return rq->current_nr_sectors << 9;
-
- if (rq->bio)
- return rq->bio->bi_size;
-
- return rq->data_len;
-}
-EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
-
-/**
- * end_request - end I/O on the current segment of the request
- * @req: the request being processed
- * @uptodate: error value or %0/%1 uptodate flag
+ * blk_end_bidi_request - Complete a bidi request
+ * @rq: the request to complete
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
*
* Description:
- * Ends I/O on the current segment of a request. If that is the only
- * remaining segment, the request is also completed and freed.
- *
- * This is a remnant of how older block drivers handled I/O completions.
- * Modern drivers typically end I/O on the full request in one go, unless
- * they have a residual value to account for. For that case this function
- * isn't really useful, unless the residual just happens to be the
- * full current segment. In other words, don't use this function in new
- * code. Use blk_end_request() or __blk_end_request() to end a request.
+ * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ * Drivers that support bidi can safely call this function for any
+ * type of request, bidi or uni. In the latter case @bidi_bytes is
+ * just ignored.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
**/
-void end_request(struct request *req, int uptodate)
-{
- int error = 0;
-
- if (uptodate <= 0)
- error = uptodate ? uptodate : -EIO;
-
- __blk_end_request(req, error, req->hard_cur_sectors << 9);
-}
-EXPORT_SYMBOL(end_request);
-
-static int end_that_request_data(struct request *rq, int error,
+static bool blk_end_bidi_request(struct request *rq, int error,
unsigned int nr_bytes, unsigned int bidi_bytes)
{
- if (rq->bio) {
- if (__end_that_request_first(rq, error, nr_bytes))
- return 1;
+ struct request_queue *q = rq->q;
+ unsigned long flags;
- /* Bidi request must be completed as a whole */
- if (blk_bidi_rq(rq) &&
- __end_that_request_first(rq->next_rq, error, bidi_bytes))
- return 1;
- }
+ if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+ return true;
- return 0;
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_finish_request(rq, error);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ return false;
}
/**
- * blk_end_io - Generic end_io function to complete a request.
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete @rq
- * @bidi_bytes: number of bytes to complete @rq->next_rq
- * @drv_callback: function called between completion of bios in the request
- * and completion of the request.
- * If the callback returns non %0, this helper returns without
- * completion of the request.
+ * __blk_end_bidi_request - Complete a bidi request with queue lock held
+ * @rq: the request to complete
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete @rq
+ * @bidi_bytes: number of bytes to complete @rq->next_rq
*
* Description:
- * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
- * If @rq has leftover, sets it up for the next range of segments.
+ * Identical to blk_end_bidi_request() except that queue lock is
+ * assumed to be locked on entry and remains so on return.
*
* Return:
- * %0 - we are done with this request
- * %1 - this request is not freed yet, it still has pending buffers.
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
**/
-static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
- unsigned int bidi_bytes,
- int (drv_callback)(struct request *))
+static bool __blk_end_bidi_request(struct request *rq, int error,
+ unsigned int nr_bytes, unsigned int bidi_bytes)
{
- struct request_queue *q = rq->q;
- unsigned long flags = 0UL;
-
- if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
- return 1;
-
- /* Special feature for tricky drivers */
- if (drv_callback && drv_callback(rq))
- return 1;
-
- add_disk_randomness(rq->rq_disk);
+ if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
+ return true;
- spin_lock_irqsave(q->queue_lock, flags);
- end_that_request_last(rq, error);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_finish_request(rq, error);
- return 0;
+ return false;
}
/**
@@ -1981,124 +2134,112 @@ static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
* If @rq has leftover, sets it up for the next range of segments.
*
* Return:
- * %0 - we are done with this request
- * %1 - still buffers pending for this request
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
**/
-int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
- return blk_end_io(rq, error, nr_bytes, 0, NULL);
+ return blk_end_bidi_request(rq, error, nr_bytes, 0);
}
EXPORT_SYMBOL_GPL(blk_end_request);
/**
- * __blk_end_request - Helper function for drivers to complete the request.
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
+ * blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
*
* Description:
- * Must be called with queue lock held unlike blk_end_request().
- *
- * Return:
- * %0 - we are done with this request
- * %1 - still buffers pending for this request
- **/
-int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+ * Completely finish @rq.
+ */
+void blk_end_request_all(struct request *rq, int error)
{
- if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
- return 1;
+ bool pending;
+ unsigned int bidi_bytes = 0;
- add_disk_randomness(rq->rq_disk);
+ if (unlikely(blk_bidi_rq(rq)))
+ bidi_bytes = blk_rq_bytes(rq->next_rq);
- end_that_request_last(rq, error);
+ pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+ BUG_ON(pending);
+}
+EXPORT_SYMBOL_GPL(blk_end_request_all);
- return 0;
+/**
+ * blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
+ *
+ * Description:
+ * Complete the current consecutively mapped chunk from @rq.
+ *
+ * Return:
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool blk_end_request_cur(struct request *rq, int error)
+{
+ return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
-EXPORT_SYMBOL_GPL(__blk_end_request);
+EXPORT_SYMBOL_GPL(blk_end_request_cur);
/**
- * blk_end_bidi_request - Helper function for drivers to complete bidi request.
- * @rq: the bidi request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete @rq
- * @bidi_bytes: number of bytes to complete @rq->next_rq
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq: the request being processed
+ * @error: %0 for success, < %0 for error
+ * @nr_bytes: number of bytes to complete
*
* Description:
- * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
+ * Must be called with queue lock held unlike blk_end_request().
*
* Return:
- * %0 - we are done with this request
- * %1 - still buffers pending for this request
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
**/
-int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
- unsigned int bidi_bytes)
+bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
- return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
+ return __blk_end_bidi_request(rq, error, nr_bytes, 0);
}
-EXPORT_SYMBOL_GPL(blk_end_bidi_request);
+EXPORT_SYMBOL_GPL(__blk_end_request);
/**
- * blk_update_request - Special helper function for request stacking drivers
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete @rq
+ * __blk_end_request_all - Helper function for drivers to finish the request.
+ * @rq: the request to finish
+ * @error: %0 for success, < %0 for error
*
* Description:
- * Ends I/O on a number of bytes attached to @rq, but doesn't complete
- * the request structure even if @rq doesn't have leftover.
- * If @rq has leftover, sets it up for the next range of segments.
- *
- * This special helper function is only for request stacking drivers
- * (e.g. request-based dm) so that they can handle partial completion.
- * Actual device drivers should use blk_end_request instead.
+ * Completely finish @rq. Must be called with queue lock held.
*/
-void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
+void __blk_end_request_all(struct request *rq, int error)
{
- if (!end_that_request_data(rq, error, nr_bytes, 0)) {
- /*
- * These members are not updated in end_that_request_data()
- * when all bios are completed.
- * Update them so that the request stacking driver can find
- * how many bytes remain in the request later.
- */
- rq->nr_sectors = rq->hard_nr_sectors = 0;
- rq->current_nr_sectors = rq->hard_cur_sectors = 0;
- }
+ bool pending;
+ unsigned int bidi_bytes = 0;
+
+ if (unlikely(blk_bidi_rq(rq)))
+ bidi_bytes = blk_rq_bytes(rq->next_rq);
+
+ pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
+ BUG_ON(pending);
}
-EXPORT_SYMBOL_GPL(blk_update_request);
+EXPORT_SYMBOL_GPL(__blk_end_request_all);
/**
- * blk_end_request_callback - Special helper function for tricky drivers
- * @rq: the request being processed
- * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete
- * @drv_callback: function called between completion of bios in the request
- * and completion of the request.
- * If the callback returns non %0, this helper returns without
- * completion of the request.
+ * __blk_end_request_cur - Helper function to finish the current request chunk.
+ * @rq: the request to finish the current chunk for
+ * @error: %0 for success, < %0 for error
*
* Description:
- * Ends I/O on a number of bytes attached to @rq.
- * If @rq has leftover, sets it up for the next range of segments.
- *
- * This special helper function is used only for existing tricky drivers.
- * (e.g. cdrom_newpc_intr() of ide-cd)
- * This interface will be removed when such drivers are rewritten.
- * Don't use this interface in other places anymore.
+ * Complete the current consecutively mapped chunk from @rq. Must
+ * be called with queue lock held.
*
* Return:
- * %0 - we are done with this request
- * %1 - this request is not freed yet.
- * this request still has pending buffers or
- * the driver doesn't want to finish this request yet.
- **/
-int blk_end_request_callback(struct request *rq, int error,
- unsigned int nr_bytes,
- int (drv_callback)(struct request *))
+ * %false - we are done with this request
+ * %true - still buffers pending for this request
+ */
+bool __blk_end_request_cur(struct request *rq, int error)
{
- return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
+ return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
}
-EXPORT_SYMBOL_GPL(blk_end_request_callback);
+EXPORT_SYMBOL_GPL(__blk_end_request_cur);
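For illustration, a driver that completes one request at a time from its interrupt handler could use the new helpers as below; struct mydev and its fields are assumptions, and blk_end_request_all() takes the queue lock itself (use __blk_end_request_all() when the lock is already held, as in the request_fn sketch above):

#include <linux/interrupt.h>

struct mydev {                          /* hypothetical per-device state */
        struct request *current_rq;
        int error;
};

static irqreturn_t mydev_irq(int irq, void *data)
{
        struct mydev *md = data;

        /* finish the whole request in one go, error is 0 or a negative errno */
        blk_end_request_all(md->current_rq, md->error);
        md->current_rq = NULL;
        return IRQ_HANDLED;
}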
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
@@ -2111,11 +2252,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->buffer = bio_data(bio);
}
- rq->current_nr_sectors = bio_cur_sectors(bio);
- rq->hard_cur_sectors = rq->current_nr_sectors;
- rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
- rq->data_len = bio->bi_size;
-
+ rq->__data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
if (bio->bi_bdev)
@@ -2158,6 +2295,9 @@ EXPORT_SYMBOL(kblockd_schedule_work);
int __init blk_dev_init(void)
{
+ BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+ sizeof(((struct request *)0)->cmd_flags));
+
kblockd_workqueue = create_workqueue("kblockd");
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 6af716d1e54e..49557e91f0da 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -51,7 +51,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
rq->rq_disk = bd_disk;
- rq->cmd_flags |= REQ_NOMERGE;
rq->end_io = done;
WARN_ON(irqs_disabled());
spin_lock_irq(q->queue_lock);
diff --git a/block/blk-map.c b/block/blk-map.c
index f103729b462f..ef2492adca7e 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,11 +20,10 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
rq->biotail->bi_next = bio;
rq->biotail = bio;
- rq->data_len += bio->bi_size;
+ rq->__data_len += bio->bi_size;
}
return 0;
}
-EXPORT_SYMBOL(blk_rq_append_bio);
static int __blk_rq_unmap_user(struct bio *bio)
{
@@ -156,7 +155,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
- rq->buffer = rq->data = NULL;
+ rq->buffer = NULL;
return 0;
unmap_rq:
blk_rq_unmap_user(bio);
@@ -235,7 +234,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
blk_queue_bounce(q, &bio);
bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
- rq->buffer = rq->data = NULL;
+ rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
@@ -282,7 +281,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
*
* Description:
* Data will be mapped directly if possible. Otherwise a bounce
- * buffer is used.
+ * buffer is used. Can be called multiple times to append multiple
+ * buffers.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
@@ -290,6 +290,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
int reading = rq_data_dir(rq) == READ;
int do_copy = 0;
struct bio *bio;
+ int ret;
if (len > (q->max_hw_sectors << 9))
return -EINVAL;
@@ -311,9 +312,15 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
- blk_rq_bio_prep(q, rq, bio);
+ ret = blk_rq_append_bio(q, rq, bio);
+ if (unlikely(ret)) {
+ /* request is too big */
+ bio_put(bio);
+ return ret;
+ }
+
blk_queue_bounce(q, &rq->bio);
- rq->buffer = rq->data = NULL;
+ rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
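A sketch of the multi-buffer behaviour documented above; buf1/buf2 and their lengths are assumed to come from the caller:

static struct request *mydev_map_two(struct request_queue *q,
                                     void *buf1, unsigned int len1,
                                     void *buf2, unsigned int len2)
{
        struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

        if (!rq)
                return NULL;

        /* each call appends another bio to the same request */
        if (blk_rq_map_kern(q, rq, buf1, len1, GFP_KERNEL) ||
            blk_rq_map_kern(q, rq, buf2, len2, GFP_KERNEL)) {
                blk_put_request(rq);
                return NULL;
        }
        return rq;
}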
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 23d2a6fe34a3..4974dd5767e5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,35 +9,6 @@
#include "blk.h"
-void blk_recalc_rq_sectors(struct request *rq, int nsect)
-{
- if (blk_fs_request(rq) || blk_discard_rq(rq)) {
- rq->hard_sector += nsect;
- rq->hard_nr_sectors -= nsect;
-
- /*
- * Move the I/O submission pointers ahead if required.
- */
- if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
- (rq->sector <= rq->hard_sector)) {
- rq->sector = rq->hard_sector;
- rq->nr_sectors = rq->hard_nr_sectors;
- rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
- rq->current_nr_sectors = rq->hard_cur_sectors;
- rq->buffer = bio_data(rq->bio);
- }
-
- /*
- * if total number of sectors is less than the first segment
- * size, something has gone terribly wrong
- */
- if (rq->nr_sectors < rq->current_nr_sectors) {
- printk(KERN_ERR "blk: request botched\n");
- rq->nr_sectors = rq->current_nr_sectors;
- }
- }
-}
-
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio)
{
@@ -199,8 +170,9 @@ new_segment:
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
- (rq->data_len & q->dma_pad_mask)) {
- unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+ (blk_rq_bytes(rq) & q->dma_pad_mask)) {
+ unsigned int pad_len =
+ (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
sg->length += pad_len;
rq->extra_len += pad_len;
@@ -259,7 +231,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
else
max_sectors = q->max_sectors;
- if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+ if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -284,7 +256,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
max_sectors = q->max_sectors;
- if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+ if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -315,7 +287,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
/*
* Will it become too large?
*/
- if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+ if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -345,7 +317,7 @@ static void blk_account_io_merge(struct request *req)
int cpu;
cpu = part_stat_lock();
- part = disk_map_sector_rcu(req->rq_disk, req->sector);
+ part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_round_stats(cpu, part);
part_dec_in_flight(part);
@@ -366,7 +338,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
/*
* not contiguous
*/
- if (req->sector + req->nr_sectors != next->sector)
+ if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
return 0;
if (rq_data_dir(req) != rq_data_dir(next)
@@ -398,7 +370,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
req->biotail->bi_next = next->bio;
req->biotail = next->biotail;
- req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+ req->__data_len += blk_rq_bytes(next);
elv_merge_requests(q, req, next);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 3c518e3303ae..2e5cfeb59333 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
- unsigned max_depth, offset;
+ unsigned max_depth;
int tag;
if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -355,13 +355,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
* to starve sync IO on behalf of flooding async IO.
*/
max_depth = bqt->max_depth;
- if (rq_is_sync(rq))
- offset = 0;
- else
- offset = max_depth >> 2;
+ if (!rq_is_sync(rq) && max_depth > 1) {
+ max_depth -= 2;
+ if (!max_depth)
+ max_depth = 1;
+ if (q->in_flight[0] > max_depth)
+ return 1;
+ }
do {
- tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+ tag = find_first_zero_bit(bqt->tag_map, max_depth);
if (tag >= max_depth)
return 1;
@@ -374,7 +377,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag;
bqt->tag_index[tag] = rq;
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
list_add(&rq->queuelist, &q->tag_busy_list);
return 0;
}
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 1ec0d503cacd..1ba7e0aca878 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -122,10 +122,8 @@ void blk_rq_timed_out_timer(unsigned long data)
if (blk_mark_rq_complete(rq))
continue;
blk_rq_timed_out(rq);
- } else {
- if (!next || time_after(next, rq->deadline))
- next = rq->deadline;
- }
+ } else if (!next || time_after(next, rq->deadline))
+ next = rq->deadline;
}
/*
@@ -176,16 +174,14 @@ void blk_add_timer(struct request *req)
BUG_ON(!list_empty(&req->timeout_list));
BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
- if (req->timeout)
- req->deadline = jiffies + req->timeout;
- else {
- req->deadline = jiffies + q->rq_timeout;
- /*
- * Some LLDs, like scsi, peek at the timeout to prevent
- * a command from being retried forever.
- */
+ /*
+ * Some LLDs, like scsi, peek at the timeout to prevent a
+ * command from being retried forever.
+ */
+ if (!req->timeout)
req->timeout = q->rq_timeout;
- }
+
+ req->deadline = jiffies + req->timeout;
list_add_tail(&req->timeout_list, &q->timeout_list);
/*
diff --git a/block/blk.h b/block/blk.h
index 79c85f7c9ff5..c863ec2281e0 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -13,6 +13,9 @@ extern struct kobj_type blk_queue_ktype;
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
+int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+ struct bio *bio);
+void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
void blk_unplug_work(struct work_struct *work);
@@ -43,6 +46,43 @@ static inline void blk_clear_rq_complete(struct request *rq)
clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
+/*
+ * Internal elevator interface
+ */
+#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
+
+static inline struct request *__elv_next_request(struct request_queue *q)
+{
+ struct request *rq;
+
+ while (1) {
+ while (!list_empty(&q->queue_head)) {
+ rq = list_entry_rq(q->queue_head.next);
+ if (blk_do_ordered(q, &rq))
+ return rq;
+ }
+
+ if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+ return NULL;
+ }
+}
+
+static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_activate_req_fn)
+ e->ops->elevator_activate_req_fn(q, rq);
+}
+
+static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_deactivate_req_fn)
+ e->ops->elevator_deactivate_req_fn(q, rq);
+}
+
#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
@@ -64,7 +104,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
-void blk_recalc_rq_sectors(struct request *rq, int nsect);
void blk_queue_congestion_threshold(struct request_queue *q);
@@ -112,9 +151,17 @@ static inline int blk_cpu_to_group(int cpu)
#endif
}
+/*
+ * Contribute to IO statistics IFF:
+ *
+ * a) it's attached to a gendisk, and
+ * b) the queue had IO stats enabled when this request was started, and
+ * c) it's a file system request or a discard request
+ */
static inline int blk_do_io_stat(struct request *rq)
{
- return rq->rq_disk && blk_rq_io_stat(rq);
+ return rq->rq_disk && blk_rq_io_stat(rq) &&
+ (blk_fs_request(rq) || blk_discard_rq(rq));
}
#endif
diff --git a/block/bsg.c b/block/bsg.c
index 206060e795da..2d746e34f4c2 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -445,14 +445,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
}
if (rq->next_rq) {
- hdr->dout_resid = rq->data_len;
- hdr->din_resid = rq->next_rq->data_len;
+ hdr->dout_resid = rq->resid_len;
+ hdr->din_resid = rq->next_rq->resid_len;
blk_rq_unmap_user(bidi_bio);
blk_put_request(rq->next_rq);
} else if (rq_data_dir(rq) == READ)
- hdr->din_resid = rq->data_len;
+ hdr->din_resid = rq->resid_len;
else
- hdr->dout_resid = rq->data_len;
+ hdr->dout_resid = rq->resid_len;
/*
* If the request generated a negative error number, return it
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a55a9bd75bd1..99ac4304d711 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -349,8 +349,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
return rq2;
- s1 = rq1->sector;
- s2 = rq2->sector;
+ s1 = blk_rq_pos(rq1);
+ s2 = blk_rq_pos(rq2);
last = cfqd->last_position;
@@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
* Sort strictly based on sector. Smallest to the left,
* largest to the right.
*/
- if (sector > cfqq->next_rq->sector)
+ if (sector > blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_right;
- else if (sector < cfqq->next_rq->sector)
+ else if (sector < blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_left;
else
break;
@@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return;
cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
- __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector,
- &parent, &p);
+ __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
+ blk_rq_pos(cfqq->next_rq), &parent, &p);
if (!__cfqq) {
rb_link_node(&cfqq->p_node, parent, p);
rb_insert_color(&cfqq->p_node, cfqq->p_root);
@@ -760,7 +760,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
cfqd->rq_in_driver);
- cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
+ cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
@@ -949,10 +949,10 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
struct request *rq)
{
- if (rq->sector >= cfqd->last_position)
- return rq->sector - cfqd->last_position;
+ if (blk_rq_pos(rq) >= cfqd->last_position)
+ return blk_rq_pos(rq) - cfqd->last_position;
else
- return cfqd->last_position - rq->sector;
+ return cfqd->last_position - blk_rq_pos(rq);
}
#define CIC_SEEK_THR 8 * 1024
@@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
if (cfq_rq_close(cfqd, __cfqq->next_rq))
return __cfqq;
- if (__cfqq->next_rq->sector < sector)
+ if (blk_rq_pos(__cfqq->next_rq) < sector)
node = rb_next(&__cfqq->p_node);
else
node = rb_prev(&__cfqq->p_node);
@@ -1918,10 +1918,10 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
if (!cic->last_request_pos)
sdist = 0;
- else if (cic->last_request_pos < rq->sector)
- sdist = rq->sector - cic->last_request_pos;
+ else if (cic->last_request_pos < blk_rq_pos(rq))
+ sdist = blk_rq_pos(rq) - cic->last_request_pos;
else
- sdist = cic->last_request_pos - rq->sector;
+ sdist = cic->last_request_pos - blk_rq_pos(rq);
/*
* Don't allow the seek distance to get too large from the
@@ -2071,7 +2071,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_update_io_seektime(cfqd, cic, rq);
cfq_update_idle_window(cfqd, cfqq, cic);
- cic->last_request_pos = rq->sector + rq->nr_sectors;
+ cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (cfqq == cfqd->active_queue) {
/*
@@ -2088,7 +2088,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
cfqd->busy_queues > 1) {
del_timer(&cfqd->idle_slice_timer);
- blk_start_queueing(cfqd->queue);
+ __blk_run_queue(cfqd->queue);
}
cfq_mark_cfqq_must_dispatch(cfqq);
}
@@ -2100,7 +2100,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* this new queue is RT and the current one is BE
*/
cfq_preempt_queue(cfqd, cfqq);
- blk_start_queueing(cfqd->queue);
+ __blk_run_queue(cfqd->queue);
}
}
@@ -2345,7 +2345,7 @@ static void cfq_kick_queue(struct work_struct *work)
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
- blk_start_queueing(q);
+ __blk_run_queue(cfqd->queue);
spin_unlock_irq(q->queue_lock);
}
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index c4d991d4adef..b547cbca7b23 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -138,7 +138,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
if (__rq) {
- BUG_ON(sector != __rq->sector);
+ BUG_ON(sector != blk_rq_pos(__rq));
if (elv_rq_merge_ok(__rq, bio)) {
ret = ELEVATOR_FRONT_MERGE;
diff --git a/block/elevator.c b/block/elevator.c
index 7073a9072577..ebee948293eb 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -52,8 +52,7 @@ static const int elv_hash_shift = 6;
#define ELV_HASH_FN(sec) \
(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
-#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
-#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
+#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
DEFINE_TRACE(block_rq_insert);
DEFINE_TRACE(block_rq_issue);
@@ -120,9 +119,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
* we can merge and sequence is ok, check if it's possible
*/
if (elv_rq_merge_ok(__rq, bio)) {
- if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+ if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
ret = ELEVATOR_BACK_MERGE;
- else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+ else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
ret = ELEVATOR_FRONT_MERGE;
}
@@ -310,22 +309,6 @@ void elevator_exit(struct elevator_queue *e)
}
EXPORT_SYMBOL(elevator_exit);
-static void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (e->ops->elevator_activate_req_fn)
- e->ops->elevator_activate_req_fn(q, rq);
-}
-
-static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (e->ops->elevator_deactivate_req_fn)
- e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
static inline void __elv_rqhash_del(struct request *rq)
{
hlist_del_init(&rq->hash);
@@ -387,9 +370,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
parent = *p;
__rq = rb_entry(parent, struct request, rb_node);
- if (rq->sector < __rq->sector)
+ if (blk_rq_pos(rq) < blk_rq_pos(__rq))
p = &(*p)->rb_left;
- else if (rq->sector > __rq->sector)
+ else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
p = &(*p)->rb_right;
else
return __rq;
@@ -417,9 +400,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
while (n) {
rq = rb_entry(n, struct request, rb_node);
- if (sector < rq->sector)
+ if (sector < blk_rq_pos(rq))
n = n->rb_left;
- else if (sector > rq->sector)
+ else if (sector > blk_rq_pos(rq))
n = n->rb_right;
else
return rq;
@@ -458,14 +441,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
break;
if (pos->cmd_flags & stop_flags)
break;
- if (rq->sector >= boundary) {
- if (pos->sector < boundary)
+ if (blk_rq_pos(rq) >= boundary) {
+ if (blk_rq_pos(pos) < boundary)
continue;
} else {
- if (pos->sector >= boundary)
+ if (blk_rq_pos(pos) >= boundary)
break;
}
- if (rq->sector >= pos->sector)
+ if (blk_rq_pos(rq) >= blk_rq_pos(pos))
break;
}
@@ -563,7 +546,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
* in_flight count again
*/
if (blk_account_rq(rq)) {
- q->in_flight--;
+ q->in_flight[rq_is_sync(rq)]--;
if (blk_sorted_rq(rq))
elv_deactivate_rq(q, rq);
}
@@ -599,7 +582,7 @@ void elv_quiesce_start(struct request_queue *q)
*/
elv_drain_elevator(q);
while (q->rq.elvpriv) {
- blk_start_queueing(q);
+ __blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
@@ -643,8 +626,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
* with anything. There's no point in delaying queue
* processing.
*/
- blk_remove_plug(q);
- blk_start_queueing(q);
+ __blk_run_queue(q);
break;
case ELEVATOR_INSERT_SORT:
@@ -703,7 +685,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
if (unplug_it && blk_queue_plugged(q)) {
int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
- - q->in_flight;
+ - queue_in_flight(q);
if (nrq >= q->unplug_thresh)
__generic_unplug_device(q);
@@ -759,117 +741,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
}
EXPORT_SYMBOL(elv_add_request);
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
- struct request *rq;
-
- while (1) {
- while (!list_empty(&q->queue_head)) {
- rq = list_entry_rq(q->queue_head.next);
- if (blk_do_ordered(q, &rq))
- return rq;
- }
-
- if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
- return NULL;
- }
-}
-
-struct request *elv_next_request(struct request_queue *q)
-{
- struct request *rq;
- int ret;
-
- while ((rq = __elv_next_request(q)) != NULL) {
- if (!(rq->cmd_flags & REQ_STARTED)) {
- /*
- * This is the first time the device driver
- * sees this request (possibly after
- * requeueing). Notify IO scheduler.
- */
- if (blk_sorted_rq(rq))
- elv_activate_rq(q, rq);
-
- /*
- * just mark as started even if we don't start
- * it, a request that has been delayed should
- * not be passed by new incoming requests
- */
- rq->cmd_flags |= REQ_STARTED;
- trace_block_rq_issue(q, rq);
- }
-
- if (!q->boundary_rq || q->boundary_rq == rq) {
- q->end_sector = rq_end_sector(rq);
- q->boundary_rq = NULL;
- }
-
- if (rq->cmd_flags & REQ_DONTPREP)
- break;
-
- if (q->dma_drain_size && rq->data_len) {
- /*
- * make sure space for the drain appears we
- * know we can do this because max_hw_segments
- * has been adjusted to be one fewer than the
- * device can handle
- */
- rq->nr_phys_segments++;
- }
-
- if (!q->prep_rq_fn)
- break;
-
- ret = q->prep_rq_fn(q, rq);
- if (ret == BLKPREP_OK) {
- break;
- } else if (ret == BLKPREP_DEFER) {
- /*
- * the request may have been (partially) prepped.
- * we need to keep this request in the front to
- * avoid resource deadlock. REQ_STARTED will
- * prevent other fs requests from passing this one.
- */
- if (q->dma_drain_size && rq->data_len &&
- !(rq->cmd_flags & REQ_DONTPREP)) {
- /*
- * remove the space for the drain we added
- * so that we don't add it again
- */
- --rq->nr_phys_segments;
- }
-
- rq = NULL;
- break;
- } else if (ret == BLKPREP_KILL) {
- rq->cmd_flags |= REQ_QUIET;
- __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
- } else {
- printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
- break;
- }
- }
-
- return rq;
-}
-EXPORT_SYMBOL(elv_next_request);
-
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
-{
- BUG_ON(list_empty(&rq->queuelist));
- BUG_ON(ELV_ON_HASH(rq));
-
- list_del_init(&rq->queuelist);
-
- /*
- * the time frame between a request being removed from the lists
- * and to it is freed is accounted as io that is in progress at
- * the driver side.
- */
- if (blk_account_rq(rq))
- q->in_flight++;
-}
-
int elv_queue_empty(struct request_queue *q)
{
struct elevator_queue *e = q->elevator;
@@ -939,7 +810,7 @@ void elv_abort_queue(struct request_queue *q)
rq = list_entry_rq(q->queue_head.next);
rq->cmd_flags |= REQ_QUIET;
trace_block_rq_abort(q, rq);
- __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+ __blk_end_request_all(rq, -EIO);
}
}
EXPORT_SYMBOL(elv_abort_queue);
@@ -952,7 +823,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
* request is released from the driver, io must be done
*/
if (blk_account_rq(rq)) {
- q->in_flight--;
+ q->in_flight[rq_is_sync(rq)]--;
if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
e->ops->elevator_completed_req_fn(q, rq);
}
@@ -967,11 +838,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
if (!list_empty(&q->queue_head))
next = list_entry_rq(q->queue_head.next);
- if (!q->in_flight &&
+ if (!queue_in_flight(q) &&
blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
(!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
- blk_start_queueing(q);
+ __blk_run_queue(q);
}
}
}
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 82a0ca2f6729..a9670dd4b5de 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -230,7 +230,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
hdr->info = 0;
if (hdr->masked_status || hdr->host_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
- hdr->resid = rq->data_len;
+ hdr->resid = rq->resid_len;
hdr->sb_len_wr = 0;
if (rq->sense_len && hdr->sbp) {
@@ -500,9 +500,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
rq = blk_get_request(q, WRITE, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- rq->data = NULL;
- rq->data_len = 0;
- rq->extra_len = 0;
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq->cmd[0] = cmd;
rq->cmd[4] = data;