author		Tejun Heo <tj@kernel.org>	2009-05-07 22:24:44 +0900
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-11 09:50:55 +0200
commit		a2dec7b36364a5cc564c4d76cf16d2e7d33f5c05
tree		76dfb84ba5fa097a929ab81cf5718c6fcbc9d720 /block/blk-core.c
parent		34b7d2c957199834c474c9d46739265643f4d9c7
block: hide request sector and data_len
Block low level drivers have, for some reason, been pretty good at abusing the block layer API. Especially struct request's fields tend to get violated in all possible ways. Make it clear that low level drivers MUST NOT access or manipulate rq->sector and rq->data_len directly by prefixing them with double underscores.

This change is also necessary to break the build of out-of-tree code which assumes the previous block API, where internal fields could be manipulated and rq->data_len carried the residual count on completion.

[ Impact: hide internal fields, block API change ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
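For an out-of-tree driver caught by the resulting build break, the fix is to go through the request accessors rather than the now-hidden fields: blk_rq_pos() and blk_rq_bytes() (the latter already appears in the hunks below) replace direct reads of rq->sector and rq->data_len, and the residual is accounted by the block layer instead of being left behind in rq->data_len. The following is only a minimal sketch of the pattern on the submission and completion side; the my_* helpers and the zero-residual hardware stub are hypothetical stand-ins, not part of this patch:

#include <linux/blkdev.h>

/* Hypothetical hardware glue standing in for a real controller. */
static void my_hw_submit(sector_t pos, unsigned int len)
{
	pr_info("xfer %u bytes @ sector %llu\n",
		len, (unsigned long long)pos);
}

static unsigned int my_hw_residual(void)
{
	return 0;	/* pretend the hardware always transfers everything */
}

/* Submission: read position and length through the accessors. */
static void my_driver_queue_rq(struct request *rq)
{
	sector_t pos = blk_rq_pos(rq);		/* was: rq->sector */
	unsigned int len = blk_rq_bytes(rq);	/* was: rq->data_len */

	my_hw_submit(pos, len);
}

/*
 * Completion: report the completed byte count to the block layer
 * instead of reading the residual out of rq->data_len afterwards.
 */
static void my_driver_complete_rq(struct request *rq, int error)
{
	unsigned int done = blk_rq_bytes(rq) - my_hw_residual();

	if (blk_end_request(rq, error, done)) {
		/* partial: blk_rq_bytes(rq) bytes are still pending */
	}
}

blk_end_request() feeds the completed byte count into blk_update_request(), patched below, which is now the only place that adjusts __sector and __data_len as a request advances.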
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3596ca71909..6226a380fb6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,7 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->cpu = -1;
 	rq->q = q;
-	rq->sector = (sector_t) -1;
+	rq->__sector = (sector_t) -1;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->cmd = rq->__cmd;
@@ -1095,7 +1095,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
-	req->sector = bio->bi_sector;
+	req->__sector = bio->bi_sector;
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1143,7 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 		req->biotail->bi_next = bio;
 		req->biotail = bio;
-		req->data_len += bytes;
+		req->__data_len += bytes;
 		req->ioprio = ioprio_best(req->ioprio, prio);
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
@@ -1169,8 +1169,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 		 * not touch req->buffer either...
 		 */
 		req->buffer = bio_data(bio);
-		req->sector = bio->bi_sector;
-		req->data_len += bytes;
+		req->__sector = bio->bi_sector;
+		req->__data_len += bytes;
 		req->ioprio = ioprio_best(req->ioprio, prio);
 		if (!blk_rq_cpu_valid(req))
 			req->cpu = bio->bi_comp_cpu;
@@ -1878,7 +1878,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		 * can find how many bytes remain in the request
 		 * later.
 		 */
-		req->data_len = 0;
+		req->__data_len = 0;
 		return false;
 	}
 
@@ -1892,12 +1892,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 		bio_iovec(bio)->bv_len -= nr_bytes;
 	}
 
-	req->data_len -= total_bytes;
+	req->__data_len -= total_bytes;
 	req->buffer = bio_data(req->bio);
 
 	/* update sector only for requests with clear definition of sector */
 	if (blk_fs_request(req) || blk_discard_rq(req))
-		req->sector += total_bytes >> 9;
+		req->__sector += total_bytes >> 9;
 
 	/*
 	 * If total number of sectors is less than the first segment
@@ -1905,7 +1905,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	 */
 	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
 		printk(KERN_ERR "blk: request botched\n");
-		req->data_len = blk_rq_cur_bytes(req);
+		req->__data_len = blk_rq_cur_bytes(req);
 	}
 
 	/* recalculate the number of segments */
@@ -2032,7 +2032,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 		rq->buffer = bio_data(bio);
 	}
-	rq->data_len = bio->bi_size;
+	rq->__data_len = bio->bi_size;
 	rq->bio = rq->biotail = bio;
 
 	if (bio->bi_bdev)