author     Jens Axboe <jens.axboe@oracle.com>  2008-01-29 13:54:41 +0100
committer  Jens Axboe <jens.axboe@oracle.com>  2008-01-29 21:55:09 +0100
commit     26b8256e2bb930a8e4d4d10aa74950d8921376b8 (patch)
tree       36fc1011aa68526dc1fb5b237e330ca2c27c9939 /block
parent     86db1e29772372155db08ff48a9ceb76e11a2ad1 (diff)
block: get rid of unnecessary forward declarations in blk-core.c
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  62
1 file changed, 30 insertions(+), 32 deletions(-)
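The change is purely mechanical: each static function's definition is moved above its first caller, so the forward declaration at the top of blk-core.c becomes redundant and can be dropped. A minimal sketch of the pattern (hypothetical helper()/caller() names, not code from this patch):

/* Before: definition follows first use, so a forward declaration is needed. */
static void helper(void);	/* forward declaration */

static void caller(void)
{
	helper();
}

static void helper(void)
{
	/* ... */
}

/* After: definition precedes first use; the declaration can be deleted. */
static void helper(void)
{
	/* ... */
}

static void caller(void)
{
	helper();
}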
diff --git a/block/blk-core.c b/block/blk-core.c
index 2c73ed1a813..3d415ec10fb 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -33,9 +33,7 @@
#include "blk.h"
-static void drive_stat_acct(struct request *rq, int new_io);
static int __make_request(struct request_queue *q, struct bio *bio);
-static void blk_recalc_rq_segments(struct request *rq);
/*
 * For the allocated request tables
@@ -54,6 +52,21 @@ static struct workqueue_struct *kblockd_workqueue;
static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+static void drive_stat_acct(struct request *rq, int new_io)
+{
+	int rw = rq_data_dir(rq);
+
+	if (!blk_fs_request(rq) || !rq->rq_disk)
+		return;
+
+	if (!new_io) {
+		__disk_stat_inc(rq->rq_disk, merges[rw]);
+	} else {
+		disk_round_stats(rq->rq_disk);
+		rq->rq_disk->in_flight++;
+	}
+}
+
void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;
@@ -168,21 +181,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
EXPORT_SYMBOL(blk_dump_rq_flags);
-void blk_recount_segments(struct request_queue *q, struct bio *bio)
-{
-	struct request rq;
-	struct bio *nxt = bio->bi_next;
-	rq.q = q;
-	rq.bio = rq.biotail = bio;
-	bio->bi_next = NULL;
-	blk_recalc_rq_segments(&rq);
-	bio->bi_next = nxt;
-	bio->bi_phys_segments = rq.nr_phys_segments;
-	bio->bi_hw_segments = rq.nr_hw_segments;
-	bio->bi_flags |= (1 << BIO_SEG_VALID);
-}
-EXPORT_SYMBOL(blk_recount_segments);
-
static void blk_recalc_rq_segments(struct request *rq)
{
	int nr_phys_segs;
@@ -255,6 +253,21 @@ new_hw_segment:
	rq->nr_hw_segments = nr_hw_segs;
}
+void blk_recount_segments(struct request_queue *q, struct bio *bio)
+{
+	struct request rq;
+	struct bio *nxt = bio->bi_next;
+	rq.q = q;
+	rq.bio = rq.biotail = bio;
+	bio->bi_next = NULL;
+	blk_recalc_rq_segments(&rq);
+	bio->bi_next = nxt;
+	bio->bi_phys_segments = rq.nr_phys_segments;
+	bio->bi_hw_segments = rq.nr_hw_segments;
+	bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+EXPORT_SYMBOL(blk_recount_segments);
+
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
@@ -1305,21 +1318,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
EXPORT_SYMBOL(blk_insert_request);
-static void drive_stat_acct(struct request *rq, int new_io)
-{
-	int rw = rq_data_dir(rq);
-
-	if (!blk_fs_request(rq) || !rq->rq_disk)
-		return;
-
-	if (!new_io) {
-		__disk_stat_inc(rq->rq_disk, merges[rw]);
-	} else {
-		disk_round_stats(rq->rq_disk);
-		rq->rq_disk->in_flight++;
-	}
-}
-
/*
 * add-request adds a request to the linked list.
 * queue lock is held and interrupts disabled, as we muck with the