author     Tejun Heo <tj@kernel.org>           2009-04-28 13:06:09 +0900
committer  Jens Axboe <jens.axboe@oracle.com>  2009-04-28 08:14:50 +0200
commit     4d6c84d91d1a539ebc47d1a36a35e9390ba11fdc (patch)
tree       62917c8a59bef037231ba0869a688651aba099dc /arch/um/drivers
parent     044208506d35bd62396c4673176e2c12393905b8 (diff)
ubd: cleanup completion path
ubd had its own block request partial completion mechanism, which is
unnecessary as the block layer already does it. Kill ubd_end_request()
and ubd_finish() and replace them with a direct call to blk_end_request().

[ Impact: cleanup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
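The reasoning behind the cleanup is that blk_end_request() already keeps a
running count of how many bytes of a request have been completed, so the
driver does not need its own nr_sectors arithmetic. The userspace sketch
below (not kernel code; fake_request and fake_end_request() are hypothetical
names) models that byte-based partial-completion bookkeeping to show why the
driver-side copy was redundant.

/*
 * Illustrative analogue of the block layer's partial-completion
 * accounting.  Hypothetical names only; this is not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_request {
	unsigned int remaining_bytes;	/* bytes not yet completed */
};

/* Complete nr_bytes of the request; return true while work remains. */
static bool fake_end_request(struct fake_request *rq, unsigned int nr_bytes)
{
	if (nr_bytes >= rq->remaining_bytes)
		rq->remaining_bytes = 0;
	else
		rq->remaining_bytes -= nr_bytes;

	return rq->remaining_bytes != 0;
}

int main(void)
{
	struct fake_request rq = { .remaining_bytes = 8192 };

	/* Two partial completions of 4096 bytes each finish the request. */
	printf("more to do: %d\n", fake_end_request(&rq, 4096));	/* prints 1 */
	printf("more to do: %d\n", fake_end_request(&rq, 4096));	/* prints 0 */
	return 0;
}

With that accounting handled centrally, ubd_handler() can report each
completed chunk with blk_end_request(req->req, 0, req->length), as the diff
below shows.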
Diffstat (limited to 'arch/um/drivers')
-rw-r--r--  arch/um/drivers/ubd_kern.c  23
1 file changed, 1 insertion(+), 22 deletions(-)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index f934225fd8e..36ca9fa89d0 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -451,23 +451,6 @@ static void do_ubd_request(struct request_queue * q);
 /* Only changed by ubd_init, which is an initcall. */
 static int thread_fd = -1;
-
-static void ubd_end_request(struct request *req, int bytes, int error)
-{
-	blk_end_request(req, error, bytes);
-}
-
-/* Callable only from interrupt context - otherwise you need to do
- * spin_lock_irq()/spin_lock_irqsave() */
-static inline void ubd_finish(struct request *req, int bytes)
-{
-	if(bytes < 0){
-		ubd_end_request(req, 0, -EIO);
-		return;
-	}
-	ubd_end_request(req, bytes, 0);
-}
-
 static LIST_HEAD(restart);
 /* XXX - move this inside ubd_intr. */
@@ -475,7 +458,6 @@ static LIST_HEAD(restart);
 static void ubd_handler(void)
 {
 	struct io_thread_req *req;
-	struct request *rq;
 	struct ubd *ubd;
 	struct list_head *list, *next_ele;
 	unsigned long flags;
@@ -492,10 +474,7 @@ static void ubd_handler(void)
 			return;
 		}
-		rq = req->req;
-		rq->nr_sectors -= req->length >> 9;
-		if(rq->nr_sectors == 0)
-			ubd_finish(rq, rq->hard_nr_sectors << 9);
+		blk_end_request(req->req, 0, req->length);
 		kfree(req);
 	}
 	reactivate_fd(thread_fd, UBD_IRQ);