/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

#define WRITE_BARRIER	(REQ_WRITE | REQ_FLUSH | REQ_FUA)

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static int log_stats;
static int debug_lvl;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
	blkif_t       *blkif;
	u64            id;
	int            nr_pages;
	atomic_t       pendcnt;
	unsigned short operation;
	int            status;
	struct list_head free_list;
} pending_req_t;

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
	pending_req_t		*pending_reqs;
	struct list_head	pending_free;
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;
	struct page		**pending_pages;
	grant_handle_t		*pending_grant_handles;
};

static struct xen_blkbk *blkbk;

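/*
 * All pending_reqs share one flat array of pages and one of grant handles;
 * request N owns the BLKIF_MAX_SEGMENTS_PER_REQUEST consecutive slots
 * starting at N * BLKIF_MAX_SEGMENTS_PER_REQUEST. Note that
 * pending_page(req, seg) below is a field-name macro:
 * blkbk->pending_page(req, seg) expands to
 * blkbk->pending_pages[vaddr_pagenr(req, seg)].
 */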
static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
	return (req - blkbk->pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])

static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st);

/******************************************************************
 * misc small helpers
 */
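/*
 * Take a pending_req off the free list, or return NULL if the pool is
 * exhausted (the caller counts this as an 'out of descriptors' event
 * and retries later).
 */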
static pending_req_t *alloc_req(void)
{
	pending_req_t *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, pending_req_t,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

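/*
 * Return a pending_req to the free list, waking the request thread if
 * the list was empty (it may be sleeping on pending_free_wq waiting for
 * a free descriptor).
 */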
static void free_req(pending_req_t *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}

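/*
 * blkif->plug caches (with a reference) the request queue of the last
 * device we submitted I/O to, so that it can be kicked via its
 * unplug_fn once a batch of ring requests has been dispatched.
 */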
static void unplug_queue(blkif_t *blkif)
{
	if (blkif->plug == NULL)
		return;
	if (blkif->plug->unplug_fn)
		blkif->plug->unplug_fn(blkif->plug);
	blk_put_queue(blkif->plug);
	blkif->plug = NULL;
}

static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q == blkif->plug)
		return;
	unplug_queue(blkif);
	blk_get_queue(q);
	blkif->plug = q;
}

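/*
 * Unmap the grant-mapped pages of a completed request and remove their
 * m2p overrides, making the slots reusable by a later request.
 */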
static void fast_flush_area(pending_req_t *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/*
	 * Note: we iterate over invcount, not req->nr_pages, so we can't
	 * index using vaddr(req, i).
	 */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			printk(KERN_ALERT "Failed to remove M2P override for "
			       "%lx\n", (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}

/******************************************************************
 * SCHEDULER FUNCTIONS
 */

static void print_stats(blkif_t *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
}

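/*
 * Per-interface kernel thread. It sleeps until the frontend signals the
 * ring and a pending_req is free, then drains the ring via
 * do_block_io_op(). It also picks up changes in the size of the
 * underlying device and, if enabled, prints periodic stats.
 */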
int blkif_schedule(void *arg)
{
	blkif_t *blkif = arg;
	struct vbd *vbd = &blkif->vbd;

	blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_size(vbd)))
			vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;
		unplug_queue(blkif);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	blkif_put(blkif);

	return 0;
}

/******************************************************************
 * COMPLETION CALLBACK -- Called as bio->bi_end_io()
 */

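/*
 * Account one completed bio for a request. An -EOPNOTSUPP on a barrier
 * write records that barriers are unsupported (blkback_barrier) and
 * completes the request with BLKIF_RSP_EOPNOTSUPP. When the last bio
 * completes, unmap the grants and queue the response to the frontend.
 */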
static void __end_block_io_op(pending_req_t *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: write barrier op failed, not supported\n");
		blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		fast_flush_area(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}

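/* bio completion callback: forward the result and drop our bio reference. */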
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */

static void blkif_notify_work(blkif_t *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

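/* Event-channel interrupt: note that work is pending and wake the thread. */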
irqreturn_t blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */

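/*
 * Drain the ring: copy each request into a private copy (so a malicious
 * frontend can't change it under us), then hand it to
 * dispatch_rw_block_io(). Returns nonzero if we stopped early (no free
 * pending_req, or the thread should exit) while work remains on the ring.
 */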
static int do_block_io_op(blkif_t *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
			blkif->st_rd_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_WRITE_BARRIER:
			blkif->st_br_req++;
			/* fall through */
		case BLKIF_OP_WRITE:
			blkif->st_wr_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		default:
			/* A good sign something is wrong: sleep for a while to
			 * avoid excessive CPU consumption by a bad guest. */
			msleep(1);
			DPRINTK("error: unknown block io operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			free_req(pending_req);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}

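/*
 * Transmute a single ring request into one or more bios: validate the
 * segment count, grant-map the frontend's pages, check the I/O against
 * the VBD's extent (vbd_translate), then build and submit bios, packing
 * consecutive segments into each bio until bio_add_page() refuses more.
 */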
static void dispatch_rw_block_io(blkif_t *blkif,
				 struct blkif_request *req,
				 pending_req_t *pending_req)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct phys_req preq;
	struct {
		unsigned long buf;
		unsigned int  nsec;
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	int ret, i;
	int operation;

	switch (req->operation) {
	case BLKIF_OP_READ:
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		operation = WRITE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		operation = WRITE_BARRIER;
		break;
	default:
		operation = 0; /* make gcc happy */
		BUG();
	}

	/* Check that the number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%d)\n", nseg);
		goto fail_response;
	}

	preq.dev = req->handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects = 0;

	pending_req->blkif = blkif;
	pending_req->id = req->id;
	pending_req->operation = req->operation;
	pending_req->status = BLKIF_RSP_OKAY;
	pending_req->nr_pages = nseg;

	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;

		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;

		flags = GNTMAP_host_map;
		if (operation != READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref, blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
			blkbk->pending_page(pending_req, i), false);
		if (ret) {
			printk(KERN_ALERT "Failed to install M2P override for"
			       " %lx (ret: %d)\n",
			       (unsigned long)map[i].dev_bus_addr, ret);
			continue;
		}

		seg[i].buf = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}

	if (ret)
		goto fail_flush;

	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_flush;
	}

	plug_queue(blkif, preq.bdev);
	atomic_set(&pending_req->pendcnt, 1);
	blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number | (int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_put_bio;
		}

		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {
			if (bio) {
				atomic_inc(&pending_req->pendcnt);
				submit_bio(operation, bio);
			}

			bio = bio_alloc(GFP_KERNEL, nseg - i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			bio->bi_bdev = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io = end_block_io_op;
			bio->bi_sector = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	if (!bio) {
		/* An empty barrier: submit a zero-segment bio. */
		BUG_ON(operation != WRITE_BARRIER);
		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		bio->bi_bdev = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io = end_block_io_op;
		bio->bi_sector = -1;
	}

	submit_bio(operation, bio);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation == WRITE || operation == WRITE_BARRIER)
		blkif->st_wr_sect += preq.nr_sects;

	return;

 fail_flush:
	fast_flush_area(pending_req);
 fail_response:
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return;

 fail_put_bio:
	__end_block_io_op(pending_req, -EINVAL);
	if (bio)
		bio_put(bio);
	unplug_queue(blkif);
	msleep(1); /* back off a bit */
	return;
}

/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */

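/*
 * Queue a response on the appropriate ring (native or 32/64-bit compat
 * layout), push it, and notify the frontend if needed. Also re-arms the
 * request thread when the final check shows more requests pending.
 */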
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id = id;
	resp.operation = op;
	resp.status = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

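/*
 * Module init: allocate the pending_req pool plus its flat page and
 * grant-handle arrays (blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST
 * slots), then register the interface and xenbus code.
 */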
static int __init blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		printk(KERN_ALERT "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	/* kzalloc, rather than kmalloc + memset, so the pool starts zeroed. */
	blkbk->pending_reqs = kzalloc(sizeof(blkbk->pending_reqs[0]) *
				      blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
					       mmap_pages, GFP_KERNEL);
	blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) *
				       mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = blkif_interface_init();
	if (rc)
		goto failed_init;

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
		kfree(blkbk->pending_pages);
	}
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");