/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>

#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
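/*
 * DM_PG_INIT_DELAY_DEFAULT is a sentinel: __pg_init_all_paths() substitutes
 * DM_PG_INIT_DELAY_MSECS when no explicit pg_init_delay_msecs was configured.
 */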

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;
};

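/*
 * Recover the pgpath that embeds a given struct dm_path; the path
 * selectors hand back only the embedded dm_path.
 */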
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	const char *hw_handler_name;
	char *hw_handler_params;

	spinlock_t lock;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	unsigned repeat_count;		/* I/Os left before calling PS again */

	unsigned queue_io:1;		/* Must we queue all I/O? */
	unsigned queue_if_no_path:1;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
	unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
	unsigned pg_init_disabled:1;	/* pg_init is not currently allowed */

	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	unsigned queue_size;
	struct work_struct process_queued_ios;
	struct list_head queued_ios;

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
};

/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

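/* Signature shared by fail_path() and reinstate_path(), applied via action_dev(). */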
typedef int (*action_fn) (struct pgpath *pgpath);

#define MIN_IOS 256	/* Mempool size */

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;
	struct multipath *m = ti->private;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		if (m->hw_handler_name)
			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		INIT_LIST_HEAD(&m->queued_ios);
		spin_lock_init(&m->lock);
		m->queue_io = 1;
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->process_queued_ios, process_queued_ios);
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);
		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
		if (!m->mpio_pool) {
			kfree(m);
			return NULL;
		}
		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}

static int set_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio;

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		return -ENOMEM;

	memset(mpio, 0, sizeof(*mpio));
	info->ptr = mpio;

	return 0;
}

static void clear_mapinfo(struct multipath *m, union map_info *info)
{
	struct dm_mpath_io *mpio = info->ptr;

	info->ptr = NULL;
	mempool_free(mpio, m->mpio_pool);
}

/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static void __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	m->pg_init_count++;
	m->pg_init_required = 0;
	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;

	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths)
		goto failed;

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped, but set
	 * pg_init_delay_retry so we do not hammer controllers.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
				if (!bypassed)
					m->pg_init_delay_retry = 1;
				return;
			}
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
		dm_noflush_suspending(m->ti));
}

static int map_io(struct multipath *m, struct request *clone,
		  union map_info *map_context, unsigned was_queued)
{
	int r = DM_MAPIO_REMAPPED;
	size_t nr_bytes = blk_rq_bytes(clone);
	unsigned long flags;
	struct pgpath *pgpath;
	struct block_device *bdev;
	struct dm_mpath_io *mpio = map_context->ptr;

	spin_lock_irqsave(&m->lock, flags);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath ||
	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (was_queued)
		m->queue_size--;

	if ((pgpath && m->queue_io) ||
	    (!pgpath && m->queue_if_no_path)) {
		/* Queue for the daemon to resubmit */
		list_add_tail(&clone->queuelist, &m->queued_ios);
		m->queue_size++;
		if ((m->pg_init_required && !m->pg_init_in_progress) ||
		    !m->queue_io)
			queue_work(kmultipathd, &m->process_queued_ios);
		pgpath = NULL;
		r = DM_MAPIO_SUBMITTED;
	} else if (pgpath) {
		bdev = pgpath->path.dev->bdev;
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
	} else if (__must_push_back(m))
		r = DM_MAPIO_REQUEUE;
	else
		r = -EIO;	/* Failed */

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
					      nr_bytes);

	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	if (!m->queue_if_no_path && m->queue_size)
		queue_work(kmultipathd, &m->process_queued_ios);

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*-----------------------------------------------------------------
 * The multipath daemon is responsible for resubmitting queued ios.
 *---------------------------------------------------------------*/

static void dispatch_queued_ios(struct multipath *m)
{
	int r;
	unsigned long flags;
	union map_info *info;
	struct request *clone, *n;
	LIST_HEAD(cl);

	spin_lock_irqsave(&m->lock, flags);
	list_splice_init(&m->queued_ios, &cl);
	spin_unlock_irqrestore(&m->lock, flags);

	list_for_each_entry_safe(clone, n, &cl, queuelist) {
		list_del_init(&clone->queuelist);

		info = dm_get_rq_mapinfo(clone);

		r = map_io(m, clone, info, 1);
		if (r < 0) {
			clear_mapinfo(m, info);
			dm_kill_unmapped_request(clone, r);
		} else if (r == DM_MAPIO_REMAPPED)
			dm_dispatch_request(clone);
		else if (r == DM_MAPIO_REQUEUE) {
			clear_mapinfo(m, info);
			dm_requeue_unmapped_request(clone);
		}
	}
}

static void process_queued_ios(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, process_queued_ios);
	struct pgpath *pgpath = NULL;
	unsigned must_queue = 1;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	pgpath = m->current_pgpath;

	if ((pgpath && !m->queue_io) ||
	    (!pgpath && !m->queue_if_no_path))
		must_queue = 0;

	if (m->pg_init_required && !m->pg_init_in_progress && pgpath &&
	    !m->pg_init_disabled)
		__pg_init_all_paths(m);

	spin_unlock_irqrestore(&m->lock, flags);
	if (!must_queue)
		dispatch_queued_ios(m);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
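/*
 * For illustration only, a hypothetical table line in the grammar above
 * (device numbers and repeat counts are made up):
 *
 *   1 queue_if_no_path 0 1 1 round-robin 0 2 1 8:16 1000 8:32 1000
 *
 * i.e. one feature arg, no hardware handler, one priority group (also the
 * initial one) using round-robin with two paths and one selector arg
 * (the repeat count) per path.
 */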
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(dm_shift_arg(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	dm_consume_args(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;
	struct request_queue *q = NULL;
	const char *attached_handler_name;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (m->retain_attached_hw_handler || m->hw_handler_name)
		q = bdev_get_queue(p->path.dev->bdev);

	if (m->retain_attached_hw_handler) {
		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
		if (attached_handler_name) {
			/*
			 * Reset hw_handler_name to match the attached handler
			 * and clear any hw_handler_params associated with the
			 * ignored handler.
			 *
			 * NB. This modifies the table line to show the actual
			 * handler instead of the original table passed in.
			 */
			kfree(m->hw_handler_name);
			m->hw_handler_name = attached_handler_name;

			kfree(m->hw_handler_params);
			m->hw_handler_params = NULL;
		}
	}

	if (m->hw_handler_name) {
		/*
		 * Increments scsi_dh reference, even when using an
		 * already-attached handler.
		 */
		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			/*
			 * Already attached to different hw_handler:
			 * try to reattach with correct one.
			 */
			scsi_dh_detach(q);
			r = scsi_dh_attach(q, m->hw_handler_name);
		}

		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
					    "handler parameters";
				scsi_dh_detach(q);
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct dm_arg_set *as,
						   struct multipath *m)
{
	static struct dm_arg _args[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_args;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_args = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct dm_arg_set path_args;

		if (as->argc < nr_args) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_args;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		dm_consume_args(as, nr_args);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
	if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name),
				     "scsi_dh_%s", m->hw_handler_name)) {
		ti->error = "unknown hardware handler type";
		ret = -EINVAL;
		goto fail;
	}

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
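		/*
		 * Pack the arg count and then each arg into one buffer as
		 * consecutive NUL-separated strings (kzalloc supplies the
		 * NULs); e.g. two handler args "A B" yield "2\0A\0B".
		 */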
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	dm_consume_args(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 6, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "queue_if_no_path")) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
			m->retain_attached_hw_handler = 1;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_retries") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
		    (argc >= 1)) {
			r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target arguments */
	static struct dm_arg _args[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct dm_arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;

	return 0;

 bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 1;
	spin_unlock_irqrestore(&m->lock, flags);

	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work(&m->trigger_event);

	spin_lock_irqsave(&m->lock, flags);
	m->pg_init_disabled = 0;
	spin_unlock_irqrestore(&m->lock, flags);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Map cloned requests
 */
static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	int r;
	struct multipath *m = (struct multipath *) ti->private;

	if (set_mapinfo(m, map_context) < 0)
		/* ENOMEM, requeue */
		return DM_MAPIO_REQUEUE;

	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	r = map_io(m, clone, map_context, 0);
	if (r < 0 || r == DM_MAPIO_REQUEUE)
		clear_mapinfo(m, map_context);

	return r;
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++ && m->queue_size) {
		m->current_pgpath = NULL;
		queue_work(kmultipathd, &m->process_queued_ios);
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;
	char dummy;

	if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	unsigned delay_retry = 0;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = 1;
		/* fall through */
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!m->pg_init_required)
		pg->bypassed = 0;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still ongoing */
1250 goto out;
1251
1252 if (!m->pg_init_required)
1253 m->queue_io = 0;
1254
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001255 m->pg_init_delay_retry = delay_retry;
Kiyoshi Uedad0259bf2010-03-06 02:30:02 +00001256 queue_work(kmultipathd, &m->process_queued_ios);
1257
Kiyoshi Ueda2bded7b2010-03-06 02:32:13 +00001258 /*
1259 * Wake up any thread waiting to suspend.
1260 */
1261 wake_up(&m->pg_init_wait);
1262
Kiyoshi Uedad0259bf2010-03-06 02:30:02 +00001263out:
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001264 spin_unlock_irqrestore(&m->lock, flags);
1265}
1266
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001267static void activate_path(struct work_struct *work)
1268{
Chandra Seetharamane54f77d2009-06-22 10:12:12 +01001269 struct pgpath *pgpath =
Chandra Seetharaman4e2d19e2011-01-13 20:00:01 +00001270 container_of(work, struct pgpath, activate_path.work);
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001271
Chandra Seetharaman3ae31f62009-10-21 09:22:46 -07001272 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
Moger, Babu83c0d5d2010-03-06 02:29:45 +00001273 pg_init_done, pgpath);
Chandra Seetharamanbab7cfc2008-05-01 14:50:22 -07001274}
1275
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276/*
1277 * end_io handling
1278 */
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001279static int do_end_io(struct multipath *m, struct request *clone,
Alasdair G Kergon028867a2007-07-12 17:26:32 +01001280 int error, struct dm_mpath_io *mpio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281{
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001282 /*
1283 * We don't queue any clone request inside the multipath target
1284 * during end I/O handling, since those clone requests don't have
1285 * bio clones. If we queue them inside the multipath target,
1286 * we need to make bio clones, that requires memory allocation.
1287 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
1288 * don't have bio clones.)
1289 * Instead of queueing the clone request here, we queue the original
1290 * request into dm core, which will remake a clone request and
1291 * clone bios for it and resubmit it later.
1292 */
1293 int r = DM_ENDIO_REQUEUE;
Stefan Bader640eb3b2005-11-21 21:32:35 -08001294 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001296 if (!error && !clone->errors)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297 return 0; /* I/O complete */
1298
Mike Snitzere9d60f62013-09-19 12:13:58 -04001299 if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ) {
1300 if ((clone->cmd_flags & REQ_WRITE_SAME) &&
1301 !clone->q->limits.max_write_same_sectors) {
1302 struct queue_limits *limits;
1303
1304 /* device doesn't really support WRITE SAME, disable it */
1305 limits = dm_get_queue_limits(dm_table_get_md(m->ti->table));
1306 limits->max_write_same_sectors = 0;
1307 }
Mike Snitzer959eb4e2010-08-12 04:14:32 +01001308 return error;
Mike Snitzere9d60f62013-09-19 12:13:58 -04001309 }
Mike Snitzer959eb4e2010-08-12 04:14:32 +01001310
Chandra Seetharamancfae5c92008-05-01 14:50:11 -07001311 if (mpio->pgpath)
1312 fail_path(mpio->pgpath);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
Stefan Bader640eb3b2005-11-21 21:32:35 -08001314 spin_lock_irqsave(&m->lock, flags);
Hannes Reinecke751b2a72011-01-18 10:13:12 +01001315 if (!m->nr_valid_paths) {
1316 if (!m->queue_if_no_path) {
1317 if (!__must_push_back(m))
1318 r = -EIO;
1319 } else {
1320 if (error == -EBADE)
1321 r = error;
1322 }
1323 }
Stefan Bader640eb3b2005-11-21 21:32:35 -08001324 spin_unlock_irqrestore(&m->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325
Kiyoshi Uedaf40c67f2009-06-22 10:12:37 +01001326 return r;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327}
1328
static int multipath_end_io(struct dm_target *ti, struct request *clone,
                            int error, union map_info *map_context)
{
        struct multipath *m = ti->private;
        struct dm_mpath_io *mpio = map_context->ptr;
        struct pgpath *pgpath;
        struct path_selector *ps;
        int r;

        BUG_ON(!mpio);

        r = do_end_io(m, clone, error, mpio);
        pgpath = mpio->pgpath;
        if (pgpath) {
                ps = &pgpath->pg->ps;
                if (ps->type->end_io)
                        ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
        }
        clear_mapinfo(m, map_context);

        return r;
}

/*
 * Suspend can't complete until all the I/O is processed, so if
 * the last path fails we must error any remaining I/O.
 * Note that if freeze_bdev() fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
        struct multipath *m = (struct multipath *) ti->private;

        queue_if_no_path(m, 0, 1);
}

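/*
 * For instance (hypothetical map name "mpatha"), userspace can restore
 * the lost setting with a target message:
 *
 *   dmsetup message mpatha 0 queue_if_no_path
 */
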
static void multipath_postsuspend(struct dm_target *ti)
{
        struct multipath *m = ti->private;

        mutex_lock(&m->work_mutex);
        flush_multipath_work(m);
        mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
        struct multipath *m = (struct multipath *) ti->private;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);
        m->queue_if_no_path = m->saved_queue_if_no_path;
        spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [feature_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
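/*
 * As a hypothetical illustration, a table line for a map with no
 * features, no hardware handler, and one round-robin group holding a
 * single path might look like:
 *
 *   0 0 1 1 round-robin 0 1 1 8:16 1
 *
 * i.e. feature count, handler count, group count, initial group,
 * selector name and arg count, path count and per-path selector arg
 * count, path device, repeat_count.  Exact values depend on the
 * configuration and path selector.
 */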
static void multipath_status(struct dm_target *ti, status_type_t type,
                             unsigned status_flags, char *result, unsigned maxlen)
{
        int sz = 0;
        unsigned long flags;
        struct multipath *m = (struct multipath *) ti->private;
        struct priority_group *pg;
        struct pgpath *p;
        unsigned pg_num;
        char state;

        spin_lock_irqsave(&m->lock, flags);

        /* Features */
        if (type == STATUSTYPE_INFO)
                DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
        else {
                DMEMIT("%u ", m->queue_if_no_path +
                              (m->pg_init_retries > 0) * 2 +
                              (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
                              m->retain_attached_hw_handler);
                if (m->queue_if_no_path)
                        DMEMIT("queue_if_no_path ");
                if (m->pg_init_retries)
                        DMEMIT("pg_init_retries %u ", m->pg_init_retries);
                if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
                        DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
                if (m->retain_attached_hw_handler)
                        DMEMIT("retain_attached_hw_handler ");
        }

        if (!m->hw_handler_name || type == STATUSTYPE_INFO)
                DMEMIT("0 ");
        else
                DMEMIT("1 %s ", m->hw_handler_name);

        DMEMIT("%u ", m->nr_priority_groups);

        if (m->next_pg)
                pg_num = m->next_pg->pg_num;
        else if (m->current_pg)
                pg_num = m->current_pg->pg_num;
        else
                pg_num = (m->nr_priority_groups ? 1 : 0);

        DMEMIT("%u ", pg_num);

        switch (type) {
        case STATUSTYPE_INFO:
                list_for_each_entry(pg, &m->priority_groups, list) {
                        if (pg->bypassed)
                                state = 'D';    /* Disabled */
                        else if (pg == m->current_pg)
                                state = 'A';    /* Currently Active */
                        else
                                state = 'E';    /* Enabled */

                        DMEMIT("%c ", state);

                        if (pg->ps.type->status)
                                sz += pg->ps.type->status(&pg->ps, NULL, type,
                                                          result + sz,
                                                          maxlen - sz);
                        else
                                DMEMIT("0 ");

                        DMEMIT("%u %u ", pg->nr_pgpaths,
                               pg->ps.type->info_args);

                        list_for_each_entry(p, &pg->pgpaths, list) {
                                DMEMIT("%s %s %u ", p->path.dev->name,
                                       p->is_active ? "A" : "F",
                                       p->fail_count);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
                                              maxlen - sz);
                        }
                }
                break;

        case STATUSTYPE_TABLE:
                list_for_each_entry(pg, &m->priority_groups, list) {
                        DMEMIT("%s ", pg->ps.type->name);

                        if (pg->ps.type->status)
                                sz += pg->ps.type->status(&pg->ps, NULL, type,
                                                          result + sz,
                                                          maxlen - sz);
                        else
                                DMEMIT("0 ");

                        DMEMIT("%u %u ", pg->nr_pgpaths,
                               pg->ps.type->table_args);

                        list_for_each_entry(p, &pg->pgpaths, list) {
                                DMEMIT("%s ", p->path.dev->name);
                                if (pg->ps.type->status)
                                        sz += pg->ps.type->status(&pg->ps,
                                              &p->path, type, result + sz,
                                              maxlen - sz);
                        }
                }
                break;
        }

        spin_unlock_irqrestore(&m->lock, flags);
}

static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
        int r = -EINVAL;
        struct dm_dev *dev;
        struct multipath *m = (struct multipath *) ti->private;
        action_fn action;

        mutex_lock(&m->work_mutex);

        if (dm_suspended(ti)) {
                r = -EBUSY;
                goto out;
        }

        if (argc == 1) {
                if (!strcasecmp(argv[0], "queue_if_no_path")) {
                        r = queue_if_no_path(m, 1, 0);
                        goto out;
                } else if (!strcasecmp(argv[0], "fail_if_no_path")) {
                        r = queue_if_no_path(m, 0, 0);
                        goto out;
                }
        }

        if (argc != 2) {
                DMWARN("Unrecognised multipath message received.");
                goto out;
        }

        if (!strcasecmp(argv[0], "disable_group")) {
                r = bypass_pg_num(m, argv[1], 1);
                goto out;
        } else if (!strcasecmp(argv[0], "enable_group")) {
                r = bypass_pg_num(m, argv[1], 0);
                goto out;
        } else if (!strcasecmp(argv[0], "switch_group")) {
                r = switch_pg_num(m, argv[1]);
                goto out;
        } else if (!strcasecmp(argv[0], "reinstate_path"))
                action = reinstate_path;
        else if (!strcasecmp(argv[0], "fail_path"))
                action = fail_path;
        else {
                DMWARN("Unrecognised multipath message received.");
                goto out;
        }

        r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
        if (r) {
                DMWARN("message: error getting device %s",
                       argv[1]);
                goto out;
        }

        r = action_dev(m, dev, action);

        dm_put_device(ti, dev);

out:
        mutex_unlock(&m->work_mutex);
        return r;
}

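/*
 * Sample invocations via dmsetup (hypothetical map name and path):
 *
 *   dmsetup message mpatha 0 fail_path 8:32
 *   dmsetup message mpatha 0 reinstate_path 8:32
 *   dmsetup message mpatha 0 switch_group 2
 */
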
static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
                           unsigned long arg)
{
        struct multipath *m = ti->private;
        struct pgpath *pgpath;
        struct block_device *bdev;
        fmode_t mode;
        unsigned long flags;
        int r;

        bdev = NULL;
        mode = 0;
        r = 0;

        spin_lock_irqsave(&m->lock, flags);

        if (!m->current_pgpath)
                __choose_pgpath(m, 0);

        pgpath = m->current_pgpath;

        if (pgpath) {
                bdev = pgpath->path.dev->bdev;
                mode = pgpath->path.dev->mode;
        }

        if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
                r = -ENOTCONN;
        else if (!bdev)
                r = -EIO;

        spin_unlock_irqrestore(&m->lock, flags);

        /*
         * Only pass ioctls through if the device sizes match exactly.
         */
        if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
                int err = scsi_verify_blk_ioctl(NULL, cmd);
                if (err)
                        r = err;
        }

        if (r == -ENOTCONN && !fatal_signal_pending(current))
                queue_work(kmultipathd, &m->process_queued_ios);

        return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}

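/*
 * A reading of the logic above (not a guarantee): an ioctl that arrives
 * while the chosen path group is still initializing gets -ENOTCONN, and
 * kicking process_queued_ios lets the caller retry once pg_init
 * completes; when device sizes don't match, only the ioctls that
 * scsi_verify_blk_ioctl() permits are passed through.
 */
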
static int multipath_iterate_devices(struct dm_target *ti,
                                     iterate_devices_callout_fn fn, void *data)
{
        struct multipath *m = ti->private;
        struct priority_group *pg;
        struct pgpath *p;
        int ret = 0;

        list_for_each_entry(pg, &m->priority_groups, list) {
                list_for_each_entry(p, &pg->pgpaths, list) {
                        ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
                        if (ret)
                                goto out;
                }
        }

out:
        return ret;
}

static int __pgpath_busy(struct pgpath *pgpath)
{
        struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

        return dm_underlying_device_busy(q);
}

/*
 * We return "busy" only when we can map I/Os but the underlying devices
 * are busy (so even if we mapped I/Os now, the I/Os would wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy".  Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
        int busy = 0, has_active = 0;
        struct multipath *m = ti->private;
        struct priority_group *pg;
        struct pgpath *pgpath;
        unsigned long flags;

        spin_lock_irqsave(&m->lock, flags);

        /* Guess which priority_group will be used at next mapping time */
        if (unlikely(!m->current_pgpath && m->next_pg))
                pg = m->next_pg;
        else if (likely(m->current_pg))
                pg = m->current_pg;
        else
                /*
                 * We don't know which pg will be used at next mapping time.
                 * We don't call __choose_pgpath() here to avoid triggering
                 * pg_init just by busy checking.
                 * So we don't know whether the underlying devices we will be
                 * using at next mapping time are busy or not.  Just try mapping.
                 */
                goto out;

        /*
         * If there is at least one non-busy active path, the path selector
         * will be able to select it.  So we consider such a pg as not busy.
         */
        busy = 1;
        list_for_each_entry(pgpath, &pg->pgpaths, list)
                if (pgpath->is_active) {
                        has_active = 1;

                        if (!__pgpath_busy(pgpath)) {
                                busy = 0;
                                break;
                        }
                }

        if (!has_active)
                /*
                 * No active path in this pg, so this pg won't be used and
                 * the current_pg will be changed at next mapping time.
                 * We need to try mapping to determine it.
                 */
                busy = 0;

out:
        spin_unlock_irqrestore(&m->lock, flags);

        return busy;
}

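/*
 * Decision summary for multipath_busy() (derived from the code above):
 *
 *   next pg unknown                  -> not busy (just try mapping)
 *   guessed pg has no active paths   -> not busy (pg changes at map time)
 *   any active path not busy         -> not busy (selector can pick it)
 *   all active paths busy            -> busy
 */
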
/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
        .name = "multipath",
        .version = {1, 6, 0},
        .module = THIS_MODULE,
        .ctr = multipath_ctr,
        .dtr = multipath_dtr,
        .map_rq = multipath_map,
        .rq_end_io = multipath_end_io,
        .presuspend = multipath_presuspend,
        .postsuspend = multipath_postsuspend,
        .resume = multipath_resume,
        .status = multipath_status,
        .message = multipath_message,
        .ioctl = multipath_ioctl,
        .iterate_devices = multipath_iterate_devices,
        .busy = multipath_busy,
};

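/*
 * Note that the target wires up .map_rq/.rq_end_io rather than
 * .map/.end_io, making multipath a request-based target: dm core clones
 * whole requests, not bios (see the do_end_io() comment above).
 */
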
static int __init dm_multipath_init(void)
{
        int r;

        /* allocate a slab for the dm_mpath_io structs */
        _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
        if (!_mpio_cache)
                return -ENOMEM;

        r = dm_register_target(&multipath_target);
        if (r < 0) {
                DMERR("register failed %d", r);
                kmem_cache_destroy(_mpio_cache);
                return -EINVAL;
        }

        kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
        if (!kmultipathd) {
                DMERR("failed to create workqueue kmpathd");
                dm_unregister_target(&multipath_target);
                kmem_cache_destroy(_mpio_cache);
                return -ENOMEM;
        }

        /*
         * A separate workqueue is used to handle the device handlers
         * to avoid overloading the existing workqueue.  Overloading the
         * old workqueue would also create a bottleneck in the
         * path of the storage hardware device activation.
         */
        kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
                                                  WQ_MEM_RECLAIM);
        if (!kmpath_handlerd) {
                DMERR("failed to create workqueue kmpath_handlerd");
                destroy_workqueue(kmultipathd);
                dm_unregister_target(&multipath_target);
                kmem_cache_destroy(_mpio_cache);
                return -ENOMEM;
        }

        DMINFO("version %u.%u.%u loaded",
               multipath_target.version[0], multipath_target.version[1],
               multipath_target.version[2]);

        return r;
}

static void __exit dm_multipath_exit(void)
{
        destroy_workqueue(kmpath_handlerd);
        destroy_workqueue(kmultipathd);

        dm_unregister_target(&multipath_target);
        kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");