From 950e1951d452d08a5bd95d82d4cad7fa97fa4464 Mon Sep 17 00:00:00 2001
From: Hannes Reinecke <hare@suse.de>
Date: Thu, 19 Nov 2009 13:54:56 +0100
Subject: Accept failed paths for multipath maps
References: bnc#458037,bnc#458393
Patch-mainline: Not yet

The multipath kernel module is rejecting any map with an invalid
device. However, as the multipathd is processing the events serially
it will try to push a map with invalid devices if more than one
device failed at the same time.
So we can as well accept those maps and make sure to mark the
paths as down.

Signed-off-by: Hannes Reinecke <hare@suse.de>
---
 drivers/md/dm-mpath.c |   71 ++++++++++++++++++++++++++++++++++++++++----------
 drivers/md/dm-mpath.h |    1 +
 drivers/md/dm-table.c |    7 +++-
 3 files changed, 64 insertions(+), 15 deletions(-)

--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -146,7 +146,8 @@ static void deactivate_path(struct work_
 	struct pgpath *pgpath =
 		container_of(work, struct pgpath, deactivate_path);
 
-	blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
+	if (pgpath->path.dev)
+		blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue);
 }
 
 static struct priority_group *alloc_priority_group(void)
@@ -275,6 +276,11 @@ static int __choose_path_in_pg(struct mu
 
 	m->current_pgpath = path_to_pgpath(path);
 
+	if (!m->current_pgpath->path.dev) {
+		m->current_pgpath = NULL;
+		return -ENODEV;
+	}
+
 	if (m->current_pg != pg)
 		__switch_pg(m, m->current_pgpath);
 
@@ -593,6 +599,7 @@ static struct pgpath *parse_path(struct
 {
 	int r;
 	struct pgpath *p;
+	char *path;
 	struct multipath *m = ti->private;
 
 	/* we need at least a path arg */
@@ -605,14 +612,37 @@ static struct pgpath *parse_path(struct
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
-	r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
+	path = shift(as);
+	r = dm_get_device(ti, path, dm_table_get_mode(ti->table),
 			  &p->path.dev);
 	if (r) {
-		ti->error = "error getting device";
-		goto bad;
+		unsigned major, minor;
+
+		/* Try to add a failed device */
+		if (r == -ENXIO && sscanf(path, "%u:%u", &major, &minor) == 2) {
+			dev_t dev;
+
+			/* Extract the major/minor numbers */
+			dev = MKDEV(major, minor);
+			if (MAJOR(dev) != major || MINOR(dev) != minor) {
+				/* Nice try, didn't work */
+				DMWARN("Invalid device path %s", path);
+				ti->error = "error converting devnum";
+				goto bad;
+			}
+			DMWARN("adding disabled device %d:%d", major, minor);
+			p->path.dev = NULL;
+			format_dev_t(p->path.pdev, dev);
+			p->is_active = 0;
+		} else {
+			ti->error = "error getting device";
+			goto bad;
+		}
+	} else {
+		memcpy(p->path.pdev, p->path.dev->name, 16);
 	}
 
-	if (m->hw_handler_name) {
+	if (m->hw_handler_name && p->path.dev) {
 		struct request_queue *q = bdev_get_queue(p->path.dev->bdev);
 
 		r = scsi_dh_attach(q, m->hw_handler_name);
@@ -649,6 +679,11 @@ static struct pgpath *parse_path(struct
 		goto bad;
 	}
 
+	if (!p->is_active) {
+		ps->type->fail_path(ps, &p->path);
+		p->fail_count++;
+		m->nr_valid_paths--;
+	}
 	return p;
 
 bad:
@@ -978,7 +1013,7 @@ static int fail_path(struct pgpa
 	if (!pgpath->is_active)
 		goto out;
 
-	DMWARN("Failing path %s.", pgpath->path.dev->name);
+	DMWARN("Failing path %s.", pgpath->path.pdev);
 
 	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
 	pgpath->is_active = 0;
@@ -990,7 +1025,7 @@ static int fail_path(struct pgpa
 	m->current_pgpath = NULL;
 
 	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
-		       pgpath->path.dev->name, m->nr_valid_paths);
+		       pgpath->path.pdev, m->nr_valid_paths);
 
 	schedule_work(&m->trigger_event);
 	queue_work(kmultipathd, &pgpath->deactivate_path);
@@ -1015,6 +1050,12 @@ static int reinstate_path(struct pgpath
 	if (pgpath->is_active)
 		goto out;
 
+	if (!pgpath->path.dev) {
+		DMWARN("Cannot reinstate disabled path %s", pgpath->path.pdev);
+		r = -ENODEV;
+		goto out;
+	}
+
 	if (!pgpath->pg->ps.type->reinstate_path) {
 		DMWARN("Reinstate path not supported by path selector %s",
 		       pgpath->pg->ps.type->name);
@@ -1037,7 +1078,7 @@ static int reinstate_path(struct pgpath
 	}
 
 	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
-		       pgpath->path.dev->name, m->nr_valid_paths);
+		       pgpath->path.pdev, m->nr_valid_paths);
 
 	schedule_work(&m->trigger_event);
 
@@ -1057,6 +1098,9 @@ static int action_dev(struct multipath *
 	struct pgpath *pgpath;
 	struct priority_group *pg;
 
+	if (!dev)
+		return 0;
+
 	list_for_each_entry(pg, &m->priority_groups, list) {
 		list_for_each_entry(pgpath, &pg->pgpaths, list) {
 			if (pgpath->path.dev == dev)
@@ -1241,8 +1285,9 @@ static void activate_path(struct work_st
 	struct pgpath *pgpath =
 		container_of(work, struct pgpath, activate_path);
 
-	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
-				pg_init_done, pgpath);
+	if (pgpath->path.dev)
+		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
+				 pg_init_done, pgpath);
 }
 
 /*
@@ -1426,7 +1471,7 @@ static int multipath_status(struct dm_ta
 			       pg->ps.type->info_args);
 
 		list_for_each_entry(p, &pg->pgpaths, list) {
-			DMEMIT("%s %s %u ", p->path.dev->name,
+			DMEMIT("%s %s %u ", p->path.pdev,
 			       p->is_active ? "A" : "F",
 			       p->fail_count);
 			if (pg->ps.type->status)
@@ -1452,7 +1497,7 @@ static int multipath_status(struct dm_ta
 			       pg->ps.type->table_args);
 
 		list_for_each_entry(p, &pg->pgpaths, list) {
-			DMEMIT("%s ", p->path.dev->name);
+			DMEMIT("%s ", p->path.pdev);
 			if (pg->ps.type->status)
 				sz += pg->ps.type->status(&pg->ps,
 				      &p->path, type, result + sz,
@@ -1544,7 +1589,7 @@ static int multipath_ioctl(struct dm_tar
 	if (!m->current_pgpath)
 		__choose_pgpath(m, 0);
 
-	if (m->current_pgpath) {
+	if (m->current_pgpath && m->current_pgpath->path.dev) {
 		bdev = m->current_pgpath->path.dev->bdev;
 		mode = m->current_pgpath->path.dev->mode;
 	}
--- a/drivers/md/dm-mpath.h
+++ b/drivers/md/dm-mpath.h
@@ -12,6 +12,7 @@
 struct dm_dev;
 
 struct dm_path {
+	char pdev[16];		/* Requested physical device */
 	struct dm_dev *dev;	/* Read-only */
 	void *pscontext;	/* For path-selector use */
 };
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -541,9 +541,12 @@ int dm_get_device(struct dm_target *ti,
  */
 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 {
-	struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
-						  dm_dev);
+	struct dm_dev_internal *dd;
+
+	if (!d)
+		return;
+
+	dd = container_of(d, struct dm_dev_internal, dm_dev);
 	if (atomic_dec_and_test(&dd->count)) {
 		close_dev(dd, ti->table->md);
 		list_del(&dd->list);