Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c         |  6
-rw-r--r--  block/blk-core.c           |  8
-rw-r--r--  block/blk-exec.c           |  1
-rw-r--r--  block/blk-integrity.c      |  4
-rw-r--r--  block/blk-ioc.c            |  3
-rw-r--r--  block/bsg.c                | 29
-rw-r--r--  block/cfq-iosched.c        |  3
-rw-r--r--  block/elevator.c           | 39
-rw-r--r--  block/genhd.c              | 76
-rw-r--r--  block/partition-generic.c  |  6
-rw-r--r--  block/partitions/check.c   | 37
-rw-r--r--  block/partitions/check.h   |  4
-rw-r--r--  block/partitions/efi.c     | 12
-rw-r--r--  block/partitions/mac.c     |  4
-rw-r--r--  block/partitions/msdos.c   | 11
15 files changed, 164 insertions, 79 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 87ea95d1f53..b2b9837f9dd 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -459,7 +459,6 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
{
struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
struct blkcg_gq *blkg;
- struct hlist_node *n;
int i;
mutex_lock(&blkcg_pol_mutex);
@@ -470,7 +469,7 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
* stat updates. This is a debug feature which shouldn't exist
* anyway. If you get hit by a race, retry.
*/
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -518,11 +517,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
bool show_total)
{
struct blkcg_gq *blkg;
- struct hlist_node *n;
u64 total = 0;
rcu_read_lock();
- hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
spin_lock_irq(blkg->q->queue_lock);
if (blkcg_policy_enabled(blkg->q, pol))
total += prfill(sf, blkg->pd[pol->plid], data);
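The blk-cgroup hunks above reflect the hlist API change in which hlist_for_each_entry() and hlist_for_each_entry_rcu() dropped their separate struct hlist_node * cursor and now iterate over the containing entries directly. A minimal sketch of the new calling convention, using illustrative names (struct item, sum_bucket) that are not part of this patch:

#include <linux/list.h>

struct item {
	int value;
	struct hlist_node node;
};

static int sum_bucket(struct hlist_head *bucket)
{
	struct item *it;
	int total = 0;

	/*
	 * Pre-change API needed a separate cursor:
	 *	struct hlist_node *n;
	 *	hlist_for_each_entry(it, n, bucket, node)
	 *		total += it->value;
	 */

	/* New API: the loop variable is the entry itself. */
	hlist_for_each_entry(it, bucket, node)
		total += it->value;

	return total;
}

The same substitution appears below in blk-ioc.c, bsg.c and cfq-iosched.c.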
diff --git a/block/blk-core.c b/block/blk-core.c
index 66d31687cf6..074b758efc4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1473,6 +1473,11 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
*/
blk_queue_bounce(q, &bio);
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
spin_lock_irq(q->queue_lock);
where = ELEVATOR_INSERT_FLUSH;
@@ -1706,9 +1711,6 @@ generic_make_request_checks(struct bio *bio)
*/
blk_partition_remap(bio);
- if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
- goto end_io;
-
if (bio_check_eod(bio, nr_sectors))
goto end_io;
diff --git a/block/blk-exec.c b/block/blk-exec.c
index f634de772b0..e7062139612 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/sched/sysctl.h>
#include "blk.h"
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index da2a818c3a9..dabd221857e 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -420,6 +420,8 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
} else
bi->name = bi_unsupported_name;
+ disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+
return 0;
}
EXPORT_SYMBOL(blk_integrity_register);
@@ -438,6 +440,8 @@ void blk_integrity_unregister(struct gendisk *disk)
if (!disk || !disk->integrity)
return;
+ disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
+
bi = disk->integrity;
kobject_uevent(&bi->kobj, KOBJ_REMOVE);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fab4cdd3f7b..9c4bb8266bc 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -164,7 +164,6 @@ EXPORT_SYMBOL(put_io_context);
*/
void put_io_context_active(struct io_context *ioc)
{
- struct hlist_node *n;
unsigned long flags;
struct io_cq *icq;
@@ -180,7 +179,7 @@ void put_io_context_active(struct io_context *ioc)
*/
retry:
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
- hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+ hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
if (icq->flags & ICQ_EXITED)
continue;
if (spin_trylock(icq->q->queue_lock)) {
diff --git a/block/bsg.c b/block/bsg.c
index ff64ae3bace..420a5a9f1b2 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -800,11 +800,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
struct bsg_device *bd;
- struct hlist_node *entry;
mutex_lock(&bsg_mutex);
- hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+ hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
if (bd->queue == q) {
atomic_inc(&bd->ref_count);
goto found;
@@ -997,7 +996,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
{
struct bsg_class_device *bcd;
dev_t dev;
- int ret, minor;
+ int ret;
struct device *class_dev = NULL;
const char *devname;
@@ -1017,23 +1016,16 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
mutex_lock(&bsg_mutex);
- ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
- if (!ret) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
- if (ret < 0)
+ ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -ENOSPC) {
+ printk(KERN_ERR "bsg: too many bsg devices\n");
+ ret = -EINVAL;
+ }
goto unlock;
-
- if (minor >= BSG_MAX_DEVS) {
- printk(KERN_ERR "bsg: too many bsg devices\n");
- ret = -EINVAL;
- goto remove_idr;
}
- bcd->minor = minor;
+ bcd->minor = ret;
bcd->queue = q;
bcd->parent = get_device(parent);
bcd->release = release;
@@ -1059,8 +1051,7 @@ unregister_class_dev:
device_unregister(class_dev);
put_dev:
put_device(parent);
-remove_idr:
- idr_remove(&bsg_minor_idr, minor);
+ idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
mutex_unlock(&bsg_mutex);
return ret;
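The bsg minor allocation above is converted from the two-step idr_pre_get()/idr_get_new() interface to idr_alloc(), which takes the allowed [start, end) range and GFP flags directly and returns either the new id or a negative errno (-ENOMEM, or -ENOSPC when the range is exhausted), so the explicit BSG_MAX_DEVS check disappears. A minimal sketch of the pattern, using an illustrative my_idr/register_obj that are not part of this patch:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDR(my_idr);

static int register_obj(void *obj, int max_ids)
{
	int id;

	/*
	 * Old interface:
	 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
	 *		return -ENOMEM;
	 *	ret = idr_get_new(&my_idr, obj, &id);
	 *	...then bounds-check id by hand...
	 *
	 * New interface: one call, range enforced, id (or -errno) returned.
	 */
	id = idr_alloc(&my_idr, obj, 0, max_ids, GFP_KERNEL);
	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */

	return id;
}

The genhd.c hunk below (blk_alloc_devt) makes the same conversion, mapping -ENOSPC back to the old -EBUSY return value.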
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1bf9307e8f5..4f0ade74cfd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1697,7 +1697,6 @@ static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
{
struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
struct blkcg_gq *blkg;
- struct hlist_node *n;
if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
return -EINVAL;
@@ -1709,7 +1708,7 @@ static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
else
blkcg->cfq_leaf_weight = val;
- hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
struct cfq_group *cfqg = blkg_to_cfqg(blkg);
if (!cfqg)
diff --git a/block/elevator.c b/block/elevator.c
index 11683bb10b7..a0ffdd943c9 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -95,14 +95,14 @@ static void elevator_put(struct elevator_type *e)
module_put(e->elevator_owner);
}
-static struct elevator_type *elevator_get(const char *name)
+static struct elevator_type *elevator_get(const char *name, bool try_loading)
{
struct elevator_type *e;
spin_lock(&elv_list_lock);
e = elevator_find(name);
- if (!e) {
+ if (!e && try_loading) {
spin_unlock(&elv_list_lock);
request_module("%s-iosched", name);
spin_lock(&elv_list_lock);
@@ -131,6 +131,22 @@ static int __init elevator_setup(char *str)
__setup("elevator=", elevator_setup);
+/* called during boot to load the elevator chosen by the elevator param */
+void __init load_default_elevator_module(void)
+{
+ struct elevator_type *e;
+
+ if (!chosen_elevator[0])
+ return;
+
+ spin_lock(&elv_list_lock);
+ e = elevator_find(chosen_elevator);
+ spin_unlock(&elv_list_lock);
+
+ if (!e)
+ request_module("%s-iosched", chosen_elevator);
+}
+
static struct kobj_type elv_ktype;
static struct elevator_queue *elevator_alloc(struct request_queue *q,
@@ -177,25 +193,30 @@ int elevator_init(struct request_queue *q, char *name)
q->boundary_rq = NULL;
if (name) {
- e = elevator_get(name);
+ e = elevator_get(name, true);
if (!e)
return -EINVAL;
}
+ /*
+ * Use the default elevator specified by config boot param or
+ * config option. Don't try to load modules as we could be running
+ * off async and request_module() isn't allowed from async.
+ */
if (!e && *chosen_elevator) {
- e = elevator_get(chosen_elevator);
+ e = elevator_get(chosen_elevator, false);
if (!e)
printk(KERN_ERR "I/O scheduler %s not found\n",
chosen_elevator);
}
if (!e) {
- e = elevator_get(CONFIG_DEFAULT_IOSCHED);
+ e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
if (!e) {
printk(KERN_ERR
"Default I/O scheduler not found. " \
"Using noop.\n");
- e = elevator_get("noop");
+ e = elevator_get("noop", false);
}
}
@@ -252,10 +273,10 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
struct elevator_queue *e = q->elevator;
- struct hlist_node *entry, *next;
+ struct hlist_node *next;
struct request *rq;
- hash_for_each_possible_safe(e->hash, rq, entry, next, hash, offset) {
+ hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
BUG_ON(!ELV_ON_HASH(rq));
if (unlikely(!rq_mergeable(rq))) {
@@ -936,7 +957,7 @@ int elevator_change(struct request_queue *q, const char *name)
return -ENXIO;
strlcpy(elevator_name, name, sizeof(elevator_name));
- e = elevator_get(strstrip(elevator_name));
+ e = elevator_get(strstrip(elevator_name), true);
if (!e) {
printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
return -EINVAL;
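elevator_get() above gains a try_loading flag because, per the comment added in elevator_init(), initialisation may run off async where request_module() is not allowed; the scheduler chosen on the kernel command line is instead loaded up front by the new load_default_elevator_module(). A sketch of the resulting look-up-then-maybe-load pattern, with registry_lock/registry_find as illustrative stand-ins for elv_list_lock/elevator_find:

#include <linux/elevator.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(registry_lock);

/* stand-in for elevator_find(); call with registry_lock held */
static struct elevator_type *registry_find(const char *name);

static struct elevator_type *lookup_or_load(const char *name, bool try_loading)
{
	struct elevator_type *e;

	spin_lock(&registry_lock);
	e = registry_find(name);
	if (!e && try_loading) {
		/*
		 * request_module() may sleep, so drop the spinlock first;
		 * async callers avoid it entirely by passing false.
		 */
		spin_unlock(&registry_lock);
		request_module("%s-iosched", name);
		spin_lock(&registry_lock);
		e = registry_find(name);
	}
	spin_unlock(&registry_lock);
	return e;
}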
diff --git a/block/genhd.c b/block/genhd.c
index 9a289d7c84b..3c001fba80c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -18,6 +18,7 @@
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/log2.h>
+#include <linux/pm_runtime.h>
#include "blk.h"
@@ -25,7 +26,7 @@ static DEFINE_MUTEX(block_class_lock);
struct kobject *block_depr;
/* for extended dynamic devt allocation, currently only one major is used */
-#define MAX_EXT_DEVT (1 << MINORBITS)
+#define NR_EXT_DEVT (1 << MINORBITS)
/* For extended devt allocation. ext_devt_mutex prevents look up
* results from going away underneath its user.
@@ -35,6 +36,8 @@ static DEFINE_IDR(ext_devt_idr);
static struct device_type disk_type;
+static void disk_check_events(struct disk_events *ev,
+ unsigned int *clearing_ptr);
static void disk_alloc_events(struct gendisk *disk);
static void disk_add_events(struct gendisk *disk);
static void disk_del_events(struct gendisk *disk);
@@ -408,7 +411,7 @@ static int blk_mangle_minor(int minor)
int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
{
struct gendisk *disk = part_to_disk(part);
- int idx, rc;
+ int idx;
/* in consecutive minor range? */
if (part->partno < disk->minors) {
@@ -417,19 +420,11 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
}
/* allocate ext devt */
- do {
- if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
- return -ENOMEM;
- rc = idr_get_new(&ext_devt_idr, part, &idx);
- } while (rc == -EAGAIN);
-
- if (rc)
- return rc;
-
- if (idx > MAX_EXT_DEVT) {
- idr_remove(&ext_devt_idr, idx);
- return -EBUSY;
- }
+ mutex_lock(&ext_devt_mutex);
+ idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
+ mutex_unlock(&ext_devt_mutex);
+ if (idx < 0)
+ return idx == -ENOSPC ? -EBUSY : idx;
*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
return 0;
@@ -532,6 +527,14 @@ static void register_disk(struct gendisk *disk)
return;
}
}
+
+ /*
+ * avoid probable deadlock caused by allocating memory with
+ * GFP_KERNEL in runtime_resume callback of its all ancestor
+ * devices
+ */
+ pm_runtime_set_memalloc_noio(ddev, true);
+
disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
@@ -644,7 +647,6 @@ void del_gendisk(struct gendisk *disk)
disk_part_iter_exit(&piter);
invalidate_partition(disk, 0);
- blk_free_devt(disk_to_dev(disk)->devt);
set_capacity(disk, 0);
disk->flags &= ~GENHD_FL_UP;
@@ -661,7 +663,9 @@ void del_gendisk(struct gendisk *disk)
disk->driverfs_dev = NULL;
if (!sysfs_deprecated)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+ pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
device_del(disk_to_dev(disk));
+ blk_free_devt(disk_to_dev(disk)->devt);
}
EXPORT_SYMBOL(del_gendisk);
@@ -1549,6 +1553,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
const struct block_device_operations *bdops = disk->fops;
struct disk_events *ev = disk->ev;
unsigned int pending;
+ unsigned int clearing = mask;
if (!ev) {
/* for drivers still using the old ->media_changed method */
@@ -1558,34 +1563,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
return 0;
}
- /* tell the workfn about the events being cleared */
+ disk_block_events(disk);
+
+ /*
+ * store the union of mask and ev->clearing on the stack so that the
+ * race with disk_flush_events does not cause ambiguity (ev->clearing
+ * can still be modified even if events are blocked).
+ */
spin_lock_irq(&ev->lock);
- ev->clearing |= mask;
+ clearing |= ev->clearing;
+ ev->clearing = 0;
spin_unlock_irq(&ev->lock);
- /* uncondtionally schedule event check and wait for it to finish */
- disk_block_events(disk);
- queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
- flush_delayed_work(&ev->dwork);
- __disk_unblock_events(disk, false);
+ disk_check_events(ev, &clearing);
+ /*
+ * if ev->clearing is not 0, the disk_flush_events got called in the
+ * middle of this function, so we want to run the workfn without delay.
+ */
+ __disk_unblock_events(disk, ev->clearing ? true : false);
/* then, fetch and clear pending events */
spin_lock_irq(&ev->lock);
- WARN_ON_ONCE(ev->clearing & mask); /* cleared by workfn */
pending = ev->pending & mask;
ev->pending &= ~mask;
spin_unlock_irq(&ev->lock);
+ WARN_ON_ONCE(clearing & mask);
return pending;
}
+/*
+ * Separate this part out so that a different pointer for clearing_ptr can be
+ * passed in for disk_clear_events.
+ */
static void disk_events_workfn(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
+
+ disk_check_events(ev, &ev->clearing);
+}
+
+static void disk_check_events(struct disk_events *ev,
+ unsigned int *clearing_ptr)
+{
struct gendisk *disk = ev->disk;
char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
- unsigned int clearing = ev->clearing;
+ unsigned int clearing = *clearing_ptr;
unsigned int events;
unsigned long intv;
int nr_events = 0, i;
@@ -1598,7 +1622,7 @@ static void disk_events_workfn(struct work_struct *work)
events &= ~ev->pending;
ev->pending |= events;
- ev->clearing &= ~clearing;
+ *clearing_ptr &= ~clearing;
intv = disk_events_poll_jiffies(disk);
if (!ev->block && intv)
diff --git a/block/partition-generic.c b/block/partition-generic.c
index f1d14519cc0..789cdea0589 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -249,11 +249,11 @@ void delete_partition(struct gendisk *disk, int partno)
if (!part)
return;
- blk_free_devt(part_devt(part));
rcu_assign_pointer(ptbl->part[partno], NULL);
rcu_assign_pointer(ptbl->last_lookup, NULL);
kobject_put(part->holder_dir);
device_del(part_to_dev(part));
+ blk_free_devt(part_devt(part));
hd_struct_put(part);
}
@@ -418,7 +418,7 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
int p, highest, res;
rescan:
if (state && !IS_ERR(state)) {
- kfree(state);
+ free_partitions(state);
state = NULL;
}
@@ -525,7 +525,7 @@ rescan:
md_autodetect_dev(part_to_dev(part)->devt);
#endif
}
- kfree(state);
+ free_partitions(state);
return 0;
}
diff --git a/block/partitions/check.c b/block/partitions/check.c
index bc908672c97..19ba207ea7d 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -14,6 +14,7 @@
*/
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <linux/ctype.h>
#include <linux/genhd.h>
@@ -106,18 +107,45 @@ static int (*check_part[])(struct parsed_partitions *) = {
NULL
};
+static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
+{
+ struct parsed_partitions *state;
+ int nr;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ nr = disk_max_parts(hd);
+ state->parts = vzalloc(nr * sizeof(state->parts[0]));
+ if (!state->parts) {
+ kfree(state);
+ return NULL;
+ }
+
+ state->limit = nr;
+
+ return state;
+}
+
+void free_partitions(struct parsed_partitions *state)
+{
+ vfree(state->parts);
+ kfree(state);
+}
+
struct parsed_partitions *
check_partition(struct gendisk *hd, struct block_device *bdev)
{
struct parsed_partitions *state;
int i, res, err;
- state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+ state = allocate_partitions(hd);
if (!state)
return NULL;
state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
if (!state->pp_buf) {
- kfree(state);
+ free_partitions(state);
return NULL;
}
state->pp_buf[0] = '\0';
@@ -128,10 +156,9 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
if (isdigit(state->name[strlen(state->name)-1]))
sprintf(state->name, "p");
- state->limit = disk_max_parts(hd);
i = res = err = 0;
while (!res && check_part[i]) {
- memset(&state->parts, 0, sizeof(state->parts));
+ memset(state->parts, 0, state->limit * sizeof(state->parts[0]));
res = check_part[i++](state);
if (res < 0) {
/* We have hit an I/O error which we don't report now.
@@ -161,6 +188,6 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
printk(KERN_INFO "%s", state->pp_buf);
free_page((unsigned long)state->pp_buf);
- kfree(state);
+ free_partitions(state);
return ERR_PTR(res);
}
diff --git a/block/partitions/check.h b/block/partitions/check.h
index 52b100311ec..eade17ea910 100644
--- a/block/partitions/check.h
+++ b/block/partitions/check.h
@@ -15,13 +15,15 @@ struct parsed_partitions {
int flags;
bool has_info;
struct partition_meta_info info;
- } parts[DISK_MAX_PARTS];
+ } *parts;
int next;
int limit;
bool access_beyond_eod;
char *pp_buf;
};
+void free_partitions(struct parsed_partitions *state);
+
struct parsed_partitions *
check_partition(struct gendisk *, struct block_device *);
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index b62fb88b871..ff5804e2f1d 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -310,15 +310,23 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
goto fail;
}
- /* Check the GUID Partition Table header size */
+ /* Check the GUID Partition Table header size is too big */
if (le32_to_cpu((*gpt)->header_size) >
bdev_logical_block_size(state->bdev)) {
- pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
+ pr_debug("GUID Partition Table Header size is too large: %u > %u\n",
le32_to_cpu((*gpt)->header_size),
bdev_logical_block_size(state->bdev));
goto fail;
}
+ /* Check the GUID Partition Table header size is too small */
+ if (le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header)) {
+ pr_debug("GUID Partition Table Header size is too small: %u < %zu\n",
+ le32_to_cpu((*gpt)->header_size),
+ sizeof(gpt_header));
+ goto fail;
+ }
+
/* Check the GUID Partition Table CRC */
origcrc = le32_to_cpu((*gpt)->header_crc32);
(*gpt)->header_crc32 = 0;
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
index 11f688bd76c..76d8ba6379a 100644
--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -63,6 +63,10 @@ int mac_partition(struct parsed_partitions *state)
put_dev_sector(sect);
return 0;
}
+
+ if (blocks_in_map >= state->limit)
+ blocks_in_map = state->limit - 1;
+
strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
for (slot = 1; slot <= blocks_in_map; ++slot) {
int pos = slot * secsize;
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 8752a5d2656..7681cd295ab 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -455,14 +455,19 @@ int msdos_partition(struct parsed_partitions *state)
data = read_part_sector(state, 0, &sect);
if (!data)
return -1;
- if (!msdos_magic_present(data + 510)) {
+
+ /*
+ * Note order! (some AIX disks, e.g. unbootable kind,
+ * have no MSDOS 55aa)
+ */
+ if (aix_magic_present(state, data)) {
put_dev_sector(sect);
+ strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
return 0;
}
- if (aix_magic_present(state, data)) {
+ if (!msdos_magic_present(data + 510)) {
put_dev_sector(sect);
- strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
return 0;
}